mirror of
https://github.com/torvalds/linux.git
synced 2025-11-30 23:16:01 +07:00
treewide: remove MIGRATEPAGE_SUCCESS
At this point MIGRATEPAGE_SUCCESS is misnamed for all folio users, and now that we remove MIGRATEPAGE_UNMAP, it's really the only "success" return value that the code uses and expects. Let's just get rid of MIGRATEPAGE_SUCCESS completely and just use "0" for success. Link: https://lkml.kernel.org/r/20250811143949.1117439-3-david@redhat.com Signed-off-by: David Hildenbrand <david@redhat.com> Reviewed-by: Zi Yan <ziy@nvidia.com> [mm] Acked-by: Dave Kleikamp <dave.kleikamp@oracle.com> [jfs] Acked-by: David Sterba <dsterba@suse.com> [btrfs] Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Reviewed-by: Byungchul Park <byungchul@sk.com> Cc: Alistair Popple <apopple@nvidia.com> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Benjamin LaHaise <bcrl@kvack.org> Cc: Chris Mason <clm@fb.com> Cc: Christian Brauner <brauner@kernel.org> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Dave Kleikamp <shaggy@kernel.org> Cc: Eugenio Pérez <eperezma@redhat.com> Cc: Gregory Price <gourry@gourry.net> Cc: "Huang, Ying" <ying.huang@linux.alibaba.com> Cc: Jan Kara <jack@suse.cz> Cc: Jason Wang <jasowang@redhat.com> Cc: Jerrin Shaji George <jerrin.shaji-george@broadcom.com> Cc: Josef Bacik <josef@toxicpanda.com> Cc: Joshua Hahn <joshua.hahnjy@gmail.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Mathew Brost <matthew.brost@intel.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: "Michael S. Tsirkin" <mst@redhat.com> Cc: Minchan Kim <minchan@kernel.org> Cc: Muchun Song <muchun.song@linux.dev> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Oscar Salvador <osalvador@suse.de> Cc: Rakie Kim <rakie.kim@sk.com> Cc: Sergey Senozhatsky <senozhatsky@chromium.org> Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com> Cc: Lance Yang <lance.yang@linux.dev> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
committed by
Andrew Morton
parent
95c2908f1a
commit
fb49a4425c
@@ -545,7 +545,7 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
|
||||
/* balloon page list reference */
|
||||
put_page(page);
|
||||
|
||||
return MIGRATEPAGE_SUCCESS;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cmm_balloon_compaction_init(void)
|
||||
|
||||
@@ -1806,7 +1806,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
|
||||
* the list after acquiring the lock.
|
||||
*/
|
||||
get_page(newpage);
|
||||
ret = MIGRATEPAGE_SUCCESS;
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
/* Update the balloon list under the @pages_lock */
|
||||
@@ -1817,7 +1817,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
|
||||
* If we succeed just insert it to the list and update the statistics
|
||||
* under the lock.
|
||||
*/
|
||||
if (ret == MIGRATEPAGE_SUCCESS) {
|
||||
if (!ret) {
|
||||
balloon_page_insert(&b->b_dev_info, newpage);
|
||||
__count_vm_event(BALLOON_MIGRATE);
|
||||
}
|
||||
|
||||
@@ -875,7 +875,7 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
|
||||
balloon_page_finalize(page);
|
||||
put_page(page); /* balloon reference */
|
||||
|
||||
return MIGRATEPAGE_SUCCESS;
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_BALLOON_COMPACTION */
|
||||
|
||||
|
||||
2
fs/aio.c
2
fs/aio.c
@@ -445,7 +445,7 @@ static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
|
||||
folio_get(dst);
|
||||
|
||||
rc = folio_migrate_mapping(mapping, dst, src, 1);
|
||||
if (rc != MIGRATEPAGE_SUCCESS) {
|
||||
if (rc) {
|
||||
folio_put(dst);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
@@ -7421,7 +7421,7 @@ static int btrfs_migrate_folio(struct address_space *mapping,
|
||||
{
|
||||
int ret = filemap_migrate_folio(mapping, dst, src, mode);
|
||||
|
||||
if (ret != MIGRATEPAGE_SUCCESS)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (folio_test_ordered(src)) {
|
||||
@@ -7429,7 +7429,7 @@ static int btrfs_migrate_folio(struct address_space *mapping,
|
||||
folio_set_ordered(dst);
|
||||
}
|
||||
|
||||
return MIGRATEPAGE_SUCCESS;
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
#define btrfs_migrate_folio NULL
|
||||
|
||||
@@ -1052,7 +1052,7 @@ static int hugetlbfs_migrate_folio(struct address_space *mapping,
|
||||
int rc;
|
||||
|
||||
rc = migrate_huge_page_move_mapping(mapping, dst, src);
|
||||
if (rc != MIGRATEPAGE_SUCCESS)
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
if (hugetlb_folio_subpool(src)) {
|
||||
@@ -1063,7 +1063,7 @@ static int hugetlbfs_migrate_folio(struct address_space *mapping,
|
||||
|
||||
folio_migrate_flags(dst, src);
|
||||
|
||||
return MIGRATEPAGE_SUCCESS;
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
#define hugetlbfs_migrate_folio NULL
|
||||
|
||||
@@ -169,7 +169,7 @@ static int __metapage_migrate_folio(struct address_space *mapping,
|
||||
}
|
||||
|
||||
rc = filemap_migrate_folio(mapping, dst, src, mode);
|
||||
if (rc != MIGRATEPAGE_SUCCESS)
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
for (i = 0; i < MPS_PER_PAGE; i++) {
|
||||
@@ -199,7 +199,7 @@ static int __metapage_migrate_folio(struct address_space *mapping,
|
||||
}
|
||||
}
|
||||
|
||||
return MIGRATEPAGE_SUCCESS;
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_MIGRATION */
|
||||
|
||||
@@ -242,7 +242,7 @@ static int __metapage_migrate_folio(struct address_space *mapping,
|
||||
return -EAGAIN;
|
||||
|
||||
rc = filemap_migrate_folio(mapping, dst, src, mode);
|
||||
if (rc != MIGRATEPAGE_SUCCESS)
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
if (unlikely(insert_metapage(dst, mp)))
|
||||
@@ -253,7 +253,7 @@ static int __metapage_migrate_folio(struct address_space *mapping,
|
||||
mp->folio = dst;
|
||||
remove_metapage(src, mp);
|
||||
|
||||
return MIGRATEPAGE_SUCCESS;
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_MIGRATION */
|
||||
|
||||
|
||||
@@ -12,13 +12,6 @@ typedef void free_folio_t(struct folio *folio, unsigned long private);
|
||||
|
||||
struct migration_target_control;
|
||||
|
||||
/*
|
||||
* Return values from addresss_space_operations.migratepage():
|
||||
* - negative errno on page migration failure;
|
||||
* - zero on page migration success;
|
||||
*/
|
||||
#define MIGRATEPAGE_SUCCESS 0
|
||||
|
||||
/**
|
||||
* struct movable_operations - Driver page migration
|
||||
* @isolate_page:
|
||||
@@ -34,8 +27,7 @@ struct migration_target_control;
|
||||
* @src page. The driver should copy the contents of the
|
||||
* @src page to the @dst page and set up the fields of @dst page.
|
||||
* Both pages are locked.
|
||||
* If page migration is successful, the driver should
|
||||
* return MIGRATEPAGE_SUCCESS.
|
||||
* If page migration is successful, the driver should return 0.
|
||||
* If the driver cannot migrate the page at the moment, it can return
|
||||
* -EAGAIN. The VM interprets this as a temporary migration failure and
|
||||
* will retry it later. Any other error value is a permanent migration
|
||||
|
||||
40
mm/migrate.c
40
mm/migrate.c
@@ -231,18 +231,17 @@ static void putback_movable_ops_page(struct page *page)
|
||||
* src and dst are also released by migration core. These pages will not be
|
||||
* folios in the future, so that must be reworked.
|
||||
*
|
||||
* Returns MIGRATEPAGE_SUCCESS on success, otherwise a negative error
|
||||
* code.
|
||||
* Returns 0 on success, otherwise a negative error code.
|
||||
*/
|
||||
static int migrate_movable_ops_page(struct page *dst, struct page *src,
|
||||
enum migrate_mode mode)
|
||||
{
|
||||
int rc = MIGRATEPAGE_SUCCESS;
|
||||
int rc;
|
||||
|
||||
VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(src), src);
|
||||
VM_WARN_ON_ONCE_PAGE(!PageMovableOpsIsolated(src), src);
|
||||
rc = page_movable_ops(src)->migrate_page(dst, src, mode);
|
||||
if (rc == MIGRATEPAGE_SUCCESS)
|
||||
if (!rc)
|
||||
ClearPageMovableOpsIsolated(src);
|
||||
return rc;
|
||||
}
|
||||
@@ -587,7 +586,7 @@ static int __folio_migrate_mapping(struct address_space *mapping,
|
||||
if (folio_test_swapbacked(folio))
|
||||
__folio_set_swapbacked(newfolio);
|
||||
|
||||
return MIGRATEPAGE_SUCCESS;
|
||||
return 0;
|
||||
}
|
||||
|
||||
oldzone = folio_zone(folio);
|
||||
@@ -688,7 +687,7 @@ static int __folio_migrate_mapping(struct address_space *mapping,
|
||||
}
|
||||
local_irq_enable();
|
||||
|
||||
return MIGRATEPAGE_SUCCESS;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int folio_migrate_mapping(struct address_space *mapping,
|
||||
@@ -737,7 +736,7 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
|
||||
|
||||
xas_unlock_irq(&xas);
|
||||
|
||||
return MIGRATEPAGE_SUCCESS;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -853,14 +852,14 @@ static int __migrate_folio(struct address_space *mapping, struct folio *dst,
|
||||
return rc;
|
||||
|
||||
rc = __folio_migrate_mapping(mapping, dst, src, expected_count);
|
||||
if (rc != MIGRATEPAGE_SUCCESS)
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
if (src_private)
|
||||
folio_attach_private(dst, folio_detach_private(src));
|
||||
|
||||
folio_migrate_flags(dst, src);
|
||||
return MIGRATEPAGE_SUCCESS;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -967,7 +966,7 @@ recheck_buffers:
|
||||
}
|
||||
|
||||
rc = filemap_migrate_folio(mapping, dst, src, mode);
|
||||
if (rc != MIGRATEPAGE_SUCCESS)
|
||||
if (rc)
|
||||
goto unlock_buffers;
|
||||
|
||||
bh = head;
|
||||
@@ -1071,7 +1070,7 @@ static int fallback_migrate_folio(struct address_space *mapping,
|
||||
*
|
||||
* Return value:
|
||||
* < 0 - error code
|
||||
* MIGRATEPAGE_SUCCESS - success
|
||||
* 0 - success
|
||||
*/
|
||||
static int move_to_new_folio(struct folio *dst, struct folio *src,
|
||||
enum migrate_mode mode)
|
||||
@@ -1099,7 +1098,7 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
|
||||
else
|
||||
rc = fallback_migrate_folio(mapping, dst, src, mode);
|
||||
|
||||
if (rc == MIGRATEPAGE_SUCCESS) {
|
||||
if (!rc) {
|
||||
/*
|
||||
* For pagecache folios, src->mapping must be cleared before src
|
||||
* is freed. Anonymous folios must stay anonymous until freed.
|
||||
@@ -1449,7 +1448,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
|
||||
if (folio_ref_count(src) == 1) {
|
||||
/* page was freed from under us. So we are done. */
|
||||
folio_putback_hugetlb(src);
|
||||
return MIGRATEPAGE_SUCCESS;
|
||||
return 0;
|
||||
}
|
||||
|
||||
dst = get_new_folio(src, private);
|
||||
@@ -1512,8 +1511,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
|
||||
rc = move_to_new_folio(dst, src, mode);
|
||||
|
||||
if (page_was_mapped)
|
||||
remove_migration_ptes(src,
|
||||
rc == MIGRATEPAGE_SUCCESS ? dst : src, 0);
|
||||
remove_migration_ptes(src, !rc ? dst : src, 0);
|
||||
|
||||
unlock_put_anon:
|
||||
folio_unlock(dst);
|
||||
@@ -1522,7 +1520,7 @@ put_anon:
|
||||
if (anon_vma)
|
||||
put_anon_vma(anon_vma);
|
||||
|
||||
if (rc == MIGRATEPAGE_SUCCESS) {
|
||||
if (!rc) {
|
||||
move_hugetlb_state(src, dst, reason);
|
||||
put_new_folio = NULL;
|
||||
}
|
||||
@@ -1530,7 +1528,7 @@ put_anon:
|
||||
out_unlock:
|
||||
folio_unlock(src);
|
||||
out:
|
||||
if (rc == MIGRATEPAGE_SUCCESS)
|
||||
if (!rc)
|
||||
folio_putback_hugetlb(src);
|
||||
else if (rc != -EAGAIN)
|
||||
list_move_tail(&src->lru, ret);
|
||||
@@ -1640,7 +1638,7 @@ static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
|
||||
reason, ret_folios);
|
||||
/*
|
||||
* The rules are:
|
||||
* Success: hugetlb folio will be put back
|
||||
* 0: hugetlb folio will be put back
|
||||
* -EAGAIN: stay on the from list
|
||||
* -ENOMEM: stay on the from list
|
||||
* Other errno: put on ret_folios list
|
||||
@@ -1657,7 +1655,7 @@ static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
|
||||
retry++;
|
||||
nr_retry_pages += nr_pages;
|
||||
break;
|
||||
case MIGRATEPAGE_SUCCESS:
|
||||
case 0:
|
||||
stats->nr_succeeded += nr_pages;
|
||||
break;
|
||||
default:
|
||||
@@ -1711,7 +1709,7 @@ static void migrate_folios_move(struct list_head *src_folios,
|
||||
reason, ret_folios);
|
||||
/*
|
||||
* The rules are:
|
||||
* Success: folio will be freed
|
||||
* 0: folio will be freed
|
||||
* -EAGAIN: stay on the unmap_folios list
|
||||
* Other errno: put on ret_folios list
|
||||
*/
|
||||
@@ -1721,7 +1719,7 @@ static void migrate_folios_move(struct list_head *src_folios,
|
||||
*thp_retry += is_thp;
|
||||
*nr_retry_pages += nr_pages;
|
||||
break;
|
||||
case MIGRATEPAGE_SUCCESS:
|
||||
case 0:
|
||||
stats->nr_succeeded += nr_pages;
|
||||
stats->nr_thp_succeeded += is_thp;
|
||||
break;
|
||||
|
||||
@@ -778,7 +778,7 @@ static void __migrate_device_pages(unsigned long *src_pfns,
|
||||
if (migrate && migrate->fault_page == page)
|
||||
extra_cnt = 1;
|
||||
r = folio_migrate_mapping(mapping, newfolio, folio, extra_cnt);
|
||||
if (r != MIGRATEPAGE_SUCCESS)
|
||||
if (r)
|
||||
src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
|
||||
else
|
||||
folio_migrate_flags(newfolio, folio);
|
||||
|
||||
@@ -1746,7 +1746,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
|
||||
* instead.
|
||||
*/
|
||||
if (!zpdesc->zspage)
|
||||
return MIGRATEPAGE_SUCCESS;
|
||||
return 0;
|
||||
|
||||
/* The page is locked, so this pointer must remain valid */
|
||||
zspage = get_zspage(zpdesc);
|
||||
@@ -1813,7 +1813,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
|
||||
reset_zpdesc(zpdesc);
|
||||
zpdesc_put(zpdesc);
|
||||
|
||||
return MIGRATEPAGE_SUCCESS;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void zs_page_putback(struct page *page)
|
||||
|
||||
Reference in New Issue
Block a user