mm: separate move/undo parts from migrate_pages_batch()
Functionally, no change.  This is a preparation for the luf mechanism,
which requires separate folio lists for its own handling during
migration.  Refactor migrate_pages_batch() so as to split the move and
undo parts out into two new helpers, migrate_folios_move() and
migrate_folios_undo().

Link: https://lkml.kernel.org/r/20250115103403.11882-1-byungchul@sk.com
Signed-off-by: Byungchul Park <byungchul@sk.com>
Reviewed-by: Shivank Garg <shivankg@amd.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit f752e677f8 (parent ff9b7e0b17)
---
 mm/migrate.c | 134 +++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 83 insertions(+), 51 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
@@ -1687,6 +1687,81 @@ static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
 	return nr_failed;
 }
 
+static void migrate_folios_move(struct list_head *src_folios,
+		struct list_head *dst_folios,
+		free_folio_t put_new_folio, unsigned long private,
+		enum migrate_mode mode, int reason,
+		struct list_head *ret_folios,
+		struct migrate_pages_stats *stats,
+		int *retry, int *thp_retry, int *nr_failed,
+		int *nr_retry_pages)
+{
+	struct folio *folio, *folio2, *dst, *dst2;
+	bool is_thp;
+	int nr_pages;
+	int rc;
+
+	dst = list_first_entry(dst_folios, struct folio, lru);
+	dst2 = list_next_entry(dst, lru);
+	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
+		is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
+		nr_pages = folio_nr_pages(folio);
+
+		cond_resched();
+
+		rc = migrate_folio_move(put_new_folio, private,
+				folio, dst, mode,
+				reason, ret_folios);
+		/*
+		 * The rules are:
+		 *	Success: folio will be freed
+		 *	-EAGAIN: stay on the unmap_folios list
+		 *	Other errno: put on ret_folios list
+		 */
+		switch (rc) {
+		case -EAGAIN:
+			*retry += 1;
+			*thp_retry += is_thp;
+			*nr_retry_pages += nr_pages;
+			break;
+		case MIGRATEPAGE_SUCCESS:
+			stats->nr_succeeded += nr_pages;
+			stats->nr_thp_succeeded += is_thp;
+			break;
+		default:
+			*nr_failed += 1;
+			stats->nr_thp_failed += is_thp;
+			stats->nr_failed_pages += nr_pages;
+			break;
+		}
+		dst = dst2;
+		dst2 = list_next_entry(dst, lru);
+	}
+}
+
+static void migrate_folios_undo(struct list_head *src_folios,
+		struct list_head *dst_folios,
+		free_folio_t put_new_folio, unsigned long private,
+		struct list_head *ret_folios)
+{
+	struct folio *folio, *folio2, *dst, *dst2;
+
+	dst = list_first_entry(dst_folios, struct folio, lru);
+	dst2 = list_next_entry(dst, lru);
+	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
+		int old_page_state = 0;
+		struct anon_vma *anon_vma = NULL;
+
+		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
+		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
+				anon_vma, true, ret_folios);
+		list_del(&dst->lru);
+		migrate_folio_undo_dst(dst, true, put_new_folio, private);
+		dst = dst2;
+		dst2 = list_next_entry(dst, lru);
+	}
+}
+
 /*
  * migrate_pages_batch() first unmaps folios in the from list as many as
  * possible, then move the unmapped folios.
@@ -1709,7 +1784,7 @@ static int migrate_pages_batch(struct list_head *from,
 	int pass = 0;
 	bool is_thp = false;
 	bool is_large = false;
-	struct folio *folio, *folio2, *dst = NULL, *dst2;
+	struct folio *folio, *folio2, *dst = NULL;
 	int rc, rc_saved = 0, nr_pages;
 	LIST_HEAD(unmap_folios);
 	LIST_HEAD(dst_folios);
@@ -1880,42 +1955,11 @@ static int migrate_pages_batch(struct list_head *from,
 		thp_retry = 0;
 		nr_retry_pages = 0;
 
-		dst = list_first_entry(&dst_folios, struct folio, lru);
-		dst2 = list_next_entry(dst, lru);
-		list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
-			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
-			nr_pages = folio_nr_pages(folio);
-
-			cond_resched();
-
-			rc = migrate_folio_move(put_new_folio, private,
-						folio, dst, mode,
-						reason, ret_folios);
-			/*
-			 * The rules are:
-			 *	Success: folio will be freed
-			 *	-EAGAIN: stay on the unmap_folios list
-			 *	Other errno: put on ret_folios list
-			 */
-			switch(rc) {
-			case -EAGAIN:
-				retry++;
-				thp_retry += is_thp;
-				nr_retry_pages += nr_pages;
-				break;
-			case MIGRATEPAGE_SUCCESS:
-				stats->nr_succeeded += nr_pages;
-				stats->nr_thp_succeeded += is_thp;
-				break;
-			default:
-				nr_failed++;
-				stats->nr_thp_failed += is_thp;
-				stats->nr_failed_pages += nr_pages;
-				break;
-			}
-			dst = dst2;
-			dst2 = list_next_entry(dst, lru);
-		}
+		/* Move the unmapped folios */
+		migrate_folios_move(&unmap_folios, &dst_folios,
+				put_new_folio, private, mode, reason,
+				ret_folios, stats, &retry, &thp_retry,
+				&nr_failed, &nr_retry_pages);
 	}
 	nr_failed += retry;
 	stats->nr_thp_failed += thp_retry;
@@ -1924,20 +1968,8 @@ static int migrate_pages_batch(struct list_head *from,
 	rc = rc_saved ? : nr_failed;
 out:
 	/* Cleanup remaining folios */
-	dst = list_first_entry(&dst_folios, struct folio, lru);
-	dst2 = list_next_entry(dst, lru);
-	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
-		int old_page_state = 0;
-		struct anon_vma *anon_vma = NULL;
-
-		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
-		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
-				anon_vma, true, ret_folios);
-		list_del(&dst->lru);
-		migrate_folio_undo_dst(dst, true, put_new_folio, private);
-		dst = dst2;
-		dst2 = list_next_entry(dst, lru);
-	}
+	migrate_folios_undo(&unmap_folios, &dst_folios,
+			put_new_folio, private, ret_folios);
 
 	return rc;
 }
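
Both helpers depend on an invariant that migrate_pages_batch() already
maintained: unmap_folios and dst_folios are kept in the same order, so a
single lockstep walk pairs each source folio with its destination,
prefetching the next dst entry before the current pair may be unlinked.
Below is a minimal userspace sketch of that traversal pattern, not
kernel code: list_head, list_del() and the iteration macros are
simplified re-implementations of the <linux/list.h> helpers (typeof
assumes GCC or Clang), and struct folio is reduced to a stand-in that
only carries an id.

/*
 * Standalone sketch of the lockstep src/dst list walk used by
 * migrate_folios_move()/migrate_folios_undo().  Build with: cc sketch.c
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_first_entry(head, type, member) \
	list_entry((head)->next, type, member)
#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, typeof(*(pos)), member)
/*
 * "safe" variant: the next element is fetched up front, so the current
 * one may be unlinked from the list inside the loop body.
 */
#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = list_first_entry(head, typeof(*pos), member),	\
	     n = list_next_entry(pos, member);				\
	     &pos->member != (head);					\
	     pos = n, n = list_next_entry(n, member))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

struct folio { int id; struct list_head lru; };

int main(void)
{
	LIST_HEAD(src_folios);
	LIST_HEAD(dst_folios);
	struct folio s[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
	struct folio d[3] = { { .id = 10 }, { .id = 11 }, { .id = 12 } };
	struct folio *folio, *folio2, *dst, *dst2;
	int i;

	for (i = 0; i < 3; i++) {
		list_add_tail(&s[i].lru, &src_folios);
		list_add_tail(&d[i].lru, &dst_folios);
	}

	/*
	 * Same shape as the loops in the patch: dst2 is prefetched so
	 * that dst can be unlinked before stepping to the next pair.
	 */
	dst = list_first_entry(&dst_folios, struct folio, lru);
	dst2 = list_next_entry(dst, lru);
	list_for_each_entry_safe(folio, folio2, &src_folios, lru) {
		printf("src folio %d pairs with dst folio %d\n",
		       folio->id, dst->id);
		list_del(&folio->lru);
		list_del(&dst->lru);
		dst = dst2;
		dst2 = list_next_entry(dst, lru);
	}
	return 0;
}

Pairing the lists positionally avoids storing a src-to-dst pointer in
each folio; the trade-off is that every consumer must advance both
lists at the same rate, which is exactly the invariant the two
extracted helpers now encapsulate in one place.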