Skip to content

Commit 2ef7dbb

Browse files
yhuang-intelakpm00
authored and committed
migrate_pages: try migrate in batch asynchronously firstly
When we have locked more than one folios, we cannot wait the lock or bit (e.g., page lock, buffer head lock, writeback bit) synchronously. Otherwise deadlock may be triggered. This make it hard to batch the synchronous migration directly. This patch re-enables batching synchronous migration via trying to migrate in batch asynchronously firstly. And any folios that are failed to be migrated asynchronously will be migrated synchronously one by one. Test shows that this can restore the TLB flushing batching performance for synchronous migration effectively. Link: https://lkml.kernel.org/r/[email protected] Fixes: 5dfab10 ("migrate_pages: batch _unmap and _move") Signed-off-by: "Huang, Ying" <[email protected]> Tested-by: Hugh Dickins <[email protected]> Reviewed-by: Baolin Wang <[email protected]> Cc: "Xu, Pengfei" <[email protected]> Cc: Christoph Hellwig <[email protected]> Cc: Stefan Roesch <[email protected]> Cc: Tejun Heo <[email protected]> Cc: Xin Hao <[email protected]> Cc: Zi Yan <[email protected]> Cc: Yang Shi <[email protected]> Cc: Matthew Wilcox <[email protected]> Cc: Mike Kravetz <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent a21d213 commit 2ef7dbb

File tree

1 file changed

+62
-18
lines changed

1 file changed

+62
-18
lines changed

mm/migrate.c

Lines changed: 62 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -1112,9 +1112,8 @@ static void migrate_folio_done(struct folio *src,
11121112
/* Obtain the lock on page, remove all ptes. */
11131113
static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page,
11141114
unsigned long private, struct folio *src,
1115-
struct folio **dstp, int force,
1116-
enum migrate_mode mode, enum migrate_reason reason,
1117-
struct list_head *ret)
1115+
struct folio **dstp, enum migrate_mode mode,
1116+
enum migrate_reason reason, struct list_head *ret)
11181117
{
11191118
struct folio *dst;
11201119
int rc = -EAGAIN;
@@ -1144,7 +1143,7 @@ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page
11441143
dst->private = NULL;
11451144

11461145
if (!folio_trylock(src)) {
1147-
if (!force || mode == MIGRATE_ASYNC)
1146+
if (mode == MIGRATE_ASYNC)
11481147
goto out;
11491148

11501149
/*
@@ -1182,8 +1181,6 @@ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page
11821181
rc = -EBUSY;
11831182
goto out;
11841183
}
1185-
if (!force)
1186-
goto out;
11871184
folio_wait_writeback(src);
11881185
}
11891186

@@ -1497,6 +1494,9 @@ static inline int try_split_folio(struct folio *folio, struct list_head *split_f
14971494
#define NR_MAX_BATCHED_MIGRATION 512
14981495
#endif
14991496
#define NR_MAX_MIGRATE_PAGES_RETRY 10
1497+
#define NR_MAX_MIGRATE_ASYNC_RETRY 3
1498+
#define NR_MAX_MIGRATE_SYNC_RETRY \
1499+
(NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
15001500

15011501
struct migrate_pages_stats {
15021502
int nr_succeeded; /* Normal and large folios migrated successfully, in
@@ -1678,8 +1678,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
16781678
}
16791679

16801680
rc = migrate_folio_unmap(get_new_page, put_new_page, private,
1681-
folio, &dst, pass > 2, mode,
1682-
reason, ret_folios);
1681+
folio, &dst, mode, reason, ret_folios);
16831682
/*
16841683
* The rules are:
16851684
* Success: folio will be freed
@@ -1857,6 +1856,51 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
18571856
return rc;
18581857
}
18591858

/*
 * migrate_pages_sync() - migrate a list of folios, batching where possible.
 *
 * First attempt to migrate the whole @from list in batch with MIGRATE_ASYNC
 * mode (up to NR_MAX_MIGRATE_ASYNC_RETRY passes), accumulating the async
 * attempt's success counters into @stats.  Folios that could not be migrated
 * asynchronously are then retried one at a time with the caller's
 * (synchronous) @mode, with up to NR_MAX_MIGRATE_SYNC_RETRY passes each.
 *
 * On fatal error, returns a negative error code and splices the remaining
 * folios onto @ret_folios; otherwise returns the number of folios that
 * failed (THP splits are counted as THP failures up front, since the split
 * pieces are retried individually).
 */
1859+
static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page,
1860+
free_page_t put_new_page, unsigned long private,
1861+
enum migrate_mode mode, int reason, struct list_head *ret_folios,
1862+
struct list_head *split_folios, struct migrate_pages_stats *stats)
1863+
{
1864+
int rc, nr_failed = 0;
1865+
LIST_HEAD(folios);
1866+
struct migrate_pages_stats astats;
1867+
1868+
memset(&astats, 0, sizeof(astats));
1869+
/* Try to migrate in batch with MIGRATE_ASYNC mode firstly */
1870+
rc = migrate_pages_batch(from, get_new_page, put_new_page, private, MIGRATE_ASYNC,
1871+
reason, &folios, split_folios, &astats,
1872+
NR_MAX_MIGRATE_ASYNC_RETRY);
1873+
stats->nr_succeeded += astats.nr_succeeded;
1874+
stats->nr_thp_succeeded += astats.nr_thp_succeeded;
1875+
stats->nr_thp_split += astats.nr_thp_split;
1876+
/* Fatal async failure: give up and hand the leftovers back to the caller. */
1877+
if (rc < 0) {
1878+
stats->nr_failed_pages += astats.nr_failed_pages;
1879+
stats->nr_thp_failed += astats.nr_thp_failed;
1880+
list_splice_tail(&folios, ret_folios);
1881+
return rc;
1882+
}
1883+
stats->nr_thp_failed += astats.nr_thp_split;
1884+
nr_failed += astats.nr_thp_split;
1885+
/*
1886+
* Fall back to migrate all failed folios one by one synchronously. All
1887+
* failed folios except split THPs will be retried, so their failure
1888+
* isn't counted
1889+
*/
1890+
list_splice_tail_init(&folios, from);
1891+
while (!list_empty(from)) {
1892+
/*
1893+
* Move a single folio into the batch list: per the commit message,
1894+
* waiting for locks with multiple folios held can deadlock, so
1895+
* synchronous migration proceeds one folio at a time.
1896+
*/
1897+
list_move(from->next, &folios);
1898+
rc = migrate_pages_batch(&folios, get_new_page, put_new_page,
1899+
private, mode, reason, ret_folios,
1900+
split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
1901+
list_splice_tail_init(&folios, ret_folios);
1902+
if (rc < 0)
1903+
return rc;
1904+
nr_failed += rc;
1905+
}
1906+
1907+
return nr_failed;
1908+
}
1909+
18601904
/*
18611905
* migrate_pages - migrate the folios specified in a list, to the free folios
18621906
* supplied as the target for the page migration
@@ -1888,7 +1932,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
18881932
enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
18891933
{
18901934
int rc, rc_gather;
1891-
int nr_pages, batch;
1935+
int nr_pages;
18921936
struct folio *folio, *folio2;
18931937
LIST_HEAD(folios);
18941938
LIST_HEAD(ret_folios);
@@ -1904,10 +1948,6 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
19041948
if (rc_gather < 0)
19051949
goto out;
19061950

1907-
if (mode == MIGRATE_ASYNC)
1908-
batch = NR_MAX_BATCHED_MIGRATION;
1909-
else
1910-
batch = 1;
19111951
again:
19121952
nr_pages = 0;
19131953
list_for_each_entry_safe(folio, folio2, from, lru) {
@@ -1918,16 +1958,20 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
19181958
}
19191959

19201960
nr_pages += folio_nr_pages(folio);
1921-
if (nr_pages >= batch)
1961+
if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
19221962
break;
19231963
}
1924-
if (nr_pages >= batch)
1964+
if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
19251965
list_cut_before(&folios, from, &folio2->lru);
19261966
else
19271967
list_splice_init(from, &folios);
1928-
rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
1929-
mode, reason, &ret_folios, &split_folios, &stats,
1930-
NR_MAX_MIGRATE_PAGES_RETRY);
1968+
if (mode == MIGRATE_ASYNC)
1969+
rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
1970+
mode, reason, &ret_folios, &split_folios, &stats,
1971+
NR_MAX_MIGRATE_PAGES_RETRY);
1972+
else
1973+
rc = migrate_pages_sync(&folios, get_new_page, put_new_page, private,
1974+
mode, reason, &ret_folios, &split_folios, &stats);
19311975
list_splice_tail_init(&folios, &ret_folios);
19321976
if (rc < 0) {
19331977
rc_gather = rc;

0 commit comments

Comments
 (0)