author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2012-10-25 08:16:34 -0400
committer	Mel Gorman <mgorman@suse.de>	2012-12-11 09:42:41 -0500
commit		7039e1dbec6eeaa8ecab43a82d6589eeced995c3 (patch)
tree		ffd8b5a571c5d385c61122da097cdd8b666ca02f /mm/migrate.c
parent		771fb4d806a92bf6c988fcfbd286ae40a9374332 (diff)
mm: migrate: Introduce migrate_misplaced_page()
Note: This was originally based on Peter's patch "mm/migrate: Introduce
migrate_misplaced_page()" but borrows extremely heavily from Andrea's
"autonuma: memory follows CPU algorithm and task/mm_autonuma stats
collection". The end result is barely recognisable so signed-offs
had to be dropped. If original authors are ok with it, I'll
re-add the signed-off-bys.
Add migrate_misplaced_page(), which handles migrating pages from
faults; an illustrative caller sketch follows the sign-off tags below.
Based-on-work-by: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Based-on-work-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Based-on-work-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
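For context, here is a minimal sketch of how a fault handler might call the
new helper. The handler name, the way target_nid is chosen, and the vmstat
counter are illustrative assumptions, not part of this patch:

	/*
	 * Illustrative sketch only: a NUMA hinting fault handler hands a
	 * misplaced page to migrate_misplaced_page(). The handler name and
	 * how target_nid was picked are assumed for illustration.
	 */
	static void numa_fault_sketch(struct page *page, int target_nid)
	{
		/*
		 * The helper documents that the caller holds an elevated
		 * reference count, which the helper drops before returning.
		 */
		get_page(page);

		/* A non-zero return means the page was isolated for migration */
		if (migrate_misplaced_page(page, target_nid))
			count_vm_event(NUMA_PAGE_MIGRATE); /* assumed counter */
	}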
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--	mm/migrate.c	108
1 file changed, 106 insertions(+), 2 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 27be9c923dc1..d168aec98427 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -282,7 +282,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page,
 		struct buffer_head *head, enum migrate_mode mode)
 {
-	int expected_count;
+	int expected_count = 0;
 	void **pslot;
 
 	if (!mapping) {
@@ -1415,4 +1415,108 @@ int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
 	}
 	return err;
 }
-#endif
+
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * Returns true if this is a safe migration target node for misplaced NUMA
+ * pages. Currently it only checks the watermarks, which is crude.
+ */
+static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
+				   int nr_migrate_pages)
+{
+	int z;
+	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
+		struct zone *zone = pgdat->node_zones + z;
+
+		if (!populated_zone(zone))
+			continue;
+
+		if (zone->all_unreclaimable)
+			continue;
+
+		/* Avoid waking kswapd by allocating nr_migrate_pages pages. */
+		if (!zone_watermark_ok(zone, 0,
+				       high_wmark_pages(zone) +
+				       nr_migrate_pages,
+				       0, 0))
+			continue;
+		return true;
+	}
+	return false;
+}
+
+static struct page *alloc_misplaced_dst_page(struct page *page,
+					     unsigned long data,
+					     int **result)
+{
+	int nid = (int) data;
+	struct page *newpage;
+
+	newpage = alloc_pages_exact_node(nid,
+					 (GFP_HIGHUSER_MOVABLE | GFP_THISNODE |
+					  __GFP_NOMEMALLOC | __GFP_NORETRY |
+					  __GFP_NOWARN) &
+					 ~GFP_IOFS, 0);
+	return newpage;
+}
+
+/*
+ * Attempt to migrate a misplaced page to the specified destination
+ * node. Caller is expected to have an elevated reference count on
+ * the page that will be dropped by this function before returning.
+ */
+int migrate_misplaced_page(struct page *page, int node)
+{
+	int isolated = 0;
+	LIST_HEAD(migratepages);
+
+	/*
+	 * Don't migrate pages that are mapped in multiple processes.
+	 * TODO: Handle false sharing detection instead of this hammer.
+	 */
+	if (page_mapcount(page) != 1) {
+		put_page(page);
+		goto out;
+	}
+
+	/* Avoid migrating to a node that is nearly full */
+	if (migrate_balanced_pgdat(NODE_DATA(node), 1)) {
+		int page_lru;
+
+		if (isolate_lru_page(page)) {
+			put_page(page);
+			goto out;
+		}
+		isolated = 1;
+
+		/*
+		 * Page is isolated, which takes a reference count, so now the
+		 * caller's reference can be safely dropped without the page
+		 * disappearing underneath us during migration.
+		 */
+		put_page(page);
+
+		page_lru = page_is_file_cache(page);
+		inc_zone_page_state(page, NR_ISOLATED_ANON + page_lru);
+		list_add(&page->lru, &migratepages);
+	}
+
+	if (isolated) {
+		int nr_remaining;
+
+		nr_remaining = migrate_pages(&migratepages,
+					     alloc_misplaced_dst_page,
+					     node, false, MIGRATE_ASYNC,
+					     MR_NUMA_MISPLACED);
+		if (nr_remaining) {
+			putback_lru_pages(&migratepages);
+			isolated = 0;
+		}
+	}
+	BUG_ON(!list_empty(&migratepages));
+out:
+	return isolated;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#endif /* CONFIG_NUMA */
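To make the balance test above concrete, here is a small standalone model of
the headroom check performed by migrate_balanced_pgdat(). It is a sketch under
assumed, simplified types; the real check goes through zone_watermark_ok()
with the high watermark plus the number of pages to migrate:

	#include <stdbool.h>

	/* Simplified stand-in for struct zone; these fields are assumptions */
	struct zone_model {
		long free_pages;	/* pages currently free in the zone */
		long high_wmark;	/* the zone's high watermark, in pages */
	};

	/*
	 * Mirrors the intent of zone_watermark_ok(zone, 0,
	 * high_wmark_pages(zone) + nr_migrate_pages, 0, 0): the zone only
	 * qualifies as a migration target if it can absorb the pages while
	 * staying above its high watermark, so kswapd is never woken.
	 */
	static bool zone_can_take(const struct zone_model *z, long nr_migrate_pages)
	{
		return z->free_pages > z->high_wmark + nr_migrate_pages;
	}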