author		Mel Gorman <mgorman@suse.de>	2012-11-14 16:41:46 -0500
committer	Mel Gorman <mgorman@suse.de>	2012-12-11 09:42:50 -0500
commit		a8f6077213d285ca08dbf6d4a67470787388138b (patch)
tree		1ce0c66b221a97e485d513552c75d29fe84b74bd /mm/migrate.c
parent		8177a420ed7c16c171ed3c3aec5b0676db38c247 (diff)
mm: numa: Rate limit the amount of memory that is migrated between nodes
NOTE: This is very heavily based on similar logic in autonuma. It should
be signed off by Andrea, but because there was no standalone patch and it
is sufficiently different from what he did, the signed-off is omitted. It
will be added back if requested.

If a large number of pages are misplaced, the memory bus can be saturated
just migrating pages between nodes. This patch rate-limits the amount of
memory that can be migrated between nodes.

Signed-off-by: Mel Gorman <mgorman@suse.de>
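For scale, the defaults added below allow 128 << (20 - PAGE_SHIFT) pages, i.e. 128MB worth of pages, per 100ms window; up to ten such windows elapse per second, which is where the "1280M per second" figure in the new comment comes from.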
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--	mm/migrate.c	30
1 file changed, 29 insertions(+), 1 deletion(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 23bba5d6edff..4b8267f1842f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1461,12 +1461,21 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
 }
 
 /*
+ * page migration rate limiting control.
+ * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs
+ * window of time. Default here says do not migrate more than 1280M per second.
+ */
+static unsigned int migrate_interval_millisecs __read_mostly = 100;
+static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
+
+/*
  * Attempt to migrate a misplaced page to the specified destination
  * node. Caller is expected to have an elevated reference count on
  * the page that will be dropped by this function before returning.
  */
 int migrate_misplaced_page(struct page *page, int node)
 {
+	pg_data_t *pgdat = NODE_DATA(node);
 	int isolated = 0;
 	LIST_HEAD(migratepages);
 
@@ -1479,8 +1488,27 @@ int migrate_misplaced_page(struct page *page, int node)
 		goto out;
 	}
 
+	/*
+	 * Rate-limit the amount of data that is being migrated to a node.
+	 * Optimal placement is no good if the memory bus is saturated and
+	 * all the time is being spent migrating!
+	 */
+	spin_lock(&pgdat->numabalancing_migrate_lock);
+	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
+		pgdat->numabalancing_migrate_nr_pages = 0;
+		pgdat->numabalancing_migrate_next_window = jiffies +
+			msecs_to_jiffies(migrate_interval_millisecs);
+	}
+	if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
+		spin_unlock(&pgdat->numabalancing_migrate_lock);
+		put_page(page);
+		goto out;
+	}
+	pgdat->numabalancing_migrate_nr_pages++;
+	spin_unlock(&pgdat->numabalancing_migrate_lock);
+
 	/* Avoid migrating to a node that is nearly full */
-	if (migrate_balanced_pgdat(NODE_DATA(node), 1)) {
+	if (migrate_balanced_pgdat(pgdat, 1)) {
 		int page_lru;
 
 		if (isolate_lru_page(page)) {
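
The check added above is a per-node fixed-window rate limiter: when the current window has expired the page counter is reset and a new window is opened, and once the counter exceeds ratelimit_pages further misplaced pages are released with put_page() instead of being migrated. The sketch below is a minimal user-space illustration of the same idea, not kernel code: struct node_ratelimit, now_ms(), budget_pages and window_ms are hypothetical stand-ins for the numabalancing_migrate_* fields and tunables used in the patch, and the per-node spinlock the patch takes around the counter update is omitted.

#define _POSIX_C_SOURCE 200809L
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Illustrative per-node state; stands in for the numabalancing_migrate_*
 * fields referenced in the patch above. */
struct node_ratelimit {
	uint64_t window_end_ms;		/* end of the current accounting window */
	uint64_t nr_pages;		/* pages charged to the current window */
};

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
}

/*
 * Return true if one more page may be migrated to this node.
 * budget_pages and window_ms play the roles of ratelimit_pages and
 * migrate_interval_millisecs; the kernel version wraps this update
 * in a per-node spinlock because many CPUs can fault concurrently.
 */
static bool migrate_allowed(struct node_ratelimit *rl,
			    uint64_t budget_pages, uint64_t window_ms)
{
	uint64_t now = now_ms();

	if (now > rl->window_end_ms) {
		/* Window expired: zero the counter and open a new window. */
		rl->nr_pages = 0;
		rl->window_end_ms = now + window_ms;
	}
	if (rl->nr_pages > budget_pages)
		return false;		/* over budget: skip this migration */
	rl->nr_pages++;			/* charge this page to the window */
	return true;
}

int main(void)
{
	struct node_ratelimit rl = { 0, 0 };
	unsigned int allowed = 0, refused = 0;

	/* Budget of 4 pages per 100ms window, requested 10 times at once.
	 * The strict '>' check, as in the patch, lets budget + 1 pages
	 * through before the limiter starts refusing. */
	for (int i = 0; i < 10; i++) {
		if (migrate_allowed(&rl, 4, 100))
			allowed++;
		else
			refused++;
	}
	printf("allowed %u, refused %u\n", allowed, refused);
	return 0;
}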