Diffstat (limited to 'mm/migrate.c')
-rw-r--r--  mm/migrate.c  89
1 file changed, 45 insertions, 44 deletions
diff --git a/mm/migrate.c b/mm/migrate.c
index 9194375b2307..a8025befc323 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -72,28 +72,12 @@ int migrate_prep_local(void)
 }
 
 /*
- * Add isolated pages on the list back to the LRU under page lock
- * to avoid leaking evictable pages back onto unevictable list.
- */
-void putback_lru_pages(struct list_head *l)
-{
-	struct page *page;
-	struct page *page2;
-
-	list_for_each_entry_safe(page, page2, l, lru) {
-		list_del(&page->lru);
-		dec_zone_page_state(page, NR_ISOLATED_ANON +
-				page_is_file_cache(page));
-		putback_lru_page(page);
-	}
-}
-
-/*
  * Put previously isolated pages back onto the appropriate lists
  * from where they were once taken off for compaction/migration.
  *
- * This function shall be used instead of putback_lru_pages(),
- * whenever the isolated pageset has been built by isolate_migratepages_range()
+ * This function shall be used whenever the isolated pageset has been
+ * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
+ * and isolate_huge_page().
  */
 void putback_movable_pages(struct list_head *l)
 {
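With putback_lru_pages() removed, callers that isolate pages for migration are expected to return leftovers through putback_movable_pages(), which per the updated comment also covers balloon and hugetlbfs pages. A minimal caller sketch, not part of this diff; alloc_target_page() and the MR_MEMORY_HOTPLUG reason are illustrative choices:

/* Hypothetical allocator matching the new_page_t callback type. */
static struct page *alloc_target_page(struct page *page, unsigned long private,
				      int **result)
{
	return alloc_page(GFP_HIGHUSER_MOVABLE);	/* placeholder policy */
}

static void example_migrate_list(struct list_head *pagelist)
{
	int err;

	err = migrate_pages(pagelist, alloc_target_page, 0,
			    MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
	if (err)
		putback_movable_pages(pagelist);	/* was putback_lru_pages() */
}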
@@ -199,7 +183,12 @@ out:
  */
 static void remove_migration_ptes(struct page *old, struct page *new)
 {
-	rmap_walk(new, remove_migration_pte, old);
+	struct rmap_walk_control rwc = {
+		.rmap_one = remove_migration_pte,
+		.arg = old,
+	};
+
+	rmap_walk(new, &rwc);
 }
 
 /*
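This hunk is one instance of the conversion from passing a callback and argument directly to rmap_walk() to passing a struct rmap_walk_control. A sketch of the pattern with illustrative names (example_rmap_one() and example_walk() are not kernel functions); the callback shape matches what .rmap_one is given here:

/* Called once for each VMA that maps @page; @arg is rwc->arg. */
static int example_rmap_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long addr, void *arg)
{
	/* inspect or rewrite the mapping of @page at @addr within @vma */
	return SWAP_AGAIN;		/* keep walking the reverse mapping */
}

static void example_walk(struct page *page, void *private)
{
	struct rmap_walk_control rwc = {
		.rmap_one	= example_rmap_one,
		.arg		= private,
	};

	rmap_walk(page, &rwc);
}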
@@ -563,14 +552,6 @@ void migrate_page_copy(struct page *newpage, struct page *page)
  * Migration functions
  ***********************************************************/
 
-/* Always fail migration. Used for mappings that are not movable */
-int fail_migrate_page(struct address_space *mapping,
-			struct page *newpage, struct page *page)
-{
-	return -EIO;
-}
-EXPORT_SYMBOL(fail_migrate_page);
-
 /*
  * Common logic to directly migrate a single page suitable for
  * pages that do not use PagePrivate/PagePrivate2.
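With fail_migrate_page() gone, a mapping whose pages must not be migrated can refuse from its own ->migratepage() hook. A hedged sketch (examplefs is made up; the prototype shown is the four-argument migratepage form used by the other helpers in this file):

/* Illustrative a_ops hook for a mapping whose pages must never move. */
static int examplefs_migratepage(struct address_space *mapping,
				 struct page *newpage, struct page *page,
				 enum migrate_mode mode)
{
	return -EIO;	/* refuse to migrate pages of this mapping */
}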
@@ -1008,7 +989,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 {
 	int rc = 0;
 	int *result = NULL;
-	struct page *new_hpage = get_new_page(hpage, private, &result);
+	struct page *new_hpage;
 	struct anon_vma *anon_vma = NULL;
 
 	/*
@@ -1018,9 +999,12 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	 * tables or check whether the hugepage is pmd-based or not before
 	 * kicking migration.
 	 */
-	if (!hugepage_migration_support(page_hstate(hpage)))
+	if (!hugepage_migration_support(page_hstate(hpage))) {
+		putback_active_hugepage(hpage);
 		return -ENOSYS;
+	}
 
+	new_hpage = get_new_page(hpage, private, &result);
 	if (!new_hpage)
 		return -ENOMEM;
 
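The two hunks above reorder the entry checks so that the unsupported-hugepage case exits before a destination page is allocated, and put the isolated hugepage back on that early-exit path. A condensed sketch of the resulting flow (illustrative only; example_move_huge() is not a kernel function, and the real code goes on to lock, unmap and copy):

static int example_move_huge(new_page_t get_new_page, unsigned long private,
			     struct page *hpage)
{
	int *result = NULL;
	struct page *new_hpage;

	if (!hugepage_migration_support(page_hstate(hpage))) {
		putback_active_hugepage(hpage);	/* undo isolation, avoid a leak */
		return -ENOSYS;
	}

	new_hpage = get_new_page(hpage, private, &result);
	if (!new_hpage)
		return -ENOMEM;

	/* locking, unmapping and the page copy would follow here */
	return MIGRATEPAGE_SUCCESS;
}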
@@ -1120,7 +1104,12 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 			nr_succeeded++;
 			break;
 		default:
-			/* Permanent failure */
+			/*
+			 * Permanent failure (-EBUSY, -ENOSYS, etc.):
+			 * unlike -EAGAIN case, the failed page is
+			 * removed from migration page list and not
+			 * retried in the next outer loop.
+			 */
 			nr_failed++;
 			break;
 		}
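The new comment describes the retry policy of migrate_pages(): -EAGAIN leaves the page on the list so a later pass retries it, any other error is final for that page. A simplified skeleton of that loop (declarations and the hugetlb branch omitted), to show where the cases above sit:

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move(get_new_page, private,
						page, pass > 2, mode);
			switch (rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;	/* still on the list, retried next pass */
				break;
			case MIGRATEPAGE_SUCCESS:
				nr_succeeded++;
				break;
			default:
				nr_failed++;	/* already off the list, not retried */
				break;
			}
		}
	}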
@@ -1594,31 +1583,38 @@ bool migrate_ratelimited(int node)
 }
 
 /* Returns true if the node is migrate rate-limited after the update */
-bool numamigrate_update_ratelimit(pg_data_t *pgdat, unsigned long nr_pages)
+static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
+					unsigned long nr_pages)
 {
-	bool rate_limited = false;
-
 	/*
 	 * Rate-limit the amount of data that is being migrated to a node.
 	 * Optimal placement is no good if the memory bus is saturated and
 	 * all the time is being spent migrating!
 	 */
-	spin_lock(&pgdat->numabalancing_migrate_lock);
 	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
+		spin_lock(&pgdat->numabalancing_migrate_lock);
 		pgdat->numabalancing_migrate_nr_pages = 0;
 		pgdat->numabalancing_migrate_next_window = jiffies +
 			msecs_to_jiffies(migrate_interval_millisecs);
+		spin_unlock(&pgdat->numabalancing_migrate_lock);
 	}
-	if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages)
-		rate_limited = true;
-	else
-		pgdat->numabalancing_migrate_nr_pages += nr_pages;
-	spin_unlock(&pgdat->numabalancing_migrate_lock);
+	if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
+		trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
+							nr_pages);
+		return true;
+	}
 
-	return rate_limited;
+	/*
+	 * This is an unlocked non-atomic update so errors are possible.
+	 * The consequences are failing to migrate when we potentially should
+	 * have, which is not severe enough to warrant locking. If it is ever
+	 * a problem, it can be converted to a per-cpu counter.
+	 */
+	pgdat->numabalancing_migrate_nr_pages += nr_pages;
+	return false;
 }
 
-int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
+static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 {
 	int page_lru;
 
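The rework above keeps the common path lock-free: the spinlock is taken only to reset the window, the over-budget case returns early with a tracepoint, and the per-window page count is bumped without locking. A standalone model of the same window/budget logic, in plain userspace C purely for illustration (the locking detail is dropped; the one-second window and page budget are made-up values):

#include <stdbool.h>
#include <time.h>

#define WINDOW_SECS	1
#define BUDGET_PAGES	4096UL		/* stand-in for ratelimit_pages */

static time_t window_end;
static unsigned long window_pages;

static bool rate_limited(unsigned long nr_pages)
{
	time_t now = time(NULL);

	if (now >= window_end) {		/* window expired: open a new one */
		window_end = now + WINDOW_SECS;
		window_pages = 0;
	}

	if (window_pages > BUDGET_PAGES)	/* budget already spent: refuse */
		return true;

	window_pages += nr_pages;		/* charge this request */
	return false;
}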
@@ -1705,7 +1701,12 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
 				     node, MIGRATE_ASYNC, MR_NUMA_MISPLACED);
 	if (nr_remaining) {
-		putback_lru_pages(&migratepages);
+		if (!list_empty(&migratepages)) {
+			list_del(&page->lru);
+			dec_zone_page_state(page, NR_ISOLATED_ANON +
+					page_is_file_cache(page));
+			putback_lru_page(page);
+		}
 		isolated = 0;
 	} else
 		count_vm_numa_event(NUMA_PAGE_MIGRATE);
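Since migrate_misplaced_page() isolates a single base page, the open-coded putback above simply undoes the accounting applied when the page was isolated. A simplified sketch of that counterpart, condensed from numamigrate_isolate_page() and its caller (the node-balance check and THP accounting are left out):

	if (isolate_lru_page(page))
		return 0;			/* lost the race, nothing isolated */

	list_add(&page->lru, &migratepages);
	inc_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));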