Diffstat (limited to 'mm/migrate.c')
-rw-r--r--  mm/migrate.c  62
1 file changed, 4 insertions(+), 58 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index d6a2e89b086a..84381b55b2bd 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -275,6 +275,9 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 	if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
 		mlock_vma_page(new);
 
+	if (PageTransHuge(page) && PageMlocked(page))
+		clear_page_mlock(page);
+
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache(vma, pvmw.address, pvmw.pte);
 }
@@ -1411,7 +1414,7 @@ retry:
 		 * we encounter them after the rest of the list
 		 * is processed.
 		 */
-		if (PageTransHuge(page)) {
+		if (PageTransHuge(page) && !PageHuge(page)) {
 			lock_page(page);
 			rc = split_huge_page_to_list(page, from);
 			unlock_page(page);
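[Editor's note] The condition change above matters because PageTransHuge() only tests the compound head, so it is also true for hugetlbfs pages, which split_huge_page_to_list() cannot split; the added !PageHuge() check restricts this allocation-failure fallback to real THPs. The fallback itself is a generic "split the failed item and re-queue the pieces for the retry pass" pattern. Below is a minimal, runnable userspace sketch of that pattern; every name in it is hypothetical and nothing here is kernel API.

	/* Toy analogue of migrate_pages()'s split-and-retry fallback:
	 * if migrating a "huge" item fails, split it and queue the
	 * pieces back on the list so the retry pass handles them as
	 * base items. Purely illustrative. */
	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_ITEMS 16

	struct item {
		int size;        /* 1 = base page, >1 = compound ("huge") */
		bool splittable; /* false models a hugetlbfs page */
	};

	static bool try_migrate(struct item *it)
	{
		return it->size == 1;	/* pretend only base pages migrate */
	}

	static int migrate_list(struct item *list, int n)
	{
		int failed = 0;

		for (int i = 0; i < n; i++) {
			if (try_migrate(&list[i]))
				continue;
			/* Mirror of "PageTransHuge(page) && !PageHuge(page)":
			 * only split compounds that are actually splittable. */
			if (list[i].size > 1 && list[i].splittable &&
			    n + 1 < MAX_ITEMS) {
				list[i].size /= 2;	     /* shrink in place */
				list[n].size = list[i].size; /* queue the tail */
				list[n].splittable = true;
				n++;
				i--;			     /* retry this slot */
				continue;
			}
			failed++;	/* unsplittable: count as a failure */
		}
		return failed;
	}

	int main(void)
	{
		struct item list[MAX_ITEMS] = {
			{ .size = 1, .splittable = true  },  /* base page */
			{ .size = 2, .splittable = true  },  /* THP-like */
			{ .size = 2, .splittable = false },  /* hugetlb-like */
		};

		printf("failed: %d\n", migrate_list(list, 3));
		return 0;
	}

Running it prints "failed: 1": the splittable compound is broken up and migrated piecewise, while the hugetlb-like item is counted as a failure rather than being split, which is exactly the behaviour the hunk restores.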
@@ -1855,46 +1858,6 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
 	return newpage;
 }
 
-/*
- * page migration rate limiting control.
- * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs
- * window of time. Default here says do not migrate more than 1280M per second.
- */
-static unsigned int migrate_interval_millisecs __read_mostly = 100;
-static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
-
-/* Returns true if the node is migrate rate-limited after the update */
-static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
-					unsigned long nr_pages)
-{
-	/*
-	 * Rate-limit the amount of data that is being migrated to a node.
-	 * Optimal placement is no good if the memory bus is saturated and
-	 * all the time is being spent migrating!
-	 */
-	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
-		spin_lock(&pgdat->numabalancing_migrate_lock);
-		pgdat->numabalancing_migrate_nr_pages = 0;
-		pgdat->numabalancing_migrate_next_window = jiffies +
-			msecs_to_jiffies(migrate_interval_millisecs);
-		spin_unlock(&pgdat->numabalancing_migrate_lock);
-	}
-	if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
-		trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
-								nr_pages);
-		return true;
-	}
-
-	/*
-	 * This is an unlocked non-atomic update so errors are possible.
-	 * The consequences are failing to migrate when we potentially should
-	 * have, which is not severe enough to warrant locking. If it is ever
-	 * a problem, it can be converted to a per-cpu counter.
-	 */
-	pgdat->numabalancing_migrate_nr_pages += nr_pages;
-	return false;
-}
-
 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 {
 	int page_lru;
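[Editor's note] The function deleted above is a classic fixed-window rate limiter keyed on jiffies: once the window expires, reset the counter under a lock; then refuse work if the window's budget is spent, otherwise charge the caller with a deliberately unlocked update. The sketch below is a minimal userspace analogue of that logic, not kernel code; names such as rl_charge, window_ms, and limit_pages are made up for illustration.

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <time.h>

	struct ratelimit {
		pthread_mutex_t lock;      /* stands in for numabalancing_migrate_lock */
		uint64_t next_window_ms;   /* end of the current accounting window */
		unsigned long nr_pages;    /* pages charged in the current window */
		unsigned long limit_pages; /* budget per window (ratelimit_pages) */
		unsigned int window_ms;    /* window length (migrate_interval_millisecs) */
	};

	static struct ratelimit rl = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.window_ms = 100,                  /* kernel default: 100 ms */
		.limit_pages = 128ul << (20 - 12), /* 128M in 4K pages */
	};

	static uint64_t now_ms(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
	}

	/* Returns true if the caller is rate-limited after charging nr_pages. */
	static bool rl_charge(struct ratelimit *rl, unsigned long nr_pages)
	{
		/* Start a fresh window once the old one has expired. */
		if (now_ms() > rl->next_window_ms) {
			pthread_mutex_lock(&rl->lock);
			rl->nr_pages = 0;
			rl->next_window_ms = now_ms() + rl->window_ms;
			pthread_mutex_unlock(&rl->lock);
		}
		if (rl->nr_pages > rl->limit_pages)
			return true;	/* over budget: skip this migration */

		/* Unlocked non-atomic update, as in the original: a rare
		 * lost update only causes an occasional skipped migration,
		 * which was judged tolerable there. */
		rl->nr_pages += nr_pages;
		return false;
	}

With the kernel's defaults (a 100 ms window and 128 << (20 - PAGE_SHIFT) pages, i.e. 128M per window), this caps migration at roughly 1280M per second per node, which is where the removed comment's figure comes from. The remaining hunks of this diff delete the two call sites, so NUMA-balancing migration is no longer rate-limited at all.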
@@ -1967,14 +1930,6 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 	if (page_is_file_cache(page) && PageDirty(page))
 		goto out;
 
-	/*
-	 * Rate-limit the amount of data that is being migrated to a node.
-	 * Optimal placement is no good if the memory bus is saturated and
-	 * all the time is being spent migrating!
-	 */
-	if (numamigrate_update_ratelimit(pgdat, 1))
-		goto out;
-
 	isolated = numamigrate_isolate_page(pgdat, page);
 	if (!isolated)
 		goto out;
@@ -2021,14 +1976,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	unsigned long mmun_start = address & HPAGE_PMD_MASK;
 	unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
 
-	/*
-	 * Rate-limit the amount of data that is being migrated to a node.
-	 * Optimal placement is no good if the memory bus is saturated and
-	 * all the time is being spent migrating!
-	 */
-	if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
-		goto out_dropref;
-
 	new_page = alloc_pages_node(node,
 		(GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
 		HPAGE_PMD_ORDER);
@@ -2125,7 +2072,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 
 out_fail:
 	count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
-out_dropref:
 	ptl = pmd_lock(mm, pmd);
 	if (pmd_same(*pmd, entry)) {
 		entry = pmd_modify(entry, vma->vm_page_prot);