Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--	mm/huge_memory.c	95
1 file changed, 24 insertions(+), 71 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b5783d81eda9..bfa142e67b1c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -20,6 +20,7 @@
 #include <linux/mman.h>
 #include <linux/pagemap.h>
 #include <linux/migrate.h>
+#include <linux/hashtable.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -62,12 +63,11 @@ static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
 static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
 
 static int khugepaged(void *none);
-static int mm_slots_hash_init(void);
 static int khugepaged_slab_init(void);
-static void khugepaged_slab_free(void);
 
-#define MM_SLOTS_HASH_HEADS 1024
-static struct hlist_head *mm_slots_hash __read_mostly;
+#define MM_SLOTS_HASH_BITS 10
+static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
+
 static struct kmem_cache *mm_slot_cache __read_mostly;
 
 /**
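
For context on the DEFINE_HASHTABLE() introduced above: <linux/hashtable.h> declares the table as a fixed, statically initialised array of hlist buckets, which is why the runtime mm_slots_hash_init() allocation removed below is no longer needed. A simplified paraphrase of the macro (not part of this diff):

	/* Roughly: 2^bits empty hlist buckets, initialised at compile time */
	#define DEFINE_HASHTABLE(name, bits)				\
		struct hlist_head name[1 << (bits)] =			\
			{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }

With MM_SLOTS_HASH_BITS = 10 this keeps the same 1024 buckets as the old MM_SLOTS_HASH_HEADS.
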
@@ -105,7 +105,6 @@ static int set_recommended_min_free_kbytes(void)
 	struct zone *zone;
 	int nr_zones = 0;
 	unsigned long recommended_min;
-	extern int min_free_kbytes;
 
 	if (!khugepaged_enabled())
 		return 0;
@@ -634,12 +633,6 @@ static int __init hugepage_init(void)
 	if (err)
 		goto out;
 
-	err = mm_slots_hash_init();
-	if (err) {
-		khugepaged_slab_free();
-		goto out;
-	}
-
 	register_shrinker(&huge_zero_page_shrinker);
 
 	/*
@@ -1302,7 +1295,6 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	int target_nid;
 	int current_nid = -1;
 	bool migrated;
-	bool page_locked = false;
 
 	spin_lock(&mm->page_table_lock);
 	if (unlikely(!pmd_same(pmd, *pmdp)))
@@ -1324,7 +1316,6 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	/* Acquire the page lock to serialise THP migrations */
 	spin_unlock(&mm->page_table_lock);
 	lock_page(page);
-	page_locked = true;
 
 	/* Confirm the PTE did not change while locked */
 	spin_lock(&mm->page_table_lock);
@@ -1337,34 +1328,26 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	/* Migrate the THP to the requested node */
 	migrated = migrate_misplaced_transhuge_page(mm, vma,
-				pmdp, pmd, addr,
-				page, target_nid);
-	if (migrated)
-		current_nid = target_nid;
-	else {
-		spin_lock(&mm->page_table_lock);
-		if (unlikely(!pmd_same(pmd, *pmdp))) {
-			unlock_page(page);
-			goto out_unlock;
-		}
-		goto clear_pmdnuma;
-	}
+				pmdp, pmd, addr, page, target_nid);
+	if (!migrated)
+		goto check_same;
 
-	task_numa_fault(current_nid, HPAGE_PMD_NR, migrated);
+	task_numa_fault(target_nid, HPAGE_PMD_NR, true);
 	return 0;
 
+check_same:
+	spin_lock(&mm->page_table_lock);
+	if (unlikely(!pmd_same(pmd, *pmdp)))
+		goto out_unlock;
 clear_pmdnuma:
 	pmd = pmd_mknonnuma(pmd);
 	set_pmd_at(mm, haddr, pmdp, pmd);
 	VM_BUG_ON(pmd_numa(*pmdp));
 	update_mmu_cache_pmd(vma, addr, pmdp);
-	if (page_locked)
-		unlock_page(page);
-
 out_unlock:
 	spin_unlock(&mm->page_table_lock);
 	if (current_nid != -1)
-		task_numa_fault(current_nid, HPAGE_PMD_NR, migrated);
+		task_numa_fault(current_nid, HPAGE_PMD_NR, false);
 	return 0;
 }
 
@@ -1656,7 +1639,7 @@ static void __split_huge_page_refcount(struct page *page)
 		page_tail->mapping = page->mapping;
 
 		page_tail->index = page->index + i;
-		page_xchg_last_nid(page_tail, page_last_nid(page));
+		page_nid_xchg_last(page_tail, page_nid_last(page));
 
 		BUG_ON(!PageAnon(page_tail));
 		BUG_ON(!PageUptodate(page_tail));
@@ -1846,7 +1829,7 @@ int split_huge_page(struct page *page)
 
 	BUG_ON(PageCompound(page));
 out_unlock:
-	anon_vma_unlock(anon_vma);
+	anon_vma_unlock_write(anon_vma);
 	put_anon_vma(anon_vma);
 out:
 	return ret;
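
The anon_vma_unlock() -> anon_vma_unlock_write() renames here and in the two collapse_huge_page() hunks below track the conversion of the anon_vma root lock to an rw_semaphore; the _write suffix makes the exclusive-lock intent explicit. The helpers look roughly like this in <linux/rmap.h> of this era (a paraphrase, not part of this diff):

	static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
	{
		down_write(&anon_vma->root->rwsem);
	}

	static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
	{
		up_write(&anon_vma->root->rwsem);
	}
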
@@ -1908,12 +1891,6 @@ static int __init khugepaged_slab_init(void)
 	return 0;
 }
 
-static void __init khugepaged_slab_free(void)
-{
-	kmem_cache_destroy(mm_slot_cache);
-	mm_slot_cache = NULL;
-}
-
 static inline struct mm_slot *alloc_mm_slot(void)
 {
 	if (!mm_slot_cache)	/* initialization failed */
@@ -1926,47 +1903,23 @@ static inline void free_mm_slot(struct mm_slot *mm_slot)
 	kmem_cache_free(mm_slot_cache, mm_slot);
 }
 
-static int __init mm_slots_hash_init(void)
-{
-	mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
-				GFP_KERNEL);
-	if (!mm_slots_hash)
-		return -ENOMEM;
-	return 0;
-}
-
-#if 0
-static void __init mm_slots_hash_free(void)
-{
-	kfree(mm_slots_hash);
-	mm_slots_hash = NULL;
-}
-#endif
-
 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
 {
 	struct mm_slot *mm_slot;
-	struct hlist_head *bucket;
 	struct hlist_node *node;
 
-	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
-				% MM_SLOTS_HASH_HEADS];
-	hlist_for_each_entry(mm_slot, node, bucket, hash) {
+	hash_for_each_possible(mm_slots_hash, mm_slot, node, hash, (unsigned long)mm)
 		if (mm == mm_slot->mm)
 			return mm_slot;
-	}
+
 	return NULL;
 }
 
 static void insert_to_mm_slots_hash(struct mm_struct *mm,
 				    struct mm_slot *mm_slot)
 {
-	struct hlist_head *bucket;
-
-	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
-				% MM_SLOTS_HASH_HEADS];
 	mm_slot->mm = mm;
-	hlist_add_head(&mm_slot->hash, bucket);
+	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
 }
 
 static inline int khugepaged_test_exit(struct mm_struct *mm)
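
Taken together, the conversion replaces the open-coded "((unsigned long)mm / sizeof(struct mm_struct)) % MM_SLOTS_HASH_HEADS" bucket arithmetic with generic helpers that hash the key internally. A minimal sketch of the same insert/lookup pattern, with hypothetical my_* names; the extra hlist_node cursor argument to hash_for_each_possible() matches the hashtable API as of this kernel version:

	#include <linux/hashtable.h>

	#define MY_HASH_BITS 10
	static DEFINE_HASHTABLE(my_hash, MY_HASH_BITS);	/* 1 << 10 buckets */

	struct my_slot {
		struct mm_struct *mm;
		struct hlist_node hash;
	};

	static void my_insert(struct my_slot *slot, struct mm_struct *mm)
	{
		slot->mm = mm;
		hash_add(my_hash, &slot->hash, (unsigned long)mm);
	}

	static struct my_slot *my_lookup(struct mm_struct *mm)
	{
		struct my_slot *slot;
		struct hlist_node *node;

		/* Walks only the bucket this key hashes to; collisions
		 * are filtered by the mm == slot->mm check. */
		hash_for_each_possible(my_hash, slot, node, hash, (unsigned long)mm)
			if (slot->mm == mm)
				return slot;
		return NULL;
	}
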
@@ -2035,7 +1988,7 @@ void __khugepaged_exit(struct mm_struct *mm)
 	spin_lock(&khugepaged_mm_lock);
 	mm_slot = get_mm_slot(mm);
 	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
-		hlist_del(&mm_slot->hash);
+		hash_del(&mm_slot->hash);
 		list_del(&mm_slot->mm_node);
 		free = 1;
 	}
@@ -2368,7 +2321,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 		BUG_ON(!pmd_none(*pmd));
 		set_pmd_at(mm, address, pmd, _pmd);
 		spin_unlock(&mm->page_table_lock);
-		anon_vma_unlock(vma->anon_vma);
+		anon_vma_unlock_write(vma->anon_vma);
 		goto out;
 	}
 
@@ -2376,7 +2329,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * All pages are isolated and locked so anon_vma rmap
 	 * can't run anymore.
 	 */
-	anon_vma_unlock(vma->anon_vma);
+	anon_vma_unlock_write(vma->anon_vma);
 
 	__collapse_huge_page_copy(pte, new_page, vma, address, ptl);
 	pte_unmap(pte);
@@ -2423,7 +2376,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 	struct page *page;
 	unsigned long _address;
 	spinlock_t *ptl;
-	int node = -1;
+	int node = NUMA_NO_NODE;
 
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
@@ -2453,7 +2406,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 		 * be more sophisticated and look at more pages,
 		 * but isn't for now.
 		 */
-		if (node == -1)
+		if (node == NUMA_NO_NODE)
 			node = page_to_nid(page);
 		VM_BUG_ON(PageCompound(page));
 		if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
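
The -1 -> NUMA_NO_NODE substitutions in this and the previous hunk are purely cosmetic; the constant comes from <linux/numa.h>:

	#define	NUMA_NO_NODE	(-1)
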
@@ -2484,7 +2437,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 
 	if (khugepaged_test_exit(mm)) {
 		/* free mm_slot */
-		hlist_del(&mm_slot->hash);
+		hash_del(&mm_slot->hash);
 		list_del(&mm_slot->mm_node);
 
 		/*
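
Note that hash_del(), used here and in the __khugepaged_exit() hunk above, takes only the node rather than the table: removing an entry from an hlist never needs the bucket head. Paraphrasing <linux/hashtable.h>:

	static inline void hash_del(struct hlist_node *node)
	{
		hlist_del_init(node);
	}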