Diffstat (limited to 'mm/hugetlb.c')

-rw-r--r--  mm/hugetlb.c | 46
1 file changed, 24 insertions, 22 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index dee6cf4e6d34..04306b9de90d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -690,15 +690,11 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
  */
 int PageHuge(struct page *page)
 {
-	compound_page_dtor *dtor;
-
 	if (!PageCompound(page))
 		return 0;
 
 	page = compound_head(page);
-	dtor = get_compound_page_dtor(page);
-
-	return dtor == free_huge_page;
+	return get_compound_page_dtor(page) == free_huge_page;
 }
 EXPORT_SYMBOL_GPL(PageHuge);
 
@@ -708,16 +704,11 @@ EXPORT_SYMBOL_GPL(PageHuge);
  */
 int PageHeadHuge(struct page *page_head)
 {
-	compound_page_dtor *dtor;
-
 	if (!PageHead(page_head))
 		return 0;
 
-	dtor = get_compound_page_dtor(page_head);
-
-	return dtor == free_huge_page;
+	return get_compound_page_dtor(page_head) == free_huge_page;
 }
-EXPORT_SYMBOL_GPL(PageHeadHuge);
 
 pgoff_t __basepage_index(struct page *page)
 {
@@ -1280,9 +1271,9 @@ int __weak alloc_bootmem_huge_page(struct hstate *h)
 	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
 		void *addr;
 
-		addr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
-				huge_page_size(h), huge_page_size(h), 0);
-
+		addr = memblock_virt_alloc_try_nid_nopanic(
+				huge_page_size(h), huge_page_size(h),
+				0, BOOTMEM_ALLOC_ACCESSIBLE, node);
 		if (addr) {
 			/*
 			 * Use the beginning of the huge page to store the
@@ -1322,8 +1313,8 @@ static void __init gather_bootmem_prealloc(void)
 
 #ifdef CONFIG_HIGHMEM
 		page = pfn_to_page(m->phys >> PAGE_SHIFT);
-		free_bootmem_late((unsigned long)m,
-				sizeof(struct huge_bootmem_page));
+		memblock_free_late(__pa(m),
+				sizeof(struct huge_bootmem_page));
 #else
 		page = virt_to_page(m);
 #endif
@@ -2355,17 +2346,27 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	int cow;
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
+	unsigned long mmun_start;	/* For mmu_notifiers */
+	unsigned long mmun_end;		/* For mmu_notifiers */
+	int ret = 0;
 
 	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 
+	mmun_start = vma->vm_start;
+	mmun_end = vma->vm_end;
+	if (cow)
+		mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
+
 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
 		spinlock_t *src_ptl, *dst_ptl;
 		src_pte = huge_pte_offset(src, addr);
 		if (!src_pte)
 			continue;
 		dst_pte = huge_pte_alloc(dst, addr, sz);
-		if (!dst_pte)
-			goto nomem;
+		if (!dst_pte) {
+			ret = -ENOMEM;
+			break;
+		}
 
 		/* If the pagetables are shared don't copy or take references */
 		if (dst_pte == src_pte)
@@ -2386,10 +2387,11 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 		spin_unlock(src_ptl);
 		spin_unlock(dst_ptl);
 	}
-	return 0;
 
-nomem:
-	return -ENOMEM;
+	if (cow)
+		mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
+
+	return ret;
 }
 
 static int is_hugetlb_entry_migration(pte_t pte)
@@ -3079,7 +3081,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 same_page:
 	if (pages) {
 		pages[i] = mem_map_offset(page, pfn_offset);
-		get_page(pages[i]);
+		get_page_foll(pages[i]);
 	}
 
 	if (vmas)