Diffstat (limited to 'mm')
 mm/hugetlb.c | 28 ++++++++++++++++++++++++++--
 mm/memory.c  |  2 +-
 2 files changed, 27 insertions(+), 3 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c39e4beeb63a..bc727122dd44 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2429,6 +2429,25 @@ again:
 	tlb_end_vma(tlb, vma);
 }
 
+void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+			  struct vm_area_struct *vma, unsigned long start,
+			  unsigned long end, struct page *ref_page)
+{
+	__unmap_hugepage_range(tlb, vma, start, end, ref_page);
+
+	/*
+	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
+	 * test will fail on a vma being torn down, and not grab a page table
+	 * on its way out. We're lucky that the flag has such an appropriate
+	 * name, and can in fact be safely cleared here. We could clear it
+	 * before the __unmap_hugepage_range above, but all that's necessary
+	 * is to clear it before releasing the i_mmap_mutex. This works
+	 * because in the context this is called, the VMA is about to be
+	 * destroyed and the i_mmap_mutex is held.
+	 */
+	vma->vm_flags &= ~VM_MAYSHARE;
+}
+
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			  unsigned long end, struct page *ref_page)
 {
@@ -3012,9 +3031,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 		}
 	}
 	spin_unlock(&mm->page_table_lock);
-	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
-
+	/*
+	 * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
+	 * may have cleared our pud entry and done put_page on the page table:
+	 * once we release i_mmap_mutex, another task can do the final put_page
+	 * and that page table be reused and filled with junk.
+	 */
 	flush_tlb_range(vma, start, end);
+	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 }
 
 int hugetlb_reserve_pages(struct inode *inode,
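Why clearing VM_MAYSHARE is enough to stop a concurrent sharer: x86 only shares a hugetlb page table when huge_pmd_share()'s page_table_shareable() test accepts the candidate VMA, and that test compares the two VMAs' vm_flags. A VMA whose VM_MAYSHARE has just been cleared can no longer match a live sharer. The sketch below is modeled on arch/x86/mm/hugetlbpage.c of this era; it is simplified and illustrative, not the verbatim kernel source:

static int page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/*
	 * Match the virtual addresses, the vm_flags (VM_MAYSHARE included,
	 * which __unmap_hugepage_range_final clears on a dying VMA) and
	 * the alignment of the page table page.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vma->vm_flags != svma->vm_flags ||
	    sbase < svma->vm_start || svma->vm_end < s_end)
		return 0;

	/* Non-zero return: sharing is permitted at this aligned address. */
	return saddr;
}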
diff --git a/mm/memory.c b/mm/memory.c
index ec72a616ccd4..482f089765ff 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1345,7 +1345,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 			 */
 			if (vma->vm_file) {
 				mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
-				__unmap_hugepage_range(tlb, vma, start, end, NULL);
+				__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
 				mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 			}
 		} else
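The hugetlb_change_protection() hunk applies the matching rule on the TLB side: the flush must complete while i_mmap_mutex is still held. A schematic of the window the reordering closes (task labels are hypothetical; the sequence restates the comment added in the patch):

/*
 * Before the patch:
 *
 *   Task A (hugetlb_change_protection)    Task B (other sharer)
 *
 *   huge_pmd_unshare(): clears pud,
 *   put_page() on shared page table
 *   mutex_unlock(i_mmap_mutex)
 *                                         final put_page(): page table
 *                                         freed, reused, filled with junk
 *   flush_tlb_range()   <-- too late: stale TLB entries still point
 *                           through the recycled page table
 *
 * After the patch, flush_tlb_range() runs before mutex_unlock(), so
 * Task B cannot drop the last reference until the flush is done.
 */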