Diffstat (limited to 'mm/hugetlb.c')
 mm/hugetlb.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 919b86a2164d..47f6070d7c46 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1457,7 +1457,7 @@ int __weak alloc_bootmem_huge_page(struct hstate *h)
         return 0;
 
 found:
-        BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
+        BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
         /* Put them into a private list first because mem_map is not up yet */
         list_add(&m->list, &huge_boot_pages);
         m->hstate = h;
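The new check is equivalent to the old open-coded test: IS_ALIGNED(x, a) in the kernel headers is the same low-bits mask trick, so BUG_ON(!IS_ALIGNED(...)) fires in exactly the cases the old BUG_ON did, just more readably. A minimal user-space sketch of that mask test (names and values below are illustrative, not from the kernel):

#include <stdio.h>

/* Same trick IS_ALIGNED() uses: for a power-of-two size, (a - 1) is a mask
 * of the low bits, and an aligned address has none of them set. */
#define ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
        unsigned long huge_sz = 2UL << 20;                    /* e.g. a 2 MiB huge page */

        printf("%d\n", ALIGNED(6UL << 20, huge_sz));          /* 1: 6 MiB is aligned */
        printf("%d\n", ALIGNED((6UL << 20) + 4096, huge_sz)); /* 0: off by one 4 KiB page */
        return 0;
}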
@@ -2083,7 +2083,7 @@ static void hugetlb_register_node(struct node *node)
  * devices of nodes that have memory. All on-line nodes should have
  * registered their associated device by this time.
  */
-static void hugetlb_register_all_nodes(void)
+static void __init hugetlb_register_all_nodes(void)
 {
         int nid;
 
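For context (a hedged note, not part of the diff itself): __init places a function in the kernel's .init.text section, which is discarded once boot-time initialization finishes, so this annotation only holds if hugetlb_register_all_nodes() is reached solely from init code such as hugetlb_init(). A minimal sketch of the annotation, assuming the usual include/linux/init.h definition:

#include <linux/init.h>

/* Freed after boot along with the rest of .init.text; must never be
 * reachable from code that runs later. */
static void __init example_boot_only_setup(void)
{
        /* ... one-time registration work ... */
}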
@@ -2726,9 +2726,9 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
          * on its way out. We're lucky that the flag has such an appropriate
          * name, and can in fact be safely cleared here. We could clear it
          * before the __unmap_hugepage_range above, but all that's necessary
-         * is to clear it before releasing the i_mmap_mutex. This works
+         * is to clear it before releasing the i_mmap_rwsem. This works
          * because in the context this is called, the VMA is about to be
-         * destroyed and the i_mmap_mutex is held.
+         * destroyed and the i_mmap_rwsem is held.
          */
         vma->vm_flags &= ~VM_MAYSHARE;
 }
@@ -2774,7 +2774,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
          * this mapping should be shared between all the VMAs,
          * __unmap_hugepage_range() is called as the lock is already held
          */
-        mutex_lock(&mapping->i_mmap_mutex);
+        i_mmap_lock_write(mapping);
         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
                 /* Do not unmap the current VMA */
                 if (iter_vma == vma)
@@ -2791,7 +2791,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                         unmap_hugepage_range(iter_vma, address,
                                              address + huge_page_size(h), page);
         }
-        mutex_unlock(&mapping->i_mmap_mutex);
+        i_mmap_unlock_write(mapping);
 }
 
 /*
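Throughout the file, mutex_lock()/mutex_unlock() on i_mmap_mutex is replaced with the i_mmap_lock_write()/i_mmap_unlock_write() wrappers that accompany the switch to an rwsem. A sketch of what those helpers amount to, assuming the definitions added to include/linux/fs.h by this conversion (not compilable standalone):

/* Thin wrappers over the address_space's interval-tree lock; write mode
 * gives the same exclusion the old i_mmap_mutex did, while read-side
 * helpers become possible for paths that only walk i_mmap. */
static inline void i_mmap_lock_write(struct address_space *mapping)
{
        down_write(&mapping->i_mmap_rwsem);
}

static inline void i_mmap_unlock_write(struct address_space *mapping)
{
        up_write(&mapping->i_mmap_rwsem);
}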
@@ -3348,7 +3348,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
         flush_cache_range(vma, address, end);
 
         mmu_notifier_invalidate_range_start(mm, start, end);
-        mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
+        i_mmap_lock_write(vma->vm_file->f_mapping);
         for (; address < end; address += huge_page_size(h)) {
                 spinlock_t *ptl;
                 ptep = huge_pte_offset(mm, address);
@@ -3370,13 +3370,13 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                 spin_unlock(ptl);
         }
         /*
-         * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
+         * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
          * may have cleared our pud entry and done put_page on the page table:
-         * once we release i_mmap_mutex, another task can do the final put_page
+         * once we release i_mmap_rwsem, another task can do the final put_page
          * and that page table be reused and filled with junk.
          */
         flush_tlb_range(vma, start, end);
-        mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+        i_mmap_unlock_write(vma->vm_file->f_mapping);
         mmu_notifier_invalidate_range_end(mm, start, end);
 
         return pages << h->order;
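The comment in this hunk carries the key ordering rule: huge_pmd_unshare() may drop the last reference on a shared page table, so the TLB flush must happen while the i_mmap lock is still held, otherwise another task could free and reuse that page table while stale TLB entries still point at it. A hypothetical caller-side sketch of that ordering (not the kernel function itself; the name is illustrative):

/* Hypothetical sketch of the ordering hugetlb_change_protection() keeps:
 * flush the TLB before i_mmap_unlock_write(), never after. */
static void change_prot_ordering_sketch(struct vm_area_struct *vma,
                                        unsigned long start, unsigned long end)
{
        struct address_space *mapping = vma->vm_file->f_mapping;

        i_mmap_lock_write(mapping);
        /* ... update huge PTEs; huge_pmd_unshare() may drop a page table ... */
        flush_tlb_range(vma, start, end);   /* while the lock is still held */
        i_mmap_unlock_write(mapping);       /* only now may the table be freed and reused */
}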
@@ -3525,7 +3525,7 @@ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
  * and returns the corresponding pte. While this is not necessary for the
  * !shared pmd case because we can allocate the pmd later as well, it makes the
  * code much cleaner. pmd allocation is essential for the shared case because
- * pud has to be populated inside the same i_mmap_mutex section - otherwise
+ * pud has to be populated inside the same i_mmap_rwsem section - otherwise
  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
  * bad pmd for sharing.
  */
@@ -3544,7 +3544,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
         if (!vma_shareable(vma, addr))
                 return (pte_t *)pmd_alloc(mm, pud, addr);
 
-        mutex_lock(&mapping->i_mmap_mutex);
+        i_mmap_lock_write(mapping);
         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
                 if (svma == vma)
                         continue;
@@ -3572,7 +3572,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
         spin_unlock(ptl);
 out:
         pte = (pte_t *)pmd_alloc(mm, pud, addr);
-        mutex_unlock(&mapping->i_mmap_mutex);
+        i_mmap_unlock_write(mapping);
         return pte;
 }
 
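As the comment before huge_pmd_share() (earlier hunk) explains, the search of the i_mmap interval tree and the population of this process's PUD must both happen inside one i_mmap_rwsem write section, or a racing task could miss the sharing or pick a PMD page that is about to go away. A hypothetical outline of that structure (a sketch of the flow with an invented name, not the kernel code; see mm/hugetlb.c for the real function):

/* Hypothetical outline of huge_pmd_share()'s locking structure: find a VMA
 * already mapping this file range, reuse its PMD page, and populate our own
 * PUD, all without dropping i_mmap_rwsem in between. */
pte_t *huge_pmd_share_outline(struct mm_struct *mm, struct vm_area_struct *vma,
                              unsigned long addr, pud_t *pud)
{
        struct address_space *mapping = vma->vm_file->f_mapping;
        pte_t *pte;

        i_mmap_lock_write(mapping);
        /* 1. walk mapping->i_mmap for another VMA covering this file offset    */
        /* 2. if its PUD is populated, take a reference on that PMD page and
         *    pud_populate() our own PUD under the page-table lock              */
        pte = (pte_t *)pmd_alloc(mm, pud, addr);  /* 3. returns the now-shared pmd,
                                                   *    or allocates one if nothing
                                                   *    was shared                */
        i_mmap_unlock_write(mapping);

        return pte;
}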