Diffstat (limited to 'mm/hugetlb.c')
 mm/hugetlb.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7c02b9dadfb0..dd30f22b35e0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -13,6 +13,7 @@
 #include <linux/nodemask.h>
 #include <linux/pagemap.h>
 #include <linux/mempolicy.h>
+#include <linux/compiler.h>
 #include <linux/cpuset.h>
 #include <linux/mutex.h>
 #include <linux/bootmem.h>
@@ -1535,6 +1536,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
 	while (min_count < persistent_huge_pages(h)) {
 		if (!free_pool_huge_page(h, nodes_allowed, 0))
 			break;
+		cond_resched_lock(&hugetlb_lock);
 	}
 	while (count < persistent_huge_pages(h)) {
 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
@@ -2690,7 +2692,8 @@ retry_avoidcopy:
 			BUG_ON(huge_pte_none(pte));
 			spin_lock(ptl);
 			ptep = huge_pte_offset(mm, address & huge_page_mask(h));
-			if (likely(pte_same(huge_ptep_get(ptep), pte)))
+			if (likely(ptep &&
+				   pte_same(huge_ptep_get(ptep), pte)))
 				goto retry_avoidcopy;
 			/*
 			 * race occurs while re-acquiring page table
@@ -2734,7 +2737,7 @@ retry_avoidcopy:
 	 */
 	spin_lock(ptl);
 	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
-	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
+	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
 		ClearPagePrivate(new_page);
 
 		/* Break COW */
@@ -2896,8 +2899,7 @@ retry:
 	if (anon_rmap) {
 		ClearPagePrivate(page);
 		hugepage_add_new_anon_rmap(page, vma, address);
-	}
-	else
+	} else
 		page_dup_rmap(page);
 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
 				&& (vma->vm_flags & VM_SHARED)));
@@ -3185,6 +3187,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	BUG_ON(address >= end);
 	flush_cache_range(vma, address, end);
 
+	mmu_notifier_invalidate_range_start(mm, start, end);
 	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
 	for (; address < end; address += huge_page_size(h)) {
 		spinlock_t *ptl;
@@ -3214,6 +3217,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	 */
 	flush_tlb_range(vma, start, end);
 	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+	mmu_notifier_invalidate_range_end(mm, start, end);
 
 	return pages << h->order;
 }
@@ -3518,7 +3522,7 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
 #else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */
 
 /* Can be overriden by architectures */
-__attribute__((weak)) struct page *
+struct page * __weak
 follow_huge_pud(struct mm_struct *mm, unsigned long address,
 		pud_t *pud, int write)
 {