Diffstat (limited to 'arch/x86/mm/hugetlbpage.c')
 arch/x86/mm/hugetlbpage.c | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index f6679a7fb8ca..b91e48512425 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -56,9 +56,16 @@ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
 }
 
 /*
- * search for a shareable pmd page for hugetlb.
+ * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
+ * and returns the corresponding pte. While this is not necessary for the
+ * !shared pmd case because we can allocate the pmd later as well, it makes the
+ * code much cleaner. pmd allocation is essential for the shared case because
+ * pud has to be populated inside the same i_mmap_mutex section - otherwise
+ * racing tasks could either miss the sharing (see huge_pte_offset) or select a
+ * bad pmd for sharing.
  */
-static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+static pte_t *
+huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 {
 	struct vm_area_struct *vma = find_vma(mm, addr);
 	struct address_space *mapping = vma->vm_file->f_mapping;
@@ -68,9 +75,10 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 	struct vm_area_struct *svma;
 	unsigned long saddr;
 	pte_t *spte = NULL;
+	pte_t *pte;
 
 	if (!vma_shareable(vma, addr))
-		return;
+		return (pte_t *)pmd_alloc(mm, pud, addr);
 
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
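The rationale in the new comment block is worth unpacking: sharing only works if the pud is populated while i_mmap_mutex is still held, so the lookup of a shareable pmd and its publication must sit in one critical section. Below is a minimal userspace sketch of that check-then-publish-under-one-lock pattern using pthreads; it is an illustration only, and the names (get_table, worker, shared_table) are hypothetical, not from the patch.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static long *shared_table;	/* stands in for the shared pmd page */

/* Look the table up *and* publish it inside one critical section,
 * mirroring how the patch moves pmd_alloc() under i_mmap_mutex. */
static long *get_table(void)
{
	long *t;

	pthread_mutex_lock(&table_lock);
	if (!shared_table)
		shared_table = calloc(512, sizeof(long));
	t = shared_table;
	pthread_mutex_unlock(&table_lock);
	return t;
}

static void *worker(void *id)
{
	/* If allocation happened after the unlock instead, a racing
	 * thread could miss the shared table and install its own. */
	printf("thread %ld: table at %p\n", (long)id, (void *)get_table());
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, (void *)1L);
	pthread_create(&b, NULL, worker, (void *)2L);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	free(shared_table);
	return 0;
}

Build with cc -pthread; both threads print the same address, which is exactly the property the kernel patch needs for the shared pmd.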
@@ -97,7 +105,9 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 		put_page(virt_to_page(spte));
 	spin_unlock(&mm->page_table_lock);
 out:
+	pte = (pte_t *)pmd_alloc(mm, pud, addr);
 	mutex_unlock(&mapping->i_mmap_mutex);
+	return pte;
 }
 
 /*
@@ -142,8 +152,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 		} else {
 			BUG_ON(sz != PMD_SIZE);
 			if (pud_none(*pud))
-				huge_pmd_share(mm, addr, pud);
-			pte = (pte_t *) pmd_alloc(mm, pud, addr);
+				pte = huge_pmd_share(mm, addr, pud);
+			else
+				pte = (pte_t *)pmd_alloc(mm, pud, addr);
 		}
 	}
 	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
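A second point from the new comment: huge_pmd_share() now calls pmd_alloc() on every path and returns the resulting pte. That is not strictly needed for the !shared case, but it leaves the caller with a single assignment per branch, as the last hunk shows. A minimal userspace sketch of that "helper always resolves to a usable entry" contract follows; the names entry_share and entry_alloc are hypothetical, not kernel code.

#include <stdio.h>
#include <stdlib.h>

struct entry { long val; };

static struct entry *shared;	/* stands in for an already-shared pmd page */

/* Stand-in for pmd_alloc(): a plain allocation that may fail. */
static struct entry *entry_alloc(void)
{
	return calloc(1, sizeof(struct entry));
}

/* Stand-in for the reworked huge_pmd_share(): whether sharing succeeds
 * or not, it resolves to an entry, so the caller never needs a second
 * allocation call after it. */
static struct entry *entry_share(void)
{
	if (shared)
		return shared;		/* sharing worked */
	return entry_alloc();		/* fallback keeps the same contract */
}

int main(void)
{
	struct entry *e;

	/* Mirrors the reworked huge_pte_alloc() call site: exactly one
	 * branch runs, and both yield the entry directly. */
	if (shared == NULL)		/* analogous to pud_none(*pud) */
		e = entry_share();
	else
		e = entry_alloc();

	printf("entry at %p\n", (void *)e);
	if (e != shared)
		free(e);
	return 0;
}

Before the patch, the caller invoked huge_pmd_share() for its side effect and then called pmd_alloc() unconditionally; folding the allocation into the helper removes that second step and the window between the two calls.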