Diffstat (limited to 'arch/ppc64/mm/hugetlbpage.c')
-rw-r--r--  arch/ppc64/mm/hugetlbpage.c | 388
1 files changed, 212 insertions, 176 deletions
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index f9524602818d..e7833c80eb68 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -27,124 +27,94 @@
 
 #include <linux/sysctl.h>
 
-#define HUGEPGDIR_SHIFT		(HPAGE_SHIFT + PAGE_SHIFT - 3)
-#define HUGEPGDIR_SIZE		(1UL << HUGEPGDIR_SHIFT)
-#define HUGEPGDIR_MASK		(~(HUGEPGDIR_SIZE-1))
+#define NUM_LOW_AREAS		(0x100000000UL >> SID_SHIFT)
+#define NUM_HIGH_AREAS		(PGTABLE_RANGE >> HTLB_AREA_SHIFT)
 
-#define HUGEPTE_INDEX_SIZE	9
-#define HUGEPGD_INDEX_SIZE	10
-
-#define PTRS_PER_HUGEPTE	(1 << HUGEPTE_INDEX_SIZE)
-#define PTRS_PER_HUGEPGD	(1 << HUGEPGD_INDEX_SIZE)
-
-static inline int hugepgd_index(unsigned long addr)
-{
-	return (addr & ~REGION_MASK) >> HUGEPGDIR_SHIFT;
-}
-
-static pud_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr)
+/* Modelled after find_linux_pte() */
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
-	int index;
+	pgd_t *pg;
+	pud_t *pu;
+	pmd_t *pm;
+	pte_t *pt;
 
-	if (! mm->context.huge_pgdir)
-		return NULL;
+	BUG_ON(! in_hugepage_area(mm->context, addr));
 
+	addr &= HPAGE_MASK;
+
+	pg = pgd_offset(mm, addr);
+	if (!pgd_none(*pg)) {
+		pu = pud_offset(pg, addr);
+		if (!pud_none(*pu)) {
+			pm = pmd_offset(pu, addr);
+			pt = (pte_t *)pm;
+			BUG_ON(!pmd_none(*pm)
+			       && !(pte_present(*pt) && pte_huge(*pt)));
+			return pt;
+		}
+	}
 
-	index = hugepgd_index(addr);
-	BUG_ON(index >= PTRS_PER_HUGEPGD);
-	return (pud_t *)(mm->context.huge_pgdir + index);
+	return NULL;
 }
 
-static inline pte_t *hugepte_offset(pud_t *dir, unsigned long addr)
+pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
-	int index;
-
-	if (pud_none(*dir))
-		return NULL;
+	pgd_t *pg;
+	pud_t *pu;
+	pmd_t *pm;
+	pte_t *pt;
 
-	index = (addr >> HPAGE_SHIFT) % PTRS_PER_HUGEPTE;
-	return (pte_t *)pud_page(*dir) + index;
-}
-
-static pud_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr)
-{
 	BUG_ON(! in_hugepage_area(mm->context, addr));
 
-	if (! mm->context.huge_pgdir) {
-		pgd_t *new;
-		spin_unlock(&mm->page_table_lock);
-		/* Don't use pgd_alloc(), because we want __GFP_REPEAT */
-		new = kmem_cache_alloc(zero_cache, GFP_KERNEL | __GFP_REPEAT);
-		BUG_ON(memcmp(new, empty_zero_page, PAGE_SIZE));
-		spin_lock(&mm->page_table_lock);
+	addr &= HPAGE_MASK;
 
-		/*
-		 * Because we dropped the lock, we should re-check the
-		 * entry, as somebody else could have populated it..
-		 */
-		if (mm->context.huge_pgdir)
-			pgd_free(new);
-		else
-			mm->context.huge_pgdir = new;
-	}
-	return hugepgd_offset(mm, addr);
-}
+	pg = pgd_offset(mm, addr);
+	pu = pud_alloc(mm, pg, addr);
 
-static pte_t *hugepte_alloc(struct mm_struct *mm, pud_t *dir, unsigned long addr)
-{
-	if (! pud_present(*dir)) {
-		pte_t *new;
-
-		spin_unlock(&mm->page_table_lock);
-		new = kmem_cache_alloc(zero_cache, GFP_KERNEL | __GFP_REPEAT);
-		BUG_ON(memcmp(new, empty_zero_page, PAGE_SIZE));
-		spin_lock(&mm->page_table_lock);
-		/*
-		 * Because we dropped the lock, we should re-check the
-		 * entry, as somebody else could have populated it..
-		 */
-		if (pud_present(*dir)) {
-			if (new)
-				kmem_cache_free(zero_cache, new);
-		} else {
-			struct page *ptepage;
-
-			if (! new)
-				return NULL;
-			ptepage = virt_to_page(new);
-			ptepage->mapping = (void *) mm;
-			ptepage->index = addr & HUGEPGDIR_MASK;
-			pud_populate(mm, dir, new);
-		}
-	}
+	if (pu) {
+		pm = pmd_alloc(mm, pu, addr);
+		if (pm) {
+			pt = (pte_t *)pm;
+			BUG_ON(!pmd_none(*pm)
+			       && !(pte_present(*pt) && pte_huge(*pt)));
+			return pt;
+		}
+	}
 
-	return hugepte_offset(dir, addr);
+	return NULL;
 }
 
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
-{
-	pud_t *pud;
+#define HUGEPTE_BATCH_SIZE	(HPAGE_SIZE / PMD_SIZE)
 
-	BUG_ON(! in_hugepage_area(mm->context, addr));
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+		     pte_t *ptep, pte_t pte)
+{
+	int i;
 
-	pud = hugepgd_offset(mm, addr);
-	if (! pud)
-		return NULL;
+	if (pte_present(*ptep)) {
+		pte_clear(mm, addr, ptep);
+		flush_tlb_pending();
+	}
 
-	return hugepte_offset(pud, addr);
+	for (i = 0; i < HUGEPTE_BATCH_SIZE; i++) {
+		*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
+		ptep++;
+	}
 }
 
-pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep)
 {
-	pud_t *pud;
+	unsigned long old = pte_update(ptep, ~0UL);
+	int i;
 
-	BUG_ON(! in_hugepage_area(mm->context, addr));
+	if (old & _PAGE_HASHPTE)
+		hpte_update(mm, addr, old, 0);
 
-	pud = hugepgd_alloc(mm, addr);
-	if (! pud)
-		return NULL;
+	for (i = 1; i < HUGEPTE_BATCH_SIZE; i++)
+		ptep[i] = __pte(0);
 
-	return hugepte_alloc(mm, pud, addr);
+	return __pte(old);
 }
 
 /*
@@ -162,15 +132,17 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
 	return 0;
 }
 
-static void flush_segments(void *parm)
+static void flush_low_segments(void *parm)
 {
-	u16 segs = (unsigned long) parm;
+	u16 areas = (unsigned long) parm;
 	unsigned long i;
 
 	asm volatile("isync" : : : "memory");
 
-	for (i = 0; i < 16; i++) {
-		if (! (segs & (1U << i)))
+	BUILD_BUG_ON((sizeof(areas)*8) != NUM_LOW_AREAS);
+
+	for (i = 0; i < NUM_LOW_AREAS; i++) {
+		if (! (areas & (1U << i)))
 			continue;
 		asm volatile("slbie %0" : : "r" (i << SID_SHIFT));
 	}
@@ -178,13 +150,33 @@ static void flush_segments(void *parm)
 	asm volatile("isync" : : : "memory");
 }
 
-static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg)
+static void flush_high_segments(void *parm)
 {
-	unsigned long start = seg << SID_SHIFT;
-	unsigned long end = (seg+1) << SID_SHIFT;
+	u16 areas = (unsigned long) parm;
+	unsigned long i, j;
+
+	asm volatile("isync" : : : "memory");
+
+	BUILD_BUG_ON((sizeof(areas)*8) != NUM_HIGH_AREAS);
+
+	for (i = 0; i < NUM_HIGH_AREAS; i++) {
+		if (! (areas & (1U << i)))
+			continue;
+		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
+			asm volatile("slbie %0"
+				     :: "r" ((i << HTLB_AREA_SHIFT) + (j << SID_SHIFT)));
+	}
+
+	asm volatile("isync" : : : "memory");
+}
+
+static int prepare_low_area_for_htlb(struct mm_struct *mm, unsigned long area)
+{
+	unsigned long start = area << SID_SHIFT;
+	unsigned long end = (area+1) << SID_SHIFT;
 	struct vm_area_struct *vma;
 
-	BUG_ON(seg >= 16);
+	BUG_ON(area >= NUM_LOW_AREAS);
 
 	/* Check no VMAs are in the region */
 	vma = find_vma(mm, start);
@@ -194,20 +186,39 @@ static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg)
 	return 0;
 }
 
-static int open_low_hpage_segs(struct mm_struct *mm, u16 newsegs)
+static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
+{
+	unsigned long start = area << HTLB_AREA_SHIFT;
+	unsigned long end = (area+1) << HTLB_AREA_SHIFT;
+	struct vm_area_struct *vma;
+
+	BUG_ON(area >= NUM_HIGH_AREAS);
+
+	/* Check no VMAs are in the region */
+	vma = find_vma(mm, start);
+	if (vma && (vma->vm_start < end))
+		return -EBUSY;
+
+	return 0;
+}
+
+static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
 {
 	unsigned long i;
 
-	newsegs &= ~(mm->context.htlb_segs);
-	if (! newsegs)
+	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
+	BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);
+
+	newareas &= ~(mm->context.low_htlb_areas);
+	if (! newareas)
 		return 0; /* The segments we want are already open */
 
-	for (i = 0; i < 16; i++)
-		if ((1 << i) & newsegs)
-			if (prepare_low_seg_for_htlb(mm, i) != 0)
+	for (i = 0; i < NUM_LOW_AREAS; i++)
+		if ((1 << i) & newareas)
+			if (prepare_low_area_for_htlb(mm, i) != 0)
 				return -EBUSY;
 
-	mm->context.htlb_segs |= newsegs;
+	mm->context.low_htlb_areas |= newareas;
 
 	/* update the paca copy of the context struct */
 	get_paca()->context = mm->context;
@@ -215,29 +226,63 @@ static int open_low_hpage_segs(struct mm_struct *mm, u16 newsegs)
 	/* the context change must make it to memory before the flush,
 	 * so that further SLB misses do the right thing. */
 	mb();
-	on_each_cpu(flush_segments, (void *)(unsigned long)newsegs, 0, 1);
+	on_each_cpu(flush_low_segments, (void *)(unsigned long)newareas, 0, 1);
+
+	return 0;
+}
+
+static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
+{
+	unsigned long i;
+
+	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
+	BUILD_BUG_ON((sizeof(mm->context.high_htlb_areas)*8)
+		     != NUM_HIGH_AREAS);
+
+	newareas &= ~(mm->context.high_htlb_areas);
+	if (! newareas)
+		return 0; /* The areas we want are already open */
+
+	for (i = 0; i < NUM_HIGH_AREAS; i++)
+		if ((1 << i) & newareas)
+			if (prepare_high_area_for_htlb(mm, i) != 0)
+				return -EBUSY;
+
+	mm->context.high_htlb_areas |= newareas;
+
+	/* update the paca copy of the context struct */
+	get_paca()->context = mm->context;
+
+	/* the context change must make it to memory before the flush,
+	 * so that further SLB misses do the right thing. */
+	mb();
+	on_each_cpu(flush_high_segments, (void *)(unsigned long)newareas, 0, 1);
 
 	return 0;
 }
 
 int prepare_hugepage_range(unsigned long addr, unsigned long len)
 {
-	if (within_hugepage_high_range(addr, len))
-		return 0;
-	else if ((addr < 0x100000000UL) && ((addr+len) < 0x100000000UL)) {
-		int err;
-		/* Yes, we need both tests, in case addr+len overflows
-		 * 64-bit arithmetic */
-		err = open_low_hpage_segs(current->mm,
+	int err;
+
+	if ( (addr+len) < addr )
+		return -EINVAL;
+
+	if ((addr + len) < 0x100000000UL)
+		err = open_low_hpage_areas(current->mm,
 					  LOW_ESID_MASK(addr, len));
-		if (err)
-			printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
-			       " failed (segs: 0x%04hx)\n", addr, len,
-			       LOW_ESID_MASK(addr, len));
+	else
+		err = open_high_hpage_areas(current->mm,
+					    HTLB_AREA_MASK(addr, len));
+	if (err) {
+		printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
+			" failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n",
+			addr, len,
+			LOW_ESID_MASK(addr, len), HTLB_AREA_MASK(addr, len));
 		return err;
 	}
 
-	return -EINVAL;
+	return 0;
 }
 
 struct page *
@@ -309,8 +354,8 @@ full_search:
 			vma = find_vma(mm, addr);
 			continue;
 		}
-		if (touches_hugepage_high_range(addr, len)) {
-			addr = TASK_HPAGE_END;
+		if (touches_hugepage_high_range(mm, addr, len)) {
+			addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
 			vma = find_vma(mm, addr);
 			continue;
 		}
@@ -389,8 +434,9 @@ hugepage_recheck:
 	if (touches_hugepage_low_range(mm, addr, len)) {
 		addr = (addr & ((~0) << SID_SHIFT)) - len;
 		goto hugepage_recheck;
-	} else if (touches_hugepage_high_range(addr, len)) {
-		addr = TASK_HPAGE_BASE - len;
+	} else if (touches_hugepage_high_range(mm, addr, len)) {
+		addr = (addr & ((~0UL) << HTLB_AREA_SHIFT)) - len;
+		goto hugepage_recheck;
 	}
 
 	/*
@@ -481,23 +527,28 @@ static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
 	return -ENOMEM;
 }
 
-static unsigned long htlb_get_high_area(unsigned long len)
+static unsigned long htlb_get_high_area(unsigned long len, u16 areamask)
 {
-	unsigned long addr = TASK_HPAGE_BASE;
+	unsigned long addr = 0x100000000UL;
 	struct vm_area_struct *vma;
 
 	vma = find_vma(current->mm, addr);
-	for (vma = find_vma(current->mm, addr);
-	     addr + len <= TASK_HPAGE_END;
-	     vma = vma->vm_next) {
+	while (addr + len <= TASK_SIZE_USER64) {
 		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */
-		BUG_ON(! within_hugepage_high_range(addr, len));
+
+		if (! __within_hugepage_high_range(addr, len, areamask)) {
+			addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
+			vma = find_vma(current->mm, addr);
+			continue;
+		}
 
 		if (!vma || (addr + len) <= vma->vm_start)
 			return addr;
 		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
-		/* Because we're in a hugepage region, this alignment
-		 * should not skip us over any VMAs */
+		/* Depending on segmask this might not be a confirmed
+		 * hugepage region, so the ALIGN could have skipped
+		 * some VMAs */
+		vma = find_vma(current->mm, addr);
 	}
 
 	return -ENOMEM;
@@ -507,6 +558,9 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 					unsigned long len, unsigned long pgoff,
 					unsigned long flags)
 {
+	int lastshift;
+	u16 areamask, curareas;
+
 	if (len & ~HPAGE_MASK)
 		return -EINVAL;
 
@@ -514,67 +568,49 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		return -EINVAL;
 
 	if (test_thread_flag(TIF_32BIT)) {
-		int lastshift = 0;
-		u16 segmask, cursegs = current->mm->context.htlb_segs;
+		curareas = current->mm->context.low_htlb_areas;
 
 		/* First see if we can do the mapping in the existing
-		 * low hpage segments */
-		addr = htlb_get_low_area(len, cursegs);
+		 * low areas */
+		addr = htlb_get_low_area(len, curareas);
 		if (addr != -ENOMEM)
 			return addr;
 
-		for (segmask = LOW_ESID_MASK(0x100000000UL-len, len);
-		     ! lastshift; segmask >>=1) {
-			if (segmask & 1)
+		lastshift = 0;
+		for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
+		     ! lastshift; areamask >>=1) {
+			if (areamask & 1)
 				lastshift = 1;
 
-			addr = htlb_get_low_area(len, cursegs | segmask);
+			addr = htlb_get_low_area(len, curareas | areamask);
 			if ((addr != -ENOMEM)
-			    && open_low_hpage_segs(current->mm, segmask) == 0)
+			    && open_low_hpage_areas(current->mm, areamask) == 0)
 				return addr;
 		}
-		printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
-		       " enough segments\n");
-		return -ENOMEM;
 	} else {
-		return htlb_get_high_area(len);
-	}
-}
-
-void hugetlb_mm_free_pgd(struct mm_struct *mm)
-{
-	int i;
-	pgd_t *pgdir;
-
-	spin_lock(&mm->page_table_lock);
-
-	pgdir = mm->context.huge_pgdir;
-	if (! pgdir)
-		goto out;
-
-	mm->context.huge_pgdir = NULL;
+		curareas = current->mm->context.high_htlb_areas;
 
-	/* cleanup any hugepte pages leftover */
-	for (i = 0; i < PTRS_PER_HUGEPGD; i++) {
-		pud_t *pud = (pud_t *)(pgdir + i);
-
-		if (! pud_none(*pud)) {
-			pte_t *pte = (pte_t *)pud_page(*pud);
-			struct page *ptepage = virt_to_page(pte);
+		/* First see if we can do the mapping in the existing
+		 * high areas */
+		addr = htlb_get_high_area(len, curareas);
+		if (addr != -ENOMEM)
+			return addr;
 
-			ptepage->mapping = NULL;
+		lastshift = 0;
+		for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
+		     ! lastshift; areamask >>=1) {
+			if (areamask & 1)
+				lastshift = 1;
 
-			BUG_ON(memcmp(pte, empty_zero_page, PAGE_SIZE));
-			kmem_cache_free(zero_cache, pte);
+			addr = htlb_get_high_area(len, curareas | areamask);
+			if ((addr != -ENOMEM)
+			    && open_high_hpage_areas(current->mm, areamask) == 0)
+				return addr;
 		}
-		pud_clear(pud);
 	}
-
-	BUG_ON(memcmp(pgdir, empty_zero_page, PAGE_SIZE));
-	kmem_cache_free(zero_cache, pgdir);
-
-out:
-	spin_unlock(&mm->page_table_lock);
+	printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
+	       " enough areas\n");
+	return -ENOMEM;
 }
 
 int hash_huge_page(struct mm_struct *mm, unsigned long access,