author		Michel Lespinasse <walken@google.com>			2013-04-29 14:53:53 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2013-04-29 21:05:17 -0400
commit		fba2369e6ceb7ae688e91063821bae5140e26393 (patch)
tree		48644a2c78db6e8ef5c260d413dc953cd45b45e5
parent		34d07177b802e963f3b14bf3dd8caf294f41fea7 (diff)
mm: use vm_unmapped_area() on powerpc architecture
Update the powerpc slice_get_unmapped_area function to make use of
vm_unmapped_area() instead of implementing a brute force search.
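
For context, vm_unmapped_area() searches for a free address range described by a struct vm_unmapped_area_info (search limits, length, alignment) and returns either a suitable address or an error value that is not page aligned. The sketch below only illustrates that calling convention, under the same assumptions this patch makes; example_find_area() is a hypothetical helper, not part of the patch:

/*
 * Hypothetical sketch (not from this patch): fill a vm_unmapped_area_info
 * the same way the rewritten slice code does, then let the core mm code
 * perform the search.  Error returns from vm_unmapped_area() are encoded
 * as values that are not page aligned.
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/errno.h>

static unsigned long example_find_area(unsigned long len, int pshift)
{
	struct vm_unmapped_area_info info;
	unsigned long found;

	info.flags = 0;				/* 0 = bottom-up; VM_UNMAPPED_AREA_TOPDOWN = top-down */
	info.length = len;			/* size of the mapping being placed */
	info.low_limit = TASK_UNMAPPED_BASE;	/* lower bound of the search window */
	info.high_limit = TASK_SIZE;		/* upper bound of the search window */
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);	/* require 1 << pshift alignment */
	info.align_offset = 0;

	found = vm_unmapped_area(&info);
	if (found & ~PAGE_MASK)			/* not page aligned => error value */
		return -ENOMEM;
	return found;
}
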
Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Tested-by: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-rw-r--r--	arch/powerpc/mm/slice.c | 123
1 file changed, 78 insertions(+), 45 deletions(-)
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 999a74f25ebe..3e99c149271a 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -237,36 +237,69 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz
 #endif
 }
 
+/*
+ * Compute which slice addr is part of;
+ * set *boundary_addr to the start or end boundary of that slice
+ * (depending on 'end' parameter);
+ * return boolean indicating if the slice is marked as available in the
+ * 'available' slice_mark.
+ */
+static bool slice_scan_available(unsigned long addr,
+				 struct slice_mask available,
+				 int end,
+				 unsigned long *boundary_addr)
+{
+	unsigned long slice;
+	if (addr < SLICE_LOW_TOP) {
+		slice = GET_LOW_SLICE_INDEX(addr);
+		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
+		return !!(available.low_slices & (1u << slice));
+	} else {
+		slice = GET_HIGH_SLICE_INDEX(addr);
+		*boundary_addr = (slice + end) ?
+			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
+		return !!(available.high_slices & (1u << slice));
+	}
+}
+
 static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
 					      unsigned long len,
 					      struct slice_mask available,
 					      int psize)
 {
-	struct vm_area_struct *vma;
-	unsigned long addr;
-	struct slice_mask mask;
 	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
+	unsigned long addr, found, next_end;
+	struct vm_unmapped_area_info info;
 
-	addr = TASK_UNMAPPED_BASE;
-
-	for (;;) {
-		addr = _ALIGN_UP(addr, 1ul << pshift);
-		if ((TASK_SIZE - len) < addr)
-			break;
-		vma = find_vma(mm, addr);
-		BUG_ON(vma && (addr >= vma->vm_end));
+	info.flags = 0;
+	info.length = len;
+	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
+	info.align_offset = 0;
 
-		mask = slice_range_to_mask(addr, len);
-		if (!slice_check_fit(mask, available)) {
-			if (addr < SLICE_LOW_TOP)
-				addr = _ALIGN_UP(addr + 1, 1ul << SLICE_LOW_SHIFT);
-			else
-				addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
+	addr = TASK_UNMAPPED_BASE;
+	while (addr < TASK_SIZE) {
+		info.low_limit = addr;
+		if (!slice_scan_available(addr, available, 1, &addr))
 			continue;
+
+ next_slice:
+		/*
+		 * At this point [info.low_limit; addr) covers
+		 * available slices only and ends at a slice boundary.
+		 * Check if we need to reduce the range, or if we can
+		 * extend it to cover the next available slice.
+		 */
+		if (addr >= TASK_SIZE)
+			addr = TASK_SIZE;
+		else if (slice_scan_available(addr, available, 1, &next_end)) {
+			addr = next_end;
+			goto next_slice;
 		}
-		if (!vma || addr + len <= vma->vm_start)
-			return addr;
-		addr = vma->vm_end;
+		info.high_limit = addr;
+
+		found = vm_unmapped_area(&info);
+		if (!(found & ~PAGE_MASK))
+			return found;
 	}
 
 	return -ENOMEM;
@@ -277,39 +310,39 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
 					     struct slice_mask available,
 					     int psize)
 {
-	struct vm_area_struct *vma;
-	unsigned long addr;
-	struct slice_mask mask;
 	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
+	unsigned long addr, found, prev;
+	struct vm_unmapped_area_info info;
 
-	addr = mm->mmap_base;
-	while (addr > len) {
-		/* Go down by chunk size */
-		addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
-
-		/* Check for hit with different page size */
-		mask = slice_range_to_mask(addr, len);
-		if (!slice_check_fit(mask, available)) {
-			if (addr < SLICE_LOW_TOP)
-				addr = _ALIGN_DOWN(addr, 1ul << SLICE_LOW_SHIFT);
-			else if (addr < (1ul << SLICE_HIGH_SHIFT))
-				addr = SLICE_LOW_TOP;
-			else
-				addr = _ALIGN_DOWN(addr, 1ul << SLICE_HIGH_SHIFT);
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
+	info.align_offset = 0;
+
+	addr = mm->mmap_base;
+	while (addr > PAGE_SIZE) {
+		info.high_limit = addr;
+		if (!slice_scan_available(addr - 1, available, 0, &addr))
 			continue;
-		}
 
+ prev_slice:
 		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
+		 * At this point [addr; info.high_limit) covers
+		 * available slices only and starts at a slice boundary.
+		 * Check if we need to reduce the range, or if we can
+		 * extend it to cover the previous available slice.
 		 */
-		vma = find_vma(mm, addr);
-		if (!vma || (addr + len) <= vma->vm_start)
-			return addr;
+		if (addr < PAGE_SIZE)
+			addr = PAGE_SIZE;
+		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
+			addr = prev;
+			goto prev_slice;
+		}
+		info.low_limit = addr;
 
-		/* try just below the current vma->vm_start */
-		addr = vma->vm_start;
+		found = vm_unmapped_area(&info);
+		if (!(found & ~PAGE_MASK))
+			return found;
 	}
 
 	/*