author     Tejun Heo <tj@kernel.org>    2009-03-06 10:44:09 -0500
committer  Tejun Heo <tj@kernel.org>    2009-03-06 10:44:09 -0500
commit     9f7dcf224bd09ec9ebcbfb383bf2c465e0e0b03d (patch)
tree       9e8b07047080fbdb4f4c8f1554d5208570e64fe2 /mm
parent     1880d93b80acc3171850e9df5048bcb26b75c2f5 (diff)
percpu: move chunk area map extension out of area allocation
Impact: code reorganization for later changes
Separate out chunk area map extension into a separate function -
pcpu_extend_area_map() - and call it directly from pcpu_alloc() such
that pcpu_alloc_area() is guaranteed to have enough area map slots on
invocation.
With this change, pcpu_alloc_area() does only area allocation and the
only failure mode is when the chunk doesn't have enough room, so
there's no need to distinguish it from memory allocation failures.
Make it return -1 in such cases instead of the hacky -ENOSPC.
Signed-off-by: Tejun Heo <tj@kernel.org>
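
To see the new division of labor in one place: the caller first makes sure the
area map has room (doubling from a small default until map_used + 2 fits) and
only then asks for an area, so the allocator's sole failure mode is "no fitting
area", reported as -1. The userspace sketch below models that pattern; struct
chunk, DFL_MAP_ALLOC, extend_area_map() and alloc_area() are simplified
stand-ins for the kernel's pcpu_chunk, PCPU_DFL_MAP_ALLOC,
pcpu_extend_area_map() and pcpu_alloc_area(), not the real mm/percpu.c code.

```c
/*
 * Minimal userspace model of the grow-then-allocate pattern this patch
 * introduces.  All names and sizes here are illustrative stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DFL_MAP_ALLOC 16		/* stand-in for PCPU_DFL_MAP_ALLOC */

struct chunk {
	int *map;			/* area map entries */
	int map_alloc;			/* slots allocated */
	int map_used;			/* slots in use */
};

/*
 * Grow chunk->map so at least two extra slots are free (one allocation
 * can split an area into three) -- mirrors pcpu_extend_area_map().
 * Returns 0 if no-op, 1 if extended, -1 on allocation failure.
 */
static int extend_area_map(struct chunk *c)
{
	int new_alloc = DFL_MAP_ALLOC;
	int *new;

	if (c->map_alloc >= c->map_used + 2)
		return 0;

	while (new_alloc < c->map_used + 2)
		new_alloc *= 2;		/* double until it fits */

	new = malloc(new_alloc * sizeof(new[0]));
	if (!new)
		return -1;		/* the only place memory can fail */

	memcpy(new, c->map, c->map_alloc * sizeof(c->map[0]));
	free(c->map);
	c->map = new;
	c->map_alloc = new_alloc;
	return 1;
}

/* With the map pre-extended, "allocation" can only report lack of space. */
static int alloc_area(struct chunk *c, int size)
{
	/* the real code scans c->map for a free area >= size; here we just
	 * pretend nothing fits to show the new -1 convention */
	(void)c;
	(void)size;
	return -1;
}

int main(void)
{
	struct chunk c = {
		.map = calloc(DFL_MAP_ALLOC, sizeof(int)),
		.map_alloc = DFL_MAP_ALLOC,
		.map_used = 15,
	};

	if (!c.map)
		return 1;
	if (extend_area_map(&c) < 0)	/* memory failure handled here ... */
		return 1;
	if (alloc_area(&c, 64) < 0)	/* ... so -1 just means "try another chunk" */
		printf("no matching area, map_alloc now %d\n", c.map_alloc);

	free(c.map);
	return 0;
}
```

Run as a plain C program, the sketch grows a 16-slot map to 32 slots before the
(deliberately failing) allocation, mirroring how pcpu_alloc() now treats
extension failure and "chunk has no matching area" as separate outcomes.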
Diffstat (limited to 'mm')
-rw-r--r--   mm/percpu.c   108
1 file changed, 60 insertions, 48 deletions
diff --git a/mm/percpu.c b/mm/percpu.c
index f1d0e905850c..7d9bc35e8ed2 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -307,6 +307,50 @@ static void pcpu_chunk_addr_insert(struct pcpu_chunk *new)
 }
 
 /**
+ * pcpu_extend_area_map - extend area map for allocation
+ * @chunk: target chunk
+ *
+ * Extend area map of @chunk so that it can accomodate an allocation.
+ * A single allocation can split an area into three areas, so this
+ * function makes sure that @chunk->map has at least two extra slots.
+ *
+ * RETURNS:
+ * 0 if noop, 1 if successfully extended, -errno on failure.
+ */
+static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
+{
+	int new_alloc;
+	int *new;
+	size_t size;
+
+	/* has enough? */
+	if (chunk->map_alloc >= chunk->map_used + 2)
+		return 0;
+
+	new_alloc = PCPU_DFL_MAP_ALLOC;
+	while (new_alloc < chunk->map_used + 2)
+		new_alloc *= 2;
+
+	new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
+	if (!new)
+		return -ENOMEM;
+
+	size = chunk->map_alloc * sizeof(chunk->map[0]);
+	memcpy(new, chunk->map, size);
+
+	/*
+	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
+	 * one of the first chunks and still using static map.
+	 */
+	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
+		pcpu_mem_free(chunk->map, size);
+
+	chunk->map_alloc = new_alloc;
+	chunk->map = new;
+	return 0;
+}
+
+/**
  * pcpu_split_block - split a map block
  * @chunk: chunk of interest
  * @i: index of map block to split
@@ -321,44 +365,16 @@ static void pcpu_chunk_addr_insert(struct pcpu_chunk *new)
  * depending on @head, is reduced by @tail bytes and @tail byte block
  * is inserted after the target block.
  *
- * RETURNS:
- * 0 on success, -errno on failure.
+ * @chunk->map must have enough free slots to accomodate the split.
  */
-static int pcpu_split_block(struct pcpu_chunk *chunk, int i, int head, int tail)
+static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
+			     int head, int tail)
 {
 	int nr_extra = !!head + !!tail;
-	int target = chunk->map_used + nr_extra;
-
-	/* reallocation required? */
-	if (chunk->map_alloc < target) {
-		int new_alloc;
-		int *new;
-		size_t size;
-
-		new_alloc = PCPU_DFL_MAP_ALLOC;
-		while (new_alloc < target)
-			new_alloc *= 2;
-
-		new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
-		if (!new)
-			return -ENOMEM;
-
-		size = chunk->map_alloc * sizeof(chunk->map[0]);
-		memcpy(new, chunk->map, size);
-
-		/*
-		 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the
-		 * chunk is one of the first chunks and still using
-		 * static map.
-		 */
-		if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
-			pcpu_mem_free(chunk->map, size);
 
-		chunk->map_alloc = new_alloc;
-		chunk->map = new;
-	}
+	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
 
-	/* insert a new subblock */
+	/* insert new subblocks */
 	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
 		sizeof(chunk->map[0]) * (chunk->map_used - i));
 	chunk->map_used += nr_extra;
@@ -371,7 +387,6 @@ static int pcpu_split_block(struct pcpu_chunk *chunk, int i, int head, int tail)
 		chunk->map[i++] -= tail;
 		chunk->map[i] = tail;
 	}
-	return 0;
 }
 
 /**
@@ -384,8 +399,11 @@ static int pcpu_split_block(struct pcpu_chunk *chunk, int i, int head, int tail)
  * Note that this function only allocates the offset. It doesn't
  * populate or map the area.
  *
+ * @chunk->map must have at least two free slots.
+ *
  * RETURNS:
- * Allocated offset in @chunk on success, -errno on failure.
+ * Allocated offset in @chunk on success, -1 if no matching area is
+ * found.
  */
 static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
 {
@@ -433,8 +451,7 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
 
 	/* split if warranted */
 	if (head || tail) {
-		if (pcpu_split_block(chunk, i, head, tail))
-			return -ENOMEM;
+		pcpu_split_block(chunk, i, head, tail);
 		if (head) {
 			i++;
 			off += head;
@@ -461,14 +478,8 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
 	chunk->contig_hint = max_contig;	/* fully scanned */
 	pcpu_chunk_relocate(chunk, oslot);
 
-	/*
-	 * Tell the upper layer that this chunk has no area left.
-	 * Note that this is not an error condition but a notification
-	 * to upper layer that it needs to look at other chunks.
-	 * -ENOSPC is chosen as it isn't used in memory subsystem and
-	 * matches the meaning in a way.
-	 */
-	return -ENOSPC;
+	/* tell the upper layer that this chunk has no matching area */
+	return -1;
 }
 
 /**
@@ -755,7 +766,8 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
 	/* serve reserved allocations from the reserved chunk if available */
 	if (reserved && pcpu_reserved_chunk) {
 		chunk = pcpu_reserved_chunk;
-		if (size > chunk->contig_hint)
+		if (size > chunk->contig_hint ||
+		    pcpu_extend_area_map(chunk) < 0)
 			goto out_unlock;
 		off = pcpu_alloc_area(chunk, size, align);
 		if (off >= 0)
@@ -768,11 +780,11 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
 		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
 			if (size > chunk->contig_hint)
 				continue;
+			if (pcpu_extend_area_map(chunk) < 0)
+				goto out_unlock;
 			off = pcpu_alloc_area(chunk, size, align);
 			if (off >= 0)
 				goto area_found;
-			if (off != -ENOSPC)
-				goto out_unlock;
 		}
 	}
 