diff options
author | Tejun Heo <tj@kernel.org> | 2014-09-02 14:46:02 -0400 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2014-09-02 14:46:02 -0400 |
commit | a16037c8dfc2734c1a2c8e3ffd4766ed25f2a41d (patch) | |
tree | e3cf6de55397465346f73ecded1e7b4edda4950b /mm | |
parent | b38d08f3181c5025a7ce84646494cc4748492a3b (diff) |
percpu: make pcpu_alloc_area() capable of allocating only from populated areas
Update pcpu_alloc_area() so that it can skip unpopulated areas if the
new parameter @pop_only is true. This is implemented by a new
function, pcpu_fit_in_area(), which determines the amount of head
padding considering the alignment and populated state.
@pop_only is currently always false but this will be used to implement
atomic allocation.
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/percpu.c | 65 |
1 file changed, 58 insertions, 7 deletions
diff --git a/mm/percpu.c b/mm/percpu.c index e59f7b405bed..e18aa143aab1 100644 --- a/mm/percpu.c +++ b/mm/percpu.c | |||
@@ -400,10 +400,60 @@ out_unlock: | |||
400 | } | 400 | } |
401 | 401 | ||
402 | /** | 402 | /** |
403 | * pcpu_fit_in_area - try to fit the requested allocation in a candidate area | ||
404 | * @chunk: chunk the candidate area belongs to | ||
405 | * @off: the offset to the start of the candidate area | ||
406 | * @this_size: the size of the candidate area | ||
407 | * @size: the size of the target allocation | ||
408 | * @align: the alignment of the target allocation | ||
409 | * @pop_only: only allocate from already populated region | ||
410 | * | ||
411 | * We're trying to allocate @size bytes aligned at @align. @chunk's area | ||
412 | * at @off sized @this_size is a candidate. This function determines | ||
413 | * whether the target allocation fits in the candidate area and returns the | ||
414 | * number of bytes to pad after @off. If the target area doesn't fit, -1 | ||
415 | * is returned. | ||
416 | * | ||
417 | * If @pop_only is %true, this function only considers the already | ||
418 | * populated part of the candidate area. | ||
419 | */ | ||
420 | static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size, | ||
421 | int size, int align, bool pop_only) | ||
422 | { | ||
423 | int cand_off = off; | ||
424 | |||
425 | while (true) { | ||
426 | int head = ALIGN(cand_off, align) - off; | ||
427 | int page_start, page_end, rs, re; | ||
428 | |||
429 | if (this_size < head + size) | ||
430 | return -1; | ||
431 | |||
432 | if (!pop_only) | ||
433 | return head; | ||
434 | |||
435 | /* | ||
436 | * If the first unpopulated page is beyond the end of the | ||
437 | * allocation, the whole allocation is populated; | ||
438 | * otherwise, retry from the end of the unpopulated area. | ||
439 | */ | ||
440 | page_start = PFN_DOWN(head + off); | ||
441 | page_end = PFN_UP(head + off + size); | ||
442 | |||
443 | rs = page_start; | ||
444 | pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size)); | ||
445 | if (rs >= page_end) | ||
446 | return head; | ||
447 | cand_off = re * PAGE_SIZE; | ||
448 | } | ||
449 | } | ||
450 | |||
451 | /** | ||
403 | * pcpu_alloc_area - allocate area from a pcpu_chunk | 452 | * pcpu_alloc_area - allocate area from a pcpu_chunk |
404 | * @chunk: chunk of interest | 453 | * @chunk: chunk of interest |
405 | * @size: wanted size in bytes | 454 | * @size: wanted size in bytes |
406 | * @align: wanted align | 455 | * @align: wanted align |
456 | * @pop_only: allocate only from the populated area | ||
407 | * | 457 | * |
408 | * Try to allocate @size bytes area aligned at @align from @chunk. | 458 | * Try to allocate @size bytes area aligned at @align from @chunk. |
409 | * Note that this function only allocates the offset. It doesn't | 459 | * Note that this function only allocates the offset. It doesn't |
@@ -418,7 +468,8 @@ out_unlock: | |||
418 | * Allocated offset in @chunk on success, -1 if no matching area is | 468 | * Allocated offset in @chunk on success, -1 if no matching area is |
419 | * found. | 469 | * found. |
420 | */ | 470 | */ |
421 | static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align) | 471 | static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align, |
472 | bool pop_only) | ||
422 | { | 473 | { |
423 | int oslot = pcpu_chunk_slot(chunk); | 474 | int oslot = pcpu_chunk_slot(chunk); |
424 | int max_contig = 0; | 475 | int max_contig = 0; |
@@ -434,11 +485,11 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align) | |||
434 | if (off & 1) | 485 | if (off & 1) |
435 | continue; | 486 | continue; |
436 | 487 | ||
437 | /* extra for alignment requirement */ | ||
438 | head = ALIGN(off, align) - off; | ||
439 | |||
440 | this_size = (p[1] & ~1) - off; | 488 | this_size = (p[1] & ~1) - off; |
441 | if (this_size < head + size) { | 489 | |
490 | head = pcpu_fit_in_area(chunk, off, this_size, size, align, | ||
491 | pop_only); | ||
492 | if (head < 0) { | ||
442 | if (!seen_free) { | 493 | if (!seen_free) { |
443 | chunk->first_free = i; | 494 | chunk->first_free = i; |
444 | seen_free = true; | 495 | seen_free = true; |
@@ -730,7 +781,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved) | |||
730 | spin_lock_irqsave(&pcpu_lock, flags); | 781 | spin_lock_irqsave(&pcpu_lock, flags); |
731 | } | 782 | } |
732 | 783 | ||
733 | off = pcpu_alloc_area(chunk, size, align); | 784 | off = pcpu_alloc_area(chunk, size, align, false); |
734 | if (off >= 0) | 785 | if (off >= 0) |
735 | goto area_found; | 786 | goto area_found; |
736 | 787 | ||
@@ -761,7 +812,7 @@ restart: | |||
761 | goto restart; | 812 | goto restart; |
762 | } | 813 | } |
763 | 814 | ||
764 | off = pcpu_alloc_area(chunk, size, align); | 815 | off = pcpu_alloc_area(chunk, size, align, false); |
765 | if (off >= 0) | 816 | if (off >= 0) |
766 | goto area_found; | 817 | goto area_found; |
767 | } | 818 | } |