author     Li Yang <leoli@freescale.com>            2007-06-18 07:29:21 -0400
committer  Kumar Gala <galak@kernel.crashing.org>   2007-06-19 23:35:53 -0400
commit     7c8545e98468c53809fc06788a3b9a34dff05240 (patch)
tree       11f7cff9f7f0f67b04db8234c41de5d6bc871b4c /arch/powerpc/lib
parent     7b7a57c77dccddd84b6aa02a38deee7ad97c977a (diff)
[POWERPC] rheap - eliminates internal fragments caused by alignment
The patch adds the fragments produced by rh_alloc_align() back to the free
list instead of marking the whole padded chunk of memory as taken. This
greatly improves the memory utilization of rheap-managed regions.

It solves the problem of MURAM being too small to fit 3 UCCs enabled on the
MPC8323.
Signed-off-by: Li Yang <leoli@freescale.com>
Acked-by: Joakim Tjernlund <joakim.tjernlund@transmode.se>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
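
To make the new behavior concrete, here is a minimal standalone sketch of the
splitting strategy the patch introduces. This is an illustration, not the
kernel's rheap API: struct free_block and split_alloc() are hypothetical
stand-ins for rh_block_t and the logic inside rh_alloc_align(). A free block
is carved into an optional leading fragment, the aligned allocation itself,
and an optional trailing fragment, and both fragments remain allocatable.

#include <stdio.h>

/* Illustrative stand-in for rheap's rh_block_t; not the kernel API. */
struct free_block {
	unsigned long start;
	unsigned long size;
};

/*
 * Carve an aligned allocation of `size` bytes out of `blk`.  On success,
 * *lead and *tail receive the leading/trailing fragments (size 0 when the
 * split is exact) and the aligned start address is returned; -1 means the
 * block is too small once the start is rounded up.
 */
static long split_alloc(const struct free_block *blk, unsigned long size,
			unsigned long align, struct free_block *lead,
			struct free_block *tail)
{
	unsigned long start = (blk->start + align - 1) & ~(align - 1);

	if (start + size > blk->start + blk->size)
		return -1;

	lead->start = blk->start;
	lead->size = start - blk->start;
	tail->start = start + size;
	tail->size = blk->start + blk->size - tail->start;
	return (long)start;
}

int main(void)
{
	struct free_block blk = { .start = 100, .size = 64 };
	struct free_block lead, tail;
	long start = split_alloc(&blk, 32, 16, &lead, &tail);

	/* prints "start=112 lead=12 tail=20": 32 bytes wasted by the old
	 * padding scheme now stay on the free list */
	printf("start=%ld lead=%lu tail=%lu\n", start, lead.size, tail.size);
	return 0;
}

Note that the real code may now need two fresh block slots in the split path
(spblk for the leading fragment plus newblk for the taken block), which is
why the patch bumps assure_empty(info, 1) to assure_empty(info, 2).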
Diffstat (limited to 'arch/powerpc/lib')
 arch/powerpc/lib/rheap.c | 48 +++++++++++++++++++++++++++++-------------------
 1 file changed, 29 insertions(+), 19 deletions(-)
diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c
index 180ee2933ab9..2f24ea0d723a 100644
--- a/arch/powerpc/lib/rheap.c
+++ b/arch/powerpc/lib/rheap.c
@@ -437,27 +437,26 @@ unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const ch
 	struct list_head *l;
 	rh_block_t *blk;
 	rh_block_t *newblk;
-	unsigned long start;
+	unsigned long start, sp_size;
 
 	/* Validate size, and alignment must be power of two */
 	if (size <= 0 || (alignment & (alignment - 1)) != 0)
 		return (unsigned long) -EINVAL;
 
-	/* given alignment larger that default rheap alignment */
-	if (alignment > info->alignment)
-		size += alignment - 1;
-
 	/* Align to configured alignment */
 	size = (size + (info->alignment - 1)) & ~(info->alignment - 1);
 
-	if (assure_empty(info, 1) < 0)
+	if (assure_empty(info, 2) < 0)
 		return (unsigned long) -ENOMEM;
 
 	blk = NULL;
 	list_for_each(l, &info->free_list) {
 		blk = list_entry(l, rh_block_t, list);
-		if (size <= blk->size)
-			break;
+		if (size <= blk->size) {
+			start = (blk->start + alignment - 1) & ~(alignment - 1);
+			if (start + size <= blk->start + blk->size)
+				break;
+		}
 		blk = NULL;
 	}
 
@@ -470,25 +469,36 @@ unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const ch
 		list_del(&blk->list);
 		newblk = blk;
 	} else {
+		/* Fragment caused, split if needed */
+		/* Create block for fragment in the beginning */
+		sp_size = start - blk->start;
+		if (sp_size) {
+			rh_block_t *spblk;
+
+			spblk = get_slot(info);
+			spblk->start = blk->start;
+			spblk->size = sp_size;
+			/* add before the blk */
+			list_add(&spblk->list, blk->list.prev);
+		}
 		newblk = get_slot(info);
-		newblk->start = blk->start;
+		newblk->start = start;
 		newblk->size = size;
 
-		/* blk still in free list, with updated start, size */
-		blk->start += size;
-		blk->size -= size;
+		/* blk still in free list, with updated start and size
+		 * for fragment in the end */
+		blk->start = start + size;
+		blk->size -= sp_size + size;
+		/* No fragment in the end, remove blk */
+		if (blk->size == 0) {
+			list_del(&blk->list);
+			release_slot(info, blk);
+		}
 	}
 
 	newblk->owner = owner;
-	start = newblk->start;
 	attach_taken_block(info, newblk);
 
-	/* for larger alignment return fixed up pointer */
-	/* this is no problem with the deallocator since */
-	/* we scan for pointers that lie in the blocks */
-	if (alignment > info->alignment)
-		start = (start + alignment - 1) & ~(alignment - 1);
-
 	return start;
 }
 
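
For a sense of the savings, this sketch contrasts how much of the heap a
single request consumed before and after the patch. taken_old() and
taken_new() are hypothetical helpers mirroring the removed and added code
paths, with rheap_align standing in for info->alignment:

#include <stdio.h>

/* Pre-patch: pad the request by align - 1 up front, round to the rheap
 * alignment, and mark the whole padded chunk as taken. */
static unsigned long taken_old(unsigned long size, unsigned long align,
			       unsigned long rheap_align)
{
	if (align > rheap_align)
		size += align - 1;
	return (size + rheap_align - 1) & ~(rheap_align - 1);
}

/* Post-patch: only the rounded request itself is taken; the leading and
 * trailing fragments go back on the free list. */
static unsigned long taken_new(unsigned long size, unsigned long rheap_align)
{
	return (size + rheap_align - 1) & ~(rheap_align - 1);
}

int main(void)
{
	/* e.g. a 64-byte buffer at 64-byte alignment with 4-byte rheap
	 * alignment: 128 bytes taken before the patch, 64 after */
	printf("old: %lu\n", taken_old(64, 64, 4));
	printf("new: %lu\n", taken_new(64, 4));
	return 0;
}

The pre-patch path loses up to alignment - 1 bytes on every aligned
allocation; with several aligned buffers per UCC, that padding plausibly
accounts for the MURAM shortage the commit message describes.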