author      Steven Rostedt <rostedt@goodmis.org>      2006-02-01 06:05:45 -0500
committer   Linus Torvalds <torvalds@g5.osdl.org>    2006-02-01 11:53:17 -0500
commit      fbaccacff1f17c65ae0972085368a7ec75be6062
tree        a559ee27ac259f7a48c036ec8eba94a04748c25e /mm/slab.c
parent      5ec8a847bb8ae2ba6395cfb7cb4bfdc78ada82ed
[PATCH] slab: cache_estimate cleanup
Clean up cache_estimate() in mm/slab.c and improve the algorithm from O(n) to
O(1). We first calculate the maximum number of objects a slab can hold once
the struct slab and one kmem_bufctl_t per object have been given enough space.
After that, to respect the alignment rules, we decrease the number of objects
if necessary. Since the required padding is at most align-1 bytes and each
object occupies at least align bytes, decreasing the number of objects by one
is always enough.
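For illustration, here is a minimal standalone sketch of this
estimate-and-correct computation in plain C. The sizes (a 40-byte stand-in
for struct slab, 4-byte kmem_bufctl_t, a 4096-byte page, 100-byte objects,
64-byte alignment) are made-up values chosen so that the one-object
correction actually fires; the real kernel values vary by architecture and
cache.

#include <stdio.h>

/* Same rounding the kernel's ALIGN() macro performs. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	size_t slab_size = 4096;	/* PAGE_SIZE << gfporder, order 0 */
	size_t slab_hdr = 40;		/* stand-in for sizeof(struct slab) */
	size_t bufctl = 4;		/* stand-in for sizeof(kmem_bufctl_t) */
	size_t buffer_size = 100;	/* object size, at least align */
	size_t align = 64;

	/* Initial O(1) guess: ignore the alignment padding entirely. */
	size_t nr_objs = (slab_size - slab_hdr) / (buffer_size + bufctl);

	/*
	 * Padding is at most align-1 bytes and each object occupies at
	 * least align bytes, so the guess overshoots by at most one.
	 * With these numbers the guess is 39, which no longer fits once
	 * the management area is padded up to 256 bytes, so it is
	 * corrected to 38.
	 */
	if (ALIGN(slab_hdr + nr_objs * bufctl, align)
	    + nr_objs * buffer_size > slab_size)
		nr_objs--;

	size_t mgmt = ALIGN(slab_hdr + nr_objs * bufctl, align);
	printf("objects: %zu, left over: %zu bytes\n",
	       nr_objs, slab_size - nr_objs * buffer_size - mgmt);
	return 0;
}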
The optimization was originally made by Balbir Singh, with further
improvements from Steven Rostedt. Manfred Spraul provided additional
modifications: no loop at all for the off-slab case, plus comments explaining
the background.
Acked-by: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--    mm/slab.c    81
1 file changed, 59 insertions(+), 22 deletions(-)
@@ -702,32 +702,69 @@ kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 }
 EXPORT_SYMBOL(kmem_find_general_cachep);
 
-/* Cal the num objs, wastage, and bytes left over for a given slab size. */
-static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
-			   int flags, size_t *left_over, unsigned int *num)
+static size_t slab_mgmt_size(size_t nr_objs, size_t align)
 {
-	int i;
-	size_t wastage = PAGE_SIZE << gfporder;
-	size_t extra = 0;
-	size_t base = 0;
+	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
+}
 
-	if (!(flags & CFLGS_OFF_SLAB)) {
-		base = sizeof(struct slab);
-		extra = sizeof(kmem_bufctl_t);
-	}
-	i = 0;
-	while (i * size + ALIGN(base + i * extra, align) <= wastage)
-		i++;
-	if (i > 0)
-		i--;
+/* Calculate the number of objects and left-over bytes for a given
+   buffer size. */
+static void cache_estimate(unsigned long gfporder, size_t buffer_size,
+			   size_t align, int flags, size_t *left_over,
+			   unsigned int *num)
+{
+	int nr_objs;
+	size_t mgmt_size;
+	size_t slab_size = PAGE_SIZE << gfporder;
 
-	if (i > SLAB_LIMIT)
-		i = SLAB_LIMIT;
+	/*
+	 * The slab management structure can be either off the slab or
+	 * on it. For the latter case, the memory allocated for a
+	 * slab is used for:
+	 *
+	 * - The struct slab
+	 * - One kmem_bufctl_t for each object
+	 * - Padding to respect alignment of @align
+	 * - @buffer_size bytes for each object
+	 *
+	 * If the slab management structure is off the slab, then the
+	 * alignment will already be calculated into the size. Because
+	 * the slabs are all pages aligned, the objects will be at the
+	 * correct alignment when allocated.
+	 */
+	if (flags & CFLGS_OFF_SLAB) {
+		mgmt_size = 0;
+		nr_objs = slab_size / buffer_size;
 
-	*num = i;
-	wastage -= i * size;
-	wastage -= ALIGN(base + i * extra, align);
-	*left_over = wastage;
+		if (nr_objs > SLAB_LIMIT)
+			nr_objs = SLAB_LIMIT;
+	} else {
+		/*
+		 * Ignore padding for the initial guess. The padding
+		 * is at most @align-1 bytes, and @buffer_size is at
+		 * least @align. In the worst case, this result will
+		 * be one greater than the number of objects that fit
+		 * into the memory allocation when taking the padding
+		 * into account.
+		 */
+		nr_objs = (slab_size - sizeof(struct slab)) /
+			  (buffer_size + sizeof(kmem_bufctl_t));
+
+		/*
+		 * This calculated number will be either the right
+		 * amount, or one greater than what we want.
+		 */
+		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
+		       > slab_size)
+			nr_objs--;
+
+		if (nr_objs > SLAB_LIMIT)
+			nr_objs = SLAB_LIMIT;
+
+		mgmt_size = slab_mgmt_size(nr_objs, align);
+	}
+	*num = nr_objs;
+	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
 }
 
 #define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
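For context, cache_estimate() is invoked when a cache is created, to find a
page order whose slab holds at least one object. The following is a
hypothetical caller sketch, not the kernel's actual code: the real
kmem_cache_create() logic of this era also weighs internal fragmentation and
off-slab management, and pick_slab_order() and the MAX_GFP_ORDER cap are
assumed names for illustration.

/*
 * Hypothetical caller sketch: probe increasing page orders until the
 * slab can hold at least one object of the given size.
 */
static unsigned long pick_slab_order(size_t buffer_size, size_t align,
				     int flags)
{
	size_t left_over;
	unsigned int num;
	unsigned long gfporder;

	for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) {
		cache_estimate(gfporder, buffer_size, align, flags,
			       &left_over, &num);
		if (num)	/* at least one object fits at this order */
			return gfporder;
	}
	return MAX_GFP_ORDER;
}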