Diffstat (limited to 'mm/slab.c')
-rw-r--r--    mm/slab.c    81
1 file changed, 59 insertions(+), 22 deletions(-)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -702,32 +702,69 @@ kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 }
 EXPORT_SYMBOL(kmem_find_general_cachep);
 
-/* Cal the num objs, wastage, and bytes left over for a given slab size. */
-static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
-                           int flags, size_t *left_over, unsigned int *num)
+static size_t slab_mgmt_size(size_t nr_objs, size_t align)
 {
-        int i;
-        size_t wastage = PAGE_SIZE << gfporder;
-        size_t extra = 0;
-        size_t base = 0;
+        return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
+}
 
-        if (!(flags & CFLGS_OFF_SLAB)) {
-                base = sizeof(struct slab);
-                extra = sizeof(kmem_bufctl_t);
-        }
-        i = 0;
-        while (i * size + ALIGN(base + i * extra, align) <= wastage)
-                i++;
-        if (i > 0)
-                i--;
+/* Calculate the number of objects and left-over bytes for a given
+   buffer size. */
+static void cache_estimate(unsigned long gfporder, size_t buffer_size,
+                           size_t align, int flags, size_t *left_over,
+                           unsigned int *num)
+{
+        int nr_objs;
+        size_t mgmt_size;
+        size_t slab_size = PAGE_SIZE << gfporder;
 
-        if (i > SLAB_LIMIT)
-                i = SLAB_LIMIT;
+        /*
+         * The slab management structure can be either off the slab or
+         * on it. For the latter case, the memory allocated for a
+         * slab is used for:
+         *
+         * - The struct slab
+         * - One kmem_bufctl_t for each object
+         * - Padding to respect alignment of @align
+         * - @buffer_size bytes for each object
+         *
+         * If the slab management structure is off the slab, then the
+         * alignment will already be calculated into the size. Because
+         * the slabs are all pages aligned, the objects will be at the
+         * correct alignment when allocated.
+         */
+        if (flags & CFLGS_OFF_SLAB) {
+                mgmt_size = 0;
+                nr_objs = slab_size / buffer_size;
 
-        *num = i;
-        wastage -= i * size;
-        wastage -= ALIGN(base + i * extra, align);
-        *left_over = wastage;
+                if (nr_objs > SLAB_LIMIT)
+                        nr_objs = SLAB_LIMIT;
+        } else {
+                /*
+                 * Ignore padding for the initial guess. The padding
+                 * is at most @align-1 bytes, and @buffer_size is at
+                 * least @align. In the worst case, this result will
+                 * be one greater than the number of objects that fit
+                 * into the memory allocation when taking the padding
+                 * into account.
+                 */
+                nr_objs = (slab_size - sizeof(struct slab)) /
+                          (buffer_size + sizeof(kmem_bufctl_t));
+
+                /*
+                 * This calculated number will be either the right
+                 * amount, or one greater than what we want.
+                 */
+                if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
+                       > slab_size)
+                        nr_objs--;
+
+                if (nr_objs > SLAB_LIMIT)
+                        nr_objs = SLAB_LIMIT;
+
+                mgmt_size = slab_mgmt_size(nr_objs, align);
+        }
+        *num = nr_objs;
+        *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
 }
 
 #define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
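To see the new arithmetic in action, here is a minimal userspace sketch of the on-slab branch, the interesting case where the guess-then-correct logic applies. Everything below the includes is an assumption for illustration: PAGE_SIZE, struct slab, and kmem_bufctl_t are stand-ins for the kernel definitions (their real sizes depend on configuration and architecture), and ALIGN() mirrors the kernel macro's semantics. It is not the code from mm/slab.c, only a model of its estimation step.

/*
 * Userspace model of the on-slab cache_estimate() path. All kernel
 * constants and types here are assumed stand-ins, not real definitions.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE   4096UL                      /* assume 4 KiB pages */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

typedef unsigned int kmem_bufctl_t;             /* stand-in bufctl type */
struct slab { void *pad[5]; };                  /* stand-in for struct slab */

static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
        return ALIGN(sizeof(struct slab) + nr_objs * sizeof(kmem_bufctl_t), align);
}

/* On-slab estimate: guess high by ignoring padding, then correct by one. */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
                           size_t align, size_t *left_over, unsigned int *num)
{
        size_t slab_size = PAGE_SIZE << gfporder;
        size_t nr_objs = (slab_size - sizeof(struct slab)) /
                         (buffer_size + sizeof(kmem_bufctl_t));

        /* The guess is at most one too large; back off if it overflows. */
        if (slab_mgmt_size(nr_objs, align) + nr_objs * buffer_size > slab_size)
                nr_objs--;

        *num = nr_objs;
        *left_over = slab_size - nr_objs * buffer_size -
                     slab_mgmt_size(nr_objs, align);
}

int main(void)
{
        size_t left_over;
        unsigned int num;

        cache_estimate(0, 256, 64, &left_over, &num);
        printf("order-0, 256-byte objects: %u objects, %zu bytes left over\n",
               num, left_over);
        return 0;
}

With these stand-in sizes on a 64-bit build (40-byte struct slab, 4-byte bufctl), the initial guess is (4096 - 40) / (256 + 4) = 15 objects; the management area aligns up to 128 bytes, and 128 + 15*256 = 3968 fits in the 4096-byte slab, so no correction is needed and 128 bytes are left over. The decrement only fires when the alignment padding pushes the total past the slab, which is exactly the "one greater than what we want" case the patch comment describes.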