author		Christoph Lameter <clameter@sgi.com>	2008-04-14 12:11:40 -0400
committer	Pekka Enberg <penberg@cs.helsinki.fi>	2008-04-27 11:28:17 -0400
commit		205ab99dd103e3dd5b0964dad8a16dfe2db69b2e (patch)
tree		026a601ea25681cbf34f37360880f78a305a32f5 /mm
parent		834f3d119234b35a1985a2449831d99356637937 (diff)
slub: Update statistics handling for variable order slabs
Change the statistics to consider that slabs of the same slabcache can have a different number of objects in them, since they may be of different order.

Provide a new sysfs field, total_objects, which shows the total number of objects that the allocated slabs of a slabcache could hold.

Add a max field that holds the largest slab order that was ever used for a slab cache.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
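As a quick illustration (not part of the patch itself): the counters end up as plain-text attribute files under each cache's sysfs directory, e.g. /sys/kernel/slab/<cache>/objects and the new /sys/kernel/slab/<cache>/total_objects. A minimal userspace sketch that reads both and prints the fill ratio follows; the cache name "kmalloc-64" is only an example, and which caches exist depends on the running kernel.

/*
 * Sketch: read the "objects" and "total_objects" counters exposed under
 * /sys/kernel/slab/<cache>/ and print how full the cache's slabs are.
 * The cache name below is an assumption for illustration only.
 */
#include <stdio.h>

static long read_counter(const char *cache, const char *attr)
{
	char path[256];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/slab/%s/%s", cache, attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)	/* first number; per-node data may follow */
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	const char *cache = "kmalloc-64";	/* example cache name */
	long objects = read_counter(cache, "objects");
	long total = read_counter(cache, "total_objects");

	if (objects < 0 || total <= 0) {
		fprintf(stderr, "could not read counters for %s\n", cache);
		return 1;
	}
	printf("%s: %ld/%ld objects in use (%.1f%%)\n",
	       cache, objects, total, 100.0 * objects / total);
	return 0;
}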
Diffstat (limited to 'mm')
-rw-r--r--	mm/slub.c	150
1 file changed, 97 insertions(+), 53 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 0a220df5ed7c..c8514e93ffdf 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -886,7 +886,7 @@ static inline unsigned long slabs_node(struct kmem_cache *s, int node)
 	return atomic_long_read(&n->nr_slabs);
 }
 
-static inline void inc_slabs_node(struct kmem_cache *s, int node)
+static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
 {
 	struct kmem_cache_node *n = get_node(s, node);
 
@@ -896,14 +896,17 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node)
 	 * dilemma by deferring the increment of the count during
 	 * bootstrap (see early_kmem_cache_node_alloc).
 	 */
-	if (!NUMA_BUILD || n)
+	if (!NUMA_BUILD || n) {
 		atomic_long_inc(&n->nr_slabs);
+		atomic_long_add(objects, &n->total_objects);
+	}
 }
-static inline void dec_slabs_node(struct kmem_cache *s, int node)
+static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
 {
 	struct kmem_cache_node *n = get_node(s, node);
 
 	atomic_long_dec(&n->nr_slabs);
+	atomic_long_sub(objects, &n->total_objects);
 }
 
 /* Object debug checks for alloc/free paths */
@@ -1101,9 +1104,12 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
 
 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
 							{ return 0; }
-static inline void inc_slabs_node(struct kmem_cache *s, int node) {}
-static inline void dec_slabs_node(struct kmem_cache *s, int node) {}
+static inline void inc_slabs_node(struct kmem_cache *s, int node,
+							int objects) {}
+static inline void dec_slabs_node(struct kmem_cache *s, int node,
+							int objects) {}
 #endif
+
 /*
  * Slab allocation and freeing
  */
@@ -1155,7 +1161,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (!page)
 		goto out;
 
-	inc_slabs_node(s, page_to_nid(page));
+	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab = s;
 	page->flags |= 1 << PG_slab;
 	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
@@ -1230,7 +1236,7 @@ static void free_slab(struct kmem_cache *s, struct page *page)
 
 static void discard_slab(struct kmem_cache *s, struct page *page)
 {
-	dec_slabs_node(s, page_to_nid(page));
+	dec_slabs_node(s, page_to_nid(page), page->objects);
 	free_slab(s, page);
 }
 
@@ -2144,7 +2150,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 	init_tracking(kmalloc_caches, n);
 #endif
 	init_kmem_cache_node(n);
-	inc_slabs_node(kmalloc_caches, node);
+	inc_slabs_node(kmalloc_caches, node, page->objects);
 
 	/*
 	 * lockdep requires consistent irq usage for each lock
@@ -2341,6 +2347,8 @@ static int calculate_sizes(struct kmem_cache *s)
 	 * Determine the number of objects per slab
 	 */
 	s->oo = oo_make(order, size);
+	if (oo_objects(s->oo) > oo_objects(s->max))
+		s->max = s->oo;
 
 	return !!oo_objects(s->oo);
 
@@ -2813,7 +2821,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
 	struct kmem_cache_node *n;
 	struct page *page;
 	struct page *t;
-	int objects = oo_objects(s->oo);
+	int objects = oo_objects(s->max);
 	struct list_head *slabs_by_inuse =
 		kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
 	unsigned long flags;
@@ -3276,7 +3284,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 }
 
 #if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO)
-static unsigned long count_partial(struct kmem_cache_node *n)
+static unsigned long count_partial(struct kmem_cache_node *n,
+						int (*get_count)(struct page *))
 {
 	unsigned long flags;
 	unsigned long x = 0;
@@ -3284,10 +3293,25 @@ static unsigned long count_partial(struct kmem_cache_node *n)
 
 	spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry(page, &n->partial, lru)
-		x += page->inuse;
+		x += get_count(page);
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
+
+static int count_inuse(struct page *page)
+{
+	return page->inuse;
+}
+
+static int count_total(struct page *page)
+{
+	return page->objects;
+}
+
+static int count_free(struct page *page)
+{
+	return page->objects - page->inuse;
+}
 #endif
 
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
@@ -3376,7 +3400,7 @@ static long validate_slab_cache(struct kmem_cache *s)
 {
 	int node;
 	unsigned long count = 0;
-	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->oo)) *
+	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
 				sizeof(unsigned long), GFP_KERNEL);
 
 	if (!map)
@@ -3676,22 +3700,23 @@ static int list_locations(struct kmem_cache *s, char *buf,
 }
 
 enum slab_stat_type {
-	SL_FULL,
-	SL_PARTIAL,
-	SL_CPU,
-	SL_OBJECTS
+	SL_ALL,			/* All slabs */
+	SL_PARTIAL,		/* Only partially allocated slabs */
+	SL_CPU,			/* Only slabs used for cpu caches */
+	SL_OBJECTS,		/* Determine allocated objects not slabs */
+	SL_TOTAL		/* Determine object capacity not slabs */
 };
 
-#define SO_FULL		(1 << SL_FULL)
+#define SO_ALL		(1 << SL_ALL)
 #define SO_PARTIAL	(1 << SL_PARTIAL)
 #define SO_CPU		(1 << SL_CPU)
 #define SO_OBJECTS	(1 << SL_OBJECTS)
+#define SO_TOTAL	(1 << SL_TOTAL)
 
 static ssize_t show_slab_objects(struct kmem_cache *s,
 			char *buf, unsigned long flags)
 {
 	unsigned long total = 0;
-	int cpu;
 	int node;
 	int x;
 	unsigned long *nodes;
@@ -3702,56 +3727,60 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		return -ENOMEM;
 	per_cpu = nodes + nr_node_ids;
 
-	for_each_possible_cpu(cpu) {
-		struct page *page;
-		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+	if (flags & SO_CPU) {
+		int cpu;
 
-		if (!c)
-			continue;
+		for_each_possible_cpu(cpu) {
+			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
 
-		page = c->page;
-		node = c->node;
-		if (node < 0)
-			continue;
-		if (page) {
-			if (flags & SO_CPU) {
-				if (flags & SO_OBJECTS)
-					x = page->inuse;
+			if (!c || c->node < 0)
+				continue;
+
+			if (c->page) {
+				if (flags & SO_TOTAL)
+					x = c->page->objects;
+				else if (flags & SO_OBJECTS)
+					x = c->page->inuse;
 				else
 					x = 1;
+
 				total += x;
-				nodes[node] += x;
+				nodes[c->node] += x;
 			}
-			per_cpu[node]++;
+			per_cpu[c->node]++;
 		}
 	}
 
-	for_each_node_state(node, N_NORMAL_MEMORY) {
-		struct kmem_cache_node *n = get_node(s, node);
+	if (flags & SO_ALL) {
+		for_each_node_state(node, N_NORMAL_MEMORY) {
+			struct kmem_cache_node *n = get_node(s, node);
+
+			if (flags & SO_TOTAL)
+				x = atomic_long_read(&n->total_objects);
+			else if (flags & SO_OBJECTS)
+				x = atomic_long_read(&n->total_objects) -
+					count_partial(n, count_free);
 
-		if (flags & SO_PARTIAL) {
-			if (flags & SO_OBJECTS)
-				x = count_partial(n);
 			else
-				x = n->nr_partial;
+				x = atomic_long_read(&n->nr_slabs);
 			total += x;
 			nodes[node] += x;
 		}
 
-		if (flags & SO_FULL) {
-			int full_slabs = atomic_long_read(&n->nr_slabs)
-					- per_cpu[node]
-					- n->nr_partial;
+	} else if (flags & SO_PARTIAL) {
+		for_each_node_state(node, N_NORMAL_MEMORY) {
+			struct kmem_cache_node *n = get_node(s, node);
 
-			if (flags & SO_OBJECTS)
-				x = full_slabs * oo_objects(s->oo);
+			if (flags & SO_TOTAL)
+				x = count_partial(n, count_total);
+			else if (flags & SO_OBJECTS)
+				x = count_partial(n, count_inuse);
 			else
-				x = full_slabs;
+				x = n->nr_partial;
 			total += x;
 			nodes[node] += x;
 		}
 	}
-
 	x = sprintf(buf, "%lu", total);
 #ifdef CONFIG_NUMA
 	for_each_node_state(node, N_NORMAL_MEMORY)
@@ -3852,7 +3881,7 @@ SLAB_ATTR_RO(aliases);
 
 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
 {
-	return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU);
+	return show_slab_objects(s, buf, SO_ALL);
 }
 SLAB_ATTR_RO(slabs);
 
@@ -3870,10 +3899,22 @@ SLAB_ATTR_RO(cpu_slabs);
 
 static ssize_t objects_show(struct kmem_cache *s, char *buf)
 {
-	return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS);
+	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
 }
 SLAB_ATTR_RO(objects);
 
+static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
+{
+	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
+}
+SLAB_ATTR_RO(objects_partial);
+
+static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
+{
+	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
+}
+SLAB_ATTR_RO(total_objects);
+
 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
 {
 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
@@ -4131,6 +4172,8 @@ static struct attribute *slab_attrs[] = {
 	&objs_per_slab_attr.attr,
 	&order_attr.attr,
 	&objects_attr.attr,
+	&objects_partial_attr.attr,
+	&total_objects_attr.attr,
 	&slabs_attr.attr,
 	&partial_attr.attr,
 	&cpu_slabs_attr.attr,
@@ -4459,7 +4502,8 @@ static int s_show(struct seq_file *m, void *p)
 	unsigned long nr_partials = 0;
 	unsigned long nr_slabs = 0;
 	unsigned long nr_inuse = 0;
-	unsigned long nr_objs;
+	unsigned long nr_objs = 0;
+	unsigned long nr_free = 0;
 	struct kmem_cache *s;
 	int node;
 
@@ -4473,11 +4517,11 @@ static int s_show(struct seq_file *m, void *p)
 
 		nr_partials += n->nr_partial;
 		nr_slabs += atomic_long_read(&n->nr_slabs);
-		nr_inuse += count_partial(n);
+		nr_objs += atomic_long_read(&n->total_objects);
+		nr_free += count_partial(n, count_free);
 	}
 
-	nr_objs = nr_slabs * oo_objects(s->oo);
-	nr_inuse += (nr_slabs - nr_partials) * oo_objects(s->oo);
+	nr_inuse = nr_objs - nr_free;
 
 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
 		nr_objs, s->size, oo_objects(s->oo),
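
Side note, as an illustration of the count_partial() change above (a standalone userspace sketch, not kernel code): the patch turns count_partial() into a single list walk parameterized by a per-page accessor, so the same traversal can yield objects in use, total capacity, or free objects. The struct below is a stand-in for the fields used from struct page, and the sample numbers are made up.

#include <stdio.h>

/* Stand-in for the slab fields of struct page used by the accessors. */
struct fake_page {
	int inuse;	/* objects currently allocated in this slab */
	int objects;	/* object capacity; varies with slab order */
};

static int count_inuse(const struct fake_page *page) { return page->inuse; }
static int count_total(const struct fake_page *page) { return page->objects; }
static int count_free(const struct fake_page *page) { return page->objects - page->inuse; }

/* One walk over the "partial list"; the statistic is chosen by the callback. */
static unsigned long count_partial(const struct fake_page *pages, int nr,
				   int (*get_count)(const struct fake_page *))
{
	unsigned long x = 0;
	int i;

	for (i = 0; i < nr; i++)
		x += get_count(&pages[i]);
	return x;
}

int main(void)
{
	/* Two partial slabs of different order, hence different capacity. */
	struct fake_page partial[] = { { 5, 8 }, { 3, 16 } };

	printf("inuse=%lu total=%lu free=%lu\n",
	       count_partial(partial, 2, count_inuse),
	       count_partial(partial, 2, count_total),
	       count_partial(partial, 2, count_free));
	return 0;
}

With the sample data this prints inuse=8 total=24 free=16, mirroring how show_slab_objects() selects count_inuse, count_total or count_free depending on the requested statistic.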