author		Christoph Lameter <clameter@sgi.com>	2008-04-14 12:11:40 -0400
committer	Pekka Enberg <penberg@cs.helsinki.fi>	2008-04-27 11:28:17 -0400
commit		205ab99dd103e3dd5b0964dad8a16dfe2db69b2e
tree		026a601ea25681cbf34f37360880f78a305a32f5
parent		834f3d119234b35a1985a2449831d99356637937
slub: Update statistics handling for variable order slabs
Change the statistics to consider that slabs of the same slabcache
can have different numbers of objects in them since they may be of
different orders.
Provide a new sysfs field
total_objects
which shows the total number of objects that the allocated slabs of a
slabcache could hold.
Add a max field that holds the largest slab order that was ever used
for a slab cache.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
-rw-r--r--	Documentation/vm/slabinfo.c	|  27
-rw-r--r--	include/linux/slub_def.h	|   2
-rw-r--r--	mm/slub.c			| 150
3 files changed, 110 insertions, 69 deletions
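Note: the counters added here surface as per-cache sysfs attributes, which the slabinfo tool is taught to read below. As a rough standalone sketch (not part of this patch), the following program reads objects, objects_partial and total_objects for one cache; it assumes the usual /sys/kernel/slab/<cache>/ layout, and read_counter() is an illustrative helper, not a kernel or slabinfo.c function.

/* Minimal sketch (not part of this patch): read the new SLUB counters for
 * one cache from sysfs. Assumes the usual /sys/kernel/slab/<cache>/ layout;
 * the read_counter() helper is purely illustrative. */
#include <stdio.h>
#include <stdlib.h>

static unsigned long read_counter(const char *cache, const char *field)
{
	char path[256];
	unsigned long val = 0;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/slab/%s/%s", cache, field);
	f = fopen(path, "r");
	if (!f)
		return 0;	/* field absent on kernels without this patch */
	if (fscanf(f, "%lu", &val) != 1)
		val = 0;
	fclose(f);
	return val;
}

int main(int argc, char **argv)
{
	const char *cache = argc > 1 ? argv[1] : "kmalloc-64";
	unsigned long objects = read_counter(cache, "objects");
	unsigned long partial = read_counter(cache, "objects_partial");
	unsigned long total = read_counter(cache, "total_objects");

	/* total_objects is capacity; objects is what is currently allocated */
	printf("%s: %lu allocated, %lu in partial slabs, %lu capacity\n",
	       cache, objects, partial, total);
	return 0;
}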
diff --git a/Documentation/vm/slabinfo.c b/Documentation/vm/slabinfo.c
index 22d7e3e4d60c..d3ce295bffac 100644
--- a/Documentation/vm/slabinfo.c
+++ b/Documentation/vm/slabinfo.c
@@ -31,7 +31,7 @@ struct slabinfo {
 	int hwcache_align, object_size, objs_per_slab;
 	int sanity_checks, slab_size, store_user, trace;
 	int order, poison, reclaim_account, red_zone;
-	unsigned long partial, objects, slabs;
+	unsigned long partial, objects, slabs, objects_partial, objects_total;
 	unsigned long alloc_fastpath, alloc_slowpath;
 	unsigned long free_fastpath, free_slowpath;
 	unsigned long free_frozen, free_add_partial, free_remove_partial;
@@ -540,7 +540,8 @@ void slabcache(struct slabinfo *s)
 		return;
 
 	store_size(size_str, slab_size(s));
-	snprintf(dist_str, 40, "%lu/%lu/%d", s->slabs, s->partial, s->cpu_slabs);
+	snprintf(dist_str, 40, "%lu/%lu/%d", s->slabs - s->cpu_slabs,
+		s->partial, s->cpu_slabs);
 
 	if (!line++)
 		first_line();
@@ -776,7 +777,6 @@ void totals(void)
 		unsigned long used;
 		unsigned long long wasted;
 		unsigned long long objwaste;
-		long long objects_in_partial_slabs;
 		unsigned long percentage_partial_slabs;
 		unsigned long percentage_partial_objs;
 
@@ -790,18 +790,11 @@ void totals(void)
 		wasted = size - used;
 		objwaste = s->slab_size - s->object_size;
 
-		objects_in_partial_slabs = s->objects -
-			(s->slabs - s->partial - s ->cpu_slabs) *
-			s->objs_per_slab;
-
-		if (objects_in_partial_slabs < 0)
-			objects_in_partial_slabs = 0;
-
 		percentage_partial_slabs = s->partial * 100 / s->slabs;
 		if (percentage_partial_slabs > 100)
 			percentage_partial_slabs = 100;
 
-		percentage_partial_objs = objects_in_partial_slabs * 100
+		percentage_partial_objs = s->objects_partial * 100
 					/ s->objects;
 
 		if (percentage_partial_objs > 100)
@@ -823,8 +816,8 @@ void totals(void)
 			min_objects = s->objects;
 		if (used < min_used)
 			min_used = used;
-		if (objects_in_partial_slabs < min_partobj)
-			min_partobj = objects_in_partial_slabs;
+		if (s->objects_partial < min_partobj)
+			min_partobj = s->objects_partial;
 		if (percentage_partial_slabs < min_ppart)
 			min_ppart = percentage_partial_slabs;
 		if (percentage_partial_objs < min_ppartobj)
@@ -848,8 +841,8 @@ void totals(void)
 			max_objects = s->objects;
 		if (used > max_used)
 			max_used = used;
-		if (objects_in_partial_slabs > max_partobj)
-			max_partobj = objects_in_partial_slabs;
+		if (s->objects_partial > max_partobj)
+			max_partobj = s->objects_partial;
 		if (percentage_partial_slabs > max_ppart)
 			max_ppart = percentage_partial_slabs;
 		if (percentage_partial_objs > max_ppartobj)
@@ -864,7 +857,7 @@ void totals(void)
 
 		total_objects += s->objects;
 		total_used += used;
-		total_partobj += objects_in_partial_slabs;
+		total_partobj += s->objects_partial;
 		total_ppart += percentage_partial_slabs;
 		total_ppartobj += percentage_partial_objs;
 
@@ -1160,6 +1153,8 @@ void read_slab_dir(void)
 			slab->hwcache_align = get_obj("hwcache_align");
 			slab->object_size = get_obj("object_size");
 			slab->objects = get_obj("objects");
+			slab->objects_partial = get_obj("objects_partial");
+			slab->objects_total = get_obj("objects_total");
 			slab->objs_per_slab = get_obj("objs_per_slab");
 			slab->order = get_obj("order");
 			slab->partial = get_obj("partial");
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 4131e5fbd18b..4236b5dee812 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -48,6 +48,7 @@ struct kmem_cache_node {
 	struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_t nr_slabs;
+	atomic_long_t total_objects;
 	struct list_head full;
 #endif
 };
@@ -79,6 +80,7 @@ struct kmem_cache {
 	struct kmem_cache_node local_node;
 
 	/* Allocation and freeing of slabs */
+	struct kmem_cache_order_objects max;
 	gfp_t allocflags;	/* gfp flags to use on each alloc */
 	int refcount;		/* Refcount for slab cache destroy */
 	void (*ctor)(struct kmem_cache *, void *);
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -886,7 +886,7 @@ static inline unsigned long slabs_node(struct kmem_cache *s, int node)
 	return atomic_long_read(&n->nr_slabs);
 }
 
-static inline void inc_slabs_node(struct kmem_cache *s, int node)
+static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
 {
 	struct kmem_cache_node *n = get_node(s, node);
 
@@ -896,14 +896,17 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node)
 	 * dilemma by deferring the increment of the count during
 	 * bootstrap (see early_kmem_cache_node_alloc).
 	 */
-	if (!NUMA_BUILD || n)
+	if (!NUMA_BUILD || n) {
 		atomic_long_inc(&n->nr_slabs);
+		atomic_long_add(objects, &n->total_objects);
+	}
 }
-static inline void dec_slabs_node(struct kmem_cache *s, int node)
+static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
 {
 	struct kmem_cache_node *n = get_node(s, node);
 
 	atomic_long_dec(&n->nr_slabs);
+	atomic_long_sub(objects, &n->total_objects);
 }
 
 /* Object debug checks for alloc/free paths */
@@ -1101,9 +1104,12 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
 
 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
 							{ return 0; }
-static inline void inc_slabs_node(struct kmem_cache *s, int node) {}
-static inline void dec_slabs_node(struct kmem_cache *s, int node) {}
+static inline void inc_slabs_node(struct kmem_cache *s, int node,
+							int objects) {}
+static inline void dec_slabs_node(struct kmem_cache *s, int node,
+							int objects) {}
 #endif
+
 /*
  * Slab allocation and freeing
  */
@@ -1155,7 +1161,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (!page)
 		goto out;
 
-	inc_slabs_node(s, page_to_nid(page));
+	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab = s;
 	page->flags |= 1 << PG_slab;
 	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
@@ -1230,7 +1236,7 @@ static void free_slab(struct kmem_cache *s, struct page *page)
 
 static void discard_slab(struct kmem_cache *s, struct page *page)
 {
-	dec_slabs_node(s, page_to_nid(page));
+	dec_slabs_node(s, page_to_nid(page), page->objects);
 	free_slab(s, page);
 }
 
@@ -2144,7 +2150,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 	init_tracking(kmalloc_caches, n);
 #endif
 	init_kmem_cache_node(n);
-	inc_slabs_node(kmalloc_caches, node);
+	inc_slabs_node(kmalloc_caches, node, page->objects);
 
 	/*
 	 * lockdep requires consistent irq usage for each lock
@@ -2341,6 +2347,8 @@ static int calculate_sizes(struct kmem_cache *s)
 	 * Determine the number of objects per slab
 	 */
 	s->oo = oo_make(order, size);
+	if (oo_objects(s->oo) > oo_objects(s->max))
+		s->max = s->oo;
 
 	return !!oo_objects(s->oo);
 
@@ -2813,7 +2821,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
 	struct kmem_cache_node *n;
 	struct page *page;
 	struct page *t;
-	int objects = oo_objects(s->oo);
+	int objects = oo_objects(s->max);
 	struct list_head *slabs_by_inuse =
 		kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
 	unsigned long flags;
@@ -3276,7 +3284,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 }
 
 #if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO)
-static unsigned long count_partial(struct kmem_cache_node *n)
+static unsigned long count_partial(struct kmem_cache_node *n,
+					int (*get_count)(struct page *))
 {
 	unsigned long flags;
 	unsigned long x = 0;
@@ -3284,10 +3293,25 @@ static unsigned long count_partial(struct kmem_cache_node *n)
 
 	spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry(page, &n->partial, lru)
-		x += page->inuse;
+		x += get_count(page);
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
+
+static int count_inuse(struct page *page)
+{
+	return page->inuse;
+}
+
+static int count_total(struct page *page)
+{
+	return page->objects;
+}
+
+static int count_free(struct page *page)
+{
+	return page->objects - page->inuse;
+}
 #endif
 
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
@@ -3376,7 +3400,7 @@ static long validate_slab_cache(struct kmem_cache *s)
 {
 	int node;
 	unsigned long count = 0;
-	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->oo)) *
+	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
 				sizeof(unsigned long), GFP_KERNEL);
 
 	if (!map)
@@ -3676,22 +3700,23 @@ static int list_locations(struct kmem_cache *s, char *buf,
 }
 
 enum slab_stat_type {
-	SL_FULL,
-	SL_PARTIAL,
-	SL_CPU,
-	SL_OBJECTS
+	SL_ALL,			/* All slabs */
+	SL_PARTIAL,		/* Only partially allocated slabs */
+	SL_CPU,			/* Only slabs used for cpu caches */
+	SL_OBJECTS,		/* Determine allocated objects not slabs */
+	SL_TOTAL		/* Determine object capacity not slabs */
 };
 
-#define SO_FULL		(1 << SL_FULL)
+#define SO_ALL		(1 << SL_ALL)
 #define SO_PARTIAL	(1 << SL_PARTIAL)
 #define SO_CPU		(1 << SL_CPU)
 #define SO_OBJECTS	(1 << SL_OBJECTS)
+#define SO_TOTAL	(1 << SL_TOTAL)
 
 static ssize_t show_slab_objects(struct kmem_cache *s,
 			    char *buf, unsigned long flags)
 {
 	unsigned long total = 0;
-	int cpu;
 	int node;
 	int x;
 	unsigned long *nodes;
@@ -3702,56 +3727,60 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		return -ENOMEM;
 	per_cpu = nodes + nr_node_ids;
 
-	for_each_possible_cpu(cpu) {
-		struct page *page;
-		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
-
-		if (!c)
-			continue;
-
-		page = c->page;
-		node = c->node;
-		if (node < 0)
-			continue;
-		if (page) {
-			if (flags & SO_CPU) {
-				if (flags & SO_OBJECTS)
-					x = page->inuse;
-				else
-					x = 1;
-				total += x;
-				nodes[node] += x;
-			}
-			per_cpu[node]++;
-		}
-	}
-
-	for_each_node_state(node, N_NORMAL_MEMORY) {
-		struct kmem_cache_node *n = get_node(s, node);
-
-		if (flags & SO_PARTIAL) {
-			if (flags & SO_OBJECTS)
-				x = count_partial(n);
-			else
-				x = n->nr_partial;
-			total += x;
-			nodes[node] += x;
-		}
-
-		if (flags & SO_FULL) {
-			int full_slabs = atomic_long_read(&n->nr_slabs)
-					- per_cpu[node]
-					- n->nr_partial;
-
-			if (flags & SO_OBJECTS)
-				x = full_slabs * oo_objects(s->oo);
-			else
-				x = full_slabs;
-			total += x;
-			nodes[node] += x;
-		}
-	}
-
+	if (flags & SO_CPU) {
+		int cpu;
+
+		for_each_possible_cpu(cpu) {
+			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+
+			if (!c || c->node < 0)
+				continue;
+
+			if (c->page) {
+				if (flags & SO_TOTAL)
+					x = c->page->objects;
+				else if (flags & SO_OBJECTS)
+					x = c->page->inuse;
+				else
+					x = 1;
+
+				total += x;
+				nodes[c->node] += x;
+			}
+			per_cpu[c->node]++;
+		}
+	}
+
+	if (flags & SO_ALL) {
+		for_each_node_state(node, N_NORMAL_MEMORY) {
+			struct kmem_cache_node *n = get_node(s, node);
+
+			if (flags & SO_TOTAL)
+				x = atomic_long_read(&n->total_objects);
+			else if (flags & SO_OBJECTS)
+				x = atomic_long_read(&n->total_objects) -
+					count_partial(n, count_free);
+
+			else
+				x = atomic_long_read(&n->nr_slabs);
+			total += x;
+			nodes[node] += x;
+		}
+
+	} else if (flags & SO_PARTIAL) {
+		for_each_node_state(node, N_NORMAL_MEMORY) {
+			struct kmem_cache_node *n = get_node(s, node);
+
+			if (flags & SO_TOTAL)
+				x = count_partial(n, count_total);
+			else if (flags & SO_OBJECTS)
+				x = count_partial(n, count_inuse);
+			else
+				x = n->nr_partial;
+			total += x;
+			nodes[node] += x;
+		}
+	}
 	x = sprintf(buf, "%lu", total);
 #ifdef CONFIG_NUMA
 	for_each_node_state(node, N_NORMAL_MEMORY)
@@ -3852,7 +3881,7 @@ SLAB_ATTR_RO(aliases);
 
 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
 {
-	return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU);
+	return show_slab_objects(s, buf, SO_ALL);
 }
 SLAB_ATTR_RO(slabs);
 
@@ -3870,10 +3899,22 @@ SLAB_ATTR_RO(cpu_slabs);
 
 static ssize_t objects_show(struct kmem_cache *s, char *buf)
 {
-	return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS);
+	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
 }
 SLAB_ATTR_RO(objects);
 
+static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
+{
+	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
+}
+SLAB_ATTR_RO(objects_partial);
+
+static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
+{
+	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
+}
+SLAB_ATTR_RO(total_objects);
+
 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
 {
 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
@@ -4131,6 +4172,8 @@ static struct attribute *slab_attrs[] = {
 	&objs_per_slab_attr.attr,
 	&order_attr.attr,
 	&objects_attr.attr,
+	&objects_partial_attr.attr,
+	&total_objects_attr.attr,
 	&slabs_attr.attr,
 	&partial_attr.attr,
 	&cpu_slabs_attr.attr,
@@ -4459,7 +4502,8 @@ static int s_show(struct seq_file *m, void *p)
 	unsigned long nr_partials = 0;
 	unsigned long nr_slabs = 0;
 	unsigned long nr_inuse = 0;
-	unsigned long nr_objs;
+	unsigned long nr_objs = 0;
+	unsigned long nr_free = 0;
 	struct kmem_cache *s;
 	int node;
 
@@ -4473,11 +4517,11 @@ static int s_show(struct seq_file *m, void *p)
 
 		nr_partials += n->nr_partial;
 		nr_slabs += atomic_long_read(&n->nr_slabs);
-		nr_inuse += count_partial(n);
+		nr_objs += atomic_long_read(&n->total_objects);
+		nr_free += count_partial(n, count_free);
 	}
 
-	nr_objs = nr_slabs * oo_objects(s->oo);
-	nr_inuse += (nr_slabs - nr_partials) * oo_objects(s->oo);
+	nr_inuse = nr_objs - nr_free;
 
 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
 		nr_objs, s->size, oo_objects(s->oo),
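
For illustration only (not part of the patch): the reworked s_show() derives the /proc/slabinfo numbers as nr_inuse = nr_objs - nr_free, where nr_objs comes from the new n->total_objects counter and nr_free from count_partial(n, count_free). The toy program below mimics that arithmetic with made-up slabs of different order to show why the result no longer depends on a single objects-per-slab value; fake_slab and its values are invented stand-ins for struct page.

/* Toy model (not kernel code): mimic the new s_show() accounting. */
#include <stdio.h>

struct fake_slab {
	int objects;	/* capacity of this slab, depends on its order */
	int inuse;	/* objects currently allocated from it */
};

static int count_free(const struct fake_slab *s)
{
	return s->objects - s->inuse;	/* mirrors count_free() in mm/slub.c */
}

int main(void)
{
	/* two partial slabs of different order, hence different capacity,
	 * plus three full slabs of 64 objects each */
	struct fake_slab partial[] = { { 64, 10 }, { 128, 100 } };
	unsigned long nr_objs = 64 + 128 + 3 * 64;	/* n->total_objects */
	unsigned long nr_free = 0;
	int i;

	for (i = 0; i < 2; i++)		/* count_partial(n, count_free) */
		nr_free += count_free(&partial[i]);

	/* free objects live on the partial list (cpu slabs ignored here),
	 * so no uniform objects-per-slab assumption is needed any more */
	printf("objects=%lu free=%lu inuse=%lu\n",
	       nr_objs, nr_free, nr_objs - nr_free);
	return 0;
}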