diff options
-rw-r--r--  include/linux/slub_def.h |   2 +
-rw-r--r--  mm/slub.c                | 105 ++++++++++++++++++++++++++++-------
2 files changed, 94 insertions(+), 13 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 40801e754afb..b7d9408a00ff 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -200,4 +200,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
200 | } | 200 | } |
201 | #endif | 201 | #endif |
202 | 202 | ||
203 | extern const struct seq_operations slabinfo_op; | ||
204 | |||
203 | #endif /* _LINUX_SLUB_DEF_H */ | 205 | #endif /* _LINUX_SLUB_DEF_H */ |
@@ -3076,6 +3076,19 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, | |||
3076 | return slab_alloc(s, gfpflags, node, caller); | 3076 | return slab_alloc(s, gfpflags, node, caller); |
3077 | } | 3077 | } |
3078 | 3078 | ||
3079 | static unsigned long count_partial(struct kmem_cache_node *n) | ||
3080 | { | ||
3081 | unsigned long flags; | ||
3082 | unsigned long x = 0; | ||
3083 | struct page *page; | ||
3084 | |||
3085 | spin_lock_irqsave(&n->list_lock, flags); | ||
3086 | list_for_each_entry(page, &n->partial, lru) | ||
3087 | x += page->inuse; | ||
3088 | spin_unlock_irqrestore(&n->list_lock, flags); | ||
3089 | return x; | ||
3090 | } | ||
3091 | |||
3079 | #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG) | 3092 | #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG) |
3080 | static int validate_slab(struct kmem_cache *s, struct page *page, | 3093 | static int validate_slab(struct kmem_cache *s, struct page *page, |
3081 | unsigned long *map) | 3094 | unsigned long *map) |
@@ -3458,19 +3471,6 @@ static int list_locations(struct kmem_cache *s, char *buf, | |||
3458 | return n; | 3471 | return n; |
3459 | } | 3472 | } |
3460 | 3473 | ||
3461 | static unsigned long count_partial(struct kmem_cache_node *n) | ||
3462 | { | ||
3463 | unsigned long flags; | ||
3464 | unsigned long x = 0; | ||
3465 | struct page *page; | ||
3466 | |||
3467 | spin_lock_irqsave(&n->list_lock, flags); | ||
3468 | list_for_each_entry(page, &n->partial, lru) | ||
3469 | x += page->inuse; | ||
3470 | spin_unlock_irqrestore(&n->list_lock, flags); | ||
3471 | return x; | ||
3472 | } | ||
3473 | |||
3474 | enum slab_stat_type { | 3474 | enum slab_stat_type { |
3475 | SL_FULL, | 3475 | SL_FULL, |
3476 | SL_PARTIAL, | 3476 | SL_PARTIAL, |
@@ -4123,3 +4123,82 @@ static int __init slab_sysfs_init(void) | |||
4123 | 4123 | ||
4124 | __initcall(slab_sysfs_init); | 4124 | __initcall(slab_sysfs_init); |
4125 | #endif | 4125 | #endif |
4126 | |||
4127 | /* | ||
4128 | * The /proc/slabinfo ABI | ||
4129 | */ | ||
4130 | #ifdef CONFIG_PROC_FS | ||
4131 | |||
4132 | static void print_slabinfo_header(struct seq_file *m) | ||
4133 | { | ||
4134 | seq_puts(m, "slabinfo - version: 2.1\n"); | ||
4135 | seq_puts(m, "# name <active_objs> <num_objs> <objsize> " | ||
4136 | "<objperslab> <pagesperslab>"); | ||
4137 | seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>"); | ||
4138 | seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>"); | ||
4139 | seq_putc(m, '\n'); | ||
4140 | } | ||
4141 | |||
4142 | static void *s_start(struct seq_file *m, loff_t *pos) | ||
4143 | { | ||
4144 | loff_t n = *pos; | ||
4145 | |||
4146 | down_read(&slub_lock); | ||
4147 | if (!n) | ||
4148 | print_slabinfo_header(m); | ||
4149 | |||
4150 | return seq_list_start(&slab_caches, *pos); | ||
4151 | } | ||
4152 | |||
4153 | static void *s_next(struct seq_file *m, void *p, loff_t *pos) | ||
4154 | { | ||
4155 | return seq_list_next(p, &slab_caches, pos); | ||
4156 | } | ||
4157 | |||
4158 | static void s_stop(struct seq_file *m, void *p) | ||
4159 | { | ||
4160 | up_read(&slub_lock); | ||
4161 | } | ||
4162 | |||
4163 | static int s_show(struct seq_file *m, void *p) | ||
4164 | { | ||
4165 | unsigned long nr_partials = 0; | ||
4166 | unsigned long nr_slabs = 0; | ||
4167 | unsigned long nr_inuse = 0; | ||
4168 | unsigned long nr_objs; | ||
4169 | struct kmem_cache *s; | ||
4170 | int node; | ||
4171 | |||
4172 | s = list_entry(p, struct kmem_cache, list); | ||
4173 | |||
4174 | for_each_online_node(node) { | ||
4175 | struct kmem_cache_node *n = get_node(s, node); | ||
4176 | |||
4177 | if (!n) | ||
4178 | continue; | ||
4179 | |||
4180 | nr_partials += n->nr_partial; | ||
4181 | nr_slabs += atomic_long_read(&n->nr_slabs); | ||
4182 | nr_inuse += count_partial(n); | ||
4183 | } | ||
4184 | |||
4185 | nr_objs = nr_slabs * s->objects; | ||
4186 | nr_inuse += (nr_slabs - nr_partials) * s->objects; | ||
4187 | |||
4188 | seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse, | ||
4189 | nr_objs, s->size, s->objects, (1 << s->order)); | ||
4190 | seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0); | ||
4191 | seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs, | ||
4192 | 0UL); | ||
4193 | seq_putc(m, '\n'); | ||
4194 | return 0; | ||
4195 | } | ||
4196 | |||
4197 | const struct seq_operations slabinfo_op = { | ||
4198 | .start = s_start, | ||
4199 | .next = s_next, | ||
4200 | .stop = s_stop, | ||
4201 | .show = s_show, | ||
4202 | }; | ||
4203 | |||
4204 | #endif /* CONFIG_PROC_FS */ | ||