path: root/mm/slub.c
author     Paul Mackerras <paulus@samba.org>  2008-01-23 18:07:21 -0500
committer  Paul Mackerras <paulus@samba.org>  2008-01-23 18:07:21 -0500
commit     9156ad48338e0306e508ead5c0d9986050744475 (patch)
tree       37f3a90e38190052ecf3cdf9171dfdddd37b56fd /mm/slub.c
parent     fa28237cfcc5827553044cbd6ee52e33692b0faa (diff)
parent     8f7b3d156d348b6766833cd4e272d0d19b501e64 (diff)
Merge branch 'linux-2.6'
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  116
1 file changed, 101 insertions(+), 15 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index b9f37cb0f2e..474945ecd89 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -172,7 +172,7 @@ static inline void ClearSlabDebug(struct page *page)
  * Mininum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
  */
-#define MIN_PARTIAL 2
+#define MIN_PARTIAL 5
 
 /*
  * Maximum number of desirable partial slabs.
@@ -1613,7 +1613,7 @@ checks_ok:
 	 * then add it.
 	 */
 	if (unlikely(!prior))
-		add_partial(get_node(s, page_to_nid(page)), page);
+		add_partial_tail(get_node(s, page_to_nid(page)), page);
 
 out_unlock:
 	slab_unlock(page);
@@ -3076,6 +3076,19 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	return slab_alloc(s, gfpflags, node, caller);
 }
 
+static unsigned long count_partial(struct kmem_cache_node *n)
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += page->inuse;
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
 static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)
@@ -3458,19 +3471,6 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	return n;
 }
 
-static unsigned long count_partial(struct kmem_cache_node *n)
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += page->inuse;
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-
 enum slab_stat_type {
 	SL_FULL,
 	SL_PARTIAL,
@@ -4123,3 +4123,89 @@ static int __init slab_sysfs_init(void)
 
 __initcall(slab_sysfs_init);
 #endif
+
+/*
+ * The /proc/slabinfo ABI
+ */
+#ifdef CONFIG_SLABINFO
+
+ssize_t slabinfo_write(struct file *file, const char __user * buffer,
+		       size_t count, loff_t *ppos)
+{
+	return -EINVAL;
+}
+
+
+static void print_slabinfo_header(struct seq_file *m)
+{
+	seq_puts(m, "slabinfo - version: 2.1\n");
+	seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
+		 "<objperslab> <pagesperslab>");
+	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
+	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
+	seq_putc(m, '\n');
+}
+
+static void *s_start(struct seq_file *m, loff_t *pos)
+{
+	loff_t n = *pos;
+
+	down_read(&slub_lock);
+	if (!n)
+		print_slabinfo_header(m);
+
+	return seq_list_start(&slab_caches, *pos);
+}
+
+static void *s_next(struct seq_file *m, void *p, loff_t *pos)
+{
+	return seq_list_next(p, &slab_caches, pos);
+}
+
+static void s_stop(struct seq_file *m, void *p)
+{
+	up_read(&slub_lock);
+}
+
+static int s_show(struct seq_file *m, void *p)
+{
+	unsigned long nr_partials = 0;
+	unsigned long nr_slabs = 0;
+	unsigned long nr_inuse = 0;
+	unsigned long nr_objs;
+	struct kmem_cache *s;
+	int node;
+
+	s = list_entry(p, struct kmem_cache, list);
+
+	for_each_online_node(node) {
+		struct kmem_cache_node *n = get_node(s, node);
+
+		if (!n)
+			continue;
+
+		nr_partials += n->nr_partial;
+		nr_slabs += atomic_long_read(&n->nr_slabs);
+		nr_inuse += count_partial(n);
+	}
+
+	nr_objs = nr_slabs * s->objects;
+	nr_inuse += (nr_slabs - nr_partials) * s->objects;
+
+	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
+		   nr_objs, s->size, s->objects, (1 << s->order));
+	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
+	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
+		   0UL);
+	seq_putc(m, '\n');
+	return 0;
+}
+
+const struct seq_operations slabinfo_op = {
+	.start = s_start,
+	.next = s_next,
+	.stop = s_stop,
+	.show = s_show,
+};
+
+#endif /* CONFIG_SLABINFO */
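Note on the accounting in s_show() above: the /proc/slabinfo <active_objs> column is derived indirectly. count_partial() walks a node's partial list under the list_lock and sums page->inuse exactly; every slab that is not on a partial list is then assumed to be fully allocated and contributes s->objects objects. A minimal userspace sketch of that arithmetic follows; the numbers (objects_per_slab, nr_slabs, nr_partials, partial_inuse) are made-up examples for illustration, not values from this patch.

#include <stdio.h>

int main(void)
{
	/* Illustrative inputs; in the kernel these come from struct
	 * kmem_cache and the summed per-node kmem_cache_node counters. */
	unsigned long objects_per_slab = 32;	/* s->objects */
	unsigned long nr_slabs = 10;		/* sum of atomic_long_read(&n->nr_slabs) */
	unsigned long nr_partials = 3;		/* sum of n->nr_partial */
	unsigned long partial_inuse = 40;	/* sum of count_partial(n) */

	/* Total capacity: every slab holds objects_per_slab objects. */
	unsigned long nr_objs = nr_slabs * objects_per_slab;

	/* Active objects: exact count from partial slabs, plus all
	 * non-partial slabs treated as full. */
	unsigned long nr_inuse = partial_inuse +
		(nr_slabs - nr_partials) * objects_per_slab;

	/* Prints: <active_objs> = 264, <num_objs> = 320 */
	printf("<active_objs> = %lu, <num_objs> = %lu\n", nr_inuse, nr_objs);
	return 0;
}

The tunables and <sharedavail> fields are printed as zeros because SLUB has no equivalents of SLAB's per-CPU tunables or shared arrays; the columns are emitted only to keep the /proc/slabinfo format stable for existing tools.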