Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c | 289
 1 file changed, 70 insertions(+), 219 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 33d3363658df..2c3a2e0394db 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -162,23 +162,6 @@
  */
 static bool pfmemalloc_active __read_mostly;
 
-/* Legal flag mask for kmem_cache_create(). */
-#if DEBUG
-# define CREATE_MASK	(SLAB_RED_ZONE | \
-			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
-			 SLAB_CACHE_DMA | \
-			 SLAB_STORE_USER | \
-			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
-#else
-# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
-			 SLAB_CACHE_DMA | \
-			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
-#endif
-
 /*
  * kmem_bufctl_t:
  *
@@ -564,15 +547,11 @@ static struct cache_names __initdata cache_names[] = {
 #undef CACHE
 };
 
-static struct arraycache_init initarray_cache __initdata =
-    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 static struct arraycache_init initarray_generic =
     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 
 /* internal cache of cache description objs */
-static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES];
 static struct kmem_cache kmem_cache_boot = {
-	.nodelists = kmem_cache_nodelists,
 	.batchcount = 1,
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
@@ -1577,28 +1556,33 @@ static void __init set_up_list3s(struct kmem_cache *cachep, int index)
 }
 
 /*
+ * The memory after the last cpu cache pointer is used for the
+ * the nodelists pointer.
+ */
+static void setup_nodelists_pointer(struct kmem_cache *cachep)
+{
+	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
+}
+
+/*
  * Initialisation. Called after the page allocator have been initialised and
  * before smp_init().
  */
 void __init kmem_cache_init(void)
 {
-	size_t left_over;
 	struct cache_sizes *sizes;
 	struct cache_names *names;
 	int i;
-	int order;
-	int node;
 
 	kmem_cache = &kmem_cache_boot;
+	setup_nodelists_pointer(kmem_cache);
 
 	if (num_possible_nodes() == 1)
 		use_alien_caches = 0;
 
-	for (i = 0; i < NUM_INIT_LISTS; i++) {
+	for (i = 0; i < NUM_INIT_LISTS; i++)
 		kmem_list3_init(&initkmem_list3[i]);
-		if (i < MAX_NUMNODES)
-			kmem_cache->nodelists[i] = NULL;
-	}
+
 	set_up_list3s(kmem_cache, CACHE_CACHE);
 
 	/*
@@ -1629,37 +1613,16 @@ void __init kmem_cache_init(void)
 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
 	 */
 
-	node = numa_mem_id();
-
 	/* 1) create the kmem_cache */
-	INIT_LIST_HEAD(&slab_caches);
-	list_add(&kmem_cache->list, &slab_caches);
-	kmem_cache->colour_off = cache_line_size();
-	kmem_cache->array[smp_processor_id()] = &initarray_cache.cache;
-	kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
 
 	/*
 	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
 	 */
-	kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
-				  nr_node_ids * sizeof(struct kmem_list3 *);
-	kmem_cache->object_size = kmem_cache->size;
-	kmem_cache->size = ALIGN(kmem_cache->object_size,
-			cache_line_size());
-	kmem_cache->reciprocal_buffer_size =
-		reciprocal_value(kmem_cache->size);
-
-	for (order = 0; order < MAX_ORDER; order++) {
-		cache_estimate(order, kmem_cache->size,
-			cache_line_size(), 0, &left_over, &kmem_cache->num);
-		if (kmem_cache->num)
-			break;
-	}
-	BUG_ON(!kmem_cache->num);
-	kmem_cache->gfporder = order;
-	kmem_cache->colour = left_over / kmem_cache->colour_off;
-	kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) +
-				      sizeof(struct slab), cache_line_size());
+	create_boot_cache(kmem_cache, "kmem_cache",
+			  offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+			  nr_node_ids * sizeof(struct kmem_list3 *),
+			  SLAB_HWCACHE_ALIGN);
+	list_add(&kmem_cache->list, &slab_caches);
 
 	/* 2+3) create the kmalloc caches */
 	sizes = malloc_sizes;
@@ -1671,23 +1634,13 @@ void __init kmem_cache_init(void)
 	 * bug.
 	 */
 
-	sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-	sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name;
-	sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
-	sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
-	sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
-	__kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
-	list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
-
-	if (INDEX_AC != INDEX_L3) {
-		sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-		sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name;
-		sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
-		sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
-		sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
-		__kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
-		list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
-	}
+	sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
+					sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
+
+	if (INDEX_AC != INDEX_L3)
+		sizes[INDEX_L3].cs_cachep =
+			create_kmalloc_cache(names[INDEX_L3].name,
+				sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
 
 	slab_early_init = 0;
 
@@ -1699,24 +1652,14 @@ void __init kmem_cache_init(void)
 	 * Note for systems short on memory removing the alignment will
 	 * allow tighter packing of the smaller caches.
 	 */
-	if (!sizes->cs_cachep) {
-		sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-		sizes->cs_cachep->name = names->name;
-		sizes->cs_cachep->size = sizes->cs_size;
-		sizes->cs_cachep->object_size = sizes->cs_size;
-		sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
-		__kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
-		list_add(&sizes->cs_cachep->list, &slab_caches);
-	}
+	if (!sizes->cs_cachep)
+		sizes->cs_cachep = create_kmalloc_cache(names->name,
+					sizes->cs_size, ARCH_KMALLOC_FLAGS);
+
 #ifdef CONFIG_ZONE_DMA
-	sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-	sizes->cs_dmacachep->name = names->name_dma;
-	sizes->cs_dmacachep->size = sizes->cs_size;
-	sizes->cs_dmacachep->object_size = sizes->cs_size;
-	sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN;
-	__kmem_cache_create(sizes->cs_dmacachep,
-			       ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
-	list_add(&sizes->cs_dmacachep->list, &slab_caches);
+	sizes->cs_dmacachep = create_kmalloc_cache(
+		names->name_dma, sizes->cs_size,
+		SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
 #endif
 	sizes++;
 	names++;
@@ -1727,7 +1670,6 @@ void __init kmem_cache_init(void)
 
 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-		BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache);
 		memcpy(ptr, cpu_cache_get(kmem_cache),
 		       sizeof(struct arraycache_init));
 		/*
@@ -2282,7 +2224,15 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 
 	if (slab_state == DOWN) {
 		/*
-		 * Note: the first kmem_cache_create must create the cache
+		 * Note: Creation of first cache (kmem_cache).
+		 * The setup_list3s is taken care
+		 * of by the caller of __kmem_cache_create
+		 */
+		cachep->array[smp_processor_id()] = &initarray_generic.cache;
+		slab_state = PARTIAL;
+	} else if (slab_state == PARTIAL) {
+		/*
+		 * Note: the second kmem_cache_create must create the cache
 		 * that's used by kmalloc(24), otherwise the creation of
 		 * further caches will BUG().
 		 */
@@ -2290,7 +2240,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 
 		/*
 		 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
-		 * the first cache, then we need to set up all its list3s,
+		 * the second cache, then we need to set up all its list3s,
 		 * otherwise the creation of further caches will BUG().
 		 */
 		set_up_list3s(cachep, SIZE_AC);
@@ -2299,6 +2249,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 	else
 		slab_state = PARTIAL_ARRAYCACHE;
 	} else {
+		/* Remaining boot caches */
 		cachep->array[smp_processor_id()] =
 			kmalloc(sizeof(struct arraycache_init), gfp);
 
@@ -2331,11 +2282,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 
 /**
  * __kmem_cache_create - Create a cache.
- * @name: A string which is used in /proc/slabinfo to identify this cache.
- * @size: The size of objects to be created in this cache.
- * @align: The required alignment for the objects.
+ * @cachep: cache management descriptor
  * @flags: SLAB flags
- * @ctor: A constructor for the objects.
  *
  * Returns a ptr to the cache on success, NULL on failure.
  * Cannot be called within a int, but can be interrupted.
@@ -2378,11 +2326,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	if (flags & SLAB_DESTROY_BY_RCU)
 		BUG_ON(flags & SLAB_POISON);
 #endif
-	/*
-	 * Always checks flags, a caller might be expecting debug support which
-	 * isn't available.
-	 */
-	BUG_ON(flags & ~CREATE_MASK);
 
 	/*
 	 * Check that size is in terms of words. This is needed to avoid
@@ -2394,22 +2337,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		size &= ~(BYTES_PER_WORD - 1);
 	}
 
-	/* calculate the final buffer alignment: */
-
-	/* 1) arch recommendation: can be overridden for debug */
-	if (flags & SLAB_HWCACHE_ALIGN) {
-		/*
-		 * Default alignment: as specified by the arch code. Except if
-		 * an object is really small, then squeeze multiple objects into
-		 * one cacheline.
-		 */
-		ralign = cache_line_size();
-		while (size <= ralign / 2)
-			ralign /= 2;
-	} else {
-		ralign = BYTES_PER_WORD;
-	}
-
 	/*
 	 * Redzoning and user store require word alignment or possibly larger.
 	 * Note this will be overridden by architecture or caller mandated
@@ -2426,10 +2353,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		size &= ~(REDZONE_ALIGN - 1);
 	}
 
-	/* 2) arch mandated alignment */
-	if (ralign < ARCH_SLAB_MINALIGN) {
-		ralign = ARCH_SLAB_MINALIGN;
-	}
 	/* 3) caller mandated alignment */
 	if (ralign < cachep->align) {
 		ralign = cachep->align;
@@ -2447,7 +2370,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	else
 		gfp = GFP_NOWAIT;
 
-	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
+	setup_nodelists_pointer(cachep);
 #if DEBUG
 
 	/*
@@ -3969,12 +3892,6 @@ void kfree(const void *objp)
 }
 EXPORT_SYMBOL(kfree);
 
-unsigned int kmem_cache_size(struct kmem_cache *cachep)
-{
-	return cachep->object_size;
-}
-EXPORT_SYMBOL(kmem_cache_size);
-
 /*
  * This initializes kmem_list3 or resizes various caches for all nodes.
  */
@@ -4276,54 +4193,8 @@ out:
 }
 
 #ifdef CONFIG_SLABINFO
-
-static void print_slabinfo_header(struct seq_file *m)
+void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
 {
-	/*
-	 * Output format version, so at least we can change it
-	 * without _too_ many complaints.
-	 */
-#if STATS
-	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
-#else
-	seq_puts(m, "slabinfo - version: 2.1\n");
-#endif
-	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
-		 "<objperslab> <pagesperslab>");
-	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
-	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
-#if STATS
-	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
-		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
-	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
-#endif
-	seq_putc(m, '\n');
-}
-
-static void *s_start(struct seq_file *m, loff_t *pos)
-{
-	loff_t n = *pos;
-
-	mutex_lock(&slab_mutex);
-	if (!n)
-		print_slabinfo_header(m);
-
-	return seq_list_start(&slab_caches, *pos);
-}
-
-static void *s_next(struct seq_file *m, void *p, loff_t *pos)
-{
-	return seq_list_next(p, &slab_caches, pos);
-}
-
-static void s_stop(struct seq_file *m, void *p)
-{
-	mutex_unlock(&slab_mutex);
-}
-
-static int s_show(struct seq_file *m, void *p)
-{
-	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
 	struct slab *slabp;
 	unsigned long active_objs;
 	unsigned long num_objs;
@@ -4378,13 +4249,20 @@ static int s_show(struct seq_file *m, void *p)
 	if (error)
 		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
 
-	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
-		   name, active_objs, num_objs, cachep->size,
-		   cachep->num, (1 << cachep->gfporder));
-	seq_printf(m, " : tunables %4u %4u %4u",
-		   cachep->limit, cachep->batchcount, cachep->shared);
-	seq_printf(m, " : slabdata %6lu %6lu %6lu",
-		   active_slabs, num_slabs, shared_avail);
+	sinfo->active_objs = active_objs;
+	sinfo->num_objs = num_objs;
+	sinfo->active_slabs = active_slabs;
+	sinfo->num_slabs = num_slabs;
+	sinfo->shared_avail = shared_avail;
+	sinfo->limit = cachep->limit;
+	sinfo->batchcount = cachep->batchcount;
+	sinfo->shared = cachep->shared;
+	sinfo->objects_per_slab = cachep->num;
+	sinfo->cache_order = cachep->gfporder;
+}
+
+void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
+{
 #if STATS
 	{	/* list3 stats */
 		unsigned long high = cachep->high_mark;
@@ -4414,31 +4292,8 @@ static int s_show(struct seq_file *m, void *p)
 			   allochit, allocmiss, freehit, freemiss);
 	}
 #endif
-	seq_putc(m, '\n');
-	return 0;
 }
 
-/*
- * slabinfo_op - iterator that generates /proc/slabinfo
- *
- * Output layout:
- * cache-name
- * num-active-objs
- * total-objs
- * object size
- * num-active-slabs
- * total-slabs
- * num-pages-per-slab
- * + further values on SMP and with statistics enabled
- */
-
-static const struct seq_operations slabinfo_op = {
-	.start = s_start,
-	.next = s_next,
-	.stop = s_stop,
-	.show = s_show,
-};
-
 #define MAX_SLABINFO_WRITE 128
 /**
  * slabinfo_write - Tuning for the slab allocator
@@ -4447,7 +4302,7 @@ static const struct seq_operations slabinfo_op = {
  * @count: data length
  * @ppos: unused
  */
-static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
+ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 		       size_t count, loff_t *ppos)
 {
 	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
@@ -4490,19 +4345,6 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 	return res;
 }
 
-static int slabinfo_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &slabinfo_op);
-}
-
-static const struct file_operations proc_slabinfo_operations = {
-	.open		= slabinfo_open,
-	.read		= seq_read,
-	.write		= slabinfo_write,
-	.llseek		= seq_lseek,
-	.release	= seq_release,
-};
-
 #ifdef CONFIG_DEBUG_SLAB_LEAK
 
 static void *leaks_start(struct seq_file *m, loff_t *pos)
@@ -4631,6 +4473,16 @@ static int leaks_show(struct seq_file *m, void *p)
 	return 0;
 }
 
+static void *s_next(struct seq_file *m, void *p, loff_t *pos)
+{
+	return seq_list_next(p, &slab_caches, pos);
+}
+
+static void s_stop(struct seq_file *m, void *p)
+{
+	mutex_unlock(&slab_mutex);
+}
+
 static const struct seq_operations slabstats_op = {
 	.start = leaks_start,
 	.next = s_next,
@@ -4665,7 +4517,6 @@ static const struct file_operations proc_slabstats_operations = {
 
 static int __init slab_proc_init(void)
 {
-	proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
 #ifdef CONFIG_DEBUG_SLAB_LEAK
 	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
 #endif