author     Linus Torvalds <torvalds@linux-foundation.org>  2011-07-22 15:44:30 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-07-22 15:44:30 -0400
commit     f99b7880cb9863e11441bd8b2f31d4f556ef1a44 (patch)
tree       6f3dc6e33e847b431dd899bd968d799f0d4a8fff /mm/slab.c
parent     02f8c6aee8df3cdc935e9bdd4f2d020306035dbe (diff)
parent     7ea466f2256b02a7047dfd47d76a2f6c1e427e3e (diff)
Merge branch 'slab-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'slab-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  slab: fix DEBUG_SLAB warning
  slab: shrink sizeof(struct kmem_cache)
  slab: fix DEBUG_SLAB build
  SLUB: Fix missing <linux/stacktrace.h> include
  slub: reduce overhead of slub_debug
  slub: Add method to verify memory is not freed
  slub: Enable backtrace for create/delete points
  slab allocators: Provide generic description of alignment defines
  slab, slub, slob: Unify alignment definition
  slob/lockdep: Fix gfp flags passed to lockdep
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index d96e223de77..1e523ed47c6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -574,7 +574,9 @@ static struct arraycache_init initarray_generic =
 	{ {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 
 /* internal cache of cache description objs */
+static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES];
 static struct kmem_cache cache_cache = {
+	.nodelists = cache_cache_nodelists,
 	.batchcount = 1,
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
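
The bootstrap cache_cache is defined statically, before any allocator exists, so once nodelists stops being an embedded array (see the sizing change in the next hunk) it has to borrow a static, worst-case MAX_NUMNODES table. A minimal userspace sketch of that bootstrap pattern, using illustrative names (struct cache, boot_nodelists -- not kernel identifiers):

#include <stdio.h>

#define MAX_NODES 8			/* stand-in for MAX_NUMNODES */

struct node_list { int free_objects; };

struct cache {
	struct node_list **nodelists;	/* normally carved from the tail */
	int batchcount;
};

/* The bootstrap cache exists before any allocator does, so its
 * per-node table is static storage sized for the worst case. */
static struct node_list *boot_nodelists[MAX_NODES];

static struct cache boot_cache = {
	.nodelists	= boot_nodelists,
	.batchcount	= 1,
};

int main(void)
{
	printf("bootstrap table at %p, %d slots\n",
	       (void *)boot_cache.nodelists, MAX_NODES);
	return 0;
}
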
@@ -1492,11 +1494,10 @@ void __init kmem_cache_init(void)
 		cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
 
 	/*
-	 * struct kmem_cache size depends on nr_node_ids, which
-	 * can be less than MAX_NUMNODES.
+	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
 	 */
-	cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
-				  nr_node_ids * sizeof(struct kmem_list3 *);
+	cache_cache.buffer_size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+				  nr_node_ids * sizeof(struct kmem_list3 *);
 #if DEBUG
 	cache_cache.obj_size = cache_cache.buffer_size;
 #endif
@@ -2308,6 +2309,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	if (!cachep)
 		goto oops;
 
+	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
 #if DEBUG
 	cachep->obj_size = size;
 
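
Taken together, the sizing hunk above and this pointer fixup are one trick: kmem_cache_create() allocates only offsetof(struct kmem_cache, array[nr_cpu_ids]) bytes of the per-CPU slot array instead of the full NR_CPUS worth, then packs the nr_node_ids per-node pointers directly behind the used slots and aims nodelists at them. A standalone sketch of that layout, under illustrative names and relying on GCC's offsetof accepting a runtime array index (as the kernel code does here):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_CPUS 64			/* stand-in for NR_CPUS */

struct node_list { int free_objects; };

struct cache {
	struct node_list **nodelists;	/* points into the allocation tail */
	int batchcount;
	void *array[MAX_CPUS];		/* per-CPU slots; must stay last */
};

static struct cache *cache_create(size_t nr_cpus, size_t nr_nodes)
{
	/* Size only the used prefix of array[] plus nr_nodes pointers
	 * packed right behind it; the unused tail of array[] is never
	 * allocated at all. */
	size_t sz = offsetof(struct cache, array[nr_cpus]) +
		    nr_nodes * sizeof(struct node_list *);
	struct cache *c = calloc(1, sz);

	if (c)	/* carve the per-node table out of the tail */
		c->nodelists = (struct node_list **)&c->array[nr_cpus];
	return c;
}

int main(void)
{
	struct cache *c = cache_create(4, 2);

	if (!c)
		return 1;
	printf("full struct %zu bytes, trimmed allocation %zu bytes\n",
	       sizeof(struct cache),
	       offsetof(struct cache, array[4]) +
	       2 * sizeof(struct node_list *));
	free(c);
	return 0;
}

With the toy numbers above the allocation shrinks from hundreds of bytes to a few dozen per cache, which is the point of the "slab: shrink sizeof(struct kmem_cache)" change being merged.
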
@@ -3153,12 +3155,11 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON)
 		cachep->ctor(objp);
-#if ARCH_SLAB_MINALIGN
-	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
-		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
-		       objp, ARCH_SLAB_MINALIGN);
+	if (ARCH_SLAB_MINALIGN &&
+	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
+		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
+			objp, (int)ARCH_SLAB_MINALIGN);
 	}
-#endif
 	return objp;
 }
 #else
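
The final hunk moves the alignment check from the preprocessor into C. ARCH_SLAB_MINALIGN now defaults to __alignof__(unsigned long long), which #if cannot evaluate, and the (u32) cast drew pointer-truncation warnings on 64-bit builds; an ordinary if with a full-width integer cast fixes both, and the compiler still folds the whole test away on arches that define the minalign to 0. A small userspace sketch of the same check, using GCC's __alignof__ to mirror the kernel default (the uintptr_t spelling and names are mine):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Defined via __alignof__, so it cannot appear in #if -- exactly why
 * the kernel test had to move into ordinary C code. */
#define MINALIGN __alignof__(unsigned long long)

int main(void)
{
	void *objp = malloc(64);

	if (!objp)
		return 1;
	/* uintptr_t (the kernel uses unsigned long) keeps every address
	 * bit; a (u32) cast truncates 64-bit pointers and the compiler
	 * warns, even though the low bits under test would survive.
	 * The leading MINALIGN test preserves the old #if semantics
	 * for the case where it is defined to 0. */
	if (MINALIGN && ((uintptr_t)objp & (MINALIGN - 1)))
		printf("%p: not aligned to %d\n", objp, (int)MINALIGN);
	else
		printf("%p: aligned to %d\n", objp, (int)MINALIGN);
	free(objp);
	return 0;
}
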