From 50953fe9e00ebbeffa032a565ab2f08312d51a87 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Sun, 6 May 2007 14:50:16 -0700
Subject: slab allocators: Remove SLAB_DEBUG_INITIAL flag

I have never seen a use of SLAB_DEBUG_INITIAL. It is only supported by
SLAB.

I think its purpose was to have a callback, invoked before each free of
an object, so that the constructor could verify that the object is back
in its initial (constructor) state. I would think that it is much easier
to check the object state manually before the free. That also places the
check near the code that manipulates the object.

Also, the SLAB_DEBUG_INITIAL callback is only performed if the kernel was
compiled with SLAB debugging on. If a constructor contained code handling
SLAB_DEBUG_INITIAL, that code would have to be conditional on SLAB_DEBUG;
otherwise it would just be dead code. But there is no such code in the
kernel.

I think SLAB_DEBUG_INITIAL is too problematic to make real use of,
difficult to understand, and there are easier ways to accomplish the same
effect (i.e. add debug code before kfree -- see the sketch below).

There is a related flag, SLAB_CTOR_VERIFY, that is frequently checked to
be clear in fs inode caches. Remove the pointless checks (they would be
pointless even without the removal of SLAB_DEBUG_INITIAL) from the fs
constructors.

This is the last slab flag that SLUB did not support. Remove the check
for unimplemented flags from SLUB.
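To make the suggested alternative concrete, here is a minimal sketch of a
cache user checking its own invariants right before the free, instead of
relying on SLAB_DEBUG_INITIAL to re-run the constructor with
SLAB_CTOR_VERIFY. The struct foo type, its fields, foo_cache, and
foo_free() are hypothetical; only kmem_cache_free(), list_empty(),
spin_is_locked(), WARN_ON(), and CONFIG_DEBUG_SLAB are real kernel
interfaces.

	#include <linux/kernel.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	/*
	 * Hypothetical object whose constructor-defined initial state is:
	 * lock unlocked, list empty, refcount zero.
	 */
	struct foo {
		spinlock_t lock;
		struct list_head list;
		int refcount;
	};

	static struct kmem_cache *foo_cache;	/* created elsewhere */

	static void foo_free(struct foo *f)
	{
	#ifdef CONFIG_DEBUG_SLAB
		/*
		 * Verify that the object is back in its constructor state,
		 * right next to the code that manipulates it, instead of
		 * having the allocator call the constructor with
		 * SLAB_CTOR_VERIFY at free time.
		 */
		WARN_ON(spin_is_locked(&f->lock));
		WARN_ON(!list_empty(&f->list));
		WARN_ON(f->refcount != 0);
	#endif
		kmem_cache_free(foo_cache, f);
	}

Like the old flag, the checks are compiled in only when slab debugging is
enabled, so they cost nothing in production builds.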
Signed-off-by: Christoph Lameter
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/rmap.c  |  3 +--
 mm/shmem.c |  3 +--
 mm/slab.c  | 20 ++------------------
 mm/slub.c  | 10 ----------
 4 files changed, 4 insertions(+), 32 deletions(-)

diff --git a/mm/rmap.c b/mm/rmap.c
index 59da5b734c80..75a32be64a21 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -162,8 +162,7 @@ void anon_vma_unlink(struct vm_area_struct *vma)
 static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
 			  unsigned long flags)
 {
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-						SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		struct anon_vma *anon_vma = data;
 
 		spin_lock_init(&anon_vma->lock);
diff --git a/mm/shmem.c b/mm/shmem.c
index b2a35ebf071a..f01e8deed645 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2358,8 +2358,7 @@ static void init_once(void *foo, struct kmem_cache *cachep,
 {
 	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		inode_init_once(&p->vfs_inode);
 #ifdef CONFIG_TMPFS_POSIX_ACL
 		p->i_acl = NULL;
diff --git a/mm/slab.c b/mm/slab.c
index 2a3cbd6e675d..a877d6f3d687 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -116,8 +116,7 @@
 #include
 
 /*
- * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
- *		  SLAB_RED_ZONE & SLAB_POISON.
+ * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
  *		  0 for faster, smaller code (especially in the critical paths).
  *
  * STATS	- 1 to collect stats for /proc/slabinfo.
@@ -172,7 +171,7 @@
 /* Legal flag mask for kmem_cache_create().
  */
 #if DEBUG
-# define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
+# define CREATE_MASK	(SLAB_RED_ZONE | \
 			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | \
 			 SLAB_STORE_USER | \
@@ -2184,12 +2183,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 
 #if DEBUG
 	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
-	if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
-		/* No constructor, but inital state check requested */
-		printk(KERN_ERR "%s: No con, but init state check "
-			"requested - %s\n", __FUNCTION__, name);
-		flags &= ~SLAB_DEBUG_INITIAL;
-	}
 #if FORCED_DEBUG
 	/*
 	 * Enable redzoning and last user accounting, except for caches with
@@ -2895,15 +2888,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	BUG_ON(objnr >= cachep->num);
 	BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
 
-	if (cachep->flags & SLAB_DEBUG_INITIAL) {
-		/*
-		 * Need to call the slab's constructor so the caller can
-		 * perform a verify of its state (debugging). Called without
-		 * the cache-lock held.
-		 */
-		cachep->ctor(objp + obj_offset(cachep),
-			     cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
-	}
 	if (cachep->flags & SLAB_POISON && cachep->dtor) {
 		/* we want to cache poison the object,
 		 * call the destruction callback
diff --git a/mm/slub.c b/mm/slub.c
index 79940e98e5e6..bd86182e595e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -97,9 +97,6 @@
  *
  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
  *
- * - SLAB_DEBUG_INITIAL is not supported but I have never seen a use of
- *   it.
- *
  * - Variable sizing of the per node arrays
  */
 
@@ -125,11 +122,6 @@
 
 #endif
 
-/*
- * Flags from the regular SLAB that SLUB does not support:
- */
-#define SLUB_UNIMPLEMENTED (SLAB_DEBUG_INITIAL)
-
 /*
  * Mininum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
@@ -1748,8 +1740,6 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 	s->flags = flags;
 	s->align = align;
 
-	BUG_ON(flags & SLUB_UNIMPLEMENTED);
-
 	/*
 	 * The page->offset field is only 16 bit wide. This is an offset
 	 * in units of words from the beginning of an object. If the slab