author | Christoph Lameter <clameter@sgi.com> | 2007-05-17 01:10:57 -0400
---|---|---
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-17 08:23:04 -0400
commit | a35afb830f8d71ec211531aeb9a621b09a2efb39 (patch) |
tree | 198280081e1f8b2f6c450742a5075cc7904a3d58 /mm/slab.c |
parent | 5577bd8a85c8b7643a241789b14fafa9c8a6c7db (diff) |
Remove SLAB_CTOR_CONSTRUCTOR
SLAB_CTOR_CONSTRUCTOR is always specified. No point in checking it.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Steven French <sfrench@us.ibm.com>
Cc: Michael Halcrow <mhalcrow@us.ibm.com>
Cc: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
Cc: Miklos Szeredi <miklos@szeredi.hu>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Dave Kleikamp <shaggy@austin.ibm.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Cc: "J. Bruce Fields" <bfields@fieldses.org>
Cc: Anton Altaparmakov <aia21@cantab.net>
Cc: Mark Fasheh <mark.fasheh@oracle.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Jan Kara <jack@ucw.cz>
Cc: David Chinner <dgc@sgi.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 12 |
1 files changed, 5 insertions, 7 deletions
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2610,7 +2610,7 @@ static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
 }
 
 static void cache_init_objs(struct kmem_cache *cachep,
-			    struct slab *slabp, unsigned long ctor_flags)
+			    struct slab *slabp)
 {
 	int i;
 
@@ -2634,7 +2634,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 		 */
 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
 			cachep->ctor(objp + obj_offset(cachep), cachep,
-				     ctor_flags);
+				     0);
 
 		if (cachep->flags & SLAB_RED_ZONE) {
 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
@@ -2650,7 +2650,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 					 cachep->buffer_size / PAGE_SIZE, 0);
 #else
 		if (cachep->ctor)
-			cachep->ctor(objp, cachep, ctor_flags);
+			cachep->ctor(objp, cachep, 0);
 #endif
 		slab_bufctl(slabp)[i] = i + 1;
 	}
@@ -2739,7 +2739,6 @@ static int cache_grow(struct kmem_cache *cachep,
 	struct slab *slabp;
 	size_t offset;
 	gfp_t local_flags;
-	unsigned long ctor_flags;
 	struct kmem_list3 *l3;
 
 	/*
@@ -2748,7 +2747,6 @@ static int cache_grow(struct kmem_cache *cachep,
 	 */
 	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
 
-	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
 	local_flags = (flags & GFP_LEVEL_MASK);
 	/* Take the l3 list lock to change the colour_next on this node */
 	check_irq_off();
@@ -2793,7 +2791,7 @@ static int cache_grow(struct kmem_cache *cachep,
 	slabp->nodeid = nodeid;
 	slab_map_pages(cachep, slabp, objp);
 
-	cache_init_objs(cachep, slabp, ctor_flags);
+	cache_init_objs(cachep, slabp);
 
 	if (local_flags & __GFP_WAIT)
 		local_irq_disable();
@@ -3077,7 +3075,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #endif
 	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON)
-		cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR);
+		cachep->ctor(objp, cachep, 0);
 #if ARCH_SLAB_MINALIGN
 	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
 		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
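
For readers unfamiliar with the old API, the sketch below illustrates what this cleanup means for code that creates a slab cache with a constructor. It is not taken from this commit or from the kernel tree: struct my_object, my_ctor, my_cache and my_cache_setup are invented names, and it assumes the 2.6.21-era interfaces visible in the hunks above (a three-argument constructor and the six-argument kmem_cache_create() that still takes a destructor slot).

```c
/*
 * Hypothetical illustration only -- my_object, my_ctor and my_cache are
 * invented names, not code from this commit or from the kernel tree.
 * Assumes the 2.6.21-era API: ctor(void *obj, struct kmem_cache *, flags)
 * and the six-argument kmem_cache_create() with a dtor slot.
 */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_object {
	spinlock_t lock;
	struct list_head link;
};

/*
 * Old style: guard the one-time initialization with the flag,
 *
 *	if (flags & SLAB_CTOR_CONSTRUCTOR) { ... }
 *
 * Because SLAB_CTOR_CONSTRUCTOR was set on every invocation, the test
 * always succeeded, so it can simply be dropped.
 *
 * New style: initialize unconditionally.  The flags argument is still
 * part of the prototype, but callers now pass 0 (see the hunks above).
 */
static void my_ctor(void *obj, struct kmem_cache *cachep, unsigned long flags)
{
	struct my_object *p = obj;

	spin_lock_init(&p->lock);
	INIT_LIST_HEAD(&p->link);
}

static struct kmem_cache *my_cache;

static int my_cache_setup(void)
{
	/* Register the cache; every new slab page runs my_ctor() per object. */
	my_cache = kmem_cache_create("my_object", sizeof(struct my_object),
				     0, SLAB_HWCACHE_ALIGN, my_ctor, NULL);
	return my_cache ? 0 : -ENOMEM;
}
```

Note that this commit only removes the flag from the allocator's side: the constructor prototype keeps its unsigned long flags argument, which callers now fill with 0, as the cache_init_objs() and cache_alloc_debugcheck_after() hunks show.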