author		Christoph Lameter <clameter@sgi.com>	2007-05-17 01:10:57 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-17 08:23:04 -0400
commit		a35afb830f8d71ec211531aeb9a621b09a2efb39
tree		198280081e1f8b2f6c450742a5075cc7904a3d58 /mm
parent		5577bd8a85c8b7643a241789b14fafa9c8a6c7db
Remove SLAB_CTOR_CONSTRUCTOR
SLAB_CTOR_CONSTRUCTOR is always specified. No point in checking it.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Steven French <sfrench@us.ibm.com>
Cc: Michael Halcrow <mhalcrow@us.ibm.com>
Cc: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
Cc: Miklos Szeredi <miklos@szeredi.hu>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Dave Kleikamp <shaggy@austin.ibm.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Cc: "J. Bruce Fields" <bfields@fieldses.org>
Cc: Anton Altaparmakov <aia21@cantab.net>
Cc: Mark Fasheh <mark.fasheh@oracle.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Jan Kara <jack@ucw.cz>
Cc: David Chinner <dgc@sgi.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
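
The change itself is mechanical: every slab constructor used to wrap its body in a SLAB_CTOR_CONSTRUCTOR test that could never be false, and with the flag gone the allocators simply pass 0 as the ctor flags argument. A minimal before/after sketch of the pattern (struct example and example_ctor are placeholder names, not code from this patch):

	struct example {
		spinlock_t lock;
	};

	/* Before: the flag test is dead code -- the flag was always set. */
	static void example_ctor(void *data, struct kmem_cache *cachep,
				 unsigned long flags)
	{
		if (flags & SLAB_CTOR_CONSTRUCTOR) {
			struct example *p = data;

			spin_lock_init(&p->lock);
		}
	}

	/* After: initialize unconditionally; flags is now always 0. */
	static void example_ctor(void *data, struct kmem_cache *cachep,
				 unsigned long flags)
	{
		struct example *p = data;

		spin_lock_init(&p->lock);
	}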
Diffstat (limited to 'mm')
 mm/rmap.c  |  8 +++-----
 mm/shmem.c |  8 +++-----
 mm/slab.c  | 12 +++++-------
 mm/slob.c  |  2 +-
 mm/slub.c  |  2 +-
 5 files changed, 13 insertions(+), 19 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -162,12 +162,10 @@ void anon_vma_unlink(struct vm_area_struct *vma)
 static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
			   unsigned long flags)
 {
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		struct anon_vma *anon_vma = data;
+	struct anon_vma *anon_vma = data;
 
-		spin_lock_init(&anon_vma->lock);
-		INIT_LIST_HEAD(&anon_vma->head);
-	}
+	spin_lock_init(&anon_vma->lock);
+	INIT_LIST_HEAD(&anon_vma->head);
 }
 
 void __init anon_vma_init(void)
diff --git a/mm/shmem.c b/mm/shmem.c
index f01e8deed645..e537317bec4d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2358,13 +2358,11 @@ static void init_once(void *foo, struct kmem_cache *cachep,
 {
 	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		inode_init_once(&p->vfs_inode);
+	inode_init_once(&p->vfs_inode);
 #ifdef CONFIG_TMPFS_POSIX_ACL
-		p->i_acl = NULL;
-		p->i_default_acl = NULL;
+	p->i_acl = NULL;
+	p->i_default_acl = NULL;
 #endif
-	}
 }
 
 static int init_inodecache(void)
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2610,7 +2610,7 @@ static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
 }
 
 static void cache_init_objs(struct kmem_cache *cachep,
-			    struct slab *slabp, unsigned long ctor_flags)
+			    struct slab *slabp)
 {
 	int i;
 
@@ -2634,7 +2634,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 		 */
 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
 			cachep->ctor(objp + obj_offset(cachep), cachep,
-				     ctor_flags);
+				     0);
 
 		if (cachep->flags & SLAB_RED_ZONE) {
 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
@@ -2650,7 +2650,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 			       cachep->buffer_size / PAGE_SIZE, 0);
 #else
 		if (cachep->ctor)
-			cachep->ctor(objp, cachep, ctor_flags);
+			cachep->ctor(objp, cachep, 0);
 #endif
 		slab_bufctl(slabp)[i] = i + 1;
 	}
@@ -2739,7 +2739,6 @@ static int cache_grow(struct kmem_cache *cachep,
 	struct slab *slabp;
 	size_t offset;
 	gfp_t local_flags;
-	unsigned long ctor_flags;
 	struct kmem_list3 *l3;
 
 	/*
@@ -2748,7 +2747,6 @@ static int cache_grow(struct kmem_cache *cachep,
 	 */
 	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
 
-	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
 	local_flags = (flags & GFP_LEVEL_MASK);
 	/* Take the l3 list lock to change the colour_next on this node */
 	check_irq_off();
@@ -2793,7 +2791,7 @@ static int cache_grow(struct kmem_cache *cachep,
 	slabp->nodeid = nodeid;
 	slab_map_pages(cachep, slabp, objp);
 
-	cache_init_objs(cachep, slabp, ctor_flags);
+	cache_init_objs(cachep, slabp);
 
 	if (local_flags & __GFP_WAIT)
 		local_irq_disable();
@@ -3077,7 +3075,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #endif
 	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON)
-		cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR);
+		cachep->ctor(objp, cachep, 0);
 #if ARCH_SLAB_MINALIGN
 	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
 		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -327,7 +327,7 @@ void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
 		b = (void *)__get_free_pages(flags, get_order(c->size));
 
 	if (c->ctor)
-		c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR);
+		c->ctor(b, c, 0);
 
 	return b;
 }
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -994,7 +994,7 @@ static void setup_object(struct kmem_cache *s, struct page *page,
 	}
 
 	if (unlikely(s->ctor))
-		s->ctor(object, s, SLAB_CTOR_CONSTRUCTOR);
+		s->ctor(object, s, 0);
 }
 
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
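
For context, this is how a cache with a constructor is registered in this era; after this patch the flags value the allocator hands the ctor is always 0, so constructors can simply ignore it. A hypothetical, self-contained sketch against the six-argument kmem_cache_create() of the 2.6.22 timeframe (foo_cache, struct foo, foo_ctor and foo_init are invented names, not part of this patch):

	#include <linux/module.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/list.h>
	#include <linux/errno.h>

	struct foo {
		spinlock_t	 lock;
		struct list_head list;
	};

	static struct kmem_cache *foo_cachep;

	/* Called once per object when a fresh slab page is populated. */
	static void foo_ctor(void *obj, struct kmem_cache *cachep,
			     unsigned long flags)
	{
		struct foo *f = obj;

		/* No SLAB_CTOR_CONSTRUCTOR check: flags is always 0 now. */
		spin_lock_init(&f->lock);
		INIT_LIST_HEAD(&f->list);
	}

	static int __init foo_init(void)
	{
		foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
					       0, SLAB_HWCACHE_ALIGN,
					       foo_ctor, NULL /* dtor */);
		return foo_cachep ? 0 : -ENOMEM;
	}
	module_init(foo_init);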