author		Christoph Lameter <clameter@sgi.com>	2007-10-17 02:25:51 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-17 11:42:45 -0400
commit		4ba9b9d0ba0a49d91fa6417c7510ee36f48cf957 (patch)
tree		191b4f45f926e44b882b1e87a9a85dc12230b892 /mm/slab.c
parent		b811c202a0edadaac7242ab834fe7ba409978ae7 (diff)
Slab API: remove useless ctor parameter and reorder parameters
Slab constructors currently have a flags parameter that is never used.  And
the order of the arguments is opposite to other slab functions.  The object
pointer is placed before the kmem_cache pointer.

Convert

	ctor(void *object, struct kmem_cache *s, unsigned long flags)

to

	ctor(struct kmem_cache *s, void *object)

throughout the kernel.

[akpm@linux-foundation.org: coupla fixes]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
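For callers of kmem_cache_create() outside mm/slab.c the conversion is mechanical:
swap the two remaining constructor arguments and drop the unused flags argument.
A minimal sketch of what an affected cache user looks like after this change is
below; struct foo, foo_ctor, foo_cachep and foo_cache_init are made-up names for
illustration, not code touched by this patch:

	/* Hypothetical slab cache user, shown with the new ctor signature. */
	#include <linux/slab.h>
	#include <linux/init.h>
	#include <linux/errno.h>

	struct foo {
		int refcount;
	};

	/* Old form was: static void foo_ctor(void *obj, struct kmem_cache *s,
	 *                                    unsigned long flags)
	 */
	static void foo_ctor(struct kmem_cache *s, void *obj)
	{
		struct foo *f = obj;

		f->refcount = 0;
	}

	static struct kmem_cache *foo_cachep;

	static int __init foo_cache_init(void)
	{
		/* kmem_cache_create() now takes the two-argument constructor. */
		foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
					       SLAB_HWCACHE_ALIGN, foo_ctor);
		return foo_cachep ? 0 : -ENOMEM;
	}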
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	11
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index e34bcb87a6ee..18fa1a65f57b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -408,7 +408,7 @@ struct kmem_cache {
 	unsigned int dflags;		/* dynamic flags */
 
 	/* constructor func */
-	void (*ctor) (void *, struct kmem_cache *, unsigned long);
+	void (*ctor)(struct kmem_cache *, void *);
 
 /* 5) cache creation/removal */
 	const char *name;
@@ -2129,7 +2129,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
 struct kmem_cache *
 kmem_cache_create (const char *name, size_t size, size_t align,
 	unsigned long flags,
-	void (*ctor)(void*, struct kmem_cache *, unsigned long))
+	void (*ctor)(struct kmem_cache *, void *))
 {
 	size_t left_over, slab_size, ralign;
 	struct kmem_cache *cachep = NULL, *pc;
@@ -2636,8 +2636,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 		 * They must also be threaded.
 		 */
 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
-			cachep->ctor(objp + obj_offset(cachep), cachep,
-				     0);
+			cachep->ctor(cachep, objp + obj_offset(cachep));
 
 		if (cachep->flags & SLAB_RED_ZONE) {
 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
@@ -2653,7 +2652,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 					cachep->buffer_size / PAGE_SIZE, 0);
 #else
 		if (cachep->ctor)
-			cachep->ctor(objp, cachep, 0);
+			cachep->ctor(cachep, objp);
 #endif
 		slab_bufctl(slabp)[i] = i + 1;
 	}
@@ -3078,7 +3077,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #endif
 	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON)
-		cachep->ctor(objp, cachep, 0);
+		cachep->ctor(cachep, objp);
 #if ARCH_SLAB_MINALIGN
 	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
 		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",