Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 2043102c0425..1dc0ce1d0d5d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2610,7 +2610,7 @@ static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
 }
 
 static void cache_init_objs(struct kmem_cache *cachep,
-			    struct slab *slabp, unsigned long ctor_flags)
+			    struct slab *slabp)
 {
 	int i;
 
@@ -2634,7 +2634,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 		 */
 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
 			cachep->ctor(objp + obj_offset(cachep), cachep,
-				     ctor_flags);
+				     0);
 
 		if (cachep->flags & SLAB_RED_ZONE) {
 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
@@ -2650,7 +2650,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 					 cachep->buffer_size / PAGE_SIZE, 0);
 #else
 		if (cachep->ctor)
-			cachep->ctor(objp, cachep, ctor_flags);
+			cachep->ctor(objp, cachep, 0);
 #endif
 		slab_bufctl(slabp)[i] = i + 1;
 	}
@@ -2739,7 +2739,6 @@ static int cache_grow(struct kmem_cache *cachep,
 	struct slab *slabp;
 	size_t offset;
 	gfp_t local_flags;
-	unsigned long ctor_flags;
 	struct kmem_list3 *l3;
 
 	/*
@@ -2748,7 +2747,6 @@ static int cache_grow(struct kmem_cache *cachep,
 	 */
 	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
 
-	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
 	local_flags = (flags & GFP_LEVEL_MASK);
 	/* Take the l3 list lock to change the colour_next on this node */
 	check_irq_off();
@@ -2793,7 +2791,7 @@ static int cache_grow(struct kmem_cache *cachep,
 	slabp->nodeid = nodeid;
 	slab_map_pages(cachep, slabp, objp);
 
-	cache_init_objs(cachep, slabp, ctor_flags);
+	cache_init_objs(cachep, slabp);
 
 	if (local_flags & __GFP_WAIT)
 		local_irq_disable();
@@ -3077,7 +3075,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #endif
 	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON)
-		cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR);
+		cachep->ctor(objp, cachep, 0);
 #if ARCH_SLAB_MINALIGN
 	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
 		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
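
Note: the hunks above drop the ctor_flags plumbing in cache_grow()/cache_init_objs() and pass a literal 0 as the third argument of cachep->ctor(). For illustration only, a minimal sketch of a constructor under the three-argument ctor prototype of this kernel era is shown below; example_ctor and struct foo are hypothetical names, not part of this commit.

/*
 * Hypothetical example, not from this commit: a slab constructor using
 * the three-argument prototype void (*ctor)(void *, struct kmem_cache *,
 * unsigned long).  After this change the flags argument is always 0,
 * so a well-behaved constructor simply ignores it and only performs
 * one-time initialisation of a freshly allocated object.
 */
struct foo {
	int refcount;
};

static void example_ctor(void *obj, struct kmem_cache *cachep,
			 unsigned long flags)
{
	struct foo *f = obj;

	f->refcount = 0;
}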