about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2007-05-06 17:50:17 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-05-07 15:12:57 -0400
commit4f104934591ed98534b3a4c3d17d972b790e9c42 (patch)
tree149d7ba5ab6b9b7f8a82eb3ce41cb36f28bccaf9 /mm
parent50953fe9e00ebbeffa032a565ab2f08312d51a87 (diff)
slab allocators: Remove SLAB_CTOR_ATOMIC
SLAB_CTOR_ATOMIC is never used, which is no surprise since I cannot imagine that one would want to do something serious in a constructor or destructor. In particular given that the slab allocators run with interrupts disabled. Actions in constructors and destructors are by their nature very limited and usually do not go beyond initializing variables and list operations. (The i386 pgd ctor and dtors do take a spinlock in constructor and destructor..... I think that is the furthest we go at this point.) There is no flag passed to the destructor, so removing SLAB_CTOR_ATOMIC also establishes a certain symmetry. Signed-off-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/slab.c17
-rw-r--r--mm/slub.c10
2 files changed, 4 insertions, 23 deletions
diff --git a/mm/slab.c b/mm/slab.c
index a877d6f3d687..52ecf7599a7b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2752,13 +2752,6 @@ static int cache_grow(struct kmem_cache *cachep,
2752 2752
2753 ctor_flags = SLAB_CTOR_CONSTRUCTOR; 2753 ctor_flags = SLAB_CTOR_CONSTRUCTOR;
2754 local_flags = (flags & GFP_LEVEL_MASK); 2754 local_flags = (flags & GFP_LEVEL_MASK);
2755 if (!(local_flags & __GFP_WAIT))
2756 /*
2757 * Not allowed to sleep. Need to tell a constructor about
2758 * this - it might need to know...
2759 */
2760 ctor_flags |= SLAB_CTOR_ATOMIC;
2761
2762 /* Take the l3 list lock to change the colour_next on this node */ 2755 /* Take the l3 list lock to change the colour_next on this node */
2763 check_irq_off(); 2756 check_irq_off();
2764 l3 = cachep->nodelists[nodeid]; 2757 l3 = cachep->nodelists[nodeid];
@@ -3092,14 +3085,8 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3092 } 3085 }
3093#endif 3086#endif
3094 objp += obj_offset(cachep); 3087 objp += obj_offset(cachep);
3095 if (cachep->ctor && cachep->flags & SLAB_POISON) { 3088 if (cachep->ctor && cachep->flags & SLAB_POISON)
3096 unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR; 3089 cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR);
3097
3098 if (!(flags & __GFP_WAIT))
3099 ctor_flags |= SLAB_CTOR_ATOMIC;
3100
3101 cachep->ctor(objp, cachep, ctor_flags);
3102 }
3103#if ARCH_SLAB_MINALIGN 3090#if ARCH_SLAB_MINALIGN
3104 if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) { 3091 if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
3105 printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", 3092 printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
diff --git a/mm/slub.c b/mm/slub.c
index bd86182e595e..347e44821bcb 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -802,14 +802,8 @@ static void setup_object(struct kmem_cache *s, struct page *page,
802 init_tracking(s, object); 802 init_tracking(s, object);
803 } 803 }
804 804
805 if (unlikely(s->ctor)) { 805 if (unlikely(s->ctor))
806 int mode = SLAB_CTOR_CONSTRUCTOR; 806 s->ctor(object, s, SLAB_CTOR_CONSTRUCTOR);
807
808 if (!(s->flags & __GFP_WAIT))
809 mode |= SLAB_CTOR_ATOMIC;
810
811 s->ctor(object, s, mode);
812 }
813} 807}
814 808
815static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) 809static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)