diff options
author    Christoph Lameter <clameter@sgi.com>  2007-05-17 01:10:50 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-17 08:23:03 -0400
commit    c59def9f222d44bb7e2f0a559f2906191a0862d7 (patch)
tree      51bc0c12906b13887c6e53d1e06b48b411443129 /mm/slob.c
parent    afc0cedbe9138e3e8b38bfa1e4dfd01a2c537d62 (diff)
Slab allocators: Drop support for destructors
There is no user of destructors left. There is no reason why we should keep
checking for destructor calls in the slab allocators.
The RFC for this patch was discussed at
http://marc.info/?l=linux-kernel&m=117882364330705&w=2
Destructors were mainly used for list management which required them to take a
spinlock. Taking a spinlock in a destructor is a bit risky since the slab
allocators may run the destructors anytime they decide a slab is no longer
needed.
Patch drops destructor support. Any attempt to use a destructor will BUG().
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slob.c')
 mm/slob.c | 5 -----
 1 file changed, 0 insertions(+), 5 deletions(-)
@@ -280,7 +280,6 @@ struct kmem_cache {
 	unsigned long flags;
 	const char *name;
 	void (*ctor)(void *, struct kmem_cache *, unsigned long);
-	void (*dtor)(void *, struct kmem_cache *, unsigned long);
 };

 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
@@ -296,13 +295,11 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	c->name = name;
 	c->size = size;
 	if (flags & SLAB_DESTROY_BY_RCU) {
-		BUG_ON(dtor);
 		/* leave room for rcu footer at the end of object */
 		c->size += sizeof(struct slob_rcu);
 	}
 	c->flags = flags;
 	c->ctor = ctor;
-	c->dtor = dtor;
 	/* ignore alignment unless it's forced */
 	c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
 	if (c->align < align)
@@ -371,8 +368,6 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 		slob_rcu->size = c->size;
 		call_rcu(&slob_rcu->head, kmem_rcu_free);
 	} else {
-		if (c->dtor)
-			c->dtor(b, c, 0);
 		__kmem_cache_free(b, c->size);
 	}
 }