about · summary · refs · log · tree · commit · diff · stats
path: root/mm/slub.c
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2007-05-17 01:10:50 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-05-17 08:23:03 -0400
commitc59def9f222d44bb7e2f0a559f2906191a0862d7 (patch)
tree51bc0c12906b13887c6e53d1e06b48b411443129 /mm/slub.c
parentafc0cedbe9138e3e8b38bfa1e4dfd01a2c537d62 (diff)
Slab allocators: Drop support for destructors
There is no user of destructors left. There is no reason why we should keep checking for destructor calls in the slab allocators. The RFC for this patch was discussed at http://marc.info/?l=linux-kernel&m=117882364330705&w=2 Destructors were mainly used for list management, which required them to take a spinlock. Taking a spinlock in a destructor is a bit risky, since the slab allocators may run the destructors anytime they decide a slab is no longer needed. This patch drops destructor support. Any attempt to use a destructor will BUG(). Acked-by: Pekka Enberg <penberg@cs.helsinki.fi> Acked-by: Paul Mundt <lethal@linux-sh.org> Signed-off-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--mm/slub.c45
1 file changed, 14 insertions, 31 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 5e3e8bc9838f..022c1b4d74d4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -891,13 +891,13 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s)
891 * On 32 bit platforms the limit is 256k. On 64bit platforms 891 * On 32 bit platforms the limit is 256k. On 64bit platforms
892 * the limit is 512k. 892 * the limit is 512k.
893 * 893 *
894 * Debugging or ctor/dtors may create a need to move the free 894 * Debugging or ctor may create a need to move the free
895 * pointer. Fail if this happens. 895 * pointer. Fail if this happens.
896 */ 896 */
897 if (s->size >= 65535 * sizeof(void *)) { 897 if (s->size >= 65535 * sizeof(void *)) {
898 BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON | 898 BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON |
899 SLAB_STORE_USER | SLAB_DESTROY_BY_RCU)); 899 SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
900 BUG_ON(s->ctor || s->dtor); 900 BUG_ON(s->ctor);
901 } 901 }
902 else 902 else
903 /* 903 /*
@@ -1030,15 +1030,12 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
1030{ 1030{
1031 int pages = 1 << s->order; 1031 int pages = 1 << s->order;
1032 1032
1033 if (unlikely(SlabDebug(page) || s->dtor)) { 1033 if (unlikely(SlabDebug(page))) {
1034 void *p; 1034 void *p;
1035 1035
1036 slab_pad_check(s, page); 1036 slab_pad_check(s, page);
1037 for_each_object(p, s, page_address(page)) { 1037 for_each_object(p, s, page_address(page))
1038 if (s->dtor)
1039 s->dtor(p, s, 0);
1040 check_object(s, page, p, 0); 1038 check_object(s, page, p, 0);
1041 }
1042 } 1039 }
1043 1040
1044 mod_zone_page_state(page_zone(page), 1041 mod_zone_page_state(page_zone(page),
@@ -1871,7 +1868,7 @@ static int calculate_sizes(struct kmem_cache *s)
1871 * then we should never poison the object itself. 1868 * then we should never poison the object itself.
1872 */ 1869 */
1873 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) && 1870 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
1874 !s->ctor && !s->dtor) 1871 !s->ctor)
1875 s->flags |= __OBJECT_POISON; 1872 s->flags |= __OBJECT_POISON;
1876 else 1873 else
1877 s->flags &= ~__OBJECT_POISON; 1874 s->flags &= ~__OBJECT_POISON;
@@ -1901,7 +1898,7 @@ static int calculate_sizes(struct kmem_cache *s)
1901 1898
1902#ifdef CONFIG_SLUB_DEBUG 1899#ifdef CONFIG_SLUB_DEBUG
1903 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || 1900 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
1904 s->ctor || s->dtor)) { 1901 s->ctor)) {
1905 /* 1902 /*
1906 * Relocate free pointer after the object if it is not 1903 * Relocate free pointer after the object if it is not
1907 * permitted to overwrite the first word of the object on 1904 * permitted to overwrite the first word of the object on
@@ -1970,13 +1967,11 @@ static int calculate_sizes(struct kmem_cache *s)
1970static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, 1967static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
1971 const char *name, size_t size, 1968 const char *name, size_t size,
1972 size_t align, unsigned long flags, 1969 size_t align, unsigned long flags,
1973 void (*ctor)(void *, struct kmem_cache *, unsigned long), 1970 void (*ctor)(void *, struct kmem_cache *, unsigned long))
1974 void (*dtor)(void *, struct kmem_cache *, unsigned long))
1975{ 1971{
1976 memset(s, 0, kmem_size); 1972 memset(s, 0, kmem_size);
1977 s->name = name; 1973 s->name = name;
1978 s->ctor = ctor; 1974 s->ctor = ctor;
1979 s->dtor = dtor;
1980 s->objsize = size; 1975 s->objsize = size;
1981 s->flags = flags; 1976 s->flags = flags;
1982 s->align = align; 1977 s->align = align;
@@ -2161,7 +2156,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
2161 2156
2162 down_write(&slub_lock); 2157 down_write(&slub_lock);
2163 if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN, 2158 if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
2164 flags, NULL, NULL)) 2159 flags, NULL))
2165 goto panic; 2160 goto panic;
2166 2161
2167 list_add(&s->list, &slab_caches); 2162 list_add(&s->list, &slab_caches);
@@ -2463,7 +2458,7 @@ static int slab_unmergeable(struct kmem_cache *s)
2463 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE)) 2458 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
2464 return 1; 2459 return 1;
2465 2460
2466 if (s->ctor || s->dtor) 2461 if (s->ctor)
2467 return 1; 2462 return 1;
2468 2463
2469 return 0; 2464 return 0;
@@ -2471,15 +2466,14 @@ static int slab_unmergeable(struct kmem_cache *s)
2471 2466
2472static struct kmem_cache *find_mergeable(size_t size, 2467static struct kmem_cache *find_mergeable(size_t size,
2473 size_t align, unsigned long flags, 2468 size_t align, unsigned long flags,
2474 void (*ctor)(void *, struct kmem_cache *, unsigned long), 2469 void (*ctor)(void *, struct kmem_cache *, unsigned long))
2475 void (*dtor)(void *, struct kmem_cache *, unsigned long))
2476{ 2470{
2477 struct list_head *h; 2471 struct list_head *h;
2478 2472
2479 if (slub_nomerge || (flags & SLUB_NEVER_MERGE)) 2473 if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
2480 return NULL; 2474 return NULL;
2481 2475
2482 if (ctor || dtor) 2476 if (ctor)
2483 return NULL; 2477 return NULL;
2484 2478
2485 size = ALIGN(size, sizeof(void *)); 2479 size = ALIGN(size, sizeof(void *));
@@ -2521,8 +2515,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
2521{ 2515{
2522 struct kmem_cache *s; 2516 struct kmem_cache *s;
2523 2517
2518 BUG_ON(dtor);
2524 down_write(&slub_lock); 2519 down_write(&slub_lock);
2525 s = find_mergeable(size, align, flags, ctor, dtor); 2520 s = find_mergeable(size, align, flags, ctor);
2526 if (s) { 2521 if (s) {
2527 s->refcount++; 2522 s->refcount++;
2528 /* 2523 /*
@@ -2536,7 +2531,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
2536 } else { 2531 } else {
2537 s = kmalloc(kmem_size, GFP_KERNEL); 2532 s = kmalloc(kmem_size, GFP_KERNEL);
2538 if (s && kmem_cache_open(s, GFP_KERNEL, name, 2533 if (s && kmem_cache_open(s, GFP_KERNEL, name,
2539 size, align, flags, ctor, dtor)) { 2534 size, align, flags, ctor)) {
2540 if (sysfs_slab_add(s)) { 2535 if (sysfs_slab_add(s)) {
2541 kfree(s); 2536 kfree(s);
2542 goto err; 2537 goto err;
@@ -3177,17 +3172,6 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
3177} 3172}
3178SLAB_ATTR_RO(ctor); 3173SLAB_ATTR_RO(ctor);
3179 3174
3180static ssize_t dtor_show(struct kmem_cache *s, char *buf)
3181{
3182 if (s->dtor) {
3183 int n = sprint_symbol(buf, (unsigned long)s->dtor);
3184
3185 return n + sprintf(buf + n, "\n");
3186 }
3187 return 0;
3188}
3189SLAB_ATTR_RO(dtor);
3190
3191static ssize_t aliases_show(struct kmem_cache *s, char *buf) 3175static ssize_t aliases_show(struct kmem_cache *s, char *buf)
3192{ 3176{
3193 return sprintf(buf, "%d\n", s->refcount - 1); 3177 return sprintf(buf, "%d\n", s->refcount - 1);
@@ -3419,7 +3403,6 @@ static struct attribute * slab_attrs[] = {
3419 &partial_attr.attr, 3403 &partial_attr.attr,
3420 &cpu_slabs_attr.attr, 3404 &cpu_slabs_attr.attr,
3421 &ctor_attr.attr, 3405 &ctor_attr.attr,
3422 &dtor_attr.attr,
3423 &aliases_attr.attr, 3406 &aliases_attr.attr,
3424 &align_attr.attr, 3407 &align_attr.attr,
3425 &sanity_checks_attr.attr, 3408 &sanity_checks_attr.attr,