author     Ravikiran G Thirumalai <kiran@scalex86.org>    2006-03-22 03:08:12 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>          2006-03-22 10:53:58 -0500
commit     b5d8ca7c50826c0b456b4a646875dc573adfde2b
tree       dc04800243ac331c862c1cdbeebd82bf369602d1 /mm
parent     a737b3e2fcf96f576fa3e2e382236d9ee94f383f
[PATCH] slab: remove cachep->spinlock
Remove cachep->spinlock. Locking has moved to the kmem_list3, and most of
the structures previously protected by cachep->spinlock are now protected
by l3->list_lock. Slab cache tunables such as batchcount are always
accessed with cache_chain_mutex held.
Patch tested on SMP and NUMA kernels with dbench processes running,
constant onlining/offlining, and constant cache tuning, all at the same
time.
Signed-off-by: Ravikiran Thirumalai <kiran@scalex86.org>
Cc: Christoph Lameter <christoph@lameter.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
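
The locking split the message describes, as a minimal editorial sketch.
The field and lock names follow the 2.6-era mm/slab.c shown in the diff
below, but tune_sketch() and lists_sketch() are hypothetical functions
written for illustration, not code from the patch:

/* Tunables (batchcount, limit, shared): serialized by cache_chain_mutex.
 * Hypothetical illustration, not kernel code. */
static void tune_sketch(struct kmem_cache *cachep, int batchcount)
{
        mutex_lock(&cache_chain_mutex);
        cachep->batchcount = batchcount;  /* no per-cache spinlock needed */
        mutex_unlock(&cache_chain_mutex);
}

/* Per-node slab lists: protected by that node's l3->list_lock.
 * Hypothetical illustration; real callers use spin_lock() where
 * interrupts are already disabled. */
static void lists_sketch(struct kmem_cache *cachep, int node)
{
        struct kmem_list3 *l3 = cachep->nodelists[node];

        spin_lock_irq(&l3->list_lock);
        /* walk or modify slabs_full / slabs_partial / slabs_free */
        spin_unlock_irq(&l3->list_lock);
}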
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c | 20
1 file changed, 9 insertions(+), 11 deletions(-)
@@ -372,17 +372,19 @@ static void kmem_list3_init(struct kmem_list3 *parent)
 struct kmem_cache {
 /* 1) per-cpu data, touched during every alloc/free */
        struct array_cache *array[NR_CPUS];
+/* 2) Cache tunables. Protected by cache_chain_mutex */
        unsigned int batchcount;
        unsigned int limit;
        unsigned int shared;
+
        unsigned int buffer_size;
-/* 2) touched by every alloc & free from the backend */
+/* 3) touched by every alloc & free from the backend */
        struct kmem_list3 *nodelists[MAX_NUMNODES];
+
        unsigned int flags;     /* constant flags */
        unsigned int num;       /* # of objs per slab */
-       spinlock_t spinlock;

-/* 3) cache_grow/shrink */
+/* 4) cache_grow/shrink */
        /* order of pgs per slab (2^n) */
        unsigned int gfporder;

@@ -401,11 +403,11 @@ struct kmem_cache {
        /* de-constructor func */
        void (*dtor) (void *, struct kmem_cache *, unsigned long);

-/* 4) cache creation/removal */
+/* 5) cache creation/removal */
        const char *name;
        struct list_head next;

-/* 5) statistics */
+/* 6) statistics */
 #if STATS
        unsigned long num_active;
        unsigned long num_allocations;
@@ -661,7 +663,6 @@ static struct kmem_cache cache_cache = {
        .shared = 1,
        .buffer_size = sizeof(struct kmem_cache),
        .flags = SLAB_NO_REAP,
-       .spinlock = SPIN_LOCK_UNLOCKED,
        .name = "kmem_cache",
 #if DEBUG
        .obj_size = sizeof(struct kmem_cache),
@@ -2057,7 +2058,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
        cachep->gfpflags = 0;
        if (flags & SLAB_CACHE_DMA)
                cachep->gfpflags |= GFP_DMA;
-       spin_lock_init(&cachep->spinlock);
        cachep->buffer_size = size;

        if (flags & CFLGS_OFF_SLAB)
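
For context on why spin_lock_init() could simply be dropped here:
kmem_cache_create() of this era already performs its setup with
cache_chain_mutex held, so the fields it fills in are serialized against
tuners and /proc readers. A hedged sketch of that scope, simplified and
not the literal function body:

        mutex_lock(&cache_chain_mutex);
        /* ... check for duplicate names, compute slab order, set
         * cachep->gfpflags and cachep->buffer_size, create the
         * per-cpu array caches ... */
        list_add(&cachep->next, &cache_chain);
        mutex_unlock(&cache_chain_mutex);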
@@ -3425,6 +3425,7 @@ static void do_ccupdate_local(void *info)
        new->new[smp_processor_id()] = old;
 }

+/* Always called with the cache_chain_mutex held */
 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
                            int batchcount, int shared)
 {
@@ -3446,11 +3447,9 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
        smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);

        check_irq_on();
-       spin_lock(&cachep->spinlock);
        cachep->batchcount = batchcount;
        cachep->limit = limit;
        cachep->shared = shared;
-       spin_unlock(&cachep->spinlock);

        for_each_online_cpu(i) {
                struct array_cache *ccold = new.new[i];
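
The lock/unlock pair removed above was redundant because every caller of
do_tune_cpucache() already holds cache_chain_mutex, as the new comment
documents. A hedged sketch of the /proc/slabinfo tuning path, simplified
from the slabinfo_write() of this era; kbuf, limit, batchcount, and
shared are assumed to come from parsing the user's write:

        mutex_lock(&cache_chain_mutex);
        res = -EINVAL;
        list_for_each_entry(cachep, &cache_chain, next) {
                if (!strcmp(cachep->name, kbuf)) {
                        res = do_tune_cpucache(cachep, limit,
                                               batchcount, shared);
                        break;
                }
        }
        mutex_unlock(&cache_chain_mutex);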
@@ -3471,6 +3470,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
        return 0;
 }

+/* Called with cache_chain_mutex held always */
 static void enable_cpucache(struct kmem_cache *cachep)
 {
        int err;
@@ -3705,7 +3705,6 @@ static int s_show(struct seq_file *m, void *p)
        int node;
        struct kmem_list3 *l3;

-       spin_lock(&cachep->spinlock);
        active_objs = 0;
        num_slabs = 0;
        for_each_online_node(node) {
@@ -3788,7 +3787,6 @@ static int s_show(struct seq_file *m, void *p)
        }
 #endif
        seq_putc(m, '\n');
-       spin_unlock(&cachep->spinlock);
        return 0;
 }

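
With the cache-wide spinlock gone, s_show() relies on per-node locking
while it gathers statistics. A hedged sketch of that walk, simplified
from the s_show() of this era:

        for_each_online_node(node) {
                l3 = cachep->nodelists[node];
                if (!l3)
                        continue;

                check_irq_on();
                spin_lock_irq(&l3->list_lock);
                /* count objects on slabs_full / slabs_partial /
                 * slabs_free for this node */
                spin_unlock_irq(&l3->list_lock);
        }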