author	Ravikiran G Thirumalai <kiran@scalex86.org>	2006-02-05 02:27:58 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-02-05 14:06:53 -0500
commit	ca3b9b91735316f0ec7f01976f85842e0bfe5c6e (patch)
tree	fc58744ab0371adb5942462ceab3fa70d0ef9375 /mm/slab.c
parent	2e1217cf96b54d3b2d0162930608159e73507fbf (diff)
[PATCH] NUMA slab locking fixes: move irq disabling from cachep->spinlock to l3 lock
Earlier, we had to disable on-chip interrupts while taking the
cachep->spinlock because cache_grow can run in interrupt context, and on
every addition of a slab to a slab cache it incremented colour_next, which
was protected by the cachep->spinlock.  Now that the per-node colour_next
is protected by the node's list_lock, we no longer need to disable
interrupts while taking the per-cache spinlock; we only need to disable
interrupts when taking the per-node kmem_list3 list_lock.

Signed-off-by: Alok N Kataria <alokk@calsoftinc.com>
Signed-off-by: Ravikiran Thirumalai <kiran@scalex86.org>
Signed-off-by: Shai Fultheim <shai@scalex86.org>
Cc: Christoph Lameter <christoph@lameter.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
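[Editorial note, not part of the patch] The locking rule behind this change can be sketched roughly as follows. The function below is a simplified, illustrative version of the cache_grow() colour handling introduced by the parent patch; the name cache_grow_sketch is invented for illustration, the fields (l3->colour_next, l3->list_lock, cachep->colour, cachep->colour_off, cachep->nodelists) follow the kernel's data structures of that era, and the body is not the exact kernel code.

/*
 * Illustrative sketch only: colour_next lives in the per-node
 * kmem_list3 and is touched only under that node's list_lock.
 * Because this path runs with interrupts already disabled, every
 * process-context user of list_lock must use the _irq lock variants,
 * while cachep->spinlock is never taken from interrupt context and
 * therefore needs only a plain spin_lock().
 */
static int cache_grow_sketch(struct kmem_cache *cachep, int nodeid)
{
	struct kmem_list3 *l3 = cachep->nodelists[nodeid];
	size_t offset;

	check_irq_off();		/* caller has interrupts disabled */
	spin_lock(&l3->list_lock);	/* no _irq needed: irqs already off */

	/* Pick this slab's colour and advance the per-node counter. */
	offset = l3->colour_next;
	if (++l3->colour_next >= cachep->colour)
		l3->colour_next = 0;

	spin_unlock(&l3->list_lock);

	offset *= cachep->colour_off;	/* colour index -> byte offset */

	/* ... go on to allocate pages and lay out the slab at 'offset' ... */
	return 1;
}

Where the list_lock is instead taken from process context with interrupts enabled (cpuup_callback(), drain_cpu_caches(), do_tune_cpucache(), s_show()), the spin_lock_irq()/spin_unlock_irq() variants must be used, which is exactly what the hunks below switch to while relaxing cachep->spinlock to plain spin_lock().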
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	36
1 file changed, 18 insertions, 18 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 2317096166dd..d3f68543f9f4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -987,7 +987,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 			cpumask_t mask;
 
 			mask = node_to_cpumask(node);
-			spin_lock_irq(&cachep->spinlock);
+			spin_lock(&cachep->spinlock);
 			/* cpu is dead; no one can alloc from it. */
 			nc = cachep->array[cpu];
 			cachep->array[cpu] = NULL;
@@ -996,7 +996,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 			if (!l3)
 				goto unlock_cache;
 
-			spin_lock(&l3->list_lock);
+			spin_lock_irq(&l3->list_lock);
 
 			/* Free limit for this kmem_list3 */
 			l3->free_limit -= cachep->batchcount;
@@ -1004,7 +1004,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 				free_block(cachep, nc->entry, nc->avail, node);
 
 			if (!cpus_empty(mask)) {
-				spin_unlock(&l3->list_lock);
+				spin_unlock_irq(&l3->list_lock);
 				goto unlock_cache;
 			}
 
@@ -1023,13 +1023,13 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 			/* free slabs belonging to this node */
 			if (__node_shrink(cachep, node)) {
 				cachep->nodelists[node] = NULL;
-				spin_unlock(&l3->list_lock);
+				spin_unlock_irq(&l3->list_lock);
 				kfree(l3);
 			} else {
-				spin_unlock(&l3->list_lock);
+				spin_unlock_irq(&l3->list_lock);
 			}
 		unlock_cache:
-			spin_unlock_irq(&cachep->spinlock);
+			spin_unlock(&cachep->spinlock);
 			kfree(nc);
 		}
 		mutex_unlock(&cache_chain_mutex);
@@ -2011,18 +2011,18 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
 
 	smp_call_function_all_cpus(do_drain, cachep);
 	check_irq_on();
-	spin_lock_irq(&cachep->spinlock);
+	spin_lock(&cachep->spinlock);
 	for_each_online_node(node) {
 		l3 = cachep->nodelists[node];
 		if (l3) {
-			spin_lock(&l3->list_lock);
+			spin_lock_irq(&l3->list_lock);
 			drain_array_locked(cachep, l3->shared, 1, node);
-			spin_unlock(&l3->list_lock);
+			spin_unlock_irq(&l3->list_lock);
 			if (l3->alien)
 				drain_alien_cache(cachep, l3);
 		}
 	}
-	spin_unlock_irq(&cachep->spinlock);
+	spin_unlock(&cachep->spinlock);
 }
 
 static int __node_shrink(struct kmem_cache *cachep, int node)
@@ -2338,7 +2338,6 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 
 	offset *= cachep->colour_off;
 
-	check_irq_off();
 	if (local_flags & __GFP_WAIT)
 		local_irq_enable();
 
@@ -2725,6 +2724,7 @@ static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int node
 	BUG_ON(!l3);
 
 retry:
+	check_irq_off();
 	spin_lock(&l3->list_lock);
 	entry = l3->slabs_partial.next;
 	if (entry == &l3->slabs_partial) {
@@ -3304,11 +3304,11 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount
 	smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
 
 	check_irq_on();
-	spin_lock_irq(&cachep->spinlock);
+	spin_lock(&cachep->spinlock);
 	cachep->batchcount = batchcount;
 	cachep->limit = limit;
 	cachep->shared = shared;
-	spin_unlock_irq(&cachep->spinlock);
+	spin_unlock(&cachep->spinlock);
 
 	for_each_online_cpu(i) {
 		struct array_cache *ccold = new.new[i];
@@ -3564,8 +3564,7 @@ static int s_show(struct seq_file *m, void *p)
 	int node;
 	struct kmem_list3 *l3;
 
-	check_irq_on();
-	spin_lock_irq(&cachep->spinlock);
+	spin_lock(&cachep->spinlock);
 	active_objs = 0;
 	num_slabs = 0;
 	for_each_online_node(node) {
@@ -3573,7 +3572,8 @@ static int s_show(struct seq_file *m, void *p)
 		if (!l3)
 			continue;
 
-		spin_lock(&l3->list_lock);
+		check_irq_on();
+		spin_lock_irq(&l3->list_lock);
 
 		list_for_each(q, &l3->slabs_full) {
 			slabp = list_entry(q, struct slab, list);
@@ -3600,7 +3600,7 @@ static int s_show(struct seq_file *m, void *p)
 		free_objects += l3->free_objects;
 		shared_avail += l3->shared->avail;
 
-		spin_unlock(&l3->list_lock);
+		spin_unlock_irq(&l3->list_lock);
 	}
 	num_slabs += active_slabs;
 	num_objs = num_slabs * cachep->num;
@@ -3644,7 +3644,7 @@ static int s_show(struct seq_file *m, void *p)
 	}
 #endif
 	seq_putc(m, '\n');
-	spin_unlock_irq(&cachep->spinlock);
+	spin_unlock(&cachep->spinlock);
 	return 0;
 }
 