author     Pekka Enberg <penberg@cs.helsinki.fi>     2009-11-23 15:01:15 -0500
committer  Pekka Enberg <penberg@cs.helsinki.fi>     2009-11-30 12:16:08 -0500
commit     ce79ddc8e2376a9a93c7d42daf89bfcbb9187e62
tree       cddd58bd72e34bb1955bafb944bf262c20c2de04  /mm/slab.c
parent     648f4e3e50c4793d9dbf9a09afa193631f76fa26

SLAB: Fix lockdep annotations for CPU hotplug
As reported by Paul McKenney:
I am seeing some lockdep complaints in rcutorture runs that include
frequent CPU-hotplug operations. The tests are otherwise successful.
My first thought was to send a patch that gave each array_cache
structure's ->lock field its own struct lock_class_key, but you already
have an init_lock_keys() that seems to be intended to deal with this.
------------------------------------------------------------------------
=============================================
[ INFO: possible recursive locking detected ]
2.6.32-rc4-autokern1 #1
---------------------------------------------
syslogd/2908 is trying to acquire lock:
(&nc->lock){..-...}, at: [<c0000000001407f4>] .kmem_cache_free+0x118/0x2d4
but task is already holding lock:
(&nc->lock){..-...}, at: [<c0000000001411bc>] .kfree+0x1f0/0x324
other info that might help us debug this:
3 locks held by syslogd/2908:
#0: (&u->readlock){+.+.+.}, at: [<c0000000004556f8>] .unix_dgram_recvmsg+0x70/0x338
#1: (&nc->lock){..-...}, at: [<c0000000001411bc>] .kfree+0x1f0/0x324
#2: (&parent->list_lock){-.-...}, at: [<c000000000140f64>] .__drain_alien_cache+0x50/0xb8
stack backtrace:
Call Trace:
[c0000000e8ccafc0] [c0000000000101e4] .show_stack+0x70/0x184 (unreliable)
[c0000000e8ccb070] [c0000000000afebc] .validate_chain+0x6ec/0xf58
[c0000000e8ccb180] [c0000000000b0ff0] .__lock_acquire+0x8c8/0x974
[c0000000e8ccb280] [c0000000000b2290] .lock_acquire+0x140/0x18c
[c0000000e8ccb350] [c000000000468df0] ._spin_lock+0x48/0x70
[c0000000e8ccb3e0] [c0000000001407f4] .kmem_cache_free+0x118/0x2d4
[c0000000e8ccb4a0] [c000000000140b90] .free_block+0x130/0x1a8
[c0000000e8ccb540] [c000000000140f94] .__drain_alien_cache+0x80/0xb8
[c0000000e8ccb5e0] [c0000000001411e0] .kfree+0x214/0x324
[c0000000e8ccb6a0] [c0000000003ca860] .skb_release_data+0xe8/0x104
[c0000000e8ccb730] [c0000000003ca2ec] .__kfree_skb+0x20/0xd4
[c0000000e8ccb7b0] [c0000000003cf2c8] .skb_free_datagram+0x1c/0x5c
[c0000000e8ccb830] [c00000000045597c] .unix_dgram_recvmsg+0x2f4/0x338
[c0000000e8ccb920] [c0000000003c0f14] .sock_recvmsg+0xf4/0x13c
[c0000000e8ccbb30] [c0000000003c28ec] .SyS_recvfrom+0xb4/0x130
[c0000000e8ccbcb0] [c0000000003bfb78] .sys_recv+0x18/0x2c
[c0000000e8ccbd20] [c0000000003ed388] .compat_sys_recv+0x14/0x28
[c0000000e8ccbd90] [c0000000003ee1bc] .compat_sys_socketcall+0x178/0x220
[c0000000e8ccbe30] [c0000000000085d4] syscall_exit+0x0/0x40
The boot-time init_lock_keys() only annotates the array_cache locks that
exist when it runs; the alien caches allocated later in cpuup_prepare(),
when a CPU is brought online, keep the default lock class, so lockdep sees
the nested lock acquisition in __drain_alien_cache() as recursive locking.
This patch fixes the issue by setting up the lockdep annotations during CPU
hotplug as well: the per-node part of init_lock_keys() is split out into
init_node_lock_keys(), which cpuup_prepare() now calls for the incoming
CPU's node.
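
As a concrete illustration of the primitive involved, here is a minimal
sketch (not taken from this patch; the key and helper names are invented
for illustration). All spinlocks initialized through the same
spin_lock_init() call site share a single lockdep class, so acquiring two
array_cache locks nested looks like recursive locking; lockdep_set_class()
moves a lock into a class of its own, which is what init_lock_keys() does
for every alien-cache lock it can see:

#include <linux/spinlock.h>

/* One static key == one lockdep class for every lock assigned to it. */
static struct lock_class_key alien_cache_lock_key;      /* illustrative */

static void reclass_alien_lock(spinlock_t *lock)        /* illustrative */
{
        /*
         * After this call, holding a default-class array_cache lock
         * while taking this one is no longer flagged as recursion.
         */
        lockdep_set_class(lock, &alien_cache_lock_key);
}

The catch is that lockdep_set_class() only affects locks that already exist
when it runs, which is why a one-shot pass at boot cannot cover caches
allocated during hotplug.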
Reported-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c | 108
1 file changed, 61 insertions(+), 47 deletions(-)
@@ -604,6 +604,26 @@ static struct kmem_cache cache_cache = {
 
 #define BAD_ALIEN_MAGIC 0x01020304ul
 
+/*
+ * chicken and egg problem: delay the per-cpu array allocation
+ * until the general caches are up.
+ */
+static enum {
+        NONE,
+        PARTIAL_AC,
+        PARTIAL_L3,
+        EARLY,
+        FULL
+} g_cpucache_up;
+
+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+        return g_cpucache_up >= EARLY;
+}
+
 #ifdef CONFIG_LOCKDEP
 
 /*
@@ -620,40 +640,52 @@ static struct kmem_cache cache_cache = {
 static struct lock_class_key on_slab_l3_key;
 static struct lock_class_key on_slab_alc_key;
 
-static inline void init_lock_keys(void)
-
+static void init_node_lock_keys(int q)
 {
-        int q;
         struct cache_sizes *s = malloc_sizes;
 
-        while (s->cs_size != ULONG_MAX) {
-                for_each_node(q) {
-                        struct array_cache **alc;
-                        int r;
-                        struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
-                        if (!l3 || OFF_SLAB(s->cs_cachep))
-                                continue;
-                        lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
-                        alc = l3->alien;
-                        /*
-                         * FIXME: This check for BAD_ALIEN_MAGIC
-                         * should go away when common slab code is taught to
-                         * work even without alien caches.
-                         * Currently, non NUMA code returns BAD_ALIEN_MAGIC
-                         * for alloc_alien_cache,
-                         */
-                        if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
-                                continue;
-                        for_each_node(r) {
-                                if (alc[r])
-                                        lockdep_set_class(&alc[r]->lock,
-                                                          &on_slab_alc_key);
-                        }
+        if (g_cpucache_up != FULL)
+                return;
+
+        for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
+                struct array_cache **alc;
+                struct kmem_list3 *l3;
+                int r;
+
+                l3 = s->cs_cachep->nodelists[q];
+                if (!l3 || OFF_SLAB(s->cs_cachep))
+                        return;
+                lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
+                alc = l3->alien;
+                /*
+                 * FIXME: This check for BAD_ALIEN_MAGIC
+                 * should go away when common slab code is taught to
+                 * work even without alien caches.
+                 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+                 * for alloc_alien_cache,
+                 */
+                if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+                        return;
+                for_each_node(r) {
+                        if (alc[r])
+                                lockdep_set_class(&alc[r]->lock,
+                                                  &on_slab_alc_key);
                 }
-                s++;
         }
 }
+
+static inline void init_lock_keys(void)
+{
+        int node;
+
+        for_each_node(node)
+                init_node_lock_keys(node);
+}
 #else
+static void init_node_lock_keys(int q)
+{
+}
+
 static inline void init_lock_keys(void)
 {
 }
@@ -665,26 +697,6 @@ static inline void init_lock_keys(void)
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
-/*
- * chicken and egg problem: delay the per-cpu array allocation
- * until the general caches are up.
- */
-static enum {
-        NONE,
-        PARTIAL_AC,
-        PARTIAL_L3,
-        EARLY,
-        FULL
-} g_cpucache_up;
-
-/*
- * used by boot code to determine if it can use slab based allocator
- */
-int slab_is_available(void)
-{
-        return g_cpucache_up >= EARLY;
-}
-
 static DEFINE_PER_CPU(struct delayed_work, reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
@@ -1254,6 +1266,8 @@ static int __cpuinit cpuup_prepare(long cpu)
                 kfree(shared);
                 free_alien_cache(alien);
         }
+        init_node_lock_keys(node);
+
         return 0;
 bad:
         cpuup_canceled(cpu);
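
For orientation, the hunk above lands at the tail of cpuup_prepare(), which
runs from the slab CPU-hotplug notifier before the incoming CPU starts. The
sketch below is an abridged paraphrase of that notifier path from the same
era of mm/slab.c (not part of this patch; only the bring-up leg is shown,
other hotplug actions omitted):

static int __cpuinit cpuup_callback(struct notifier_block *nfb,
                                    unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        int err = 0;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                /* Serialized against cache creation and destruction. */
                mutex_lock(&cache_chain_mutex);
                err = cpuup_prepare(cpu);       /* now also annotates locks */
                mutex_unlock(&cache_chain_mutex);
                break;
        default:
                break;
        }
        return err ? NOTIFY_BAD : NOTIFY_OK;
}

Because the alien array_caches for the incoming CPU's node are (re)allocated
inside cpuup_prepare(), calling init_node_lock_keys(node) at its end
reclassifies exactly the locks that the boot-time init_lock_keys() pass
could not have seen.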