commit     a2f46ee1ba5ee249ce2ca1ee7a7a0ac46529fb4f
tree       a791769005885ab26a3353b70940c635b63b3e8d
parent     c3259c8a7060d480e8eb2166da0a99d6879146b4
author     Neil Horman <nhorman@tuxdriver.com>    2010-03-16 04:14:33 -0400
committer  David S. Miller <davem@davemloft.net>  2010-03-16 17:15:45 -0400
tipc: fix lockdep warning on address assignment
While forward-porting various TIPC packages, I was constantly getting this
lockdep warning every time I used tipc-config to set a network address for
the protocol:
[ INFO: possible circular locking dependency detected ]
2.6.33 #1
tipc-config/1326 is trying to acquire lock:
(ref_table_lock){+.-...}, at: [<ffffffffa0315148>] tipc_ref_discard+0x53/0xd4 [tipc]
but task is already holding lock:
(&(&entry->lock)->rlock#2){+.-...}, at: [<ffffffffa03150d5>] tipc_ref_lock+0x43/0x63 [tipc]
which lock already depends on the new lock.
the existing dependency chain (in reverse order) is:
-> #1 (&(&entry->lock)->rlock#2){+.-...}:
[<ffffffff8107b508>] __lock_acquire+0xb67/0xd0f
[<ffffffff8107b78c>] lock_acquire+0xdc/0x102
[<ffffffff8145471e>] _raw_spin_lock_bh+0x3b/0x6e
[<ffffffffa03152b1>] tipc_ref_acquire+0xe8/0x11b [tipc]
[<ffffffffa031433f>] tipc_createport_raw+0x78/0x1b9 [tipc]
[<ffffffffa031450b>] tipc_createport+0x8b/0x125 [tipc]
[<ffffffffa030f221>] tipc_subscr_start+0xce/0x126 [tipc]
[<ffffffffa0308fb2>] process_signal_queue+0x47/0x7d [tipc]
[<ffffffff81053e0c>] tasklet_action+0x8c/0xf4
[<ffffffff81054bd8>] __do_softirq+0xf8/0x1cd
[<ffffffff8100aadc>] call_softirq+0x1c/0x30
[<ffffffff810549f4>] _local_bh_enable_ip+0xb8/0xd7
[<ffffffff81054a21>] local_bh_enable_ip+0xe/0x10
[<ffffffff81454d31>] _raw_spin_unlock_bh+0x34/0x39
[<ffffffffa0308eb8>] spin_unlock_bh.clone.0+0x15/0x17 [tipc]
[<ffffffffa0308f47>] tipc_k_signal+0x8d/0xb1 [tipc]
[<ffffffffa0308dd9>] tipc_core_start+0x8a/0xad [tipc]
[<ffffffffa01b1087>] 0xffffffffa01b1087
[<ffffffff8100207d>] do_one_initcall+0x72/0x18a
[<ffffffff810872fb>] sys_init_module+0xd8/0x23a
[<ffffffff81009b42>] system_call_fastpath+0x16/0x1b
-> #0 (ref_table_lock){+.-...}:
[<ffffffff8107b3b2>] __lock_acquire+0xa11/0xd0f
[<ffffffff8107b78c>] lock_acquire+0xdc/0x102
[<ffffffff81454836>] _raw_write_lock_bh+0x3b/0x6e
[<ffffffffa0315148>] tipc_ref_discard+0x53/0xd4 [tipc]
[<ffffffffa03141ee>] tipc_deleteport+0x40/0x119 [tipc]
[<ffffffffa0316e35>] release+0xeb/0x137 [tipc]
[<ffffffff8139dbf4>] sock_release+0x1f/0x6f
[<ffffffff8139dc6b>] sock_close+0x27/0x2b
[<ffffffff811116f6>] __fput+0x12a/0x1df
[<ffffffff811117c5>] fput+0x1a/0x1c
[<ffffffff8110e49b>] filp_close+0x68/0x72
[<ffffffff8110e552>] sys_close+0xad/0xe7
[<ffffffff81009b42>] system_call_fastpath+0x16/0x1b
Finally decided I should fix this. It's a straightforward inversion:
tipc_ref_acquire takes two locks in this order:
ref_table_lock
entry->lock
while tipc_deleteport takes them in this order:
entry->lock (via tipc_port_lock())
ref_table_lock (via tipc_ref_discard())
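This is the classic AB-BA pattern that lockdep exists to catch. As a minimal
standalone sketch of the same shape (hypothetical lock and function names,
not the actual TIPC code; the real ref_table_lock is a rwlock taken with
write_lock_bh()):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(table_lock);	/* plays the role of ref_table_lock */
static DEFINE_SPINLOCK(entry_lock);	/* plays the role of entry->lock */

static void acquire_path(void)		/* same shape as tipc_ref_acquire */
{
	spin_lock_bh(&table_lock);
	spin_lock_bh(&entry_lock);	/* order: table -> entry */
	spin_unlock_bh(&entry_lock);
	spin_unlock_bh(&table_lock);
}

static void delete_path(void)		/* same shape as tipc_deleteport */
{
	spin_lock_bh(&entry_lock);
	spin_lock_bh(&table_lock);	/* order: entry -> table: inverted */
	spin_unlock_bh(&table_lock);
	spin_unlock_bh(&entry_lock);
}

If the two paths run concurrently, one CPU can hold table_lock while waiting
for entry_lock and another can hold entry_lock while waiting for table_lock,
so neither makes progress; lockdep reports the dependency cycle even before
a real deadlock is ever hit.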
When the same entry is referenced, we get the above warning. The fix is
equally straightforward. There's no real relation between entry->lock and
ref_table_lock (they just happen to be needed at the same time), so move the
entry->lock acquisition in tipc_ref_acquire down, after we unlock
ref_table_lock (this is safe since ref_table_lock guards changes to the
reference table, and we've already claimed a slot there). I've tested the
fix below and confirmed that it clears up the lockdep issue.
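Condensed from the diff below (with the slot bookkeeping elided), the
resulting shape of tipc_ref_acquire() is:

u32 tipc_ref_acquire(void *object, spinlock_t **lock)
{
	struct reference *entry = NULL;
	u32 ref;
	...
	write_lock_bh(&ref_table_lock);
	/*
	 * claim a free (or never-used) slot and compute ref;
	 * entry->lock is no longer taken while the table lock is held
	 */
	write_unlock_bh(&ref_table_lock);

	if (entry) {
		spin_lock_bh(&entry->lock);	/* taken with no other lock held */
		entry->ref = ref;
		entry->object = object;
		*lock = &entry->lock;		/* caller unlocks when done */
	}
	return ref;
}

Since the slot was already pulled off the free list while ref_table_lock was
held, no concurrent tipc_ref_acquire() can hand out the same entry, so
deferring entry->lock until after the table lock is dropped doesn't open a
race on the assignment.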
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Allan Stephens <allan.stephens@windriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 net/tipc/ref.c | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 414fc34b8bea..8dea66500cf5 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -153,11 +153,11 @@ void tipc_ref_table_stop(void)
 
 u32 tipc_ref_acquire(void *object, spinlock_t **lock)
 {
-	struct reference *entry;
 	u32 index;
 	u32 index_mask;
 	u32 next_plus_upper;
 	u32 ref;
+	struct reference *entry = NULL;
 
 	if (!object) {
 		err("Attempt to acquire reference to non-existent object\n");
@@ -175,30 +175,36 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
 		index = tipc_ref_table.first_free;
 		entry = &(tipc_ref_table.entries[index]);
 		index_mask = tipc_ref_table.index_mask;
-		/* take lock in case a previous user of entry still holds it */
-		spin_lock_bh(&entry->lock);
 		next_plus_upper = entry->ref;
 		tipc_ref_table.first_free = next_plus_upper & index_mask;
 		ref = (next_plus_upper & ~index_mask) + index;
-		entry->ref = ref;
-		entry->object = object;
-		*lock = &entry->lock;
 	}
 	else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
 		index = tipc_ref_table.init_point++;
 		entry = &(tipc_ref_table.entries[index]);
 		spin_lock_init(&entry->lock);
-		spin_lock_bh(&entry->lock);
 		ref = tipc_ref_table.start_mask + index;
-		entry->ref = ref;
-		entry->object = object;
-		*lock = &entry->lock;
 	}
 	else {
 		ref = 0;
 	}
 	write_unlock_bh(&ref_table_lock);
 
+	/*
+	 * Grab the lock so no one else can modify this entry
+	 * While we assign its ref value & object pointer
+	 */
+	if (entry) {
+		spin_lock_bh(&entry->lock);
+		entry->ref = ref;
+		entry->object = object;
+		*lock = &entry->lock;
+		/*
+		 * keep it locked, the caller is responsible
+		 * for unlocking this when they're done with it
+		 */
+	}
+
 	return ref;
 }
 