| field | value | date |
|---|---|---|
| author | David S. Miller <davem@sunset.davemloft.net> | 2007-02-21 02:51:47 -0500 |
| committer | David S. Miller <davem@sunset.davemloft.net> | 2007-02-26 14:42:48 -0500 |
| commit | 2c4f6219aca5939b57596278ea8b014275d4917b | |
| tree | 4635aab17f05da9945e112c61c54e93788417f4e /net/ipv4 | |
| parent | 7f62ad5d37f4e43c841e92c6f159c93dcf2d2cdd | |
[TCP]: Fix MD5 signature pool locking.
The locking calls assumed that these code paths were only
invoked in software interrupt context, but that isn't true.
Therefore we need to use spin_{lock,unlock}_bh() throughout.
Signed-off-by: David S. Miller <davem@davemloft.net>
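As background (not part of the commit): a lock taken both from process context and from a bottom half (softirq) must be taken with the `_bh` variants in process context, otherwise a softirq arriving on the same CPU while the lock is held can try to acquire it again and deadlock. The `_bh` variants are also safe from softirq context itself, since bottom-half disabling nests, which is why the patch can switch to them throughout. A minimal sketch of the pattern, using illustrative names (`example_lock`, `example_state`) that are not from the patch:

```c
#include <linux/spinlock.h>

/* Illustrative only -- not from the patch. */
static DEFINE_SPINLOCK(example_lock);
static int example_state;

/* Process-context path: bottom halves must be disabled while the lock
 * is held, or a softirq on this CPU could interrupt us and spin
 * forever on example_lock.
 */
static void example_from_process_context(void)
{
	spin_lock_bh(&example_lock);
	example_state++;
	spin_unlock_bh(&example_lock);
}

/* Softirq path: spin_lock_bh() nests safely here too, so both
 * contexts can share the same locking calls.
 */
static void example_from_softirq(void)
{
	spin_lock_bh(&example_lock);
	example_state--;
	spin_unlock_bh(&example_lock);
}
```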
Diffstat (limited to 'net/ipv4')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | net/ipv4/tcp.c | 24 |

1 file changed, 12 insertions(+), 12 deletions(-)
```diff
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ac6516c642a1..74c4d103ebc2 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2266,12 +2266,12 @@ void tcp_free_md5sig_pool(void)
 {
 	struct tcp_md5sig_pool **pool = NULL;
 
-	spin_lock(&tcp_md5sig_pool_lock);
+	spin_lock_bh(&tcp_md5sig_pool_lock);
 	if (--tcp_md5sig_users == 0) {
 		pool = tcp_md5sig_pool;
 		tcp_md5sig_pool = NULL;
 	}
-	spin_unlock(&tcp_md5sig_pool_lock);
+	spin_unlock_bh(&tcp_md5sig_pool_lock);
 	if (pool)
 		__tcp_free_md5sig_pool(pool);
 }
@@ -2314,36 +2314,36 @@ struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
 	int alloc = 0;
 
 retry:
-	spin_lock(&tcp_md5sig_pool_lock);
+	spin_lock_bh(&tcp_md5sig_pool_lock);
 	pool = tcp_md5sig_pool;
 	if (tcp_md5sig_users++ == 0) {
 		alloc = 1;
-		spin_unlock(&tcp_md5sig_pool_lock);
+		spin_unlock_bh(&tcp_md5sig_pool_lock);
 	} else if (!pool) {
 		tcp_md5sig_users--;
-		spin_unlock(&tcp_md5sig_pool_lock);
+		spin_unlock_bh(&tcp_md5sig_pool_lock);
 		cpu_relax();
 		goto retry;
 	} else
-		spin_unlock(&tcp_md5sig_pool_lock);
+		spin_unlock_bh(&tcp_md5sig_pool_lock);
 
 	if (alloc) {
 		/* we cannot hold spinlock here because this may sleep. */
 		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
-		spin_lock(&tcp_md5sig_pool_lock);
+		spin_lock_bh(&tcp_md5sig_pool_lock);
 		if (!p) {
 			tcp_md5sig_users--;
-			spin_unlock(&tcp_md5sig_pool_lock);
+			spin_unlock_bh(&tcp_md5sig_pool_lock);
 			return NULL;
 		}
 		pool = tcp_md5sig_pool;
 		if (pool) {
 			/* oops, it has already been assigned. */
-			spin_unlock(&tcp_md5sig_pool_lock);
+			spin_unlock_bh(&tcp_md5sig_pool_lock);
 			__tcp_free_md5sig_pool(p);
 		} else {
 			tcp_md5sig_pool = pool = p;
-			spin_unlock(&tcp_md5sig_pool_lock);
+			spin_unlock_bh(&tcp_md5sig_pool_lock);
 		}
 	}
 	return pool;
@@ -2354,11 +2354,11 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
 struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
 {
 	struct tcp_md5sig_pool **p;
-	spin_lock(&tcp_md5sig_pool_lock);
+	spin_lock_bh(&tcp_md5sig_pool_lock);
 	p = tcp_md5sig_pool;
 	if (p)
 		tcp_md5sig_users++;
-	spin_unlock(&tcp_md5sig_pool_lock);
+	spin_unlock_bh(&tcp_md5sig_pool_lock);
 	return (p ? *per_cpu_ptr(p, cpu) : NULL);
 }
 
```