Diffstat (limited to 'net/core/sock_reuseport.c')
 net/core/sock_reuseport.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index eed1ebf7f29d..b1e0dbea1e8c 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -36,9 +36,14 @@ int reuseport_alloc(struct sock *sk)
 	 * soft irq of receive path or setsockopt from process context
 	 */
 	spin_lock_bh(&reuseport_lock);
-	WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
-		  lockdep_is_held(&reuseport_lock)),
-		  "multiple allocations for the same socket");
+
+	/* Allocation attempts can occur concurrently via the setsockopt path
+	 * and the bind/hash path.  Nothing to do when we lose the race.
+	 */
+	if (rcu_dereference_protected(sk->sk_reuseport_cb,
+				      lockdep_is_held(&reuseport_lock)))
+		goto out;
+
 	reuse = __reuseport_alloc(INIT_SOCKS);
 	if (!reuse) {
 		spin_unlock_bh(&reuseport_lock);
@@ -49,6 +54,7 @@ int reuseport_alloc(struct sock *sk)
 	reuse->num_socks = 1;
 	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
 
+out:
 	spin_unlock_bh(&reuseport_lock);
 
 	return 0;
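For readers unfamiliar with the idiom, below is a minimal user-space sketch (not kernel code; alloc_state, struct shared_state, and state_lock are illustrative names, with a pthread mutex standing in for reuseport_lock) of the check-then-allocate-under-lock pattern the first hunk introduces: take the lock, bail out early if a concurrent caller already installed the object, otherwise allocate and publish it.

/*
 * Sketch of the pattern only; assumes a single shared pointer guarded by
 * one lock, as in the patch, but none of these names exist in the kernel.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct shared_state {
	int num_socks;
};

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static struct shared_state *state;	/* analogue of sk->sk_reuseport_cb */

static int alloc_state(void)
{
	struct shared_state *st;

	pthread_mutex_lock(&state_lock);

	/* Lost the race to a concurrent caller: nothing to do. */
	if (state)
		goto out;

	st = calloc(1, sizeof(*st));
	if (!st) {
		pthread_mutex_unlock(&state_lock);
		return -1;
	}

	st->num_socks = 1;
	state = st;	/* publish, as rcu_assign_pointer() does in the patch */

out:
	pthread_mutex_unlock(&state_lock);
	return 0;
}

int main(void)
{
	/* Both calls succeed; the second takes the early-exit path. */
	printf("%d %d\n", alloc_state(), alloc_state());
	return 0;
}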