author          Dmitry Mishin <dim@openvz.org>                  2006-08-31 18:28:39 -0400
committer       David S. Miller <davem@sunset.davemloft.net>    2006-09-22 18:18:47 -0400
commit          fda9ef5d679b07c9d9097aaf6ef7f069d794a8f9 (patch)
tree            6a265dc2038bc5568c5a499e6c8d4733650ed3f7 /net/core
parent          dc435e6dac1439340eaeceef84022c4e4749796d (diff)
[NET]: Fix sk->sk_filter field access
sk_filter() is called from the tcp_v{4,6}_rcv() functions with needlock = 0,
even though the socket is not locked at that point. To avoid this and similar
issues in the future, use RCU to protect reads of the sk->sk_filter field.
Signed-off-by: Dmitry Mishin <dim@openvz.org>
Signed-off-by: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
Signed-off-by: Kirill Korotaev <dev@openvz.org>
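
The new reader side of sk_filter() is defined outside net/core and therefore does not appear in the diffstat below. A minimal sketch of what a lock-free caller such as tcp_v4_rcv() now relies on, assuming the helper keeps its usual shape around sk_run_filter() (the exact body may differ from this commit):

/* Sketch only: the real helper lives outside net/core and may differ. */
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
        int err;
        struct sk_filter *filter;

        err = security_sock_rcv_skb(sk, skb);
        if (err)
                return err;

        rcu_read_lock_bh();             /* receive paths run in softirq context */
        filter = rcu_dereference(sk->sk_filter);
        if (filter) {
                unsigned int pkt_len = sk_run_filter(skb, filter->insns,
                                                     filter->len);
                err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
        }
        rcu_read_unlock_bh();

        return err;
}

With a helper of this shape, the receive paths can call sk_filter(sk, skb) without holding the socket lock and without the old needlock argument, which is exactly what the net/core/sock.c call sites below are changed to do.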
Diffstat (limited to 'net/core')
-rw-r--r--      net/core/filter.c        8
-rw-r--r--      net/core/sock.c         22
2 files changed, 13 insertions, 17 deletions
diff --git a/net/core/filter.c b/net/core/filter.c
index 5b4486a60cf6..6732782a5a40 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -422,10 +422,10 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
         if (!err) {
                 struct sk_filter *old_fp;
 
-                spin_lock_bh(&sk->sk_lock.slock);
-                old_fp = sk->sk_filter;
-                sk->sk_filter = fp;
-                spin_unlock_bh(&sk->sk_lock.slock);
+                rcu_read_lock_bh();
+                old_fp = rcu_dereference(sk->sk_filter);
+                rcu_assign_pointer(sk->sk_filter, fp);
+                rcu_read_unlock_bh();
                 fp = old_fp;
         }
 
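
On the update side above, sk_attach_filter() publishes the new program with rcu_assign_pointer() and returns the old one through fp so it can be released afterwards via sk_filter_release(), which drops a reference count rather than waiting on readers directly. For comparison, a generic publish-and-retire sketch of the same idea (all names here are hypothetical, and it uses synchronize_rcu() instead of reference counting):

/* Illustrative only -- not part of this patch. */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct pkt_filter {
        unsigned int len;
        /* instructions, refcount, ... */
};

static struct pkt_filter *active_filter;        /* readers use rcu_read_lock() */

/* Updaters are assumed to be serialized by a lock taken by the caller. */
static void replace_filter(struct pkt_filter *new_fp)
{
        struct pkt_filter *old_fp = active_filter;

        rcu_assign_pointer(active_filter, new_fp);      /* publish new pointer */
        synchronize_rcu();      /* wait for readers that may still see old_fp */
        kfree(old_fp);
}

The socket code uses the _bh read-side variants instead, because the filter is evaluated from packet receive processing in softirq context, and it defers freeing through sk_filter_release() until the last reference is dropped.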
diff --git a/net/core/sock.c b/net/core/sock.c
index cfaf09039b02..b77e155cbe6c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -247,11 +247,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                 goto out;
         }
 
-        /* It would be deadlock, if sock_queue_rcv_skb is used
-           with socket lock! We assume that users of this
-           function are lock free.
-        */
-        err = sk_filter(sk, skb, 1);
+        err = sk_filter(sk, skb);
         if (err)
                 goto out;
 
@@ -278,7 +274,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb)
 {
         int rc = NET_RX_SUCCESS;
 
-        if (sk_filter(sk, skb, 0))
+        if (sk_filter(sk, skb))
                 goto discard_and_relse;
 
         skb->dev = NULL;
@@ -606,15 +602,15 @@ set_rcvbuf:
                 break;
 
         case SO_DETACH_FILTER:
-                spin_lock_bh(&sk->sk_lock.slock);
-                filter = sk->sk_filter;
+                rcu_read_lock_bh();
+                filter = rcu_dereference(sk->sk_filter);
                 if (filter) {
-                        sk->sk_filter = NULL;
-                        spin_unlock_bh(&sk->sk_lock.slock);
+                        rcu_assign_pointer(sk->sk_filter, NULL);
                         sk_filter_release(sk, filter);
+                        rcu_read_unlock_bh();
                         break;
                 }
-                spin_unlock_bh(&sk->sk_lock.slock);
+                rcu_read_unlock_bh();
                 ret = -ENONET;
                 break;
 
@@ -884,10 +880,10 @@ void sk_free(struct sock *sk)
         if (sk->sk_destruct)
                 sk->sk_destruct(sk);
 
-        filter = sk->sk_filter;
+        filter = rcu_dereference(sk->sk_filter);
         if (filter) {
                 sk_filter_release(sk, filter);
-                sk->sk_filter = NULL;
+                rcu_assign_pointer(sk->sk_filter, NULL);
         }
 
         sock_disable_timestamp(sk);