author		Dmitry Mishin <dim@openvz.org>		2006-08-31 18:28:39 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2006-09-22 18:18:47 -0400
commit		fda9ef5d679b07c9d9097aaf6ef7f069d794a8f9
tree		6a265dc2038bc5568c5a499e6c8d4733650ed3f7 /net/packet
parent		dc435e6dac1439340eaeceef84022c4e4749796d
[NET]: Fix sk->sk_filter field access
Function sk_filter() is called from the tcp_v{4,6}_rcv() functions with the argument needlock = 0, while the socket is not locked at that moment. In order to avoid this and similar issues in the future, use RCU to protect reads of the sk->sk_filter field.

Signed-off-by: Dmitry Mishin <dim@openvz.org>
Signed-off-by: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
Signed-off-by: Kirill Korotaev <dev@openvz.org>
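[Editor's note] The reader-side pattern this message describes looks roughly like the sketch below. This is a minimal illustration, not the commit's exact code: the helper name example_run_filter is hypothetical, while rcu_read_lock_bh(), rcu_dereference() and sk_run_filter() are the kernel primitives the patch actually uses (sk_run_filter() returns the number of bytes to keep, with 0 meaning "drop the packet").

	#include <linux/skbuff.h>	/* struct sk_buff */
	#include <linux/rcupdate.h>	/* rcu_read_lock_bh(), rcu_dereference() */
	#include <linux/filter.h>	/* struct sk_filter, sk_run_filter() */
	#include <net/sock.h>		/* struct sock */

	/*
	 * Hypothetical helper showing the RCU read side: fetch the filter
	 * pointer exactly once via rcu_dereference() inside an RCU-BH
	 * critical section. The pointer may be replaced by another CPU at
	 * any moment, so it must never be re-read or used after the
	 * critical section ends.
	 */
	static unsigned int example_run_filter(struct sock *sk, struct sk_buff *skb)
	{
		struct sk_filter *filter;
		unsigned int pkt_len = skb->len;	/* no filter: accept whole packet */

		rcu_read_lock_bh();
		filter = rcu_dereference(sk->sk_filter);
		if (filter != NULL)
			pkt_len = sk_run_filter(skb, filter->insns, filter->len);
		rcu_read_unlock_bh();

		return pkt_len;	/* 0 = drop, otherwise bytes to keep */
	}

Contrast this with the old code in the diff below, which took bh_lock_sock() and re-checked the pointer under it; RCU makes the read side lock-free, which is what allows it to be called safely from softirq context where the socket lock is not held.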
Diffstat (limited to 'net/packet')
-rw-r--r--	net/packet/af_packet.c	43
1 file changed, 18 insertions(+), 25 deletions(-)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 300215bdbf46..f4ccb90e6739 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -427,21 +427,24 @@ out_unlock:
 }
 #endif
 
-static inline unsigned run_filter(struct sk_buff *skb, struct sock *sk, unsigned res)
+static inline int run_filter(struct sk_buff *skb, struct sock *sk,
+			     unsigned *snaplen)
 {
 	struct sk_filter *filter;
+	int err = 0;
 
-	bh_lock_sock(sk);
-	filter = sk->sk_filter;
-	/*
-	 * Our caller already checked that filter != NULL but we need to
-	 * verify that under bh_lock_sock() to be safe
-	 */
-	if (likely(filter != NULL))
-		res = sk_run_filter(skb, filter->insns, filter->len);
-	bh_unlock_sock(sk);
+	rcu_read_lock_bh();
+	filter = rcu_dereference(sk->sk_filter);
+	if (filter != NULL) {
+		err = sk_run_filter(skb, filter->insns, filter->len);
+		if (!err)
+			err = -EPERM;
+		else if (*snaplen > err)
+			*snaplen = err;
+	}
+	rcu_read_unlock_bh();
 
-	return res;
+	return err;
 }
 
 /*
@@ -491,13 +494,8 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
 
 	snaplen = skb->len;
 
-	if (sk->sk_filter) {
-		unsigned res = run_filter(skb, sk, snaplen);
-		if (res == 0)
-			goto drop_n_restore;
-		if (snaplen > res)
-			snaplen = res;
-	}
+	if (run_filter(skb, sk, &snaplen) < 0)
+		goto drop_n_restore;
 
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
 	    (unsigned)sk->sk_rcvbuf)
@@ -593,13 +591,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
 
 	snaplen = skb->len;
 
-	if (sk->sk_filter) {
-		unsigned res = run_filter(skb, sk, snaplen);
-		if (res == 0)
-			goto drop_n_restore;
-		if (snaplen > res)
-			snaplen = res;
-	}
+	if (run_filter(skb, sk, &snaplen) < 0)
+		goto drop_n_restore;
 
 	if (sk->sk_type == SOCK_DGRAM) {
 		macoff = netoff = TPACKET_ALIGN(TPACKET_HDRLEN) + 16;
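[Editor's note] For completeness, the update side of this scheme lives in net/core/filter.c and is not part of this net/packet diff: a replacement filter must be published with rcu_assign_pointer(), and the displaced one may only be freed after a BH grace period has elapsed. The sketch below is a rough illustration under those assumptions; the example_* names are hypothetical, and it assumes struct sk_filter embeds a struct rcu_head member named rcu, as the full patch adds in include/linux/filter.h.

	#include <linux/kernel.h>	/* container_of() */
	#include <linux/rcupdate.h>	/* rcu_assign_pointer(), call_rcu_bh() */
	#include <linux/filter.h>	/* struct sk_filter */
	#include <net/sock.h>		/* struct sock */

	/*
	 * Hypothetical RCU callback: runs once every rcu_read_lock_bh()
	 * section that could still see the old filter has completed.
	 */
	static void example_filter_rcu_free(struct rcu_head *rcu)
	{
		struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

		kfree(fp);	/* stand-in for the real release/uncharge path */
	}

	/*
	 * Hypothetical writer-side helper: swap the filter pointer
	 * atomically, then let RCU reclaim the old filter safely. Readers
	 * that already fetched old_fp keep a valid pointer until their
	 * critical section ends; new readers see fp.
	 */
	static void example_replace_filter(struct sock *sk, struct sk_filter *fp)
	{
		struct sk_filter *old_fp;

		rcu_read_lock_bh();
		old_fp = rcu_dereference(sk->sk_filter);
		rcu_assign_pointer(sk->sk_filter, fp);
		rcu_read_unlock_bh();

		if (old_fp != NULL)
			call_rcu_bh(&old_fp->rcu, example_filter_rcu_free);
	}

call_rcu_bh() is the grace-period primitive matching the rcu_read_lock_bh() sections used on the read side; a plain synchronize/call_rcu variant would not be guaranteed to wait for BH readers on this era of kernels.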