about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
author	Eric Dumazet <eric.dumazet@gmail.com>	2012-04-05 18:17:46 -0400
committer	Luis Henriques <luis.henriques@canonical.com>	2012-05-01 06:00:21 -0400
commit	61b8c5f2b51df9a53c197dec8598458ec4c20d44 (patch)
tree	d538ca1bef0af37e76393cbe7fd93a84be973edf /net
parent	97f8f5d03a24c4b982e2b5fe0d5ecba68b20dea3 (diff)
netlink: fix races after skb queueing
BugLink: http://bugs.launchpad.net/bugs/990544

[ Upstream commit 4a7e7c2ad540e54c75489a70137bf0ec15d3a127 ]

As soon as an skb is queued into socket receive_queue, another thread
can consume it, so we are not allowed to reference skb anymore, or risk
use after free.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Tim Gardner <tim.gardner@canonical.com>
Diffstat (limited to 'net')
-rw-r--r--net/netlink/af_netlink.c24
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 6ef64adf736..24bc620b539 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -830,12 +830,19 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
830 return 0; 830 return 0;
831} 831}
832 832
833int netlink_sendskb(struct sock *sk, struct sk_buff *skb) 833static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
834{ 834{
835 int len = skb->len; 835 int len = skb->len;
836 836
837 skb_queue_tail(&sk->sk_receive_queue, skb); 837 skb_queue_tail(&sk->sk_receive_queue, skb);
838 sk->sk_data_ready(sk, len); 838 sk->sk_data_ready(sk, len);
839 return len;
840}
841
842int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
843{
844 int len = __netlink_sendskb(sk, skb);
845
839 sock_put(sk); 846 sock_put(sk);
840 return len; 847 return len;
841} 848}
@@ -960,8 +967,7 @@ static inline int netlink_broadcast_deliver(struct sock *sk,
960 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && 967 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
961 !test_bit(0, &nlk->state)) { 968 !test_bit(0, &nlk->state)) {
962 skb_set_owner_r(skb, sk); 969 skb_set_owner_r(skb, sk);
963 skb_queue_tail(&sk->sk_receive_queue, skb); 970 __netlink_sendskb(sk, skb);
964 sk->sk_data_ready(sk, skb->len);
965 return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf; 971 return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
966 } 972 }
967 return -1; 973 return -1;
@@ -1682,10 +1688,8 @@ static int netlink_dump(struct sock *sk)
1682 1688
1683 if (sk_filter(sk, skb)) 1689 if (sk_filter(sk, skb))
1684 kfree_skb(skb); 1690 kfree_skb(skb);
1685 else { 1691 else
1686 skb_queue_tail(&sk->sk_receive_queue, skb); 1692 __netlink_sendskb(sk, skb);
1687 sk->sk_data_ready(sk, skb->len);
1688 }
1689 return 0; 1693 return 0;
1690 } 1694 }
1691 1695
@@ -1697,10 +1701,8 @@ static int netlink_dump(struct sock *sk)
1697 1701
1698 if (sk_filter(sk, skb)) 1702 if (sk_filter(sk, skb))
1699 kfree_skb(skb); 1703 kfree_skb(skb);
1700 else { 1704 else
1701 skb_queue_tail(&sk->sk_receive_queue, skb); 1705 __netlink_sendskb(sk, skb);
1702 sk->sk_data_ready(sk, skb->len);
1703 }
1704 1706
1705 if (cb->done) 1707 if (cb->done)
1706 cb->done(cb); 1708 cb->done(cb);