aboutsummaryrefslogtreecommitdiffstats
path: root/net/packet
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2014-04-11 16:15:36 -0400
committerDavid S. Miller <davem@davemloft.net>2014-04-11 16:15:36 -0400
commit676d23690fb62b5d51ba5d659935e9f7d9da9f8e (patch)
treef6fbceee43e05c724868153ca37b702fb5e43b8c /net/packet
parentad20d5f673898578f9d8a156d7a4c921f5ca4584 (diff)
net: Fix use after free by removing length arg from sk_data_ready callbacks.
Several spots in the kernel perform a sequence like: skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk, skb->len); But at the moment we place the SKB onto the socket receive queue it can be consumed and freed up. So this skb->len access is potentially to freed up memory. Furthermore, the skb->len can be modified by the consumer so it is possible that the value isn't accurate. And finally, no actual implementation of this callback actually uses the length argument. And since nobody actually cared about its value, lots of call sites pass arbitrary values in such as '0' and even '1'. So just remove the length argument from the callback, that way there is no confusion whatsoever and all of these use-after-free cases get fixed as a side effect. Based upon a patch by Eric Dumazet and his suggestion to audit this issue tree-wide. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/packet')
-rw-r--r--net/packet/af_packet.c6
1 file changed, 3 insertions, 3 deletions
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 72e0c71fb01d..b85c67ccb797 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1848,7 +1848,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1848 skb->dropcount = atomic_read(&sk->sk_drops); 1848 skb->dropcount = atomic_read(&sk->sk_drops);
1849 __skb_queue_tail(&sk->sk_receive_queue, skb); 1849 __skb_queue_tail(&sk->sk_receive_queue, skb);
1850 spin_unlock(&sk->sk_receive_queue.lock); 1850 spin_unlock(&sk->sk_receive_queue.lock);
1851 sk->sk_data_ready(sk, skb->len); 1851 sk->sk_data_ready(sk);
1852 return 0; 1852 return 0;
1853 1853
1854drop_n_acct: 1854drop_n_acct:
@@ -2054,7 +2054,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2054 else 2054 else
2055 prb_clear_blk_fill_status(&po->rx_ring); 2055 prb_clear_blk_fill_status(&po->rx_ring);
2056 2056
2057 sk->sk_data_ready(sk, 0); 2057 sk->sk_data_ready(sk);
2058 2058
2059drop_n_restore: 2059drop_n_restore:
2060 if (skb_head != skb->data && skb_shared(skb)) { 2060 if (skb_head != skb->data && skb_shared(skb)) {
@@ -2069,7 +2069,7 @@ ring_is_full:
2069 po->stats.stats1.tp_drops++; 2069 po->stats.stats1.tp_drops++;
2070 spin_unlock(&sk->sk_receive_queue.lock); 2070 spin_unlock(&sk->sk_receive_queue.lock);
2071 2071
2072 sk->sk_data_ready(sk, 0); 2072 sk->sk_data_ready(sk);
2073 kfree_skb(copy_skb); 2073 kfree_skb(copy_skb);
2074 goto drop_n_restore; 2074 goto drop_n_restore;
2075} 2075}