author		Phoebe Buckheister <phoebe.buckheister@itwm.fraunhofer.de>	2014-03-17 13:30:19 -0400
committer	David S. Miller <davem@davemloft.net>	2014-03-18 15:59:25 -0400
commit		8cfad496c4257441710735ccef622f3829870164 (patch)
tree		e5574aa1fac1cac858f94fffb571516bc35809dc /net/ieee802154
parent		3c5dfeff932224d3c97cee9fd0d1e2876d700ad3 (diff)
ieee802154: properly unshare skbs in ieee802154 *_rcv functions
ieee802154 sockets do not properly unshare received skbs, which leads to
panics (at least) when they are used in conjunction with 6lowpan, so
run skb_share_check on received skbs.
6lowpan also contains a use-after-free, which is trivially fixed by
replacing the inlined skb_share_check with the explicit call.
Signed-off-by: Phoebe Buckheister <phoebe.buckheister@itwm.fraunhofer.de>
Tested-by: Alexander Aring <alex.aring@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
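For context, the idiom the patch introduces into each receive path is skb_share_check(): if the skb is shared it clones it (dropping the caller's reference to the original) and returns the private copy; if the clone fails it frees the skb and returns NULL. The sketch below only illustrates that pattern in isolation; it is not code from this patch. The handler name example_rcv is made up, and the skb_pull()/netif_rx() calls merely stand in for whatever a real handler does once the skb is private.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Illustrative only -- not part of this patch.  A packet_type handler may
 * receive an skb that other taps still hold a reference to, so it must
 * unshare the buffer before pulling headers or rewriting skb fields.
 */
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* Returns the same skb if it was unshared, a private clone if it
	 * was shared (the original reference is dropped), or NULL if the
	 * clone failed (the skb has already been freed for us). */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NET_RX_DROP;

	/* From here on the skb is exclusively ours; modifying it cannot
	 * corrupt other users of the original buffer. */
	skb_pull(skb, 1);			/* e.g. strip a 1-byte dispatch header */
	skb->protocol = htons(ETH_P_IPV6);

	netif_rx(skb);				/* placeholder for the real hand-off */
	return NET_RX_SUCCESS;
}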
Diffstat (limited to 'net/ieee802154')
-rw-r--r--	net/ieee802154/6lowpan_rtnl.c	29
-rw-r--r--	net/ieee802154/dgram.c		 4
-rw-r--r--	net/ieee802154/raw.c		 4
3 files changed, 21 insertions(+), 16 deletions(-)
diff --git a/net/ieee802154/6lowpan_rtnl.c b/net/ieee802154/6lowpan_rtnl.c
index 606039442a59..0f5a69ed746d 100644
--- a/net/ieee802154/6lowpan_rtnl.c
+++ b/net/ieee802154/6lowpan_rtnl.c
@@ -447,10 +447,13 @@ static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
 static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
 {
-	struct sk_buff *local_skb;
 	struct ieee802154_hdr hdr;
 	int ret;
 
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		goto drop;
+
 	if (!netif_running(dev))
 		goto drop_skb;
 
@@ -460,42 +463,36 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
 	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
 		goto drop_skb;
 
-	local_skb = skb_clone(skb, GFP_ATOMIC);
-	if (!local_skb)
-		goto drop_skb;
-
-	kfree_skb(skb);
-
 	/* check that it's our buffer */
 	if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
-		local_skb->protocol = htons(ETH_P_IPV6);
-		local_skb->pkt_type = PACKET_HOST;
+		skb->protocol = htons(ETH_P_IPV6);
+		skb->pkt_type = PACKET_HOST;
 
 		/* Pull off the 1-byte of 6lowpan header. */
-		skb_pull(local_skb, 1);
+		skb_pull(skb, 1);
 
-		ret = lowpan_give_skb_to_devices(local_skb, NULL);
+		ret = lowpan_give_skb_to_devices(skb, NULL);
 		if (ret == NET_RX_DROP)
 			goto drop;
 	} else {
 		switch (skb->data[0] & 0xe0) {
 		case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */
-			ret = process_data(local_skb, &hdr);
+			ret = process_data(skb, &hdr);
 			if (ret == NET_RX_DROP)
 				goto drop;
 			break;
 		case LOWPAN_DISPATCH_FRAG1:	/* first fragment header */
-			ret = lowpan_frag_rcv(local_skb, LOWPAN_DISPATCH_FRAG1);
+			ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1);
 			if (ret == 1) {
-				ret = process_data(local_skb, &hdr);
+				ret = process_data(skb, &hdr);
 				if (ret == NET_RX_DROP)
 					goto drop;
 			}
 			break;
 		case LOWPAN_DISPATCH_FRAGN:	/* next fragments headers */
-			ret = lowpan_frag_rcv(local_skb, LOWPAN_DISPATCH_FRAGN);
+			ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAGN);
 			if (ret == 1) {
-				ret = process_data(local_skb, &hdr);
+				ret = process_data(skb, &hdr);
 				if (ret == NET_RX_DROP)
 					goto drop;
 			}
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 4c47154041b0..6d251a35bdc4 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -329,6 +329,10 @@ out:
 
 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		return NET_RX_DROP;
+
 	if (sock_queue_rcv_skb(sk, skb) < 0) {
 		kfree_skb(skb);
 		return NET_RX_DROP;
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
index e5258cf6773b..74d54fae33d7 100644
--- a/net/ieee802154/raw.c
+++ b/net/ieee802154/raw.c
@@ -213,6 +213,10 @@ out:
 
 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		return NET_RX_DROP;
+
 	if (sock_queue_rcv_skb(sk, skb) < 0) {
 		kfree_skb(skb);
 		return NET_RX_DROP;