diff options
| author | Martin Townsend <mtownsend1973@gmail.com> | 2014-10-23 10:40:53 -0400 |
|---|---|---|
| committer | Marcel Holtmann <marcel@holtmann.org> | 2014-10-27 10:51:15 -0400 |
| commit | f8b361768ea2eaf9b21dfbe7388958ec31798c8b (patch) | |
| tree | e2b9ea93b72fbf33d6f9ee84a7fc63edfaba8c92 /net/ieee802154 | |
| parent | f81f466ca588a5bd868008154050305481f241d4 (diff) | |
6lowpan: remove skb_deliver from IPHC
Separating skb delivery from decompression ensures that we can support further
decompression schemes and removes the mixed return value of error codes with
NET_RX_FOO.
Signed-off-by: Martin Townsend <mtownsend1973@gmail.com>
Acked-by: Alexander Aring <alex.aring@gmail.com>
Acked-by: Jukka Rissanen <jukka.rissanen@linux.intel.com>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
Diffstat (limited to 'net/ieee802154')
| -rw-r--r-- | net/ieee802154/6lowpan_rtnl.c | 41 |
1 file changed, 26 insertions, 15 deletions
diff --git a/net/ieee802154/6lowpan_rtnl.c b/net/ieee802154/6lowpan_rtnl.c index 1779a08d110a..15c7717c5e06 100644 --- a/net/ieee802154/6lowpan_rtnl.c +++ b/net/ieee802154/6lowpan_rtnl.c | |||
| @@ -141,20 +141,28 @@ static int lowpan_give_skb_to_devices(struct sk_buff *skb, | |||
| 141 | struct sk_buff *skb_cp; | 141 | struct sk_buff *skb_cp; |
| 142 | int stat = NET_RX_SUCCESS; | 142 | int stat = NET_RX_SUCCESS; |
| 143 | 143 | ||
| 144 | skb->protocol = htons(ETH_P_IPV6); | ||
| 145 | skb->pkt_type = PACKET_HOST; | ||
| 146 | |||
| 144 | rcu_read_lock(); | 147 | rcu_read_lock(); |
| 145 | list_for_each_entry_rcu(entry, &lowpan_devices, list) | 148 | list_for_each_entry_rcu(entry, &lowpan_devices, list) |
| 146 | if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) { | 149 | if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) { |
| 147 | skb_cp = skb_copy(skb, GFP_ATOMIC); | 150 | skb_cp = skb_copy(skb, GFP_ATOMIC); |
| 148 | if (!skb_cp) { | 151 | if (!skb_cp) { |
| 149 | stat = -ENOMEM; | 152 | kfree_skb(skb); |
| 150 | break; | 153 | rcu_read_unlock(); |
| 154 | return NET_RX_DROP; | ||
| 151 | } | 155 | } |
| 152 | 156 | ||
| 153 | skb_cp->dev = entry->ldev; | 157 | skb_cp->dev = entry->ldev; |
| 154 | stat = netif_rx(skb_cp); | 158 | stat = netif_rx(skb_cp); |
| 159 | if (stat == NET_RX_DROP) | ||
| 160 | break; | ||
| 155 | } | 161 | } |
| 156 | rcu_read_unlock(); | 162 | rcu_read_unlock(); |
| 157 | 163 | ||
| 164 | consume_skb(skb); | ||
| 165 | |||
| 158 | return stat; | 166 | return stat; |
| 159 | } | 167 | } |
| 160 | 168 | ||
| @@ -190,8 +198,7 @@ static int process_data(struct sk_buff *skb, const struct ieee802154_hdr *hdr) | |||
| 190 | 198 | ||
| 191 | return lowpan_process_data(skb, skb->dev, sap, sa.addr_type, | 199 | return lowpan_process_data(skb, skb->dev, sap, sa.addr_type, |
| 192 | IEEE802154_ADDR_LEN, dap, da.addr_type, | 200 | IEEE802154_ADDR_LEN, dap, da.addr_type, |
| 193 | IEEE802154_ADDR_LEN, iphc0, iphc1, | 201 | IEEE802154_ADDR_LEN, iphc0, iphc1); |
| 194 | lowpan_give_skb_to_devices); | ||
| 195 | 202 | ||
| 196 | drop: | 203 | drop: |
| 197 | kfree_skb(skb); | 204 | kfree_skb(skb); |
| @@ -528,44 +535,48 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev, | |||
| 528 | 535 | ||
| 529 | /* check that it's our buffer */ | 536 | /* check that it's our buffer */ |
| 530 | if (skb->data[0] == LOWPAN_DISPATCH_IPV6) { | 537 | if (skb->data[0] == LOWPAN_DISPATCH_IPV6) { |
| 531 | skb->protocol = htons(ETH_P_IPV6); | ||
| 532 | skb->pkt_type = PACKET_HOST; | ||
| 533 | |||
| 534 | /* Pull off the 1-byte of 6lowpan header. */ | 538 | /* Pull off the 1-byte of 6lowpan header. */ |
| 535 | skb_pull(skb, 1); | 539 | skb_pull(skb, 1); |
| 536 | 540 | return lowpan_give_skb_to_devices(skb, NULL); | |
| 537 | ret = lowpan_give_skb_to_devices(skb, NULL); | ||
| 538 | if (ret == NET_RX_DROP) | ||
| 539 | goto drop; | ||
| 540 | } else { | 541 | } else { |
| 541 | switch (skb->data[0] & 0xe0) { | 542 | switch (skb->data[0] & 0xe0) { |
| 542 | case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */ | 543 | case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */ |
| 543 | ret = process_data(skb, &hdr); | 544 | ret = process_data(skb, &hdr); |
| 544 | if (ret == NET_RX_DROP) | 545 | if (ret == NET_RX_DROP) |
| 545 | goto drop; | 546 | goto drop; |
| 546 | break; | 547 | |
| 548 | return lowpan_give_skb_to_devices(skb, NULL); | ||
| 547 | case LOWPAN_DISPATCH_FRAG1: /* first fragment header */ | 549 | case LOWPAN_DISPATCH_FRAG1: /* first fragment header */ |
| 548 | ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1); | 550 | ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1); |
| 549 | if (ret == 1) { | 551 | if (ret == 1) { |
| 550 | ret = process_data(skb, &hdr); | 552 | ret = process_data(skb, &hdr); |
| 551 | if (ret == NET_RX_DROP) | 553 | if (ret == NET_RX_DROP) |
| 552 | goto drop; | 554 | goto drop; |
| 555 | |||
| 556 | return lowpan_give_skb_to_devices(skb, NULL); | ||
| 557 | } else if (ret == -1) { | ||
| 558 | return NET_RX_DROP; | ||
| 559 | } else { | ||
| 560 | return NET_RX_SUCCESS; | ||
| 553 | } | 561 | } |
| 554 | break; | ||
| 555 | case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */ | 562 | case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */ |
| 556 | ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAGN); | 563 | ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAGN); |
| 557 | if (ret == 1) { | 564 | if (ret == 1) { |
| 558 | ret = process_data(skb, &hdr); | 565 | ret = process_data(skb, &hdr); |
| 559 | if (ret == NET_RX_DROP) | 566 | if (ret == NET_RX_DROP) |
| 560 | goto drop; | 567 | goto drop; |
| 568 | |||
| 569 | return lowpan_give_skb_to_devices(skb, NULL); | ||
| 570 | } else if (ret == -1) { | ||
| 571 | return NET_RX_DROP; | ||
| 572 | } else { | ||
| 573 | return NET_RX_SUCCESS; | ||
| 561 | } | 574 | } |
| 562 | break; | ||
| 563 | default: | 575 | default: |
| 564 | break; | 576 | break; |
| 565 | } | 577 | } |
| 566 | } | 578 | } |
| 567 | 579 | ||
| 568 | return NET_RX_SUCCESS; | ||
| 569 | drop_skb: | 580 | drop_skb: |
| 570 | kfree_skb(skb); | 581 | kfree_skb(skb); |
| 571 | drop: | 582 | drop: |
