aboutsummaryrefslogtreecommitdiffstats
path: root/net/ieee802154
diff options
context:
space:
mode:
authoralex.bluesman.smirnov@gmail.com <alex.bluesman.smirnov@gmail.com>2011-11-10 02:38:38 -0500
committerDavid S. Miller <davem@davemloft.net>2011-11-14 00:19:42 -0500
commit719269afbc69ab96339aad6c2d3b32f7d8311146 (patch)
tree26a25b23d982a8bccaa807ac813e3b387d133697 /net/ieee802154
parent2a24444f8f2bea694003e3eac5c2f8d9a386bdc5 (diff)
6LoWPAN: add fragmentation support
This patch adds support for frame fragmentation. Signed-off-by: Alexander Smirnov <alex.bluesman.smirnov@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ieee802154')
-rw-r--r--net/ieee802154/6lowpan.c260
-rw-r--r--net/ieee802154/6lowpan.h18
2 files changed, 274 insertions, 4 deletions
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 19d6aefe97d4..7d4cb58bbddc 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -113,6 +113,20 @@ struct lowpan_dev_record {
113 struct list_head list; 113 struct list_head list;
114}; 114};
115 115
116struct lowpan_fragment {
117 struct sk_buff *skb; /* skb to be assembled */
118 spinlock_t lock; /* concurrency lock */
119 u16 length; /* length to be assembled */
120 u32 bytes_rcv; /* bytes received */
121 u16 tag; /* current fragment tag */
122 struct timer_list timer; /* assembling timer */
123 struct list_head list; /* fragments list */
124};
125
126static unsigned short fragment_tag;
127static LIST_HEAD(lowpan_fragments);
128spinlock_t flist_lock;
129
116static inline struct 130static inline struct
117lowpan_dev_info *lowpan_dev_info(const struct net_device *dev) 131lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
118{ 132{
@@ -244,6 +258,17 @@ static u8 lowpan_fetch_skb_u8(struct sk_buff *skb)
244 return ret; 258 return ret;
245} 259}
246 260
261static u16 lowpan_fetch_skb_u16(struct sk_buff *skb)
262{
263 u16 ret;
264
265 BUG_ON(!pskb_may_pull(skb, 2));
266
267 ret = skb->data[0] | (skb->data[1] << 8);
268 skb_pull(skb, 2);
269 return ret;
270}
271
247static int lowpan_header_create(struct sk_buff *skb, 272static int lowpan_header_create(struct sk_buff *skb,
248 struct net_device *dev, 273 struct net_device *dev,
249 unsigned short type, const void *_daddr, 274 unsigned short type, const void *_daddr,
@@ -467,6 +492,7 @@ static int lowpan_header_create(struct sk_buff *skb,
467 memcpy(&(sa.hwaddr), saddr, 8); 492 memcpy(&(sa.hwaddr), saddr, 8);
468 493
469 mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA; 494 mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
495
470 return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev, 496 return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
471 type, (void *)&da, (void *)&sa, skb->len); 497 type, (void *)&da, (void *)&sa, skb->len);
472 } 498 }
@@ -511,6 +537,21 @@ static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr)
511 return stat; 537 return stat;
512} 538}
513 539
540static void lowpan_fragment_timer_expired(unsigned long entry_addr)
541{
542 struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;
543
544 pr_debug("%s: timer expired for frame with tag %d\n", __func__,
545 entry->tag);
546
547 spin_lock(&flist_lock);
548 list_del(&entry->list);
549 spin_unlock(&flist_lock);
550
551 dev_kfree_skb(entry->skb);
552 kfree(entry);
553}
554
514static int 555static int
515lowpan_process_data(struct sk_buff *skb) 556lowpan_process_data(struct sk_buff *skb)
516{ 557{
@@ -525,6 +566,107 @@ lowpan_process_data(struct sk_buff *skb)
525 if (skb->len < 2) 566 if (skb->len < 2)
526 goto drop; 567 goto drop;
527 iphc0 = lowpan_fetch_skb_u8(skb); 568 iphc0 = lowpan_fetch_skb_u8(skb);
569
570 /* fragments assembling */
571 switch (iphc0 & LOWPAN_DISPATCH_MASK) {
572 case LOWPAN_DISPATCH_FRAG1:
573 case LOWPAN_DISPATCH_FRAGN:
574 {
575 struct lowpan_fragment *frame;
576 u8 len, offset;
577 u16 tag;
578 bool found = false;
579
580 len = lowpan_fetch_skb_u8(skb); /* frame length */
581 tag = lowpan_fetch_skb_u16(skb);
582
583 /*
584 * check if frame assembling with the same tag is
585 * already in progress
586 */
587 spin_lock(&flist_lock);
588
589 list_for_each_entry(frame, &lowpan_fragments, list)
590 if (frame->tag == tag) {
591 found = true;
592 break;
593 }
594
595 /* alloc new frame structure */
596 if (!found) {
597 frame = kzalloc(sizeof(struct lowpan_fragment),
598 GFP_ATOMIC);
599 if (!frame)
600 goto unlock_and_drop;
601
602 INIT_LIST_HEAD(&frame->list);
603
604 frame->length = (iphc0 & 7) | (len << 3);
605 frame->tag = tag;
606
607 /* allocate buffer for frame assembling */
608 frame->skb = alloc_skb(frame->length +
609 sizeof(struct ipv6hdr), GFP_ATOMIC);
610
611 if (!frame->skb) {
612 kfree(frame);
613 goto unlock_and_drop;
614 }
615
616 frame->skb->priority = skb->priority;
617 frame->skb->dev = skb->dev;
618
619 /* reserve headroom for uncompressed ipv6 header */
620 skb_reserve(frame->skb, sizeof(struct ipv6hdr));
621 skb_put(frame->skb, frame->length);
622
623 init_timer(&frame->timer);
624 /* time out is the same as for ipv6 - 60 sec */
625 frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
626 frame->timer.data = (unsigned long)frame;
627 frame->timer.function = lowpan_fragment_timer_expired;
628
629 add_timer(&frame->timer);
630
631 list_add_tail(&frame->list, &lowpan_fragments);
632 }
633
634 if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1)
635 goto unlock_and_drop;
636
637 offset = lowpan_fetch_skb_u8(skb); /* fetch offset */
638
639 /* if payload fits buffer, copy it */
640 if (likely((offset * 8 + skb->len) <= frame->length))
641 skb_copy_to_linear_data_offset(frame->skb, offset * 8,
642 skb->data, skb->len);
643 else
644 goto unlock_and_drop;
645
646 frame->bytes_rcv += skb->len;
647
648 /* frame assembling complete */
649 if ((frame->bytes_rcv == frame->length) &&
650 frame->timer.expires > jiffies) {
651 /* if timer hasn't expired - first of all delete it */
652 del_timer(&frame->timer);
653 list_del(&frame->list);
654 spin_unlock(&flist_lock);
655
656 dev_kfree_skb(skb);
657 skb = frame->skb;
658 kfree(frame);
659 iphc0 = lowpan_fetch_skb_u8(skb);
660 break;
661 }
662 spin_unlock(&flist_lock);
663
664 return kfree_skb(skb), 0;
665 }
666 default:
667 break;
668 }
669
528 iphc1 = lowpan_fetch_skb_u8(skb); 670 iphc1 = lowpan_fetch_skb_u8(skb);
529 671
530 _saddr = mac_cb(skb)->sa.hwaddr; 672 _saddr = mac_cb(skb)->sa.hwaddr;
@@ -674,6 +816,9 @@ lowpan_process_data(struct sk_buff *skb)
674 lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr, 816 lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
675 sizeof(hdr)); 817 sizeof(hdr));
676 return lowpan_skb_deliver(skb, &hdr); 818 return lowpan_skb_deliver(skb, &hdr);
819
820unlock_and_drop:
821 spin_unlock(&flist_lock);
677drop: 822drop:
678 kfree_skb(skb); 823 kfree_skb(skb);
679 return -EINVAL; 824 return -EINVAL;
@@ -692,18 +837,118 @@ static int lowpan_set_address(struct net_device *dev, void *p)
692 return 0; 837 return 0;
693} 838}
694 839
840static int lowpan_get_mac_header_length(struct sk_buff *skb)
841{
842 /*
843 * Currently long addressing mode is supported only, so the overall
844 * header size is 21:
845 * FC SeqNum DPAN DA SA Sec
846 * 2 + 1 + 2 + 8 + 8 + 0 = 21
847 */
848 return 21;
849}
850
851static int
852lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
853 int mlen, int plen, int offset)
854{
855 struct sk_buff *frag;
856 int hlen, ret;
857
858 /* a zero payload length indicates the first fragment */
859 hlen = (plen == 0 ? LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE);
860
861 lowpan_raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);
862
863 frag = dev_alloc_skb(hlen + mlen + plen + IEEE802154_MFR_SIZE);
864 if (!frag)
865 return -ENOMEM;
866
867 frag->priority = skb->priority;
868 frag->dev = skb->dev;
869
870 /* copy header, MFR and payload */
871 memcpy(skb_put(frag, mlen), skb->data, mlen);
872 memcpy(skb_put(frag, hlen), head, hlen);
873
874 if (plen)
875 skb_copy_from_linear_data_offset(skb, offset + mlen,
876 skb_put(frag, plen), plen);
877
878 lowpan_raw_dump_table(__func__, " raw fragment dump", frag->data,
879 frag->len);
880
881 ret = dev_queue_xmit(frag);
882
883 if (ret < 0)
884 dev_kfree_skb(frag);
885
886 return ret;
887}
888
889static int
890lowpan_skb_fragmentation(struct sk_buff *skb)
891{
892 int err, header_length, payload_length, tag, offset = 0;
893 u8 head[5];
894
895 header_length = lowpan_get_mac_header_length(skb);
896 payload_length = skb->len - header_length;
897 tag = fragment_tag++;
898
899 /* first fragment header */
900 head[0] = LOWPAN_DISPATCH_FRAG1 | (payload_length & 0x7);
901 head[1] = (payload_length >> 3) & 0xff;
902 head[2] = tag & 0xff;
903 head[3] = tag >> 8;
904
905 err = lowpan_fragment_xmit(skb, head, header_length, 0, 0);
906
907 /* next fragment header */
908 head[0] &= ~LOWPAN_DISPATCH_FRAG1;
909 head[0] |= LOWPAN_DISPATCH_FRAGN;
910
911 while ((payload_length - offset > 0) && (err >= 0)) {
912 int len = LOWPAN_FRAG_SIZE;
913
914 head[4] = offset / 8;
915
916 if (payload_length - offset < len)
917 len = payload_length - offset;
918
919 err = lowpan_fragment_xmit(skb, head, header_length,
920 len, offset);
921 offset += len;
922 }
923
924 return err;
925}
926
695static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev) 927static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
696{ 928{
697 int err = 0; 929 int err = -1;
698 930
699 pr_debug("(%s): package xmit\n", __func__); 931 pr_debug("(%s): package xmit\n", __func__);
700 932
701 skb->dev = lowpan_dev_info(dev)->real_dev; 933 skb->dev = lowpan_dev_info(dev)->real_dev;
702 if (skb->dev == NULL) { 934 if (skb->dev == NULL) {
703 pr_debug("(%s) ERROR: no real wpan device found\n", __func__); 935 pr_debug("(%s) ERROR: no real wpan device found\n", __func__);
704 dev_kfree_skb(skb); 936 goto error;
705 } else 937 }
938
939 if (skb->len <= IEEE802154_MTU) {
706 err = dev_queue_xmit(skb); 940 err = dev_queue_xmit(skb);
941 goto out;
942 }
943
944 pr_debug("(%s): frame is too big, fragmentation is needed\n",
945 __func__);
946 err = lowpan_skb_fragmentation(skb);
947error:
948 dev_kfree_skb(skb);
949out:
950 if (err < 0)
951 pr_debug("(%s): ERROR: xmit failed\n", __func__);
707 952
708 return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK); 953 return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK);
709} 954}
@@ -765,8 +1010,15 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
765 goto drop; 1010 goto drop;
766 1011
767 /* check that it's our buffer */ 1012 /* check that it's our buffer */
768 if ((skb->data[0] & 0xe0) == 0x60) 1013 switch (skb->data[0] & 0xe0) {
1014 case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
1015 case LOWPAN_DISPATCH_FRAG1: /* first fragment header */
1016 case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */
769 lowpan_process_data(skb); 1017 lowpan_process_data(skb);
1018 break;
1019 default:
1020 break;
1021 }
770 1022
771 return NET_RX_SUCCESS; 1023 return NET_RX_SUCCESS;
772 1024
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
index 5d8cf80b930d..5d2e5a03742f 100644
--- a/net/ieee802154/6lowpan.h
+++ b/net/ieee802154/6lowpan.h
@@ -159,6 +159,24 @@
159#define LOWPAN_DISPATCH_FRAG1 0xc0 /* 11000xxx */ 159#define LOWPAN_DISPATCH_FRAG1 0xc0 /* 11000xxx */
160#define LOWPAN_DISPATCH_FRAGN 0xe0 /* 11100xxx */ 160#define LOWPAN_DISPATCH_FRAGN 0xe0 /* 11100xxx */
161 161
162#define LOWPAN_DISPATCH_MASK 0xf8 /* 11111000 */
163
164#define LOWPAN_FRAG_TIMEOUT (HZ * 60) /* time-out 60 sec */
165
166#define LOWPAN_FRAG1_HEAD_SIZE 0x4
167#define LOWPAN_FRAGN_HEAD_SIZE 0x5
168
169/*
169/* 170 * According to the IEEE 802.15.4 standard:
171 * - MTU is 127 octets
172 * - maximum MHR size is 37 octets
173 * - MFR size is 2 octets
174 *
175 * so the minimal payload size we can guarantee is:
176 * MTU - MHR - MFR = 88 octets
177 */
178#define LOWPAN_FRAG_SIZE 88
179
162/* 180/*
163 * Values of fields within the IPHC encoding first byte 181 * Values of fields within the IPHC encoding first byte
164 * (C stands for compressed and I for inline) 182 * (C stands for compressed and I for inline)