author:    Alexander Smirnov <alex.bluesman.smirnov@gmail.com>  2012-07-10 17:22:48 -0400
committer: David S. Miller <davem@davemloft.net>  2012-07-12 10:54:46 -0400
commit:    33c34c5e9310622d5ed9a53d571f92824044020f
tree:      46be5ba0abb2f892ee99d82eb2d7268355a99688 /net/ieee802154/6lowpan.c
parent:    abbee2effcbce55440accb0a1dd315562875efa2
6lowpan: rework fragment-deleting routine
The 6lowpan module starts collecting incoming frames and fragments right
after lowpan_module_init(), so it is better to clean up unfinished
fragments in lowpan_cleanup_module() rather than when a link goes down.
Change the flist_lock spinlock calls to their _bh variants to prevent a
deadlock with the expired-timer softirq event, and remove the unused
per-fragment lock.

Signed-off-by: Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
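The deadlock the _bh variants avoid is the classic process-context vs.
timer-softirq case: if lowpan_process_data() held flist_lock via plain
spin_lock() and the fragment timer expired on the same CPU, the softirq
handler would spin on the already-held lock forever. spin_lock_bh() keeps
bottom halves off for the critical section, and del_timer_sync() waits out
a handler already running on another CPU. Below is a minimal,
self-contained module sketching that pattern; the demo_* names are
hypothetical, the timer API is the setup_timer()/unsigned-long style of
the 3.x kernels this patch targets, and none of it is code from
6lowpan.c.

/* Hypothetical sketch, not from 6lowpan.c: why spin_lock_bh() +
 * del_timer_sync() pair safely with a timer whose handler does not
 * take the same lock.
 */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/jiffies.h>

static DEFINE_SPINLOCK(demo_lock);
static LIST_HEAD(demo_list);
static struct timer_list demo_timer;

/* Runs in timer-softirq context.  As in the patched
 * lowpan_fragment_timer_expired(), the handler does not take demo_lock,
 * so callers holding the lock may del_timer_sync() without deadlocking.
 */
static void demo_timer_expired(unsigned long data)
{
	pr_debug("demo timer expired\n");
}

static int __init demo_init(void)
{
	setup_timer(&demo_timer, demo_timer_expired, 0);
	mod_timer(&demo_timer, jiffies + HZ);

	/* Plain spin_lock() here would be a hazard: if the timer fired on
	 * this CPU mid-critical-section and its handler contended for the
	 * lock, the CPU would spin forever.  spin_lock_bh() disables
	 * bottom halves, so the softirq cannot interrupt the section.
	 */
	spin_lock_bh(&demo_lock);
	/* ... walk or modify demo_list here ... */
	spin_unlock_bh(&demo_lock);

	return 0;
}

static void __exit demo_exit(void)
{
	/* Wait for a concurrently running handler before teardown. */
	del_timer_sync(&demo_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note that the patch can call del_timer_sync() while holding flist_lock
only because the reworked timer handler no longer takes that lock; with
the old handler, del_timer_sync() under the lock would itself deadlock
waiting for the handler to finish.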
Diffstat (limited to 'net/ieee802154/6lowpan.c')
 net/ieee802154/6lowpan.c | 39 ++++++++++++++++++++-------------------
 1 file changed, 20 insertions(+), 19 deletions(-)
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 2e790fbe848d..6871ec1b30f8 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -113,7 +113,6 @@ struct lowpan_dev_record {
 
 struct lowpan_fragment {
 	struct sk_buff		*skb;		/* skb to be assembled */
-	spinlock_t		lock;		/* concurency lock */
 	u16			length;		/* length to be assemled */
 	u32			bytes_rcv;	/* bytes received */
 	u16			tag;		/* current fragment tag */
@@ -637,10 +636,7 @@ static void lowpan_fragment_timer_expired(unsigned long entry_addr)
 
 	pr_debug("timer expired for frame with tag %d\n", entry->tag);
 
-	spin_lock(&flist_lock);
 	list_del(&entry->list);
-	spin_unlock(&flist_lock);
-
 	dev_kfree_skb(entry->skb);
 	kfree(entry);
 }
@@ -727,7 +723,7 @@ lowpan_process_data(struct sk_buff *skb)
 	 * check if frame assembling with the same tag is
 	 * already in progress
 	 */
-	spin_lock(&flist_lock);
+	spin_lock_bh(&flist_lock);
 
 	list_for_each_entry(frame, &lowpan_fragments, list)
 		if (frame->tag == tag) {
@@ -761,9 +757,9 @@ lowpan_process_data(struct sk_buff *skb)
 			if ((frame->bytes_rcv == frame->length) &&
 			    frame->timer.expires > jiffies) {
 				/* if timer haven't expired - first of all delete it */
-				del_timer(&frame->timer);
+				del_timer_sync(&frame->timer);
 				list_del(&frame->list);
-				spin_unlock(&flist_lock);
+				spin_unlock_bh(&flist_lock);
 
 				dev_kfree_skb(skb);
 				skb = frame->skb;
@@ -774,7 +770,7 @@ lowpan_process_data(struct sk_buff *skb)
 
 			break;
 		}
-	spin_unlock(&flist_lock);
+	spin_unlock_bh(&flist_lock);
 
 	return kfree_skb(skb), 0;
 }
@@ -929,7 +925,7 @@ lowpan_process_data(struct sk_buff *skb)
 	return lowpan_skb_deliver(skb, &hdr);
 
 unlock_and_drop:
-	spin_unlock(&flist_lock);
+	spin_unlock_bh(&flist_lock);
 drop:
 	kfree_skb(skb);
 	return -EINVAL;
@@ -1196,19 +1192,9 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
 	struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
 	struct net_device *real_dev = lowpan_dev->real_dev;
 	struct lowpan_dev_record *entry, *tmp;
-	struct lowpan_fragment *frame, *tframe;
 
 	ASSERT_RTNL();
 
-	spin_lock(&flist_lock);
-	list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
-		del_timer(&frame->timer);
-		list_del(&frame->list);
-		dev_kfree_skb(frame->skb);
-		kfree(frame);
-	}
-	spin_unlock(&flist_lock);
-
 	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
 	list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
 		if (entry->ldev == dev) {
@@ -1264,9 +1250,24 @@ out:
 
 static void __exit lowpan_cleanup_module(void)
 {
+	struct lowpan_fragment *frame, *tframe;
+
 	lowpan_netlink_fini();
 
 	dev_remove_pack(&lowpan_packet_type);
+
+	/* Now 6lowpan packet_type is removed, so no new fragments are
+	 * expected on RX, therefore that's the time to clean incomplete
+	 * fragments.
+	 */
+	spin_lock_bh(&flist_lock);
+	list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
+		del_timer_sync(&frame->timer);
+		list_del(&frame->list);
+		dev_kfree_skb(frame->skb);
+		kfree(frame);
+	}
+	spin_unlock_bh(&flist_lock);
 }
 
 module_init(lowpan_init_module);