path: root/net
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c | 2
-rw-r--r--  net/Kconfig | 1
-rw-r--r--  net/Makefile | 1
-rw-r--r--  net/appletalk/ddp.c | 31
-rw-r--r--  net/bluetooth/cmtp/capi.c | 2
-rw-r--r--  net/can/af_can.c | 2
-rw-r--r--  net/core/datagram.c | 181
-rw-r--r--  net/core/dev.c | 14
-rw-r--r--  net/core/iovec.c | 4
-rw-r--r--  net/core/neighbour.c | 46
-rw-r--r--  net/core/pktgen.c | 6
-rw-r--r--  net/core/skb_dma_map.c | 13
-rw-r--r--  net/core/skbuff.c | 237
-rw-r--r--  net/core/sock.c | 32
-rw-r--r--  net/core/user_dma.c | 46
-rw-r--r--  net/ieee802154/Kconfig | 12
-rw-r--r--  net/ieee802154/Makefile | 5
-rw-r--r--  net/ieee802154/af802154.h | 36
-rw-r--r--  net/ieee802154/af_ieee802154.c | 372
-rw-r--r--  net/ieee802154/dgram.c | 394
-rw-r--r--  net/ieee802154/netlink.c | 523
-rw-r--r--  net/ieee802154/nl_policy.c | 52
-rw-r--r--  net/ieee802154/raw.c | 254
-rw-r--r--  net/ipv4/ip_fragment.c | 4
-rw-r--r--  net/ipv4/ip_output.c | 7
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_sctp.c | 5
-rw-r--r--  net/ipv6/ip6_output.c | 7
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 4
-rw-r--r--  net/ipv6/reassembly.c | 4
-rw-r--r--  net/mac80211/agg-tx.c | 6
-rw-r--r--  net/mac80211/cfg.c | 4
-rw-r--r--  net/mac80211/ieee80211_i.h | 5
-rw-r--r--  net/mac80211/main.c | 61
-rw-r--r--  net/mac80211/mlme.c | 57
-rw-r--r--  net/mac80211/rc80211_minstrel.c | 2
-rw-r--r--  net/mac80211/rx.c | 27
-rw-r--r--  net/mac80211/tx.c | 19
-rw-r--r--  net/mac80211/util.c | 46
-rw-r--r--  net/mac80211/wme.c | 2
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 4
-rw-r--r--  net/phonet/pep-gprs.c | 4
-rw-r--r--  net/phonet/pep.c | 4
-rw-r--r--  net/rfkill/Kconfig | 2
-rw-r--r--  net/rfkill/core.c | 93
-rw-r--r--  net/sched/sch_hfsc.c | 8
-rw-r--r--  net/sctp/input.c | 4
-rw-r--r--  net/sctp/protocol.c | 2
-rw-r--r--  net/sctp/socket.c | 6
-rw-r--r--  net/sctp/ulpevent.c | 7
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 1
-rw-r--r--  net/wireless/core.c | 19
-rw-r--r--  net/wireless/reg.c | 7
-rw-r--r--  net/xfrm/xfrm_algo.c | 41
53 files changed, 2189 insertions, 539 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 714e1c3536be..fe649081fbdc 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -758,7 +758,7 @@ static void __exit vlan_cleanup_module(void)
 	BUG_ON(!hlist_empty(&vlan_group_hash[i]));
 
 	unregister_pernet_gen_device(vlan_net_id, &vlan_net_ops);
-	synchronize_net();
+	rcu_barrier(); /* Wait for completion of call_rcu()'s */
 
 	vlan_gvrp_uninit();
 }
diff --git a/net/Kconfig b/net/Kconfig
index c19f549c8e74..7051b9710675 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -179,6 +179,7 @@ source "net/lapb/Kconfig"
 source "net/econet/Kconfig"
 source "net/wanrouter/Kconfig"
 source "net/phonet/Kconfig"
+source "net/ieee802154/Kconfig"
 source "net/sched/Kconfig"
 source "net/dcb/Kconfig"
 
diff --git a/net/Makefile b/net/Makefile
index 9e00a55a901b..ba324aefda73 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -60,6 +60,7 @@ obj-$(CONFIG_NET_9P) += 9p/
 ifneq ($(CONFIG_DCB),)
 obj-y += dcb/
 endif
+obj-y += ieee802154/
 
 ifeq ($(CONFIG_NET),y)
 obj-$(CONFIG_SYSCTL) += sysctl_net.o
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index d6a9243641af..b603cbacdc58 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -939,6 +939,7 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 					  int len, unsigned long sum)
 {
 	int start = skb_headlen(skb);
+	struct sk_buff *frag_iter;
 	int i, copy;
 
 	/* checksum stuff in header space */
@@ -977,26 +978,22 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			int end;
+	skb_walk_frags(skb, frag_iter) {
+		int end;
 
 		WARN_ON(start > offset + len);
 
-		end = start + list->len;
+		end = start + frag_iter->len;
 		if ((copy = end - offset) > 0) {
 			if (copy > len)
 				copy = len;
-			sum = atalk_sum_skb(list, offset - start,
+			sum = atalk_sum_skb(frag_iter, offset - start,
 					    copy, sum);
 			if ((len -= copy) == 0)
 				return sum;
 			offset += copy;
-			}
-			start = end;
 		}
+		start = end;
 	}
 
 	BUG_ON(len > 0);
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 78958c0f9a40..97f8d68d574d 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -382,7 +382,7 @@ static void cmtp_reset_ctr(struct capi_ctr *ctrl)
 
 	BT_DBG("ctrl %p", ctrl);
 
-	capi_ctr_reseted(ctrl);
+	capi_ctr_down(ctrl);
 
 	atomic_inc(&session->terminate);
 	cmtp_schedule(session);
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 10f0528c3bf5..e733725b11d4 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -903,6 +903,8 @@ static __exit void can_exit(void)
 	}
 	spin_unlock(&can_rcvlists_lock);
 
+	rcu_barrier(); /* Wait for completion of call_rcu()'s */
+
 	kmem_cache_destroy(rcv_cache);
 }
 
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e2a36f05cdf7..58abee1f1df1 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -282,6 +282,7 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
 
 	/* Copy header. */
 	if (copy > 0) {
@@ -322,28 +323,24 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
+	skb_walk_frags(skb, frag_iter) {
+		int end;
 
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				if (skb_copy_datagram_iovec(list,
-							    offset - start,
-							    to, copy))
-					goto fault;
-				if ((len -= copy) == 0)
-					return 0;
-				offset += copy;
-			}
-			start = end;
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			if (skb_copy_datagram_iovec(frag_iter,
+						    offset - start,
+						    to, copy))
+				goto fault;
+			if ((len -= copy) == 0)
+				return 0;
+			offset += copy;
 		}
+		start = end;
 	}
 	if (!len)
 		return 0;
@@ -369,6 +366,7 @@ int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
 
 	/* Copy header. */
 	if (copy > 0) {
@@ -411,30 +409,26 @@ int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
+	skb_walk_frags(skb, frag_iter) {
+		int end;
 
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				if (skb_copy_datagram_const_iovec(list,
-							offset - start,
-							to, to_offset,
-							copy))
-					goto fault;
-				if ((len -= copy) == 0)
-					return 0;
-				offset += copy;
-				to_offset += copy;
-			}
-			start = end;
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			if (skb_copy_datagram_const_iovec(frag_iter,
+							  offset - start,
+							  to, to_offset,
+							  copy))
+				goto fault;
+			if ((len -= copy) == 0)
+				return 0;
+			offset += copy;
+			to_offset += copy;
 		}
+		start = end;
 	}
 	if (!len)
 		return 0;
@@ -461,12 +455,14 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
 
 	/* Copy header. */
 	if (copy > 0) {
 		if (copy > len)
 			copy = len;
-		if (memcpy_fromiovecend(skb->data + offset, from, 0, copy))
+		if (memcpy_fromiovecend(skb->data + offset, from, from_offset,
+					copy))
 			goto fault;
 		if ((len -= copy) == 0)
 			return 0;
@@ -505,31 +501,27 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
+	skb_walk_frags(skb, frag_iter) {
+		int end;
 
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				if (skb_copy_datagram_from_iovec(list,
-								 offset - start,
-								 from,
-								 from_offset,
-								 copy))
-					goto fault;
-				if ((len -= copy) == 0)
-					return 0;
-				offset += copy;
-				from_offset += copy;
-			}
-			start = end;
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			if (skb_copy_datagram_from_iovec(frag_iter,
+							 offset - start,
+							 from,
+							 from_offset,
+							 copy))
+				goto fault;
+			if ((len -= copy) == 0)
+				return 0;
+			offset += copy;
+			from_offset += copy;
 		}
+		start = end;
 	}
 	if (!len)
 		return 0;
@@ -544,8 +536,9 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 				       __wsum *csump)
 {
 	int start = skb_headlen(skb);
-	int pos = 0;
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
+	int pos = 0;
 
 	/* Copy header. */
 	if (copy > 0) {
@@ -596,33 +589,29 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
+	skb_walk_frags(skb, frag_iter) {
+		int end;
 
-		for (; list; list=list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				__wsum csum2 = 0;
-				if (copy > len)
-					copy = len;
-				if (skb_copy_and_csum_datagram(list,
-							       offset - start,
-							       to, copy,
-							       &csum2))
-					goto fault;
-				*csump = csum_block_add(*csump, csum2, pos);
-				if ((len -= copy) == 0)
-					return 0;
-				offset += copy;
-				to += copy;
-				pos += copy;
-			}
-			start = end;
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			__wsum csum2 = 0;
+			if (copy > len)
+				copy = len;
+			if (skb_copy_and_csum_datagram(frag_iter,
+						       offset - start,
+						       to, copy,
+						       &csum2))
+				goto fault;
+			*csump = csum_block_add(*csump, csum2, pos);
+			if ((len -= copy) == 0)
+				return 0;
+			offset += copy;
+			to += copy;
+			pos += copy;
 		}
+		start = end;
 	}
 	if (!len)
 		return 0;
diff --git a/net/core/dev.c b/net/core/dev.c
index 1f38401fc028..11560e3258b5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -269,7 +269,8 @@ static const unsigned short netdev_lock_type[] =
 	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
 	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
 	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
-	 ARPHRD_PHONET_PIPE, ARPHRD_VOID, ARPHRD_NONE};
+	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, ARPHRD_IEEE802154_PHY,
+	 ARPHRD_VOID, ARPHRD_NONE};
 
 static const char *netdev_lock_name[] =
 	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
@@ -286,7 +287,8 @@ static const char *netdev_lock_name[] =
286 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", 287 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
287 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211", 288 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
288 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", 289 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
289 "_xmit_PHONET_PIPE", "_xmit_VOID", "_xmit_NONE"}; 290 "_xmit_PHONET_PIPE", "_xmit_IEEE802154", "_xmit_IEEE802154_PHY",
291 "_xmit_VOID", "_xmit_NONE"};
290 292
291static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; 293static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
292static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; 294static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
@@ -1820,7 +1822,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 	if (netif_needs_gso(dev, skb))
 		goto gso;
 
-	if (skb_shinfo(skb)->frag_list &&
+	if (skb_has_frags(skb) &&
 	    !(dev->features & NETIF_F_FRAGLIST) &&
 	    __skb_linearize(skb))
 		goto out_kfree_skb;
@@ -2407,7 +2409,7 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 	if (!(skb->dev->features & NETIF_F_GRO))
 		goto normal;
 
-	if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
+	if (skb_is_gso(skb) || skb_has_frags(skb))
 		goto normal;
 
 	rcu_read_lock();
@@ -3655,8 +3657,8 @@ static int dev_addr_init(struct net_device *dev)
 	/* rtnl_mutex must be held here */
 
 	INIT_LIST_HEAD(&dev->dev_addr_list);
-	memset(addr, 0, sizeof(*addr));
-	err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, sizeof(*addr),
+	memset(addr, 0, sizeof(addr));
+	err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, sizeof(addr),
 			    NETDEV_HW_ADDR_T_LAN);
 	if (!err) {
 		/*
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 40a76ce19d9f..16ad45d4882b 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -112,9 +112,9 @@ int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
 			continue;
 		}
 		copy = min_t(unsigned int, iov->iov_len - offset, len);
-		offset = 0;
-		if (copy_to_user(iov->iov_base, kdata, copy))
+		if (copy_to_user(iov->iov_base + offset, kdata, copy))
 			return -EFAULT;
+		offset = 0;
 		kdata += copy;
 		len -= copy;
 	}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index c54229befcfe..163b4f5b0365 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -771,6 +771,28 @@ static __inline__ int neigh_max_probes(struct neighbour *n)
 				p->ucast_probes + p->app_probes + p->mcast_probes);
 }
 
+static void neigh_invalidate(struct neighbour *neigh)
+{
+	struct sk_buff *skb;
+
+	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
+	NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
+	neigh->updated = jiffies;
+
+	/* It is very thin place. report_unreachable is very complicated
+	   routine. Particularly, it can hit the same neighbour entry!
+
+	   So that, we try to be accurate and avoid dead loop. --ANK
+	 */
+	while (neigh->nud_state == NUD_FAILED &&
+	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
+		write_unlock(&neigh->lock);
+		neigh->ops->error_report(neigh, skb);
+		write_lock(&neigh->lock);
+	}
+	skb_queue_purge(&neigh->arp_queue);
+}
+
 /* Called when a timer expires for a neighbour entry. */
 
 static void neigh_timer_handler(unsigned long arg)
@@ -835,26 +857,9 @@ static void neigh_timer_handler(unsigned long arg)
 
 	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
 	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
-		struct sk_buff *skb;
-
 		neigh->nud_state = NUD_FAILED;
-		neigh->updated = jiffies;
 		notify = 1;
-		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
-		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
-
-		/* It is very thin place. report_unreachable is very complicated
-		   routine. Particularly, it can hit the same neighbour entry!
-
-		   So that, we try to be accurate and avoid dead loop. --ANK
-		 */
-		while (neigh->nud_state == NUD_FAILED &&
-		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
-			write_unlock(&neigh->lock);
-			neigh->ops->error_report(neigh, skb);
-			write_lock(&neigh->lock);
-		}
-		skb_queue_purge(&neigh->arp_queue);
+		neigh_invalidate(neigh);
 	}
 
 	if (neigh->nud_state & NUD_IN_TIMER) {
@@ -1001,6 +1006,11 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 			neigh->nud_state = new;
 			err = 0;
 			notify = old & NUD_VALID;
+			if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
+			    (new & NUD_FAILED)) {
+				neigh_invalidate(neigh);
+				notify = 1;
+			}
 			goto out;
 		}
 
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index b8ccd3c88d63..19b8c20e98a4 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3691,8 +3691,7 @@ out1:
 #ifdef CONFIG_XFRM
 	free_SAs(pkt_dev);
 #endif
-	if (pkt_dev->flows)
-		vfree(pkt_dev->flows);
+	vfree(pkt_dev->flows);
 	kfree(pkt_dev);
 	return err;
 }
@@ -3791,8 +3790,7 @@ static int pktgen_remove_device(struct pktgen_thread *t,
 #ifdef CONFIG_XFRM
 	free_SAs(pkt_dev);
 #endif
-	if (pkt_dev->flows)
-		vfree(pkt_dev->flows);
+	vfree(pkt_dev->flows);
 	kfree(pkt_dev);
 	return 0;
 }
diff --git a/net/core/skb_dma_map.c b/net/core/skb_dma_map.c
index 86234923a3b7..79687dfd6957 100644
--- a/net/core/skb_dma_map.c
+++ b/net/core/skb_dma_map.c
@@ -20,7 +20,7 @@ int skb_dma_map(struct device *dev, struct sk_buff *skb,
 	if (dma_mapping_error(dev, map))
 		goto out_err;
 
-	sp->dma_maps[0] = map;
+	sp->dma_head = map;
 	for (i = 0; i < sp->nr_frags; i++) {
 		skb_frag_t *fp = &sp->frags[i];
 
@@ -28,9 +28,8 @@ int skb_dma_map(struct device *dev, struct sk_buff *skb,
 				     fp->size, dir);
 		if (dma_mapping_error(dev, map))
 			goto unwind;
-		sp->dma_maps[i + 1] = map;
+		sp->dma_maps[i] = map;
 	}
-	sp->num_dma_maps = i + 1;
 
 	return 0;
 
@@ -38,10 +37,10 @@ unwind:
 	while (--i >= 0) {
 		skb_frag_t *fp = &sp->frags[i];
 
-		dma_unmap_page(dev, sp->dma_maps[i + 1],
+		dma_unmap_page(dev, sp->dma_maps[i],
 			       fp->size, dir);
 	}
-	dma_unmap_single(dev, sp->dma_maps[0],
+	dma_unmap_single(dev, sp->dma_head,
 			 skb_headlen(skb), dir);
 out_err:
 	return -ENOMEM;
@@ -54,12 +53,12 @@ void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
 	struct skb_shared_info *sp = skb_shinfo(skb);
 	int i;
 
-	dma_unmap_single(dev, sp->dma_maps[0],
+	dma_unmap_single(dev, sp->dma_head,
 			 skb_headlen(skb), dir);
 	for (i = 0; i < sp->nr_frags; i++) {
 		skb_frag_t *fp = &sp->frags[i];
 
-		dma_unmap_page(dev, sp->dma_maps[i + 1],
+		dma_unmap_page(dev, sp->dma_maps[i],
 			       fp->size, dir);
 	}
 }
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 6adf19ec95cc..b94d777e3eb4 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -210,7 +210,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	shinfo->gso_type = 0;
 	shinfo->ip6_frag_id = 0;
 	shinfo->tx_flags.flags = 0;
-	shinfo->frag_list = NULL;
+	skb_frag_list_init(skb);
 	memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
 
 	if (fclone) {
@@ -323,7 +323,7 @@ static void skb_clone_fraglist(struct sk_buff *skb)
 {
 	struct sk_buff *list;
 
-	for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
+	skb_walk_frags(skb, list)
 		skb_get(list);
 }
 
@@ -338,7 +338,7 @@ static void skb_release_data(struct sk_buff *skb)
 				put_page(skb_shinfo(skb)->frags[i].page);
 		}
 
-		if (skb_shinfo(skb)->frag_list)
+		if (skb_has_frags(skb))
 			skb_drop_fraglist(skb);
 
 		kfree(skb->head);
@@ -503,7 +503,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
 	shinfo->gso_type = 0;
 	shinfo->ip6_frag_id = 0;
 	shinfo->tx_flags.flags = 0;
-	shinfo->frag_list = NULL;
+	skb_frag_list_init(skb);
 	memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
 
 	memset(skb, 0, offsetof(struct sk_buff, tail));
@@ -552,7 +552,6 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->vlan_tci = old->vlan_tci;
 #if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
 	new->do_not_encrypt = old->do_not_encrypt;
-	new->requeue = old->requeue;
 #endif
 
 	skb_copy_secmark(new, old);
@@ -758,7 +757,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 		skb_shinfo(n)->nr_frags = i;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
+	if (skb_has_frags(skb)) {
 		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
 		skb_clone_fraglist(n);
 	}
@@ -821,7 +820,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 		get_page(skb_shinfo(skb)->frags[i].page);
 
-	if (skb_shinfo(skb)->frag_list)
+	if (skb_has_frags(skb))
 		skb_clone_fraglist(skb);
 
 	skb_release_data(skb);
@@ -1093,7 +1092,7 @@ drop_pages:
 		for (; i < nfrags; i++)
 			put_page(skb_shinfo(skb)->frags[i].page);
 
-		if (skb_shinfo(skb)->frag_list)
+		if (skb_has_frags(skb))
 			skb_drop_fraglist(skb);
 		goto done;
 	}
@@ -1188,7 +1187,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
 	/* Optimization: no fragments, no reasons to preestimate
 	 * size of pulled pages. Superb.
 	 */
-	if (!skb_shinfo(skb)->frag_list)
+	if (!skb_has_frags(skb))
 		goto pull_pages;
 
 	/* Estimate size of pulled pages. */
@@ -1285,8 +1284,9 @@ EXPORT_SYMBOL(__pskb_pull_tail);
 
 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 {
-	int i, copy;
 	int start = skb_headlen(skb);
+	struct sk_buff *frag_iter;
+	int i, copy;
 
 	if (offset > (int)skb->len - len)
 		goto fault;
@@ -1328,28 +1328,23 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
+	skb_walk_frags(skb, frag_iter) {
+		int end;
 
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				if (skb_copy_bits(list, offset - start,
-						  to, copy))
-					goto fault;
-				if ((len -= copy) == 0)
-					return 0;
-				offset += copy;
-				to += copy;
-			}
-			start = end;
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			if (skb_copy_bits(frag_iter, offset - start, to, copy))
+				goto fault;
+			if ((len -= copy) == 0)
+				return 0;
+			offset += copy;
+			to += copy;
 		}
+		start = end;
 	}
 	if (!len)
 		return 0;
@@ -1534,6 +1529,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 		.ops = &sock_pipe_buf_ops,
 		.spd_release = sock_spd_release,
 	};
+	struct sk_buff *frag_iter;
 	struct sock *sk = skb->sk;
 
 	/*
@@ -1548,13 +1544,11 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 	/*
 	 * now see if we have a frag_list to map
 	 */
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list && tlen; list = list->next) {
-			if (__skb_splice_bits(list, &offset, &tlen, &spd, sk))
-				break;
-		}
+	skb_walk_frags(skb, frag_iter) {
+		if (!tlen)
+			break;
+		if (__skb_splice_bits(frag_iter, &offset, &tlen, &spd, sk))
+			break;
 	}
 
 done:
@@ -1593,8 +1587,9 @@ done:
 
 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 {
-	int i, copy;
 	int start = skb_headlen(skb);
+	struct sk_buff *frag_iter;
+	int i, copy;
 
 	if (offset > (int)skb->len - len)
 		goto fault;
@@ -1635,28 +1630,24 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
+	skb_walk_frags(skb, frag_iter) {
+		int end;
 
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				if (skb_store_bits(list, offset - start,
-						   from, copy))
-					goto fault;
-				if ((len -= copy) == 0)
-					return 0;
-				offset += copy;
-				from += copy;
-			}
-			start = end;
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			if (skb_store_bits(frag_iter, offset - start,
+					   from, copy))
+				goto fault;
+			if ((len -= copy) == 0)
+				return 0;
+			offset += copy;
+			from += copy;
 		}
+		start = end;
 	}
 	if (!len)
 		return 0;
@@ -1673,6 +1664,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
 	int pos = 0;
 
 	/* Checksum header. */
@@ -1712,29 +1704,25 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
+	skb_walk_frags(skb, frag_iter) {
+		int end;
 
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				__wsum csum2;
-				if (copy > len)
-					copy = len;
-				csum2 = skb_checksum(list, offset - start,
-						     copy, 0);
-				csum = csum_block_add(csum, csum2, pos);
-				if ((len -= copy) == 0)
-					return csum;
-				offset += copy;
-				pos += copy;
-			}
-			start = end;
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			__wsum csum2;
+			if (copy > len)
+				copy = len;
+			csum2 = skb_checksum(frag_iter, offset - start,
+					     copy, 0);
+			csum = csum_block_add(csum, csum2, pos);
+			if ((len -= copy) == 0)
+				return csum;
+			offset += copy;
+			pos += copy;
 		}
+		start = end;
 	}
 	BUG_ON(len);
 
@@ -1749,6 +1737,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
 	int pos = 0;
 
 	/* Copy header. */
@@ -1793,31 +1782,27 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
+	skb_walk_frags(skb, frag_iter) {
+		__wsum csum2;
+		int end;
 
-		for (; list; list = list->next) {
-			__wsum csum2;
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				csum2 = skb_copy_and_csum_bits(list,
-							       offset - start,
-							       to, copy, 0);
-				csum = csum_block_add(csum, csum2, pos);
-				if ((len -= copy) == 0)
-					return csum;
-				offset += copy;
-				to += copy;
-				pos += copy;
-			}
-			start = end;
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			csum2 = skb_copy_and_csum_bits(frag_iter,
+						       offset - start,
+						       to, copy, 0);
+			csum = csum_block_add(csum, csum2, pos);
+			if ((len -= copy) == 0)
+				return csum;
+			offset += copy;
+			to += copy;
+			pos += copy;
 		}
+		start = end;
 	}
 	BUG_ON(len);
 	return csum;
@@ -2327,8 +2312,7 @@ next_skb:
 		st->frag_data = NULL;
 	}
 
-	if (st->root_skb == st->cur_skb &&
-	    skb_shinfo(st->root_skb)->frag_list) {
+	if (st->root_skb == st->cur_skb && skb_has_frags(st->root_skb)) {
 		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
 		st->frag_idx = 0;
 		goto next_skb;
@@ -2639,7 +2623,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 		} else
 			skb_get(fskb2);
 
-		BUG_ON(skb_shinfo(nskb)->frag_list);
+		SKB_FRAG_ASSERT(nskb);
 		skb_shinfo(nskb)->frag_list = fskb2;
 	}
 
@@ -2796,6 +2780,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
 	int elt = 0;
 
 	if (copy > 0) {
@@ -2829,26 +2814,22 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			int end;
+	skb_walk_frags(skb, frag_iter) {
+		int end;
 
 		WARN_ON(start > offset + len);
 
-			end = start + list->len;
+		end = start + frag_iter->len;
 		if ((copy = end - offset) > 0) {
 			if (copy > len)
 				copy = len;
-			elt += __skb_to_sgvec(list, sg+elt, offset - start,
+			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
 					      copy);
 			if ((len -= copy) == 0)
 				return elt;
 			offset += copy;
-			}
-			start = end;
 		}
+		start = end;
 	}
 	BUG_ON(len);
 	return elt;
@@ -2896,7 +2877,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 		return -ENOMEM;
 
 	/* Easy case. Most of packets will go this way. */
-	if (!skb_shinfo(skb)->frag_list) {
+	if (!skb_has_frags(skb)) {
 		/* A little of trouble, not enough of space for trailer.
 		 * This should not happen, when stack is tuned to generate
 		 * good frames. OK, on miss we reallocate and reserve even more
@@ -2931,7 +2912,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 
 		if (skb1->next == NULL && tailbits) {
 			if (skb_shinfo(skb1)->nr_frags ||
-			    skb_shinfo(skb1)->frag_list ||
+			    skb_has_frags(skb1) ||
 			    skb_tailroom(skb1) < tailbits)
 				ntail = tailbits + 128;
 		}
@@ -2940,7 +2921,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 		    skb_cloned(skb1) ||
 		    ntail ||
 		    skb_shinfo(skb1)->nr_frags ||
-		    skb_shinfo(skb1)->frag_list) {
+		    skb_has_frags(skb1)) {
 			struct sk_buff *skb2;
 
 			/* Fuck, we are miserable poor guys... */
@@ -3026,12 +3007,12 @@ EXPORT_SYMBOL_GPL(skb_tstamp_tx);
  */
 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
 {
-	if (unlikely(start > skb->len - 2) ||
-	    unlikely((int)start + off > skb->len - 2)) {
+	if (unlikely(start > skb_headlen(skb)) ||
+	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
 		if (net_ratelimit())
 			printk(KERN_WARNING
 			       "bad partial csum: csum=%u/%u len=%u\n",
-			       start, off, skb->len);
+			       start, off, skb_headlen(skb));
 		return false;
 	}
 	skb->ip_summed = CHECKSUM_PARTIAL;
diff --git a/net/core/sock.c b/net/core/sock.c
index 58dec9dff99a..06e26b77ad9e 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -155,6 +155,7 @@ static const char *af_family_key_strings[AF_MAX+1] = {
155 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" , 155 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
156 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , 156 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
157 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , 157 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
158 "sk_lock-AF_IEEE802154",
158 "sk_lock-AF_MAX" 159 "sk_lock-AF_MAX"
159}; 160};
160static const char *af_family_slock_key_strings[AF_MAX+1] = { 161static const char *af_family_slock_key_strings[AF_MAX+1] = {
@@ -170,6 +171,7 @@ static const char *af_family_slock_key_strings[AF_MAX+1] = {
170 "slock-27" , "slock-28" , "slock-AF_CAN" , 171 "slock-27" , "slock-28" , "slock-AF_CAN" ,
171 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , 172 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
172 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , 173 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
174 "slock-AF_IEEE802154",
173 "slock-AF_MAX" 175 "slock-AF_MAX"
174}; 176};
175static const char *af_family_clock_key_strings[AF_MAX+1] = { 177static const char *af_family_clock_key_strings[AF_MAX+1] = {
@@ -185,6 +187,7 @@ static const char *af_family_clock_key_strings[AF_MAX+1] = {
185 "clock-27" , "clock-28" , "clock-AF_CAN" , 187 "clock-27" , "clock-28" , "clock-AF_CAN" ,
186 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , 188 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
187 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , 189 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
190 "clock-AF_IEEE802154",
188 "clock-AF_MAX" 191 "clock-AF_MAX"
189}; 192};
190 193
@@ -1005,7 +1008,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
 }
 EXPORT_SYMBOL(sk_alloc);
 
-void sk_free(struct sock *sk)
+static void __sk_free(struct sock *sk)
 {
 	struct sk_filter *filter;
 
@@ -1028,6 +1031,17 @@ void sk_free(struct sock *sk)
 	put_net(sock_net(sk));
 	sk_prot_free(sk->sk_prot_creator, sk);
 }
+
+void sk_free(struct sock *sk)
+{
+	/*
+	 * We substract one from sk_wmem_alloc and can know if
+	 * some packets are still in some tx queue.
+	 * If not null, sock_wfree() will call __sk_free(sk) later
+	 */
+	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
+		__sk_free(sk);
+}
 EXPORT_SYMBOL(sk_free);
 
 /*
@@ -1068,7 +1082,10 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
 
 		atomic_set(&newsk->sk_rmem_alloc, 0);
-		atomic_set(&newsk->sk_wmem_alloc, 0);
+		/*
+		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
+		 */
+		atomic_set(&newsk->sk_wmem_alloc, 1);
 		atomic_set(&newsk->sk_omem_alloc, 0);
 		skb_queue_head_init(&newsk->sk_receive_queue);
 		skb_queue_head_init(&newsk->sk_write_queue);
@@ -1172,12 +1189,18 @@ void __init sk_init(void)
 void sock_wfree(struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
+	int res;
 
 	/* In case it might be waiting for more memory. */
-	atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+	res = atomic_sub_return(skb->truesize, &sk->sk_wmem_alloc);
 	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE))
 		sk->sk_write_space(sk);
-	sock_put(sk);
+	/*
+	 * if sk_wmem_alloc reached 0, we are last user and should
+	 * free this sock, as sk_free() call could not do it.
+	 */
+	if (res == 0)
+		__sk_free(sk);
 }
 EXPORT_SYMBOL(sock_wfree);
 
@@ -1816,6 +1839,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 	sk->sk_stamp = ktime_set(-1L, 0);
 
 	atomic_set(&sk->sk_refcnt, 1);
+	atomic_set(&sk->sk_wmem_alloc, 1);
 	atomic_set(&sk->sk_drops, 0);
 }
 EXPORT_SYMBOL(sock_init_data);
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index 164b090d5ac3..25d717ebc92e 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -51,6 +51,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
 	dma_cookie_t cookie = 0;
 
 	/* Copy header. */
@@ -94,31 +95,28 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
+	skb_walk_frags(skb, frag_iter) {
+		int end;
 
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			copy = end - offset;
-			if (copy > 0) {
-				if (copy > len)
-					copy = len;
-				cookie = dma_skb_copy_datagram_iovec(chan, list,
-					offset - start, to, copy,
-					pinned_list);
-				if (cookie < 0)
-					goto fault;
-				len -= copy;
-				if (len == 0)
-					goto end;
-				offset += copy;
-			}
-			start = end;
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		copy = end - offset;
+		if (copy > 0) {
+			if (copy > len)
+				copy = len;
+			cookie = dma_skb_copy_datagram_iovec(chan, frag_iter,
+							     offset - start,
+							     to, copy,
+							     pinned_list);
+			if (cookie < 0)
+				goto fault;
+			len -= copy;
+			if (len == 0)
+				goto end;
+			offset += copy;
 		}
+		start = end;
 	}
 
 end:
diff --git a/net/ieee802154/Kconfig b/net/ieee802154/Kconfig
new file mode 100644
index 000000000000..1c1de97d264a
--- /dev/null
+++ b/net/ieee802154/Kconfig
@@ -0,0 +1,12 @@
+config IEEE802154
+	tristate "IEEE Std 802.15.4 Low-Rate Wireless Personal Area Networks support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	---help---
+	  IEEE Std 802.15.4 defines a low data rate, low power and low
+	  complexity short range wireless personal area networks. It was
+	  designed to organise networks of sensors, switches, etc automation
+	  devices. Maximum allowed data rate is 250 kb/s and typical personal
+	  operating space around 10m.
+
+	  Say Y here to compile LR-WPAN support into the kernel or say M to
+	  compile it as modules.
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
new file mode 100644
index 000000000000..f99338a26100
--- /dev/null
+++ b/net/ieee802154/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_IEEE802154) += nl802154.o af_802154.o
+nl802154-y := netlink.o nl_policy.o
+af_802154-y := af_ieee802154.o raw.o dgram.o
+
+ccflags-y += -Wall -DDEBUG
diff --git a/net/ieee802154/af802154.h b/net/ieee802154/af802154.h
new file mode 100644
index 000000000000..b1ec52537522
--- /dev/null
+++ b/net/ieee802154/af802154.h
@@ -0,0 +1,36 @@
+/*
+ * Internal interfaces for ieee 802.15.4 address family.
+ *
+ * Copyright 2007, 2008, 2009 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Sergey Lapin <slapin@ossfans.org>
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ */
+
+#ifndef AF802154_H
+#define AF802154_H
+
+struct sk_buff;
+struct net_devce;
+extern struct proto ieee802154_raw_prot;
+extern struct proto ieee802154_dgram_prot;
+void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb);
+int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb);
+struct net_device *ieee802154_get_dev(struct net *net,
+		struct ieee802154_addr *addr);
+
+#endif
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
new file mode 100644
index 000000000000..882a927cefae
--- /dev/null
+++ b/net/ieee802154/af_ieee802154.c
@@ -0,0 +1,372 @@
1/*
2 * IEEE802154.4 socket interface
3 *
4 * Copyright 2007, 2008 Siemens AG
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Written by:
20 * Sergey Lapin <slapin@ossfans.org>
21 * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
22 */
23
24#include <linux/net.h>
25#include <linux/capability.h>
26#include <linux/module.h>
27#include <linux/if_arp.h>
28#include <linux/if.h>
29#include <linux/termios.h> /* For TIOCOUTQ/INQ */
30#include <linux/list.h>
31#include <net/datalink.h>
32#include <net/psnap.h>
33#include <net/sock.h>
34#include <net/tcp_states.h>
35#include <net/route.h>
36
37#include <net/ieee802154/af_ieee802154.h>
38#include <net/ieee802154/netdevice.h>
39
40#include "af802154.h"
41
42#define DBG_DUMP(data, len) { \
43 int i; \
44 pr_debug("function: %s: data: len %d:\n", __func__, len); \
45 for (i = 0; i < len; i++) {\
46 pr_debug("%02x: %02x\n", i, (data)[i]); \
47 } \
48}
49
50/*
51 * Utility function for families
52 */
53struct net_device *ieee802154_get_dev(struct net *net,
54 struct ieee802154_addr *addr)
55{
56 struct net_device *dev = NULL;
57 struct net_device *tmp;
58 u16 pan_id, short_addr;
59
60 switch (addr->addr_type) {
61 case IEEE802154_ADDR_LONG:
62 rtnl_lock();
63 dev = dev_getbyhwaddr(net, ARPHRD_IEEE802154, addr->hwaddr);
64 if (dev)
65 dev_hold(dev);
66 rtnl_unlock();
67 break;
68 case IEEE802154_ADDR_SHORT:
69 if (addr->pan_id == 0xffff ||
70 addr->short_addr == IEEE802154_ADDR_UNDEF ||
71 addr->short_addr == 0xffff)
72 break;
73
74 rtnl_lock();
75
76 for_each_netdev(net, tmp) {
77 if (tmp->type != ARPHRD_IEEE802154)
78 continue;
79
80 pan_id = ieee802154_mlme_ops(tmp)->get_pan_id(tmp);
81 short_addr =
82 ieee802154_mlme_ops(tmp)->get_short_addr(tmp);
83
84 if (pan_id == addr->pan_id &&
85 short_addr == addr->short_addr) {
86 dev = tmp;
87 dev_hold(dev);
88 break;
89 }
90 }
91
92 rtnl_unlock();
93 break;
94 default:
95 pr_warning("Unsupported ieee802154 address type: %d\n",
96 addr->addr_type);
97 break;
98 }
99
100 return dev;
101}
102
103static int ieee802154_sock_release(struct socket *sock)
104{
105 struct sock *sk = sock->sk;
106
107 if (sk) {
108 sock->sk = NULL;
109 sk->sk_prot->close(sk, 0);
110 }
111 return 0;
112}
113static int ieee802154_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
114 struct msghdr *msg, size_t len)
115{
116 struct sock *sk = sock->sk;
117
118 return sk->sk_prot->sendmsg(iocb, sk, msg, len);
119}
120
121static int ieee802154_sock_bind(struct socket *sock, struct sockaddr *uaddr,
122 int addr_len)
123{
124 struct sock *sk = sock->sk;
125
126 if (sk->sk_prot->bind)
127 return sk->sk_prot->bind(sk, uaddr, addr_len);
128
129 return sock_no_bind(sock, uaddr, addr_len);
130}
131
132static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr,
133 int addr_len, int flags)
134{
135 struct sock *sk = sock->sk;
136
137 if (uaddr->sa_family == AF_UNSPEC)
138 return sk->sk_prot->disconnect(sk, flags);
139
140 return sk->sk_prot->connect(sk, uaddr, addr_len);
141}
142
143static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
144 unsigned int cmd)
145{
146 struct ifreq ifr;
147 int ret = -EINVAL;
148 struct net_device *dev;
149
150 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
151 return -EFAULT;
152
153 ifr.ifr_name[IFNAMSIZ-1] = 0;
154
155 dev_load(sock_net(sk), ifr.ifr_name);
156 dev = dev_get_by_name(sock_net(sk), ifr.ifr_name);
157 if (dev->type == ARPHRD_IEEE802154 ||
158 dev->type == ARPHRD_IEEE802154_PHY)
159 ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd);
160
161 if (!ret && copy_to_user(arg, &ifr, sizeof(struct ifreq)))
162 ret = -EFAULT;
163 dev_put(dev);
164
165 return ret;
166}
167
168static int ieee802154_sock_ioctl(struct socket *sock, unsigned int cmd,
169 unsigned long arg)
170{
171 struct sock *sk = sock->sk;
172
173 switch (cmd) {
174 case SIOCGSTAMP:
175 return sock_get_timestamp(sk, (struct timeval __user *)arg);
176 case SIOCGSTAMPNS:
177 return sock_get_timestampns(sk, (struct timespec __user *)arg);
178 case SIOCGIFADDR:
179 case SIOCSIFADDR:
180 return ieee802154_dev_ioctl(sk, (struct ifreq __user *)arg,
181 cmd);
182 default:
183 if (!sk->sk_prot->ioctl)
184 return -ENOIOCTLCMD;
185 return sk->sk_prot->ioctl(sk, cmd, arg);
186 }
187}
188
189static const struct proto_ops ieee802154_raw_ops = {
190 .family = PF_IEEE802154,
191 .owner = THIS_MODULE,
192 .release = ieee802154_sock_release,
193 .bind = ieee802154_sock_bind,
194 .connect = ieee802154_sock_connect,
195 .socketpair = sock_no_socketpair,
196 .accept = sock_no_accept,
197 .getname = sock_no_getname,
198 .poll = datagram_poll,
199 .ioctl = ieee802154_sock_ioctl,
200 .listen = sock_no_listen,
201 .shutdown = sock_no_shutdown,
202 .setsockopt = sock_common_setsockopt,
203 .getsockopt = sock_common_getsockopt,
204 .sendmsg = ieee802154_sock_sendmsg,
205 .recvmsg = sock_common_recvmsg,
206 .mmap = sock_no_mmap,
207 .sendpage = sock_no_sendpage,
208#ifdef CONFIG_COMPAT
209 .compat_setsockopt = compat_sock_common_setsockopt,
210 .compat_getsockopt = compat_sock_common_getsockopt,
211#endif
212};
213
214static const struct proto_ops ieee802154_dgram_ops = {
215 .family = PF_IEEE802154,
216 .owner = THIS_MODULE,
217 .release = ieee802154_sock_release,
218 .bind = ieee802154_sock_bind,
219 .connect = ieee802154_sock_connect,
220 .socketpair = sock_no_socketpair,
221 .accept = sock_no_accept,
222 .getname = sock_no_getname,
223 .poll = datagram_poll,
224 .ioctl = ieee802154_sock_ioctl,
225 .listen = sock_no_listen,
226 .shutdown = sock_no_shutdown,
227 .setsockopt = sock_common_setsockopt,
228 .getsockopt = sock_common_getsockopt,
229 .sendmsg = ieee802154_sock_sendmsg,
230 .recvmsg = sock_common_recvmsg,
231 .mmap = sock_no_mmap,
232 .sendpage = sock_no_sendpage,
233#ifdef CONFIG_COMPAT
234 .compat_setsockopt = compat_sock_common_setsockopt,
235 .compat_getsockopt = compat_sock_common_getsockopt,
236#endif
237};
238
239
240/*
241 * Create a socket. Initialise the socket, blank the addresses
242 * set the state.
243 */
244static int ieee802154_create(struct net *net, struct socket *sock,
245 int protocol)
246{
247 struct sock *sk;
248 int rc;
249 struct proto *proto;
250 const struct proto_ops *ops;
251
252 if (net != &init_net)
253 return -EAFNOSUPPORT;
254
255 switch (sock->type) {
256 case SOCK_RAW:
257 proto = &ieee802154_raw_prot;
258 ops = &ieee802154_raw_ops;
259 break;
260 case SOCK_DGRAM:
261 proto = &ieee802154_dgram_prot;
262 ops = &ieee802154_dgram_ops;
263 break;
264 default:
265 rc = -ESOCKTNOSUPPORT;
266 goto out;
267 }
268
269 rc = -ENOMEM;
270 sk = sk_alloc(net, PF_IEEE802154, GFP_KERNEL, proto);
271 if (!sk)
272 goto out;
273 rc = 0;
274
275 sock->ops = ops;
276
277 sock_init_data(sock, sk);
278 /* FIXME: sk->sk_destruct */
279 sk->sk_family = PF_IEEE802154;
280
281 /* Checksums on by default */
282 sock_set_flag(sk, SOCK_ZAPPED);
283
284 if (sk->sk_prot->hash)
285 sk->sk_prot->hash(sk);
286
287 if (sk->sk_prot->init) {
288 rc = sk->sk_prot->init(sk);
289 if (rc)
290 sk_common_release(sk);
291 }
292out:
293 return rc;
294}
295
296static struct net_proto_family ieee802154_family_ops = {
297 .family = PF_IEEE802154,
298 .create = ieee802154_create,
299 .owner = THIS_MODULE,
300};
301
302static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev,
303 struct packet_type *pt, struct net_device *orig_dev)
304{
305 DBG_DUMP(skb->data, skb->len);
306 if (!netif_running(dev))
307 return -ENODEV;
308 pr_debug("got frame, type %d, dev %p\n", dev->type, dev);
309
310 if (!net_eq(dev_net(dev), &init_net))
311 goto drop;
312
313 ieee802154_raw_deliver(dev, skb);
314
315 if (dev->type != ARPHRD_IEEE802154)
316 goto drop;
317
318 if (skb->pkt_type != PACKET_OTHERHOST)
319 return ieee802154_dgram_deliver(dev, skb);
320
321drop:
322 kfree_skb(skb);
323 return NET_RX_DROP;
324}
325
326
327static struct packet_type ieee802154_packet_type = {
328 .type = __constant_htons(ETH_P_IEEE802154),
329 .func = ieee802154_rcv,
330};
331
332static int __init af_ieee802154_init(void)
333{
334 int rc = -EINVAL;
335
336 rc = proto_register(&ieee802154_raw_prot, 1);
337 if (rc)
338 goto out;
339
340 rc = proto_register(&ieee802154_dgram_prot, 1);
341 if (rc)
342 goto err_dgram;
343
344 /* Tell SOCKET that we are alive */
345 rc = sock_register(&ieee802154_family_ops);
346 if (rc)
347 goto err_sock;
348 dev_add_pack(&ieee802154_packet_type);
349
350 rc = 0;
351 goto out;
352
353err_sock:
354 proto_unregister(&ieee802154_dgram_prot);
355err_dgram:
356 proto_unregister(&ieee802154_raw_prot);
357out:
358 return rc;
359}
360static void __exit af_ieee802154_remove(void)
361{
362 dev_remove_pack(&ieee802154_packet_type);
363 sock_unregister(PF_IEEE802154);
364 proto_unregister(&ieee802154_dgram_prot);
365 proto_unregister(&ieee802154_raw_prot);
366}
367
368module_init(af_ieee802154_init);
369module_exit(af_ieee802154_remove);
370
371MODULE_LICENSE("GPL");
372MODULE_ALIAS_NETPROTO(PF_IEEE802154);
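
A minimal userspace sketch (not part of the patch) of what ieee802154_create() above exposes: SOCK_DGRAM and SOCK_RAW are accepted, any other type is rejected with ESOCKTNOSUPPORT. The numeric PF_IEEE802154 fallback is an assumption for libc headers that do not yet carry the constant.

#include <stdio.h>
#include <sys/socket.h>

#ifndef PF_IEEE802154
#define PF_IEEE802154 36	/* assumption: value assigned in linux/socket.h */
#endif

int main(void)
{
	int dgram = socket(PF_IEEE802154, SOCK_DGRAM, 0);
	int raw   = socket(PF_IEEE802154, SOCK_RAW, 0);
	int seq   = socket(PF_IEEE802154, SOCK_SEQPACKET, 0);

	if (dgram < 0 || raw < 0)
		perror("socket(PF_IEEE802154)");	/* e.g. EAFNOSUPPORT without the module */

	if (seq < 0)
		printf("SOCK_SEQPACKET rejected as expected\n");

	return 0;
}
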
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
new file mode 100644
index 000000000000..1779677aed46
--- /dev/null
+++ b/net/ieee802154/dgram.c
@@ -0,0 +1,394 @@
1/*
2 * IEEE 802.15.4 datagram socket interface
3 *
4 * Copyright 2007, 2008 Siemens AG
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Written by:
20 * Sergey Lapin <slapin@ossfans.org>
21 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
22 */
23
24#include <linux/net.h>
25#include <linux/module.h>
26#include <linux/if_arp.h>
27#include <linux/list.h>
28#include <net/sock.h>
29#include <net/ieee802154/af_ieee802154.h>
30#include <net/ieee802154/mac_def.h>
31#include <net/ieee802154/netdevice.h>
32
33#include <asm/ioctls.h>
34
35#include "af802154.h"
36
37static HLIST_HEAD(dgram_head);
38static DEFINE_RWLOCK(dgram_lock);
39
40struct dgram_sock {
41 struct sock sk;
42
43 int bound;
44 struct ieee802154_addr src_addr;
45 struct ieee802154_addr dst_addr;
46};
47
48static inline struct dgram_sock *dgram_sk(const struct sock *sk)
49{
50 return container_of(sk, struct dgram_sock, sk);
51}
52
53
54static void dgram_hash(struct sock *sk)
55{
56 write_lock_bh(&dgram_lock);
57 sk_add_node(sk, &dgram_head);
58 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
59 write_unlock_bh(&dgram_lock);
60}
61
62static void dgram_unhash(struct sock *sk)
63{
64 write_lock_bh(&dgram_lock);
65 if (sk_del_node_init(sk))
66 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
67 write_unlock_bh(&dgram_lock);
68}
69
70static int dgram_init(struct sock *sk)
71{
72 struct dgram_sock *ro = dgram_sk(sk);
73
74 ro->dst_addr.addr_type = IEEE802154_ADDR_LONG;
75 ro->dst_addr.pan_id = 0xffff;
76 memset(&ro->dst_addr.hwaddr, 0xff, sizeof(ro->dst_addr.hwaddr));
77 return 0;
78}
79
80static void dgram_close(struct sock *sk, long timeout)
81{
82 sk_common_release(sk);
83}
84
85static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
86{
87 struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
88 struct dgram_sock *ro = dgram_sk(sk);
89 int err = 0;
90 struct net_device *dev;
91
92 ro->bound = 0;
93
94 if (len < sizeof(*addr))
95 return -EINVAL;
96
97 if (addr->family != AF_IEEE802154)
98 return -EINVAL;
99
100 lock_sock(sk);
101
102 dev = ieee802154_get_dev(sock_net(sk), &addr->addr);
103 if (!dev) {
104 err = -ENODEV;
105 goto out;
106 }
107
108 if (dev->type != ARPHRD_IEEE802154) {
109 err = -ENODEV;
110 goto out_put;
111 }
112
113 memcpy(&ro->src_addr, &addr->addr, sizeof(struct ieee802154_addr));
114
115 ro->bound = 1;
116out_put:
117 dev_put(dev);
118out:
119 release_sock(sk);
120
121 return err;
122}
123
124static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg)
125{
126 switch (cmd) {
127 case SIOCOUTQ:
128 {
129 int amount = atomic_read(&sk->sk_wmem_alloc);
130 return put_user(amount, (int __user *)arg);
131 }
132
133 case SIOCINQ:
134 {
135 struct sk_buff *skb;
136 unsigned long amount;
137
138 amount = 0;
139 spin_lock_bh(&sk->sk_receive_queue.lock);
140 skb = skb_peek(&sk->sk_receive_queue);
141 if (skb != NULL) {
142 /*
143 * We will only return the amount
144 * of this packet since that is all
145 * that will be read.
146 */
147 /* FIXME: parse the header for more correct value */
148 amount = skb->len - (3+8+8);
149 }
150 spin_unlock_bh(&sk->sk_receive_queue.lock);
151 return put_user(amount, (int __user *)arg);
152 }
153
154 }
155 return -ENOIOCTLCMD;
156}
157
158/* FIXME: autobind */
159static int dgram_connect(struct sock *sk, struct sockaddr *uaddr,
160 int len)
161{
162 struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
163 struct dgram_sock *ro = dgram_sk(sk);
164 int err = 0;
165
166 if (len < sizeof(*addr))
167 return -EINVAL;
168
169 if (addr->family != AF_IEEE802154)
170 return -EINVAL;
171
172 lock_sock(sk);
173
174 if (!ro->bound) {
175 err = -ENETUNREACH;
176 goto out;
177 }
178
179 memcpy(&ro->dst_addr, &addr->addr, sizeof(struct ieee802154_addr));
180
181out:
182 release_sock(sk);
183 return err;
184}
185
186static int dgram_disconnect(struct sock *sk, int flags)
187{
188 struct dgram_sock *ro = dgram_sk(sk);
189
190 lock_sock(sk);
191
192 ro->dst_addr.addr_type = IEEE802154_ADDR_LONG;
193 memset(&ro->dst_addr.hwaddr, 0xff, sizeof(ro->dst_addr.hwaddr));
194
195 release_sock(sk);
196
197 return 0;
198}
199
200static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
201 struct msghdr *msg, size_t size)
202{
203 struct net_device *dev;
204 unsigned mtu;
205 struct sk_buff *skb;
206 struct dgram_sock *ro = dgram_sk(sk);
207 int err;
208
209 if (msg->msg_flags & MSG_OOB) {
210 pr_debug("msg->msg_flags = 0x%x\n", msg->msg_flags);
211 return -EOPNOTSUPP;
212 }
213
214 if (!ro->bound)
215 dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
216 else
217 dev = ieee802154_get_dev(sock_net(sk), &ro->src_addr);
218
219 if (!dev) {
220 pr_debug("no dev\n");
221 err = -ENXIO;
222 goto out;
223 }
224 mtu = dev->mtu;
225 pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
226
227 skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + size,
228 msg->msg_flags & MSG_DONTWAIT,
229 &err);
230 if (!skb)
231 goto out_dev;
232
233 skb_reserve(skb, LL_RESERVED_SPACE(dev));
234
235 skb_reset_network_header(skb);
236
237 mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA | MAC_CB_FLAG_ACKREQ;
238 mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
239 err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &ro->dst_addr,
240 ro->bound ? &ro->src_addr : NULL, size);
241 if (err < 0)
242 goto out_skb;
243
244 skb_reset_mac_header(skb);
245
246 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
247 if (err < 0)
248 goto out_skb;
249
250 if (size > mtu) {
251 pr_debug("size = %Zu, mtu = %u\n", size, mtu);
252 err = -EINVAL;
253 goto out_skb;
254 }
255
256 skb->dev = dev;
257 skb->sk = sk;
258 skb->protocol = htons(ETH_P_IEEE802154);
259
260 dev_put(dev);
261
262 err = dev_queue_xmit(skb);
263 if (err > 0)
264 err = net_xmit_errno(err);
265
266 return err ?: size;
267
268out_skb:
269 kfree_skb(skb);
270out_dev:
271 dev_put(dev);
272out:
273 return err;
274}
275
276static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
277 struct msghdr *msg, size_t len, int noblock, int flags,
278 int *addr_len)
279{
280 size_t copied = 0;
281 int err = -EOPNOTSUPP;
282 struct sk_buff *skb;
283
284 skb = skb_recv_datagram(sk, flags, noblock, &err);
285 if (!skb)
286 goto out;
287
288 copied = skb->len;
289 if (len < copied) {
290 msg->msg_flags |= MSG_TRUNC;
291 copied = len;
292 }
293
294 /* FIXME: skip headers if necessary ?! */
295 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
296 if (err)
297 goto done;
298
299 sock_recv_timestamp(msg, sk, skb);
300
301 if (flags & MSG_TRUNC)
302 copied = skb->len;
303done:
304 skb_free_datagram(sk, skb);
305out:
306 if (err)
307 return err;
308 return copied;
309}
310
311static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
312{
313 if (sock_queue_rcv_skb(sk, skb) < 0) {
314 atomic_inc(&sk->sk_drops);
315 kfree_skb(skb);
316 return NET_RX_DROP;
317 }
318
319 return NET_RX_SUCCESS;
320}
321
322static inline int ieee802154_match_sock(u8 *hw_addr, u16 pan_id,
323 u16 short_addr, struct dgram_sock *ro)
324{
325 if (!ro->bound)
326 return 1;
327
328 if (ro->src_addr.addr_type == IEEE802154_ADDR_LONG &&
329 !memcmp(ro->src_addr.hwaddr, hw_addr, IEEE802154_ADDR_LEN))
330 return 1;
331
332 if (ro->src_addr.addr_type == IEEE802154_ADDR_SHORT &&
333 pan_id == ro->src_addr.pan_id &&
334 short_addr == ro->src_addr.short_addr)
335 return 1;
336
337 return 0;
338}
339
340int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
341{
342 struct sock *sk, *prev = NULL;
343 struct hlist_node *node;
344 int ret = NET_RX_SUCCESS;
345 u16 pan_id, short_addr;
346
347 /* Data frame processing */
348 BUG_ON(dev->type != ARPHRD_IEEE802154);
349
350 pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
351 short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev);
352
353 read_lock(&dgram_lock);
354 sk_for_each(sk, node, &dgram_head) {
355 if (ieee802154_match_sock(dev->dev_addr, pan_id, short_addr,
356 dgram_sk(sk))) {
357 if (prev) {
358 struct sk_buff *clone;
359 clone = skb_clone(skb, GFP_ATOMIC);
360 if (clone)
361 dgram_rcv_skb(prev, clone);
362 }
363
364 prev = sk;
365 }
366 }
367
368 if (prev)
369 dgram_rcv_skb(prev, skb);
370 else {
371 kfree_skb(skb);
372 ret = NET_RX_DROP;
373 }
374 read_unlock(&dgram_lock);
375
376 return ret;
377}
378
379struct proto ieee802154_dgram_prot = {
380 .name = "IEEE-802.15.4-MAC",
381 .owner = THIS_MODULE,
382 .obj_size = sizeof(struct dgram_sock),
383 .init = dgram_init,
384 .close = dgram_close,
385 .bind = dgram_bind,
386 .sendmsg = dgram_sendmsg,
387 .recvmsg = dgram_recvmsg,
388 .hash = dgram_hash,
389 .unhash = dgram_unhash,
390 .connect = dgram_connect,
391 .disconnect = dgram_disconnect,
392 .ioctl = dgram_ioctl,
393};
394
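
A userspace sketch (not part of the patch) of the datagram socket flow implemented above: bind() sets ro->bound and the source address, connect() records the destination, and send() then goes through dgram_sendmsg(), which builds the MAC header with dev_hard_header(). The structure layouts and address-mode values below are assumptions mirroring the af_ieee802154.h header added elsewhere in this series; the PAN id and short addresses are placeholders that must match a configured 802.15.4 interface, or bind() fails with ENODEV.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>

#ifndef PF_IEEE802154
#define PF_IEEE802154 36		/* assumption: see linux/socket.h */
#endif
#define AF_IEEE802154 PF_IEEE802154

#define IEEE802154_ADDR_LEN 8

/* assumption: follows the 802.15.4 addressing-mode encoding used by the stack */
enum { IEEE802154_ADDR_NONE = 0, IEEE802154_ADDR_SHORT = 2, IEEE802154_ADDR_LONG = 3 };

/* assumption: mirrors the layout implied by dgram_bind()/dgram_connect() */
struct ieee802154_addr {
	int addr_type;
	uint16_t pan_id;
	union {
		uint8_t hwaddr[IEEE802154_ADDR_LEN];
		uint16_t short_addr;
	};
};

struct sockaddr_ieee802154 {
	sa_family_t family;
	struct ieee802154_addr addr;
};

int main(void)
{
	struct sockaddr_ieee802154 src = { .family = AF_IEEE802154 };
	struct sockaddr_ieee802154 dst = { .family = AF_IEEE802154 };
	const char payload[] = "hello";
	int fd = socket(PF_IEEE802154, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* bind to our own short address; there is no autobind yet (see FIXME) */
	src.addr.addr_type = IEEE802154_ADDR_SHORT;
	src.addr.pan_id = 0x1234;
	src.addr.short_addr = 0x0001;
	if (bind(fd, (struct sockaddr *)&src, sizeof(src)) < 0)
		perror("bind");

	/* dgram_sendmsg() only looks at the connected destination, so use
	 * connect() + send() rather than sendto() */
	dst.addr.addr_type = IEEE802154_ADDR_SHORT;
	dst.addr.pan_id = 0x1234;
	dst.addr.short_addr = 0x0002;
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");
	else if (send(fd, payload, sizeof(payload), 0) < 0)
		perror("send");

	return 0;
}
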
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
new file mode 100644
index 000000000000..105ad10876af
--- /dev/null
+++ b/net/ieee802154/netlink.c
@@ -0,0 +1,523 @@
1/*
2 * Netlink interface for the IEEE 802.15.4 stack
3 *
4 * Copyright 2007, 2008 Siemens AG
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Written by:
20 * Sergey Lapin <slapin@ossfans.org>
21 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
22 */
23
24#include <linux/kernel.h>
25#include <linux/if_arp.h>
26#include <linux/netdevice.h>
27#include <net/netlink.h>
28#include <net/genetlink.h>
29#include <linux/nl802154.h>
30#include <net/ieee802154/af_ieee802154.h>
31#include <net/ieee802154/nl802154.h>
32#include <net/ieee802154/netdevice.h>
33
34static unsigned int ieee802154_seq_num;
35
36static struct genl_family ieee802154_coordinator_family = {
37 .id = GENL_ID_GENERATE,
38 .hdrsize = 0,
39 .name = IEEE802154_NL_NAME,
40 .version = 1,
41 .maxattr = IEEE802154_ATTR_MAX,
42};
43
44static struct genl_multicast_group ieee802154_coord_mcgrp = {
45 .name = IEEE802154_MCAST_COORD_NAME,
46};
47
48static struct genl_multicast_group ieee802154_beacon_mcgrp = {
49 .name = IEEE802154_MCAST_BEACON_NAME,
50};
51
52/* Requests to userspace */
53static struct sk_buff *ieee802154_nl_create(int flags, u8 req)
54{
55 void *hdr;
56 struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
57
58 if (!msg)
59 return NULL;
60
61 hdr = genlmsg_put(msg, 0, ieee802154_seq_num++,
62 &ieee802154_coordinator_family, flags, req);
63 if (!hdr) {
64 nlmsg_free(msg);
65 return NULL;
66 }
67
68 return msg;
69}
70
71static int ieee802154_nl_finish(struct sk_buff *msg)
72{
73 /* XXX: nlh is right at the start of msg */
74 void *hdr = genlmsg_data(NLMSG_DATA(msg->data));
75
76 if (!genlmsg_end(msg, hdr))
77 goto out;
78
79 return genlmsg_multicast(msg, 0, ieee802154_coord_mcgrp.id,
80 GFP_ATOMIC);
81out:
82 nlmsg_free(msg);
83 return -ENOBUFS;
84}
85
86int ieee802154_nl_assoc_indic(struct net_device *dev,
87 struct ieee802154_addr *addr, u8 cap)
88{
89 struct sk_buff *msg;
90
91 pr_debug("%s\n", __func__);
92
93 if (addr->addr_type != IEEE802154_ADDR_LONG) {
94 pr_err("%s: received non-long source address!\n", __func__);
95 return -EINVAL;
96 }
97
98 msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_INDIC);
99 if (!msg)
100 return -ENOBUFS;
101
102 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
103 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
104 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
105 dev->dev_addr);
106
107 NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
108 addr->hwaddr);
109
110 NLA_PUT_U8(msg, IEEE802154_ATTR_CAPABILITY, cap);
111
112 return ieee802154_nl_finish(msg);
113
114nla_put_failure:
115 nlmsg_free(msg);
116 return -ENOBUFS;
117}
118EXPORT_SYMBOL(ieee802154_nl_assoc_indic);
119
120int ieee802154_nl_assoc_confirm(struct net_device *dev, u16 short_addr,
121 u8 status)
122{
123 struct sk_buff *msg;
124
125 pr_debug("%s\n", __func__);
126
127 msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_CONF);
128 if (!msg)
129 return -ENOBUFS;
130
131 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
132 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
133 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
134 dev->dev_addr);
135
136 NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr);
137 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
138
139 return ieee802154_nl_finish(msg);
140
141nla_put_failure:
142 nlmsg_free(msg);
143 return -ENOBUFS;
144}
145EXPORT_SYMBOL(ieee802154_nl_assoc_confirm);
146
147int ieee802154_nl_disassoc_indic(struct net_device *dev,
148 struct ieee802154_addr *addr, u8 reason)
149{
150 struct sk_buff *msg;
151
152 pr_debug("%s\n", __func__);
153
154 msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_INDIC);
155 if (!msg)
156 return -ENOBUFS;
157
158 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
159 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
160 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
161 dev->dev_addr);
162
163 if (addr->addr_type == IEEE802154_ADDR_LONG)
164 NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
165 addr->hwaddr);
166 else
167 NLA_PUT_U16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR,
168 addr->short_addr);
169
170 NLA_PUT_U8(msg, IEEE802154_ATTR_REASON, reason);
171
172 return ieee802154_nl_finish(msg);
173
174nla_put_failure:
175 nlmsg_free(msg);
176 return -ENOBUFS;
177}
178EXPORT_SYMBOL(ieee802154_nl_disassoc_indic);
179
180int ieee802154_nl_disassoc_confirm(struct net_device *dev, u8 status)
181{
182 struct sk_buff *msg;
183
184 pr_debug("%s\n", __func__);
185
186 msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_CONF);
187 if (!msg)
188 return -ENOBUFS;
189
190 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
191 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
192 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
193 dev->dev_addr);
194
195 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
196
197 return ieee802154_nl_finish(msg);
198
199nla_put_failure:
200 nlmsg_free(msg);
201 return -ENOBUFS;
202}
203EXPORT_SYMBOL(ieee802154_nl_disassoc_confirm);
204
205int ieee802154_nl_beacon_indic(struct net_device *dev,
206 u16 panid, u16 coord_addr)
207{
208 struct sk_buff *msg;
209
210 pr_debug("%s\n", __func__);
211
212 msg = ieee802154_nl_create(0, IEEE802154_BEACON_NOTIFY_INDIC);
213 if (!msg)
214 return -ENOBUFS;
215
216 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
217 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
218 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
219 dev->dev_addr);
220 NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr);
221 NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid);
222
223 return ieee802154_nl_finish(msg);
224
225nla_put_failure:
226 nlmsg_free(msg);
227 return -ENOBUFS;
228}
229EXPORT_SYMBOL(ieee802154_nl_beacon_indic);
230
231int ieee802154_nl_scan_confirm(struct net_device *dev,
232 u8 status, u8 scan_type, u32 unscanned,
233 u8 *edl/* , struct list_head *pan_desc_list */)
234{
235 struct sk_buff *msg;
236
237 pr_debug("%s\n", __func__);
238
239 msg = ieee802154_nl_create(0, IEEE802154_SCAN_CONF);
240 if (!msg)
241 return -ENOBUFS;
242
243 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
244 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
245 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
246 dev->dev_addr);
247
248 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
249 NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type);
250 NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned);
251
252 if (edl)
253 NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl);
254
255 return ieee802154_nl_finish(msg);
256
257nla_put_failure:
258 nlmsg_free(msg);
259 return -ENOBUFS;
260}
261EXPORT_SYMBOL(ieee802154_nl_scan_confirm);
262
263/* Requests from userspace */
264static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
265{
266 struct net_device *dev;
267
268 if (info->attrs[IEEE802154_ATTR_DEV_NAME]) {
269 char name[IFNAMSIZ + 1];
270 nla_strlcpy(name, info->attrs[IEEE802154_ATTR_DEV_NAME],
271 sizeof(name));
272 dev = dev_get_by_name(&init_net, name);
273 } else if (info->attrs[IEEE802154_ATTR_DEV_INDEX])
274 dev = dev_get_by_index(&init_net,
275 nla_get_u32(info->attrs[IEEE802154_ATTR_DEV_INDEX]));
276 else
277 return NULL;
278
279	if (!dev)
		return NULL;

	if (dev->type != ARPHRD_IEEE802154) {
280 dev_put(dev);
281 return NULL;
282 }
283
284 return dev;
285}
286
287static int ieee802154_associate_req(struct sk_buff *skb,
288 struct genl_info *info)
289{
290 struct net_device *dev;
291 struct ieee802154_addr addr;
292 int ret = -EINVAL;
293
294 if (!info->attrs[IEEE802154_ATTR_CHANNEL] ||
295 !info->attrs[IEEE802154_ATTR_COORD_PAN_ID] ||
296 (!info->attrs[IEEE802154_ATTR_COORD_HW_ADDR] &&
297 !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]) ||
298 !info->attrs[IEEE802154_ATTR_CAPABILITY])
299 return -EINVAL;
300
301 dev = ieee802154_nl_get_dev(info);
302 if (!dev)
303 return -ENODEV;
304
305 if (info->attrs[IEEE802154_ATTR_COORD_HW_ADDR]) {
306 addr.addr_type = IEEE802154_ADDR_LONG;
307 nla_memcpy(addr.hwaddr,
308 info->attrs[IEEE802154_ATTR_COORD_HW_ADDR],
309 IEEE802154_ADDR_LEN);
310 } else {
311 addr.addr_type = IEEE802154_ADDR_SHORT;
312 addr.short_addr = nla_get_u16(
313 info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]);
314 }
315 addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);
316
317 ret = ieee802154_mlme_ops(dev)->assoc_req(dev, &addr,
318 nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]),
319 nla_get_u8(info->attrs[IEEE802154_ATTR_CAPABILITY]));
320
321 dev_put(dev);
322 return ret;
323}
324
325static int ieee802154_associate_resp(struct sk_buff *skb,
326 struct genl_info *info)
327{
328 struct net_device *dev;
329 struct ieee802154_addr addr;
330 int ret = -EINVAL;
331
332 if (!info->attrs[IEEE802154_ATTR_STATUS] ||
333 !info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] ||
334 !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR])
335 return -EINVAL;
336
337 dev = ieee802154_nl_get_dev(info);
338 if (!dev)
339 return -ENODEV;
340
341 addr.addr_type = IEEE802154_ADDR_LONG;
342 nla_memcpy(addr.hwaddr, info->attrs[IEEE802154_ATTR_DEST_HW_ADDR],
343 IEEE802154_ADDR_LEN);
344 addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
345
346
347 ret = ieee802154_mlme_ops(dev)->assoc_resp(dev, &addr,
348 nla_get_u16(info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]),
349 nla_get_u8(info->attrs[IEEE802154_ATTR_STATUS]));
350
351 dev_put(dev);
352 return ret;
353}
354
355static int ieee802154_disassociate_req(struct sk_buff *skb,
356 struct genl_info *info)
357{
358 struct net_device *dev;
359 struct ieee802154_addr addr;
360 int ret = -EINVAL;
361
362 if ((!info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] &&
363 !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) ||
364 !info->attrs[IEEE802154_ATTR_REASON])
365 return -EINVAL;
366
367 dev = ieee802154_nl_get_dev(info);
368 if (!dev)
369 return -ENODEV;
370
371 if (info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]) {
372 addr.addr_type = IEEE802154_ADDR_LONG;
373 nla_memcpy(addr.hwaddr,
374 info->attrs[IEEE802154_ATTR_DEST_HW_ADDR],
375 IEEE802154_ADDR_LEN);
376 } else {
377 addr.addr_type = IEEE802154_ADDR_SHORT;
378 addr.short_addr = nla_get_u16(
379 info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]);
380 }
381 addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
382
383 ret = ieee802154_mlme_ops(dev)->disassoc_req(dev, &addr,
384 nla_get_u8(info->attrs[IEEE802154_ATTR_REASON]));
385
386 dev_put(dev);
387 return ret;
388}
389
390/*
391 * PANid, channel, beacon_order = 15, superframe_order = 15,
392 * PAN_coordinator, battery_life_extension = 0,
393 * coord_realignment = 0, security_enable = 0
394 */
395static int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info)
396{
397 struct net_device *dev;
398 struct ieee802154_addr addr;
399
400 u8 channel, bcn_ord, sf_ord;
401 int pan_coord, blx, coord_realign;
402 int ret;
403
404 if (!info->attrs[IEEE802154_ATTR_COORD_PAN_ID] ||
405 !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR] ||
406 !info->attrs[IEEE802154_ATTR_CHANNEL] ||
407 !info->attrs[IEEE802154_ATTR_BCN_ORD] ||
408 !info->attrs[IEEE802154_ATTR_SF_ORD] ||
409 !info->attrs[IEEE802154_ATTR_PAN_COORD] ||
410 !info->attrs[IEEE802154_ATTR_BAT_EXT] ||
411 !info->attrs[IEEE802154_ATTR_COORD_REALIGN]
412 )
413 return -EINVAL;
414
415 dev = ieee802154_nl_get_dev(info);
416 if (!dev)
417 return -ENODEV;
418
419 addr.addr_type = IEEE802154_ADDR_SHORT;
420 addr.short_addr = nla_get_u16(
421 info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]);
422 addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);
423
424 channel = nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]);
425 bcn_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_BCN_ORD]);
426 sf_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_SF_ORD]);
427 pan_coord = nla_get_u8(info->attrs[IEEE802154_ATTR_PAN_COORD]);
428 blx = nla_get_u8(info->attrs[IEEE802154_ATTR_BAT_EXT]);
429 coord_realign = nla_get_u8(info->attrs[IEEE802154_ATTR_COORD_REALIGN]);
430
431 ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel,
432 bcn_ord, sf_ord, pan_coord, blx, coord_realign);
433
434 dev_put(dev);
435 return ret;
436}
437
438static int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info)
439{
440 struct net_device *dev;
441 int ret;
442 u8 type;
443 u32 channels;
444 u8 duration;
445
446 if (!info->attrs[IEEE802154_ATTR_SCAN_TYPE] ||
447 !info->attrs[IEEE802154_ATTR_CHANNELS] ||
448 !info->attrs[IEEE802154_ATTR_DURATION])
449 return -EINVAL;
450
451 dev = ieee802154_nl_get_dev(info);
452 if (!dev)
453 return -ENODEV;
454
455 type = nla_get_u8(info->attrs[IEEE802154_ATTR_SCAN_TYPE]);
456 channels = nla_get_u32(info->attrs[IEEE802154_ATTR_CHANNELS]);
457 duration = nla_get_u8(info->attrs[IEEE802154_ATTR_DURATION]);
458
459 ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels,
460 duration);
461
462 dev_put(dev);
463 return ret;
464}
465
466#define IEEE802154_OP(_cmd, _func) \
467 { \
468 .cmd = _cmd, \
469 .policy = ieee802154_policy, \
470 .doit = _func, \
471 .dumpit = NULL, \
472 .flags = GENL_ADMIN_PERM, \
473 }
474
475static struct genl_ops ieee802154_coordinator_ops[] = {
476 IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req),
477 IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp),
478 IEEE802154_OP(IEEE802154_DISASSOCIATE_REQ, ieee802154_disassociate_req),
479 IEEE802154_OP(IEEE802154_SCAN_REQ, ieee802154_scan_req),
480 IEEE802154_OP(IEEE802154_START_REQ, ieee802154_start_req),
481};
482
483static int __init ieee802154_nl_init(void)
484{
485 int rc;
486 int i;
487
488 rc = genl_register_family(&ieee802154_coordinator_family);
489 if (rc)
490 goto fail;
491
492 rc = genl_register_mc_group(&ieee802154_coordinator_family,
493 &ieee802154_coord_mcgrp);
494 if (rc)
495 goto fail;
496
497 rc = genl_register_mc_group(&ieee802154_coordinator_family,
498 &ieee802154_beacon_mcgrp);
499 if (rc)
500 goto fail;
501
502
503 for (i = 0; i < ARRAY_SIZE(ieee802154_coordinator_ops); i++) {
504 rc = genl_register_ops(&ieee802154_coordinator_family,
505 &ieee802154_coordinator_ops[i]);
506 if (rc)
507 goto fail;
508 }
509
510 return 0;
511
512fail:
513 genl_unregister_family(&ieee802154_coordinator_family);
514 return rc;
515}
516module_init(ieee802154_nl_init);
517
518static void __exit ieee802154_nl_exit(void)
519{
520 genl_unregister_family(&ieee802154_coordinator_family);
521}
522module_exit(ieee802154_nl_exit);
523
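
A hedged userspace sketch (not part of the patch) of driving the generic netlink interface registered above, here issuing an IEEE802154_SCAN_REQ. It uses the libnl-3 API purely for brevity; the command and attribute names come from linux/nl802154.h as included by netlink.c, while the numeric scan type is an assumption (0 taken to mean an ED scan) and the channel mask covers the 2.4 GHz channels 11 to 26. The operation is marked GENL_ADMIN_PERM, so CAP_NET_ADMIN is required, and the result arrives asynchronously as an IEEE802154_SCAN_CONF multicast on the coordinator group.

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl802154.h>

int request_ed_scan(const char *ifname)
{
	struct nl_sock *sk;
	struct nl_msg *msg;
	int family, err = -1;

	sk = nl_socket_alloc();
	if (!sk)
		return -1;
	if (genl_connect(sk) < 0)
		goto out_sock;

	/* resolve the family registered in ieee802154_nl_init() */
	family = genl_ctrl_resolve(sk, IEEE802154_NL_NAME);
	if (family < 0)
		goto out_sock;

	msg = nlmsg_alloc();
	if (!msg)
		goto out_sock;

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
		    0, IEEE802154_SCAN_REQ, 1 /* family version */);
	nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, ifname);
	nla_put_u8(msg, IEEE802154_ATTR_SCAN_TYPE, 0);		/* assumed: ED scan */
	nla_put_u32(msg, IEEE802154_ATTR_CHANNELS, 0x07fff800);	/* channels 11-26 */
	nla_put_u8(msg, IEEE802154_ATTR_DURATION, 5);

	err = nl_send_auto(sk, msg);	/* needs CAP_NET_ADMIN (GENL_ADMIN_PERM) */
	nlmsg_free(msg);
out_sock:
	nl_socket_free(sk);
	return err < 0 ? -1 : 0;
}
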
diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c
new file mode 100644
index 000000000000..c7d71d1adcac
--- /dev/null
+++ b/net/ieee802154/nl_policy.c
@@ -0,0 +1,52 @@
1/*
2 * nl_policy.c: netlink attribute validation policy for the IEEE 802.15.4 stack
3 *
4 * Copyright (C) 2007, 2008 Siemens AG
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 */
20
21#include <linux/kernel.h>
22#include <net/netlink.h>
23#include <linux/nl802154.h>
24
25#define NLA_HW_ADDR NLA_U64
26
27struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
28 [IEEE802154_ATTR_DEV_NAME] = { .type = NLA_STRING, },
29 [IEEE802154_ATTR_DEV_INDEX] = { .type = NLA_U32, },
30
31 [IEEE802154_ATTR_STATUS] = { .type = NLA_U8, },
32 [IEEE802154_ATTR_SHORT_ADDR] = { .type = NLA_U16, },
33 [IEEE802154_ATTR_HW_ADDR] = { .type = NLA_HW_ADDR, },
34 [IEEE802154_ATTR_PAN_ID] = { .type = NLA_U16, },
35 [IEEE802154_ATTR_CHANNEL] = { .type = NLA_U8, },
36 [IEEE802154_ATTR_COORD_SHORT_ADDR] = { .type = NLA_U16, },
37 [IEEE802154_ATTR_COORD_HW_ADDR] = { .type = NLA_HW_ADDR, },
38 [IEEE802154_ATTR_COORD_PAN_ID] = { .type = NLA_U16, },
39 [IEEE802154_ATTR_SRC_SHORT_ADDR] = { .type = NLA_U16, },
40 [IEEE802154_ATTR_SRC_HW_ADDR] = { .type = NLA_HW_ADDR, },
41 [IEEE802154_ATTR_SRC_PAN_ID] = { .type = NLA_U16, },
42 [IEEE802154_ATTR_DEST_SHORT_ADDR] = { .type = NLA_U16, },
43 [IEEE802154_ATTR_DEST_HW_ADDR] = { .type = NLA_HW_ADDR, },
44 [IEEE802154_ATTR_DEST_PAN_ID] = { .type = NLA_U16, },
45
46 [IEEE802154_ATTR_CAPABILITY] = { .type = NLA_U8, },
47 [IEEE802154_ATTR_REASON] = { .type = NLA_U8, },
48 [IEEE802154_ATTR_SCAN_TYPE] = { .type = NLA_U8, },
49 [IEEE802154_ATTR_CHANNELS] = { .type = NLA_U32, },
50 [IEEE802154_ATTR_DURATION] = { .type = NLA_U8, },
51 [IEEE802154_ATTR_ED_LIST] = { .len = 27 },
52};
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
new file mode 100644
index 000000000000..fca44d59f97e
--- /dev/null
+++ b/net/ieee802154/raw.c
@@ -0,0 +1,254 @@
1/*
2 * Raw IEEE 802.15.4 sockets
3 *
4 * Copyright 2007, 2008 Siemens AG
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Written by:
20 * Sergey Lapin <slapin@ossfans.org>
21 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
22 */
23
24#include <linux/net.h>
25#include <linux/module.h>
26#include <linux/if_arp.h>
27#include <linux/list.h>
28#include <net/sock.h>
29#include <net/ieee802154/af_ieee802154.h>
30
31#include "af802154.h"
32
33static HLIST_HEAD(raw_head);
34static DEFINE_RWLOCK(raw_lock);
35
36static void raw_hash(struct sock *sk)
37{
38 write_lock_bh(&raw_lock);
39 sk_add_node(sk, &raw_head);
40 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
41 write_unlock_bh(&raw_lock);
42}
43
44static void raw_unhash(struct sock *sk)
45{
46 write_lock_bh(&raw_lock);
47 if (sk_del_node_init(sk))
48 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
49 write_unlock_bh(&raw_lock);
50}
51
52static void raw_close(struct sock *sk, long timeout)
53{
54 sk_common_release(sk);
55}
56
57static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int len)
58{
59 struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
60 int err = 0;
61 struct net_device *dev = NULL;
62
63 if (len < sizeof(*addr))
64 return -EINVAL;
65
66 if (addr->family != AF_IEEE802154)
67 return -EINVAL;
68
69 lock_sock(sk);
70
71 dev = ieee802154_get_dev(sock_net(sk), &addr->addr);
72 if (!dev) {
73 err = -ENODEV;
74 goto out;
75 }
76
77 if (dev->type != ARPHRD_IEEE802154_PHY &&
78 dev->type != ARPHRD_IEEE802154) {
79 err = -ENODEV;
80 goto out_put;
81 }
82
83 sk->sk_bound_dev_if = dev->ifindex;
84 sk_dst_reset(sk);
85
86out_put:
87 dev_put(dev);
88out:
89 release_sock(sk);
90
91 return err;
92}
93
94static int raw_connect(struct sock *sk, struct sockaddr *uaddr,
95 int addr_len)
96{
97 return -ENOTSUPP;
98}
99
100static int raw_disconnect(struct sock *sk, int flags)
101{
102 return 0;
103}
104
105static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
106 size_t size)
107{
108 struct net_device *dev;
109 unsigned mtu;
110 struct sk_buff *skb;
111 int err;
112
113 if (msg->msg_flags & MSG_OOB) {
114 pr_debug("msg->msg_flags = 0x%x\n", msg->msg_flags);
115 return -EOPNOTSUPP;
116 }
117
118 lock_sock(sk);
119 if (!sk->sk_bound_dev_if)
120 dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
121 else
122 dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if);
123 release_sock(sk);
124
125 if (!dev) {
126 pr_debug("no dev\n");
127 err = -ENXIO;
128 goto out;
129 }
130
131 mtu = dev->mtu;
132 pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
133
134 if (size > mtu) {
135 pr_debug("size = %Zu, mtu = %u\n", size, mtu);
136 err = -EINVAL;
137 goto out_dev;
138 }
139
140 skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + size,
141 msg->msg_flags & MSG_DONTWAIT, &err);
142 if (!skb)
143 goto out_dev;
144
145 skb_reserve(skb, LL_RESERVED_SPACE(dev));
146
147 skb_reset_mac_header(skb);
148 skb_reset_network_header(skb);
149
150 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
151 if (err < 0)
152 goto out_skb;
153
154 skb->dev = dev;
155 skb->sk = sk;
156 skb->protocol = htons(ETH_P_IEEE802154);
157
158 dev_put(dev);
159
160 err = dev_queue_xmit(skb);
161 if (err > 0)
162 err = net_xmit_errno(err);
163
164 return err ?: size;
165
166out_skb:
167 kfree_skb(skb);
168out_dev:
169 dev_put(dev);
170out:
171 return err;
172}
173
174static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
175 size_t len, int noblock, int flags, int *addr_len)
176{
177 size_t copied = 0;
178 int err = -EOPNOTSUPP;
179 struct sk_buff *skb;
180
181 skb = skb_recv_datagram(sk, flags, noblock, &err);
182 if (!skb)
183 goto out;
184
185 copied = skb->len;
186 if (len < copied) {
187 msg->msg_flags |= MSG_TRUNC;
188 copied = len;
189 }
190
191 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
192 if (err)
193 goto done;
194
195 sock_recv_timestamp(msg, sk, skb);
196
197 if (flags & MSG_TRUNC)
198 copied = skb->len;
199done:
200 skb_free_datagram(sk, skb);
201out:
202 if (err)
203 return err;
204 return copied;
205}
206
207static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
208{
209 if (sock_queue_rcv_skb(sk, skb) < 0) {
210 atomic_inc(&sk->sk_drops);
211 kfree_skb(skb);
212 return NET_RX_DROP;
213 }
214
215 return NET_RX_SUCCESS;
216}
217
218
219void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb)
220{
221 struct sock *sk;
222 struct hlist_node *node;
223
224 read_lock(&raw_lock);
225 sk_for_each(sk, node, &raw_head) {
226 bh_lock_sock(sk);
227 if (!sk->sk_bound_dev_if ||
228 sk->sk_bound_dev_if == dev->ifindex) {
229
230 struct sk_buff *clone;
231
232 clone = skb_clone(skb, GFP_ATOMIC);
233 if (clone)
234 raw_rcv_skb(sk, clone);
235 }
236 bh_unlock_sock(sk);
237 }
238 read_unlock(&raw_lock);
239}
240
241struct proto ieee802154_raw_prot = {
242 .name = "IEEE-802.15.4-RAW",
243 .owner = THIS_MODULE,
244 .obj_size = sizeof(struct sock),
245 .close = raw_close,
246 .bind = raw_bind,
247 .sendmsg = raw_sendmsg,
248 .recvmsg = raw_recvmsg,
249 .hash = raw_hash,
250 .unhash = raw_unhash,
251 .connect = raw_connect,
252 .disconnect = raw_disconnect,
253};
254
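
A short userspace sketch (not part of the patch) of the raw socket behaviour implemented in raw_sendmsg(): without a bind(), the kernel transmits on the first ARPHRD_IEEE802154 device it finds, and the buffer is queued verbatim, so the caller must provide a complete MAC header. The bytes below are placeholders, not a valid frame, and the PF_IEEE802154 fallback value is an assumption.

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/socket.h>

#ifndef PF_IEEE802154
#define PF_IEEE802154 36	/* assumption: see linux/socket.h */
#endif

int main(void)
{
	/* placeholder bytes; a real caller supplies a full 802.15.4 MAC frame */
	uint8_t frame[] = { 0x01, 0x80, 0x42 };
	int fd = socket(PF_IEEE802154, SOCK_RAW, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* no bind(): raw_sendmsg() picks the first ARPHRD_IEEE802154 device;
	 * bind() with a sockaddr_ieee802154 would pin sk_bound_dev_if instead */
	if (write(fd, frame, sizeof(frame)) < 0)
		perror("write");

	close(fd);
	return 0;
}
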
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 1f1b82475eaf..575f9bd51ccd 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -507,7 +507,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
507 /* If the first fragment is fragmented itself, we split 507 /* If the first fragment is fragmented itself, we split
508 * it to two chunks: the first with data and paged part 508 * it to two chunks: the first with data and paged part
509 * and the second, holding only fragments. */ 509 * and the second, holding only fragments. */
510 if (skb_shinfo(head)->frag_list) { 510 if (skb_has_frags(head)) {
511 struct sk_buff *clone; 511 struct sk_buff *clone;
512 int i, plen = 0; 512 int i, plen = 0;
513 513
@@ -516,7 +516,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
516 clone->next = head->next; 516 clone->next = head->next;
517 head->next = clone; 517 head->next = clone;
518 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; 518 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
519 skb_shinfo(head)->frag_list = NULL; 519 skb_frag_list_init(head);
520 for (i=0; i<skb_shinfo(head)->nr_frags; i++) 520 for (i=0; i<skb_shinfo(head)->nr_frags; i++)
521 plen += skb_shinfo(head)->frags[i].size; 521 plen += skb_shinfo(head)->frags[i].size;
522 clone->len = clone->data_len = head->data_len - plen; 522 clone->len = clone->data_len = head->data_len - plen;
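
This hunk, and the ip_output, SCTP NAT, IPv6 and netfilter reassembly hunks that follow, replace open-coded frag_list handling with the skb_has_frags(), skb_walk_frags() and skb_frag_list_init() helpers. The helpers themselves live in include/linux/skbuff.h, outside this net/ diff, so treat their exact bodies as assumed; the hypothetical function below only illustrates the usage pattern the conversions adopt.

/* Hypothetical helper: sum the lengths of the sub-skbs chained on
 * skb_shinfo(skb)->frag_list using the new accessors. */
static unsigned int frag_list_len(struct sk_buff *skb)
{
	struct sk_buff *frag;
	unsigned int len = 0;

	if (!skb_has_frags(skb))	/* no frag_list attached */
		return 0;

	skb_walk_frags(skb, frag)	/* walk the frag_list chain */
		len += frag->len;

	/* skb_frag_list_init(skb) would detach the chain by clearing
	 * skb_shinfo(skb)->frag_list, as the reassembly paths above do */
	return len;
}
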
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 3d6167fb2d97..247026282669 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -474,7 +474,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
474 * LATER: this step can be merged to real generation of fragments, 474 * LATER: this step can be merged to real generation of fragments,
475 * we can switch to copy when see the first bad fragment. 475 * we can switch to copy when see the first bad fragment.
476 */ 476 */
477 if (skb_shinfo(skb)->frag_list) { 477 if (skb_has_frags(skb)) {
478 struct sk_buff *frag; 478 struct sk_buff *frag;
479 int first_len = skb_pagelen(skb); 479 int first_len = skb_pagelen(skb);
480 int truesizes = 0; 480 int truesizes = 0;
@@ -485,7 +485,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
485 skb_cloned(skb)) 485 skb_cloned(skb))
486 goto slow_path; 486 goto slow_path;
487 487
488 for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) { 488 skb_walk_frags(skb, frag) {
489 /* Correct geometry. */ 489 /* Correct geometry. */
490 if (frag->len > mtu || 490 if (frag->len > mtu ||
491 ((frag->len & 7) && frag->next) || 491 ((frag->len & 7) && frag->next) ||
@@ -498,7 +498,6 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
498 498
499 BUG_ON(frag->sk); 499 BUG_ON(frag->sk);
500 if (skb->sk) { 500 if (skb->sk) {
501 sock_hold(skb->sk);
502 frag->sk = skb->sk; 501 frag->sk = skb->sk;
503 frag->destructor = sock_wfree; 502 frag->destructor = sock_wfree;
504 truesizes += frag->truesize; 503 truesizes += frag->truesize;
@@ -510,7 +509,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
510 err = 0; 509 err = 0;
511 offset = 0; 510 offset = 0;
512 frag = skb_shinfo(skb)->frag_list; 511 frag = skb_shinfo(skb)->frag_list;
513 skb_shinfo(skb)->frag_list = NULL; 512 skb_frag_list_init(skb);
514 skb->data_len = first_len - skb_headlen(skb); 513 skb->data_len = first_len - skb_headlen(skb);
515 skb->truesize -= truesizes; 514 skb->truesize -= truesizes;
516 skb->len = first_len; 515 skb->len = first_len;
diff --git a/net/ipv4/netfilter/nf_nat_proto_sctp.c b/net/ipv4/netfilter/nf_nat_proto_sctp.c
index 65e470bc6123..3fc598eeeb1a 100644
--- a/net/ipv4/netfilter/nf_nat_proto_sctp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_sctp.c
@@ -33,6 +33,7 @@ sctp_manip_pkt(struct sk_buff *skb,
33 enum nf_nat_manip_type maniptype) 33 enum nf_nat_manip_type maniptype)
34{ 34{
35 const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff); 35 const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
36 struct sk_buff *frag;
36 sctp_sctphdr_t *hdr; 37 sctp_sctphdr_t *hdr;
37 unsigned int hdroff = iphdroff + iph->ihl*4; 38 unsigned int hdroff = iphdroff + iph->ihl*4;
38 __be32 oldip, newip; 39 __be32 oldip, newip;
@@ -57,8 +58,8 @@ sctp_manip_pkt(struct sk_buff *skb,
57 } 58 }
58 59
59 crc32 = sctp_start_cksum((u8 *)hdr, skb_headlen(skb) - hdroff); 60 crc32 = sctp_start_cksum((u8 *)hdr, skb_headlen(skb) - hdroff);
60 for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) 61 skb_walk_frags(skb, frag)
61 crc32 = sctp_update_cksum((u8 *)skb->data, skb_headlen(skb), 62 crc32 = sctp_update_cksum((u8 *)frag->data, skb_headlen(frag),
62 crc32); 63 crc32);
63 crc32 = sctp_end_cksum(crc32); 64 crc32 = sctp_end_cksum(crc32);
64 hdr->checksum = crc32; 65 hdr->checksum = crc32;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index c8dc8e5a822f..7c76e3d18215 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -658,7 +658,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
658 } 658 }
659 mtu -= hlen + sizeof(struct frag_hdr); 659 mtu -= hlen + sizeof(struct frag_hdr);
660 660
661 if (skb_shinfo(skb)->frag_list) { 661 if (skb_has_frags(skb)) {
662 int first_len = skb_pagelen(skb); 662 int first_len = skb_pagelen(skb);
663 int truesizes = 0; 663 int truesizes = 0;
664 664
@@ -667,7 +667,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
667 skb_cloned(skb)) 667 skb_cloned(skb))
668 goto slow_path; 668 goto slow_path;
669 669
670 for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) { 670 skb_walk_frags(skb, frag) {
671 /* Correct geometry. */ 671 /* Correct geometry. */
672 if (frag->len > mtu || 672 if (frag->len > mtu ||
673 ((frag->len & 7) && frag->next) || 673 ((frag->len & 7) && frag->next) ||
@@ -680,7 +680,6 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
680 680
681 BUG_ON(frag->sk); 681 BUG_ON(frag->sk);
682 if (skb->sk) { 682 if (skb->sk) {
683 sock_hold(skb->sk);
684 frag->sk = skb->sk; 683 frag->sk = skb->sk;
685 frag->destructor = sock_wfree; 684 frag->destructor = sock_wfree;
686 truesizes += frag->truesize; 685 truesizes += frag->truesize;
@@ -690,7 +689,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
690 err = 0; 689 err = 0;
691 offset = 0; 690 offset = 0;
692 frag = skb_shinfo(skb)->frag_list; 691 frag = skb_shinfo(skb)->frag_list;
693 skb_shinfo(skb)->frag_list = NULL; 692 skb_frag_list_init(skb);
694 /* BUILD HEADER */ 693 /* BUILD HEADER */
695 694
696 *prevhdr = NEXTHDR_FRAGMENT; 695 *prevhdr = NEXTHDR_FRAGMENT;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 058a5e4a60c3..f3aba255ad9f 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -409,7 +409,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
409 /* If the first fragment is fragmented itself, we split 409 /* If the first fragment is fragmented itself, we split
410 * it to two chunks: the first with data and paged part 410 * it to two chunks: the first with data and paged part
411 * and the second, holding only fragments. */ 411 * and the second, holding only fragments. */
412 if (skb_shinfo(head)->frag_list) { 412 if (skb_has_frags(head)) {
413 struct sk_buff *clone; 413 struct sk_buff *clone;
414 int i, plen = 0; 414 int i, plen = 0;
415 415
@@ -420,7 +420,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
420 clone->next = head->next; 420 clone->next = head->next;
421 head->next = clone; 421 head->next = clone;
422 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; 422 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
423 skb_shinfo(head)->frag_list = NULL; 423 skb_frag_list_init(head);
424 for (i=0; i<skb_shinfo(head)->nr_frags; i++) 424 for (i=0; i<skb_shinfo(head)->nr_frags; i++)
425 plen += skb_shinfo(head)->frags[i].size; 425 plen += skb_shinfo(head)->frags[i].size;
426 clone->len = clone->data_len = head->data_len - plen; 426 clone->len = clone->data_len = head->data_len - plen;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 54a387d31e1a..2642a41a8535 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -494,7 +494,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
494 /* If the first fragment is fragmented itself, we split 494 /* If the first fragment is fragmented itself, we split
495 * it to two chunks: the first with data and paged part 495 * it to two chunks: the first with data and paged part
496 * and the second, holding only fragments. */ 496 * and the second, holding only fragments. */
497 if (skb_shinfo(head)->frag_list) { 497 if (skb_has_frags(head)) {
498 struct sk_buff *clone; 498 struct sk_buff *clone;
499 int i, plen = 0; 499 int i, plen = 0;
500 500
@@ -503,7 +503,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
503 clone->next = head->next; 503 clone->next = head->next;
504 head->next = clone; 504 head->next = clone;
505 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; 505 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
506 skb_shinfo(head)->frag_list = NULL; 506 skb_frag_list_init(head);
507 for (i=0; i<skb_shinfo(head)->nr_frags; i++) 507 for (i=0; i<skb_shinfo(head)->nr_frags; i++)
508 plen += skb_shinfo(head)->frags[i].size; 508 plen += skb_shinfo(head)->frags[i].size;
509 clone->len = clone->data_len = head->data_len - plen; 509 clone->len = clone->data_len = head->data_len - plen;
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 43d00ffd3988..9e5762ad307d 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -132,6 +132,9 @@ static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
132 132
133 state = &sta->ampdu_mlme.tid_state_tx[tid]; 133 state = &sta->ampdu_mlme.tid_state_tx[tid];
134 134
135 if (*state == HT_AGG_STATE_OPERATIONAL)
136 sta->ampdu_mlme.addba_req_num[tid] = 0;
137
135 *state = HT_AGG_STATE_REQ_STOP_BA_MSK | 138 *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
136 (initiator << HT_AGG_STATE_INITIATOR_SHIFT); 139 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
137 140
@@ -337,6 +340,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
337 sta->ampdu_mlme.tid_tx[tid]->dialog_token, 340 sta->ampdu_mlme.tid_tx[tid]->dialog_token,
338 sta->ampdu_mlme.tid_tx[tid]->ssn, 341 sta->ampdu_mlme.tid_tx[tid]->ssn,
339 0x40, 5000); 342 0x40, 5000);
343 sta->ampdu_mlme.addba_req_num[tid]++;
340 /* activate the timer for the recipient's addBA response */ 344 /* activate the timer for the recipient's addBA response */
341 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires = 345 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
342 jiffies + ADDBA_RESP_INTERVAL; 346 jiffies + ADDBA_RESP_INTERVAL;
@@ -606,7 +610,6 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
606 610
607 *state = HT_AGG_STATE_IDLE; 611 *state = HT_AGG_STATE_IDLE;
608 /* from now on packets are no longer put onto sta->pending */ 612 /* from now on packets are no longer put onto sta->pending */
609 sta->ampdu_mlme.addba_req_num[tid] = 0;
610 kfree(sta->ampdu_mlme.tid_tx[tid]); 613 kfree(sta->ampdu_mlme.tid_tx[tid]);
611 sta->ampdu_mlme.tid_tx[tid] = NULL; 614 sta->ampdu_mlme.tid_tx[tid] = NULL;
612 615
@@ -689,7 +692,6 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
689 692
690 sta->ampdu_mlme.addba_req_num[tid] = 0; 693 sta->ampdu_mlme.addba_req_num[tid] = 0;
691 } else { 694 } else {
692 sta->ampdu_mlme.addba_req_num[tid]++;
693 ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR); 695 ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR);
694 } 696 }
695 spin_unlock_bh(&sta->lock); 697 spin_unlock_bh(&sta->lock);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index a9211cc183cb..3f47276caeb8 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1122,8 +1122,8 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
1122 p.txop = params->txop; 1122 p.txop = params->txop;
1123 if (drv_conf_tx(local, params->queue, &p)) { 1123 if (drv_conf_tx(local, params->queue, &p)) {
1124 printk(KERN_DEBUG "%s: failed to set TX queue " 1124 printk(KERN_DEBUG "%s: failed to set TX queue "
1125 "parameters for queue %d\n", local->mdev->name, 1125 "parameters for queue %d\n",
1126 params->queue); 1126 wiphy_name(local->hw.wiphy), params->queue);
1127 return -EINVAL; 1127 return -EINVAL;
1128 } 1128 }
1129 1129
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index c088c46704a3..4dbc28964196 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -589,6 +589,7 @@ enum queue_stop_reason {
589 IEEE80211_QUEUE_STOP_REASON_AGGREGATION, 589 IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
590 IEEE80211_QUEUE_STOP_REASON_SUSPEND, 590 IEEE80211_QUEUE_STOP_REASON_SUSPEND,
591 IEEE80211_QUEUE_STOP_REASON_PENDING, 591 IEEE80211_QUEUE_STOP_REASON_PENDING,
592 IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
592}; 593};
593 594
594struct ieee80211_master_priv { 595struct ieee80211_master_priv {
@@ -1121,6 +1122,10 @@ void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
1121 enum queue_stop_reason reason); 1122 enum queue_stop_reason reason);
1122void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, 1123void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
1123 enum queue_stop_reason reason); 1124 enum queue_stop_reason reason);
1125void ieee80211_add_pending_skb(struct ieee80211_local *local,
1126 struct sk_buff *skb);
1127int ieee80211_add_pending_skbs(struct ieee80211_local *local,
1128 struct sk_buff_head *skbs);
1124 1129
1125void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, 1130void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1126 u16 transaction, u16 auth_alg, 1131 u16 transaction, u16 auth_alg,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 2683df918073..092a017b237e 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -369,60 +369,12 @@ static void ieee80211_tasklet_handler(unsigned long data)
369 } 369 }
370} 370}
371 371
372/* Remove added headers (e.g., QoS control), encryption header/MIC, etc. to
373 * make a prepared TX frame (one that has been given to hw) to look like brand
374 * new IEEE 802.11 frame that is ready to go through TX processing again.
375 */
376static void ieee80211_remove_tx_extra(struct ieee80211_local *local,
377 struct ieee80211_key *key,
378 struct sk_buff *skb)
379{
380 unsigned int hdrlen, iv_len, mic_len;
381 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
382
383 hdrlen = ieee80211_hdrlen(hdr->frame_control);
384
385 if (!key)
386 goto no_key;
387
388 switch (key->conf.alg) {
389 case ALG_WEP:
390 iv_len = WEP_IV_LEN;
391 mic_len = WEP_ICV_LEN;
392 break;
393 case ALG_TKIP:
394 iv_len = TKIP_IV_LEN;
395 mic_len = TKIP_ICV_LEN;
396 break;
397 case ALG_CCMP:
398 iv_len = CCMP_HDR_LEN;
399 mic_len = CCMP_MIC_LEN;
400 break;
401 default:
402 goto no_key;
403 }
404
405 if (skb->len >= hdrlen + mic_len &&
406 !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
407 skb_trim(skb, skb->len - mic_len);
408 if (skb->len >= hdrlen + iv_len) {
409 memmove(skb->data + iv_len, skb->data, hdrlen);
410 hdr = (struct ieee80211_hdr *)skb_pull(skb, iv_len);
411 }
412
413no_key:
414 if (ieee80211_is_data_qos(hdr->frame_control)) {
415 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
416 memmove(skb->data + IEEE80211_QOS_CTL_LEN, skb->data,
417 hdrlen - IEEE80211_QOS_CTL_LEN);
418 skb_pull(skb, IEEE80211_QOS_CTL_LEN);
419 }
420}
421
422static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, 372static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
423 struct sta_info *sta, 373 struct sta_info *sta,
424 struct sk_buff *skb) 374 struct sk_buff *skb)
425{ 375{
376 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
377
426 sta->tx_filtered_count++; 378 sta->tx_filtered_count++;
427 379
428 /* 380 /*
@@ -464,16 +416,15 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
464 */ 416 */
465 if (test_sta_flags(sta, WLAN_STA_PS) && 417 if (test_sta_flags(sta, WLAN_STA_PS) &&
466 skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) { 418 skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
467 ieee80211_remove_tx_extra(local, sta->key, skb);
468 skb_queue_tail(&sta->tx_filtered, skb); 419 skb_queue_tail(&sta->tx_filtered, skb);
469 return; 420 return;
470 } 421 }
471 422
472 if (!test_sta_flags(sta, WLAN_STA_PS) && !skb->requeue) { 423 if (!test_sta_flags(sta, WLAN_STA_PS) &&
424 !(info->flags & IEEE80211_TX_INTFL_RETRIED)) {
473 /* Software retry the packet once */ 425 /* Software retry the packet once */
474 skb->requeue = 1; 426 info->flags |= IEEE80211_TX_INTFL_RETRIED;
475 ieee80211_remove_tx_extra(local, sta->key, skb); 427 ieee80211_add_pending_skb(local, skb);
476 dev_queue_xmit(skb);
477 return; 428 return;
478 } 429 }
479 430
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 509469cb9265..d779c57a8220 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -621,9 +621,6 @@ static void ieee80211_change_ps(struct ieee80211_local *local)
621 struct ieee80211_conf *conf = &local->hw.conf; 621 struct ieee80211_conf *conf = &local->hw.conf;
622 622
623 if (local->ps_sdata) { 623 if (local->ps_sdata) {
624 if (!(local->ps_sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED))
625 return;
626
627 ieee80211_enable_ps(local, local->ps_sdata); 624 ieee80211_enable_ps(local, local->ps_sdata);
628 } else if (conf->flags & IEEE80211_CONF_PS) { 625 } else if (conf->flags & IEEE80211_CONF_PS) {
629 conf->flags &= ~IEEE80211_CONF_PS; 626 conf->flags &= ~IEEE80211_CONF_PS;
@@ -653,7 +650,9 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
653 count++; 650 count++;
654 } 651 }
655 652
656 if (count == 1 && found->u.mgd.powersave) { 653 if (count == 1 && found->u.mgd.powersave &&
654 (found->u.mgd.flags & IEEE80211_STA_ASSOCIATED) &&
655 !(found->u.mgd.flags & IEEE80211_STA_PROBEREQ_POLL)) {
657 s32 beaconint_us; 656 s32 beaconint_us;
658 657
659 if (latency < 0) 658 if (latency < 0)
@@ -793,13 +792,13 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
793#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 792#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
794 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " 793 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d "
795 "cWmin=%d cWmax=%d txop=%d\n", 794 "cWmin=%d cWmax=%d txop=%d\n",
796 local->mdev->name, queue, aci, acm, params.aifs, params.cw_min, 795 wiphy_name(local->hw.wiphy), queue, aci, acm,
797 params.cw_max, params.txop); 796 params.aifs, params.cw_min, params.cw_max, params.txop);
798#endif 797#endif
799 if (drv_conf_tx(local, queue, &params) && local->ops->conf_tx) 798 if (drv_conf_tx(local, queue, &params) && local->ops->conf_tx)
800 printk(KERN_DEBUG "%s: failed to set TX queue " 799 printk(KERN_DEBUG "%s: failed to set TX queue "
801 "parameters for queue %d\n", local->mdev->name, 800 "parameters for queue %d\n",
802 queue); 801 wiphy_name(local->hw.wiphy), queue);
803 } 802 }
804} 803}
805 804
@@ -1322,6 +1321,11 @@ void ieee80211_beacon_loss_work(struct work_struct *work)
1322#endif 1321#endif
1323 1322
1324 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; 1323 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
1324
1325 mutex_lock(&sdata->local->iflist_mtx);
1326 ieee80211_recalc_ps(sdata->local, -1);
1327 mutex_unlock(&sdata->local->iflist_mtx);
1328
1325 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, 1329 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
1326 ifmgd->ssid_len, NULL, 0); 1330 ifmgd->ssid_len, NULL, 0);
1327 1331
@@ -1342,6 +1346,7 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata)
1342 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1346 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1343 struct ieee80211_local *local = sdata->local; 1347 struct ieee80211_local *local = sdata->local;
1344 struct sta_info *sta; 1348 struct sta_info *sta;
1349 unsigned long last_rx;
1345 bool disassoc = false; 1350 bool disassoc = false;
1346 1351
1347 /* TODO: start monitoring current AP signal quality and number of 1352 /* TODO: start monitoring current AP signal quality and number of
@@ -1358,17 +1363,21 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata)
1358 printk(KERN_DEBUG "%s: No STA entry for own AP %pM\n", 1363 printk(KERN_DEBUG "%s: No STA entry for own AP %pM\n",
1359 sdata->dev->name, ifmgd->bssid); 1364 sdata->dev->name, ifmgd->bssid);
1360 disassoc = true; 1365 disassoc = true;
1361 goto unlock; 1366 rcu_read_unlock();
1367 goto out;
1362 } 1368 }
1363 1369
1370 last_rx = sta->last_rx;
1371 rcu_read_unlock();
1372
1364 if ((ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) && 1373 if ((ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) &&
1365 time_after(jiffies, sta->last_rx + IEEE80211_PROBE_WAIT)) { 1374 time_after(jiffies, last_rx + IEEE80211_PROBE_WAIT)) {
1366 printk(KERN_DEBUG "%s: no probe response from AP %pM " 1375 printk(KERN_DEBUG "%s: no probe response from AP %pM "
1367 "- disassociating\n", 1376 "- disassociating\n",
1368 sdata->dev->name, ifmgd->bssid); 1377 sdata->dev->name, ifmgd->bssid);
1369 disassoc = true; 1378 disassoc = true;
1370 ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL; 1379 ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
1371 goto unlock; 1380 goto out;
1372 } 1381 }
1373 1382
1374 /* 1383 /*
@@ -1387,26 +1396,29 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata)
1387 } 1396 }
1388#endif 1397#endif
1389 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; 1398 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
1399 mutex_lock(&local->iflist_mtx);
1400 ieee80211_recalc_ps(local, -1);
1401 mutex_unlock(&local->iflist_mtx);
1390 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, 1402 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
1391 ifmgd->ssid_len, NULL, 0); 1403 ifmgd->ssid_len, NULL, 0);
1392 mod_timer(&ifmgd->timer, jiffies + IEEE80211_PROBE_WAIT); 1404 mod_timer(&ifmgd->timer, jiffies + IEEE80211_PROBE_WAIT);
1393 goto unlock; 1405 goto out;
1394 } 1406 }
1395 1407
1396 if (time_after(jiffies, sta->last_rx + IEEE80211_PROBE_IDLE_TIME)) { 1408 if (time_after(jiffies, last_rx + IEEE80211_PROBE_IDLE_TIME)) {
1397 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; 1409 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
1410 mutex_lock(&local->iflist_mtx);
1411 ieee80211_recalc_ps(local, -1);
1412 mutex_unlock(&local->iflist_mtx);
1398 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, 1413 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
1399 ifmgd->ssid_len, NULL, 0); 1414 ifmgd->ssid_len, NULL, 0);
1400 } 1415 }
1401 1416
1417 out:
1402 if (!disassoc) 1418 if (!disassoc)
1403 mod_timer(&ifmgd->timer, 1419 mod_timer(&ifmgd->timer,
1404 jiffies + IEEE80211_MONITORING_INTERVAL); 1420 jiffies + IEEE80211_MONITORING_INTERVAL);
1405 1421 else
1406 unlock:
1407 rcu_read_unlock();
1408
1409 if (disassoc)
1410 ieee80211_set_disassoc(sdata, true, true, 1422 ieee80211_set_disassoc(sdata, true, true,
1411 WLAN_REASON_PREV_AUTH_NOT_VALID); 1423 WLAN_REASON_PREV_AUTH_NOT_VALID);
1412} 1424}
@@ -1889,8 +1901,12 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1889 ieee80211_authenticate(sdata); 1901 ieee80211_authenticate(sdata);
1890 } 1902 }
1891 1903
1892 if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) 1904 if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) {
1893 ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL; 1905 ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
1906 mutex_lock(&sdata->local->iflist_mtx);
1907 ieee80211_recalc_ps(sdata->local, -1);
1908 mutex_unlock(&sdata->local->iflist_mtx);
1909 }
1894} 1910}
1895 1911
1896/* 1912/*
@@ -1948,6 +1964,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1948 } 1964 }
1949#endif 1965#endif
1950 ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL; 1966 ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
1967 mutex_lock(&local->iflist_mtx);
1968 ieee80211_recalc_ps(local, -1);
1969 mutex_unlock(&local->iflist_mtx);
1951 } 1970 }
1952 1971
1953 ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4); 1972 ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4);
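
The mlme.c changes above do two related things: they recalculate power-save state under sdata->local->iflist_mtx whenever the probe-poll flag is set or cleared, and they stop reading sta->last_rx after rcu_read_unlock() by taking a snapshot of it first. A minimal sketch of that snapshot pattern, with hypothetical names (struct peer, peer_lookup) standing in for the mac80211 structures:

#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct peer {
	unsigned long last_rx;
};

static struct peer *peer_lookup(void);	/* hypothetical RCU-protected lookup */

static bool peer_is_stale(unsigned long timeout)
{
	struct peer *p;
	unsigned long last_rx;

	rcu_read_lock();
	p = peer_lookup();
	if (!p) {
		rcu_read_unlock();
		return true;
	}
	last_rx = p->last_rx;	/* snapshot while still under RCU */
	rcu_read_unlock();

	/* from here on it is safe to take mutexes or send frames */
	return time_after(jiffies, last_rx + timeout);
}
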
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 0a11515341ba..b218b98fba7f 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -215,7 +215,7 @@ minstrel_get_next_sample(struct minstrel_sta_info *mi)
215 unsigned int sample_ndx; 215 unsigned int sample_ndx;
216 sample_ndx = SAMPLE_TBL(mi, mi->sample_idx, mi->sample_column); 216 sample_ndx = SAMPLE_TBL(mi, mi->sample_idx, mi->sample_column);
217 mi->sample_idx++; 217 mi->sample_idx++;
218 if (mi->sample_idx > (mi->n_rates - 2)) { 218 if ((int) mi->sample_idx > (mi->n_rates - 2)) {
219 mi->sample_idx = 0; 219 mi->sample_idx = 0;
220 mi->sample_column++; 220 mi->sample_column++;
221 if (mi->sample_column >= SAMPLE_COLUMNS) 221 if (mi->sample_column >= SAMPLE_COLUMNS)
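
The single cast in this minstrel hunk matters because mi->sample_idx is unsigned while mi->n_rates - 2 goes negative when fewer than two rates are supported; comparing an unsigned value against -1 promotes the -1 to UINT_MAX, so the reset branch could never trigger and the index would keep growing. A stand-alone user-space illustration of the pitfall (variable names are mine, not minstrel's):

#include <stdio.h>

int main(void)
{
	unsigned int sample_idx = 0;
	int n_rates = 1;			/* fewer than two supported rates */

	/* old form: -1 is converted to UINT_MAX, so this is never true */
	if (sample_idx > (n_rates - 2))
		puts("unsigned compare: reset");

	/* fixed form: both sides signed, 0 > -1, so the reset happens */
	if ((int) sample_idx > (n_rates - 2))
		puts("signed compare: reset");

	return 0;
}
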
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 6a9b8e63a6bf..de5bba7f910a 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -797,8 +797,7 @@ static int ap_sta_ps_end(struct sta_info *sta)
797{ 797{
798 struct ieee80211_sub_if_data *sdata = sta->sdata; 798 struct ieee80211_sub_if_data *sdata = sta->sdata;
799 struct ieee80211_local *local = sdata->local; 799 struct ieee80211_local *local = sdata->local;
800 struct sk_buff *skb; 800 int sent, buffered;
801 int sent = 0;
802 801
803 atomic_dec(&sdata->bss->num_sta_ps); 802 atomic_dec(&sdata->bss->num_sta_ps);
804 803
@@ -814,22 +813,16 @@ static int ap_sta_ps_end(struct sta_info *sta)
814#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 813#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
815 814
816 /* Send all buffered frames to the station */ 815 /* Send all buffered frames to the station */
817 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) { 816 sent = ieee80211_add_pending_skbs(local, &sta->tx_filtered);
818 sent++; 817 buffered = ieee80211_add_pending_skbs(local, &sta->ps_tx_buf);
819 skb->requeue = 1; 818 sent += buffered;
820 dev_queue_xmit(skb); 819 local->total_ps_buffered -= buffered;
821 } 820
822 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
823 local->total_ps_buffered--;
824 sent++;
825#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 821#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
826 printk(KERN_DEBUG "%s: STA %pM aid %d send PS frame " 822 printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames "
827 "since STA not sleeping anymore\n", sdata->dev->name, 823 "since STA not sleeping anymore\n", sdata->dev->name,
828 sta->sta.addr, sta->sta.aid); 824 sta->sta.addr, sta->sta.aid, sent - buffered, buffered);
829#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 825#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
830 skb->requeue = 1;
831 dev_queue_xmit(skb);
832 }
833 826
834 return sent; 827 return sent;
835} 828}
@@ -1335,7 +1328,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1335 * mac80211. That also explains the __skb_push() 1328 * mac80211. That also explains the __skb_push()
1336 * below. 1329 * below.
1337 */ 1330 */
1338 align = (unsigned long)skb->data & 3; 1331 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
1339 if (align) { 1332 if (align) {
1340 if (WARN_ON(skb_headroom(skb) < 3)) { 1333 if (WARN_ON(skb_headroom(skb) < 3)) {
1341 dev_kfree_skb(skb); 1334 dev_kfree_skb(skb);
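
The ieee80211_deliver_skb() change above moves the alignment test from skb->data to the byte that follows the Ethernet header, since it is the network-layer header, not the frame start, that wants 4-byte alignment. A small user-space illustration of why the two tests disagree (the sample address and the 14-byte header size are assumptions for the example):

#include <stdio.h>

#define ETH_HLEN	14	/* sizeof(struct ethhdr) */

int main(void)
{
	unsigned long data = 0x1000;	/* hypothetical, 4-byte aligned skb->data */

	printf("old check (data & 3):        %lu\n", data & 3);
	printf("new check ((data + 14) & 3): %lu\n", (data + ETH_HLEN) & 3);
	/*
	 * 0 vs 2: a nicely aligned frame start still leaves the IP header
	 * misaligned, which is exactly the case the new test catches.
	 */
	return 0;
}
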
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 1436f747531a..364222bfb10d 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -400,6 +400,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
400 sta_info_set_tim_bit(sta); 400 sta_info_set_tim_bit(sta);
401 401
402 info->control.jiffies = jiffies; 402 info->control.jiffies = jiffies;
403 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
403 skb_queue_tail(&sta->ps_tx_buf, tx->skb); 404 skb_queue_tail(&sta->ps_tx_buf, tx->skb);
404 return TX_QUEUED; 405 return TX_QUEUED;
405 } 406 }
@@ -420,7 +421,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
420 * frame filtering and keeps a station blacklist on its own 421 * frame filtering and keeps a station blacklist on its own
421 * (e.g: p54), so that frames can be delivered unimpeded. 422 * (e.g: p54), so that frames can be delivered unimpeded.
422 * 423 *
423 * Note: It should be save to disable the filter now. 424 * Note: It should be safe to disable the filter now.
424 * As, it is really unlikely that we still have any pending 425 * As, it is really unlikely that we still have any pending
425 * frame for this station in the hw's buffers/fifos left, 426 * frame for this station in the hw's buffers/fifos left,
426 * that is not rejected with a unsuccessful tx_status yet. 427 * that is not rejected with a unsuccessful tx_status yet.
@@ -907,9 +908,8 @@ ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
907 * deal with packet injection down monitor interface 908 * deal with packet injection down monitor interface
908 * with Radiotap Header -- only called for monitor mode interface 909 * with Radiotap Header -- only called for monitor mode interface
909 */ 910 */
910static ieee80211_tx_result 911static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
911__ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx, 912 struct sk_buff *skb)
912 struct sk_buff *skb)
913{ 913{
914 /* 914 /*
915 * this is the moment to interpret and discard the radiotap header that 915 * this is the moment to interpret and discard the radiotap header that
@@ -960,7 +960,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
960 * on transmission 960 * on transmission
961 */ 961 */
962 if (skb->len < (iterator.max_length + FCS_LEN)) 962 if (skb->len < (iterator.max_length + FCS_LEN))
963 return TX_DROP; 963 return false;
964 964
965 skb_trim(skb, skb->len - FCS_LEN); 965 skb_trim(skb, skb->len - FCS_LEN);
966 } 966 }
@@ -982,7 +982,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
982 } 982 }
983 983
984 if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */ 984 if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
985 return TX_DROP; 985 return false;
986 986
987 /* 987 /*
988 * remove the radiotap header 988 * remove the radiotap header
@@ -991,7 +991,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
991 */ 991 */
992 skb_pull(skb, iterator.max_length); 992 skb_pull(skb, iterator.max_length);
993 993
994 return TX_CONTINUE; 994 return true;
995} 995}
996 996
997/* 997/*
@@ -1025,7 +1025,7 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1025 /* process and remove the injection radiotap header */ 1025 /* process and remove the injection radiotap header */
1026 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1026 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1027 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) { 1027 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) {
1028 if (__ieee80211_parse_tx_radiotap(tx, skb) == TX_DROP) 1028 if (!__ieee80211_parse_tx_radiotap(tx, skb))
1029 return TX_DROP; 1029 return TX_DROP;
1030 1030
1031 /* 1031 /*
@@ -1415,7 +1415,8 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
1415 } 1415 }
1416 1416
1417 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) && 1417 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
1418 local->hw.conf.dynamic_ps_timeout > 0) { 1418 local->hw.conf.dynamic_ps_timeout > 0 &&
1419 !local->sw_scanning && !local->hw_scanning && local->ps_sdata) {
1419 if (local->hw.conf.flags & IEEE80211_CONF_PS) { 1420 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
1420 ieee80211_stop_queues_by_reason(&local->hw, 1421 ieee80211_stop_queues_by_reason(&local->hw,
1421 IEEE80211_QUEUE_STOP_REASON_PS); 1422 IEEE80211_QUEUE_STOP_REASON_PS);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 22f63815fb36..66ce96a69f31 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -341,6 +341,52 @@ void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue)
341} 341}
342EXPORT_SYMBOL(ieee80211_stop_queue); 342EXPORT_SYMBOL(ieee80211_stop_queue);
343 343
344void ieee80211_add_pending_skb(struct ieee80211_local *local,
345 struct sk_buff *skb)
346{
347 struct ieee80211_hw *hw = &local->hw;
348 unsigned long flags;
349 int queue = skb_get_queue_mapping(skb);
350
351 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
352 __ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
353 __ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_PENDING);
354 skb_queue_tail(&local->pending[queue], skb);
355 __ieee80211_wake_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
356 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
357}
358
359int ieee80211_add_pending_skbs(struct ieee80211_local *local,
360 struct sk_buff_head *skbs)
361{
362 struct ieee80211_hw *hw = &local->hw;
363 struct sk_buff *skb;
364 unsigned long flags;
365 int queue, ret = 0, i;
366
367 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
368 for (i = 0; i < hw->queues; i++)
369 __ieee80211_stop_queue(hw, i,
370 IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
371
372 while ((skb = skb_dequeue(skbs))) {
373 ret++;
374 queue = skb_get_queue_mapping(skb);
375 skb_queue_tail(&local->pending[queue], skb);
376 }
377
378 for (i = 0; i < hw->queues; i++) {
379 if (ret)
380 __ieee80211_stop_queue(hw, i,
381 IEEE80211_QUEUE_STOP_REASON_PENDING);
382 __ieee80211_wake_queue(hw, i,
383 IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
384 }
385 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
386
387 return ret;
388}
389
344void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, 390void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
345 enum queue_stop_reason reason) 391 enum queue_stop_reason reason)
346{ 392{
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 694343b9102b..116a923b14d6 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -101,7 +101,7 @@ u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
101 * Now we know the 1d priority, fill in the QoS header if 101 * Now we know the 1d priority, fill in the QoS header if
102 * there is one (and we haven't done this before). 102 * there is one (and we haven't done this before).
103 */ 103 */
104 if (!skb->requeue && ieee80211_is_data_qos(hdr->frame_control)) { 104 if (ieee80211_is_data_qos(hdr->frame_control)) {
105 u8 *p = ieee80211_get_qos_ctl(hdr); 105 u8 *p = ieee80211_get_qos_ctl(hdr);
106 u8 ack_policy = 0; 106 u8 ack_policy = 0;
107 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 107 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 8c860112ce05..71daa0934b6c 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * This is a module which is used for queueing packets and communicating with 2 * This is a module which is used for queueing packets and communicating with
3 * userspace via nfetlink. 3 * userspace via nfnetlink.
4 * 4 *
5 * (C) 2005 by Harald Welte <laforge@netfilter.org> 5 * (C) 2005 by Harald Welte <laforge@netfilter.org>
6 * (C) 2007 by Patrick McHardy <kaber@trash.net> 6 * (C) 2007 by Patrick McHardy <kaber@trash.net>
@@ -932,6 +932,8 @@ static void __exit nfnetlink_queue_fini(void)
932#endif 932#endif
933 nfnetlink_subsys_unregister(&nfqnl_subsys); 933 nfnetlink_subsys_unregister(&nfqnl_subsys);
934 netlink_unregister_notifier(&nfqnl_rtnl_notifier); 934 netlink_unregister_notifier(&nfqnl_rtnl_notifier);
935
936 rcu_barrier(); /* Wait for completion of call_rcu()'s */
935} 937}
936 938
937MODULE_DESCRIPTION("netfilter packet queue handler"); 939MODULE_DESCRIPTION("netfilter packet queue handler");
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index 851f6a3f8ddd..480839dfc560 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -115,10 +115,10 @@ static int gprs_recv(struct gprs_dev *gp, struct sk_buff *skb)
115 rskb->truesize += rskb->len; 115 rskb->truesize += rskb->len;
116 116
117 /* Avoid nested fragments */ 117 /* Avoid nested fragments */
118 for (fs = skb_shinfo(skb)->frag_list; fs; fs = fs->next) 118 skb_walk_frags(skb, fs)
119 flen += fs->len; 119 flen += fs->len;
120 skb->next = skb_shinfo(skb)->frag_list; 120 skb->next = skb_shinfo(skb)->frag_list;
121 skb_shinfo(skb)->frag_list = NULL; 121 skb_frag_list_init(skb);
122 skb->len -= flen; 122 skb->len -= flen;
123 skb->data_len -= flen; 123 skb->data_len -= flen;
124 skb->truesize -= flen; 124 skb->truesize -= flen;
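
Several hunks in this section (here, in pep.c, and in the sctp and xfrm files below) replace open-coded frag_list loops with skb_walk_frags() and skb_frag_list_init(). As far as these conversions show, the helpers are thin accessors roughly equivalent to the sketch below; the authoritative definitions are in include/linux/skbuff.h, and the _sketch suffix marks my re-statements:

#include <linux/skbuff.h>

static inline void skb_frag_list_init_sketch(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

#define skb_walk_frags_sketch(skb, iter) \
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

static unsigned int frag_total_len(struct sk_buff *skb)
{
	struct sk_buff *fs;
	unsigned int flen = 0;

	skb_walk_frags_sketch(skb, fs)	/* mirrors the flen loop above */
		flen += fs->len;
	return flen;
}
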
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 8ad2b5333881..eef833ea6d7b 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -940,10 +940,10 @@ int pep_write(struct sock *sk, struct sk_buff *skb)
940 rskb->truesize += rskb->len; 940 rskb->truesize += rskb->len;
941 941
942 /* Avoid nested fragments */ 942 /* Avoid nested fragments */
943 for (fs = skb_shinfo(skb)->frag_list; fs; fs = fs->next) 943 skb_walk_frags(skb, fs)
944 flen += fs->len; 944 flen += fs->len;
945 skb->next = skb_shinfo(skb)->frag_list; 945 skb->next = skb_shinfo(skb)->frag_list;
946 skb_shinfo(skb)->frag_list = NULL; 946 skb_frag_list_init(skb);
947 skb->len -= flen; 947 skb->len -= flen;
948 skb->data_len -= flen; 948 skb->data_len -= flen;
949 skb->truesize -= flen; 949 skb->truesize -= flen;
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index fd7600d8ab14..eaf765876458 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -18,7 +18,7 @@ config RFKILL_LEDS
18 default y 18 default y
19 19
20config RFKILL_INPUT 20config RFKILL_INPUT
21 bool "RF switch input support" 21 bool "RF switch input support" if EMBEDDED
22 depends on RFKILL 22 depends on RFKILL
23 depends on INPUT = y || RFKILL = INPUT 23 depends on INPUT = y || RFKILL = INPUT
24 default y if !EMBEDDED 24 default y if !EMBEDDED
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 11b7314723df..4e68ab439d5d 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -57,6 +57,7 @@ struct rfkill {
57 57
58 bool registered; 58 bool registered;
59 bool suspended; 59 bool suspended;
60 bool persistent;
60 61
61 const struct rfkill_ops *ops; 62 const struct rfkill_ops *ops;
62 void *data; 63 void *data;
@@ -116,11 +117,9 @@ MODULE_PARM_DESC(default_state,
116 "Default initial state for all radio types, 0 = radio off"); 117 "Default initial state for all radio types, 0 = radio off");
117 118
118static struct { 119static struct {
119 bool cur, def; 120 bool cur, sav;
120} rfkill_global_states[NUM_RFKILL_TYPES]; 121} rfkill_global_states[NUM_RFKILL_TYPES];
121 122
122static unsigned long rfkill_states_default_locked;
123
124static bool rfkill_epo_lock_active; 123static bool rfkill_epo_lock_active;
125 124
126 125
@@ -392,7 +391,7 @@ void rfkill_epo(void)
392 rfkill_set_block(rfkill, true); 391 rfkill_set_block(rfkill, true);
393 392
394 for (i = 0; i < NUM_RFKILL_TYPES; i++) { 393 for (i = 0; i < NUM_RFKILL_TYPES; i++) {
395 rfkill_global_states[i].def = rfkill_global_states[i].cur; 394 rfkill_global_states[i].sav = rfkill_global_states[i].cur;
396 rfkill_global_states[i].cur = true; 395 rfkill_global_states[i].cur = true;
397 } 396 }
398 397
@@ -417,7 +416,7 @@ void rfkill_restore_states(void)
417 416
418 rfkill_epo_lock_active = false; 417 rfkill_epo_lock_active = false;
419 for (i = 0; i < NUM_RFKILL_TYPES; i++) 418 for (i = 0; i < NUM_RFKILL_TYPES; i++)
420 __rfkill_switch_all(i, rfkill_global_states[i].def); 419 __rfkill_switch_all(i, rfkill_global_states[i].sav);
421 mutex_unlock(&rfkill_global_mutex); 420 mutex_unlock(&rfkill_global_mutex);
422} 421}
423 422
@@ -464,29 +463,6 @@ bool rfkill_get_global_sw_state(const enum rfkill_type type)
464} 463}
465#endif 464#endif
466 465
467void rfkill_set_global_sw_state(const enum rfkill_type type, bool blocked)
468{
469 BUG_ON(type == RFKILL_TYPE_ALL);
470
471 mutex_lock(&rfkill_global_mutex);
472
473 /* don't allow unblock when epo */
474 if (rfkill_epo_lock_active && !blocked)
475 goto out;
476
477 /* too late */
478 if (rfkill_states_default_locked & BIT(type))
479 goto out;
480
481 rfkill_states_default_locked |= BIT(type);
482
483 rfkill_global_states[type].cur = blocked;
484 rfkill_global_states[type].def = blocked;
485 out:
486 mutex_unlock(&rfkill_global_mutex);
487}
488EXPORT_SYMBOL(rfkill_set_global_sw_state);
489
490 466
491bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) 467bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
492{ 468{
@@ -532,13 +508,14 @@ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
532 blocked = blocked || hwblock; 508 blocked = blocked || hwblock;
533 spin_unlock_irqrestore(&rfkill->lock, flags); 509 spin_unlock_irqrestore(&rfkill->lock, flags);
534 510
535 if (!rfkill->registered) 511 if (!rfkill->registered) {
536 return blocked; 512 rfkill->persistent = true;
537 513 } else {
538 if (prev != blocked && !hwblock) 514 if (prev != blocked && !hwblock)
539 schedule_work(&rfkill->uevent_work); 515 schedule_work(&rfkill->uevent_work);
540 516
541 rfkill_led_trigger_event(rfkill); 517 rfkill_led_trigger_event(rfkill);
518 }
542 519
543 return blocked; 520 return blocked;
544} 521}
@@ -563,13 +540,14 @@ void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
563 540
564 spin_unlock_irqrestore(&rfkill->lock, flags); 541 spin_unlock_irqrestore(&rfkill->lock, flags);
565 542
566 if (!rfkill->registered) 543 if (!rfkill->registered) {
567 return; 544 rfkill->persistent = true;
568 545 } else {
569 if (swprev != sw || hwprev != hw) 546 if (swprev != sw || hwprev != hw)
570 schedule_work(&rfkill->uevent_work); 547 schedule_work(&rfkill->uevent_work);
571 548
572 rfkill_led_trigger_event(rfkill); 549 rfkill_led_trigger_event(rfkill);
550 }
573} 551}
574EXPORT_SYMBOL(rfkill_set_states); 552EXPORT_SYMBOL(rfkill_set_states);
575 553
@@ -750,15 +728,11 @@ static int rfkill_resume(struct device *dev)
750 struct rfkill *rfkill = to_rfkill(dev); 728 struct rfkill *rfkill = to_rfkill(dev);
751 bool cur; 729 bool cur;
752 730
753 mutex_lock(&rfkill_global_mutex); 731 cur = !!(rfkill->state & RFKILL_BLOCK_SW);
754 cur = rfkill_global_states[rfkill->type].cur;
755 rfkill_set_block(rfkill, cur); 732 rfkill_set_block(rfkill, cur);
756 mutex_unlock(&rfkill_global_mutex);
757 733
758 rfkill->suspended = false; 734 rfkill->suspended = false;
759 735
760 schedule_work(&rfkill->uevent_work);
761
762 rfkill_resume_polling(rfkill); 736 rfkill_resume_polling(rfkill);
763 737
764 return 0; 738 return 0;
@@ -888,15 +862,6 @@ int __must_check rfkill_register(struct rfkill *rfkill)
888 dev_set_name(dev, "rfkill%lu", rfkill_no); 862 dev_set_name(dev, "rfkill%lu", rfkill_no);
889 rfkill_no++; 863 rfkill_no++;
890 864
891 if (!(rfkill_states_default_locked & BIT(rfkill->type))) {
892 /* first of its kind */
893 BUILD_BUG_ON(NUM_RFKILL_TYPES >
894 sizeof(rfkill_states_default_locked) * 8);
895 rfkill_states_default_locked |= BIT(rfkill->type);
896 rfkill_global_states[rfkill->type].cur =
897 rfkill_global_states[rfkill->type].def;
898 }
899
900 list_add_tail(&rfkill->node, &rfkill_list); 865 list_add_tail(&rfkill->node, &rfkill_list);
901 866
902 error = device_add(dev); 867 error = device_add(dev);
@@ -916,7 +881,17 @@ int __must_check rfkill_register(struct rfkill *rfkill)
916 if (rfkill->ops->poll) 881 if (rfkill->ops->poll)
917 schedule_delayed_work(&rfkill->poll_work, 882 schedule_delayed_work(&rfkill->poll_work,
918 round_jiffies_relative(POLL_INTERVAL)); 883 round_jiffies_relative(POLL_INTERVAL));
919 schedule_work(&rfkill->sync_work); 884
885 if (!rfkill->persistent || rfkill_epo_lock_active) {
886 schedule_work(&rfkill->sync_work);
887 } else {
888#ifdef CONFIG_RFKILL_INPUT
889 bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);
890
891 if (!atomic_read(&rfkill_input_disabled))
892 __rfkill_switch_all(rfkill->type, soft_blocked);
893#endif
894 }
920 895
921 rfkill_send_events(rfkill, RFKILL_OP_ADD); 896 rfkill_send_events(rfkill, RFKILL_OP_ADD);
922 897
@@ -1134,7 +1109,8 @@ static int rfkill_fop_release(struct inode *inode, struct file *file)
1134 1109
1135#ifdef CONFIG_RFKILL_INPUT 1110#ifdef CONFIG_RFKILL_INPUT
1136 if (data->input_handler) 1111 if (data->input_handler)
1137 atomic_dec(&rfkill_input_disabled); 1112 if (atomic_dec_return(&rfkill_input_disabled) == 0)
1113 printk(KERN_DEBUG "rfkill: input handler enabled\n");
1138#endif 1114#endif
1139 1115
1140 kfree(data); 1116 kfree(data);
@@ -1157,7 +1133,8 @@ static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
1157 mutex_lock(&data->mtx); 1133 mutex_lock(&data->mtx);
1158 1134
1159 if (!data->input_handler) { 1135 if (!data->input_handler) {
1160 atomic_inc(&rfkill_input_disabled); 1136 if (atomic_inc_return(&rfkill_input_disabled) == 1)
1137 printk(KERN_DEBUG "rfkill: input handler disabled\n");
1161 data->input_handler = true; 1138 data->input_handler = true;
1162 } 1139 }
1163 1140
@@ -1191,7 +1168,7 @@ static int __init rfkill_init(void)
1191 int i; 1168 int i;
1192 1169
1193 for (i = 0; i < NUM_RFKILL_TYPES; i++) 1170 for (i = 0; i < NUM_RFKILL_TYPES; i++)
1194 rfkill_global_states[i].def = !rfkill_default_state; 1171 rfkill_global_states[i].cur = !rfkill_default_state;
1195 1172
1196 error = class_register(&rfkill_class); 1173 error = class_register(&rfkill_class);
1197 if (error) 1174 if (error)
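
The rfkill_fop_ioctl()/rfkill_fop_release() hunks above switch from plain atomic_inc()/atomic_dec() to the _return variants so the debug message fires only on the first disable and the final re-enable, not on every open and close of the control device. The same pattern in isolation (a kernel-style sketch; function and message names are made up):

#include <asm/atomic.h>
#include <linux/kernel.h>

static atomic_t handler_disabled = ATOMIC_INIT(0);

static void handler_disable(void)
{
	if (atomic_inc_return(&handler_disabled) == 1)	/* first user only */
		printk(KERN_DEBUG "sketch: input handler disabled\n");
}

static void handler_enable(void)
{
	if (atomic_dec_return(&handler_disabled) == 0)	/* last user only */
		printk(KERN_DEBUG "sketch: input handler enabled\n");
}
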
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 5022f9c1f34b..362c2811b2df 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -372,7 +372,7 @@ cftree_update(struct hfsc_class *cl)
372 * ism: (psched_us/byte) << ISM_SHIFT 372 * ism: (psched_us/byte) << ISM_SHIFT
373 * dx: psched_us 373 * dx: psched_us
374 * 374 *
375 * The clock source resolution with ktime is 1.024us. 375 * The clock source resolution with ktime and PSCHED_SHIFT 10 is 1.024us.
376 * 376 *
377 * sm and ism are scaled in order to keep effective digits. 377 * sm and ism are scaled in order to keep effective digits.
378 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective 378 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
@@ -383,9 +383,11 @@ cftree_update(struct hfsc_class *cl)
383 * bytes/1.024us 12.8e-3 128e-3 1280e-3 12800e-3 128000e-3 383 * bytes/1.024us 12.8e-3 128e-3 1280e-3 12800e-3 128000e-3
384 * 384 *
385 * 1.024us/byte 78.125 7.8125 0.78125 0.078125 0.0078125 385 * 1.024us/byte 78.125 7.8125 0.78125 0.078125 0.0078125
386 *
387 * So, for PSCHED_SHIFT 10 we need: SM_SHIFT 20, ISM_SHIFT 18.
386 */ 388 */
387#define SM_SHIFT 20 389#define SM_SHIFT (30 - PSCHED_SHIFT)
388#define ISM_SHIFT 18 390#define ISM_SHIFT (8 + PSCHED_SHIFT)
389 391
390#define SM_MASK ((1ULL << SM_SHIFT) - 1) 392#define SM_MASK ((1ULL << SM_SHIFT) - 1)
391#define ISM_MASK ((1ULL << ISM_SHIFT) - 1) 393#define ISM_MASK ((1ULL << ISM_SHIFT) - 1)
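
The HFSC shift macros are now derived from PSCHED_SHIFT rather than hard-coded, and the updated comment notes that a PSCHED_SHIFT of 10 must reproduce the old constants. A quick user-space check of that arithmetic:

#include <assert.h>

#define PSCHED_SHIFT	10			/* per the updated comment */
#define SM_SHIFT	(30 - PSCHED_SHIFT)	/* = 20, the old value */
#define ISM_SHIFT	(8 + PSCHED_SHIFT)	/* = 18, the old value */

int main(void)
{
	assert(SM_SHIFT == 20);
	assert(ISM_SHIFT == 18);
	return 0;
}
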
diff --git a/net/sctp/input.c b/net/sctp/input.c
index d2e98803ffe3..c0c973e67add 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -81,13 +81,13 @@ static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
81/* Calculate the SCTP checksum of an SCTP packet. */ 81/* Calculate the SCTP checksum of an SCTP packet. */
82static inline int sctp_rcv_checksum(struct sk_buff *skb) 82static inline int sctp_rcv_checksum(struct sk_buff *skb)
83{ 83{
84 struct sk_buff *list = skb_shinfo(skb)->frag_list;
85 struct sctphdr *sh = sctp_hdr(skb); 84 struct sctphdr *sh = sctp_hdr(skb);
86 __le32 cmp = sh->checksum; 85 __le32 cmp = sh->checksum;
86 struct sk_buff *list;
87 __le32 val; 87 __le32 val;
88 __u32 tmp = sctp_start_cksum((__u8 *)sh, skb_headlen(skb)); 88 __u32 tmp = sctp_start_cksum((__u8 *)sh, skb_headlen(skb));
89 89
90 for (; list; list = list->next) 90 skb_walk_frags(skb, list)
91 tmp = sctp_update_cksum((__u8 *)list->data, skb_headlen(list), 91 tmp = sctp_update_cksum((__u8 *)list->data, skb_headlen(list),
92 tmp); 92 tmp);
93 93
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index cb2c50dbd421..79cbd47f4df7 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1370,6 +1370,8 @@ SCTP_STATIC __exit void sctp_exit(void)
1370 sctp_proc_exit(); 1370 sctp_proc_exit();
1371 cleanup_sctp_mibs(); 1371 cleanup_sctp_mibs();
1372 1372
1373 rcu_barrier(); /* Wait for completion of call_rcu()'s */
1374
1373 kmem_cache_destroy(sctp_chunk_cachep); 1375 kmem_cache_destroy(sctp_chunk_cachep);
1374 kmem_cache_destroy(sctp_bucket_cachep); 1376 kmem_cache_destroy(sctp_bucket_cachep);
1375} 1377}
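
This hunk, like the nfnetlink_queue and auth_gss ones in this section, adds rcu_barrier() on the module-exit path: anything freed through call_rcu() must have its callback run before the backing kmem_cache (or the module text) goes away. A minimal kernel-style sketch of the pattern, with made-up names (my_obj, my_cache):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	struct rcu_head rcu;
	/* ...payload... */
};

static struct kmem_cache *my_cache;

static void my_obj_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(my_cache, container_of(head, struct my_obj, rcu));
}

static void my_obj_release(struct my_obj *obj)
{
	call_rcu(&obj->rcu, my_obj_free_rcu);	/* freed after a grace period */
}

static void __exit my_module_exit(void)
{
	/* unregister hooks first so nothing new can call my_obj_release() */
	rcu_barrier();		/* wait for every queued my_obj_free_rcu() */
	kmem_cache_destroy(my_cache);
}
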
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 7c3dfd2d9489..0f01e5d8a24f 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1881,7 +1881,7 @@ static int sctp_skb_pull(struct sk_buff *skb, int len)
1881 len -= skb_len; 1881 len -= skb_len;
1882 __skb_pull(skb, skb_len); 1882 __skb_pull(skb, skb_len);
1883 1883
1884 for (list = skb_shinfo(skb)->frag_list; list; list = list->next) { 1884 skb_walk_frags(skb, list) {
1885 rlen = sctp_skb_pull(list, len); 1885 rlen = sctp_skb_pull(list, len);
1886 skb->len -= (len-rlen); 1886 skb->len -= (len-rlen);
1887 skb->data_len -= (len-rlen); 1887 skb->data_len -= (len-rlen);
@@ -6660,7 +6660,7 @@ static void sctp_sock_rfree_frag(struct sk_buff *skb)
6660 goto done; 6660 goto done;
6661 6661
6662 /* Don't forget the fragments. */ 6662 /* Don't forget the fragments. */
6663 for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) 6663 skb_walk_frags(skb, frag)
6664 sctp_sock_rfree_frag(frag); 6664 sctp_sock_rfree_frag(frag);
6665 6665
6666done: 6666done:
@@ -6675,7 +6675,7 @@ static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
6675 goto done; 6675 goto done;
6676 6676
6677 /* Don't forget the fragments. */ 6677 /* Don't forget the fragments. */
6678 for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) 6678 skb_walk_frags(skb, frag)
6679 sctp_skb_set_owner_r_frag(frag, sk); 6679 sctp_skb_set_owner_r_frag(frag, sk);
6680 6680
6681done: 6681done:
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 5f186ca550d7..8b3560fd876d 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -976,9 +976,8 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
976 * In general, the skb passed from IP can have only 1 level of 976 * In general, the skb passed from IP can have only 1 level of
977 * fragments. But we allow multiple levels of fragments. 977 * fragments. But we allow multiple levels of fragments.
978 */ 978 */
979 for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) { 979 skb_walk_frags(skb, frag)
980 sctp_ulpevent_receive_data(sctp_skb2event(frag), asoc); 980 sctp_ulpevent_receive_data(sctp_skb2event(frag), asoc);
981 }
982} 981}
983 982
984/* Do accounting for bytes just read by user and release the references to 983/* Do accounting for bytes just read by user and release the references to
@@ -1003,7 +1002,7 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
1003 goto done; 1002 goto done;
1004 1003
1005 /* Don't forget the fragments. */ 1004 /* Don't forget the fragments. */
1006 for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) { 1005 skb_walk_frags(skb, frag) {
1007 /* NOTE: skb_shinfos are recursive. Although IP returns 1006 /* NOTE: skb_shinfos are recursive. Although IP returns
1008 * skb's with only 1 level of fragments, SCTP reassembly can 1007 * skb's with only 1 level of fragments, SCTP reassembly can
1009 * increase the levels. 1008 * increase the levels.
@@ -1026,7 +1025,7 @@ static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event)
1026 goto done; 1025 goto done;
1027 1026
1028 /* Don't forget the fragments. */ 1027 /* Don't forget the fragments. */
1029 for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) { 1028 skb_walk_frags(skb, frag) {
1030 /* NOTE: skb_shinfos are recursive. Although IP returns 1029 /* NOTE: skb_shinfos are recursive. Although IP returns
1031 * skb's with only 1 level of fragments, SCTP reassembly can 1030 * skb's with only 1 level of fragments, SCTP reassembly can
1032 * increase the levels. 1031 * increase the levels.
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index e630b38a6047..66d458fc6920 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1548,6 +1548,7 @@ static void __exit exit_rpcsec_gss(void)
1548{ 1548{
1549 gss_svc_shutdown(); 1549 gss_svc_shutdown();
1550 rpcauth_unregister(&authgss_ops); 1550 rpcauth_unregister(&authgss_ops);
1551 rcu_barrier(); /* Wait for completion of call_rcu()'s */
1551} 1552}
1552 1553
1553MODULE_LICENSE("GPL"); 1554MODULE_LICENSE("GPL");
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 3b74b88e10a3..d5850292b3df 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -395,21 +395,23 @@ int wiphy_register(struct wiphy *wiphy)
395 /* check and set up bitrates */ 395 /* check and set up bitrates */
396 ieee80211_set_bitrate_flags(wiphy); 396 ieee80211_set_bitrate_flags(wiphy);
397 397
398 mutex_lock(&cfg80211_mutex);
399
400 /* set up regulatory info */
401 wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
402
403 res = device_add(&drv->wiphy.dev); 398 res = device_add(&drv->wiphy.dev);
404 if (res) 399 if (res)
405 goto out_unlock; 400 return res;
406 401
407 res = rfkill_register(drv->rfkill); 402 res = rfkill_register(drv->rfkill);
408 if (res) 403 if (res)
409 goto out_rm_dev; 404 goto out_rm_dev;
410 405
406 mutex_lock(&cfg80211_mutex);
407
408 /* set up regulatory info */
409 wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
410
411 list_add(&drv->list, &cfg80211_drv_list); 411 list_add(&drv->list, &cfg80211_drv_list);
412 412
413 mutex_unlock(&cfg80211_mutex);
414
413 /* add to debugfs */ 415 /* add to debugfs */
414 drv->wiphy.debugfsdir = 416 drv->wiphy.debugfsdir =
415 debugfs_create_dir(wiphy_name(&drv->wiphy), 417 debugfs_create_dir(wiphy_name(&drv->wiphy),
@@ -430,13 +432,10 @@ int wiphy_register(struct wiphy *wiphy)
430 432
431 cfg80211_debugfs_drv_add(drv); 433 cfg80211_debugfs_drv_add(drv);
432 434
433 res = 0; 435 return 0;
434 goto out_unlock;
435 436
436 out_rm_dev: 437 out_rm_dev:
437 device_del(&drv->wiphy.dev); 438 device_del(&drv->wiphy.dev);
438 out_unlock:
439 mutex_unlock(&cfg80211_mutex);
440 return res; 439 return res;
441} 440}
442EXPORT_SYMBOL(wiphy_register); 441EXPORT_SYMBOL(wiphy_register);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index ea4c299fbe3b..5e14371cda70 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2129,7 +2129,12 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2129 * driver wanted to the wiphy to deal with conflicts 2129 * driver wanted to the wiphy to deal with conflicts
2130 */ 2130 */
2131 2131
2132 BUG_ON(request_wiphy->regd); 2132 /*
2133 * Userspace could have sent two replies with only
2134 * one kernel request.
2135 */
2136 if (request_wiphy->regd)
2137 return -EALREADY;
2133 2138
2134 r = reg_copy_regd(&request_wiphy->regd, rd); 2139 r = reg_copy_regd(&request_wiphy->regd, rd);
2135 if (r) 2140 if (r)
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 96036cf2216d..d31ccb487730 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -696,8 +696,9 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
696{ 696{
697 int start = skb_headlen(skb); 697 int start = skb_headlen(skb);
698 int i, copy = start - offset; 698 int i, copy = start - offset;
699 int err; 699 struct sk_buff *frag_iter;
700 struct scatterlist sg; 700 struct scatterlist sg;
701 int err;
701 702
702 /* Checksum header. */ 703 /* Checksum header. */
703 if (copy > 0) { 704 if (copy > 0) {
@@ -742,28 +743,24 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
742 start = end; 743 start = end;
743 } 744 }
744 745
745 if (skb_shinfo(skb)->frag_list) { 746 skb_walk_frags(skb, frag_iter) {
746 struct sk_buff *list = skb_shinfo(skb)->frag_list; 747 int end;
747 748
748 for (; list; list = list->next) { 749 WARN_ON(start > offset + len);
749 int end; 750
750 751 end = start + frag_iter->len;
751 WARN_ON(start > offset + len); 752 if ((copy = end - offset) > 0) {
752 753 if (copy > len)
753 end = start + list->len; 754 copy = len;
754 if ((copy = end - offset) > 0) { 755 err = skb_icv_walk(frag_iter, desc, offset-start,
755 if (copy > len) 756 copy, icv_update);
756 copy = len; 757 if (unlikely(err))
757 err = skb_icv_walk(list, desc, offset-start, 758 return err;
758 copy, icv_update); 759 if ((len -= copy) == 0)
759 if (unlikely(err)) 760 return 0;
760 return err; 761 offset += copy;
761 if ((len -= copy) == 0)
762 return 0;
763 offset += copy;
764 }
765 start = end;
766 } 762 }
763 start = end;
767 } 764 }
768 BUG_ON(len); 765 BUG_ON(len);
769 return 0; 766 return 0;