author    Linus Torvalds <torvalds@linux-foundation.org>  2014-01-06 19:16:28 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-01-06 19:16:28 -0500
commit    a707271a8180eb60edc3bd9dc3cb425c7547fd76 (patch)
tree      89e99bfe15f6fa5379555c2c57f38ea9502a2d90 /net
parent    f0a679afefc0b6288310f88606b4bb1f243f1aa9 (diff)
parent    fe0d692bbc645786bce1a98439e548ae619269f5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "I'm hoping this is the very last batch of networking fixes for 3.13,
  here goes nothing:

   1) Fix crashes in VLAN's header_ops passthru.

   2) Bridge multicast code needs to use BH spinlocks to prevent
      deadlocks with timers.  From Curt Brune.

   3) ipv6 tunnels lack proper synchronization when updating percpu
      statistics.  From Li RongQing.

   4) Fixes to bnx2x driver from Yaniv Rosner, Dmitry Kravkov and
      Michal Kalderon.

   5) Avoid undefined operator evaluation order in llc code, from
      Daniel Borkmann.

   6) Error paths in various GSO offload paths do not unwind properly,
      in particular they must undo any modifications they have made to
      the SKB.  From Wei-Chun Chao.

   7) Fix RX refill races during restore in virtio-net, from Jason
      Wang.

   8) Fix SKB use after free in LLC code, from Daniel Borkmann.

   9) Missing unlock and OOPS in netpoll code when VLAN tag handling
      fails.

  10) Fix vxlan device attachment wrt ipv6, from Fan Du.

  11) Don't allow creating infiniband links to non-infiniband devices,
      from Hangbin Liu.

  12) Revert FEC phy reset active low change, it breaks things.  From
      Fabio Estevam.

  13) Fix header pointer handling in 6lowpan header building code,
      from Daniel Borkmann.

  14) Fix RSS handling in be2net driver, from Vasundhara Volam.

  15) Fix modem port indexing in HSO driver, from Dan Williams"

* http://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (38 commits)
  bridge: use spin_lock_bh() in br_multicast_set_hash_max
  ipv6: don't install anycast address for /128 addresses on routers
  hso: fix handling of modem port SERIAL_STATE notifications
  isdn: Drop big endian cpp checks from telespci and hfc_pci drivers
  be2net: fix max_evt_qs calculation for BE3 in SR-IOV config
  be2net: increase the timeout value for loopback-test FW cmd
  be2net: disable RSS when number of RXQs is reduced to 1 via set-channels
  xen-netback: Include header for vmalloc
  net: 6lowpan: fix lowpan_header_create non-compression memcpy call
  fec: Revert "fec: Do not assume that PHY reset is active low"
  bnx2x: fix VLAN configuration for VFs.
  bnx2x: fix AFEX memory overflow
  bnx2x: Clean before update RSS arrives
  bnx2x: Correct number of MSI-X vectors for VFs
  bnx2x: limit number of interrupt vectors for 57711
  qlcnic: Fix bug in Tx completion path
  infiniband: make sure the src net is infiniband when create new link
  {vxlan, inet6} Mark vxlan_dev flags with VXLAN_F_IPV6 properly
  cxgb4: allow large buffer size to have page size
  netpoll: Fix missing TXQ unlock and and OOPS.
  ...
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_dev.c       | 19
-rw-r--r--  net/bridge/br_multicast.c  |  4
-rw-r--r--  net/core/neighbour.c       |  2
-rw-r--r--  net/core/netpoll.c         | 11
-rw-r--r--  net/ieee802154/6lowpan.c   |  2
-rw-r--r--  net/ipv4/gre_offload.c     | 11
-rw-r--r--  net/ipv4/udp.c             |  6
-rw-r--r--  net/ipv4/udp_offload.c     | 37
-rw-r--r--  net/ipv6/addrconf.c        | 17
-rw-r--r--  net/ipv6/ip6_tunnel.c      | 21
-rw-r--r--  net/ipv6/ip6_vti.c         | 24
-rw-r--r--  net/ipv6/sit.c             |  2
-rw-r--r--  net/llc/af_llc.c           |  5
-rw-r--r--  net/sctp/outqueue.c        | 32
14 files changed, 105 insertions, 88 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 762896ebfcf5..47c908f1f626 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -530,6 +530,23 @@ static const struct header_ops vlan_header_ops = {
 	.parse	 = eth_header_parse,
 };
 
+static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
+				     unsigned short type,
+				     const void *daddr, const void *saddr,
+				     unsigned int len)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	struct net_device *real_dev = vlan->real_dev;
+
+	return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
+}
+
+static const struct header_ops vlan_passthru_header_ops = {
+	.create	 = vlan_passthru_hard_header,
+	.rebuild = dev_rebuild_header,
+	.parse	 = eth_header_parse,
+};
+
 static struct device_type vlan_type = {
 	.name	= "vlan",
 };
@@ -573,7 +590,7 @@ static int vlan_dev_init(struct net_device *dev)
 
 	dev->needed_headroom = real_dev->needed_headroom;
 	if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
-		dev->header_ops      = real_dev->header_ops;
+		dev->header_ops      = &vlan_passthru_header_ops;
 		dev->hard_header_len = real_dev->hard_header_len;
 	} else {
 		dev->header_ops      = &vlan_header_ops;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 4c214b2b88ef..ef66365b7354 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1998,7 +1998,7 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
 	u32 old;
 	struct net_bridge_mdb_htable *mdb;
 
-	spin_lock(&br->multicast_lock);
+	spin_lock_bh(&br->multicast_lock);
 	if (!netif_running(br->dev))
 		goto unlock;
 
@@ -2030,7 +2030,7 @@ rollback:
 	}
 
 unlock:
-	spin_unlock(&br->multicast_lock);
+	spin_unlock_bh(&br->multicast_lock);
 
 	return err;
 }
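
Why the _bh variant matters here: br_multicast_set_hash_max() runs in process context but shares multicast_lock with the bridge's timer callbacks, which run in softirq context. A minimal kernel-style sketch of the pattern (hypothetical my_lock/my_timer_fn names, 3.13-era timer API; illustrative only, not from this commit):

#include <linux/spinlock.h>
#include <linux/timer.h>

static DEFINE_SPINLOCK(my_lock);
static struct timer_list my_timer;

/* runs in softirq context */
static void my_timer_fn(unsigned long data)
{
	spin_lock(&my_lock);	/* plain lock is fine: softirqs don't nest */
	/* ... touch state shared with process context ... */
	spin_unlock(&my_lock);
}

/* runs in process context, e.g. from a sysfs store */
static void my_update(void)
{
	/* A plain spin_lock() here could deadlock: if my_timer_fn fires
	 * on this CPU while we hold the lock, the softirq spins on it
	 * forever.  spin_lock_bh() disables softirqs locally first. */
	spin_lock_bh(&my_lock);
	/* ... modify the shared state ... */
	spin_unlock_bh(&my_lock);
}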
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 36b1443f9ae4..932c6d7cf666 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1275,7 +1275,7 @@ int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
 
 	if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
 			    skb->len) < 0 &&
-	    dev->header_ops->rebuild(skb))
+	    dev_rebuild_header(skb))
 		return 0;
 
 	return dev_queue_xmit(skb);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 8f971990677c..303097874633 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -386,8 +386,14 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 				!vlan_hw_offload_capable(netif_skb_features(skb),
 							 skb->vlan_proto)) {
 				skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
-				if (unlikely(!skb))
-					break;
+				if (unlikely(!skb)) {
+					/* This is actually a packet drop, but we
+					 * don't want the code at the end of this
+					 * function to try and re-queue a NULL skb.
+					 */
+					status = NETDEV_TX_OK;
+					goto unlock_txq;
+				}
 				skb->vlan_tci = 0;
 			}
 
@@ -395,6 +401,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 			if (status == NETDEV_TX_OK)
 				txq_trans_update(txq);
 		}
+	unlock_txq:
 		__netif_tx_unlock(txq);
 
 		if (status == NETDEV_TX_OK)
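
The shape of this fix is the classic single-exit idiom: every early bail-out jumps to one label that releases the queue lock, so no path (here, the NULL skb from a failed __vlan_put_tag()) can leave the txq locked or fall through to the re-queue logic. A hedged userspace sketch, with prepare()/xmit() as stand-ins for the real driver hooks:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t txq_lock = PTHREAD_MUTEX_INITIALIZER;

/* hypothetical stand-ins for the driver calls */
static bool prepare(int pkt) { return pkt >= 0; }
static int  xmit(int pkt)    { (void)pkt; return 0; /* "TX_OK" */ }

static int send_one(int pkt)
{
	int status;

	pthread_mutex_lock(&txq_lock);
	if (!prepare(pkt)) {
		status = 0;	/* a drop is still "handled": report OK, */
		goto unlock;	/* but leave through the one unlock path */
	}
	status = xmit(pkt);
unlock:
	pthread_mutex_unlock(&txq_lock);
	return status;
}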
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 459e200c08a4..a2d2456a557a 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -547,7 +547,7 @@ static int lowpan_header_create(struct sk_buff *skb,
 			hc06_ptr += 3;
 		} else {
 			/* compress nothing */
-			memcpy(hc06_ptr, &hdr, 4);
+			memcpy(hc06_ptr, hdr, 4);
 			/* replace the top byte with new ECN | DSCP format */
 			*hc06_ptr = tmp;
 			hc06_ptr += 4;
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index e5d436188464..2cd02f32f99f 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -28,6 +28,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 	netdev_features_t enc_features;
 	int ghl = GRE_HEADER_SECTION;
 	struct gre_base_hdr *greh;
+	u16 mac_offset = skb->mac_header;
 	int mac_len = skb->mac_len;
 	__be16 protocol = skb->protocol;
 	int tnl_hlen;
@@ -58,13 +59,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 	} else
 		csum = false;
 
+	if (unlikely(!pskb_may_pull(skb, ghl)))
+		goto out;
+
 	/* setup inner skb. */
 	skb->protocol = greh->protocol;
 	skb->encapsulation = 0;
 
-	if (unlikely(!pskb_may_pull(skb, ghl)))
-		goto out;
-
 	__skb_pull(skb, ghl);
 	skb_reset_mac_header(skb);
 	skb_set_network_header(skb, skb_inner_network_offset(skb));
@@ -73,8 +74,10 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 	/* segment inner packet. */
 	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
 	segs = skb_mac_gso_segment(skb, enc_features);
-	if (!segs || IS_ERR(segs))
+	if (!segs || IS_ERR(segs)) {
+		skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
 		goto out;
+	}
 
 	skb = segs;
 	tnl_hlen = skb_tnl_header_len(skb);
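
This fix and the udp.c one below follow the same unwind discipline from Wei-Chun Chao's series: snapshot the header state (mac_offset, mac_len) before mutating the skb, and call skb_gso_error_unwind() on failure so the caller gets back the skb it handed in. A generic hedged sketch of the idea, with pkt and do_segment() as illustrative stand-ins:

#include <stddef.h>

struct pkt {
	size_t mac_off;		/* offsets the transform will rewrite */
	size_t mac_len;
};

/* hypothetical segmentation step that can fail */
extern int do_segment(struct pkt *p);

static int transform(struct pkt *p)
{
	struct pkt saved = *p;	/* snapshot before any modification */

	p->mac_off += 4;	/* mutate headers in place */
	p->mac_len -= 4;
	if (do_segment(p) < 0) {
		*p = saved;	/* unwind: caller sees the original pkt */
		return -1;
	}
	return 0;
}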
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f140048334ce..a7e4729e974b 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2478,6 +2478,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 				       netdev_features_t features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
+	u16 mac_offset = skb->mac_header;
 	int mac_len = skb->mac_len;
 	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
 	__be16 protocol = skb->protocol;
@@ -2497,8 +2498,11 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 	/* segment inner packet. */
 	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
 	segs = skb_mac_gso_segment(skb, enc_features);
-	if (!segs || IS_ERR(segs))
+	if (!segs || IS_ERR(segs)) {
+		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
+				     mac_len);
 		goto out;
+	}
 
 	outer_hlen = skb_tnl_header_len(skb);
 	skb = segs;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 83206de2bc76..79c62bdcd3c5 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -41,6 +41,14 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	unsigned int mss;
+	int offset;
+	__wsum csum;
+
+	if (skb->encapsulation &&
+	    skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) {
+		segs = skb_udp_tunnel_segment(skb, features);
+		goto out;
+	}
 
 	mss = skb_shinfo(skb)->gso_size;
 	if (unlikely(skb->len <= mss))
@@ -63,27 +71,20 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 		goto out;
 	}
 
+	/* Do software UFO. Complete and fill in the UDP checksum as
+	 * HW cannot do checksum of UDP packets sent as multiple
+	 * IP fragments.
+	 */
+	offset = skb_checksum_start_offset(skb);
+	csum = skb_checksum(skb, offset, skb->len - offset, 0);
+	offset += skb->csum_offset;
+	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
+	skb->ip_summed = CHECKSUM_NONE;
+
 	/* Fragment the skb. IP headers of the fragments are updated in
 	 * inet_gso_segment()
 	 */
-	if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL)
-		segs = skb_udp_tunnel_segment(skb, features);
-	else {
-		int offset;
-		__wsum csum;
-
-		/* Do software UFO. Complete and fill in the UDP checksum as
-		 * HW cannot do checksum of UDP packets sent as multiple
-		 * IP fragments.
-		 */
-		offset = skb_checksum_start_offset(skb);
-		csum = skb_checksum(skb, offset, skb->len - offset, 0);
-		offset += skb->csum_offset;
-		*(__sum16 *)(skb->data + offset) = csum_fold(csum);
-		skb->ip_summed = CHECKSUM_NONE;
-
-		segs = skb_segment(skb, features);
-	}
 out:
 	return segs;
 }
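
The software-UFO branch the patch hoists computes the UDP checksum in software: skb_checksum() sums the payload into a 32-bit accumulator and csum_fold() collapses it to the final 16-bit value. A small standalone sketch of that fold (RFC 1071 end-around-carry), not the kernel's optimized implementation:

#include <stdint.h>
#include <stddef.h>

/* Sum 16-bit big-endian words, fold the carries back in, complement. */
static uint16_t inet_checksum(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;

	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte pads with zero */
		sum += (uint32_t)p[0] << 8;
	while (sum >> 16)		/* end-around carry (csum_fold) */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}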
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d5fa5b8c443e..f62c72b59f8e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1671,7 +1671,7 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
 {
 	struct in6_addr addr;
-	if (ifp->prefix_len == 127) /* RFC 6164 */
+	if (ifp->prefix_len >= 127) /* RFC 6164 */
 		return;
 	ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
 	if (ipv6_addr_any(&addr))
@@ -1682,7 +1682,7 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
 {
 	struct in6_addr addr;
-	if (ifp->prefix_len == 127) /* RFC 6164 */
+	if (ifp->prefix_len >= 127) /* RFC 6164 */
 		return;
 	ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
 	if (ipv6_addr_any(&addr))
@@ -3456,7 +3456,12 @@ restart:
 				     &inet6_addr_lst[i], addr_lst) {
 			unsigned long age;
 
-			if (ifp->flags & IFA_F_PERMANENT)
+			/* When setting preferred_lft to a value not zero or
+			 * infinity, while valid_lft is infinity
+			 * IFA_F_PERMANENT has a non-infinity life time.
+			 */
+			if ((ifp->flags & IFA_F_PERMANENT) &&
+			    (ifp->prefered_lft == INFINITY_LIFE_TIME))
 				continue;
 
 			spin_lock(&ifp->lock);
@@ -3481,7 +3486,8 @@ restart:
 				ifp->flags |= IFA_F_DEPRECATED;
 			}
 
-			if (time_before(ifp->tstamp + ifp->valid_lft * HZ, next))
+			if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
+			    (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
 				next = ifp->tstamp + ifp->valid_lft * HZ;
 
 			spin_unlock(&ifp->lock);
@@ -3761,7 +3767,8 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
 	put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
 		      ifa->idev->dev->ifindex);
 
-	if (!(ifa->flags&IFA_F_PERMANENT)) {
+	if (!((ifa->flags&IFA_F_PERMANENT) &&
+	      (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
 		preferred = ifa->prefered_lft;
 		valid = ifa->valid_lft;
 		if (preferred != INFINITY_LIFE_TIME) {
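
The last three addrconf hunks all encode the same rule: an address only behaves as truly permanent (skipped by the expiry walker, reported without lifetimes) when IFA_F_PERMANENT is set and preferred_lft is infinite; a finite preferred_lft on a permanent address must still age. The predicate, extracted as a hedged standalone sketch (constants match the kernel uapi values):

#include <stdbool.h>
#include <stdint.h>

#define IFA_F_PERMANENT		0x80
#define INFINITY_LIFE_TIME	0xFFFFFFFF

static bool addr_never_expires(uint32_t flags, uint32_t prefered_lft)
{
	return (flags & IFA_F_PERMANENT) &&
	       prefered_lft == INFINITY_LIFE_TIME;
}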
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index d6062325db08..7881965a8248 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -103,16 +103,25 @@ struct ip6_tnl_net {
 
 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
 {
-	struct pcpu_tstats sum = { 0 };
+	struct pcpu_tstats tmp, sum = { 0 };
 	int i;
 
 	for_each_possible_cpu(i) {
+		unsigned int start;
 		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
 
-		sum.rx_packets += tstats->rx_packets;
-		sum.rx_bytes += tstats->rx_bytes;
-		sum.tx_packets += tstats->tx_packets;
-		sum.tx_bytes += tstats->tx_bytes;
+		do {
+			start = u64_stats_fetch_begin_bh(&tstats->syncp);
+			tmp.rx_packets = tstats->rx_packets;
+			tmp.rx_bytes = tstats->rx_bytes;
+			tmp.tx_packets = tstats->tx_packets;
+			tmp.tx_bytes = tstats->tx_bytes;
+		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+		sum.rx_packets += tmp.rx_packets;
+		sum.rx_bytes += tmp.rx_bytes;
+		sum.tx_packets += tmp.tx_packets;
+		sum.tx_bytes += tmp.tx_bytes;
 	}
 	dev->stats.rx_packets = sum.rx_packets;
 	dev->stats.rx_bytes = sum.rx_bytes;
@@ -824,8 +833,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
 	}
 
 	tstats = this_cpu_ptr(t->dev->tstats);
+	u64_stats_update_begin(&tstats->syncp);
 	tstats->rx_packets++;
 	tstats->rx_bytes += skb->len;
+	u64_stats_update_end(&tstats->syncp);
 
 	netif_rx(skb);
 
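
The u64_stats helpers are a seqcount: u64_stats_update_begin/end bracket the writer (the counter is odd while an update is in flight), and the fetch_begin/retry loop makes the reader re-copy until it sees a consistent snapshot. A simplified single-writer userspace analog using C11 atomics — a real seqlock needs the kernel's stronger fencing, this only sketches the protocol:

#include <stdatomic.h>
#include <stdint.h>

struct tstats {
	atomic_uint seq;		/* odd while an update is in flight */
	uint64_t rx_packets, rx_bytes;
};

static void tstats_update(struct tstats *s, uint64_t bytes)	/* one writer */
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_acq_rel);
	s->rx_packets++;
	s->rx_bytes += bytes;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_acq_rel);
}

static void tstats_read(struct tstats *s, uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		*pkts  = s->rx_packets;
		*bytes = s->rx_bytes;
		/* retry if a writer was active or slipped in between */
	} while ((start & 1) ||
		 start != atomic_load_explicit(&s->seq, memory_order_acquire));
}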
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index ed94ba61dda0..a4564b05c47b 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -75,26 +75,6 @@ struct vti6_net {
 	struct ip6_tnl __rcu **tnls[2];
 };
 
-static struct net_device_stats *vti6_get_stats(struct net_device *dev)
-{
-	struct pcpu_tstats sum = { 0 };
-	int i;
-
-	for_each_possible_cpu(i) {
-		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
-
-		sum.rx_packets += tstats->rx_packets;
-		sum.rx_bytes += tstats->rx_bytes;
-		sum.tx_packets += tstats->tx_packets;
-		sum.tx_bytes += tstats->tx_bytes;
-	}
-	dev->stats.rx_packets = sum.rx_packets;
-	dev->stats.rx_bytes = sum.rx_bytes;
-	dev->stats.tx_packets = sum.tx_packets;
-	dev->stats.tx_bytes = sum.tx_bytes;
-	return &dev->stats;
-}
-
 #define for_each_vti6_tunnel_rcu(start) \
 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
 
@@ -331,8 +311,10 @@ static int vti6_rcv(struct sk_buff *skb)
 	}
 
 	tstats = this_cpu_ptr(t->dev->tstats);
+	u64_stats_update_begin(&tstats->syncp);
 	tstats->rx_packets++;
 	tstats->rx_bytes += skb->len;
+	u64_stats_update_end(&tstats->syncp);
 
 	skb->mark = 0;
 	secpath_reset(skb);
@@ -716,7 +698,7 @@ static const struct net_device_ops vti6_netdev_ops = {
 	.ndo_start_xmit = vti6_tnl_xmit,
 	.ndo_do_ioctl	= vti6_ioctl,
 	.ndo_change_mtu = vti6_change_mtu,
-	.ndo_get_stats	= vti6_get_stats,
+	.ndo_get_stats64 = ip_tunnel_get_stats64,
};
 
 /**
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index c87482252577..d3005b34476a 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -702,8 +702,10 @@ static int ipip6_rcv(struct sk_buff *skb)
 	}
 
 	tstats = this_cpu_ptr(tunnel->dev->tstats);
+	u64_stats_update_begin(&tstats->syncp);
 	tstats->rx_packets++;
 	tstats->rx_bytes += skb->len;
+	u64_stats_update_end(&tstats->syncp);
 
 	netif_rx(skb);
 
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 7b01b9f5846c..c71b699eb555 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -715,7 +715,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
 	unsigned long cpu_flags;
 	size_t copied = 0;
 	u32 peek_seq = 0;
-	u32 *seq;
+	u32 *seq, skb_len;
 	unsigned long used;
 	int target;	/* Read at least this many bytes */
 	long timeo;
@@ -812,6 +813,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
 		}
 		continue;
 	found_ok_skb:
+		skb_len = skb->len;
 		/* Ok so how much can we use? */
 		used = skb->len - offset;
 		if (len < used)
@@ -844,7 +845,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
 		}
 
 		/* Partial read */
-		if (used + offset < skb->len)
+		if (used + offset < skb_len)
 			continue;
 	} while (len > 0);
 
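
The LLC bug is a use-after-free: on a full read the skb is eaten (unlinked and freed), yet the partial-read test afterwards still dereferenced skb->len. The fix caches the length while the skb is alive. A generic hedged sketch of the same discipline, with a heap buffer in place of the skb:

#include <stdlib.h>
#include <string.h>

struct buf {
	size_t len;
	char data[];
};

/* Copy up to 'want' bytes starting at 'off'; frees b when fully consumed. */
static size_t consume(struct buf *b, size_t off, size_t want, char *out)
{
	size_t len = b->len;	/* cache: b may be freed below */
	size_t used = len - off < want ? len - off : want;

	memcpy(out, b->data + off, used);
	if (off + used == len)
		free(b);	/* from here on, b must not be touched */
	/* correct: compares the cached value, not the freed b->len */
	return used;
}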
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index f51ba985a36e..59268f6e2c36 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -208,8 +208,6 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
 	INIT_LIST_HEAD(&q->retransmit);
 	INIT_LIST_HEAD(&q->sacked);
 	INIT_LIST_HEAD(&q->abandoned);
-
-	q->empty = 1;
 }
 
 /* Free the outqueue structure and any related pending chunks.
@@ -332,7 +330,6 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
 			SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
 		else
 			SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
-		q->empty = 0;
 		break;
 	}
 	} else {
@@ -654,7 +651,6 @@ redo:
 			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
 				chunk->fast_retransmit = SCTP_DONT_FRTX;
 
-			q->empty = 0;
 			q->asoc->stats.rtxchunks++;
 			break;
 		}
@@ -1065,8 +1061,6 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 
 			sctp_transport_reset_timers(transport);
 
-			q->empty = 0;
-
 			/* Only let one DATA chunk get bundled with a
 			 * COOKIE-ECHO chunk.
 			 */
@@ -1275,29 +1269,17 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
 		 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
 		 asoc->adv_peer_ack_point);
 
-	/* See if all chunks are acked.
-	 * Make sure the empty queue handler will get run later.
-	 */
-	q->empty = (list_empty(&q->out_chunk_list) &&
-		    list_empty(&q->retransmit));
-	if (!q->empty)
-		goto finish;
-
-	list_for_each_entry(transport, transport_list, transports) {
-		q->empty = q->empty && list_empty(&transport->transmitted);
-		if (!q->empty)
-			goto finish;
-	}
-
-	pr_debug("%s: sack queue is empty\n", __func__);
-finish:
-	return q->empty;
+	return sctp_outq_is_empty(q);
 }
 
-/* Is the outqueue empty? */
+/* Is the outqueue empty?
+ * The queue is empty when we have not pending data, no in-flight data
+ * and nothing pending retransmissions.
+ */
 int sctp_outq_is_empty(const struct sctp_outq *q)
 {
-	return q->empty;
+	return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
+	       list_empty(&q->retransmit);
 }
 
 /********************************************************************