author     David S. Miller <davem@davemloft.net>  2015-04-14 15:44:14 -0400
committer  David S. Miller <davem@davemloft.net>  2015-04-14 15:44:14 -0400
commit     87ffabb1f055e14e7d171c6599539a154d647904
tree       56f227c9cb668686ca5c2cd9a7ae188b74c0f2f2
parent     5e0e0dc10a8be16239eae24ab775de60821eee2d
parent     b50edd7812852d989f2ef09dcfc729690f54a42d
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
The dwmac-socfpga.c conflict was a case of a bug fix overlapping changes
in net-next to handle an error pointer differently.

Signed-off-by: David S. Miller <davem@davemloft.net>
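For reference, the error-pointer handling at the heart of that conflict follows
the kernel's usual IS_ERR()/PTR_ERR() idiom. A minimal sketch of that idiom,
assuming a hypothetical helper setup_phy_clock() and clock name "phy_ref_clk"
(this is not the dwmac-socfpga.c code itself):

  #include <linux/clk.h>
  #include <linux/device.h>
  #include <linux/err.h>

  /* Hypothetical helper, for illustration only. */
  static int setup_phy_clock(struct device *dev)
  {
          struct clk *clk;

          /* devm_clk_get() reports failure as an error pointer, not NULL */
          clk = devm_clk_get(dev, "phy_ref_clk");
          if (IS_ERR(clk))
                  return PTR_ERR(clk);    /* propagate the encoded errno */

          return clk_prepare_enable(clk);
  }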
-rw-r--r--  Documentation/networking/rds.txt | 9
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 2
-rw-r--r--  drivers/net/phy/Kconfig | 4
-rw-r--r--  drivers/net/phy/broadcom.c | 14
-rw-r--r--  drivers/net/usb/usbnet.c | 4
-rw-r--r--  drivers/net/vxlan.c | 20
-rw-r--r--  include/linux/brcmphy.h | 1
-rw-r--r--  net/ipv4/fou.c | 4
-rw-r--r--  net/ipv4/geneve.c | 8
-rw-r--r--  net/ipv4/tcp_output.c | 2
-rw-r--r--  net/ipv6/ip6_vti.c | 3
-rw-r--r--  net/rds/connection.c | 3
-rw-r--r--  net/rds/rds.h | 1
-rw-r--r--  net/rds/send.c | 33
-rw-r--r--  net/sched/sch_netem.c | 3
-rw-r--r--  net/xfrm/xfrm_input.c | 10
17 files changed, 85 insertions(+), 37 deletions(-)
diff --git a/Documentation/networking/rds.txt b/Documentation/networking/rds.txt
index c67077cbeb80..e1a3d59bbe0f 100644
--- a/Documentation/networking/rds.txt
+++ b/Documentation/networking/rds.txt
@@ -62,11 +62,10 @@ Socket Interface
 ================
 
   AF_RDS, PF_RDS, SOL_RDS
-        These constants haven't been assigned yet, because RDS isn't in
-        mainline yet. Currently, the kernel module assigns some constant
-        and publishes it to user space through two sysctl files
-                /proc/sys/net/rds/pf_rds
-                /proc/sys/net/rds/sol_rds
+        AF_RDS and PF_RDS are the domain type to be used with socket(2)
+        to create RDS sockets. SOL_RDS is the socket-level to be used
+        with setsockopt(2) and getsockopt(2) for RDS specific socket
+        options.
 
   fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
         This creates a new, unbound RDS socket.
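The updated text above documents the now-assigned AF_RDS/PF_RDS and SOL_RDS
constants. A minimal userspace sketch of that interface, assuming a kernel and
libc that expose PF_RDS and SOL_RDS; the address 10.0.0.1 and port 4000 are
arbitrary examples:

  #include <stdio.h>
  #include <string.h>
  #include <sys/socket.h>
  #include <netinet/in.h>
  #include <arpa/inet.h>

  int main(void)
  {
          struct sockaddr_in sin;
          int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);

          if (fd < 0) {
                  perror("socket(PF_RDS)");
                  return 1;
          }

          /* RDS sockets are bound to a local IP address and port;
           * SOL_RDS is the level for RDS-specific setsockopt(2)/getsockopt(2). */
          memset(&sin, 0, sizeof(sin));
          sin.sin_family = AF_INET;
          sin.sin_addr.s_addr = inet_addr("10.0.0.1");
          sin.sin_port = htons(4000);

          if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
                  perror("bind");
                  return 1;
          }
          return 0;
  }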
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 4b0494b9cc7c..1bf1cdce74ac 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -99,6 +99,7 @@
 #define BE_NAPI_WEIGHT          64
 #define MAX_RX_POST             BE_NAPI_WEIGHT /* Frags posted at a time */
 #define RX_FRAGS_REFILL_WM      (RX_Q_LEN - MAX_RX_POST)
+#define MAX_NUM_POST_ERX_DB     255u
 
 #define MAX_VFS                 30 /* Max VFs supported by BE3 FW */
 #define FW_VER_LEN              32
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 5ff7fba9b67c..fb0bc3c3620e 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2122,7 +2122,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
                 if (rxo->rx_post_starved)
                         rxo->rx_post_starved = false;
                 do {
-                        notify = min(256u, posted);
+                        notify = min(MAX_NUM_POST_ERX_DB, posted);
                         be_rxq_notify(adapter, rxq->id, notify);
                         posted -= notify;
                 } while (posted);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 16adbc481772..8fadaa14b9f0 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -68,8 +68,8 @@ config SMSC_PHY
 config BROADCOM_PHY
         tristate "Drivers for Broadcom PHYs"
         ---help---
-          Currently supports the BCM5411, BCM5421, BCM5461, BCM5464, BCM5481
-          and BCM5482 PHYs.
+          Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
+          BCM5481 and BCM5482 PHYs.
 
 config BCM63XX_PHY
         tristate "Drivers for Broadcom 63xx SOCs internal PHY"
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index a52afb26421b..9c71295f2fef 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -549,6 +549,19 @@ static struct phy_driver broadcom_drivers[] = {
         .config_intr    = bcm54xx_config_intr,
         .driver         = { .owner = THIS_MODULE },
 }, {
+        .phy_id         = PHY_ID_BCM54616S,
+        .phy_id_mask    = 0xfffffff0,
+        .name           = "Broadcom BCM54616S",
+        .features       = PHY_GBIT_FEATURES |
+                          SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+        .config_init    = bcm54xx_config_init,
+        .config_aneg    = genphy_config_aneg,
+        .read_status    = genphy_read_status,
+        .ack_interrupt  = bcm54xx_ack_interrupt,
+        .config_intr    = bcm54xx_config_intr,
+        .driver         = { .owner = THIS_MODULE },
+}, {
         .phy_id         = PHY_ID_BCM5464,
         .phy_id_mask    = 0xfffffff0,
         .name           = "Broadcom BCM5464",
@@ -660,6 +673,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
         { PHY_ID_BCM5411, 0xfffffff0 },
         { PHY_ID_BCM5421, 0xfffffff0 },
         { PHY_ID_BCM5461, 0xfffffff0 },
+        { PHY_ID_BCM54616S, 0xfffffff0 },
         { PHY_ID_BCM5464, 0xfffffff0 },
         { PHY_ID_BCM5481, 0xfffffff0 },
         { PHY_ID_BCM5482, 0xfffffff0 },
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 777757ae1973..733f4feb2ef3 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1072,7 +1072,7 @@ static void __handle_set_rx_mode(struct usbnet *dev)
  * especially now that control transfers can be queued.
  */
 static void
-kevent (struct work_struct *work)
+usbnet_deferred_kevent (struct work_struct *work)
 {
         struct usbnet           *dev =
                 container_of(work, struct usbnet, kevent);
@@ -1626,7 +1626,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
         skb_queue_head_init(&dev->rxq_pause);
         dev->bh.func = usbnet_bh;
         dev->bh.data = (unsigned long) dev;
-        INIT_WORK (&dev->kevent, kevent);
+        INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
         init_usb_anchor(&dev->deferred);
         dev->delay.function = usbnet_bh;
         dev->delay.data = (unsigned long) dev;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 577c9b071ad9..154116aafd0d 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1699,12 +1699,6 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
                 }
         }
 
-        skb = iptunnel_handle_offloads(skb, udp_sum, type);
-        if (IS_ERR(skb)) {
-                err = -EINVAL;
-                goto err;
-        }
-
         skb_scrub_packet(skb, xnet);
 
         min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
@@ -1724,6 +1718,12 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
                 goto err;
         }
 
+        skb = iptunnel_handle_offloads(skb, udp_sum, type);
+        if (IS_ERR(skb)) {
+                err = -EINVAL;
+                goto err;
+        }
+
         vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
         vxh->vx_flags = htonl(VXLAN_HF_VNI);
         vxh->vx_vni = md->vni;
@@ -1784,10 +1784,6 @@ int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
                 }
         }
 
-        skb = iptunnel_handle_offloads(skb, udp_sum, type);
-        if (IS_ERR(skb))
-                return PTR_ERR(skb);
-
         min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
                         + VXLAN_HLEN + sizeof(struct iphdr)
                         + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
@@ -1803,6 +1799,10 @@ int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
         if (WARN_ON(!skb))
                 return -ENOMEM;
 
+        skb = iptunnel_handle_offloads(skb, udp_sum, type);
+        if (IS_ERR(skb))
+                return PTR_ERR(skb);
+
         vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
         vxh->vx_flags = htonl(VXLAN_HF_VNI);
         vxh->vx_vni = md->vni;
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index cab606617522..ae2982c0f7a6 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -11,6 +11,7 @@
 #define PHY_ID_BCM5421          0x002060e0
 #define PHY_ID_BCM5464          0x002060b0
 #define PHY_ID_BCM5461          0x002060c0
+#define PHY_ID_BCM54616S        0x03625d10
 #define PHY_ID_BCM57780         0x03625d90
 
 #define PHY_ID_BCM7250          0xae025280
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 263710259774..af150b43b214 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -886,12 +886,12 @@ EXPORT_SYMBOL(gue_build_header);
 
 #ifdef CONFIG_NET_FOU_IP_TUNNELS
 
-static const struct ip_tunnel_encap_ops __read_mostly fou_iptun_ops = {
+static const struct ip_tunnel_encap_ops fou_iptun_ops = {
         .encap_hlen = fou_encap_hlen,
         .build_header = fou_build_header,
 };
 
-static const struct ip_tunnel_encap_ops __read_mostly gue_iptun_ops = {
+static const struct ip_tunnel_encap_ops gue_iptun_ops = {
         .encap_hlen = gue_encap_hlen,
         .build_header = gue_build_header,
 };
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index b77f5e84c623..8986e63f3bda 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -113,10 +113,6 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
         int min_headroom;
         int err;
 
-        skb = udp_tunnel_handle_offloads(skb, csum);
-        if (IS_ERR(skb))
-                return PTR_ERR(skb);
-
         min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
                         + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
                         + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
@@ -131,6 +127,10 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
         if (unlikely(!skb))
                 return -ENOMEM;
 
+        skb = udp_tunnel_handle_offloads(skb, csum);
+        if (IS_ERR(skb))
+                return PTR_ERR(skb);
+
         gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
         geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e662d85d1635..8c8d7e06b72f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2994,6 +2994,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
         rcu_read_unlock();
 #endif
 
+        /* Do not fool tcpdump (if any), clean our debris */
+        skb->tstamp.tv64 = 0;
         return skb;
 }
 EXPORT_SYMBOL(tcp_make_synack);
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index b53148444e15..ed9d681207fa 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -288,8 +288,7 @@ static struct ip6_tnl *vti6_locate(struct net *net, struct __ip6_tnl_parm *p,
 static void vti6_dev_uninit(struct net_device *dev)
 {
         struct ip6_tnl *t = netdev_priv(dev);
-        struct net *net = dev_net(dev);
-        struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+        struct vti6_net *ip6n = net_generic(t->net, vti6_net_id);
 
         if (dev == ip6n->fb_tnl_dev)
                 RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 378c3a6acf84..14f041398ca1 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -130,7 +130,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
         rcu_read_lock();
         conn = rds_conn_lookup(head, laddr, faddr, trans);
         if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
-            !is_outgoing) {
+            laddr == faddr && !is_outgoing) {
                 /* This is a looped back IB connection, and we're
                  * called by the code handling the incoming connect.
                  * We need a second connection object into which we
@@ -193,6 +193,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
         }
 
         atomic_set(&conn->c_state, RDS_CONN_DOWN);
+        conn->c_send_gen = 0;
         conn->c_reconnect_jiffies = 0;
         INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker);
         INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker);
diff --git a/net/rds/rds.h b/net/rds/rds.h
index c3f2855c3d84..0d41155a2258 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -110,6 +110,7 @@ struct rds_connection {
         void                    *c_transport_data;
 
         atomic_t                c_state;
+        unsigned long           c_send_gen;
         unsigned long           c_flags;
         unsigned long           c_reconnect_jiffies;
         struct delayed_work     c_send_w;
diff --git a/net/rds/send.c b/net/rds/send.c
index 44672befc0ee..e9430f537f9c 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -140,8 +140,11 @@ int rds_send_xmit(struct rds_connection *conn)
         struct scatterlist *sg;
         int ret = 0;
         LIST_HEAD(to_be_dropped);
+        int batch_count;
+        unsigned long send_gen = 0;
 
 restart:
+        batch_count = 0;
 
         /*
          * sendmsg calls here after having queued its message on the send
@@ -157,6 +160,17 @@ restart:
         }
 
         /*
+         * we record the send generation after doing the xmit acquire.
+         * if someone else manages to jump in and do some work, we'll use
+         * this to avoid a goto restart farther down.
+         *
+         * The acquire_in_xmit() check above ensures that only one
+         * caller can increment c_send_gen at any time.
+         */
+        conn->c_send_gen++;
+        send_gen = conn->c_send_gen;
+
+        /*
          * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
          * we do the opposite to avoid races.
          */
@@ -202,6 +216,16 @@ restart:
                 if (!rm) {
                         unsigned int len;
 
+                        batch_count++;
+
+                        /* we want to process as big a batch as we can, but
+                         * we also want to avoid softlockups. If we've been
+                         * through a lot of messages, lets back off and see
+                         * if anyone else jumps in
+                         */
+                        if (batch_count >= 1024)
+                                goto over_batch;
+
                         spin_lock_irqsave(&conn->c_lock, flags);
 
                         if (!list_empty(&conn->c_send_queue)) {
@@ -357,9 +381,9 @@ restart:
                 }
         }
 
+over_batch:
         if (conn->c_trans->xmit_complete)
                 conn->c_trans->xmit_complete(conn);
-
         release_in_xmit(conn);
 
         /* Nuke any messages we decided not to retransmit. */
@@ -380,10 +404,15 @@ restart:
          * If the transport cannot continue (i.e ret != 0), then it must
          * call us when more room is available, such as from the tx
          * completion handler.
+         *
+         * We have an extra generation check here so that if someone manages
+         * to jump in after our release_in_xmit, we'll see that they have done
+         * some work and we will skip our goto
          */
         if (ret == 0) {
                 smp_mb();
-                if (!list_empty(&conn->c_send_queue)) {
+                if (!list_empty(&conn->c_send_queue) &&
+                    send_gen == conn->c_send_gen) {
                         rds_stats_inc(s_send_lock_queue_raced);
                         goto restart;
                 }
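The send.c changes above combine two ideas: cap each rds_send_xmit() pass at a
bounded batch of messages to avoid softlockups, and use a per-connection send
generation so a caller only restarts if nobody else has begun a newer pass. A
simplified userspace sketch of that generation-counter pattern, with invented
names (toy_conn, toy_send_xmit) rather than the actual RDS code:

  #include <stdatomic.h>

  /* Toy model: one "in transmit" owner at a time, a send-generation counter
   * advanced only by that owner, and a bounded batch per pass.
   * Initialize in_xmit with ATOMIC_FLAG_INIT. */
  struct toy_conn {
          atomic_flag in_xmit;
          _Atomic unsigned long send_gen;
          _Atomic int queued;              /* stand-in for the send queue */
  };

  void toy_send_xmit(struct toy_conn *c)
  {
          unsigned long my_gen;
          int batch;

  restart:
          if (atomic_flag_test_and_set(&c->in_xmit))
                  return;                  /* someone else is transmitting */

          my_gen = ++c->send_gen;          /* record this pass */

          for (batch = 0; batch < 1024; batch++) {
                  if (atomic_load(&c->queued) == 0)
                          break;
                  atomic_fetch_sub(&c->queued, 1);   /* "send" one message */
          }

          atomic_flag_clear(&c->in_xmit);

          /* Restart only if work remains and nobody started a newer pass after
           * we recorded my_gen; a newer pass will see that work itself, so this
           * caller can safely bail out. */
          if (atomic_load(&c->queued) != 0 && my_gen == c->send_gen)
                  goto restart;
  }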
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 179f1c8c0d8b..956ead2cab9a 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -560,8 +560,8 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 tfifo_dequeue:
         skb = __skb_dequeue(&sch->q);
         if (skb) {
-deliver:
                 qdisc_qstats_backlog_dec(sch, skb);
+deliver:
                 qdisc_unthrottled(sch);
                 qdisc_bstats_update(sch, skb);
                 return skb;
@@ -578,6 +578,7 @@ deliver:
                         rb_erase(p, &q->t_root);
 
                         sch->q.qlen--;
+                        qdisc_qstats_backlog_dec(sch, skb);
                         skb->next = NULL;
                         skb->prev = NULL;
                         skb->tstamp = netem_skb_cb(skb)->tstamp_save;
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 85d1d4764612..526c4feb3b50 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -238,11 +238,6 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 
                 skb->sp->xvec[skb->sp->len++] = x;
 
-                if (xfrm_tunnel_check(skb, x, family)) {
-                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
-                        goto drop;
-                }
-
                 spin_lock(&x->lock);
                 if (unlikely(x->km.state == XFRM_STATE_ACQ)) {
                         XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
@@ -271,6 +266,11 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 
                 spin_unlock(&x->lock);
 
+                if (xfrm_tunnel_check(skb, x, family)) {
+                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
+                        goto drop;
+                }
+
                 seq_hi = htonl(xfrm_replay_seqhi(x, seq));
 
                 XFRM_SKB_CB(skb)->seq.input.low = seq;