aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-02-05 14:23:45 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-02-05 14:23:45 -0500
commit9d82f5eb3376cbae96ad36a063a9390de1694546 (patch)
treed52daee3296d28455aff25c98b23fffab5282cd8
parent14365ea2b868c96e18da73a3f454c7bcdb0627c5 (diff)
parenta409caecb2e17fc475533738dd1c69b32e13fe09 (diff)
MMerge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller: 1) Stretch ACKs can kill performance with Reno and CUBIC congestion control, largely due to LRO and GRO. Fix from Neal Cardwell. 2) Fix userland breakage because we accidentally emit zero length netlink messages from the bridging code. From Roopa Prabhu. 3) Carry handling in generic csum_tcpudp_nofold is broken, fix from Karl Beldan. 4) Remove bogus dev_set_net() calls from CAIF driver, from Nicolas Dichtel. 5) Make sure PPP deflation never returns a length greater than the output buffer, otherwise we overflow and trigger skb_over_panic(). Fix from Florian Westphal. 6) COSA driver needs VIRT_TO_BUS Kconfig dependencies, from Arnd Bergmann. 7) Don't increase route cached MTU on datagram too big ICMPs. From Li Wei. 8) Fix error path leaks in nf_tables, from Pablo Neira Ayuso. 9) Fix bitmask handling regression in netlink that broke things like acpi userland tools. From Pablo Neira Ayuso. 10) Wrong header pointer passed to param_type2af() in SCTP code, from Saran Maruti Ramanara. 11) Stacked vlans not handled correctly by vlan_get_protocol(), from Toshiaki Makita. 12) Add missing DMA memory barrier to xgene driver, from Iyappan Subramanian. 13) Fix crash in rate estimators, from Eric Dumazet. 14) We've been adding various workarounds, one after another, for the change which added the per-net tcp_sock. It was meant to reduce socket contention but added lots of problems. Reduce this instead to a proper per-cpu socket and that rids us of all the daemons. From Eric Dumazet. 15) Fix memory corruption and OOPS in mlx4 driver, from Jack Morgenstein. 16) When we disabled UFO in the virtio_net device, it introduces some serious performance regressions. The original problem was IPV6 fragment ID generation, so fix that properly instead. From Vlad Yasevich. 17) sr9700 driver build breaks on xtensa because it defines macros with the same name as those used by the arch code. Use more unique names. From Chen Gang. 
18) Fix endianness in new virtio 1.0 mode of the vhost net driver, from Michael S Tsirkin. 19) Several sysctls were setting the maxlen attribute incorrectly, from Sasha Levin. 20) Don't accept an FQ scheduler quantum of zero, that leads to crashes. From Kenneth Klette Jonassen. 21) Fix dumping of non-existing actions in the packet scheduler classifier. From Ignacy Gawędzki. 22) Return the write work_done value when doing TX work in the qlcnic driver. 23) ip6gre_err accesses the info field with the wrong endianness, from Sabrina Dubroca. * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (54 commits) sit: fix some __be16/u16 mismatches ipv6: fix sparse errors in ip6_make_flowlabel() net: remove some sparse warnings flow_keys: n_proto type should be __be16 ip6_gre: fix endianness errors in ip6gre_err qlcnic: Fix NAPI poll routine for Tx completion amd-xgbe: Set RSS enablement based on hardware features amd-xgbe: Adjust for zero-based traffic class count cls_api.c: Fix dumping of non-existing actions' stats. pkt_sched: fq: avoid hang when quantum 0 net: rds: use correct size for max unacked packets and bytes vhost/net: fix up num_buffers endian-ness gianfar: correct the bad expression while writing bit-pattern net: usb: sr9700: Use 'SR_' prefix for the common register macros Revert "drivers/net: Disable UFO through virtio" Revert "drivers/net, ipv6: Select IPv6 fragment idents for virtio UFO packets" ipv6: Select fragment id during UFO segmentation if not set. xen-netback: stop the guest rx thread after a fatal error net/mlx4_core: Fix kernel Oops (mem corruption) when working with more than 80 VFs isdn: off by one in connect_res() ...
-rw-r--r--Documentation/networking/netlink_mmap.txt13
-rw-r--r--drivers/isdn/hardware/eicon/message.c2
-rw-r--r--drivers/net/caif/caif_hsi.c1
-rw-r--r--drivers/net/ethernet/amd/Kconfig4
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c4
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c2
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig3
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c27
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c26
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c1
-rw-r--r--drivers/net/hyperv/netvsc.c11
-rw-r--r--drivers/net/macvtap.c16
-rw-r--r--drivers/net/ppp/ppp_deflate.c2
-rw-r--r--drivers/net/tun.c25
-rw-r--r--drivers/net/usb/sr9700.c36
-rw-r--r--drivers/net/usb/sr9700.h66
-rw-r--r--drivers/net/virtio_net.c24
-rw-r--r--drivers/net/vxlan.c10
-rw-r--r--drivers/net/wan/Kconfig6
-rw-r--r--drivers/net/xen-netback/interface.c2
-rw-r--r--drivers/net/xen-netback/netback.c3
-rw-r--r--drivers/vhost/net.c4
-rw-r--r--include/linux/if_vlan.h60
-rw-r--r--include/linux/mlx4/device.h2
-rw-r--r--include/net/flow_keys.h6
-rw-r--r--include/net/ip.h2
-rw-r--r--include/net/ipv6.h7
-rw-r--r--include/net/netfilter/nf_tables.h2
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/sch_generic.h13
-rw-r--r--include/net/tcp.h4
-rw-r--r--lib/checksum.c12
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c29
-rw-r--r--net/caif/chnl_net.c1
-rw-r--r--net/core/dev.c37
-rw-r--r--net/core/rtnetlink.c6
-rw-r--r--net/ipv4/ip_output.c29
-rw-r--r--net/ipv4/route.c3
-rw-r--r--net/ipv4/tcp_bic.c2
-rw-r--r--net/ipv4/tcp_cong.c32
-rw-r--r--net/ipv4/tcp_cubic.c39
-rw-r--r--net/ipv4/tcp_ipv4.c37
-rw-r--r--net/ipv4/tcp_scalable.c3
-rw-r--r--net/ipv4/tcp_veno.c2
-rw-r--r--net/ipv4/tcp_yeah.c2
-rw-r--r--net/ipv6/ip6_gre.c4
-rw-r--r--net/ipv6/ip6_output.c14
-rw-r--r--net/ipv6/output_core.c41
-rw-r--r--net/ipv6/sit.c8
-rw-r--r--net/ipv6/udp_offload.c10
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c33
-rw-r--r--net/netfilter/nf_tables_api.c28
-rw-r--r--net/netfilter/nft_masq.c26
-rw-r--r--net/netfilter/nft_nat.c40
-rw-r--r--net/netfilter/nft_redir.c25
-rw-r--r--net/netlink/af_netlink.c4
-rw-r--r--net/rds/sysctl.c4
-rw-r--r--net/sched/cls_api.c7
-rw-r--r--net/sched/sch_fq.c10
-rw-r--r--net/sctp/sm_make_chunk.c2
66 files changed, 524 insertions, 383 deletions
diff --git a/Documentation/networking/netlink_mmap.txt b/Documentation/networking/netlink_mmap.txt
index c6af4bac5aa8..54f10478e8e3 100644
--- a/Documentation/networking/netlink_mmap.txt
+++ b/Documentation/networking/netlink_mmap.txt
@@ -199,16 +199,9 @@ frame header.
199TX limitations 199TX limitations
200-------------- 200--------------
201 201
202Kernel processing usually involves validation of the message received by 202As of Jan 2015 the message is always copied from the ring frame to an
203user-space, then processing its contents. The kernel must assure that 203allocated buffer due to unresolved security concerns.
204userspace is not able to modify the message contents after they have been 204See commit 4682a0358639b29cf ("netlink: Always copy on mmap TX.").
205validated. In order to do so, the message is copied from the ring frame
206to an allocated buffer if either of these conditions is false:
207
208- only a single mapping of the ring exists
209- the file descriptor is not shared between processes
210
211This means that for threaded programs, the kernel will fall back to copying.
212 205
213Example 206Example
214------- 207-------
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index 0b380603a578..d7c286656a25 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -1474,7 +1474,7 @@ static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
1474 add_ai(plci, &parms[5]); 1474 add_ai(plci, &parms[5]);
1475 sig_req(plci, REJECT, 0); 1475 sig_req(plci, REJECT, 0);
1476 } 1476 }
1477 else if (Reject == 1 || Reject > 9) 1477 else if (Reject == 1 || Reject >= 9)
1478 { 1478 {
1479 add_ai(plci, &parms[5]); 1479 add_ai(plci, &parms[5]);
1480 sig_req(plci, HANGUP, 0); 1480 sig_req(plci, HANGUP, 0);
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 5e40a8b68cbe..b3b922adc0e4 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -1415,7 +1415,6 @@ static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
1415 1415
1416 cfhsi = netdev_priv(dev); 1416 cfhsi = netdev_priv(dev);
1417 cfhsi_netlink_parms(data, cfhsi); 1417 cfhsi_netlink_parms(data, cfhsi);
1418 dev_net_set(cfhsi->ndev, src_net);
1419 1418
1420 get_ops = symbol_get(cfhsi_get_ops); 1419 get_ops = symbol_get(cfhsi_get_ops);
1421 if (!get_ops) { 1420 if (!get_ops) {
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 7a5e4aa5415e..77f1f6048ddd 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -45,7 +45,7 @@ config AMD8111_ETH
45 45
46config LANCE 46config LANCE
47 tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" 47 tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
48 depends on ISA && ISA_DMA_API 48 depends on ISA && ISA_DMA_API && !ARM
49 ---help--- 49 ---help---
50 If you have a network (Ethernet) card of this type, say Y and read 50 If you have a network (Ethernet) card of this type, say Y and read
51 the Ethernet-HOWTO, available from 51 the Ethernet-HOWTO, available from
@@ -142,7 +142,7 @@ config PCMCIA_NMCLAN
142 142
143config NI65 143config NI65
144 tristate "NI6510 support" 144 tristate "NI6510 support"
145 depends on ISA && ISA_DMA_API 145 depends on ISA && ISA_DMA_API && !ARM
146 ---help--- 146 ---help---
147 If you have a network (Ethernet) card of this type, say Y and read 147 If you have a network (Ethernet) card of this type, say Y and read
148 the Ethernet-HOWTO, available from 148 the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 5b22764ba88d..27245efe9f50 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -952,6 +952,8 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
952 do { 952 do {
953 /* WARNING: MACE_IR is a READ/CLEAR port! */ 953 /* WARNING: MACE_IR is a READ/CLEAR port! */
954 status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR); 954 status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR);
955 if (!(status & ~MACE_IMR_DEFAULT) && IntrCnt == MACE_MAX_IR_ITERATIONS)
956 return IRQ_NONE;
955 957
956 pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status); 958 pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status);
957 959
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 7bb5f07dbeef..e5ffb2ccb67d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -523,6 +523,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
523 hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); 523 hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
524 hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); 524 hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
525 hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA); 525 hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
526 hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
526 hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC); 527 hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
527 hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, 528 hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
528 HASHTBLSZ); 529 HASHTBLSZ);
@@ -552,13 +553,14 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
552 break; 553 break;
553 } 554 }
554 555
555 /* The Queue and Channel counts are zero based so increment them 556 /* The Queue, Channel and TC counts are zero based so increment them
556 * to get the actual number 557 * to get the actual number
557 */ 558 */
558 hw_feat->rx_q_cnt++; 559 hw_feat->rx_q_cnt++;
559 hw_feat->tx_q_cnt++; 560 hw_feat->tx_q_cnt++;
560 hw_feat->rx_ch_cnt++; 561 hw_feat->rx_ch_cnt++;
561 hw_feat->tx_ch_cnt++; 562 hw_feat->tx_ch_cnt++;
563 hw_feat->tc_cnt++;
562 564
563 DBGPR("<--xgbe_get_all_hw_features\n"); 565 DBGPR("<--xgbe_get_all_hw_features\n");
564} 566}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 83a50280bb70..793f3b73eeff 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -369,6 +369,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
369 if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) 369 if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
370 break; 370 break;
371 371
372 /* read fpqnum field after dataaddr field */
373 dma_rmb();
372 if (is_rx_desc(raw_desc)) 374 if (is_rx_desc(raw_desc))
373 ret = xgene_enet_rx_frame(ring, raw_desc); 375 ret = xgene_enet_rx_frame(ring, raw_desc);
374 else 376 else
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 7403dff8f14a..905ac5f5d9a6 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -32,7 +32,8 @@ config CS89x0
32 will be called cs89x0. 32 will be called cs89x0.
33 33
34config CS89x0_PLATFORM 34config CS89x0_PLATFORM
35 bool "CS89x0 platform driver support" 35 bool "CS89x0 platform driver support" if HAS_IOPORT_MAP
36 default !HAS_IOPORT_MAP
36 depends on CS89x0 37 depends on CS89x0
37 help 38 help
38 Say Y to compile the cs89x0 driver as a platform driver. This 39 Say Y to compile the cs89x0 driver as a platform driver. This
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 3e1a9c1a67a9..fda12fb32ec7 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1586,7 +1586,7 @@ static int gfar_write_filer_table(struct gfar_private *priv,
1586 return -EBUSY; 1586 return -EBUSY;
1587 1587
1588 /* Fill regular entries */ 1588 /* Fill regular entries */
1589 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl); 1589 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
1590 i++) 1590 i++)
1591 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); 1591 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
1592 /* Fill the rest with fall-troughs */ 1592 /* Fill the rest with fall-troughs */
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 63c807c9b21c..edea13b0ee85 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1907,7 +1907,8 @@ static void igbvf_watchdog_task(struct work_struct *work)
1907 1907
1908static int igbvf_tso(struct igbvf_adapter *adapter, 1908static int igbvf_tso(struct igbvf_adapter *adapter,
1909 struct igbvf_ring *tx_ring, 1909 struct igbvf_ring *tx_ring,
1910 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 1910 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
1911 __be16 protocol)
1911{ 1912{
1912 struct e1000_adv_tx_context_desc *context_desc; 1913 struct e1000_adv_tx_context_desc *context_desc;
1913 struct igbvf_buffer *buffer_info; 1914 struct igbvf_buffer *buffer_info;
@@ -1927,7 +1928,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
1927 l4len = tcp_hdrlen(skb); 1928 l4len = tcp_hdrlen(skb);
1928 *hdr_len += l4len; 1929 *hdr_len += l4len;
1929 1930
1930 if (skb->protocol == htons(ETH_P_IP)) { 1931 if (protocol == htons(ETH_P_IP)) {
1931 struct iphdr *iph = ip_hdr(skb); 1932 struct iphdr *iph = ip_hdr(skb);
1932 iph->tot_len = 0; 1933 iph->tot_len = 0;
1933 iph->check = 0; 1934 iph->check = 0;
@@ -1958,7 +1959,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
1958 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 1959 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
1959 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); 1960 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
1960 1961
1961 if (skb->protocol == htons(ETH_P_IP)) 1962 if (protocol == htons(ETH_P_IP))
1962 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; 1963 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
1963 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; 1964 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
1964 1965
@@ -1984,7 +1985,8 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
1984 1985
1985static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, 1986static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
1986 struct igbvf_ring *tx_ring, 1987 struct igbvf_ring *tx_ring,
1987 struct sk_buff *skb, u32 tx_flags) 1988 struct sk_buff *skb, u32 tx_flags,
1989 __be16 protocol)
1988{ 1990{
1989 struct e1000_adv_tx_context_desc *context_desc; 1991 struct e1000_adv_tx_context_desc *context_desc;
1990 unsigned int i; 1992 unsigned int i;
@@ -2011,7 +2013,7 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
2011 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); 2013 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
2012 2014
2013 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2015 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2014 switch (skb->protocol) { 2016 switch (protocol) {
2015 case htons(ETH_P_IP): 2017 case htons(ETH_P_IP):
2016 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; 2018 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
2017 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2019 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
@@ -2211,6 +2213,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2211 u8 hdr_len = 0; 2213 u8 hdr_len = 0;
2212 int count = 0; 2214 int count = 0;
2213 int tso = 0; 2215 int tso = 0;
2216 __be16 protocol = vlan_get_protocol(skb);
2214 2217
2215 if (test_bit(__IGBVF_DOWN, &adapter->state)) { 2218 if (test_bit(__IGBVF_DOWN, &adapter->state)) {
2216 dev_kfree_skb_any(skb); 2219 dev_kfree_skb_any(skb);
@@ -2239,13 +2242,13 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2239 tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT); 2242 tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
2240 } 2243 }
2241 2244
2242 if (skb->protocol == htons(ETH_P_IP)) 2245 if (protocol == htons(ETH_P_IP))
2243 tx_flags |= IGBVF_TX_FLAGS_IPV4; 2246 tx_flags |= IGBVF_TX_FLAGS_IPV4;
2244 2247
2245 first = tx_ring->next_to_use; 2248 first = tx_ring->next_to_use;
2246 2249
2247 tso = skb_is_gso(skb) ? 2250 tso = skb_is_gso(skb) ?
2248 igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0; 2251 igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0;
2249 if (unlikely(tso < 0)) { 2252 if (unlikely(tso < 0)) {
2250 dev_kfree_skb_any(skb); 2253 dev_kfree_skb_any(skb);
2251 return NETDEV_TX_OK; 2254 return NETDEV_TX_OK;
@@ -2253,7 +2256,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2253 2256
2254 if (tso) 2257 if (tso)
2255 tx_flags |= IGBVF_TX_FLAGS_TSO; 2258 tx_flags |= IGBVF_TX_FLAGS_TSO;
2256 else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) && 2259 else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) &&
2257 (skb->ip_summed == CHECKSUM_PARTIAL)) 2260 (skb->ip_summed == CHECKSUM_PARTIAL))
2258 tx_flags |= IGBVF_TX_FLAGS_CSUM; 2261 tx_flags |= IGBVF_TX_FLAGS_CSUM;
2259 2262
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 2ed2c7de2304..67b02bde179e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7227,11 +7227,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
7227 if (!vhdr) 7227 if (!vhdr)
7228 goto out_drop; 7228 goto out_drop;
7229 7229
7230 protocol = vhdr->h_vlan_encapsulated_proto;
7231 tx_flags |= ntohs(vhdr->h_vlan_TCI) << 7230 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
7232 IXGBE_TX_FLAGS_VLAN_SHIFT; 7231 IXGBE_TX_FLAGS_VLAN_SHIFT;
7233 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; 7232 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
7234 } 7233 }
7234 protocol = vlan_get_protocol(skb);
7235 7235
7236 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 7236 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
7237 adapter->ptp_clock && 7237 adapter->ptp_clock &&
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 62a0d8e0f17d..38c7a0be8197 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3099,7 +3099,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3099 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 3099 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3100 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; 3100 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3101 3101
3102 if (skb->protocol == htons(ETH_P_IP)) { 3102 if (first->protocol == htons(ETH_P_IP)) {
3103 struct iphdr *iph = ip_hdr(skb); 3103 struct iphdr *iph = ip_hdr(skb);
3104 iph->tot_len = 0; 3104 iph->tot_len = 0;
3105 iph->check = 0; 3105 iph->check = 0;
@@ -3156,7 +3156,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3156 3156
3157 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3157 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3158 u8 l4_hdr = 0; 3158 u8 l4_hdr = 0;
3159 switch (skb->protocol) { 3159 switch (first->protocol) {
3160 case htons(ETH_P_IP): 3160 case htons(ETH_P_IP):
3161 vlan_macip_lens |= skb_network_header_len(skb); 3161 vlan_macip_lens |= skb_network_header_len(skb);
3162 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 3162 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index bdd4eea2247c..210691c89b6c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -235,7 +235,8 @@ do { \
235extern int mlx4_log_num_mgm_entry_size; 235extern int mlx4_log_num_mgm_entry_size;
236extern int log_mtts_per_seg; 236extern int log_mtts_per_seg;
237 237
238#define MLX4_MAX_NUM_SLAVES (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF) 238#define MLX4_MAX_NUM_SLAVES (min(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF, \
239 MLX4_MFUNC_MAX))
239#define ALL_SLAVES 0xff 240#define ALL_SLAVES 0xff
240 241
241struct mlx4_bitmap { 242struct mlx4_bitmap {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 18e5de72e9b4..4e1f58cf19ce 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -967,7 +967,12 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
967 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, 967 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
968 budget); 968 budget);
969 work_done = qlcnic_process_rcv_ring(sds_ring, budget); 969 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
970 if ((work_done < budget) && tx_complete) { 970
971 /* Check if we need a repoll */
972 if (!tx_complete)
973 work_done = budget;
974
975 if (work_done < budget) {
971 napi_complete(&sds_ring->napi); 976 napi_complete(&sds_ring->napi);
972 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { 977 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
973 qlcnic_enable_sds_intr(adapter, sds_ring); 978 qlcnic_enable_sds_intr(adapter, sds_ring);
@@ -992,6 +997,9 @@ static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
992 napi_complete(&tx_ring->napi); 997 napi_complete(&tx_ring->napi);
993 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) 998 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
994 qlcnic_enable_tx_intr(adapter, tx_ring); 999 qlcnic_enable_tx_intr(adapter, tx_ring);
1000 } else {
1001 /* As qlcnic_process_cmd_ring() returned 0, we need a repoll*/
1002 work_done = budget;
995 } 1003 }
996 1004
997 return work_done; 1005 return work_done;
@@ -1950,7 +1958,12 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
1950 1958
1951 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); 1959 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
1952 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); 1960 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
1953 if ((work_done < budget) && tx_complete) { 1961
1962 /* Check if we need a repoll */
1963 if (!tx_complete)
1964 work_done = budget;
1965
1966 if (work_done < budget) {
1954 napi_complete(&sds_ring->napi); 1967 napi_complete(&sds_ring->napi);
1955 qlcnic_enable_sds_intr(adapter, sds_ring); 1968 qlcnic_enable_sds_intr(adapter, sds_ring);
1956 } 1969 }
@@ -1973,7 +1986,12 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
1973 1986
1974 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); 1987 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
1975 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); 1988 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
1976 if ((work_done < budget) && tx_complete) { 1989
1990 /* Check if we need a repoll */
1991 if (!tx_complete)
1992 work_done = budget;
1993
1994 if (work_done < budget) {
1977 napi_complete(&sds_ring->napi); 1995 napi_complete(&sds_ring->napi);
1978 qlcnic_enable_sds_intr(adapter, sds_ring); 1996 qlcnic_enable_sds_intr(adapter, sds_ring);
1979 } 1997 }
@@ -1995,6 +2013,9 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
1995 napi_complete(&tx_ring->napi); 2013 napi_complete(&tx_ring->napi);
1996 if (test_bit(__QLCNIC_DEV_UP , &adapter->state)) 2014 if (test_bit(__QLCNIC_DEV_UP , &adapter->state))
1997 qlcnic_enable_tx_intr(adapter, tx_ring); 2015 qlcnic_enable_tx_intr(adapter, tx_ring);
2016 } else {
2017 /* need a repoll */
2018 work_done = budget;
1998 } 2019 }
1999 2020
2000 return work_done; 2021 return work_done;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 6c904a6cad2a..ef5aed3b1225 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2351,23 +2351,29 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
2351{ 2351{
2352 struct ql_adapter *qdev = netdev_priv(ndev); 2352 struct ql_adapter *qdev = netdev_priv(ndev);
2353 int status = 0; 2353 int status = 0;
2354 bool need_restart = netif_running(ndev);
2354 2355
2355 status = ql_adapter_down(qdev); 2356 if (need_restart) {
2356 if (status) { 2357 status = ql_adapter_down(qdev);
2357 netif_err(qdev, link, qdev->ndev, 2358 if (status) {
2358 "Failed to bring down the adapter\n"); 2359 netif_err(qdev, link, qdev->ndev,
2359 return status; 2360 "Failed to bring down the adapter\n");
2361 return status;
2362 }
2360 } 2363 }
2361 2364
2362 /* update the features with resent change */ 2365 /* update the features with resent change */
2363 ndev->features = features; 2366 ndev->features = features;
2364 2367
2365 status = ql_adapter_up(qdev); 2368 if (need_restart) {
2366 if (status) { 2369 status = ql_adapter_up(qdev);
2367 netif_err(qdev, link, qdev->ndev, 2370 if (status) {
2368 "Failed to bring up the adapter\n"); 2371 netif_err(qdev, link, qdev->ndev,
2369 return status; 2372 "Failed to bring up the adapter\n");
2373 return status;
2374 }
2370 } 2375 }
2376
2371 return status; 2377 return status;
2372} 2378}
2373 2379
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index d2835bf7b4fb..3699b98d5b2c 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1119,6 +1119,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
1119 skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size; 1119 skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
1120 skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; 1120 skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
1121 } 1121 }
1122 nskb->queue_mapping = skb->queue_mapping;
1122 dev_kfree_skb(skb); 1123 dev_kfree_skb(skb);
1123 skb = nskb; 1124 skb = nskb;
1124 } 1125 }
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 9f49c0129a78..7cd4eb38abfa 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -716,7 +716,7 @@ int netvsc_send(struct hv_device *device,
716 u64 req_id; 716 u64 req_id;
717 unsigned int section_index = NETVSC_INVALID_INDEX; 717 unsigned int section_index = NETVSC_INVALID_INDEX;
718 u32 msg_size = 0; 718 u32 msg_size = 0;
719 struct sk_buff *skb; 719 struct sk_buff *skb = NULL;
720 u16 q_idx = packet->q_idx; 720 u16 q_idx = packet->q_idx;
721 721
722 722
@@ -743,8 +743,6 @@ int netvsc_send(struct hv_device *device,
743 packet); 743 packet);
744 skb = (struct sk_buff *) 744 skb = (struct sk_buff *)
745 (unsigned long)packet->send_completion_tid; 745 (unsigned long)packet->send_completion_tid;
746 if (skb)
747 dev_kfree_skb_any(skb);
748 packet->page_buf_cnt = 0; 746 packet->page_buf_cnt = 0;
749 } 747 }
750 } 748 }
@@ -810,6 +808,13 @@ int netvsc_send(struct hv_device *device,
810 packet, ret); 808 packet, ret);
811 } 809 }
812 810
811 if (ret != 0) {
812 if (section_index != NETVSC_INVALID_INDEX)
813 netvsc_free_send_slot(net_device, section_index);
814 } else if (skb) {
815 dev_kfree_skb_any(skb);
816 }
817
813 return ret; 818 return ret;
814} 819}
815 820
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 7df221788cd4..919f4fccc322 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -17,7 +17,6 @@
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/uio.h> 18#include <linux/uio.h>
19 19
20#include <net/ipv6.h>
21#include <net/net_namespace.h> 20#include <net/net_namespace.h>
22#include <net/rtnetlink.h> 21#include <net/rtnetlink.h>
23#include <net/sock.h> 22#include <net/sock.h>
@@ -81,7 +80,7 @@ static struct cdev macvtap_cdev;
81static const struct proto_ops macvtap_socket_ops; 80static const struct proto_ops macvtap_socket_ops;
82 81
83#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ 82#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
84 NETIF_F_TSO6) 83 NETIF_F_TSO6 | NETIF_F_UFO)
85#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) 84#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
86#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG) 85#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
87 86
@@ -586,11 +585,7 @@ static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q,
586 gso_type = SKB_GSO_TCPV6; 585 gso_type = SKB_GSO_TCPV6;
587 break; 586 break;
588 case VIRTIO_NET_HDR_GSO_UDP: 587 case VIRTIO_NET_HDR_GSO_UDP:
589 pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
590 current->comm);
591 gso_type = SKB_GSO_UDP; 588 gso_type = SKB_GSO_UDP;
592 if (skb->protocol == htons(ETH_P_IPV6))
593 ipv6_proxy_select_ident(skb);
594 break; 589 break;
595 default: 590 default:
596 return -EINVAL; 591 return -EINVAL;
@@ -636,6 +631,8 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
636 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 631 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
637 else if (sinfo->gso_type & SKB_GSO_TCPV6) 632 else if (sinfo->gso_type & SKB_GSO_TCPV6)
638 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 633 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
634 else if (sinfo->gso_type & SKB_GSO_UDP)
635 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
639 else 636 else
640 BUG(); 637 BUG();
641 if (sinfo->gso_type & SKB_GSO_TCP_ECN) 638 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
@@ -965,6 +962,9 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
965 if (arg & TUN_F_TSO6) 962 if (arg & TUN_F_TSO6)
966 feature_mask |= NETIF_F_TSO6; 963 feature_mask |= NETIF_F_TSO6;
967 } 964 }
965
966 if (arg & TUN_F_UFO)
967 feature_mask |= NETIF_F_UFO;
968 } 968 }
969 969
970 /* tun/tap driver inverts the usage for TSO offloads, where 970 /* tun/tap driver inverts the usage for TSO offloads, where
@@ -975,7 +975,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
975 * When user space turns off TSO, we turn off GSO/LRO so that 975 * When user space turns off TSO, we turn off GSO/LRO so that
976 * user-space will not receive TSO frames. 976 * user-space will not receive TSO frames.
977 */ 977 */
978 if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6)) 978 if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
979 features |= RX_OFFLOADS; 979 features |= RX_OFFLOADS;
980 else 980 else
981 features &= ~RX_OFFLOADS; 981 features &= ~RX_OFFLOADS;
@@ -1090,7 +1090,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
1090 case TUNSETOFFLOAD: 1090 case TUNSETOFFLOAD:
1091 /* let the user check for future flags */ 1091 /* let the user check for future flags */
1092 if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | 1092 if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
1093 TUN_F_TSO_ECN)) 1093 TUN_F_TSO_ECN | TUN_F_UFO))
1094 return -EINVAL; 1094 return -EINVAL;
1095 1095
1096 rtnl_lock(); 1096 rtnl_lock();
diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
index 602c625d95d5..b5edc7f96a39 100644
--- a/drivers/net/ppp/ppp_deflate.c
+++ b/drivers/net/ppp/ppp_deflate.c
@@ -246,7 +246,7 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
246 /* 246 /*
247 * See if we managed to reduce the size of the packet. 247 * See if we managed to reduce the size of the packet.
248 */ 248 */
249 if (olen < isize) { 249 if (olen < isize && olen <= osize) {
250 state->stats.comp_bytes += olen; 250 state->stats.comp_bytes += olen;
251 state->stats.comp_packets++; 251 state->stats.comp_packets++;
252 } else { 252 } else {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 8c8dc16839a7..10f9e4021b5a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -65,7 +65,6 @@
65#include <linux/nsproxy.h> 65#include <linux/nsproxy.h>
66#include <linux/virtio_net.h> 66#include <linux/virtio_net.h>
67#include <linux/rcupdate.h> 67#include <linux/rcupdate.h>
68#include <net/ipv6.h>
69#include <net/net_namespace.h> 68#include <net/net_namespace.h>
70#include <net/netns/generic.h> 69#include <net/netns/generic.h>
71#include <net/rtnetlink.h> 70#include <net/rtnetlink.h>
@@ -187,7 +186,7 @@ struct tun_struct {
187 struct net_device *dev; 186 struct net_device *dev;
188 netdev_features_t set_features; 187 netdev_features_t set_features;
189#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ 188#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
190 NETIF_F_TSO6) 189 NETIF_F_TSO6|NETIF_F_UFO)
191 190
192 int vnet_hdr_sz; 191 int vnet_hdr_sz;
193 int sndbuf; 192 int sndbuf;
@@ -1167,8 +1166,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1167 break; 1166 break;
1168 } 1167 }
1169 1168
1170 skb_reset_network_header(skb);
1171
1172 if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) { 1169 if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1173 pr_debug("GSO!\n"); 1170 pr_debug("GSO!\n");
1174 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { 1171 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
@@ -1179,20 +1176,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1179 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 1176 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1180 break; 1177 break;
1181 case VIRTIO_NET_HDR_GSO_UDP: 1178 case VIRTIO_NET_HDR_GSO_UDP:
1182 {
1183 static bool warned;
1184
1185 if (!warned) {
1186 warned = true;
1187 netdev_warn(tun->dev,
1188 "%s: using disabled UFO feature; please fix this program\n",
1189 current->comm);
1190 }
1191 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 1179 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1192 if (skb->protocol == htons(ETH_P_IPV6))
1193 ipv6_proxy_select_ident(skb);
1194 break; 1180 break;
1195 }
1196 default: 1181 default:
1197 tun->dev->stats.rx_frame_errors++; 1182 tun->dev->stats.rx_frame_errors++;
1198 kfree_skb(skb); 1183 kfree_skb(skb);
@@ -1221,6 +1206,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1221 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 1206 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1222 } 1207 }
1223 1208
1209 skb_reset_network_header(skb);
1224 skb_probe_transport_header(skb, 0); 1210 skb_probe_transport_header(skb, 0);
1225 1211
1226 rxhash = skb_get_hash(skb); 1212 rxhash = skb_get_hash(skb);
@@ -1298,6 +1284,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1298 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 1284 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1299 else if (sinfo->gso_type & SKB_GSO_TCPV6) 1285 else if (sinfo->gso_type & SKB_GSO_TCPV6)
1300 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 1286 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1287 else if (sinfo->gso_type & SKB_GSO_UDP)
1288 gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
1301 else { 1289 else {
1302 pr_err("unexpected GSO type: " 1290 pr_err("unexpected GSO type: "
1303 "0x%x, gso_size %d, hdr_len %d\n", 1291 "0x%x, gso_size %d, hdr_len %d\n",
@@ -1746,6 +1734,11 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
1746 features |= NETIF_F_TSO6; 1734 features |= NETIF_F_TSO6;
1747 arg &= ~(TUN_F_TSO4|TUN_F_TSO6); 1735 arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
1748 } 1736 }
1737
1738 if (arg & TUN_F_UFO) {
1739 features |= NETIF_F_UFO;
1740 arg &= ~TUN_F_UFO;
1741 }
1749 } 1742 }
1750 1743
1751 /* This gives the user a way to test for new features in future by 1744 /* This gives the user a way to test for new features in future by
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 99b69af14274..4a1e9c489f1f 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -77,7 +77,7 @@ static int wait_phy_eeprom_ready(struct usbnet *dev, int phy)
77 int ret; 77 int ret;
78 78
79 udelay(1); 79 udelay(1);
80 ret = sr_read_reg(dev, EPCR, &tmp); 80 ret = sr_read_reg(dev, SR_EPCR, &tmp);
81 if (ret < 0) 81 if (ret < 0)
82 return ret; 82 return ret;
83 83
@@ -98,15 +98,15 @@ static int sr_share_read_word(struct usbnet *dev, int phy, u8 reg,
98 98
99 mutex_lock(&dev->phy_mutex); 99 mutex_lock(&dev->phy_mutex);
100 100
101 sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); 101 sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
102 sr_write_reg(dev, EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR); 102 sr_write_reg(dev, SR_EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR);
103 103
104 ret = wait_phy_eeprom_ready(dev, phy); 104 ret = wait_phy_eeprom_ready(dev, phy);
105 if (ret < 0) 105 if (ret < 0)
106 goto out_unlock; 106 goto out_unlock;
107 107
108 sr_write_reg(dev, EPCR, 0x0); 108 sr_write_reg(dev, SR_EPCR, 0x0);
109 ret = sr_read(dev, EPDR, 2, value); 109 ret = sr_read(dev, SR_EPDR, 2, value);
110 110
111 netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n", 111 netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n",
112 phy, reg, *value, ret); 112 phy, reg, *value, ret);
@@ -123,19 +123,19 @@ static int sr_share_write_word(struct usbnet *dev, int phy, u8 reg,
123 123
124 mutex_lock(&dev->phy_mutex); 124 mutex_lock(&dev->phy_mutex);
125 125
126 ret = sr_write(dev, EPDR, 2, &value); 126 ret = sr_write(dev, SR_EPDR, 2, &value);
127 if (ret < 0) 127 if (ret < 0)
128 goto out_unlock; 128 goto out_unlock;
129 129
130 sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); 130 sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
131 sr_write_reg(dev, EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) : 131 sr_write_reg(dev, SR_EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) :
132 (EPCR_WEP | EPCR_ERPRW)); 132 (EPCR_WEP | EPCR_ERPRW));
133 133
134 ret = wait_phy_eeprom_ready(dev, phy); 134 ret = wait_phy_eeprom_ready(dev, phy);
135 if (ret < 0) 135 if (ret < 0)
136 goto out_unlock; 136 goto out_unlock;
137 137
138 sr_write_reg(dev, EPCR, 0x0); 138 sr_write_reg(dev, SR_EPCR, 0x0);
139 139
140out_unlock: 140out_unlock:
141 mutex_unlock(&dev->phy_mutex); 141 mutex_unlock(&dev->phy_mutex);
@@ -188,7 +188,7 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
188 if (loc == MII_BMSR) { 188 if (loc == MII_BMSR) {
189 u8 value; 189 u8 value;
190 190
191 sr_read_reg(dev, NSR, &value); 191 sr_read_reg(dev, SR_NSR, &value);
192 if (value & NSR_LINKST) 192 if (value & NSR_LINKST)
193 rc = 1; 193 rc = 1;
194 } 194 }
@@ -228,7 +228,7 @@ static u32 sr9700_get_link(struct net_device *netdev)
228 int rc = 0; 228 int rc = 0;
229 229
230 /* Get the Link Status directly */ 230 /* Get the Link Status directly */
231 sr_read_reg(dev, NSR, &value); 231 sr_read_reg(dev, SR_NSR, &value);
232 if (value & NSR_LINKST) 232 if (value & NSR_LINKST)
233 rc = 1; 233 rc = 1;
234 234
@@ -281,8 +281,8 @@ static void sr9700_set_multicast(struct net_device *netdev)
281 } 281 }
282 } 282 }
283 283
284 sr_write_async(dev, MAR, SR_MCAST_SIZE, hashes); 284 sr_write_async(dev, SR_MAR, SR_MCAST_SIZE, hashes);
285 sr_write_reg_async(dev, RCR, rx_ctl); 285 sr_write_reg_async(dev, SR_RCR, rx_ctl);
286} 286}
287 287
288static int sr9700_set_mac_address(struct net_device *netdev, void *p) 288static int sr9700_set_mac_address(struct net_device *netdev, void *p)
@@ -297,7 +297,7 @@ static int sr9700_set_mac_address(struct net_device *netdev, void *p)
297 } 297 }
298 298
299 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 299 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
300 sr_write_async(dev, PAR, 6, netdev->dev_addr); 300 sr_write_async(dev, SR_PAR, 6, netdev->dev_addr);
301 301
302 return 0; 302 return 0;
303} 303}
@@ -340,7 +340,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
340 mii->phy_id_mask = 0x1f; 340 mii->phy_id_mask = 0x1f;
341 mii->reg_num_mask = 0x1f; 341 mii->reg_num_mask = 0x1f;
342 342
343 sr_write_reg(dev, NCR, NCR_RST); 343 sr_write_reg(dev, SR_NCR, NCR_RST);
344 udelay(20); 344 udelay(20);
345 345
346 /* read MAC 346 /* read MAC
@@ -348,17 +348,17 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
348 * EEPROM automatically to PAR. In case there is no EEPROM externally, 348 * EEPROM automatically to PAR. In case there is no EEPROM externally,
349 * a default MAC address is stored in PAR for making chip work properly. 349 * a default MAC address is stored in PAR for making chip work properly.
350 */ 350 */
351 if (sr_read(dev, PAR, ETH_ALEN, netdev->dev_addr) < 0) { 351 if (sr_read(dev, SR_PAR, ETH_ALEN, netdev->dev_addr) < 0) {
352 netdev_err(netdev, "Error reading MAC address\n"); 352 netdev_err(netdev, "Error reading MAC address\n");
353 ret = -ENODEV; 353 ret = -ENODEV;
354 goto out; 354 goto out;
355 } 355 }
356 356
357 /* power up and reset phy */ 357 /* power up and reset phy */
358 sr_write_reg(dev, PRR, PRR_PHY_RST); 358 sr_write_reg(dev, SR_PRR, PRR_PHY_RST);
359 /* at least 10ms, here 20ms for safe */ 359 /* at least 10ms, here 20ms for safe */
360 mdelay(20); 360 mdelay(20);
361 sr_write_reg(dev, PRR, 0); 361 sr_write_reg(dev, SR_PRR, 0);
362 /* at least 1ms, here 2ms for reading right register */ 362 /* at least 1ms, here 2ms for reading right register */
363 udelay(2 * 1000); 363 udelay(2 * 1000);
364 364
diff --git a/drivers/net/usb/sr9700.h b/drivers/net/usb/sr9700.h
index fd687c575e74..258b030277e7 100644
--- a/drivers/net/usb/sr9700.h
+++ b/drivers/net/usb/sr9700.h
@@ -14,13 +14,13 @@
14/* sr9700 spec. register table on Linux platform */ 14/* sr9700 spec. register table on Linux platform */
15 15
16/* Network Control Reg */ 16/* Network Control Reg */
17#define NCR 0x00 17#define SR_NCR 0x00
18#define NCR_RST (1 << 0) 18#define NCR_RST (1 << 0)
19#define NCR_LBK (3 << 1) 19#define NCR_LBK (3 << 1)
20#define NCR_FDX (1 << 3) 20#define NCR_FDX (1 << 3)
21#define NCR_WAKEEN (1 << 6) 21#define NCR_WAKEEN (1 << 6)
22/* Network Status Reg */ 22/* Network Status Reg */
23#define NSR 0x01 23#define SR_NSR 0x01
24#define NSR_RXRDY (1 << 0) 24#define NSR_RXRDY (1 << 0)
25#define NSR_RXOV (1 << 1) 25#define NSR_RXOV (1 << 1)
26#define NSR_TX1END (1 << 2) 26#define NSR_TX1END (1 << 2)
@@ -30,7 +30,7 @@
30#define NSR_LINKST (1 << 6) 30#define NSR_LINKST (1 << 6)
31#define NSR_SPEED (1 << 7) 31#define NSR_SPEED (1 << 7)
32/* Tx Control Reg */ 32/* Tx Control Reg */
33#define TCR 0x02 33#define SR_TCR 0x02
34#define TCR_CRC_DIS (1 << 1) 34#define TCR_CRC_DIS (1 << 1)
35#define TCR_PAD_DIS (1 << 2) 35#define TCR_PAD_DIS (1 << 2)
36#define TCR_LC_CARE (1 << 3) 36#define TCR_LC_CARE (1 << 3)
@@ -38,7 +38,7 @@
38#define TCR_EXCECM (1 << 5) 38#define TCR_EXCECM (1 << 5)
39#define TCR_LF_EN (1 << 6) 39#define TCR_LF_EN (1 << 6)
40/* Tx Status Reg for Packet Index 1 */ 40/* Tx Status Reg for Packet Index 1 */
41#define TSR1 0x03 41#define SR_TSR1 0x03
42#define TSR1_EC (1 << 2) 42#define TSR1_EC (1 << 2)
43#define TSR1_COL (1 << 3) 43#define TSR1_COL (1 << 3)
44#define TSR1_LC (1 << 4) 44#define TSR1_LC (1 << 4)
@@ -46,7 +46,7 @@
46#define TSR1_LOC (1 << 6) 46#define TSR1_LOC (1 << 6)
47#define TSR1_TLF (1 << 7) 47#define TSR1_TLF (1 << 7)
48/* Tx Status Reg for Packet Index 2 */ 48/* Tx Status Reg for Packet Index 2 */
49#define TSR2 0x04 49#define SR_TSR2 0x04
50#define TSR2_EC (1 << 2) 50#define TSR2_EC (1 << 2)
51#define TSR2_COL (1 << 3) 51#define TSR2_COL (1 << 3)
52#define TSR2_LC (1 << 4) 52#define TSR2_LC (1 << 4)
@@ -54,7 +54,7 @@
54#define TSR2_LOC (1 << 6) 54#define TSR2_LOC (1 << 6)
55#define TSR2_TLF (1 << 7) 55#define TSR2_TLF (1 << 7)
56/* Rx Control Reg*/ 56/* Rx Control Reg*/
57#define RCR 0x05 57#define SR_RCR 0x05
58#define RCR_RXEN (1 << 0) 58#define RCR_RXEN (1 << 0)
59#define RCR_PRMSC (1 << 1) 59#define RCR_PRMSC (1 << 1)
60#define RCR_RUNT (1 << 2) 60#define RCR_RUNT (1 << 2)
@@ -62,87 +62,87 @@
62#define RCR_DIS_CRC (1 << 4) 62#define RCR_DIS_CRC (1 << 4)
63#define RCR_DIS_LONG (1 << 5) 63#define RCR_DIS_LONG (1 << 5)
64/* Rx Status Reg */ 64/* Rx Status Reg */
65#define RSR 0x06 65#define SR_RSR 0x06
66#define RSR_AE (1 << 2) 66#define RSR_AE (1 << 2)
67#define RSR_MF (1 << 6) 67#define RSR_MF (1 << 6)
68#define RSR_RF (1 << 7) 68#define RSR_RF (1 << 7)
69/* Rx Overflow Counter Reg */ 69/* Rx Overflow Counter Reg */
70#define ROCR 0x07 70#define SR_ROCR 0x07
71#define ROCR_ROC (0x7F << 0) 71#define ROCR_ROC (0x7F << 0)
72#define ROCR_RXFU (1 << 7) 72#define ROCR_RXFU (1 << 7)
73/* Back Pressure Threshold Reg */ 73/* Back Pressure Threshold Reg */
74#define BPTR 0x08 74#define SR_BPTR 0x08
75#define BPTR_JPT (0x0F << 0) 75#define BPTR_JPT (0x0F << 0)
76#define BPTR_BPHW (0x0F << 4) 76#define BPTR_BPHW (0x0F << 4)
77/* Flow Control Threshold Reg */ 77/* Flow Control Threshold Reg */
78#define FCTR 0x09 78#define SR_FCTR 0x09
79#define FCTR_LWOT (0x0F << 0) 79#define FCTR_LWOT (0x0F << 0)
80#define FCTR_HWOT (0x0F << 4) 80#define FCTR_HWOT (0x0F << 4)
81/* rx/tx Flow Control Reg */ 81/* rx/tx Flow Control Reg */
82#define FCR 0x0A 82#define SR_FCR 0x0A
83#define FCR_FLCE (1 << 0) 83#define FCR_FLCE (1 << 0)
84#define FCR_BKPA (1 << 4) 84#define FCR_BKPA (1 << 4)
85#define FCR_TXPEN (1 << 5) 85#define FCR_TXPEN (1 << 5)
86#define FCR_TXPF (1 << 6) 86#define FCR_TXPF (1 << 6)
87#define FCR_TXP0 (1 << 7) 87#define FCR_TXP0 (1 << 7)
88/* Eeprom & Phy Control Reg */ 88/* Eeprom & Phy Control Reg */
89#define EPCR 0x0B 89#define SR_EPCR 0x0B
90#define EPCR_ERRE (1 << 0) 90#define EPCR_ERRE (1 << 0)
91#define EPCR_ERPRW (1 << 1) 91#define EPCR_ERPRW (1 << 1)
92#define EPCR_ERPRR (1 << 2) 92#define EPCR_ERPRR (1 << 2)
93#define EPCR_EPOS (1 << 3) 93#define EPCR_EPOS (1 << 3)
94#define EPCR_WEP (1 << 4) 94#define EPCR_WEP (1 << 4)
95/* Eeprom & Phy Address Reg */ 95/* Eeprom & Phy Address Reg */
96#define EPAR 0x0C 96#define SR_EPAR 0x0C
97#define EPAR_EROA (0x3F << 0) 97#define EPAR_EROA (0x3F << 0)
98#define EPAR_PHY_ADR_MASK (0x03 << 6) 98#define EPAR_PHY_ADR_MASK (0x03 << 6)
99#define EPAR_PHY_ADR (0x01 << 6) 99#define EPAR_PHY_ADR (0x01 << 6)
100/* Eeprom & Phy Data Reg */ 100/* Eeprom & Phy Data Reg */
101#define EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */ 101#define SR_EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */
102/* Wakeup Control Reg */ 102/* Wakeup Control Reg */
103#define WCR 0x0F 103#define SR_WCR 0x0F
104#define WCR_MAGICST (1 << 0) 104#define WCR_MAGICST (1 << 0)
105#define WCR_LINKST (1 << 2) 105#define WCR_LINKST (1 << 2)
106#define WCR_MAGICEN (1 << 3) 106#define WCR_MAGICEN (1 << 3)
107#define WCR_LINKEN (1 << 5) 107#define WCR_LINKEN (1 << 5)
108/* Physical Address Reg */ 108/* Physical Address Reg */
109#define PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */ 109#define SR_PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */
110/* Multicast Address Reg */ 110/* Multicast Address Reg */
111#define MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */ 111#define SR_MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */
112/* 0x1e unused */ 112/* 0x1e unused */
113/* Phy Reset Reg */ 113/* Phy Reset Reg */
114#define PRR 0x1F 114#define SR_PRR 0x1F
115#define PRR_PHY_RST (1 << 0) 115#define PRR_PHY_RST (1 << 0)
116/* Tx sdram Write Pointer Address Low */ 116/* Tx sdram Write Pointer Address Low */
117#define TWPAL 0x20 117#define SR_TWPAL 0x20
118/* Tx sdram Write Pointer Address High */ 118/* Tx sdram Write Pointer Address High */
119#define TWPAH 0x21 119#define SR_TWPAH 0x21
120/* Tx sdram Read Pointer Address Low */ 120/* Tx sdram Read Pointer Address Low */
121#define TRPAL 0x22 121#define SR_TRPAL 0x22
122/* Tx sdram Read Pointer Address High */ 122/* Tx sdram Read Pointer Address High */
123#define TRPAH 0x23 123#define SR_TRPAH 0x23
124/* Rx sdram Write Pointer Address Low */ 124/* Rx sdram Write Pointer Address Low */
125#define RWPAL 0x24 125#define SR_RWPAL 0x24
126/* Rx sdram Write Pointer Address High */ 126/* Rx sdram Write Pointer Address High */
127#define RWPAH 0x25 127#define SR_RWPAH 0x25
128/* Rx sdram Read Pointer Address Low */ 128/* Rx sdram Read Pointer Address Low */
129#define RRPAL 0x26 129#define SR_RRPAL 0x26
130/* Rx sdram Read Pointer Address High */ 130/* Rx sdram Read Pointer Address High */
131#define RRPAH 0x27 131#define SR_RRPAH 0x27
132/* Vendor ID register */ 132/* Vendor ID register */
133#define VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */ 133#define SR_VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */
134/* Product ID register */ 134/* Product ID register */
135#define PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */ 135#define SR_PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */
136/* CHIP Revision register */ 136/* CHIP Revision register */
137#define CHIPR 0x2C 137#define SR_CHIPR 0x2C
138/* 0x2D --> 0xEF unused */ 138/* 0x2D --> 0xEF unused */
139/* USB Device Address */ 139/* USB Device Address */
140#define USBDA 0xF0 140#define SR_USBDA 0xF0
141#define USBDA_USBFA (0x7F << 0) 141#define USBDA_USBFA (0x7F << 0)
142/* RX packet Counter Reg */ 142/* RX packet Counter Reg */
143#define RXC 0xF1 143#define SR_RXC 0xF1
144/* Tx packet Counter & USB Status Reg */ 144/* Tx packet Counter & USB Status Reg */
145#define TXC_USBS 0xF2 145#define SR_TXC_USBS 0xF2
146#define TXC_USBS_TXC0 (1 << 0) 146#define TXC_USBS_TXC0 (1 << 0)
147#define TXC_USBS_TXC1 (1 << 1) 147#define TXC_USBS_TXC1 (1 << 1)
148#define TXC_USBS_TXC2 (1 << 2) 148#define TXC_USBS_TXC2 (1 << 2)
@@ -150,7 +150,7 @@
150#define TXC_USBS_SUSFLAG (1 << 6) 150#define TXC_USBS_SUSFLAG (1 << 6)
151#define TXC_USBS_RXFAULT (1 << 7) 151#define TXC_USBS_RXFAULT (1 << 7)
152/* USB Control register */ 152/* USB Control register */
153#define USBC 0xF4 153#define SR_USBC 0xF4
154#define USBC_EP3NAK (1 << 4) 154#define USBC_EP3NAK (1 << 4)
155#define USBC_EP3ACK (1 << 5) 155#define USBC_EP3ACK (1 << 5)
156 156
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5ca97713bfb3..059fdf1bf5ee 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -490,17 +490,8 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
490 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 490 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
491 break; 491 break;
492 case VIRTIO_NET_HDR_GSO_UDP: 492 case VIRTIO_NET_HDR_GSO_UDP:
493 {
494 static bool warned;
495
496 if (!warned) {
497 warned = true;
498 netdev_warn(dev,
499 "host using disabled UFO feature; please fix it\n");
500 }
501 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 493 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
502 break; 494 break;
503 }
504 case VIRTIO_NET_HDR_GSO_TCPV6: 495 case VIRTIO_NET_HDR_GSO_TCPV6:
505 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 496 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
506 break; 497 break;
@@ -888,6 +879,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
888 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 879 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
889 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 880 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
890 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 881 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
882 else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
883 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
891 else 884 else
892 BUG(); 885 BUG();
893 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) 886 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
@@ -1748,7 +1741,7 @@ static int virtnet_probe(struct virtio_device *vdev)
1748 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; 1741 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
1749 1742
1750 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { 1743 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
1751 dev->hw_features |= NETIF_F_TSO 1744 dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
1752 | NETIF_F_TSO_ECN | NETIF_F_TSO6; 1745 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
1753 } 1746 }
1754 /* Individual feature bits: what can host handle? */ 1747 /* Individual feature bits: what can host handle? */
@@ -1758,9 +1751,11 @@ static int virtnet_probe(struct virtio_device *vdev)
1758 dev->hw_features |= NETIF_F_TSO6; 1751 dev->hw_features |= NETIF_F_TSO6;
1759 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) 1752 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
1760 dev->hw_features |= NETIF_F_TSO_ECN; 1753 dev->hw_features |= NETIF_F_TSO_ECN;
1754 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
1755 dev->hw_features |= NETIF_F_UFO;
1761 1756
1762 if (gso) 1757 if (gso)
1763 dev->features |= dev->hw_features & NETIF_F_ALL_TSO; 1758 dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
1764 /* (!csum && gso) case will be fixed by register_netdev() */ 1759 /* (!csum && gso) case will be fixed by register_netdev() */
1765 } 1760 }
1766 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) 1761 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
@@ -1798,7 +1793,8 @@ static int virtnet_probe(struct virtio_device *vdev)
1798 /* If we can receive ANY GSO packets, we must allocate large ones. */ 1793 /* If we can receive ANY GSO packets, we must allocate large ones. */
1799 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || 1794 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
1800 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || 1795 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
1801 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) 1796 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
1797 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
1802 vi->big_packets = true; 1798 vi->big_packets = true;
1803 1799
1804 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) 1800 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
@@ -1994,9 +1990,9 @@ static struct virtio_device_id id_table[] = {
1994static unsigned int features[] = { 1990static unsigned int features[] = {
1995 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, 1991 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
1996 VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, 1992 VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
1997 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6, 1993 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
1998 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, 1994 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
1999 VIRTIO_NET_F_GUEST_ECN, 1995 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
2000 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, 1996 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
2001 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, 1997 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
2002 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, 1998 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 7fbd89fbe107..a8c755dcab14 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2432,10 +2432,10 @@ static void vxlan_sock_work(struct work_struct *work)
2432 dev_put(vxlan->dev); 2432 dev_put(vxlan->dev);
2433} 2433}
2434 2434
2435static int vxlan_newlink(struct net *net, struct net_device *dev, 2435static int vxlan_newlink(struct net *src_net, struct net_device *dev,
2436 struct nlattr *tb[], struct nlattr *data[]) 2436 struct nlattr *tb[], struct nlattr *data[])
2437{ 2437{
2438 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2438 struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
2439 struct vxlan_dev *vxlan = netdev_priv(dev); 2439 struct vxlan_dev *vxlan = netdev_priv(dev);
2440 struct vxlan_rdst *dst = &vxlan->default_dst; 2440 struct vxlan_rdst *dst = &vxlan->default_dst;
2441 __u32 vni; 2441 __u32 vni;
@@ -2445,7 +2445,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2445 if (!data[IFLA_VXLAN_ID]) 2445 if (!data[IFLA_VXLAN_ID])
2446 return -EINVAL; 2446 return -EINVAL;
2447 2447
2448 vxlan->net = dev_net(dev); 2448 vxlan->net = src_net;
2449 2449
2450 vni = nla_get_u32(data[IFLA_VXLAN_ID]); 2450 vni = nla_get_u32(data[IFLA_VXLAN_ID]);
2451 dst->remote_vni = vni; 2451 dst->remote_vni = vni;
@@ -2481,7 +2481,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2481 if (data[IFLA_VXLAN_LINK] && 2481 if (data[IFLA_VXLAN_LINK] &&
2482 (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) { 2482 (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
2483 struct net_device *lowerdev 2483 struct net_device *lowerdev
2484 = __dev_get_by_index(net, dst->remote_ifindex); 2484 = __dev_get_by_index(src_net, dst->remote_ifindex);
2485 2485
2486 if (!lowerdev) { 2486 if (!lowerdev) {
2487 pr_info("ifindex %d does not exist\n", dst->remote_ifindex); 2487 pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
@@ -2557,7 +2557,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2557 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) 2557 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
2558 vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; 2558 vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
2559 2559
2560 if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET, 2560 if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
2561 vxlan->dst_port)) { 2561 vxlan->dst_port)) {
2562 pr_info("duplicate VNI %u\n", vni); 2562 pr_info("duplicate VNI %u\n", vni);
2563 return -EEXIST; 2563 return -EEXIST;
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 94e234975c61..a2fdd15f285a 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -25,7 +25,7 @@ if WAN
25# There is no way to detect a comtrol sv11 - force it modular for now. 25# There is no way to detect a comtrol sv11 - force it modular for now.
26config HOSTESS_SV11 26config HOSTESS_SV11
27 tristate "Comtrol Hostess SV-11 support" 27 tristate "Comtrol Hostess SV-11 support"
28 depends on ISA && m && ISA_DMA_API && INET && HDLC 28 depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
29 help 29 help
30 Driver for Comtrol Hostess SV-11 network card which 30 Driver for Comtrol Hostess SV-11 network card which
31 operates on low speed synchronous serial links at up to 31 operates on low speed synchronous serial links at up to
@@ -37,7 +37,7 @@ config HOSTESS_SV11
37# The COSA/SRP driver has not been tested as non-modular yet. 37# The COSA/SRP driver has not been tested as non-modular yet.
38config COSA 38config COSA
39 tristate "COSA/SRP sync serial boards support" 39 tristate "COSA/SRP sync serial boards support"
40 depends on ISA && m && ISA_DMA_API && HDLC 40 depends on ISA && m && ISA_DMA_API && HDLC && VIRT_TO_BUS
41 ---help--- 41 ---help---
42 Driver for COSA and SRP synchronous serial boards. 42 Driver for COSA and SRP synchronous serial boards.
43 43
@@ -87,7 +87,7 @@ config LANMEDIA
87# There is no way to detect a Sealevel board. Force it modular 87# There is no way to detect a Sealevel board. Force it modular
88config SEALEVEL_4021 88config SEALEVEL_4021
89 tristate "Sealevel Systems 4021 support" 89 tristate "Sealevel Systems 4021 support"
90 depends on ISA && m && ISA_DMA_API && INET && HDLC 90 depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
91 help 91 help
92 This is a driver for the Sealevel Systems ACB 56 serial I/O adapter. 92 This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
93 93
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 9259a732e8a4..037f74f0fcf6 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -578,6 +578,7 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
578 goto err_rx_unbind; 578 goto err_rx_unbind;
579 } 579 }
580 queue->task = task; 580 queue->task = task;
581 get_task_struct(task);
581 582
582 task = kthread_create(xenvif_dealloc_kthread, 583 task = kthread_create(xenvif_dealloc_kthread,
583 (void *)queue, "%s-dealloc", queue->name); 584 (void *)queue, "%s-dealloc", queue->name);
@@ -634,6 +635,7 @@ void xenvif_disconnect(struct xenvif *vif)
634 635
635 if (queue->task) { 636 if (queue->task) {
636 kthread_stop(queue->task); 637 kthread_stop(queue->task);
638 put_task_struct(queue->task);
637 queue->task = NULL; 639 queue->task = NULL;
638 } 640 }
639 641
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 908e65e9b821..c8ce701a7efb 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -2109,8 +2109,7 @@ int xenvif_kthread_guest_rx(void *data)
2109 */ 2109 */
2110 if (unlikely(vif->disabled && queue->id == 0)) { 2110 if (unlikely(vif->disabled && queue->id == 0)) {
2111 xenvif_carrier_off(vif); 2111 xenvif_carrier_off(vif);
2112 xenvif_rx_queue_purge(queue); 2112 break;
2113 continue;
2114 } 2113 }
2115 2114
2116 if (!skb_queue_empty(&queue->rx_queue)) 2115 if (!skb_queue_empty(&queue->rx_queue))
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index d415d69dc237..9484d5652ca5 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -650,8 +650,10 @@ static void handle_rx(struct vhost_net *net)
650 break; 650 break;
651 } 651 }
652 /* TODO: Should check and handle checksum. */ 652 /* TODO: Should check and handle checksum. */
653
654 hdr.num_buffers = cpu_to_vhost16(vq, headcount);
653 if (likely(mergeable) && 655 if (likely(mergeable) &&
654 memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount, 656 memcpy_toiovecend(nvq->hdr, (void *)&hdr.num_buffers,
655 offsetof(typeof(hdr), num_buffers), 657 offsetof(typeof(hdr), num_buffers),
656 sizeof hdr.num_buffers)) { 658 sizeof hdr.num_buffers)) {
657 vq_err(vq, "Failed num_buffers write"); 659 vq_err(vq, "Failed num_buffers write");
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 515a35e2a48a..960e666c51e4 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -472,27 +472,59 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
472/** 472/**
473 * vlan_get_protocol - get protocol EtherType. 473 * vlan_get_protocol - get protocol EtherType.
474 * @skb: skbuff to query 474 * @skb: skbuff to query
475 * @type: first vlan protocol
476 * @depth: buffer to store length of eth and vlan tags in bytes
475 * 477 *
476 * Returns the EtherType of the packet, regardless of whether it is 478 * Returns the EtherType of the packet, regardless of whether it is
477 * vlan encapsulated (normal or hardware accelerated) or not. 479 * vlan encapsulated (normal or hardware accelerated) or not.
478 */ 480 */
479static inline __be16 vlan_get_protocol(const struct sk_buff *skb) 481static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
482 int *depth)
480{ 483{
481 __be16 protocol = 0; 484 unsigned int vlan_depth = skb->mac_len;
482 485
483 if (vlan_tx_tag_present(skb) || 486 /* if type is 802.1Q/AD then the header should already be
484 skb->protocol != cpu_to_be16(ETH_P_8021Q)) 487 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
485 protocol = skb->protocol; 488 * ETH_HLEN otherwise
486 else { 489 */
487 __be16 proto, *protop; 490 if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
488 protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr, 491 if (vlan_depth) {
489 h_vlan_encapsulated_proto), 492 if (WARN_ON(vlan_depth < VLAN_HLEN))
490 sizeof(proto), &proto); 493 return 0;
491 if (likely(protop)) 494 vlan_depth -= VLAN_HLEN;
492 protocol = *protop; 495 } else {
496 vlan_depth = ETH_HLEN;
497 }
498 do {
499 struct vlan_hdr *vh;
500
501 if (unlikely(!pskb_may_pull(skb,
502 vlan_depth + VLAN_HLEN)))
503 return 0;
504
505 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
506 type = vh->h_vlan_encapsulated_proto;
507 vlan_depth += VLAN_HLEN;
508 } while (type == htons(ETH_P_8021Q) ||
509 type == htons(ETH_P_8021AD));
493 } 510 }
494 511
495 return protocol; 512 if (depth)
513 *depth = vlan_depth;
514
515 return type;
516}
517
518/**
519 * vlan_get_protocol - get protocol EtherType.
520 * @skb: skbuff to query
521 *
522 * Returns the EtherType of the packet, regardless of whether it is
523 * vlan encapsulated (normal or hardware accelerated) or not.
524 */
525static inline __be16 vlan_get_protocol(struct sk_buff *skb)
526{
527 return __vlan_get_protocol(skb, skb->protocol, NULL);
496} 528}
497 529
498static inline void vlan_set_encap_proto(struct sk_buff *skb, 530static inline void vlan_set_encap_proto(struct sk_buff *skb,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 25c791e295fd..5f3a9aa7225d 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -97,7 +97,7 @@ enum {
97 MLX4_MAX_NUM_PF = 16, 97 MLX4_MAX_NUM_PF = 16,
98 MLX4_MAX_NUM_VF = 126, 98 MLX4_MAX_NUM_VF = 126,
99 MLX4_MAX_NUM_VF_P_PORT = 64, 99 MLX4_MAX_NUM_VF_P_PORT = 64,
100 MLX4_MFUNC_MAX = 80, 100 MLX4_MFUNC_MAX = 128,
101 MLX4_MAX_EQ_NUM = 1024, 101 MLX4_MAX_EQ_NUM = 1024,
102 MLX4_MFUNC_EQ_NUM = 4, 102 MLX4_MFUNC_EQ_NUM = 4,
103 MLX4_MFUNC_MAX_EQES = 8, 103 MLX4_MFUNC_MAX_EQES = 8,
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
index 7ee2df083542..dc8fd81412bf 100644
--- a/include/net/flow_keys.h
+++ b/include/net/flow_keys.h
@@ -22,9 +22,9 @@ struct flow_keys {
22 __be32 ports; 22 __be32 ports;
23 __be16 port16[2]; 23 __be16 port16[2];
24 }; 24 };
25 u16 thoff; 25 u16 thoff;
26 u16 n_proto; 26 __be16 n_proto;
27 u8 ip_proto; 27 u8 ip_proto;
28}; 28};
29 29
30bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow, 30bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
diff --git a/include/net/ip.h b/include/net/ip.h
index f7cbd703d15d..09cf5aebb283 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -181,7 +181,7 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
181 return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0; 181 return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
182} 182}
183 183
184void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, 184void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
185 const struct ip_options *sopt, 185 const struct ip_options *sopt,
186 __be32 daddr, __be32 saddr, 186 __be32 daddr, __be32 saddr,
187 const struct ip_reply_arg *arg, 187 const struct ip_reply_arg *arg,
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 4292929392b0..6e416f6d3e3c 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -671,6 +671,9 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
671 return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr)); 671 return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
672} 672}
673 673
674u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst,
675 struct in6_addr *src);
676void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
674void ipv6_proxy_select_ident(struct sk_buff *skb); 677void ipv6_proxy_select_ident(struct sk_buff *skb);
675 678
676int ip6_dst_hoplimit(struct dst_entry *dst); 679int ip6_dst_hoplimit(struct dst_entry *dst);
@@ -708,7 +711,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
708 __be32 flowlabel, bool autolabel) 711 __be32 flowlabel, bool autolabel)
709{ 712{
710 if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) { 713 if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) {
711 __be32 hash; 714 u32 hash;
712 715
713 hash = skb_get_hash(skb); 716 hash = skb_get_hash(skb);
714 717
@@ -718,7 +721,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
718 */ 721 */
719 hash ^= hash >> 12; 722 hash ^= hash >> 12;
720 723
721 flowlabel = hash & IPV6_FLOWLABEL_MASK; 724 flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
722 } 725 }
723 726
724 return flowlabel; 727 return flowlabel;
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 3ae969e3acf0..9eaaa7884586 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -530,6 +530,8 @@ enum nft_chain_type {
530 530
531int nft_chain_validate_dependency(const struct nft_chain *chain, 531int nft_chain_validate_dependency(const struct nft_chain *chain,
532 enum nft_chain_type type); 532 enum nft_chain_type type);
533int nft_chain_validate_hooks(const struct nft_chain *chain,
534 unsigned int hook_flags);
533 535
534struct nft_stats { 536struct nft_stats {
535 u64 bytes; 537 u64 bytes;
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 24945cefc4fd..0ffef1a38efc 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -52,6 +52,7 @@ struct netns_ipv4 {
52 struct inet_peer_base *peers; 52 struct inet_peer_base *peers;
53 struct tcpm_hash_bucket *tcp_metrics_hash; 53 struct tcpm_hash_bucket *tcp_metrics_hash;
54 unsigned int tcp_metrics_hash_log; 54 unsigned int tcp_metrics_hash_log;
55 struct sock * __percpu *tcp_sk;
55 struct netns_frags frags; 56 struct netns_frags frags;
56#ifdef CONFIG_NETFILTER 57#ifdef CONFIG_NETFILTER
57 struct xt_table *iptable_filter; 58 struct xt_table *iptable_filter;
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 3d282cbb66bf..c605d305c577 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -79,6 +79,9 @@ struct Qdisc {
79 struct netdev_queue *dev_queue; 79 struct netdev_queue *dev_queue;
80 80
81 struct gnet_stats_rate_est64 rate_est; 81 struct gnet_stats_rate_est64 rate_est;
82 struct gnet_stats_basic_cpu __percpu *cpu_bstats;
83 struct gnet_stats_queue __percpu *cpu_qstats;
84
82 struct Qdisc *next_sched; 85 struct Qdisc *next_sched;
83 struct sk_buff *gso_skb; 86 struct sk_buff *gso_skb;
84 /* 87 /*
@@ -86,15 +89,9 @@ struct Qdisc {
86 */ 89 */
87 unsigned long state; 90 unsigned long state;
88 struct sk_buff_head q; 91 struct sk_buff_head q;
89 union { 92 struct gnet_stats_basic_packed bstats;
90 struct gnet_stats_basic_packed bstats;
91 struct gnet_stats_basic_cpu __percpu *cpu_bstats;
92 } __packed;
93 unsigned int __state; 93 unsigned int __state;
94 union { 94 struct gnet_stats_queue qstats;
95 struct gnet_stats_queue qstats;
96 struct gnet_stats_queue __percpu *cpu_qstats;
97 } __packed;
98 struct rcu_head rcu_head; 95 struct rcu_head rcu_head;
99 int padded; 96 int padded;
100 atomic_t refcnt; 97 atomic_t refcnt;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f50f29faf76f..9d9111ef43ae 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -834,8 +834,8 @@ void tcp_get_available_congestion_control(char *buf, size_t len);
834void tcp_get_allowed_congestion_control(char *buf, size_t len); 834void tcp_get_allowed_congestion_control(char *buf, size_t len);
835int tcp_set_allowed_congestion_control(char *allowed); 835int tcp_set_allowed_congestion_control(char *allowed);
836int tcp_set_congestion_control(struct sock *sk, const char *name); 836int tcp_set_congestion_control(struct sock *sk, const char *name);
837void tcp_slow_start(struct tcp_sock *tp, u32 acked); 837u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
838void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w); 838void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
839 839
840u32 tcp_reno_ssthresh(struct sock *sk); 840u32 tcp_reno_ssthresh(struct sock *sk);
841void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked); 841void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
diff --git a/lib/checksum.c b/lib/checksum.c
index 129775eb6de6..8b39e86dbab5 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -181,6 +181,15 @@ csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
181EXPORT_SYMBOL(csum_partial_copy); 181EXPORT_SYMBOL(csum_partial_copy);
182 182
183#ifndef csum_tcpudp_nofold 183#ifndef csum_tcpudp_nofold
184static inline u32 from64to32(u64 x)
185{
186 /* add up 32-bit and 32-bit for 32+c bit */
187 x = (x & 0xffffffff) + (x >> 32);
188 /* add up carry.. */
189 x = (x & 0xffffffff) + (x >> 32);
190 return (u32)x;
191}
192
184__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, 193__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
185 unsigned short len, 194 unsigned short len,
186 unsigned short proto, 195 unsigned short proto,
@@ -195,8 +204,7 @@ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
195#else 204#else
196 s += (proto + len) << 8; 205 s += (proto + len) << 8;
197#endif 206#endif
198 s += (s >> 32); 207 return (__force __wsum)from64to32(s);
199 return (__force __wsum)s;
200} 208}
201EXPORT_SYMBOL(csum_tcpudp_nofold); 209EXPORT_SYMBOL(csum_tcpudp_nofold);
202#endif 210#endif
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index b0330aecbf97..3244aead0926 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -265,22 +265,12 @@ out:
265 data[NFT_REG_VERDICT].verdict = NF_DROP; 265 data[NFT_REG_VERDICT].verdict = NF_DROP;
266} 266}
267 267
268static int nft_reject_bridge_validate_hooks(const struct nft_chain *chain) 268static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
269 const struct nft_expr *expr,
270 const struct nft_data **data)
269{ 271{
270 struct nft_base_chain *basechain; 272 return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) |
271 273 (1 << NF_BR_LOCAL_IN));
272 if (chain->flags & NFT_BASE_CHAIN) {
273 basechain = nft_base_chain(chain);
274
275 switch (basechain->ops[0].hooknum) {
276 case NF_BR_PRE_ROUTING:
277 case NF_BR_LOCAL_IN:
278 break;
279 default:
280 return -EOPNOTSUPP;
281 }
282 }
283 return 0;
284} 274}
285 275
286static int nft_reject_bridge_init(const struct nft_ctx *ctx, 276static int nft_reject_bridge_init(const struct nft_ctx *ctx,
@@ -290,7 +280,7 @@ static int nft_reject_bridge_init(const struct nft_ctx *ctx,
290 struct nft_reject *priv = nft_expr_priv(expr); 280 struct nft_reject *priv = nft_expr_priv(expr);
291 int icmp_code, err; 281 int icmp_code, err;
292 282
293 err = nft_reject_bridge_validate_hooks(ctx->chain); 283 err = nft_reject_bridge_validate(ctx, expr, NULL);
294 if (err < 0) 284 if (err < 0)
295 return err; 285 return err;
296 286
@@ -341,13 +331,6 @@ nla_put_failure:
341 return -1; 331 return -1;
342} 332}
343 333
344static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
345 const struct nft_expr *expr,
346 const struct nft_data **data)
347{
348 return nft_reject_bridge_validate_hooks(ctx->chain);
349}
350
351static struct nft_expr_type nft_reject_bridge_type; 334static struct nft_expr_type nft_reject_bridge_type;
352static const struct nft_expr_ops nft_reject_bridge_ops = { 335static const struct nft_expr_ops nft_reject_bridge_ops = {
353 .type = &nft_reject_bridge_type, 336 .type = &nft_reject_bridge_type,
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 4589ff67bfa9..67a4a36febd1 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -470,7 +470,6 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
470 ASSERT_RTNL(); 470 ASSERT_RTNL();
471 caifdev = netdev_priv(dev); 471 caifdev = netdev_priv(dev);
472 caif_netlink_parms(data, &caifdev->conn_req); 472 caif_netlink_parms(data, &caifdev->conn_req);
473 dev_net_set(caifdev->netdev, src_net);
474 473
475 ret = register_netdevice(dev); 474 ret = register_netdevice(dev);
476 if (ret) 475 if (ret)
diff --git a/net/core/dev.c b/net/core/dev.c
index 171420e75b03..7fe82929f509 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2352,7 +2352,6 @@ EXPORT_SYMBOL(skb_checksum_help);
2352 2352
2353__be16 skb_network_protocol(struct sk_buff *skb, int *depth) 2353__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2354{ 2354{
2355 unsigned int vlan_depth = skb->mac_len;
2356 __be16 type = skb->protocol; 2355 __be16 type = skb->protocol;
2357 2356
2358 /* Tunnel gso handlers can set protocol to ethernet. */ 2357 /* Tunnel gso handlers can set protocol to ethernet. */
@@ -2366,35 +2365,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2366 type = eth->h_proto; 2365 type = eth->h_proto;
2367 } 2366 }
2368 2367
2369 /* if skb->protocol is 802.1Q/AD then the header should already be 2368 return __vlan_get_protocol(skb, type, depth);
2370 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
2371 * ETH_HLEN otherwise
2372 */
2373 if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
2374 if (vlan_depth) {
2375 if (WARN_ON(vlan_depth < VLAN_HLEN))
2376 return 0;
2377 vlan_depth -= VLAN_HLEN;
2378 } else {
2379 vlan_depth = ETH_HLEN;
2380 }
2381 do {
2382 struct vlan_hdr *vh;
2383
2384 if (unlikely(!pskb_may_pull(skb,
2385 vlan_depth + VLAN_HLEN)))
2386 return 0;
2387
2388 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2389 type = vh->h_vlan_encapsulated_proto;
2390 vlan_depth += VLAN_HLEN;
2391 } while (type == htons(ETH_P_8021Q) ||
2392 type == htons(ETH_P_8021AD));
2393 }
2394
2395 *depth = vlan_depth;
2396
2397 return type;
2398} 2369}
2399 2370
2400/** 2371/**
@@ -5323,7 +5294,7 @@ void netdev_upper_dev_unlink(struct net_device *dev,
5323} 5294}
5324EXPORT_SYMBOL(netdev_upper_dev_unlink); 5295EXPORT_SYMBOL(netdev_upper_dev_unlink);
5325 5296
5326void netdev_adjacent_add_links(struct net_device *dev) 5297static void netdev_adjacent_add_links(struct net_device *dev)
5327{ 5298{
5328 struct netdev_adjacent *iter; 5299 struct netdev_adjacent *iter;
5329 5300
@@ -5348,7 +5319,7 @@ void netdev_adjacent_add_links(struct net_device *dev)
5348 } 5319 }
5349} 5320}
5350 5321
5351void netdev_adjacent_del_links(struct net_device *dev) 5322static void netdev_adjacent_del_links(struct net_device *dev)
5352{ 5323{
5353 struct netdev_adjacent *iter; 5324 struct netdev_adjacent *iter;
5354 5325
@@ -6656,7 +6627,7 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
6656 if (!queue) 6627 if (!queue)
6657 return NULL; 6628 return NULL;
6658 netdev_init_one_queue(dev, queue, NULL); 6629 netdev_init_one_queue(dev, queue, NULL);
6659 queue->qdisc = &noop_qdisc; 6630 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
6660 queue->qdisc_sleeping = &noop_qdisc; 6631 queue->qdisc_sleeping = &noop_qdisc;
6661 rcu_assign_pointer(dev->ingress_queue, queue); 6632 rcu_assign_pointer(dev->ingress_queue, queue);
6662#endif 6633#endif
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9cf6fe9ddc0c..446cbaf81185 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2895,12 +2895,16 @@ static int rtnl_bridge_notify(struct net_device *dev, u16 flags)
2895 goto errout; 2895 goto errout;
2896 } 2896 }
2897 2897
2898 if (!skb->len)
2899 goto errout;
2900
2898 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); 2901 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
2899 return 0; 2902 return 0;
2900errout: 2903errout:
2901 WARN_ON(err == -EMSGSIZE); 2904 WARN_ON(err == -EMSGSIZE);
2902 kfree_skb(skb); 2905 kfree_skb(skb);
2903 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 2906 if (err)
2907 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
2904 return err; 2908 return err;
2905} 2909}
2906 2910
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index b50861b22b6b..c373c0708d97 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1506,23 +1506,8 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
1506/* 1506/*
1507 * Generic function to send a packet as reply to another packet. 1507 * Generic function to send a packet as reply to another packet.
1508 * Used to send some TCP resets/acks so far. 1508 * Used to send some TCP resets/acks so far.
1509 *
1510 * Use a fake percpu inet socket to avoid false sharing and contention.
1511 */ 1509 */
1512static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = { 1510void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
1513 .sk = {
1514 .__sk_common = {
1515 .skc_refcnt = ATOMIC_INIT(1),
1516 },
1517 .sk_wmem_alloc = ATOMIC_INIT(1),
1518 .sk_allocation = GFP_ATOMIC,
1519 .sk_flags = (1UL << SOCK_USE_WRITE_QUEUE),
1520 },
1521 .pmtudisc = IP_PMTUDISC_WANT,
1522 .uc_ttl = -1,
1523};
1524
1525void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
1526 const struct ip_options *sopt, 1511 const struct ip_options *sopt,
1527 __be32 daddr, __be32 saddr, 1512 __be32 daddr, __be32 saddr,
1528 const struct ip_reply_arg *arg, 1513 const struct ip_reply_arg *arg,
@@ -1532,9 +1517,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
1532 struct ipcm_cookie ipc; 1517 struct ipcm_cookie ipc;
1533 struct flowi4 fl4; 1518 struct flowi4 fl4;
1534 struct rtable *rt = skb_rtable(skb); 1519 struct rtable *rt = skb_rtable(skb);
1520 struct net *net = sock_net(sk);
1535 struct sk_buff *nskb; 1521 struct sk_buff *nskb;
1536 struct sock *sk;
1537 struct inet_sock *inet;
1538 int err; 1522 int err;
1539 1523
1540 if (__ip_options_echo(&replyopts.opt.opt, skb, sopt)) 1524 if (__ip_options_echo(&replyopts.opt.opt, skb, sopt))
@@ -1565,15 +1549,11 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
1565 if (IS_ERR(rt)) 1549 if (IS_ERR(rt))
1566 return; 1550 return;
1567 1551
1568 inet = &get_cpu_var(unicast_sock); 1552 inet_sk(sk)->tos = arg->tos;
1569 1553
1570 inet->tos = arg->tos;
1571 sk = &inet->sk;
1572 sk->sk_priority = skb->priority; 1554 sk->sk_priority = skb->priority;
1573 sk->sk_protocol = ip_hdr(skb)->protocol; 1555 sk->sk_protocol = ip_hdr(skb)->protocol;
1574 sk->sk_bound_dev_if = arg->bound_dev_if; 1556 sk->sk_bound_dev_if = arg->bound_dev_if;
1575 sock_net_set(sk, net);
1576 __skb_queue_head_init(&sk->sk_write_queue);
1577 sk->sk_sndbuf = sysctl_wmem_default; 1557 sk->sk_sndbuf = sysctl_wmem_default;
1578 err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, 1558 err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
1579 len, 0, &ipc, &rt, MSG_DONTWAIT); 1559 len, 0, &ipc, &rt, MSG_DONTWAIT);
@@ -1589,13 +1569,10 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
1589 arg->csumoffset) = csum_fold(csum_add(nskb->csum, 1569 arg->csumoffset) = csum_fold(csum_add(nskb->csum,
1590 arg->csum)); 1570 arg->csum));
1591 nskb->ip_summed = CHECKSUM_NONE; 1571 nskb->ip_summed = CHECKSUM_NONE;
1592 skb_orphan(nskb);
1593 skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb)); 1572 skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
1594 ip_push_pending_frames(sk, &fl4); 1573 ip_push_pending_frames(sk, &fl4);
1595 } 1574 }
1596out: 1575out:
1597 put_cpu_var(unicast_sock);
1598
1599 ip_rt_put(rt); 1576 ip_rt_put(rt);
1600} 1577}
1601 1578
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d58dd0ec3e53..52e1f2bf0ca2 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -966,6 +966,9 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
966 if (dst->dev->mtu < mtu) 966 if (dst->dev->mtu < mtu)
967 return; 967 return;
968 968
969 if (rt->rt_pmtu && rt->rt_pmtu < mtu)
970 return;
971
969 if (mtu < ip_rt_min_pmtu) 972 if (mtu < ip_rt_min_pmtu)
970 mtu = ip_rt_min_pmtu; 973 mtu = ip_rt_min_pmtu;
971 974
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index bb395d46a389..c037644eafb7 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -150,7 +150,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
150 tcp_slow_start(tp, acked); 150 tcp_slow_start(tp, acked);
151 else { 151 else {
152 bictcp_update(ca, tp->snd_cwnd); 152 bictcp_update(ca, tp->snd_cwnd);
153 tcp_cong_avoid_ai(tp, ca->cnt); 153 tcp_cong_avoid_ai(tp, ca->cnt, 1);
154 } 154 }
155} 155}
156 156
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 27ead0dd16bc..8670e68e2ce6 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -291,26 +291,32 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
291 * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and 291 * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
292 * returns the leftover acks to adjust cwnd in congestion avoidance mode. 292 * returns the leftover acks to adjust cwnd in congestion avoidance mode.
293 */ 293 */
294void tcp_slow_start(struct tcp_sock *tp, u32 acked) 294u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
295{ 295{
296 u32 cwnd = tp->snd_cwnd + acked; 296 u32 cwnd = tp->snd_cwnd + acked;
297 297
298 if (cwnd > tp->snd_ssthresh) 298 if (cwnd > tp->snd_ssthresh)
299 cwnd = tp->snd_ssthresh + 1; 299 cwnd = tp->snd_ssthresh + 1;
300 acked -= cwnd - tp->snd_cwnd;
300 tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp); 301 tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
302
303 return acked;
301} 304}
302EXPORT_SYMBOL_GPL(tcp_slow_start); 305EXPORT_SYMBOL_GPL(tcp_slow_start);
303 306
304/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */ 307/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
305void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w) 308 * for every packet that was ACKed.
309 */
310void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
306{ 311{
312 tp->snd_cwnd_cnt += acked;
307 if (tp->snd_cwnd_cnt >= w) { 313 if (tp->snd_cwnd_cnt >= w) {
308 if (tp->snd_cwnd < tp->snd_cwnd_clamp) 314 u32 delta = tp->snd_cwnd_cnt / w;
309 tp->snd_cwnd++; 315
310 tp->snd_cwnd_cnt = 0; 316 tp->snd_cwnd_cnt -= delta * w;
311 } else { 317 tp->snd_cwnd += delta;
312 tp->snd_cwnd_cnt++;
313 } 318 }
319 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
314} 320}
315EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai); 321EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
316 322
@@ -329,11 +335,13 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
329 return; 335 return;
330 336
331 /* In "safe" area, increase. */ 337 /* In "safe" area, increase. */
332 if (tp->snd_cwnd <= tp->snd_ssthresh) 338 if (tp->snd_cwnd <= tp->snd_ssthresh) {
333 tcp_slow_start(tp, acked); 339 acked = tcp_slow_start(tp, acked);
340 if (!acked)
341 return;
342 }
334 /* In dangerous area, increase slowly. */ 343 /* In dangerous area, increase slowly. */
335 else 344 tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
336 tcp_cong_avoid_ai(tp, tp->snd_cwnd);
337} 345}
338EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid); 346EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
339 347
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 6b6002416a73..4b276d1ed980 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -93,9 +93,7 @@ struct bictcp {
93 u32 epoch_start; /* beginning of an epoch */ 93 u32 epoch_start; /* beginning of an epoch */
94 u32 ack_cnt; /* number of acks */ 94 u32 ack_cnt; /* number of acks */
95 u32 tcp_cwnd; /* estimated tcp cwnd */ 95 u32 tcp_cwnd; /* estimated tcp cwnd */
96#define ACK_RATIO_SHIFT 4 96 u16 unused;
97#define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT)
98 u16 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */
99 u8 sample_cnt; /* number of samples to decide curr_rtt */ 97 u8 sample_cnt; /* number of samples to decide curr_rtt */
100 u8 found; /* the exit point is found? */ 98 u8 found; /* the exit point is found? */
101 u32 round_start; /* beginning of each round */ 99 u32 round_start; /* beginning of each round */
@@ -114,7 +112,6 @@ static inline void bictcp_reset(struct bictcp *ca)
114 ca->bic_K = 0; 112 ca->bic_K = 0;
115 ca->delay_min = 0; 113 ca->delay_min = 0;
116 ca->epoch_start = 0; 114 ca->epoch_start = 0;
117 ca->delayed_ack = 2 << ACK_RATIO_SHIFT;
118 ca->ack_cnt = 0; 115 ca->ack_cnt = 0;
119 ca->tcp_cwnd = 0; 116 ca->tcp_cwnd = 0;
120 ca->found = 0; 117 ca->found = 0;
@@ -205,23 +202,30 @@ static u32 cubic_root(u64 a)
205/* 202/*
206 * Compute congestion window to use. 203 * Compute congestion window to use.
207 */ 204 */
208static inline void bictcp_update(struct bictcp *ca, u32 cwnd) 205static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
209{ 206{
210 u32 delta, bic_target, max_cnt; 207 u32 delta, bic_target, max_cnt;
211 u64 offs, t; 208 u64 offs, t;
212 209
213 ca->ack_cnt++; /* count the number of ACKs */ 210 ca->ack_cnt += acked; /* count the number of ACKed packets */
214 211
215 if (ca->last_cwnd == cwnd && 212 if (ca->last_cwnd == cwnd &&
216 (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32) 213 (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32)
217 return; 214 return;
218 215
216 /* The CUBIC function can update ca->cnt at most once per jiffy.
217 * On all cwnd reduction events, ca->epoch_start is set to 0,
218 * which will force a recalculation of ca->cnt.
219 */
220 if (ca->epoch_start && tcp_time_stamp == ca->last_time)
221 goto tcp_friendliness;
222
219 ca->last_cwnd = cwnd; 223 ca->last_cwnd = cwnd;
220 ca->last_time = tcp_time_stamp; 224 ca->last_time = tcp_time_stamp;
221 225
222 if (ca->epoch_start == 0) { 226 if (ca->epoch_start == 0) {
223 ca->epoch_start = tcp_time_stamp; /* record beginning */ 227 ca->epoch_start = tcp_time_stamp; /* record beginning */
224 ca->ack_cnt = 1; /* start counting */ 228 ca->ack_cnt = acked; /* start counting */
225 ca->tcp_cwnd = cwnd; /* syn with cubic */ 229 ca->tcp_cwnd = cwnd; /* syn with cubic */
226 230
227 if (ca->last_max_cwnd <= cwnd) { 231 if (ca->last_max_cwnd <= cwnd) {
@@ -283,6 +287,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
283 if (ca->last_max_cwnd == 0 && ca->cnt > 20) 287 if (ca->last_max_cwnd == 0 && ca->cnt > 20)
284 ca->cnt = 20; /* increase cwnd 5% per RTT */ 288 ca->cnt = 20; /* increase cwnd 5% per RTT */
285 289
290tcp_friendliness:
286 /* TCP Friendly */ 291 /* TCP Friendly */
287 if (tcp_friendliness) { 292 if (tcp_friendliness) {
288 u32 scale = beta_scale; 293 u32 scale = beta_scale;
@@ -301,7 +306,6 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
301 } 306 }
302 } 307 }
303 308
304 ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack;
305 if (ca->cnt == 0) /* cannot be zero */ 309 if (ca->cnt == 0) /* cannot be zero */
306 ca->cnt = 1; 310 ca->cnt = 1;
307} 311}
@@ -317,11 +321,12 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
317 if (tp->snd_cwnd <= tp->snd_ssthresh) { 321 if (tp->snd_cwnd <= tp->snd_ssthresh) {
318 if (hystart && after(ack, ca->end_seq)) 322 if (hystart && after(ack, ca->end_seq))
319 bictcp_hystart_reset(sk); 323 bictcp_hystart_reset(sk);
320 tcp_slow_start(tp, acked); 324 acked = tcp_slow_start(tp, acked);
321 } else { 325 if (!acked)
322 bictcp_update(ca, tp->snd_cwnd); 326 return;
323 tcp_cong_avoid_ai(tp, ca->cnt);
324 } 327 }
328 bictcp_update(ca, tp->snd_cwnd, acked);
329 tcp_cong_avoid_ai(tp, ca->cnt, acked);
325} 330}
326 331
327static u32 bictcp_recalc_ssthresh(struct sock *sk) 332static u32 bictcp_recalc_ssthresh(struct sock *sk)
@@ -411,20 +416,10 @@ static void hystart_update(struct sock *sk, u32 delay)
411 */ 416 */
412static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) 417static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
413{ 418{
414 const struct inet_connection_sock *icsk = inet_csk(sk);
415 const struct tcp_sock *tp = tcp_sk(sk); 419 const struct tcp_sock *tp = tcp_sk(sk);
416 struct bictcp *ca = inet_csk_ca(sk); 420 struct bictcp *ca = inet_csk_ca(sk);
417 u32 delay; 421 u32 delay;
418 422
419 if (icsk->icsk_ca_state == TCP_CA_Open) {
420 u32 ratio = ca->delayed_ack;
421
422 ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT;
423 ratio += cnt;
424
425 ca->delayed_ack = clamp(ratio, 1U, ACK_RATIO_LIMIT);
426 }
427
428 /* Some calls are for duplicates without timetamps */ 423 /* Some calls are for duplicates without timetamps */
429 if (rtt_us < 0) 424 if (rtt_us < 0)
430 return; 425 return;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a3f72d7fc06c..d22f54482bab 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -683,7 +683,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
683 arg.bound_dev_if = sk->sk_bound_dev_if; 683 arg.bound_dev_if = sk->sk_bound_dev_if;
684 684
685 arg.tos = ip_hdr(skb)->tos; 685 arg.tos = ip_hdr(skb)->tos;
686 ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt, 686 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
687 skb, &TCP_SKB_CB(skb)->header.h4.opt,
687 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 688 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
688 &arg, arg.iov[0].iov_len); 689 &arg, arg.iov[0].iov_len);
689 690
@@ -767,7 +768,8 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
767 if (oif) 768 if (oif)
768 arg.bound_dev_if = oif; 769 arg.bound_dev_if = oif;
769 arg.tos = tos; 770 arg.tos = tos;
770 ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt, 771 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
772 skb, &TCP_SKB_CB(skb)->header.h4.opt,
771 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 773 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
772 &arg, arg.iov[0].iov_len); 774 &arg, arg.iov[0].iov_len);
773 775
@@ -2428,14 +2430,39 @@ struct proto tcp_prot = {
2428}; 2430};
2429EXPORT_SYMBOL(tcp_prot); 2431EXPORT_SYMBOL(tcp_prot);
2430 2432
2433static void __net_exit tcp_sk_exit(struct net *net)
2434{
2435 int cpu;
2436
2437 for_each_possible_cpu(cpu)
2438 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2439 free_percpu(net->ipv4.tcp_sk);
2440}
2441
2431static int __net_init tcp_sk_init(struct net *net) 2442static int __net_init tcp_sk_init(struct net *net)
2432{ 2443{
2444 int res, cpu;
2445
2446 net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2447 if (!net->ipv4.tcp_sk)
2448 return -ENOMEM;
2449
2450 for_each_possible_cpu(cpu) {
2451 struct sock *sk;
2452
2453 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2454 IPPROTO_TCP, net);
2455 if (res)
2456 goto fail;
2457 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2458 }
2433 net->ipv4.sysctl_tcp_ecn = 2; 2459 net->ipv4.sysctl_tcp_ecn = 2;
2434 return 0; 2460 return 0;
2435}
2436 2461
2437static void __net_exit tcp_sk_exit(struct net *net) 2462fail:
2438{ 2463 tcp_sk_exit(net);
2464
2465 return res;
2439} 2466}
2440 2467
2441static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list) 2468static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 6824afb65d93..333bcb2415ff 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -25,7 +25,8 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
25 if (tp->snd_cwnd <= tp->snd_ssthresh) 25 if (tp->snd_cwnd <= tp->snd_ssthresh)
26 tcp_slow_start(tp, acked); 26 tcp_slow_start(tp, acked);
27 else 27 else
28 tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)); 28 tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
29 1);
29} 30}
30 31
31static u32 tcp_scalable_ssthresh(struct sock *sk) 32static u32 tcp_scalable_ssthresh(struct sock *sk)
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index a4d2d2d88dca..112151eeee45 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -159,7 +159,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
159 /* In the "non-congestive state", increase cwnd 159 /* In the "non-congestive state", increase cwnd
160 * every rtt. 160 * every rtt.
161 */ 161 */
162 tcp_cong_avoid_ai(tp, tp->snd_cwnd); 162 tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
163 } else { 163 } else {
164 /* In the "congestive state", increase cwnd 164 /* In the "congestive state", increase cwnd
165 * every other rtt. 165 * every other rtt.
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index cd7273218598..17d35662930d 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -92,7 +92,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
92 92
93 } else { 93 } else {
94 /* Reno */ 94 /* Reno */
95 tcp_cong_avoid_ai(tp, tp->snd_cwnd); 95 tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
96 } 96 }
97 97
98 /* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt. 98 /* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 13cda4c6313b..01ccc28a686f 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -417,7 +417,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
417 if (code == ICMPV6_HDR_FIELD) 417 if (code == ICMPV6_HDR_FIELD)
418 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data); 418 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
419 419
420 if (teli && teli == info - 2) { 420 if (teli && teli == be32_to_cpu(info) - 2) {
421 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; 421 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
422 if (tel->encap_limit == 0) { 422 if (tel->encap_limit == 0) {
423 net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", 423 net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
@@ -429,7 +429,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
429 } 429 }
430 break; 430 break;
431 case ICMPV6_PKT_TOOBIG: 431 case ICMPV6_PKT_TOOBIG:
432 mtu = info - offset; 432 mtu = be32_to_cpu(info) - offset;
433 if (mtu < IPV6_MIN_MTU) 433 if (mtu < IPV6_MIN_MTU)
434 mtu = IPV6_MIN_MTU; 434 mtu = IPV6_MIN_MTU;
435 t->dev->mtu = mtu; 435 t->dev->mtu = mtu;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ce69a12ae48c..d28f2a2efb32 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -537,20 +537,6 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
537 skb_copy_secmark(to, from); 537 skb_copy_secmark(to, from);
538} 538}
539 539
540static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
541{
542 static u32 ip6_idents_hashrnd __read_mostly;
543 u32 hash, id;
544
545 net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
546
547 hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
548 hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
549
550 id = ip_idents_reserve(hash, 1);
551 fhdr->identification = htonl(id);
552}
553
554int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) 540int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
555{ 541{
556 struct sk_buff *frag; 542 struct sk_buff *frag;
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 97f41a3e68d9..54520a0bd5e3 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -9,6 +9,24 @@
9#include <net/addrconf.h> 9#include <net/addrconf.h>
10#include <net/secure_seq.h> 10#include <net/secure_seq.h>
11 11
12u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst, struct in6_addr *src)
13{
14 u32 hash, id;
15
16 hash = __ipv6_addr_jhash(dst, hashrnd);
17 hash = __ipv6_addr_jhash(src, hash);
18
19 /* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve,
20 * set the hight order instead thus minimizing possible future
21 * collisions.
22 */
23 id = ip_idents_reserve(hash, 1);
24 if (unlikely(!id))
25 id = 1 << 31;
26
27 return id;
28}
29
12/* This function exists only for tap drivers that must support broken 30/* This function exists only for tap drivers that must support broken
13 * clients requesting UFO without specifying an IPv6 fragment ID. 31 * clients requesting UFO without specifying an IPv6 fragment ID.
14 * 32 *
@@ -22,7 +40,7 @@ void ipv6_proxy_select_ident(struct sk_buff *skb)
22 static u32 ip6_proxy_idents_hashrnd __read_mostly; 40 static u32 ip6_proxy_idents_hashrnd __read_mostly;
23 struct in6_addr buf[2]; 41 struct in6_addr buf[2];
24 struct in6_addr *addrs; 42 struct in6_addr *addrs;
25 u32 hash, id; 43 u32 id;
26 44
27 addrs = skb_header_pointer(skb, 45 addrs = skb_header_pointer(skb,
28 skb_network_offset(skb) + 46 skb_network_offset(skb) +
@@ -34,14 +52,25 @@ void ipv6_proxy_select_ident(struct sk_buff *skb)
34 net_get_random_once(&ip6_proxy_idents_hashrnd, 52 net_get_random_once(&ip6_proxy_idents_hashrnd,
35 sizeof(ip6_proxy_idents_hashrnd)); 53 sizeof(ip6_proxy_idents_hashrnd));
36 54
37 hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd); 55 id = __ipv6_select_ident(ip6_proxy_idents_hashrnd,
38 hash = __ipv6_addr_jhash(&addrs[0], hash); 56 &addrs[1], &addrs[0]);
39 57 skb_shinfo(skb)->ip6_frag_id = id;
40 id = ip_idents_reserve(hash, 1);
41 skb_shinfo(skb)->ip6_frag_id = htonl(id);
42} 58}
43EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident); 59EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
44 60
61void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
62{
63 static u32 ip6_idents_hashrnd __read_mostly;
64 u32 id;
65
66 net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
67
68 id = __ipv6_select_ident(ip6_idents_hashrnd, &rt->rt6i_dst.addr,
69 &rt->rt6i_src.addr);
70 fhdr->identification = htonl(id);
71}
72EXPORT_SYMBOL(ipv6_select_ident);
73
45int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) 74int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
46{ 75{
47 u16 offset = sizeof(struct ipv6hdr); 76 u16 offset = sizeof(struct ipv6hdr);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 213546bd6d5d..cdbfe5af6187 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1506,12 +1506,12 @@ static bool ipip6_netlink_encap_parms(struct nlattr *data[],
1506 1506
1507 if (data[IFLA_IPTUN_ENCAP_SPORT]) { 1507 if (data[IFLA_IPTUN_ENCAP_SPORT]) {
1508 ret = true; 1508 ret = true;
1509 ipencap->sport = nla_get_u16(data[IFLA_IPTUN_ENCAP_SPORT]); 1509 ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
1510 } 1510 }
1511 1511
1512 if (data[IFLA_IPTUN_ENCAP_DPORT]) { 1512 if (data[IFLA_IPTUN_ENCAP_DPORT]) {
1513 ret = true; 1513 ret = true;
1514 ipencap->dport = nla_get_u16(data[IFLA_IPTUN_ENCAP_DPORT]); 1514 ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
1515 } 1515 }
1516 1516
1517 return ret; 1517 return ret;
@@ -1707,9 +1707,9 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
1707 1707
1708 if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, 1708 if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
1709 tunnel->encap.type) || 1709 tunnel->encap.type) ||
1710 nla_put_u16(skb, IFLA_IPTUN_ENCAP_SPORT, 1710 nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
1711 tunnel->encap.sport) || 1711 tunnel->encap.sport) ||
1712 nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT, 1712 nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
1713 tunnel->encap.dport) || 1713 tunnel->encap.dport) ||
1714 nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, 1714 nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
1715 tunnel->encap.flags)) 1715 tunnel->encap.flags))
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index b6aa8ed18257..a56276996b72 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -52,6 +52,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
52 52
53 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); 53 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
54 54
55 /* Set the IPv6 fragment id if not set yet */
56 if (!skb_shinfo(skb)->ip6_frag_id)
57 ipv6_proxy_select_ident(skb);
58
55 segs = NULL; 59 segs = NULL;
56 goto out; 60 goto out;
57 } 61 }
@@ -108,7 +112,11 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
108 fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); 112 fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
109 fptr->nexthdr = nexthdr; 113 fptr->nexthdr = nexthdr;
110 fptr->reserved = 0; 114 fptr->reserved = 0;
111 fptr->identification = skb_shinfo(skb)->ip6_frag_id; 115 if (skb_shinfo(skb)->ip6_frag_id)
116 fptr->identification = skb_shinfo(skb)->ip6_frag_id;
117 else
118 ipv6_select_ident(fptr,
119 (struct rt6_info *)skb_dst(skb));
112 120
113 /* Fragment the skb. ipv6 header and the remaining fields of the 121 /* Fragment the skb. ipv6 header and the remaining fields of the
114 * fragment header are updated in ipv6_gso_segment() 122 * fragment header are updated in ipv6_gso_segment()
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 990decba1fe4..b87ca32efa0b 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -659,16 +659,24 @@ static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
659 return err; 659 return err;
660} 660}
661 661
662static int ip_vs_route_me_harder(int af, struct sk_buff *skb) 662static int ip_vs_route_me_harder(int af, struct sk_buff *skb,
663 unsigned int hooknum)
663{ 664{
665 if (!sysctl_snat_reroute(skb))
666 return 0;
667 /* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */
668 if (NF_INET_LOCAL_IN == hooknum)
669 return 0;
664#ifdef CONFIG_IP_VS_IPV6 670#ifdef CONFIG_IP_VS_IPV6
665 if (af == AF_INET6) { 671 if (af == AF_INET6) {
666 if (sysctl_snat_reroute(skb) && ip6_route_me_harder(skb) != 0) 672 struct dst_entry *dst = skb_dst(skb);
673
674 if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
675 ip6_route_me_harder(skb) != 0)
667 return 1; 676 return 1;
668 } else 677 } else
669#endif 678#endif
670 if ((sysctl_snat_reroute(skb) || 679 if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
671 skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
672 ip_route_me_harder(skb, RTN_LOCAL) != 0) 680 ip_route_me_harder(skb, RTN_LOCAL) != 0)
673 return 1; 681 return 1;
674 682
@@ -791,7 +799,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
791 union nf_inet_addr *snet, 799 union nf_inet_addr *snet,
792 __u8 protocol, struct ip_vs_conn *cp, 800 __u8 protocol, struct ip_vs_conn *cp,
793 struct ip_vs_protocol *pp, 801 struct ip_vs_protocol *pp,
794 unsigned int offset, unsigned int ihl) 802 unsigned int offset, unsigned int ihl,
803 unsigned int hooknum)
795{ 804{
796 unsigned int verdict = NF_DROP; 805 unsigned int verdict = NF_DROP;
797 806
@@ -821,7 +830,7 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
821#endif 830#endif
822 ip_vs_nat_icmp(skb, pp, cp, 1); 831 ip_vs_nat_icmp(skb, pp, cp, 1);
823 832
824 if (ip_vs_route_me_harder(af, skb)) 833 if (ip_vs_route_me_harder(af, skb, hooknum))
825 goto out; 834 goto out;
826 835
827 /* do the statistics and put it back */ 836 /* do the statistics and put it back */
@@ -916,7 +925,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
916 925
917 snet.ip = iph->saddr; 926 snet.ip = iph->saddr;
918 return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp, 927 return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
919 pp, ciph.len, ihl); 928 pp, ciph.len, ihl, hooknum);
920} 929}
921 930
922#ifdef CONFIG_IP_VS_IPV6 931#ifdef CONFIG_IP_VS_IPV6
@@ -981,7 +990,8 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
981 snet.in6 = ciph.saddr.in6; 990 snet.in6 = ciph.saddr.in6;
982 writable = ciph.len; 991 writable = ciph.len;
983 return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp, 992 return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
984 pp, writable, sizeof(struct ipv6hdr)); 993 pp, writable, sizeof(struct ipv6hdr),
994 hooknum);
985} 995}
986#endif 996#endif
987 997
@@ -1040,7 +1050,8 @@ static inline bool is_new_conn(const struct sk_buff *skb,
1040 */ 1050 */
1041static unsigned int 1051static unsigned int
1042handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, 1052handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
1043 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) 1053 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph,
1054 unsigned int hooknum)
1044{ 1055{
1045 struct ip_vs_protocol *pp = pd->pp; 1056 struct ip_vs_protocol *pp = pd->pp;
1046 1057
@@ -1078,7 +1089,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
1078 * if it came from this machine itself. So re-compute 1089 * if it came from this machine itself. So re-compute
1079 * the routing information. 1090 * the routing information.
1080 */ 1091 */
1081 if (ip_vs_route_me_harder(af, skb)) 1092 if (ip_vs_route_me_harder(af, skb, hooknum))
1082 goto drop; 1093 goto drop;
1083 1094
1084 IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT"); 1095 IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
@@ -1181,7 +1192,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
1181 cp = pp->conn_out_get(af, skb, &iph, 0); 1192 cp = pp->conn_out_get(af, skb, &iph, 0);
1182 1193
1183 if (likely(cp)) 1194 if (likely(cp))
1184 return handle_response(af, skb, pd, cp, &iph); 1195 return handle_response(af, skb, pd, cp, &iph, hooknum);
1185 if (sysctl_nat_icmp_send(net) && 1196 if (sysctl_nat_icmp_send(net) &&
1186 (pp->protocol == IPPROTO_TCP || 1197 (pp->protocol == IPPROTO_TCP ||
1187 pp->protocol == IPPROTO_UDP || 1198 pp->protocol == IPPROTO_UDP ||
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 3b3ddb4fb9ee..1ff04bcd4871 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1134,9 +1134,11 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
1134 /* Restore old counters on this cpu, no problem. Per-cpu statistics 1134 /* Restore old counters on this cpu, no problem. Per-cpu statistics
1135 * are not exposed to userspace. 1135 * are not exposed to userspace.
1136 */ 1136 */
1137 preempt_disable();
1137 stats = this_cpu_ptr(newstats); 1138 stats = this_cpu_ptr(newstats);
1138 stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES])); 1139 stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
1139 stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS])); 1140 stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
1141 preempt_enable();
1140 1142
1141 return newstats; 1143 return newstats;
1142} 1144}
@@ -1262,8 +1264,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
1262 nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); 1264 nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
1263 trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN, 1265 trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN,
1264 sizeof(struct nft_trans_chain)); 1266 sizeof(struct nft_trans_chain));
1265 if (trans == NULL) 1267 if (trans == NULL) {
1268 free_percpu(stats);
1266 return -ENOMEM; 1269 return -ENOMEM;
1270 }
1267 1271
1268 nft_trans_chain_stats(trans) = stats; 1272 nft_trans_chain_stats(trans) = stats;
1269 nft_trans_chain_update(trans) = true; 1273 nft_trans_chain_update(trans) = true;
@@ -1319,8 +1323,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
1319 hookfn = type->hooks[hooknum]; 1323 hookfn = type->hooks[hooknum];
1320 1324
1321 basechain = kzalloc(sizeof(*basechain), GFP_KERNEL); 1325 basechain = kzalloc(sizeof(*basechain), GFP_KERNEL);
1322 if (basechain == NULL) 1326 if (basechain == NULL) {
1327 module_put(type->owner);
1323 return -ENOMEM; 1328 return -ENOMEM;
1329 }
1324 1330
1325 if (nla[NFTA_CHAIN_COUNTERS]) { 1331 if (nla[NFTA_CHAIN_COUNTERS]) {
1326 stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]); 1332 stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
@@ -3753,6 +3759,24 @@ int nft_chain_validate_dependency(const struct nft_chain *chain,
3753} 3759}
3754EXPORT_SYMBOL_GPL(nft_chain_validate_dependency); 3760EXPORT_SYMBOL_GPL(nft_chain_validate_dependency);
3755 3761
3762int nft_chain_validate_hooks(const struct nft_chain *chain,
3763 unsigned int hook_flags)
3764{
3765 struct nft_base_chain *basechain;
3766
3767 if (chain->flags & NFT_BASE_CHAIN) {
3768 basechain = nft_base_chain(chain);
3769
3770 if ((1 << basechain->ops[0].hooknum) & hook_flags)
3771 return 0;
3772
3773 return -EOPNOTSUPP;
3774 }
3775
3776 return 0;
3777}
3778EXPORT_SYMBOL_GPL(nft_chain_validate_hooks);
3779
3756/* 3780/*
3757 * Loop detection - walk through the ruleset beginning at the destination chain 3781 * Loop detection - walk through the ruleset beginning at the destination chain
3758 * of a new jump until either the source chain is reached (loop) or all 3782 * of a new jump until either the source chain is reached (loop) or all
diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c
index d1ffd5eb3a9b..9aea747b43ea 100644
--- a/net/netfilter/nft_masq.c
+++ b/net/netfilter/nft_masq.c
@@ -21,6 +21,21 @@ const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = {
21}; 21};
22EXPORT_SYMBOL_GPL(nft_masq_policy); 22EXPORT_SYMBOL_GPL(nft_masq_policy);
23 23
24int nft_masq_validate(const struct nft_ctx *ctx,
25 const struct nft_expr *expr,
26 const struct nft_data **data)
27{
28 int err;
29
30 err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
31 if (err < 0)
32 return err;
33
34 return nft_chain_validate_hooks(ctx->chain,
35 (1 << NF_INET_POST_ROUTING));
36}
37EXPORT_SYMBOL_GPL(nft_masq_validate);
38
24int nft_masq_init(const struct nft_ctx *ctx, 39int nft_masq_init(const struct nft_ctx *ctx,
25 const struct nft_expr *expr, 40 const struct nft_expr *expr,
26 const struct nlattr * const tb[]) 41 const struct nlattr * const tb[])
@@ -28,8 +43,8 @@ int nft_masq_init(const struct nft_ctx *ctx,
28 struct nft_masq *priv = nft_expr_priv(expr); 43 struct nft_masq *priv = nft_expr_priv(expr);
29 int err; 44 int err;
30 45
31 err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); 46 err = nft_masq_validate(ctx, expr, NULL);
32 if (err < 0) 47 if (err)
33 return err; 48 return err;
34 49
35 if (tb[NFTA_MASQ_FLAGS] == NULL) 50 if (tb[NFTA_MASQ_FLAGS] == NULL)
@@ -60,12 +75,5 @@ nla_put_failure:
60} 75}
61EXPORT_SYMBOL_GPL(nft_masq_dump); 76EXPORT_SYMBOL_GPL(nft_masq_dump);
62 77
63int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
64 const struct nft_data **data)
65{
66 return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
67}
68EXPORT_SYMBOL_GPL(nft_masq_validate);
69
70MODULE_LICENSE("GPL"); 78MODULE_LICENSE("GPL");
71MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>"); 79MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index aff54fb1c8a0..a0837c6c9283 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -88,17 +88,40 @@ static const struct nla_policy nft_nat_policy[NFTA_NAT_MAX + 1] = {
88 [NFTA_NAT_FLAGS] = { .type = NLA_U32 }, 88 [NFTA_NAT_FLAGS] = { .type = NLA_U32 },
89}; 89};
90 90
91static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, 91static int nft_nat_validate(const struct nft_ctx *ctx,
92 const struct nlattr * const tb[]) 92 const struct nft_expr *expr,
93 const struct nft_data **data)
93{ 94{
94 struct nft_nat *priv = nft_expr_priv(expr); 95 struct nft_nat *priv = nft_expr_priv(expr);
95 u32 family;
96 int err; 96 int err;
97 97
98 err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); 98 err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
99 if (err < 0) 99 if (err < 0)
100 return err; 100 return err;
101 101
102 switch (priv->type) {
103 case NFT_NAT_SNAT:
104 err = nft_chain_validate_hooks(ctx->chain,
105 (1 << NF_INET_POST_ROUTING) |
106 (1 << NF_INET_LOCAL_IN));
107 break;
108 case NFT_NAT_DNAT:
109 err = nft_chain_validate_hooks(ctx->chain,
110 (1 << NF_INET_PRE_ROUTING) |
111 (1 << NF_INET_LOCAL_OUT));
112 break;
113 }
114
115 return err;
116}
117
118static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
119 const struct nlattr * const tb[])
120{
121 struct nft_nat *priv = nft_expr_priv(expr);
122 u32 family;
123 int err;
124
102 if (tb[NFTA_NAT_TYPE] == NULL || 125 if (tb[NFTA_NAT_TYPE] == NULL ||
103 (tb[NFTA_NAT_REG_ADDR_MIN] == NULL && 126 (tb[NFTA_NAT_REG_ADDR_MIN] == NULL &&
104 tb[NFTA_NAT_REG_PROTO_MIN] == NULL)) 127 tb[NFTA_NAT_REG_PROTO_MIN] == NULL))
@@ -115,6 +138,10 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
115 return -EINVAL; 138 return -EINVAL;
116 } 139 }
117 140
141 err = nft_nat_validate(ctx, expr, NULL);
142 if (err < 0)
143 return err;
144
118 if (tb[NFTA_NAT_FAMILY] == NULL) 145 if (tb[NFTA_NAT_FAMILY] == NULL)
119 return -EINVAL; 146 return -EINVAL;
120 147
@@ -219,13 +246,6 @@ nla_put_failure:
219 return -1; 246 return -1;
220} 247}
221 248
222static int nft_nat_validate(const struct nft_ctx *ctx,
223 const struct nft_expr *expr,
224 const struct nft_data **data)
225{
226 return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
227}
228
229static struct nft_expr_type nft_nat_type; 249static struct nft_expr_type nft_nat_type;
230static const struct nft_expr_ops nft_nat_ops = { 250static const struct nft_expr_ops nft_nat_ops = {
231 .type = &nft_nat_type, 251 .type = &nft_nat_type,
diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
index 9e8093f28311..d7e9e93a4e90 100644
--- a/net/netfilter/nft_redir.c
+++ b/net/netfilter/nft_redir.c
@@ -23,6 +23,22 @@ const struct nla_policy nft_redir_policy[NFTA_REDIR_MAX + 1] = {
23}; 23};
24EXPORT_SYMBOL_GPL(nft_redir_policy); 24EXPORT_SYMBOL_GPL(nft_redir_policy);
25 25
26int nft_redir_validate(const struct nft_ctx *ctx,
27 const struct nft_expr *expr,
28 const struct nft_data **data)
29{
30 int err;
31
32 err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
33 if (err < 0)
34 return err;
35
36 return nft_chain_validate_hooks(ctx->chain,
37 (1 << NF_INET_PRE_ROUTING) |
38 (1 << NF_INET_LOCAL_OUT));
39}
40EXPORT_SYMBOL_GPL(nft_redir_validate);
41
26int nft_redir_init(const struct nft_ctx *ctx, 42int nft_redir_init(const struct nft_ctx *ctx,
27 const struct nft_expr *expr, 43 const struct nft_expr *expr,
28 const struct nlattr * const tb[]) 44 const struct nlattr * const tb[])
@@ -30,7 +46,7 @@ int nft_redir_init(const struct nft_ctx *ctx,
30 struct nft_redir *priv = nft_expr_priv(expr); 46 struct nft_redir *priv = nft_expr_priv(expr);
31 int err; 47 int err;
32 48
33 err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); 49 err = nft_redir_validate(ctx, expr, NULL);
34 if (err < 0) 50 if (err < 0)
35 return err; 51 return err;
36 52
@@ -88,12 +104,5 @@ nla_put_failure:
88} 104}
89EXPORT_SYMBOL_GPL(nft_redir_dump); 105EXPORT_SYMBOL_GPL(nft_redir_dump);
90 106
91int nft_redir_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
92 const struct nft_data **data)
93{
94 return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
95}
96EXPORT_SYMBOL_GPL(nft_redir_validate);
97
98MODULE_LICENSE("GPL"); 107MODULE_LICENSE("GPL");
99MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>"); 108MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 02fdde28dada..75532efa51cd 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1438,7 +1438,7 @@ static void netlink_undo_bind(int group, long unsigned int groups,
1438 1438
1439 for (undo = 0; undo < group; undo++) 1439 for (undo = 0; undo < group; undo++)
1440 if (test_bit(undo, &groups)) 1440 if (test_bit(undo, &groups))
1441 nlk->netlink_unbind(sock_net(sk), undo); 1441 nlk->netlink_unbind(sock_net(sk), undo + 1);
1442} 1442}
1443 1443
1444static int netlink_bind(struct socket *sock, struct sockaddr *addr, 1444static int netlink_bind(struct socket *sock, struct sockaddr *addr,
@@ -1476,7 +1476,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1476 for (group = 0; group < nlk->ngroups; group++) { 1476 for (group = 0; group < nlk->ngroups; group++) {
1477 if (!test_bit(group, &groups)) 1477 if (!test_bit(group, &groups))
1478 continue; 1478 continue;
1479 err = nlk->netlink_bind(net, group); 1479 err = nlk->netlink_bind(net, group + 1);
1480 if (!err) 1480 if (!err)
1481 continue; 1481 continue;
1482 netlink_undo_bind(group, groups, sk); 1482 netlink_undo_bind(group, groups, sk);
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
index c3b0cd43eb56..c173f69e1479 100644
--- a/net/rds/sysctl.c
+++ b/net/rds/sysctl.c
@@ -71,14 +71,14 @@ static struct ctl_table rds_sysctl_rds_table[] = {
71 { 71 {
72 .procname = "max_unacked_packets", 72 .procname = "max_unacked_packets",
73 .data = &rds_sysctl_max_unacked_packets, 73 .data = &rds_sysctl_max_unacked_packets,
74 .maxlen = sizeof(unsigned long), 74 .maxlen = sizeof(int),
75 .mode = 0644, 75 .mode = 0644,
76 .proc_handler = proc_dointvec, 76 .proc_handler = proc_dointvec,
77 }, 77 },
78 { 78 {
79 .procname = "max_unacked_bytes", 79 .procname = "max_unacked_bytes",
80 .data = &rds_sysctl_max_unacked_bytes, 80 .data = &rds_sysctl_max_unacked_bytes,
81 .maxlen = sizeof(unsigned long), 81 .maxlen = sizeof(int),
82 .mode = 0644, 82 .mode = 0644,
83 .proc_handler = proc_dointvec, 83 .proc_handler = proc_dointvec,
84 }, 84 },
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index aad6a679fb13..baef987fe2c0 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -556,8 +556,9 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
556} 556}
557EXPORT_SYMBOL(tcf_exts_change); 557EXPORT_SYMBOL(tcf_exts_change);
558 558
559#define tcf_exts_first_act(ext) \ 559#define tcf_exts_first_act(ext) \
560 list_first_entry(&(exts)->actions, struct tc_action, list) 560 list_first_entry_or_null(&(exts)->actions, \
561 struct tc_action, list)
561 562
562int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) 563int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
563{ 564{
@@ -603,7 +604,7 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
603{ 604{
604#ifdef CONFIG_NET_CLS_ACT 605#ifdef CONFIG_NET_CLS_ACT
605 struct tc_action *a = tcf_exts_first_act(exts); 606 struct tc_action *a = tcf_exts_first_act(exts);
606 if (tcf_action_copy_stats(skb, a, 1) < 0) 607 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
607 return -1; 608 return -1;
608#endif 609#endif
609 return 0; 610 return 0;
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 9b05924cc386..333cd94ba381 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -670,8 +670,14 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
670 if (tb[TCA_FQ_FLOW_PLIMIT]) 670 if (tb[TCA_FQ_FLOW_PLIMIT])
671 q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]); 671 q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);
672 672
673 if (tb[TCA_FQ_QUANTUM]) 673 if (tb[TCA_FQ_QUANTUM]) {
674 q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]); 674 u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
675
676 if (quantum > 0)
677 q->quantum = quantum;
678 else
679 err = -EINVAL;
680 }
675 681
676 if (tb[TCA_FQ_INITIAL_QUANTUM]) 682 if (tb[TCA_FQ_INITIAL_QUANTUM])
677 q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]); 683 q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index e49e231cef52..06320c8c1c86 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2608,7 +2608,7 @@ do_addr_param:
2608 2608
2609 addr_param = param.v + sizeof(sctp_addip_param_t); 2609 addr_param = param.v + sizeof(sctp_addip_param_t);
2610 2610
2611 af = sctp_get_af_specific(param_type2af(param.p->type)); 2611 af = sctp_get_af_specific(param_type2af(addr_param->p.type));
2612 if (af == NULL) 2612 if (af == NULL)
2613 break; 2613 break;
2614 2614