author	Linus Torvalds <torvalds@linux-foundation.org>	2016-04-21 15:57:34 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-04-21 15:57:34 -0400
commit	c5edde3a81149d29ceae4221f09f4c7bc2f70846 (patch)
tree	5260db3beec59da2a2276231d083e8e2b63b8c2a
parent	f862d66a1a4609e61cd87c7ff4c7d9a234103d67 (diff)
parent	b4f70527f052b0c00be4d7cac562baa75b212df5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix memory leak in iwlwifi, from Matti Gottlieb.

 2) Add missing registration of netfilter arp_tables into initial
    namespace, from Florian Westphal.

 3) Fix potential NULL deref in DECnet routing code.

 4) Restrict NETLINK_URELEASE to truly bound sockets only, from Dmitry
    Ivanov.

 5) Fix dst ref counting in VRF, from David Ahern.

 6) Fix TSO segmenting limits in i40e driver, from Alexander Duyck.

 7) Fix heap leak in PACKET_DIAG_MCLIST, from Mathias Krause.

 8) Revalidate IPv6 datagram socket cached routes properly, particularly
    with UDP, from Martin KaFai Lau.

 9) Fix endian bug in RDS dp_ack_seq handling, from Qing Huang.

10) Fix stats typing in bcmgenet driver, from Eric Dumazet.

11) Openvswitch needs to orphan SKBs before ipv6 fragmentation handling,
    from Joe Stringer.

12) SPI device reference leak in spi_ks8895 PHY driver, from Mark Brown.

13) atl2 doesn't actually support scatter-gather, so don't advertise the
    feature. From Ben Hutchings.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (72 commits)
  openvswitch: use flow protocol when recalculating ipv6 checksums
  Driver: Vmxnet3: set CHECKSUM_UNNECESSARY for IPv6 packets
  atl2: Disable unimplemented scatter/gather feature
  net/mlx4_en: Split SW RX dropped counter per RX ring
  net/mlx4_core: Don't allow to VF change global pause settings
  net/mlx4_core: Avoid repeated calls to pci enable/disable
  net/mlx4_core: Implement pci_resume callback
  net: phy: spi_ks8895: Don't leak references to SPI devices
  net: ethernet: davinci_emac: Fix platform_data overwrite
  net: ethernet: davinci_emac: Fix Unbalanced pm_runtime_enable
  qede: Fix single MTU sized packet from firmware GRO flow
  qede: Fix setting Skb network header
  qede: Fix various memory allocation error flows for fastpath
  tcp: Merge tx_flags and tskey in tcp_shifted_skb
  tcp: Merge tx_flags and tskey in tcp_collapse_retrans
  drivers: net: cpsw: fix wrong regs access in cpsw_ndo_open
  tcp: Fix SOF_TIMESTAMPING_TX_ACK when handling dup acks
  openvswitch: Orphan skbs before IPv6 defrag
  Revert "Prevent NUll pointer dereference with two PHYs on cpsw"
  VSOCK: Only check error on skb_recv_datagram when skb is NULL
  ...
-rw-r--r--  Documentation/devicetree/bindings/net/mediatek-net.txt | 7
-rw-r--r--  drivers/bcma/main.c | 17
-rw-r--r--  drivers/isdn/mISDN/socket.c | 3
-rw-r--r--  drivers/net/Kconfig | 1
-rw-r--r--  drivers/net/dsa/mv88e6xxx.c | 34
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 6
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 20
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 33
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 30
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 49
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 10
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 49
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_port.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 76
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 13
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 157
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 3
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 16
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 4
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 3
-rw-r--r--  drivers/net/phy/spi_ks8995.c | 2
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 9
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 12
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 4
-rw-r--r--  drivers/net/vrf.c | 177
-rw-r--r--  drivers/net/wireless/broadcom/b43/main.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c | 6
-rw-r--r--  include/linux/mlx4/device.h | 7
-rw-r--r--  include/linux/rculist_nulls.h | 39
-rw-r--r--  include/net/cls_cgroup.h | 7
-rw-r--r--  include/net/ip6_route.h | 3
-rw-r--r--  include/net/ipv6.h | 2
-rw-r--r--  include/net/route.h | 3
-rw-r--r--  include/net/sctp/structs.h | 8
-rw-r--r--  include/net/sock.h | 6
-rw-r--r--  include/net/tcp.h | 2
-rw-r--r--  include/uapi/linux/Kbuild | 1
-rw-r--r--  kernel/bpf/verifier.c | 1
-rw-r--r--  net/bridge/netfilter/ebtables.c | 6
-rw-r--r--  net/core/skbuff.c | 7
-rw-r--r--  net/decnet/dn_route.c | 9
-rw-r--r--  net/ipv4/netfilter/arptable_filter.c | 6
-rw-r--r--  net/ipv4/route.c | 19
-rw-r--r--  net/ipv4/tcp_input.c | 4
-rw-r--r--  net/ipv4/tcp_output.c | 16
-rw-r--r--  net/ipv4/udp.c | 9
-rw-r--r--  net/ipv6/addrconf.c | 22
-rw-r--r--  net/ipv6/datagram.c | 169
-rw-r--r--  net/ipv6/route.c | 19
-rw-r--r--  net/ipv6/udp.c | 1
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 4
-rw-r--r--  net/netlink/af_netlink.c | 2
-rw-r--r--  net/openvswitch/actions.c | 4
-rw-r--r--  net/openvswitch/conntrack.c | 1
-rw-r--r--  net/packet/af_packet.c | 1
-rw-r--r--  net/rds/cong.c | 4
-rw-r--r--  net/rds/ib_cm.c | 2
-rw-r--r--  net/sched/sch_generic.c | 5
-rw-r--r--  net/sctp/outqueue.c | 15
-rw-r--r--  net/sctp/sm_make_chunk.c | 3
-rw-r--r--  net/sctp/sm_sideeffect.c | 36
-rw-r--r--  net/sctp/transport.c | 19
-rw-r--r--  net/tipc/core.c | 1
-rw-r--r--  net/tipc/core.h | 3
-rw-r--r--  net/tipc/name_distr.c | 35
-rw-r--r--  net/vmw_vsock/vmci_transport.c | 7
-rw-r--r--  net/wireless/nl80211.c | 2
-rw-r--r--  tools/testing/selftests/net/.gitignore | 1
-rw-r--r--  tools/testing/selftests/net/Makefile | 2
-rw-r--r--  tools/testing/selftests/net/reuseport_dualstack.c | 208
84 files changed, 1009 insertions, 529 deletions
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
index 5ca79290eabf..32eaaca04d9b 100644
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
@@ -9,7 +9,8 @@ have dual GMAC each represented by a child node..
 Required properties:
 - compatible: Should be "mediatek,mt7623-eth"
 - reg: Address and length of the register set for the device
-- interrupts: Should contain the frame engines interrupt
+- interrupts: Should contain the three frame engines interrupts in numeric
+  order. These are fe_int0, fe_int1 and fe_int2.
 - clocks: the clock used by the core
 - clock-names: the names of the clock listed in the clocks property. These are
   "ethif", "esw", "gp2", "gp1"
@@ -42,7 +43,9 @@ eth: ethernet@1b100000 {
 			 <&ethsys CLK_ETHSYS_GP2>,
 			 <&ethsys CLK_ETHSYS_GP1>;
 	clock-names = "ethif", "esw", "gp2", "gp1";
-	interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW>;
+	interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW
+		      GIC_SPI 199 IRQ_TYPE_LEVEL_LOW
+		      GIC_SPI 198 IRQ_TYPE_LEVEL_LOW>;
 	power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>;
 	resets = <&ethsys MT2701_ETHSYS_ETH_RST>;
 	reset-names = "eth";
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 786be8fed39e..1f635471f318 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -136,7 +136,6 @@ static bool bcma_is_core_needed_early(u16 core_id)
 	return false;
 }
 
-#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
 static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
 						     struct bcma_device *core)
 {
@@ -184,7 +183,7 @@ static unsigned int bcma_of_get_irq(struct platform_device *parent,
 	struct of_phandle_args out_irq;
 	int ret;
 
-	if (!parent || !parent->dev.of_node)
+	if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
 		return 0;
 
 	ret = bcma_of_irq_parse(parent, core, &out_irq, num);
@@ -202,23 +201,15 @@ static void bcma_of_fill_device(struct platform_device *parent,
 {
 	struct device_node *node;
 
+	if (!IS_ENABLED(CONFIG_OF_IRQ))
+		return;
+
 	node = bcma_of_find_child_device(parent, core);
 	if (node)
 		core->dev.of_node = node;
 
 	core->irq = bcma_of_get_irq(parent, core, 0);
 }
-#else
-static void bcma_of_fill_device(struct platform_device *parent,
-				struct bcma_device *core)
-{
-}
-static inline unsigned int bcma_of_get_irq(struct platform_device *parent,
-					   struct bcma_device *core, int num)
-{
-	return 0;
-}
-#endif /* CONFIG_OF */
 
 unsigned int bcma_core_irq(struct bcma_device *core, int num)
 {
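Editor's note: the bcma change above swaps compile-time #ifdef fencing for IS_ENABLED() tests, which the compiler folds to constants, so the OF code is type-checked in every configuration but compiled out when CONFIG_OF_IRQ is off. A minimal userspace sketch of the same trick follows; the CONFIG_DEMO_FEATURE macro and demo_get_irq() are hypothetical, not kernel API (the IS_ENABLED machinery mirrors include/linux/kconfig.h):

	#include <stdio.h>

	/* Kernel-style IS_ENABLED(): expands to 1 when the config macro is
	 * defined as 1, and to 0 when it is undefined. */
	#define __ARG_PLACEHOLDER_1 0,
	#define __take_second_arg(__ignored, val, ...) val
	#define __is_defined(x) ___is_defined(x)
	#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
	#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
	#define IS_ENABLED(option) __is_defined(option)

	#define CONFIG_DEMO_FEATURE 1	/* comment this out to disable the branch */

	static int demo_get_irq(void)
	{
		/* Dead-code elimination removes this branch entirely when the
		 * option is off, yet the code is still parsed and type-checked,
		 * unlike an #ifdef'd-out block. */
		if (!IS_ENABLED(CONFIG_DEMO_FEATURE))
			return 0;
		return 42;
	}

	int main(void)
	{
		printf("irq = %d\n", demo_get_irq());
		return 0;
	}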
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 0d29b5a6356d..99e5f9751e8b 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -715,6 +715,9 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 	if (!maddr || maddr->family != AF_ISDN)
 		return -EINVAL;
 
+	if (addr_len < sizeof(struct sockaddr_mISDN))
+		return -EINVAL;
+
 	lock_sock(sk);
 
 	if (_pms(sk)->dev) {
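Editor's note: the one-line mISDN fix is an instance of a standard bind() rule: validate addr_len against the expected sockaddr size before reading protocol-specific fields, since userspace controls both the buffer contents and the length. A hedged userspace sketch of the same check; demo_bind(), DEMO_AF and struct demo_sockaddr are made-up names for illustration:

	#include <errno.h>
	#include <stdio.h>
	#include <sys/socket.h>

	#define DEMO_AF 34	/* stand-in for AF_ISDN */

	/* Hypothetical protocol address, analogous to struct sockaddr_mISDN. */
	struct demo_sockaddr {
		sa_family_t family;
		unsigned char channel;
		unsigned char dev_id;
	};

	static int demo_bind(const struct sockaddr *addr, socklen_t addr_len)
	{
		const struct demo_sockaddr *maddr =
			(const struct demo_sockaddr *)addr;

		if (!maddr || maddr->family != DEMO_AF)
			return -EINVAL;

		/* The fix above: reject short addresses so a malicious
		 * addr_len cannot make the handler read past the end of
		 * the supplied buffer. */
		if (addr_len < sizeof(struct demo_sockaddr))
			return -EINVAL;

		return 0;
	}

	int main(void)
	{
		struct demo_sockaddr good = { .family = DEMO_AF, .channel = 1 };

		printf("full-size bind: %d\n",
		       demo_bind((struct sockaddr *)&good, sizeof(good)));
		printf("truncated bind: %d\n",
		       demo_bind((struct sockaddr *)&good, sizeof(sa_family_t)));
		return 0;
	}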
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2a1ba62b7da2..a24c18eee598 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -195,6 +195,7 @@ config GENEVE
 
 config MACSEC
 	tristate "IEEE 802.1AE MAC-level encryption (MACsec)"
+	select CRYPTO
 	select CRYPTO_AES
 	select CRYPTO_GCM
 	---help---
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 50454be86570..a2904029cccc 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -2181,27 +2181,10 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
 			       struct net_device *bridge)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	u16 fid;
 	int i, err;
 
 	mutex_lock(&ps->smi_mutex);
 
-	/* Get or create the bridge FID and assign it to the port */
-	for (i = 0; i < ps->num_ports; ++i)
-		if (ps->ports[i].bridge_dev == bridge)
-			break;
-
-	if (i < ps->num_ports)
-		err = _mv88e6xxx_port_fid_get(ds, i, &fid);
-	else
-		err = _mv88e6xxx_fid_new(ds, &fid);
-	if (err)
-		goto unlock;
-
-	err = _mv88e6xxx_port_fid_set(ds, port, fid);
-	if (err)
-		goto unlock;
-
 	/* Assign the bridge and remap each port's VLANTable */
 	ps->ports[port].bridge_dev = bridge;
 
@@ -2213,7 +2196,6 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
 		}
 	}
 
-unlock:
 	mutex_unlock(&ps->smi_mutex);
 
 	return err;
@@ -2223,16 +2205,10 @@ void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	struct net_device *bridge = ps->ports[port].bridge_dev;
-	u16 fid;
 	int i;
 
 	mutex_lock(&ps->smi_mutex);
 
-	/* Give the port a fresh Filtering Information Database */
-	if (_mv88e6xxx_fid_new(ds, &fid) ||
-	    _mv88e6xxx_port_fid_set(ds, port, fid))
-		netdev_warn(ds->ports[port], "failed to assign a new FID\n");
-
 	/* Unassign the bridge and remap each port's VLANTable */
 	ps->ports[port].bridge_dev = NULL;
 
@@ -2476,9 +2452,9 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
 	 * the other bits clear.
 	 */
 	reg = 1 << port;
-	/* Disable learning for DSA and CPU ports */
-	if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
-		reg = PORT_ASSOC_VECTOR_LOCKED_PORT;
+	/* Disable learning for CPU port */
+	if (dsa_is_cpu_port(ds, port))
+		reg = 0;
 
 	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
 	if (ret)
@@ -2558,11 +2534,11 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
 	if (ret)
 		goto abort;
 
-	/* Port based VLAN map: give each port its own address
+	/* Port based VLAN map: give each port the same default address
 	 * database, and allow bidirectional communication between the
 	 * CPU and DSA port(s), and the other ports.
 	 */
-	ret = _mv88e6xxx_port_fid_set(ds, port, port + 1);
+	ret = _mv88e6xxx_port_fid_set(ds, port, 0);
 	if (ret)
 		goto abort;
 
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 8f76f4558a88..2ff465848b65 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	err = -EIO;
 
-	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
+	netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
 	netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
 
 	/* Init PHY as early as possible due to power saving issue */
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 99b30a952b38..38db2e4d7d54 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1572,6 +1572,11 @@ static int bgmac_probe(struct bcma_device *core)
 		dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
 	}
 
+	/* This (reset &) enable is not preset in specs or reference driver but
+	 * Broadcom does it in arch PCI code when enabling fake PCI device.
+	 */
+	bcma_core_enable(core, 0);
+
 	/* Allocation and references */
 	net_dev = alloc_etherdev(sizeof(*bgmac));
 	if (!net_dev)
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 4fbb093e0d84..9a03c142b742 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -199,9 +199,9 @@
 #define BGMAC_CMDCFG_TAI			0x00000200
 #define BGMAC_CMDCFG_HD				0x00000400	/* Set if in half duplex mode */
 #define BGMAC_CMDCFG_HD_SHIFT			10
-#define BGMAC_CMDCFG_SR_REV0			0x00000800	/* Set to reset mode, for other revs */
-#define BGMAC_CMDCFG_SR_REV4			0x00002000	/* Set to reset mode, only for core rev 4 */
-#define BGMAC_CMDCFG_SR(rev)  ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
+#define BGMAC_CMDCFG_SR_REV0			0x00000800	/* Set to reset mode, for core rev 0-3 */
+#define BGMAC_CMDCFG_SR_REV4			0x00002000	/* Set to reset mode, for core rev >= 4 */
+#define BGMAC_CMDCFG_SR(rev)  ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
 #define BGMAC_CMDCFG_ML				0x00008000	/* Set to activate mac loopback mode */
 #define BGMAC_CMDCFG_AE				0x00400000
 #define BGMAC_CMDCFG_CFE			0x00800000
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index cf6445d148ca..44ad1490b472 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -878,7 +878,11 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
 		else
 			p = (char *)priv;
 		p += s->stat_offset;
-		data[i] = *(u32 *)p;
+		if (sizeof(unsigned long) != sizeof(u32) &&
+		    s->stat_sizeof == sizeof(unsigned long))
+			data[i] = *(unsigned long *)p;
+		else
+			data[i] = *(u32 *)p;
 	}
 }
 
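Editor's note: the bcmgenet fix matters on 64-bit kernels, where some of these stats live in fields declared unsigned long (8 bytes), so reading them through a u32 pointer silently returns only half the value. A small userspace sketch of the width mismatch; struct demo_stats and its fields are hypothetical stand-ins for the driver's mib block:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical mixed-width stats block, like the bcmgenet SW mib. */
	struct demo_stats {
		unsigned long rx_packets;	/* 8 bytes on LP64 */
		uint32_t rx_crc_errs;		/* always 4 bytes */
	};

	int main(void)
	{
		struct demo_stats s = {
			.rx_packets = (unsigned long)0x100000001ULL,
			.rx_crc_errs = 7,
		};
		char *p = (char *)&s;	/* offset-based access, as in the driver */

		/* Buggy read: dereferences an unsigned long field as u32,
		 * truncating it on 64-bit targets. */
		uint64_t wrong = *(uint32_t *)p;

		/* Fixed read: match the pointer type to the field's declared
		 * size, as the patched bcmgenet_get_ethtool_stats() does. */
		uint64_t right;
		if (sizeof(unsigned long) != sizeof(uint32_t))
			right = *(unsigned long *)p;
		else
			right = *(uint32_t *)p;

		printf("wrong=%llu right=%llu\n",
		       (unsigned long long)wrong, (unsigned long long)right);
		return 0;
	}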
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 967951582e03..d20539a6d162 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1011,10 +1011,11 @@ static int bgx_init_of_phy(struct bgx *bgx)
 		}
 
 		lmac++;
-		if (lmac == MAX_LMAC_PER_BGX)
+		if (lmac == MAX_LMAC_PER_BGX) {
+			of_node_put(node);
 			break;
+		}
 	}
-	of_node_put(node);
 	return 0;
 
 defer:
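Editor's note: the thunder_bgx change follows the usual for_each_child_of_node() refcount rule: the iterator drops the previous node's reference on each step, so code that breaks out of the loop still owns the current node and must put it inside the loop, not afterwards. A toy userspace model of that pattern; the node type and helpers are stand-ins, not kernel API:

	#include <stdio.h>

	/* Toy refcounted node standing in for struct device_node. */
	struct node { int refs; };

	static void node_get(struct node *n) { n->refs++; }
	static void node_put(struct node *n) { n->refs--; }

	int main(void)
	{
		struct node nodes[4] = { {1}, {1}, {1}, {1} };
		struct node *cur = NULL;
		int i, stop_at = 2;

		for (i = 0; i < 4; i++) {
			/* The iterator takes a reference on the next node and
			 * drops the previous one, like for_each_child_of_node(). */
			node_get(&nodes[i]);
			if (cur)
				node_put(cur);
			cur = &nodes[i];

			if (i == stop_at) {
				/* Early exit: we still hold cur's reference,
				 * so it must be dropped here, inside the loop. */
				node_put(cur);
				break;
			}
		}

		for (i = 0; i < 4; i++)	/* all counts back to 1 */
			printf("node %d refs=%d\n", i, nodes[i].refs);
		return 0;
	}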
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 984a3cc26f86..326d4009525e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1451,6 +1451,9 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 	       unsigned int mmd, unsigned int reg, u16 *valp);
 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 	       unsigned int mmd, unsigned int reg, u16 val);
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
+	       unsigned int fl0id, unsigned int fl1id);
 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
 	       unsigned int fl0id, unsigned int fl1id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 13b144bcf725..6278e5a74b74 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2981,14 +2981,28 @@ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
 void t4_free_sge_resources(struct adapter *adap)
 {
 	int i;
-	struct sge_eth_rxq *eq = adap->sge.ethrxq;
-	struct sge_eth_txq *etq = adap->sge.ethtxq;
+	struct sge_eth_rxq *eq;
+	struct sge_eth_txq *etq;
+
+	/* stop all Rx queues in order to start them draining */
+	for (i = 0; i < adap->sge.ethqsets; i++) {
+		eq = &adap->sge.ethrxq[i];
+		if (eq->rspq.desc)
+			t4_iq_stop(adap, adap->mbox, adap->pf, 0,
+				   FW_IQ_TYPE_FL_INT_CAP,
+				   eq->rspq.cntxt_id,
+				   eq->fl.size ? eq->fl.cntxt_id : 0xffff,
+				   0xffff);
+	}
 
 	/* clean up Ethernet Tx/Rx queues */
-	for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
+	for (i = 0; i < adap->sge.ethqsets; i++) {
+		eq = &adap->sge.ethrxq[i];
 		if (eq->rspq.desc)
 			free_rspq_fl(adap, &eq->rspq,
 				     eq->fl.size ? &eq->fl : NULL);
+
+		etq = &adap->sge.ethtxq[i];
 		if (etq->q.desc) {
 			t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
 				       etq->q.cntxt_id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index c7efb116b577..71586a3e0f61 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -6950,6 +6950,39 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
 }
 
 /**
+ * t4_iq_stop - stop an ingress queue and its FLs
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queues
+ * @vf: the VF owning the queues
+ * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
+ * @iqid: ingress queue id
+ * @fl0id: FL0 queue id or 0xffff if no attached FL0
+ * @fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ * Stops an ingress queue and its associated FLs, if any. This causes
+ * any current or future data/messages destined for these queues to be
+ * tossed.
+ */
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
+	       unsigned int fl0id, unsigned int fl1id)
+{
+	struct fw_iq_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
+				  FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
+				  FW_IQ_CMD_VFN_V(vf));
+	c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
+	c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
+	c.iqid = cpu_to_be16(iqid);
+	c.fl0id = cpu_to_be16(fl0id);
+	c.fl1id = cpu_to_be16(fl1id);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
  * t4_iq_free - free an ingress queue and its FLs
  * @adap: the adapter
  * @mbox: mailbox to use for the FW command
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 62ccebc5f728..8cf943db5662 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1223,18 +1223,32 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
 	if (err)
 		return err;
 
-	/* verify upper 16 bits are zero */
-	if (vid >> 16)
-		return FM10K_ERR_PARAM;
-
 	set = !(vid & FM10K_VLAN_CLEAR);
 	vid &= ~FM10K_VLAN_CLEAR;
 
-	err = fm10k_iov_select_vid(vf_info, (u16)vid);
-	if (err < 0)
-		return err;
+	/* if the length field has been set, this is a multi-bit
+	 * update request. For multi-bit requests, simply disallow
+	 * them when the pf_vid has been set. In this case, the PF
+	 * should have already cleared the VLAN_TABLE, and if we
+	 * allowed them, it could allow a rogue VF to receive traffic
+	 * on a VLAN it was not assigned. In the single-bit case, we
+	 * need to modify requests for VLAN 0 to use the default PF or
+	 * SW vid when assigned.
+	 */
 
-	vid = err;
+	if (vid >> 16) {
+		/* prevent multi-bit requests when PF has
+		 * administratively set the VLAN for this VF
+		 */
+		if (vf_info->pf_vid)
+			return FM10K_ERR_PARAM;
+	} else {
+		err = fm10k_iov_select_vid(vf_info, (u16)vid);
+		if (err < 0)
+			return err;
+
+		vid = err;
+	}
 
 	/* update VSI info for VF in regards to VLAN table */
 	err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 084d0ab316b7..6a49b7ae511c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2594,35 +2594,34 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 }
 
 /**
- * __i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
  * @skb: send buffer
  *
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
  **/
 bool __i40e_chk_linearize(struct sk_buff *skb)
 {
 	const struct skb_frag_struct *frag, *stale;
-	int gso_size, nr_frags, sum;
-
-	/* check to see if TSO is enabled, if so we may get a repreive */
-	gso_size = skb_shinfo(skb)->gso_size;
-	if (unlikely(!gso_size))
-		return true;
+	int nr_frags, sum;
 
-	/* no need to check if number of frags is less than 8 */
+	/* no need to check if number of frags is less than 7 */
 	nr_frags = skb_shinfo(skb)->nr_frags;
-	if (nr_frags < I40E_MAX_BUFFER_TXD)
+	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
 		return false;
 
 	/* We need to walk through the list and validate that each group
 	 * of 6 fragments totals at least gso_size. However we don't need
-	 * to perform such validation on the first or last 6 since the first
-	 * 6 cannot inherit any data from a descriptor before them, and the
-	 * last 6 cannot inherit any data from a descriptor after them.
+	 * to perform such validation on the last 6 since the last 6 cannot
+	 * inherit any data from a descriptor after them.
 	 */
-	nr_frags -= I40E_MAX_BUFFER_TXD - 1;
+	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
 	frag = &skb_shinfo(skb)->frags[0];
 
 	/* Initialize size to the negative value of gso_size minus 1. We
@@ -2631,21 +2630,21 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
 	 * descriptors for a single transmit as the header and previous
 	 * fragment are already consuming 2 descriptors.
 	 */
-	sum = 1 - gso_size;
+	sum = 1 - skb_shinfo(skb)->gso_size;
 
-	/* Add size of frags 1 through 5 to create our initial sum */
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
+	/* Add size of frags 0 through 4 to create our initial sum */
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
 
 	/* Walk through fragments adding latest fragment, testing it, and
 	 * then removing stale fragments from the sum.
 	 */
 	stale = &skb_shinfo(skb)->frags[0];
 	for (;;) {
-		sum += skb_frag_size(++frag);
+		sum += skb_frag_size(frag++);
 
 		/* if sum is negative we failed to make sufficient progress */
 		if (sum < 0)
@@ -2655,7 +2654,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
 		if (!--nr_frags)
 			break;
 
-		sum -= skb_frag_size(++stale);
+		sum -= skb_frag_size(stale++);
 	}
 
 	return false;
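Editor's note on the arithmetic behind the new i40e check: the hardware can chain at most I40E_MAX_BUFFER_TXD (8) buffers per wire packet, and for TSO the header consumes one descriptor and the first chunk of segment payload another, so 7 fragments can already demand 9 descriptors. The loop then slides a window of 6 fragment sizes and fails if any window sums to less than gso_size. A standalone sketch of that sliding-window test over plain arrays; MAX_BUFFERS and the sample sizes are illustrative, not driver data:

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_BUFFERS 8	/* mirrors I40E_MAX_BUFFER_TXD */

	/* Returns true if a TSO skb with these fragment sizes would need
	 * linearizing: some group of 6 consecutive fragments carries less
	 * than one gso_size of payload, so a single segment could span
	 * more than 8 buffers.  Port of the __i40e_chk_linearize() walk. */
	static bool needs_linearize(const int *frag, int nr_frags, int gso_size)
	{
		int sum, i, stale = 0;

		if (nr_frags < MAX_BUFFERS - 1)
			return false;

		/* header desc + first payload desc are already 2 buffers */
		sum = 1 - gso_size;
		for (i = 0; i < 5; i++)		/* frags 0 through 4 */
			sum += frag[i];

		for (nr_frags -= MAX_BUFFERS - 2; ; ) {
			sum += frag[i++];
			if (sum < 0)
				return true;	/* insufficient progress */
			if (!--nr_frags)
				break;
			sum -= frag[stale++];	/* drop stale fragment */
		}
		return false;
	}

	int main(void)
	{
		int tiny[7] = { 100, 100, 100, 100, 100, 100, 100 };
		int big[7]  = { 4096, 4096, 4096, 4096, 4096, 4096, 4096 };

		printf("tiny frags: %d\n", needs_linearize(tiny, 7, 1448)); /* 1 */
		printf("big frags:  %d\n", needs_linearize(big, 7, 1448));  /* 0 */
		return 0;
	}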
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index cdd5dc00aec5..a9bd70537d65 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -413,10 +413,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  **/
 static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
 {
-	/* we can only support up to 8 data buffers for a single send */
-	if (likely(count <= I40E_MAX_BUFFER_TXD))
+	/* Both TSO and single send will work if count is less than 8 */
+	if (likely(count < I40E_MAX_BUFFER_TXD))
 		return false;
 
-	return __i40e_chk_linearize(skb);
+	if (skb_is_gso(skb))
+		return __i40e_chk_linearize(skb);
+
+	/* we can support up to 8 data buffers for a single send */
+	return count != I40E_MAX_BUFFER_TXD;
 }
 #endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index ebcc25c05796..cea97daa844c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1796,35 +1796,34 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
 }
 
 /**
- * __i40evf_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
  * @skb: send buffer
  *
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
  **/
 bool __i40evf_chk_linearize(struct sk_buff *skb)
 {
 	const struct skb_frag_struct *frag, *stale;
-	int gso_size, nr_frags, sum;
-
-	/* check to see if TSO is enabled, if so we may get a repreive */
-	gso_size = skb_shinfo(skb)->gso_size;
-	if (unlikely(!gso_size))
-		return true;
+	int nr_frags, sum;
 
-	/* no need to check if number of frags is less than 8 */
+	/* no need to check if number of frags is less than 7 */
 	nr_frags = skb_shinfo(skb)->nr_frags;
-	if (nr_frags < I40E_MAX_BUFFER_TXD)
+	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
 		return false;
 
 	/* We need to walk through the list and validate that each group
 	 * of 6 fragments totals at least gso_size. However we don't need
-	 * to perform such validation on the first or last 6 since the first
-	 * 6 cannot inherit any data from a descriptor before them, and the
-	 * last 6 cannot inherit any data from a descriptor after them.
+	 * to perform such validation on the last 6 since the last 6 cannot
+	 * inherit any data from a descriptor after them.
 	 */
-	nr_frags -= I40E_MAX_BUFFER_TXD - 1;
+	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
 	frag = &skb_shinfo(skb)->frags[0];
 
 	/* Initialize size to the negative value of gso_size minus 1. We
@@ -1833,21 +1832,21 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
 	 * descriptors for a single transmit as the header and previous
 	 * fragment are already consuming 2 descriptors.
 	 */
-	sum = 1 - gso_size;
+	sum = 1 - skb_shinfo(skb)->gso_size;
 
-	/* Add size of frags 1 through 5 to create our initial sum */
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
+	/* Add size of frags 0 through 4 to create our initial sum */
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
 
 	/* Walk through fragments adding latest fragment, testing it, and
 	 * then removing stale fragments from the sum.
 	 */
 	stale = &skb_shinfo(skb)->frags[0];
 	for (;;) {
-		sum += skb_frag_size(++frag);
+		sum += skb_frag_size(frag++);
 
 		/* if sum is negative we failed to make sufficient progress */
 		if (sum < 0)
@@ -1857,7 +1856,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
 		if (!--nr_frags)
 			break;
 
-		sum -= skb_frag_size(++stale);
+		sum -= skb_frag_size(stale++);
 	}
 
 	return false;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index c1dd8c5c9666..0429553fe887 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -395,10 +395,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  **/
 static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
 {
-	/* we can only support up to 8 data buffers for a single send */
-	if (likely(count <= I40E_MAX_BUFFER_TXD))
+	/* Both TSO and single send will work if count is less than 8 */
+	if (likely(count < I40E_MAX_BUFFER_TXD))
 		return false;
 
-	return __i40evf_chk_linearize(skb);
+	if (skb_is_gso(skb))
+		return __i40evf_chk_linearize(skb);
+
+	/* we can support up to 8 data buffers for a single send */
+	return count != I40E_MAX_BUFFER_TXD;
 }
 #endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index f69584a9b47f..c761194bb323 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -337,7 +337,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
 	case ETH_SS_STATS:
 		return bitmap_iterator_count(&it) +
 			(priv->tx_ring_num * 2) +
-			(priv->rx_ring_num * 2);
+			(priv->rx_ring_num * 3);
 	case ETH_SS_TEST:
 		return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
 					& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
@@ -404,6 +404,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		data[index++] = priv->rx_ring[i]->packets;
 		data[index++] = priv->rx_ring[i]->bytes;
+		data[index++] = priv->rx_ring[i]->dropped;
 	}
 	spin_unlock_bh(&priv->stats_lock);
 
@@ -477,6 +478,8 @@ static void mlx4_en_get_strings(struct net_device *dev,
477 "rx%d_packets", i); 478 "rx%d_packets", i);
478 sprintf(data + (index++) * ETH_GSTRING_LEN, 479 sprintf(data + (index++) * ETH_GSTRING_LEN,
479 "rx%d_bytes", i); 480 "rx%d_bytes", i);
481 sprintf(data + (index++) * ETH_GSTRING_LEN,
482 "rx%d_dropped", i);
480 } 483 }
481 break; 484 break;
482 case ETH_SS_PRIV_FLAGS: 485 case ETH_SS_PRIV_FLAGS:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 3904b5fc0b7c..20b6c2e678b8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -158,6 +158,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	u64 in_mod = reset << 8 | port;
 	int err;
 	int i, counter_index;
+	unsigned long sw_rx_dropped = 0;
 
 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
 	if (IS_ERR(mailbox))
@@ -180,6 +181,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		stats->rx_packets += priv->rx_ring[i]->packets;
 		stats->rx_bytes += priv->rx_ring[i]->bytes;
+		sw_rx_dropped += priv->rx_ring[i]->dropped;
 		priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
 		priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
 		priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
@@ -236,7 +238,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 					  &mlx4_en_stats->MCAST_prio_1,
 					  NUM_PRIORITIES);
 	stats->collisions = 0;
-	stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
+	stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
+			    sw_rx_dropped;
 	stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
 	stats->rx_over_errors = 0;
 	stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 86bcfe510e4e..b723e3bcab39 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -61,7 +61,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
 		gfp_t gfp = _gfp;
 
 		if (order)
-			gfp |= __GFP_COMP | __GFP_NOWARN;
+			gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
 		page = alloc_pages(gfp, order);
 		if (likely(page))
 			break;
@@ -126,7 +126,9 @@ out:
 		dma_unmap_page(priv->ddev, page_alloc[i].dma,
 			       page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
 		page = page_alloc[i].page;
-		set_page_count(page, 1);
+		/* Revert changes done by mlx4_alloc_pages */
+		page_ref_sub(page, page_alloc[i].page_size /
+				   priv->frag_info[i].frag_stride - 1);
 		put_page(page);
 	}
 }
@@ -176,7 +178,9 @@ out:
 		dma_unmap_page(priv->ddev, page_alloc->dma,
 			       page_alloc->page_size, PCI_DMA_FROMDEVICE);
 		page = page_alloc->page;
-		set_page_count(page, 1);
+		/* Revert changes done by mlx4_alloc_pages */
+		page_ref_sub(page, page_alloc->page_size /
+				   priv->frag_info[i].frag_stride - 1);
 		put_page(page);
 		page_alloc->page = NULL;
 	}
@@ -939,7 +943,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 		/* GRO not possible, complete processing here */
 		skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
 		if (!skb) {
-			priv->stats.rx_dropped++;
+			ring->dropped++;
 			goto next;
 		}
 
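Editor's note: the en_rx.c error path matters because mlx4_alloc_pages() hands one page out as several RX fragments, taking page_size/frag_stride references up front; unwinding must drop exactly the references the allocator added rather than forcing the count to 1 with set_page_count(), which would clobber references concurrently held elsewhere. A toy model of that accounting, with plain integers standing in for struct page:

	#include <stdio.h>

	/* Toy page refcounting standing in for page_ref_sub()/put_page(). */
	struct page { int refs; };

	static void page_ref_add(struct page *p, int n) { p->refs += n; }
	static void page_ref_sub(struct page *p, int n) { p->refs -= n; }
	static void put_page(struct page *p) { p->refs--; }

	int main(void)
	{
		struct page pg = { .refs = 1 };	/* allocator's initial reference */
		int page_size = 8192, frag_stride = 2048;

		/* The allocator takes one reference per fragment it will carve
		 * out of the page (it already owns one of them). */
		page_ref_add(&pg, page_size / frag_stride - 1);
		printf("after alloc: refs=%d\n", pg.refs);	/* 4 */

		/* Error unwind, as in the fixed code: subtract the extra
		 * references, then drop the last one.  set_page_count(page, 1)
		 * would instead destroy any reference another user still holds. */
		page_ref_sub(&pg, page_size / frag_stride - 1);
		put_page(&pg);
		printf("after unwind: refs=%d\n", pg.refs);	/* 0, page free */
		return 0;
	}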
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 358f7230da58..12c77a70abdb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3172,6 +3172,34 @@ static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap
 	return 0;
 }
 
+static int mlx4_pci_enable_device(struct mlx4_dev *dev)
+{
+	struct pci_dev *pdev = dev->persist->pdev;
+	int err = 0;
+
+	mutex_lock(&dev->persist->pci_status_mutex);
+	if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
+		err = pci_enable_device(pdev);
+		if (!err)
+			dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
+	}
+	mutex_unlock(&dev->persist->pci_status_mutex);
+
+	return err;
+}
+
+static void mlx4_pci_disable_device(struct mlx4_dev *dev)
+{
+	struct pci_dev *pdev = dev->persist->pdev;
+
+	mutex_lock(&dev->persist->pci_status_mutex);
+	if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
+		pci_disable_device(pdev);
+		dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
+	}
+	mutex_unlock(&dev->persist->pci_status_mutex);
+}
+
 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
 			 int total_vfs, int *nvfs, struct mlx4_priv *priv,
 			 int reset_flow)
@@ -3582,7 +3610,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
 
 	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
 
-	err = pci_enable_device(pdev);
+	err = mlx4_pci_enable_device(&priv->dev);
 	if (err) {
 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
 		return err;
@@ -3715,7 +3743,7 @@ err_release_regions:
 	pci_release_regions(pdev);
 
 err_disable_pdev:
-	pci_disable_device(pdev);
+	mlx4_pci_disable_device(&priv->dev);
 	pci_set_drvdata(pdev, NULL);
 	return err;
 }
@@ -3775,6 +3803,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	priv->pci_dev_data = id->driver_data;
 	mutex_init(&dev->persist->device_state_mutex);
 	mutex_init(&dev->persist->interface_state_mutex);
+	mutex_init(&dev->persist->pci_status_mutex);
 
 	ret = devlink_register(devlink, &pdev->dev);
 	if (ret)
@@ -3923,7 +3952,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
 	}
 
 	pci_release_regions(pdev);
-	pci_disable_device(pdev);
+	mlx4_pci_disable_device(dev);
 	devlink_unregister(devlink);
 	kfree(dev->persist);
 	devlink_free(devlink);
@@ -4042,7 +4071,7 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
 	if (state == pci_channel_io_perm_failure)
 		return PCI_ERS_RESULT_DISCONNECT;
 
-	pci_disable_device(pdev);
+	mlx4_pci_disable_device(persist->dev);
 	return PCI_ERS_RESULT_NEED_RESET;
 }
 
@@ -4050,45 +4079,53 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
 {
 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
 	struct mlx4_dev *dev = persist->dev;
-	struct mlx4_priv *priv = mlx4_priv(dev);
-	int ret;
-	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
-	int total_vfs;
+	int err;
 
 	mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
-	ret = pci_enable_device(pdev);
-	if (ret) {
-		mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret);
+	err = mlx4_pci_enable_device(dev);
+	if (err) {
+		mlx4_err(dev, "Can not re-enable device, err=%d\n", err);
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 
 	pci_set_master(pdev);
 	pci_restore_state(pdev);
 	pci_save_state(pdev);
+	return PCI_ERS_RESULT_RECOVERED;
+}
 
+static void mlx4_pci_resume(struct pci_dev *pdev)
+{
+	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+	struct mlx4_dev *dev = persist->dev;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+	int total_vfs;
+	int err;
+
+	mlx4_err(dev, "%s was called\n", __func__);
 	total_vfs = dev->persist->num_vfs;
 	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
 
 	mutex_lock(&persist->interface_state_mutex);
 	if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
-		ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
+		err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
 				    priv, 1);
-		if (ret) {
-			mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n",
-				 __func__, ret);
+		if (err) {
+			mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
+				 __func__, err);
 			goto end;
 		}
 
-		ret = restore_current_port_types(dev, dev->persist->
+		err = restore_current_port_types(dev, dev->persist->
 						 curr_port_type, dev->persist->
 						 curr_port_poss_type);
-		if (ret)
-			mlx4_err(dev, "could not restore original port types (%d)\n", ret);
+		if (err)
+			mlx4_err(dev, "could not restore original port types (%d)\n", err);
 	}
 end:
 	mutex_unlock(&persist->interface_state_mutex);
 
-	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
 }
 
 static void mlx4_shutdown(struct pci_dev *pdev)
@@ -4105,6 +4142,7 @@ static void mlx4_shutdown(struct pci_dev *pdev)
 static const struct pci_error_handlers mlx4_err_handler = {
 	.error_detected	= mlx4_pci_err_detected,
 	.slot_reset	= mlx4_pci_slot_reset,
+	.resume		= mlx4_pci_resume,
 };
 
 static struct pci_driver mlx4_driver = {
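Editor's note: the pattern introduced above serializes enable/disable behind a mutex and a status flag so the calls become idempotent: the AER error_detected path and a later device remove can both disable the device without unbalancing the PCI core's enable count. A compact userspace sketch of the same guard, with a pthread mutex standing in for pci_status_mutex and printfs for the PCI calls; all names are illustrative:

	#include <pthread.h>
	#include <stdio.h>

	enum pci_status { PCI_STATUS_DISABLED, PCI_STATUS_ENABLED };

	struct dev_persist {
		pthread_mutex_t lock;
		enum pci_status status;
	};

	static void demo_enable(struct dev_persist *p)
	{
		pthread_mutex_lock(&p->lock);
		if (p->status == PCI_STATUS_DISABLED) {
			printf("pci_enable_device()\n");   /* real call goes here */
			p->status = PCI_STATUS_ENABLED;
		}
		pthread_mutex_unlock(&p->lock);
	}

	static void demo_disable(struct dev_persist *p)
	{
		pthread_mutex_lock(&p->lock);
		if (p->status == PCI_STATUS_ENABLED) {
			printf("pci_disable_device()\n");
			p->status = PCI_STATUS_DISABLED;
		}
		pthread_mutex_unlock(&p->lock);
	}

	int main(void)
	{
		struct dev_persist p = {
			PTHREAD_MUTEX_INITIALIZER, PCI_STATUS_DISABLED
		};

		demo_enable(&p);
		demo_disable(&p);	/* AER error_detected */
		demo_disable(&p);	/* driver remove: now a harmless no-op */
		return 0;
	}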
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index ef9683101ead..c9d7fc5159f2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -586,6 +586,8 @@ struct mlx4_mfunc_master_ctx {
 	struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
 	int			init_port_ref[MLX4_MAX_PORTS + 1];
 	u16			max_mtu[MLX4_MAX_PORTS + 1];
+	u8			pptx;
+	u8			pprx;
 	int			disable_mcast_ref[MLX4_MAX_PORTS + 1];
 	struct mlx4_resource_tracker res_tracker;
 	struct workqueue_struct *comm_wq;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d12ab6a73344..63b1aeae2c03 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -323,6 +323,7 @@ struct mlx4_en_rx_ring {
 	unsigned long csum_ok;
 	unsigned long csum_none;
 	unsigned long csum_complete;
+	unsigned long dropped;
 	int hwtstamp_rx_filter;
 	cpumask_var_t affinity_mask;
 };
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 211c65087997..087b23b320cb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1317,6 +1317,19 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
 		}
 
 		gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+		/* Slave cannot change Global Pause configuration */
+		if (slave != mlx4_master_func_num(dev) &&
+		    ((gen_context->pptx != master->pptx) ||
+		     (gen_context->pprx != master->pprx))) {
+			gen_context->pptx = master->pptx;
+			gen_context->pprx = master->pprx;
+			mlx4_warn(dev,
+				  "denying Global Pause change for slave:%d\n",
+				  slave);
+		} else {
+			master->pptx = gen_context->pptx;
+			master->pprx = gen_context->pprx;
+		}
 		break;
 	case MLX4_SET_PORT_GID_TABLE:
 		/* change to MULTIPLE entries: number of guest's gids
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 518af329502d..7869465435fa 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -750,6 +750,12 @@ static bool qede_has_tx_work(struct qede_fastpath *fp)
 	return false;
 }
 
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+	qed_chain_consume(&rxq->rx_bd_ring);
+	rxq->sw_rx_cons++;
+}
+
 /* This function reuses the buffer(from an offset) from
  * consumer index to producer index in the bd ring
  */
@@ -773,6 +779,21 @@ static inline void qede_reuse_page(struct qede_dev *edev,
 	curr_cons->data = NULL;
 }
 
+/* In case of allocation failures reuse buffers
+ * from consumer index to produce buffers for firmware
+ */
+static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+				    struct qede_dev *edev, u8 count)
+{
+	struct sw_rx_data *curr_cons;
+
+	for (; count > 0; count--) {
+		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+		qede_reuse_page(edev, rxq, curr_cons);
+		qede_rx_bd_ring_consume(rxq);
+	}
+}
+
 static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
 					 struct qede_rx_queue *rxq,
 					 struct sw_rx_data *curr_cons)
@@ -781,8 +802,14 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
 	curr_cons->page_offset += rxq->rx_buf_seg_size;
 
 	if (curr_cons->page_offset == PAGE_SIZE) {
-		if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+		if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+			/* Since we failed to allocate new buffer
+			 * current buffer can be used again.
+			 */
+			curr_cons->page_offset -= rxq->rx_buf_seg_size;
+
 			return -ENOMEM;
+		}
 
 		dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
 			       PAGE_SIZE, DMA_FROM_DEVICE);
@@ -901,7 +928,10 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
 			   len_on_bd);
 
 	if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
-		tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+		/* Incr page ref count to reuse on allocation failure
+		 * so that it doesn't get freed while freeing SKB.
+		 */
+		atomic_inc(&current_bd->data->_count);
 		goto out;
 	}
 
@@ -915,6 +945,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
915 return 0; 945 return 0;
916 946
917out: 947out:
948 tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
949 qede_recycle_rx_bd_ring(rxq, edev, 1);
918 return -ENOMEM; 950 return -ENOMEM;
919} 951}
920 952
@@ -966,8 +998,9 @@ static void qede_tpa_start(struct qede_dev *edev,
966 tpa_info->skb = netdev_alloc_skb(edev->ndev, 998 tpa_info->skb = netdev_alloc_skb(edev->ndev,
967 le16_to_cpu(cqe->len_on_first_bd)); 999 le16_to_cpu(cqe->len_on_first_bd));
968 if (unlikely(!tpa_info->skb)) { 1000 if (unlikely(!tpa_info->skb)) {
1001 DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
969 tpa_info->agg_state = QEDE_AGG_STATE_ERROR; 1002 tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
970 return; 1003 goto cons_buf;
971 } 1004 }
972 1005
973 skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd)); 1006 skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
@@ -990,6 +1023,7 @@ static void qede_tpa_start(struct qede_dev *edev,
990 /* This is needed in order to enable forwarding support */ 1023 /* This is needed in order to enable forwarding support */
991 qede_set_gro_params(edev, tpa_info->skb, cqe); 1024 qede_set_gro_params(edev, tpa_info->skb, cqe);
992 1025
1026cons_buf: /* We still need to handle bd_len_list to consume buffers */
993 if (likely(cqe->ext_bd_len_list[0])) 1027 if (likely(cqe->ext_bd_len_list[0]))
994 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, 1028 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
995 le16_to_cpu(cqe->ext_bd_len_list[0])); 1029 le16_to_cpu(cqe->ext_bd_len_list[0]));
@@ -1007,7 +1041,6 @@ static void qede_gro_ip_csum(struct sk_buff *skb)
1007 const struct iphdr *iph = ip_hdr(skb); 1041 const struct iphdr *iph = ip_hdr(skb);
1008 struct tcphdr *th; 1042 struct tcphdr *th;
1009 1043
1010 skb_set_network_header(skb, 0);
1011 skb_set_transport_header(skb, sizeof(struct iphdr)); 1044 skb_set_transport_header(skb, sizeof(struct iphdr));
1012 th = tcp_hdr(skb); 1045 th = tcp_hdr(skb);
1013 1046
@@ -1022,7 +1055,6 @@ static void qede_gro_ipv6_csum(struct sk_buff *skb)
1022 struct ipv6hdr *iph = ipv6_hdr(skb); 1055 struct ipv6hdr *iph = ipv6_hdr(skb);
1023 struct tcphdr *th; 1056 struct tcphdr *th;
1024 1057
1025 skb_set_network_header(skb, 0);
1026 skb_set_transport_header(skb, sizeof(struct ipv6hdr)); 1058 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
1027 th = tcp_hdr(skb); 1059 th = tcp_hdr(skb);
1028 1060
@@ -1037,8 +1069,21 @@ static void qede_gro_receive(struct qede_dev *edev,
1037 struct sk_buff *skb, 1069 struct sk_buff *skb,
1038 u16 vlan_tag) 1070 u16 vlan_tag)
1039{ 1071{
1072 /* FW can send a single MTU sized packet from gro flow
1073 * due to aggregation timeout/last segment etc. which
1074 * is not expected to be a gro packet. If a skb has zero
1075 * frags then simply push it in the stack as non gso skb.
1076 */
1077 if (unlikely(!skb->data_len)) {
1078 skb_shinfo(skb)->gso_type = 0;
1079 skb_shinfo(skb)->gso_size = 0;
1080 goto send_skb;
1081 }
1082
1040#ifdef CONFIG_INET 1083#ifdef CONFIG_INET
1041 if (skb_shinfo(skb)->gso_size) { 1084 if (skb_shinfo(skb)->gso_size) {
1085 skb_set_network_header(skb, 0);
1086
1042 switch (skb->protocol) { 1087 switch (skb->protocol) {
1043 case htons(ETH_P_IP): 1088 case htons(ETH_P_IP):
1044 qede_gro_ip_csum(skb); 1089 qede_gro_ip_csum(skb);
@@ -1053,6 +1098,8 @@ static void qede_gro_receive(struct qede_dev *edev,
1053 } 1098 }
1054 } 1099 }
1055#endif 1100#endif
1101
1102send_skb:
1056 skb_record_rx_queue(skb, fp->rss_id); 1103 skb_record_rx_queue(skb, fp->rss_id);
1057 qede_skb_receive(edev, fp, skb, vlan_tag); 1104 qede_skb_receive(edev, fp, skb, vlan_tag);
1058} 1105}
@@ -1244,17 +1291,17 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
1244 "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n", 1291 "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
1245 sw_comp_cons, parse_flag); 1292 sw_comp_cons, parse_flag);
1246 rxq->rx_hw_errors++; 1293 rxq->rx_hw_errors++;
1247 qede_reuse_page(edev, rxq, sw_rx_data); 1294 qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
1248 goto next_rx; 1295 goto next_cqe;
1249 } 1296 }
1250 1297
1251 skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); 1298 skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
1252 if (unlikely(!skb)) { 1299 if (unlikely(!skb)) {
1253 DP_NOTICE(edev, 1300 DP_NOTICE(edev,
1254 "Build_skb failed, dropping incoming packet\n"); 1301 "Build_skb failed, dropping incoming packet\n");
1255 qede_reuse_page(edev, rxq, sw_rx_data); 1302 qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
1256 rxq->rx_alloc_errors++; 1303 rxq->rx_alloc_errors++;
1257 goto next_rx; 1304 goto next_cqe;
1258 } 1305 }
1259 1306
1260 /* Copy data into SKB */ 1307 /* Copy data into SKB */
@@ -1288,11 +1335,22 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
1288 if (unlikely(qede_realloc_rx_buffer(edev, rxq, 1335 if (unlikely(qede_realloc_rx_buffer(edev, rxq,
1289 sw_rx_data))) { 1336 sw_rx_data))) {
1290 DP_ERR(edev, "Failed to allocate rx buffer\n"); 1337 DP_ERR(edev, "Failed to allocate rx buffer\n");
1338 /* Incr page ref count to reuse on allocation
1339 * failure so that it doesn't get freed while
1340 * freeing SKB.
1341 */
1342
1343 atomic_inc(&sw_rx_data->data->_count);
1291 rxq->rx_alloc_errors++; 1344 rxq->rx_alloc_errors++;
1345 qede_recycle_rx_bd_ring(rxq, edev,
1346 fp_cqe->bd_num);
1347 dev_kfree_skb_any(skb);
1292 goto next_cqe; 1348 goto next_cqe;
1293 } 1349 }
1294 } 1350 }
1295 1351
1352 qede_rx_bd_ring_consume(rxq);
1353
1296 if (fp_cqe->bd_num != 1) { 1354 if (fp_cqe->bd_num != 1) {
1297 u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len); 1355 u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
1298 u8 num_frags; 1356 u8 num_frags;
@@ -1303,18 +1361,27 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
1303 num_frags--) { 1361 num_frags--) {
1304 u16 cur_size = pkt_len > rxq->rx_buf_size ? 1362 u16 cur_size = pkt_len > rxq->rx_buf_size ?
1305 rxq->rx_buf_size : pkt_len; 1363 rxq->rx_buf_size : pkt_len;
1364 if (unlikely(!cur_size)) {
1365 DP_ERR(edev,
1366 "Still got %d BDs for mapping jumbo, but length became 0\n",
1367 num_frags);
1368 qede_recycle_rx_bd_ring(rxq, edev,
1369 num_frags);
1370 dev_kfree_skb_any(skb);
1371 goto next_cqe;
1372 }
1306 1373
1307 WARN_ONCE(!cur_size, 1374 if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
1308 "Still got %d BDs for mapping jumbo, but length became 0\n", 1375 qede_recycle_rx_bd_ring(rxq, edev,
1309 num_frags); 1376 num_frags);
1310 1377 dev_kfree_skb_any(skb);
1311 if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
1312 goto next_cqe; 1378 goto next_cqe;
1379 }
1313 1380
1314 rxq->sw_rx_cons++;
1315 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; 1381 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1316 sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; 1382 sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
1317 qed_chain_consume(&rxq->rx_bd_ring); 1383 qede_rx_bd_ring_consume(rxq);
1384
1318 dma_unmap_page(&edev->pdev->dev, 1385 dma_unmap_page(&edev->pdev->dev,
1319 sw_rx_data->mapping, 1386 sw_rx_data->mapping,
1320 PAGE_SIZE, DMA_FROM_DEVICE); 1387 PAGE_SIZE, DMA_FROM_DEVICE);
@@ -1330,7 +1397,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
1330 pkt_len -= cur_size; 1397 pkt_len -= cur_size;
1331 } 1398 }
1332 1399
1333 if (pkt_len) 1400 if (unlikely(pkt_len))
1334 DP_ERR(edev, 1401 DP_ERR(edev,
1335 "Mapped all BDs of jumbo, but still have %d bytes\n", 1402 "Mapped all BDs of jumbo, but still have %d bytes\n",
1336 pkt_len); 1403 pkt_len);
@@ -1349,10 +1416,6 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
1349 skb_record_rx_queue(skb, fp->rss_id); 1416 skb_record_rx_queue(skb, fp->rss_id);
1350 1417
1351 qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag)); 1418 qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
1352
1353 qed_chain_consume(&rxq->rx_bd_ring);
1354next_rx:
1355 rxq->sw_rx_cons++;
1356next_rx_only: 1419next_rx_only:
1357 rx_pkt++; 1420 rx_pkt++;
1358 1421
@@ -2257,7 +2320,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
2257 struct qede_agg_info *tpa_info = &rxq->tpa_info[i]; 2320 struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
2258 struct sw_rx_data *replace_buf = &tpa_info->replace_buf; 2321 struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
2259 2322
2260 if (replace_buf) { 2323 if (replace_buf->data) {
2261 dma_unmap_page(&edev->pdev->dev, 2324 dma_unmap_page(&edev->pdev->dev,
2262 dma_unmap_addr(replace_buf, mapping), 2325 dma_unmap_addr(replace_buf, mapping),
2263 PAGE_SIZE, DMA_FROM_DEVICE); 2326 PAGE_SIZE, DMA_FROM_DEVICE);
@@ -2377,7 +2440,7 @@ err:
2377static int qede_alloc_mem_rxq(struct qede_dev *edev, 2440static int qede_alloc_mem_rxq(struct qede_dev *edev,
2378 struct qede_rx_queue *rxq) 2441 struct qede_rx_queue *rxq)
2379{ 2442{
2380 int i, rc, size, num_allocated; 2443 int i, rc, size;
2381 2444
2382 rxq->num_rx_buffers = edev->q_num_rx_buffers; 2445 rxq->num_rx_buffers = edev->q_num_rx_buffers;
2383 2446
@@ -2394,6 +2457,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
2394 rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL); 2457 rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
2395 if (!rxq->sw_rx_ring) { 2458 if (!rxq->sw_rx_ring) {
2396 DP_ERR(edev, "Rx buffers ring allocation failed\n"); 2459 DP_ERR(edev, "Rx buffers ring allocation failed\n");
2460 rc = -ENOMEM;
2397 goto err; 2461 goto err;
2398 } 2462 }
2399 2463
@@ -2421,26 +2485,16 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
2421 /* Allocate buffers for the Rx ring */ 2485 /* Allocate buffers for the Rx ring */
2422 for (i = 0; i < rxq->num_rx_buffers; i++) { 2486 for (i = 0; i < rxq->num_rx_buffers; i++) {
2423 rc = qede_alloc_rx_buffer(edev, rxq); 2487 rc = qede_alloc_rx_buffer(edev, rxq);
2424 if (rc) 2488 if (rc) {
2425 break; 2489 DP_ERR(edev,
2426 } 2490 "Rx buffers allocation failed at index %d\n", i);
2427 num_allocated = i; 2491 goto err;
2428 if (!num_allocated) { 2492 }
2429 DP_ERR(edev, "Rx buffers allocation failed\n");
2430 goto err;
2431 } else if (num_allocated < rxq->num_rx_buffers) {
2432 DP_NOTICE(edev,
2433 "Allocated less buffers than desired (%d allocated)\n",
2434 num_allocated);
2435 } 2493 }
2436 2494
2437 qede_alloc_sge_mem(edev, rxq); 2495 rc = qede_alloc_sge_mem(edev, rxq);
2438
2439 return 0;
2440
2441err: 2496err:
2442 qede_free_mem_rxq(edev, rxq); 2497 return rc;
2443 return -ENOMEM;
2444} 2498}
2445 2499
2446static void qede_free_mem_txq(struct qede_dev *edev, 2500static void qede_free_mem_txq(struct qede_dev *edev,
@@ -2523,10 +2577,8 @@ static int qede_alloc_mem_fp(struct qede_dev *edev,
2523 } 2577 }
2524 2578
2525 return 0; 2579 return 0;
2526
2527err: 2580err:
2528 qede_free_mem_fp(edev, fp); 2581 return rc;
2529 return -ENOMEM;
2530} 2582}
2531 2583
2532static void qede_free_mem_load(struct qede_dev *edev) 2584static void qede_free_mem_load(struct qede_dev *edev)
@@ -2549,22 +2601,13 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
2549 struct qede_fastpath *fp = &edev->fp_array[rss_id]; 2601 struct qede_fastpath *fp = &edev->fp_array[rss_id];
2550 2602
2551 rc = qede_alloc_mem_fp(edev, fp); 2603 rc = qede_alloc_mem_fp(edev, fp);
2552 if (rc) 2604 if (rc) {
2553 break;
2554 }
2555
2556 if (rss_id != QEDE_RSS_CNT(edev)) {
2557 /* Failed allocating memory for all the queues */
2558 if (!rss_id) {
2559 DP_ERR(edev, 2605 DP_ERR(edev,
2560 "Failed to allocate memory for the leading queue\n"); 2606 "Failed to allocate memory for fastpath - rss id = %d\n",
2561 rc = -ENOMEM; 2607 rss_id);
2562 } else { 2608 qede_free_mem_load(edev);
2563 DP_NOTICE(edev, 2609 return rc;
2564 "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n",
2565 QEDE_RSS_CNT(edev), rss_id);
2566 } 2610 }
2567 edev->num_rss = rss_id;
2568 } 2611 }
2569 2612
2570 return 0; 2613 return 0;
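
Several of the qede hunks above share one idea: when an RX buffer or SKB cannot be allocated, the buffer descriptors already consumed for the current packet are recycled back to the producer side via qede_recycle_rx_bd_ring(), so the hardware ring never starves; zero-frag "GRO" frames are likewise pushed to the stack as plain non-GSO SKBs, since firmware can emit a single MTU-sized packet from an aggregation flow. A rough userspace sketch of the consumer-to-producer recycling on a power-of-two ring, with invented names and a plain int standing in for sw_rx_data:

#include <stdio.h>

#define NUM_RX_BDS      8
#define NUM_RX_BDS_MASK (NUM_RX_BDS - 1)

struct rx_queue {
    int sw_rx_ring[NUM_RX_BDS]; /* stand-in for the sw_rx_data entries */
    unsigned int sw_rx_cons;    /* consumer index, free-running */
    unsigned int sw_rx_prod;    /* producer index, free-running */
};

/* Re-post the buffer at the consumer index at the producer index. */
static void reuse_buffer(struct rx_queue *rxq)
{
    int buf = rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MASK];

    rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MASK] = buf;
    rxq->sw_rx_prod++;
}

/* Analogue of qede_recycle_rx_bd_ring(): recycle 'count' BDs. */
static void recycle_rx_bd_ring(struct rx_queue *rxq, unsigned int count)
{
    for (; count > 0; count--) {
        reuse_buffer(rxq);
        rxq->sw_rx_cons++;      /* analogue of qede_rx_bd_ring_consume() */
    }
}

int main(void)
{
    struct rx_queue rxq = { {0, 1, 2, 3, 4, 5, 6, 7}, 0, NUM_RX_BDS };

    recycle_rx_bd_ring(&rxq, 3);    /* e.g. a 3-BD jumbo whose SKB failed */
    printf("cons=%u prod=%u\n", rxq.sw_rx_cons, rxq.sw_rx_prod);
    return 0;
}
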
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 087e14a3fba7..9e2a0bd8f5a8 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1691,6 +1691,9 @@ static int ravb_set_gti(struct net_device *ndev)
1691 rate = clk_get_rate(clk); 1691 rate = clk_get_rate(clk);
1692 clk_put(clk); 1692 clk_put(clk);
1693 1693
1694 if (!rate)
1695 return -EINVAL;
1696
1694 inc = 1000000000ULL << 20; 1697 inc = 1000000000ULL << 20;
1695 do_div(inc, rate); 1698 do_div(inc, rate);
1696 1699
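
The ravb fix above guards do_div() against a zero clock rate, which clk_get_rate() can legitimately return. A compilable sketch of the same guard, with a plain 64-bit division standing in for do_div() and invented names:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

static int set_gti(uint64_t rate)
{
    uint64_t inc;

    if (!rate)
        return -EINVAL;         /* refuse before the division below */

    inc = 1000000000ULL << 20;  /* 10^9 scaled by 2^20, as in ravb_set_gti() */
    inc /= rate;                /* userspace stand-in for do_div(inc, rate) */

    printf("inc=%llu\n", (unsigned long long)inc);
    return 0;
}

int main(void)
{
    set_gti(0);                 /* returns -EINVAL instead of dividing by 0 */
    return set_gti(33333333) ? 1 : 0;
}
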
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 004e2d7560fd..ceea74cc2229 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2194,17 +2194,13 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
2194 __func__); 2194 __func__);
2195 return ret; 2195 return ret;
2196 } 2196 }
2197 ret = sh_eth_dev_init(ndev, false); 2197 ret = sh_eth_dev_init(ndev, true);
2198 if (ret < 0) { 2198 if (ret < 0) {
2199 netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", 2199 netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
2200 __func__); 2200 __func__);
2201 return ret; 2201 return ret;
2202 } 2202 }
2203 2203
2204 mdp->irq_enabled = true;
2205 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
2206 /* Setting the Rx mode will start the Rx process. */
2207 sh_eth_write(ndev, EDRRR_R, EDRRR);
2208 netif_device_attach(ndev); 2204 netif_device_attach(ndev);
2209 } 2205 }
2210 2206
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index f0d797ab74d8..44022b1845ce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -34,6 +34,9 @@
34#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003 34#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
35#define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010 35#define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010
36 36
37#define SYSMGR_FPGAGRP_MODULE_REG 0x00000028
38#define SYSMGR_FPGAGRP_MODULE_EMAC 0x00000004
39
37#define EMAC_SPLITTER_CTRL_REG 0x0 40#define EMAC_SPLITTER_CTRL_REG 0x0
38#define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3 41#define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3
39#define EMAC_SPLITTER_CTRL_SPEED_10 0x2 42#define EMAC_SPLITTER_CTRL_SPEED_10 0x2
@@ -148,7 +151,7 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
148 int phymode = dwmac->interface; 151 int phymode = dwmac->interface;
149 u32 reg_offset = dwmac->reg_offset; 152 u32 reg_offset = dwmac->reg_offset;
150 u32 reg_shift = dwmac->reg_shift; 153 u32 reg_shift = dwmac->reg_shift;
151 u32 ctrl, val; 154 u32 ctrl, val, module;
152 155
153 switch (phymode) { 156 switch (phymode) {
154 case PHY_INTERFACE_MODE_RGMII: 157 case PHY_INTERFACE_MODE_RGMII:
@@ -175,12 +178,19 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
175 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift); 178 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
176 ctrl |= val << reg_shift; 179 ctrl |= val << reg_shift;
177 180
178 if (dwmac->f2h_ptp_ref_clk) 181 if (dwmac->f2h_ptp_ref_clk) {
179 ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2); 182 ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
180 else 183 regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
184 &module);
185 module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2));
186 regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
187 module);
188 } else {
181 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2)); 189 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2));
190 }
182 191
183 regmap_write(sys_mgr_base_addr, reg_offset, ctrl); 192 regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
193
184 return 0; 194 return 0;
185} 195}
186 196
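
The socfpga hunk extends the PTP reference-clock path with a read-modify-write of a second system-manager register, setting only the EMAC bit while preserving the rest. A generic sketch of that pattern over a fake register file standing in for regmap_read()/regmap_write(); the offset and bit value mirror the defines in the diff, everything else is invented:

#include <stdio.h>
#include <stdint.h>

#define SYSMGR_FPGAGRP_MODULE_REG  0x28
#define SYSMGR_FPGAGRP_MODULE_EMAC 0x04

static uint32_t regs[0x40];     /* fake system-manager register space */

static void regmap_read(uint32_t off, uint32_t *val)  { *val = regs[off]; }
static void regmap_write(uint32_t off, uint32_t val)  { regs[off] = val; }

static void enable_emac_module(unsigned int reg_shift)
{
    uint32_t module;

    /* read-modify-write: keep unrelated bits, set only the EMAC one */
    regmap_read(SYSMGR_FPGAGRP_MODULE_REG, &module);
    module |= SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2);
    regmap_write(SYSMGR_FPGAGRP_MODULE_REG, module);
}

int main(void)
{
    regs[SYSMGR_FPGAGRP_MODULE_REG] = 0x10;   /* pretend other bits are set */
    enable_emac_module(0);
    printf("module reg = 0x%x\n", regs[SYSMGR_FPGAGRP_MODULE_REG]);
    return 0;
}
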
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 42fdfd4d9d4f..bbb77cd8ad67 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1251,12 +1251,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
1251 int i, ret; 1251 int i, ret;
1252 u32 reg; 1252 u32 reg;
1253 1253
1254 pm_runtime_get_sync(&priv->pdev->dev);
1255
1254 if (!cpsw_common_res_usage_state(priv)) 1256 if (!cpsw_common_res_usage_state(priv))
1255 cpsw_intr_disable(priv); 1257 cpsw_intr_disable(priv);
1256 netif_carrier_off(ndev); 1258 netif_carrier_off(ndev);
1257 1259
1258 pm_runtime_get_sync(&priv->pdev->dev);
1259
1260 reg = priv->version; 1260 reg = priv->version;
1261 1261
1262 dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n", 1262 dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 5d9abedd6b75..58d58f002559 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1878,8 +1878,6 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
1878 pdata->hw_ram_addr = auxdata->hw_ram_addr; 1878 pdata->hw_ram_addr = auxdata->hw_ram_addr;
1879 } 1879 }
1880 1880
1881 pdev->dev.platform_data = pdata;
1882
1883 return pdata; 1881 return pdata;
1884} 1882}
1885 1883
@@ -2101,6 +2099,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
2101 cpdma_ctlr_destroy(priv->dma); 2099 cpdma_ctlr_destroy(priv->dma);
2102 2100
2103 unregister_netdev(ndev); 2101 unregister_netdev(ndev);
2102 pm_runtime_disable(&pdev->dev);
2104 free_netdev(ndev); 2103 free_netdev(ndev);
2105 2104
2106 return 0; 2105 return 0;
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index b5d50d458728..93ffedfa2994 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -441,7 +441,7 @@ static int ks8995_probe(struct spi_device *spi)
441 return -ENOMEM; 441 return -ENOMEM;
442 442
443 mutex_init(&ks->lock); 443 mutex_init(&ks->lock);
444 ks->spi = spi_dev_get(spi); 444 ks->spi = spi;
445 ks->chip = &ks8995_chip[variant]; 445 ks->chip = &ks8995_chip[variant];
446 446
447 if (ks->spi->dev.of_node) { 447 if (ks->spi->dev.of_node) {
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index bdd83d95ec0a..96a5028621c8 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -617,8 +617,13 @@ static const struct usb_device_id mbim_devs[] = {
617 { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 617 { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
618 .driver_info = (unsigned long)&cdc_mbim_info, 618 .driver_info = (unsigned long)&cdc_mbim_info,
619 }, 619 },
620 /* Huawei E3372 fails unless NDP comes after the IP packets */ 620
621 { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 621 /* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
622 * (12d1:157d), are known to fail unless the NDP is placed
623 * after the IP packets. Applying the quirk to all Huawei
624 * devices is broader than necessary, but harmless.
625 */
626 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
622 .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end, 627 .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
623 }, 628 },
624 /* default entry */ 629 /* default entry */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index b2348f67b00a..db8022ae415b 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1152,12 +1152,16 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1152 union Vmxnet3_GenericDesc *gdesc) 1152 union Vmxnet3_GenericDesc *gdesc)
1153{ 1153{
1154 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) { 1154 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1155 /* typical case: TCP/UDP over IP and both csums are correct */ 1155 if (gdesc->rcd.v4 &&
1156 if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) == 1156 (le32_to_cpu(gdesc->dword[3]) &
1157 VMXNET3_RCD_CSUM_OK) { 1157 VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
1158 skb->ip_summed = CHECKSUM_UNNECESSARY;
1159 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
1160 BUG_ON(gdesc->rcd.frg);
1161 } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
1162 (1 << VMXNET3_RCD_TUC_SHIFT))) {
1158 skb->ip_summed = CHECKSUM_UNNECESSARY; 1163 skb->ip_summed = CHECKSUM_UNNECESSARY;
1159 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp)); 1164 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
1160 BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
1161 BUG_ON(gdesc->rcd.frg); 1165 BUG_ON(gdesc->rcd.frg);
1162 } else { 1166 } else {
1163 if (gdesc->rcd.csum) { 1167 if (gdesc->rcd.csum) {
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 729c344e6774..c4825392d64b 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.4.6.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k"
73 73
74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
75#define VMXNET3_DRIVER_VERSION_NUM 0x01040600 75#define VMXNET3_DRIVER_VERSION_NUM 0x01040700
76 76
77#if defined(CONFIG_PCI_MSI) 77#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 78 /* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 9a9fabb900c1..8a8f1e58b415 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -60,41 +60,6 @@ struct pcpu_dstats {
60 struct u64_stats_sync syncp; 60 struct u64_stats_sync syncp;
61}; 61};
62 62
63static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
64{
65 return dst;
66}
67
68static int vrf_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
69{
70 return ip_local_out(net, sk, skb);
71}
72
73static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
74{
75 /* TO-DO: return max ethernet size? */
76 return dst->dev->mtu;
77}
78
79static void vrf_dst_destroy(struct dst_entry *dst)
80{
81 /* our dst lives forever - or until the device is closed */
82}
83
84static unsigned int vrf_default_advmss(const struct dst_entry *dst)
85{
86 return 65535 - 40;
87}
88
89static struct dst_ops vrf_dst_ops = {
90 .family = AF_INET,
91 .local_out = vrf_ip_local_out,
92 .check = vrf_ip_check,
93 .mtu = vrf_v4_mtu,
94 .destroy = vrf_dst_destroy,
95 .default_advmss = vrf_default_advmss,
96};
97
98/* neighbor handling is done with actual device; do not want 63/* neighbor handling is done with actual device; do not want
99 * to flip skb->dev for those ndisc packets. This really fails 64 * to flip skb->dev for those ndisc packets. This really fails
100 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is 65 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
@@ -349,46 +314,6 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
349} 314}
350 315
351#if IS_ENABLED(CONFIG_IPV6) 316#if IS_ENABLED(CONFIG_IPV6)
352static struct dst_entry *vrf_ip6_check(struct dst_entry *dst, u32 cookie)
353{
354 return dst;
355}
356
357static struct dst_ops vrf_dst_ops6 = {
358 .family = AF_INET6,
359 .local_out = ip6_local_out,
360 .check = vrf_ip6_check,
361 .mtu = vrf_v4_mtu,
362 .destroy = vrf_dst_destroy,
363 .default_advmss = vrf_default_advmss,
364};
365
366static int init_dst_ops6_kmem_cachep(void)
367{
368 vrf_dst_ops6.kmem_cachep = kmem_cache_create("vrf_ip6_dst_cache",
369 sizeof(struct rt6_info),
370 0,
371 SLAB_HWCACHE_ALIGN,
372 NULL);
373
374 if (!vrf_dst_ops6.kmem_cachep)
375 return -ENOMEM;
376
377 return 0;
378}
379
380static void free_dst_ops6_kmem_cachep(void)
381{
382 kmem_cache_destroy(vrf_dst_ops6.kmem_cachep);
383}
384
385static int vrf_input6(struct sk_buff *skb)
386{
387 skb->dev->stats.rx_errors++;
388 kfree_skb(skb);
389 return 0;
390}
391
392/* modelled after ip6_finish_output2 */ 317/* modelled after ip6_finish_output2 */
393static int vrf_finish_output6(struct net *net, struct sock *sk, 318static int vrf_finish_output6(struct net *net, struct sock *sk,
394 struct sk_buff *skb) 319 struct sk_buff *skb)
@@ -429,67 +354,34 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
429 !(IP6CB(skb)->flags & IP6SKB_REROUTED)); 354 !(IP6CB(skb)->flags & IP6SKB_REROUTED));
430} 355}
431 356
432static void vrf_rt6_destroy(struct net_vrf *vrf) 357static void vrf_rt6_release(struct net_vrf *vrf)
433{ 358{
434 dst_destroy(&vrf->rt6->dst); 359 dst_release(&vrf->rt6->dst);
435 free_percpu(vrf->rt6->rt6i_pcpu);
436 vrf->rt6 = NULL; 360 vrf->rt6 = NULL;
437} 361}
438 362
439static int vrf_rt6_create(struct net_device *dev) 363static int vrf_rt6_create(struct net_device *dev)
440{ 364{
441 struct net_vrf *vrf = netdev_priv(dev); 365 struct net_vrf *vrf = netdev_priv(dev);
442 struct dst_entry *dst; 366 struct net *net = dev_net(dev);
443 struct rt6_info *rt6; 367 struct rt6_info *rt6;
444 int cpu;
445 int rc = -ENOMEM; 368 int rc = -ENOMEM;
446 369
447 rt6 = dst_alloc(&vrf_dst_ops6, dev, 0, 370 rt6 = ip6_dst_alloc(net, dev,
448 DST_OBSOLETE_NONE, 371 DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
449 (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
450 if (!rt6) 372 if (!rt6)
451 goto out; 373 goto out;
452 374
453 dst = &rt6->dst;
454
455 rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL);
456 if (!rt6->rt6i_pcpu) {
457 dst_destroy(dst);
458 goto out;
459 }
460 for_each_possible_cpu(cpu) {
461 struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu);
462 *p = NULL;
463 }
464
465 memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst));
466
467 INIT_LIST_HEAD(&rt6->rt6i_siblings);
468 INIT_LIST_HEAD(&rt6->rt6i_uncached);
469
470 rt6->dst.input = vrf_input6;
471 rt6->dst.output = vrf_output6; 375 rt6->dst.output = vrf_output6;
472 376 rt6->rt6i_table = fib6_get_table(net, vrf->tb_id);
473 rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id); 377 dst_hold(&rt6->dst);
474
475 atomic_set(&rt6->dst.__refcnt, 2);
476
477 vrf->rt6 = rt6; 378 vrf->rt6 = rt6;
478 rc = 0; 379 rc = 0;
479out: 380out:
480 return rc; 381 return rc;
481} 382}
482#else 383#else
483static int init_dst_ops6_kmem_cachep(void) 384static void vrf_rt6_release(struct net_vrf *vrf)
484{
485 return 0;
486}
487
488static void free_dst_ops6_kmem_cachep(void)
489{
490}
491
492static void vrf_rt6_destroy(struct net_vrf *vrf)
493{ 385{
494} 386}
495 387
@@ -557,11 +449,11 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
557 !(IPCB(skb)->flags & IPSKB_REROUTED)); 449 !(IPCB(skb)->flags & IPSKB_REROUTED));
558} 450}
559 451
560static void vrf_rtable_destroy(struct net_vrf *vrf) 452static void vrf_rtable_release(struct net_vrf *vrf)
561{ 453{
562 struct dst_entry *dst = (struct dst_entry *)vrf->rth; 454 struct dst_entry *dst = (struct dst_entry *)vrf->rth;
563 455
564 dst_destroy(dst); 456 dst_release(dst);
565 vrf->rth = NULL; 457 vrf->rth = NULL;
566} 458}
567 459
@@ -570,22 +462,10 @@ static struct rtable *vrf_rtable_create(struct net_device *dev)
570 struct net_vrf *vrf = netdev_priv(dev); 462 struct net_vrf *vrf = netdev_priv(dev);
571 struct rtable *rth; 463 struct rtable *rth;
572 464
573 rth = dst_alloc(&vrf_dst_ops, dev, 2, 465 rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
574 DST_OBSOLETE_NONE,
575 (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
576 if (rth) { 466 if (rth) {
577 rth->dst.output = vrf_output; 467 rth->dst.output = vrf_output;
578 rth->rt_genid = rt_genid_ipv4(dev_net(dev));
579 rth->rt_flags = 0;
580 rth->rt_type = RTN_UNICAST;
581 rth->rt_is_input = 0;
582 rth->rt_iif = 0;
583 rth->rt_pmtu = 0;
584 rth->rt_gateway = 0;
585 rth->rt_uses_gateway = 0;
586 rth->rt_table_id = vrf->tb_id; 468 rth->rt_table_id = vrf->tb_id;
587 INIT_LIST_HEAD(&rth->rt_uncached);
588 rth->rt_uncached_list = NULL;
589 } 469 }
590 470
591 return rth; 471 return rth;
@@ -673,8 +553,8 @@ static void vrf_dev_uninit(struct net_device *dev)
673 struct net_device *port_dev; 553 struct net_device *port_dev;
674 struct list_head *iter; 554 struct list_head *iter;
675 555
676 vrf_rtable_destroy(vrf); 556 vrf_rtable_release(vrf);
677 vrf_rt6_destroy(vrf); 557 vrf_rt6_release(vrf);
678 558
679 netdev_for_each_lower_dev(dev, port_dev, iter) 559 netdev_for_each_lower_dev(dev, port_dev, iter)
680 vrf_del_slave(dev, port_dev); 560 vrf_del_slave(dev, port_dev);
@@ -704,7 +584,7 @@ static int vrf_dev_init(struct net_device *dev)
704 return 0; 584 return 0;
705 585
706out_rth: 586out_rth:
707 vrf_rtable_destroy(vrf); 587 vrf_rtable_release(vrf);
708out_stats: 588out_stats:
709 free_percpu(dev->dstats); 589 free_percpu(dev->dstats);
710 dev->dstats = NULL; 590 dev->dstats = NULL;
@@ -737,7 +617,7 @@ static struct rtable *vrf_get_rtable(const struct net_device *dev,
737 struct net_vrf *vrf = netdev_priv(dev); 617 struct net_vrf *vrf = netdev_priv(dev);
738 618
739 rth = vrf->rth; 619 rth = vrf->rth;
740 atomic_inc(&rth->dst.__refcnt); 620 dst_hold(&rth->dst);
741 } 621 }
742 622
743 return rth; 623 return rth;
@@ -788,7 +668,7 @@ static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
788 struct net_vrf *vrf = netdev_priv(dev); 668 struct net_vrf *vrf = netdev_priv(dev);
789 669
790 rt = vrf->rt6; 670 rt = vrf->rt6;
791 atomic_inc(&rt->dst.__refcnt); 671 dst_hold(&rt->dst);
792 } 672 }
793 673
794 return (struct dst_entry *)rt; 674 return (struct dst_entry *)rt;
@@ -946,19 +826,6 @@ static int __init vrf_init_module(void)
946{ 826{
947 int rc; 827 int rc;
948 828
949 vrf_dst_ops.kmem_cachep =
950 kmem_cache_create("vrf_ip_dst_cache",
951 sizeof(struct rtable), 0,
952 SLAB_HWCACHE_ALIGN,
953 NULL);
954
955 if (!vrf_dst_ops.kmem_cachep)
956 return -ENOMEM;
957
958 rc = init_dst_ops6_kmem_cachep();
959 if (rc != 0)
960 goto error2;
961
962 register_netdevice_notifier(&vrf_notifier_block); 829 register_netdevice_notifier(&vrf_notifier_block);
963 830
964 rc = rtnl_link_register(&vrf_link_ops); 831 rc = rtnl_link_register(&vrf_link_ops);
@@ -969,22 +836,10 @@ static int __init vrf_init_module(void)
969 836
970error: 837error:
971 unregister_netdevice_notifier(&vrf_notifier_block); 838 unregister_netdevice_notifier(&vrf_notifier_block);
972 free_dst_ops6_kmem_cachep();
973error2:
974 kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
975 return rc; 839 return rc;
976} 840}
977 841
978static void __exit vrf_cleanup_module(void)
979{
980 rtnl_link_unregister(&vrf_link_ops);
981 unregister_netdevice_notifier(&vrf_notifier_block);
982 kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
983 free_dst_ops6_kmem_cachep();
984}
985
986module_init(vrf_init_module); 842module_init(vrf_init_module);
987module_exit(vrf_cleanup_module);
988MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern"); 843MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
989MODULE_DESCRIPTION("Device driver to instantiate VRF domains"); 844MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
990MODULE_LICENSE("GPL"); 845MODULE_LICENSE("GPL");
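
The vrf rework above drops the hand-rolled dst construction and dst_destroy() in favor of the stock allocators (rt_dst_alloc()/ip6_dst_alloc()) plus plain dst_hold()/dst_release() reference counting, so the generic dst code frees the entry once the last user drops it. A generic sketch of that hold/release ownership model, with invented names:

#include <stdio.h>
#include <stdlib.h>

struct obj {
    int refcnt;
};

static struct obj *obj_alloc(void)
{
    struct obj *o = calloc(1, sizeof(*o));

    if (o)
        o->refcnt = 1;          /* the allocation owns one reference */
    return o;
}

static void obj_hold(struct obj *o)    { o->refcnt++; }

static void obj_release(struct obj *o)
{
    if (--o->refcnt == 0) {     /* last reference frees the object */
        printf("freeing\n");
        free(o);
    }
}

int main(void)
{
    struct obj *cached = obj_alloc();   /* plays the role of vrf->rth */

    if (!cached)
        return 1;

    obj_hold(cached);       /* hand a reference to a lookup caller */
    obj_release(cached);    /* caller done: object survives, cache holds it */
    obj_release(cached);    /* cache torn down: now it is actually freed */
    return 0;
}
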
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 72380af9dc52..b0603e796ad8 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -5680,11 +5680,12 @@ static int b43_bcma_probe(struct bcma_device *core)
5680 INIT_WORK(&wl->firmware_load, b43_request_firmware); 5680 INIT_WORK(&wl->firmware_load, b43_request_firmware);
5681 schedule_work(&wl->firmware_load); 5681 schedule_work(&wl->firmware_load);
5682 5682
5683bcma_out:
5684 return err; 5683 return err;
5685 5684
5686bcma_err_wireless_exit: 5685bcma_err_wireless_exit:
5687 ieee80211_free_hw(wl->hw); 5686 ieee80211_free_hw(wl->hw);
5687bcma_out:
5688 kfree(dev);
5688 return err; 5689 return err;
5689} 5690}
5690 5691
@@ -5712,8 +5713,8 @@ static void b43_bcma_remove(struct bcma_device *core)
5712 b43_rng_exit(wl); 5713 b43_rng_exit(wl);
5713 5714
5714 b43_leds_unregister(wl); 5715 b43_leds_unregister(wl);
5715
5716 ieee80211_free_hw(wl->hw); 5716 ieee80211_free_hw(wl->hw);
5717 kfree(wldev->dev);
5717} 5718}
5718 5719
5719static struct bcma_driver b43_bcma_driver = { 5720static struct bcma_driver b43_bcma_driver = {
@@ -5796,6 +5797,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
5796 5797
5797 b43_leds_unregister(wl); 5798 b43_leds_unregister(wl);
5798 b43_wireless_exit(dev, wl); 5799 b43_wireless_exit(dev, wl);
5800 kfree(dev);
5799} 5801}
5800 5802
5801static struct ssb_driver b43_ssb_driver = { 5803static struct ssb_driver b43_ssb_driver = {
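
The b43 hunks free the device wrapper (kfree(dev)) on both the probe error path and the remove paths, which the old code leaked. A minimal sketch of the fall-through goto unwinding the fix restores; names are invented and malloc()/free() stand in for the driver's allocations:

#include <stdio.h>
#include <stdlib.h>

struct dev { int dummy; };
struct hw  { int dummy; };

static int probe(int fail_late)
{
    struct dev *dev;
    struct hw *hw = NULL;
    int err = 0;

    dev = malloc(sizeof(*dev));
    if (!dev)
        return -1;

    hw = malloc(sizeof(*hw));
    if (!hw) {
        err = -1;
        goto out;               /* nothing beyond dev to unwind yet */
    }

    if (fail_late) {            /* e.g. wireless registration failed */
        err = -1;
        goto err_wireless_exit;
    }

    printf("probed\n");
    return 0;                   /* success: allocations live until remove() */

err_wireless_exit:
    free(hw);                   /* analogue of ieee80211_free_hw(wl->hw) */
out:
    free(dev);                  /* the kfree(dev) the original path leaked */
    return err;
}

int main(void)
{
    return probe(1) ? 0 : 1;    /* exercise the error path */
}
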
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 76e649c680a1..a50f4df7eae7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1147,6 +1147,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
1147 /* the fw is stopped, the aux sta is dead: clean up driver state */ 1147 /* the fw is stopped, the aux sta is dead: clean up driver state */
1148 iwl_mvm_del_aux_sta(mvm); 1148 iwl_mvm_del_aux_sta(mvm);
1149 1149
1150 iwl_free_fw_paging(mvm);
1151
1150 /* 1152 /*
1151 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete() 1153 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
1152 * won't be called in this case). 1154 * won't be called in this case).
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 5e8ab796d5bc..d278399097dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -761,8 +761,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
761 for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) 761 for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
762 kfree(mvm->nvm_sections[i].data); 762 kfree(mvm->nvm_sections[i].data);
763 763
764 iwl_free_fw_paging(mvm);
765
766 iwl_mvm_tof_clean(mvm); 764 iwl_mvm_tof_clean(mvm);
767 765
768 ieee80211_free_hw(mvm->hw); 766 ieee80211_free_hw(mvm->hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index eb39c7e09781..b2b79354d5c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -732,8 +732,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
732 */ 732 */
733 val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0); 733 val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
734 if (val & (BIT(1) | BIT(17))) { 734 if (val & (BIT(1) | BIT(17))) {
735 IWL_INFO(trans, 735 IWL_DEBUG_INFO(trans,
736 "can't access the RSA semaphore it is write protected\n"); 736 "can't access the RSA semaphore it is write protected\n");
737 return 0; 737 return 0;
738 } 738 }
739 739
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index 95dcbff4673b..6a8245c4ea48 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -2488,9 +2488,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
2488 for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) 2488 for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
2489 rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p]; 2489 rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p];
2490 2490
2491 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, 2491 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
2492 "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n", 2492 "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
2493 rtldm->thermalvalue, thermal_value); 2493 rtldm->thermalvalue, thermal_value);
2494 /*Record last Power Tracking Thermal Value*/ 2494 /*Record last Power Tracking Thermal Value*/
2495 rtldm->thermalvalue = thermal_value; 2495 rtldm->thermalvalue = thermal_value;
2496 } 2496 }
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 8541a913f6a3..d1f904c8b2cb 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -828,6 +828,11 @@ struct mlx4_vf_dev {
828 u8 n_ports; 828 u8 n_ports;
829}; 829};
830 830
831enum mlx4_pci_status {
832 MLX4_PCI_STATUS_DISABLED,
833 MLX4_PCI_STATUS_ENABLED,
834};
835
831struct mlx4_dev_persistent { 836struct mlx4_dev_persistent {
832 struct pci_dev *pdev; 837 struct pci_dev *pdev;
833 struct mlx4_dev *dev; 838 struct mlx4_dev *dev;
@@ -841,6 +846,8 @@ struct mlx4_dev_persistent {
841 u8 state; 846 u8 state;
842 struct mutex interface_state_mutex; /* protect SW state */ 847 struct mutex interface_state_mutex; /* protect SW state */
843 u8 interface_state; 848 u8 interface_state;
849 struct mutex pci_status_mutex; /* sync pci state */
850 enum mlx4_pci_status pci_status;
844}; 851};
845 852
846struct mlx4_dev { 853struct mlx4_dev {
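
The mlx4 header hunk introduces a pci_status enum guarded by a dedicated mutex so the enable/disable helpers can be made idempotent, matching the "Avoid repeated calls to pci enable/disable" fix in this pull. A small pthread-based sketch of such a guard, with printf() standing in for the real pci_enable_device()/pci_disable_device() calls:

#include <pthread.h>
#include <stdio.h>

enum pci_status { PCI_STATUS_DISABLED, PCI_STATUS_ENABLED };

static pthread_mutex_t pci_status_mutex = PTHREAD_MUTEX_INITIALIZER;
static enum pci_status pci_status = PCI_STATUS_DISABLED;

static void pci_enable_once(void)
{
    pthread_mutex_lock(&pci_status_mutex);
    if (pci_status == PCI_STATUS_DISABLED) {
        printf("pci_enable_device()\n");    /* stand-in for the real call */
        pci_status = PCI_STATUS_ENABLED;
    }
    pthread_mutex_unlock(&pci_status_mutex);
}

static void pci_disable_once(void)
{
    pthread_mutex_lock(&pci_status_mutex);
    if (pci_status == PCI_STATUS_ENABLED) {
        printf("pci_disable_device()\n");
        pci_status = PCI_STATUS_DISABLED;
    }
    pthread_mutex_unlock(&pci_status_mutex);
}

int main(void)
{
    pci_enable_once();
    pci_enable_once();   /* second call is a no-op, not a double enable */
    pci_disable_once();
    pci_disable_once();  /* likewise a no-op */
    return 0;
}
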
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index 1c33dd7da4a7..4ae95f7e8597 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -98,6 +98,45 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
98 if (!is_a_nulls(first)) 98 if (!is_a_nulls(first))
99 first->pprev = &n->next; 99 first->pprev = &n->next;
100} 100}
101
102/**
103 * hlist_nulls_add_tail_rcu
104 * @n: the element to add to the hash list.
105 * @h: the list to add to.
106 *
107 * Description:
108 * Adds the specified element to the end of the specified hlist_nulls,
109 * while permitting racing traversals. NOTE: tail insertion requires
110 * list traversal.
111 *
112 * The caller must take whatever precautions are necessary
113 * (such as holding appropriate locks) to avoid racing
114 * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
115 * or hlist_nulls_del_rcu(), running on this same list.
116 * However, it is perfectly legal to run concurrently with
117 * the _rcu list-traversal primitives, such as
118 * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
119 * problems on Alpha CPUs. Regardless of the type of CPU, the
120 * list-traversal primitive must be guarded by rcu_read_lock().
121 */
122static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
123 struct hlist_nulls_head *h)
124{
125 struct hlist_nulls_node *i, *last = NULL;
126
127 for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
128 i = hlist_nulls_next_rcu(i))
129 last = i;
130
131 if (last) {
132 n->next = last->next;
133 n->pprev = &last->next;
134 rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
135 } else {
136 hlist_nulls_add_head_rcu(n, h);
137 }
138}
139
101/** 140/**
102 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type 141 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
103 * @tpos: the type * to use as a loop cursor. 142 * @tpos: the type * to use as a loop cursor.
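
As the new kernel-doc above notes, hlist_nulls_add_tail_rcu() must walk the whole chain because a nulls list is singly linked and keeps no tail pointer; an empty list degenerates to a head insertion. A plain (non-RCU, non-nulls) userspace sketch of that walk-then-link logic:

#include <stdio.h>

struct node {
    int val;
    struct node *next;
};

static void add_tail(struct node **head, struct node *n)
{
    struct node *i, *last = NULL;

    for (i = *head; i; i = i->next)     /* tail insertion needs traversal */
        last = i;

    n->next = NULL;
    if (last)
        last->next = n;                 /* append after the current tail */
    else
        *head = n;                      /* empty list: same as add_head */
}

int main(void)
{
    struct node a = {1, NULL}, b = {2, NULL};
    struct node *head = NULL;

    add_tail(&head, &a);
    add_tail(&head, &b);
    for (struct node *i = head; i; i = i->next)
        printf("%d\n", i->val);
    return 0;
}
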
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index c0a92e2c286d..74c9693d4941 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -17,6 +17,7 @@
17#include <linux/hardirq.h> 17#include <linux/hardirq.h>
18#include <linux/rcupdate.h> 18#include <linux/rcupdate.h>
19#include <net/sock.h> 19#include <net/sock.h>
20#include <net/inet_sock.h>
20 21
21#ifdef CONFIG_CGROUP_NET_CLASSID 22#ifdef CONFIG_CGROUP_NET_CLASSID
22struct cgroup_cls_state { 23struct cgroup_cls_state {
@@ -63,11 +64,13 @@ static inline u32 task_get_classid(const struct sk_buff *skb)
63 * softirqs always disables bh. 64 * softirqs always disables bh.
64 */ 65 */
65 if (in_serving_softirq()) { 66 if (in_serving_softirq()) {
67 struct sock *sk = skb_to_full_sk(skb);
68
66 /* If there is an sock_cgroup_classid we'll use that. */ 69 /* If there is an sock_cgroup_classid we'll use that. */
67 if (!skb->sk) 70 if (!sk || !sk_fullsock(sk))
68 return 0; 71 return 0;
69 72
70 classid = sock_cgroup_classid(&skb->sk->sk_cgrp_data); 73 classid = sock_cgroup_classid(&sk->sk_cgrp_data);
71 } 74 }
72 75
73 return classid; 76 return classid;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 295d291269e2..54c779416eec 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -101,6 +101,9 @@ void fib6_force_start_gc(struct net *net);
101struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, 101struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
102 const struct in6_addr *addr, bool anycast); 102 const struct in6_addr *addr, bool anycast);
103 103
104struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
105 int flags);
106
104/* 107/*
105 * support functions for ND 108 * support functions for ND
106 * 109 *
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index d0aeb97aec5d..1be050ada8c5 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -959,6 +959,8 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
959int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); 959int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
960int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr, 960int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
961 int addr_len); 961 int addr_len);
962int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr);
963void ip6_datagram_release_cb(struct sock *sk);
962 964
963int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, 965int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
964 int *addr_len); 966 int *addr_len);
diff --git a/include/net/route.h b/include/net/route.h
index 9b0a523bb428..6de665bf1750 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -209,6 +209,9 @@ unsigned int inet_addr_type_dev_table(struct net *net,
209void ip_rt_multicast_event(struct in_device *); 209void ip_rt_multicast_event(struct in_device *);
210int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg); 210int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
211void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt); 211void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
212struct rtable *rt_dst_alloc(struct net_device *dev,
213 unsigned int flags, u16 type,
214 bool nopolicy, bool noxfrm, bool will_cache);
212 215
213struct in_ifaddr; 216struct in_ifaddr;
214void fib_add_ifaddr(struct in_ifaddr *); 217void fib_add_ifaddr(struct in_ifaddr *);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 6df1ce7a411c..5a404c354f4c 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -847,6 +847,11 @@ struct sctp_transport {
847 */ 847 */
848 ktime_t last_time_heard; 848 ktime_t last_time_heard;
849 849
850 /* When was the last time that we sent a chunk using this
851 * transport? We use this to check for idle transports
852 */
853 unsigned long last_time_sent;
854
850 /* Last time(in jiffies) when cwnd is reduced due to the congestion 855 /* Last time(in jiffies) when cwnd is reduced due to the congestion
851 * indication based on ECNE chunk. 856 * indication based on ECNE chunk.
852 */ 857 */
@@ -952,7 +957,8 @@ void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
952 struct sctp_sock *); 957 struct sctp_sock *);
953void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk); 958void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk);
954void sctp_transport_free(struct sctp_transport *); 959void sctp_transport_free(struct sctp_transport *);
955void sctp_transport_reset_timers(struct sctp_transport *); 960void sctp_transport_reset_t3_rtx(struct sctp_transport *);
961void sctp_transport_reset_hb_timer(struct sctp_transport *);
956int sctp_transport_hold(struct sctp_transport *); 962int sctp_transport_hold(struct sctp_transport *);
957void sctp_transport_put(struct sctp_transport *); 963void sctp_transport_put(struct sctp_transport *);
958void sctp_transport_update_rto(struct sctp_transport *, __u32); 964void sctp_transport_update_rto(struct sctp_transport *, __u32);
diff --git a/include/net/sock.h b/include/net/sock.h
index 255d3e03727b..121ffc115c4f 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -630,7 +630,11 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
630 630
631static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) 631static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
632{ 632{
633 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); 633 if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
634 sk->sk_family == AF_INET6)
635 hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
636 else
637 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
634} 638}
635 639
636static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) 640static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index b91370f61be6..6db10228113f 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -552,6 +552,8 @@ void tcp_send_ack(struct sock *sk);
552void tcp_send_delayed_ack(struct sock *sk); 552void tcp_send_delayed_ack(struct sock *sk);
553void tcp_send_loss_probe(struct sock *sk); 553void tcp_send_loss_probe(struct sock *sk);
554bool tcp_schedule_loss_probe(struct sock *sk); 554bool tcp_schedule_loss_probe(struct sock *sk);
555void tcp_skb_collapse_tstamp(struct sk_buff *skb,
556 const struct sk_buff *next_skb);
555 557
556/* tcp_input.c */ 558/* tcp_input.c */
557void tcp_resume_early_retransmit(struct sock *sk); 559void tcp_resume_early_retransmit(struct sock *sk);
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index b71fd0b5cbad..813ffb2e22c9 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -96,6 +96,7 @@ header-y += cyclades.h
96header-y += cycx_cfm.h 96header-y += cycx_cfm.h
97header-y += dcbnl.h 97header-y += dcbnl.h
98header-y += dccp.h 98header-y += dccp.h
99header-y += devlink.h
99header-y += dlmconstants.h 100header-y += dlmconstants.h
100header-y += dlm_device.h 101header-y += dlm_device.h
101header-y += dlm.h 102header-y += dlm.h
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2e08f8e9b771..618ef77c302a 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1374,6 +1374,7 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
1374 } 1374 }
1375 1375
1376 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || 1376 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
1377 BPF_SIZE(insn->code) == BPF_DW ||
1377 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { 1378 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
1378 verbose("BPF_LD_ABS uses reserved fields\n"); 1379 verbose("BPF_LD_ABS uses reserved fields\n");
1379 return -EINVAL; 1380 return -EINVAL;
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 8570bc7744c2..5a61f35412a0 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -370,7 +370,11 @@ ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
370 left - sizeof(struct ebt_entry_match) < m->match_size) 370 left - sizeof(struct ebt_entry_match) < m->match_size)
371 return -EINVAL; 371 return -EINVAL;
372 372
373 match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0); 373 match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
374 if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
375 request_module("ebt_%s", m->u.name);
376 match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
377 }
374 if (IS_ERR(match)) 378 if (IS_ERR(match))
375 return PTR_ERR(match); 379 return PTR_ERR(match);
376 m->u.match = match; 380 m->u.match = match;
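
The ebtables hunk above swaps xt_request_find_match() for an explicit sequence: look the match up, and only on a miss request the "ebt_<name>" module and retry. A generic lookup-or-load-and-retry sketch with a toy registry standing in for the xtables match list and for request_module(); all names are invented:

#include <stdio.h>
#include <string.h>

#define MAX_MATCHES 8

static const char *registered[MAX_MATCHES];
static int nr_registered;

static const char *find_match(const char *name)
{
    for (int i = 0; i < nr_registered; i++)
        if (!strcmp(registered[i], name))
            return registered[i];
    return NULL;
}

static void request_module(const char *name)
{
    /* stand-in for modprobe: pretend loading registers the match */
    printf("loading ebt_%s\n", name);
    if (nr_registered < MAX_MATCHES)
        registered[nr_registered++] = name;
}

static const char *request_find_match(const char *name)
{
    const char *m = find_match(name);

    if (!m) {                   /* miss: load the module, then retry once */
        request_module(name);
        m = find_match(name);
    }
    return m;
}

int main(void)
{
    printf("found: %s\n", request_find_match("ip"));
    return 0;
}
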
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d04c2d1c8c87..e561f9f07d6d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4502,13 +4502,16 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
4502 __skb_push(skb, offset); 4502 __skb_push(skb, offset);
4503 err = __vlan_insert_tag(skb, skb->vlan_proto, 4503 err = __vlan_insert_tag(skb, skb->vlan_proto,
4504 skb_vlan_tag_get(skb)); 4504 skb_vlan_tag_get(skb));
4505 if (err) 4505 if (err) {
4506 __skb_pull(skb, offset);
4506 return err; 4507 return err;
4508 }
4509
4507 skb->protocol = skb->vlan_proto; 4510 skb->protocol = skb->vlan_proto;
4508 skb->mac_len += VLAN_HLEN; 4511 skb->mac_len += VLAN_HLEN;
4509 __skb_pull(skb, offset);
4510 4512
4511 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 4513 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
4514 __skb_pull(skb, offset);
4512 } 4515 }
4513 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 4516 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
4514 return 0; 4517 return 0;
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 607a14f20d88..b1dc096d22f8 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1034,10 +1034,13 @@ source_ok:
1034 if (!fld.daddr) { 1034 if (!fld.daddr) {
1035 fld.daddr = fld.saddr; 1035 fld.daddr = fld.saddr;
1036 1036
1037 err = -EADDRNOTAVAIL;
1038 if (dev_out) 1037 if (dev_out)
1039 dev_put(dev_out); 1038 dev_put(dev_out);
1039 err = -EINVAL;
1040 dev_out = init_net.loopback_dev; 1040 dev_out = init_net.loopback_dev;
1041 if (!dev_out->dn_ptr)
1042 goto out;
1043 err = -EADDRNOTAVAIL;
1041 dev_hold(dev_out); 1044 dev_hold(dev_out);
1042 if (!fld.daddr) { 1045 if (!fld.daddr) {
1043 fld.daddr = 1046 fld.daddr =
@@ -1110,6 +1113,8 @@ source_ok:
1110 if (dev_out == NULL) 1113 if (dev_out == NULL)
1111 goto out; 1114 goto out;
1112 dn_db = rcu_dereference_raw(dev_out->dn_ptr); 1115 dn_db = rcu_dereference_raw(dev_out->dn_ptr);
1116 if (!dn_db)
1117 goto e_inval;
1113 /* Possible improvement - check all devices for local addr */ 1118 /* Possible improvement - check all devices for local addr */
1114 if (dn_dev_islocal(dev_out, fld.daddr)) { 1119 if (dn_dev_islocal(dev_out, fld.daddr)) {
1115 dev_put(dev_out); 1120 dev_put(dev_out);
@@ -1151,6 +1156,8 @@ select_source:
1151 dev_put(dev_out); 1156 dev_put(dev_out);
1152 dev_out = init_net.loopback_dev; 1157 dev_out = init_net.loopback_dev;
1153 dev_hold(dev_out); 1158 dev_hold(dev_out);
1159 if (!dev_out->dn_ptr)
1160 goto e_inval;
1154 fld.flowidn_oif = dev_out->ifindex; 1161 fld.flowidn_oif = dev_out->ifindex;
1155 if (res.fi) 1162 if (res.fi)
1156 dn_fib_info_put(res.fi); 1163 dn_fib_info_put(res.fi);
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index dd8c80dc32a2..8f8713b4388f 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -81,6 +81,12 @@ static int __init arptable_filter_init(void)
81 return ret; 81 return ret;
82 } 82 }
83 83
84 ret = arptable_filter_table_init(&init_net);
85 if (ret) {
86 unregister_pernet_subsys(&arptable_filter_net_ops);
87 kfree(arpfilter_ops);
88 }
89
84 return ret; 90 return ret;
85} 91}
86 92
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 02c62299d717..60398a9370e7 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1438,9 +1438,9 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
1438#endif 1438#endif
1439} 1439}
1440 1440
1441static struct rtable *rt_dst_alloc(struct net_device *dev, 1441struct rtable *rt_dst_alloc(struct net_device *dev,
1442 unsigned int flags, u16 type, 1442 unsigned int flags, u16 type,
1443 bool nopolicy, bool noxfrm, bool will_cache) 1443 bool nopolicy, bool noxfrm, bool will_cache)
1444{ 1444{
1445 struct rtable *rt; 1445 struct rtable *rt;
1446 1446
@@ -1468,6 +1468,7 @@ static struct rtable *rt_dst_alloc(struct net_device *dev,
1468 1468
1469 return rt; 1469 return rt;
1470} 1470}
1471EXPORT_SYMBOL(rt_dst_alloc);
1471 1472
1472/* called in rcu_read_lock() section */ 1473/* called in rcu_read_lock() section */
1473static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, 1474static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -2045,6 +2046,18 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2045 */ 2046 */
2046 if (fi && res->prefixlen < 4) 2047 if (fi && res->prefixlen < 4)
2047 fi = NULL; 2048 fi = NULL;
2049 } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
2050 (orig_oif != dev_out->ifindex)) {
2051 /* For local routes that require a particular output interface
2052 * we do not want to cache the result. Caching the result
2053 * causes incorrect behaviour when there are multiple source
2054 * addresses on the interface, the end result being that if the
2055 * intended recipient is waiting on that interface for the
2056 * packet he won't receive it because it will be delivered on
2057 * the loopback interface and the IP_PKTINFO ipi_ifindex will
2058 * be set to the loopback interface as well.
2059 */
2060 fi = NULL;
2048 } 2061 }
2049 2062
2050 fnhe = NULL; 2063 fnhe = NULL;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e6e65f79ade8..c124c3c12f7c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1309,6 +1309,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1309 if (skb == tcp_highest_sack(sk)) 1309 if (skb == tcp_highest_sack(sk))
1310 tcp_advance_highest_sack(sk, skb); 1310 tcp_advance_highest_sack(sk, skb);
1311 1311
1312 tcp_skb_collapse_tstamp(prev, skb);
1312 tcp_unlink_write_queue(skb, sk); 1313 tcp_unlink_write_queue(skb, sk);
1313 sk_wmem_free_skb(sk, skb); 1314 sk_wmem_free_skb(sk, skb);
1314 1315
@@ -3098,7 +3099,8 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
3098 3099
3099 shinfo = skb_shinfo(skb); 3100 shinfo = skb_shinfo(skb);
3100 if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) && 3101 if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) &&
3101 between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1)) 3102 !before(shinfo->tskey, prior_snd_una) &&
3103 before(shinfo->tskey, tcp_sk(sk)->snd_una))
3102 __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK); 3104 __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);
3103} 3105}
3104 3106
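
The tcp_ack_tstamp() change above matters on duplicate ACKs, where prior_snd_una == snd_una: the old inclusive between(tskey, prior_snd_una, snd_una - 1) wraps into a range covering every sequence number and fires SCM_TSTAMP_ACK prematurely, while the new half-open [prior_snd_una, snd_una) check is simply empty. A compilable demonstration using the kernel's modular sequence arithmetic:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool before(uint32_t seq1, uint32_t seq2)
{
    return (int32_t)(seq1 - seq2) < 0;
}

/* the kernel's between(): is seq1 within [seq2, seq3], modulo 2^32? */
static bool between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
{
    return seq3 - seq2 >= seq1 - seq2;
}

int main(void)
{
    uint32_t prior_snd_una = 1000, snd_una = 1000; /* dup ACK: nothing acked */
    uint32_t tskey = 5000;                         /* not yet acked */

    /* old: [1000, 999] wraps to "everything", so this wrongly prints 1 */
    printf("old check fires: %d\n",
           between(tskey, prior_snd_una, snd_una - 1));
    /* new: [1000, 1000) is empty, so this correctly prints 0 */
    printf("new check fires: %d\n",
           !before(tskey, prior_snd_una) && before(tskey, snd_una));
    return 0;
}
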
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7d2dc015cd19..441ae9da3a23 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2441,6 +2441,20 @@ u32 __tcp_select_window(struct sock *sk)
2441 return window; 2441 return window;
2442} 2442}
2443 2443
2444void tcp_skb_collapse_tstamp(struct sk_buff *skb,
2445 const struct sk_buff *next_skb)
2446{
2447 const struct skb_shared_info *next_shinfo = skb_shinfo(next_skb);
2448 u8 tsflags = next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
2449
2450 if (unlikely(tsflags)) {
2451 struct skb_shared_info *shinfo = skb_shinfo(skb);
2452
2453 shinfo->tx_flags |= tsflags;
2454 shinfo->tskey = next_shinfo->tskey;
2455 }
2456}
2457
2444/* Collapses two adjacent SKB's during retransmission. */ 2458/* Collapses two adjacent SKB's during retransmission. */
2445static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) 2459static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
2446{ 2460{
@@ -2484,6 +2498,8 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
2484 2498
2485 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb)); 2499 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2486 2500
2501 tcp_skb_collapse_tstamp(skb, next_skb);
2502
2487 sk_wmem_free_skb(sk, next_skb); 2503 sk_wmem_free_skb(sk, next_skb);
2488} 2504}
2489 2505
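
tcp_skb_collapse_tstamp() above keeps timestamp requests alive across collapses: the surviving skb inherits the absorbed skb's SKBTX_ANY_TSTAMP flags and tskey, and the two "Merge tx_flags and tskey" fixes in this pull call it from tcp_collapse_retrans() and tcp_shifted_skb(). A structural sketch with invented stand-in types:

#include <stdio.h>
#include <stdint.h>

#define TXF_ANY_TSTAMP 0x0f     /* stand-in for SKBTX_ANY_TSTAMP */

struct fake_shinfo {
    uint8_t  tx_flags;
    uint32_t tskey;             /* byte offset the user asked to track */
};

static void collapse_tstamp(struct fake_shinfo *skb,
                            const struct fake_shinfo *next_skb)
{
    uint8_t tsflags = next_skb->tx_flags & TXF_ANY_TSTAMP;

    if (tsflags) {              /* next skb carried a timestamp request */
        skb->tx_flags |= tsflags;
        skb->tskey = next_skb->tskey;   /* inherit the tracked key too */
    }
}

int main(void)
{
    struct fake_shinfo prev = { 0, 0 };
    struct fake_shinfo next = { 0x04, 777 };    /* e.g. ACK timestamping */

    collapse_tstamp(&prev, &next);
    printf("flags=0x%x tskey=%u\n", prev.tx_flags, (unsigned)prev.tskey);
    return 0;
}
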
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 08eed5e16df0..a2e7f55a1f61 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -339,8 +339,13 @@ found:
339 339
340 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); 340 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
341 spin_lock(&hslot2->lock); 341 spin_lock(&hslot2->lock);
342 hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, 342 if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
343 &hslot2->head); 343 sk->sk_family == AF_INET6)
344 hlist_nulls_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
345 &hslot2->head);
346 else
347 hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
348 &hslot2->head);
344 hslot2->count++; 349 hslot2->count++;
345 spin_unlock(&hslot2->lock); 350 spin_unlock(&hslot2->lock);
346 } 351 }
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 27aed1afcf81..23cec53b568a 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3255,6 +3255,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 			   void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct netdev_notifier_changeupper_info *info;
 	struct inet6_dev *idev = __in6_dev_get(dev);
 	int run_pending = 0;
 	int err;
@@ -3413,6 +3414,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 		if (idev)
 			addrconf_type_change(dev, event);
 		break;
+
+	case NETDEV_CHANGEUPPER:
+		info = ptr;
+
+		/* flush all routes if dev is linked to or unlinked from
+		 * an L3 master device (e.g., VRF)
+		 */
+		if (info->upper_dev && netif_is_l3_master(info->upper_dev))
+			addrconf_ifdown(dev, 0);
 	}
 
 	return NOTIFY_OK;
@@ -3438,6 +3448,12 @@ static void addrconf_type_change(struct net_device *dev, unsigned long event)
 	ipv6_mc_unmap(idev);
 }
 
+static bool addr_is_local(const struct in6_addr *addr)
+{
+	return ipv6_addr_type(addr) &
+		(IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
+}
+
 static int addrconf_ifdown(struct net_device *dev, int how)
 {
 	struct net *net = dev_net(dev);
@@ -3495,7 +3511,8 @@ restart:
 		 * address is retained on a down event
 		 */
 		if (!keep_addr ||
-		    !(ifa->flags & IFA_F_PERMANENT)) {
+		    !(ifa->flags & IFA_F_PERMANENT) ||
+		    addr_is_local(&ifa->addr)) {
 			hlist_del_init_rcu(&ifa->addr_lst);
 			goto restart;
 		}
@@ -3544,7 +3561,8 @@ restart:
 		write_unlock_bh(&idev->lock);
 		spin_lock_bh(&ifa->lock);
 
-		if (keep_addr && (ifa->flags & IFA_F_PERMANENT)) {
+		if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
+		    !addr_is_local(&ifa->addr)) {
 			/* set state to skip the notifier below */
 			state = INET6_IFADDR_STATE_DEAD;
 			ifa->state = 0;
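addr_is_local() gates the keep-address-on-down logic: link-local and loopback addresses are meaningless once the device is gone, so they are flushed even when keep_addr is set. A userspace analogue of the same classification using the standard in6 macros (a sketch, not the kernel helper):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

static int addr_is_local(const struct in6_addr *a)
{
	return IN6_IS_ADDR_LINKLOCAL(a) || IN6_IS_ADDR_LOOPBACK(a);
}

int main(void)
{
	const char *samples[] = { "fe80::1", "::1", "2001:db8::1" };

	for (int i = 0; i < 3; i++) {
		struct in6_addr a;

		inet_pton(AF_INET6, samples[i], &a);
		printf("%-12s local=%d\n", samples[i], addr_is_local(&a));
	}
	return 0;
}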
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 428162155280..9dd3882fe6bf 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -40,18 +40,114 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
 	return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
 }
 
+static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk)
+{
+	struct inet_sock *inet = inet_sk(sk);
+	struct ipv6_pinfo *np = inet6_sk(sk);
+
+	memset(fl6, 0, sizeof(*fl6));
+	fl6->flowi6_proto = sk->sk_protocol;
+	fl6->daddr = sk->sk_v6_daddr;
+	fl6->saddr = np->saddr;
+	fl6->flowi6_oif = sk->sk_bound_dev_if;
+	fl6->flowi6_mark = sk->sk_mark;
+	fl6->fl6_dport = inet->inet_dport;
+	fl6->fl6_sport = inet->inet_sport;
+	fl6->flowlabel = np->flow_label;
+
+	if (!fl6->flowi6_oif)
+		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
+
+	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr))
+		fl6->flowi6_oif = np->mcast_oif;
+
+	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
+}
+
+int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr)
+{
+	struct ip6_flowlabel *flowlabel = NULL;
+	struct in6_addr *final_p, final;
+	struct ipv6_txoptions *opt;
+	struct dst_entry *dst;
+	struct inet_sock *inet = inet_sk(sk);
+	struct ipv6_pinfo *np = inet6_sk(sk);
+	struct flowi6 fl6;
+	int err = 0;
+
+	if (np->sndflow && (np->flow_label & IPV6_FLOWLABEL_MASK)) {
+		flowlabel = fl6_sock_lookup(sk, np->flow_label);
+		if (!flowlabel)
+			return -EINVAL;
+	}
+	ip6_datagram_flow_key_init(&fl6, sk);
+
+	rcu_read_lock();
+	opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
+	final_p = fl6_update_dst(&fl6, opt, &final);
+	rcu_read_unlock();
+
+	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+	if (IS_ERR(dst)) {
+		err = PTR_ERR(dst);
+		goto out;
+	}
+
+	if (fix_sk_saddr) {
+		if (ipv6_addr_any(&np->saddr))
+			np->saddr = fl6.saddr;
+
+		if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+			sk->sk_v6_rcv_saddr = fl6.saddr;
+			inet->inet_rcv_saddr = LOOPBACK4_IPV6;
+			if (sk->sk_prot->rehash)
+				sk->sk_prot->rehash(sk);
+		}
+	}
+
+	ip6_dst_store(sk, dst,
+		      ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
+		      &sk->sk_v6_daddr : NULL,
+#ifdef CONFIG_IPV6_SUBTREES
+		      ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
+		      &np->saddr :
+#endif
+		      NULL);
+
+out:
+	fl6_sock_release(flowlabel);
+	return err;
+}
+
+void ip6_datagram_release_cb(struct sock *sk)
+{
+	struct dst_entry *dst;
+
+	if (ipv6_addr_v4mapped(&sk->sk_v6_daddr))
+		return;
+
+	rcu_read_lock();
+	dst = __sk_dst_get(sk);
+	if (!dst || !dst->obsolete ||
+	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie)) {
+		rcu_read_unlock();
+		return;
+	}
+	rcu_read_unlock();
+
+	ip6_datagram_dst_update(sk, false);
+}
+EXPORT_SYMBOL_GPL(ip6_datagram_release_cb);
+
 static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct in6_addr *daddr, *final_p, final;
-	struct dst_entry *dst;
-	struct flowi6 fl6;
-	struct ip6_flowlabel *flowlabel = NULL;
-	struct ipv6_txoptions *opt;
+	struct in6_addr *daddr;
 	int addr_type;
 	int err;
+	__be32 fl6_flowlabel = 0;
 
 	if (usin->sin6_family == AF_INET) {
 		if (__ipv6_only_sock(sk))
@@ -66,15 +162,8 @@ static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int a
 	if (usin->sin6_family != AF_INET6)
 		return -EAFNOSUPPORT;
 
-	memset(&fl6, 0, sizeof(fl6));
-	if (np->sndflow) {
-		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
-		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
-			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
-			if (!flowlabel)
-				return -EINVAL;
-		}
-	}
+	if (np->sndflow)
+		fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
 
 	addr_type = ipv6_addr_type(&usin->sin6_addr);
 
@@ -145,7 +234,7 @@ ipv4_connected:
 	}
 
 	sk->sk_v6_daddr = *daddr;
-	np->flow_label = fl6.flowlabel;
+	np->flow_label = fl6_flowlabel;
 
 	inet->inet_dport = usin->sin6_port;
 
@@ -154,59 +243,13 @@ ipv4_connected:
 	 * destination cache for it.
 	 */
 
-	fl6.flowi6_proto = sk->sk_protocol;
-	fl6.daddr = sk->sk_v6_daddr;
-	fl6.saddr = np->saddr;
-	fl6.flowi6_oif = sk->sk_bound_dev_if;
-	fl6.flowi6_mark = sk->sk_mark;
-	fl6.fl6_dport = inet->inet_dport;
-	fl6.fl6_sport = inet->inet_sport;
-
-	if (!fl6.flowi6_oif)
-		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
-
-	if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
-		fl6.flowi6_oif = np->mcast_oif;
-
-	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
-
-	rcu_read_lock();
-	opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
-	final_p = fl6_update_dst(&fl6, opt, &final);
-	rcu_read_unlock();
-
-	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
-	err = 0;
-	if (IS_ERR(dst)) {
-		err = PTR_ERR(dst);
+	err = ip6_datagram_dst_update(sk, true);
+	if (err)
 		goto out;
-	}
-
-	/* source address lookup done in ip6_dst_lookup */
-
-	if (ipv6_addr_any(&np->saddr))
-		np->saddr = fl6.saddr;
-
-	if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
-		sk->sk_v6_rcv_saddr = fl6.saddr;
-		inet->inet_rcv_saddr = LOOPBACK4_IPV6;
-		if (sk->sk_prot->rehash)
-			sk->sk_prot->rehash(sk);
-	}
-
-	ip6_dst_store(sk, dst,
-		      ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
-		      &sk->sk_v6_daddr : NULL,
-#ifdef CONFIG_IPV6_SUBTREES
-		      ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
-		      &np->saddr :
-#endif
-		      NULL);
 
 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
 out:
-	fl6_sock_release(flowlabel);
 	return err;
 }
 
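With the lookup factored into ip6_datagram_dst_update(), connect() on a datagram socket both caches the route and fixes up the source address. One way to observe the result from userspace (a sketch; assumes working IPv6 connectivity and an arbitrary public destination):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in6 dst = { .sin6_family = AF_INET6,
				    .sin6_port = htons(53) };
	struct sockaddr_in6 src;
	socklen_t len = sizeof(src);
	char buf[INET6_ADDRSTRLEN];
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	inet_pton(AF_INET6, "2001:4860:4860::8888", &dst.sin6_addr);
	if (fd < 0 || connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		perror("connect");
		return 1;
	}
	/* The kernel's route lookup picked the source address for us. */
	getsockname(fd, (struct sockaddr *)&src, &len);
	printf("chosen saddr: %s\n",
	       inet_ntop(AF_INET6, &src.sin6_addr, buf, sizeof(buf)));
	close(fd);
	return 0;
}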
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index ed446639219c..d916d6ab9ad2 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -338,9 +338,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
 	return rt;
 }
 
-static struct rt6_info *ip6_dst_alloc(struct net *net,
+struct rt6_info *ip6_dst_alloc(struct net *net,
 			       struct net_device *dev,
 			       int flags)
 {
 	struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
 
@@ -364,6 +364,7 @@ static struct rt6_info *ip6_dst_alloc(struct net *net,
 
 	return rt;
 }
+EXPORT_SYMBOL(ip6_dst_alloc);
 
 static void ip6_dst_destroy(struct dst_entry *dst)
 {
@@ -1417,8 +1418,20 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
 
 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
 {
+	struct dst_entry *dst;
+
 	ip6_update_pmtu(skb, sock_net(sk), mtu,
 			sk->sk_bound_dev_if, sk->sk_mark);
+
+	dst = __sk_dst_get(sk);
+	if (!dst || !dst->obsolete ||
+	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
+		return;
+
+	bh_lock_sock(sk);
+	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
+		ip6_datagram_dst_update(sk, false);
+	bh_unlock_sock(sk);
 }
 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
 
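The hook above refreshes a connected datagram socket's cached dst as soon as a PMTU event lands, instead of waiting for the stale entry to be noticed later. The cached value is visible from userspace through the IPV6_MTU socket option on a connected socket; a small hedged helper (connected_pmtu is an illustrative name):

#include <netinet/in.h>
#include <sys/socket.h>

/* Returns the path MTU recorded in the socket's cached route, or -1. */
int connected_pmtu(int fd)
{
	int mtu;
	socklen_t len = sizeof(mtu);

	if (getsockopt(fd, IPPROTO_IPV6, IPV6_MTU, &mtu, &len) < 0)
		return -1;
	return mtu;
}

Called before and after an ICMPv6 packet-too-big event, the returned value should now drop promptly for connected UDPv6 sockets as well.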
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 8125931106be..6bc5c664fa46 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1539,6 +1539,7 @@ struct proto udpv6_prot = {
 	.sendmsg	   = udpv6_sendmsg,
 	.recvmsg	   = udpv6_recvmsg,
 	.backlog_rcv	   = __udpv6_queue_rcv_skb,
+	.release_cb	   = ip6_datagram_release_cb,
 	.hash		   = udp_lib_hash,
 	.unhash		   = udp_lib_unhash,
 	.rehash		   = udp_v6_rehash,
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 278f3b9356ef..7cc1d9c22a9f 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -410,6 +410,8 @@ static void tcp_options(const struct sk_buff *skb,
 			length--;
 			continue;
 		default:
+			if (length < 2)
+				return;
 			opsize=*ptr++;
 			if (opsize < 2) /* "silly options" */
 				return;
@@ -470,6 +472,8 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
 			length--;
 			continue;
 		default:
+			if (length < 2)
+				return;
 			opsize = *ptr++;
 			if (opsize < 2) /* "silly options" */
 				return;
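Both conntrack hunks add the same guard: before dereferencing the option-size byte, make sure the remaining length still holds it, otherwise a truncated options block reads past the header. A standalone sketch of a bounded TCP option walk with that check (illustrative, not the kernel parser):

#include <stddef.h>
#include <stdio.h>

#define TCPOPT_EOL	0
#define TCPOPT_NOP	1

static void walk_tcp_options(const unsigned char *ptr, size_t length)
{
	while (length > 0) {
		unsigned char opcode = *ptr++;
		unsigned char opsize;

		length--;
		if (opcode == TCPOPT_EOL)
			return;
		if (opcode == TCPOPT_NOP)
			continue;
		if (length < 1)		/* no room left for the size byte */
			return;
		opsize = *ptr++;
		length--;
		if (opsize < 2 || opsize - 2 > length)
			return;		/* silly or truncated option */
		printf("option %u, %u bytes\n", opcode, opsize);
		ptr += opsize - 2;
		length -= opsize - 2;
	}
}

int main(void)
{
	/* MSS option (kind 2, len 4, value 1460) followed by a truncated
	 * trailing kind byte, which the bounds check rejects safely.
	 */
	const unsigned char opts[] = { 2, 4, 0x05, 0xb4, 3 };

	walk_tcp_options(opts, sizeof(opts));
	return 0;
}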
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 215fc08c02ab..330ebd600f25 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -688,7 +688,7 @@ static int netlink_release(struct socket *sock)
 
 	skb_queue_purge(&sk->sk_write_queue);
 
-	if (nlk->portid) {
+	if (nlk->portid && nlk->bound) {
 		struct netlink_notify n = {
 						.net = sock_net(sk),
 						.protocol = sk->sk_protocol,
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index e9dd47b2a85b..879185fe183f 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -461,7 +461,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
 		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
 
 		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
-			set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
+			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
 				      true);
 			memcpy(&flow_key->ipv6.addr.src, masked,
 			       sizeof(flow_key->ipv6.addr.src));
@@ -483,7 +483,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
 						  NULL, &flags)
 				       != NEXTHDR_ROUTING);
 
-			set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
+			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
 				      recalc_csum);
 			memcpy(&flow_key->ipv6.addr.dst, masked,
 			       sizeof(flow_key->ipv6.addr.dst));
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 1b9d286756be..b5fea1101faa 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -367,6 +367,7 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
 	} else if (key->eth.type == htons(ETH_P_IPV6)) {
 		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
 
+		skb_orphan(skb);
 		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
 		err = nf_ct_frag6_gather(net, skb, user);
 		if (err)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f12c17f355d9..18d0becbc46d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3521,6 +3521,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
 	i->ifindex = mreq->mr_ifindex;
 	i->alen = mreq->mr_alen;
 	memcpy(i->addr, mreq->mr_address, i->alen);
+	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
 	i->count = 1;
 	i->next = po->mclist;
 	po->mclist = i;
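The added memset() is the standard fix for a heap infoleak: only alen bytes of the fixed-size address buffer were written, and the stale tail was later copied out to userspace via PACKET_DIAG_MCLIST. The same hygiene in a standalone sketch (struct and field names here are hypothetical):

#include <stdio.h>
#include <string.h>

#define ADDR_MAX 32

struct mc_entry {
	unsigned char addr[ADDR_MAX];
	unsigned short alen;
};

static void mc_entry_set(struct mc_entry *e,
			 const unsigned char *addr, unsigned short alen)
{
	e->alen = alen;
	memcpy(e->addr, addr, alen);
	/* Zero the tail so stale bytes never leak alongside the address. */
	memset(e->addr + alen, 0, sizeof(e->addr) - alen);
}

int main(void)
{
	static const unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 0x01 };
	struct mc_entry e;

	mc_entry_set(&e, mac, sizeof(mac));
	printf("alen=%u last=%u\n", e.alen, e.addr[ADDR_MAX - 1]); /* last=0 */
	return 0;
}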
diff --git a/net/rds/cong.c b/net/rds/cong.c
index e6144b8246fd..6641bcf7c185 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -299,7 +299,7 @@ void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
 	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 
-	__set_bit_le(off, (void *)map->m_page_addrs[i]);
+	set_bit_le(off, (void *)map->m_page_addrs[i]);
 }
 
 void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
@@ -313,7 +313,7 @@ void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
 	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 
-	__clear_bit_le(off, (void *)map->m_page_addrs[i]);
+	clear_bit_le(off, (void *)map->m_page_addrs[i]);
 }
 
 static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 8764970f0c24..310cabce2311 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -194,7 +194,7 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
 	dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
 	dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
 	dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
-	dp->dp_ack_seq = rds_ib_piggyb_ack(ic);
+	dp->dp_ack_seq = cpu_to_be64(rds_ib_piggyb_ack(ic));
 
 	/* Advertise flow control */
 	if (ic->i_flowctl) {
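dp_ack_seq is a wire-order (__be64) field, so storing the host-order ack value only worked on big-endian hosts; the cpu_to_be64() above makes the conversion explicit. A userspace demonstration of the distinction using glibc's <endian.h> (sketch only):

#include <endian.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t ack_seq = 0x0102030405060708ULL;

	/* On a little-endian machine these print different values; code
	 * that skips the conversion works only on big-endian hosts.
	 */
	printf("host order: %016" PRIx64 "\n", ack_seq);
	printf("wire order: %016" PRIx64 "\n", htobe64(ack_seq));
	return 0;
}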
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index f18c35024207..80742edea96f 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -159,12 +159,15 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 	if (validate)
 		skb = validate_xmit_skb_list(skb, dev);
 
-	if (skb) {
+	if (likely(skb)) {
 		HARD_TX_LOCK(dev, txq, smp_processor_id());
 		if (!netif_xmit_frozen_or_stopped(txq))
 			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
 
 		HARD_TX_UNLOCK(dev, txq);
+	} else {
+		spin_lock(root_lock);
+		return qdisc_qlen(q);
 	}
 	spin_lock(root_lock);
 
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 8d3d3625130e..084718f9b3da 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -866,8 +866,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 				 * sender MUST assure that at least one T3-rtx
 				 * timer is running.
 				 */
-				if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN)
-					sctp_transport_reset_timers(transport);
+				if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
+					sctp_transport_reset_t3_rtx(transport);
+					transport->last_time_sent = jiffies;
+				}
 			}
 			break;
 
@@ -924,8 +926,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 			error = sctp_outq_flush_rtx(q, packet,
 						    rtx_timeout, &start_timer);
 
-			if (start_timer)
-				sctp_transport_reset_timers(transport);
+			if (start_timer) {
+				sctp_transport_reset_t3_rtx(transport);
+				transport->last_time_sent = jiffies;
+			}
 
 			/* This can happen on COOKIE-ECHO resend. Only
 			 * one chunk can get bundled with a COOKIE-ECHO.
@@ -1062,7 +1066,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 			list_add_tail(&chunk->transmitted_list,
 				      &transport->transmitted);
 
-			sctp_transport_reset_timers(transport);
+			sctp_transport_reset_t3_rtx(transport);
+			transport->last_time_sent = jiffies;
 
 			/* Only let one DATA chunk get bundled with a
 			 * COOKIE-ECHO chunk.
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 7f0bf798205b..56f364d8f932 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3080,8 +3080,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
 			return SCTP_ERROR_RSRC_LOW;
 
 		/* Start the heartbeat timer. */
-		if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer)))
-			sctp_transport_hold(peer);
+		sctp_transport_reset_hb_timer(peer);
 		asoc->new_transport = peer;
 		break;
 	case SCTP_PARAM_DEL_IP:
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 7fe56d0acabf..41b081a64752 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -69,8 +69,6 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
 			     sctp_cmd_seq_t *commands,
 			     gfp_t gfp);
 
-static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
-				     struct sctp_transport *t);
 /********************************************************************
  * Helper functions
  ********************************************************************/
@@ -367,6 +365,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
 	struct sctp_association *asoc = transport->asoc;
 	struct sock *sk = asoc->base.sk;
 	struct net *net = sock_net(sk);
+	u32 elapsed, timeout;
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk)) {
@@ -378,6 +377,16 @@ void sctp_generate_heartbeat_event(unsigned long data)
 		goto out_unlock;
 	}
 
+	/* Check if we should still send the heartbeat or reschedule */
+	elapsed = jiffies - transport->last_time_sent;
+	timeout = sctp_transport_timeout(transport);
+	if (elapsed < timeout) {
+		elapsed = timeout - elapsed;
+		if (!mod_timer(&transport->hb_timer, jiffies + elapsed))
+			sctp_transport_hold(transport);
+		goto out_unlock;
+	}
+
 	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
 			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
 			   asoc->state, asoc->ep, asoc,
@@ -507,7 +516,7 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
 					     0);
 
 		/* Update the hb timer to resend a heartbeat every rto */
-		sctp_cmd_hb_timer_update(commands, transport);
+		sctp_transport_reset_hb_timer(transport);
 	}
 
 	if (transport->state != SCTP_INACTIVE &&
@@ -634,11 +643,8 @@ static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
 	 * hold a reference on the transport to make sure none of
 	 * the needed data structures go away.
 	 */
-	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
-
-		if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
-			sctp_transport_hold(t);
-	}
+	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
+		sctp_transport_reset_hb_timer(t);
 }
 
 static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
@@ -669,15 +675,6 @@ static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
 }
 
 
-/* Helper function to update the heartbeat timer. */
-static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
-				     struct sctp_transport *t)
-{
-	/* Update the heartbeat timer. */
-	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
-		sctp_transport_hold(t);
-}
-
 /* Helper function to handle the reception of an HEARTBEAT ACK. */
 static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
 				  struct sctp_association *asoc,
@@ -742,8 +739,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
 	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
 
 	/* Update the heartbeat timer. */
-	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
-		sctp_transport_hold(t);
+	sctp_transport_reset_hb_timer(t);
 
 	if (was_unconfirmed && asoc->peer.transport_count == 1)
 		sctp_transport_immediate_rtx(t);
@@ -1614,7 +1610,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 
 	case SCTP_CMD_HB_TIMER_UPDATE:
 		t = cmd->obj.transport;
-		sctp_cmd_hb_timer_update(commands, t);
+		sctp_transport_reset_hb_timer(t);
 		break;
 
 	case SCTP_CMD_HB_TIMERS_STOP:
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 9b6b48c7524e..81b86678be4d 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -183,7 +183,7 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
 /* Start T3_rtx timer if it is not already running and update the heartbeat
  * timer. This routine is called every time a DATA chunk is sent.
  */
-void sctp_transport_reset_timers(struct sctp_transport *transport)
+void sctp_transport_reset_t3_rtx(struct sctp_transport *transport)
 {
 	/* RFC 2960 6.3.2 Retransmission Timer Rules
 	 *
@@ -197,11 +197,18 @@ void sctp_transport_reset_timers(struct sctp_transport *transport)
 	if (!mod_timer(&transport->T3_rtx_timer,
 		       jiffies + transport->rto))
 		sctp_transport_hold(transport);
+}
+
+void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
+{
+	unsigned long expires;
 
 	/* When a data chunk is sent, reset the heartbeat interval. */
-	if (!mod_timer(&transport->hb_timer,
-		       sctp_transport_timeout(transport)))
-		sctp_transport_hold(transport);
+	expires = jiffies + sctp_transport_timeout(transport);
+	if (time_before(transport->hb_timer.expires, expires) &&
+	    !mod_timer(&transport->hb_timer,
+		       expires + prandom_u32_max(transport->rto)))
+		sctp_transport_hold(transport);
 }
 
 /* This transport has been assigned to an association.
@@ -595,13 +602,13 @@ void sctp_transport_burst_reset(struct sctp_transport *t)
 unsigned long sctp_transport_timeout(struct sctp_transport *trans)
 {
 	/* RTO + timer slack +/- 50% of RTO */
-	unsigned long timeout = (trans->rto >> 1) + prandom_u32_max(trans->rto);
+	unsigned long timeout = trans->rto >> 1;
 
 	if (trans->state != SCTP_UNCONFIRMED &&
 	    trans->state != SCTP_PF)
 		timeout += trans->hbinterval;
 
-	return timeout + jiffies;
+	return timeout;
 }
 
 /* Reset transport variables to their initial values */
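Net effect of the transport.c changes: sctp_transport_timeout() now returns a relative timeout without slack, the random slack is added once at arm time, and the heartbeat timer is only ever pushed later, never earlier, which spares a mod_timer() call on every data chunk. A sketch of that push-out-only rule over a wrapping tick counter (time_before here mirrors the kernel macro):

#include <stdint.h>
#include <stdio.h>

static int time_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

/* Re-arm only when the proposed expiry is later than the current one. */
static uint32_t maybe_rearm(uint32_t cur_expiry, uint32_t new_expiry)
{
	return time_before(cur_expiry, new_expiry) ? new_expiry : cur_expiry;
}

int main(void)
{
	uint32_t expiry = 1000;

	expiry = maybe_rearm(expiry, 1500);	/* pushed out to 1500 */
	expiry = maybe_rearm(expiry, 1200);	/* no-op, stays 1500 */
	printf("expiry=%u\n", expiry);
	return 0;
}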
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 03a842870c52..e2bdb07a49a2 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -69,6 +69,7 @@ static int __net_init tipc_init_net(struct net *net)
 	if (err)
 		goto out_nametbl;
 
+	INIT_LIST_HEAD(&tn->dist_queue);
 	err = tipc_topsrv_start(net);
 	if (err)
 		goto out_subscr;
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 5504d63503df..eff58dc53aa1 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -103,6 +103,9 @@ struct tipc_net {
 	spinlock_t nametbl_lock;
 	struct name_table *nametbl;
 
+	/* Name dist queue */
+	struct list_head dist_queue;
+
 	/* Topology subscription server */
 	struct tipc_server *topsrv;
 	atomic_t subscription_count;
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index ebe9d0ff6e9e..6b626a64b517 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -40,11 +40,6 @@
 
 int sysctl_tipc_named_timeout __read_mostly = 2000;
 
-/**
- * struct tipc_dist_queue - queue holding deferred name table updates
- */
-static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue);
-
 struct distr_queue_item {
 	struct distr_item i;
 	u32 dtype;
@@ -229,12 +224,31 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
 	kfree_rcu(p, rcu);
 }
 
+/**
+ * tipc_dist_queue_purge - remove deferred updates from a node that went down
+ */
+static void tipc_dist_queue_purge(struct net *net, u32 addr)
+{
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
+	struct distr_queue_item *e, *tmp;
+
+	spin_lock_bh(&tn->nametbl_lock);
+	list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
+		if (e->node != addr)
+			continue;
+		list_del(&e->next);
+		kfree(e);
+	}
+	spin_unlock_bh(&tn->nametbl_lock);
+}
+
 void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr)
 {
 	struct publication *publ, *tmp;
 
 	list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list)
 		tipc_publ_purge(net, publ, addr);
+	tipc_dist_queue_purge(net, addr);
 }
 
 /**
@@ -279,9 +293,11 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
  * tipc_named_add_backlog - add a failed name table update to the backlog
  *
  */
-static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
+static void tipc_named_add_backlog(struct net *net, struct distr_item *i,
+				   u32 type, u32 node)
 {
 	struct distr_queue_item *e;
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	unsigned long now = get_jiffies_64();
 
 	e = kzalloc(sizeof(*e), GFP_ATOMIC);
@@ -291,7 +307,7 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
 	e->node = node;
 	e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout);
 	memcpy(e, i, sizeof(*i));
-	list_add_tail(&e->next, &tipc_dist_queue);
+	list_add_tail(&e->next, &tn->dist_queue);
 }
 
 /**
@@ -301,10 +317,11 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
 void tipc_named_process_backlog(struct net *net)
 {
 	struct distr_queue_item *e, *tmp;
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	char addr[16];
 	unsigned long now = get_jiffies_64();
 
-	list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) {
+	list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
 		if (time_after(e->expires, now)) {
 			if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype))
 				continue;
@@ -344,7 +361,7 @@ void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq)
 		node = msg_orignode(msg);
 		while (count--) {
 			if (!tipc_update_nametbl(net, item, node, mtype))
-				tipc_named_add_backlog(item, mtype, node);
+				tipc_named_add_backlog(net, item, mtype, node);
 			item++;
 		}
 		kfree_skb(skb);
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 662bdd20a748..56214736fe88 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1735,11 +1735,8 @@ static int vmci_transport_dgram_dequeue(struct vsock_sock *vsk,
 	/* Retrieve the head sk_buff from the socket's receive queue. */
 	err = 0;
 	skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
-	if (err)
-		return err;
-
 	if (!skb)
-		return -EAGAIN;
+		return err;
 
 	dg = (struct vmci_datagram *)skb->data;
 	if (!dg)
@@ -2154,7 +2151,7 @@ module_exit(vmci_transport_exit);
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
-MODULE_VERSION("1.0.3.0-k");
+MODULE_VERSION("1.0.4.0-k");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("vmware_vsock");
 MODULE_ALIAS_NETPROTO(PF_VSOCK);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 98c924260b3d..056a7307862b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -13216,7 +13216,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
 	struct wireless_dev *wdev;
 	struct cfg80211_beacon_registration *reg, *tmp;
 
-	if (state != NETLINK_URELEASE)
+	if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
 		return NOTIFY_DONE;
 
 	rcu_read_lock();
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 69bb3fc38fb2..0840684deb7d 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -3,3 +3,4 @@ psock_fanout
 psock_tpacket
 reuseport_bpf
 reuseport_bpf_cpu
+reuseport_dualstack
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index c658792d47b4..0e5340742620 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -4,7 +4,7 @@ CFLAGS = -Wall -O2 -g
 
 CFLAGS += -I../../../../usr/include/
 
-NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu
+NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu reuseport_dualstack
 
 all: $(NET_PROGS)
 %: %.c
diff --git a/tools/testing/selftests/net/reuseport_dualstack.c b/tools/testing/selftests/net/reuseport_dualstack.c
new file mode 100644
index 000000000000..90958aaaafb9
--- /dev/null
+++ b/tools/testing/selftests/net/reuseport_dualstack.c
@@ -0,0 +1,208 @@
+/*
+ * It is possible to use SO_REUSEPORT to open multiple sockets bound to
+ * equivalent local addresses using AF_INET and AF_INET6 at the same time. If
+ * the AF_INET6 socket has IPV6_V6ONLY set, it's clear which socket should
+ * receive a given incoming packet. However, when it is not set, incoming v4
+ * packets should prefer the AF_INET socket(s). This behavior was defined with
+ * the original SO_REUSEPORT implementation, but broke with
+ * e32ea7e74727 ("soreuseport: fast reuseport UDP socket selection")
+ * This test creates these mixed AF_INET/AF_INET6 sockets and asserts the
+ * AF_INET preference for v4 packets.
+ */
+
+#define _GNU_SOURCE
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <error.h>
+#include <linux/in.h>
+#include <linux/unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/epoll.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+static const int PORT = 8888;
+
+static void build_rcv_fd(int family, int proto, int *rcv_fds, int count)
+{
+	struct sockaddr_storage addr;
+	struct sockaddr_in *addr4;
+	struct sockaddr_in6 *addr6;
+	int opt, i;
+
+	switch (family) {
+	case AF_INET:
+		addr4 = (struct sockaddr_in *)&addr;
+		addr4->sin_family = AF_INET;
+		addr4->sin_addr.s_addr = htonl(INADDR_ANY);
+		addr4->sin_port = htons(PORT);
+		break;
+	case AF_INET6:
+		addr6 = (struct sockaddr_in6 *)&addr;
+		addr6->sin6_family = AF_INET6;
+		addr6->sin6_addr = in6addr_any;
+		addr6->sin6_port = htons(PORT);
+		break;
+	default:
+		error(1, 0, "Unsupported family %d", family);
+	}
+
+	for (i = 0; i < count; ++i) {
+		rcv_fds[i] = socket(family, proto, 0);
+		if (rcv_fds[i] < 0)
+			error(1, errno, "failed to create receive socket");
+
+		opt = 1;
+		if (setsockopt(rcv_fds[i], SOL_SOCKET, SO_REUSEPORT, &opt,
+			       sizeof(opt)))
+			error(1, errno, "failed to set SO_REUSEPORT");
+
+		if (bind(rcv_fds[i], (struct sockaddr *)&addr, sizeof(addr)))
+			error(1, errno, "failed to bind receive socket");
+
+		if (proto == SOCK_STREAM && listen(rcv_fds[i], 10))
+			error(1, errno, "failed to listen on receive port");
+	}
+}
+
+static void send_from_v4(int proto)
+{
+	struct sockaddr_in saddr, daddr;
+	int fd;
+
+	saddr.sin_family = AF_INET;
+	saddr.sin_addr.s_addr = htonl(INADDR_ANY);
+	saddr.sin_port = 0;
+
+	daddr.sin_family = AF_INET;
+	daddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+	daddr.sin_port = htons(PORT);
+
+	fd = socket(AF_INET, proto, 0);
+	if (fd < 0)
+		error(1, errno, "failed to create send socket");
+
+	if (bind(fd, (struct sockaddr *)&saddr, sizeof(saddr)))
+		error(1, errno, "failed to bind send socket");
+
+	if (connect(fd, (struct sockaddr *)&daddr, sizeof(daddr)))
+		error(1, errno, "failed to connect send socket");
+
+	if (send(fd, "a", 1, 0) < 0)
+		error(1, errno, "failed to send message");
+
+	close(fd);
+}
+
+static int receive_once(int epfd, int proto)
+{
+	struct epoll_event ev;
+	int i, fd;
+	char buf[8];
+
+	i = epoll_wait(epfd, &ev, 1, -1);
+	if (i < 0)
+		error(1, errno, "epoll_wait failed");
+
+	if (proto == SOCK_STREAM) {
+		fd = accept(ev.data.fd, NULL, NULL);
+		if (fd < 0)
+			error(1, errno, "failed to accept");
+		i = recv(fd, buf, sizeof(buf), 0);
+		close(fd);
+	} else {
+		i = recv(ev.data.fd, buf, sizeof(buf), 0);
+	}
+
+	if (i < 0)
+		error(1, errno, "failed to recv");
+
+	return ev.data.fd;
+}
+
+static void test(int *rcv_fds, int count, int proto)
+{
+	struct epoll_event ev;
+	int epfd, i, test_fd;
+	uint16_t test_family;
+	socklen_t len;
+
+	epfd = epoll_create(1);
+	if (epfd < 0)
+		error(1, errno, "failed to create epoll");
+
+	ev.events = EPOLLIN;
+	for (i = 0; i < count; ++i) {
+		ev.data.fd = rcv_fds[i];
+		if (epoll_ctl(epfd, EPOLL_CTL_ADD, rcv_fds[i], &ev))
+			error(1, errno, "failed to register sock epoll");
+	}
+
+	send_from_v4(proto);
+
+	test_fd = receive_once(epfd, proto);
+	if (getsockopt(test_fd, SOL_SOCKET, SO_DOMAIN, &test_family, &len))
+		error(1, errno, "failed to read socket domain");
+	if (test_family != AF_INET)
+		error(1, 0, "expected to receive on v4 socket but got v6 (%d)",
+		      test_family);
+
+	close(epfd);
+}
+
+int main(void)
+{
+	int rcv_fds[32], i;
+
+	fprintf(stderr, "---- UDP IPv4 created before IPv6 ----\n");
+	build_rcv_fd(AF_INET, SOCK_DGRAM, rcv_fds, 5);
+	build_rcv_fd(AF_INET6, SOCK_DGRAM, &(rcv_fds[5]), 5);
+	test(rcv_fds, 10, SOCK_DGRAM);
+	for (i = 0; i < 10; ++i)
+		close(rcv_fds[i]);
+
+	fprintf(stderr, "---- UDP IPv6 created before IPv4 ----\n");
+	build_rcv_fd(AF_INET6, SOCK_DGRAM, rcv_fds, 5);
+	build_rcv_fd(AF_INET, SOCK_DGRAM, &(rcv_fds[5]), 5);
+	test(rcv_fds, 10, SOCK_DGRAM);
+	for (i = 0; i < 10; ++i)
+		close(rcv_fds[i]);
+
+	/* NOTE: UDP socket lookups traverse a different code path when there
+	 * are > 10 sockets in a group.
+	 */
+	fprintf(stderr, "---- UDP IPv4 created before IPv6 (large) ----\n");
+	build_rcv_fd(AF_INET, SOCK_DGRAM, rcv_fds, 16);
+	build_rcv_fd(AF_INET6, SOCK_DGRAM, &(rcv_fds[16]), 16);
+	test(rcv_fds, 32, SOCK_DGRAM);
+	for (i = 0; i < 32; ++i)
+		close(rcv_fds[i]);
+
+	fprintf(stderr, "---- UDP IPv6 created before IPv4 (large) ----\n");
+	build_rcv_fd(AF_INET6, SOCK_DGRAM, rcv_fds, 16);
+	build_rcv_fd(AF_INET, SOCK_DGRAM, &(rcv_fds[16]), 16);
+	test(rcv_fds, 32, SOCK_DGRAM);
+	for (i = 0; i < 32; ++i)
+		close(rcv_fds[i]);
+
+	fprintf(stderr, "---- TCP IPv4 created before IPv6 ----\n");
+	build_rcv_fd(AF_INET, SOCK_STREAM, rcv_fds, 5);
+	build_rcv_fd(AF_INET6, SOCK_STREAM, &(rcv_fds[5]), 5);
+	test(rcv_fds, 10, SOCK_STREAM);
+	for (i = 0; i < 10; ++i)
+		close(rcv_fds[i]);
+
+	fprintf(stderr, "---- TCP IPv6 created before IPv4 ----\n");
+	build_rcv_fd(AF_INET6, SOCK_STREAM, rcv_fds, 5);
+	build_rcv_fd(AF_INET, SOCK_STREAM, &(rcv_fds[5]), 5);
+	test(rcv_fds, 10, SOCK_STREAM);
+	for (i = 0; i < 10; ++i)
+		close(rcv_fds[i]);
+
+	fprintf(stderr, "SUCCESS\n");
+	return 0;
+}
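One caveat in the selftest as merged: in test(), len is handed to getsockopt() without first being set to the buffer size, so the SO_DOMAIN read depends on whatever happens to be on the stack. A hardened variant of that read, shown here as a self-contained sketch (read_sock_domain is an illustrative name, not part of the patch):

#include <errno.h>
#include <error.h>
#include <stdint.h>
#include <sys/socket.h>

static uint16_t read_sock_domain(int fd)
{
	uint16_t family;
	socklen_t len = sizeof(family);	/* initialized, unlike test() above */

	if (getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &family, &len))
		error(1, errno, "failed to read socket domain");
	return family;
}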