aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
authorJason Gunthorpe <jgg@mellanox.com>2018-01-29 15:26:40 -0500
committerJason Gunthorpe <jgg@mellanox.com>2018-01-30 11:30:00 -0500
commite7996a9a77fc669387da43ff4823b91cc4872bd0 (patch)
tree617f0a128e222539d67e8cccc359f1bc4b984900 /drivers/net
parentb5fa635aab8f0d39a824c01991266a6d06f007fb (diff)
parentd8a5b80568a9cb66810e75b182018e9edb68e8ff (diff)
Merge tag v4.15 of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
To resolve conflicts in: drivers/infiniband/hw/mlx5/main.c drivers/infiniband/hw/mlx5/qp.c From patches merged into the -rc cycle. The conflict resolution matches what linux-next has been carrying. Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/can/flexcan.c11
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c9
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c5
-rw-r--r--drivers/net/can/sja1000/peak_pci.c5
-rw-r--r--drivers/net/can/ti_hecc.c3
-rw-r--r--drivers/net/can/usb/ems_usb.c3
-rw-r--r--drivers/net/can/usb/esd_usb2.c2
-rw-r--r--drivers/net/can/usb/gs_usb.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb.c13
-rw-r--r--drivers/net/can/usb/mcba_usb.c4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c21
-rw-r--r--drivers/net/can/usb/usb_8dev.c2
-rw-r--r--drivers/net/can/vxcan.c2
-rw-r--r--drivers/net/dsa/b53/b53_common.c9
-rw-r--r--drivers/net/dsa/bcm_sf2.c1
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c34
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c1
-rw-r--r--drivers/net/ethernet/3com/3c59x.c90
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c45
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c16
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h29
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c82
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c17
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c29
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c80
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/ver.h6
-rw-r--r--drivers/net/ethernet/arc/emac.h2
-rw-r--r--drivers/net/ethernet/arc/emac_main.c164
-rw-r--r--drivers/net/ethernet/arc/emac_rockchip.c13
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c57
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c21
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h7
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c17
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c9
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c13
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c16
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet.h1
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c11
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c3
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c6
-rw-r--r--drivers/net/ethernet/ibm/emac/emac.h4
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c110
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h3
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c27
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c11
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c39
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c26
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c3
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c8
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c4
-rw-r--r--drivers/net/ethernet/marvell/skge.c1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c111
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c103
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.c64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c55
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c8
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-phy.c7
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c6
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c1
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c8
-rw-r--r--drivers/net/ethernet/realtek/r8169.c9
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c27
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c76
-rw-r--r--drivers/net/ethernet/sfc/tx.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c9
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c2
-rw-r--r--drivers/net/geneve.c14
-rw-r--r--drivers/net/hippi/rrunner.c2
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c1
-rw-r--r--drivers/net/macvlan.c7
-rw-r--r--drivers/net/phy/at803x.c4
-rw-r--r--drivers/net/phy/marvell.c18
-rw-r--r--drivers/net/phy/mdio-sun4i.c6
-rw-r--r--drivers/net/phy/mdio-xgene.c21
-rw-r--r--drivers/net/phy/mdio_bus.c1
-rw-r--r--drivers/net/phy/meson-gxl.c74
-rw-r--r--drivers/net/phy/micrel.c7
-rw-r--r--drivers/net/phy/phy.c9
-rw-r--r--drivers/net/phy/phy_device.c10
-rw-r--r--drivers/net/phy/phylink.c8
-rw-r--r--drivers/net/phy/sfp-bus.c6
-rw-r--r--drivers/net/phy/sfp.c41
-rw-r--r--drivers/net/ppp/ppp_generic.c5
-rw-r--r--drivers/net/ppp/pppoe.c11
-rw-r--r--drivers/net/tap.c14
-rw-r--r--drivers/net/tun.c39
-rw-r--r--drivers/net/usb/lan78xx.c1
-rw-r--r--drivers/net/usb/qmi_wwan.c5
-rw-r--r--drivers/net/usb/r8152.c13
-rw-r--r--drivers/net/usb/usbnet.c13
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/vrf.c5
-rw-r--r--drivers/net/vxlan.c17
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c23
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/txq.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c8
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c20
-rw-r--r--drivers/net/xen-netback/interface.c2
-rw-r--r--drivers/net/xen-netfront.c1
177 files changed, 1893 insertions, 859 deletions
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index a13a4896a8bd..760d2c07e3a2 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -184,12 +184,12 @@
184 * Below is some version info we got: 184 * Below is some version info we got:
185 * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR re- 185 * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
186 * Filter? connected? Passive detection ception in MB 186 * Filter? connected? Passive detection ception in MB
187 * MX25 FlexCAN2 03.00.00.00 no no ? no no 187 * MX25 FlexCAN2 03.00.00.00 no no no no no
188 * MX28 FlexCAN2 03.00.04.00 yes yes no no no 188 * MX28 FlexCAN2 03.00.04.00 yes yes no no no
189 * MX35 FlexCAN2 03.00.00.00 no no ? no no 189 * MX35 FlexCAN2 03.00.00.00 no no no no no
190 * MX53 FlexCAN2 03.00.00.00 yes no no no no 190 * MX53 FlexCAN2 03.00.00.00 yes no no no no
191 * MX6s FlexCAN3 10.00.12.00 yes yes no no yes 191 * MX6s FlexCAN3 10.00.12.00 yes yes no no yes
192 * VF610 FlexCAN3 ? no yes ? yes yes? 192 * VF610 FlexCAN3 ? no yes no yes yes?
193 * 193 *
194 * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. 194 * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
195 */ 195 */
@@ -297,7 +297,8 @@ static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
297 297
298static const struct flexcan_devtype_data fsl_vf610_devtype_data = { 298static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
299 .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | 299 .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
300 FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, 300 FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
301 FLEXCAN_QUIRK_BROKEN_PERR_STATE,
301}; 302};
302 303
303static const struct can_bittiming_const flexcan_bittiming_const = { 304static const struct can_bittiming_const flexcan_bittiming_const = {
@@ -525,7 +526,7 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
525 data = be32_to_cpup((__be32 *)&cf->data[0]); 526 data = be32_to_cpup((__be32 *)&cf->data[0]);
526 flexcan_write(data, &priv->tx_mb->data[0]); 527 flexcan_write(data, &priv->tx_mb->data[0]);
527 } 528 }
528 if (cf->can_dlc > 3) { 529 if (cf->can_dlc > 4) {
529 data = be32_to_cpup((__be32 *)&cf->data[4]); 530 data = be32_to_cpup((__be32 *)&cf->data[4]);
530 flexcan_write(data, &priv->tx_mb->data[1]); 531 flexcan_write(data, &priv->tx_mb->data[1]);
531 } 532 }
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 85268be0c913..55513411a82e 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -258,21 +258,18 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
258 /* if this frame is an echo, */ 258 /* if this frame is an echo, */
259 if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) && 259 if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) &&
260 !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) { 260 !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) {
261 int n;
262 unsigned long flags; 261 unsigned long flags;
263 262
264 spin_lock_irqsave(&priv->echo_lock, flags); 263 spin_lock_irqsave(&priv->echo_lock, flags);
265 n = can_get_echo_skb(priv->ndev, msg->client); 264 can_get_echo_skb(priv->ndev, msg->client);
266 spin_unlock_irqrestore(&priv->echo_lock, flags); 265 spin_unlock_irqrestore(&priv->echo_lock, flags);
267 266
268 /* count bytes of the echo instead of skb */ 267 /* count bytes of the echo instead of skb */
269 stats->tx_bytes += cf_len; 268 stats->tx_bytes += cf_len;
270 stats->tx_packets++; 269 stats->tx_packets++;
271 270
272 if (n) { 271 /* restart tx queue (a slot is free) */
273 /* restart tx queue only if a slot is free */ 272 netif_wake_queue(priv->ndev);
274 netif_wake_queue(priv->ndev);
275 }
276 273
277 return 0; 274 return 0;
278 } 275 }
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index b4efd711f824..788c3464a3b0 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -825,7 +825,10 @@ err_release_regions:
825err_disable_pci: 825err_disable_pci:
826 pci_disable_device(pdev); 826 pci_disable_device(pdev);
827 827
828 return err; 828 /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while
829 * the probe() function must return a negative errno in case of failure
830 * (err is unchanged if negative) */
831 return pcibios_err_to_errno(err);
829} 832}
830 833
831/* free the board structure object, as well as its resources: */ 834/* free the board structure object, as well as its resources: */
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 131026fbc2d7..5adc95c922ee 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -717,7 +717,10 @@ failure_release_regions:
717failure_disable_pci: 717failure_disable_pci:
718 pci_disable_device(pdev); 718 pci_disable_device(pdev);
719 719
720 return err; 720 /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while
721 * the probe() function must return a negative errno in case of failure
722 * (err is unchanged if negative) */
723 return pcibios_err_to_errno(err);
721} 724}
722 725
723static void peak_pci_remove(struct pci_dev *pdev) 726static void peak_pci_remove(struct pci_dev *pdev)
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 4d4941469cfc..db6ea936dc3f 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -637,6 +637,9 @@ static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
637 mbx_mask = hecc_read(priv, HECC_CANMIM); 637 mbx_mask = hecc_read(priv, HECC_CANMIM);
638 mbx_mask |= HECC_TX_MBOX_MASK; 638 mbx_mask |= HECC_TX_MBOX_MASK;
639 hecc_write(priv, HECC_CANMIM, mbx_mask); 639 hecc_write(priv, HECC_CANMIM, mbx_mask);
640 } else {
641 /* repoll is done only if whole budget is used */
642 num_pkts = quota;
640 } 643 }
641 644
642 return num_pkts; 645 return num_pkts;
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index b3d02759c226..12ff0020ecd6 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -288,6 +288,8 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
288 288
289 case -ECONNRESET: /* unlink */ 289 case -ECONNRESET: /* unlink */
290 case -ENOENT: 290 case -ENOENT:
291 case -EPIPE:
292 case -EPROTO:
291 case -ESHUTDOWN: 293 case -ESHUTDOWN:
292 return; 294 return;
293 295
@@ -393,6 +395,7 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
393 395
394 if (dev->can.state == CAN_STATE_ERROR_WARNING || 396 if (dev->can.state == CAN_STATE_ERROR_WARNING ||
395 dev->can.state == CAN_STATE_ERROR_PASSIVE) { 397 dev->can.state == CAN_STATE_ERROR_PASSIVE) {
398 cf->can_id |= CAN_ERR_CRTL;
396 cf->data[1] = (txerr > rxerr) ? 399 cf->data[1] = (txerr > rxerr) ?
397 CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; 400 CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE;
398 } 401 }
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 9fdb0f0bfa06..c6dcf93675c0 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -393,6 +393,8 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
393 break; 393 break;
394 394
395 case -ENOENT: 395 case -ENOENT:
396 case -EPIPE:
397 case -EPROTO:
396 case -ESHUTDOWN: 398 case -ESHUTDOWN:
397 return; 399 return;
398 400
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 68ac3e88a8ce..8bf80ad9dc44 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -449,7 +449,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
449 dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)", 449 dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
450 rc); 450 rc);
451 451
452 return rc; 452 return (rc > 0) ? 0 : rc;
453} 453}
454 454
455static void gs_usb_xmit_callback(struct urb *urb) 455static void gs_usb_xmit_callback(struct urb *urb)
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 9b18d96ef526..63587b8e6825 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -609,8 +609,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
609 } 609 }
610 610
611 if (pos + tmp->len > actual_len) { 611 if (pos + tmp->len > actual_len) {
612 dev_err(dev->udev->dev.parent, 612 dev_err_ratelimited(dev->udev->dev.parent,
613 "Format error\n"); 613 "Format error\n");
614 break; 614 break;
615 } 615 }
616 616
@@ -813,6 +813,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
813 if (err) { 813 if (err) {
814 netdev_err(netdev, "Error transmitting URB\n"); 814 netdev_err(netdev, "Error transmitting URB\n");
815 usb_unanchor_urb(urb); 815 usb_unanchor_urb(urb);
816 kfree(buf);
816 usb_free_urb(urb); 817 usb_free_urb(urb);
817 return err; 818 return err;
818 } 819 }
@@ -1325,6 +1326,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
1325 case 0: 1326 case 0:
1326 break; 1327 break;
1327 case -ENOENT: 1328 case -ENOENT:
1329 case -EPIPE:
1330 case -EPROTO:
1328 case -ESHUTDOWN: 1331 case -ESHUTDOWN:
1329 return; 1332 return;
1330 default: 1333 default:
@@ -1333,7 +1336,7 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
1333 goto resubmit_urb; 1336 goto resubmit_urb;
1334 } 1337 }
1335 1338
1336 while (pos <= urb->actual_length - MSG_HEADER_LEN) { 1339 while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) {
1337 msg = urb->transfer_buffer + pos; 1340 msg = urb->transfer_buffer + pos;
1338 1341
1339 /* The Kvaser firmware can only read and write messages that 1342 /* The Kvaser firmware can only read and write messages that
@@ -1352,7 +1355,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
1352 } 1355 }
1353 1356
1354 if (pos + msg->len > urb->actual_length) { 1357 if (pos + msg->len > urb->actual_length) {
1355 dev_err(dev->udev->dev.parent, "Format error\n"); 1358 dev_err_ratelimited(dev->udev->dev.parent,
1359 "Format error\n");
1356 break; 1360 break;
1357 } 1361 }
1358 1362
@@ -1768,6 +1772,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1768 spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); 1772 spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
1769 1773
1770 usb_unanchor_urb(urb); 1774 usb_unanchor_urb(urb);
1775 kfree(buf);
1771 1776
1772 stats->tx_dropped++; 1777 stats->tx_dropped++;
1773 1778
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 7f0272558bef..8d8c2086424d 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -592,6 +592,8 @@ static void mcba_usb_read_bulk_callback(struct urb *urb)
592 break; 592 break;
593 593
594 case -ENOENT: 594 case -ENOENT:
595 case -EPIPE:
596 case -EPROTO:
595 case -ESHUTDOWN: 597 case -ESHUTDOWN:
596 return; 598 return;
597 599
@@ -862,7 +864,7 @@ static int mcba_usb_probe(struct usb_interface *intf,
862 goto cleanup_unregister_candev; 864 goto cleanup_unregister_candev;
863 } 865 }
864 866
865 dev_info(&intf->dev, "Microchip CAN BUS analizer connected\n"); 867 dev_info(&intf->dev, "Microchip CAN BUS Analyzer connected\n");
866 868
867 return 0; 869 return 0;
868 870
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 7ccdc3e30c98..53d6bb045e9e 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -184,7 +184,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
184 void *cmd_head = pcan_usb_fd_cmd_buffer(dev); 184 void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
185 int err = 0; 185 int err = 0;
186 u8 *packet_ptr; 186 u8 *packet_ptr;
187 int i, n = 1, packet_len; 187 int packet_len;
188 ptrdiff_t cmd_len; 188 ptrdiff_t cmd_len;
189 189
190 /* usb device unregistered? */ 190 /* usb device unregistered? */
@@ -201,17 +201,13 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
201 } 201 }
202 202
203 packet_ptr = cmd_head; 203 packet_ptr = cmd_head;
204 packet_len = cmd_len;
204 205
205 /* firmware is not able to re-assemble 512 bytes buffer in full-speed */ 206 /* firmware is not able to re-assemble 512 bytes buffer in full-speed */
206 if ((dev->udev->speed != USB_SPEED_HIGH) && 207 if (unlikely(dev->udev->speed != USB_SPEED_HIGH))
207 (cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) { 208 packet_len = min(packet_len, PCAN_UFD_LOSPD_PKT_SIZE);
208 packet_len = PCAN_UFD_LOSPD_PKT_SIZE;
209 n += cmd_len / packet_len;
210 } else {
211 packet_len = cmd_len;
212 }
213 209
214 for (i = 0; i < n; i++) { 210 do {
215 err = usb_bulk_msg(dev->udev, 211 err = usb_bulk_msg(dev->udev,
216 usb_sndbulkpipe(dev->udev, 212 usb_sndbulkpipe(dev->udev,
217 PCAN_USBPRO_EP_CMDOUT), 213 PCAN_USBPRO_EP_CMDOUT),
@@ -224,7 +220,12 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
224 } 220 }
225 221
226 packet_ptr += packet_len; 222 packet_ptr += packet_len;
227 } 223 cmd_len -= packet_len;
224
225 if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE)
226 packet_len = cmd_len;
227
228 } while (packet_len > 0);
228 229
229 return err; 230 return err;
230} 231}
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index d000cb62d6ae..27861c417c94 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -524,6 +524,8 @@ static void usb_8dev_read_bulk_callback(struct urb *urb)
524 break; 524 break;
525 525
526 case -ENOENT: 526 case -ENOENT:
527 case -EPIPE:
528 case -EPROTO:
527 case -ESHUTDOWN: 529 case -ESHUTDOWN:
528 return; 530 return;
529 531
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 8404e8852a0f..b4c4a2c76437 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -194,7 +194,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
194 tbp = peer_tb; 194 tbp = peer_tb;
195 } 195 }
196 196
197 if (tbp[IFLA_IFNAME]) { 197 if (ifmp && tbp[IFLA_IFNAME]) {
198 nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); 198 nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
199 name_assign_type = NET_NAME_USER; 199 name_assign_type = NET_NAME_USER;
200 } else { 200 } else {
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index f5a8dd96fd75..4498ab897d94 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1500,10 +1500,13 @@ static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds,
1500{ 1500{
1501 struct b53_device *dev = ds->priv; 1501 struct b53_device *dev = ds->priv;
1502 1502
1503 /* Older models support a different tag format that we do not 1503 /* Older models (5325, 5365) support a different tag format that we do
1504 * support in net/dsa/tag_brcm.c yet. 1504 * not support in net/dsa/tag_brcm.c yet. 539x and 531x5 require managed
1505 * mode to be turned on which means we need to specifically manage ARL
1506 * misses on multicast addresses (TBD).
1505 */ 1507 */
1506 if (is5325(dev) || is5365(dev) || !b53_can_enable_brcm_tags(ds, port)) 1508 if (is5325(dev) || is5365(dev) || is539x(dev) || is531x5(dev) ||
1509 !b53_can_enable_brcm_tags(ds, port))
1507 return DSA_TAG_PROTO_NONE; 1510 return DSA_TAG_PROTO_NONE;
1508 1511
1509 /* Broadcom BCM58xx chips have a flow accelerator on Port 8 1512 /* Broadcom BCM58xx chips have a flow accelerator on Port 8
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index ea01f24f15e7..b62d47210db8 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -14,7 +14,6 @@
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/interrupt.h> 15#include <linux/interrupt.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/of.h>
18#include <linux/phy.h> 17#include <linux/phy.h>
19#include <linux/phy_fixed.h> 18#include <linux/phy_fixed.h>
20#include <linux/mii.h> 19#include <linux/mii.h>
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index b721a2009b50..23b45da784cb 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -625,7 +625,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
625 bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc, 625 bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc,
626 slice_num, false); 626 slice_num, false);
627 bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc, 627 bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc,
628 slice_num, true); 628 SLICE_NUM_MASK, true);
629 629
630 /* Insert into TCAM now because we need to insert a second rule */ 630 /* Insert into TCAM now because we need to insert a second rule */
631 bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]); 631 bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);
@@ -699,7 +699,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
699 /* Insert into Action and policer RAMs now, set chain ID to 699 /* Insert into Action and policer RAMs now, set chain ID to
700 * the one we are chained to 700 * the one we are chained to
701 */ 701 */
702 ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num, 702 ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num,
703 queue_num, true); 703 queue_num, true);
704 if (ret) 704 if (ret)
705 goto out_err; 705 goto out_err;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 8171055fde7a..66d33e97cbc5 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -339,7 +339,7 @@ static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
339 u16 mask; 339 u16 mask;
340 340
341 mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &mask); 341 mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &mask);
342 mask |= GENMASK(chip->g1_irq.nirqs, 0); 342 mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
343 mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask); 343 mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
344 344
345 free_irq(chip->irq, chip); 345 free_irq(chip->irq, chip);
@@ -395,7 +395,7 @@ static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
395 return 0; 395 return 0;
396 396
397out_disable: 397out_disable:
398 mask |= GENMASK(chip->g1_irq.nirqs, 0); 398 mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
399 mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask); 399 mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
400 400
401out_mapping: 401out_mapping:
@@ -2177,6 +2177,19 @@ static const struct of_device_id mv88e6xxx_mdio_external_match[] = {
2177 { }, 2177 { },
2178}; 2178};
2179 2179
2180static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
2181
2182{
2183 struct mv88e6xxx_mdio_bus *mdio_bus;
2184 struct mii_bus *bus;
2185
2186 list_for_each_entry(mdio_bus, &chip->mdios, list) {
2187 bus = mdio_bus->bus;
2188
2189 mdiobus_unregister(bus);
2190 }
2191}
2192
2180static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip, 2193static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
2181 struct device_node *np) 2194 struct device_node *np)
2182{ 2195{
@@ -2201,27 +2214,16 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
2201 match = of_match_node(mv88e6xxx_mdio_external_match, child); 2214 match = of_match_node(mv88e6xxx_mdio_external_match, child);
2202 if (match) { 2215 if (match) {
2203 err = mv88e6xxx_mdio_register(chip, child, true); 2216 err = mv88e6xxx_mdio_register(chip, child, true);
2204 if (err) 2217 if (err) {
2218 mv88e6xxx_mdios_unregister(chip);
2205 return err; 2219 return err;
2220 }
2206 } 2221 }
2207 } 2222 }
2208 2223
2209 return 0; 2224 return 0;
2210} 2225}
2211 2226
2212static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
2213
2214{
2215 struct mv88e6xxx_mdio_bus *mdio_bus;
2216 struct mii_bus *bus;
2217
2218 list_for_each_entry(mdio_bus, &chip->mdios, list) {
2219 bus = mdio_bus->bus;
2220
2221 mdiobus_unregister(bus);
2222 }
2223}
2224
2225static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds) 2227static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
2226{ 2228{
2227 struct mv88e6xxx_chip *chip = ds->priv; 2229 struct mv88e6xxx_chip *chip = ds->priv;
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index a7801f6668a5..6315774d72b3 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -338,6 +338,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
338 cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX; 338 cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX;
339 break; 339 break;
340 case PHY_INTERFACE_MODE_XGMII: 340 case PHY_INTERFACE_MODE_XGMII:
341 case PHY_INTERFACE_MODE_XAUI:
341 cmode = MV88E6XXX_PORT_STS_CMODE_XAUI; 342 cmode = MV88E6XXX_PORT_STS_CMODE_XAUI;
342 break; 343 break;
343 case PHY_INTERFACE_MODE_RXAUI: 344 case PHY_INTERFACE_MODE_RXAUI:
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index f4e13a7014bd..36c8950dbd2d 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -602,7 +602,7 @@ struct vortex_private {
602 struct sk_buff* rx_skbuff[RX_RING_SIZE]; 602 struct sk_buff* rx_skbuff[RX_RING_SIZE];
603 struct sk_buff* tx_skbuff[TX_RING_SIZE]; 603 struct sk_buff* tx_skbuff[TX_RING_SIZE];
604 unsigned int cur_rx, cur_tx; /* The next free ring entry */ 604 unsigned int cur_rx, cur_tx; /* The next free ring entry */
605 unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ 605 unsigned int dirty_tx; /* The ring entries to be free()ed. */
606 struct vortex_extra_stats xstats; /* NIC-specific extra stats */ 606 struct vortex_extra_stats xstats; /* NIC-specific extra stats */
607 struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */ 607 struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
608 dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */ 608 dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */
@@ -618,7 +618,6 @@ struct vortex_private {
618 618
619 /* The remainder are related to chip state, mostly media selection. */ 619 /* The remainder are related to chip state, mostly media selection. */
620 struct timer_list timer; /* Media selection timer. */ 620 struct timer_list timer; /* Media selection timer. */
621 struct timer_list rx_oom_timer; /* Rx skb allocation retry timer */
622 int options; /* User-settable misc. driver options. */ 621 int options; /* User-settable misc. driver options. */
623 unsigned int media_override:4, /* Passed-in media type. */ 622 unsigned int media_override:4, /* Passed-in media type. */
624 default_media:4, /* Read from the EEPROM/Wn3_Config. */ 623 default_media:4, /* Read from the EEPROM/Wn3_Config. */
@@ -760,7 +759,6 @@ static void mdio_sync(struct vortex_private *vp, int bits);
760static int mdio_read(struct net_device *dev, int phy_id, int location); 759static int mdio_read(struct net_device *dev, int phy_id, int location);
761static void mdio_write(struct net_device *vp, int phy_id, int location, int value); 760static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
762static void vortex_timer(struct timer_list *t); 761static void vortex_timer(struct timer_list *t);
763static void rx_oom_timer(struct timer_list *t);
764static netdev_tx_t vortex_start_xmit(struct sk_buff *skb, 762static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
765 struct net_device *dev); 763 struct net_device *dev);
766static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb, 764static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
@@ -1601,7 +1599,6 @@ vortex_up(struct net_device *dev)
1601 1599
1602 timer_setup(&vp->timer, vortex_timer, 0); 1600 timer_setup(&vp->timer, vortex_timer, 0);
1603 mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait)); 1601 mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait));
1604 timer_setup(&vp->rx_oom_timer, rx_oom_timer, 0);
1605 1602
1606 if (vortex_debug > 1) 1603 if (vortex_debug > 1)
1607 pr_debug("%s: Initial media type %s.\n", 1604 pr_debug("%s: Initial media type %s.\n",
@@ -1676,7 +1673,7 @@ vortex_up(struct net_device *dev)
1676 window_write16(vp, 0x0040, 4, Wn4_NetDiag); 1673 window_write16(vp, 0x0040, 4, Wn4_NetDiag);
1677 1674
1678 if (vp->full_bus_master_rx) { /* Boomerang bus master. */ 1675 if (vp->full_bus_master_rx) { /* Boomerang bus master. */
1679 vp->cur_rx = vp->dirty_rx = 0; 1676 vp->cur_rx = 0;
1680 /* Initialize the RxEarly register as recommended. */ 1677 /* Initialize the RxEarly register as recommended. */
1681 iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD); 1678 iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
1682 iowrite32(0x0020, ioaddr + PktStatus); 1679 iowrite32(0x0020, ioaddr + PktStatus);
@@ -1729,6 +1726,7 @@ vortex_open(struct net_device *dev)
1729 struct vortex_private *vp = netdev_priv(dev); 1726 struct vortex_private *vp = netdev_priv(dev);
1730 int i; 1727 int i;
1731 int retval; 1728 int retval;
1729 dma_addr_t dma;
1732 1730
1733 /* Use the now-standard shared IRQ implementation. */ 1731 /* Use the now-standard shared IRQ implementation. */
1734 if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ? 1732 if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
@@ -1753,7 +1751,11 @@ vortex_open(struct net_device *dev)
1753 break; /* Bad news! */ 1751 break; /* Bad news! */
1754 1752
1755 skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */ 1753 skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
1756 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 1754 dma = pci_map_single(VORTEX_PCI(vp), skb->data,
1755 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
1756 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
1757 break;
1758 vp->rx_ring[i].addr = cpu_to_le32(dma);
1757 } 1759 }
1758 if (i != RX_RING_SIZE) { 1760 if (i != RX_RING_SIZE) {
1759 pr_emerg("%s: no memory for rx ring\n", dev->name); 1761 pr_emerg("%s: no memory for rx ring\n", dev->name);
@@ -2067,6 +2069,12 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
2067 int len = (skb->len + 3) & ~3; 2069 int len = (skb->len + 3) & ~3;
2068 vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, 2070 vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
2069 PCI_DMA_TODEVICE); 2071 PCI_DMA_TODEVICE);
2072 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
2073 dev_kfree_skb_any(skb);
2074 dev->stats.tx_dropped++;
2075 return NETDEV_TX_OK;
2076 }
2077
2070 spin_lock_irq(&vp->window_lock); 2078 spin_lock_irq(&vp->window_lock);
2071 window_set(vp, 7); 2079 window_set(vp, 7);
2072 iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr); 2080 iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
@@ -2593,7 +2601,7 @@ boomerang_rx(struct net_device *dev)
2593 int entry = vp->cur_rx % RX_RING_SIZE; 2601 int entry = vp->cur_rx % RX_RING_SIZE;
2594 void __iomem *ioaddr = vp->ioaddr; 2602 void __iomem *ioaddr = vp->ioaddr;
2595 int rx_status; 2603 int rx_status;
2596 int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx; 2604 int rx_work_limit = RX_RING_SIZE;
2597 2605
2598 if (vortex_debug > 5) 2606 if (vortex_debug > 5)
2599 pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS)); 2607 pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
@@ -2614,7 +2622,8 @@ boomerang_rx(struct net_device *dev)
2614 } else { 2622 } else {
2615 /* The packet length: up to 4.5K!. */ 2623 /* The packet length: up to 4.5K!. */
2616 int pkt_len = rx_status & 0x1fff; 2624 int pkt_len = rx_status & 0x1fff;
2617 struct sk_buff *skb; 2625 struct sk_buff *skb, *newskb;
2626 dma_addr_t newdma;
2618 dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr); 2627 dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
2619 2628
2620 if (vortex_debug > 4) 2629 if (vortex_debug > 4)
@@ -2633,9 +2642,27 @@ boomerang_rx(struct net_device *dev)
2633 pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 2642 pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2634 vp->rx_copy++; 2643 vp->rx_copy++;
2635 } else { 2644 } else {
2645 /* Pre-allocate the replacement skb. If it or its
2646 * mapping fails then recycle the buffer thats already
2647 * in place
2648 */
2649 newskb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
2650 if (!newskb) {
2651 dev->stats.rx_dropped++;
2652 goto clear_complete;
2653 }
2654 newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
2655 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2656 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
2657 dev->stats.rx_dropped++;
2658 consume_skb(newskb);
2659 goto clear_complete;
2660 }
2661
2636 /* Pass up the skbuff already on the Rx ring. */ 2662 /* Pass up the skbuff already on the Rx ring. */
2637 skb = vp->rx_skbuff[entry]; 2663 skb = vp->rx_skbuff[entry];
2638 vp->rx_skbuff[entry] = NULL; 2664 vp->rx_skbuff[entry] = newskb;
2665 vp->rx_ring[entry].addr = cpu_to_le32(newdma);
2639 skb_put(skb, pkt_len); 2666 skb_put(skb, pkt_len);
2640 pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 2667 pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2641 vp->rx_nocopy++; 2668 vp->rx_nocopy++;
@@ -2653,55 +2680,15 @@ boomerang_rx(struct net_device *dev)
2653 netif_rx(skb); 2680 netif_rx(skb);
2654 dev->stats.rx_packets++; 2681 dev->stats.rx_packets++;
2655 } 2682 }
2656 entry = (++vp->cur_rx) % RX_RING_SIZE;
2657 }
2658 /* Refill the Rx ring buffers. */
2659 for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
2660 struct sk_buff *skb;
2661 entry = vp->dirty_rx % RX_RING_SIZE;
2662 if (vp->rx_skbuff[entry] == NULL) {
2663 skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
2664 if (skb == NULL) {
2665 static unsigned long last_jif;
2666 if (time_after(jiffies, last_jif + 10 * HZ)) {
2667 pr_warn("%s: memory shortage\n",
2668 dev->name);
2669 last_jif = jiffies;
2670 }
2671 if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
2672 mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
2673 break; /* Bad news! */
2674 }
2675 2683
2676 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 2684clear_complete:
2677 vp->rx_skbuff[entry] = skb;
2678 }
2679 vp->rx_ring[entry].status = 0; /* Clear complete bit. */ 2685 vp->rx_ring[entry].status = 0; /* Clear complete bit. */
2680 iowrite16(UpUnstall, ioaddr + EL3_CMD); 2686 iowrite16(UpUnstall, ioaddr + EL3_CMD);
2687 entry = (++vp->cur_rx) % RX_RING_SIZE;
2681 } 2688 }
2682 return 0; 2689 return 0;
2683} 2690}
2684 2691
2685/*
2686 * If we've hit a total OOM refilling the Rx ring we poll once a second
2687 * for some memory. Otherwise there is no way to restart the rx process.
2688 */
2689static void
2690rx_oom_timer(struct timer_list *t)
2691{
2692 struct vortex_private *vp = from_timer(vp, t, rx_oom_timer);
2693 struct net_device *dev = vp->mii.dev;
2694
2695 spin_lock_irq(&vp->lock);
2696 if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */
2697 boomerang_rx(dev);
2698 if (vortex_debug > 1) {
2699 pr_debug("%s: rx_oom_timer %s\n", dev->name,
2700 ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
2701 }
2702 spin_unlock_irq(&vp->lock);
2703}
2704
2705static void 2692static void
2706vortex_down(struct net_device *dev, int final_down) 2693vortex_down(struct net_device *dev, int final_down)
2707{ 2694{
@@ -2711,7 +2698,6 @@ vortex_down(struct net_device *dev, int final_down)
2711 netdev_reset_queue(dev); 2698 netdev_reset_queue(dev);
2712 netif_stop_queue(dev); 2699 netif_stop_queue(dev);
2713 2700
2714 del_timer_sync(&vp->rx_oom_timer);
2715 del_timer_sync(&vp->timer); 2701 del_timer_sync(&vp->timer);
2716 2702
2717 /* Turn off statistics ASAP. We update dev->stats below. */ 2703 /* Turn off statistics ASAP. We update dev->stats below. */
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 97c5a89a9cf7..fbe21a817bd8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -75,6 +75,9 @@ static struct workqueue_struct *ena_wq;
75MODULE_DEVICE_TABLE(pci, ena_pci_tbl); 75MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
76 76
77static int ena_rss_init_default(struct ena_adapter *adapter); 77static int ena_rss_init_default(struct ena_adapter *adapter);
78static void check_for_admin_com_state(struct ena_adapter *adapter);
79static void ena_destroy_device(struct ena_adapter *adapter);
80static int ena_restore_device(struct ena_adapter *adapter);
78 81
79static void ena_tx_timeout(struct net_device *dev) 82static void ena_tx_timeout(struct net_device *dev)
80{ 83{
@@ -1565,7 +1568,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
1565 1568
1566static int ena_up_complete(struct ena_adapter *adapter) 1569static int ena_up_complete(struct ena_adapter *adapter)
1567{ 1570{
1568 int rc, i; 1571 int rc;
1569 1572
1570 rc = ena_rss_configure(adapter); 1573 rc = ena_rss_configure(adapter);
1571 if (rc) 1574 if (rc)
@@ -1584,17 +1587,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
1584 1587
1585 ena_napi_enable_all(adapter); 1588 ena_napi_enable_all(adapter);
1586 1589
1587 /* Enable completion queues interrupt */
1588 for (i = 0; i < adapter->num_queues; i++)
1589 ena_unmask_interrupt(&adapter->tx_ring[i],
1590 &adapter->rx_ring[i]);
1591
1592 /* schedule napi in case we had pending packets
1593 * from the last time we disable napi
1594 */
1595 for (i = 0; i < adapter->num_queues; i++)
1596 napi_schedule(&adapter->ena_napi[i].napi);
1597
1598 return 0; 1590 return 0;
1599} 1591}
1600 1592
@@ -1731,7 +1723,7 @@ create_err:
1731 1723
1732static int ena_up(struct ena_adapter *adapter) 1724static int ena_up(struct ena_adapter *adapter)
1733{ 1725{
1734 int rc; 1726 int rc, i;
1735 1727
1736 netdev_dbg(adapter->netdev, "%s\n", __func__); 1728 netdev_dbg(adapter->netdev, "%s\n", __func__);
1737 1729
@@ -1774,6 +1766,17 @@ static int ena_up(struct ena_adapter *adapter)
1774 1766
1775 set_bit(ENA_FLAG_DEV_UP, &adapter->flags); 1767 set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
1776 1768
1769 /* Enable completion queues interrupt */
1770 for (i = 0; i < adapter->num_queues; i++)
1771 ena_unmask_interrupt(&adapter->tx_ring[i],
1772 &adapter->rx_ring[i]);
1773
1774 /* schedule napi in case we had pending packets
1775 * from the last time we disable napi
1776 */
1777 for (i = 0; i < adapter->num_queues; i++)
1778 napi_schedule(&adapter->ena_napi[i].napi);
1779
1777 return rc; 1780 return rc;
1778 1781
1779err_up: 1782err_up:
@@ -1884,6 +1887,17 @@ static int ena_close(struct net_device *netdev)
1884 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 1887 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
1885 ena_down(adapter); 1888 ena_down(adapter);
1886 1889
1890 /* Check for device status and issue reset if needed*/
1891 check_for_admin_com_state(adapter);
1892 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
1893 netif_err(adapter, ifdown, adapter->netdev,
1894 "Destroy failure, restarting device\n");
1895 ena_dump_stats_to_dmesg(adapter);
1896 /* rtnl lock already obtained in dev_ioctl() layer */
1897 ena_destroy_device(adapter);
1898 ena_restore_device(adapter);
1899 }
1900
1887 return 0; 1901 return 0;
1888} 1902}
1889 1903
@@ -2544,11 +2558,12 @@ static void ena_destroy_device(struct ena_adapter *adapter)
2544 2558
2545 ena_com_set_admin_running_state(ena_dev, false); 2559 ena_com_set_admin_running_state(ena_dev, false);
2546 2560
2547 ena_close(netdev); 2561 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2562 ena_down(adapter);
2548 2563
2549 /* Before releasing the ENA resources, a device reset is required. 2564 /* Before releasing the ENA resources, a device reset is required.
2550 * (to prevent the device from accessing them). 2565 * (to prevent the device from accessing them).
2551 * In case the reset flag is set and the device is up, ena_close 2566 * In case the reset flag is set and the device is up, ena_down()
2552 * already perform the reset, so it can be skipped. 2567 * already perform the reset, so it can be skipped.
2553 */ 2568 */
2554 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up)) 2569 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 57e796870595..105fdb958cef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -50,7 +50,7 @@
50#define AQ_CFG_PCI_FUNC_MSIX_IRQS 9U 50#define AQ_CFG_PCI_FUNC_MSIX_IRQS 9U
51#define AQ_CFG_PCI_FUNC_PORTS 2U 51#define AQ_CFG_PCI_FUNC_PORTS 2U
52 52
53#define AQ_CFG_SERVICE_TIMER_INTERVAL (2 * HZ) 53#define AQ_CFG_SERVICE_TIMER_INTERVAL (1 * HZ)
54#define AQ_CFG_POLLING_TIMER_INTERVAL ((unsigned int)(2 * HZ)) 54#define AQ_CFG_POLLING_TIMER_INTERVAL ((unsigned int)(2 * HZ))
55 55
56#define AQ_CFG_SKB_FRAGS_MAX 32U 56#define AQ_CFG_SKB_FRAGS_MAX 32U
@@ -80,6 +80,7 @@
80#define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\ 80#define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\
81 __stringify(NIC_MINOR_DRIVER_VERSION)"."\ 81 __stringify(NIC_MINOR_DRIVER_VERSION)"."\
82 __stringify(NIC_BUILD_DRIVER_VERSION)"."\ 82 __stringify(NIC_BUILD_DRIVER_VERSION)"."\
83 __stringify(NIC_REVISION_DRIVER_VERSION) 83 __stringify(NIC_REVISION_DRIVER_VERSION) \
84 AQ_CFG_DRV_VERSION_SUFFIX
84 85
85#endif /* AQ_CFG_H */ 86#endif /* AQ_CFG_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 70efb7467bf3..f2d8063a2cef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -66,14 +66,14 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
66 "OutUCast", 66 "OutUCast",
67 "OutMCast", 67 "OutMCast",
68 "OutBCast", 68 "OutBCast",
69 "InUCastOctects", 69 "InUCastOctets",
70 "OutUCastOctects", 70 "OutUCastOctets",
71 "InMCastOctects", 71 "InMCastOctets",
72 "OutMCastOctects", 72 "OutMCastOctets",
73 "InBCastOctects", 73 "InBCastOctets",
74 "OutBCastOctects", 74 "OutBCastOctets",
75 "InOctects", 75 "InOctets",
76 "OutOctects", 76 "OutOctets",
77 "InPacketsDma", 77 "InPacketsDma",
78 "OutPacketsDma", 78 "OutPacketsDma",
79 "InOctetsDma", 79 "InOctetsDma",
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 0207927dc8a6..b3825de6cdfb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -46,6 +46,28 @@ struct aq_hw_link_status_s {
46 unsigned int mbps; 46 unsigned int mbps;
47}; 47};
48 48
49struct aq_stats_s {
50 u64 uprc;
51 u64 mprc;
52 u64 bprc;
53 u64 erpt;
54 u64 uptc;
55 u64 mptc;
56 u64 bptc;
57 u64 erpr;
58 u64 mbtc;
59 u64 bbtc;
60 u64 mbrc;
61 u64 bbrc;
62 u64 ubrc;
63 u64 ubtc;
64 u64 dpc;
65 u64 dma_pkt_rc;
66 u64 dma_pkt_tc;
67 u64 dma_oct_rc;
68 u64 dma_oct_tc;
69};
70
49#define AQ_HW_IRQ_INVALID 0U 71#define AQ_HW_IRQ_INVALID 0U
50#define AQ_HW_IRQ_LEGACY 1U 72#define AQ_HW_IRQ_LEGACY 1U
51#define AQ_HW_IRQ_MSI 2U 73#define AQ_HW_IRQ_MSI 2U
@@ -85,7 +107,9 @@ struct aq_hw_ops {
85 void (*destroy)(struct aq_hw_s *self); 107 void (*destroy)(struct aq_hw_s *self);
86 108
87 int (*get_hw_caps)(struct aq_hw_s *self, 109 int (*get_hw_caps)(struct aq_hw_s *self,
88 struct aq_hw_caps_s *aq_hw_caps); 110 struct aq_hw_caps_s *aq_hw_caps,
111 unsigned short device,
112 unsigned short subsystem_device);
89 113
90 int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring, 114 int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
91 unsigned int frags); 115 unsigned int frags);
@@ -164,8 +188,7 @@ struct aq_hw_ops {
164 188
165 int (*hw_update_stats)(struct aq_hw_s *self); 189 int (*hw_update_stats)(struct aq_hw_s *self);
166 190
167 int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data, 191 struct aq_stats_s *(*hw_get_hw_stats)(struct aq_hw_s *self);
168 unsigned int *p_count);
169 192
170 int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); 193 int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
171 194
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 78dfb2ab78ce..75a894a9251c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -37,6 +37,8 @@ static unsigned int aq_itr_rx;
37module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644); 37module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
38MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate"); 38MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");
39 39
40static void aq_nic_update_ndev_stats(struct aq_nic_s *self);
41
40static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) 42static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
41{ 43{
42 struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; 44 struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
@@ -166,11 +168,8 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
166static void aq_nic_service_timer_cb(struct timer_list *t) 168static void aq_nic_service_timer_cb(struct timer_list *t)
167{ 169{
168 struct aq_nic_s *self = from_timer(self, t, service_timer); 170 struct aq_nic_s *self = from_timer(self, t, service_timer);
169 struct net_device *ndev = aq_nic_get_ndev(self); 171 int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL;
170 int err = 0; 172 int err = 0;
171 unsigned int i = 0U;
172 struct aq_ring_stats_rx_s stats_rx;
173 struct aq_ring_stats_tx_s stats_tx;
174 173
175 if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) 174 if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
176 goto err_exit; 175 goto err_exit;
@@ -182,23 +181,14 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
182 if (self->aq_hw_ops.hw_update_stats) 181 if (self->aq_hw_ops.hw_update_stats)
183 self->aq_hw_ops.hw_update_stats(self->aq_hw); 182 self->aq_hw_ops.hw_update_stats(self->aq_hw);
184 183
185 memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); 184 aq_nic_update_ndev_stats(self);
186 memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
187 for (i = AQ_DIMOF(self->aq_vec); i--;) {
188 if (self->aq_vec[i])
189 aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx);
190 }
191 185
192 ndev->stats.rx_packets = stats_rx.packets; 186 /* If no link - use faster timer rate to detect link up asap */
193 ndev->stats.rx_bytes = stats_rx.bytes; 187 if (!netif_carrier_ok(self->ndev))
194 ndev->stats.rx_errors = stats_rx.errors; 188 ctimer = max(ctimer / 2, 1);
195 ndev->stats.tx_packets = stats_tx.packets;
196 ndev->stats.tx_bytes = stats_tx.bytes;
197 ndev->stats.tx_errors = stats_tx.errors;
198 189
199err_exit: 190err_exit:
200 mod_timer(&self->service_timer, 191 mod_timer(&self->service_timer, jiffies + ctimer);
201 jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
202} 192}
203 193
204static void aq_nic_polling_timer_cb(struct timer_list *t) 194static void aq_nic_polling_timer_cb(struct timer_list *t)
@@ -222,7 +212,7 @@ static struct net_device *aq_nic_ndev_alloc(void)
222 212
223struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, 213struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
224 const struct ethtool_ops *et_ops, 214 const struct ethtool_ops *et_ops,
225 struct device *dev, 215 struct pci_dev *pdev,
226 struct aq_pci_func_s *aq_pci_func, 216 struct aq_pci_func_s *aq_pci_func,
227 unsigned int port, 217 unsigned int port,
228 const struct aq_hw_ops *aq_hw_ops) 218 const struct aq_hw_ops *aq_hw_ops)
@@ -242,7 +232,7 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
242 ndev->netdev_ops = ndev_ops; 232 ndev->netdev_ops = ndev_ops;
243 ndev->ethtool_ops = et_ops; 233 ndev->ethtool_ops = et_ops;
244 234
245 SET_NETDEV_DEV(ndev, dev); 235 SET_NETDEV_DEV(ndev, &pdev->dev);
246 236
247 ndev->if_port = port; 237 ndev->if_port = port;
248 self->ndev = ndev; 238 self->ndev = ndev;
@@ -254,7 +244,8 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
254 244
255 self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port, 245 self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
256 &self->aq_hw_ops); 246 &self->aq_hw_ops);
257 err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps); 247 err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps,
248 pdev->device, pdev->subsystem_device);
258 if (err < 0) 249 if (err < 0)
259 goto err_exit; 250 goto err_exit;
260 251
@@ -749,16 +740,40 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
749 740
750void aq_nic_get_stats(struct aq_nic_s *self, u64 *data) 741void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
751{ 742{
752 struct aq_vec_s *aq_vec = NULL;
753 unsigned int i = 0U; 743 unsigned int i = 0U;
754 unsigned int count = 0U; 744 unsigned int count = 0U;
755 int err = 0; 745 struct aq_vec_s *aq_vec = NULL;
746 struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw);
756 747
757 err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count); 748 if (!stats)
758 if (err < 0)
759 goto err_exit; 749 goto err_exit;
760 750
761 data += count; 751 data[i] = stats->uprc + stats->mprc + stats->bprc;
752 data[++i] = stats->uprc;
753 data[++i] = stats->mprc;
754 data[++i] = stats->bprc;
755 data[++i] = stats->erpt;
756 data[++i] = stats->uptc + stats->mptc + stats->bptc;
757 data[++i] = stats->uptc;
758 data[++i] = stats->mptc;
759 data[++i] = stats->bptc;
760 data[++i] = stats->ubrc;
761 data[++i] = stats->ubtc;
762 data[++i] = stats->mbrc;
763 data[++i] = stats->mbtc;
764 data[++i] = stats->bbrc;
765 data[++i] = stats->bbtc;
766 data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
767 data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
768 data[++i] = stats->dma_pkt_rc;
769 data[++i] = stats->dma_pkt_tc;
770 data[++i] = stats->dma_oct_rc;
771 data[++i] = stats->dma_oct_tc;
772 data[++i] = stats->dpc;
773
774 i++;
775
776 data += i;
762 count = 0U; 777 count = 0U;
763 778
764 for (i = 0U, aq_vec = self->aq_vec[0]; 779 for (i = 0U, aq_vec = self->aq_vec[0];
@@ -768,7 +783,20 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
768 } 783 }
769 784
770err_exit:; 785err_exit:;
771 (void)err; 786}
787
788static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
789{
790 struct net_device *ndev = self->ndev;
791 struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw);
792
793 ndev->stats.rx_packets = stats->uprc + stats->mprc + stats->bprc;
794 ndev->stats.rx_bytes = stats->ubrc + stats->mbrc + stats->bbrc;
795 ndev->stats.rx_errors = stats->erpr;
796 ndev->stats.tx_packets = stats->uptc + stats->mptc + stats->bptc;
797 ndev->stats.tx_bytes = stats->ubtc + stats->mbtc + stats->bbtc;
798 ndev->stats.tx_errors = stats->erpt;
799 ndev->stats.multicast = stats->mprc;
772} 800}
773 801
774void aq_nic_get_link_ksettings(struct aq_nic_s *self, 802void aq_nic_get_link_ksettings(struct aq_nic_s *self,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 4309983acdd6..3c9f8db03d5f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -71,7 +71,7 @@ struct aq_nic_cfg_s {
71 71
72struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, 72struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
73 const struct ethtool_ops *et_ops, 73 const struct ethtool_ops *et_ops,
74 struct device *dev, 74 struct pci_dev *pdev,
75 struct aq_pci_func_s *aq_pci_func, 75 struct aq_pci_func_s *aq_pci_func,
76 unsigned int port, 76 unsigned int port,
77 const struct aq_hw_ops *aq_hw_ops); 77 const struct aq_hw_ops *aq_hw_ops);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index cadaa646c89f..58c29d04b186 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -51,7 +51,8 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
51 pci_set_drvdata(pdev, self); 51 pci_set_drvdata(pdev, self);
52 self->pdev = pdev; 52 self->pdev = pdev;
53 53
54 err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps); 54 err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps, pdev->device,
55 pdev->subsystem_device);
55 if (err < 0) 56 if (err < 0)
56 goto err_exit; 57 goto err_exit;
57 58
@@ -59,7 +60,7 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
59 60
60 for (port = 0; port < self->ports; ++port) { 61 for (port = 0; port < self->ports; ++port) {
61 struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops, 62 struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops,
62 &pdev->dev, self, 63 pdev, self,
63 port, aq_hw_ops); 64 port, aq_hw_ops);
64 65
65 if (!aq_nic) { 66 if (!aq_nic) {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 07b3c49a16a4..f18dce14c93c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -18,9 +18,20 @@
18#include "hw_atl_a0_internal.h" 18#include "hw_atl_a0_internal.h"
19 19
20static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self, 20static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self,
21 struct aq_hw_caps_s *aq_hw_caps) 21 struct aq_hw_caps_s *aq_hw_caps,
22 unsigned short device,
23 unsigned short subsystem_device)
22{ 24{
23 memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps)); 25 memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps));
26
27 if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
28 aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
29
30 if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
31 aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
32 aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_5G;
33 }
34
24 return 0; 35 return 0;
25} 36}
26 37
@@ -333,6 +344,10 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
333 hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss); 344 hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
334 hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); 345 hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
335 346
347 /* Reset link status and read out initial hardware counters */
348 self->aq_link_status.mbps = 0;
349 hw_atl_utils_update_stats(self);
350
336 err = aq_hw_err_from_flags(self); 351 err = aq_hw_err_from_flags(self);
337 if (err < 0) 352 if (err < 0)
338 goto err_exit; 353 goto err_exit;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index ec68c20efcbd..e4a22ce7bf09 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -16,11 +16,23 @@
16#include "hw_atl_utils.h" 16#include "hw_atl_utils.h"
17#include "hw_atl_llh.h" 17#include "hw_atl_llh.h"
18#include "hw_atl_b0_internal.h" 18#include "hw_atl_b0_internal.h"
19#include "hw_atl_llh_internal.h"
19 20
20static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self, 21static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self,
21 struct aq_hw_caps_s *aq_hw_caps) 22 struct aq_hw_caps_s *aq_hw_caps,
23 unsigned short device,
24 unsigned short subsystem_device)
22{ 25{
23 memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps)); 26 memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps));
27
28 if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
29 aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G;
30
31 if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
32 aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G;
33 aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_5G;
34 }
35
24 return 0; 36 return 0;
25} 37}
26 38
@@ -357,6 +369,7 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
357 }; 369 };
358 370
359 int err = 0; 371 int err = 0;
372 u32 val;
360 373
361 self->aq_nic_cfg = aq_nic_cfg; 374 self->aq_nic_cfg = aq_nic_cfg;
362 375
@@ -374,6 +387,20 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
374 hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss); 387 hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
375 hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); 388 hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
376 389
390 /* Force limit MRRS on RDM/TDM to 2K */
391 val = aq_hw_read_reg(self, pci_reg_control6_adr);
392 aq_hw_write_reg(self, pci_reg_control6_adr, (val & ~0x707) | 0x404);
393
394 /* TX DMA total request limit. B0 hardware is not capable to
395 * handle more than (8K-MRRS) incoming DMA data.
396 * Value 24 in 256byte units
397 */
398 aq_hw_write_reg(self, tx_dma_total_req_limit_adr, 24);
399
400 /* Reset link status and read out initial hardware counters */
401 self->aq_link_status.mbps = 0;
402 hw_atl_utils_update_stats(self);
403
377 err = aq_hw_err_from_flags(self); 404 err = aq_hw_err_from_flags(self);
378 if (err < 0) 405 if (err < 0)
379 goto err_exit; 406 goto err_exit;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 5527fc0e5942..93450ec930e8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -2343,6 +2343,9 @@
2343#define tx_dma_desc_base_addrmsw_adr(descriptor) \ 2343#define tx_dma_desc_base_addrmsw_adr(descriptor) \
2344 (0x00007c04u + (descriptor) * 0x40) 2344 (0x00007c04u + (descriptor) * 0x40)
2345 2345
2346/* tx dma total request limit */
2347#define tx_dma_total_req_limit_adr 0x00007b20u
2348
2346/* tx interrupt moderation control register definitions 2349/* tx interrupt moderation control register definitions
2347 * Preprocessor definitions for TX Interrupt Moderation Control Register 2350 * Preprocessor definitions for TX Interrupt Moderation Control Register
2348 * Base Address: 0x00008980 2351 * Base Address: 0x00008980
@@ -2369,6 +2372,9 @@
2369/* default value of bitfield reg_res_dsbl */ 2372/* default value of bitfield reg_res_dsbl */
2370#define pci_reg_res_dsbl_default 0x1 2373#define pci_reg_res_dsbl_default 0x1
2371 2374
2375/* PCI core control register */
2376#define pci_reg_control6_adr 0x1014u
2377
2372/* global microprocessor scratch pad definitions */ 2378/* global microprocessor scratch pad definitions */
2373#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4) 2379#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4)
2374 2380
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 1fe016fc4bc7..f2ce12ed4218 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -503,73 +503,43 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
503 struct hw_atl_s *hw_self = PHAL_ATLANTIC; 503 struct hw_atl_s *hw_self = PHAL_ATLANTIC;
504 struct hw_aq_atl_utils_mbox mbox; 504 struct hw_aq_atl_utils_mbox mbox;
505 505
506 if (!self->aq_link_status.mbps)
507 return 0;
508
509 hw_atl_utils_mpi_read_stats(self, &mbox); 506 hw_atl_utils_mpi_read_stats(self, &mbox);
510 507
511#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \ 508#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
512 mbox.stats._N_ - hw_self->last_stats._N_) 509 mbox.stats._N_ - hw_self->last_stats._N_)
513 510 if (self->aq_link_status.mbps) {
514 AQ_SDELTA(uprc); 511 AQ_SDELTA(uprc);
515 AQ_SDELTA(mprc); 512 AQ_SDELTA(mprc);
516 AQ_SDELTA(bprc); 513 AQ_SDELTA(bprc);
517 AQ_SDELTA(erpt); 514 AQ_SDELTA(erpt);
518 515
519 AQ_SDELTA(uptc); 516 AQ_SDELTA(uptc);
520 AQ_SDELTA(mptc); 517 AQ_SDELTA(mptc);
521 AQ_SDELTA(bptc); 518 AQ_SDELTA(bptc);
522 AQ_SDELTA(erpr); 519 AQ_SDELTA(erpr);
523 520
524 AQ_SDELTA(ubrc); 521 AQ_SDELTA(ubrc);
525 AQ_SDELTA(ubtc); 522 AQ_SDELTA(ubtc);
526 AQ_SDELTA(mbrc); 523 AQ_SDELTA(mbrc);
527 AQ_SDELTA(mbtc); 524 AQ_SDELTA(mbtc);
528 AQ_SDELTA(bbrc); 525 AQ_SDELTA(bbrc);
529 AQ_SDELTA(bbtc); 526 AQ_SDELTA(bbtc);
530 AQ_SDELTA(dpc); 527 AQ_SDELTA(dpc);
531 528 }
532#undef AQ_SDELTA 529#undef AQ_SDELTA
530 hw_self->curr_stats.dma_pkt_rc = stats_rx_dma_good_pkt_counterlsw_get(self);
531 hw_self->curr_stats.dma_pkt_tc = stats_tx_dma_good_pkt_counterlsw_get(self);
532 hw_self->curr_stats.dma_oct_rc = stats_rx_dma_good_octet_counterlsw_get(self);
533 hw_self->curr_stats.dma_oct_tc = stats_tx_dma_good_octet_counterlsw_get(self);
533 534
534 memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats)); 535 memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
535 536
536 return 0; 537 return 0;
537} 538}
538 539
539int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, 540struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
540 u64 *data, unsigned int *p_count)
541{ 541{
542 struct hw_atl_s *hw_self = PHAL_ATLANTIC; 542 return &PHAL_ATLANTIC->curr_stats;
543 struct hw_atl_stats_s *stats = &hw_self->curr_stats;
544 int i = 0;
545
546 data[i] = stats->uprc + stats->mprc + stats->bprc;
547 data[++i] = stats->uprc;
548 data[++i] = stats->mprc;
549 data[++i] = stats->bprc;
550 data[++i] = stats->erpt;
551 data[++i] = stats->uptc + stats->mptc + stats->bptc;
552 data[++i] = stats->uptc;
553 data[++i] = stats->mptc;
554 data[++i] = stats->bptc;
555 data[++i] = stats->ubrc;
556 data[++i] = stats->ubtc;
557 data[++i] = stats->mbrc;
558 data[++i] = stats->mbtc;
559 data[++i] = stats->bbrc;
560 data[++i] = stats->bbtc;
561 data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
562 data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
563 data[++i] = stats_rx_dma_good_pkt_counterlsw_get(self);
564 data[++i] = stats_tx_dma_good_pkt_counterlsw_get(self);
565 data[++i] = stats_rx_dma_good_octet_counterlsw_get(self);
566 data[++i] = stats_tx_dma_good_octet_counterlsw_get(self);
567 data[++i] = stats->dpc;
568
569 if (p_count)
570 *p_count = ++i;
571
572 return 0;
573} 543}
574 544
575static const u32 hw_atl_utils_hw_mac_regs[] = { 545static const u32 hw_atl_utils_hw_mac_regs[] = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index c99cc690e425..21aeca6908d3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -129,7 +129,7 @@ struct __packed hw_aq_atl_utils_mbox {
129struct __packed hw_atl_s { 129struct __packed hw_atl_s {
130 struct aq_hw_s base; 130 struct aq_hw_s base;
131 struct hw_atl_stats_s last_stats; 131 struct hw_atl_stats_s last_stats;
132 struct hw_atl_stats_s curr_stats; 132 struct aq_stats_s curr_stats;
133 u64 speed; 133 u64 speed;
134 unsigned int chip_features; 134 unsigned int chip_features;
135 u32 fw_ver_actual; 135 u32 fw_ver_actual;
@@ -207,8 +207,6 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
207 207
208int hw_atl_utils_update_stats(struct aq_hw_s *self); 208int hw_atl_utils_update_stats(struct aq_hw_s *self);
209 209
210int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, 210struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
211 u64 *data,
212 unsigned int *p_count);
213 211
214#endif /* HW_ATL_UTILS_H */ 212#endif /* HW_ATL_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
index 0de858d215c2..9009f2651e70 100644
--- a/drivers/net/ethernet/aquantia/atlantic/ver.h
+++ b/drivers/net/ethernet/aquantia/atlantic/ver.h
@@ -11,8 +11,10 @@
11#define VER_H 11#define VER_H
12 12
13#define NIC_MAJOR_DRIVER_VERSION 1 13#define NIC_MAJOR_DRIVER_VERSION 1
14#define NIC_MINOR_DRIVER_VERSION 5 14#define NIC_MINOR_DRIVER_VERSION 6
15#define NIC_BUILD_DRIVER_VERSION 345 15#define NIC_BUILD_DRIVER_VERSION 13
16#define NIC_REVISION_DRIVER_VERSION 0 16#define NIC_REVISION_DRIVER_VERSION 0
17 17
18#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
19
18#endif /* VER_H */ 20#endif /* VER_H */
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index 3c63b16d485f..d9efbc8d783b 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -159,6 +159,8 @@ struct arc_emac_priv {
159 unsigned int link; 159 unsigned int link;
160 unsigned int duplex; 160 unsigned int duplex;
161 unsigned int speed; 161 unsigned int speed;
162
163 unsigned int rx_missed_errors;
162}; 164};
163 165
164/** 166/**
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 3241af1ce718..bd277b0dc615 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -26,6 +26,8 @@
26 26
27#include "emac.h" 27#include "emac.h"
28 28
29static void arc_emac_restart(struct net_device *ndev);
30
29/** 31/**
30 * arc_emac_tx_avail - Return the number of available slots in the tx ring. 32 * arc_emac_tx_avail - Return the number of available slots in the tx ring.
31 * @priv: Pointer to ARC EMAC private data structure. 33 * @priv: Pointer to ARC EMAC private data structure.
@@ -210,39 +212,48 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
210 continue; 212 continue;
211 } 213 }
212 214
213 pktlen = info & LEN_MASK; 215 /* Prepare the BD for next cycle. netif_receive_skb()
214 stats->rx_packets++; 216 * only if new skb was allocated and mapped to avoid holes
215 stats->rx_bytes += pktlen; 217 * in the RX fifo.
216 skb = rx_buff->skb; 218 */
217 skb_put(skb, pktlen); 219 skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
218 skb->dev = ndev; 220 if (unlikely(!skb)) {
219 skb->protocol = eth_type_trans(skb, ndev); 221 if (net_ratelimit())
220 222 netdev_err(ndev, "cannot allocate skb\n");
221 dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr), 223 /* Return ownership to EMAC */
222 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE); 224 rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
223
224 /* Prepare the BD for next cycle */
225 rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
226 EMAC_BUFFER_SIZE);
227 if (unlikely(!rx_buff->skb)) {
228 stats->rx_errors++; 225 stats->rx_errors++;
229 /* Because receive_skb is below, increment rx_dropped */
230 stats->rx_dropped++; 226 stats->rx_dropped++;
231 continue; 227 continue;
232 } 228 }
233 229
234 /* receive_skb only if new skb was allocated to avoid holes */ 230 addr = dma_map_single(&ndev->dev, (void *)skb->data,
235 netif_receive_skb(skb);
236
237 addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
238 EMAC_BUFFER_SIZE, DMA_FROM_DEVICE); 231 EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
239 if (dma_mapping_error(&ndev->dev, addr)) { 232 if (dma_mapping_error(&ndev->dev, addr)) {
240 if (net_ratelimit()) 233 if (net_ratelimit())
241 netdev_err(ndev, "cannot dma map\n"); 234 netdev_err(ndev, "cannot map dma buffer\n");
242 dev_kfree_skb(rx_buff->skb); 235 dev_kfree_skb(skb);
236 /* Return ownership to EMAC */
237 rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
243 stats->rx_errors++; 238 stats->rx_errors++;
239 stats->rx_dropped++;
244 continue; 240 continue;
245 } 241 }
242
 243 /* unmap previously mapped skb */
244 dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
245 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
246
247 pktlen = info & LEN_MASK;
248 stats->rx_packets++;
249 stats->rx_bytes += pktlen;
250 skb_put(rx_buff->skb, pktlen);
251 rx_buff->skb->dev = ndev;
252 rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);
253
254 netif_receive_skb(rx_buff->skb);
255
256 rx_buff->skb = skb;
246 dma_unmap_addr_set(rx_buff, addr, addr); 257 dma_unmap_addr_set(rx_buff, addr, addr);
247 dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE); 258 dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
248 259
@@ -259,6 +270,53 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
259} 270}
260 271
261/** 272/**
273 * arc_emac_rx_miss_handle - handle R_MISS register
274 * @ndev: Pointer to the net_device structure.
275 */
276static void arc_emac_rx_miss_handle(struct net_device *ndev)
277{
278 struct arc_emac_priv *priv = netdev_priv(ndev);
279 struct net_device_stats *stats = &ndev->stats;
280 unsigned int miss;
281
282 miss = arc_reg_get(priv, R_MISS);
283 if (miss) {
284 stats->rx_errors += miss;
285 stats->rx_missed_errors += miss;
286 priv->rx_missed_errors += miss;
287 }
288}
289
290/**
291 * arc_emac_rx_stall_check - check RX stall
292 * @ndev: Pointer to the net_device structure.
293 * @budget: How many BDs requested to process on 1 call.
294 * @work_done: How many BDs processed
295 *
 296 * Under certain conditions the EMAC stops reception of incoming packets and
 297 * continuously increments the R_MISS register instead of saving data into
 298 * the provided buffer. This function detects that condition and restarts
 299 * the EMAC.
300 */
301static void arc_emac_rx_stall_check(struct net_device *ndev,
302 int budget, unsigned int work_done)
303{
304 struct arc_emac_priv *priv = netdev_priv(ndev);
305 struct arc_emac_bd *rxbd;
306
307 if (work_done)
308 priv->rx_missed_errors = 0;
309
310 if (priv->rx_missed_errors && budget) {
311 rxbd = &priv->rxbd[priv->last_rx_bd];
312 if (le32_to_cpu(rxbd->info) & FOR_EMAC) {
313 arc_emac_restart(ndev);
314 priv->rx_missed_errors = 0;
315 }
316 }
317}
318
319/**
262 * arc_emac_poll - NAPI poll handler. 320 * arc_emac_poll - NAPI poll handler.
263 * @napi: Pointer to napi_struct structure. 321 * @napi: Pointer to napi_struct structure.
264 * @budget: How many BDs to process on 1 call. 322 * @budget: How many BDs to process on 1 call.
@@ -272,6 +330,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
272 unsigned int work_done; 330 unsigned int work_done;
273 331
274 arc_emac_tx_clean(ndev); 332 arc_emac_tx_clean(ndev);
333 arc_emac_rx_miss_handle(ndev);
275 334
276 work_done = arc_emac_rx(ndev, budget); 335 work_done = arc_emac_rx(ndev, budget);
277 if (work_done < budget) { 336 if (work_done < budget) {
@@ -279,6 +338,8 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
279 arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK); 338 arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
280 } 339 }
281 340
341 arc_emac_rx_stall_check(ndev, budget, work_done);
342
282 return work_done; 343 return work_done;
283} 344}
284 345
@@ -320,6 +381,8 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
320 if (status & MSER_MASK) { 381 if (status & MSER_MASK) {
321 stats->rx_missed_errors += 0x100; 382 stats->rx_missed_errors += 0x100;
322 stats->rx_errors += 0x100; 383 stats->rx_errors += 0x100;
384 priv->rx_missed_errors += 0x100;
385 napi_schedule(&priv->napi);
323 } 386 }
324 387
325 if (status & RXCR_MASK) { 388 if (status & RXCR_MASK) {
@@ -732,6 +795,63 @@ static int arc_emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
732} 795}
733 796
734 797
798/**
799 * arc_emac_restart - Restart EMAC
800 * @ndev: Pointer to net_device structure.
801 *
 802 * This function does a hardware reset of the EMAC in order to restore
 803 * network packet reception.
804 */
805static void arc_emac_restart(struct net_device *ndev)
806{
807 struct arc_emac_priv *priv = netdev_priv(ndev);
808 struct net_device_stats *stats = &ndev->stats;
809 int i;
810
811 if (net_ratelimit())
812 netdev_warn(ndev, "restarting stalled EMAC\n");
813
814 netif_stop_queue(ndev);
815
816 /* Disable interrupts */
817 arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
818
819 /* Disable EMAC */
820 arc_reg_clr(priv, R_CTRL, EN_MASK);
821
822 /* Return the sk_buff to system */
823 arc_free_tx_queue(ndev);
824
825 /* Clean Tx BD's */
826 priv->txbd_curr = 0;
827 priv->txbd_dirty = 0;
828 memset(priv->txbd, 0, TX_RING_SZ);
829
830 for (i = 0; i < RX_BD_NUM; i++) {
831 struct arc_emac_bd *rxbd = &priv->rxbd[i];
832 unsigned int info = le32_to_cpu(rxbd->info);
833
834 if (!(info & FOR_EMAC)) {
835 stats->rx_errors++;
836 stats->rx_dropped++;
837 }
838 /* Return ownership to EMAC */
839 rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
840 }
841 priv->last_rx_bd = 0;
842
843 /* Make sure info is visible to EMAC before enable */
844 wmb();
845
846 /* Enable interrupts */
847 arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
848
849 /* Enable EMAC */
850 arc_reg_or(priv, R_CTRL, EN_MASK);
851
852 netif_start_queue(ndev);
853}
854
735static const struct net_device_ops arc_emac_netdev_ops = { 855static const struct net_device_ops arc_emac_netdev_ops = {
736 .ndo_open = arc_emac_open, 856 .ndo_open = arc_emac_open,
737 .ndo_stop = arc_emac_stop, 857 .ndo_stop = arc_emac_stop,
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index e278e3d96ee0..16f9bee992fe 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -199,9 +199,11 @@ static int emac_rockchip_probe(struct platform_device *pdev)
199 199
200 /* RMII interface needs always a rate of 50MHz */ 200 /* RMII interface needs always a rate of 50MHz */
201 err = clk_set_rate(priv->refclk, 50000000); 201 err = clk_set_rate(priv->refclk, 50000000);
202 if (err) 202 if (err) {
203 dev_err(dev, 203 dev_err(dev,
204 "failed to change reference clock rate (%d)\n", err); 204 "failed to change reference clock rate (%d)\n", err);
205 goto out_regulator_disable;
206 }
205 207
206 if (priv->soc_data->need_div_macclk) { 208 if (priv->soc_data->need_div_macclk) {
207 priv->macclk = devm_clk_get(dev, "macclk"); 209 priv->macclk = devm_clk_get(dev, "macclk");
@@ -220,19 +222,24 @@ static int emac_rockchip_probe(struct platform_device *pdev)
220 222
221 /* RMII TX/RX needs always a rate of 25MHz */ 223 /* RMII TX/RX needs always a rate of 25MHz */
222 err = clk_set_rate(priv->macclk, 25000000); 224 err = clk_set_rate(priv->macclk, 25000000);
223 if (err) 225 if (err) {
224 dev_err(dev, 226 dev_err(dev,
225 "failed to change mac clock rate (%d)\n", err); 227 "failed to change mac clock rate (%d)\n", err);
228 goto out_clk_disable_macclk;
229 }
226 } 230 }
227 231
228 err = arc_emac_probe(ndev, interface); 232 err = arc_emac_probe(ndev, interface);
229 if (err) { 233 if (err) {
230 dev_err(dev, "failed to probe arc emac (%d)\n", err); 234 dev_err(dev, "failed to probe arc emac (%d)\n", err);
231 goto out_regulator_disable; 235 goto out_clk_disable_macclk;
232 } 236 }
233 237
234 return 0; 238 return 0;
235 239
240out_clk_disable_macclk:
241 if (priv->soc_data->need_div_macclk)
242 clk_disable_unprepare(priv->macclk);
236out_regulator_disable: 243out_regulator_disable:
237 if (priv->regulator) 244 if (priv->regulator)
238 regulator_disable(priv->regulator); 245 regulator_disable(priv->regulator);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4c739d5355d2..8ae269ec17a1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3030,7 +3030,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
3030 3030
3031 del_timer_sync(&bp->timer); 3031 del_timer_sync(&bp->timer);
3032 3032
3033 if (IS_PF(bp)) { 3033 if (IS_PF(bp) && !BP_NOMCP(bp)) {
3034 /* Set ALWAYS_ALIVE bit in shmem */ 3034 /* Set ALWAYS_ALIVE bit in shmem */
3035 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; 3035 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3036 bnx2x_drv_pulse(bp); 3036 bnx2x_drv_pulse(bp);
@@ -3116,7 +3116,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
3116 bp->cnic_loaded = false; 3116 bp->cnic_loaded = false;
3117 3117
3118 /* Clear driver version indication in shmem */ 3118 /* Clear driver version indication in shmem */
3119 if (IS_PF(bp)) 3119 if (IS_PF(bp) && !BP_NOMCP(bp))
3120 bnx2x_update_mng_version(bp); 3120 bnx2x_update_mng_version(bp);
3121 3121
3122 /* Check if there are pending parity attentions. If there are - set 3122 /* Check if there are pending parity attentions. If there are - set
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 91e2a7560b48..ddd5d3ebd201 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9578,6 +9578,15 @@ static int bnx2x_init_shmem(struct bnx2x *bp)
9578 9578
9579 do { 9579 do {
9580 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 9580 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9581
9582 /* If we read all 0xFFs, means we are in PCI error state and
9583 * should bail out to avoid crashes on adapter's FW reads.
9584 */
9585 if (bp->common.shmem_base == 0xFFFFFFFF) {
9586 bp->flags |= NO_MCP_FLAG;
9587 return -ENODEV;
9588 }
9589
9581 if (bp->common.shmem_base) { 9590 if (bp->common.shmem_base) {
9582 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); 9591 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9583 if (val & SHR_MEM_VALIDITY_MB) 9592 if (val & SHR_MEM_VALIDITY_MB)
@@ -14320,7 +14329,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
14320 BNX2X_ERR("IO slot reset --> driver unload\n"); 14329 BNX2X_ERR("IO slot reset --> driver unload\n");
14321 14330
14322 /* MCP should have been reset; Need to wait for validity */ 14331 /* MCP should have been reset; Need to wait for validity */
14323 bnx2x_init_shmem(bp); 14332 if (bnx2x_init_shmem(bp)) {
14333 rtnl_unlock();
14334 return PCI_ERS_RESULT_DISCONNECT;
14335 }
14324 14336
14325 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { 14337 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
14326 u32 v; 14338 u32 v;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c5c38d4b7d1c..61ca4eb7c6fa 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1883,7 +1883,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1883 * here forever if we consistently cannot allocate 1883 * here forever if we consistently cannot allocate
1884 * buffers. 1884 * buffers.
1885 */ 1885 */
1886 else if (rc == -ENOMEM) 1886 else if (rc == -ENOMEM && budget)
1887 rx_pkts++; 1887 rx_pkts++;
1888 else if (rc == -EBUSY) /* partial completion */ 1888 else if (rc == -EBUSY) /* partial completion */
1889 break; 1889 break;
@@ -1969,7 +1969,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
1969 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 1969 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1970 1970
1971 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); 1971 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
1972 if (likely(rc == -EIO)) 1972 if (likely(rc == -EIO) && budget)
1973 rx_pkts++; 1973 rx_pkts++;
1974 else if (rc == -EBUSY) /* partial completion */ 1974 else if (rc == -EBUSY) /* partial completion */
1975 break; 1975 break;
@@ -3368,6 +3368,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3368 u16 cp_ring_id, len = 0; 3368 u16 cp_ring_id, len = 0;
3369 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; 3369 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
3370 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; 3370 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
3371 struct hwrm_short_input short_input = {0};
3371 3372
3372 req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++); 3373 req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
3373 memset(resp, 0, PAGE_SIZE); 3374 memset(resp, 0, PAGE_SIZE);
@@ -3376,7 +3377,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3376 3377
3377 if (bp->flags & BNXT_FLAG_SHORT_CMD) { 3378 if (bp->flags & BNXT_FLAG_SHORT_CMD) {
3378 void *short_cmd_req = bp->hwrm_short_cmd_req_addr; 3379 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
3379 struct hwrm_short_input short_input = {0};
3380 3380
3381 memcpy(short_cmd_req, req, msg_len); 3381 memcpy(short_cmd_req, req, msg_len);
3382 memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN - 3382 memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
@@ -8263,8 +8263,9 @@ static void bnxt_shutdown(struct pci_dev *pdev)
8263 if (netif_running(dev)) 8263 if (netif_running(dev))
8264 dev_close(dev); 8264 dev_close(dev);
8265 8265
8266 bnxt_ulp_shutdown(bp);
8267
8266 if (system_state == SYSTEM_POWER_OFF) { 8268 if (system_state == SYSTEM_POWER_OFF) {
8267 bnxt_ulp_shutdown(bp);
8268 bnxt_clear_int_mode(bp); 8269 bnxt_clear_int_mode(bp);
8269 pci_wake_from_d3(pdev, bp->wol); 8270 pci_wake_from_d3(pdev, bp->wol);
8270 pci_set_power_state(pdev, PCI_D3hot); 8271 pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 5ee18660bc33..c9617675f934 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -70,7 +70,7 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
70 netdev_err(bp->dev, "vf ndo called though sriov is disabled\n"); 70 netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
71 return -EINVAL; 71 return -EINVAL;
72 } 72 }
73 if (vf_id >= bp->pf.max_vfs) { 73 if (vf_id >= bp->pf.active_vfs) {
74 netdev_err(bp->dev, "Invalid VF id %d\n", vf_id); 74 netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
75 return -EINVAL; 75 return -EINVAL;
76 } 76 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index d5031f436f83..d8fee26cd45e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -56,7 +56,6 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
56{ 56{
57 int ifindex = tcf_mirred_ifindex(tc_act); 57 int ifindex = tcf_mirred_ifindex(tc_act);
58 struct net_device *dev; 58 struct net_device *dev;
59 u16 dst_fid;
60 59
61 dev = __dev_get_by_index(dev_net(bp->dev), ifindex); 60 dev = __dev_get_by_index(dev_net(bp->dev), ifindex);
62 if (!dev) { 61 if (!dev) {
@@ -64,15 +63,7 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
64 return -EINVAL; 63 return -EINVAL;
65 } 64 }
66 65
67 /* find the FID from dev */
68 dst_fid = bnxt_flow_get_dst_fid(bp, dev);
69 if (dst_fid == BNXT_FID_INVALID) {
70 netdev_info(bp->dev, "can't get fid for ifindex=%d", ifindex);
71 return -EINVAL;
72 }
73
74 actions->flags |= BNXT_TC_ACTION_FLAG_FWD; 66 actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
75 actions->dst_fid = dst_fid;
76 actions->dst_dev = dev; 67 actions->dst_dev = dev;
77 return 0; 68 return 0;
78} 69}
@@ -160,13 +151,17 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
160 if (rc) 151 if (rc)
161 return rc; 152 return rc;
162 153
163 /* Tunnel encap/decap action must be accompanied by a redirect action */ 154 if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
164 if ((actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP || 155 if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
165 actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) && 156 /* dst_fid is PF's fid */
166 !(actions->flags & BNXT_TC_ACTION_FLAG_FWD)) { 157 actions->dst_fid = bp->pf.fw_fid;
167 netdev_info(bp->dev, 158 } else {
168 "error: no redir action along with encap/decap"); 159 /* find the FID from dst_dev */
169 return -EINVAL; 160 actions->dst_fid =
161 bnxt_flow_get_dst_fid(bp, actions->dst_dev);
162 if (actions->dst_fid == BNXT_FID_INVALID)
163 return -EINVAL;
164 }
170 } 165 }
171 166
172 return rc; 167 return rc;
@@ -426,7 +421,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
426 } 421 }
427 422
428 /* If all IP and L4 fields are wildcarded then this is an L2 flow */ 423 /* If all IP and L4 fields are wildcarded then this is an L2 flow */
429 if (is_wildcard(&l3_mask, sizeof(l3_mask)) && 424 if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
430 is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) { 425 is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
431 flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2; 426 flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
432 } else { 427 } else {
@@ -532,10 +527,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
532 } 527 }
533 528
534 if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) { 529 if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
535 enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR | 530 enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
536 CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR;
537 ether_addr_copy(req.dst_macaddr, l2_info->dmac); 531 ether_addr_copy(req.dst_macaddr, l2_info->dmac);
538 ether_addr_copy(req.src_macaddr, l2_info->smac);
539 } 532 }
540 if (l2_info->num_vlans) { 533 if (l2_info->num_vlans) {
541 enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID; 534 enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
@@ -901,10 +894,10 @@ static void bnxt_tc_put_decap_handle(struct bnxt *bp,
901 894
902static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp, 895static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
903 struct ip_tunnel_key *tun_key, 896 struct ip_tunnel_key *tun_key,
904 struct bnxt_tc_l2_key *l2_info, 897 struct bnxt_tc_l2_key *l2_info)
905 struct net_device *real_dst_dev)
906{ 898{
907#ifdef CONFIG_INET 899#ifdef CONFIG_INET
900 struct net_device *real_dst_dev = bp->dev;
908 struct flowi4 flow = { {0} }; 901 struct flowi4 flow = { {0} };
909 struct net_device *dst_dev; 902 struct net_device *dst_dev;
910 struct neighbour *nbr; 903 struct neighbour *nbr;
@@ -1008,14 +1001,13 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
1008 */ 1001 */
1009 tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src; 1002 tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
1010 tun_key.tp_dst = flow->tun_key.tp_dst; 1003 tun_key.tp_dst = flow->tun_key.tp_dst;
1011 rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info, bp->dev); 1004 rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info);
1012 if (rc) 1005 if (rc)
1013 goto put_decap; 1006 goto put_decap;
1014 1007
1015 decap_key->ttl = tun_key.ttl;
1016 decap_l2_info = &decap_node->l2_info; 1008 decap_l2_info = &decap_node->l2_info;
1009 /* decap smac is wildcarded */
1017 ether_addr_copy(decap_l2_info->dmac, l2_info.smac); 1010 ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
1018 ether_addr_copy(decap_l2_info->smac, l2_info.dmac);
1019 if (l2_info.num_vlans) { 1011 if (l2_info.num_vlans) {
1020 decap_l2_info->num_vlans = l2_info.num_vlans; 1012 decap_l2_info->num_vlans = l2_info.num_vlans;
1021 decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid; 1013 decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
@@ -1095,8 +1087,7 @@ static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
1095 if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE) 1087 if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
1096 goto done; 1088 goto done;
1097 1089
1098 rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info, 1090 rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);
1099 flow->actions.dst_dev);
1100 if (rc) 1091 if (rc)
1101 goto put_encap; 1092 goto put_encap;
1102 1093
@@ -1169,6 +1160,15 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
1169 return 0; 1160 return 0;
1170} 1161}
1171 1162
1163static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
1164 u16 src_fid)
1165{
1166 if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
1167 flow->src_fid = bp->pf.fw_fid;
1168 else
1169 flow->src_fid = src_fid;
1170}
1171
1172/* Add a new flow or replace an existing flow. 1172/* Add a new flow or replace an existing flow.
1173 * Notes on locking: 1173 * Notes on locking:
1174 * There are essentially two critical sections here. 1174 * There are essentially two critical sections here.
@@ -1204,7 +1204,8 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
1204 rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow); 1204 rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
1205 if (rc) 1205 if (rc)
1206 goto free_node; 1206 goto free_node;
1207 flow->src_fid = src_fid; 1207
1208 bnxt_tc_set_src_fid(bp, flow, src_fid);
1208 1209
1209 if (!bnxt_tc_can_offload(bp, flow)) { 1210 if (!bnxt_tc_can_offload(bp, flow)) {
1210 rc = -ENOSPC; 1211 rc = -ENOSPC;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index de51c2177d03..8995cfefbfcf 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4,11 +4,13 @@
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) 5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc. 6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2014 Broadcom Corporation. 7 * Copyright (C) 2005-2016 Broadcom Corporation.
8 * Copyright (C) 2016-2017 Broadcom Limited.
8 * 9 *
9 * Firmware is: 10 * Firmware is:
10 * Derived from proprietary unpublished source code, 11 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation. 12 * Copyright (C) 2000-2016 Broadcom Corporation.
13 * Copyright (C) 2016-2017 Broadcom Ltd.
12 * 14 *
13 * Permission is hereby granted for the distribution of this firmware 15 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright 16 * data in hexadecimal or equivalent format, provided this copyright
@@ -10052,6 +10054,16 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
10052 10054
10053 tw32(GRC_MODE, tp->grc_mode | val); 10055 tw32(GRC_MODE, tp->grc_mode | val);
10054 10056
10057 /* On one of the AMD platform, MRRS is restricted to 4000 because of
10058 * south bridge limitation. As a workaround, Driver is setting MRRS
10059 * to 2048 instead of default 4096.
10060 */
10061 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10062 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10063 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10064 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10065 }
10066
10055 /* Setup the timer prescalar register. Clock is always 66Mhz. */ 10067 /* Setup the timer prescalar register. Clock is always 66Mhz. */
10056 val = tr32(GRC_MISC_CFG); 10068 val = tr32(GRC_MISC_CFG);
10057 val &= ~0xff; 10069 val &= ~0xff;
@@ -14225,7 +14237,10 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14225 /* Reset PHY, otherwise the read DMA engine will be in a mode that 14237 /* Reset PHY, otherwise the read DMA engine will be in a mode that
14226 * breaks all requests to 256 bytes. 14238 * breaks all requests to 256 bytes.
14227 */ 14239 */
14228 if (tg3_asic_rev(tp) == ASIC_REV_57766) 14240 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14241 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14242 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14243 tg3_asic_rev(tp) == ASIC_REV_5720)
14229 reset_phy = true; 14244 reset_phy = true;
14230 14245
14231 err = tg3_restart_hw(tp, reset_phy); 14246 err = tg3_restart_hw(tp, reset_phy);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index c2d02d02d1e6..1f0271fa7c74 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -5,7 +5,8 @@
5 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 5 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
6 * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com) 6 * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
7 * Copyright (C) 2004 Sun Microsystems Inc. 7 * Copyright (C) 2004 Sun Microsystems Inc.
8 * Copyright (C) 2007-2014 Broadcom Corporation. 8 * Copyright (C) 2007-2016 Broadcom Corporation.
9 * Copyright (C) 2016-2017 Broadcom Limited.
9 */ 10 */
10 11
11#ifndef _T3_H 12#ifndef _T3_H
@@ -96,6 +97,7 @@
96#define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106 97#define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106
97#define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109 98#define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109
98#define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a 99#define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a
100#define TG3PCI_SUBDEVICE_ID_DELL_5762 0x07f0
99#define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ 101#define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ
100#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c 102#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c
101#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a 103#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a
@@ -281,6 +283,9 @@
281#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */ 283#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */
282#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */ 284#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */
283/* 0xa8 --> 0xb8 unused */ 285/* 0xa8 --> 0xb8 unused */
286#define TG3PCI_DEV_STATUS_CTRL 0x000000b4
287#define MAX_READ_REQ_SIZE_2048 0x00004000
288#define MAX_READ_REQ_MASK 0x00007000
284#define TG3PCI_DUAL_MAC_CTRL 0x000000b8 289#define TG3PCI_DUAL_MAC_CTRL 0x000000b8
285#define DUAL_MAC_CTRL_CH_MASK 0x00000003 290#define DUAL_MAC_CTRL_CH_MASK 0x00000003
286#define DUAL_MAC_CTRL_ID 0x00000004 291#define DUAL_MAC_CTRL_ID 0x00000004
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 6aa0eee88ea5..a5eecd895a82 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1113,7 +1113,7 @@ static int liquidio_watchdog(void *param)
1113 dev_err(&oct->pci_dev->dev, 1113 dev_err(&oct->pci_dev->dev,
1114 "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n", 1114 "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
1115 core); 1115 core);
1116 err_msg_was_printed[core] = true; 1116 err_msg_was_printed[core] = true;
1117 } 1117 }
1118 } 1118 }
1119 1119
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 8b2c31e2a2b0..a3d12dbde95b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1355,6 +1355,8 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
1355 1355
1356 /* Offload checksum calculation to HW */ 1356 /* Offload checksum calculation to HW */
1357 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1357 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1358 if (ip.v4->version == 4)
1359 hdr->csum_l3 = 1; /* Enable IP csum calculation */
1358 hdr->l3_offset = skb_network_offset(skb); 1360 hdr->l3_offset = skb_network_offset(skb);
1359 hdr->l4_offset = skb_transport_offset(skb); 1361 hdr->l4_offset = skb_transport_offset(skb);
1360 1362
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 6f9fa6e3c42a..d8424ed16c33 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -344,7 +344,6 @@ struct adapter_params {
344 344
345 unsigned int sf_size; /* serial flash size in bytes */ 345 unsigned int sf_size; /* serial flash size in bytes */
346 unsigned int sf_nsec; /* # of flash sectors */ 346 unsigned int sf_nsec; /* # of flash sectors */
347 unsigned int sf_fw_start; /* start of FW image in flash */
348 347
349 unsigned int fw_vers; /* firmware version */ 348 unsigned int fw_vers; /* firmware version */
350 unsigned int bs_vers; /* bootstrap version */ 349 unsigned int bs_vers; /* bootstrap version */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index d4a548a6a55c..a452d5a1b0f3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -111,6 +111,9 @@ static void cxgb4_process_flow_match(struct net_device *dev,
111 ethtype_mask = 0; 111 ethtype_mask = 0;
112 } 112 }
113 113
114 if (ethtype_key == ETH_P_IPV6)
115 fs->type = 1;
116
114 fs->val.ethtype = ethtype_key; 117 fs->val.ethtype = ethtype_key;
115 fs->mask.ethtype = ethtype_mask; 118 fs->mask.ethtype = ethtype_mask;
116 fs->val.proto = key->ip_proto; 119 fs->val.proto = key->ip_proto;
@@ -205,8 +208,8 @@ static void cxgb4_process_flow_match(struct net_device *dev,
205 VLAN_PRIO_SHIFT); 208 VLAN_PRIO_SHIFT);
206 vlan_tci_mask = mask->vlan_id | (mask->vlan_priority << 209 vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
207 VLAN_PRIO_SHIFT); 210 VLAN_PRIO_SHIFT);
208 fs->val.ivlan = cpu_to_be16(vlan_tci); 211 fs->val.ivlan = vlan_tci;
209 fs->mask.ivlan = cpu_to_be16(vlan_tci_mask); 212 fs->mask.ivlan = vlan_tci_mask;
210 213
211 /* Chelsio adapters use ivlan_vld bit to match vlan packets 214 /* Chelsio adapters use ivlan_vld bit to match vlan packets
212 * as 802.1Q. Also, when vlan tag is present in packets, 215 * as 802.1Q. Also, when vlan tag is present in packets,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index f63210f15579..375ef86a84da 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2844,8 +2844,6 @@ enum {
2844 SF_RD_DATA_FAST = 0xb, /* read flash */ 2844 SF_RD_DATA_FAST = 0xb, /* read flash */
2845 SF_RD_ID = 0x9f, /* read ID */ 2845 SF_RD_ID = 0x9f, /* read ID */
2846 SF_ERASE_SECTOR = 0xd8, /* erase sector */ 2846 SF_ERASE_SECTOR = 0xd8, /* erase sector */
2847
2848 FW_MAX_SIZE = 16 * SF_SEC_SIZE,
2849}; 2847};
2850 2848
2851/** 2849/**
@@ -3558,8 +3556,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3558 const __be32 *p = (const __be32 *)fw_data; 3556 const __be32 *p = (const __be32 *)fw_data;
3559 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; 3557 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3560 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; 3558 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3561 unsigned int fw_img_start = adap->params.sf_fw_start; 3559 unsigned int fw_start_sec = FLASH_FW_START_SEC;
3562 unsigned int fw_start_sec = fw_img_start / sf_sec_size; 3560 unsigned int fw_size = FLASH_FW_MAX_SIZE;
3561 unsigned int fw_start = FLASH_FW_START;
3563 3562
3564 if (!size) { 3563 if (!size) {
3565 dev_err(adap->pdev_dev, "FW image has no data\n"); 3564 dev_err(adap->pdev_dev, "FW image has no data\n");
@@ -3575,9 +3574,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3575 "FW image size differs from size in FW header\n"); 3574 "FW image size differs from size in FW header\n");
3576 return -EINVAL; 3575 return -EINVAL;
3577 } 3576 }
3578 if (size > FW_MAX_SIZE) { 3577 if (size > fw_size) {
3579 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n", 3578 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
3580 FW_MAX_SIZE); 3579 fw_size);
3581 return -EFBIG; 3580 return -EFBIG;
3582 } 3581 }
3583 if (!t4_fw_matches_chip(adap, hdr)) 3582 if (!t4_fw_matches_chip(adap, hdr))
@@ -3604,11 +3603,11 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3604 */ 3603 */
3605 memcpy(first_page, fw_data, SF_PAGE_SIZE); 3604 memcpy(first_page, fw_data, SF_PAGE_SIZE);
3606 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff); 3605 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3607 ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page); 3606 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
3608 if (ret) 3607 if (ret)
3609 goto out; 3608 goto out;
3610 3609
3611 addr = fw_img_start; 3610 addr = fw_start;
3612 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { 3611 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3613 addr += SF_PAGE_SIZE; 3612 addr += SF_PAGE_SIZE;
3614 fw_data += SF_PAGE_SIZE; 3613 fw_data += SF_PAGE_SIZE;
@@ -3618,7 +3617,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3618 } 3617 }
3619 3618
3620 ret = t4_write_flash(adap, 3619 ret = t4_write_flash(adap,
3621 fw_img_start + offsetof(struct fw_hdr, fw_ver), 3620 fw_start + offsetof(struct fw_hdr, fw_ver),
3622 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver); 3621 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
3623out: 3622out:
3624 if (ret) 3623 if (ret)
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 410a0a95130b..b3e7fafee3df 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1913,3 +1913,7 @@ static struct platform_driver cs89x0_driver = {
1913module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe); 1913module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);
1914 1914
1915#endif /* CONFIG_CS89x0_PLATFORM */ 1915#endif /* CONFIG_CS89x0_PLATFORM */
1916
1917MODULE_LICENSE("GPL");
1918MODULE_DESCRIPTION("Crystal Semiconductor (Now Cirrus Logic) CS89[02]0 network driver");
1919MODULE_AUTHOR("Russell Nelson <nelson@crynwr.com>");
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c6e859a27ee6..e180657a02ef 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4634,6 +4634,15 @@ int be_update_queues(struct be_adapter *adapter)
4634 4634
4635 be_schedule_worker(adapter); 4635 be_schedule_worker(adapter);
4636 4636
4637 /*
4638 * The IF was destroyed and re-created. We need to clear
4639 * all promiscuous flags valid for the destroyed IF.
4640 * Without this promisc mode is not restored during
4641 * be_open() because the driver thinks that it is
4642 * already enabled in HW.
4643 */
4644 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
4645
4637 if (netif_running(netdev)) 4646 if (netif_running(netdev))
4638 status = be_open(netdev); 4647 status = be_open(netdev);
4639 4648
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 610573855213..a74300a4459c 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -818,6 +818,12 @@ static void fec_enet_bd_init(struct net_device *dev)
818 for (i = 0; i < txq->bd.ring_size; i++) { 818 for (i = 0; i < txq->bd.ring_size; i++) {
819 /* Initialize the BD for every fragment in the page. */ 819 /* Initialize the BD for every fragment in the page. */
820 bdp->cbd_sc = cpu_to_fec16(0); 820 bdp->cbd_sc = cpu_to_fec16(0);
821 if (bdp->cbd_bufaddr &&
822 !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
823 dma_unmap_single(&fep->pdev->dev,
824 fec32_to_cpu(bdp->cbd_bufaddr),
825 fec16_to_cpu(bdp->cbd_datlen),
826 DMA_TO_DEVICE);
821 if (txq->tx_skbuff[i]) { 827 if (txq->tx_skbuff[i]) {
822 dev_kfree_skb_any(txq->tx_skbuff[i]); 828 dev_kfree_skb_any(txq->tx_skbuff[i]);
823 txq->tx_skbuff[i] = NULL; 829 txq->tx_skbuff[i] = NULL;
@@ -3463,6 +3469,10 @@ fec_probe(struct platform_device *pdev)
3463 goto failed_regulator; 3469 goto failed_regulator;
3464 } 3470 }
3465 } else { 3471 } else {
3472 if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
3473 ret = -EPROBE_DEFER;
3474 goto failed_regulator;
3475 }
3466 fep->reg_phy = NULL; 3476 fep->reg_phy = NULL;
3467 } 3477 }
3468 3478
@@ -3546,8 +3556,9 @@ failed_clk_ipg:
3546failed_clk: 3556failed_clk:
3547 if (of_phy_is_fixed_link(np)) 3557 if (of_phy_is_fixed_link(np))
3548 of_phy_deregister_fixed_link(np); 3558 of_phy_deregister_fixed_link(np);
3549failed_phy:
3550 of_node_put(phy_node); 3559 of_node_put(phy_node);
3560failed_phy:
3561 dev_id--;
3551failed_ioremap: 3562failed_ioremap:
3552 free_netdev(ndev); 3563 free_netdev(ndev);
3553 3564
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 7892f2f0c6b5..2c2976a2dda6 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -613,9 +613,11 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
613 return NETDEV_TX_OK; 613 return NETDEV_TX_OK;
614} 614}
615 615
616static void fs_timeout(struct net_device *dev) 616static void fs_timeout_work(struct work_struct *work)
617{ 617{
618 struct fs_enet_private *fep = netdev_priv(dev); 618 struct fs_enet_private *fep = container_of(work, struct fs_enet_private,
619 timeout_work);
620 struct net_device *dev = fep->ndev;
619 unsigned long flags; 621 unsigned long flags;
620 int wake = 0; 622 int wake = 0;
621 623
@@ -627,7 +629,6 @@ static void fs_timeout(struct net_device *dev)
627 phy_stop(dev->phydev); 629 phy_stop(dev->phydev);
628 (*fep->ops->stop)(dev); 630 (*fep->ops->stop)(dev);
629 (*fep->ops->restart)(dev); 631 (*fep->ops->restart)(dev);
630 phy_start(dev->phydev);
631 } 632 }
632 633
633 phy_start(dev->phydev); 634 phy_start(dev->phydev);
@@ -639,6 +640,13 @@ static void fs_timeout(struct net_device *dev)
639 netif_wake_queue(dev); 640 netif_wake_queue(dev);
640} 641}
641 642
643static void fs_timeout(struct net_device *dev)
644{
645 struct fs_enet_private *fep = netdev_priv(dev);
646
647 schedule_work(&fep->timeout_work);
648}
649
642/*----------------------------------------------------------------------------- 650/*-----------------------------------------------------------------------------
643 * generic link-change handler - should be sufficient for most cases 651 * generic link-change handler - should be sufficient for most cases
644 *-----------------------------------------------------------------------------*/ 652 *-----------------------------------------------------------------------------*/
@@ -759,6 +767,7 @@ static int fs_enet_close(struct net_device *dev)
759 netif_stop_queue(dev); 767 netif_stop_queue(dev);
760 netif_carrier_off(dev); 768 netif_carrier_off(dev);
761 napi_disable(&fep->napi); 769 napi_disable(&fep->napi);
770 cancel_work_sync(&fep->timeout_work);
762 phy_stop(dev->phydev); 771 phy_stop(dev->phydev);
763 772
764 spin_lock_irqsave(&fep->lock, flags); 773 spin_lock_irqsave(&fep->lock, flags);
@@ -1019,6 +1028,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
1019 1028
1020 ndev->netdev_ops = &fs_enet_netdev_ops; 1029 ndev->netdev_ops = &fs_enet_netdev_ops;
1021 ndev->watchdog_timeo = 2 * HZ; 1030 ndev->watchdog_timeo = 2 * HZ;
1031 INIT_WORK(&fep->timeout_work, fs_timeout_work);
1022 netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight); 1032 netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight);
1023 1033
1024 ndev->ethtool_ops = &fs_ethtool_ops; 1034 ndev->ethtool_ops = &fs_ethtool_ops;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 92e06b37a199..195fae6aec4a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -125,6 +125,7 @@ struct fs_enet_private {
125 spinlock_t lock; /* during all ops except TX pckt processing */ 125 spinlock_t lock; /* during all ops except TX pckt processing */
126 spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */ 126 spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */
127 struct fs_platform_info *fpi; 127 struct fs_platform_info *fpi;
128 struct work_struct timeout_work;
128 const struct fs_ops *ops; 129 const struct fs_ops *ops;
129 int rx_ring, tx_ring; 130 int rx_ring, tx_ring;
130 dma_addr_t ring_mem_addr; 131 dma_addr_t ring_mem_addr;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 5be52d89b182..7f837006bb6a 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1378,9 +1378,11 @@ static int gfar_probe(struct platform_device *ofdev)
1378 1378
1379 gfar_init_addr_hash_table(priv); 1379 gfar_init_addr_hash_table(priv);
1380 1380
1381 /* Insert receive time stamps into padding alignment bytes */ 1381 /* Insert receive time stamps into padding alignment bytes, and
1382 * plus 2 bytes padding to ensure the cpu alignment.
1383 */
1382 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) 1384 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1383 priv->padding = 8; 1385 priv->padding = 8 + DEFAULT_PADDING;
1384 1386
1385 if (dev->features & NETIF_F_IP_CSUM || 1387 if (dev->features & NETIF_F_IP_CSUM ||
1386 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) 1388 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
@@ -1790,6 +1792,7 @@ static int init_phy(struct net_device *dev)
1790 GFAR_SUPPORTED_GBIT : 0; 1792 GFAR_SUPPORTED_GBIT : 0;
1791 phy_interface_t interface; 1793 phy_interface_t interface;
1792 struct phy_device *phydev; 1794 struct phy_device *phydev;
1795 struct ethtool_eee edata;
1793 1796
1794 priv->oldlink = 0; 1797 priv->oldlink = 0;
1795 priv->oldspeed = 0; 1798 priv->oldspeed = 0;
@@ -1814,6 +1817,10 @@ static int init_phy(struct net_device *dev)
1814 /* Add support for flow control, but don't advertise it by default */ 1817 /* Add support for flow control, but don't advertise it by default */
1815 phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); 1818 phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1816 1819
1820 /* disable EEE autoneg, EEE not supported by eTSEC */
1821 memset(&edata, 0, sizeof(struct ethtool_eee));
1822 phy_ethtool_set_eee(phydev, &edata);
1823
1817 return 0; 1824 return 0;
1818} 1825}
1819 1826
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 544114281ea7..9f8d4f8e57e3 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -319,11 +319,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
319 now = tmr_cnt_read(etsects); 319 now = tmr_cnt_read(etsects);
320 now += delta; 320 now += delta;
321 tmr_cnt_write(etsects, now); 321 tmr_cnt_write(etsects, now);
322 set_fipers(etsects);
322 323
323 spin_unlock_irqrestore(&etsects->lock, flags); 324 spin_unlock_irqrestore(&etsects->lock, flags);
324 325
325 set_fipers(etsects);
326
327 return 0; 326 return 0;
328} 327}
329 328
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 7feff2450ed6..241db3199b88 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -494,6 +494,9 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
494 case 16384: 494 case 16384:
495 ret |= EMAC_MR1_RFS_16K; 495 ret |= EMAC_MR1_RFS_16K;
496 break; 496 break;
497 case 8192:
498 ret |= EMAC4_MR1_RFS_8K;
499 break;
497 case 4096: 500 case 4096:
498 ret |= EMAC_MR1_RFS_4K; 501 ret |= EMAC_MR1_RFS_4K;
499 break; 502 break;
@@ -516,6 +519,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
516 case 16384: 519 case 16384:
517 ret |= EMAC4_MR1_TFS_16K; 520 ret |= EMAC4_MR1_TFS_16K;
518 break; 521 break;
522 case 8192:
523 ret |= EMAC4_MR1_TFS_8K;
524 break;
519 case 4096: 525 case 4096:
520 ret |= EMAC4_MR1_TFS_4K; 526 ret |= EMAC4_MR1_TFS_4K;
521 break; 527 break;
diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h
index 5afcc27ceebb..c26d2631ca30 100644
--- a/drivers/net/ethernet/ibm/emac/emac.h
+++ b/drivers/net/ethernet/ibm/emac/emac.h
@@ -151,9 +151,11 @@ struct emac_regs {
151 151
152#define EMAC4_MR1_RFS_2K 0x00100000 152#define EMAC4_MR1_RFS_2K 0x00100000
153#define EMAC4_MR1_RFS_4K 0x00180000 153#define EMAC4_MR1_RFS_4K 0x00180000
154#define EMAC4_MR1_RFS_8K 0x00200000
154#define EMAC4_MR1_RFS_16K 0x00280000 155#define EMAC4_MR1_RFS_16K 0x00280000
155#define EMAC4_MR1_TFS_2K 0x00020000 156#define EMAC4_MR1_TFS_2K 0x00020000
156#define EMAC4_MR1_TFS_4K 0x00030000 157#define EMAC4_MR1_TFS_4K 0x00030000
158#define EMAC4_MR1_TFS_8K 0x00040000
157#define EMAC4_MR1_TFS_16K 0x00050000 159#define EMAC4_MR1_TFS_16K 0x00050000
158#define EMAC4_MR1_TR 0x00008000 160#define EMAC4_MR1_TR 0x00008000
159#define EMAC4_MR1_MWSW_001 0x00001000 161#define EMAC4_MR1_MWSW_001 0x00001000
@@ -242,7 +244,7 @@ struct emac_regs {
242#define EMAC_STACR_PHYE 0x00004000 244#define EMAC_STACR_PHYE 0x00004000
243#define EMAC_STACR_STAC_MASK 0x00003000 245#define EMAC_STACR_STAC_MASK 0x00003000
244#define EMAC_STACR_STAC_READ 0x00001000 246#define EMAC_STACR_STAC_READ 0x00001000
245#define EMAC_STACR_STAC_WRITE 0x00002000 247#define EMAC_STACR_STAC_WRITE 0x00000800
246#define EMAC_STACR_OPBC_MASK 0x00000C00 248#define EMAC_STACR_OPBC_MASK 0x00000C00
247#define EMAC_STACR_OPBC_50 0x00000000 249#define EMAC_STACR_OPBC_50 0x00000000
248#define EMAC_STACR_OPBC_66 0x00000400 250#define EMAC_STACR_OPBC_66 0x00000400
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 1dc4aef37d3a..b65f5f3ac034 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -410,6 +410,10 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
410 struct ibmvnic_rx_pool *rx_pool; 410 struct ibmvnic_rx_pool *rx_pool;
411 int rx_scrqs; 411 int rx_scrqs;
412 int i, j, rc; 412 int i, j, rc;
413 u64 *size_array;
414
415 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
416 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
413 417
414 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 418 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
415 for (i = 0; i < rx_scrqs; i++) { 419 for (i = 0; i < rx_scrqs; i++) {
@@ -417,7 +421,17 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
417 421
418 netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i); 422 netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
419 423
420 rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff); 424 if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
425 free_long_term_buff(adapter, &rx_pool->long_term_buff);
426 rx_pool->buff_size = be64_to_cpu(size_array[i]);
427 alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
428 rx_pool->size *
429 rx_pool->buff_size);
430 } else {
431 rc = reset_long_term_buff(adapter,
432 &rx_pool->long_term_buff);
433 }
434
421 if (rc) 435 if (rc)
422 return rc; 436 return rc;
423 437
@@ -439,14 +453,12 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
439static void release_rx_pools(struct ibmvnic_adapter *adapter) 453static void release_rx_pools(struct ibmvnic_adapter *adapter)
440{ 454{
441 struct ibmvnic_rx_pool *rx_pool; 455 struct ibmvnic_rx_pool *rx_pool;
442 int rx_scrqs;
443 int i, j; 456 int i, j;
444 457
445 if (!adapter->rx_pool) 458 if (!adapter->rx_pool)
446 return; 459 return;
447 460
448 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 461 for (i = 0; i < adapter->num_active_rx_pools; i++) {
449 for (i = 0; i < rx_scrqs; i++) {
450 rx_pool = &adapter->rx_pool[i]; 462 rx_pool = &adapter->rx_pool[i];
451 463
452 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i); 464 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
@@ -469,6 +481,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
469 481
470 kfree(adapter->rx_pool); 482 kfree(adapter->rx_pool);
471 adapter->rx_pool = NULL; 483 adapter->rx_pool = NULL;
484 adapter->num_active_rx_pools = 0;
472} 485}
473 486
474static int init_rx_pools(struct net_device *netdev) 487static int init_rx_pools(struct net_device *netdev)
@@ -493,6 +506,8 @@ static int init_rx_pools(struct net_device *netdev)
493 return -1; 506 return -1;
494 } 507 }
495 508
509 adapter->num_active_rx_pools = 0;
510
496 for (i = 0; i < rxadd_subcrqs; i++) { 511 for (i = 0; i < rxadd_subcrqs; i++) {
497 rx_pool = &adapter->rx_pool[i]; 512 rx_pool = &adapter->rx_pool[i];
498 513
@@ -536,6 +551,8 @@ static int init_rx_pools(struct net_device *netdev)
536 rx_pool->next_free = 0; 551 rx_pool->next_free = 0;
537 } 552 }
538 553
554 adapter->num_active_rx_pools = rxadd_subcrqs;
555
539 return 0; 556 return 0;
540} 557}
541 558
@@ -586,13 +603,12 @@ static void release_vpd_data(struct ibmvnic_adapter *adapter)
586static void release_tx_pools(struct ibmvnic_adapter *adapter) 603static void release_tx_pools(struct ibmvnic_adapter *adapter)
587{ 604{
588 struct ibmvnic_tx_pool *tx_pool; 605 struct ibmvnic_tx_pool *tx_pool;
589 int i, tx_scrqs; 606 int i;
590 607
591 if (!adapter->tx_pool) 608 if (!adapter->tx_pool)
592 return; 609 return;
593 610
594 tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); 611 for (i = 0; i < adapter->num_active_tx_pools; i++) {
595 for (i = 0; i < tx_scrqs; i++) {
596 netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i); 612 netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
597 tx_pool = &adapter->tx_pool[i]; 613 tx_pool = &adapter->tx_pool[i];
598 kfree(tx_pool->tx_buff); 614 kfree(tx_pool->tx_buff);
@@ -603,6 +619,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)
603 619
604 kfree(adapter->tx_pool); 620 kfree(adapter->tx_pool);
605 adapter->tx_pool = NULL; 621 adapter->tx_pool = NULL;
622 adapter->num_active_tx_pools = 0;
606} 623}
607 624
608static int init_tx_pools(struct net_device *netdev) 625static int init_tx_pools(struct net_device *netdev)
@@ -619,6 +636,8 @@ static int init_tx_pools(struct net_device *netdev)
619 if (!adapter->tx_pool) 636 if (!adapter->tx_pool)
620 return -1; 637 return -1;
621 638
639 adapter->num_active_tx_pools = 0;
640
622 for (i = 0; i < tx_subcrqs; i++) { 641 for (i = 0; i < tx_subcrqs; i++) {
623 tx_pool = &adapter->tx_pool[i]; 642 tx_pool = &adapter->tx_pool[i];
624 643
@@ -666,6 +685,8 @@ static int init_tx_pools(struct net_device *netdev)
666 tx_pool->producer_index = 0; 685 tx_pool->producer_index = 0;
667 } 686 }
668 687
688 adapter->num_active_tx_pools = tx_subcrqs;
689
669 return 0; 690 return 0;
670} 691}
671 692
@@ -756,6 +777,12 @@ static int ibmvnic_login(struct net_device *netdev)
756 } 777 }
757 } while (adapter->renegotiate); 778 } while (adapter->renegotiate);
758 779
780 /* handle pending MAC address changes after successful login */
781 if (adapter->mac_change_pending) {
782 __ibmvnic_set_mac(netdev, &adapter->desired.mac);
783 adapter->mac_change_pending = false;
784 }
785
759 return 0; 786 return 0;
760} 787}
761 788
@@ -854,7 +881,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
854 if (adapter->vpd->buff) 881 if (adapter->vpd->buff)
855 len = adapter->vpd->len; 882 len = adapter->vpd->len;
856 883
857 reinit_completion(&adapter->fw_done); 884 init_completion(&adapter->fw_done);
858 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD; 885 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
859 crq.get_vpd_size.cmd = GET_VPD_SIZE; 886 crq.get_vpd_size.cmd = GET_VPD_SIZE;
860 ibmvnic_send_crq(adapter, &crq); 887 ibmvnic_send_crq(adapter, &crq);
@@ -916,6 +943,13 @@ static int init_resources(struct ibmvnic_adapter *adapter)
916 if (!adapter->vpd) 943 if (!adapter->vpd)
917 return -ENOMEM; 944 return -ENOMEM;
918 945
946 /* Vital Product Data (VPD) */
947 rc = ibmvnic_get_vpd(adapter);
948 if (rc) {
949 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
950 return rc;
951 }
952
919 adapter->map_id = 1; 953 adapter->map_id = 1;
920 adapter->napi = kcalloc(adapter->req_rx_queues, 954 adapter->napi = kcalloc(adapter->req_rx_queues,
921 sizeof(struct napi_struct), GFP_KERNEL); 955 sizeof(struct napi_struct), GFP_KERNEL);
@@ -989,15 +1023,10 @@ static int __ibmvnic_open(struct net_device *netdev)
989static int ibmvnic_open(struct net_device *netdev) 1023static int ibmvnic_open(struct net_device *netdev)
990{ 1024{
991 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1025 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
992 int rc, vpd; 1026 int rc;
993 1027
994 mutex_lock(&adapter->reset_lock); 1028 mutex_lock(&adapter->reset_lock);
995 1029
996 if (adapter->mac_change_pending) {
997 __ibmvnic_set_mac(netdev, &adapter->desired.mac);
998 adapter->mac_change_pending = false;
999 }
1000
1001 if (adapter->state != VNIC_CLOSED) { 1030 if (adapter->state != VNIC_CLOSED) {
1002 rc = ibmvnic_login(netdev); 1031 rc = ibmvnic_login(netdev);
1003 if (rc) { 1032 if (rc) {
@@ -1017,11 +1046,6 @@ static int ibmvnic_open(struct net_device *netdev)
1017 rc = __ibmvnic_open(netdev); 1046 rc = __ibmvnic_open(netdev);
1018 netif_carrier_on(netdev); 1047 netif_carrier_on(netdev);
1019 1048
1020 /* Vital Product Data (VPD) */
1021 vpd = ibmvnic_get_vpd(adapter);
1022 if (vpd)
1023 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1024
1025 mutex_unlock(&adapter->reset_lock); 1049 mutex_unlock(&adapter->reset_lock);
1026 1050
1027 return rc; 1051 return rc;
@@ -1275,6 +1299,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1275 unsigned char *dst; 1299 unsigned char *dst;
1276 u64 *handle_array; 1300 u64 *handle_array;
1277 int index = 0; 1301 int index = 0;
1302 u8 proto = 0;
1278 int ret = 0; 1303 int ret = 0;
1279 1304
1280 if (adapter->resetting) { 1305 if (adapter->resetting) {
@@ -1363,17 +1388,18 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1363 } 1388 }
1364 1389
1365 if (skb->protocol == htons(ETH_P_IP)) { 1390 if (skb->protocol == htons(ETH_P_IP)) {
1366 if (ip_hdr(skb)->version == 4) 1391 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1367 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4; 1392 proto = ip_hdr(skb)->protocol;
1368 else if (ip_hdr(skb)->version == 6) 1393 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1369 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6; 1394 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1370 1395 proto = ipv6_hdr(skb)->nexthdr;
1371 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1372 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1373 else if (ip_hdr(skb)->protocol != IPPROTO_TCP)
1374 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1375 } 1396 }
1376 1397
1398 if (proto == IPPROTO_TCP)
1399 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1400 else if (proto == IPPROTO_UDP)
1401 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1402
1377 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1403 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1378 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD; 1404 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
1379 hdrs += 2; 1405 hdrs += 2;
@@ -1527,7 +1553,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1527 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1553 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1528 struct sockaddr *addr = p; 1554 struct sockaddr *addr = p;
1529 1555
1530 if (adapter->state != VNIC_OPEN) { 1556 if (adapter->state == VNIC_PROBED) {
1531 memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr)); 1557 memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
1532 adapter->mac_change_pending = true; 1558 adapter->mac_change_pending = true;
1533 return 0; 1559 return 0;
@@ -1545,6 +1571,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1545static int do_reset(struct ibmvnic_adapter *adapter, 1571static int do_reset(struct ibmvnic_adapter *adapter,
1546 struct ibmvnic_rwi *rwi, u32 reset_state) 1572 struct ibmvnic_rwi *rwi, u32 reset_state)
1547{ 1573{
1574 u64 old_num_rx_queues, old_num_tx_queues;
1548 struct net_device *netdev = adapter->netdev; 1575 struct net_device *netdev = adapter->netdev;
1549 int i, rc; 1576 int i, rc;
1550 1577
@@ -1554,6 +1581,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1554 netif_carrier_off(netdev); 1581 netif_carrier_off(netdev);
1555 adapter->reset_reason = rwi->reset_reason; 1582 adapter->reset_reason = rwi->reset_reason;
1556 1583
1584 old_num_rx_queues = adapter->req_rx_queues;
1585 old_num_tx_queues = adapter->req_tx_queues;
1586
1557 if (rwi->reset_reason == VNIC_RESET_MOBILITY) { 1587 if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
1558 rc = ibmvnic_reenable_crq_queue(adapter); 1588 rc = ibmvnic_reenable_crq_queue(adapter);
1559 if (rc) 1589 if (rc)
@@ -1598,6 +1628,12 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1598 rc = init_resources(adapter); 1628 rc = init_resources(adapter);
1599 if (rc) 1629 if (rc)
1600 return rc; 1630 return rc;
1631 } else if (adapter->req_rx_queues != old_num_rx_queues ||
1632 adapter->req_tx_queues != old_num_tx_queues) {
1633 release_rx_pools(adapter);
1634 release_tx_pools(adapter);
1635 init_rx_pools(netdev);
1636 init_tx_pools(netdev);
1601 } else { 1637 } else {
1602 rc = reset_tx_pools(adapter); 1638 rc = reset_tx_pools(adapter);
1603 if (rc) 1639 if (rc)
@@ -3345,7 +3381,11 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
3345 return; 3381 return;
3346 } 3382 }
3347 3383
3384 adapter->ip_offload_ctrl.len =
3385 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3348 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB); 3386 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
3387 adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
3388 adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
3349 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum; 3389 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
3350 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum; 3390 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
3351 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum; 3391 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
@@ -3585,7 +3625,17 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
3585 *req_value, 3625 *req_value,
3586 (long int)be64_to_cpu(crq->request_capability_rsp. 3626 (long int)be64_to_cpu(crq->request_capability_rsp.
3587 number), name); 3627 number), name);
3588 *req_value = be64_to_cpu(crq->request_capability_rsp.number); 3628
3629 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
3630 REQ_MTU) {
3631 pr_err("mtu of %llu is not supported. Reverting.\n",
3632 *req_value);
3633 *req_value = adapter->fallback.mtu;
3634 } else {
3635 *req_value =
3636 be64_to_cpu(crq->request_capability_rsp.number);
3637 }
3638
3589 ibmvnic_send_req_caps(adapter, 1); 3639 ibmvnic_send_req_caps(adapter, 1);
3590 return; 3640 return;
3591 default: 3641 default:
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 4487f1e2c266..3aec42118db2 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1091,6 +1091,8 @@ struct ibmvnic_adapter {
1091 u64 opt_rxba_entries_per_subcrq; 1091 u64 opt_rxba_entries_per_subcrq;
1092 __be64 tx_rx_desc_req; 1092 __be64 tx_rx_desc_req;
1093 u8 map_id; 1093 u8 map_id;
1094 u64 num_active_rx_pools;
1095 u64 num_active_tx_pools;
1094 1096
1095 struct tasklet_struct tasklet; 1097 struct tasklet_struct tasklet;
1096 enum vnic_state state; 1098 enum vnic_state state;
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index d7bdea79e9fa..8fd2458060a0 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -331,7 +331,8 @@ struct e1000_adapter {
331enum e1000_state_t { 331enum e1000_state_t {
332 __E1000_TESTING, 332 __E1000_TESTING,
333 __E1000_RESETTING, 333 __E1000_RESETTING,
334 __E1000_DOWN 334 __E1000_DOWN,
335 __E1000_DISABLED
335}; 336};
336 337
337#undef pr_fmt 338#undef pr_fmt
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 1982f7917a8d..3dd4aeb2706d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -945,7 +945,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter,
945static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 945static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
946{ 946{
947 struct net_device *netdev; 947 struct net_device *netdev;
948 struct e1000_adapter *adapter; 948 struct e1000_adapter *adapter = NULL;
949 struct e1000_hw *hw; 949 struct e1000_hw *hw;
950 950
951 static int cards_found; 951 static int cards_found;
@@ -955,6 +955,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
955 u16 tmp = 0; 955 u16 tmp = 0;
956 u16 eeprom_apme_mask = E1000_EEPROM_APME; 956 u16 eeprom_apme_mask = E1000_EEPROM_APME;
957 int bars, need_ioport; 957 int bars, need_ioport;
958 bool disable_dev = false;
958 959
959 /* do not allocate ioport bars when not needed */ 960 /* do not allocate ioport bars when not needed */
960 need_ioport = e1000_is_need_ioport(pdev); 961 need_ioport = e1000_is_need_ioport(pdev);
@@ -1259,11 +1260,13 @@ err_mdio_ioremap:
1259 iounmap(hw->ce4100_gbe_mdio_base_virt); 1260 iounmap(hw->ce4100_gbe_mdio_base_virt);
1260 iounmap(hw->hw_addr); 1261 iounmap(hw->hw_addr);
1261err_ioremap: 1262err_ioremap:
1263 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1262 free_netdev(netdev); 1264 free_netdev(netdev);
1263err_alloc_etherdev: 1265err_alloc_etherdev:
1264 pci_release_selected_regions(pdev, bars); 1266 pci_release_selected_regions(pdev, bars);
1265err_pci_reg: 1267err_pci_reg:
1266 pci_disable_device(pdev); 1268 if (!adapter || disable_dev)
1269 pci_disable_device(pdev);
1267 return err; 1270 return err;
1268} 1271}
1269 1272
@@ -1281,6 +1284,7 @@ static void e1000_remove(struct pci_dev *pdev)
1281 struct net_device *netdev = pci_get_drvdata(pdev); 1284 struct net_device *netdev = pci_get_drvdata(pdev);
1282 struct e1000_adapter *adapter = netdev_priv(netdev); 1285 struct e1000_adapter *adapter = netdev_priv(netdev);
1283 struct e1000_hw *hw = &adapter->hw; 1286 struct e1000_hw *hw = &adapter->hw;
1287 bool disable_dev;
1284 1288
1285 e1000_down_and_stop(adapter); 1289 e1000_down_and_stop(adapter);
1286 e1000_release_manageability(adapter); 1290 e1000_release_manageability(adapter);
@@ -1299,9 +1303,11 @@ static void e1000_remove(struct pci_dev *pdev)
1299 iounmap(hw->flash_address); 1303 iounmap(hw->flash_address);
1300 pci_release_selected_regions(pdev, adapter->bars); 1304 pci_release_selected_regions(pdev, adapter->bars);
1301 1305
1306 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1302 free_netdev(netdev); 1307 free_netdev(netdev);
1303 1308
1304 pci_disable_device(pdev); 1309 if (disable_dev)
1310 pci_disable_device(pdev);
1305} 1311}
1306 1312
1307/** 1313/**
@@ -5156,7 +5162,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5156 if (netif_running(netdev)) 5162 if (netif_running(netdev))
5157 e1000_free_irq(adapter); 5163 e1000_free_irq(adapter);
5158 5164
5159 pci_disable_device(pdev); 5165 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5166 pci_disable_device(pdev);
5160 5167
5161 return 0; 5168 return 0;
5162} 5169}
@@ -5200,6 +5207,10 @@ static int e1000_resume(struct pci_dev *pdev)
5200 pr_err("Cannot enable PCI device from suspend\n"); 5207 pr_err("Cannot enable PCI device from suspend\n");
5201 return err; 5208 return err;
5202 } 5209 }
5210
5211 /* flush memory to make sure state is correct */
5212 smp_mb__before_atomic();
5213 clear_bit(__E1000_DISABLED, &adapter->flags);
5203 pci_set_master(pdev); 5214 pci_set_master(pdev);
5204 5215
5205 pci_enable_wake(pdev, PCI_D3hot, 0); 5216 pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -5274,7 +5285,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5274 5285
5275 if (netif_running(netdev)) 5286 if (netif_running(netdev))
5276 e1000_down(adapter); 5287 e1000_down(adapter);
5277 pci_disable_device(pdev); 5288
5289 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5290 pci_disable_device(pdev);
5278 5291
5279 /* Request a slot slot reset. */ 5292 /* Request a slot slot reset. */
5280 return PCI_ERS_RESULT_NEED_RESET; 5293 return PCI_ERS_RESULT_NEED_RESET;
@@ -5302,6 +5315,10 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5302 pr_err("Cannot re-enable PCI device after reset.\n"); 5315 pr_err("Cannot re-enable PCI device after reset.\n");
5303 return PCI_ERS_RESULT_DISCONNECT; 5316 return PCI_ERS_RESULT_DISCONNECT;
5304 } 5317 }
5318
5319 /* flush memory to make sure state is correct */
5320 smp_mb__before_atomic();
5321 clear_bit(__E1000_DISABLED, &adapter->flags);
5305 pci_set_master(pdev); 5322 pci_set_master(pdev);
5306 5323
5307 pci_enable_wake(pdev, PCI_D3hot, 0); 5324 pci_enable_wake(pdev, PCI_D3hot, 0);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index d6d4ed7acf03..31277d3bb7dc 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1367,6 +1367,9 @@ out:
1367 * Checks to see of the link status of the hardware has changed. If a 1367 * Checks to see of the link status of the hardware has changed. If a
1368 * change in link status has been detected, then we read the PHY registers 1368 * change in link status has been detected, then we read the PHY registers
1369 * to get the current speed/duplex if link exists. 1369 * to get the current speed/duplex if link exists.
1370 *
1371 * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
1372 * up).
1370 **/ 1373 **/
1371static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) 1374static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1372{ 1375{
@@ -1382,7 +1385,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1382 * Change or Rx Sequence Error interrupt. 1385 * Change or Rx Sequence Error interrupt.
1383 */ 1386 */
1384 if (!mac->get_link_status) 1387 if (!mac->get_link_status)
1385 return 0; 1388 return 1;
1386 1389
1387 /* First we want to see if the MII Status Register reports 1390 /* First we want to see if the MII Status Register reports
1388 * link. If so, then we want to get the current speed/duplex 1391 * link. If so, then we want to get the current speed/duplex
@@ -1613,10 +1616,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1613 * different link partner. 1616 * different link partner.
1614 */ 1617 */
1615 ret_val = e1000e_config_fc_after_link_up(hw); 1618 ret_val = e1000e_config_fc_after_link_up(hw);
1616 if (ret_val) 1619 if (ret_val) {
1617 e_dbg("Error configuring flow control\n"); 1620 e_dbg("Error configuring flow control\n");
1621 return ret_val;
1622 }
1618 1623
1619 return ret_val; 1624 return 1;
1620} 1625}
1621 1626
1622static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) 1627static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 7f605221a686..a434fecfdfeb 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2463,7 +2463,6 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
2463 return err; 2463 return err;
2464} 2464}
2465 2465
2466#ifdef CONFIG_PM
2467/** 2466/**
2468 * fm10k_resume - Generic PM resume hook 2467 * fm10k_resume - Generic PM resume hook
2469 * @dev: generic device structure 2468 * @dev: generic device structure
@@ -2472,7 +2471,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
2472 * suspend or hibernation. This function does not need to handle lower PCIe 2471 * suspend or hibernation. This function does not need to handle lower PCIe
2473 * device state as the stack takes care of that for us. 2472 * device state as the stack takes care of that for us.
2474 **/ 2473 **/
2475static int fm10k_resume(struct device *dev) 2474static int __maybe_unused fm10k_resume(struct device *dev)
2476{ 2475{
2477 struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev)); 2476 struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
2478 struct net_device *netdev = interface->netdev; 2477 struct net_device *netdev = interface->netdev;
@@ -2499,7 +2498,7 @@ static int fm10k_resume(struct device *dev)
2499 * system suspend or hibernation. This function does not need to handle lower 2498 * system suspend or hibernation. This function does not need to handle lower
2500 * PCIe device state as the stack takes care of that for us. 2499 * PCIe device state as the stack takes care of that for us.
2501 **/ 2500 **/
2502static int fm10k_suspend(struct device *dev) 2501static int __maybe_unused fm10k_suspend(struct device *dev)
2503{ 2502{
2504 struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev)); 2503 struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
2505 struct net_device *netdev = interface->netdev; 2504 struct net_device *netdev = interface->netdev;
@@ -2511,8 +2510,6 @@ static int fm10k_suspend(struct device *dev)
2511 return 0; 2510 return 0;
2512} 2511}
2513 2512
2514#endif /* CONFIG_PM */
2515
2516/** 2513/**
2517 * fm10k_io_error_detected - called when PCI error is detected 2514 * fm10k_io_error_detected - called when PCI error is detected
2518 * @pdev: Pointer to PCI device 2515 * @pdev: Pointer to PCI device
@@ -2643,11 +2640,9 @@ static struct pci_driver fm10k_driver = {
2643 .id_table = fm10k_pci_tbl, 2640 .id_table = fm10k_pci_tbl,
2644 .probe = fm10k_probe, 2641 .probe = fm10k_probe,
2645 .remove = fm10k_remove, 2642 .remove = fm10k_remove,
2646#ifdef CONFIG_PM
2647 .driver = { 2643 .driver = {
2648 .pm = &fm10k_pm_ops, 2644 .pm = &fm10k_pm_ops,
2649 }, 2645 },
2650#endif /* CONFIG_PM */
2651 .sriov_configure = fm10k_iov_configure, 2646 .sriov_configure = fm10k_iov_configure,
2652 .err_handler = &fm10k_err_handler 2647 .err_handler = &fm10k_err_handler
2653}; 2648};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 321d8be80871..af792112a2d3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1573,11 +1573,18 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
1573 else 1573 else
1574 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); 1574 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1575 1575
1576 /* Copy the address first, so that we avoid a possible race with
1577 * .set_rx_mode(). If we copy after changing the address in the filter
1578 * list, we might open ourselves to a narrow race window where
1579 * .set_rx_mode could delete our dev_addr filter and prevent traffic
1580 * from passing.
1581 */
1582 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1583
1576 spin_lock_bh(&vsi->mac_filter_hash_lock); 1584 spin_lock_bh(&vsi->mac_filter_hash_lock);
1577 i40e_del_mac_filter(vsi, netdev->dev_addr); 1585 i40e_del_mac_filter(vsi, netdev->dev_addr);
1578 i40e_add_mac_filter(vsi, addr->sa_data); 1586 i40e_add_mac_filter(vsi, addr->sa_data);
1579 spin_unlock_bh(&vsi->mac_filter_hash_lock); 1587 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1580 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1581 if (vsi->type == I40E_VSI_MAIN) { 1588 if (vsi->type == I40E_VSI_MAIN) {
1582 i40e_status ret; 1589 i40e_status ret;
1583 1590
@@ -1923,6 +1930,14 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1923 struct i40e_netdev_priv *np = netdev_priv(netdev); 1930 struct i40e_netdev_priv *np = netdev_priv(netdev);
1924 struct i40e_vsi *vsi = np->vsi; 1931 struct i40e_vsi *vsi = np->vsi;
1925 1932
1933 /* Under some circumstances, we might receive a request to delete
1934 * our own device address from our uc list. Because we store the
1935 * device address in the VSI's MAC/VLAN filter list, we need to ignore
1936 * such requests and not delete our device address from this list.
1937 */
1938 if (ether_addr_equal(addr, netdev->dev_addr))
1939 return 0;
1940
1926 i40e_del_mac_filter(vsi, addr); 1941 i40e_del_mac_filter(vsi, addr);
1927 1942
1928 return 0; 1943 return 0;
@@ -6038,8 +6053,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6038 /* Set Bit 7 to be valid */ 6053 /* Set Bit 7 to be valid */
6039 mode = I40E_AQ_SET_SWITCH_BIT7_VALID; 6054 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6040 6055
6041 /* Set L4type to both TCP and UDP support */ 6056 /* Set L4type for TCP support */
6042 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_BOTH; 6057 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6043 6058
6044 /* Set cloud filter mode */ 6059 /* Set cloud filter mode */
6045 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL; 6060 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
@@ -6969,18 +6984,18 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
6969 is_valid_ether_addr(filter->src_mac)) || 6984 is_valid_ether_addr(filter->src_mac)) ||
6970 (is_multicast_ether_addr(filter->dst_mac) && 6985 (is_multicast_ether_addr(filter->dst_mac) &&
6971 is_multicast_ether_addr(filter->src_mac))) 6986 is_multicast_ether_addr(filter->src_mac)))
6972 return -EINVAL; 6987 return -EOPNOTSUPP;
6973 6988
6974 /* Make sure port is specified, otherwise bail out, for channel 6989 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
6975 * specific cloud filter needs 'L4 port' to be non-zero 6990 * ports are not supported via big buffer now.
6976 */ 6991 */
6977 if (!filter->dst_port) 6992 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
6978 return -EINVAL; 6993 return -EOPNOTSUPP;
6979 6994
6980 /* adding filter using src_port/src_ip is not supported at this stage */ 6995 /* adding filter using src_port/src_ip is not supported at this stage */
6981 if (filter->src_port || filter->src_ipv4 || 6996 if (filter->src_port || filter->src_ipv4 ||
6982 !ipv6_addr_any(&filter->ip.v6.src_ip6)) 6997 !ipv6_addr_any(&filter->ip.v6.src_ip6))
6983 return -EINVAL; 6998 return -EOPNOTSUPP;
6984 6999
6985 /* copy element needed to add cloud filter from filter */ 7000 /* copy element needed to add cloud filter from filter */
6986 i40e_set_cld_element(filter, &cld_filter.element); 7001 i40e_set_cld_element(filter, &cld_filter.element);
@@ -6991,7 +7006,7 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
6991 is_multicast_ether_addr(filter->src_mac)) { 7006 is_multicast_ether_addr(filter->src_mac)) {
6992 /* MAC + IP : unsupported mode */ 7007 /* MAC + IP : unsupported mode */
6993 if (filter->dst_ipv4) 7008 if (filter->dst_ipv4)
6994 return -EINVAL; 7009 return -EOPNOTSUPP;
6995 7010
6996 /* since we validated that L4 port must be valid before 7011 /* since we validated that L4 port must be valid before
6997 * we get here, start with respective "flags" value 7012 * we get here, start with respective "flags" value
@@ -7356,7 +7371,7 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
7356 7371
7357 if (tc < 0) { 7372 if (tc < 0) {
7358 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n"); 7373 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
7359 return -EINVAL; 7374 return -EOPNOTSUPP;
7360 } 7375 }
7361 7376
7362 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || 7377 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
@@ -7490,6 +7505,8 @@ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
7490{ 7505{
7491 struct i40e_vsi *vsi = np->vsi; 7506 struct i40e_vsi *vsi = np->vsi;
7492 7507
7508 if (!tc_can_offload(vsi->netdev))
7509 return -EOPNOTSUPP;
7493 if (cls_flower->common.chain_index) 7510 if (cls_flower->common.chain_index)
7494 return -EOPNOTSUPP; 7511 return -EOPNOTSUPP;
7495 7512
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 4566d66ffc7c..5bc2748ac468 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3047,10 +3047,30 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
3047 /* Walk through fragments adding latest fragment, testing it, and 3047 /* Walk through fragments adding latest fragment, testing it, and
3048 * then removing stale fragments from the sum. 3048 * then removing stale fragments from the sum.
3049 */ 3049 */
3050 stale = &skb_shinfo(skb)->frags[0]; 3050 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
3051 for (;;) { 3051 int stale_size = skb_frag_size(stale);
3052
3052 sum += skb_frag_size(frag++); 3053 sum += skb_frag_size(frag++);
3053 3054
3055 /* The stale fragment may present us with a smaller
3056 * descriptor than the actual fragment size. To account
3057 * for that we need to remove all the data on the front and
3058 * figure out what the remainder would be in the last
3059 * descriptor associated with the fragment.
3060 */
3061 if (stale_size > I40E_MAX_DATA_PER_TXD) {
3062 int align_pad = -(stale->page_offset) &
3063 (I40E_MAX_READ_REQ_SIZE - 1);
3064
3065 sum -= align_pad;
3066 stale_size -= align_pad;
3067
3068 do {
3069 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3070 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3071 } while (stale_size > I40E_MAX_DATA_PER_TXD);
3072 }
3073
3054 /* if sum is negative we failed to make sufficient progress */ 3074 /* if sum is negative we failed to make sufficient progress */
3055 if (sum < 0) 3075 if (sum < 0)
3056 return true; 3076 return true;
@@ -3058,7 +3078,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
3058 if (!nr_frags--) 3078 if (!nr_frags--)
3059 break; 3079 break;
3060 3080
3061 sum -= skb_frag_size(stale++); 3081 sum -= stale_size;
3062 } 3082 }
3063 3083
3064 return false; 3084 return false;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 50864f99446d..1ba29bb85b67 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -2012,10 +2012,30 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
2012 /* Walk through fragments adding latest fragment, testing it, and 2012 /* Walk through fragments adding latest fragment, testing it, and
2013 * then removing stale fragments from the sum. 2013 * then removing stale fragments from the sum.
2014 */ 2014 */
2015 stale = &skb_shinfo(skb)->frags[0]; 2015 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2016 for (;;) { 2016 int stale_size = skb_frag_size(stale);
2017
2017 sum += skb_frag_size(frag++); 2018 sum += skb_frag_size(frag++);
2018 2019
2020 /* The stale fragment may present us with a smaller
2021 * descriptor than the actual fragment size. To account
2022 * for that we need to remove all the data on the front and
2023 * figure out what the remainder would be in the last
2024 * descriptor associated with the fragment.
2025 */
2026 if (stale_size > I40E_MAX_DATA_PER_TXD) {
2027 int align_pad = -(stale->page_offset) &
2028 (I40E_MAX_READ_REQ_SIZE - 1);
2029
2030 sum -= align_pad;
2031 stale_size -= align_pad;
2032
2033 do {
2034 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
2035 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
2036 } while (stale_size > I40E_MAX_DATA_PER_TXD);
2037 }
2038
2019 /* if sum is negative we failed to make sufficient progress */ 2039 /* if sum is negative we failed to make sufficient progress */
2020 if (sum < 0) 2040 if (sum < 0)
2021 return true; 2041 return true;
@@ -2023,7 +2043,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
2023 if (!nr_frags--) 2043 if (!nr_frags--)
2024 break; 2044 break;
2025 2045
2026 sum -= skb_frag_size(stale++); 2046 sum -= stale_size;
2027 } 2047 }
2028 2048
2029 return false; 2049 return false;
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index c9798210fa0f..0495487f7b42 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -344,7 +344,8 @@ static int orion_mdio_probe(struct platform_device *pdev)
344 dev->regs + MVMDIO_ERR_INT_MASK); 344 dev->regs + MVMDIO_ERR_INT_MASK);
345 345
346 } else if (dev->err_interrupt == -EPROBE_DEFER) { 346 } else if (dev->err_interrupt == -EPROBE_DEFER) {
347 return -EPROBE_DEFER; 347 ret = -EPROBE_DEFER;
348 goto out_mdio;
348 } 349 }
349 350
350 if (pdev->dev.of_node) 351 if (pdev->dev.of_node)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index bc93b69cfd1e..a539263cd79c 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1214,6 +1214,10 @@ static void mvneta_port_disable(struct mvneta_port *pp)
1214 val &= ~MVNETA_GMAC0_PORT_ENABLE; 1214 val &= ~MVNETA_GMAC0_PORT_ENABLE;
1215 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); 1215 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1216 1216
1217 pp->link = 0;
1218 pp->duplex = -1;
1219 pp->speed = 0;
1220
1217 udelay(200); 1221 udelay(200);
1218} 1222}
1219 1223
@@ -1958,9 +1962,9 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
1958 1962
1959 if (!mvneta_rxq_desc_is_first_last(rx_status) || 1963 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
1960 (rx_status & MVNETA_RXD_ERR_SUMMARY)) { 1964 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
1965 mvneta_rx_error(pp, rx_desc);
1961err_drop_frame: 1966err_drop_frame:
1962 dev->stats.rx_errors++; 1967 dev->stats.rx_errors++;
1963 mvneta_rx_error(pp, rx_desc);
1964 /* leave the descriptor untouched */ 1968 /* leave the descriptor untouched */
1965 continue; 1969 continue;
1966 } 1970 }
@@ -3011,7 +3015,7 @@ static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
3011{ 3015{
3012 int queue; 3016 int queue;
3013 3017
3014 for (queue = 0; queue < txq_number; queue++) 3018 for (queue = 0; queue < rxq_number; queue++)
3015 mvneta_rxq_deinit(pp, &pp->rxqs[queue]); 3019 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
3016} 3020}
3017 3021
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index d83a78be98a2..634b2f41cc9e 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -85,7 +85,7 @@
85 85
86/* RSS Registers */ 86/* RSS Registers */
87#define MVPP22_RSS_INDEX 0x1500 87#define MVPP22_RSS_INDEX 0x1500
88#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) ((idx) << 8) 88#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx)
89#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8) 89#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8)
90#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16) 90#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16)
91#define MVPP22_RSS_TABLE_ENTRY 0x1508 91#define MVPP22_RSS_TABLE_ENTRY 0x1508
@@ -5598,7 +5598,7 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
5598 u32 txq_dma; 5598 u32 txq_dma;
5599 5599
5600 /* Allocate memory for TX descriptors */ 5600 /* Allocate memory for TX descriptors */
5601 aggr_txq->descs = dma_alloc_coherent(&pdev->dev, 5601 aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
5602 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 5602 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
5603 &aggr_txq->descs_dma, GFP_KERNEL); 5603 &aggr_txq->descs_dma, GFP_KERNEL);
5604 if (!aggr_txq->descs) 5604 if (!aggr_txq->descs)
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 6e423f098a60..31efc47c847e 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -4081,7 +4081,6 @@ static void skge_remove(struct pci_dev *pdev)
4081 if (hw->ports > 1) { 4081 if (hw->ports > 1) {
4082 skge_write32(hw, B0_IMSK, 0); 4082 skge_write32(hw, B0_IMSK, 0);
4083 skge_read32(hw, B0_IMSK); 4083 skge_read32(hw, B0_IMSK);
4084 free_irq(pdev->irq, hw);
4085 } 4084 }
4086 spin_unlock_irq(&hw->hw_lock); 4085 spin_unlock_irq(&hw->hw_lock);
4087 4086
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 54adfd967858..fc67e35b253e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1961,11 +1961,12 @@ static int mtk_hw_init(struct mtk_eth *eth)
1961 /* set GE2 TUNE */ 1961 /* set GE2 TUNE */
1962 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); 1962 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
1963 1963
1964 /* GE1, Force 1000M/FD, FC ON */ 1964 /* Set linkdown as the default for each GMAC. Its own MCR would be set
1965 mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0)); 1965 * up with the more appropriate value when mtk_phy_link_adjust call is
1966 1966 * being invoked.
1967 /* GE2, Force 1000M/FD, FC ON */ 1967 */
1968 mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1)); 1968 for (i = 0; i < MTK_MAC_COUNT; i++)
1969 mtk_w32(eth, 0, MTK_MAC_MCR(i));
1969 1970
1970 /* Indicates CDM to parse the MTK special tag from CPU 1971 /* Indicates CDM to parse the MTK special tag from CPU
1971 * which also is working out for untag packets. 1972 * which also is working out for untag packets.
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index e0eb695318e6..1fa4849a6f56 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -188,7 +188,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
188 struct net_device *dev = mdev->pndev[port]; 188 struct net_device *dev = mdev->pndev[port];
189 struct mlx4_en_priv *priv = netdev_priv(dev); 189 struct mlx4_en_priv *priv = netdev_priv(dev);
190 struct net_device_stats *stats = &dev->stats; 190 struct net_device_stats *stats = &dev->stats;
191 struct mlx4_cmd_mailbox *mailbox; 191 struct mlx4_cmd_mailbox *mailbox, *mailbox_priority;
192 u64 in_mod = reset << 8 | port; 192 u64 in_mod = reset << 8 | port;
193 int err; 193 int err;
194 int i, counter_index; 194 int i, counter_index;
@@ -198,6 +198,13 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
198 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); 198 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
199 if (IS_ERR(mailbox)) 199 if (IS_ERR(mailbox))
200 return PTR_ERR(mailbox); 200 return PTR_ERR(mailbox);
201
202 mailbox_priority = mlx4_alloc_cmd_mailbox(mdev->dev);
203 if (IS_ERR(mailbox_priority)) {
204 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
205 return PTR_ERR(mailbox_priority);
206 }
207
201 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0, 208 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
202 MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, 209 MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
203 MLX4_CMD_NATIVE); 210 MLX4_CMD_NATIVE);
@@ -206,6 +213,28 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
206 213
207 mlx4_en_stats = mailbox->buf; 214 mlx4_en_stats = mailbox->buf;
208 215
216 memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
217 counter_index = mlx4_get_default_counter_index(mdev->dev, port);
218 err = mlx4_get_counter_stats(mdev->dev, counter_index,
219 &tmp_counter_stats, reset);
220
221 /* 0xffs indicates invalid value */
222 memset(mailbox_priority->buf, 0xff,
223 sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
224
225 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
226 memset(mailbox_priority->buf, 0,
227 sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
228 err = mlx4_cmd_box(mdev->dev, 0, mailbox_priority->dma,
229 in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
230 0, MLX4_CMD_DUMP_ETH_STATS,
231 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
232 if (err)
233 goto out;
234 }
235
236 flowstats = mailbox_priority->buf;
237
209 spin_lock_bh(&priv->stats_lock); 238 spin_lock_bh(&priv->stats_lock);
210 239
211 mlx4_en_fold_software_stats(dev); 240 mlx4_en_fold_software_stats(dev);
@@ -345,31 +374,6 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
345 priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan); 374 priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan);
346 priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan); 375 priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan);
347 376
348 spin_unlock_bh(&priv->stats_lock);
349
350 memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
351 counter_index = mlx4_get_default_counter_index(mdev->dev, port);
352 err = mlx4_get_counter_stats(mdev->dev, counter_index,
353 &tmp_counter_stats, reset);
354
355 /* 0xffs indicates invalid value */
356 memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
357
358 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
359 memset(mailbox->buf, 0,
360 sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
361 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma,
362 in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
363 0, MLX4_CMD_DUMP_ETH_STATS,
364 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
365 if (err)
366 goto out;
367 }
368
369 flowstats = mailbox->buf;
370
371 spin_lock_bh(&priv->stats_lock);
372
373 if (tmp_counter_stats.counter_mode == 0) { 377 if (tmp_counter_stats.counter_mode == 0) {
374 priv->pf_stats.rx_bytes = be64_to_cpu(tmp_counter_stats.rx_bytes); 378 priv->pf_stats.rx_bytes = be64_to_cpu(tmp_counter_stats.rx_bytes);
375 priv->pf_stats.tx_bytes = be64_to_cpu(tmp_counter_stats.tx_bytes); 379 priv->pf_stats.tx_bytes = be64_to_cpu(tmp_counter_stats.tx_bytes);
@@ -410,6 +414,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
410 414
411out: 415out:
412 mlx4_free_cmd_mailbox(mdev->dev, mailbox); 416 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
417 mlx4_free_cmd_mailbox(mdev->dev, mailbox_priority);
413 return err; 418 return err;
414} 419}
415 420
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 88699b181946..946d9db7c8c2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -185,7 +185,7 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
185 if (priv->mdev->dev->caps.flags & 185 if (priv->mdev->dev->caps.flags &
186 MLX4_DEV_CAP_FLAG_UC_LOOPBACK) { 186 MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
187 buf[3] = mlx4_en_test_registers(priv); 187 buf[3] = mlx4_en_test_registers(priv);
188 if (priv->port_up) 188 if (priv->port_up && dev->mtu >= MLX4_SELFTEST_LB_MIN_MTU)
189 buf[4] = mlx4_en_test_loopback(priv); 189 buf[4] = mlx4_en_test_loopback(priv);
190 } 190 }
191 191
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 1856e279a7e0..2b72677eccd4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -153,6 +153,9 @@
153#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN) 153#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
154#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN) 154#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
155#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN) 155#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
156#define PREAMBLE_LEN 8
157#define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \
158 ETH_HLEN + PREAMBLE_LEN)
156 159
157#define MLX4_EN_MIN_MTU 46 160#define MLX4_EN_MIN_MTU 46
158/* VLAN_HLEN is added twice,to support skb vlan tagged with multiple 161/* VLAN_HLEN is added twice,to support skb vlan tagged with multiple
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 04304dd894c6..606a0e0beeae 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -611,7 +611,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
611 MLX4_MAX_PORTS; 611 MLX4_MAX_PORTS;
612 else 612 else
613 res_alloc->guaranteed[t] = 0; 613 res_alloc->guaranteed[t] = 0;
614 res_alloc->res_free -= res_alloc->guaranteed[t];
615 break; 614 break;
616 default: 615 default:
617 break; 616 break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 1fffdebbc9e8..e9a1fbcc4adf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -362,7 +362,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
362 case MLX5_CMD_OP_QUERY_VPORT_COUNTER: 362 case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
363 case MLX5_CMD_OP_ALLOC_Q_COUNTER: 363 case MLX5_CMD_OP_ALLOC_Q_COUNTER:
364 case MLX5_CMD_OP_QUERY_Q_COUNTER: 364 case MLX5_CMD_OP_QUERY_Q_COUNTER:
365 case MLX5_CMD_OP_SET_RATE_LIMIT: 365 case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
366 case MLX5_CMD_OP_QUERY_RATE_LIMIT: 366 case MLX5_CMD_OP_QUERY_RATE_LIMIT:
367 case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT: 367 case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
368 case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT: 368 case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
@@ -505,7 +505,7 @@ const char *mlx5_command_str(int command)
505 MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); 505 MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
506 MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); 506 MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
507 MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); 507 MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
508 MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT); 508 MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
509 MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT); 509 MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
510 MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT); 510 MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
511 MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT); 511 MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index c0872b3284cb..c2d89bfa1a70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -82,6 +82,9 @@
82 max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req) 82 max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
83#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6) 83#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
84#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8) 84#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
85#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \
86 (cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \
87 MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev))
85 88
86#define MLX5_MPWRQ_LOG_WQE_SZ 18 89#define MLX5_MPWRQ_LOG_WQE_SZ 18
87#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \ 90#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
@@ -590,6 +593,7 @@ struct mlx5e_channel {
590 struct mlx5_core_dev *mdev; 593 struct mlx5_core_dev *mdev;
591 struct hwtstamp_config *tstamp; 594 struct hwtstamp_config *tstamp;
592 int ix; 595 int ix;
596 int cpu;
593}; 597};
594 598
595struct mlx5e_channels { 599struct mlx5e_channels {
@@ -891,7 +895,7 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
891 u16 vid); 895 u16 vid);
892void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv); 896void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
893void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv); 897void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
894void mlx5e_timestamp_set(struct mlx5e_priv *priv); 898void mlx5e_timestamp_init(struct mlx5e_priv *priv);
895 899
896struct mlx5e_redirect_rqt_param { 900struct mlx5e_redirect_rqt_param {
897 bool is_rss; 901 bool is_rss;
@@ -935,8 +939,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
935 u8 cq_period_mode); 939 u8 cq_period_mode);
936void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, 940void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
937 u8 cq_period_mode); 941 u8 cq_period_mode);
938void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev, 942void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
939 struct mlx5e_params *params, u8 rq_type); 943 struct mlx5e_params *params,
944 u8 rq_type);
940 945
941static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) 946static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
942{ 947{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index c6d90b6dd80e..3d46ef48d5b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -274,6 +274,7 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
274static int mlx5e_dbcnl_validate_ets(struct net_device *netdev, 274static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
275 struct ieee_ets *ets) 275 struct ieee_ets *ets)
276{ 276{
277 bool have_ets_tc = false;
277 int bw_sum = 0; 278 int bw_sum = 0;
278 int i; 279 int i;
279 280
@@ -288,11 +289,14 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
288 } 289 }
289 290
290 /* Validate Bandwidth Sum */ 291 /* Validate Bandwidth Sum */
291 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 292 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
292 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) 293 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
294 have_ets_tc = true;
293 bw_sum += ets->tc_tx_bw[i]; 295 bw_sum += ets->tc_tx_bw[i];
296 }
297 }
294 298
295 if (bw_sum != 0 && bw_sum != 100) { 299 if (have_ets_tc && bw_sum != 100) {
296 netdev_err(netdev, 300 netdev_err(netdev,
297 "Failed to validate ETS: BW sum is illegal\n"); 301 "Failed to validate ETS: BW sum is illegal\n");
298 return -EINVAL; 302 return -EINVAL;
@@ -918,8 +922,9 @@ static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
918 922
919static void mlx5e_ets_init(struct mlx5e_priv *priv) 923static void mlx5e_ets_init(struct mlx5e_priv *priv)
920{ 924{
921 int i;
922 struct ieee_ets ets; 925 struct ieee_ets ets;
926 int err;
927 int i;
923 928
924 if (!MLX5_CAP_GEN(priv->mdev, ets)) 929 if (!MLX5_CAP_GEN(priv->mdev, ets))
925 return; 930 return;
@@ -932,11 +937,16 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
932 ets.prio_tc[i] = i; 937 ets.prio_tc[i] = i;
933 } 938 }
934 939
935 /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */ 940 if (ets.ets_cap > 1) {
936 ets.prio_tc[0] = 1; 941 /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
937 ets.prio_tc[1] = 0; 942 ets.prio_tc[0] = 1;
943 ets.prio_tc[1] = 0;
944 }
938 945
939 mlx5e_dcbnl_ieee_setets_core(priv, &ets); 946 err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
947 if (err)
948 netdev_err(priv->netdev,
949 "%s, Failed to init ETS: %d\n", __func__, err);
940} 950}
941 951
942enum { 952enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 23425f028405..ea5fff2c3143 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -207,8 +207,7 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
207 return; 207 return;
208 208
209 mutex_lock(&priv->state_lock); 209 mutex_lock(&priv->state_lock);
210 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) 210 mlx5e_update_stats(priv, true);
211 mlx5e_update_stats(priv, true);
212 mutex_unlock(&priv->state_lock); 211 mutex_unlock(&priv->state_lock);
213 212
214 for (i = 0; i < mlx5e_num_stats_grps; i++) 213 for (i = 0; i < mlx5e_num_stats_grps; i++)
@@ -1523,8 +1522,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
1523 new_channels.params = priv->channels.params; 1522 new_channels.params = priv->channels.params;
1524 MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val); 1523 MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
1525 1524
1526 mlx5e_set_rq_type_params(priv->mdev, &new_channels.params, 1525 new_channels.params.mpwqe_log_stride_sz =
1527 new_channels.params.rq_wq_type); 1526 MLX5E_MPWQE_STRIDE_SZ(priv->mdev, new_val);
1527 new_channels.params.mpwqe_log_num_strides =
1528 MLX5_MPWRQ_LOG_WQE_SZ - new_channels.params.mpwqe_log_stride_sz;
1528 1529
1529 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { 1530 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
1530 priv->channels.params = new_channels.params; 1531 priv->channels.params = new_channels.params;
@@ -1536,6 +1537,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
1536 return err; 1537 return err;
1537 1538
1538 mlx5e_switch_priv_channels(priv, &new_channels, NULL); 1539 mlx5e_switch_priv_channels(priv, &new_channels, NULL);
1540 mlx5e_dbg(DRV, priv, "MLX5E: RxCqeCmprss was turned %s\n",
1541 MLX5E_GET_PFLAG(&priv->channels.params,
1542 MLX5E_PFLAG_RX_CQE_COMPRESS) ? "ON" : "OFF");
1543
1539 return 0; 1544 return 0;
1540} 1545}
1541 1546
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d2b057a3e512..d8aefeed124d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -71,11 +71,6 @@ struct mlx5e_channel_param {
71 struct mlx5e_cq_param icosq_cq; 71 struct mlx5e_cq_param icosq_cq;
72}; 72};
73 73
74static int mlx5e_get_node(struct mlx5e_priv *priv, int ix)
75{
76 return pci_irq_get_node(priv->mdev->pdev, MLX5_EQ_VEC_COMP_BASE + ix);
77}
78
79static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) 74static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
80{ 75{
81 return MLX5_CAP_GEN(mdev, striding_rq) && 76 return MLX5_CAP_GEN(mdev, striding_rq) &&
@@ -83,8 +78,8 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
83 MLX5_CAP_ETH(mdev, reg_umr_sq); 78 MLX5_CAP_ETH(mdev, reg_umr_sq);
84} 79}
85 80
86void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev, 81void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
87 struct mlx5e_params *params, u8 rq_type) 82 struct mlx5e_params *params, u8 rq_type)
88{ 83{
89 params->rq_wq_type = rq_type; 84 params->rq_wq_type = rq_type;
90 params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; 85 params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
@@ -93,10 +88,8 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
93 params->log_rq_size = is_kdump_kernel() ? 88 params->log_rq_size = is_kdump_kernel() ?
94 MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW : 89 MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
95 MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; 90 MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
96 params->mpwqe_log_stride_sz = 91 params->mpwqe_log_stride_sz = MLX5E_MPWQE_STRIDE_SZ(mdev,
97 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ? 92 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
98 MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) :
99 MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
100 params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - 93 params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
101 params->mpwqe_log_stride_sz; 94 params->mpwqe_log_stride_sz;
102 break; 95 break;
@@ -120,13 +113,14 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
120 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); 113 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
121} 114}
122 115
123static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params) 116static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev,
117 struct mlx5e_params *params)
124{ 118{
125 u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) && 119 u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
126 !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ? 120 !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
127 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ : 121 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
128 MLX5_WQ_TYPE_LINKED_LIST; 122 MLX5_WQ_TYPE_LINKED_LIST;
129 mlx5e_set_rq_type_params(mdev, params, rq_type); 123 mlx5e_init_rq_type_params(mdev, params, rq_type);
130} 124}
131 125
132static void mlx5e_update_carrier(struct mlx5e_priv *priv) 126static void mlx5e_update_carrier(struct mlx5e_priv *priv)
@@ -444,17 +438,16 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
444 int wq_sz = mlx5_wq_ll_get_size(&rq->wq); 438 int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
445 int mtt_sz = mlx5e_get_wqe_mtt_sz(); 439 int mtt_sz = mlx5e_get_wqe_mtt_sz();
446 int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1; 440 int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
447 int node = mlx5e_get_node(c->priv, c->ix);
448 int i; 441 int i;
449 442
450 rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info), 443 rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
451 GFP_KERNEL, node); 444 GFP_KERNEL, cpu_to_node(c->cpu));
452 if (!rq->mpwqe.info) 445 if (!rq->mpwqe.info)
453 goto err_out; 446 goto err_out;
454 447
455 /* We allocate more than mtt_sz as we will align the pointer */ 448 /* We allocate more than mtt_sz as we will align the pointer */
456 rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, 449 rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
457 GFP_KERNEL, node); 450 cpu_to_node(c->cpu));
458 if (unlikely(!rq->mpwqe.mtt_no_align)) 451 if (unlikely(!rq->mpwqe.mtt_no_align))
459 goto err_free_wqe_info; 452 goto err_free_wqe_info;
460 453
@@ -562,7 +555,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
562 int err; 555 int err;
563 int i; 556 int i;
564 557
565 rqp->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); 558 rqp->wq.db_numa_node = cpu_to_node(c->cpu);
566 559
567 err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq, 560 err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
568 &rq->wq_ctrl); 561 &rq->wq_ctrl);
@@ -629,8 +622,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
629 default: /* MLX5_WQ_TYPE_LINKED_LIST */ 622 default: /* MLX5_WQ_TYPE_LINKED_LIST */
630 rq->wqe.frag_info = 623 rq->wqe.frag_info =
631 kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info), 624 kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
632 GFP_KERNEL, 625 GFP_KERNEL, cpu_to_node(c->cpu));
633 mlx5e_get_node(c->priv, c->ix));
634 if (!rq->wqe.frag_info) { 626 if (!rq->wqe.frag_info) {
635 err = -ENOMEM; 627 err = -ENOMEM;
636 goto err_rq_wq_destroy; 628 goto err_rq_wq_destroy;
@@ -1000,13 +992,13 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
1000 sq->uar_map = mdev->mlx5e_res.bfreg.map; 992 sq->uar_map = mdev->mlx5e_res.bfreg.map;
1001 sq->min_inline_mode = params->tx_min_inline_mode; 993 sq->min_inline_mode = params->tx_min_inline_mode;
1002 994
1003 param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); 995 param->wq.db_numa_node = cpu_to_node(c->cpu);
1004 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); 996 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
1005 if (err) 997 if (err)
1006 return err; 998 return err;
1007 sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; 999 sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
1008 1000
1009 err = mlx5e_alloc_xdpsq_db(sq, mlx5e_get_node(c->priv, c->ix)); 1001 err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
1010 if (err) 1002 if (err)
1011 goto err_sq_wq_destroy; 1003 goto err_sq_wq_destroy;
1012 1004
@@ -1053,13 +1045,13 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
1053 sq->channel = c; 1045 sq->channel = c;
1054 sq->uar_map = mdev->mlx5e_res.bfreg.map; 1046 sq->uar_map = mdev->mlx5e_res.bfreg.map;
1055 1047
1056 param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); 1048 param->wq.db_numa_node = cpu_to_node(c->cpu);
1057 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); 1049 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
1058 if (err) 1050 if (err)
1059 return err; 1051 return err;
1060 sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; 1052 sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
1061 1053
1062 err = mlx5e_alloc_icosq_db(sq, mlx5e_get_node(c->priv, c->ix)); 1054 err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
1063 if (err) 1055 if (err)
1064 goto err_sq_wq_destroy; 1056 goto err_sq_wq_destroy;
1065 1057
@@ -1126,13 +1118,13 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
1126 if (MLX5_IPSEC_DEV(c->priv->mdev)) 1118 if (MLX5_IPSEC_DEV(c->priv->mdev))
1127 set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); 1119 set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
1128 1120
1129 param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); 1121 param->wq.db_numa_node = cpu_to_node(c->cpu);
1130 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); 1122 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
1131 if (err) 1123 if (err)
1132 return err; 1124 return err;
1133 sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; 1125 sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
1134 1126
1135 err = mlx5e_alloc_txqsq_db(sq, mlx5e_get_node(c->priv, c->ix)); 1127 err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
1136 if (err) 1128 if (err)
1137 goto err_sq_wq_destroy; 1129 goto err_sq_wq_destroy;
1138 1130
@@ -1504,8 +1496,8 @@ static int mlx5e_alloc_cq(struct mlx5e_channel *c,
1504 struct mlx5_core_dev *mdev = c->priv->mdev; 1496 struct mlx5_core_dev *mdev = c->priv->mdev;
1505 int err; 1497 int err;
1506 1498
1507 param->wq.buf_numa_node = mlx5e_get_node(c->priv, c->ix); 1499 param->wq.buf_numa_node = cpu_to_node(c->cpu);
1508 param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); 1500 param->wq.db_numa_node = cpu_to_node(c->cpu);
1509 param->eq_ix = c->ix; 1501 param->eq_ix = c->ix;
1510 1502
1511 err = mlx5e_alloc_cq_common(mdev, param, cq); 1503 err = mlx5e_alloc_cq_common(mdev, param, cq);
@@ -1604,6 +1596,11 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq)
1604 mlx5e_free_cq(cq); 1596 mlx5e_free_cq(cq);
1605} 1597}
1606 1598
1599static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
1600{
1601 return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
1602}
1603
1607static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, 1604static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
1608 struct mlx5e_params *params, 1605 struct mlx5e_params *params,
1609 struct mlx5e_channel_param *cparam) 1606 struct mlx5e_channel_param *cparam)
@@ -1752,12 +1749,13 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1752{ 1749{
1753 struct mlx5e_cq_moder icocq_moder = {0, 0}; 1750 struct mlx5e_cq_moder icocq_moder = {0, 0};
1754 struct net_device *netdev = priv->netdev; 1751 struct net_device *netdev = priv->netdev;
1752 int cpu = mlx5e_get_cpu(priv, ix);
1755 struct mlx5e_channel *c; 1753 struct mlx5e_channel *c;
1756 unsigned int irq; 1754 unsigned int irq;
1757 int err; 1755 int err;
1758 int eqn; 1756 int eqn;
1759 1757
1760 c = kzalloc_node(sizeof(*c), GFP_KERNEL, mlx5e_get_node(priv, ix)); 1758 c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
1761 if (!c) 1759 if (!c)
1762 return -ENOMEM; 1760 return -ENOMEM;
1763 1761
@@ -1765,6 +1763,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1765 c->mdev = priv->mdev; 1763 c->mdev = priv->mdev;
1766 c->tstamp = &priv->tstamp; 1764 c->tstamp = &priv->tstamp;
1767 c->ix = ix; 1765 c->ix = ix;
1766 c->cpu = cpu;
1768 c->pdev = &priv->mdev->pdev->dev; 1767 c->pdev = &priv->mdev->pdev->dev;
1769 c->netdev = priv->netdev; 1768 c->netdev = priv->netdev;
1770 c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); 1769 c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
@@ -1853,8 +1852,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
1853 for (tc = 0; tc < c->num_tc; tc++) 1852 for (tc = 0; tc < c->num_tc; tc++)
1854 mlx5e_activate_txqsq(&c->sq[tc]); 1853 mlx5e_activate_txqsq(&c->sq[tc]);
1855 mlx5e_activate_rq(&c->rq); 1854 mlx5e_activate_rq(&c->rq);
1856 netif_set_xps_queue(c->netdev, 1855 netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
1857 mlx5_get_vector_affinity(c->priv->mdev, c->ix), c->ix);
1858} 1856}
1859 1857
1860static void mlx5e_deactivate_channel(struct mlx5e_channel *c) 1858static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@@ -2671,7 +2669,7 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
2671 netif_carrier_on(netdev); 2669 netif_carrier_on(netdev);
2672} 2670}
2673 2671
2674void mlx5e_timestamp_set(struct mlx5e_priv *priv) 2672void mlx5e_timestamp_init(struct mlx5e_priv *priv)
2675{ 2673{
2676 priv->tstamp.tx_type = HWTSTAMP_TX_OFF; 2674 priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
2677 priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE; 2675 priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
@@ -2692,7 +2690,6 @@ int mlx5e_open_locked(struct net_device *netdev)
2692 mlx5e_activate_priv_channels(priv); 2690 mlx5e_activate_priv_channels(priv);
2693 if (priv->profile->update_carrier) 2691 if (priv->profile->update_carrier)
2694 priv->profile->update_carrier(priv); 2692 priv->profile->update_carrier(priv);
2695 mlx5e_timestamp_set(priv);
2696 2693
2697 if (priv->profile->update_stats) 2694 if (priv->profile->update_stats)
2698 queue_delayed_work(priv->wq, &priv->update_stats_work, 0); 2695 queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
@@ -3221,12 +3218,12 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
3221 return 0; 3218 return 0;
3222} 3219}
3223 3220
3224#define MLX5E_SET_FEATURE(netdev, feature, enable) \ 3221#define MLX5E_SET_FEATURE(features, feature, enable) \
3225 do { \ 3222 do { \
3226 if (enable) \ 3223 if (enable) \
3227 netdev->features |= feature; \ 3224 *features |= feature; \
3228 else \ 3225 else \
3229 netdev->features &= ~feature; \ 3226 *features &= ~feature; \
3230 } while (0) 3227 } while (0)
3231 3228
3232typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable); 3229typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
@@ -3349,6 +3346,7 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
3349#endif 3346#endif
3350 3347
3351static int mlx5e_handle_feature(struct net_device *netdev, 3348static int mlx5e_handle_feature(struct net_device *netdev,
3349 netdev_features_t *features,
3352 netdev_features_t wanted_features, 3350 netdev_features_t wanted_features,
3353 netdev_features_t feature, 3351 netdev_features_t feature,
3354 mlx5e_feature_handler feature_handler) 3352 mlx5e_feature_handler feature_handler)
@@ -3367,34 +3365,40 @@ static int mlx5e_handle_feature(struct net_device *netdev,
3367 return err; 3365 return err;
3368 } 3366 }
3369 3367
3370 MLX5E_SET_FEATURE(netdev, feature, enable); 3368 MLX5E_SET_FEATURE(features, feature, enable);
3371 return 0; 3369 return 0;
3372} 3370}
3373 3371
3374static int mlx5e_set_features(struct net_device *netdev, 3372static int mlx5e_set_features(struct net_device *netdev,
3375 netdev_features_t features) 3373 netdev_features_t features)
3376{ 3374{
3375 netdev_features_t oper_features = netdev->features;
3377 int err; 3376 int err;
3378 3377
3379 err = mlx5e_handle_feature(netdev, features, NETIF_F_LRO, 3378 err = mlx5e_handle_feature(netdev, &oper_features, features,
3380 set_feature_lro); 3379 NETIF_F_LRO, set_feature_lro);
3381 err |= mlx5e_handle_feature(netdev, features, 3380 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3382 NETIF_F_HW_VLAN_CTAG_FILTER, 3381 NETIF_F_HW_VLAN_CTAG_FILTER,
3383 set_feature_cvlan_filter); 3382 set_feature_cvlan_filter);
3384 err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC, 3383 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3385 set_feature_tc_num_filters); 3384 NETIF_F_HW_TC, set_feature_tc_num_filters);
3386 err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL, 3385 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3387 set_feature_rx_all); 3386 NETIF_F_RXALL, set_feature_rx_all);
3388 err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXFCS, 3387 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3389 set_feature_rx_fcs); 3388 NETIF_F_RXFCS, set_feature_rx_fcs);
3390 err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX, 3389 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3391 set_feature_rx_vlan); 3390 NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
3392#ifdef CONFIG_RFS_ACCEL 3391#ifdef CONFIG_RFS_ACCEL
3393 err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE, 3392 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3394 set_feature_arfs); 3393 NETIF_F_NTUPLE, set_feature_arfs);
3395#endif 3394#endif
3396 3395
3397 return err ? -EINVAL : 0; 3396 if (err) {
3397 netdev->features = oper_features;
3398 return -EINVAL;
3399 }
3400
3401 return 0;
3398} 3402}
3399 3403
3400static netdev_features_t mlx5e_fix_features(struct net_device *netdev, 3404static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
@@ -3679,6 +3683,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
3679 struct sk_buff *skb, 3683 struct sk_buff *skb,
3680 netdev_features_t features) 3684 netdev_features_t features)
3681{ 3685{
3686 unsigned int offset = 0;
3682 struct udphdr *udph; 3687 struct udphdr *udph;
3683 u8 proto; 3688 u8 proto;
3684 u16 port; 3689 u16 port;
@@ -3688,7 +3693,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
3688 proto = ip_hdr(skb)->protocol; 3693 proto = ip_hdr(skb)->protocol;
3689 break; 3694 break;
3690 case htons(ETH_P_IPV6): 3695 case htons(ETH_P_IPV6):
3691 proto = ipv6_hdr(skb)->nexthdr; 3696 proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
3692 break; 3697 break;
3693 default: 3698 default:
3694 goto out; 3699 goto out;
@@ -4140,6 +4145,8 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
4140 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); 4145 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
4141 INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work); 4146 INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
4142 INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work); 4147 INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
4148
4149 mlx5e_timestamp_init(priv);
4143} 4150}
4144 4151
4145static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) 4152static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2c43606c26b5..3409d86eb06b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -877,6 +877,8 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
877 877
878 mlx5e_build_rep_params(mdev, &priv->channels.params); 878 mlx5e_build_rep_params(mdev, &priv->channels.params);
879 mlx5e_build_rep_netdev(netdev); 879 mlx5e_build_rep_netdev(netdev);
880
881 mlx5e_timestamp_init(priv);
880} 882}
881 883
882static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) 884static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
index e401d9d245f3..b69a705fd787 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -201,9 +201,15 @@ static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
201 return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER : 201 return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
202 MLX5E_AM_STATS_WORSE; 202 MLX5E_AM_STATS_WORSE;
203 203
204 if (!prev->ppms)
205 return curr->ppms ? MLX5E_AM_STATS_BETTER :
206 MLX5E_AM_STATS_SAME;
207
204 if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) 208 if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
205 return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER : 209 return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
206 MLX5E_AM_STATS_WORSE; 210 MLX5E_AM_STATS_WORSE;
211 if (!prev->epms)
212 return MLX5E_AM_STATS_SAME;
207 213
208 if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) 214 if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
209 return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER : 215 return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 1f1f8af87d4d..5a4608281f38 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -238,15 +238,19 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
238 int err = 0; 238 int err = 0;
239 239
240 /* Temporarily enable local_lb */ 240 /* Temporarily enable local_lb */
241 if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) { 241 err = mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
242 mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb); 242 if (err)
243 if (!lbtp->local_lb) 243 return err;
244 mlx5_nic_vport_update_local_lb(priv->mdev, true); 244
245 if (!lbtp->local_lb) {
246 err = mlx5_nic_vport_update_local_lb(priv->mdev, true);
247 if (err)
248 return err;
245 } 249 }
246 250
247 err = mlx5e_refresh_tirs(priv, true); 251 err = mlx5e_refresh_tirs(priv, true);
248 if (err) 252 if (err)
249 return err; 253 goto out;
250 254
251 lbtp->loopback_ok = false; 255 lbtp->loopback_ok = false;
252 init_completion(&lbtp->comp); 256 init_completion(&lbtp->comp);
@@ -256,16 +260,21 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
256 lbtp->pt.dev = priv->netdev; 260 lbtp->pt.dev = priv->netdev;
257 lbtp->pt.af_packet_priv = lbtp; 261 lbtp->pt.af_packet_priv = lbtp;
258 dev_add_pack(&lbtp->pt); 262 dev_add_pack(&lbtp->pt);
263
264 return 0;
265
266out:
267 if (!lbtp->local_lb)
268 mlx5_nic_vport_update_local_lb(priv->mdev, false);
269
259 return err; 270 return err;
260} 271}
261 272
262static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv, 273static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
263 struct mlx5e_lbt_priv *lbtp) 274 struct mlx5e_lbt_priv *lbtp)
264{ 275{
265 if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) { 276 if (!lbtp->local_lb)
266 if (!lbtp->local_lb) 277 mlx5_nic_vport_update_local_lb(priv->mdev, false);
267 mlx5_nic_vport_update_local_lb(priv->mdev, false);
268 }
269 278
270 dev_remove_pack(&lbtp->pt); 279 dev_remove_pack(&lbtp->pt);
271 mlx5e_refresh_tirs(priv, false); 280 mlx5e_refresh_tirs(priv, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 7d3d503fa675..14d57828945d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -470,7 +470,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
470 break; 470 break;
471 case MLX5_EVENT_TYPE_CQ_ERROR: 471 case MLX5_EVENT_TYPE_CQ_ERROR:
472 cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff; 472 cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
473 mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n", 473 mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
474 cqn, eqe->data.cq_err.syndrome); 474 cqn, eqe->data.cq_err.syndrome);
475 mlx5_cq_event(dev, cqn, eqe->type); 475 mlx5_cq_event(dev, cqn, eqe->type);
476 break; 476 break;
@@ -782,7 +782,7 @@ err1:
782 return err; 782 return err;
783} 783}
784 784
785int mlx5_stop_eqs(struct mlx5_core_dev *dev) 785void mlx5_stop_eqs(struct mlx5_core_dev *dev)
786{ 786{
787 struct mlx5_eq_table *table = &dev->priv.eq_table; 787 struct mlx5_eq_table *table = &dev->priv.eq_table;
788 int err; 788 int err;
@@ -791,22 +791,26 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev)
791 if (MLX5_CAP_GEN(dev, pg)) { 791 if (MLX5_CAP_GEN(dev, pg)) {
792 err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq); 792 err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
793 if (err) 793 if (err)
794 return err; 794 mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n",
795 err);
795 } 796 }
796#endif 797#endif
797 798
798 err = mlx5_destroy_unmap_eq(dev, &table->pages_eq); 799 err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
799 if (err) 800 if (err)
800 return err; 801 mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
802 err);
801 803
802 mlx5_destroy_unmap_eq(dev, &table->async_eq); 804 err = mlx5_destroy_unmap_eq(dev, &table->async_eq);
805 if (err)
806 mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
807 err);
803 mlx5_cmd_use_polling(dev); 808 mlx5_cmd_use_polling(dev);
804 809
805 err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq); 810 err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
806 if (err) 811 if (err)
807 mlx5_cmd_use_events(dev); 812 mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
808 813 err);
809 return err;
810} 814}
811 815
812int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, 816int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
index 3c11d6e2160a..14962969c5ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
@@ -66,6 +66,9 @@ static int mlx5_fpga_mem_read_i2c(struct mlx5_fpga_device *fdev, size_t size,
66 u8 actual_size; 66 u8 actual_size;
67 int err; 67 int err;
68 68
69 if (!size)
70 return -EINVAL;
71
69 if (!fdev->mdev) 72 if (!fdev->mdev)
70 return -ENOTCONN; 73 return -ENOTCONN;
71 74
@@ -95,6 +98,9 @@ static int mlx5_fpga_mem_write_i2c(struct mlx5_fpga_device *fdev, size_t size,
95 u8 actual_size; 98 u8 actual_size;
96 int err; 99 int err;
97 100
101 if (!size)
102 return -EINVAL;
103
98 if (!fdev->mdev) 104 if (!fdev->mdev)
99 return -ENOTCONN; 105 return -ENOTCONN;
100 106
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index c70fd663a633..dfaad9ecb2b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -174,6 +174,8 @@ static void del_hw_fte(struct fs_node *node);
174static void del_sw_flow_table(struct fs_node *node); 174static void del_sw_flow_table(struct fs_node *node);
175static void del_sw_flow_group(struct fs_node *node); 175static void del_sw_flow_group(struct fs_node *node);
176static void del_sw_fte(struct fs_node *node); 176static void del_sw_fte(struct fs_node *node);
177static void del_sw_prio(struct fs_node *node);
178static void del_sw_ns(struct fs_node *node);
177/* Delete rule (destination) is special case that 179/* Delete rule (destination) is special case that
178 * requires to lock the FTE for all the deletion process. 180 * requires to lock the FTE for all the deletion process.
179 */ 181 */
@@ -408,6 +410,16 @@ static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
408 return NULL; 410 return NULL;
409} 411}
410 412
413static void del_sw_ns(struct fs_node *node)
414{
415 kfree(node);
416}
417
418static void del_sw_prio(struct fs_node *node)
419{
420 kfree(node);
421}
422
411static void del_hw_flow_table(struct fs_node *node) 423static void del_hw_flow_table(struct fs_node *node)
412{ 424{
413 struct mlx5_flow_table *ft; 425 struct mlx5_flow_table *ft;
@@ -2064,7 +2076,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
2064 return ERR_PTR(-ENOMEM); 2076 return ERR_PTR(-ENOMEM);
2065 2077
2066 fs_prio->node.type = FS_TYPE_PRIO; 2078 fs_prio->node.type = FS_TYPE_PRIO;
2067 tree_init_node(&fs_prio->node, NULL, NULL); 2079 tree_init_node(&fs_prio->node, NULL, del_sw_prio);
2068 tree_add_node(&fs_prio->node, &ns->node); 2080 tree_add_node(&fs_prio->node, &ns->node);
2069 fs_prio->num_levels = num_levels; 2081 fs_prio->num_levels = num_levels;
2070 fs_prio->prio = prio; 2082 fs_prio->prio = prio;
@@ -2090,7 +2102,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
2090 return ERR_PTR(-ENOMEM); 2102 return ERR_PTR(-ENOMEM);
2091 2103
2092 fs_init_namespace(ns); 2104 fs_init_namespace(ns);
2093 tree_init_node(&ns->node, NULL, NULL); 2105 tree_init_node(&ns->node, NULL, del_sw_ns);
2094 tree_add_node(&ns->node, &prio->node); 2106 tree_add_node(&ns->node, &prio->node);
2095 list_add_tail(&ns->node.list, &prio->node.children); 2107 list_add_tail(&ns->node.list, &prio->node.children);
2096 2108
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 1a0e797ad001..21d29f7936f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -241,7 +241,7 @@ static void print_health_info(struct mlx5_core_dev *dev)
241 u32 fw; 241 u32 fw;
242 int i; 242 int i;
243 243
244 /* If the syndrom is 0, the device is OK and no need to print buffer */ 244 /* If the syndrome is 0, the device is OK and no need to print buffer */
245 if (!ioread8(&h->synd)) 245 if (!ioread8(&h->synd))
246 return; 246 return;
247 247
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 261b95d014a0..a281d95ce17c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -57,7 +57,7 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
57 struct mlx5e_params *params) 57 struct mlx5e_params *params)
58{ 58{
59 /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */ 59 /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
60 mlx5e_set_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST); 60 mlx5e_init_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
61 61
62 /* RQ size in ipoib by default is 512 */ 62 /* RQ size in ipoib by default is 512 */
63 params->log_rq_size = is_kdump_kernel() ? 63 params->log_rq_size = is_kdump_kernel() ?
@@ -86,6 +86,8 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
86 mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev)); 86 mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
87 mlx5i_build_nic_params(mdev, &priv->channels.params); 87 mlx5i_build_nic_params(mdev, &priv->channels.params);
88 88
89 mlx5e_timestamp_init(priv);
90
89 /* netdev init */ 91 /* netdev init */
90 netdev->hw_features |= NETIF_F_SG; 92 netdev->hw_features |= NETIF_F_SG;
91 netdev->hw_features |= NETIF_F_IP_CSUM; 93 netdev->hw_features |= NETIF_F_IP_CSUM;
@@ -450,7 +452,6 @@ static int mlx5i_open(struct net_device *netdev)
450 452
451 mlx5e_refresh_tirs(epriv, false); 453 mlx5e_refresh_tirs(epriv, false);
452 mlx5e_activate_priv_channels(epriv); 454 mlx5e_activate_priv_channels(epriv);
453 mlx5e_timestamp_set(epriv);
454 455
455 mutex_unlock(&epriv->state_lock); 456 mutex_unlock(&epriv->state_lock);
456 return 0; 457 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 4b6cb9b38686..e159243e0fcf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -451,9 +451,13 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
451 451
452 switch (clock->ptp_info.pin_config[pin].func) { 452 switch (clock->ptp_info.pin_config[pin].func) {
453 case PTP_PF_EXTTS: 453 case PTP_PF_EXTTS:
454 ptp_event.index = pin;
455 ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
456 be64_to_cpu(eqe->data.pps.time_stamp));
454 if (clock->pps_info.enabled) { 457 if (clock->pps_info.enabled) {
455 ptp_event.type = PTP_CLOCK_PPSUSR; 458 ptp_event.type = PTP_CLOCK_PPSUSR;
456 ptp_event.pps_times.ts_real = ns_to_timespec64(eqe->data.pps.time_stamp); 459 ptp_event.pps_times.ts_real =
460 ns_to_timespec64(ptp_event.timestamp);
457 } else { 461 } else {
458 ptp_event.type = PTP_CLOCK_EXTTS; 462 ptp_event.type = PTP_CLOCK_EXTTS;
459 } 463 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d4a471a76d82..2ef641c91c26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -319,11 +319,9 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
319{ 319{
320 struct mlx5_priv *priv = &dev->priv; 320 struct mlx5_priv *priv = &dev->priv;
321 struct mlx5_eq_table *table = &priv->eq_table; 321 struct mlx5_eq_table *table = &priv->eq_table;
322 struct irq_affinity irqdesc = {
323 .pre_vectors = MLX5_EQ_VEC_COMP_BASE,
324 };
325 int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq); 322 int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
326 int nvec; 323 int nvec;
324 int err;
327 325
328 nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 326 nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
329 MLX5_EQ_VEC_COMP_BASE; 327 MLX5_EQ_VEC_COMP_BASE;
@@ -333,22 +331,23 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
333 331
334 priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL); 332 priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
335 if (!priv->irq_info) 333 if (!priv->irq_info)
336 goto err_free_msix; 334 return -ENOMEM;
337 335
338 nvec = pci_alloc_irq_vectors_affinity(dev->pdev, 336 nvec = pci_alloc_irq_vectors(dev->pdev,
339 MLX5_EQ_VEC_COMP_BASE + 1, nvec, 337 MLX5_EQ_VEC_COMP_BASE + 1, nvec,
340 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, 338 PCI_IRQ_MSIX);
341 &irqdesc); 339 if (nvec < 0) {
342 if (nvec < 0) 340 err = nvec;
343 return nvec; 341 goto err_free_irq_info;
342 }
344 343
345 table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; 344 table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
346 345
347 return 0; 346 return 0;
348 347
349err_free_msix: 348err_free_irq_info:
350 kfree(priv->irq_info); 349 kfree(priv->irq_info);
351 return -ENOMEM; 350 return err;
352} 351}
353 352
354static void mlx5_free_irq_vectors(struct mlx5_core_dev *dev) 353static void mlx5_free_irq_vectors(struct mlx5_core_dev *dev)
@@ -593,8 +592,7 @@ static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
593 int ret = 0; 592 int ret = 0;
594 593
595 /* Disable local_lb by default */ 594 /* Disable local_lb by default */
596 if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && 595 if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
597 MLX5_CAP_GEN(dev, disable_local_lb))
598 ret = mlx5_nic_vport_update_local_lb(dev, false); 596 ret = mlx5_nic_vport_update_local_lb(dev, false);
599 597
600 return ret; 598 return ret;
@@ -633,6 +631,63 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
633 return (u64)timer_l | (u64)timer_h1 << 32; 631 return (u64)timer_l | (u64)timer_h1 << 32;
634} 632}
635 633
634static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
635{
636 struct mlx5_priv *priv = &mdev->priv;
637 int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
638
639 if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
640 mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
641 return -ENOMEM;
642 }
643
644 cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
645 priv->irq_info[i].mask);
646
647 if (IS_ENABLED(CONFIG_SMP) &&
648 irq_set_affinity_hint(irq, priv->irq_info[i].mask))
649 mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
650
651 return 0;
652}
653
654static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
655{
656 struct mlx5_priv *priv = &mdev->priv;
657 int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
658
659 irq_set_affinity_hint(irq, NULL);
660 free_cpumask_var(priv->irq_info[i].mask);
661}
662
663static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
664{
665 int err;
666 int i;
667
668 for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
669 err = mlx5_irq_set_affinity_hint(mdev, i);
670 if (err)
671 goto err_out;
672 }
673
674 return 0;
675
676err_out:
677 for (i--; i >= 0; i--)
678 mlx5_irq_clear_affinity_hint(mdev, i);
679
680 return err;
681}
682
683static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
684{
685 int i;
686
687 for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
688 mlx5_irq_clear_affinity_hint(mdev, i);
689}
690
636int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, 691int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
637 unsigned int *irqn) 692 unsigned int *irqn)
638{ 693{
@@ -1079,9 +1134,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1079 goto err_stop_poll; 1134 goto err_stop_poll;
1080 } 1135 }
1081 1136
1082 if (boot && mlx5_init_once(dev, priv)) { 1137 if (boot) {
1083 dev_err(&pdev->dev, "sw objs init failed\n"); 1138 err = mlx5_init_once(dev, priv);
1084 goto err_stop_poll; 1139 if (err) {
1140 dev_err(&pdev->dev, "sw objs init failed\n");
1141 goto err_stop_poll;
1142 }
1085 } 1143 }
1086 1144
1087 err = mlx5_alloc_irq_vectors(dev); 1145 err = mlx5_alloc_irq_vectors(dev);
@@ -1091,8 +1149,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1091 } 1149 }
1092 1150
1093 dev->priv.uar = mlx5_get_uars_page(dev); 1151 dev->priv.uar = mlx5_get_uars_page(dev);
1094 if (!dev->priv.uar) { 1152 if (IS_ERR(dev->priv.uar)) {
1095 dev_err(&pdev->dev, "Failed allocating uar, aborting\n"); 1153 dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
1154 err = PTR_ERR(dev->priv.uar);
1096 goto err_disable_msix; 1155 goto err_disable_msix;
1097 } 1156 }
1098 1157
@@ -1108,6 +1167,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1108 goto err_stop_eqs; 1167 goto err_stop_eqs;
1109 } 1168 }
1110 1169
1170 err = mlx5_irq_set_affinity_hints(dev);
1171 if (err) {
1172 dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
1173 goto err_affinity_hints;
1174 }
1175
1111 err = mlx5_init_fs(dev); 1176 err = mlx5_init_fs(dev);
1112 if (err) { 1177 if (err) {
1113 dev_err(&pdev->dev, "Failed to init flow steering\n"); 1178 dev_err(&pdev->dev, "Failed to init flow steering\n");
@@ -1165,6 +1230,9 @@ err_sriov:
1165 mlx5_cleanup_fs(dev); 1230 mlx5_cleanup_fs(dev);
1166 1231
1167err_fs: 1232err_fs:
1233 mlx5_irq_clear_affinity_hints(dev);
1234
1235err_affinity_hints:
1168 free_comp_eqs(dev); 1236 free_comp_eqs(dev);
1169 1237
1170err_stop_eqs: 1238err_stop_eqs:
@@ -1233,6 +1301,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1233 1301
1234 mlx5_sriov_detach(dev); 1302 mlx5_sriov_detach(dev);
1235 mlx5_cleanup_fs(dev); 1303 mlx5_cleanup_fs(dev);
1304 mlx5_irq_clear_affinity_hints(dev);
1236 free_comp_eqs(dev); 1305 free_comp_eqs(dev);
1237 mlx5_stop_eqs(dev); 1306 mlx5_stop_eqs(dev);
1238 mlx5_put_uars_page(dev, priv->uar); 1307 mlx5_put_uars_page(dev, priv->uar);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 0f5ddd22927d..02d6c5b5d502 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -259,8 +259,8 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
259err_cmd: 259err_cmd:
260 memset(din, 0, sizeof(din)); 260 memset(din, 0, sizeof(din));
261 memset(dout, 0, sizeof(dout)); 261 memset(dout, 0, sizeof(dout));
262 MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); 262 MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
263 MLX5_SET(destroy_qp_in, in, qpn, qp->qpn); 263 MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
264 mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout)); 264 mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
265 return err; 265 return err;
266} 266}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index e651e4c02867..d3c33e9eea72 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -125,16 +125,16 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
125 return ret_entry; 125 return ret_entry;
126} 126}
127 127
128static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev, 128static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
129 u32 rate, u16 index) 129 u32 rate, u16 index)
130{ 130{
131 u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)] = {0}; 131 u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {0};
132 u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0}; 132 u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};
133 133
134 MLX5_SET(set_rate_limit_in, in, opcode, 134 MLX5_SET(set_pp_rate_limit_in, in, opcode,
135 MLX5_CMD_OP_SET_RATE_LIMIT); 135 MLX5_CMD_OP_SET_PP_RATE_LIMIT);
136 MLX5_SET(set_rate_limit_in, in, rate_limit_index, index); 136 MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
137 MLX5_SET(set_rate_limit_in, in, rate_limit, rate); 137 MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rate);
138 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 138 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
139} 139}
140 140
@@ -173,7 +173,7 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index)
173 entry->refcount++; 173 entry->refcount++;
174 } else { 174 } else {
175 /* new rate limit */ 175 /* new rate limit */
176 err = mlx5_set_rate_limit_cmd(dev, rate, entry->index); 176 err = mlx5_set_pp_rate_limit_cmd(dev, rate, entry->index);
177 if (err) { 177 if (err) {
178 mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n", 178 mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n",
179 rate, err); 179 rate, err);
@@ -209,7 +209,7 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate)
209 entry->refcount--; 209 entry->refcount--;
210 if (!entry->refcount) { 210 if (!entry->refcount) {
211 /* need to remove rate */ 211 /* need to remove rate */
212 mlx5_set_rate_limit_cmd(dev, 0, entry->index); 212 mlx5_set_pp_rate_limit_cmd(dev, 0, entry->index);
213 entry->rate = 0; 213 entry->rate = 0;
214 } 214 }
215 215
@@ -262,8 +262,8 @@ void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
262 /* Clear all configured rates */ 262 /* Clear all configured rates */
263 for (i = 0; i < table->max_size; i++) 263 for (i = 0; i < table->max_size; i++)
264 if (table->rl_entry[i].rate) 264 if (table->rl_entry[i].rate)
265 mlx5_set_rate_limit_cmd(dev, 0, 265 mlx5_set_pp_rate_limit_cmd(dev, 0,
266 table->rl_entry[i].index); 266 table->rl_entry[i].index);
267 267
268 kfree(dev->priv.rl_table.rl_entry); 268 kfree(dev->priv.rl_table.rl_entry);
269} 269}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 222b25908d01..8b97066dd1f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -168,18 +168,16 @@ struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
168 struct mlx5_uars_page *ret; 168 struct mlx5_uars_page *ret;
169 169
170 mutex_lock(&mdev->priv.bfregs.reg_head.lock); 170 mutex_lock(&mdev->priv.bfregs.reg_head.lock);
171 if (list_empty(&mdev->priv.bfregs.reg_head.list)) { 171 if (!list_empty(&mdev->priv.bfregs.reg_head.list)) {
172 ret = alloc_uars_page(mdev, false);
173 if (IS_ERR(ret)) {
174 ret = NULL;
175 goto out;
176 }
177 list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
178 } else {
179 ret = list_first_entry(&mdev->priv.bfregs.reg_head.list, 172 ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
180 struct mlx5_uars_page, list); 173 struct mlx5_uars_page, list);
181 kref_get(&ret->ref_count); 174 kref_get(&ret->ref_count);
175 goto out;
182 } 176 }
177 ret = alloc_uars_page(mdev, false);
178 if (IS_ERR(ret))
179 goto out;
180 list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
183out: 181out:
184 mutex_unlock(&mdev->priv.bfregs.reg_head.lock); 182 mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
185 183
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 9cb939b6a859..dfe36cf6fbea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -911,23 +911,33 @@ int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
911 void *in; 911 void *in;
912 int err; 912 int err;
913 913
914 mlx5_core_dbg(mdev, "%s local_lb\n", enable ? "enable" : "disable"); 914 if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
915 !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
916 return 0;
917
915 in = kvzalloc(inlen, GFP_KERNEL); 918 in = kvzalloc(inlen, GFP_KERNEL);
916 if (!in) 919 if (!in)
917 return -ENOMEM; 920 return -ENOMEM;
918 921
919 MLX5_SET(modify_nic_vport_context_in, in, 922 MLX5_SET(modify_nic_vport_context_in, in,
920 field_select.disable_mc_local_lb, 1);
921 MLX5_SET(modify_nic_vport_context_in, in,
922 nic_vport_context.disable_mc_local_lb, !enable); 923 nic_vport_context.disable_mc_local_lb, !enable);
923
924 MLX5_SET(modify_nic_vport_context_in, in,
925 field_select.disable_uc_local_lb, 1);
926 MLX5_SET(modify_nic_vport_context_in, in, 924 MLX5_SET(modify_nic_vport_context_in, in,
927 nic_vport_context.disable_uc_local_lb, !enable); 925 nic_vport_context.disable_uc_local_lb, !enable);
928 926
927 if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
928 MLX5_SET(modify_nic_vport_context_in, in,
929 field_select.disable_mc_local_lb, 1);
930
931 if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
932 MLX5_SET(modify_nic_vport_context_in, in,
933 field_select.disable_uc_local_lb, 1);
934
929 err = mlx5_modify_nic_vport_context(mdev, in, inlen); 935 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
930 936
937 if (!err)
938 mlx5_core_dbg(mdev, "%s local_lb\n",
939 enable ? "enable" : "disable");
940
931 kvfree(in); 941 kvfree(in);
932 return err; 942 return err;
933} 943}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index 07a9ba6cfc70..2f74953e4561 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -71,9 +71,9 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
71 struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; 71 struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
72 struct mlx5e_vxlan *vxlan; 72 struct mlx5e_vxlan *vxlan;
73 73
74 spin_lock(&vxlan_db->lock); 74 spin_lock_bh(&vxlan_db->lock);
75 vxlan = radix_tree_lookup(&vxlan_db->tree, port); 75 vxlan = radix_tree_lookup(&vxlan_db->tree, port);
76 spin_unlock(&vxlan_db->lock); 76 spin_unlock_bh(&vxlan_db->lock);
77 77
78 return vxlan; 78 return vxlan;
79} 79}
@@ -88,8 +88,12 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
88 struct mlx5e_vxlan *vxlan; 88 struct mlx5e_vxlan *vxlan;
89 int err; 89 int err;
90 90
91 if (mlx5e_vxlan_lookup_port(priv, port)) 91 mutex_lock(&priv->state_lock);
92 vxlan = mlx5e_vxlan_lookup_port(priv, port);
93 if (vxlan) {
94 atomic_inc(&vxlan->refcount);
92 goto free_work; 95 goto free_work;
96 }
93 97
94 if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port)) 98 if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
95 goto free_work; 99 goto free_work;
@@ -99,10 +103,11 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
99 goto err_delete_port; 103 goto err_delete_port;
100 104
101 vxlan->udp_port = port; 105 vxlan->udp_port = port;
106 atomic_set(&vxlan->refcount, 1);
102 107
103 spin_lock_irq(&vxlan_db->lock); 108 spin_lock_bh(&vxlan_db->lock);
104 err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan); 109 err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
105 spin_unlock_irq(&vxlan_db->lock); 110 spin_unlock_bh(&vxlan_db->lock);
106 if (err) 111 if (err)
107 goto err_free; 112 goto err_free;
108 113
@@ -113,35 +118,39 @@ err_free:
113err_delete_port: 118err_delete_port:
114 mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); 119 mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
115free_work: 120free_work:
121 mutex_unlock(&priv->state_lock);
116 kfree(vxlan_work); 122 kfree(vxlan_work);
117} 123}
118 124
119static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port) 125static void mlx5e_vxlan_del_port(struct work_struct *work)
120{ 126{
127 struct mlx5e_vxlan_work *vxlan_work =
128 container_of(work, struct mlx5e_vxlan_work, work);
129 struct mlx5e_priv *priv = vxlan_work->priv;
121 struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; 130 struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
131 u16 port = vxlan_work->port;
122 struct mlx5e_vxlan *vxlan; 132 struct mlx5e_vxlan *vxlan;
133 bool remove = false;
123 134
124 spin_lock_irq(&vxlan_db->lock); 135 mutex_lock(&priv->state_lock);
125 vxlan = radix_tree_delete(&vxlan_db->tree, port); 136 spin_lock_bh(&vxlan_db->lock);
126 spin_unlock_irq(&vxlan_db->lock); 137 vxlan = radix_tree_lookup(&vxlan_db->tree, port);
127
128 if (!vxlan) 138 if (!vxlan)
129 return; 139 goto out_unlock;
130
131 mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port);
132
133 kfree(vxlan);
134}
135 140
136static void mlx5e_vxlan_del_port(struct work_struct *work) 141 if (atomic_dec_and_test(&vxlan->refcount)) {
137{ 142 radix_tree_delete(&vxlan_db->tree, port);
138 struct mlx5e_vxlan_work *vxlan_work = 143 remove = true;
139 container_of(work, struct mlx5e_vxlan_work, work); 144 }
140 struct mlx5e_priv *priv = vxlan_work->priv;
141 u16 port = vxlan_work->port;
142 145
143 __mlx5e_vxlan_core_del_port(priv, port); 146out_unlock:
147 spin_unlock_bh(&vxlan_db->lock);
144 148
149 if (remove) {
150 mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
151 kfree(vxlan);
152 }
153 mutex_unlock(&priv->state_lock);
145 kfree(vxlan_work); 154 kfree(vxlan_work);
146} 155}
147 156
@@ -171,12 +180,11 @@ void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
171 struct mlx5e_vxlan *vxlan; 180 struct mlx5e_vxlan *vxlan;
172 unsigned int port = 0; 181 unsigned int port = 0;
173 182
174 spin_lock_irq(&vxlan_db->lock); 183 /* Lockless since we are the only radix-tree consumers, wq is disabled */
175 while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) { 184 while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
176 port = vxlan->udp_port; 185 port = vxlan->udp_port;
177 spin_unlock_irq(&vxlan_db->lock); 186 radix_tree_delete(&vxlan_db->tree, port);
178 __mlx5e_vxlan_core_del_port(priv, (u16)port); 187 mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
179 spin_lock_irq(&vxlan_db->lock); 188 kfree(vxlan);
180 } 189 }
181 spin_unlock_irq(&vxlan_db->lock);
182} 190}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
index 5def12c048e3..5ef6ae7d568a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
@@ -36,6 +36,7 @@
36#include "en.h" 36#include "en.h"
37 37
38struct mlx5e_vxlan { 38struct mlx5e_vxlan {
39 atomic_t refcount;
39 u16 udp_port; 40 u16 udp_port;
40}; 41};
41 42
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 23f7d828cf67..6ef20e5cc77d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1643,7 +1643,12 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
1643 return 0; 1643 return 0;
1644 } 1644 }
1645 1645
1646 wmb(); /* reset needs to be written before we read control register */ 1646 /* Reset needs to be written before we read control register, and
1647 * we must wait for the HW to become responsive once again
1648 */
1649 wmb();
1650 msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
1651
1647 end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); 1652 end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
1648 do { 1653 do {
1649 u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); 1654 u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index a6441208e9d9..fb082ad21b00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -59,6 +59,7 @@
59#define MLXSW_PCI_SW_RESET 0xF0010 59#define MLXSW_PCI_SW_RESET 0xF0010
60#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) 60#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
61#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 61#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
62#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
62#define MLXSW_PCI_FW_READY 0xA1844 63#define MLXSW_PCI_FW_READY 0xA1844
63#define MLXSW_PCI_FW_READY_MASK 0xFFFF 64#define MLXSW_PCI_FW_READY_MASK 0xFFFF
64#define MLXSW_PCI_FW_READY_MAGIC 0x5E 65#define MLXSW_PCI_FW_READY_MAGIC 0x5E
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 2d0897b7d860..c3837ca7a705 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4300,6 +4300,7 @@ static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4300 4300
4301static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 4301static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
4302{ 4302{
4303 u16 vid = 1;
4303 int err; 4304 int err;
4304 4305
4305 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 4306 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
@@ -4312,8 +4313,19 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
4312 true, false); 4313 true, false);
4313 if (err) 4314 if (err)
4314 goto err_port_vlan_set; 4315 goto err_port_vlan_set;
4316
4317 for (; vid <= VLAN_N_VID - 1; vid++) {
4318 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4319 vid, false);
4320 if (err)
4321 goto err_vid_learning_set;
4322 }
4323
4315 return 0; 4324 return 0;
4316 4325
4326err_vid_learning_set:
4327 for (vid--; vid >= 1; vid--)
4328 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
4317err_port_vlan_set: 4329err_port_vlan_set:
4318 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4330 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4319err_port_stp_set: 4331err_port_stp_set:
@@ -4323,6 +4335,12 @@ err_port_stp_set:
4323 4335
4324static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 4336static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4325{ 4337{
4338 u16 vid;
4339
4340 for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
4341 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4342 vid, true);
4343
4326 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, 4344 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
4327 false, false); 4345 false, false);
4328 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4346 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
@@ -4358,7 +4376,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
4358 } 4376 }
4359 if (!info->linking) 4377 if (!info->linking)
4360 break; 4378 break;
4361 if (netdev_has_any_upper_dev(upper_dev)) { 4379 if (netdev_has_any_upper_dev(upper_dev) &&
4380 (!netif_is_bridge_master(upper_dev) ||
4381 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
4382 upper_dev))) {
4362 NL_SET_ERR_MSG(extack, 4383 NL_SET_ERR_MSG(extack,
4363 "spectrum: Enslaving a port to a device that already has an upper device is not supported"); 4384 "spectrum: Enslaving a port to a device that already has an upper device is not supported");
4364 return -EINVAL; 4385 return -EINVAL;
@@ -4486,6 +4507,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
4486 u16 vid) 4507 u16 vid)
4487{ 4508{
4488 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 4509 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
4510 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4489 struct netdev_notifier_changeupper_info *info = ptr; 4511 struct netdev_notifier_changeupper_info *info = ptr;
4490 struct netlink_ext_ack *extack; 4512 struct netlink_ext_ack *extack;
4491 struct net_device *upper_dev; 4513 struct net_device *upper_dev;
@@ -4502,7 +4524,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
4502 } 4524 }
4503 if (!info->linking) 4525 if (!info->linking)
4504 break; 4526 break;
4505 if (netdev_has_any_upper_dev(upper_dev)) { 4527 if (netdev_has_any_upper_dev(upper_dev) &&
4528 (!netif_is_bridge_master(upper_dev) ||
4529 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
4530 upper_dev))) {
4506 NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported"); 4531 NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
4507 return -EINVAL; 4532 return -EINVAL;
4508 } 4533 }
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 432ab9b12b7f..05ce1befd9b3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -365,6 +365,8 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
365void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, 365void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
366 struct net_device *brport_dev, 366 struct net_device *brport_dev,
367 struct net_device *br_dev); 367 struct net_device *br_dev);
368bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
369 const struct net_device *br_dev);
368 370
369/* spectrum.c */ 371/* spectrum.c */
370int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 372int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index c33beac5def0..b5397da94d7f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -46,7 +46,8 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
46 int tclass_num, u32 min, u32 max, 46 int tclass_num, u32 min, u32 max,
47 u32 probability, bool is_ecn) 47 u32 probability, bool is_ecn)
48{ 48{
49 char cwtp_cmd[max_t(u8, MLXSW_REG_CWTP_LEN, MLXSW_REG_CWTPM_LEN)]; 49 char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
50 char cwtp_cmd[MLXSW_REG_CWTP_LEN];
50 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 51 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
51 int err; 52 int err;
52 53
@@ -60,10 +61,10 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
60 if (err) 61 if (err)
61 return err; 62 return err;
62 63
63 mlxsw_reg_cwtpm_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num, 64 mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
64 MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn); 65 MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);
65 66
66 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtp_cmd); 67 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
67} 68}
68 69
69static int 70static int
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 72ef4f8025f0..7042c855a5d6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -821,13 +821,18 @@ static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
821 struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree; 821 struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
822 int err; 822 int err;
823 823
824 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
825 if (err)
826 return err;
827 fib->lpm_tree = new_tree; 824 fib->lpm_tree = new_tree;
828 mlxsw_sp_lpm_tree_hold(new_tree); 825 mlxsw_sp_lpm_tree_hold(new_tree);
826 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
827 if (err)
828 goto err_tree_bind;
829 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree); 829 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
830 return 0; 830 return 0;
831
832err_tree_bind:
833 mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
834 fib->lpm_tree = old_tree;
835 return err;
831} 836}
832 837
833static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp, 838static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
@@ -868,11 +873,14 @@ err_tree_replace:
868 return err; 873 return err;
869 874
870no_replace: 875no_replace:
871 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
872 if (err)
873 return err;
874 fib->lpm_tree = new_tree; 876 fib->lpm_tree = new_tree;
875 mlxsw_sp_lpm_tree_hold(new_tree); 877 mlxsw_sp_lpm_tree_hold(new_tree);
878 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
879 if (err) {
880 mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
881 fib->lpm_tree = NULL;
882 return err;
883 }
876 return 0; 884 return 0;
877} 885}
878 886
@@ -1934,11 +1942,8 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1934 dipn = htonl(dip); 1942 dipn = htonl(dip);
1935 dev = mlxsw_sp->router->rifs[rif]->dev; 1943 dev = mlxsw_sp->router->rifs[rif]->dev;
1936 n = neigh_lookup(&arp_tbl, &dipn, dev); 1944 n = neigh_lookup(&arp_tbl, &dipn, dev);
1937 if (!n) { 1945 if (!n)
1938 netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
1939 &dip);
1940 return; 1946 return;
1941 }
1942 1947
1943 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip); 1948 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
1944 neigh_event_send(n, NULL); 1949 neigh_event_send(n, NULL);
@@ -1965,11 +1970,8 @@ static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1965 1970
1966 dev = mlxsw_sp->router->rifs[rif]->dev; 1971 dev = mlxsw_sp->router->rifs[rif]->dev;
1967 n = neigh_lookup(&nd_tbl, &dip, dev); 1972 n = neigh_lookup(&nd_tbl, &dip, dev);
1968 if (!n) { 1973 if (!n)
1969 netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
1970 &dip);
1971 return; 1974 return;
1972 }
1973 1975
1974 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip); 1976 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
1975 neigh_event_send(n, NULL); 1977 neigh_event_send(n, NULL);
@@ -2436,25 +2438,16 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2436 rhashtable_destroy(&mlxsw_sp->router->neigh_ht); 2438 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2437} 2439}
2438 2440
2439static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
2440 const struct mlxsw_sp_rif *rif)
2441{
2442 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2443
2444 mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
2445 rif->rif_index, rif->addr);
2446 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2447}
2448
2449static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, 2441static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2450 struct mlxsw_sp_rif *rif) 2442 struct mlxsw_sp_rif *rif)
2451{ 2443{
2452 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp; 2444 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2453 2445
2454 mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
2455 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list, 2446 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2456 rif_list_node) 2447 rif_list_node) {
2448 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2457 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); 2449 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2450 }
2458} 2451}
2459 2452
2460enum mlxsw_sp_nexthop_type { 2453enum mlxsw_sp_nexthop_type {
@@ -3237,7 +3230,7 @@ static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3237{ 3230{
3238 if (!removing) 3231 if (!removing)
3239 nh->should_offload = 1; 3232 nh->should_offload = 1;
3240 else if (nh->offloaded) 3233 else
3241 nh->should_offload = 0; 3234 nh->should_offload = 0;
3242 nh->update = 1; 3235 nh->update = 1;
3243} 3236}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 7b8548e25ae7..593ad31be749 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -152,6 +152,12 @@ mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
152 return NULL; 152 return NULL;
153} 153}
154 154
155bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
156 const struct net_device *br_dev)
157{
158 return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
159}
160
155static struct mlxsw_sp_bridge_device * 161static struct mlxsw_sp_bridge_device *
156mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, 162mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
157 struct net_device *br_dev) 163 struct net_device *br_dev)
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index e379b78e86ef..13190aa09faf 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -82,10 +82,33 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
82 return nfp_net_ebpf_capable(nn) ? "BPF" : ""; 82 return nfp_net_ebpf_capable(nn) ? "BPF" : "";
83} 83}
84 84
85static int
86nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
87{
88 int err;
89
90 nn->app_priv = kzalloc(sizeof(struct nfp_bpf_vnic), GFP_KERNEL);
91 if (!nn->app_priv)
92 return -ENOMEM;
93
94 err = nfp_app_nic_vnic_alloc(app, nn, id);
95 if (err)
96 goto err_free_priv;
97
98 return 0;
99err_free_priv:
100 kfree(nn->app_priv);
101 return err;
102}
103
85static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn) 104static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
86{ 105{
106 struct nfp_bpf_vnic *bv = nn->app_priv;
107
87 if (nn->dp.bpf_offload_xdp) 108 if (nn->dp.bpf_offload_xdp)
88 nfp_bpf_xdp_offload(app, nn, NULL); 109 nfp_bpf_xdp_offload(app, nn, NULL);
110 WARN_ON(bv->tc_prog);
111 kfree(bv);
89} 112}
90 113
91static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type, 114static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
@@ -93,6 +116,9 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
93{ 116{
94 struct tc_cls_bpf_offload *cls_bpf = type_data; 117 struct tc_cls_bpf_offload *cls_bpf = type_data;
95 struct nfp_net *nn = cb_priv; 118 struct nfp_net *nn = cb_priv;
119 struct bpf_prog *oldprog;
120 struct nfp_bpf_vnic *bv;
121 int err;
96 122
97 if (type != TC_SETUP_CLSBPF || 123 if (type != TC_SETUP_CLSBPF ||
98 !tc_can_offload(nn->dp.netdev) || 124 !tc_can_offload(nn->dp.netdev) ||
@@ -100,8 +126,6 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
100 cls_bpf->common.protocol != htons(ETH_P_ALL) || 126 cls_bpf->common.protocol != htons(ETH_P_ALL) ||
101 cls_bpf->common.chain_index) 127 cls_bpf->common.chain_index)
102 return -EOPNOTSUPP; 128 return -EOPNOTSUPP;
103 if (nn->dp.bpf_offload_xdp)
104 return -EBUSY;
105 129
106 /* Only support TC direct action */ 130 /* Only support TC direct action */
107 if (!cls_bpf->exts_integrated || 131 if (!cls_bpf->exts_integrated ||
@@ -110,16 +134,25 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
110 return -EOPNOTSUPP; 134 return -EOPNOTSUPP;
111 } 135 }
112 136
113 switch (cls_bpf->command) { 137 if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
114 case TC_CLSBPF_REPLACE:
115 return nfp_net_bpf_offload(nn, cls_bpf->prog, true);
116 case TC_CLSBPF_ADD:
117 return nfp_net_bpf_offload(nn, cls_bpf->prog, false);
118 case TC_CLSBPF_DESTROY:
119 return nfp_net_bpf_offload(nn, NULL, true);
120 default:
121 return -EOPNOTSUPP; 138 return -EOPNOTSUPP;
139
140 bv = nn->app_priv;
141 oldprog = cls_bpf->oldprog;
142
143 /* Don't remove if oldprog doesn't match driver's state */
144 if (bv->tc_prog != oldprog) {
145 oldprog = NULL;
146 if (!cls_bpf->prog)
147 return 0;
122 } 148 }
149
150 err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog);
151 if (err)
152 return err;
153
154 bv->tc_prog = cls_bpf->prog;
155 return 0;
123} 156}
124 157
125static int nfp_bpf_setup_tc_block(struct net_device *netdev, 158static int nfp_bpf_setup_tc_block(struct net_device *netdev,
@@ -167,7 +200,7 @@ const struct nfp_app_type app_bpf = {
167 200
168 .extra_cap = nfp_bpf_extra_cap, 201 .extra_cap = nfp_bpf_extra_cap,
169 202
170 .vnic_alloc = nfp_app_nic_vnic_alloc, 203 .vnic_alloc = nfp_bpf_vnic_alloc,
171 .vnic_free = nfp_bpf_vnic_free, 204 .vnic_free = nfp_bpf_vnic_free,
172 205
173 .setup_tc = nfp_bpf_setup_tc, 206 .setup_tc = nfp_bpf_setup_tc,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 082a15f6dfb5..57b6043177a3 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -172,6 +172,14 @@ struct nfp_prog {
172 struct list_head insns; 172 struct list_head insns;
173}; 173};
174 174
175/**
176 * struct nfp_bpf_vnic - per-vNIC BPF priv structure
177 * @tc_prog: currently loaded cls_bpf program
178 */
179struct nfp_bpf_vnic {
180 struct bpf_prog *tc_prog;
181};
182
175int nfp_bpf_jit(struct nfp_prog *prog); 183int nfp_bpf_jit(struct nfp_prog *prog);
176 184
177extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops; 185extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 1a603fdd9e80..99b0487b6d82 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -568,6 +568,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
568 return err; 568 return err;
569 } 569 }
570 nn_writeb(nn, ctrl_offset, entry->entry); 570 nn_writeb(nn, ctrl_offset, entry->entry);
571 nfp_net_irq_unmask(nn, entry->entry);
571 572
572 return 0; 573 return 0;
573} 574}
@@ -582,6 +583,7 @@ static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
582 unsigned int vector_idx) 583 unsigned int vector_idx)
583{ 584{
584 nn_writeb(nn, ctrl_offset, 0xff); 585 nn_writeb(nn, ctrl_offset, 0xff);
586 nn_pci_flush(nn);
585 free_irq(nn->irq_entries[vector_idx].vector, nn); 587 free_irq(nn->irq_entries[vector_idx].vector, nn);
586} 588}
587 589
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 2801ecd09eab..6c02b2d6ba06 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -333,7 +333,7 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
333 ls >= ARRAY_SIZE(ls_to_ethtool)) 333 ls >= ARRAY_SIZE(ls_to_ethtool))
334 return 0; 334 return 0;
335 335
336 cmd->base.speed = ls_to_ethtool[sts]; 336 cmd->base.speed = ls_to_ethtool[ls];
337 cmd->base.duplex = DUPLEX_FULL; 337 cmd->base.duplex = DUPLEX_FULL;
338 338
339 return 0; 339 return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 924a05e05da0..78b36c67c232 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -84,16 +84,13 @@ nfp_repr_phy_port_get_stats64(struct nfp_port *port,
84{ 84{
85 u8 __iomem *mem = port->eth_stats; 85 u8 __iomem *mem = port->eth_stats;
86 86
87 /* TX and RX stats are flipped as we are returning the stats as seen 87 stats->tx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK);
88 * at the switch port corresponding to the phys port. 88 stats->tx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS);
89 */ 89 stats->tx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS);
90 stats->tx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
91 stats->tx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
92 stats->tx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);
93 90
94 stats->rx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK); 91 stats->rx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
95 stats->rx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS); 92 stats->rx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
96 stats->rx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS); 93 stats->rx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);
97} 94}
98 95
99static void 96static void
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index c8c4b3940564..b7abb8205d3a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -358,10 +358,27 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
358 kfree(p_rdma_info); 358 kfree(p_rdma_info);
359} 359}
360 360
361static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
362{
363 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
364
365 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
366
367 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
368 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
369 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
370}
371
372static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn)
373{
374 qed_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey);
375}
376
361static void qed_rdma_free(struct qed_hwfn *p_hwfn) 377static void qed_rdma_free(struct qed_hwfn *p_hwfn)
362{ 378{
363 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n"); 379 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
364 380
381 qed_rdma_free_reserved_lkey(p_hwfn);
365 qed_rdma_resc_free(p_hwfn); 382 qed_rdma_resc_free(p_hwfn);
366} 383}
367 384
@@ -615,9 +632,6 @@ static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
615{ 632{
616 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; 633 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
617 634
618 /* The first DPI is reserved for the Kernel */
619 __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
620
621 /* Tid 0 will be used as the key for "reserved MR". 635 /* Tid 0 will be used as the key for "reserved MR".
622 * The driver should allocate memory for it so it can be loaded but no 636 * The driver should allocate memory for it so it can be loaded but no
623 * ramrod should be passed on it. 637 * ramrod should be passed on it.
@@ -797,17 +811,6 @@ static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
797 return p_hwfn->p_rdma_info->dev; 811 return p_hwfn->p_rdma_info->dev;
798} 812}
799 813
800static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
801{
802 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
803
804 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
805
806 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
807 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
808 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
809}
810
811static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod) 814static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
812{ 815{
813 struct qed_hwfn *p_hwfn; 816 struct qed_hwfn *p_hwfn;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index be48d9abd001..3588081b2e27 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -776,6 +776,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
776 int rc = 0; 776 int rc = 0;
777 struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL; 777 struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
778 bool b_ret_ent = true; 778 bool b_ret_ent = true;
779 bool eblock;
779 780
780 if (!p_hwfn) 781 if (!p_hwfn)
781 return -EINVAL; 782 return -EINVAL;
@@ -794,6 +795,11 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
794 if (rc) 795 if (rc)
795 goto spq_post_fail; 796 goto spq_post_fail;
796 797
798 /* Check if entry is in block mode before qed_spq_add_entry,
799 * which might kfree p_ent.
800 */
801 eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);
802
797 /* Add the request to the pending queue */ 803 /* Add the request to the pending queue */
798 rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority); 804 rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
799 if (rc) 805 if (rc)
@@ -811,7 +817,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
811 817
812 spin_unlock_bh(&p_spq->lock); 818 spin_unlock_bh(&p_spq->lock);
813 819
814 if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) { 820 if (eblock) {
815 /* For entries in QED BLOCK mode, the completion code cannot 821 /* For entries in QED BLOCK mode, the completion code cannot
816 * perform the necessary cleanup - if it did, we couldn't 822 * perform the necessary cleanup - if it did, we couldn't
817 * access p_ent here to see whether it's successful or not. 823 * access p_ent here to see whether it's successful or not.
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
index 18461fcb9815..53dbf1e163a8 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -47,6 +47,7 @@
47#define MDIO_CLK_25_28 7 47#define MDIO_CLK_25_28 7
48 48
49#define MDIO_WAIT_TIMES 1000 49#define MDIO_WAIT_TIMES 1000
50#define MDIO_STATUS_DELAY_TIME 1
50 51
51static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum) 52static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
52{ 53{
@@ -65,7 +66,7 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
65 66
66 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, 67 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
67 !(reg & (MDIO_START | MDIO_BUSY)), 68 !(reg & (MDIO_START | MDIO_BUSY)),
68 100, MDIO_WAIT_TIMES * 100)) 69 MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100))
69 return -EIO; 70 return -EIO;
70 71
71 return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK; 72 return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
@@ -88,8 +89,8 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
88 writel(reg, adpt->base + EMAC_MDIO_CTRL); 89 writel(reg, adpt->base + EMAC_MDIO_CTRL);
89 90
90 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, 91 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
91 !(reg & (MDIO_START | MDIO_BUSY)), 100, 92 !(reg & (MDIO_START | MDIO_BUSY)),
92 MDIO_WAIT_TIMES * 100)) 93 MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100))
93 return -EIO; 94 return -EIO;
94 95
95 return 0; 96 return 0;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 70c92b649b29..38c924bdd32e 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -253,18 +253,18 @@ static int emac_open(struct net_device *netdev)
253 return ret; 253 return ret;
254 } 254 }
255 255
256 ret = emac_mac_up(adpt); 256 ret = adpt->phy.open(adpt);
257 if (ret) { 257 if (ret) {
258 emac_mac_rx_tx_rings_free_all(adpt); 258 emac_mac_rx_tx_rings_free_all(adpt);
259 free_irq(irq->irq, irq); 259 free_irq(irq->irq, irq);
260 return ret; 260 return ret;
261 } 261 }
262 262
263 ret = adpt->phy.open(adpt); 263 ret = emac_mac_up(adpt);
264 if (ret) { 264 if (ret) {
265 emac_mac_down(adpt);
266 emac_mac_rx_tx_rings_free_all(adpt); 265 emac_mac_rx_tx_rings_free_all(adpt);
267 free_irq(irq->irq, irq); 266 free_irq(irq->irq, irq);
267 adpt->phy.close(adpt);
268 return ret; 268 return ret;
269 } 269 }
270 270
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 71bee1af71ef..df21e900f874 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -195,6 +195,7 @@ err2:
195err1: 195err1:
196 rmnet_unregister_real_device(real_dev, port); 196 rmnet_unregister_real_device(real_dev, port);
197err0: 197err0:
198 kfree(ep);
198 return err; 199 return err;
199} 200}
200 201
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 29842ccc91a9..08e4afc0ab39 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -126,12 +126,12 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
126 126
127 if (skb_headroom(skb) < required_headroom) { 127 if (skb_headroom(skb) < required_headroom) {
128 if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL)) 128 if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
129 return RMNET_MAP_CONSUMED; 129 goto fail;
130 } 130 }
131 131
132 map_header = rmnet_map_add_map_header(skb, additional_header_len, 0); 132 map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
133 if (!map_header) 133 if (!map_header)
134 return RMNET_MAP_CONSUMED; 134 goto fail;
135 135
136 if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) { 136 if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
137 if (mux_id == 0xff) 137 if (mux_id == 0xff)
@@ -143,6 +143,10 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
143 skb->protocol = htons(ETH_P_MAP); 143 skb->protocol = htons(ETH_P_MAP);
144 144
145 return RMNET_MAP_SUCCESS; 145 return RMNET_MAP_SUCCESS;
146
147fail:
148 kfree_skb(skb);
149 return RMNET_MAP_CONSUMED;
146} 150}
147 151
148static void 152static void
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index fc0d5fa65ad4..734286ebe5ef 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2244,19 +2244,14 @@ static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
2244 void __iomem *ioaddr = tp->mmio_addr; 2244 void __iomem *ioaddr = tp->mmio_addr;
2245 dma_addr_t paddr = tp->counters_phys_addr; 2245 dma_addr_t paddr = tp->counters_phys_addr;
2246 u32 cmd; 2246 u32 cmd;
2247 bool ret;
2248 2247
2249 RTL_W32(CounterAddrHigh, (u64)paddr >> 32); 2248 RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
2249 RTL_R32(CounterAddrHigh);
2250 cmd = (u64)paddr & DMA_BIT_MASK(32); 2250 cmd = (u64)paddr & DMA_BIT_MASK(32);
2251 RTL_W32(CounterAddrLow, cmd); 2251 RTL_W32(CounterAddrLow, cmd);
2252 RTL_W32(CounterAddrLow, cmd | counter_cmd); 2252 RTL_W32(CounterAddrLow, cmd | counter_cmd);
2253 2253
2254 ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); 2254 return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
2255
2256 RTL_W32(CounterAddrLow, 0);
2257 RTL_W32(CounterAddrHigh, 0);
2258
2259 return ret;
2260} 2255}
2261 2256
2262static bool rtl8169_reset_counters(struct net_device *dev) 2257static bool rtl8169_reset_counters(struct net_device *dev)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 2b962d349f5f..009780df664b 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2308,32 +2308,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
2308 struct ravb_private *priv = netdev_priv(ndev); 2308 struct ravb_private *priv = netdev_priv(ndev);
2309 int ret = 0; 2309 int ret = 0;
2310 2310
2311 if (priv->wol_enabled) { 2311 /* If WoL is enabled set reset mode to rearm the WoL logic */
2312 /* Reduce the usecount of the clock to zero and then 2312 if (priv->wol_enabled)
2313 * restore it to its original value. This is done to force
2314 * the clock to be re-enabled which is a workaround
2315 * for renesas-cpg-mssr driver which do not enable clocks
2316 * when resuming from PSCI suspend/resume.
2317 *
2318 * Without this workaround the driver fails to communicate
2319 * with the hardware if WoL was enabled when the system
2320 * entered PSCI suspend. This is due to that if WoL is enabled
2321 * we explicitly keep the clock from being turned off when
2322 * suspending, but in PSCI sleep power is cut so the clock
2323 * is disabled anyhow, the clock driver is not aware of this
2324 * so the clock is not turned back on when resuming.
2325 *
2326 * TODO: once the renesas-cpg-mssr suspend/resume is working
2327 * this clock dance should be removed.
2328 */
2329 clk_disable(priv->clk);
2330 clk_disable(priv->clk);
2331 clk_enable(priv->clk);
2332 clk_enable(priv->clk);
2333
2334 /* Set reset mode to rearm the WoL logic */
2335 ravb_write(ndev, CCC_OPC_RESET, CCC); 2313 ravb_write(ndev, CCC_OPC_RESET, CCC);
2336 }
2337 2314
2338 /* All register have been reset to default values. 2315 /* All register have been reset to default values.
2339 * Restore all registers which where setup at probe time and 2316 * Restore all registers which where setup at probe time and
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 7e060aa9fbed..53924a4fc31c 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -147,7 +147,7 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
147 [FWNLCR0] = 0x0090, 147 [FWNLCR0] = 0x0090,
148 [FWALCR0] = 0x0094, 148 [FWALCR0] = 0x0094,
149 [TXNLCR1] = 0x00a0, 149 [TXNLCR1] = 0x00a0,
150 [TXALCR1] = 0x00a0, 150 [TXALCR1] = 0x00a4,
151 [RXNLCR1] = 0x00a8, 151 [RXNLCR1] = 0x00a8,
152 [RXALCR1] = 0x00ac, 152 [RXALCR1] = 0x00ac,
153 [FWNLCR1] = 0x00b0, 153 [FWNLCR1] = 0x00b0,
@@ -399,7 +399,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
399 [FWNLCR0] = 0x0090, 399 [FWNLCR0] = 0x0090,
400 [FWALCR0] = 0x0094, 400 [FWALCR0] = 0x0094,
401 [TXNLCR1] = 0x00a0, 401 [TXNLCR1] = 0x00a0,
402 [TXALCR1] = 0x00a0, 402 [TXALCR1] = 0x00a4,
403 [RXNLCR1] = 0x00a8, 403 [RXNLCR1] = 0x00a8,
404 [RXALCR1] = 0x00ac, 404 [RXALCR1] = 0x00ac,
405 [FWNLCR1] = 0x00b0, 405 [FWNLCR1] = 0x00b0,
@@ -1149,7 +1149,8 @@ static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
1149 entry, le32_to_cpu(txdesc->status)); 1149 entry, le32_to_cpu(txdesc->status));
1150 /* Free the original skb. */ 1150 /* Free the original skb. */
1151 if (mdp->tx_skbuff[entry]) { 1151 if (mdp->tx_skbuff[entry]) {
1152 dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr), 1152 dma_unmap_single(&mdp->pdev->dev,
1153 le32_to_cpu(txdesc->addr),
1153 le32_to_cpu(txdesc->len) >> 16, 1154 le32_to_cpu(txdesc->len) >> 16,
1154 DMA_TO_DEVICE); 1155 DMA_TO_DEVICE);
1155 dev_kfree_skb_irq(mdp->tx_skbuff[entry]); 1156 dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
@@ -1179,14 +1180,14 @@ static void sh_eth_ring_free(struct net_device *ndev)
1179 if (mdp->rx_skbuff[i]) { 1180 if (mdp->rx_skbuff[i]) {
1180 struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i]; 1181 struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
1181 1182
1182 dma_unmap_single(&ndev->dev, 1183 dma_unmap_single(&mdp->pdev->dev,
1183 le32_to_cpu(rxdesc->addr), 1184 le32_to_cpu(rxdesc->addr),
1184 ALIGN(mdp->rx_buf_sz, 32), 1185 ALIGN(mdp->rx_buf_sz, 32),
1185 DMA_FROM_DEVICE); 1186 DMA_FROM_DEVICE);
1186 } 1187 }
1187 } 1188 }
1188 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; 1189 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1189 dma_free_coherent(NULL, ringsize, mdp->rx_ring, 1190 dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring,
1190 mdp->rx_desc_dma); 1191 mdp->rx_desc_dma);
1191 mdp->rx_ring = NULL; 1192 mdp->rx_ring = NULL;
1192 } 1193 }
@@ -1203,7 +1204,7 @@ static void sh_eth_ring_free(struct net_device *ndev)
1203 sh_eth_tx_free(ndev, false); 1204 sh_eth_tx_free(ndev, false);
1204 1205
1205 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; 1206 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1206 dma_free_coherent(NULL, ringsize, mdp->tx_ring, 1207 dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring,
1207 mdp->tx_desc_dma); 1208 mdp->tx_desc_dma);
1208 mdp->tx_ring = NULL; 1209 mdp->tx_ring = NULL;
1209 } 1210 }
@@ -1245,9 +1246,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
1245 1246
1246 /* The size of the buffer is a multiple of 32 bytes. */ 1247 /* The size of the buffer is a multiple of 32 bytes. */
1247 buf_len = ALIGN(mdp->rx_buf_sz, 32); 1248 buf_len = ALIGN(mdp->rx_buf_sz, 32);
1248 dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len, 1249 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,
1249 DMA_FROM_DEVICE); 1250 DMA_FROM_DEVICE);
1250 if (dma_mapping_error(&ndev->dev, dma_addr)) { 1251 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
1251 kfree_skb(skb); 1252 kfree_skb(skb);
1252 break; 1253 break;
1253 } 1254 }
@@ -1323,8 +1324,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
1323 1324
1324 /* Allocate all Rx descriptors. */ 1325 /* Allocate all Rx descriptors. */
1325 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; 1326 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1326 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma, 1327 mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize,
1327 GFP_KERNEL); 1328 &mdp->rx_desc_dma, GFP_KERNEL);
1328 if (!mdp->rx_ring) 1329 if (!mdp->rx_ring)
1329 goto ring_free; 1330 goto ring_free;
1330 1331
@@ -1332,8 +1333,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
1332 1333
1333 /* Allocate all Tx descriptors. */ 1334 /* Allocate all Tx descriptors. */
1334 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; 1335 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1335 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma, 1336 mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize,
1336 GFP_KERNEL); 1337 &mdp->tx_desc_dma, GFP_KERNEL);
1337 if (!mdp->tx_ring) 1338 if (!mdp->tx_ring)
1338 goto ring_free; 1339 goto ring_free;
1339 return 0; 1340 return 0;
@@ -1527,7 +1528,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1527 mdp->rx_skbuff[entry] = NULL; 1528 mdp->rx_skbuff[entry] = NULL;
1528 if (mdp->cd->rpadir) 1529 if (mdp->cd->rpadir)
1529 skb_reserve(skb, NET_IP_ALIGN); 1530 skb_reserve(skb, NET_IP_ALIGN);
1530 dma_unmap_single(&ndev->dev, dma_addr, 1531 dma_unmap_single(&mdp->pdev->dev, dma_addr,
1531 ALIGN(mdp->rx_buf_sz, 32), 1532 ALIGN(mdp->rx_buf_sz, 32),
1532 DMA_FROM_DEVICE); 1533 DMA_FROM_DEVICE);
1533 skb_put(skb, pkt_len); 1534 skb_put(skb, pkt_len);
@@ -1555,9 +1556,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1555 if (skb == NULL) 1556 if (skb == NULL)
1556 break; /* Better luck next round. */ 1557 break; /* Better luck next round. */
1557 sh_eth_set_receive_align(skb); 1558 sh_eth_set_receive_align(skb);
1558 dma_addr = dma_map_single(&ndev->dev, skb->data, 1559 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,
1559 buf_len, DMA_FROM_DEVICE); 1560 buf_len, DMA_FROM_DEVICE);
1560 if (dma_mapping_error(&ndev->dev, dma_addr)) { 1561 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
1561 kfree_skb(skb); 1562 kfree_skb(skb);
1562 break; 1563 break;
1563 } 1564 }
@@ -1891,6 +1892,16 @@ static int sh_eth_phy_init(struct net_device *ndev)
1891 return PTR_ERR(phydev); 1892 return PTR_ERR(phydev);
1892 } 1893 }
1893 1894
1895 /* mask with MAC supported features */
1896 if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) {
1897 int err = phy_set_max_speed(phydev, SPEED_100);
1898 if (err) {
1899 netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n");
1900 phy_disconnect(phydev);
1901 return err;
1902 }
1903 }
1904
1894 phy_attached_info(phydev); 1905 phy_attached_info(phydev);
1895 1906
1896 return 0; 1907 return 0;
@@ -2078,8 +2089,8 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
2078 add_reg(CSMR); 2089 add_reg(CSMR);
2079 if (cd->select_mii) 2090 if (cd->select_mii)
2080 add_reg(RMII_MII); 2091 add_reg(RMII_MII);
2081 add_reg(ARSTR);
2082 if (cd->tsu) { 2092 if (cd->tsu) {
2093 add_tsu_reg(ARSTR);
2083 add_tsu_reg(TSU_CTRST); 2094 add_tsu_reg(TSU_CTRST);
2084 add_tsu_reg(TSU_FWEN0); 2095 add_tsu_reg(TSU_FWEN0);
2085 add_tsu_reg(TSU_FWEN1); 2096 add_tsu_reg(TSU_FWEN1);
@@ -2441,9 +2452,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2441 /* soft swap. */ 2452 /* soft swap. */
2442 if (!mdp->cd->hw_swap) 2453 if (!mdp->cd->hw_swap)
2443 sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2); 2454 sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
2444 dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len, 2455 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,
2445 DMA_TO_DEVICE); 2456 DMA_TO_DEVICE);
2446 if (dma_mapping_error(&ndev->dev, dma_addr)) { 2457 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
2447 kfree_skb(skb); 2458 kfree_skb(skb);
2448 return NETDEV_TX_OK; 2459 return NETDEV_TX_OK;
2449 } 2460 }
@@ -3214,18 +3225,37 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
3214 /* ioremap the TSU registers */ 3225 /* ioremap the TSU registers */
3215 if (mdp->cd->tsu) { 3226 if (mdp->cd->tsu) {
3216 struct resource *rtsu; 3227 struct resource *rtsu;
3228
3217 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); 3229 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
3218 mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); 3230 if (!rtsu) {
3219 if (IS_ERR(mdp->tsu_addr)) { 3231 dev_err(&pdev->dev, "no TSU resource\n");
3220 ret = PTR_ERR(mdp->tsu_addr); 3232 ret = -ENODEV;
3233 goto out_release;
3234 }
3235 /* We can only request the TSU region for the first port
3236 * of the two sharing this TSU for the probe to succeed...
3237 */
3238 if (devno % 2 == 0 &&
3239 !devm_request_mem_region(&pdev->dev, rtsu->start,
3240 resource_size(rtsu),
3241 dev_name(&pdev->dev))) {
3242 dev_err(&pdev->dev, "can't request TSU resource.\n");
3243 ret = -EBUSY;
3244 goto out_release;
3245 }
3246 mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
3247 resource_size(rtsu));
3248 if (!mdp->tsu_addr) {
3249 dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
3250 ret = -ENOMEM;
3221 goto out_release; 3251 goto out_release;
3222 } 3252 }
3223 mdp->port = devno % 2; 3253 mdp->port = devno % 2;
3224 ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER; 3254 ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
3225 } 3255 }
3226 3256
3227 /* initialize first or needed device */ 3257 /* Need to init only the first port of the two sharing a TSU */
3228 if (!devno || pd->needs_init) { 3258 if (devno % 2 == 0) {
3229 if (mdp->cd->chip_reset) 3259 if (mdp->cd->chip_reset)
3230 mdp->cd->chip_reset(ndev); 3260 mdp->cd->chip_reset(ndev);
3231 3261
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 0ea7e16f2e6e..9937a2450e57 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -77,6 +77,7 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
77 } 77 }
78 78
79 if (buffer->flags & EFX_TX_BUF_SKB) { 79 if (buffer->flags & EFX_TX_BUF_SKB) {
80 EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
80 (*pkts_compl)++; 81 (*pkts_compl)++;
81 (*bytes_compl) += buffer->skb->len; 82 (*bytes_compl) += buffer->skb->len;
82 dev_consume_skb_any((struct sk_buff *)buffer->skb); 83 dev_consume_skb_any((struct sk_buff *)buffer->skb);
@@ -426,12 +427,14 @@ static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
426static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) 427static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
427{ 428{
428 struct efx_tx_buffer *buffer; 429 struct efx_tx_buffer *buffer;
430 unsigned int bytes_compl = 0;
431 unsigned int pkts_compl = 0;
429 432
430 /* Work backwards until we hit the original insert pointer value */ 433 /* Work backwards until we hit the original insert pointer value */
431 while (tx_queue->insert_count != tx_queue->write_count) { 434 while (tx_queue->insert_count != tx_queue->write_count) {
432 --tx_queue->insert_count; 435 --tx_queue->insert_count;
433 buffer = __efx_tx_queue_get_insert_buffer(tx_queue); 436 buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
434 efx_dequeue_buffer(tx_queue, buffer, NULL, NULL); 437 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
435 } 438 }
436} 439}
437 440
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index e1e5ac053760..ce2ea2d491ac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -409,7 +409,7 @@ struct stmmac_desc_ops {
409 /* get timestamp value */ 409 /* get timestamp value */
410 u64(*get_timestamp) (void *desc, u32 ats); 410 u64(*get_timestamp) (void *desc, u32 ats);
411 /* get rx timestamp status */ 411 /* get rx timestamp status */
412 int (*get_rx_timestamp_status) (void *desc, u32 ats); 412 int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats);
413 /* Display ring */ 413 /* Display ring */
414 void (*display_ring)(void *head, unsigned int size, bool rx); 414 void (*display_ring)(void *head, unsigned int size, bool rx);
415 /* set MSS via context descriptor */ 415 /* set MSS via context descriptor */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index e5ff734d4f9b..9eb7f65d8000 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -808,8 +808,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
808 val, reg); 808 val, reg);
809 809
810 if (gmac->variant->soc_has_internal_phy) { 810 if (gmac->variant->soc_has_internal_phy) {
811 if (of_property_read_bool(priv->plat->phy_node, 811 if (of_property_read_bool(node, "allwinner,leds-active-low"))
812 "allwinner,leds-active-low"))
813 reg |= H3_EPHY_LED_POL; 812 reg |= H3_EPHY_LED_POL;
814 else 813 else
815 reg &= ~H3_EPHY_LED_POL; 814 reg &= ~H3_EPHY_LED_POL;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 4b286e27c4ca..7e089bf906b4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -258,7 +258,8 @@ static int dwmac4_rx_check_timestamp(void *desc)
258 return ret; 258 return ret;
259} 259}
260 260
261static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats) 261static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
262 u32 ats)
262{ 263{
263 struct dma_desc *p = (struct dma_desc *)desc; 264 struct dma_desc *p = (struct dma_desc *)desc;
264 int ret = -EINVAL; 265 int ret = -EINVAL;
@@ -270,7 +271,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
270 271
271 /* Check if timestamp is OK from context descriptor */ 272 /* Check if timestamp is OK from context descriptor */
272 do { 273 do {
273 ret = dwmac4_rx_check_timestamp(desc); 274 ret = dwmac4_rx_check_timestamp(next_desc);
274 if (ret < 0) 275 if (ret < 0)
275 goto exit; 276 goto exit;
276 i++; 277 i++;
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 7546b3664113..2a828a312814 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -400,7 +400,8 @@ static u64 enh_desc_get_timestamp(void *desc, u32 ats)
400 return ns; 400 return ns;
401} 401}
402 402
403static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats) 403static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
404 u32 ats)
404{ 405{
405 if (ats) { 406 if (ats) {
406 struct dma_extended_desc *p = (struct dma_extended_desc *)desc; 407 struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index f817f8f36569..db4cee57bb24 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -265,7 +265,7 @@ static u64 ndesc_get_timestamp(void *desc, u32 ats)
265 return ns; 265 return ns;
266} 266}
267 267
268static int ndesc_get_rx_timestamp_status(void *desc, u32 ats) 268static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats)
269{ 269{
270 struct dma_desc *p = (struct dma_desc *)desc; 270 struct dma_desc *p = (struct dma_desc *)desc;
271 271
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 721b61655261..08c19ebd5306 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -34,6 +34,7 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
34{ 34{
35 u32 value = readl(ioaddr + PTP_TCR); 35 u32 value = readl(ioaddr + PTP_TCR);
36 unsigned long data; 36 unsigned long data;
37 u32 reg_value;
37 38
38 /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second 39 /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second
39 * formula = (1/ptp_clock) * 1000000000 40 * formula = (1/ptp_clock) * 1000000000
@@ -50,10 +51,11 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
50 51
51 data &= PTP_SSIR_SSINC_MASK; 52 data &= PTP_SSIR_SSINC_MASK;
52 53
54 reg_value = data;
53 if (gmac4) 55 if (gmac4)
54 data = data << GMAC4_PTP_SSIR_SSINC_SHIFT; 56 reg_value <<= GMAC4_PTP_SSIR_SSINC_SHIFT;
55 57
56 writel(data, ioaddr + PTP_SSIR); 58 writel(reg_value, ioaddr + PTP_SSIR);
57 59
58 return data; 60 return data;
59} 61}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index f63c2ddced3c..c0af0bc4e714 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -364,9 +364,15 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
364bool stmmac_eee_init(struct stmmac_priv *priv) 364bool stmmac_eee_init(struct stmmac_priv *priv)
365{ 365{
366 struct net_device *ndev = priv->dev; 366 struct net_device *ndev = priv->dev;
367 int interface = priv->plat->interface;
367 unsigned long flags; 368 unsigned long flags;
368 bool ret = false; 369 bool ret = false;
369 370
371 if ((interface != PHY_INTERFACE_MODE_MII) &&
372 (interface != PHY_INTERFACE_MODE_GMII) &&
373 !phy_interface_mode_is_rgmii(interface))
374 goto out;
375
370 /* Using PCS we cannot dial with the phy registers at this stage 376 /* Using PCS we cannot dial with the phy registers at this stage
371 * so we do not support extra feature like EEE. 377 * so we do not support extra feature like EEE.
372 */ 378 */
@@ -482,7 +488,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
482 desc = np; 488 desc = np;
483 489
484 /* Check if timestamp is available */ 490 /* Check if timestamp is available */
485 if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) { 491 if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) {
486 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); 492 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
487 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); 493 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
488 shhwtstamp = skb_hwtstamps(skb); 494 shhwtstamp = skb_hwtstamps(skb);
@@ -2588,6 +2594,7 @@ static int stmmac_open(struct net_device *dev)
2588 2594
2589 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); 2595 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2590 priv->rx_copybreak = STMMAC_RX_COPYBREAK; 2596 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2597 priv->mss = 0;
2591 2598
2592 ret = alloc_dma_desc_resources(priv); 2599 ret = alloc_dma_desc_resources(priv);
2593 if (ret < 0) { 2600 if (ret < 0) {
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index ed58c746e4af..f5a7eb22d0f5 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -715,7 +715,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
715 /* warning!!!! We are retrieving the virtual ptr in the sw_data 715 /* warning!!!! We are retrieving the virtual ptr in the sw_data
716 * field as a 32bit value. Will not work on 64bit machines 716 * field as a 32bit value. Will not work on 64bit machines
717 */ 717 */
718 page = (struct page *)GET_SW_DATA0(desc); 718 page = (struct page *)GET_SW_DATA0(ndesc);
719 719
720 if (likely(dma_buff && buf_len && page)) { 720 if (likely(dma_buff && buf_len && page)) {
721 dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE, 721 dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index b718a02a6bb6..64fda2e1040e 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -825,6 +825,13 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
825 if (IS_ERR(rt)) 825 if (IS_ERR(rt))
826 return PTR_ERR(rt); 826 return PTR_ERR(rt);
827 827
828 if (skb_dst(skb)) {
829 int mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr) -
830 GENEVE_BASE_HLEN - info->options_len - 14;
831
832 skb_dst_update_pmtu(skb, mtu);
833 }
834
828 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); 835 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
829 if (geneve->collect_md) { 836 if (geneve->collect_md) {
830 tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); 837 tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
@@ -864,6 +871,13 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
864 if (IS_ERR(dst)) 871 if (IS_ERR(dst))
865 return PTR_ERR(dst); 872 return PTR_ERR(dst);
866 873
874 if (skb_dst(skb)) {
875 int mtu = dst_mtu(dst) - sizeof(struct ipv6hdr) -
876 GENEVE_BASE_HLEN - info->options_len - 14;
877
878 skb_dst_update_pmtu(skb, mtu);
879 }
880
867 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); 881 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
868 if (geneve->collect_md) { 882 if (geneve->collect_md) {
869 prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); 883 prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 8483f03d5a41..1ab97d99b9ba 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1379,8 +1379,8 @@ static int rr_close(struct net_device *dev)
1379 rrpriv->info_dma); 1379 rrpriv->info_dma);
1380 rrpriv->info = NULL; 1380 rrpriv->info = NULL;
1381 1381
1382 free_irq(pdev->irq, dev);
1383 spin_unlock_irqrestore(&rrpriv->lock, flags); 1382 spin_unlock_irqrestore(&rrpriv->lock, flags);
1383 free_irq(pdev->irq, dev);
1384 1384
1385 return 0; 1385 return 0;
1386} 1386}
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 11c1e7950fe5..77cc4fbaeace 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -393,6 +393,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
393 .flowi4_oif = dev->ifindex, 393 .flowi4_oif = dev->ifindex,
394 .flowi4_tos = RT_TOS(ip4h->tos), 394 .flowi4_tos = RT_TOS(ip4h->tos),
395 .flowi4_flags = FLOWI_FLAG_ANYSRC, 395 .flowi4_flags = FLOWI_FLAG_ANYSRC,
396 .flowi4_mark = skb->mark,
396 .daddr = ip4h->daddr, 397 .daddr = ip4h->daddr,
397 .saddr = ip4h->saddr, 398 .saddr = ip4h->saddr,
398 }; 399 };
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a178c5efd33e..a0f2be81d52e 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1444,9 +1444,14 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
1444 return 0; 1444 return 0;
1445 1445
1446unregister_netdev: 1446unregister_netdev:
1447 /* macvlan_uninit would free the macvlan port */
1447 unregister_netdevice(dev); 1448 unregister_netdevice(dev);
1449 return err;
1448destroy_macvlan_port: 1450destroy_macvlan_port:
1449 if (create) 1451 /* the macvlan port may be freed by macvlan_uninit when fail to register.
1452 * so we destroy the macvlan port only when it's valid.
1453 */
1454 if (create && macvlan_port_get_rtnl(dev))
1450 macvlan_port_destroy(port->dev); 1455 macvlan_port_destroy(port->dev);
1451 return err; 1456 return err;
1452} 1457}
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 5f93e6add563..e911e4990b20 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -239,14 +239,10 @@ static int at803x_resume(struct phy_device *phydev)
239{ 239{
240 int value; 240 int value;
241 241
242 mutex_lock(&phydev->lock);
243
244 value = phy_read(phydev, MII_BMCR); 242 value = phy_read(phydev, MII_BMCR);
245 value &= ~(BMCR_PDOWN | BMCR_ISOLATE); 243 value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
246 phy_write(phydev, MII_BMCR, value); 244 phy_write(phydev, MII_BMCR, value);
247 245
248 mutex_unlock(&phydev->lock);
249
250 return 0; 246 return 0;
251} 247}
252 248
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 4d02b27df044..82104edca393 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -637,6 +637,10 @@ static int m88e1510_config_aneg(struct phy_device *phydev)
637 if (err < 0) 637 if (err < 0)
638 goto error; 638 goto error;
639 639
640 /* Do not touch the fiber page if we're in copper->sgmii mode */
641 if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
642 return 0;
643
640 /* Then the fiber link */ 644 /* Then the fiber link */
641 err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE); 645 err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
642 if (err < 0) 646 if (err < 0)
@@ -875,6 +879,8 @@ static int m88e1510_config_init(struct phy_device *phydev)
875 879
876 /* SGMII-to-Copper mode initialization */ 880 /* SGMII-to-Copper mode initialization */
877 if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { 881 if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
882 u32 pause;
883
878 /* Select page 18 */ 884 /* Select page 18 */
879 err = marvell_set_page(phydev, 18); 885 err = marvell_set_page(phydev, 18);
880 if (err < 0) 886 if (err < 0)
@@ -898,6 +904,16 @@ static int m88e1510_config_init(struct phy_device *phydev)
898 err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); 904 err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
899 if (err < 0) 905 if (err < 0)
900 return err; 906 return err;
907
908 /* There appears to be a bug in the 88e1512 when used in
909 * SGMII to copper mode, where the AN advertisment register
910 * clears the pause bits each time a negotiation occurs.
911 * This means we can never be truely sure what was advertised,
912 * so disable Pause support.
913 */
914 pause = SUPPORTED_Pause | SUPPORTED_Asym_Pause;
915 phydev->supported &= ~pause;
916 phydev->advertising &= ~pause;
901 } 917 }
902 918
903 return m88e1121_config_init(phydev); 919 return m88e1121_config_init(phydev);
@@ -2069,7 +2085,7 @@ static struct phy_driver marvell_drivers[] = {
2069 .flags = PHY_HAS_INTERRUPT, 2085 .flags = PHY_HAS_INTERRUPT,
2070 .probe = marvell_probe, 2086 .probe = marvell_probe,
2071 .config_init = &m88e1145_config_init, 2087 .config_init = &m88e1145_config_init,
2072 .config_aneg = &marvell_config_aneg, 2088 .config_aneg = &m88e1101_config_aneg,
2073 .read_status = &genphy_read_status, 2089 .read_status = &genphy_read_status,
2074 .ack_interrupt = &marvell_ack_interrupt, 2090 .ack_interrupt = &marvell_ack_interrupt,
2075 .config_intr = &marvell_config_intr, 2091 .config_intr = &marvell_config_intr,
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 135296508a7e..6425ce04d3f9 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -118,8 +118,10 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
118 118
119 data->regulator = devm_regulator_get(&pdev->dev, "phy"); 119 data->regulator = devm_regulator_get(&pdev->dev, "phy");
120 if (IS_ERR(data->regulator)) { 120 if (IS_ERR(data->regulator)) {
121 if (PTR_ERR(data->regulator) == -EPROBE_DEFER) 121 if (PTR_ERR(data->regulator) == -EPROBE_DEFER) {
122 return -EPROBE_DEFER; 122 ret = -EPROBE_DEFER;
123 goto err_out_free_mdiobus;
124 }
123 125
124 dev_info(&pdev->dev, "no regulator found\n"); 126 dev_info(&pdev->dev, "no regulator found\n");
125 data->regulator = NULL; 127 data->regulator = NULL;
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
index bfd3090fb055..07c6048200c6 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/phy/mdio-xgene.c
@@ -194,8 +194,11 @@ static int xgene_mdio_reset(struct xgene_mdio_pdata *pdata)
194 } 194 }
195 195
196 ret = xgene_enet_ecc_init(pdata); 196 ret = xgene_enet_ecc_init(pdata);
197 if (ret) 197 if (ret) {
198 if (pdata->dev->of_node)
199 clk_disable_unprepare(pdata->clk);
198 return ret; 200 return ret;
201 }
199 xgene_gmac_reset(pdata); 202 xgene_gmac_reset(pdata);
200 203
201 return 0; 204 return 0;
@@ -388,8 +391,10 @@ static int xgene_mdio_probe(struct platform_device *pdev)
388 return ret; 391 return ret;
389 392
390 mdio_bus = mdiobus_alloc(); 393 mdio_bus = mdiobus_alloc();
391 if (!mdio_bus) 394 if (!mdio_bus) {
392 return -ENOMEM; 395 ret = -ENOMEM;
396 goto out_clk;
397 }
393 398
394 mdio_bus->name = "APM X-Gene MDIO bus"; 399 mdio_bus->name = "APM X-Gene MDIO bus";
395 400
@@ -418,7 +423,7 @@ static int xgene_mdio_probe(struct platform_device *pdev)
418 mdio_bus->phy_mask = ~0; 423 mdio_bus->phy_mask = ~0;
419 ret = mdiobus_register(mdio_bus); 424 ret = mdiobus_register(mdio_bus);
420 if (ret) 425 if (ret)
421 goto out; 426 goto out_mdiobus;
422 427
423 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_HANDLE(dev), 1, 428 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_HANDLE(dev), 1,
424 acpi_register_phy, NULL, mdio_bus, NULL); 429 acpi_register_phy, NULL, mdio_bus, NULL);
@@ -426,16 +431,20 @@ static int xgene_mdio_probe(struct platform_device *pdev)
426 } 431 }
427 432
428 if (ret) 433 if (ret)
429 goto out; 434 goto out_mdiobus;
430 435
431 pdata->mdio_bus = mdio_bus; 436 pdata->mdio_bus = mdio_bus;
432 xgene_mdio_status = true; 437 xgene_mdio_status = true;
433 438
434 return 0; 439 return 0;
435 440
436out: 441out_mdiobus:
437 mdiobus_free(mdio_bus); 442 mdiobus_free(mdio_bus);
438 443
444out_clk:
445 if (dev->of_node)
446 clk_disable_unprepare(pdata->clk);
447
439 return ret; 448 return ret;
440} 449}
441 450
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2df7b62c1a36..54d00a1d2bef 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -270,6 +270,7 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
270 270
271 if (addr == mdiodev->addr) { 271 if (addr == mdiodev->addr) {
272 dev->of_node = child; 272 dev->of_node = child;
273 dev->fwnode = of_fwnode_handle(child);
273 return; 274 return;
274 } 275 }
275 } 276 }
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index 1ea69b7585d9..842eb871a6e3 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -22,6 +22,7 @@
22#include <linux/ethtool.h> 22#include <linux/ethtool.h>
23#include <linux/phy.h> 23#include <linux/phy.h>
24#include <linux/netdevice.h> 24#include <linux/netdevice.h>
25#include <linux/bitfield.h>
25 26
26static int meson_gxl_config_init(struct phy_device *phydev) 27static int meson_gxl_config_init(struct phy_device *phydev)
27{ 28{
@@ -50,6 +51,77 @@ static int meson_gxl_config_init(struct phy_device *phydev)
50 return 0; 51 return 0;
51} 52}
52 53
54/* This function is provided to cope with the possible failures of this phy
55 * during aneg process. When aneg fails, the PHY reports that aneg is done
56 * but the value found in MII_LPA is wrong:
57 * - Early failures: MII_LPA is just 0x0001. if MII_EXPANSION reports that
58 * the link partner (LP) supports aneg but the LP never acked our base
59 * code word, it is likely that we never sent it to begin with.
60 * - Late failures: MII_LPA is filled with a value which seems to make sense
61 * but it actually is not what the LP is advertising. It seems that we
62 * can detect this using a magic bit in the WOL bank (reg 12 - bit 12).
63 * If this particular bit is not set when aneg is reported being done,
64 * it means MII_LPA is likely to be wrong.
65 *
66 * In both case, forcing a restart of the aneg process solve the problem.
67 * When this failure happens, the first retry is usually successful but,
68 * in some cases, it may take up to 6 retries to get a decent result
69 */
70static int meson_gxl_read_status(struct phy_device *phydev)
71{
72 int ret, wol, lpa, exp;
73
74 if (phydev->autoneg == AUTONEG_ENABLE) {
75 ret = genphy_aneg_done(phydev);
76 if (ret < 0)
77 return ret;
78 else if (!ret)
79 goto read_status_continue;
80
81 /* Need to access WOL bank, make sure the access is open */
82 ret = phy_write(phydev, 0x14, 0x0000);
83 if (ret)
84 return ret;
85 ret = phy_write(phydev, 0x14, 0x0400);
86 if (ret)
87 return ret;
88 ret = phy_write(phydev, 0x14, 0x0000);
89 if (ret)
90 return ret;
91 ret = phy_write(phydev, 0x14, 0x0400);
92 if (ret)
93 return ret;
94
95 /* Request LPI_STATUS WOL register */
96 ret = phy_write(phydev, 0x14, 0x8D80);
97 if (ret)
98 return ret;
99
100 /* Read LPI_STATUS value */
101 wol = phy_read(phydev, 0x15);
102 if (wol < 0)
103 return wol;
104
105 lpa = phy_read(phydev, MII_LPA);
106 if (lpa < 0)
107 return lpa;
108
109 exp = phy_read(phydev, MII_EXPANSION);
110 if (exp < 0)
111 return exp;
112
113 if (!(wol & BIT(12)) ||
114 ((exp & EXPANSION_NWAY) && !(lpa & LPA_LPACK))) {
115 /* Looks like aneg failed after all */
116 phydev_dbg(phydev, "LPA corruption - aneg restart\n");
117 return genphy_restart_aneg(phydev);
118 }
119 }
120
121read_status_continue:
122 return genphy_read_status(phydev);
123}
124
53static struct phy_driver meson_gxl_phy[] = { 125static struct phy_driver meson_gxl_phy[] = {
54 { 126 {
55 .phy_id = 0x01814400, 127 .phy_id = 0x01814400,
@@ -60,7 +132,7 @@ static struct phy_driver meson_gxl_phy[] = {
60 .config_init = meson_gxl_config_init, 132 .config_init = meson_gxl_config_init,
61 .config_aneg = genphy_config_aneg, 133 .config_aneg = genphy_config_aneg,
62 .aneg_done = genphy_aneg_done, 134 .aneg_done = genphy_aneg_done,
63 .read_status = genphy_read_status, 135 .read_status = meson_gxl_read_status,
64 .suspend = genphy_suspend, 136 .suspend = genphy_suspend,
65 .resume = genphy_resume, 137 .resume = genphy_resume,
66 }, 138 },
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index fdb43dd9b5cd..422ff6333c52 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -496,16 +496,18 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev,
496 return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval); 496 return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
497} 497}
498 498
499/* Center KSZ9031RNX FLP timing at 16ms. */
499static int ksz9031_center_flp_timing(struct phy_device *phydev) 500static int ksz9031_center_flp_timing(struct phy_device *phydev)
500{ 501{
501 int result; 502 int result;
502 503
503 /* Center KSZ9031RNX FLP timing at 16ms. */
504 result = ksz9031_extended_write(phydev, OP_DATA, 0, 504 result = ksz9031_extended_write(phydev, OP_DATA, 0,
505 MII_KSZ9031RN_FLP_BURST_TX_HI, 0x0006); 505 MII_KSZ9031RN_FLP_BURST_TX_HI, 0x0006);
506 if (result)
507 return result;
508
506 result = ksz9031_extended_write(phydev, OP_DATA, 0, 509 result = ksz9031_extended_write(phydev, OP_DATA, 0,
507 MII_KSZ9031RN_FLP_BURST_TX_LO, 0x1A80); 510 MII_KSZ9031RN_FLP_BURST_TX_LO, 0x1A80);
508
509 if (result) 511 if (result)
510 return result; 512 return result;
511 513
@@ -622,6 +624,7 @@ static int ksz9031_read_status(struct phy_device *phydev)
622 phydev->link = 0; 624 phydev->link = 0;
623 if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev)) 625 if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
624 phydev->drv->config_intr(phydev); 626 phydev->drv->config_intr(phydev);
627 return genphy_config_aneg(phydev);
625 } 628 }
626 629
627 return 0; 630 return 0;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 2b1e67bc1e73..ed10d1fc8f59 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -828,7 +828,6 @@ EXPORT_SYMBOL(phy_stop);
828 */ 828 */
829void phy_start(struct phy_device *phydev) 829void phy_start(struct phy_device *phydev)
830{ 830{
831 bool do_resume = false;
832 int err = 0; 831 int err = 0;
833 832
834 mutex_lock(&phydev->lock); 833 mutex_lock(&phydev->lock);
@@ -841,6 +840,9 @@ void phy_start(struct phy_device *phydev)
841 phydev->state = PHY_UP; 840 phydev->state = PHY_UP;
842 break; 841 break;
843 case PHY_HALTED: 842 case PHY_HALTED:
843 /* if phy was suspended, bring the physical link up again */
844 phy_resume(phydev);
845
844 /* make sure interrupts are re-enabled for the PHY */ 846 /* make sure interrupts are re-enabled for the PHY */
845 if (phydev->irq != PHY_POLL) { 847 if (phydev->irq != PHY_POLL) {
846 err = phy_enable_interrupts(phydev); 848 err = phy_enable_interrupts(phydev);
@@ -849,17 +851,12 @@ void phy_start(struct phy_device *phydev)
849 } 851 }
850 852
851 phydev->state = PHY_RESUMING; 853 phydev->state = PHY_RESUMING;
852 do_resume = true;
853 break; 854 break;
854 default: 855 default:
855 break; 856 break;
856 } 857 }
857 mutex_unlock(&phydev->lock); 858 mutex_unlock(&phydev->lock);
858 859
859 /* if phy was suspended, bring the physical link up again */
860 if (do_resume)
861 phy_resume(phydev);
862
863 phy_trigger_machine(phydev, true); 860 phy_trigger_machine(phydev, true);
864} 861}
865EXPORT_SYMBOL(phy_start); 862EXPORT_SYMBOL(phy_start);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 67f25ac29025..b15b31ca2618 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -135,7 +135,9 @@ static int mdio_bus_phy_resume(struct device *dev)
135 if (!mdio_bus_phy_may_suspend(phydev)) 135 if (!mdio_bus_phy_may_suspend(phydev))
136 goto no_resume; 136 goto no_resume;
137 137
138 mutex_lock(&phydev->lock);
138 ret = phy_resume(phydev); 139 ret = phy_resume(phydev);
140 mutex_unlock(&phydev->lock);
139 if (ret < 0) 141 if (ret < 0)
140 return ret; 142 return ret;
141 143
@@ -1026,7 +1028,9 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
1026 if (err) 1028 if (err)
1027 goto error; 1029 goto error;
1028 1030
1031 mutex_lock(&phydev->lock);
1029 phy_resume(phydev); 1032 phy_resume(phydev);
1033 mutex_unlock(&phydev->lock);
1030 phy_led_triggers_register(phydev); 1034 phy_led_triggers_register(phydev);
1031 1035
1032 return err; 1036 return err;
@@ -1157,6 +1161,8 @@ int phy_resume(struct phy_device *phydev)
1157 struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); 1161 struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
1158 int ret = 0; 1162 int ret = 0;
1159 1163
1164 WARN_ON(!mutex_is_locked(&phydev->lock));
1165
1160 if (phydev->drv && phydrv->resume) 1166 if (phydev->drv && phydrv->resume)
1161 ret = phydrv->resume(phydev); 1167 ret = phydrv->resume(phydev);
1162 1168
@@ -1639,13 +1645,9 @@ int genphy_resume(struct phy_device *phydev)
1639{ 1645{
1640 int value; 1646 int value;
1641 1647
1642 mutex_lock(&phydev->lock);
1643
1644 value = phy_read(phydev, MII_BMCR); 1648 value = phy_read(phydev, MII_BMCR);
1645 phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); 1649 phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
1646 1650
1647 mutex_unlock(&phydev->lock);
1648
1649 return 0; 1651 return 0;
1650} 1652}
1651EXPORT_SYMBOL(genphy_resume); 1653EXPORT_SYMBOL(genphy_resume);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index e3bbc70372d3..249ce5cbea22 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -526,6 +526,7 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
526 pl->link_config.pause = MLO_PAUSE_AN; 526 pl->link_config.pause = MLO_PAUSE_AN;
527 pl->link_config.speed = SPEED_UNKNOWN; 527 pl->link_config.speed = SPEED_UNKNOWN;
528 pl->link_config.duplex = DUPLEX_UNKNOWN; 528 pl->link_config.duplex = DUPLEX_UNKNOWN;
529 pl->link_config.an_enabled = true;
529 pl->ops = ops; 530 pl->ops = ops;
530 __set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); 531 __set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
531 532
@@ -773,6 +774,7 @@ void phylink_stop(struct phylink *pl)
773 sfp_upstream_stop(pl->sfp_bus); 774 sfp_upstream_stop(pl->sfp_bus);
774 775
775 set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); 776 set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
777 queue_work(system_power_efficient_wq, &pl->resolve);
776 flush_work(&pl->resolve); 778 flush_work(&pl->resolve);
777} 779}
778EXPORT_SYMBOL_GPL(phylink_stop); 780EXPORT_SYMBOL_GPL(phylink_stop);
@@ -950,6 +952,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
950 mutex_lock(&pl->state_mutex); 952 mutex_lock(&pl->state_mutex);
951 /* Configure the MAC to match the new settings */ 953 /* Configure the MAC to match the new settings */
952 linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising); 954 linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising);
955 pl->link_config.interface = config.interface;
953 pl->link_config.speed = our_kset.base.speed; 956 pl->link_config.speed = our_kset.base.speed;
954 pl->link_config.duplex = our_kset.base.duplex; 957 pl->link_config.duplex = our_kset.base.duplex;
955 pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE; 958 pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE;
@@ -1293,6 +1296,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
1293 switch (cmd) { 1296 switch (cmd) {
1294 case SIOCGMIIPHY: 1297 case SIOCGMIIPHY:
1295 mii->phy_id = pl->phydev->mdio.addr; 1298 mii->phy_id = pl->phydev->mdio.addr;
1299 /* fall through */
1296 1300
1297 case SIOCGMIIREG: 1301 case SIOCGMIIREG:
1298 ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num); 1302 ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num);
@@ -1315,6 +1319,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
1315 switch (cmd) { 1319 switch (cmd) {
1316 case SIOCGMIIPHY: 1320 case SIOCGMIIPHY:
1317 mii->phy_id = 0; 1321 mii->phy_id = 0;
1322 /* fall through */
1318 1323
1319 case SIOCGMIIREG: 1324 case SIOCGMIIREG:
1320 ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num); 1325 ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num);
@@ -1426,9 +1431,8 @@ static void phylink_sfp_link_down(void *upstream)
1426 WARN_ON(!lockdep_rtnl_is_held()); 1431 WARN_ON(!lockdep_rtnl_is_held());
1427 1432
1428 set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); 1433 set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
1434 queue_work(system_power_efficient_wq, &pl->resolve);
1429 flush_work(&pl->resolve); 1435 flush_work(&pl->resolve);
1430
1431 netif_carrier_off(pl->netdev);
1432} 1436}
1433 1437
1434static void phylink_sfp_link_up(void *upstream) 1438static void phylink_sfp_link_up(void *upstream)
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 8a1b1f4c1b7c..ab64a142b832 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -356,7 +356,8 @@ EXPORT_SYMBOL_GPL(sfp_register_upstream);
356void sfp_unregister_upstream(struct sfp_bus *bus) 356void sfp_unregister_upstream(struct sfp_bus *bus)
357{ 357{
358 rtnl_lock(); 358 rtnl_lock();
359 sfp_unregister_bus(bus); 359 if (bus->sfp)
360 sfp_unregister_bus(bus);
360 bus->upstream = NULL; 361 bus->upstream = NULL;
361 bus->netdev = NULL; 362 bus->netdev = NULL;
362 rtnl_unlock(); 363 rtnl_unlock();
@@ -459,7 +460,8 @@ EXPORT_SYMBOL_GPL(sfp_register_socket);
459void sfp_unregister_socket(struct sfp_bus *bus) 460void sfp_unregister_socket(struct sfp_bus *bus)
460{ 461{
461 rtnl_lock(); 462 rtnl_lock();
462 sfp_unregister_bus(bus); 463 if (bus->netdev)
464 sfp_unregister_bus(bus);
463 bus->sfp_dev = NULL; 465 bus->sfp_dev = NULL;
464 bus->sfp = NULL; 466 bus->sfp = NULL;
465 bus->socket_ops = NULL; 467 bus->socket_ops = NULL;
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index e381811e5f11..9dfc1c4c954f 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -351,12 +351,13 @@ static void sfp_sm_link_check_los(struct sfp *sfp)
351{ 351{
352 unsigned int los = sfp->state & SFP_F_LOS; 352 unsigned int los = sfp->state & SFP_F_LOS;
353 353
354 /* FIXME: what if neither SFP_OPTIONS_LOS_INVERTED nor 354 /* If neither SFP_OPTIONS_LOS_INVERTED nor SFP_OPTIONS_LOS_NORMAL
355 * SFP_OPTIONS_LOS_NORMAL are set? For now, we assume 355 * are set, we assume that no LOS signal is available.
356 * the same as SFP_OPTIONS_LOS_NORMAL set.
357 */ 356 */
358 if (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED) 357 if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED))
359 los ^= SFP_F_LOS; 358 los ^= SFP_F_LOS;
359 else if (!(sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL)))
360 los = 0;
360 361
361 if (los) 362 if (los)
362 sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0); 363 sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
@@ -364,6 +365,22 @@ static void sfp_sm_link_check_los(struct sfp *sfp)
364 sfp_sm_link_up(sfp); 365 sfp_sm_link_up(sfp);
365} 366}
366 367
368static bool sfp_los_event_active(struct sfp *sfp, unsigned int event)
369{
370 return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
371 event == SFP_E_LOS_LOW) ||
372 (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
373 event == SFP_E_LOS_HIGH);
374}
375
376static bool sfp_los_event_inactive(struct sfp *sfp, unsigned int event)
377{
378 return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
379 event == SFP_E_LOS_HIGH) ||
380 (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
381 event == SFP_E_LOS_LOW);
382}
383
367static void sfp_sm_fault(struct sfp *sfp, bool warn) 384static void sfp_sm_fault(struct sfp *sfp, bool warn)
368{ 385{
369 if (sfp->sm_retries && !--sfp->sm_retries) { 386 if (sfp->sm_retries && !--sfp->sm_retries) {
@@ -470,6 +487,11 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
470 return -EINVAL; 487 return -EINVAL;
471 } 488 }
472 489
490 /* If the module requires address swap mode, warn about it */
491 if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)
492 dev_warn(sfp->dev,
493 "module address swap to access page 0xA2 is not supported.\n");
494
473 return sfp_module_insert(sfp->sfp_bus, &sfp->id); 495 return sfp_module_insert(sfp->sfp_bus, &sfp->id);
474} 496}
475 497
@@ -581,9 +603,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
581 case SFP_S_WAIT_LOS: 603 case SFP_S_WAIT_LOS:
582 if (event == SFP_E_TX_FAULT) 604 if (event == SFP_E_TX_FAULT)
583 sfp_sm_fault(sfp, true); 605 sfp_sm_fault(sfp, true);
584 else if (event == 606 else if (sfp_los_event_inactive(sfp, event))
585 (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ?
586 SFP_E_LOS_HIGH : SFP_E_LOS_LOW))
587 sfp_sm_link_up(sfp); 607 sfp_sm_link_up(sfp);
588 break; 608 break;
589 609
@@ -591,9 +611,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
591 if (event == SFP_E_TX_FAULT) { 611 if (event == SFP_E_TX_FAULT) {
592 sfp_sm_link_down(sfp); 612 sfp_sm_link_down(sfp);
593 sfp_sm_fault(sfp, true); 613 sfp_sm_fault(sfp, true);
594 } else if (event == 614 } else if (sfp_los_event_active(sfp, event)) {
595 (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ?
596 SFP_E_LOS_LOW : SFP_E_LOS_HIGH)) {
597 sfp_sm_link_down(sfp); 615 sfp_sm_link_down(sfp);
598 sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0); 616 sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
599 } 617 }
@@ -639,7 +657,8 @@ static int sfp_module_info(struct sfp *sfp, struct ethtool_modinfo *modinfo)
639{ 657{
640 /* locking... and check module is present */ 658 /* locking... and check module is present */
641 659
642 if (sfp->id.ext.sff8472_compliance) { 660 if (sfp->id.ext.sff8472_compliance &&
661 !(sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)) {
643 modinfo->type = ETH_MODULE_SFF_8472; 662 modinfo->type = ETH_MODULE_SFF_8472;
644 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 663 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
645 } else { 664 } else {
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index d8e5747ff4e3..264d4af0bf69 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1006,17 +1006,18 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
1006 if (!ifname_is_set) 1006 if (!ifname_is_set)
1007 snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index); 1007 snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
1008 1008
1009 mutex_unlock(&pn->all_ppp_mutex);
1010
1009 ret = register_netdevice(ppp->dev); 1011 ret = register_netdevice(ppp->dev);
1010 if (ret < 0) 1012 if (ret < 0)
1011 goto err_unit; 1013 goto err_unit;
1012 1014
1013 atomic_inc(&ppp_unit_count); 1015 atomic_inc(&ppp_unit_count);
1014 1016
1015 mutex_unlock(&pn->all_ppp_mutex);
1016
1017 return 0; 1017 return 0;
1018 1018
1019err_unit: 1019err_unit:
1020 mutex_lock(&pn->all_ppp_mutex);
1020 unit_put(&pn->units_idr, ppp->file.index); 1021 unit_put(&pn->units_idr, ppp->file.index);
1021err: 1022err:
1022 mutex_unlock(&pn->all_ppp_mutex); 1023 mutex_unlock(&pn->all_ppp_mutex);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 4e1da1645b15..5aa59f41bf8c 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -842,6 +842,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
842 struct pppoe_hdr *ph; 842 struct pppoe_hdr *ph;
843 struct net_device *dev; 843 struct net_device *dev;
844 char *start; 844 char *start;
845 int hlen;
845 846
846 lock_sock(sk); 847 lock_sock(sk);
847 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) { 848 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
@@ -860,16 +861,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
860 if (total_len > (dev->mtu + dev->hard_header_len)) 861 if (total_len > (dev->mtu + dev->hard_header_len))
861 goto end; 862 goto end;
862 863
863 864 hlen = LL_RESERVED_SPACE(dev);
864 skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32, 865 skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
865 0, GFP_KERNEL); 866 dev->needed_tailroom, 0, GFP_KERNEL);
866 if (!skb) { 867 if (!skb) {
867 error = -ENOMEM; 868 error = -ENOMEM;
868 goto end; 869 goto end;
869 } 870 }
870 871
871 /* Reserve space for headers. */ 872 /* Reserve space for headers. */
872 skb_reserve(skb, dev->hard_header_len); 873 skb_reserve(skb, hlen);
873 skb_reset_network_header(skb); 874 skb_reset_network_header(skb);
874 875
875 skb->dev = dev; 876 skb->dev = dev;
@@ -930,7 +931,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
930 /* Copy the data if there is no space for the header or if it's 931 /* Copy the data if there is no space for the header or if it's
931 * read-only. 932 * read-only.
932 */ 933 */
933 if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len)) 934 if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)))
934 goto abort; 935 goto abort;
935 936
936 __skb_push(skb, sizeof(*ph)); 937 __skb_push(skb, sizeof(*ph));
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index e9489b88407c..0a886fda0129 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -829,8 +829,11 @@ static ssize_t tap_do_read(struct tap_queue *q,
829 DEFINE_WAIT(wait); 829 DEFINE_WAIT(wait);
830 ssize_t ret = 0; 830 ssize_t ret = 0;
831 831
832 if (!iov_iter_count(to)) 832 if (!iov_iter_count(to)) {
833 if (skb)
834 kfree_skb(skb);
833 return 0; 835 return 0;
836 }
834 837
835 if (skb) 838 if (skb)
836 goto put; 839 goto put;
@@ -1154,11 +1157,14 @@ static int tap_recvmsg(struct socket *sock, struct msghdr *m,
1154 size_t total_len, int flags) 1157 size_t total_len, int flags)
1155{ 1158{
1156 struct tap_queue *q = container_of(sock, struct tap_queue, sock); 1159 struct tap_queue *q = container_of(sock, struct tap_queue, sock);
1160 struct sk_buff *skb = m->msg_control;
1157 int ret; 1161 int ret;
1158 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) 1162 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
1163 if (skb)
1164 kfree_skb(skb);
1159 return -EINVAL; 1165 return -EINVAL;
1160 ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, 1166 }
1161 m->msg_control); 1167 ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
1162 if (ret > total_len) { 1168 if (ret > total_len) {
1163 m->msg_flags |= MSG_TRUNC; 1169 m->msg_flags |= MSG_TRUNC;
1164 ret = flags & MSG_TRUNC ? ret : total_len; 1170 ret = flags & MSG_TRUNC ? ret : total_len;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 95749006d687..a8ec589d1359 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -611,6 +611,14 @@ static void tun_queue_purge(struct tun_file *tfile)
611 skb_queue_purge(&tfile->sk.sk_error_queue); 611 skb_queue_purge(&tfile->sk.sk_error_queue);
612} 612}
613 613
614static void tun_cleanup_tx_array(struct tun_file *tfile)
615{
616 if (tfile->tx_array.ring.queue) {
617 skb_array_cleanup(&tfile->tx_array);
618 memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
619 }
620}
621
614static void __tun_detach(struct tun_file *tfile, bool clean) 622static void __tun_detach(struct tun_file *tfile, bool clean)
615{ 623{
616 struct tun_file *ntfile; 624 struct tun_file *ntfile;
@@ -657,8 +665,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
657 tun->dev->reg_state == NETREG_REGISTERED) 665 tun->dev->reg_state == NETREG_REGISTERED)
658 unregister_netdevice(tun->dev); 666 unregister_netdevice(tun->dev);
659 } 667 }
660 if (tun) 668 tun_cleanup_tx_array(tfile);
661 skb_array_cleanup(&tfile->tx_array);
662 sock_put(&tfile->sk); 669 sock_put(&tfile->sk);
663 } 670 }
664} 671}
@@ -700,11 +707,13 @@ static void tun_detach_all(struct net_device *dev)
700 /* Drop read queue */ 707 /* Drop read queue */
701 tun_queue_purge(tfile); 708 tun_queue_purge(tfile);
702 sock_put(&tfile->sk); 709 sock_put(&tfile->sk);
710 tun_cleanup_tx_array(tfile);
703 } 711 }
704 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { 712 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
705 tun_enable_queue(tfile); 713 tun_enable_queue(tfile);
706 tun_queue_purge(tfile); 714 tun_queue_purge(tfile);
707 sock_put(&tfile->sk); 715 sock_put(&tfile->sk);
716 tun_cleanup_tx_array(tfile);
708 } 717 }
709 BUG_ON(tun->numdisabled != 0); 718 BUG_ON(tun->numdisabled != 0);
710 719
@@ -1952,8 +1961,11 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
1952 1961
1953 tun_debug(KERN_INFO, tun, "tun_do_read\n"); 1962 tun_debug(KERN_INFO, tun, "tun_do_read\n");
1954 1963
1955 if (!iov_iter_count(to)) 1964 if (!iov_iter_count(to)) {
1965 if (skb)
1966 kfree_skb(skb);
1956 return 0; 1967 return 0;
1968 }
1957 1969
1958 if (!skb) { 1970 if (!skb) {
1959 /* Read frames from ring */ 1971 /* Read frames from ring */
@@ -2069,22 +2081,24 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
2069{ 2081{
2070 struct tun_file *tfile = container_of(sock, struct tun_file, socket); 2082 struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2071 struct tun_struct *tun = tun_get(tfile); 2083 struct tun_struct *tun = tun_get(tfile);
2084 struct sk_buff *skb = m->msg_control;
2072 int ret; 2085 int ret;
2073 2086
2074 if (!tun) 2087 if (!tun) {
2075 return -EBADFD; 2088 ret = -EBADFD;
2089 goto out_free_skb;
2090 }
2076 2091
2077 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { 2092 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
2078 ret = -EINVAL; 2093 ret = -EINVAL;
2079 goto out; 2094 goto out_put_tun;
2080 } 2095 }
2081 if (flags & MSG_ERRQUEUE) { 2096 if (flags & MSG_ERRQUEUE) {
2082 ret = sock_recv_errqueue(sock->sk, m, total_len, 2097 ret = sock_recv_errqueue(sock->sk, m, total_len,
2083 SOL_PACKET, TUN_TX_TIMESTAMP); 2098 SOL_PACKET, TUN_TX_TIMESTAMP);
2084 goto out; 2099 goto out;
2085 } 2100 }
2086 ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, 2101 ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb);
2087 m->msg_control);
2088 if (ret > (ssize_t)total_len) { 2102 if (ret > (ssize_t)total_len) {
2089 m->msg_flags |= MSG_TRUNC; 2103 m->msg_flags |= MSG_TRUNC;
2090 ret = flags & MSG_TRUNC ? ret : total_len; 2104 ret = flags & MSG_TRUNC ? ret : total_len;
@@ -2092,6 +2106,13 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
2092out: 2106out:
2093 tun_put(tun); 2107 tun_put(tun);
2094 return ret; 2108 return ret;
2109
2110out_put_tun:
2111 tun_put(tun);
2112out_free_skb:
2113 if (skb)
2114 kfree_skb(skb);
2115 return ret;
2095} 2116}
2096 2117
2097static int tun_peek_len(struct socket *sock) 2118static int tun_peek_len(struct socket *sock)
@@ -2839,6 +2860,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
2839 2860
2840 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); 2861 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
2841 2862
2863 memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
2864
2842 return 0; 2865 return 0;
2843} 2866}
2844 2867
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 94c7804903c4..ec56ff29aac4 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2396,6 +2396,7 @@ static int lan78xx_reset(struct lan78xx_net *dev)
2396 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE; 2396 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2397 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; 2397 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2398 dev->rx_qlen = 4; 2398 dev->rx_qlen = 4;
2399 dev->tx_qlen = 4;
2399 } 2400 }
2400 2401
2401 ret = lan78xx_write_reg(dev, BURST_CAP, buf); 2402 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index c750cf7c042b..728819feab44 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -261,9 +261,11 @@ static void qmi_wwan_netdev_setup(struct net_device *net)
261 net->hard_header_len = 0; 261 net->hard_header_len = 0;
262 net->addr_len = 0; 262 net->addr_len = 0;
263 net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 263 net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
264 set_bit(EVENT_NO_IP_ALIGN, &dev->flags);
264 netdev_dbg(net, "mode: raw IP\n"); 265 netdev_dbg(net, "mode: raw IP\n");
265 } else if (!net->header_ops) { /* don't bother if already set */ 266 } else if (!net->header_ops) { /* don't bother if already set */
266 ether_setup(net); 267 ether_setup(net);
268 clear_bit(EVENT_NO_IP_ALIGN, &dev->flags);
267 netdev_dbg(net, "mode: Ethernet\n"); 269 netdev_dbg(net, "mode: Ethernet\n");
268 } 270 }
269 271
@@ -1098,6 +1100,7 @@ static const struct usb_device_id products[] = {
1098 {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, 1100 {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
1099 {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, 1101 {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
1100 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, 1102 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
1103 {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
1101 {QMI_FIXED_INTF(0x0846, 0x68a2, 8)}, 1104 {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
1102 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ 1105 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
1103 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ 1106 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
@@ -1202,12 +1205,14 @@ static const struct usb_device_id products[] = {
1202 {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ 1205 {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
1203 {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ 1206 {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
1204 {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */ 1207 {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */
1208 {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
1205 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 1209 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
1206 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 1210 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
1207 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 1211 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
1208 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 1212 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
1209 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ 1213 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
1210 {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ 1214 {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
1215 {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
1211 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 1216 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
1212 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ 1217 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
1213 {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */ 1218 {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d51d9abf7986..0657203ffb91 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -606,6 +606,7 @@ enum rtl8152_flags {
606 PHY_RESET, 606 PHY_RESET,
607 SCHEDULE_NAPI, 607 SCHEDULE_NAPI,
608 GREEN_ETHERNET, 608 GREEN_ETHERNET,
609 DELL_TB_RX_AGG_BUG,
609}; 610};
610 611
611/* Define these values to match your device */ 612/* Define these values to match your device */
@@ -1798,6 +1799,9 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
1798 dev_kfree_skb_any(skb); 1799 dev_kfree_skb_any(skb);
1799 1800
1800 remain = agg_buf_sz - (int)(tx_agg_align(tx_data) - agg->head); 1801 remain = agg_buf_sz - (int)(tx_agg_align(tx_data) - agg->head);
1802
1803 if (test_bit(DELL_TB_RX_AGG_BUG, &tp->flags))
1804 break;
1801 } 1805 }
1802 1806
1803 if (!skb_queue_empty(&skb_head)) { 1807 if (!skb_queue_empty(&skb_head)) {
@@ -4133,6 +4137,9 @@ static void r8153_init(struct r8152 *tp)
4133 /* rx aggregation */ 4137 /* rx aggregation */
4134 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); 4138 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
4135 ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); 4139 ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
4140 if (test_bit(DELL_TB_RX_AGG_BUG, &tp->flags))
4141 ocp_data |= RX_AGG_DISABLE;
4142
4136 ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); 4143 ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
4137 4144
4138 rtl_tally_reset(tp); 4145 rtl_tally_reset(tp);
@@ -5207,6 +5214,12 @@ static int rtl8152_probe(struct usb_interface *intf,
5207 netdev->hw_features &= ~NETIF_F_RXCSUM; 5214 netdev->hw_features &= ~NETIF_F_RXCSUM;
5208 } 5215 }
5209 5216
5217 if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 &&
5218 udev->serial && !strcmp(udev->serial, "000001000000")) {
5219 dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation");
5220 set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
5221 }
5222
5210 netdev->ethtool_ops = &ops; 5223 netdev->ethtool_ops = &ops;
5211 netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE); 5224 netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
5212 5225
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 80348b6a8646..8a22ff67b026 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -457,12 +457,10 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
457void usbnet_defer_kevent (struct usbnet *dev, int work) 457void usbnet_defer_kevent (struct usbnet *dev, int work)
458{ 458{
459 set_bit (work, &dev->flags); 459 set_bit (work, &dev->flags);
460 if (!schedule_work (&dev->kevent)) { 460 if (!schedule_work (&dev->kevent))
461 if (net_ratelimit()) 461 netdev_dbg(dev->net, "kevent %d may have been dropped\n", work);
462 netdev_err(dev->net, "kevent %d may have been dropped\n", work); 462 else
463 } else {
464 netdev_dbg(dev->net, "kevent %d scheduled\n", work); 463 netdev_dbg(dev->net, "kevent %d scheduled\n", work);
465 }
466} 464}
467EXPORT_SYMBOL_GPL(usbnet_defer_kevent); 465EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
468 466
@@ -484,7 +482,10 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
484 return -ENOLINK; 482 return -ENOLINK;
485 } 483 }
486 484
487 skb = __netdev_alloc_skb_ip_align(dev->net, size, flags); 485 if (test_bit(EVENT_NO_IP_ALIGN, &dev->flags))
486 skb = __netdev_alloc_skb(dev->net, size, flags);
487 else
488 skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
488 if (!skb) { 489 if (!skb) {
489 netif_dbg(dev, rx_err, dev->net, "no rx skb\n"); 490 netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
490 usbnet_defer_kevent (dev, EVENT_RX_MEMORY); 491 usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 19a985ef9104..559b215c0169 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -756,7 +756,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
756 int num_skb_frags; 756 int num_skb_frags;
757 757
758 buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx); 758 buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
759 if (unlikely(!ctx)) { 759 if (unlikely(!buf)) {
760 pr_debug("%s: rx error: %d buffers out of %d missing\n", 760 pr_debug("%s: rx error: %d buffers out of %d missing\n",
761 dev->name, num_buf, 761 dev->name, num_buf,
762 virtio16_to_cpu(vi->vdev, 762 virtio16_to_cpu(vi->vdev,
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d1c7029ded7c..cf95290b160c 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1616,7 +1616,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1616 rq->rx_ring[i].basePA); 1616 rq->rx_ring[i].basePA);
1617 rq->rx_ring[i].base = NULL; 1617 rq->rx_ring[i].base = NULL;
1618 } 1618 }
1619 rq->buf_info[i] = NULL;
1620 } 1619 }
1621 1620
1622 if (rq->data_ring.base) { 1621 if (rq->data_ring.base) {
@@ -1638,6 +1637,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1638 (rq->rx_ring[0].size + rq->rx_ring[1].size); 1637 (rq->rx_ring[0].size + rq->rx_ring[1].size);
1639 dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0], 1638 dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
1640 rq->buf_info_pa); 1639 rq->buf_info_pa);
1640 rq->buf_info[0] = rq->buf_info[1] = NULL;
1641 } 1641 }
1642} 1642}
1643 1643
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index feb1b2e15c2e..139c61c8244a 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -673,8 +673,9 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
673 struct sock *sk, 673 struct sock *sk,
674 struct sk_buff *skb) 674 struct sk_buff *skb)
675{ 675{
676 /* don't divert multicast */ 676 /* don't divert multicast or local broadcast */
677 if (ipv4_is_multicast(ip_hdr(skb)->daddr)) 677 if (ipv4_is_multicast(ip_hdr(skb)->daddr) ||
678 ipv4_is_lbcast(ip_hdr(skb)->daddr))
678 return skb; 679 return skb;
679 680
680 if (qdisc_tx_is_default(vrf_dev)) 681 if (qdisc_tx_is_default(vrf_dev))
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 19b9cc51079e..c3e34e3c82a7 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2155,6 +2155,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2155 } 2155 }
2156 2156
2157 ndst = &rt->dst; 2157 ndst = &rt->dst;
2158 if (skb_dst(skb)) {
2159 int mtu = dst_mtu(ndst) - VXLAN_HEADROOM;
2160
2161 skb_dst_update_pmtu(skb, mtu);
2162 }
2163
2158 tos = ip_tunnel_ecn_encap(tos, old_iph, skb); 2164 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
2159 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); 2165 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
2160 err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr), 2166 err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
@@ -2190,6 +2196,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2190 goto out_unlock; 2196 goto out_unlock;
2191 } 2197 }
2192 2198
2199 if (skb_dst(skb)) {
2200 int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM;
2201
2202 skb_dst_update_pmtu(skb, mtu);
2203 }
2204
2193 tos = ip_tunnel_ecn_encap(tos, old_iph, skb); 2205 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
2194 ttl = ttl ? : ip6_dst_hoplimit(ndst); 2206 ttl = ttl ? : ip6_dst_hoplimit(ndst);
2195 skb_scrub_packet(skb, xnet); 2207 skb_scrub_packet(skb, xnet);
@@ -3103,6 +3115,11 @@ static void vxlan_config_apply(struct net_device *dev,
3103 3115
3104 max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : 3116 max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
3105 VXLAN_HEADROOM); 3117 VXLAN_HEADROOM);
3118 if (max_mtu < ETH_MIN_MTU)
3119 max_mtu = ETH_MIN_MTU;
3120
3121 if (!changelink && !conf->mtu)
3122 dev->mtu = max_mtu;
3106 } 3123 }
3107 3124
3108 if (dev->mtu > max_mtu) 3125 if (dev->mtu > max_mtu)
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index f7d228b5ba93..987f1252a3cf 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -384,6 +384,18 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
384 } 384 }
385 } 385 }
386 386
387 if (changed & IEEE80211_CONF_CHANGE_PS) {
388 list_for_each_entry(tmp, &wcn->vif_list, list) {
389 vif = wcn36xx_priv_to_vif(tmp);
390 if (hw->conf.flags & IEEE80211_CONF_PS) {
391 if (vif->bss_conf.ps) /* ps allowed ? */
392 wcn36xx_pmc_enter_bmps_state(wcn, vif);
393 } else {
394 wcn36xx_pmc_exit_bmps_state(wcn, vif);
395 }
396 }
397 }
398
387 mutex_unlock(&wcn->conf_mutex); 399 mutex_unlock(&wcn->conf_mutex);
388 400
389 return 0; 401 return 0;
@@ -747,17 +759,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
747 vif_priv->dtim_period = bss_conf->dtim_period; 759 vif_priv->dtim_period = bss_conf->dtim_period;
748 } 760 }
749 761
750 if (changed & BSS_CHANGED_PS) {
751 wcn36xx_dbg(WCN36XX_DBG_MAC,
752 "mac bss PS set %d\n",
753 bss_conf->ps);
754 if (bss_conf->ps) {
755 wcn36xx_pmc_enter_bmps_state(wcn, vif);
756 } else {
757 wcn36xx_pmc_exit_bmps_state(wcn, vif);
758 }
759 }
760
761 if (changed & BSS_CHANGED_BSSID) { 762 if (changed & BSS_CHANGED_BSSID) {
762 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n", 763 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n",
763 bss_conf->bssid); 764 bss_conf->bssid);
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c
index 589fe5f70971..1976b80c235f 100644
--- a/drivers/net/wireless/ath/wcn36xx/pmc.c
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.c
@@ -45,8 +45,10 @@ int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
45 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); 45 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
46 46
47 if (WCN36XX_BMPS != vif_priv->pw_state) { 47 if (WCN36XX_BMPS != vif_priv->pw_state) {
48 wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n"); 48 /* Unbalanced call or last BMPS enter failed */
49 return -EINVAL; 49 wcn36xx_dbg(WCN36XX_DBG_PMC,
50 "Not in BMPS mode, no need to exit\n");
51 return -EALREADY;
50 } 52 }
51 wcn36xx_smd_exit_bmps(wcn, vif); 53 wcn36xx_smd_exit_bmps(wcn, vif);
52 vif_priv->pw_state = WCN36XX_FULL_POWER; 54 vif_priv->pw_state = WCN36XX_FULL_POWER;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 6a59d0609d30..9be0b051066a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -182,12 +182,9 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
182 182
183 err = request_firmware(&clm, clm_name, dev); 183 err = request_firmware(&clm, clm_name, dev);
184 if (err) { 184 if (err) {
185 if (err == -ENOENT) { 185 brcmf_info("no clm_blob available(err=%d), device may have limited channels available\n",
186 brcmf_dbg(INFO, "continue with CLM data currently present in firmware\n"); 186 err);
187 return 0; 187 return 0;
188 }
189 brcmf_err("request CLM blob file failed (%d)\n", err);
190 return err;
191 } 188 }
192 189
193 chunk_buf = kzalloc(sizeof(*chunk_buf) + MAX_CHUNK_LEN - 1, GFP_KERNEL); 190 chunk_buf = kzalloc(sizeof(*chunk_buf) + MAX_CHUNK_LEN - 1, GFP_KERNEL);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 310c4e2746aa..cdf9e4161592 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -2070,7 +2070,7 @@ static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
2070 return head_pad; 2070 return head_pad;
2071} 2071}
2072 2072
2073/** 2073/*
2074 * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for 2074 * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
2075 * bus layer usage. 2075 * bus layer usage.
2076 */ 2076 */
@@ -4121,8 +4121,8 @@ release:
4121 sdio_release_host(sdiodev->func[1]); 4121 sdio_release_host(sdiodev->func[1]);
4122fail: 4122fail:
4123 brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err); 4123 brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
4124 device_release_driver(dev);
4125 device_release_driver(&sdiodev->func[2]->dev); 4124 device_release_driver(&sdiodev->func[2]->dev);
4125 device_release_driver(dev);
4126} 4126}
4127 4127
4128struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) 4128struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
index 87b4434224a1..dfa111bb411e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
@@ -68,6 +68,9 @@
68 * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW 68 * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
69 * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames 69 * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames
70 * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames 70 * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames
71 * @IWL_MVM_DQA_INJECT_MONITOR_QUEUE: a queue reserved for injection using
72 * monitor mode. Note this queue is the same as the queue for P2P device
73 * but we can't have active monitor mode along with P2P device anyway.
71 * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames 74 * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
72 * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure 75 * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
73 * that we are never left without the possibility to connect to an AP. 76 * that we are never left without the possibility to connect to an AP.
@@ -87,6 +90,7 @@ enum iwl_mvm_dqa_txq {
87 IWL_MVM_DQA_CMD_QUEUE = 0, 90 IWL_MVM_DQA_CMD_QUEUE = 0,
88 IWL_MVM_DQA_AUX_QUEUE = 1, 91 IWL_MVM_DQA_AUX_QUEUE = 1,
89 IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2, 92 IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2,
93 IWL_MVM_DQA_INJECT_MONITOR_QUEUE = 2,
90 IWL_MVM_DQA_GCAST_QUEUE = 3, 94 IWL_MVM_DQA_GCAST_QUEUE = 3,
91 IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4, 95 IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
92 IWL_MVM_DQA_MIN_MGMT_QUEUE = 5, 96 IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 9c889a32fe24..223fb77a3aa9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -209,8 +209,6 @@ static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt)
209 209
210static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt) 210static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
211{ 211{
212 iwl_fw_dbg_stop_recording(fwrt);
213
214 fwrt->dump.conf = FW_DBG_INVALID; 212 fwrt->dump.conf = FW_DBG_INVALID;
215} 213}
216 214
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index ca0b5536a8a6..921cab9e2d73 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -117,6 +117,7 @@
117#define FH_RSCSR_FRAME_INVALID 0x55550000 117#define FH_RSCSR_FRAME_INVALID 0x55550000
118#define FH_RSCSR_FRAME_ALIGN 0x40 118#define FH_RSCSR_FRAME_ALIGN 0x40
119#define FH_RSCSR_RPA_EN BIT(25) 119#define FH_RSCSR_RPA_EN BIT(25)
120#define FH_RSCSR_RADA_EN BIT(26)
120#define FH_RSCSR_RXQ_POS 16 121#define FH_RSCSR_RXQ_POS 16
121#define FH_RSCSR_RXQ_MASK 0x3F0000 122#define FH_RSCSR_RXQ_MASK 0x3F0000
122 123
@@ -128,7 +129,8 @@ struct iwl_rx_packet {
128 * 31: flag flush RB request 129 * 31: flag flush RB request
129 * 30: flag ignore TC (terminal counter) request 130 * 30: flag ignore TC (terminal counter) request
130 * 29: flag fast IRQ request 131 * 29: flag fast IRQ request
131 * 28-26: Reserved 132 * 28-27: Reserved
133 * 26: RADA enabled
132 * 25: Offload enabled 134 * 25: Offload enabled
133 * 24: RPF enabled 135 * 24: RPF enabled
134 * 23: RSS enabled 136 * 23: RSS enabled
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index a2bf530eeae4..2f22e14e00fe 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -787,7 +787,7 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
787 u32 action) 787 u32 action)
788{ 788{
789 struct iwl_mac_ctx_cmd cmd = {}; 789 struct iwl_mac_ctx_cmd cmd = {};
790 u32 tfd_queue_msk = 0; 790 u32 tfd_queue_msk = BIT(mvm->snif_queue);
791 int ret; 791 int ret;
792 792
793 WARN_ON(vif->type != NL80211_IFTYPE_MONITOR); 793 WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 4575595ab022..55ab5349dd40 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -972,6 +972,7 @@ struct iwl_mvm {
972 972
973 /* Tx queues */ 973 /* Tx queues */
974 u16 aux_queue; 974 u16 aux_queue;
975 u16 snif_queue;
975 u16 probe_queue; 976 u16 probe_queue;
976 u16 p2p_dev_queue; 977 u16 p2p_dev_queue;
977 978
@@ -1060,6 +1061,7 @@ struct iwl_mvm {
1060 * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running 1061 * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
1061 * @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done 1062 * @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done
1062 * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running 1063 * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
1064 * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
1063 */ 1065 */
1064enum iwl_mvm_status { 1066enum iwl_mvm_status {
1065 IWL_MVM_STATUS_HW_RFKILL, 1067 IWL_MVM_STATUS_HW_RFKILL,
@@ -1071,6 +1073,7 @@ enum iwl_mvm_status {
1071 IWL_MVM_STATUS_ROC_AUX_RUNNING, 1073 IWL_MVM_STATUS_ROC_AUX_RUNNING,
1072 IWL_MVM_STATUS_D3_RECONFIG, 1074 IWL_MVM_STATUS_D3_RECONFIG,
1073 IWL_MVM_STATUS_FIRMWARE_RUNNING, 1075 IWL_MVM_STATUS_FIRMWARE_RUNNING,
1076 IWL_MVM_STATUS_NEED_FLUSH_P2P,
1074}; 1077};
1075 1078
1076/* Keep track of completed init configuration */ 1079/* Keep track of completed init configuration */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 7078b7e458be..45470b6b351a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -624,6 +624,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
624 mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0; 624 mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
625 625
626 mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; 626 mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
627 mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
627 mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; 628 mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
628 mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; 629 mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
629 630
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 76dc58381e1c..3b8d44361380 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -213,6 +213,7 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
213 struct ieee80211_rx_status *rx_status) 213 struct ieee80211_rx_status *rx_status)
214{ 214{
215 int energy_a, energy_b, max_energy; 215 int energy_a, energy_b, max_energy;
216 u32 rate_flags = le32_to_cpu(desc->rate_n_flags);
216 217
217 energy_a = desc->energy_a; 218 energy_a = desc->energy_a;
218 energy_a = energy_a ? -energy_a : S8_MIN; 219 energy_a = energy_a ? -energy_a : S8_MIN;
@@ -224,7 +225,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
224 energy_a, energy_b, max_energy); 225 energy_a, energy_b, max_energy);
225 226
226 rx_status->signal = max_energy; 227 rx_status->signal = max_energy;
227 rx_status->chains = 0; /* TODO: phy info */ 228 rx_status->chains =
229 (rate_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
228 rx_status->chain_signal[0] = energy_a; 230 rx_status->chain_signal[0] = energy_a;
229 rx_status->chain_signal[1] = energy_b; 231 rx_status->chain_signal[1] = energy_b;
230 rx_status->chain_signal[2] = S8_MIN; 232 rx_status->chain_signal[2] = S8_MIN;
@@ -232,8 +234,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
232 234
233static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, 235static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
234 struct ieee80211_rx_status *stats, 236 struct ieee80211_rx_status *stats,
235 struct iwl_rx_mpdu_desc *desc, int queue, 237 struct iwl_rx_mpdu_desc *desc, u32 pkt_flags,
236 u8 *crypt_len) 238 int queue, u8 *crypt_len)
237{ 239{
238 u16 status = le16_to_cpu(desc->status); 240 u16 status = le16_to_cpu(desc->status);
239 241
@@ -253,6 +255,8 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
253 return -1; 255 return -1;
254 256
255 stats->flag |= RX_FLAG_DECRYPTED; 257 stats->flag |= RX_FLAG_DECRYPTED;
258 if (pkt_flags & FH_RSCSR_RADA_EN)
259 stats->flag |= RX_FLAG_MIC_STRIPPED;
256 *crypt_len = IEEE80211_CCMP_HDR_LEN; 260 *crypt_len = IEEE80211_CCMP_HDR_LEN;
257 return 0; 261 return 0;
258 case IWL_RX_MPDU_STATUS_SEC_TKIP: 262 case IWL_RX_MPDU_STATUS_SEC_TKIP:
@@ -270,6 +274,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
270 if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == 274 if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
271 IWL_RX_MPDU_STATUS_SEC_WEP) 275 IWL_RX_MPDU_STATUS_SEC_WEP)
272 *crypt_len = IEEE80211_WEP_IV_LEN; 276 *crypt_len = IEEE80211_WEP_IV_LEN;
277
278 if (pkt_flags & FH_RSCSR_RADA_EN)
279 stats->flag |= RX_FLAG_ICV_STRIPPED;
280
273 return 0; 281 return 0;
274 case IWL_RX_MPDU_STATUS_SEC_EXT_ENC: 282 case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
275 if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) 283 if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
@@ -848,7 +856,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
848 856
849 rx_status = IEEE80211_SKB_RXCB(skb); 857 rx_status = IEEE80211_SKB_RXCB(skb);
850 858
851 if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc, queue, &crypt_len)) { 859 if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc,
860 le32_to_cpu(pkt->len_n_flags), queue,
861 &crypt_len)) {
852 kfree_skb(skb); 862 kfree_skb(skb);
853 return; 863 return;
854 } 864 }
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index c19f98489d4e..1add5615fc3a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1709,29 +1709,29 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
1709 sta->sta_id = IWL_MVM_INVALID_STA; 1709 sta->sta_id = IWL_MVM_INVALID_STA;
1710} 1710}
1711 1711
1712static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm) 1712static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
1713 u8 sta_id, u8 fifo)
1713{ 1714{
1714 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ? 1715 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
1715 mvm->cfg->base_params->wd_timeout : 1716 mvm->cfg->base_params->wd_timeout :
1716 IWL_WATCHDOG_DISABLED; 1717 IWL_WATCHDOG_DISABLED;
1717 1718
1718 if (iwl_mvm_has_new_tx_api(mvm)) { 1719 if (iwl_mvm_has_new_tx_api(mvm)) {
1719 int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue, 1720 int tvqm_queue =
1720 mvm->aux_sta.sta_id, 1721 iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
1721 IWL_MAX_TID_COUNT, 1722 IWL_MAX_TID_COUNT,
1722 wdg_timeout); 1723 wdg_timeout);
1723 mvm->aux_queue = queue; 1724 *queue = tvqm_queue;
1724 } else { 1725 } else {
1725 struct iwl_trans_txq_scd_cfg cfg = { 1726 struct iwl_trans_txq_scd_cfg cfg = {
1726 .fifo = IWL_MVM_TX_FIFO_MCAST, 1727 .fifo = fifo,
1727 .sta_id = mvm->aux_sta.sta_id, 1728 .sta_id = sta_id,
1728 .tid = IWL_MAX_TID_COUNT, 1729 .tid = IWL_MAX_TID_COUNT,
1729 .aggregate = false, 1730 .aggregate = false,
1730 .frame_limit = IWL_FRAME_LIMIT, 1731 .frame_limit = IWL_FRAME_LIMIT,
1731 }; 1732 };
1732 1733
1733 iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg, 1734 iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
1734 wdg_timeout);
1735 } 1735 }
1736} 1736}
1737 1737
@@ -1750,7 +1750,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
1750 1750
1751 /* Map Aux queue to fifo - needs to happen before adding Aux station */ 1751 /* Map Aux queue to fifo - needs to happen before adding Aux station */
1752 if (!iwl_mvm_has_new_tx_api(mvm)) 1752 if (!iwl_mvm_has_new_tx_api(mvm))
1753 iwl_mvm_enable_aux_queue(mvm); 1753 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
1754 mvm->aux_sta.sta_id,
1755 IWL_MVM_TX_FIFO_MCAST);
1754 1756
1755 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL, 1757 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
1756 MAC_INDEX_AUX, 0); 1758 MAC_INDEX_AUX, 0);
@@ -1764,7 +1766,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
1764 * to firmware so enable queue here - after the station was added 1766 * to firmware so enable queue here - after the station was added
1765 */ 1767 */
1766 if (iwl_mvm_has_new_tx_api(mvm)) 1768 if (iwl_mvm_has_new_tx_api(mvm))
1767 iwl_mvm_enable_aux_queue(mvm); 1769 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
1770 mvm->aux_sta.sta_id,
1771 IWL_MVM_TX_FIFO_MCAST);
1768 1772
1769 return 0; 1773 return 0;
1770} 1774}
@@ -1772,10 +1776,31 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
1772int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1776int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1773{ 1777{
1774 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1778 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1779 int ret;
1775 1780
1776 lockdep_assert_held(&mvm->mutex); 1781 lockdep_assert_held(&mvm->mutex);
1777 return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr, 1782
1783 /* Map snif queue to fifo - must happen before adding snif station */
1784 if (!iwl_mvm_has_new_tx_api(mvm))
1785 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
1786 mvm->snif_sta.sta_id,
1787 IWL_MVM_TX_FIFO_BE);
1788
1789 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
1778 mvmvif->id, 0); 1790 mvmvif->id, 0);
1791 if (ret)
1792 return ret;
1793
1794 /*
1795 * For 22000 firmware and on we cannot add queue to a station unknown
1796 * to firmware so enable queue here - after the station was added
1797 */
1798 if (iwl_mvm_has_new_tx_api(mvm))
1799 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
1800 mvm->snif_sta.sta_id,
1801 IWL_MVM_TX_FIFO_BE);
1802
1803 return 0;
1779} 1804}
1780 1805
1781int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1806int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -1784,6 +1809,8 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1784 1809
1785 lockdep_assert_held(&mvm->mutex); 1810 lockdep_assert_held(&mvm->mutex);
1786 1811
1812 iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
1813 IWL_MAX_TID_COUNT, 0);
1787 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); 1814 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
1788 if (ret) 1815 if (ret)
1789 IWL_WARN(mvm, "Failed sending remove station\n"); 1816 IWL_WARN(mvm, "Failed sending remove station\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 4d0314912e94..e25cda9fbf6c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -132,6 +132,24 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
132 * executed, and a new time event means a new command. 132 * executed, and a new time event means a new command.
133 */ 133 */
134 iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC); 134 iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);
135
136 /* Do the same for the P2P device queue (STA) */
137 if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
138 struct iwl_mvm_vif *mvmvif;
139
140 /*
141 * NB: access to this pointer would be racy, but the flush bit
142 * can only be set when we had a P2P-Device VIF, and we have a
143 * flush of this work in iwl_mvm_prepare_mac_removal() so it's
144 * not really racy.
145 */
146
147 if (!WARN_ON(!mvm->p2p_device_vif)) {
148 mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
149 iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true,
150 CMD_ASYNC);
151 }
152 }
135} 153}
136 154
137static void iwl_mvm_roc_finished(struct iwl_mvm *mvm) 155static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
@@ -855,10 +873,12 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
855 873
856 mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); 874 mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
857 875
858 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) 876 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
859 iwl_mvm_remove_time_event(mvm, mvmvif, te_data); 877 iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
860 else 878 set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
879 } else {
861 iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data); 880 iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
881 }
862 882
863 iwl_mvm_roc_finished(mvm); 883 iwl_mvm_roc_finished(mvm);
864} 884}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 593b7f97b29c..333bcb75b8af 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -657,7 +657,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
657 if (ap_sta_id != IWL_MVM_INVALID_STA) 657 if (ap_sta_id != IWL_MVM_INVALID_STA)
658 sta_id = ap_sta_id; 658 sta_id = ap_sta_id;
659 } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) { 659 } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
660 queue = mvm->aux_queue; 660 queue = mvm->snif_queue;
661 sta_id = mvm->snif_sta.sta_id;
661 } 662 }
662 } 663 }
663 664
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index d46115e2d69e..03ffd84786ca 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -1134,9 +1134,18 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
1134 unsigned int default_timeout = 1134 unsigned int default_timeout =
1135 cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout; 1135 cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout;
1136 1136
1137 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) 1137 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) {
1138 /*
1139 * We can't know when the station is asleep or awake, so we
1140 * must disable the queue hang detection.
1141 */
1142 if (fw_has_capa(&mvm->fw->ucode_capa,
1143 IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
1144 vif && vif->type == NL80211_IFTYPE_AP)
1145 return IWL_WATCHDOG_DISABLED;
1138 return iwlmvm_mod_params.tfd_q_hang_detect ? 1146 return iwlmvm_mod_params.tfd_q_hang_detect ?
1139 default_timeout : IWL_WATCHDOG_DISABLED; 1147 default_timeout : IWL_WATCHDOG_DISABLED;
1148 }
1140 1149
1141 trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS); 1150 trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
1142 txq_timer = (void *)trigger->data; 1151 txq_timer = (void *)trigger->data;
@@ -1163,6 +1172,8 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
1163 return le32_to_cpu(txq_timer->p2p_go); 1172 return le32_to_cpu(txq_timer->p2p_go);
1164 case NL80211_IFTYPE_P2P_DEVICE: 1173 case NL80211_IFTYPE_P2P_DEVICE:
1165 return le32_to_cpu(txq_timer->p2p_device); 1174 return le32_to_cpu(txq_timer->p2p_device);
1175 case NL80211_IFTYPE_MONITOR:
1176 return default_timeout;
1166 default: 1177 default:
1167 WARN_ON(1); 1178 WARN_ON(1);
1168 return mvm->cfg->base_params->wd_timeout; 1179 return mvm->cfg->base_params->wd_timeout;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index f21fe59faccf..ccd7c33c4c28 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -553,6 +553,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
553 {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, 553 {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
554 {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)}, 554 {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
555 {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)}, 555 {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
556 {IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)},
556 {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)}, 557 {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)},
557 {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)}, 558 {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)},
558 {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)}, 559 {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)},
@@ -664,6 +665,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
664 {IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)}, 665 {IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)},
665 {IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)}, 666 {IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)},
666 {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)}, 667 {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)},
668 {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwla000_2ax_cfg_hr)},
667 669
668#endif /* CONFIG_IWLMVM */ 670#endif /* CONFIG_IWLMVM */
669 671
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index d749abeca3ae..403e65c309d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -670,11 +670,15 @@ static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
670 return index & (q->n_window - 1); 670 return index & (q->n_window - 1);
671} 671}
672 672
673static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie, 673static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
674 struct iwl_txq *txq, int idx) 674 struct iwl_txq *txq, int idx)
675{ 675{
676 return txq->tfds + trans_pcie->tfd_size * iwl_pcie_get_cmd_index(txq, 676 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
677 idx); 677
678 if (trans->cfg->use_tfh)
679 idx = iwl_pcie_get_cmd_index(txq, idx);
680
681 return txq->tfds + trans_pcie->tfd_size * idx;
678} 682}
679 683
680static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) 684static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index c59f4581e972..ac05fd1e74c4 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -49,6 +49,7 @@
49 * 49 *
50 *****************************************************************************/ 50 *****************************************************************************/
51#include "iwl-trans.h" 51#include "iwl-trans.h"
52#include "iwl-prph.h"
52#include "iwl-context-info.h" 53#include "iwl-context-info.h"
53#include "internal.h" 54#include "internal.h"
54 55
@@ -156,6 +157,11 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
156 157
157 trans_pcie->is_down = true; 158 trans_pcie->is_down = true;
158 159
160 /* Stop dbgc before stopping device */
161 iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
162 udelay(100);
163 iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
164
159 /* tell the device to stop sending interrupts */ 165 /* tell the device to stop sending interrupts */
160 iwl_disable_interrupts(trans); 166 iwl_disable_interrupts(trans);
161 167
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index b7a51603465b..4541c86881d6 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -166,6 +166,7 @@ static void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
166 print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 166 print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
167 4, buf, i, 0); 167 4, buf, i, 0);
168 } 168 }
169 goto out;
169 170
170err_read: 171err_read:
171 print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); 172 print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
@@ -1226,6 +1227,15 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1226 1227
1227 trans_pcie->is_down = true; 1228 trans_pcie->is_down = true;
1228 1229
1230 /* Stop dbgc before stopping device */
1231 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
1232 iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
1233 } else {
1234 iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
1235 udelay(100);
1236 iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
1237 }
1238
1229 /* tell the device to stop sending interrupts */ 1239 /* tell the device to stop sending interrupts */
1230 iwl_disable_interrupts(trans); 1240 iwl_disable_interrupts(trans);
1231 1241
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 16b345f54ff0..6d0a907d5ba5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -171,8 +171,6 @@ static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
171 171
172static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) 172static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
173{ 173{
174 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
175
176 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and 174 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
177 * idx is bounded by n_window 175 * idx is bounded by n_window
178 */ 176 */
@@ -181,7 +179,7 @@ static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
181 lockdep_assert_held(&txq->lock); 179 lockdep_assert_held(&txq->lock);
182 180
183 iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta, 181 iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
184 iwl_pcie_get_tfd(trans_pcie, txq, idx)); 182 iwl_pcie_get_tfd(trans, txq, idx));
185 183
186 /* free SKB */ 184 /* free SKB */
187 if (txq->entries) { 185 if (txq->entries) {
@@ -364,11 +362,9 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
364 struct sk_buff *skb, 362 struct sk_buff *skb,
365 struct iwl_cmd_meta *out_meta) 363 struct iwl_cmd_meta *out_meta)
366{ 364{
367 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
368 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 365 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
369 int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); 366 int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
370 struct iwl_tfh_tfd *tfd = 367 struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
371 iwl_pcie_get_tfd(trans_pcie, txq, idx);
372 dma_addr_t tb_phys; 368 dma_addr_t tb_phys;
373 bool amsdu; 369 bool amsdu;
374 int i, len, tb1_len, tb2_len, hdr_len; 370 int i, len, tb1_len, tb2_len, hdr_len;
@@ -565,8 +561,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
565 u8 group_id = iwl_cmd_groupid(cmd->id); 561 u8 group_id = iwl_cmd_groupid(cmd->id);
566 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; 562 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
567 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; 563 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
568 struct iwl_tfh_tfd *tfd = 564 struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
569 iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
570 565
571 memset(tfd, 0, sizeof(*tfd)); 566 memset(tfd, 0, sizeof(*tfd));
572 567
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index fed6d842a5e1..3f85713c41dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -373,7 +373,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
373{ 373{
374 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 374 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
375 int i, num_tbs; 375 int i, num_tbs;
376 void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index); 376 void *tfd = iwl_pcie_get_tfd(trans, txq, index);
377 377
378 /* Sanity check on number of chunks */ 378 /* Sanity check on number of chunks */
379 num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); 379 num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
@@ -2018,7 +2018,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
2018 } 2018 }
2019 2019
2020 trace_iwlwifi_dev_tx(trans->dev, skb, 2020 trace_iwlwifi_dev_tx(trans->dev, skb,
2021 iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), 2021 iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
2022 trans_pcie->tfd_size, 2022 trans_pcie->tfd_size,
2023 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 2023 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
2024 hdr_len); 2024 hdr_len);
@@ -2092,7 +2092,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
2092 IEEE80211_CCMP_HDR_LEN : 0; 2092 IEEE80211_CCMP_HDR_LEN : 0;
2093 2093
2094 trace_iwlwifi_dev_tx(trans->dev, skb, 2094 trace_iwlwifi_dev_tx(trans->dev, skb,
2095 iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), 2095 iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
2096 trans_pcie->tfd_size, 2096 trans_pcie->tfd_size,
2097 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0); 2097 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
2098 2098
@@ -2425,7 +2425,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
2425 memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, 2425 memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
2426 IWL_FIRST_TB_SIZE); 2426 IWL_FIRST_TB_SIZE);
2427 2427
2428 tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr); 2428 tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
2429 /* Set up entry for this TFD in Tx byte-count array */ 2429 /* Set up entry for this TFD in Tx byte-count array */
2430 iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), 2430 iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
2431 iwl_pcie_tfd_get_num_tbs(trans, tfd)); 2431 iwl_pcie_tfd_get_num_tbs(trans, tfd));
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 10b075a46b26..f6d4a50f1bdb 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -489,6 +489,7 @@ static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
489 489
490static spinlock_t hwsim_radio_lock; 490static spinlock_t hwsim_radio_lock;
491static LIST_HEAD(hwsim_radios); 491static LIST_HEAD(hwsim_radios);
492static struct workqueue_struct *hwsim_wq;
492static int hwsim_radio_idx; 493static int hwsim_radio_idx;
493 494
494static struct platform_driver mac80211_hwsim_driver = { 495static struct platform_driver mac80211_hwsim_driver = {
@@ -684,6 +685,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
684 hdr = skb_put(skb, sizeof(*hdr) - ETH_ALEN); 685 hdr = skb_put(skb, sizeof(*hdr) - ETH_ALEN);
685 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | 686 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
686 IEEE80211_STYPE_NULLFUNC | 687 IEEE80211_STYPE_NULLFUNC |
688 IEEE80211_FCTL_TODS |
687 (ps ? IEEE80211_FCTL_PM : 0)); 689 (ps ? IEEE80211_FCTL_PM : 0));
688 hdr->duration_id = cpu_to_le16(0); 690 hdr->duration_id = cpu_to_le16(0);
689 memcpy(hdr->addr1, vp->bssid, ETH_ALEN); 691 memcpy(hdr->addr1, vp->bssid, ETH_ALEN);
@@ -3119,6 +3121,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
3119 if (info->attrs[HWSIM_ATTR_CHANNELS]) 3121 if (info->attrs[HWSIM_ATTR_CHANNELS])
3120 param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); 3122 param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
3121 3123
3124 if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
3125 GENL_SET_ERR_MSG(info, "too many channels specified");
3126 return -EINVAL;
3127 }
3128
3122 if (info->attrs[HWSIM_ATTR_NO_VIF]) 3129 if (info->attrs[HWSIM_ATTR_NO_VIF])
3123 param.no_vif = true; 3130 param.no_vif = true;
3124 3131
@@ -3215,7 +3222,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
3215 if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info))) 3222 if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info)))
3216 continue; 3223 continue;
3217 3224
3218 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 3225 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
3219 if (!skb) { 3226 if (!skb) {
3220 res = -ENOMEM; 3227 res = -ENOMEM;
3221 goto out_err; 3228 goto out_err;
@@ -3341,7 +3348,7 @@ static void remove_user_radios(u32 portid)
3341 if (entry->destroy_on_close && entry->portid == portid) { 3348 if (entry->destroy_on_close && entry->portid == portid) {
3342 list_del(&entry->list); 3349 list_del(&entry->list);
3343 INIT_WORK(&entry->destroy_work, destroy_radio); 3350 INIT_WORK(&entry->destroy_work, destroy_radio);
3344 schedule_work(&entry->destroy_work); 3351 queue_work(hwsim_wq, &entry->destroy_work);
3345 } 3352 }
3346 } 3353 }
3347 spin_unlock_bh(&hwsim_radio_lock); 3354 spin_unlock_bh(&hwsim_radio_lock);
@@ -3416,7 +3423,7 @@ static void __net_exit hwsim_exit_net(struct net *net)
3416 3423
3417 list_del(&data->list); 3424 list_del(&data->list);
3418 INIT_WORK(&data->destroy_work, destroy_radio); 3425 INIT_WORK(&data->destroy_work, destroy_radio);
3419 schedule_work(&data->destroy_work); 3426 queue_work(hwsim_wq, &data->destroy_work);
3420 } 3427 }
3421 spin_unlock_bh(&hwsim_radio_lock); 3428 spin_unlock_bh(&hwsim_radio_lock);
3422} 3429}
@@ -3448,6 +3455,10 @@ static int __init init_mac80211_hwsim(void)
3448 3455
3449 spin_lock_init(&hwsim_radio_lock); 3456 spin_lock_init(&hwsim_radio_lock);
3450 3457
3458 hwsim_wq = alloc_workqueue("hwsim_wq",WQ_MEM_RECLAIM,0);
3459 if (!hwsim_wq)
3460 return -ENOMEM;
3461
3451 err = register_pernet_device(&hwsim_net_ops); 3462 err = register_pernet_device(&hwsim_net_ops);
3452 if (err) 3463 if (err)
3453 return err; 3464 return err;
@@ -3586,8 +3597,11 @@ static void __exit exit_mac80211_hwsim(void)
3586 hwsim_exit_netlink(); 3597 hwsim_exit_netlink();
3587 3598
3588 mac80211_hwsim_free(); 3599 mac80211_hwsim_free();
3600 flush_workqueue(hwsim_wq);
3601
3589 unregister_netdev(hwsim_mon); 3602 unregister_netdev(hwsim_mon);
3590 platform_driver_unregister(&mac80211_hwsim_driver); 3603 platform_driver_unregister(&mac80211_hwsim_driver);
3591 unregister_pernet_device(&hwsim_net_ops); 3604 unregister_pernet_device(&hwsim_net_ops);
3605 destroy_workqueue(hwsim_wq);
3592} 3606}
3593module_exit(exit_mac80211_hwsim); 3607module_exit(exit_mac80211_hwsim);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index d6dff347f896..78ebe494fef0 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -186,7 +186,7 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
186 /* Obtain the queue to be used to transmit this packet */ 186 /* Obtain the queue to be used to transmit this packet */
187 index = skb_get_queue_mapping(skb); 187 index = skb_get_queue_mapping(skb);
188 if (index >= num_queues) { 188 if (index >= num_queues) {
189 pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.", 189 pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
190 index, vif->dev->name); 190 index, vif->dev->name);
191 index %= num_queues; 191 index %= num_queues;
192 } 192 }
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index c5a34671abda..9bd7ddeeb6a5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1326,6 +1326,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1326 1326
1327 netif_carrier_off(netdev); 1327 netif_carrier_off(netdev);
1328 1328
1329 xenbus_switch_state(dev, XenbusStateInitialising);
1329 return netdev; 1330 return netdev;
1330 1331
1331 exit: 1332 exit: