aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-09-01 15:49:03 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2017-09-01 15:49:03 -0400
commit8cf9f2a29ff1265a392e5b2461c69a9e53b4539f (patch)
tree1787dc13f3708c1f836f532a2956cd3a02fa6a41 /drivers
parentb8a78bb4d103b3ea069c4831081cb1ba17062a4b (diff)
parente8a732d1bc3ac313e22249c13a153c3fe54aa577 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller: 1) Fix handling of pinned BPF map nodes in hash of maps, from Daniel Borkmann. 2) IPSEC ESP error paths leak memory, from Steffen Klassert. 3) We need an RCU grace period before freeing fib6_node objects, from Wei Wang. 4) Must check skb_put_padto() return value in HSR driver, from Florian Fainelli. 5) Fix oops on PHY probe failure in ftgmac100 driver, from Andrew Jeffery. 6) Fix infinite loop in UDP queue when using SO_PEEK_OFF, from Eric Dumazet. 7) Use after free when tcf_chain_destroy() called multiple times, from Jiri Pirko. 8) Fix KSZ DSA tag layer multiple free of SKBS, from Florian Fainelli. 9) Fix leak of uninitialized memory in sctp_get_sctp_info(), inet_diag_msg_sctpladdrs_fill() and inet_diag_msg_sctpaddrs_fill(). From Stefano Brivio. 10) L2TP tunnel refcount fixes from Guillaume Nault. 11) Don't leak UDP secpath in udp_set_dev_scratch(), from Yossi Kauperman. 12) Revert a PHY layer change wrt. handling of PHY_HALTED state in phy_stop_machine(), it causes regressions for multiple people. From Florian Fainelli. 13) When packets are sent out of br0 we have to clear the offload_fwd_mark value. 14) Several NULL pointer deref fixes in packet schedulers when their ->init() routine fails. From Nikolay Aleksandrov. 15) Aquantia devices cannot checksum offload correctly when the packet is <= 60 bytes. From Pavel Belous. 16) Fix vnet header access past end of buffer in AF_PACKET, from Benjamin Poirier. 17) Double free in probe error paths of nfp driver, from Dan Carpenter. 18) QOS capability not checked properly in DCB init paths of mlx5 driver, from Huy Nguyen. 19) Fix conflicts between firmware load failure and health_care timer in mlx5, also from Huy Nguyen. 20) Fix dangling page pointer when DMA mapping errors occur in mlx5, from Eran Ben Elisha. 21) ->ndo_setup_tc() in bnxt_en driver doesn't count rings properly, from Michael Chan. 22) Missing MSIX vector free in bnxt_en, also from Michael Chan. 
23) Refcount leak in xfrm layer when using sk_policy, from Lorenzo Colitti. 24) Fix copy of uninitialized data in qlge driver, from Arnd Bergmann. 25) bpf_setsockopts() erroneously always returns -EINVAL even on success. Fix from Yuchung Cheng. 26) tipc_rcv() needs to linearize the SKB before parsing the inner headers, from Parthasarathy Bhuvaragan. 27) Fix deadlock between link status updates and link removal in netvsc driver, from Stephen Hemminger. 28) Missed locking of page fragment handling in ESP output, from Steffen Klassert. 29) Fix refcnt leak in ebpf congestion control code, from Sabrina Dubroca. 30) sxgbe_probe_config_dt() doesn't check devm_kzalloc()'s return value, from Christophe Jaillet. 31) Fix missing ipv6 rx_dst_cookie update when rx_dst is updated during early demux, from Paolo Abeni. 32) Several info leaks in xfrm_user layer, from Mathias Krause. 33) Fix out of bounds read in cxgb4 driver, from Stefano Brivio. 34) Properly propagate obsolete state of route upwards in ipv6 so that upper holders like xfrm can see it. From Xin Long. 
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (118 commits) udp: fix secpath leak bridge: switchdev: Clear forward mark when transmitting packet mlxsw: spectrum: Forbid linking to devices that have uppers wl1251: add a missing spin_lock_init() Revert "net: phy: Correctly process PHY_HALTED in phy_stop_machine()" net: dsa: bcm_sf2: Fix number of CFP entries for BCM7278 kcm: do not attach PF_KCM sockets to avoid deadlock sch_tbf: fix two null pointer dereferences on init failure sch_sfq: fix null pointer dereference on init failure sch_netem: avoid null pointer deref on init failure sch_fq_codel: avoid double free on init failure sch_cbq: fix null pointer dereferences on init failure sch_hfsc: fix null pointer deref and double free on init failure sch_hhf: fix null pointer dereference on init failure sch_multiq: fix double free on init failure sch_htb: fix crash on init failure net/mlx5e: Fix CQ moderation mode not set properly net/mlx5e: Fix inline header size for small packets net/mlx5: E-Switch, Unload the representors in the correct order net/mlx5e: Properly resolve TC offloaded ipv6 vxlan tunnel source address ...
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/dsa/bcm_sf2.c4
-rw-r--r--drivers/net/dsa/bcm_sf2.h1
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c8
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c27
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c92
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_utils.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c11
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c10
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h3
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c41
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c6
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c1
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/srq.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c15
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c139
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c60
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c14
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c26
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c5
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c8
-rw-r--r--drivers/net/ethernet/ti/cpsw-common.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c7
-rw-r--r--drivers/net/macsec.c1
-rw-r--r--drivers/net/phy/phy.c3
-rw-r--r--drivers/net/phy/phy_device.c6
-rw-r--r--drivers/net/usb/cdc_ncm.c7
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c9
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c1
54 files changed, 377 insertions, 287 deletions
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 648f91b58d1e..9b6ce7c3f6c3 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1048,6 +1048,7 @@ struct bcm_sf2_of_data {
1048 u32 type; 1048 u32 type;
1049 const u16 *reg_offsets; 1049 const u16 *reg_offsets;
1050 unsigned int core_reg_align; 1050 unsigned int core_reg_align;
1051 unsigned int num_cfp_rules;
1051}; 1052};
1052 1053
1053/* Register offsets for the SWITCH_REG_* block */ 1054/* Register offsets for the SWITCH_REG_* block */
@@ -1071,6 +1072,7 @@ static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
1071 .type = BCM7445_DEVICE_ID, 1072 .type = BCM7445_DEVICE_ID,
1072 .core_reg_align = 0, 1073 .core_reg_align = 0,
1073 .reg_offsets = bcm_sf2_7445_reg_offsets, 1074 .reg_offsets = bcm_sf2_7445_reg_offsets,
1075 .num_cfp_rules = 256,
1074}; 1076};
1075 1077
1076static const u16 bcm_sf2_7278_reg_offsets[] = { 1078static const u16 bcm_sf2_7278_reg_offsets[] = {
@@ -1093,6 +1095,7 @@ static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
1093 .type = BCM7278_DEVICE_ID, 1095 .type = BCM7278_DEVICE_ID,
1094 .core_reg_align = 1, 1096 .core_reg_align = 1,
1095 .reg_offsets = bcm_sf2_7278_reg_offsets, 1097 .reg_offsets = bcm_sf2_7278_reg_offsets,
1098 .num_cfp_rules = 128,
1096}; 1099};
1097 1100
1098static const struct of_device_id bcm_sf2_of_match[] = { 1101static const struct of_device_id bcm_sf2_of_match[] = {
@@ -1149,6 +1152,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
1149 priv->type = data->type; 1152 priv->type = data->type;
1150 priv->reg_offsets = data->reg_offsets; 1153 priv->reg_offsets = data->reg_offsets;
1151 priv->core_reg_align = data->core_reg_align; 1154 priv->core_reg_align = data->core_reg_align;
1155 priv->num_cfp_rules = data->num_cfp_rules;
1152 1156
1153 /* Auto-detection using standard registers will not work, so 1157 /* Auto-detection using standard registers will not work, so
1154 * provide an indication of what kind of device we are for 1158 * provide an indication of what kind of device we are for
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 7d3030e04f11..7f9125eef3df 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -72,6 +72,7 @@ struct bcm_sf2_priv {
72 u32 type; 72 u32 type;
73 const u16 *reg_offsets; 73 const u16 *reg_offsets;
74 unsigned int core_reg_align; 74 unsigned int core_reg_align;
75 unsigned int num_cfp_rules;
75 76
76 /* spinlock protecting access to the indirect registers */ 77 /* spinlock protecting access to the indirect registers */
77 spinlock_t indir_lock; 78 spinlock_t indir_lock;
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 2fb32d67065f..8a1da7e67707 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -98,7 +98,7 @@ static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
98{ 98{
99 u32 reg; 99 u32 reg;
100 100
101 WARN_ON(addr >= CFP_NUM_RULES); 101 WARN_ON(addr >= priv->num_cfp_rules);
102 102
103 reg = core_readl(priv, CORE_CFP_ACC); 103 reg = core_readl(priv, CORE_CFP_ACC);
104 reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT); 104 reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
@@ -109,7 +109,7 @@ static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
109static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv) 109static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
110{ 110{
111 /* Entry #0 is reserved */ 111 /* Entry #0 is reserved */
112 return CFP_NUM_RULES - 1; 112 return priv->num_cfp_rules - 1;
113} 113}
114 114
115static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, 115static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
@@ -523,7 +523,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
523 if (!(reg & OP_STR_DONE)) 523 if (!(reg & OP_STR_DONE))
524 break; 524 break;
525 525
526 } while (index < CFP_NUM_RULES); 526 } while (index < priv->num_cfp_rules);
527 527
528 /* Put the TCAM size here */ 528 /* Put the TCAM size here */
529 nfc->data = bcm_sf2_cfp_rule_size(priv); 529 nfc->data = bcm_sf2_cfp_rule_size(priv);
@@ -544,7 +544,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
544 case ETHTOOL_GRXCLSRLCNT: 544 case ETHTOOL_GRXCLSRLCNT:
545 /* Subtract the default, unusable rule */ 545 /* Subtract the default, unusable rule */
546 nfc->rule_cnt = bitmap_weight(priv->cfp.used, 546 nfc->rule_cnt = bitmap_weight(priv->cfp.used,
547 CFP_NUM_RULES) - 1; 547 priv->num_cfp_rules) - 1;
548 /* We support specifying rule locations */ 548 /* We support specifying rule locations */
549 nfc->data |= RX_CLS_LOC_SPECIAL; 549 nfc->data |= RX_CLS_LOC_SPECIAL;
550 break; 550 break;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 1d307f2def2d..6e253d913fe2 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1661,21 +1661,21 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
1661 return 0; 1661 return 0;
1662} 1662}
1663 1663
1664static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata) 1664static void xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
1665{ 1665{
1666 int ret; 1666 int ret;
1667 1667
1668 if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) 1668 if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
1669 return 0; 1669 return;
1670 1670
1671 if (!IS_ENABLED(CONFIG_MDIO_XGENE)) 1671 if (!IS_ENABLED(CONFIG_MDIO_XGENE))
1672 return 0; 1672 return;
1673 1673
1674 ret = xgene_enet_phy_connect(pdata->ndev); 1674 ret = xgene_enet_phy_connect(pdata->ndev);
1675 if (!ret) 1675 if (!ret)
1676 pdata->mdio_driver = true; 1676 pdata->mdio_driver = true;
1677 1677
1678 return 0; 1678 return;
1679} 1679}
1680 1680
1681static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata) 1681static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
@@ -1779,10 +1779,6 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
1779 if (ret) 1779 if (ret)
1780 return ret; 1780 return ret;
1781 1781
1782 ret = xgene_enet_check_phy_handle(pdata);
1783 if (ret)
1784 return ret;
1785
1786 xgene_enet_gpiod_get(pdata); 1782 xgene_enet_gpiod_get(pdata);
1787 1783
1788 pdata->clk = devm_clk_get(&pdev->dev, NULL); 1784 pdata->clk = devm_clk_get(&pdev->dev, NULL);
@@ -2097,9 +2093,11 @@ static int xgene_enet_probe(struct platform_device *pdev)
2097 goto err; 2093 goto err;
2098 } 2094 }
2099 2095
2096 xgene_enet_check_phy_handle(pdata);
2097
2100 ret = xgene_enet_init_hw(pdata); 2098 ret = xgene_enet_init_hw(pdata);
2101 if (ret) 2099 if (ret)
2102 goto err; 2100 goto err2;
2103 2101
2104 link_state = pdata->mac_ops->link_state; 2102 link_state = pdata->mac_ops->link_state;
2105 if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { 2103 if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
@@ -2117,29 +2115,30 @@ static int xgene_enet_probe(struct platform_device *pdev)
2117 spin_lock_init(&pdata->stats_lock); 2115 spin_lock_init(&pdata->stats_lock);
2118 ret = xgene_extd_stats_init(pdata); 2116 ret = xgene_extd_stats_init(pdata);
2119 if (ret) 2117 if (ret)
2120 goto err2; 2118 goto err1;
2121 2119
2122 xgene_enet_napi_add(pdata); 2120 xgene_enet_napi_add(pdata);
2123 ret = register_netdev(ndev); 2121 ret = register_netdev(ndev);
2124 if (ret) { 2122 if (ret) {
2125 netdev_err(ndev, "Failed to register netdev\n"); 2123 netdev_err(ndev, "Failed to register netdev\n");
2126 goto err2; 2124 goto err1;
2127 } 2125 }
2128 2126
2129 return 0; 2127 return 0;
2130 2128
2131err2: 2129err1:
2132 /* 2130 /*
2133 * If necessary, free_netdev() will call netif_napi_del() and undo 2131 * If necessary, free_netdev() will call netif_napi_del() and undo
2134 * the effects of xgene_enet_napi_add()'s calls to netif_napi_add(). 2132 * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
2135 */ 2133 */
2136 2134
2135 xgene_enet_delete_desc_rings(pdata);
2136
2137err2:
2137 if (pdata->mdio_driver) 2138 if (pdata->mdio_driver)
2138 xgene_enet_phy_disconnect(pdata); 2139 xgene_enet_phy_disconnect(pdata);
2139 else if (phy_interface_mode_is_rgmii(pdata->phy_mode)) 2140 else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
2140 xgene_enet_mdio_remove(pdata); 2141 xgene_enet_mdio_remove(pdata);
2141err1:
2142 xgene_enet_delete_desc_rings(pdata);
2143err: 2142err:
2144 free_netdev(ndev); 2143 free_netdev(ndev);
2145 return ret; 2144 return ret;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index fce0fd3f23ff..bf9b3f020e10 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -105,8 +105,7 @@ struct aq_hw_ops {
105 105
106 int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr); 106 int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
107 107
108 int (*hw_get_link_status)(struct aq_hw_s *self, 108 int (*hw_get_link_status)(struct aq_hw_s *self);
109 struct aq_hw_link_status_s *link_status);
110 109
111 int (*hw_set_link_speed)(struct aq_hw_s *self, u32 speed); 110 int (*hw_set_link_speed)(struct aq_hw_s *self, u32 speed);
112 111
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 9ee1c5016784..6ac9e2602d6d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -103,6 +103,8 @@ int aq_nic_cfg_start(struct aq_nic_s *self)
103 else 103 else
104 cfg->vecs = 1U; 104 cfg->vecs = 1U;
105 105
106 cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
107
106 cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func); 108 cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func);
107 109
108 if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) || 110 if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
@@ -123,33 +125,30 @@ static void aq_nic_service_timer_cb(unsigned long param)
123 struct net_device *ndev = aq_nic_get_ndev(self); 125 struct net_device *ndev = aq_nic_get_ndev(self);
124 int err = 0; 126 int err = 0;
125 unsigned int i = 0U; 127 unsigned int i = 0U;
126 struct aq_hw_link_status_s link_status;
127 struct aq_ring_stats_rx_s stats_rx; 128 struct aq_ring_stats_rx_s stats_rx;
128 struct aq_ring_stats_tx_s stats_tx; 129 struct aq_ring_stats_tx_s stats_tx;
129 130
130 if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) 131 if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
131 goto err_exit; 132 goto err_exit;
132 133
133 err = self->aq_hw_ops.hw_get_link_status(self->aq_hw, &link_status); 134 err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
134 if (err < 0) 135 if (err < 0)
135 goto err_exit; 136 goto err_exit;
136 137
137 self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, 138 self->link_status = self->aq_hw->aq_link_status;
138 self->aq_nic_cfg.is_interrupt_moderation);
139
140 if (memcmp(&link_status, &self->link_status, sizeof(link_status))) {
141 if (link_status.mbps) {
142 aq_utils_obj_set(&self->header.flags,
143 AQ_NIC_FLAG_STARTED);
144 aq_utils_obj_clear(&self->header.flags,
145 AQ_NIC_LINK_DOWN);
146 netif_carrier_on(self->ndev);
147 } else {
148 netif_carrier_off(self->ndev);
149 aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
150 }
151 139
152 self->link_status = link_status; 140 self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
141 self->aq_nic_cfg.is_interrupt_moderation);
142
143 if (self->link_status.mbps) {
144 aq_utils_obj_set(&self->header.flags,
145 AQ_NIC_FLAG_STARTED);
146 aq_utils_obj_clear(&self->header.flags,
147 AQ_NIC_LINK_DOWN);
148 netif_carrier_on(self->ndev);
149 } else {
150 netif_carrier_off(self->ndev);
151 aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
153 } 152 }
154 153
155 memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); 154 memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
@@ -597,14 +596,11 @@ exit:
597} 596}
598 597
599int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) 598int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
600__releases(&ring->lock)
601__acquires(&ring->lock)
602{ 599{
603 struct aq_ring_s *ring = NULL; 600 struct aq_ring_s *ring = NULL;
604 unsigned int frags = 0U; 601 unsigned int frags = 0U;
605 unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; 602 unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
606 unsigned int tc = 0U; 603 unsigned int tc = 0U;
607 unsigned int trys = AQ_CFG_LOCK_TRYS;
608 int err = NETDEV_TX_OK; 604 int err = NETDEV_TX_OK;
609 bool is_nic_in_bad_state; 605 bool is_nic_in_bad_state;
610 606
@@ -628,36 +624,21 @@ __acquires(&ring->lock)
628 goto err_exit; 624 goto err_exit;
629 } 625 }
630 626
631 do { 627 frags = aq_nic_map_skb(self, skb, ring);
632 if (spin_trylock(&ring->header.lock)) {
633 frags = aq_nic_map_skb(self, skb, ring);
634
635 if (likely(frags)) {
636 err = self->aq_hw_ops.hw_ring_tx_xmit(
637 self->aq_hw,
638 ring, frags);
639 if (err >= 0) {
640 if (aq_ring_avail_dx(ring) <
641 AQ_CFG_SKB_FRAGS_MAX + 1)
642 aq_nic_ndev_queue_stop(
643 self,
644 ring->idx);
645
646 ++ring->stats.tx.packets;
647 ring->stats.tx.bytes += skb->len;
648 }
649 } else {
650 err = NETDEV_TX_BUSY;
651 }
652 628
653 spin_unlock(&ring->header.lock); 629 if (likely(frags)) {
654 break; 630 err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw,
655 } 631 ring,
656 } while (--trys); 632 frags);
633 if (err >= 0) {
634 if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1)
635 aq_nic_ndev_queue_stop(self, ring->idx);
657 636
658 if (!trys) { 637 ++ring->stats.tx.packets;
638 ring->stats.tx.bytes += skb->len;
639 }
640 } else {
659 err = NETDEV_TX_BUSY; 641 err = NETDEV_TX_BUSY;
660 goto err_exit;
661 } 642 }
662 643
663err_exit: 644err_exit:
@@ -688,11 +669,26 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
688 netdev_for_each_mc_addr(ha, ndev) { 669 netdev_for_each_mc_addr(ha, ndev) {
689 ether_addr_copy(self->mc_list.ar[i++], ha->addr); 670 ether_addr_copy(self->mc_list.ar[i++], ha->addr);
690 ++self->mc_list.count; 671 ++self->mc_list.count;
672
673 if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX)
674 break;
691 } 675 }
692 676
693 return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw, 677 if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) {
678 /* Number of filters is too big: atlantic does not support this.
679 * Force all multi filter to support this.
680 * With this we disable all UC filters and setup "all pass"
681 * multicast mask
682 */
683 self->packet_filter |= IFF_ALLMULTI;
684 self->aq_hw->aq_nic_cfg->mc_list_count = 0;
685 return self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
686 self->packet_filter);
687 } else {
688 return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
694 self->mc_list.ar, 689 self->mc_list.ar,
695 self->mc_list.count); 690 self->mc_list.count);
691 }
696} 692}
697 693
698int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) 694int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 9a0817938eca..ec5579fb8268 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -101,7 +101,6 @@ int aq_ring_init(struct aq_ring_s *self)
101 self->hw_head = 0; 101 self->hw_head = 0;
102 self->sw_head = 0; 102 self->sw_head = 0;
103 self->sw_tail = 0; 103 self->sw_tail = 0;
104 spin_lock_init(&self->header.lock);
105 return 0; 104 return 0;
106} 105}
107 106
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
index f6012b34abe6..e12bcdfb874a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
@@ -17,7 +17,6 @@
17#define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_) 17#define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_)
18 18
19struct aq_obj_s { 19struct aq_obj_s {
20 spinlock_t lock; /* spinlock for nic/rings processing */
21 atomic_t flags; 20 atomic_t flags;
22}; 21};
23 22
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index ad5b4d4dac7f..fee446af748f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -34,8 +34,6 @@ struct aq_vec_s {
34#define AQ_VEC_RX_ID 1 34#define AQ_VEC_RX_ID 1
35 35
36static int aq_vec_poll(struct napi_struct *napi, int budget) 36static int aq_vec_poll(struct napi_struct *napi, int budget)
37__releases(&self->lock)
38__acquires(&self->lock)
39{ 37{
40 struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi); 38 struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
41 struct aq_ring_s *ring = NULL; 39 struct aq_ring_s *ring = NULL;
@@ -47,7 +45,7 @@ __acquires(&self->lock)
47 45
48 if (!self) { 46 if (!self) {
49 err = -EINVAL; 47 err = -EINVAL;
50 } else if (spin_trylock(&self->header.lock)) { 48 } else {
51 for (i = 0U, ring = self->ring[0]; 49 for (i = 0U, ring = self->ring[0];
52 self->tx_rings > i; ++i, ring = self->ring[i]) { 50 self->tx_rings > i; ++i, ring = self->ring[i]) {
53 if (self->aq_hw_ops->hw_ring_tx_head_update) { 51 if (self->aq_hw_ops->hw_ring_tx_head_update) {
@@ -105,11 +103,8 @@ __acquires(&self->lock)
105 self->aq_hw_ops->hw_irq_enable(self->aq_hw, 103 self->aq_hw_ops->hw_irq_enable(self->aq_hw,
106 1U << self->aq_ring_param.vec_idx); 104 1U << self->aq_ring_param.vec_idx);
107 } 105 }
108
109err_exit:
110 spin_unlock(&self->header.lock);
111 } 106 }
112 107err_exit:
113 return work_done; 108 return work_done;
114} 109}
115 110
@@ -185,8 +180,6 @@ int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
185 self->aq_hw_ops = aq_hw_ops; 180 self->aq_hw_ops = aq_hw_ops;
186 self->aq_hw = aq_hw; 181 self->aq_hw = aq_hw;
187 182
188 spin_lock_init(&self->header.lock);
189
190 for (i = 0U, ring = self->ring[0]; 183 for (i = 0U, ring = self->ring[0];
191 self->tx_rings > i; ++i, ring = self->ring[i]) { 184 self->tx_rings > i; ++i, ring = self->ring[i]) {
192 err = aq_ring_init(&ring[AQ_VEC_TX_ID]); 185 err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index faeb4935ef3e..c5a02df7a48b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -629,6 +629,12 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
629 buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1; 629 buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1;
630 else if (0x0U == (pkt_type & 0x1CU)) 630 else if (0x0U == (pkt_type & 0x1CU))
631 buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1; 631 buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1;
632
633 /* Checksum offload workaround for small packets */
634 if (rxd_wb->pkt_len <= 60) {
635 buff->is_ip_cso = 0U;
636 buff->is_cso_err = 0U;
637 }
632 } 638 }
633 639
634 is_err &= ~0x18U; 640 is_err &= ~0x18U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 1bceb7358e5c..21784cc39dab 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -645,6 +645,12 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
645 buff->is_udp_cso = buff->is_cso_err ? 0U : 1U; 645 buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
646 else if (0x0U == (pkt_type & 0x1CU)) 646 else if (0x0U == (pkt_type & 0x1CU))
647 buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U; 647 buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;
648
649 /* Checksum offload workaround for small packets */
650 if (rxd_wb->pkt_len <= 60) {
651 buff->is_ip_cso = 0U;
652 buff->is_cso_err = 0U;
653 }
648 } 654 }
649 655
650 is_err &= ~0x18U; 656 is_err &= ~0x18U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 8d6d8f5804da..4f5ec9a0fbfb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -141,6 +141,12 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
141 141
142 err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected, 142 err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected,
143 aq_hw_read_reg(self, 0x18U)); 143 aq_hw_read_reg(self, 0x18U));
144
145 if (err < 0)
146 pr_err("%s: Bad FW version detected: expected=%x, actual=%x\n",
147 AQ_CFG_DRV_NAME,
148 aq_hw_caps->fw_ver_expected,
149 aq_hw_read_reg(self, 0x18U));
144 return err; 150 return err;
145} 151}
146 152
@@ -313,11 +319,11 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
313err_exit:; 319err_exit:;
314} 320}
315 321
316int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self, 322int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
317 struct aq_hw_link_status_s *link_status)
318{ 323{
319 u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR); 324 u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
320 u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT; 325 u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT;
326 struct aq_hw_link_status_s *link_status = &self->aq_link_status;
321 327
322 if (!link_speed_mask) { 328 if (!link_speed_mask) {
323 link_status->mbps = 0U; 329 link_status->mbps = 0U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index a66aee51ab5b..e0360a6b2202 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -180,8 +180,7 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
180int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed, 180int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
181 enum hal_atl_utils_fw_state_e state); 181 enum hal_atl_utils_fw_state_e state);
182 182
183int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self, 183int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self);
184 struct aq_hw_link_status_s *link_status);
185 184
186int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, 185int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
187 struct aq_hw_caps_s *aq_hw_caps, 186 struct aq_hw_caps_s *aq_hw_caps,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index dc3052751bc1..c28fa5a8734c 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -597,7 +597,7 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
597 597
598static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb) 598static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
599{ 599{
600 dev_kfree_skb_any(cb->skb); 600 dev_consume_skb_any(cb->skb);
601 cb->skb = NULL; 601 cb->skb = NULL;
602 dma_unmap_addr_set(cb, dma_addr, 0); 602 dma_unmap_addr_set(cb, dma_addr, 0);
603} 603}
@@ -1346,6 +1346,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
1346 1346
1347 ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL); 1347 ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
1348 if (!ring->cbs) { 1348 if (!ring->cbs) {
1349 dma_free_coherent(kdev, sizeof(struct dma_desc),
1350 ring->desc_cpu, ring->desc_dma);
1349 netif_err(priv, hw, priv->netdev, "CB allocation failed\n"); 1351 netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1350 return -ENOMEM; 1352 return -ENOMEM;
1351 } 1353 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index e7c8539cbddf..f20b3d2a4c23 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4647,7 +4647,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
4647 pf->port_id = le16_to_cpu(resp->port_id); 4647 pf->port_id = le16_to_cpu(resp->port_id);
4648 bp->dev->dev_port = pf->port_id; 4648 bp->dev->dev_port = pf->port_id;
4649 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); 4649 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
4650 memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
4651 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 4650 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
4652 pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 4651 pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
4653 pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 4652 pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
@@ -4687,16 +4686,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
4687 vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 4686 vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
4688 4687
4689 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); 4688 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
4690 mutex_unlock(&bp->hwrm_cmd_lock);
4691
4692 if (is_valid_ether_addr(vf->mac_addr)) {
4693 /* overwrite netdev dev_adr with admin VF MAC */
4694 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
4695 } else {
4696 eth_hw_addr_random(bp->dev);
4697 rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
4698 }
4699 return rc;
4700#endif 4689#endif
4701 } 4690 }
4702 4691
@@ -7152,6 +7141,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
7152 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 7141 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
7153 netdev_reset_tc(dev); 7142 netdev_reset_tc(dev);
7154 } 7143 }
7144 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
7155 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 7145 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
7156 bp->tx_nr_rings + bp->rx_nr_rings; 7146 bp->tx_nr_rings + bp->rx_nr_rings;
7157 bp->num_stat_ctxs = bp->cp_nr_rings; 7147 bp->num_stat_ctxs = bp->cp_nr_rings;
@@ -7661,6 +7651,28 @@ void bnxt_restore_pf_fw_resources(struct bnxt *bp)
7661 bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP); 7651 bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
7662} 7652}
7663 7653
7654static int bnxt_init_mac_addr(struct bnxt *bp)
7655{
7656 int rc = 0;
7657
7658 if (BNXT_PF(bp)) {
7659 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
7660 } else {
7661#ifdef CONFIG_BNXT_SRIOV
7662 struct bnxt_vf_info *vf = &bp->vf;
7663
7664 if (is_valid_ether_addr(vf->mac_addr)) {
7665 /* overwrite netdev dev_adr with admin VF MAC */
7666 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
7667 } else {
7668 eth_hw_addr_random(bp->dev);
7669 rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
7670 }
7671#endif
7672 }
7673 return rc;
7674}
7675
7664static void bnxt_parse_log_pcie_link(struct bnxt *bp) 7676static void bnxt_parse_log_pcie_link(struct bnxt *bp)
7665{ 7677{
7666 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; 7678 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -7789,7 +7801,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7789 rc = -1; 7801 rc = -1;
7790 goto init_err_pci_clean; 7802 goto init_err_pci_clean;
7791 } 7803 }
7792 7804 rc = bnxt_init_mac_addr(bp);
7805 if (rc) {
7806 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
7807 rc = -EADDRNOTAVAIL;
7808 goto init_err_pci_clean;
7809 }
7793 rc = bnxt_hwrm_queue_qportcfg(bp); 7810 rc = bnxt_hwrm_queue_qportcfg(bp);
7794 if (rc) { 7811 if (rc) {
7795 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", 7812 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 77da75a55c02..997e10e8b863 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -84,6 +84,8 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
84 84
85 max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp); 85 max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
86 bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1); 86 bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1);
87 if (ulp->msix_requested)
88 edev->en_ops->bnxt_free_msix(edev, ulp_id);
87 } 89 }
88 if (ulp->max_async_event_id) 90 if (ulp->max_async_event_id)
89 bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0); 91 bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index a981c4ee9d72..fea3f9a5fb2d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1360,7 +1360,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1360 if (skb) { 1360 if (skb) {
1361 pkts_compl++; 1361 pkts_compl++;
1362 bytes_compl += GENET_CB(skb)->bytes_sent; 1362 bytes_compl += GENET_CB(skb)->bytes_sent;
1363 dev_kfree_skb_any(skb); 1363 dev_consume_skb_any(skb);
1364 } 1364 }
1365 1365
1366 txbds_processed++; 1366 txbds_processed++;
@@ -1875,7 +1875,7 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
1875 cb = ring->cbs + i; 1875 cb = ring->cbs + i;
1876 skb = bcmgenet_rx_refill(priv, cb); 1876 skb = bcmgenet_rx_refill(priv, cb);
1877 if (skb) 1877 if (skb)
1878 dev_kfree_skb_any(skb); 1878 dev_consume_skb_any(skb);
1879 if (!cb->skb) 1879 if (!cb->skb)
1880 return -ENOMEM; 1880 return -ENOMEM;
1881 } 1881 }
@@ -1894,7 +1894,7 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
1894 1894
1895 skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb); 1895 skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
1896 if (skb) 1896 if (skb)
1897 dev_kfree_skb_any(skb); 1897 dev_consume_skb_any(skb);
1898 } 1898 }
1899} 1899}
1900 1900
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 82bf7aac6cdb..0293b41171a5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -369,12 +369,12 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
369 list_del(&entry.list); 369 list_del(&entry.list);
370 spin_unlock(&adap->mbox_lock); 370 spin_unlock(&adap->mbox_lock);
371 ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT; 371 ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
372 t4_record_mbox(adap, cmd, MBOX_LEN, access, ret); 372 t4_record_mbox(adap, cmd, size, access, ret);
373 return ret; 373 return ret;
374 } 374 }
375 375
376 /* Copy in the new mailbox command and send it on its way ... */ 376 /* Copy in the new mailbox command and send it on its way ... */
377 t4_record_mbox(adap, cmd, MBOX_LEN, access, 0); 377 t4_record_mbox(adap, cmd, size, access, 0);
378 for (i = 0; i < size; i += 8) 378 for (i = 0; i < size; i += 8)
379 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++)); 379 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
380 380
@@ -426,7 +426,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
426 } 426 }
427 427
428 ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT; 428 ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
429 t4_record_mbox(adap, cmd, MBOX_LEN, access, ret); 429 t4_record_mbox(adap, cmd, size, access, ret);
430 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", 430 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
431 *(const u8 *)cmd, mbox); 431 *(const u8 *)cmd, mbox);
432 t4_report_fw_error(adap); 432 t4_report_fw_error(adap);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 34dae51effd4..59da7ac3c108 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1863,7 +1863,6 @@ err_setup_mdio:
1863err_ioremap: 1863err_ioremap:
1864 release_resource(priv->res); 1864 release_resource(priv->res);
1865err_req_mem: 1865err_req_mem:
1866 netif_napi_del(&priv->napi);
1867 free_netdev(netdev); 1866 free_netdev(netdev);
1868err_alloc_etherdev: 1867err_alloc_etherdev:
1869 return err; 1868 return err;
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 6e67d22fd0d5..1c7da16ad0ff 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
623 goto no_mem; 623 goto no_mem;
624 } 624 }
625 625
626 pdev->dev.of_node = node;
627 pdev->dev.parent = priv->dev;
626 set_dma_ops(&pdev->dev, get_dma_ops(priv->dev)); 628 set_dma_ops(&pdev->dev, get_dma_ops(priv->dev));
627 629
628 ret = platform_device_add_data(pdev, &data, sizeof(data)); 630 ret = platform_device_add_data(pdev, &data, sizeof(data));
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 48d21c1e09f2..4d598ca8503a 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -6504,7 +6504,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6504 struct resource *res; 6504 struct resource *res;
6505 const char *dt_mac_addr; 6505 const char *dt_mac_addr;
6506 const char *mac_from; 6506 const char *mac_from;
6507 char hw_mac_addr[ETH_ALEN]; 6507 char hw_mac_addr[ETH_ALEN] = {0};
6508 u32 id; 6508 u32 id;
6509 int features; 6509 int features;
6510 int phy_mode; 6510 int phy_mode;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 0039b4725405..2f26fb34d741 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -263,6 +263,7 @@ struct mlx5e_dcbx {
263 263
264 /* The only setting that cannot be read from FW */ 264 /* The only setting that cannot be read from FW */
265 u8 tc_tsa[IEEE_8021QAZ_MAX_TCS]; 265 u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];
266 u8 cap;
266}; 267};
267#endif 268#endif
268 269
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 2eb54d36e16e..c1d384fca4dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -288,13 +288,8 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
288static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev) 288static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
289{ 289{
290 struct mlx5e_priv *priv = netdev_priv(dev); 290 struct mlx5e_priv *priv = netdev_priv(dev);
291 struct mlx5e_dcbx *dcbx = &priv->dcbx;
292 u8 mode = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_VER_CEE;
293
294 if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
295 mode |= DCB_CAP_DCBX_HOST;
296 291
297 return mode; 292 return priv->dcbx.cap;
298} 293}
299 294
300static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode) 295static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
@@ -312,6 +307,7 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
312 /* set dcbx to fw controlled */ 307 /* set dcbx to fw controlled */
313 if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) { 308 if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
314 dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO; 309 dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
310 dcbx->cap &= ~DCB_CAP_DCBX_HOST;
315 return 0; 311 return 0;
316 } 312 }
317 313
@@ -324,6 +320,8 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
324 if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev))) 320 if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
325 return 1; 321 return 1;
326 322
323 dcbx->cap = mode;
324
327 return 0; 325 return 0;
328} 326}
329 327
@@ -628,9 +626,9 @@ static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
628 *cap = false; 626 *cap = false;
629 break; 627 break;
630 case DCB_CAP_ATTR_DCBX: 628 case DCB_CAP_ATTR_DCBX:
631 *cap = (DCB_CAP_DCBX_LLD_MANAGED | 629 *cap = priv->dcbx.cap |
632 DCB_CAP_DCBX_VER_CEE | 630 DCB_CAP_DCBX_VER_CEE |
633 DCB_CAP_DCBX_STATIC); 631 DCB_CAP_DCBX_VER_IEEE;
634 break; 632 break;
635 default: 633 default:
636 *cap = 0; 634 *cap = 0;
@@ -754,8 +752,16 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
754{ 752{
755 struct mlx5e_dcbx *dcbx = &priv->dcbx; 753 struct mlx5e_dcbx *dcbx = &priv->dcbx;
756 754
755 if (!MLX5_CAP_GEN(priv->mdev, qos))
756 return;
757
757 if (MLX5_CAP_GEN(priv->mdev, dcbx)) 758 if (MLX5_CAP_GEN(priv->mdev, dcbx))
758 mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode); 759 mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);
759 760
761 priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE |
762 DCB_CAP_DCBX_VER_IEEE;
763 if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
764 priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
765
760 mlx5e_ets_init(priv); 766 mlx5e_ets_init(priv);
761} 767}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 917fade5f5d5..f5594014715b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -641,8 +641,10 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
641 641
642 new_channels.params = priv->channels.params; 642 new_channels.params = priv->channels.params;
643 new_channels.params.num_channels = count; 643 new_channels.params.num_channels = count;
644 mlx5e_build_default_indir_rqt(priv->mdev, new_channels.params.indirection_rqt, 644 if (!netif_is_rxfh_configured(priv->netdev))
645 MLX5E_INDIR_RQT_SIZE, count); 645 mlx5e_build_default_indir_rqt(priv->mdev,
646 new_channels.params.indirection_rqt,
647 MLX5E_INDIR_RQT_SIZE, count);
646 648
647 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { 649 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
648 priv->channels.params = new_channels.params; 650 priv->channels.params = new_channels.params;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 57f31fa478ce..6ad7f07e7861 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1969,6 +1969,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
1969 } 1969 }
1970 1970
1971 mlx5e_build_common_cq_param(priv, param); 1971 mlx5e_build_common_cq_param(priv, param);
1972 param->cq_period_mode = params->rx_cq_period_mode;
1972} 1973}
1973 1974
1974static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, 1975static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 325b2c8c1c6d..7344433259fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -222,13 +222,13 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
222 if (unlikely(!page)) 222 if (unlikely(!page))
223 return -ENOMEM; 223 return -ENOMEM;
224 224
225 dma_info->page = page;
226 dma_info->addr = dma_map_page(rq->pdev, page, 0, 225 dma_info->addr = dma_map_page(rq->pdev, page, 0,
227 RQ_PAGE_SIZE(rq), rq->buff.map_dir); 226 RQ_PAGE_SIZE(rq), rq->buff.map_dir);
228 if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) { 227 if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
229 put_page(page); 228 put_page(page);
230 return -ENOMEM; 229 return -ENOMEM;
231 } 230 }
231 dma_info->page = page;
232 232
233 return 0; 233 return 0;
234} 234}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 3c536f560dd2..7f282e8f4e7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1443,12 +1443,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
1443 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 1443 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1444 int ret; 1444 int ret;
1445 1445
1446 dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6); 1446 ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
1447 ret = dst->error; 1447 fl6);
1448 if (ret) { 1448 if (ret < 0)
1449 dst_release(dst);
1450 return ret; 1449 return ret;
1451 }
1452 1450
1453 *out_ttl = ip6_dst_hoplimit(dst); 1451 *out_ttl = ip6_dst_hoplimit(dst);
1454 1452
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index aaa0f4ebba9a..31353e5c3c78 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -128,10 +128,10 @@ static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
128 return mlx5e_skb_l2_header_offset(skb); 128 return mlx5e_skb_l2_header_offset(skb);
129} 129}
130 130
131static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode, 131static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
132 struct sk_buff *skb) 132 struct sk_buff *skb)
133{ 133{
134 int hlen; 134 u16 hlen;
135 135
136 switch (mode) { 136 switch (mode) {
137 case MLX5_INLINE_MODE_NONE: 137 case MLX5_INLINE_MODE_NONE:
@@ -140,19 +140,22 @@ static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
140 hlen = eth_get_headlen(skb->data, skb_headlen(skb)); 140 hlen = eth_get_headlen(skb->data, skb_headlen(skb));
141 if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb)) 141 if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
142 hlen += VLAN_HLEN; 142 hlen += VLAN_HLEN;
143 return hlen; 143 break;
144 case MLX5_INLINE_MODE_IP: 144 case MLX5_INLINE_MODE_IP:
145 /* When transport header is set to zero, it means no transport 145 /* When transport header is set to zero, it means no transport
146 * header. When transport header is set to 0xff's, it means 146 * header. When transport header is set to 0xff's, it means
147 * transport header wasn't set. 147 * transport header wasn't set.
148 */ 148 */
149 if (skb_transport_offset(skb)) 149 if (skb_transport_offset(skb)) {
150 return mlx5e_skb_l3_header_offset(skb); 150 hlen = mlx5e_skb_l3_header_offset(skb);
151 break;
152 }
151 /* fall through */ 153 /* fall through */
152 case MLX5_INLINE_MODE_L2: 154 case MLX5_INLINE_MODE_L2:
153 default: 155 default:
154 return mlx5e_skb_l2_header_offset(skb); 156 hlen = mlx5e_skb_l2_header_offset(skb);
155 } 157 }
158 return min_t(u16, hlen, skb->len);
156} 159}
157 160
158static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, 161static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 95b64025ce36..5bc0593bd76e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -815,7 +815,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
815 struct mlx5_eswitch_rep *rep; 815 struct mlx5_eswitch_rep *rep;
816 int vport; 816 int vport;
817 817
818 for (vport = 0; vport < nvports; vport++) { 818 for (vport = nvports - 1; vport >= 0; vport--) {
819 rep = &esw->offloads.vport_reps[vport]; 819 rep = &esw->offloads.vport_reps[vport];
820 if (!rep->valid) 820 if (!rep->valid)
821 continue; 821 continue;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c065132b956d..16885827367b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1186,7 +1186,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1186 } 1186 }
1187 } 1187 }
1188 1188
1189 clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
1190 set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); 1189 set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
1191out: 1190out:
1192 mutex_unlock(&dev->intf_state_mutex); 1191 mutex_unlock(&dev->intf_state_mutex);
@@ -1261,7 +1260,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1261 mlx5_drain_health_recovery(dev); 1260 mlx5_drain_health_recovery(dev);
1262 1261
1263 mutex_lock(&dev->intf_state_mutex); 1262 mutex_lock(&dev->intf_state_mutex);
1264 if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) { 1263 if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
1265 dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", 1264 dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
1266 __func__); 1265 __func__);
1267 if (cleanup) 1266 if (cleanup)
@@ -1270,7 +1269,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1270 } 1269 }
1271 1270
1272 clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); 1271 clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
1273 set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
1274 1272
1275 if (mlx5_device_registered(dev)) 1273 if (mlx5_device_registered(dev))
1276 mlx5_detach_device(dev); 1274 mlx5_detach_device(dev);
@@ -1565,8 +1563,6 @@ static void shutdown(struct pci_dev *pdev)
1565 int err; 1563 int err;
1566 1564
1567 dev_info(&pdev->dev, "Shutdown was called\n"); 1565 dev_info(&pdev->dev, "Shutdown was called\n");
1568 /* Notify mlx5 clients that the kernel is being shut down */
1569 set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
1570 err = mlx5_try_fast_unload(dev); 1566 err = mlx5_try_fast_unload(dev);
1571 if (err) 1567 if (err)
1572 mlx5_unload_one(dev, priv, false); 1568 mlx5_unload_one(dev, priv, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index f774de6f5fcb..520f6382dfde 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -201,13 +201,13 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev,
201static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, 201static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
202 u16 lwm, int is_srq) 202 u16 lwm, int is_srq)
203{ 203{
204 /* arm_srq structs missing using identical xrc ones */ 204 u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
205 u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0}; 205 u32 srq_out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
206 u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
207 206
208 MLX5_SET(arm_xrc_srq_in, srq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ); 207 MLX5_SET(arm_rq_in, srq_in, opcode, MLX5_CMD_OP_ARM_RQ);
209 MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn); 208 MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
210 MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm); 209 MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn);
210 MLX5_SET(arm_rq_in, srq_in, lwm, lwm);
211 211
212 return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), 212 return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
213 srq_out, sizeof(srq_out)); 213 srq_out, sizeof(srq_out));
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 60bf8f27cc00..c6a3e61b53bd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4139,6 +4139,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
4139 return -EINVAL; 4139 return -EINVAL;
4140 if (!info->linking) 4140 if (!info->linking)
4141 break; 4141 break;
4142 if (netdev_has_any_upper_dev(upper_dev))
4143 return -EINVAL;
4142 if (netif_is_lag_master(upper_dev) && 4144 if (netif_is_lag_master(upper_dev) &&
4143 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, 4145 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
4144 info->upper_info)) 4146 info->upper_info))
@@ -4258,6 +4260,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
4258 upper_dev = info->upper_dev; 4260 upper_dev = info->upper_dev;
4259 if (!netif_is_bridge_master(upper_dev)) 4261 if (!netif_is_bridge_master(upper_dev))
4260 return -EINVAL; 4262 return -EINVAL;
4263 if (!info->linking)
4264 break;
4265 if (netdev_has_any_upper_dev(upper_dev))
4266 return -EINVAL;
4261 break; 4267 break;
4262 case NETDEV_CHANGEUPPER: 4268 case NETDEV_CHANGEUPPER:
4263 upper_dev = info->upper_dev; 4269 upper_dev = info->upper_dev;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 5eb1606765c5..d39ffbfcc436 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -705,6 +705,7 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
705 bool is_port_mc_router) 705 bool is_port_mc_router)
706{ 706{
707 struct mlxsw_sp_bridge_port *bridge_port; 707 struct mlxsw_sp_bridge_port *bridge_port;
708 int err;
708 709
709 if (switchdev_trans_ph_prepare(trans)) 710 if (switchdev_trans_ph_prepare(trans))
710 return 0; 711 return 0;
@@ -715,11 +716,17 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
715 return 0; 716 return 0;
716 717
717 if (!bridge_port->bridge_device->multicast_enabled) 718 if (!bridge_port->bridge_device->multicast_enabled)
718 return 0; 719 goto out;
719 720
720 return mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, 721 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
721 MLXSW_SP_FLOOD_TYPE_MC, 722 MLXSW_SP_FLOOD_TYPE_MC,
722 is_port_mc_router); 723 is_port_mc_router);
724 if (err)
725 return err;
726
727out:
728 bridge_port->mrouter = is_port_mc_router;
729 return 0;
723} 730}
724 731
725static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port, 732static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 0e08404480ef..d25b5038c3a2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -42,33 +42,29 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
42 struct tc_cls_flower_offload *flow, u8 key_type, 42 struct tc_cls_flower_offload *flow, u8 key_type,
43 bool mask_version) 43 bool mask_version)
44{ 44{
45 struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
45 struct flow_dissector_key_vlan *flow_vlan; 46 struct flow_dissector_key_vlan *flow_vlan;
46 u16 tmp_tci; 47 u16 tmp_tci;
47 48
49 memset(frame, 0, sizeof(struct nfp_flower_meta_two));
48 /* Populate the metadata frame. */ 50 /* Populate the metadata frame. */
49 frame->nfp_flow_key_layer = key_type; 51 frame->nfp_flow_key_layer = key_type;
50 frame->mask_id = ~0; 52 frame->mask_id = ~0;
51 53
52 if (mask_version) { 54 if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
53 frame->tci = cpu_to_be16(~0); 55 flow_vlan = skb_flow_dissector_target(flow->dissector,
54 return; 56 FLOW_DISSECTOR_KEY_VLAN,
55 } 57 target);
56 58 /* Populate the tci field. */
57 flow_vlan = skb_flow_dissector_target(flow->dissector, 59 if (flow_vlan->vlan_id) {
58 FLOW_DISSECTOR_KEY_VLAN, 60 tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
59 flow->key); 61 flow_vlan->vlan_priority) |
60 62 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
61 /* Populate the tci field. */ 63 flow_vlan->vlan_id) |
62 if (!flow_vlan->vlan_id) { 64 NFP_FLOWER_MASK_VLAN_CFI;
63 tmp_tci = 0; 65 frame->tci = cpu_to_be16(tmp_tci);
64 } else { 66 }
65 tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
66 flow_vlan->vlan_priority) |
67 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
68 flow_vlan->vlan_id) |
69 NFP_FLOWER_MASK_VLAN_CFI;
70 } 67 }
71 frame->tci = cpu_to_be16(tmp_tci);
72} 68}
73 69
74static void 70static void
@@ -99,17 +95,18 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
99 bool mask_version) 95 bool mask_version)
100{ 96{
101 struct fl_flow_key *target = mask_version ? flow->mask : flow->key; 97 struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
102 struct flow_dissector_key_eth_addrs *flow_mac; 98 struct flow_dissector_key_eth_addrs *addr;
103
104 flow_mac = skb_flow_dissector_target(flow->dissector,
105 FLOW_DISSECTOR_KEY_ETH_ADDRS,
106 target);
107 99
108 memset(frame, 0, sizeof(struct nfp_flower_mac_mpls)); 100 memset(frame, 0, sizeof(struct nfp_flower_mac_mpls));
109 101
110 /* Populate mac frame. */ 102 if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
111 ether_addr_copy(frame->mac_dst, &flow_mac->dst[0]); 103 addr = skb_flow_dissector_target(flow->dissector,
112 ether_addr_copy(frame->mac_src, &flow_mac->src[0]); 104 FLOW_DISSECTOR_KEY_ETH_ADDRS,
105 target);
106 /* Populate mac frame. */
107 ether_addr_copy(frame->mac_dst, &addr->dst[0]);
108 ether_addr_copy(frame->mac_src, &addr->src[0]);
109 }
113 110
114 if (mask_version) 111 if (mask_version)
115 frame->mpls_lse = cpu_to_be32(~0); 112 frame->mpls_lse = cpu_to_be32(~0);
@@ -121,14 +118,17 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame,
121 bool mask_version) 118 bool mask_version)
122{ 119{
123 struct fl_flow_key *target = mask_version ? flow->mask : flow->key; 120 struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
124 struct flow_dissector_key_ports *flow_tp; 121 struct flow_dissector_key_ports *tp;
125 122
126 flow_tp = skb_flow_dissector_target(flow->dissector, 123 memset(frame, 0, sizeof(struct nfp_flower_tp_ports));
127 FLOW_DISSECTOR_KEY_PORTS,
128 target);
129 124
130 frame->port_src = flow_tp->src; 125 if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
131 frame->port_dst = flow_tp->dst; 126 tp = skb_flow_dissector_target(flow->dissector,
127 FLOW_DISSECTOR_KEY_PORTS,
128 target);
129 frame->port_src = tp->src;
130 frame->port_dst = tp->dst;
131 }
132} 132}
133 133
134static void 134static void
@@ -137,25 +137,27 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
137 bool mask_version) 137 bool mask_version)
138{ 138{
139 struct fl_flow_key *target = mask_version ? flow->mask : flow->key; 139 struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
140 struct flow_dissector_key_ipv4_addrs *flow_ipv4; 140 struct flow_dissector_key_ipv4_addrs *addr;
141 struct flow_dissector_key_basic *flow_basic; 141 struct flow_dissector_key_basic *basic;
142
143 flow_ipv4 = skb_flow_dissector_target(flow->dissector,
144 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
145 target);
146
147 flow_basic = skb_flow_dissector_target(flow->dissector,
148 FLOW_DISSECTOR_KEY_BASIC,
149 target);
150 142
151 /* Populate IPv4 frame. */
152 frame->reserved = 0;
153 frame->ipv4_src = flow_ipv4->src;
154 frame->ipv4_dst = flow_ipv4->dst;
155 frame->proto = flow_basic->ip_proto;
156 /* Wildcard TOS/TTL for now. */ 143 /* Wildcard TOS/TTL for now. */
157 frame->tos = 0; 144 memset(frame, 0, sizeof(struct nfp_flower_ipv4));
158 frame->ttl = 0; 145
146 if (dissector_uses_key(flow->dissector,
147 FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
148 addr = skb_flow_dissector_target(flow->dissector,
149 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
150 target);
151 frame->ipv4_src = addr->src;
152 frame->ipv4_dst = addr->dst;
153 }
154
155 if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
156 basic = skb_flow_dissector_target(flow->dissector,
157 FLOW_DISSECTOR_KEY_BASIC,
158 target);
159 frame->proto = basic->ip_proto;
160 }
159} 161}
160 162
161static void 163static void
@@ -164,26 +166,27 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
164 bool mask_version) 166 bool mask_version)
165{ 167{
166 struct fl_flow_key *target = mask_version ? flow->mask : flow->key; 168 struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
167 struct flow_dissector_key_ipv6_addrs *flow_ipv6; 169 struct flow_dissector_key_ipv6_addrs *addr;
168 struct flow_dissector_key_basic *flow_basic; 170 struct flow_dissector_key_basic *basic;
169
170 flow_ipv6 = skb_flow_dissector_target(flow->dissector,
171 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
172 target);
173 171
174 flow_basic = skb_flow_dissector_target(flow->dissector,
175 FLOW_DISSECTOR_KEY_BASIC,
176 target);
177
178 /* Populate IPv6 frame. */
179 frame->reserved = 0;
180 frame->ipv6_src = flow_ipv6->src;
181 frame->ipv6_dst = flow_ipv6->dst;
182 frame->proto = flow_basic->ip_proto;
183 /* Wildcard LABEL/TOS/TTL for now. */ 172 /* Wildcard LABEL/TOS/TTL for now. */
184 frame->ipv6_flow_label_exthdr = 0; 173 memset(frame, 0, sizeof(struct nfp_flower_ipv6));
185 frame->tos = 0; 174
186 frame->ttl = 0; 175 if (dissector_uses_key(flow->dissector,
176 FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
177 addr = skb_flow_dissector_target(flow->dissector,
178 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
179 target);
180 frame->ipv6_src = addr->src;
181 frame->ipv6_dst = addr->dst;
182 }
183
184 if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
185 basic = skb_flow_dissector_target(flow->dissector,
186 FLOW_DISSECTOR_KEY_BASIC,
187 target);
188 frame->proto = basic->ip_proto;
189 }
187} 190}
188 191
189int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, 192int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 4ad10bd5e139..74a96d6bb05c 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -105,43 +105,62 @@ static int
105nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, 105nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
106 struct tc_cls_flower_offload *flow) 106 struct tc_cls_flower_offload *flow)
107{ 107{
108 struct flow_dissector_key_control *mask_enc_ctl; 108 struct flow_dissector_key_basic *mask_basic = NULL;
109 struct flow_dissector_key_basic *mask_basic; 109 struct flow_dissector_key_basic *key_basic = NULL;
110 struct flow_dissector_key_basic *key_basic; 110 struct flow_dissector_key_ip *mask_ip = NULL;
111 u32 key_layer_two; 111 u32 key_layer_two;
112 u8 key_layer; 112 u8 key_layer;
113 int key_size; 113 int key_size;
114 114
115 mask_enc_ctl = skb_flow_dissector_target(flow->dissector, 115 if (dissector_uses_key(flow->dissector,
116 FLOW_DISSECTOR_KEY_ENC_CONTROL, 116 FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
117 flow->mask); 117 struct flow_dissector_key_control *mask_enc_ctl =
118 skb_flow_dissector_target(flow->dissector,
119 FLOW_DISSECTOR_KEY_ENC_CONTROL,
120 flow->mask);
121 /* We are expecting a tunnel. For now we ignore offloading. */
122 if (mask_enc_ctl->addr_type)
123 return -EOPNOTSUPP;
124 }
118 125
119 mask_basic = skb_flow_dissector_target(flow->dissector, 126 if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
120 FLOW_DISSECTOR_KEY_BASIC, 127 mask_basic = skb_flow_dissector_target(flow->dissector,
121 flow->mask); 128 FLOW_DISSECTOR_KEY_BASIC,
129 flow->mask);
130
131 key_basic = skb_flow_dissector_target(flow->dissector,
132 FLOW_DISSECTOR_KEY_BASIC,
133 flow->key);
134 }
135
136 if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP))
137 mask_ip = skb_flow_dissector_target(flow->dissector,
138 FLOW_DISSECTOR_KEY_IP,
139 flow->mask);
122 140
123 key_basic = skb_flow_dissector_target(flow->dissector,
124 FLOW_DISSECTOR_KEY_BASIC,
125 flow->key);
126 key_layer_two = 0; 141 key_layer_two = 0;
127 key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC; 142 key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
128 key_size = sizeof(struct nfp_flower_meta_one) + 143 key_size = sizeof(struct nfp_flower_meta_one) +
129 sizeof(struct nfp_flower_in_port) + 144 sizeof(struct nfp_flower_in_port) +
130 sizeof(struct nfp_flower_mac_mpls); 145 sizeof(struct nfp_flower_mac_mpls);
131 146
132 /* We are expecting a tunnel. For now we ignore offloading. */ 147 if (mask_basic && mask_basic->n_proto) {
133 if (mask_enc_ctl->addr_type)
134 return -EOPNOTSUPP;
135
136 if (mask_basic->n_proto) {
137 /* Ethernet type is present in the key. */ 148 /* Ethernet type is present in the key. */
138 switch (key_basic->n_proto) { 149 switch (key_basic->n_proto) {
139 case cpu_to_be16(ETH_P_IP): 150 case cpu_to_be16(ETH_P_IP):
151 if (mask_ip && mask_ip->tos)
152 return -EOPNOTSUPP;
153 if (mask_ip && mask_ip->ttl)
154 return -EOPNOTSUPP;
140 key_layer |= NFP_FLOWER_LAYER_IPV4; 155 key_layer |= NFP_FLOWER_LAYER_IPV4;
141 key_size += sizeof(struct nfp_flower_ipv4); 156 key_size += sizeof(struct nfp_flower_ipv4);
142 break; 157 break;
143 158
144 case cpu_to_be16(ETH_P_IPV6): 159 case cpu_to_be16(ETH_P_IPV6):
160 if (mask_ip && mask_ip->tos)
161 return -EOPNOTSUPP;
162 if (mask_ip && mask_ip->ttl)
163 return -EOPNOTSUPP;
145 key_layer |= NFP_FLOWER_LAYER_IPV6; 164 key_layer |= NFP_FLOWER_LAYER_IPV6;
146 key_size += sizeof(struct nfp_flower_ipv6); 165 key_size += sizeof(struct nfp_flower_ipv6);
147 break; 166 break;
@@ -152,6 +171,11 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
152 case cpu_to_be16(ETH_P_ARP): 171 case cpu_to_be16(ETH_P_ARP):
153 return -EOPNOTSUPP; 172 return -EOPNOTSUPP;
154 173
174 /* Currently we do not offload MPLS. */
175 case cpu_to_be16(ETH_P_MPLS_UC):
176 case cpu_to_be16(ETH_P_MPLS_MC):
177 return -EOPNOTSUPP;
178
155 /* Will be included in layer 2. */ 179 /* Will be included in layer 2. */
156 case cpu_to_be16(ETH_P_8021Q): 180 case cpu_to_be16(ETH_P_8021Q):
157 break; 181 break;
@@ -166,7 +190,7 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
166 } 190 }
167 } 191 }
168 192
169 if (mask_basic->ip_proto) { 193 if (mask_basic && mask_basic->ip_proto) {
170 /* Ethernet type is present in the key. */ 194 /* Ethernet type is present in the key. */
171 switch (key_basic->ip_proto) { 195 switch (key_basic->ip_proto) {
172 case IPPROTO_TCP: 196 case IPPROTO_TCP:
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index d67969d3e484..3f199db2002e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -98,21 +98,20 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
98 struct nfp_pf *pf = pci_get_drvdata(pdev); 98 struct nfp_pf *pf = pci_get_drvdata(pdev);
99 int err; 99 int err;
100 100
101 mutex_lock(&pf->lock);
102
103 if (num_vfs > pf->limit_vfs) { 101 if (num_vfs > pf->limit_vfs) {
104 nfp_info(pf->cpp, "Firmware limits number of VFs to %u\n", 102 nfp_info(pf->cpp, "Firmware limits number of VFs to %u\n",
105 pf->limit_vfs); 103 pf->limit_vfs);
106 err = -EINVAL; 104 return -EINVAL;
107 goto err_unlock;
108 } 105 }
109 106
110 err = pci_enable_sriov(pdev, num_vfs); 107 err = pci_enable_sriov(pdev, num_vfs);
111 if (err) { 108 if (err) {
112 dev_warn(&pdev->dev, "Failed to enable PCI SR-IOV: %d\n", err); 109 dev_warn(&pdev->dev, "Failed to enable PCI SR-IOV: %d\n", err);
113 goto err_unlock; 110 return err;
114 } 111 }
115 112
113 mutex_lock(&pf->lock);
114
116 err = nfp_app_sriov_enable(pf->app, num_vfs); 115 err = nfp_app_sriov_enable(pf->app, num_vfs);
117 if (err) { 116 if (err) {
118 dev_warn(&pdev->dev, 117 dev_warn(&pdev->dev,
@@ -129,9 +128,8 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
129 return num_vfs; 128 return num_vfs;
130 129
131err_sriov_disable: 130err_sriov_disable:
132 pci_disable_sriov(pdev);
133err_unlock:
134 mutex_unlock(&pf->lock); 131 mutex_unlock(&pf->lock);
132 pci_disable_sriov(pdev);
135 return err; 133 return err;
136#endif 134#endif
137 return 0; 135 return 0;
@@ -158,10 +156,10 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
158 156
159 pf->num_vfs = 0; 157 pf->num_vfs = 0;
160 158
159 mutex_unlock(&pf->lock);
160
161 pci_disable_sriov(pdev); 161 pci_disable_sriov(pdev);
162 dev_dbg(&pdev->dev, "Removed VFs.\n"); 162 dev_dbg(&pdev->dev, "Removed VFs.\n");
163
164 mutex_unlock(&pf->lock);
165#endif 163#endif
166 return 0; 164 return 0;
167} 165}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 9f77ce038a4a..66a09e490cf5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -895,6 +895,8 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
895 895
896 netdev_tx_sent_queue(nd_q, txbuf->real_len); 896 netdev_tx_sent_queue(nd_q, txbuf->real_len);
897 897
898 skb_tx_timestamp(skb);
899
898 tx_ring->wr_p += nr_frags + 1; 900 tx_ring->wr_p += nr_frags + 1;
899 if (nfp_net_tx_ring_should_stop(tx_ring)) 901 if (nfp_net_tx_ring_should_stop(tx_ring))
900 nfp_net_tx_ring_stop(nd_q, tx_ring); 902 nfp_net_tx_ring_stop(nd_q, tx_ring);
@@ -903,8 +905,6 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
903 if (!skb->xmit_more || netif_xmit_stopped(nd_q)) 905 if (!skb->xmit_more || netif_xmit_stopped(nd_q))
904 nfp_net_tx_xmit_more_flush(tx_ring); 906 nfp_net_tx_xmit_more_flush(tx_ring);
905 907
906 skb_tx_timestamp(skb);
907
908 return NETDEV_TX_OK; 908 return NETDEV_TX_OK;
909 909
910err_unmap: 910err_unmap:
@@ -1751,6 +1751,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1751 continue; 1751 continue;
1752 } 1752 }
1753 1753
1754 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
1755
1756 nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
1757
1754 if (likely(!meta.portid)) { 1758 if (likely(!meta.portid)) {
1755 netdev = dp->netdev; 1759 netdev = dp->netdev;
1756 } else { 1760 } else {
@@ -1759,16 +1763,12 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1759 nn = netdev_priv(dp->netdev); 1763 nn = netdev_priv(dp->netdev);
1760 netdev = nfp_app_repr_get(nn->app, meta.portid); 1764 netdev = nfp_app_repr_get(nn->app, meta.portid);
1761 if (unlikely(!netdev)) { 1765 if (unlikely(!netdev)) {
1762 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb); 1766 nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb);
1763 continue; 1767 continue;
1764 } 1768 }
1765 nfp_repr_inc_rx_stats(netdev, pkt_len); 1769 nfp_repr_inc_rx_stats(netdev, pkt_len);
1766 } 1770 }
1767 1771
1768 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
1769
1770 nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
1771
1772 skb_reserve(skb, pkt_off); 1772 skb_reserve(skb, pkt_off);
1773 skb_put(skb, pkt_len); 1773 skb_put(skb, pkt_len);
1774 1774
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 5797dbf2b507..34b985384d26 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -456,13 +456,9 @@ static int nfp_net_pf_app_start(struct nfp_pf *pf)
456{ 456{
457 int err; 457 int err;
458 458
459 err = nfp_net_pf_app_start_ctrl(pf);
460 if (err)
461 return err;
462
463 err = nfp_app_start(pf->app, pf->ctrl_vnic); 459 err = nfp_app_start(pf->app, pf->ctrl_vnic);
464 if (err) 460 if (err)
465 goto err_ctrl_stop; 461 return err;
466 462
467 if (pf->num_vfs) { 463 if (pf->num_vfs) {
468 err = nfp_app_sriov_enable(pf->app, pf->num_vfs); 464 err = nfp_app_sriov_enable(pf->app, pf->num_vfs);
@@ -474,8 +470,6 @@ static int nfp_net_pf_app_start(struct nfp_pf *pf)
474 470
475err_app_stop: 471err_app_stop:
476 nfp_app_stop(pf->app); 472 nfp_app_stop(pf->app);
477err_ctrl_stop:
478 nfp_net_pf_app_stop_ctrl(pf);
479 return err; 473 return err;
480} 474}
481 475
@@ -484,7 +478,6 @@ static void nfp_net_pf_app_stop(struct nfp_pf *pf)
484 if (pf->num_vfs) 478 if (pf->num_vfs)
485 nfp_app_sriov_disable(pf->app); 479 nfp_app_sriov_disable(pf->app);
486 nfp_app_stop(pf->app); 480 nfp_app_stop(pf->app);
487 nfp_net_pf_app_stop_ctrl(pf);
488} 481}
489 482
490static void nfp_net_pci_unmap_mem(struct nfp_pf *pf) 483static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
@@ -559,7 +552,7 @@ err_unmap_ctrl:
559 552
560static void nfp_net_pci_remove_finish(struct nfp_pf *pf) 553static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
561{ 554{
562 nfp_net_pf_app_stop(pf); 555 nfp_net_pf_app_stop_ctrl(pf);
563 /* stop app first, to avoid double free of ctrl vNIC's ddir */ 556 /* stop app first, to avoid double free of ctrl vNIC's ddir */
564 nfp_net_debugfs_dir_clean(&pf->ddir); 557 nfp_net_debugfs_dir_clean(&pf->ddir);
565 558
@@ -690,6 +683,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
690{ 683{
691 struct nfp_net_fw_version fw_ver; 684 struct nfp_net_fw_version fw_ver;
692 u8 __iomem *ctrl_bar, *qc_bar; 685 u8 __iomem *ctrl_bar, *qc_bar;
686 struct nfp_net *nn;
693 int stride; 687 int stride;
694 int err; 688 int err;
695 689
@@ -766,7 +760,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
766 if (err) 760 if (err)
767 goto err_free_vnics; 761 goto err_free_vnics;
768 762
769 err = nfp_net_pf_app_start(pf); 763 err = nfp_net_pf_app_start_ctrl(pf);
770 if (err) 764 if (err)
771 goto err_free_irqs; 765 goto err_free_irqs;
772 766
@@ -774,12 +768,20 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
774 if (err) 768 if (err)
775 goto err_stop_app; 769 goto err_stop_app;
776 770
771 err = nfp_net_pf_app_start(pf);
772 if (err)
773 goto err_clean_vnics;
774
777 mutex_unlock(&pf->lock); 775 mutex_unlock(&pf->lock);
778 776
779 return 0; 777 return 0;
780 778
779err_clean_vnics:
780 list_for_each_entry(nn, &pf->vnics, vnic_list)
781 if (nfp_net_is_data_vnic(nn))
782 nfp_net_pf_clean_vnic(pf, nn);
781err_stop_app: 783err_stop_app:
782 nfp_net_pf_app_stop(pf); 784 nfp_net_pf_app_stop_ctrl(pf);
783err_free_irqs: 785err_free_irqs:
784 nfp_net_pf_free_irqs(pf); 786 nfp_net_pf_free_irqs(pf);
785err_free_vnics: 787err_free_vnics:
@@ -803,6 +805,8 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
803 if (list_empty(&pf->vnics)) 805 if (list_empty(&pf->vnics))
804 goto out; 806 goto out;
805 807
808 nfp_net_pf_app_stop(pf);
809
806 list_for_each_entry(nn, &pf->vnics, vnic_list) 810 list_for_each_entry(nn, &pf->vnics, vnic_list)
807 if (nfp_net_is_data_vnic(nn)) 811 if (nfp_net_is_data_vnic(nn))
808 nfp_net_pf_clean_vnic(pf, nn); 812 nfp_net_pf_clean_vnic(pf, nn);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 28ea0af89aef..e3223f2fe2ff 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -724,7 +724,7 @@ static void ql_build_coredump_seg_header(
724 seg_hdr->cookie = MPI_COREDUMP_COOKIE; 724 seg_hdr->cookie = MPI_COREDUMP_COOKIE;
725 seg_hdr->segNum = seg_number; 725 seg_hdr->segNum = seg_number;
726 seg_hdr->segSize = seg_size; 726 seg_hdr->segSize = seg_size;
727 memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1); 727 strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
728} 728}
729 729
730/* 730/*
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index bd07a15d3b7c..e03fcf914690 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6863,8 +6863,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
6863 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, 6863 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
6864 tp->TxDescArray + entry); 6864 tp->TxDescArray + entry);
6865 if (skb) { 6865 if (skb) {
6866 tp->dev->stats.tx_dropped++; 6866 dev_consume_skb_any(skb);
6867 dev_kfree_skb_any(skb);
6868 tx_skb->skb = NULL; 6867 tx_skb->skb = NULL;
6869 } 6868 }
6870 } 6869 }
@@ -7319,7 +7318,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
7319 tp->tx_stats.packets++; 7318 tp->tx_stats.packets++;
7320 tp->tx_stats.bytes += tx_skb->skb->len; 7319 tp->tx_stats.bytes += tx_skb->skb->len;
7321 u64_stats_update_end(&tp->tx_stats.syncp); 7320 u64_stats_update_end(&tp->tx_stats.syncp);
7322 dev_kfree_skb_any(tx_skb->skb); 7321 dev_consume_skb_any(tx_skb->skb);
7323 tx_skb->skb = NULL; 7322 tx_skb->skb = NULL;
7324 } 7323 }
7325 dirty_tx++; 7324 dirty_tx++;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index 73427e29df2a..fbd00cb0cb7d 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -47,6 +47,8 @@ static int sxgbe_probe_config_dt(struct platform_device *pdev,
47 plat->mdio_bus_data = devm_kzalloc(&pdev->dev, 47 plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
48 sizeof(*plat->mdio_bus_data), 48 sizeof(*plat->mdio_bus_data),
49 GFP_KERNEL); 49 GFP_KERNEL);
50 if (!plat->mdio_bus_data)
51 return -ENOMEM;
50 52
51 dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); 53 dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL);
52 if (!dma_cfg) 54 if (!dma_cfg)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 17d4bbaeb65c..6e359572b9f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -269,7 +269,10 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
269 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift); 269 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
270 ctrl |= val << reg_shift; 270 ctrl |= val << reg_shift;
271 271
272 if (dwmac->f2h_ptp_ref_clk) { 272 if (dwmac->f2h_ptp_ref_clk ||
273 phymode == PHY_INTERFACE_MODE_MII ||
274 phymode == PHY_INTERFACE_MODE_GMII ||
275 phymode == PHY_INTERFACE_MODE_SGMII) {
273 ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2); 276 ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
274 regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG, 277 regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
275 &module); 278 &module);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index fffd6d5fc907..39c2122a4f26 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -979,14 +979,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
979} 979}
980 980
981static const struct of_device_id sun8i_dwmac_match[] = { 981static const struct of_device_id sun8i_dwmac_match[] = {
982 { .compatible = "allwinner,sun8i-h3-emac",
983 .data = &emac_variant_h3 },
984 { .compatible = "allwinner,sun8i-v3s-emac",
985 .data = &emac_variant_v3s },
986 { .compatible = "allwinner,sun8i-a83t-emac",
987 .data = &emac_variant_a83t },
988 { .compatible = "allwinner,sun50i-a64-emac",
989 .data = &emac_variant_a64 },
990 { } 982 { }
991}; 983};
992MODULE_DEVICE_TABLE(of, sun8i_dwmac_match); 984MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
index 56ba411421f0..38d1cc557c11 100644
--- a/drivers/net/ethernet/ti/cpsw-common.c
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -96,7 +96,7 @@ int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
96 if (of_machine_is_compatible("ti,dra7")) 96 if (of_machine_is_compatible("ti,dra7"))
97 return davinci_emac_3517_get_macid(dev, 0x514, slave, mac_addr); 97 return davinci_emac_3517_get_macid(dev, 0x514, slave, mac_addr);
98 98
99 dev_err(dev, "incompatible machine/device type for reading mac address\n"); 99 dev_info(dev, "incompatible machine/device type for reading mac address\n");
100 return -ENOENT; 100 return -ENOENT;
101} 101}
102EXPORT_SYMBOL_GPL(ti_cm_get_macid); 102EXPORT_SYMBOL_GPL(ti_cm_get_macid);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 0d78727f1a14..d91cbc6c3ca4 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1269,7 +1269,12 @@ static void netvsc_link_change(struct work_struct *w)
1269 bool notify = false, reschedule = false; 1269 bool notify = false, reschedule = false;
1270 unsigned long flags, next_reconfig, delay; 1270 unsigned long flags, next_reconfig, delay;
1271 1271
1272 rtnl_lock(); 1272 /* if changes are happening, comeback later */
1273 if (!rtnl_trylock()) {
1274 schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
1275 return;
1276 }
1277
1273 net_device = rtnl_dereference(ndev_ctx->nvdev); 1278 net_device = rtnl_dereference(ndev_ctx->nvdev);
1274 if (!net_device) 1279 if (!net_device)
1275 goto out_unlock; 1280 goto out_unlock;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 5e1ab1160856..98e4deaa3a6a 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3521,6 +3521,7 @@ module_init(macsec_init);
3521module_exit(macsec_exit); 3521module_exit(macsec_exit);
3522 3522
3523MODULE_ALIAS_RTNL_LINK("macsec"); 3523MODULE_ALIAS_RTNL_LINK("macsec");
3524MODULE_ALIAS_GENL_FAMILY("macsec");
3524 3525
3525MODULE_DESCRIPTION("MACsec IEEE 802.1AE"); 3526MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
3526MODULE_LICENSE("GPL v2"); 3527MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 5068c582d502..d0626bf5c540 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -749,9 +749,6 @@ void phy_stop_machine(struct phy_device *phydev)
749 if (phydev->state > PHY_UP && phydev->state != PHY_HALTED) 749 if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
750 phydev->state = PHY_UP; 750 phydev->state = PHY_UP;
751 mutex_unlock(&phydev->lock); 751 mutex_unlock(&phydev->lock);
752
753 /* Now we can run the state machine synchronously */
754 phy_state_machine(&phydev->state_queue.work);
755} 752}
756 753
757/** 754/**
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 1790f7fec125..2f742ae5b92e 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -864,15 +864,17 @@ EXPORT_SYMBOL(phy_attached_info);
864#define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)" 864#define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)"
865void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) 865void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
866{ 866{
867 const char *drv_name = phydev->drv ? phydev->drv->name : "unbound";
868
867 if (!fmt) { 869 if (!fmt) {
868 dev_info(&phydev->mdio.dev, ATTACHED_FMT "\n", 870 dev_info(&phydev->mdio.dev, ATTACHED_FMT "\n",
869 phydev->drv->name, phydev_name(phydev), 871 drv_name, phydev_name(phydev),
870 phydev->irq); 872 phydev->irq);
871 } else { 873 } else {
872 va_list ap; 874 va_list ap;
873 875
874 dev_info(&phydev->mdio.dev, ATTACHED_FMT, 876 dev_info(&phydev->mdio.dev, ATTACHED_FMT,
875 phydev->drv->name, phydev_name(phydev), 877 drv_name, phydev_name(phydev),
876 phydev->irq); 878 phydev->irq);
877 879
878 va_start(ap, fmt); 880 va_start(ap, fmt);
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 8f572b9f3625..9c80e80c5493 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1758,6 +1758,13 @@ static const struct usb_device_id cdc_devs[] = {
1758 .driver_info = (unsigned long)&wwan_noarp_info, 1758 .driver_info = (unsigned long)&wwan_noarp_info,
1759 }, 1759 },
1760 1760
1761 /* u-blox TOBY-L4 */
1762 { USB_DEVICE_AND_INTERFACE_INFO(0x1546, 0x1010,
1763 USB_CLASS_COMM,
1764 USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
1765 .driver_info = (unsigned long)&wwan_info,
1766 },
1767
1761 /* Generic CDC-NCM devices */ 1768 /* Generic CDC-NCM devices */
1762 { USB_INTERFACE_INFO(USB_CLASS_COMM, 1769 { USB_INTERFACE_INFO(USB_CLASS_COMM,
1763 USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), 1770 USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 98f17b05c68b..b06169ea60dc 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1058,7 +1058,7 @@ static void free_old_xmit_skbs(struct send_queue *sq)
1058 bytes += skb->len; 1058 bytes += skb->len;
1059 packets++; 1059 packets++;
1060 1060
1061 dev_kfree_skb_any(skb); 1061 dev_consume_skb_any(skb);
1062 } 1062 }
1063 1063
1064 /* Avoid overhead when no packets have been processed 1064 /* Avoid overhead when no packets have been processed
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index fa315d84e98e..a1ea9ef97ed9 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -787,6 +787,8 @@ int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
787 787
788void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable); 788void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);
789 789
790void iwl_pcie_rx_allocator_work(struct work_struct *data);
791
790/* common functions that are used by gen2 transport */ 792/* common functions that are used by gen2 transport */
791void iwl_pcie_apm_config(struct iwl_trans *trans); 793void iwl_pcie_apm_config(struct iwl_trans *trans);
792int iwl_pcie_prepare_card_hw(struct iwl_trans *trans); 794int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 351c4423125a..942736d3fa75 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -597,7 +597,7 @@ static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
597 rxq->free_count += RX_CLAIM_REQ_ALLOC; 597 rxq->free_count += RX_CLAIM_REQ_ALLOC;
598} 598}
599 599
600static void iwl_pcie_rx_allocator_work(struct work_struct *data) 600void iwl_pcie_rx_allocator_work(struct work_struct *data)
601{ 601{
602 struct iwl_rb_allocator *rba_p = 602 struct iwl_rb_allocator *rba_p =
603 container_of(data, struct iwl_rb_allocator, rx_alloc); 603 container_of(data, struct iwl_rb_allocator, rx_alloc);
@@ -900,10 +900,6 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
900 return err; 900 return err;
901 } 901 }
902 def_rxq = trans_pcie->rxq; 902 def_rxq = trans_pcie->rxq;
903 if (!rba->alloc_wq)
904 rba->alloc_wq = alloc_workqueue("rb_allocator",
905 WQ_HIGHPRI | WQ_UNBOUND, 1);
906 INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
907 903
908 spin_lock(&rba->lock); 904 spin_lock(&rba->lock);
909 atomic_set(&rba->req_pending, 0); 905 atomic_set(&rba->req_pending, 0);
@@ -1017,10 +1013,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
1017 } 1013 }
1018 1014
1019 cancel_work_sync(&rba->rx_alloc); 1015 cancel_work_sync(&rba->rx_alloc);
1020 if (rba->alloc_wq) {
1021 destroy_workqueue(rba->alloc_wq);
1022 rba->alloc_wq = NULL;
1023 }
1024 1016
1025 iwl_pcie_free_rbs_pool(trans); 1017 iwl_pcie_free_rbs_pool(trans);
1026 1018
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index f95eec52508e..3927bbf04f72 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1786,6 +1786,11 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
1786 iwl_pcie_tx_free(trans); 1786 iwl_pcie_tx_free(trans);
1787 iwl_pcie_rx_free(trans); 1787 iwl_pcie_rx_free(trans);
1788 1788
1789 if (trans_pcie->rba.alloc_wq) {
1790 destroy_workqueue(trans_pcie->rba.alloc_wq);
1791 trans_pcie->rba.alloc_wq = NULL;
1792 }
1793
1789 if (trans_pcie->msix_enabled) { 1794 if (trans_pcie->msix_enabled) {
1790 for (i = 0; i < trans_pcie->alloc_vecs; i++) { 1795 for (i = 0; i < trans_pcie->alloc_vecs; i++) {
1791 irq_set_affinity_hint( 1796 irq_set_affinity_hint(
@@ -3169,6 +3174,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
3169 trans_pcie->inta_mask = CSR_INI_SET_MASK; 3174 trans_pcie->inta_mask = CSR_INI_SET_MASK;
3170 } 3175 }
3171 3176
3177 trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
3178 WQ_HIGHPRI | WQ_UNBOUND, 1);
3179 INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
3180
3172#ifdef CONFIG_IWLWIFI_PCIE_RTPM 3181#ifdef CONFIG_IWLWIFI_PCIE_RTPM
3173 trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3; 3182 trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
3174#else 3183#else
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 08f0477f78d9..9915d83a4a30 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1571,6 +1571,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
1571 1571
1572 wl->state = WL1251_STATE_OFF; 1572 wl->state = WL1251_STATE_OFF;
1573 mutex_init(&wl->mutex); 1573 mutex_init(&wl->mutex);
1574 spin_lock_init(&wl->wl_lock);
1574 1575
1575 wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE; 1576 wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE;
1576 wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE; 1577 wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE;