Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/dsa/b53/b53_regs.h | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 2
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 5
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 2
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 66
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 35
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 26
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 23
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 49
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 115
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/trap.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 223
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 9
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 95
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r--  drivers/net/ethernet/tundra/tsi108_eth.c | 2
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 24
-rw-r--r--  drivers/net/hyperv/netvsc.c | 19
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 105
-rw-r--r--  drivers/net/macsec.c | 52
-rw-r--r--  drivers/net/macvlan.c | 2
-rw-r--r--  drivers/net/macvtap.c | 5
-rw-r--r--  drivers/net/phy/micrel.c | 27
-rw-r--r--  drivers/net/vxlan.c | 34
-rw-r--r--  drivers/net/wireless/ti/wlcore/main.c | 5
43 files changed, 662 insertions(+), 397 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 1f276fa30ba6..217e8da0628c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -152,7 +152,7 @@ module_param(lacp_rate, charp, 0);
 MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
			    "0 for slow, 1 for fast");
 module_param(ad_select, charp, 0);
-MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic; "
+MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
			    "0 for stable (default), 1 for bandwidth, "
			    "2 for count");
 module_param(min_links, int, 0);
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index 8f12bddd5dc9..a0b453ea34c9 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -258,7 +258,7 @@
  * BCM5325 and BCM5365 share most definitions below
  */
 #define B53_ARLTBL_MAC_VID_ENTRY(n)	(0x10 * (n))
-#define   ARLTBL_MAC_MASK		0xffffffffffff
+#define   ARLTBL_MAC_MASK		0xffffffffffffULL
 #define   ARLTBL_VID_S			48
 #define   ARLTBL_VID_MASK_25		0xff
 #define   ARLTBL_VID_MASK		0xfff
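
A standalone illustration of why the ULL suffix matters (not driver code; the
two constants are copied from the header, everything else is hypothetical):
without a suffix the exact type of 0xffffffffffff is left to the compiler's
int/long/long long promotion ladder, while ULL pins it to unsigned 64-bit so
mask arithmetic against 64-bit ARL entries is well defined.

	#include <stdint.h>
	#include <stdio.h>

	#define ARLTBL_MAC_MASK	0xffffffffffffULL	/* 48-bit MAC field */
	#define ARLTBL_VID_S	48			/* VID lives above it */

	int main(void)
	{
		uint64_t mac = 0x001122334455ULL;
		uint64_t vid = 0xabc;
		/* pack the MAC and VID into one 64-bit ARL entry */
		uint64_t entry = (mac & ARLTBL_MAC_MASK) | (vid << ARLTBL_VID_S);

		printf("entry = 0x%016llx\n", (unsigned long long)entry);
		return 0;
	}
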
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index d36aedde8cb9..d1d9d3cf9139 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3187,6 +3187,7 @@ static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
 	return err;
 }
 
+#ifdef CONFIG_NET_DSA_HWMON
 static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page,
 				    int reg)
 {
@@ -3212,6 +3213,7 @@ static int mv88e6xxx_mdio_page_write(struct dsa_switch *ds, int port, int page,
 
 	return ret;
 }
+#endif
 
 static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port)
 {
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 37a0f463b8de..18bb9556dd00 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -793,6 +793,8 @@ int xgene_enet_phy_connect(struct net_device *ndev)
 		netdev_err(ndev, "Could not connect to PHY\n");
 		return -ENODEV;
 	}
+#else
+	return -ENODEV;
 #endif
 	}
 
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 4bff0f3040df..b0da9693f28a 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -771,8 +771,10 @@ int arc_emac_probe(struct net_device *ndev, int interface)
 	priv->dev = dev;
 
 	priv->regs = devm_ioremap_resource(dev, &res_regs);
-	if (IS_ERR(priv->regs))
-		return PTR_ERR(priv->regs);
+	if (IS_ERR(priv->regs)) {
+		err = PTR_ERR(priv->regs);
+		goto out_put_node;
+	}
 
 	dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs);
 
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ff300f7cf529..659261218d9f 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12552,10 +12552,6 @@ static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
 			info->data = TG3_RSS_MAX_NUM_QS;
 		}
 
-		/* The first interrupt vector only
-		 * handles link interrupts.
-		 */
-		info->data -= 1;
 		return 0;
 
 	default:
@@ -14014,6 +14010,7 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
 	}
 
 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
+	    (!ec->rx_coalesce_usecs) ||
 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 36893d8958d4..b6fcf10621b6 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -403,11 +403,11 @@
 #define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII	0x00000004
 #define MACB_CAPS_NO_GIGABIT_HALF		0x00000008
 #define MACB_CAPS_USRIO_DISABLED		0x00000010
+#define MACB_CAPS_JUMBO				0x00000020
 #define MACB_CAPS_FIFO_MODE			0x10000000
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE	0x20000000
 #define MACB_CAPS_SG_DISABLED			0x40000000
 #define MACB_CAPS_MACB_IS_GEM			0x80000000
-#define MACB_CAPS_JUMBO				0x00000010
 
 /* Bit manipulation macros */
 #define MACB_BIT(name)					\
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 1471e16ba719..f45385f5c6e5 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1299,6 +1299,7 @@ static int
 dm9000_open(struct net_device *dev)
 {
 	struct board_info *db = netdev_priv(dev);
+	unsigned int irq_flags = irq_get_trigger_type(dev->irq);
 
 	if (netif_msg_ifup(db))
 		dev_dbg(db->dev, "enabling %s\n", dev->name);
@@ -1306,9 +1307,11 @@ dm9000_open(struct net_device *dev)
 	/* If there is no IRQ type specified, tell the user that this is a
 	 * problem
 	 */
-	if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE)
+	if (irq_flags == IRQF_TRIGGER_NONE)
 		dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
 
+	irq_flags |= IRQF_SHARED;
+
 	/* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
 	iow(db, DM9000_GPR, 0);	/* REG_1F bit0 activate phyxcer */
 	mdelay(1); /* delay needs by DM9000B */
@@ -1316,8 +1319,7 @@ dm9000_open(struct net_device *dev)
 	/* Initialize DM9000 board */
 	dm9000_init_dm9000(dev);
 
-	if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED,
-			dev->name, dev))
+	if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev))
 		return -EAGAIN;
 	/* Now that we have an interrupt handler hooked up we can unmask
 	 * our interrupts
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 1235c7f2564b..1e1eb92998fb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -17,7 +17,7 @@ static const struct mac_stats_string g_gmac_stats_string[] = {
17 {"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)}, 17 {"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)},
18 {"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)}, 18 {"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)},
19 {"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)}, 19 {"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)},
20 {"gamc_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)}, 20 {"gmac_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
21 {"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)}, 21 {"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)},
22 {"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)}, 22 {"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)},
23 {"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)}, 23 {"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 7fd4d54599e4..6b03c8553e59 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -2032,7 +2032,8 @@ const struct e1000_info e1000_82574_info = {
 		  | FLAG2_DISABLE_ASPM_L0S
 		  | FLAG2_DISABLE_ASPM_L1
 		  | FLAG2_NO_DISABLE_RX
-		  | FLAG2_DMA_BURST,
+		  | FLAG2_DMA_BURST
+		  | FLAG2_CHECK_SYSTIM_OVERFLOW,
 	.pba			= 32,
 	.max_hw_frame_size	= DEFAULT_JUMBO,
 	.get_variants		= e1000_get_variants_82571,
@@ -2053,7 +2054,8 @@ const struct e1000_info e1000_82583_info = {
 		  | FLAG_HAS_CTRLEXT_ON_LOAD,
 	.flags2			= FLAG2_DISABLE_ASPM_L0S
 		  | FLAG2_DISABLE_ASPM_L1
-		  | FLAG2_NO_DISABLE_RX,
+		  | FLAG2_NO_DISABLE_RX
+		  | FLAG2_CHECK_SYSTIM_OVERFLOW,
 	.pba			= 32,
 	.max_hw_frame_size	= DEFAULT_JUMBO,
 	.get_variants		= e1000_get_variants_82571,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ef96cd11d6d2..879cca47b021 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -452,6 +452,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
 #define FLAG2_PCIM2PCI_ARBITER_WA         BIT(11)
 #define FLAG2_DFLT_CRC_STRIPPING          BIT(12)
 #define FLAG2_CHECK_RX_HWTSTAMP           BIT(13)
+#define FLAG2_CHECK_SYSTIM_OVERFLOW       BIT(14)
 
 #define E1000_RX_DESC_PS(R, i)	    \
 	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 3e11322d8d58..f3aaca743ea3 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -5885,7 +5885,8 @@ const struct e1000_info e1000_pch_lpt_info = {
 		  | FLAG_HAS_JUMBO_FRAMES
 		  | FLAG_APME_IN_WUC,
 	.flags2			= FLAG2_HAS_PHY_STATS
-		  | FLAG2_HAS_EEE,
+		  | FLAG2_HAS_EEE
+		  | FLAG2_CHECK_SYSTIM_OVERFLOW,
 	.pba			= 26,
 	.max_hw_frame_size	= 9022,
 	.get_variants		= e1000_get_variants_ich8lan,
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 02f443958f31..7017281ba2dc 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4303,6 +4303,42 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
 }
 
 /**
+ * e1000e_sanitize_systim - sanitize raw cycle counter reads
+ * @hw: pointer to the HW structure
+ * @systim: cycle_t value read, sanitized and returned
+ *
+ * Errata for 82574/82583 possible bad bits read from SYSTIMH/L:
+ * check to see that the time is incrementing at a reasonable
+ * rate and is a multiple of incvalue.
+ **/
+static cycle_t e1000e_sanitize_systim(struct e1000_hw *hw, cycle_t systim)
+{
+	u64 time_delta, rem, temp;
+	cycle_t systim_next;
+	u32 incvalue;
+	int i;
+
+	incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
+	for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
+		/* latch SYSTIMH on read of SYSTIML */
+		systim_next = (cycle_t)er32(SYSTIML);
+		systim_next |= (cycle_t)er32(SYSTIMH) << 32;
+
+		time_delta = systim_next - systim;
+		temp = time_delta;
+		/* VMWare users have seen incvalue of zero, don't div / 0 */
+		rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
+
+		systim = systim_next;
+
+		if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0))
+			break;
+	}
+
+	return systim;
+}
+
+/**
  * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
  * @cc: cyclecounter structure
  **/
@@ -4312,7 +4348,7 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
 						     cc);
 	struct e1000_hw *hw = &adapter->hw;
 	u32 systimel, systimeh;
-	cycle_t systim, systim_next;
+	cycle_t systim;
 	/* SYSTIMH latching upon SYSTIML read does not work well.
 	 * This means that if SYSTIML overflows after we read it but before
 	 * we read SYSTIMH, the value of SYSTIMH has been incremented and we
@@ -4335,33 +4371,9 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
 	systim = (cycle_t)systimel;
 	systim |= (cycle_t)systimeh << 32;
 
-	if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
-		u64 time_delta, rem, temp;
-		u32 incvalue;
-		int i;
-
-		/* errata for 82574/82583 possible bad bits read from SYSTIMH/L
-		 * check to see that the time is incrementing at a reasonable
-		 * rate and is a multiple of incvalue
-		 */
-		incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
-		for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
-			/* latch SYSTIMH on read of SYSTIML */
-			systim_next = (cycle_t)er32(SYSTIML);
-			systim_next |= (cycle_t)er32(SYSTIMH) << 32;
-
-			time_delta = systim_next - systim;
-			temp = time_delta;
-			/* VMWare users have seen incvalue of zero, don't div / 0 */
-			rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
-
-			systim = systim_next;
-
-			if ((time_delta < E1000_82574_SYSTIM_EPSILON) &&
-			    (rem == 0))
-				break;
-		}
-	}
+	if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW)
+		systim = e1000e_sanitize_systim(hw, systim);
+
 	return systim;
 }
 
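
A minimal userspace sketch of the re-read rule factored out above (the fake
counter and the EPSILON value are hypothetical; only the accept/re-read logic
mirrors the driver): a sample is trusted once its delta from the previous read
is both small and an exact multiple of the increment, which rejects reads that
caught the SYSTIML/SYSTIMH halves mid-update.

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_REREADS	3
	#define EPSILON		1000000	/* stand-in for the 82574 bound */

	static uint64_t fake_systim;	/* simulated free-running counter */

	static uint64_t read_systim(void)
	{
		fake_systim += 24 * 100;	/* incvalue 24, 100 ticks elapsed */
		return fake_systim;
	}

	static uint64_t sanitize_systim(uint64_t systim, uint32_t incvalue)
	{
		for (int i = 0; i < MAX_REREADS; i++) {
			uint64_t next = read_systim();
			uint64_t delta = next - systim;
			/* incvalue of 0 has been seen in VMs: avoid div by 0 */
			uint64_t rem = incvalue ? delta % incvalue : (delta != 0);

			systim = next;
			if (delta < EPSILON && rem == 0)
				break;
		}
		return systim;
	}

	int main(void)
	{
		printf("%llu\n",
		       (unsigned long long)sanitize_systim(read_systim(), 24));
		return 0;
	}
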
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 81c99e1be708..c6ac7a61812f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4554,23 +4554,38 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
  **/
 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
 {
+	int i, tc_unused = 0;
 	u8 num_tc = 0;
-	int i;
+	u8 ret = 0;
 
 	/* Scan the ETS Config Priority Table to find
 	 * traffic class enabled for a given priority
-	 * and use the traffic class index to get the
-	 * number of traffic classes enabled
+	 * and create a bitmask of enabled TCs
 	 */
-	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
-		if (dcbcfg->etscfg.prioritytable[i] > num_tc)
-			num_tc = dcbcfg->etscfg.prioritytable[i];
-	}
+	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+		num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
 
-	/* Traffic class index starts from zero so
-	 * increment to return the actual count
+	/* Now scan the bitmask to check for
+	 * contiguous TCs starting with TC0
 	 */
-	return num_tc + 1;
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		if (num_tc & BIT(i)) {
+			if (!tc_unused) {
+				ret++;
+			} else {
+				pr_err("Non-contiguous TC - Disabling DCB\n");
+				return 1;
+			}
+		} else {
+			tc_unused = 1;
+		}
+	}
+
+	/* There is always at least TC0 */
+	if (!ret)
+		ret = 1;
+
+	return ret;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index e61b647f5f2a..336c103ae374 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -744,7 +744,8 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
 		}
 	}
 
-	shhwtstamps.hwtstamp = ktime_sub_ns(shhwtstamps.hwtstamp, adjust);
+	shhwtstamps.hwtstamp =
+		ktime_add_ns(shhwtstamps.hwtstamp, adjust);
 
 	skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
 	dev_kfree_skb_any(adapter->ptp_tx_skb);
@@ -767,13 +768,32 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
 			 struct sk_buff *skb)
 {
 	__le64 *regval = (__le64 *)va;
+	struct igb_adapter *adapter = q_vector->adapter;
+	int adjust = 0;
 
 	/* The timestamp is recorded in little endian format.
 	 * DWORD: 0        1        2        3
 	 * Field: Reserved Reserved SYSTIML  SYSTIMH
 	 */
-	igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
+	igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
 				   le64_to_cpu(regval[1]));
+
+	/* adjust timestamp for the RX latency based on link speed */
+	if (adapter->hw.mac.type == e1000_i210) {
+		switch (adapter->link_speed) {
+		case SPEED_10:
+			adjust = IGB_I210_RX_LATENCY_10;
+			break;
+		case SPEED_100:
+			adjust = IGB_I210_RX_LATENCY_100;
+			break;
+		case SPEED_1000:
+			adjust = IGB_I210_RX_LATENCY_1000;
+			break;
+		}
+	}
+	skb_hwtstamps(skb)->hwtstamp =
+		ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
 }
 
 /**
@@ -825,7 +845,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
 		}
 	}
 	skb_hwtstamps(skb)->hwtstamp =
-		ktime_add_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
+		ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
 
 	/* Update the last_rx_timestamp timer in order to enable watchdog check
 	 * for error case of latched timestamp on a dropped packet.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 5418c69a7463..b4f03748adc0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4100,6 +4100,8 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 vlnctrl, i;
 
+	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+
 	switch (hw->mac.type) {
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
@@ -4112,8 +4114,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
 		/* fall through */
 	case ixgbe_mac_82598EB:
 		/* legacy case, we can just disable VLAN filtering */
-		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-		vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
+		vlnctrl &= ~IXGBE_VLNCTRL_VFE;
 		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 		return;
 	}
@@ -4125,6 +4126,10 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
 	/* Set flag so we don't redo unnecessary work */
 	adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
 
+	/* For VMDq and SR-IOV we must leave VLAN filtering enabled */
+	vlnctrl |= IXGBE_VLNCTRL_VFE;
+	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+
 	/* Add PF to all active pools */
 	for (i = IXGBE_VLVF_ENTRIES; --i;) {
 		u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
@@ -4191,6 +4196,11 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 vlnctrl, i;
 
+	/* Set VLAN filtering to enabled */
+	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+	vlnctrl |= IXGBE_VLNCTRL_VFE;
+	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+
 	switch (hw->mac.type) {
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
@@ -4202,10 +4212,6 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
 		break;
 		/* fall through */
 	case ixgbe_mac_82598EB:
-		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-		vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
-		vlnctrl |= IXGBE_VLNCTRL_VFE;
-		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 		return;
 	}
 
@@ -8390,12 +8396,14 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
 			    struct tcf_exts *exts, u64 *action, u8 *queue)
 {
 	const struct tc_action *a;
+	LIST_HEAD(actions);
 	int err;
 
 	if (tc_no_actions(exts))
 		return -EINVAL;
 
-	tc_for_each_action(a, exts) {
+	tcf_exts_to_list(exts, &actions);
+	list_for_each_entry(a, &actions, list) {
 
 		/* Drop action */
 		if (is_tcf_gact_shot(a)) {
@@ -9517,6 +9525,7 @@ skip_sriov:
 
 	/* copy netdev features into list of user selectable features */
 	netdev->hw_features |= netdev->features |
+			       NETIF_F_HW_VLAN_CTAG_FILTER |
 			       NETIF_F_HW_VLAN_CTAG_RX |
 			       NETIF_F_HW_VLAN_CTAG_TX |
 			       NETIF_F_RXALL |
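
The parse_tc_actions() hunk above is the first of several identical
conversions in this series (the mlx5 and mlxsw hunks below repeat it): the
removed tc_for_each_action() iterator is replaced by materializing the
classifier's actions into a local list and walking it with the generic list
helper. A condensed sketch of the pattern, assuming the kernel context of
this tree:

	const struct tc_action *a;
	LIST_HEAD(actions);		/* on-stack list head */

	if (tc_no_actions(exts))
		return -EINVAL;

	tcf_exts_to_list(exts, &actions);	/* fill the list once */
	list_for_each_entry(a, &actions, list) {
		/* inspect one action, e.g. is_tcf_gact_shot(a) for drop */
	}
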
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index b57ae3afb994..f1609542adf1 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -245,12 +245,16 @@ static int mtk_phy_connect(struct mtk_mac *mac)
 	case PHY_INTERFACE_MODE_MII:
 		ge_mode = 1;
 		break;
-	case PHY_INTERFACE_MODE_RMII:
+	case PHY_INTERFACE_MODE_REVMII:
 		ge_mode = 2;
 		break;
+	case PHY_INTERFACE_MODE_RMII:
+		if (!mac->id)
+			goto err_phy;
+		ge_mode = 3;
+		break;
 	default:
-		dev_err(eth->dev, "invalid phy_mode\n");
-		return -1;
+		goto err_phy;
 	}
 
 	/* put the gmac into the right mode */
@@ -263,13 +267,25 @@ static int mtk_phy_connect(struct mtk_mac *mac)
 	mac->phy_dev->autoneg = AUTONEG_ENABLE;
 	mac->phy_dev->speed = 0;
 	mac->phy_dev->duplex = 0;
+
+	if (of_phy_is_fixed_link(mac->of_node))
+		mac->phy_dev->supported |=
+			SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
 	mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
 				   SUPPORTED_Asym_Pause;
 	mac->phy_dev->advertising = mac->phy_dev->supported |
 				    ADVERTISED_Autoneg;
 	phy_start_aneg(mac->phy_dev);
 
+	of_node_put(np);
+
 	return 0;
+
+err_phy:
+	of_node_put(np);
+	dev_err(eth->dev, "invalid phy_mode\n");
+	return -EINVAL;
 }
 
 static int mtk_mdio_init(struct mtk_eth *eth)
@@ -542,15 +558,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
 	return &ring->buf[idx];
 }
 
-static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
+static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
 {
 	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
-		dma_unmap_single(dev,
+		dma_unmap_single(eth->dev,
 				 dma_unmap_addr(tx_buf, dma_addr0),
 				 dma_unmap_len(tx_buf, dma_len0),
 				 DMA_TO_DEVICE);
 	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
-		dma_unmap_page(dev,
+		dma_unmap_page(eth->dev,
 			       dma_unmap_addr(tx_buf, dma_addr0),
 			       dma_unmap_len(tx_buf, dma_len0),
 			       DMA_TO_DEVICE);
@@ -595,9 +611,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	if (skb_vlan_tag_present(skb))
 		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
 
-	mapped_addr = dma_map_single(&dev->dev, skb->data,
+	mapped_addr = dma_map_single(eth->dev, skb->data,
 				     skb_headlen(skb), DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
 		return -ENOMEM;
 
 	WRITE_ONCE(itxd->txd1, mapped_addr);
@@ -623,10 +639,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 
 		n_desc++;
 		frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
-		mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
+		mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
 					       frag_map_size,
 					       DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+		if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
 			goto err_dma;
 
 		if (i == nr_frags - 1 &&
@@ -679,7 +695,7 @@ err_dma:
 		tx_buf = mtk_desc_to_tx_buf(ring, itxd);
 
 		/* unmap dma */
-		mtk_tx_unmap(&dev->dev, tx_buf);
+		mtk_tx_unmap(eth, tx_buf);
 
 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
@@ -836,11 +852,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 			netdev->stats.rx_dropped++;
 			goto release_desc;
 		}
-		dma_addr = dma_map_single(&eth->netdev[mac]->dev,
+		dma_addr = dma_map_single(eth->dev,
 					  new_data + NET_SKB_PAD,
 					  ring->buf_size,
 					  DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
+		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
 			skb_free_frag(new_data);
 			netdev->stats.rx_dropped++;
 			goto release_desc;
@@ -855,7 +871,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		}
 		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
 
-		dma_unmap_single(&netdev->dev, trxd.rxd1,
+		dma_unmap_single(eth->dev, trxd.rxd1,
 				 ring->buf_size, DMA_FROM_DEVICE);
 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
 		skb->dev = netdev;
@@ -937,7 +953,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
 			done[mac]++;
 			budget--;
 		}
-		mtk_tx_unmap(eth->dev, tx_buf);
+		mtk_tx_unmap(eth, tx_buf);
 
 		ring->last_free = desc;
 		atomic_inc(&ring->free_count);
@@ -1092,7 +1108,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
 
 	if (ring->buf) {
 		for (i = 0; i < MTK_DMA_SIZE; i++)
-			mtk_tx_unmap(eth->dev, &ring->buf[i]);
+			mtk_tx_unmap(eth, &ring->buf[i]);
 		kfree(ring->buf);
 		ring->buf = NULL;
 	}
@@ -1751,6 +1767,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
 		goto free_netdev;
 	}
 	spin_lock_init(&mac->hw_stats->stats_lock);
+	u64_stats_init(&mac->hw_stats->syncp);
 	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
 
 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
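
The err_phy label added in the mtk_phy_connect() hunks above enforces a
device-tree reference rule worth stating once: a held device_node reference
must be dropped on every exit path, success or error. A minimal sketch,
assuming the kernel context of this driver (setup_phy() is a hypothetical
stand-in for the real work):

	static int connect_phy(struct device_node *np)
	{
		if (setup_phy(np))
			goto err_phy;

		of_node_put(np);	/* success path drops the reference */
		return 0;

	err_phy:
		of_node_put(np);	/* and so does every error path */
		return -EINVAL;
	}
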
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 0f19b01e3fff..dc8b1cb0fdc8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -318,6 +318,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				u32 *action, u32 *flow_tag)
 {
 	const struct tc_action *a;
+	LIST_HEAD(actions);
 
 	if (tc_no_actions(exts))
 		return -EINVAL;
@@ -325,7 +326,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
 	*action = 0;
 
-	tc_for_each_action(a, exts) {
+	tcf_exts_to_list(exts, &actions);
+	list_for_each_entry(a, &actions, list) {
 		/* Only support a single action per rule */
 		if (*action)
 			return -EINVAL;
@@ -362,13 +364,15 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				u32 *action, u32 *dest_vport)
 {
 	const struct tc_action *a;
+	LIST_HEAD(actions);
 
 	if (tc_no_actions(exts))
 		return -EINVAL;
 
 	*action = 0;
 
-	tc_for_each_action(a, exts) {
+	tcf_exts_to_list(exts, &actions);
+	list_for_each_entry(a, &actions, list) {
 		/* Only support a single action per rule */
 		if (*action)
 			return -EINVAL;
@@ -503,6 +507,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
 	struct mlx5e_tc_flow *flow;
 	struct tc_action *a;
 	struct mlx5_fc *counter;
+	LIST_HEAD(actions);
 	u64 bytes;
 	u64 packets;
 	u64 lastuse;
@@ -518,7 +523,8 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
 
 	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
 
-	tc_for_each_action(a, f->exts)
+	tcf_exts_to_list(f->exts, &actions);
+	list_for_each_entry(a, &actions, list)
 		tcf_action_stats_update(a, bytes, packets, lastuse);
 
 	return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 7ca9201f7dcb..1721098eef13 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -3383,6 +3383,15 @@ MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1);
  */
 MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1);
 
+/* reg_ritr_lb_en
+ * Loop-back filter enable for unicast packets.
+ * If the flag is set then loop-back filter for unicast packets is
+ * implemented on the RIF. Multicast packets are always subject to
+ * loop-back filtering.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, lb_en, 0x04, 24, 1);
+
 /* reg_ritr_virtual_router
 * Virtual router ID associated with the router interface.
 * Access: RW
@@ -3484,6 +3493,7 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
 	mlxsw_reg_ritr_op_set(payload, op);
 	mlxsw_reg_ritr_rif_set(payload, rif);
 	mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
+	mlxsw_reg_ritr_lb_en_set(payload, 1);
 	mlxsw_reg_ritr_mtu_set(payload, mtu);
 	mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
 }
@@ -4000,6 +4010,7 @@ static inline void mlxsw_reg_ralue_pack(char *payload,
 {
 	MLXSW_REG_ZERO(ralue, payload);
 	mlxsw_reg_ralue_protocol_set(payload, protocol);
+	mlxsw_reg_ralue_op_set(payload, op);
 	mlxsw_reg_ralue_virtual_router_set(payload, virtual_router);
 	mlxsw_reg_ralue_prefix_len_set(payload, prefix_len);
 	mlxsw_reg_ralue_entry_type_set(payload,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index c3e61500819d..1f8168906811 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -942,8 +942,8 @@ static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
 	kfree(mlxsw_sp_vport);
 }
 
-int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
-			  u16 vid)
+static int mlxsw_sp_port_add_vid(struct net_device *dev,
+				 __be16 __always_unused proto, u16 vid)
 {
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 	struct mlxsw_sp_port *mlxsw_sp_vport;
@@ -956,16 +956,12 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
 	if (!vid)
 		return 0;
 
-	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
-		netdev_warn(dev, "VID=%d already configured\n", vid);
+	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
 		return 0;
-	}
 
 	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
-	if (!mlxsw_sp_vport) {
-		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
+	if (!mlxsw_sp_vport)
 		return -ENOMEM;
-	}
 
 	/* When adding the first VLAN interface on a bridged port we need to
 	 * transition all the active 802.1Q bridge VLANs to use explicit
@@ -973,24 +969,17 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
 	 */
 	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
 		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
-		if (err) {
-			netdev_err(dev, "Failed to set to Virtual mode\n");
+		if (err)
 			goto err_port_vp_mode_trans;
-		}
 	}
 
 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
-	if (err) {
-		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
+	if (err)
 		goto err_port_vid_learning_set;
-	}
 
 	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
-	if (err) {
-		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
-			   vid);
+	if (err)
 		goto err_port_add_vid;
-	}
 
 	return 0;
 
@@ -1010,7 +999,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 	struct mlxsw_sp_port *mlxsw_sp_vport;
 	struct mlxsw_sp_fid *f;
-	int err;
 
 	/* VLAN 0 is removed from HW filter when device goes down, but
 	 * it is reserved in our case, so simply return.
@@ -1019,23 +1007,12 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
 		return 0;
 
 	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
-	if (!mlxsw_sp_vport) {
-		netdev_warn(dev, "VID=%d does not exist\n", vid);
+	if (WARN_ON(!mlxsw_sp_vport))
 		return 0;
-	}
 
-	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
-	if (err) {
-		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
-			   vid);
-		return err;
-	}
+	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
 
-	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
-	if (err) {
-		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
-		return err;
-	}
+	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
 
 	/* Drop FID reference. If this was the last reference the
 	 * resources will be freed.
@@ -1048,13 +1025,8 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
 	 * transition all active 802.1Q bridge VLANs to use VID to FID
 	 * mappings and set port's mode to VLAN mode.
 	 */
-	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
-		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
-		if (err) {
-			netdev_err(dev, "Failed to set to VLAN mode\n");
-			return err;
-		}
-	}
+	if (list_is_singular(&mlxsw_sp_port->vports_list))
+		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
 
 	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
 
@@ -1149,6 +1121,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 					   bool ingress)
 {
 	const struct tc_action *a;
+	LIST_HEAD(actions);
 	int err;
 
 	if (!tc_single_action(cls->exts)) {
@@ -1156,7 +1129,8 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 		return -ENOTSUPP;
 	}
 
-	tc_for_each_action(a, cls->exts) {
+	tcf_exts_to_list(cls->exts, &actions);
+	list_for_each_entry(a, &actions, list) {
 		if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL))
 			return -ENOTSUPP;
 
@@ -2076,6 +2050,18 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
 	return 0;
 }
 
+static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	mlxsw_sp_port->pvid = 1;
+
+	return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
+}
+
+static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
+}
+
 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 				bool split, u8 module, u8 width, u8 lane)
 {
@@ -2191,7 +2177,15 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 		goto err_port_dcb_init;
 	}
 
+	err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
+			mlxsw_sp_port->local_port);
+		goto err_port_pvid_vport_create;
+	}
+
 	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
+	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
 	err = register_netdev(dev);
 	if (err) {
 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
@@ -2208,24 +2202,23 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 		goto err_core_port_init;
 	}
 
-	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
-	if (err)
-		goto err_port_vlan_init;
-
-	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
 	return 0;
 
-err_port_vlan_init:
-	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
 err_core_port_init:
 	unregister_netdev(dev);
 err_register_netdev:
+	mlxsw_sp->ports[local_port] = NULL;
+	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
+err_port_pvid_vport_create:
+	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
 err_port_dcb_init:
 err_port_ets_init:
 err_port_buffers_init:
 err_port_admin_status_set:
 err_port_mtu_set:
 err_port_speed_by_width_set:
+	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
 err_port_swid_set:
 err_port_system_port_mapping_set:
 err_dev_addr_init:
@@ -2245,12 +2238,12 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 
 	if (!mlxsw_sp_port)
 		return;
-	mlxsw_sp->ports[local_port] = NULL;
 	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
 	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
-	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
-	mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
+	mlxsw_sp->ports[local_port] = NULL;
 	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
+	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
 	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
 	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
 	free_percpu(mlxsw_sp_port->pcpu_stats);
@@ -2662,6 +2655,26 @@ static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
 	{
 		.func = mlxsw_sp_rx_listener_func,
 		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_MTUERROR,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_TTLERROR,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_LBERROR,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_OSPF,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
 		.trap_id = MLXSW_TRAP_ID_IP2ME,
 	},
 	{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index f69aa37d1521..ab3feb81bd43 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -536,8 +536,6 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
 				 u16 vid);
 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
 			   u16 vid_end, bool is_member, bool untagged);
-int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
-			  u16 vid);
 int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
 			     bool set);
 void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 074cdda7b6f3..237418a0e6e0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -330,7 +330,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
 	MLXSW_SP_CPU_PORT_SB_CM,
 	MLXSW_SP_CPU_PORT_SB_CM,
 	MLXSW_SP_CPU_PORT_SB_CM,
-	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0),
 	MLXSW_SP_CPU_PORT_SB_CM,
 	MLXSW_SP_CPU_PORT_SB_CM,
 	MLXSW_SP_CPU_PORT_SB_CM,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index 01cfb7512827..b6ed7f7c531e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -341,6 +341,8 @@ static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port,
 	char pfcc_pl[MLXSW_REG_PFCC_LEN];
 
 	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
+	mlxsw_reg_pfcc_pprx_set(pfcc_pl, mlxsw_sp_port->link.rx_pause);
+	mlxsw_reg_pfcc_pptx_set(pfcc_pl, mlxsw_sp_port->link.tx_pause);
 	mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en);
 
 	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
@@ -351,17 +353,17 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
 				      struct ieee_pfc *pfc)
 {
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
 	int err;
 
-	if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) &&
-	    pfc->pfc_en) {
+	if (pause_en && pfc->pfc_en) {
 		netdev_err(dev, "PAUSE frames already enabled on port\n");
 		return -EINVAL;
 	}
 
 	err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
 					   mlxsw_sp_port->dcb.ets->prio_tc,
-					   false, pfc);
+					   pause_en, pfc);
 	if (err) {
 		netdev_err(dev, "Failed to configure port's headroom for PFC\n");
 		return err;
@@ -380,7 +382,7 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
 
 err_port_pfc_set:
 	__mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
-				     mlxsw_sp_port->dcb.ets->prio_tc, false,
+				     mlxsw_sp_port->dcb.ets->prio_tc, pause_en,
 				     mlxsw_sp_port->dcb.pfc);
 	return err;
 }
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 81418d629231..90bb93b037ec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1651,9 +1651,10 @@ static void mlxsw_sp_router_fib4_add_info_destroy(void const *data)
 	const struct mlxsw_sp_router_fib4_add_info *info = data;
 	struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry;
 	struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp;
+	struct mlxsw_sp_vr *vr = fib_entry->vr;
 
 	mlxsw_sp_fib_entry_destroy(fib_entry);
-	mlxsw_sp_vr_put(mlxsw_sp, fib_entry->vr);
+	mlxsw_sp_vr_put(mlxsw_sp, vr);
 	kfree(info);
 }
 
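
The one-line fix above is the standard cure for a use-after-free: cache any
field still needed from an object before the call that frees it, since the
old code dereferenced fib_entry->vr after mlxsw_sp_fib_entry_destroy() had
released fib_entry. Reduced to its essence (kernel context assumed, names
taken from the hunk):

	struct mlxsw_sp_vr *vr = fib_entry->vr;	/* read before destroy */

	mlxsw_sp_fib_entry_destroy(fib_entry);	/* frees fib_entry */
	mlxsw_sp_vr_put(mlxsw_sp, vr);		/* safe: uses the cached pointer */
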
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index a1ad5e6bdfa8..d1b59cdfacc1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -450,6 +450,8 @@ void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
 
 	kfree(f);
 
+	mlxsw_sp_fid_map(mlxsw_sp, fid, false);
+
 	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
 }
 
@@ -997,13 +999,13 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
997} 999}
998 1000
999static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, 1001static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1000 u16 vid_begin, u16 vid_end, bool init) 1002 u16 vid_begin, u16 vid_end)
1001{ 1003{
1002 struct net_device *dev = mlxsw_sp_port->dev; 1004 struct net_device *dev = mlxsw_sp_port->dev;
1003 u16 vid, pvid; 1005 u16 vid, pvid;
1004 int err; 1006 int err;
1005 1007
1006 if (!init && !mlxsw_sp_port->bridged) 1008 if (!mlxsw_sp_port->bridged)
1007 return -EINVAL; 1009 return -EINVAL;
1008 1010
1009 err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, 1011 err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
@@ -1014,9 +1016,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1014 return err; 1016 return err;
1015 } 1017 }
1016 1018
1017 if (init)
1018 goto out;
1019
1020 pvid = mlxsw_sp_port->pvid; 1019 pvid = mlxsw_sp_port->pvid;
1021 if (pvid >= vid_begin && pvid <= vid_end) { 1020 if (pvid >= vid_begin && pvid <= vid_end) {
1022 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); 1021 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
@@ -1028,7 +1027,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1028 1027
1029 mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end); 1028 mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
1030 1029
1031out:
1032 /* Changing activity bits only if HW operation succeeded */ 1030 /* Changing activity bits only if HW operation succeeded */
1033 for (vid = vid_begin; vid <= vid_end; vid++) 1031 for (vid = vid_begin; vid <= vid_end; vid++)
1034 clear_bit(vid, mlxsw_sp_port->active_vlans); 1032 clear_bit(vid, mlxsw_sp_port->active_vlans);
@@ -1039,8 +1037,8 @@ out:
1039static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, 1037static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1040 const struct switchdev_obj_port_vlan *vlan) 1038 const struct switchdev_obj_port_vlan *vlan)
1041{ 1039{
1042 return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 1040 return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
1043 vlan->vid_begin, vlan->vid_end, false); 1041 vlan->vid_end);
1044} 1042}
1045 1043
1046void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) 1044void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -1048,7 +1046,7 @@ void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
1048 u16 vid; 1046 u16 vid;
1049 1047
1050 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) 1048 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
1051 __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false); 1049 __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
1052} 1050}
1053 1051
1054static int 1052static int
@@ -1546,32 +1544,6 @@ void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
1546 mlxsw_sp_fdb_fini(mlxsw_sp); 1544 mlxsw_sp_fdb_fini(mlxsw_sp);
1547} 1545}
1548 1546
1549int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
1550{
1551 struct net_device *dev = mlxsw_sp_port->dev;
1552 int err;
1553
1554 /* Allow only untagged packets to ingress and tag them internally
1555 * with VID 1.
1556 */
1557 mlxsw_sp_port->pvid = 1;
1558 err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
1559 true);
1560 if (err) {
1561 netdev_err(dev, "Unable to init VLANs\n");
1562 return err;
1563 }
1564
1565 /* Add implicit VLAN interface in the device, so that untagged
1566 * packets will be classified to the default vFID.
1567 */
1568 err = mlxsw_sp_port_add_vid(dev, 0, 1);
1569 if (err)
1570 netdev_err(dev, "Failed to configure default vFID\n");
1571
1572 return err;
1573}
1574
1575void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port) 1547void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
1576{ 1548{
1577 mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops; 1549 mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
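
Note: the switchdev changes above drop the special init path from __mlxsw_sp_port_vlans_del() but keep the PVID handling: if the port's PVID falls inside the deleted range, it is reset to 0 before the FIDs are left. A compact sketch of that step, using simplified stand-in types:

    #include <stdio.h>

    typedef unsigned short u16;

    struct port { u16 pvid; };

    static int port_pvid_set(struct port *p, u16 pvid)
    {
        p->pvid = pvid;
        return 0;
    }

    static int port_vlans_del(struct port *p, u16 vid_begin, u16 vid_end)
    {
        /* Reset the PVID if it is part of the range being removed. */
        if (p->pvid >= vid_begin && p->pvid <= vid_end) {
            if (port_pvid_set(p, 0))
                return -1;
        }
        /* ...leave the FIDs and clear the range's active_vlans bits... */
        return 0;
    }

    int main(void)
    {
        struct port p = { .pvid = 1 };

        port_vlans_del(&p, 1, 10);
        printf("pvid after delete: %u\n", p.pvid);  /* 0 */
        return 0;
    }
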
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 470d7696e9fe..ed8e30186400 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -56,6 +56,10 @@ enum {
56 MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, 56 MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
57 MLXSW_TRAP_ID_ARPBC = 0x50, 57 MLXSW_TRAP_ID_ARPBC = 0x50,
58 MLXSW_TRAP_ID_ARPUC = 0x51, 58 MLXSW_TRAP_ID_ARPUC = 0x51,
59 MLXSW_TRAP_ID_MTUERROR = 0x52,
60 MLXSW_TRAP_ID_TTLERROR = 0x53,
61 MLXSW_TRAP_ID_LBERROR = 0x54,
62 MLXSW_TRAP_ID_OSPF = 0x55,
59 MLXSW_TRAP_ID_IP2ME = 0x5F, 63 MLXSW_TRAP_ID_IP2ME = 0x5F,
60 MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, 64 MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
61 MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90, 65 MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index d0dc28f93c0e..226cb08cc055 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -52,40 +52,94 @@ static bool qed_dcbx_app_ethtype(u32 app_info_bitmap)
52 DCBX_APP_SF_ETHTYPE); 52 DCBX_APP_SF_ETHTYPE);
53} 53}
54 54
55static bool qed_dcbx_ieee_app_ethtype(u32 app_info_bitmap)
56{
57 u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
58
59 /* Old MFW */
60 if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
61 return qed_dcbx_app_ethtype(app_info_bitmap);
62
63 return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE);
64}
65
55static bool qed_dcbx_app_port(u32 app_info_bitmap) 66static bool qed_dcbx_app_port(u32 app_info_bitmap)
56{ 67{
57 return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == 68 return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
58 DCBX_APP_SF_PORT); 69 DCBX_APP_SF_PORT);
59} 70}
60 71
61static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id) 72static bool qed_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
62{ 73{
63 return !!(qed_dcbx_app_ethtype(app_info_bitmap) && 74 u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
64 proto_id == QED_ETH_TYPE_DEFAULT); 75
76 /* Old MFW */
77 if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
78 return qed_dcbx_app_port(app_info_bitmap);
79
80 return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT);
65} 81}
66 82
67static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id) 83static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
68{ 84{
69 return !!(qed_dcbx_app_port(app_info_bitmap) && 85 bool ethtype;
70 proto_id == QED_TCP_PORT_ISCSI); 86
87 if (ieee)
88 ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
89 else
90 ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
91
92 return !!(ethtype && (proto_id == QED_ETH_TYPE_DEFAULT));
71} 93}
72 94
73static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id) 95static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
74{ 96{
75 return !!(qed_dcbx_app_ethtype(app_info_bitmap) && 97 bool port;
76 proto_id == QED_ETH_TYPE_FCOE); 98
99 if (ieee)
100 port = qed_dcbx_ieee_app_port(app_info_bitmap,
101 DCBX_APP_SF_IEEE_TCP_PORT);
102 else
103 port = qed_dcbx_app_port(app_info_bitmap);
104
105 return !!(port && (proto_id == QED_TCP_PORT_ISCSI));
77} 106}
78 107
79static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id) 108static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
80{ 109{
81 return !!(qed_dcbx_app_ethtype(app_info_bitmap) && 110 bool ethtype;
82 proto_id == QED_ETH_TYPE_ROCE); 111
112 if (ieee)
113 ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
114 else
115 ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
116
117 return !!(ethtype && (proto_id == QED_ETH_TYPE_FCOE));
83} 118}
84 119
85static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id) 120static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
86{ 121{
87 return !!(qed_dcbx_app_port(app_info_bitmap) && 122 bool ethtype;
88 proto_id == QED_UDP_PORT_TYPE_ROCE_V2); 123
124 if (ieee)
125 ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
126 else
127 ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
128
129 return !!(ethtype && (proto_id == QED_ETH_TYPE_ROCE));
130}
131
132static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
133{
134 bool port;
135
136 if (ieee)
137 port = qed_dcbx_ieee_app_port(app_info_bitmap,
138 DCBX_APP_SF_IEEE_UDP_PORT);
139 else
140 port = qed_dcbx_app_port(app_info_bitmap);
141
142 return !!(port && (proto_id == QED_UDP_PORT_TYPE_ROCE_V2));
89} 143}
90 144
91static void 145static void
@@ -164,17 +218,17 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
164static bool 218static bool
165qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, 219qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
166 u32 app_prio_bitmap, 220 u32 app_prio_bitmap,
167 u16 id, enum dcbx_protocol_type *type) 221 u16 id, enum dcbx_protocol_type *type, bool ieee)
168{ 222{
169 if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id)) { 223 if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) {
170 *type = DCBX_PROTOCOL_FCOE; 224 *type = DCBX_PROTOCOL_FCOE;
171 } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id)) { 225 } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) {
172 *type = DCBX_PROTOCOL_ROCE; 226 *type = DCBX_PROTOCOL_ROCE;
173 } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) { 227 } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) {
174 *type = DCBX_PROTOCOL_ISCSI; 228 *type = DCBX_PROTOCOL_ISCSI;
175 } else if (qed_dcbx_default_tlv(app_prio_bitmap, id)) { 229 } else if (qed_dcbx_default_tlv(app_prio_bitmap, id, ieee)) {
176 *type = DCBX_PROTOCOL_ETH; 230 *type = DCBX_PROTOCOL_ETH;
177 } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) { 231 } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) {
178 *type = DCBX_PROTOCOL_ROCE_V2; 232 *type = DCBX_PROTOCOL_ROCE_V2;
179 } else { 233 } else {
180 *type = DCBX_MAX_PROTOCOL_TYPE; 234 *type = DCBX_MAX_PROTOCOL_TYPE;
@@ -194,17 +248,18 @@ static int
194qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, 248qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
195 struct qed_dcbx_results *p_data, 249 struct qed_dcbx_results *p_data,
196 struct dcbx_app_priority_entry *p_tbl, 250 struct dcbx_app_priority_entry *p_tbl,
197 u32 pri_tc_tbl, int count, bool dcbx_enabled) 251 u32 pri_tc_tbl, int count, u8 dcbx_version)
198{ 252{
199 u8 tc, priority_map; 253 u8 tc, priority_map;
200 enum dcbx_protocol_type type; 254 enum dcbx_protocol_type type;
255 bool enable, ieee;
201 u16 protocol_id; 256 u16 protocol_id;
202 int priority; 257 int priority;
203 bool enable;
204 int i; 258 int i;
205 259
206 DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); 260 DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count);
207 261
262 ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
208 /* Parse APP TLV */ 263 /* Parse APP TLV */
209 for (i = 0; i < count; i++) { 264 for (i = 0; i < count; i++) {
210 protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, 265 protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
@@ -219,7 +274,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
219 274
220 tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority); 275 tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority);
221 if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, 276 if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
222 protocol_id, &type)) { 277 protocol_id, &type, ieee)) {
223 /* ETH always has the enable bit reset, as it gets 278 /* ETH always has the enable bit reset, as it gets
224 * vlan information per packet. For other protocols, 279 * vlan information per packet. For other protocols,
225 * should be set according to the dcbx_enabled 280 * should be set according to the dcbx_enabled
@@ -275,15 +330,12 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
275 struct dcbx_ets_feature *p_ets; 330 struct dcbx_ets_feature *p_ets;
276 struct qed_hw_info *p_info; 331 struct qed_hw_info *p_info;
277 u32 pri_tc_tbl, flags; 332 u32 pri_tc_tbl, flags;
278 bool dcbx_enabled; 333 u8 dcbx_version;
279 int num_entries; 334 int num_entries;
280 int rc = 0; 335 int rc = 0;
281 336
282 /* If DCBx version is non-zero, then negotiation was
283 * successfully performed
284 */
285 flags = p_hwfn->p_dcbx_info->operational.flags; 337 flags = p_hwfn->p_dcbx_info->operational.flags;
286 dcbx_enabled = !!QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION); 338 dcbx_version = QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
287 339
288 p_app = &p_hwfn->p_dcbx_info->operational.features.app; 340 p_app = &p_hwfn->p_dcbx_info->operational.features.app;
289 p_tbl = p_app->app_pri_tbl; 341 p_tbl = p_app->app_pri_tbl;
@@ -295,13 +347,13 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
295 num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); 347 num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
296 348
297 rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, 349 rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
298 num_entries, dcbx_enabled); 350 num_entries, dcbx_version);
299 if (rc) 351 if (rc)
300 return rc; 352 return rc;
301 353
302 p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); 354 p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
303 data.pf_id = p_hwfn->rel_pf_id; 355 data.pf_id = p_hwfn->rel_pf_id;
304 data.dcbx_enabled = dcbx_enabled; 356 data.dcbx_enabled = !!dcbx_version;
305 357
306 qed_dcbx_dp_protocol(p_hwfn, &data); 358 qed_dcbx_dp_protocol(p_hwfn, &data);
307 359
@@ -400,7 +452,7 @@ static void
400qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, 452qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn,
401 struct dcbx_app_priority_feature *p_app, 453 struct dcbx_app_priority_feature *p_app,
402 struct dcbx_app_priority_entry *p_tbl, 454 struct dcbx_app_priority_entry *p_tbl,
403 struct qed_dcbx_params *p_params) 455 struct qed_dcbx_params *p_params, bool ieee)
404{ 456{
405 struct qed_app_entry *entry; 457 struct qed_app_entry *entry;
406 u8 pri_map; 458 u8 pri_map;
@@ -414,15 +466,46 @@ qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn,
414 DCBX_APP_NUM_ENTRIES); 466 DCBX_APP_NUM_ENTRIES);
415 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { 467 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
416 entry = &p_params->app_entry[i]; 468 entry = &p_params->app_entry[i];
417 entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry, 469 if (ieee) {
418 DCBX_APP_SF)); 470 u8 sf_ieee;
471 u32 val;
472
473 sf_ieee = QED_MFW_GET_FIELD(p_tbl[i].entry,
474 DCBX_APP_SF_IEEE);
475 switch (sf_ieee) {
476 case DCBX_APP_SF_IEEE_RESERVED:
477 /* Old MFW */
478 val = QED_MFW_GET_FIELD(p_tbl[i].entry,
479 DCBX_APP_SF);
480 entry->sf_ieee = val ?
481 QED_DCBX_SF_IEEE_TCP_UDP_PORT :
482 QED_DCBX_SF_IEEE_ETHTYPE;
483 break;
484 case DCBX_APP_SF_IEEE_ETHTYPE:
485 entry->sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE;
486 break;
487 case DCBX_APP_SF_IEEE_TCP_PORT:
488 entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT;
489 break;
490 case DCBX_APP_SF_IEEE_UDP_PORT:
491 entry->sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT;
492 break;
493 case DCBX_APP_SF_IEEE_TCP_UDP_PORT:
494 entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT;
495 break;
496 }
497 } else {
498 entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry,
499 DCBX_APP_SF));
500 }
501
419 pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP); 502 pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
420 entry->prio = ffs(pri_map) - 1; 503 entry->prio = ffs(pri_map) - 1;
421 entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry, 504 entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
422 DCBX_APP_PROTOCOL_ID); 505 DCBX_APP_PROTOCOL_ID);
423 qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, 506 qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
424 entry->proto_id, 507 entry->proto_id,
425 &entry->proto_type); 508 &entry->proto_type, ieee);
426 } 509 }
427 510
428 DP_VERBOSE(p_hwfn, QED_MSG_DCB, 511 DP_VERBOSE(p_hwfn, QED_MSG_DCB,
@@ -483,7 +566,7 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
483 bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]); 566 bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]);
484 tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]); 567 tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]);
485 tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]); 568 tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]);
486 pri_map = be32_to_cpu(p_ets->pri_tc_tbl[0]); 569 pri_map = p_ets->pri_tc_tbl[0];
487 for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { 570 for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
488 p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i]; 571 p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
489 p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i]; 572 p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
@@ -500,9 +583,9 @@ qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn,
500 struct dcbx_app_priority_feature *p_app, 583 struct dcbx_app_priority_feature *p_app,
501 struct dcbx_app_priority_entry *p_tbl, 584 struct dcbx_app_priority_entry *p_tbl,
502 struct dcbx_ets_feature *p_ets, 585 struct dcbx_ets_feature *p_ets,
503 u32 pfc, struct qed_dcbx_params *p_params) 586 u32 pfc, struct qed_dcbx_params *p_params, bool ieee)
504{ 587{
505 qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params); 588 qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee);
506 qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params); 589 qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
507 qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params); 590 qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
508} 591}
@@ -516,7 +599,7 @@ qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn,
516 p_feat = &p_hwfn->p_dcbx_info->local_admin.features; 599 p_feat = &p_hwfn->p_dcbx_info->local_admin.features;
517 qed_dcbx_get_common_params(p_hwfn, &p_feat->app, 600 qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
518 p_feat->app.app_pri_tbl, &p_feat->ets, 601 p_feat->app.app_pri_tbl, &p_feat->ets,
519 p_feat->pfc, &params->local.params); 602 p_feat->pfc, &params->local.params, false);
520 params->local.valid = true; 603 params->local.valid = true;
521} 604}
522 605
@@ -529,7 +612,7 @@ qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn,
529 p_feat = &p_hwfn->p_dcbx_info->remote.features; 612 p_feat = &p_hwfn->p_dcbx_info->remote.features;
530 qed_dcbx_get_common_params(p_hwfn, &p_feat->app, 613 qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
531 p_feat->app.app_pri_tbl, &p_feat->ets, 614 p_feat->app.app_pri_tbl, &p_feat->ets,
532 p_feat->pfc, &params->remote.params); 615 p_feat->pfc, &params->remote.params, false);
533 params->remote.valid = true; 616 params->remote.valid = true;
534} 617}
535 618
@@ -574,7 +657,8 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
574 657
575 qed_dcbx_get_common_params(p_hwfn, &p_feat->app, 658 qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
576 p_feat->app.app_pri_tbl, &p_feat->ets, 659 p_feat->app.app_pri_tbl, &p_feat->ets,
577 p_feat->pfc, &params->operational.params); 660 p_feat->pfc, &params->operational.params,
661 p_operational->ieee);
578 qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results); 662 qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results);
579 err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR); 663 err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
580 p_operational->err = err; 664 p_operational->err = err;
@@ -944,7 +1028,6 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
944 val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4)); 1028 val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
945 p_ets->pri_tc_tbl[0] |= val; 1029 p_ets->pri_tc_tbl[0] |= val;
946 } 1030 }
947 p_ets->pri_tc_tbl[0] = cpu_to_be32(p_ets->pri_tc_tbl[0]);
948 for (i = 0; i < 2; i++) { 1031 for (i = 0; i < 2; i++) {
949 p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]); 1032 p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]);
950 p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]); 1033 p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]);
@@ -954,7 +1037,7 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
954static void 1037static void
955qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn, 1038qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
956 struct dcbx_app_priority_feature *p_app, 1039 struct dcbx_app_priority_feature *p_app,
957 struct qed_dcbx_params *p_params) 1040 struct qed_dcbx_params *p_params, bool ieee)
958{ 1041{
959 u32 *entry; 1042 u32 *entry;
960 int i; 1043 int i;
@@ -975,12 +1058,36 @@ qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
975 1058
976 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { 1059 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
977 entry = &p_app->app_pri_tbl[i].entry; 1060 entry = &p_app->app_pri_tbl[i].entry;
978 *entry &= ~DCBX_APP_SF_MASK; 1061 if (ieee) {
979 if (p_params->app_entry[i].ethtype) 1062 *entry &= ~DCBX_APP_SF_IEEE_MASK;
980 *entry |= ((u32)DCBX_APP_SF_ETHTYPE << 1063 switch (p_params->app_entry[i].sf_ieee) {
981 DCBX_APP_SF_SHIFT); 1064 case QED_DCBX_SF_IEEE_ETHTYPE:
982 else 1065 *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
983 *entry |= ((u32)DCBX_APP_SF_PORT << DCBX_APP_SF_SHIFT); 1066 DCBX_APP_SF_IEEE_SHIFT);
1067 break;
1068 case QED_DCBX_SF_IEEE_TCP_PORT:
1069 *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
1070 DCBX_APP_SF_IEEE_SHIFT);
1071 break;
1072 case QED_DCBX_SF_IEEE_UDP_PORT:
1073 *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
1074 DCBX_APP_SF_IEEE_SHIFT);
1075 break;
1076 case QED_DCBX_SF_IEEE_TCP_UDP_PORT:
1077 *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
1078 DCBX_APP_SF_IEEE_SHIFT);
1079 break;
1080 }
1081 } else {
1082 *entry &= ~DCBX_APP_SF_MASK;
1083 if (p_params->app_entry[i].ethtype)
1084 *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
1085 DCBX_APP_SF_SHIFT);
1086 else
1087 *entry |= ((u32)DCBX_APP_SF_PORT <<
1088 DCBX_APP_SF_SHIFT);
1089 }
1090
984 *entry &= ~DCBX_APP_PROTOCOL_ID_MASK; 1091 *entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
985 *entry |= ((u32)p_params->app_entry[i].proto_id << 1092 *entry |= ((u32)p_params->app_entry[i].proto_id <<
986 DCBX_APP_PROTOCOL_ID_SHIFT); 1093 DCBX_APP_PROTOCOL_ID_SHIFT);
@@ -995,15 +1102,19 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
995 struct dcbx_local_params *local_admin, 1102 struct dcbx_local_params *local_admin,
996 struct qed_dcbx_set *params) 1103 struct qed_dcbx_set *params)
997{ 1104{
1105 bool ieee = false;
1106
998 local_admin->flags = 0; 1107 local_admin->flags = 0;
999 memcpy(&local_admin->features, 1108 memcpy(&local_admin->features,
1000 &p_hwfn->p_dcbx_info->operational.features, 1109 &p_hwfn->p_dcbx_info->operational.features,
1001 sizeof(local_admin->features)); 1110 sizeof(local_admin->features));
1002 1111
1003 if (params->enabled) 1112 if (params->enabled) {
1004 local_admin->config = params->ver_num; 1113 local_admin->config = params->ver_num;
1005 else 1114 ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE);
1115 } else {
1006 local_admin->config = DCBX_CONFIG_VERSION_DISABLED; 1116 local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
1117 }
1007 1118
1008 if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG) 1119 if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG)
1009 qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc, 1120 qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
@@ -1015,7 +1126,7 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
1015 1126
1016 if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG) 1127 if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG)
1017 qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app, 1128 qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
1018 &params->config.params); 1129 &params->config.params, ieee);
1019} 1130}
1020 1131
1021int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 1132int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
@@ -1596,8 +1707,10 @@ static int qed_dcbnl_setapp(struct qed_dev *cdev,
1596 if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) 1707 if ((entry->ethtype == ethtype) && (entry->proto_id == idval))
1597 break; 1708 break;
1598 /* First empty slot */ 1709 /* First empty slot */
1599 if (!entry->proto_id) 1710 if (!entry->proto_id) {
1711 dcbx_set.config.params.num_app_entries++;
1600 break; 1712 break;
1713 }
1601 } 1714 }
1602 1715
1603 if (i == QED_DCBX_MAX_APP_PROTOCOL) { 1716 if (i == QED_DCBX_MAX_APP_PROTOCOL) {
@@ -2117,8 +2230,10 @@ int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
2117 (entry->proto_id == app->protocol)) 2230 (entry->proto_id == app->protocol))
2118 break; 2231 break;
2119 /* First empty slot */ 2232 /* First empty slot */
2120 if (!entry->proto_id) 2233 if (!entry->proto_id) {
2234 dcbx_set.config.params.num_app_entries++;
2121 break; 2235 break;
2236 }
2122 } 2237 }
2123 2238
2124 if (i == QED_DCBX_MAX_APP_PROTOCOL) { 2239 if (i == QED_DCBX_MAX_APP_PROTOCOL) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 592784019994..6f9d3b831a2a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -6850,6 +6850,14 @@ struct dcbx_app_priority_entry {
6850#define DCBX_APP_SF_SHIFT 8 6850#define DCBX_APP_SF_SHIFT 8
6851#define DCBX_APP_SF_ETHTYPE 0 6851#define DCBX_APP_SF_ETHTYPE 0
6852#define DCBX_APP_SF_PORT 1 6852#define DCBX_APP_SF_PORT 1
6853#define DCBX_APP_SF_IEEE_MASK 0x0000f000
6854#define DCBX_APP_SF_IEEE_SHIFT 12
6855#define DCBX_APP_SF_IEEE_RESERVED 0
6856#define DCBX_APP_SF_IEEE_ETHTYPE 1
6857#define DCBX_APP_SF_IEEE_TCP_PORT 2
6858#define DCBX_APP_SF_IEEE_UDP_PORT 3
6859#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4
6860
6853#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 6861#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
6854#define DCBX_APP_PROTOCOL_ID_SHIFT 16 6862#define DCBX_APP_PROTOCOL_ID_SHIFT 16
6855}; 6863};
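
Note: the new DCBX_APP_SF_IEEE_* field above carries the IEEE selector, with value 0 reserved so that old MFW images (which never set the field) can be detected and routed through the legacy DCBX_APP_SF field. A hedged sketch of the port-type decode built on these constants; ieee_app_port() is an illustrative reduction of qed_dcbx_ieee_app_port(), and the legacy fallback is only stubbed out:

    #include <stdbool.h>
    #include <stdint.h>

    #define DCBX_APP_SF_IEEE_MASK           0x0000f000
    #define DCBX_APP_SF_IEEE_SHIFT          12
    #define DCBX_APP_SF_IEEE_RESERVED       0
    #define DCBX_APP_SF_IEEE_ETHTYPE        1
    #define DCBX_APP_SF_IEEE_TCP_PORT       2
    #define DCBX_APP_SF_IEEE_UDP_PORT       3
    #define DCBX_APP_SF_IEEE_TCP_UDP_PORT   4

    static bool ieee_app_port(uint32_t entry, uint8_t type)
    {
        uint8_t sf = (entry & DCBX_APP_SF_IEEE_MASK) >> DCBX_APP_SF_IEEE_SHIFT;

        if (sf == DCBX_APP_SF_IEEE_RESERVED)
            return false;   /* old MFW: the driver falls back to the legacy field */

        /* TCP_UDP_PORT satisfies both a TCP-port and a UDP-port lookup. */
        return sf == type || sf == DCBX_APP_SF_IEEE_TCP_UDP_PORT;
    }

    int main(void)
    {
        uint32_t entry = DCBX_APP_SF_IEEE_TCP_UDP_PORT << DCBX_APP_SF_IEEE_SHIFT;

        return ieee_app_port(entry, DCBX_APP_SF_IEEE_TCP_PORT) ? 0 : 1;
    }
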
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index fd973f4f16c7..49bad00a0f8f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -37,8 +37,8 @@
37 37
38#define _QLCNIC_LINUX_MAJOR 5 38#define _QLCNIC_LINUX_MAJOR 5
39#define _QLCNIC_LINUX_MINOR 3 39#define _QLCNIC_LINUX_MINOR 3
40#define _QLCNIC_LINUX_SUBVERSION 64 40#define _QLCNIC_LINUX_SUBVERSION 65
41#define QLCNIC_LINUX_VERSIONID "5.3.64" 41#define QLCNIC_LINUX_VERSIONID "5.3.65"
42#define QLCNIC_DRV_IDC_VER 0x01 42#define QLCNIC_DRV_IDC_VER 0x01
43#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 43#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
44 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 44 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 87c642d3b075..fedd7366713c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -102,7 +102,6 @@
102#define QLCNIC_RESPONSE_DESC 0x05 102#define QLCNIC_RESPONSE_DESC 0x05
103#define QLCNIC_LRO_DESC 0x12 103#define QLCNIC_LRO_DESC 0x12
104 104
105#define QLCNIC_TX_POLL_BUDGET 128
106#define QLCNIC_TCP_HDR_SIZE 20 105#define QLCNIC_TCP_HDR_SIZE 20
107#define QLCNIC_TCP_TS_OPTION_SIZE 12 106#define QLCNIC_TCP_TS_OPTION_SIZE 12
108#define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63) 107#define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63)
@@ -2008,7 +2007,6 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
2008 struct qlcnic_host_tx_ring *tx_ring; 2007 struct qlcnic_host_tx_ring *tx_ring;
2009 struct qlcnic_adapter *adapter; 2008 struct qlcnic_adapter *adapter;
2010 2009
2011 budget = QLCNIC_TX_POLL_BUDGET;
2012 tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi); 2010 tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
2013 adapter = tx_ring->adapter; 2011 adapter = tx_ring->adapter;
2014 work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget); 2012 work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 017d8c2c8285..24061b9b92e8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -156,10 +156,8 @@ struct qlcnic_vf_info {
156 spinlock_t vlan_list_lock; /* Lock for VLAN list */ 156 spinlock_t vlan_list_lock; /* Lock for VLAN list */
157}; 157};
158 158
159struct qlcnic_async_work_list { 159struct qlcnic_async_cmd {
160 struct list_head list; 160 struct list_head list;
161 struct work_struct work;
162 void *ptr;
163 struct qlcnic_cmd_args *cmd; 161 struct qlcnic_cmd_args *cmd;
164}; 162};
165 163
@@ -168,7 +166,10 @@ struct qlcnic_back_channel {
168 struct workqueue_struct *bc_trans_wq; 166 struct workqueue_struct *bc_trans_wq;
169 struct workqueue_struct *bc_async_wq; 167 struct workqueue_struct *bc_async_wq;
170 struct workqueue_struct *bc_flr_wq; 168 struct workqueue_struct *bc_flr_wq;
171 struct list_head async_list; 169 struct qlcnic_adapter *adapter;
170 struct list_head async_cmd_list;
171 struct work_struct vf_async_work;
172 spinlock_t queue_lock; /* async_cmd_list queue lock */
172}; 173};
173 174
174struct qlcnic_sriov { 175struct qlcnic_sriov {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 7327b729ba2e..d7107055ec60 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -29,6 +29,7 @@
29#define QLC_83XX_VF_RESET_FAIL_THRESH 8 29#define QLC_83XX_VF_RESET_FAIL_THRESH 8
30#define QLC_BC_CMD_MAX_RETRY_CNT 5 30#define QLC_BC_CMD_MAX_RETRY_CNT 5
31 31
32static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work);
32static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *); 33static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
33static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32); 34static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
34static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *); 35static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
@@ -177,7 +178,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
177 } 178 }
178 179
179 bc->bc_async_wq = wq; 180 bc->bc_async_wq = wq;
180 INIT_LIST_HEAD(&bc->async_list); 181 INIT_LIST_HEAD(&bc->async_cmd_list);
182 INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd);
183 spin_lock_init(&bc->queue_lock);
184 bc->adapter = adapter;
181 185
182 for (i = 0; i < num_vfs; i++) { 186 for (i = 0; i < num_vfs; i++) {
183 vf = &sriov->vf_info[i]; 187 vf = &sriov->vf_info[i];
@@ -1517,17 +1521,21 @@ static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac,
1517 1521
1518void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc) 1522void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
1519{ 1523{
1520 struct list_head *head = &bc->async_list; 1524 struct list_head *head = &bc->async_cmd_list;
1521 struct qlcnic_async_work_list *entry; 1525 struct qlcnic_async_cmd *entry;
1522 1526
1523 flush_workqueue(bc->bc_async_wq); 1527 flush_workqueue(bc->bc_async_wq);
1528 cancel_work_sync(&bc->vf_async_work);
1529
1530 spin_lock(&bc->queue_lock);
1524 while (!list_empty(head)) { 1531 while (!list_empty(head)) {
1525 entry = list_entry(head->next, struct qlcnic_async_work_list, 1532 entry = list_entry(head->next, struct qlcnic_async_cmd,
1526 list); 1533 list);
1527 cancel_work_sync(&entry->work);
1528 list_del(&entry->list); 1534 list_del(&entry->list);
1535 kfree(entry->cmd);
1529 kfree(entry); 1536 kfree(entry);
1530 } 1537 }
1538 spin_unlock(&bc->queue_lock);
1531} 1539}
1532 1540
1533void qlcnic_sriov_vf_set_multi(struct net_device *netdev) 1541void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
@@ -1587,57 +1595,64 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
1587 1595
1588static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work) 1596static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
1589{ 1597{
1590 struct qlcnic_async_work_list *entry; 1598 struct qlcnic_async_cmd *entry, *tmp;
1591 struct qlcnic_adapter *adapter; 1599 struct qlcnic_back_channel *bc;
1592 struct qlcnic_cmd_args *cmd; 1600 struct qlcnic_cmd_args *cmd;
1601 struct list_head *head;
1602 LIST_HEAD(del_list);
1603
1604 bc = container_of(work, struct qlcnic_back_channel, vf_async_work);
1605 head = &bc->async_cmd_list;
1606
1607 spin_lock(&bc->queue_lock);
1608 list_splice_init(head, &del_list);
1609 spin_unlock(&bc->queue_lock);
1610
1611 list_for_each_entry_safe(entry, tmp, &del_list, list) {
1612 list_del(&entry->list);
1613 cmd = entry->cmd;
1614 __qlcnic_sriov_issue_cmd(bc->adapter, cmd);
1615 kfree(entry);
1616 }
1617
1618 if (!list_empty(head))
1619 queue_work(bc->bc_async_wq, &bc->vf_async_work);
1593 1620
1594 entry = container_of(work, struct qlcnic_async_work_list, work);
1595 adapter = entry->ptr;
1596 cmd = entry->cmd;
1597 __qlcnic_sriov_issue_cmd(adapter, cmd);
1598 return; 1621 return;
1599} 1622}
1600 1623
1601static struct qlcnic_async_work_list * 1624static struct qlcnic_async_cmd *
1602qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc) 1625qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc,
1626 struct qlcnic_cmd_args *cmd)
1603{ 1627{
1604 struct list_head *node; 1628 struct qlcnic_async_cmd *entry = NULL;
1605 struct qlcnic_async_work_list *entry = NULL;
1606 u8 empty = 0;
1607 1629
1608 list_for_each(node, &bc->async_list) { 1630 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1609 entry = list_entry(node, struct qlcnic_async_work_list, list); 1631 if (!entry)
1610 if (!work_pending(&entry->work)) { 1632 return NULL;
1611 empty = 1;
1612 break;
1613 }
1614 }
1615 1633
1616 if (!empty) { 1634 entry->cmd = cmd;
1617 entry = kzalloc(sizeof(struct qlcnic_async_work_list), 1635
1618 GFP_ATOMIC); 1636 spin_lock(&bc->queue_lock);
1619 if (entry == NULL) 1637 list_add_tail(&entry->list, &bc->async_cmd_list);
1620 return NULL; 1638 spin_unlock(&bc->queue_lock);
1621 list_add_tail(&entry->list, &bc->async_list);
1622 }
1623 1639
1624 return entry; 1640 return entry;
1625} 1641}
1626 1642
1627static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc, 1643static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
1628 work_func_t func, void *data,
1629 struct qlcnic_cmd_args *cmd) 1644 struct qlcnic_cmd_args *cmd)
1630{ 1645{
1631 struct qlcnic_async_work_list *entry = NULL; 1646 struct qlcnic_async_cmd *entry = NULL;
1632 1647
1633 entry = qlcnic_sriov_get_free_node_async_work(bc); 1648 entry = qlcnic_sriov_alloc_async_cmd(bc, cmd);
1634 if (!entry) 1649 if (!entry) {
1650 qlcnic_free_mbx_args(cmd);
1651 kfree(cmd);
1635 return; 1652 return;
1653 }
1636 1654
1637 entry->ptr = data; 1655 queue_work(bc->bc_async_wq, &bc->vf_async_work);
1638 entry->cmd = cmd;
1639 INIT_WORK(&entry->work, func);
1640 queue_work(bc->bc_async_wq, &entry->work);
1641} 1656}
1642 1657
1643static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, 1658static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
@@ -1649,8 +1664,8 @@ static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
1649 if (adapter->need_fw_reset) 1664 if (adapter->need_fw_reset)
1650 return -EIO; 1665 return -EIO;
1651 1666
1652 qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd, 1667 qlcnic_sriov_schedule_async_cmd(bc, cmd);
1653 adapter, cmd); 1668
1654 return 0; 1669 return 0;
1655} 1670}
1656 1671
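
Note: the qlcnic rework above replaces per-command work items with a single work function that drains a spinlock-protected command list, which also lets the schedule path free the command on allocation failure instead of leaking it. A userspace sketch of the drain pattern; a pthread mutex stands in for the spinlock, and the singly-linked list (pushed at the head here, so ordering is simplified) stands in for list_splice_init():

    #include <pthread.h>
    #include <stdlib.h>

    struct cmd { struct cmd *next; };

    static struct cmd *pending;             /* models async_cmd_list */
    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

    static void issue_cmd(struct cmd *c)
    {
        (void)c;                            /* would talk to the firmware */
    }

    static void handle_async_cmds(void)
    {
        struct cmd *batch, *c;

        pthread_mutex_lock(&queue_lock);
        batch = pending;                    /* detach the whole queue... */
        pending = NULL;
        pthread_mutex_unlock(&queue_lock);

        while ((c = batch) != NULL) {       /* ...and process it off-lock */
            batch = c->next;
            issue_cmd(c);
            free(c);
        }
    }

    int main(void)
    {
        struct cmd *c = calloc(1, sizeof(*c));

        if (!c)
            return 1;
        pthread_mutex_lock(&queue_lock);
        c->next = pending;                  /* the driver queues at the tail */
        pending = c;
        pthread_mutex_unlock(&queue_lock);

        handle_async_cmds();
        return 0;
    }
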
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c51f34693eae..f85d605e4560 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -734,6 +734,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
734 netif_receive_skb(skb); 734 netif_receive_skb(skb);
735 ndev->stats.rx_bytes += len; 735 ndev->stats.rx_bytes += len;
736 ndev->stats.rx_packets++; 736 ndev->stats.rx_packets++;
737 kmemleak_not_leak(new_skb);
737 } else { 738 } else {
738 ndev->stats.rx_dropped++; 739 ndev->stats.rx_dropped++;
739 new_skb = skb; 740 new_skb = skb;
@@ -1325,6 +1326,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
1325 kfree_skb(skb); 1326 kfree_skb(skb);
1326 goto err_cleanup; 1327 goto err_cleanup;
1327 } 1328 }
1329 kmemleak_not_leak(skb);
1328 } 1330 }
1329 /* continue even if we didn't manage to submit all 1331 /* continue even if we didn't manage to submit all
1330 * receive descs 1332 * receive descs
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 01a77145a0fa..8fd131207ee1 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -166,6 +166,7 @@ static struct platform_driver tsi_eth_driver = {
166 166
167static void tsi108_timed_checker(unsigned long dev_ptr); 167static void tsi108_timed_checker(unsigned long dev_ptr);
168 168
169#ifdef DEBUG
169static void dump_eth_one(struct net_device *dev) 170static void dump_eth_one(struct net_device *dev)
170{ 171{
171 struct tsi108_prv_data *data = netdev_priv(dev); 172 struct tsi108_prv_data *data = netdev_priv(dev);
@@ -190,6 +191,7 @@ static void dump_eth_one(struct net_device *dev)
190 TSI_READ(TSI108_EC_RXESTAT), 191 TSI_READ(TSI108_EC_RXESTAT),
191 TSI_READ(TSI108_EC_RXERR), data->rxpending); 192 TSI_READ(TSI108_EC_RXERR), data->rxpending);
192} 193}
194#endif
193 195
194/* Synchronization is needed between the thread and up/down events. 196/* Synchronization is needed between the thread and up/down events.
195 * Note that the PHY is accessed through the same registers for both 197 * Note that the PHY is accessed through the same registers for both
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 467fb8b4d083..591af71eae56 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -644,12 +644,6 @@ struct netvsc_reconfig {
644 u32 event; 644 u32 event;
645}; 645};
646 646
647struct garp_wrk {
648 struct work_struct dwrk;
649 struct net_device *netdev;
650 struct netvsc_device *netvsc_dev;
651};
652
653/* The context of the netvsc device */ 647/* The context of the netvsc device */
654struct net_device_context { 648struct net_device_context {
655 /* point back to our device context */ 649 /* point back to our device context */
@@ -667,7 +661,6 @@ struct net_device_context {
667 661
668 struct work_struct work; 662 struct work_struct work;
669 u32 msg_enable; /* debug level */ 663 u32 msg_enable; /* debug level */
670 struct garp_wrk gwrk;
671 664
672 struct netvsc_stats __percpu *tx_stats; 665 struct netvsc_stats __percpu *tx_stats;
673 struct netvsc_stats __percpu *rx_stats; 666 struct netvsc_stats __percpu *rx_stats;
@@ -678,6 +671,15 @@ struct net_device_context {
678 671
679 /* the device is going away */ 672 /* the device is going away */
680 bool start_remove; 673 bool start_remove;
674
675 /* State to manage the associated VF interface. */
676 struct net_device *vf_netdev;
677 bool vf_inject;
678 atomic_t vf_use_cnt;
679 /* 1: allocated, serial number is valid. 0: not allocated */
680 u32 vf_alloc;
681 /* Serial number of the VF to team with */
682 u32 vf_serial;
681}; 683};
682 684
683/* Per netvsc device */ 685/* Per netvsc device */
@@ -733,15 +735,7 @@ struct netvsc_device {
733 u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ 735 u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
734 u32 pkt_align; /* alignment bytes, e.g. 8 */ 736 u32 pkt_align; /* alignment bytes, e.g. 8 */
735 737
736 /* 1: allocated, serial number is valid. 0: not allocated */
737 u32 vf_alloc;
738 /* Serial number of the VF to team with */
739 u32 vf_serial;
740 atomic_t open_cnt; 738 atomic_t open_cnt;
741 /* State to manage the associated VF interface. */
742 bool vf_inject;
743 struct net_device *vf_netdev;
744 atomic_t vf_use_cnt;
745}; 739};
746 740
747static inline struct netvsc_device * 741static inline struct netvsc_device *
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 20e09174ff62..410fb8e81376 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -77,13 +77,9 @@ static struct netvsc_device *alloc_net_device(void)
77 init_waitqueue_head(&net_device->wait_drain); 77 init_waitqueue_head(&net_device->wait_drain);
78 net_device->destroy = false; 78 net_device->destroy = false;
79 atomic_set(&net_device->open_cnt, 0); 79 atomic_set(&net_device->open_cnt, 0);
80 atomic_set(&net_device->vf_use_cnt, 0);
81 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; 80 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
82 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; 81 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
83 82
84 net_device->vf_netdev = NULL;
85 net_device->vf_inject = false;
86
87 return net_device; 83 return net_device;
88} 84}
89 85
@@ -1106,16 +1102,16 @@ static void netvsc_send_table(struct hv_device *hdev,
1106 nvscdev->send_table[i] = tab[i]; 1102 nvscdev->send_table[i] = tab[i];
1107} 1103}
1108 1104
1109static void netvsc_send_vf(struct netvsc_device *nvdev, 1105static void netvsc_send_vf(struct net_device_context *net_device_ctx,
1110 struct nvsp_message *nvmsg) 1106 struct nvsp_message *nvmsg)
1111{ 1107{
1112 nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; 1108 net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
1113 nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; 1109 net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
1114} 1110}
1115 1111
1116static inline void netvsc_receive_inband(struct hv_device *hdev, 1112static inline void netvsc_receive_inband(struct hv_device *hdev,
1117 struct netvsc_device *nvdev, 1113 struct net_device_context *net_device_ctx,
1118 struct nvsp_message *nvmsg) 1114 struct nvsp_message *nvmsg)
1119{ 1115{
1120 switch (nvmsg->hdr.msg_type) { 1116 switch (nvmsg->hdr.msg_type) {
1121 case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: 1117 case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
@@ -1123,7 +1119,7 @@ static inline void netvsc_receive_inband(struct hv_device *hdev,
1123 break; 1119 break;
1124 1120
1125 case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: 1121 case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
1126 netvsc_send_vf(nvdev, nvmsg); 1122 netvsc_send_vf(net_device_ctx, nvmsg);
1127 break; 1123 break;
1128 } 1124 }
1129} 1125}
@@ -1136,6 +1132,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
1136 struct vmpacket_descriptor *desc) 1132 struct vmpacket_descriptor *desc)
1137{ 1133{
1138 struct nvsp_message *nvmsg; 1134 struct nvsp_message *nvmsg;
1135 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1139 1136
1140 nvmsg = (struct nvsp_message *)((unsigned long) 1137 nvmsg = (struct nvsp_message *)((unsigned long)
1141 desc + (desc->offset8 << 3)); 1138 desc + (desc->offset8 << 3));
@@ -1150,7 +1147,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
1150 break; 1147 break;
1151 1148
1152 case VM_PKT_DATA_INBAND: 1149 case VM_PKT_DATA_INBAND:
1153 netvsc_receive_inband(device, net_device, nvmsg); 1150 netvsc_receive_inband(device, net_device_ctx, nvmsg);
1154 break; 1151 break;
1155 1152
1156 default: 1153 default:
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 41bd952cc28d..3ba29fc80d05 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -658,20 +658,19 @@ int netvsc_recv_callback(struct hv_device *device_obj,
658 struct sk_buff *skb; 658 struct sk_buff *skb;
659 struct sk_buff *vf_skb; 659 struct sk_buff *vf_skb;
660 struct netvsc_stats *rx_stats; 660 struct netvsc_stats *rx_stats;
661 struct netvsc_device *netvsc_dev = net_device_ctx->nvdev;
662 u32 bytes_recvd = packet->total_data_buflen; 661 u32 bytes_recvd = packet->total_data_buflen;
663 int ret = 0; 662 int ret = 0;
664 663
665 if (!net || net->reg_state != NETREG_REGISTERED) 664 if (!net || net->reg_state != NETREG_REGISTERED)
666 return NVSP_STAT_FAIL; 665 return NVSP_STAT_FAIL;
667 666
668 if (READ_ONCE(netvsc_dev->vf_inject)) { 667 if (READ_ONCE(net_device_ctx->vf_inject)) {
669 atomic_inc(&netvsc_dev->vf_use_cnt); 668 atomic_inc(&net_device_ctx->vf_use_cnt);
670 if (!READ_ONCE(netvsc_dev->vf_inject)) { 669 if (!READ_ONCE(net_device_ctx->vf_inject)) {
671 /* 670 /*
672 * We raced; just move on. 671 * We raced; just move on.
673 */ 672 */
674 atomic_dec(&netvsc_dev->vf_use_cnt); 673 atomic_dec(&net_device_ctx->vf_use_cnt);
675 goto vf_injection_done; 674 goto vf_injection_done;
676 } 675 }
677 676
@@ -683,17 +682,19 @@ int netvsc_recv_callback(struct hv_device *device_obj,
683 * the host). Deliver these via the VF interface 682 * the host). Deliver these via the VF interface
684 * in the guest. 683 * in the guest.
685 */ 684 */
686 vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet, 685 vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev,
687 csum_info, *data, vlan_tci); 686 packet, csum_info, *data,
687 vlan_tci);
688 if (vf_skb != NULL) { 688 if (vf_skb != NULL) {
689 ++netvsc_dev->vf_netdev->stats.rx_packets; 689 ++net_device_ctx->vf_netdev->stats.rx_packets;
690 netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd; 690 net_device_ctx->vf_netdev->stats.rx_bytes +=
691 bytes_recvd;
691 netif_receive_skb(vf_skb); 692 netif_receive_skb(vf_skb);
692 } else { 693 } else {
693 ++net->stats.rx_dropped; 694 ++net->stats.rx_dropped;
694 ret = NVSP_STAT_FAIL; 695 ret = NVSP_STAT_FAIL;
695 } 696 }
696 atomic_dec(&netvsc_dev->vf_use_cnt); 697 atomic_dec(&net_device_ctx->vf_use_cnt);
697 return ret; 698 return ret;
698 } 699 }
699 700
@@ -1150,17 +1151,6 @@ static void netvsc_free_netdev(struct net_device *netdev)
1150 free_netdev(netdev); 1151 free_netdev(netdev);
1151} 1152}
1152 1153
1153static void netvsc_notify_peers(struct work_struct *wrk)
1154{
1155 struct garp_wrk *gwrk;
1156
1157 gwrk = container_of(wrk, struct garp_wrk, dwrk);
1158
1159 netdev_notify_peers(gwrk->netdev);
1160
1161 atomic_dec(&gwrk->netvsc_dev->vf_use_cnt);
1162}
1163
1164static struct net_device *get_netvsc_net_device(char *mac) 1154static struct net_device *get_netvsc_net_device(char *mac)
1165{ 1155{
1166 struct net_device *dev, *found = NULL; 1156 struct net_device *dev, *found = NULL;
@@ -1203,7 +1193,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
1203 1193
1204 net_device_ctx = netdev_priv(ndev); 1194 net_device_ctx = netdev_priv(ndev);
1205 netvsc_dev = net_device_ctx->nvdev; 1195 netvsc_dev = net_device_ctx->nvdev;
1206 if (netvsc_dev == NULL) 1196 if (!netvsc_dev || net_device_ctx->vf_netdev)
1207 return NOTIFY_DONE; 1197 return NOTIFY_DONE;
1208 1198
1209 netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); 1199 netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
@@ -1211,10 +1201,23 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
1211 * Take a reference on the module. 1201 * Take a reference on the module.
1212 */ 1202 */
1213 try_module_get(THIS_MODULE); 1203 try_module_get(THIS_MODULE);
1214 netvsc_dev->vf_netdev = vf_netdev; 1204 net_device_ctx->vf_netdev = vf_netdev;
1215 return NOTIFY_OK; 1205 return NOTIFY_OK;
1216} 1206}
1217 1207
1208static void netvsc_inject_enable(struct net_device_context *net_device_ctx)
1209{
1210 net_device_ctx->vf_inject = true;
1211}
1212
1213static void netvsc_inject_disable(struct net_device_context *net_device_ctx)
1214{
1215 net_device_ctx->vf_inject = false;
1216
1217 /* Wait for currently active users to drain out. */
1218 while (atomic_read(&net_device_ctx->vf_use_cnt) != 0)
1219 udelay(50);
1220}
1218 1221
1219static int netvsc_vf_up(struct net_device *vf_netdev) 1222static int netvsc_vf_up(struct net_device *vf_netdev)
1220{ 1223{
@@ -1233,11 +1236,11 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
1233 net_device_ctx = netdev_priv(ndev); 1236 net_device_ctx = netdev_priv(ndev);
1234 netvsc_dev = net_device_ctx->nvdev; 1237 netvsc_dev = net_device_ctx->nvdev;
1235 1238
1236 if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL)) 1239 if (!netvsc_dev || !net_device_ctx->vf_netdev)
1237 return NOTIFY_DONE; 1240 return NOTIFY_DONE;
1238 1241
1239 netdev_info(ndev, "VF up: %s\n", vf_netdev->name); 1242 netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
1240 netvsc_dev->vf_inject = true; 1243 netvsc_inject_enable(net_device_ctx);
1241 1244
1242 /* 1245 /*
1243 * Open the device before switching data path. 1246 * Open the device before switching data path.
@@ -1252,15 +1255,8 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
1252 1255
1253 netif_carrier_off(ndev); 1256 netif_carrier_off(ndev);
1254 1257
1255 /* 1258 /* Now notify peers through VF device. */
1256 * Now notify peers. We are scheduling work to 1259 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);
1257 * notify peers; take a reference to prevent
1258 * the VF interface from vanishing.
1259 */
1260 atomic_inc(&netvsc_dev->vf_use_cnt);
1261 net_device_ctx->gwrk.netdev = vf_netdev;
1262 net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
1263 schedule_work(&net_device_ctx->gwrk.dwrk);
1264 1260
1265 return NOTIFY_OK; 1261 return NOTIFY_OK;
1266} 1262}
@@ -1283,29 +1279,18 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
1283 net_device_ctx = netdev_priv(ndev); 1279 net_device_ctx = netdev_priv(ndev);
1284 netvsc_dev = net_device_ctx->nvdev; 1280 netvsc_dev = net_device_ctx->nvdev;
1285 1281
1286 if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL)) 1282 if (!netvsc_dev || !net_device_ctx->vf_netdev)
1287 return NOTIFY_DONE; 1283 return NOTIFY_DONE;
1288 1284
1289 netdev_info(ndev, "VF down: %s\n", vf_netdev->name); 1285 netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
1290 netvsc_dev->vf_inject = false; 1286 netvsc_inject_disable(net_device_ctx);
1291 /*
1292 * Wait for currently active users to
1293 * drain out.
1294 */
1295
1296 while (atomic_read(&netvsc_dev->vf_use_cnt) != 0)
1297 udelay(50);
1298 netvsc_switch_datapath(ndev, false); 1287 netvsc_switch_datapath(ndev, false);
1299 netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name); 1288 netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
1300 rndis_filter_close(netvsc_dev); 1289 rndis_filter_close(netvsc_dev);
1301 netif_carrier_on(ndev); 1290 netif_carrier_on(ndev);
1302 /* 1291
1303 * Notify peers. 1292 /* Now notify peers through netvsc device. */
1304 */ 1293 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);
1305 atomic_inc(&netvsc_dev->vf_use_cnt);
1306 net_device_ctx->gwrk.netdev = ndev;
1307 net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
1308 schedule_work(&net_device_ctx->gwrk.dwrk);
1309 1294
1310 return NOTIFY_OK; 1295 return NOTIFY_OK;
1311} 1296}
@@ -1327,11 +1312,11 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
1327 1312
1328 net_device_ctx = netdev_priv(ndev); 1313 net_device_ctx = netdev_priv(ndev);
1329 netvsc_dev = net_device_ctx->nvdev; 1314 netvsc_dev = net_device_ctx->nvdev;
1330 if (netvsc_dev == NULL) 1315 if (!netvsc_dev || !net_device_ctx->vf_netdev)
1331 return NOTIFY_DONE; 1316 return NOTIFY_DONE;
1332 netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); 1317 netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
1333 1318 netvsc_inject_disable(net_device_ctx);
1334 netvsc_dev->vf_netdev = NULL; 1319 net_device_ctx->vf_netdev = NULL;
1335 module_put(THIS_MODULE); 1320 module_put(THIS_MODULE);
1336 return NOTIFY_OK; 1321 return NOTIFY_OK;
1337} 1322}
@@ -1377,11 +1362,14 @@ static int netvsc_probe(struct hv_device *dev,
1377 1362
1378 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); 1363 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
1379 INIT_WORK(&net_device_ctx->work, do_set_multicast); 1364 INIT_WORK(&net_device_ctx->work, do_set_multicast);
1380 INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers);
1381 1365
1382 spin_lock_init(&net_device_ctx->lock); 1366 spin_lock_init(&net_device_ctx->lock);
1383 INIT_LIST_HEAD(&net_device_ctx->reconfig_events); 1367 INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
1384 1368
1369 atomic_set(&net_device_ctx->vf_use_cnt, 0);
1370 net_device_ctx->vf_netdev = NULL;
1371 net_device_ctx->vf_inject = false;
1372
1385 net->netdev_ops = &device_ops; 1373 net->netdev_ops = &device_ops;
1386 1374
1387 net->hw_features = NETVSC_HW_FEATURES; 1375 net->hw_features = NETVSC_HW_FEATURES;
@@ -1494,8 +1482,13 @@ static int netvsc_netdev_event(struct notifier_block *this,
1494{ 1482{
1495 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); 1483 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
1496 1484
1497 /* Avoid Vlan, Bonding dev with same MAC registering as VF */ 1485 /* Avoid Vlan dev with same MAC registering as VF */
1498 if (event_dev->priv_flags & (IFF_802_1Q_VLAN | IFF_BONDING)) 1486 if (event_dev->priv_flags & IFF_802_1Q_VLAN)
1487 return NOTIFY_DONE;
1488
1489 /* Avoid Bonding master dev with same MAC registering as VF */
1490 if (event_dev->priv_flags & IFF_BONDING &&
1491 event_dev->flags & IFF_MASTER)
1499 return NOTIFY_DONE; 1492 return NOTIFY_DONE;
1500 1493
1501 switch (event) { 1494 switch (event) {
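
Note: the netvsc changes above move the VF state into net_device_context and factor the inject enable/disable handshake into helpers: readers bump vf_use_cnt and re-check vf_inject to close the race, while disable clears the flag and spins until the count drains. A sketch of that handshake using C11 atomics as stand-ins for READ_ONCE()/atomic_t (names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool vf_inject;
    static atomic_int vf_use_cnt;

    static bool vf_try_inject(void)
    {
        if (!atomic_load(&vf_inject))
            return false;
        atomic_fetch_add(&vf_use_cnt, 1);
        if (!atomic_load(&vf_inject)) {
            /* We raced with disable; just move on. */
            atomic_fetch_sub(&vf_use_cnt, 1);
            return false;
        }
        return true;    /* caller drops vf_use_cnt when done */
    }

    static void vf_inject_disable(void)
    {
        atomic_store(&vf_inject, false);
        while (atomic_load(&vf_use_cnt) != 0)
            ;           /* the driver udelay()s between polls */
    }

    int main(void)
    {
        atomic_store(&vf_inject, true);
        if (vf_try_inject())
            atomic_fetch_sub(&vf_use_cnt, 1);
        vf_inject_disable();
        return 0;
    }
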
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index d13e6e15d7b5..351e701eb043 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -270,6 +270,7 @@ struct macsec_dev {
270 struct pcpu_secy_stats __percpu *stats; 270 struct pcpu_secy_stats __percpu *stats;
271 struct list_head secys; 271 struct list_head secys;
272 struct gro_cells gro_cells; 272 struct gro_cells gro_cells;
273 unsigned int nest_level;
273}; 274};
274 275
275/** 276/**
@@ -2699,6 +2700,8 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
2699 2700
2700#define MACSEC_FEATURES \ 2701#define MACSEC_FEATURES \
2701 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) 2702 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
2703static struct lock_class_key macsec_netdev_addr_lock_key;
2704
2702static int macsec_dev_init(struct net_device *dev) 2705static int macsec_dev_init(struct net_device *dev)
2703{ 2706{
2704 struct macsec_dev *macsec = macsec_priv(dev); 2707 struct macsec_dev *macsec = macsec_priv(dev);
@@ -2910,6 +2913,13 @@ static int macsec_get_iflink(const struct net_device *dev)
2910 return macsec_priv(dev)->real_dev->ifindex; 2913 return macsec_priv(dev)->real_dev->ifindex;
2911} 2914}
2912 2915
2916
2917static int macsec_get_nest_level(struct net_device *dev)
2918{
2919 return macsec_priv(dev)->nest_level;
2920}
2921
2922
2913static const struct net_device_ops macsec_netdev_ops = { 2923static const struct net_device_ops macsec_netdev_ops = {
2914 .ndo_init = macsec_dev_init, 2924 .ndo_init = macsec_dev_init,
2915 .ndo_uninit = macsec_dev_uninit, 2925 .ndo_uninit = macsec_dev_uninit,
@@ -2923,6 +2933,7 @@ static const struct net_device_ops macsec_netdev_ops = {
2923 .ndo_start_xmit = macsec_start_xmit, 2933 .ndo_start_xmit = macsec_start_xmit,
2924 .ndo_get_stats64 = macsec_get_stats64, 2934 .ndo_get_stats64 = macsec_get_stats64,
2925 .ndo_get_iflink = macsec_get_iflink, 2935 .ndo_get_iflink = macsec_get_iflink,
2936 .ndo_get_lock_subclass = macsec_get_nest_level,
2926}; 2937};
2927 2938
2928static const struct device_type macsec_type = { 2939static const struct device_type macsec_type = {
@@ -3047,22 +3058,31 @@ static void macsec_del_dev(struct macsec_dev *macsec)
 	}
 }
 
+static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
+{
+	struct macsec_dev *macsec = macsec_priv(dev);
+	struct net_device *real_dev = macsec->real_dev;
+
+	unregister_netdevice_queue(dev, head);
+	list_del_rcu(&macsec->secys);
+	macsec_del_dev(macsec);
+	netdev_upper_dev_unlink(real_dev, dev);
+
+	macsec_generation++;
+}
+
 static void macsec_dellink(struct net_device *dev, struct list_head *head)
 {
 	struct macsec_dev *macsec = macsec_priv(dev);
 	struct net_device *real_dev = macsec->real_dev;
 	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
 
-	macsec_generation++;
+	macsec_common_dellink(dev, head);
 
-	unregister_netdevice_queue(dev, head);
-	list_del_rcu(&macsec->secys);
 	if (list_empty(&rxd->secys)) {
 		netdev_rx_handler_unregister(real_dev);
 		kfree(rxd);
 	}
-
-	macsec_del_dev(macsec);
 }
 
 static int register_macsec_dev(struct net_device *real_dev,
@@ -3181,6 +3201,16 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
 
 	dev_hold(real_dev);
 
+	macsec->nest_level = dev_get_nest_level(real_dev) + 1;
+	netdev_lockdep_set_classes(dev);
+	lockdep_set_class_and_subclass(&dev->addr_list_lock,
+				       &macsec_netdev_addr_lock_key,
+				       macsec_get_nest_level(dev));
+
+	err = netdev_upper_dev_link(real_dev, dev);
+	if (err < 0)
+		goto unregister;
+
 	/* need to be already registered so that ->init has run and
 	 * the MAC addr is set
 	 */
@@ -3193,12 +3223,12 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
 
 	if (rx_handler && sci_exists(real_dev, sci)) {
 		err = -EBUSY;
-		goto unregister;
+		goto unlink;
 	}
 
 	err = macsec_add_dev(dev, sci, icv_len);
 	if (err)
-		goto unregister;
+		goto unlink;
 
 	if (data)
 		macsec_changelink_common(dev, data);
@@ -3213,6 +3243,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
 
 del_dev:
 	macsec_del_dev(macsec);
+unlink:
+	netdev_upper_dev_unlink(real_dev, dev);
 unregister:
 	unregister_netdevice(dev);
 	return err;
@@ -3382,8 +3414,12 @@ static int macsec_notify(struct notifier_block *this, unsigned long event,
 
 		rxd = macsec_data_rtnl(real_dev);
 		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
-			macsec_dellink(m->secy.netdev, &head);
+			macsec_common_dellink(m->secy.netdev, &head);
 		}
+
+		netdev_rx_handler_unregister(real_dev);
+		kfree(rxd);
+
 		unregister_netdevice_many(&head);
 		break;
 	}
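
The nest_level/lockdep hunks above follow the usual pattern for stacked net
devices: the upper device records how deeply it is nested and hands that depth
to lockdep, so taking addr_list_lock on both layers is not flagged as recursive
locking. A minimal sketch of the pattern, using a hypothetical "foo" upper
driver (the foo_* names and foo_priv() accessor are illustrative, not part of
this diff):

    static struct lock_class_key foo_netdev_addr_lock_key;

    static int foo_get_nest_level(struct net_device *dev)
    {
    	return foo_priv(dev)->nest_level;
    }

    static void foo_set_lockdep_class(struct net_device *dev,
    				  struct net_device *real_dev)
    {
    	/* one level deeper than whatever we are stacked on */
    	foo_priv(dev)->nest_level = dev_get_nest_level(real_dev) + 1;

    	/* distinct class plus per-depth subclass for addr_list_lock */
    	lockdep_set_class_and_subclass(&dev->addr_list_lock,
    				       &foo_netdev_addr_lock_key,
    				       foo_get_nest_level(dev));
    }

The .ndo_get_lock_subclass hook then lets core code pick the same subclass when
it takes the lock on the device's behalf.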
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index cd9b53834bf6..3234fcdea317 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1315,7 +1315,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 	vlan->dev = dev;
 	vlan->port = port;
 	vlan->set_features = MACVLAN_FEATURES;
-	vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
+	vlan->nest_level = dev_get_nest_level(lowerdev) + 1;
 
 	vlan->mode = MACVLAN_MODE_VEPA;
 	if (data && data[IFLA_MACVLAN_MODE])
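
This hunk only tracks the dev_get_nest_level() signature change: the function
no longer takes a device-type predicate and now counts nesting across all lower
devices, so e.g. macvlan stacked on macsec is accounted for as well. Side by
side (the two-argument form is the pre-change API):

    /* before: depth counted only through devices matching the predicate */
    vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;

    /* after: depth counted through every lower device */
    vlan->nest_level = dev_get_nest_level(lowerdev) + 1;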
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index a38c0dac514b..070e3290aa6e 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -275,7 +275,6 @@ static void macvtap_put_queue(struct macvtap_queue *q)
 	rtnl_unlock();
 
 	synchronize_rcu();
-	skb_array_cleanup(&q->skb_array);
 	sock_put(&q->sk);
 }
 
@@ -533,10 +532,8 @@ static void macvtap_sock_write_space(struct sock *sk)
 static void macvtap_sock_destruct(struct sock *sk)
 {
 	struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk);
-	struct sk_buff *skb;
 
-	while ((skb = skb_array_consume(&q->skb_array)) != NULL)
-		kfree_skb(skb);
+	skb_array_cleanup(&q->skb_array);
 }
 
 static int macvtap_open(struct inode *inode, struct file *file)
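
Both macvtap hunks move the ring teardown from macvtap_put_queue() into the
sock destructor, presumably so the skb_array cannot be freed while something
still holding a sock reference could touch it; once the destructor runs, no
producer or consumer remains. skb_array_cleanup() also drops any skbs still
queued, which is why the manual consume/kfree_skb drain loop can go. A
condensed view of the resulting lifecycle (q and ring_size are illustrative
stand-ins for the macvtap_queue fields):

    skb_array_init(&q->skb_array, ring_size, GFP_KERNEL); /* on open */
    skb_array_produce(&q->skb_array, skb);                /* tx path */
    skb = skb_array_consume(&q->skb_array);               /* read path */
    /* final sock_put() -> sk_destruct: drain and free in one call */
    skb_array_cleanup(&q->skb_array);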
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 1882d9828c99..053e87905b94 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -677,17 +677,28 @@ static void kszphy_get_stats(struct phy_device *phydev,
 		data[i] = kszphy_get_stat(phydev, i);
 }
 
-static int kszphy_resume(struct phy_device *phydev)
+static int kszphy_suspend(struct phy_device *phydev)
 {
-	int value;
+	/* Disable PHY Interrupts */
+	if (phy_interrupt_is_valid(phydev)) {
+		phydev->interrupts = PHY_INTERRUPT_DISABLED;
+		if (phydev->drv->config_intr)
+			phydev->drv->config_intr(phydev);
+	}
 
-	mutex_lock(&phydev->lock);
+	return genphy_suspend(phydev);
+}
 
-	value = phy_read(phydev, MII_BMCR);
-	phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
+static int kszphy_resume(struct phy_device *phydev)
+{
+	genphy_resume(phydev);
 
-	kszphy_config_intr(phydev);
-	mutex_unlock(&phydev->lock);
+	/* Enable PHY Interrupts */
+	if (phy_interrupt_is_valid(phydev)) {
+		phydev->interrupts = PHY_INTERRUPT_ENABLED;
+		if (phydev->drv->config_intr)
+			phydev->drv->config_intr(phydev);
+	}
 
 	return 0;
 }
@@ -900,7 +911,7 @@ static struct phy_driver ksphy_driver[] = {
 	.get_sset_count = kszphy_get_sset_count,
 	.get_strings = kszphy_get_strings,
 	.get_stats = kszphy_get_stats,
-	.suspend = genphy_suspend,
+	.suspend = kszphy_suspend,
 	.resume = kszphy_resume,
 }, {
 	.phy_id = PHY_ID_KSZ8061,
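
The new suspend/resume pair brackets the power-down with interrupt masking, so
a PHY interrupt cannot fire while the link is being powered off or before the
driver is ready again after resume. For context, the genphy helpers it wraps
just toggle BMCR_PDOWN; roughly (a simplified sketch of the core helper, not
part of this diff):

    int genphy_suspend(struct phy_device *phydev)
    {
    	int value;

    	mutex_lock(&phydev->lock);
    	value = phy_read(phydev, MII_BMCR);
    	phy_write(phydev, MII_BMCR, value | BMCR_PDOWN);
    	mutex_unlock(&phydev->lock);

    	return 0;
    }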
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index da4e3d6632f6..c0dda6fc0921 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1811,7 +1811,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
 	fl4.flowi4_mark = skb->mark;
 	fl4.flowi4_proto = IPPROTO_UDP;
 	fl4.daddr = daddr;
-	fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;
+	fl4.saddr = *saddr;
 
 	rt = ip_route_output_key(vxlan->net, &fl4);
 	if (!IS_ERR(rt)) {
@@ -1847,7 +1847,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
 	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_oif = oif;
 	fl6.daddr = *daddr;
-	fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
+	fl6.saddr = *saddr;
 	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
 	fl6.flowi6_mark = skb->mark;
 	fl6.flowi6_proto = IPPROTO_UDP;
@@ -1920,7 +1920,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 	struct rtable *rt = NULL;
 	const struct iphdr *old_iph;
 	union vxlan_addr *dst;
-	union vxlan_addr remote_ip;
+	union vxlan_addr remote_ip, local_ip;
+	union vxlan_addr *src;
 	struct vxlan_metadata _md;
 	struct vxlan_metadata *md = &_md;
 	__be16 src_port = 0, dst_port;
@@ -1938,6 +1939,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 		dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
 		vni = rdst->remote_vni;
 		dst = &rdst->remote_ip;
+		src = &vxlan->cfg.saddr;
 		dst_cache = &rdst->dst_cache;
 	} else {
 		if (!info) {
@@ -1948,11 +1950,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 		dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
 		vni = vxlan_tun_id_to_vni(info->key.tun_id);
 		remote_ip.sa.sa_family = ip_tunnel_info_af(info);
-		if (remote_ip.sa.sa_family == AF_INET)
+		if (remote_ip.sa.sa_family == AF_INET) {
 			remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
-		else
+			local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src;
+		} else {
 			remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
+			local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
+		}
 		dst = &remote_ip;
+		src = &local_ip;
 		dst_cache = &info->dst_cache;
 	}
 
@@ -1992,15 +1998,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 	}
 
 	if (dst->sa.sa_family == AF_INET) {
-		__be32 saddr;
-
 		if (!vxlan->vn4_sock)
 			goto drop;
 		sk = vxlan->vn4_sock->sock->sk;
 
 		rt = vxlan_get_route(vxlan, skb,
 				     rdst ? rdst->remote_ifindex : 0, tos,
-				     dst->sin.sin_addr.s_addr, &saddr,
+				     dst->sin.sin_addr.s_addr,
+				     &src->sin.sin_addr.s_addr,
 				     dst_cache, info);
 		if (IS_ERR(rt)) {
 			netdev_dbg(dev, "no route to %pI4\n",
@@ -2017,7 +2022,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 		}
 
 		/* Bypass encapsulation if the destination is local */
-		if (rt->rt_flags & RTCF_LOCAL &&
+		if (!info && rt->rt_flags & RTCF_LOCAL &&
 		    !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
 			struct vxlan_dev *dst_vxlan;
 
@@ -2043,13 +2048,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 		if (err < 0)
 			goto xmit_tx_error;
 
-		udp_tunnel_xmit_skb(rt, sk, skb, saddr,
+		udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr,
 				    dst->sin.sin_addr.s_addr, tos, ttl, df,
 				    src_port, dst_port, xnet, !udp_sum);
 #if IS_ENABLED(CONFIG_IPV6)
 	} else {
 		struct dst_entry *ndst;
-		struct in6_addr saddr;
 		u32 rt6i_flags;
 
 		if (!vxlan->vn6_sock)
@@ -2058,7 +2062,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 
 		ndst = vxlan6_get_route(vxlan, skb,
 					rdst ? rdst->remote_ifindex : 0, tos,
-					label, &dst->sin6.sin6_addr, &saddr,
+					label, &dst->sin6.sin6_addr,
+					&src->sin6.sin6_addr,
 					dst_cache, info);
 		if (IS_ERR(ndst)) {
 			netdev_dbg(dev, "no route to %pI6\n",
@@ -2077,7 +2082,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 
 		/* Bypass encapsulation if the destination is local */
 		rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
-		if (rt6i_flags & RTF_LOCAL &&
+		if (!info && rt6i_flags & RTF_LOCAL &&
 		    !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
 			struct vxlan_dev *dst_vxlan;
 
@@ -2104,7 +2109,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 			return;
 		}
 		udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
-				     &saddr, &dst->sin6.sin6_addr, tos, ttl,
+				     &src->sin6.sin6_addr,
+				     &dst->sin6.sin6_addr, tos, ttl,
 				     label, src_port, dst_port, !udp_sum);
 #endif
 	}
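
Taken together, the vxlan hunks make the encapsulation source address follow
the same selection as the destination: the device configuration on the FDB
path, the tunnel metadata on the collect-metadata path, with the local-delivery
bypass skipped whenever metadata is present, since those packets must really be
encapsulated. The selection, restated in one place (not a separate helper in
the driver):

    union vxlan_addr local_ip, *src;

    if (rdst) {
    	/* FDB path: source comes from the device configuration */
    	src = &vxlan->cfg.saddr;
    } else {
    	/* metadata path: source comes from the tun_info key */
    	if (ip_tunnel_info_af(info) == AF_INET)
    		local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src;
    	else
    		local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
    	src = &local_ip;
    }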
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 1d689169da76..9e1f2d9c9865 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -5700,10 +5700,11 @@ out:
 	mutex_unlock(&wl->mutex);
 }
 
-static u32 wlcore_op_get_expected_throughput(struct ieee80211_sta *sta)
+static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
+					     struct ieee80211_sta *sta)
 {
 	struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
-	struct wl1271 *wl = wl_sta->wl;
+	struct wl1271 *wl = hw->priv;
 	u8 hlid = wl_sta->hlid;
 
 	/* return in units of Kbps */