author	Thomas Gleixner <tglx@linutronix.de>	2016-09-01 12:33:46 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2016-09-01 12:33:46 -0400
commit	0cb7bf61b1e9f05027de58c80f9b46a714d24e35 (patch)
tree	41fb55cf62d07b425122f9a8b96412c0d8eb99c5 /drivers/net/ethernet
parent	aa877175e7a9982233ed8f10cb4bfddd78d82741 (diff)
parent	3eab887a55424fc2c27553b7bfe32330df83f7b8 (diff)
Merge branch 'linus' into smp/hotplug
Apply upstream changes to avoid conflicts with pending patches.
Diffstat (limited to 'drivers/net/ethernet')
31 files changed, 504 insertions, 276 deletions
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 37a0f463b8de..18bb9556dd00 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -793,6 +793,8 @@ int xgene_enet_phy_connect(struct net_device *ndev)
 			netdev_err(ndev, "Could not connect to PHY\n");
 			return -ENODEV;
 		}
+#else
+		return -ENODEV;
 #endif
 	}
 
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 4bff0f3040df..b0da9693f28a 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -771,8 +771,10 @@ int arc_emac_probe(struct net_device *ndev, int interface)
 	priv->dev = dev;
 
 	priv->regs = devm_ioremap_resource(dev, &res_regs);
-	if (IS_ERR(priv->regs))
-		return PTR_ERR(priv->regs);
+	if (IS_ERR(priv->regs)) {
+		err = PTR_ERR(priv->regs);
+		goto out_put_node;
+	}
 
 	dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs);
 
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ff300f7cf529..659261218d9f 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12552,10 +12552,6 @@ static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
 			info->data = TG3_RSS_MAX_NUM_QS;
 		}
 
-		/* The first interrupt vector only
-		 * handles link interrupts.
-		 */
-		info->data -= 1;
 		return 0;
 
 	default:
@@ -14014,6 +14010,7 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
 	}
 
 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
+	    (!ec->rx_coalesce_usecs) ||
 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 36893d8958d4..b6fcf10621b6 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -403,11 +403,11 @@
 #define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII	0x00000004
 #define MACB_CAPS_NO_GIGABIT_HALF		0x00000008
 #define MACB_CAPS_USRIO_DISABLED		0x00000010
+#define MACB_CAPS_JUMBO				0x00000020
 #define MACB_CAPS_FIFO_MODE			0x10000000
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE	0x20000000
 #define MACB_CAPS_SG_DISABLED			0x40000000
 #define MACB_CAPS_MACB_IS_GEM			0x80000000
-#define MACB_CAPS_JUMBO				0x00000010
 
 /* Bit manipulation macros */
 #define MACB_BIT(name)	\
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 1471e16ba719..f45385f5c6e5 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1299,6 +1299,7 @@ static int
 dm9000_open(struct net_device *dev)
 {
 	struct board_info *db = netdev_priv(dev);
+	unsigned int irq_flags = irq_get_trigger_type(dev->irq);
 
 	if (netif_msg_ifup(db))
 		dev_dbg(db->dev, "enabling %s\n", dev->name);
@@ -1306,9 +1307,11 @@ dm9000_open(struct net_device *dev)
 	/* If there is no IRQ type specified, tell the user that this is a
 	 * problem
 	 */
-	if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE)
+	if (irq_flags == IRQF_TRIGGER_NONE)
 		dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
 
+	irq_flags |= IRQF_SHARED;
+
 	/* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
 	iow(db, DM9000_GPR, 0);	/* REG_1F bit0 activate phyxcer */
 	mdelay(1); /* delay needs by DM9000B */
@@ -1316,8 +1319,7 @@ dm9000_open(struct net_device *dev)
 	/* Initialize DM9000 board */
 	dm9000_init_dm9000(dev);
 
-	if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED,
-			dev->name, dev))
+	if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev))
 		return -EAGAIN;
 	/* Now that we have an interrupt handler hooked up we can unmask
 	 * our interrupts
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 1235c7f2564b..1e1eb92998fb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -17,7 +17,7 @@ static const struct mac_stats_string g_gmac_stats_string[] = {
 	{"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)},
 	{"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)},
 	{"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)},
-	{"gamc_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
+	{"gmac_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
 	{"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)},
 	{"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)},
 	{"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 7fd4d54599e4..6b03c8553e59 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -2032,7 +2032,8 @@ const struct e1000_info e1000_82574_info = {
 				  | FLAG2_DISABLE_ASPM_L0S
 				  | FLAG2_DISABLE_ASPM_L1
 				  | FLAG2_NO_DISABLE_RX
-				  | FLAG2_DMA_BURST,
+				  | FLAG2_DMA_BURST
+				  | FLAG2_CHECK_SYSTIM_OVERFLOW,
 	.pba			= 32,
 	.max_hw_frame_size	= DEFAULT_JUMBO,
 	.get_variants		= e1000_get_variants_82571,
@@ -2053,7 +2054,8 @@ const struct e1000_info e1000_82583_info = {
 				  | FLAG_HAS_CTRLEXT_ON_LOAD,
 	.flags2			= FLAG2_DISABLE_ASPM_L0S
 				  | FLAG2_DISABLE_ASPM_L1
-				  | FLAG2_NO_DISABLE_RX,
+				  | FLAG2_NO_DISABLE_RX
+				  | FLAG2_CHECK_SYSTIM_OVERFLOW,
 	.pba			= 32,
 	.max_hw_frame_size	= DEFAULT_JUMBO,
 	.get_variants		= e1000_get_variants_82571,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ef96cd11d6d2..879cca47b021 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -452,6 +452,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
 #define FLAG2_PCIM2PCI_ARBITER_WA         BIT(11)
 #define FLAG2_DFLT_CRC_STRIPPING          BIT(12)
 #define FLAG2_CHECK_RX_HWTSTAMP           BIT(13)
+#define FLAG2_CHECK_SYSTIM_OVERFLOW       BIT(14)
 
 #define E1000_RX_DESC_PS(R, i)	    \
 	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 3e11322d8d58..f3aaca743ea3 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -5885,7 +5885,8 @@ const struct e1000_info e1000_pch_lpt_info = {
 				  | FLAG_HAS_JUMBO_FRAMES
 				  | FLAG_APME_IN_WUC,
 	.flags2			= FLAG2_HAS_PHY_STATS
-				  | FLAG2_HAS_EEE,
+				  | FLAG2_HAS_EEE
+				  | FLAG2_CHECK_SYSTIM_OVERFLOW,
 	.pba			= 26,
 	.max_hw_frame_size	= 9022,
 	.get_variants		= e1000_get_variants_ich8lan,
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 02f443958f31..7017281ba2dc 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4303,6 +4303,42 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
 }
 
 /**
+ * e1000e_sanitize_systim - sanitize raw cycle counter reads
+ * @hw: pointer to the HW structure
+ * @systim: cycle_t value read, sanitized and returned
+ *
+ * Errata for 82574/82583 possible bad bits read from SYSTIMH/L:
+ * check to see that the time is incrementing at a reasonable
+ * rate and is a multiple of incvalue.
+ **/
+static cycle_t e1000e_sanitize_systim(struct e1000_hw *hw, cycle_t systim)
+{
+	u64 time_delta, rem, temp;
+	cycle_t systim_next;
+	u32 incvalue;
+	int i;
+
+	incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
+	for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
+		/* latch SYSTIMH on read of SYSTIML */
+		systim_next = (cycle_t)er32(SYSTIML);
+		systim_next |= (cycle_t)er32(SYSTIMH) << 32;
+
+		time_delta = systim_next - systim;
+		temp = time_delta;
+		/* VMWare users have seen incvalue of zero, don't div / 0 */
+		rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
+
+		systim = systim_next;
+
+		if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0))
+			break;
+	}
+
+	return systim;
+}
+
+/**
  * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
  * @cc: cyclecounter structure
  **/
@@ -4312,7 +4348,7 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
 						     cc);
 	struct e1000_hw *hw = &adapter->hw;
 	u32 systimel, systimeh;
-	cycle_t systim, systim_next;
+	cycle_t systim;
 	/* SYSTIMH latching upon SYSTIML read does not work well.
 	 * This means that if SYSTIML overflows after we read it but before
 	 * we read SYSTIMH, the value of SYSTIMH has been incremented and we
@@ -4335,33 +4371,9 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
 	systim = (cycle_t)systimel;
 	systim |= (cycle_t)systimeh << 32;
 
-	if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
-		u64 time_delta, rem, temp;
-		u32 incvalue;
-		int i;
-
-		/* errata for 82574/82583 possible bad bits read from SYSTIMH/L
-		 * check to see that the time is incrementing at a reasonable
-		 * rate and is a multiple of incvalue
-		 */
-		incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
-		for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
-			/* latch SYSTIMH on read of SYSTIML */
-			systim_next = (cycle_t)er32(SYSTIML);
-			systim_next |= (cycle_t)er32(SYSTIMH) << 32;
-
-			time_delta = systim_next - systim;
-			temp = time_delta;
-			/* VMWare users have seen incvalue of zero, don't div / 0 */
-			rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
-
-			systim = systim_next;
+	if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW)
+		systim = e1000e_sanitize_systim(hw, systim);
 
-			if ((time_delta < E1000_82574_SYSTIM_EPSILON) &&
-			    (rem == 0))
-				break;
-		}
-	}
 	return systim;
 }
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 81c99e1be708..c6ac7a61812f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4554,23 +4554,38 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
 **/
 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
 {
+	int i, tc_unused = 0;
 	u8 num_tc = 0;
-	int i;
+	u8 ret = 0;
 
 	/* Scan the ETS Config Priority Table to find
 	 * traffic class enabled for a given priority
-	 * and use the traffic class index to get the
-	 * number of traffic classes enabled
+	 * and create a bitmask of enabled TCs
 	 */
-	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
-		if (dcbcfg->etscfg.prioritytable[i] > num_tc)
-			num_tc = dcbcfg->etscfg.prioritytable[i];
-	}
+	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+		num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
 
-	/* Traffic class index starts from zero so
-	 * increment to return the actual count
+	/* Now scan the bitmask to check for
+	 * contiguous TCs starting with TC0
 	 */
-	return num_tc + 1;
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		if (num_tc & BIT(i)) {
+			if (!tc_unused) {
+				ret++;
+			} else {
+				pr_err("Non-contiguous TC - Disabling DCB\n");
+				return 1;
+			}
+		} else {
+			tc_unused = 1;
+		}
+	}
+
+	/* There is always at least TC0 */
+	if (!ret)
+		ret = 1;
+
+	return ret;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index e61b647f5f2a..336c103ae374 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -744,7 +744,8 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
 		}
 	}
 
-	shhwtstamps.hwtstamp = ktime_sub_ns(shhwtstamps.hwtstamp, adjust);
+	shhwtstamps.hwtstamp =
+		ktime_add_ns(shhwtstamps.hwtstamp, adjust);
 
 	skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
 	dev_kfree_skb_any(adapter->ptp_tx_skb);
@@ -767,13 +768,32 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
 			 struct sk_buff *skb)
 {
 	__le64 *regval = (__le64 *)va;
+	struct igb_adapter *adapter = q_vector->adapter;
+	int adjust = 0;
 
 	/* The timestamp is recorded in little endian format.
 	 * DWORD: 0        1        2        3
 	 * Field: Reserved Reserved SYSTIML SYSTIMH
 	 */
-	igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
+	igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
 				   le64_to_cpu(regval[1]));
+
+	/* adjust timestamp for the RX latency based on link speed */
+	if (adapter->hw.mac.type == e1000_i210) {
+		switch (adapter->link_speed) {
+		case SPEED_10:
+			adjust = IGB_I210_RX_LATENCY_10;
+			break;
+		case SPEED_100:
+			adjust = IGB_I210_RX_LATENCY_100;
+			break;
+		case SPEED_1000:
+			adjust = IGB_I210_RX_LATENCY_1000;
+			break;
+		}
+	}
+	skb_hwtstamps(skb)->hwtstamp =
+		ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
 }
 
 /**
@@ -825,7 +845,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
 		}
 	}
 	skb_hwtstamps(skb)->hwtstamp =
-		ktime_add_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
+		ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
 
 	/* Update the last_rx_timestamp timer in order to enable watchdog check
 	 * for error case of latched timestamp on a dropped packet.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 5418c69a7463..b4f03748adc0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4100,6 +4100,8 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 vlnctrl, i;
 
+	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+
 	switch (hw->mac.type) {
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
@@ -4112,8 +4114,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
 		/* fall through */
 	case ixgbe_mac_82598EB:
 		/* legacy case, we can just disable VLAN filtering */
-		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-		vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
+		vlnctrl &= ~IXGBE_VLNCTRL_VFE;
 		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 		return;
 	}
@@ -4125,6 +4126,10 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
 	/* Set flag so we don't redo unnecessary work */
 	adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
 
+	/* For VMDq and SR-IOV we must leave VLAN filtering enabled */
+	vlnctrl |= IXGBE_VLNCTRL_VFE;
+	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+
 	/* Add PF to all active pools */
 	for (i = IXGBE_VLVF_ENTRIES; --i;) {
 		u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
@@ -4191,6 +4196,11 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 vlnctrl, i;
 
+	/* Set VLAN filtering to enabled */
+	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+	vlnctrl |= IXGBE_VLNCTRL_VFE;
+	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+
 	switch (hw->mac.type) {
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
@@ -4202,10 +4212,6 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
 		break;
 	/* fall through */
 	case ixgbe_mac_82598EB:
-		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-		vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
-		vlnctrl |= IXGBE_VLNCTRL_VFE;
-		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 		return;
 	}
 
@@ -8390,12 +8396,14 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
 			    struct tcf_exts *exts, u64 *action, u8 *queue)
 {
 	const struct tc_action *a;
+	LIST_HEAD(actions);
 	int err;
 
 	if (tc_no_actions(exts))
 		return -EINVAL;
 
-	tc_for_each_action(a, exts) {
+	tcf_exts_to_list(exts, &actions);
+	list_for_each_entry(a, &actions, list) {
 
 		/* Drop action */
 		if (is_tcf_gact_shot(a)) {
@@ -9517,6 +9525,7 @@ skip_sriov:
 
 	/* copy netdev features into list of user selectable features */
 	netdev->hw_features |= netdev->features |
+			       NETIF_F_HW_VLAN_CTAG_FILTER |
 			       NETIF_F_HW_VLAN_CTAG_RX |
 			       NETIF_F_HW_VLAN_CTAG_TX |
 			       NETIF_F_RXALL |
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index b57ae3afb994..f1609542adf1 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -245,12 +245,16 @@ static int mtk_phy_connect(struct mtk_mac *mac)
 	case PHY_INTERFACE_MODE_MII:
 		ge_mode = 1;
 		break;
-	case PHY_INTERFACE_MODE_RMII:
+	case PHY_INTERFACE_MODE_REVMII:
 		ge_mode = 2;
 		break;
+	case PHY_INTERFACE_MODE_RMII:
+		if (!mac->id)
+			goto err_phy;
+		ge_mode = 3;
+		break;
 	default:
-		dev_err(eth->dev, "invalid phy_mode\n");
-		return -1;
+		goto err_phy;
 	}
 
 	/* put the gmac into the right mode */
@@ -263,13 +267,25 @@ static int mtk_phy_connect(struct mtk_mac *mac)
 	mac->phy_dev->autoneg = AUTONEG_ENABLE;
 	mac->phy_dev->speed = 0;
 	mac->phy_dev->duplex = 0;
+
+	if (of_phy_is_fixed_link(mac->of_node))
+		mac->phy_dev->supported |=
+		SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
 	mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
 				   SUPPORTED_Asym_Pause;
 	mac->phy_dev->advertising = mac->phy_dev->supported |
 				    ADVERTISED_Autoneg;
 	phy_start_aneg(mac->phy_dev);
 
+	of_node_put(np);
+
 	return 0;
+
+err_phy:
+	of_node_put(np);
+	dev_err(eth->dev, "invalid phy_mode\n");
+	return -EINVAL;
 }
 
 static int mtk_mdio_init(struct mtk_eth *eth)
@@ -542,15 +558,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
 	return &ring->buf[idx];
 }
 
-static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
+static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
 {
 	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
-		dma_unmap_single(dev,
+		dma_unmap_single(eth->dev,
 				 dma_unmap_addr(tx_buf, dma_addr0),
 				 dma_unmap_len(tx_buf, dma_len0),
 				 DMA_TO_DEVICE);
 	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
-		dma_unmap_page(dev,
+		dma_unmap_page(eth->dev,
 			       dma_unmap_addr(tx_buf, dma_addr0),
 			       dma_unmap_len(tx_buf, dma_len0),
 			       DMA_TO_DEVICE);
@@ -595,9 +611,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	if (skb_vlan_tag_present(skb))
 		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
 
-	mapped_addr = dma_map_single(&dev->dev, skb->data,
+	mapped_addr = dma_map_single(eth->dev, skb->data,
 				     skb_headlen(skb), DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
 		return -ENOMEM;
 
 	WRITE_ONCE(itxd->txd1, mapped_addr);
@@ -623,10 +639,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 
 		n_desc++;
 		frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
-		mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
+		mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
 					       frag_map_size,
 					       DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+		if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
 			goto err_dma;
 
 		if (i == nr_frags - 1 &&
@@ -679,7 +695,7 @@ err_dma:
 		tx_buf = mtk_desc_to_tx_buf(ring, itxd);
 
 		/* unmap dma */
-		mtk_tx_unmap(&dev->dev, tx_buf);
+		mtk_tx_unmap(eth, tx_buf);
 
 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
@@ -836,11 +852,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 			netdev->stats.rx_dropped++;
 			goto release_desc;
 		}
-		dma_addr = dma_map_single(&eth->netdev[mac]->dev,
+		dma_addr = dma_map_single(eth->dev,
 					  new_data + NET_SKB_PAD,
 					  ring->buf_size,
 					  DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
+		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
 			skb_free_frag(new_data);
 			netdev->stats.rx_dropped++;
 			goto release_desc;
@@ -855,7 +871,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		}
 		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
 
-		dma_unmap_single(&netdev->dev, trxd.rxd1,
+		dma_unmap_single(eth->dev, trxd.rxd1,
 				 ring->buf_size, DMA_FROM_DEVICE);
 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
 		skb->dev = netdev;
@@ -937,7 +953,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
 			done[mac]++;
 			budget--;
 		}
-		mtk_tx_unmap(eth->dev, tx_buf);
+		mtk_tx_unmap(eth, tx_buf);
 
 		ring->last_free = desc;
 		atomic_inc(&ring->free_count);
@@ -1092,7 +1108,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
 
 	if (ring->buf) {
 		for (i = 0; i < MTK_DMA_SIZE; i++)
-			mtk_tx_unmap(eth->dev, &ring->buf[i]);
+			mtk_tx_unmap(eth, &ring->buf[i]);
 		kfree(ring->buf);
 		ring->buf = NULL;
 	}
@@ -1751,6 +1767,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
 		goto free_netdev;
 	}
 	spin_lock_init(&mac->hw_stats->stats_lock);
+	u64_stats_init(&mac->hw_stats->syncp);
 	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
 
 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 0f19b01e3fff..dc8b1cb0fdc8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -318,6 +318,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				u32 *action, u32 *flow_tag)
 {
 	const struct tc_action *a;
+	LIST_HEAD(actions);
 
 	if (tc_no_actions(exts))
 		return -EINVAL;
@@ -325,7 +326,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
 	*action = 0;
 
-	tc_for_each_action(a, exts) {
+	tcf_exts_to_list(exts, &actions);
+	list_for_each_entry(a, &actions, list) {
 		/* Only support a single action per rule */
 		if (*action)
 			return -EINVAL;
@@ -362,13 +364,15 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				u32 *action, u32 *dest_vport)
 {
 	const struct tc_action *a;
+	LIST_HEAD(actions);
 
 	if (tc_no_actions(exts))
 		return -EINVAL;
 
 	*action = 0;
 
-	tc_for_each_action(a, exts) {
+	tcf_exts_to_list(exts, &actions);
+	list_for_each_entry(a, &actions, list) {
 		/* Only support a single action per rule */
 		if (*action)
 			return -EINVAL;
@@ -503,6 +507,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
 	struct mlx5e_tc_flow *flow;
 	struct tc_action *a;
 	struct mlx5_fc *counter;
+	LIST_HEAD(actions);
 	u64 bytes;
 	u64 packets;
 	u64 lastuse;
@@ -518,7 +523,8 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
 
 	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
 
-	tc_for_each_action(a, f->exts)
+	tcf_exts_to_list(f->exts, &actions);
+	list_for_each_entry(a, &actions, list)
 		tcf_action_stats_update(a, bytes, packets, lastuse);
 
 	return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 7ca9201f7dcb..1721098eef13 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -3383,6 +3383,15 @@ MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1);
  */
 MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1);
 
+/* reg_ritr_lb_en
+ * Loop-back filter enable for unicast packets.
+ * If the flag is set then loop-back filter for unicast packets is
+ * implemented on the RIF. Multicast packets are always subject to
+ * loop-back filtering.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, lb_en, 0x04, 24, 1);
+
 /* reg_ritr_virtual_router
  * Virtual router ID associated with the router interface.
  * Access: RW
@@ -3484,6 +3493,7 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
 	mlxsw_reg_ritr_op_set(payload, op);
 	mlxsw_reg_ritr_rif_set(payload, rif);
 	mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
+	mlxsw_reg_ritr_lb_en_set(payload, 1);
 	mlxsw_reg_ritr_mtu_set(payload, mtu);
 	mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
 }
@@ -4000,6 +4010,7 @@ static inline void mlxsw_reg_ralue_pack(char *payload,
 {
 	MLXSW_REG_ZERO(ralue, payload);
 	mlxsw_reg_ralue_protocol_set(payload, protocol);
+	mlxsw_reg_ralue_op_set(payload, op);
 	mlxsw_reg_ralue_virtual_router_set(payload, virtual_router);
 	mlxsw_reg_ralue_prefix_len_set(payload, prefix_len);
 	mlxsw_reg_ralue_entry_type_set(payload,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index c3e61500819d..1f8168906811 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -942,8 +942,8 @@ static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
 	kfree(mlxsw_sp_vport);
 }
 
-int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
-			  u16 vid)
+static int mlxsw_sp_port_add_vid(struct net_device *dev,
+				 __be16 __always_unused proto, u16 vid)
 {
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 	struct mlxsw_sp_port *mlxsw_sp_vport;
@@ -956,16 +956,12 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
 	if (!vid)
 		return 0;
 
-	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
-		netdev_warn(dev, "VID=%d already configured\n", vid);
+	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
 		return 0;
-	}
 
 	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
-	if (!mlxsw_sp_vport) {
-		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
+	if (!mlxsw_sp_vport)
 		return -ENOMEM;
-	}
 
 	/* When adding the first VLAN interface on a bridged port we need to
 	 * transition all the active 802.1Q bridge VLANs to use explicit
@@ -973,24 +969,17 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
 	 */
 	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
 		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
-		if (err) {
-			netdev_err(dev, "Failed to set to Virtual mode\n");
+		if (err)
 			goto err_port_vp_mode_trans;
-		}
 	}
 
 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
-	if (err) {
-		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
+	if (err)
 		goto err_port_vid_learning_set;
-	}
 
 	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
-	if (err) {
-		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
-			   vid);
+	if (err)
 		goto err_port_add_vid;
-	}
 
 	return 0;
 
@@ -1010,7 +999,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 	struct mlxsw_sp_port *mlxsw_sp_vport;
 	struct mlxsw_sp_fid *f;
-	int err;
 
 	/* VLAN 0 is removed from HW filter when device goes down, but
 	 * it is reserved in our case, so simply return.
@@ -1019,23 +1007,12 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
 		return 0;
 
 	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
-	if (!mlxsw_sp_vport) {
-		netdev_warn(dev, "VID=%d does not exist\n", vid);
+	if (WARN_ON(!mlxsw_sp_vport))
 		return 0;
-	}
 
-	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
-	if (err) {
-		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
-			   vid);
-		return err;
-	}
+	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
 
-	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
-	if (err) {
-		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
-		return err;
-	}
+	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
 
 	/* Drop FID reference. If this was the last reference the
 	 * resources will be freed.
@@ -1048,13 +1025,8 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
 	 * transition all active 802.1Q bridge VLANs to use VID to FID
 	 * mappings and set port's mode to VLAN mode.
 	 */
-	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
-		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
-		if (err) {
-			netdev_err(dev, "Failed to set to VLAN mode\n");
-			return err;
-		}
-	}
+	if (list_is_singular(&mlxsw_sp_port->vports_list))
+		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
 
 	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
 
@@ -1149,6 +1121,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 					  bool ingress)
 {
 	const struct tc_action *a;
+	LIST_HEAD(actions);
 	int err;
 
 	if (!tc_single_action(cls->exts)) {
@@ -1156,7 +1129,8 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 		return -ENOTSUPP;
 	}
 
-	tc_for_each_action(a, cls->exts) {
+	tcf_exts_to_list(cls->exts, &actions);
+	list_for_each_entry(a, &actions, list) {
 		if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL))
 			return -ENOTSUPP;
 
@@ -2076,6 +2050,18 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
 	return 0;
 }
 
+static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	mlxsw_sp_port->pvid = 1;
+
+	return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
+}
+
+static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
+}
+
 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 				bool split, u8 module, u8 width, u8 lane)
 {
@@ -2191,7 +2177,15 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 		goto err_port_dcb_init;
 	}
 
+	err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
+			mlxsw_sp_port->local_port);
+		goto err_port_pvid_vport_create;
+	}
+
 	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
+	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
 	err = register_netdev(dev);
 	if (err) {
 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
@@ -2208,24 +2202,23 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 		goto err_core_port_init;
 	}
 
-	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
-	if (err)
-		goto err_port_vlan_init;
-
-	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
 	return 0;
 
-err_port_vlan_init:
-	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
 err_core_port_init:
 	unregister_netdev(dev);
 err_register_netdev:
+	mlxsw_sp->ports[local_port] = NULL;
+	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
+err_port_pvid_vport_create:
+	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
 err_port_dcb_init:
 err_port_ets_init:
 err_port_buffers_init:
 err_port_admin_status_set:
 err_port_mtu_set:
 err_port_speed_by_width_set:
+	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
 err_port_swid_set:
 err_port_system_port_mapping_set:
 err_dev_addr_init:
@@ -2245,12 +2238,12 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 
 	if (!mlxsw_sp_port)
 		return;
-	mlxsw_sp->ports[local_port] = NULL;
 	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
 	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
-	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
-	mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
+	mlxsw_sp->ports[local_port] = NULL;
 	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
+	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
 	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
 	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
 	free_percpu(mlxsw_sp_port->pcpu_stats);
@@ -2662,6 +2655,26 @@ static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
 	{
 		.func = mlxsw_sp_rx_listener_func,
 		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_MTUERROR,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_TTLERROR,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_LBERROR,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_OSPF,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
 		.trap_id = MLXSW_TRAP_ID_IP2ME,
 	},
 	{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index f69aa37d1521..ab3feb81bd43 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -536,8 +536,6 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
 			     u16 vid);
 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
 			   u16 vid_end, bool is_member, bool untagged);
-int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
-			  u16 vid);
 int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
 			     bool set);
 void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 074cdda7b6f3..237418a0e6e0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -330,7 +330,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
 	MLXSW_SP_CPU_PORT_SB_CM,
 	MLXSW_SP_CPU_PORT_SB_CM,
 	MLXSW_SP_CPU_PORT_SB_CM,
-	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0),
 	MLXSW_SP_CPU_PORT_SB_CM,
 	MLXSW_SP_CPU_PORT_SB_CM,
 	MLXSW_SP_CPU_PORT_SB_CM,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c index 01cfb7512827..b6ed7f7c531e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c | |||
@@ -341,6 +341,8 @@ static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port, | |||
341 | char pfcc_pl[MLXSW_REG_PFCC_LEN]; | 341 | char pfcc_pl[MLXSW_REG_PFCC_LEN]; |
342 | 342 | ||
343 | mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); | 343 | mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); |
344 | mlxsw_reg_pfcc_pprx_set(pfcc_pl, mlxsw_sp_port->link.rx_pause); | ||
345 | mlxsw_reg_pfcc_pptx_set(pfcc_pl, mlxsw_sp_port->link.tx_pause); | ||
344 | mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en); | 346 | mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en); |
345 | 347 | ||
346 | return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), | 348 | return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), |
@@ -351,17 +353,17 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev, | |||
351 | struct ieee_pfc *pfc) | 353 | struct ieee_pfc *pfc) |
352 | { | 354 | { |
353 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); | 355 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); |
356 | bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); | ||
354 | int err; | 357 | int err; |
355 | 358 | ||
356 | if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) && | 359 | if (pause_en && pfc->pfc_en) { |
357 | pfc->pfc_en) { | ||
358 | netdev_err(dev, "PAUSE frames already enabled on port\n"); | 360 | netdev_err(dev, "PAUSE frames already enabled on port\n"); |
359 | return -EINVAL; | 361 | return -EINVAL; |
360 | } | 362 | } |
361 | 363 | ||
362 | err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, | 364 | err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, |
363 | mlxsw_sp_port->dcb.ets->prio_tc, | 365 | mlxsw_sp_port->dcb.ets->prio_tc, |
364 | false, pfc); | 366 | pause_en, pfc); |
365 | if (err) { | 367 | if (err) { |
366 | netdev_err(dev, "Failed to configure port's headroom for PFC\n"); | 368 | netdev_err(dev, "Failed to configure port's headroom for PFC\n"); |
367 | return err; | 369 | return err; |
@@ -380,7 +382,7 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev, | |||
380 | 382 | ||
381 | err_port_pfc_set: | 383 | err_port_pfc_set: |
382 | __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, | 384 | __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, |
383 | mlxsw_sp_port->dcb.ets->prio_tc, false, | 385 | mlxsw_sp_port->dcb.ets->prio_tc, pause_en, |
384 | mlxsw_sp_port->dcb.pfc); | 386 | mlxsw_sp_port->dcb.pfc); |
385 | return err; | 387 | return err; |
386 | } | 388 | } |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 81418d629231..90bb93b037ec 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
@@ -1651,9 +1651,10 @@ static void mlxsw_sp_router_fib4_add_info_destroy(void const *data) | |||
1651 | const struct mlxsw_sp_router_fib4_add_info *info = data; | 1651 | const struct mlxsw_sp_router_fib4_add_info *info = data; |
1652 | struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry; | 1652 | struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry; |
1653 | struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp; | 1653 | struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp; |
1654 | struct mlxsw_sp_vr *vr = fib_entry->vr; | ||
1654 | 1655 | ||
1655 | mlxsw_sp_fib_entry_destroy(fib_entry); | 1656 | mlxsw_sp_fib_entry_destroy(fib_entry); |
1656 | mlxsw_sp_vr_put(mlxsw_sp, fib_entry->vr); | 1657 | mlxsw_sp_vr_put(mlxsw_sp, vr); |
1657 | kfree(info); | 1658 | kfree(info); |
1658 | } | 1659 | } |
1659 | 1660 | ||
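
The spectrum_router.c hunk above is a use-after-free fix: fib_entry->vr was read after mlxsw_sp_fib_entry_destroy() had freed the entry, so the patch caches the pointer first. A self-contained sketch of the pattern, with illustrative types in place of the mlxsw structures:

    #include <stdlib.h>

    struct vr {
            int refcount;
    };

    struct fib_entry {
            struct vr *vr;
    };

    static void vr_put(struct vr *vr)
    {
            vr->refcount--;
    }

    static void fib4_add_info_destroy(struct fib_entry *fib_entry)
    {
            struct vr *vr = fib_entry->vr;  /* save before the entry dies */

            free(fib_entry);                /* mlxsw_sp_fib_entry_destroy() */
            vr_put(vr);                     /* fib_entry->vr here would read freed memory */
    }

    int main(void)
    {
            static struct vr vr = { .refcount = 1 };
            struct fib_entry *e = malloc(sizeof(*e));

            if (!e)
                    return 1;
            e->vr = &vr;
            fib4_add_info_destroy(e);
            return vr.refcount;             /* 0 */
    }
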
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index a1ad5e6bdfa8..d1b59cdfacc1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | |||
@@ -450,6 +450,8 @@ void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f) | |||
450 | 450 | ||
451 | kfree(f); | 451 | kfree(f); |
452 | 452 | ||
453 | mlxsw_sp_fid_map(mlxsw_sp, fid, false); | ||
454 | |||
453 | mlxsw_sp_fid_op(mlxsw_sp, fid, false); | 455 | mlxsw_sp_fid_op(mlxsw_sp, fid, false); |
454 | } | 456 | } |
455 | 457 | ||
@@ -997,13 +999,13 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, | |||
997 | } | 999 | } |
998 | 1000 | ||
999 | static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, | 1001 | static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, |
1000 | u16 vid_begin, u16 vid_end, bool init) | 1002 | u16 vid_begin, u16 vid_end) |
1001 | { | 1003 | { |
1002 | struct net_device *dev = mlxsw_sp_port->dev; | 1004 | struct net_device *dev = mlxsw_sp_port->dev; |
1003 | u16 vid, pvid; | 1005 | u16 vid, pvid; |
1004 | int err; | 1006 | int err; |
1005 | 1007 | ||
1006 | if (!init && !mlxsw_sp_port->bridged) | 1008 | if (!mlxsw_sp_port->bridged) |
1007 | return -EINVAL; | 1009 | return -EINVAL; |
1008 | 1010 | ||
1009 | err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, | 1011 | err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, |
@@ -1014,9 +1016,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1014 | return err; | 1016 | return err; |
1015 | } | 1017 | } |
1016 | 1018 | ||
1017 | if (init) | ||
1018 | goto out; | ||
1019 | |||
1020 | pvid = mlxsw_sp_port->pvid; | 1019 | pvid = mlxsw_sp_port->pvid; |
1021 | if (pvid >= vid_begin && pvid <= vid_end) { | 1020 | if (pvid >= vid_begin && pvid <= vid_end) { |
1022 | err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); | 1021 | err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); |
@@ -1028,7 +1027,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1028 | 1027 | ||
1029 | mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end); | 1028 | mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end); |
1030 | 1029 | ||
1031 | out: | ||
1032 | /* Changing activity bits only if HW operation succeeded */ | 1030 | /* Changing activity bits only if HW operation succeeded */ |
1033 | for (vid = vid_begin; vid <= vid_end; vid++) | 1031 | for (vid = vid_begin; vid <= vid_end; vid++) |
1034 | clear_bit(vid, mlxsw_sp_port->active_vlans); | 1032 | clear_bit(vid, mlxsw_sp_port->active_vlans); |
@@ -1039,8 +1037,8 @@ out: | |||
1039 | static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, | 1037 | static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, |
1040 | const struct switchdev_obj_port_vlan *vlan) | 1038 | const struct switchdev_obj_port_vlan *vlan) |
1041 | { | 1039 | { |
1042 | return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, | 1040 | return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin, |
1043 | vlan->vid_begin, vlan->vid_end, false); | 1041 | vlan->vid_end); |
1044 | } | 1042 | } |
1045 | 1043 | ||
1046 | void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) | 1044 | void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) |
@@ -1048,7 +1046,7 @@ void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) | |||
1048 | u16 vid; | 1046 | u16 vid; |
1049 | 1047 | ||
1050 | for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) | 1048 | for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) |
1051 | __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false); | 1049 | __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid); |
1052 | } | 1050 | } |
1053 | 1051 | ||
1054 | static int | 1052 | static int |
@@ -1546,32 +1544,6 @@ void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) | |||
1546 | mlxsw_sp_fdb_fini(mlxsw_sp); | 1544 | mlxsw_sp_fdb_fini(mlxsw_sp); |
1547 | } | 1545 | } |
1548 | 1546 | ||
1549 | int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port) | ||
1550 | { | ||
1551 | struct net_device *dev = mlxsw_sp_port->dev; | ||
1552 | int err; | ||
1553 | |||
1554 | /* Allow only untagged packets to ingress and tag them internally | ||
1555 | * with VID 1. | ||
1556 | */ | ||
1557 | mlxsw_sp_port->pvid = 1; | ||
1558 | err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1, | ||
1559 | true); | ||
1560 | if (err) { | ||
1561 | netdev_err(dev, "Unable to init VLANs\n"); | ||
1562 | return err; | ||
1563 | } | ||
1564 | |||
1565 | /* Add implicit VLAN interface in the device, so that untagged | ||
1566 | * packets will be classified to the default vFID. | ||
1567 | */ | ||
1568 | err = mlxsw_sp_port_add_vid(dev, 0, 1); | ||
1569 | if (err) | ||
1570 | netdev_err(dev, "Failed to configure default vFID\n"); | ||
1571 | |||
1572 | return err; | ||
1573 | } | ||
1574 | |||
1575 | void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port) | 1547 | void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port) |
1576 | { | 1548 | { |
1577 | mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops; | 1549 | mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops; |
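
One detail worth calling out from the __mlxsw_sp_port_vlans_del() path above: when the deleted VLAN range covers the port's PVID, the PVID must be cleared as well. A tiny sketch of just that check, with the driver calls reduced to comments:

    #include <stdio.h>

    static unsigned short pvid = 5;     /* port's current PVID */

    /* Deleting a VLAN range that covers the PVID also clears the PVID,
     * mirroring the mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0) call above. */
    static void vlans_del(unsigned short vid_begin, unsigned short vid_end)
    {
            if (pvid >= vid_begin && pvid <= vid_end)
                    pvid = 0;
            /* per-VID FID leave and active_vlans bit clearing follow here */
    }

    int main(void)
    {
            vlans_del(1, 10);
            printf("pvid=%u\n", pvid);  /* prints pvid=0 */
            return 0;
    }
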
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 470d7696e9fe..ed8e30186400 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h | |||
@@ -56,6 +56,10 @@ enum { | |||
56 | MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, | 56 | MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, |
57 | MLXSW_TRAP_ID_ARPBC = 0x50, | 57 | MLXSW_TRAP_ID_ARPBC = 0x50, |
58 | MLXSW_TRAP_ID_ARPUC = 0x51, | 58 | MLXSW_TRAP_ID_ARPUC = 0x51, |
59 | MLXSW_TRAP_ID_MTUERROR = 0x52, | ||
60 | MLXSW_TRAP_ID_TTLERROR = 0x53, | ||
61 | MLXSW_TRAP_ID_LBERROR = 0x54, | ||
62 | MLXSW_TRAP_ID_OSPF = 0x55, | ||
59 | MLXSW_TRAP_ID_IP2ME = 0x5F, | 63 | MLXSW_TRAP_ID_IP2ME = 0x5F, |
60 | MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, | 64 | MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, |
61 | MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90, | 65 | MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90, |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index d0dc28f93c0e..226cb08cc055 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c | |||
@@ -52,40 +52,94 @@ static bool qed_dcbx_app_ethtype(u32 app_info_bitmap) | |||
52 | DCBX_APP_SF_ETHTYPE); | 52 | DCBX_APP_SF_ETHTYPE); |
53 | } | 53 | } |
54 | 54 | ||
55 | static bool qed_dcbx_ieee_app_ethtype(u32 app_info_bitmap) | ||
56 | { | ||
57 | u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE); | ||
58 | |||
59 | /* Old MFW */ | ||
60 | if (mfw_val == DCBX_APP_SF_IEEE_RESERVED) | ||
61 | return qed_dcbx_app_ethtype(app_info_bitmap); | ||
62 | |||
63 | return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE); | ||
64 | } | ||
65 | |||
55 | static bool qed_dcbx_app_port(u32 app_info_bitmap) | 66 | static bool qed_dcbx_app_port(u32 app_info_bitmap) |
56 | { | 67 | { |
57 | return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == | 68 | return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == |
58 | DCBX_APP_SF_PORT); | 69 | DCBX_APP_SF_PORT); |
59 | } | 70 | } |
60 | 71 | ||
61 | static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id) | 72 | static bool qed_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type) |
62 | { | 73 | { |
63 | return !!(qed_dcbx_app_ethtype(app_info_bitmap) && | 74 | u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE); |
64 | proto_id == QED_ETH_TYPE_DEFAULT); | 75 | |
76 | /* Old MFW */ | ||
77 | if (mfw_val == DCBX_APP_SF_IEEE_RESERVED) | ||
78 | return qed_dcbx_app_port(app_info_bitmap); | ||
79 | |||
80 | return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT); | ||
65 | } | 81 | } |
66 | 82 | ||
67 | static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id) | 83 | static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) |
68 | { | 84 | { |
69 | return !!(qed_dcbx_app_port(app_info_bitmap) && | 85 | bool ethtype; |
70 | proto_id == QED_TCP_PORT_ISCSI); | 86 | |
87 | if (ieee) | ||
88 | ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); | ||
89 | else | ||
90 | ethtype = qed_dcbx_app_ethtype(app_info_bitmap); | ||
91 | |||
92 | return !!(ethtype && (proto_id == QED_ETH_TYPE_DEFAULT)); | ||
71 | } | 93 | } |
72 | 94 | ||
73 | static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id) | 95 | static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) |
74 | { | 96 | { |
75 | return !!(qed_dcbx_app_ethtype(app_info_bitmap) && | 97 | bool port; |
76 | proto_id == QED_ETH_TYPE_FCOE); | 98 | |
99 | if (ieee) | ||
100 | port = qed_dcbx_ieee_app_port(app_info_bitmap, | ||
101 | DCBX_APP_SF_IEEE_TCP_PORT); | ||
102 | else | ||
103 | port = qed_dcbx_app_port(app_info_bitmap); | ||
104 | |||
105 | return !!(port && (proto_id == QED_TCP_PORT_ISCSI)); | ||
77 | } | 106 | } |
78 | 107 | ||
79 | static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id) | 108 | static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) |
80 | { | 109 | { |
81 | return !!(qed_dcbx_app_ethtype(app_info_bitmap) && | 110 | bool ethtype; |
82 | proto_id == QED_ETH_TYPE_ROCE); | 111 | |
112 | if (ieee) | ||
113 | ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); | ||
114 | else | ||
115 | ethtype = qed_dcbx_app_ethtype(app_info_bitmap); | ||
116 | |||
117 | return !!(ethtype && (proto_id == QED_ETH_TYPE_FCOE)); | ||
83 | } | 118 | } |
84 | 119 | ||
85 | static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id) | 120 | static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) |
86 | { | 121 | { |
87 | return !!(qed_dcbx_app_port(app_info_bitmap) && | 122 | bool ethtype; |
88 | proto_id == QED_UDP_PORT_TYPE_ROCE_V2); | 123 | |
124 | if (ieee) | ||
125 | ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); | ||
126 | else | ||
127 | ethtype = qed_dcbx_app_ethtype(app_info_bitmap); | ||
128 | |||
129 | return !!(ethtype && (proto_id == QED_ETH_TYPE_ROCE)); | ||
130 | } | ||
131 | |||
132 | static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) | ||
133 | { | ||
134 | bool port; | ||
135 | |||
136 | if (ieee) | ||
137 | port = qed_dcbx_ieee_app_port(app_info_bitmap, | ||
138 | DCBX_APP_SF_IEEE_UDP_PORT); | ||
139 | else | ||
140 | port = qed_dcbx_app_port(app_info_bitmap); | ||
141 | |||
142 | return !!(port && (proto_id == QED_UDP_PORT_TYPE_ROCE_V2)); | ||
89 | } | 143 | } |
90 | 144 | ||
91 | static void | 145 | static void |
@@ -164,17 +218,17 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, | |||
164 | static bool | 218 | static bool |
165 | qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, | 219 | qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, |
166 | u32 app_prio_bitmap, | 220 | u32 app_prio_bitmap, |
167 | u16 id, enum dcbx_protocol_type *type) | 221 | u16 id, enum dcbx_protocol_type *type, bool ieee) |
168 | { | 222 | { |
169 | if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id)) { | 223 | if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) { |
170 | *type = DCBX_PROTOCOL_FCOE; | 224 | *type = DCBX_PROTOCOL_FCOE; |
171 | } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id)) { | 225 | } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) { |
172 | *type = DCBX_PROTOCOL_ROCE; | 226 | *type = DCBX_PROTOCOL_ROCE; |
173 | } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) { | 227 | } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) { |
174 | *type = DCBX_PROTOCOL_ISCSI; | 228 | *type = DCBX_PROTOCOL_ISCSI; |
175 | } else if (qed_dcbx_default_tlv(app_prio_bitmap, id)) { | 229 | } else if (qed_dcbx_default_tlv(app_prio_bitmap, id, ieee)) { |
176 | *type = DCBX_PROTOCOL_ETH; | 230 | *type = DCBX_PROTOCOL_ETH; |
177 | } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) { | 231 | } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) { |
178 | *type = DCBX_PROTOCOL_ROCE_V2; | 232 | *type = DCBX_PROTOCOL_ROCE_V2; |
179 | } else { | 233 | } else { |
180 | *type = DCBX_MAX_PROTOCOL_TYPE; | 234 | *type = DCBX_MAX_PROTOCOL_TYPE; |
@@ -194,17 +248,18 @@ static int | |||
194 | qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | 248 | qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, |
195 | struct qed_dcbx_results *p_data, | 249 | struct qed_dcbx_results *p_data, |
196 | struct dcbx_app_priority_entry *p_tbl, | 250 | struct dcbx_app_priority_entry *p_tbl, |
197 | u32 pri_tc_tbl, int count, bool dcbx_enabled) | 251 | u32 pri_tc_tbl, int count, u8 dcbx_version) |
198 | { | 252 | { |
199 | u8 tc, priority_map; | 253 | u8 tc, priority_map; |
200 | enum dcbx_protocol_type type; | 254 | enum dcbx_protocol_type type; |
255 | bool enable, ieee; | ||
201 | u16 protocol_id; | 256 | u16 protocol_id; |
202 | int priority; | 257 | int priority; |
203 | bool enable; | ||
204 | int i; | 258 | int i; |
205 | 259 | ||
206 | DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); | 260 | DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); |
207 | 261 | ||
262 | ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE); | ||
208 | /* Parse APP TLV */ | 263 | /* Parse APP TLV */ |
209 | for (i = 0; i < count; i++) { | 264 | for (i = 0; i < count; i++) { |
210 | protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, | 265 | protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, |
@@ -219,7 +274,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | |||
219 | 274 | ||
220 | tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority); | 275 | tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority); |
221 | if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, | 276 | if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, |
222 | protocol_id, &type)) { | 277 | protocol_id, &type, ieee)) { |
223 | /* ETH always has the enable bit reset, as it gets | 278 | /* ETH always has the enable bit reset, as it gets |
224 | * vlan information per packet. For other protocols, | 279 | * vlan information per packet. For other protocols, |
225 | * should be set according to the dcbx_enabled | 280 | * should be set according to the dcbx_enabled |
@@ -275,15 +330,12 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) | |||
275 | struct dcbx_ets_feature *p_ets; | 330 | struct dcbx_ets_feature *p_ets; |
276 | struct qed_hw_info *p_info; | 331 | struct qed_hw_info *p_info; |
277 | u32 pri_tc_tbl, flags; | 332 | u32 pri_tc_tbl, flags; |
278 | bool dcbx_enabled; | 333 | u8 dcbx_version; |
279 | int num_entries; | 334 | int num_entries; |
280 | int rc = 0; | 335 | int rc = 0; |
281 | 336 | ||
282 | /* If DCBx version is non-zero, then negotiation was | ||
283 | * successfully performed | ||
284 | */ | ||
285 | flags = p_hwfn->p_dcbx_info->operational.flags; | 337 | flags = p_hwfn->p_dcbx_info->operational.flags; |
286 | dcbx_enabled = !!QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION); | 338 | dcbx_version = QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION); |
287 | 339 | ||
288 | p_app = &p_hwfn->p_dcbx_info->operational.features.app; | 340 | p_app = &p_hwfn->p_dcbx_info->operational.features.app; |
289 | p_tbl = p_app->app_pri_tbl; | 341 | p_tbl = p_app->app_pri_tbl; |
@@ -295,13 +347,13 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) | |||
295 | num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); | 347 | num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); |
296 | 348 | ||
297 | rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, | 349 | rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, |
298 | num_entries, dcbx_enabled); | 350 | num_entries, dcbx_version); |
299 | if (rc) | 351 | if (rc) |
300 | return rc; | 352 | return rc; |
301 | 353 | ||
302 | p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); | 354 | p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); |
303 | data.pf_id = p_hwfn->rel_pf_id; | 355 | data.pf_id = p_hwfn->rel_pf_id; |
304 | data.dcbx_enabled = dcbx_enabled; | 356 | data.dcbx_enabled = !!dcbx_version; |
305 | 357 | ||
306 | qed_dcbx_dp_protocol(p_hwfn, &data); | 358 | qed_dcbx_dp_protocol(p_hwfn, &data); |
307 | 359 | ||
@@ -400,7 +452,7 @@ static void | |||
400 | qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, | 452 | qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, |
401 | struct dcbx_app_priority_feature *p_app, | 453 | struct dcbx_app_priority_feature *p_app, |
402 | struct dcbx_app_priority_entry *p_tbl, | 454 | struct dcbx_app_priority_entry *p_tbl, |
403 | struct qed_dcbx_params *p_params) | 455 | struct qed_dcbx_params *p_params, bool ieee) |
404 | { | 456 | { |
405 | struct qed_app_entry *entry; | 457 | struct qed_app_entry *entry; |
406 | u8 pri_map; | 458 | u8 pri_map; |
@@ -414,15 +466,46 @@ qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, | |||
414 | DCBX_APP_NUM_ENTRIES); | 466 | DCBX_APP_NUM_ENTRIES); |
415 | for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { | 467 | for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { |
416 | entry = &p_params->app_entry[i]; | 468 | entry = &p_params->app_entry[i]; |
417 | entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry, | 469 | if (ieee) { |
418 | DCBX_APP_SF)); | 470 | u8 sf_ieee; |
471 | u32 val; | ||
472 | |||
473 | sf_ieee = QED_MFW_GET_FIELD(p_tbl[i].entry, | ||
474 | DCBX_APP_SF_IEEE); | ||
475 | switch (sf_ieee) { | ||
476 | case DCBX_APP_SF_IEEE_RESERVED: | ||
477 | /* Old MFW */ | ||
478 | val = QED_MFW_GET_FIELD(p_tbl[i].entry, | ||
479 | DCBX_APP_SF); | ||
480 | entry->sf_ieee = val ? | ||
481 | QED_DCBX_SF_IEEE_TCP_UDP_PORT : | ||
482 | QED_DCBX_SF_IEEE_ETHTYPE; | ||
483 | break; | ||
484 | case DCBX_APP_SF_IEEE_ETHTYPE: | ||
485 | entry->sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE; | ||
486 | break; | ||
487 | case DCBX_APP_SF_IEEE_TCP_PORT: | ||
488 | entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT; | ||
489 | break; | ||
490 | case DCBX_APP_SF_IEEE_UDP_PORT: | ||
491 | entry->sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT; | ||
492 | break; | ||
493 | case DCBX_APP_SF_IEEE_TCP_UDP_PORT: | ||
494 | entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT; | ||
495 | break; | ||
496 | } | ||
497 | } else { | ||
498 | entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry, | ||
499 | DCBX_APP_SF)); | ||
500 | } | ||
501 | |||
419 | pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP); | 502 | pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP); |
420 | entry->prio = ffs(pri_map) - 1; | 503 | entry->prio = ffs(pri_map) - 1; |
421 | entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry, | 504 | entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry, |
422 | DCBX_APP_PROTOCOL_ID); | 505 | DCBX_APP_PROTOCOL_ID); |
423 | qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, | 506 | qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, |
424 | entry->proto_id, | 507 | entry->proto_id, |
425 | &entry->proto_type); | 508 | &entry->proto_type, ieee); |
426 | } | 509 | } |
427 | 510 | ||
428 | DP_VERBOSE(p_hwfn, QED_MSG_DCB, | 511 | DP_VERBOSE(p_hwfn, QED_MSG_DCB, |
@@ -483,7 +566,7 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn, | |||
483 | bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]); | 566 | bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]); |
484 | tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]); | 567 | tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]); |
485 | tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]); | 568 | tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]); |
486 | pri_map = be32_to_cpu(p_ets->pri_tc_tbl[0]); | 569 | pri_map = p_ets->pri_tc_tbl[0]; |
487 | for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { | 570 | for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { |
488 | p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i]; | 571 | p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i]; |
489 | p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i]; | 572 | p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i]; |
@@ -500,9 +583,9 @@ qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn, | |||
500 | struct dcbx_app_priority_feature *p_app, | 583 | struct dcbx_app_priority_feature *p_app, |
501 | struct dcbx_app_priority_entry *p_tbl, | 584 | struct dcbx_app_priority_entry *p_tbl, |
502 | struct dcbx_ets_feature *p_ets, | 585 | struct dcbx_ets_feature *p_ets, |
503 | u32 pfc, struct qed_dcbx_params *p_params) | 586 | u32 pfc, struct qed_dcbx_params *p_params, bool ieee) |
504 | { | 587 | { |
505 | qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params); | 588 | qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee); |
506 | qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params); | 589 | qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params); |
507 | qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params); | 590 | qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params); |
508 | } | 591 | } |
@@ -516,7 +599,7 @@ qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn, | |||
516 | p_feat = &p_hwfn->p_dcbx_info->local_admin.features; | 599 | p_feat = &p_hwfn->p_dcbx_info->local_admin.features; |
517 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, | 600 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, |
518 | p_feat->app.app_pri_tbl, &p_feat->ets, | 601 | p_feat->app.app_pri_tbl, &p_feat->ets, |
519 | p_feat->pfc, ¶ms->local.params); | 602 | p_feat->pfc, ¶ms->local.params, false); |
520 | params->local.valid = true; | 603 | params->local.valid = true; |
521 | } | 604 | } |
522 | 605 | ||
@@ -529,7 +612,7 @@ qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn, | |||
529 | p_feat = &p_hwfn->p_dcbx_info->remote.features; | 612 | p_feat = &p_hwfn->p_dcbx_info->remote.features; |
530 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, | 613 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, |
531 | p_feat->app.app_pri_tbl, &p_feat->ets, | 614 | p_feat->app.app_pri_tbl, &p_feat->ets, |
532 | p_feat->pfc, ¶ms->remote.params); | 615 | p_feat->pfc, ¶ms->remote.params, false); |
533 | params->remote.valid = true; | 616 | params->remote.valid = true; |
534 | } | 617 | } |
535 | 618 | ||
@@ -574,7 +657,8 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn, | |||
574 | 657 | ||
575 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, | 658 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, |
576 | p_feat->app.app_pri_tbl, &p_feat->ets, | 659 | p_feat->app.app_pri_tbl, &p_feat->ets, |
577 | p_feat->pfc, ¶ms->operational.params); | 660 | p_feat->pfc, ¶ms->operational.params, |
661 | p_operational->ieee); | ||
578 | qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results); | 662 | qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results); |
579 | err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR); | 663 | err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR); |
580 | p_operational->err = err; | 664 | p_operational->err = err; |
@@ -944,7 +1028,6 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn, | |||
944 | val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4)); | 1028 | val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4)); |
945 | p_ets->pri_tc_tbl[0] |= val; | 1029 | p_ets->pri_tc_tbl[0] |= val; |
946 | } | 1030 | } |
947 | p_ets->pri_tc_tbl[0] = cpu_to_be32(p_ets->pri_tc_tbl[0]); | ||
948 | for (i = 0; i < 2; i++) { | 1031 | for (i = 0; i < 2; i++) { |
949 | p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]); | 1032 | p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]); |
950 | p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]); | 1033 | p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]); |
@@ -954,7 +1037,7 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn, | |||
954 | static void | 1037 | static void |
955 | qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn, | 1038 | qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn, |
956 | struct dcbx_app_priority_feature *p_app, | 1039 | struct dcbx_app_priority_feature *p_app, |
957 | struct qed_dcbx_params *p_params) | 1040 | struct qed_dcbx_params *p_params, bool ieee) |
958 | { | 1041 | { |
959 | u32 *entry; | 1042 | u32 *entry; |
960 | int i; | 1043 | int i; |
@@ -975,12 +1058,36 @@ qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn, | |||
975 | 1058 | ||
976 | for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { | 1059 | for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { |
977 | entry = &p_app->app_pri_tbl[i].entry; | 1060 | entry = &p_app->app_pri_tbl[i].entry; |
978 | *entry &= ~DCBX_APP_SF_MASK; | 1061 | if (ieee) { |
979 | if (p_params->app_entry[i].ethtype) | 1062 | *entry &= ~DCBX_APP_SF_IEEE_MASK; |
980 | *entry |= ((u32)DCBX_APP_SF_ETHTYPE << | 1063 | switch (p_params->app_entry[i].sf_ieee) { |
981 | DCBX_APP_SF_SHIFT); | 1064 | case QED_DCBX_SF_IEEE_ETHTYPE: |
982 | else | 1065 | *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE << |
983 | *entry |= ((u32)DCBX_APP_SF_PORT << DCBX_APP_SF_SHIFT); | 1066 | DCBX_APP_SF_IEEE_SHIFT); |
1067 | break; | ||
1068 | case QED_DCBX_SF_IEEE_TCP_PORT: | ||
1069 | *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT << | ||
1070 | DCBX_APP_SF_IEEE_SHIFT); | ||
1071 | break; | ||
1072 | case QED_DCBX_SF_IEEE_UDP_PORT: | ||
1073 | *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT << | ||
1074 | DCBX_APP_SF_IEEE_SHIFT); | ||
1075 | break; | ||
1076 | case QED_DCBX_SF_IEEE_TCP_UDP_PORT: | ||
1077 | *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT << | ||
1078 | DCBX_APP_SF_IEEE_SHIFT); | ||
1079 | break; | ||
1080 | } | ||
1081 | } else { | ||
1082 | *entry &= ~DCBX_APP_SF_MASK; | ||
1083 | if (p_params->app_entry[i].ethtype) | ||
1084 | *entry |= ((u32)DCBX_APP_SF_ETHTYPE << | ||
1085 | DCBX_APP_SF_SHIFT); | ||
1086 | else | ||
1087 | *entry |= ((u32)DCBX_APP_SF_PORT << | ||
1088 | DCBX_APP_SF_SHIFT); | ||
1089 | } | ||
1090 | |||
984 | *entry &= ~DCBX_APP_PROTOCOL_ID_MASK; | 1091 | *entry &= ~DCBX_APP_PROTOCOL_ID_MASK; |
985 | *entry |= ((u32)p_params->app_entry[i].proto_id << | 1092 | *entry |= ((u32)p_params->app_entry[i].proto_id << |
986 | DCBX_APP_PROTOCOL_ID_SHIFT); | 1093 | DCBX_APP_PROTOCOL_ID_SHIFT); |
@@ -995,15 +1102,19 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn, | |||
995 | struct dcbx_local_params *local_admin, | 1102 | struct dcbx_local_params *local_admin, |
996 | struct qed_dcbx_set *params) | 1103 | struct qed_dcbx_set *params) |
997 | { | 1104 | { |
1105 | bool ieee = false; | ||
1106 | |||
998 | local_admin->flags = 0; | 1107 | local_admin->flags = 0; |
999 | memcpy(&local_admin->features, | 1108 | memcpy(&local_admin->features, |
1000 | &p_hwfn->p_dcbx_info->operational.features, | 1109 | &p_hwfn->p_dcbx_info->operational.features, |
1001 | sizeof(local_admin->features)); | 1110 | sizeof(local_admin->features)); |
1002 | 1111 | ||
1003 | if (params->enabled) | 1112 | if (params->enabled) { |
1004 | local_admin->config = params->ver_num; | 1113 | local_admin->config = params->ver_num; |
1005 | else | 1114 | ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE); |
1115 | } else { | ||
1006 | local_admin->config = DCBX_CONFIG_VERSION_DISABLED; | 1116 | local_admin->config = DCBX_CONFIG_VERSION_DISABLED; |
1117 | } | ||
1007 | 1118 | ||
1008 | if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG) | 1119 | if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG) |
1009 | qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc, | 1120 | qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc, |
@@ -1015,7 +1126,7 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn, | |||
1015 | 1126 | ||
1016 | if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG) | 1127 | if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG) |
1017 | qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app, | 1128 | qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app, |
1018 | ¶ms->config.params); | 1129 | ¶ms->config.params, ieee); |
1019 | } | 1130 | } |
1020 | 1131 | ||
1021 | int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, | 1132 | int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
@@ -1596,8 +1707,10 @@ static int qed_dcbnl_setapp(struct qed_dev *cdev, | |||
1596 | if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) | 1707 | if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) |
1597 | break; | 1708 | break; |
1598 | /* First empty slot */ | 1709 | /* First empty slot */ |
1599 | if (!entry->proto_id) | 1710 | if (!entry->proto_id) { |
1711 | dcbx_set.config.params.num_app_entries++; | ||
1600 | break; | 1712 | break; |
1713 | } | ||
1601 | } | 1714 | } |
1602 | 1715 | ||
1603 | if (i == QED_DCBX_MAX_APP_PROTOCOL) { | 1716 | if (i == QED_DCBX_MAX_APP_PROTOCOL) { |
@@ -2117,8 +2230,10 @@ int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app) | |||
2117 | (entry->proto_id == app->protocol)) | 2230 | (entry->proto_id == app->protocol)) |
2118 | break; | 2231 | break; |
2119 | /* First empty slot */ | 2232 | /* First empty slot */ |
2120 | if (!entry->proto_id) | 2233 | if (!entry->proto_id) { |
2234 | dcbx_set.config.params.num_app_entries++; | ||
2121 | break; | 2235 | break; |
2236 | } | ||
2122 | } | 2237 | } |
2123 | 2238 | ||
2124 | if (i == QED_DCBX_MAX_APP_PROTOCOL) { | 2239 | if (i == QED_DCBX_MAX_APP_PROTOCOL) { |
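
The qed_dcbx.c changes above teach the APP-TLV parsers to honor the IEEE selection field (SF) when the operational DCBX version is IEEE, falling back to the legacy CEE-style field when an old MFW leaves the IEEE field at its reserved value. Below is a standalone sketch of that decode, reusing the mask/shift values from the qed_hsi.h hunk that follows; GET_FIELD is a local stand-in for QED_MFW_GET_FIELD:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DCBX_APP_SF_IEEE_MASK           0x0000f000
    #define DCBX_APP_SF_IEEE_SHIFT          12
    #define DCBX_APP_SF_IEEE_RESERVED       0
    #define DCBX_APP_SF_IEEE_ETHTYPE        1
    #define DCBX_APP_SF_IEEE_TCP_PORT       2
    #define DCBX_APP_SF_IEEE_UDP_PORT       3
    #define DCBX_APP_SF_IEEE_TCP_UDP_PORT   4

    #define GET_FIELD(val, name) (((val) & name##_MASK) >> name##_SHIFT)

    /* An old MFW leaves the IEEE field at its reserved value, so the
     * caller must fall back to the legacy DCBX_APP_SF decode. */
    static bool ieee_app_is_tcp_port(uint32_t entry, bool *need_legacy_fallback)
    {
            uint8_t sf = GET_FIELD(entry, DCBX_APP_SF_IEEE);

            *need_legacy_fallback = (sf == DCBX_APP_SF_IEEE_RESERVED);
            return sf == DCBX_APP_SF_IEEE_TCP_PORT ||
                   sf == DCBX_APP_SF_IEEE_TCP_UDP_PORT; /* matches both */
    }

    int main(void)
    {
            uint32_t entry = (uint32_t)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
                             DCBX_APP_SF_IEEE_SHIFT;
            bool fallback;
            bool match = ieee_app_is_tcp_port(entry, &fallback);

            printf("tcp-port match: %d, legacy fallback: %d\n",
                   match, fallback);
            return 0;
    }
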
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 592784019994..6f9d3b831a2a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h | |||
@@ -6850,6 +6850,14 @@ struct dcbx_app_priority_entry { | |||
6850 | #define DCBX_APP_SF_SHIFT 8 | 6850 | #define DCBX_APP_SF_SHIFT 8 |
6851 | #define DCBX_APP_SF_ETHTYPE 0 | 6851 | #define DCBX_APP_SF_ETHTYPE 0 |
6852 | #define DCBX_APP_SF_PORT 1 | 6852 | #define DCBX_APP_SF_PORT 1 |
6853 | #define DCBX_APP_SF_IEEE_MASK 0x0000f000 | ||
6854 | #define DCBX_APP_SF_IEEE_SHIFT 12 | ||
6855 | #define DCBX_APP_SF_IEEE_RESERVED 0 | ||
6856 | #define DCBX_APP_SF_IEEE_ETHTYPE 1 | ||
6857 | #define DCBX_APP_SF_IEEE_TCP_PORT 2 | ||
6858 | #define DCBX_APP_SF_IEEE_UDP_PORT 3 | ||
6859 | #define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4 | ||
6860 | |||
6853 | #define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 | 6861 | #define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 |
6854 | #define DCBX_APP_PROTOCOL_ID_SHIFT 16 | 6862 | #define DCBX_APP_PROTOCOL_ID_SHIFT 16 |
6855 | }; | 6863 | }; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index fd973f4f16c7..49bad00a0f8f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | |||
@@ -37,8 +37,8 @@ | |||
37 | 37 | ||
38 | #define _QLCNIC_LINUX_MAJOR 5 | 38 | #define _QLCNIC_LINUX_MAJOR 5 |
39 | #define _QLCNIC_LINUX_MINOR 3 | 39 | #define _QLCNIC_LINUX_MINOR 3 |
40 | #define _QLCNIC_LINUX_SUBVERSION 64 | 40 | #define _QLCNIC_LINUX_SUBVERSION 65 |
41 | #define QLCNIC_LINUX_VERSIONID "5.3.64" | 41 | #define QLCNIC_LINUX_VERSIONID "5.3.65" |
42 | #define QLCNIC_DRV_IDC_VER 0x01 | 42 | #define QLCNIC_DRV_IDC_VER 0x01 |
43 | #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ | 43 | #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ |
44 | (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) | 44 | (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 87c642d3b075..fedd7366713c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | |||
@@ -102,7 +102,6 @@ | |||
102 | #define QLCNIC_RESPONSE_DESC 0x05 | 102 | #define QLCNIC_RESPONSE_DESC 0x05 |
103 | #define QLCNIC_LRO_DESC 0x12 | 103 | #define QLCNIC_LRO_DESC 0x12 |
104 | 104 | ||
105 | #define QLCNIC_TX_POLL_BUDGET 128 | ||
106 | #define QLCNIC_TCP_HDR_SIZE 20 | 105 | #define QLCNIC_TCP_HDR_SIZE 20 |
107 | #define QLCNIC_TCP_TS_OPTION_SIZE 12 | 106 | #define QLCNIC_TCP_TS_OPTION_SIZE 12 |
108 | #define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63) | 107 | #define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63) |
@@ -2008,7 +2007,6 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget) | |||
2008 | struct qlcnic_host_tx_ring *tx_ring; | 2007 | struct qlcnic_host_tx_ring *tx_ring; |
2009 | struct qlcnic_adapter *adapter; | 2008 | struct qlcnic_adapter *adapter; |
2010 | 2009 | ||
2011 | budget = QLCNIC_TX_POLL_BUDGET; | ||
2012 | tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi); | 2010 | tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi); |
2013 | adapter = tx_ring->adapter; | 2011 | adapter = tx_ring->adapter; |
2014 | work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget); | 2012 | work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h index 017d8c2c8285..24061b9b92e8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | |||
@@ -156,10 +156,8 @@ struct qlcnic_vf_info { | |||
156 | spinlock_t vlan_list_lock; /* Lock for VLAN list */ | 156 | spinlock_t vlan_list_lock; /* Lock for VLAN list */ |
157 | }; | 157 | }; |
158 | 158 | ||
159 | struct qlcnic_async_work_list { | 159 | struct qlcnic_async_cmd { |
160 | struct list_head list; | 160 | struct list_head list; |
161 | struct work_struct work; | ||
162 | void *ptr; | ||
163 | struct qlcnic_cmd_args *cmd; | 161 | struct qlcnic_cmd_args *cmd; |
164 | }; | 162 | }; |
165 | 163 | ||
@@ -168,7 +166,10 @@ struct qlcnic_back_channel { | |||
168 | struct workqueue_struct *bc_trans_wq; | 166 | struct workqueue_struct *bc_trans_wq; |
169 | struct workqueue_struct *bc_async_wq; | 167 | struct workqueue_struct *bc_async_wq; |
170 | struct workqueue_struct *bc_flr_wq; | 168 | struct workqueue_struct *bc_flr_wq; |
171 | struct list_head async_list; | 169 | struct qlcnic_adapter *adapter; |
170 | struct list_head async_cmd_list; | ||
171 | struct work_struct vf_async_work; | ||
172 | spinlock_t queue_lock; /* async_cmd_list queue lock */ | ||
172 | }; | 173 | }; |
173 | 174 | ||
174 | struct qlcnic_sriov { | 175 | struct qlcnic_sriov { |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 7327b729ba2e..d7107055ec60 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #define QLC_83XX_VF_RESET_FAIL_THRESH 8 | 29 | #define QLC_83XX_VF_RESET_FAIL_THRESH 8 |
30 | #define QLC_BC_CMD_MAX_RETRY_CNT 5 | 30 | #define QLC_BC_CMD_MAX_RETRY_CNT 5 |
31 | 31 | ||
32 | static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work); | ||
32 | static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *); | 33 | static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *); |
33 | static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32); | 34 | static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32); |
34 | static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *); | 35 | static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *); |
@@ -177,7 +178,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) | |||
177 | } | 178 | } |
178 | 179 | ||
179 | bc->bc_async_wq = wq; | 180 | bc->bc_async_wq = wq; |
180 | INIT_LIST_HEAD(&bc->async_list); | 181 | INIT_LIST_HEAD(&bc->async_cmd_list); |
182 | INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd); | ||
183 | spin_lock_init(&bc->queue_lock); | ||
184 | bc->adapter = adapter; | ||
181 | 185 | ||
182 | for (i = 0; i < num_vfs; i++) { | 186 | for (i = 0; i < num_vfs; i++) { |
183 | vf = &sriov->vf_info[i]; | 187 | vf = &sriov->vf_info[i]; |
@@ -1517,17 +1521,21 @@ static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac, | |||
1517 | 1521 | ||
1518 | void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc) | 1522 | void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc) |
1519 | { | 1523 | { |
1520 | struct list_head *head = &bc->async_list; | 1524 | struct list_head *head = &bc->async_cmd_list; |
1521 | struct qlcnic_async_work_list *entry; | 1525 | struct qlcnic_async_cmd *entry; |
1522 | 1526 | ||
1523 | flush_workqueue(bc->bc_async_wq); | 1527 | flush_workqueue(bc->bc_async_wq); |
1528 | cancel_work_sync(&bc->vf_async_work); | ||
1529 | |||
1530 | spin_lock(&bc->queue_lock); | ||
1524 | while (!list_empty(head)) { | 1531 | while (!list_empty(head)) { |
1525 | entry = list_entry(head->next, struct qlcnic_async_work_list, | 1532 | entry = list_entry(head->next, struct qlcnic_async_cmd, |
1526 | list); | 1533 | list); |
1527 | cancel_work_sync(&entry->work); | ||
1528 | list_del(&entry->list); | 1534 | list_del(&entry->list); |
1535 | kfree(entry->cmd); | ||
1529 | kfree(entry); | 1536 | kfree(entry); |
1530 | } | 1537 | } |
1538 | spin_unlock(&bc->queue_lock); | ||
1531 | } | 1539 | } |
1532 | 1540 | ||
1533 | void qlcnic_sriov_vf_set_multi(struct net_device *netdev) | 1541 | void qlcnic_sriov_vf_set_multi(struct net_device *netdev) |
@@ -1587,57 +1595,64 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev) | |||
1587 | 1595 | ||
1588 | static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work) | 1596 | static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work) |
1589 | { | 1597 | { |
1590 | struct qlcnic_async_work_list *entry; | 1598 | struct qlcnic_async_cmd *entry, *tmp; |
1591 | struct qlcnic_adapter *adapter; | 1599 | struct qlcnic_back_channel *bc; |
1592 | struct qlcnic_cmd_args *cmd; | 1600 | struct qlcnic_cmd_args *cmd; |
1601 | struct list_head *head; | ||
1602 | LIST_HEAD(del_list); | ||
1603 | |||
1604 | bc = container_of(work, struct qlcnic_back_channel, vf_async_work); | ||
1605 | head = &bc->async_cmd_list; | ||
1606 | |||
1607 | spin_lock(&bc->queue_lock); | ||
1608 | list_splice_init(head, &del_list); | ||
1609 | spin_unlock(&bc->queue_lock); | ||
1610 | |||
1611 | list_for_each_entry_safe(entry, tmp, &del_list, list) { | ||
1612 | list_del(&entry->list); | ||
1613 | cmd = entry->cmd; | ||
1614 | __qlcnic_sriov_issue_cmd(bc->adapter, cmd); | ||
1615 | kfree(entry); | ||
1616 | } | ||
1617 | |||
1618 | if (!list_empty(head)) | ||
1619 | queue_work(bc->bc_async_wq, &bc->vf_async_work); | ||
1593 | 1620 | ||
1594 | entry = container_of(work, struct qlcnic_async_work_list, work); | ||
1595 | adapter = entry->ptr; | ||
1596 | cmd = entry->cmd; | ||
1597 | __qlcnic_sriov_issue_cmd(adapter, cmd); | ||
1598 | return; | 1621 | return; |
1599 | } | 1622 | } |
1600 | 1623 | ||
1601 | static struct qlcnic_async_work_list * | 1624 | static struct qlcnic_async_cmd * |
1602 | qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc) | 1625 | qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc, |
1626 | struct qlcnic_cmd_args *cmd) | ||
1603 | { | 1627 | { |
1604 | struct list_head *node; | 1628 | struct qlcnic_async_cmd *entry = NULL; |
1605 | struct qlcnic_async_work_list *entry = NULL; | ||
1606 | u8 empty = 0; | ||
1607 | 1629 | ||
1608 | list_for_each(node, &bc->async_list) { | 1630 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); |
1609 | entry = list_entry(node, struct qlcnic_async_work_list, list); | 1631 | if (!entry) |
1610 | if (!work_pending(&entry->work)) { | 1632 | return NULL; |
1611 | empty = 1; | ||
1612 | break; | ||
1613 | } | ||
1614 | } | ||
1615 | 1633 | ||
1616 | if (!empty) { | 1634 | entry->cmd = cmd; |
1617 | entry = kzalloc(sizeof(struct qlcnic_async_work_list), | 1635 | |
1618 | GFP_ATOMIC); | 1636 | spin_lock(&bc->queue_lock); |
1619 | if (entry == NULL) | 1637 | list_add_tail(&entry->list, &bc->async_cmd_list); |
1620 | return NULL; | 1638 | spin_unlock(&bc->queue_lock); |
1621 | list_add_tail(&entry->list, &bc->async_list); | ||
1622 | } | ||
1623 | 1639 | ||
1624 | return entry; | 1640 | return entry; |
1625 | } | 1641 | } |
1626 | 1642 | ||
1627 | static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc, | 1643 | static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc, |
1628 | work_func_t func, void *data, | ||
1629 | struct qlcnic_cmd_args *cmd) | 1644 | struct qlcnic_cmd_args *cmd) |
1630 | { | 1645 | { |
1631 | struct qlcnic_async_work_list *entry = NULL; | 1646 | struct qlcnic_async_cmd *entry = NULL; |
1632 | 1647 | ||
1633 | entry = qlcnic_sriov_get_free_node_async_work(bc); | 1648 | entry = qlcnic_sriov_alloc_async_cmd(bc, cmd); |
1634 | if (!entry) | 1649 | if (!entry) { |
1650 | qlcnic_free_mbx_args(cmd); | ||
1651 | kfree(cmd); | ||
1635 | return; | 1652 | return; |
1653 | } | ||
1636 | 1654 | ||
1637 | entry->ptr = data; | 1655 | queue_work(bc->bc_async_wq, &bc->vf_async_work); |
1638 | entry->cmd = cmd; | ||
1639 | INIT_WORK(&entry->work, func); | ||
1640 | queue_work(bc->bc_async_wq, &entry->work); | ||
1641 | } | 1656 | } |
1642 | 1657 | ||
1643 | static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, | 1658 | static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, |
@@ -1649,8 +1664,8 @@ static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, | |||
1649 | if (adapter->need_fw_reset) | 1664 | if (adapter->need_fw_reset) |
1650 | return -EIO; | 1665 | return -EIO; |
1651 | 1666 | ||
1652 | qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd, | 1667 | qlcnic_sriov_schedule_async_cmd(bc, cmd); |
1653 | adapter, cmd); | 1668 | |
1654 | return 0; | 1669 | return 0; |
1655 | } | 1670 | } |
1656 | 1671 | ||
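
The qlcnic SR-IOV hunks above replace the old one-work-item-per-command scheme, with its racy work_pending() slot scan, by a single work item draining a spinlock-protected command list. A user-space sketch of that producer/consumer shape, with a pthread mutex standing in for bc->queue_lock and a plain singly linked list standing in for list_head:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for struct qlcnic_async_cmd. */
    struct async_cmd {
            struct async_cmd *next;
            int cmd;                /* stand-in for qlcnic_cmd_args */
    };

    static struct async_cmd *queue_head;
    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Producer: allocate an entry, enqueue it, kick the single worker.
     * (The driver keeps FIFO order with list_add_tail; this sketch
     * prepends for brevity.) */
    static int schedule_async_cmd(int cmd)
    {
            struct async_cmd *entry = calloc(1, sizeof(*entry));

            if (!entry)
                    return -1;      /* the driver frees the cmd args here */
            entry->cmd = cmd;
            pthread_mutex_lock(&queue_lock);
            entry->next = queue_head;
            queue_head = entry;
            pthread_mutex_unlock(&queue_lock);
            /* queue_work(bc->bc_async_wq, &bc->vf_async_work) goes here */
            return 0;
    }

    /* Worker: detach the whole list under the lock (the
     * list_splice_init() step), then drain it without holding the lock. */
    static void handle_async_cmds(void)
    {
            struct async_cmd *batch, *entry;

            pthread_mutex_lock(&queue_lock);
            batch = queue_head;
            queue_head = NULL;
            pthread_mutex_unlock(&queue_lock);

            while ((entry = batch) != NULL) {
                    batch = entry->next;
                    printf("issuing cmd %d\n", entry->cmd);
                    free(entry);
            }
    }

    int main(void)
    {
            schedule_async_cmd(1);
            schedule_async_cmd(2);
            handle_async_cmds();
            return 0;
    }
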
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index c51f34693eae..f85d605e4560 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -734,6 +734,7 @@ static void cpsw_rx_handler(void *token, int len, int status) | |||
734 | netif_receive_skb(skb); | 734 | netif_receive_skb(skb); |
735 | ndev->stats.rx_bytes += len; | 735 | ndev->stats.rx_bytes += len; |
736 | ndev->stats.rx_packets++; | 736 | ndev->stats.rx_packets++; |
737 | kmemleak_not_leak(new_skb); | ||
737 | } else { | 738 | } else { |
738 | ndev->stats.rx_dropped++; | 739 | ndev->stats.rx_dropped++; |
739 | new_skb = skb; | 740 | new_skb = skb; |
@@ -1325,6 +1326,7 @@ static int cpsw_ndo_open(struct net_device *ndev) | |||
1325 | kfree_skb(skb); | 1326 | kfree_skb(skb); |
1326 | goto err_cleanup; | 1327 | goto err_cleanup; |
1327 | } | 1328 | } |
1329 | kmemleak_not_leak(skb); | ||
1328 | } | 1330 | } |
1329 | /* continue even if we didn't manage to submit all | 1331 | /* continue even if we didn't manage to submit all |
1330 | * receive descs | 1332 | * receive descs |
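
The cpsw kmemleak_not_leak() calls above mark RX skbs whose only remaining reference sits in DMA descriptors, which kmemleak cannot scan, so the allocations would otherwise be reported as false-positive leaks. A kernel-style sketch (not a standalone program) of where such a call belongs; queue_one_rx_buf() is illustrative, not a cpsw function:

    #include <linux/errno.h>
    #include <linux/kmemleak.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Once the only reference to the skb lives in a hardware descriptor
     * ring, kmemleak cannot find it by scanning memory; mark it so the
     * allocation is not reported as a leak. */
    static int queue_one_rx_buf(struct net_device *ndev, int pkt_size)
    {
            struct sk_buff *skb = netdev_alloc_skb_ip_align(ndev, pkt_size);

            if (!skb)
                    return -ENOMEM;
            /* ... submit skb->data to the DMA engine here ... */
            kmemleak_not_leak(skb);
            return 0;
    }
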
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index 01a77145a0fa..8fd131207ee1 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c | |||
@@ -166,6 +166,7 @@ static struct platform_driver tsi_eth_driver = { | |||
166 | 166 | ||
167 | static void tsi108_timed_checker(unsigned long dev_ptr); | 167 | static void tsi108_timed_checker(unsigned long dev_ptr); |
168 | 168 | ||
169 | #ifdef DEBUG | ||
169 | static void dump_eth_one(struct net_device *dev) | 170 | static void dump_eth_one(struct net_device *dev) |
170 | { | 171 | { |
171 | struct tsi108_prv_data *data = netdev_priv(dev); | 172 | struct tsi108_prv_data *data = netdev_priv(dev); |
@@ -190,6 +191,7 @@ static void dump_eth_one(struct net_device *dev) | |||
190 | TSI_READ(TSI108_EC_RXESTAT), | 191 | TSI_READ(TSI108_EC_RXESTAT), |
191 | TSI_READ(TSI108_EC_RXERR), data->rxpending); | 192 | TSI_READ(TSI108_EC_RXERR), data->rxpending); |
192 | } | 193 | } |
194 | #endif | ||
193 | 195 | ||
194 | /* Synchronization is needed between the thread and up/down events. | 196 | /* Synchronization is needed between the thread and up/down events. |
195 | * Note that the PHY is accessed through the same registers for both | 197 | * Note that the PHY is accessed through the same registers for both |
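
The tsi108 change wraps dump_eth_one() in #ifdef DEBUG so that non-DEBUG builds, where nothing appears to call it, do not emit a -Wunused-function warning for a defined-but-unused static function. A minimal standalone illustration of the same pattern:

    #include <stdio.h>

    #ifdef DEBUG
    /* Compiled only in DEBUG builds, together with its only caller, so
     * non-DEBUG builds never see an unused static function. */
    static void dump_eth_state(unsigned int rxstat)
    {
            printf("rxstat=%#x\n", rxstat);
    }
    #endif

    int main(void)
    {
    #ifdef DEBUG
            dump_eth_state(0x10);   /* no-op unless built with -DDEBUG */
    #endif
            return 0;
    }
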