Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/can/flexcan.c                                          |  91
-rw-r--r--  drivers/net/can/usb/esd_usb2.c                                     |   2
-rw-r--r--  drivers/net/can/usb/gs_usb.c                                       |  10
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_ethtool.c                      |   4
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c                       |   7
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_cfg.h                    |   8
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c                | 157
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h                     |   5
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c                    |  39
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h                    |   4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c               |  14
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.c                    |   3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c          |  21
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c          |  89
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h |   3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c       |  69
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h       |  18
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c                          |  99
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h                          |   5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c                      |  23
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c                  |   8
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c                    |  11
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c                    |   2
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c                                 |  16
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c                         |   2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c                        |  63
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c                         |  11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h                          |  31
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c              |  11
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c                |  20
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c               |   8
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c                               |   2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c                 |   2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c                    |   2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c                  |  17
-rw-r--r--  drivers/net/geneve.c                                               |   6
-rw-r--r--  drivers/net/macsec.c                                               |   2
-rw-r--r--  drivers/net/tun.c                                                  |   3
-rw-r--r--  drivers/net/wimax/i2400m/fw.c                                      |   2
-rw-r--r--  drivers/net/xen-netback/interface.c                                |   2
-rw-r--r--  drivers/net/xen-netfront.c                                         |   2
41 files changed, 616 insertions, 278 deletions
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 13f0f219d8aa..a13a4896a8bd 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -182,22 +182,23 @@
 /* FLEXCAN hardware feature flags
  *
  * Below is some version info we got:
- *    SOC   Version   IP-Version  Glitch- [TR]WRN_INT Memory err RTR re-
- *                                Filter? connected?  detection  ception in MB
- * MX25  FlexCAN2  03.00.00.00     no        no         no         no
- * MX28  FlexCAN2  03.00.04.00    yes       yes         no         no
- * MX35  FlexCAN2  03.00.00.00     no        no         no         no
- * MX53  FlexCAN2  03.00.00.00    yes        no         no         no
- * MX6s  FlexCAN3  10.00.12.00    yes       yes         no        yes
- * VF610 FlexCAN3  ?               no       yes        yes        yes?
+ *    SOC   Version   IP-Version  Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
+ *                                Filter? connected?  Passive detection  ception in MB
+ * MX25  FlexCAN2  03.00.00.00     no        no         ?       no         no
+ * MX28  FlexCAN2  03.00.04.00    yes       yes        no       no         no
+ * MX35  FlexCAN2  03.00.00.00     no        no         ?       no         no
+ * MX53  FlexCAN2  03.00.00.00    yes        no        no       no         no
+ * MX6s  FlexCAN3  10.00.12.00    yes       yes        no       no        yes
+ * VF610 FlexCAN3  ?               no       yes         ?      yes        yes?
  *
  * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
  */
-#define FLEXCAN_QUIRK_BROKEN_ERR_STATE	BIT(1) /* [TR]WRN_INT not connected */
+#define FLEXCAN_QUIRK_BROKEN_WERR_STATE	BIT(1) /* [TR]WRN_INT not connected */
 #define FLEXCAN_QUIRK_DISABLE_RXFG	BIT(2) /* Disable RX FIFO Global mask */
 #define FLEXCAN_QUIRK_ENABLE_EACEN_RRS	BIT(3) /* Enable EACEN and RRS bit in ctrl2 */
 #define FLEXCAN_QUIRK_DISABLE_MECR	BIT(4) /* Disable Memory error detection */
 #define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP	BIT(5) /* Use timestamp based offloading */
+#define FLEXCAN_QUIRK_BROKEN_PERR_STATE	BIT(6) /* No interrupt for error passive */
 
 /* Structure of the message buffer */
 struct flexcan_mb {
@@ -281,14 +282,17 @@ struct flexcan_priv {
 };
 
 static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
-	.quirks = FLEXCAN_QUIRK_BROKEN_ERR_STATE,
+	.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+		FLEXCAN_QUIRK_BROKEN_PERR_STATE,
 };
 
-static const struct flexcan_devtype_data fsl_imx28_devtype_data;
+static const struct flexcan_devtype_data fsl_imx28_devtype_data = {
+	.quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+};
 
 static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
 	.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
-		FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+		FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE,
 };
 
 static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
@@ -335,6 +339,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr)
 }
 #endif
 
+static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK);
+
+	flexcan_write(reg_ctrl, &regs->ctrl);
+}
+
+static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK);
+
+	flexcan_write(reg_ctrl, &regs->ctrl);
+}
+
 static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
 {
 	if (!priv->reg_xceiver)
@@ -713,6 +733,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
 	struct flexcan_regs __iomem *regs = priv->regs;
 	irqreturn_t handled = IRQ_NONE;
 	u32 reg_iflag1, reg_esr;
+	enum can_state last_state = priv->can.state;
 
 	reg_iflag1 = flexcan_read(&regs->iflag1);
 
@@ -765,8 +786,10 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
 		flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
 	}
 
-	/* state change interrupt */
-	if (reg_esr & FLEXCAN_ESR_ERR_STATE)
+	/* state change interrupt or broken error state quirk fix is enabled */
+	if ((reg_esr & FLEXCAN_ESR_ERR_STATE) ||
+	    (priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+					   FLEXCAN_QUIRK_BROKEN_PERR_STATE)))
 		flexcan_irq_state(dev, reg_esr);
 
 	/* bus error IRQ - handle if bus error reporting is activated */
@@ -774,6 +797,44 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
 	    (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
 		flexcan_irq_bus_err(dev, reg_esr);
 
+	/* availability of error interrupt among state transitions in case
+	 * bus error reporting is de-activated and
+	 * FLEXCAN_QUIRK_BROKEN_PERR_STATE is enabled:
+	 *  +--------------------------------------------------------------+
+	 *  |                +----------------------------------------------+ [stopped / |
+	 *  |                |                                              |  sleeping] -+
+	 *  +-+-> active <-> warning <-> passive -> bus off -+
+	 *      ___________^^^^^^^^^^^^_______________________________
+	 *      disabled(1)   enabled                         disabled
+	 *
+	 * (1): enabled if FLEXCAN_QUIRK_BROKEN_WERR_STATE is enabled
+	 */
+	if ((last_state != priv->can.state) &&
+	    (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) &&
+	    !(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
+		switch (priv->can.state) {
+		case CAN_STATE_ERROR_ACTIVE:
+			if (priv->devtype_data->quirks &
+			    FLEXCAN_QUIRK_BROKEN_WERR_STATE)
+				flexcan_error_irq_enable(priv);
+			else
+				flexcan_error_irq_disable(priv);
+			break;
+
+		case CAN_STATE_ERROR_WARNING:
+			flexcan_error_irq_enable(priv);
+			break;
+
+		case CAN_STATE_ERROR_PASSIVE:
+		case CAN_STATE_BUS_OFF:
+			flexcan_error_irq_disable(priv);
+			break;
+
+		default:
+			break;
+		}
+	}
+
 	return handled;
 }
 
@@ -887,7 +948,7 @@ static int flexcan_chip_start(struct net_device *dev)
 	 * on most Flexcan cores, too. Otherwise we don't get
 	 * any error warning or passive interrupts.
 	 */
-	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_ERR_STATE ||
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE ||
 	    priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
 		reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
 	else
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index be928ce62d32..9fdb0f0bfa06 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -333,7 +333,7 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
 	}
 
 	cf->can_id = id & ESD_IDMASK;
-	cf->can_dlc = get_can_dlc(msg->msg.rx.dlc);
+	cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR);
 
 	if (id & ESD_EXTID)
 		cf->can_id |= CAN_EFF_FLAG;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index afcc1312dbaf..68ac3e88a8ce 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -375,6 +375,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
 
 	gs_free_tx_context(txc);
 
+	atomic_dec(&dev->active_tx_urbs);
+
 	netif_wake_queue(netdev);
 }
 
@@ -463,14 +465,6 @@ static void gs_usb_xmit_callback(struct urb *urb)
 			  urb->transfer_buffer_length,
 			  urb->transfer_buffer,
 			  urb->transfer_dma);
-
-	atomic_dec(&dev->active_tx_urbs);
-
-	if (!netif_device_present(netdev))
-		return;
-
-	if (netif_queue_stopped(netdev))
-		netif_wake_queue(netdev);
 }
 
 static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 0d97311a1b26..060cb18fa659 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -743,8 +743,8 @@ static void ena_get_channels(struct net_device *netdev,
 {
 	struct ena_adapter *adapter = netdev_priv(netdev);
 
-	channels->max_rx = ENA_MAX_NUM_IO_QUEUES;
-	channels->max_tx = ENA_MAX_NUM_IO_QUEUES;
+	channels->max_rx = adapter->num_queues;
+	channels->max_tx = adapter->num_queues;
 	channels->max_other = 0;
 	channels->max_combined = 0;
 	channels->rx_count = adapter->num_queues;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 47bdbf9bdefb..5417e4da64ca 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -966,7 +966,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
 		u64_stats_update_begin(&rx_ring->syncp);
 		rx_ring->rx_stats.bad_csum++;
 		u64_stats_update_end(&rx_ring->syncp);
-		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
 			  "RX IPv4 header checksum error\n");
 		return;
 	}
@@ -979,7 +979,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
 			u64_stats_update_begin(&rx_ring->syncp);
 			rx_ring->rx_stats.bad_csum++;
 			u64_stats_update_end(&rx_ring->syncp);
-			netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+			netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
 				  "RX L4 checksum error\n");
 			skb->ip_summed = CHECKSUM_NONE;
 			return;
@@ -3051,7 +3051,8 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
 	if (ena_dev->mem_bar)
 		devm_iounmap(&pdev->dev, ena_dev->mem_bar);
 
-	devm_iounmap(&pdev->dev, ena_dev->reg_bar);
+	if (ena_dev->reg_bar)
+		devm_iounmap(&pdev->dev, ena_dev->reg_bar);
 
 	release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
 	pci_release_selected_regions(pdev, release_bars);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 0fdaaa643073..57e796870595 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -22,8 +22,12 @@
 
 #define AQ_CFG_FORCE_LEGACY_INT 0U
 
-#define AQ_CFG_IS_INTERRUPT_MODERATION_DEF   1U
-#define AQ_CFG_INTERRUPT_MODERATION_RATE_DEF 0xFFFFU
+#define AQ_CFG_INTERRUPT_MODERATION_OFF		0
+#define AQ_CFG_INTERRUPT_MODERATION_ON		1
+#define AQ_CFG_INTERRUPT_MODERATION_AUTO	0xFFFFU
+
+#define AQ_CFG_INTERRUPT_MODERATION_USEC_MAX	(0x1FF * 2)
+
 #define AQ_CFG_IRQ_MASK                      0x1FFU
 
 #define AQ_CFG_VECS_MAX   8U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a761e91471df..d5e99b468870 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -56,10 +56,6 @@ aq_ethtool_set_link_ksettings(struct net_device *ndev,
 	return aq_nic_set_link_ksettings(aq_nic, cmd);
 }
 
-/* there "5U" is number of queue[#] stats lines (InPackets+...+InErrors) */
-static const unsigned int aq_ethtool_stat_queue_lines = 5U;
-static const unsigned int aq_ethtool_stat_queue_chars =
-	5U * ETH_GSTRING_LEN;
 static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
 	"InPackets",
 	"InUCast",
@@ -83,56 +79,26 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
 	"InOctetsDma",
 	"OutOctetsDma",
 	"InDroppedDma",
-	"Queue[0] InPackets",
-	"Queue[0] OutPackets",
-	"Queue[0] InJumboPackets",
-	"Queue[0] InLroPackets",
-	"Queue[0] InErrors",
-	"Queue[1] InPackets",
-	"Queue[1] OutPackets",
-	"Queue[1] InJumboPackets",
-	"Queue[1] InLroPackets",
-	"Queue[1] InErrors",
-	"Queue[2] InPackets",
-	"Queue[2] OutPackets",
-	"Queue[2] InJumboPackets",
-	"Queue[2] InLroPackets",
-	"Queue[2] InErrors",
-	"Queue[3] InPackets",
-	"Queue[3] OutPackets",
-	"Queue[3] InJumboPackets",
-	"Queue[3] InLroPackets",
-	"Queue[3] InErrors",
-	"Queue[4] InPackets",
-	"Queue[4] OutPackets",
-	"Queue[4] InJumboPackets",
-	"Queue[4] InLroPackets",
-	"Queue[4] InErrors",
-	"Queue[5] InPackets",
-	"Queue[5] OutPackets",
-	"Queue[5] InJumboPackets",
-	"Queue[5] InLroPackets",
-	"Queue[5] InErrors",
-	"Queue[6] InPackets",
-	"Queue[6] OutPackets",
-	"Queue[6] InJumboPackets",
-	"Queue[6] InLroPackets",
-	"Queue[6] InErrors",
-	"Queue[7] InPackets",
-	"Queue[7] OutPackets",
-	"Queue[7] InJumboPackets",
-	"Queue[7] InLroPackets",
-	"Queue[7] InErrors",
+};
+
+static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = {
+	"Queue[%d] InPackets",
+	"Queue[%d] OutPackets",
+	"Queue[%d] Restarts",
+	"Queue[%d] InJumboPackets",
+	"Queue[%d] InLroPackets",
+	"Queue[%d] InErrors",
 };
 
 static void aq_ethtool_stats(struct net_device *ndev,
 			     struct ethtool_stats *stats, u64 *data)
 {
 	struct aq_nic_s *aq_nic = netdev_priv(ndev);
+	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
 
-/* ASSERT: Need add lines to aq_ethtool_stat_names if AQ_CFG_VECS_MAX > 8 */
-	BUILD_BUG_ON(AQ_CFG_VECS_MAX > 8);
-	memset(data, 0, ARRAY_SIZE(aq_ethtool_stat_names) * sizeof(u64));
+	memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) +
+			 ARRAY_SIZE(aq_ethtool_queue_stat_names) *
+			 cfg->vecs) * sizeof(u64));
 	aq_nic_get_stats(aq_nic, data);
 }
 
@@ -154,8 +120,8 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
 
 	strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
 		sizeof(drvinfo->bus_info));
-	drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) -
-		(AQ_CFG_VECS_MAX - cfg->vecs) * aq_ethtool_stat_queue_lines;
+	drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) +
+		cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
 	drvinfo->testinfo_len = 0;
 	drvinfo->regdump_len = regs_count;
 	drvinfo->eedump_len = 0;
@@ -164,14 +130,25 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
 static void aq_ethtool_get_strings(struct net_device *ndev,
 				   u32 stringset, u8 *data)
 {
+	int i, si;
 	struct aq_nic_s *aq_nic = netdev_priv(ndev);
 	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
-
-	if (stringset == ETH_SS_STATS)
-		memcpy(data, *aq_ethtool_stat_names,
-		       sizeof(aq_ethtool_stat_names) -
-		       (AQ_CFG_VECS_MAX - cfg->vecs) *
-		       aq_ethtool_stat_queue_chars);
+	u8 *p = data;
+
+	if (stringset == ETH_SS_STATS) {
+		memcpy(p, *aq_ethtool_stat_names,
+		       sizeof(aq_ethtool_stat_names));
+		p = p + sizeof(aq_ethtool_stat_names);
+		for (i = 0; i < cfg->vecs; i++) {
+			for (si = 0;
+			     si < ARRAY_SIZE(aq_ethtool_queue_stat_names);
+			     si++) {
+				snprintf(p, ETH_GSTRING_LEN,
+					 aq_ethtool_queue_stat_names[si], i);
+				p += ETH_GSTRING_LEN;
+			}
+		}
+	}
 }
 
 static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
@@ -182,9 +159,8 @@ static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
 
 	switch (stringset) {
 	case ETH_SS_STATS:
-		ret = ARRAY_SIZE(aq_ethtool_stat_names) -
-			(AQ_CFG_VECS_MAX - cfg->vecs) *
-			aq_ethtool_stat_queue_lines;
+		ret = ARRAY_SIZE(aq_ethtool_stat_names) +
+			cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
 		break;
 	default:
 		ret = -EOPNOTSUPP;
@@ -245,6 +221,69 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
 	return err;
 }
 
+int aq_ethtool_get_coalesce(struct net_device *ndev,
+			    struct ethtool_coalesce *coal)
+{
+	struct aq_nic_s *aq_nic = netdev_priv(ndev);
+	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+
+	if (cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON ||
+	    cfg->itr == AQ_CFG_INTERRUPT_MODERATION_AUTO) {
+		coal->rx_coalesce_usecs = cfg->rx_itr;
+		coal->tx_coalesce_usecs = cfg->tx_itr;
+		coal->rx_max_coalesced_frames = 0;
+		coal->tx_max_coalesced_frames = 0;
+	} else {
+		coal->rx_coalesce_usecs = 0;
+		coal->tx_coalesce_usecs = 0;
+		coal->rx_max_coalesced_frames = 1;
+		coal->tx_max_coalesced_frames = 1;
+	}
+	return 0;
+}
+
+int aq_ethtool_set_coalesce(struct net_device *ndev,
+			    struct ethtool_coalesce *coal)
+{
+	struct aq_nic_s *aq_nic = netdev_priv(ndev);
+	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+
+	/* This is not yet supported
+	 */
+	if (coal->use_adaptive_rx_coalesce || coal->use_adaptive_tx_coalesce)
+		return -EOPNOTSUPP;
+
+	/* Atlantic only supports timing based coalescing
+	 */
+	if (coal->rx_max_coalesced_frames > 1 ||
+	    coal->rx_coalesce_usecs_irq ||
+	    coal->rx_max_coalesced_frames_irq)
+		return -EOPNOTSUPP;
+
+	if (coal->tx_max_coalesced_frames > 1 ||
+	    coal->tx_coalesce_usecs_irq ||
+	    coal->tx_max_coalesced_frames_irq)
+		return -EOPNOTSUPP;
+
+	/* We do not support frame counting. Check this
+	 */
+	if (!(coal->rx_max_coalesced_frames == !coal->rx_coalesce_usecs))
+		return -EOPNOTSUPP;
+	if (!(coal->tx_max_coalesced_frames == !coal->tx_coalesce_usecs))
+		return -EOPNOTSUPP;
+
+	if (coal->rx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX ||
+	    coal->tx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX)
+		return -EINVAL;
+
+	cfg->itr = AQ_CFG_INTERRUPT_MODERATION_ON;
+
+	cfg->rx_itr = coal->rx_coalesce_usecs;
+	cfg->tx_itr = coal->tx_coalesce_usecs;
+
+	return aq_nic_update_interrupt_moderation_settings(aq_nic);
+}
+
 const struct ethtool_ops aq_ethtool_ops = {
 	.get_link            = aq_ethtool_get_link,
 	.get_regs_len        = aq_ethtool_get_regs_len,
@@ -259,4 +298,6 @@ const struct ethtool_ops aq_ethtool_ops = {
 	.get_ethtool_stats   = aq_ethtool_stats,
 	.get_link_ksettings  = aq_ethtool_get_link_ksettings,
 	.set_link_ksettings  = aq_ethtool_set_link_ksettings,
+	.get_coalesce	     = aq_ethtool_get_coalesce,
+	.set_coalesce	     = aq_ethtool_set_coalesce,
 };
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index bf9b3f020e10..0207927dc8a6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -151,8 +151,7 @@ struct aq_hw_ops {
 				     [ETH_ALEN],
 				     u32 count);
 
-	int (*hw_interrupt_moderation_set)(struct aq_hw_s *self,
-					   bool itr_enabled);
+	int (*hw_interrupt_moderation_set)(struct aq_hw_s *self);
 
 	int (*hw_rss_set)(struct aq_hw_s *self,
 			  struct aq_rss_parameters *rss_params);
@@ -163,6 +162,8 @@ struct aq_hw_ops {
 	int (*hw_get_regs)(struct aq_hw_s *self,
 			   struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff);
 
+	int (*hw_update_stats)(struct aq_hw_s *self);
+
 	int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data,
 			       unsigned int *p_count);
 
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 0a5bb4114eb4..483e97691eea 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -16,6 +16,7 @@
 #include "aq_pci_func.h"
 #include "aq_nic_internal.h"
 
+#include <linux/moduleparam.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/timer.h>
@@ -24,6 +25,18 @@
 #include <linux/tcp.h>
 #include <net/ip.h>
 
+static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
+module_param_named(aq_itr, aq_itr, uint, 0644);
+MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");
+
+static unsigned int aq_itr_tx;
+module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
+MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");
+
+static unsigned int aq_itr_rx;
+module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
+MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");
+
 static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
 {
 	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
@@ -61,9 +74,9 @@ static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
 
 	cfg->is_polling = AQ_CFG_IS_POLLING_DEF;
 
-	cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF;
-	cfg->itr = cfg->is_interrupt_moderation ?
-		AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U;
+	cfg->itr = aq_itr;
+	cfg->tx_itr = aq_itr_tx;
+	cfg->rx_itr = aq_itr_rx;
 
 	cfg->is_rss = AQ_CFG_IS_RSS_DEF;
 	cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
@@ -126,10 +139,12 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
 	if (err)
 		return err;
 
-	if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps)
+	if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
 		pr_info("%s: link change old %d new %d\n",
 			AQ_CFG_DRV_NAME, self->link_status.mbps,
 			self->aq_hw->aq_link_status.mbps);
+		aq_nic_update_interrupt_moderation_settings(self);
+	}
 
 	self->link_status = self->aq_hw->aq_link_status;
 	if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
@@ -164,8 +179,8 @@ static void aq_nic_service_timer_cb(unsigned long param)
 	if (err)
 		goto err_exit;
 
-	self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
-		    self->aq_nic_cfg.is_interrupt_moderation);
+	if (self->aq_hw_ops.hw_update_stats)
+		self->aq_hw_ops.hw_update_stats(self->aq_hw);
 
 	memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
 	memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
@@ -334,6 +349,7 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
 	}
 	if (netif_running(ndev))
 		netif_tx_disable(ndev);
+	netif_carrier_off(self->ndev);
 
 	for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
 	     self->aq_vecs++) {
@@ -421,9 +437,8 @@ int aq_nic_start(struct aq_nic_s *self)
 	if (err < 0)
 		goto err_exit;
 
-	err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
-			    self->aq_nic_cfg.is_interrupt_moderation);
-	if (err < 0)
+	err = aq_nic_update_interrupt_moderation_settings(self);
+	if (err)
 		goto err_exit;
 	setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
 		    (unsigned long)self);
@@ -645,6 +660,11 @@ err_exit:
 	return err;
 }
 
+int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
+{
+	return self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw);
+}
+
 int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
 {
 	int err = 0;
@@ -899,6 +919,7 @@ int aq_nic_stop(struct aq_nic_s *self)
 	unsigned int i = 0U;
 
 	netif_tx_disable(self->ndev);
+	netif_carrier_off(self->ndev);
 
 	del_timer_sync(&self->service_timer);
 
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 0ddd556ff901..4309983acdd6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -40,6 +40,8 @@ struct aq_nic_cfg_s {
 	u32 vecs;		/* vecs==allocated irqs */
 	u32 irq_type;
 	u32 itr;
+	u16 rx_itr;
+	u16 tx_itr;
 	u32 num_rss_queues;
 	u32 mtu;
 	u32 ucp_0x364;
@@ -49,7 +51,6 @@ struct aq_nic_cfg_s {
 	u16 is_mc_list_enabled;
 	u16 mc_list_count;
 	bool is_autoneg;
-	bool is_interrupt_moderation;
 	bool is_polling;
 	bool is_rss;
 	bool is_lro;
@@ -104,5 +105,6 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
 struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
 u32 aq_nic_get_fw_version(struct aq_nic_s *self);
 int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
+int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
 
 #endif /* AQ_NIC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 4c6c882c6a1c..cadaa646c89f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -85,6 +85,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
 	int err = 0;
 	unsigned int bar = 0U;
 	unsigned int port = 0U;
+	unsigned int numvecs = 0U;
 
 	err = pci_enable_device(self->pdev);
 	if (err < 0)
@@ -142,10 +143,12 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
 		}
 	}
 
-	/*enable interrupts */
+	numvecs = min((u8)AQ_CFG_VECS_DEF, self->aq_hw_caps.msix_irqs);
+	numvecs = min(numvecs, num_online_cpus());
+
+	/* enable interrupts */
 #if !AQ_CFG_FORCE_LEGACY_INT
-	err = pci_alloc_irq_vectors(self->pdev, self->aq_hw_caps.msix_irqs,
-			      self->aq_hw_caps.msix_irqs, PCI_IRQ_MSIX);
+	err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs, PCI_IRQ_MSIX);
 
 	if (err < 0) {
 		err = pci_alloc_irq_vectors(self->pdev, 1, 1,
151 err = pci_alloc_irq_vectors(self->pdev, 1, 1, 154 err = pci_alloc_irq_vectors(self->pdev, 1, 1,
@@ -153,7 +156,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
 		if (err < 0)
 			goto err_exit;
 	}
-#endif
+#endif /* AQ_CFG_FORCE_LEGACY_INT */
 
 	/* net device init */
 	for (port = 0; port < self->ports; ++port) {
@@ -265,6 +268,9 @@ void aq_pci_func_free(struct aq_pci_func_s *self)
 		aq_nic_ndev_free(self->port[port]);
 	}
 
+	if (self->mmio)
+		iounmap(self->mmio);
+
 	kfree(self);
 
 err_exit:;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index 305ff8ffac2c..5fecc9a099ef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -373,8 +373,11 @@ int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
 	memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
 	aq_vec_add_stats(self, &stats_rx, &stats_tx);
 
+	/* This data should mimic aq_ethtool_queue_stat_names structure
+	 */
 	data[count] += stats_rx.packets;
 	data[++count] += stats_tx.packets;
+	data[++count] += stats_tx.queue_restarts;
 	data[++count] += stats_rx.jumbo_packets;
 	data[++count] += stats_rx.lro_packets;
 	data[++count] += stats_rx.errors;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index c5a02df7a48b..07b3c49a16a4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -765,24 +765,23 @@ err_exit:
 	return err;
 }
 
-static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
-						 bool itr_enabled)
+static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 {
 	unsigned int i = 0U;
+	u32 itr_rx;
 
-	if (itr_enabled && self->aq_nic_cfg->itr) {
-		if (self->aq_nic_cfg->itr != 0xFFFFU) {
+	if (self->aq_nic_cfg->itr) {
+		if (self->aq_nic_cfg->itr != AQ_CFG_INTERRUPT_MODERATION_AUTO) {
 			u32 itr_ = (self->aq_nic_cfg->itr >> 1);
 
 			itr_ = min(AQ_CFG_IRQ_MASK, itr_);
 
-			PHAL_ATLANTIC_A0->itr_rx = 0x80000000U |
-					(itr_ << 0x10);
+			itr_rx = 0x80000000U | (itr_ << 0x10);
 		} else {
 			u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U);
 
 			if (n < self->aq_link_status.mbps) {
-				PHAL_ATLANTIC_A0->itr_rx = 0U;
+				itr_rx = 0U;
 			} else {
 				static unsigned int hw_timers_tbl_[] = {
 					0x01CU, /* 10Gbit */
@@ -797,8 +796,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
 					hw_atl_utils_mbps_2_speed_index(
 						self->aq_link_status.mbps);
 
-				PHAL_ATLANTIC_A0->itr_rx =
-					0x80000000U |
+				itr_rx = 0x80000000U |
 					(hw_timers_tbl_[speed_index] << 0x10U);
 			}
 
@@ -806,11 +804,11 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
 			aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U);
 		}
 	} else {
-		PHAL_ATLANTIC_A0->itr_rx = 0U;
+		itr_rx = 0U;
 	}
 
 	for (i = HW_ATL_A0_RINGS_MAX; i--;)
-		reg_irq_thr_set(self, PHAL_ATLANTIC_A0->itr_rx, i);
+		reg_irq_thr_set(self, itr_rx, i);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -885,6 +883,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
 	.hw_rss_set = hw_atl_a0_hw_rss_set,
 	.hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set,
 	.hw_get_regs = hw_atl_utils_hw_get_regs,
+	.hw_update_stats = hw_atl_utils_update_stats,
 	.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
 	.hw_get_fw_version = hw_atl_utils_get_fw_version,
 };
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 21784cc39dab..ec68c20efcbd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -788,39 +788,45 @@ err_exit:
 	return err;
 }
 
-static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self,
-						 bool itr_enabled)
+static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 {
 	unsigned int i = 0U;
+	u32 itr_tx = 2U;
+	u32 itr_rx = 2U;
 
-	if (itr_enabled && self->aq_nic_cfg->itr) {
+	switch (self->aq_nic_cfg->itr) {
+	case AQ_CFG_INTERRUPT_MODERATION_ON:
+	case AQ_CFG_INTERRUPT_MODERATION_AUTO:
 		tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
 		tdm_tdm_intr_moder_en_set(self, 1U);
 		rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
 		rdm_rdm_intr_moder_en_set(self, 1U);
 
-		PHAL_ATLANTIC_B0->itr_tx = 2U;
-		PHAL_ATLANTIC_B0->itr_rx = 2U;
+		if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
+			/* HW timers are in 2us units */
+			int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
+			int tx_min_timer = tx_max_timer / 2;
 
-		if (self->aq_nic_cfg->itr != 0xFFFFU) {
-			unsigned int max_timer = self->aq_nic_cfg->itr / 2U;
-			unsigned int min_timer = self->aq_nic_cfg->itr / 32U;
+			int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
+			int rx_min_timer = rx_max_timer / 2;
 
-			max_timer = min(0x1FFU, max_timer);
-			min_timer = min(0xFFU, min_timer);
+			tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer);
+			tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer);
+			rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer);
+			rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer);
 
-			PHAL_ATLANTIC_B0->itr_tx |= min_timer << 0x8U;
-			PHAL_ATLANTIC_B0->itr_tx |= max_timer << 0x10U;
-			PHAL_ATLANTIC_B0->itr_rx |= min_timer << 0x8U;
-			PHAL_ATLANTIC_B0->itr_rx |= max_timer << 0x10U;
+			itr_tx |= tx_min_timer << 0x8U;
+			itr_tx |= tx_max_timer << 0x10U;
+			itr_rx |= rx_min_timer << 0x8U;
+			itr_rx |= rx_max_timer << 0x10U;
 		} else {
 			static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
-				{0xffU, 0xffU}, /* 10Gbit */
-				{0xffU, 0x1ffU}, /* 5Gbit */
-				{0xffU, 0x1ffU}, /* 5Gbit 5GS */
-				{0xffU, 0x1ffU}, /* 2.5Gbit */
-				{0xffU, 0x1ffU}, /* 1Gbit */
-				{0xffU, 0x1ffU}, /* 100Mbit */
+				{0xfU, 0xffU}, /* 10Gbit */
+				{0xfU, 0x1ffU}, /* 5Gbit */
+				{0xfU, 0x1ffU}, /* 5Gbit 5GS */
+				{0xfU, 0x1ffU}, /* 2.5Gbit */
+				{0xfU, 0x1ffU}, /* 1Gbit */
+				{0xfU, 0x1ffU}, /* 100Mbit */
 			};
 
 			static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
@@ -836,34 +842,36 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self,
 				hw_atl_utils_mbps_2_speed_index(
 					self->aq_link_status.mbps);
 
-			PHAL_ATLANTIC_B0->itr_tx |=
-				hw_atl_b0_timers_table_tx_[speed_index]
-				[0] << 0x8U; /* set min timer value */
-			PHAL_ATLANTIC_B0->itr_tx |=
-				hw_atl_b0_timers_table_tx_[speed_index]
-				[1] << 0x10U; /* set max timer value */
-
-			PHAL_ATLANTIC_B0->itr_rx |=
-				hw_atl_b0_timers_table_rx_[speed_index]
-				[0] << 0x8U; /* set min timer value */
-			PHAL_ATLANTIC_B0->itr_rx |=
-				hw_atl_b0_timers_table_rx_[speed_index]
-				[1] << 0x10U; /* set max timer value */
+			/* Update user visible ITR settings */
+			self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_
+							[speed_index][1] * 2;
+			self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_
+							[speed_index][1] * 2;
+
+			itr_tx |= hw_atl_b0_timers_table_tx_
+						[speed_index][0] << 0x8U;
+			itr_tx |= hw_atl_b0_timers_table_tx_
+						[speed_index][1] << 0x10U;
+
+			itr_rx |= hw_atl_b0_timers_table_rx_
+						[speed_index][0] << 0x8U;
+			itr_rx |= hw_atl_b0_timers_table_rx_
+						[speed_index][1] << 0x10U;
 		}
-	} else {
+		break;
+	case AQ_CFG_INTERRUPT_MODERATION_OFF:
 		tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
 		tdm_tdm_intr_moder_en_set(self, 0U);
 		rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
 		rdm_rdm_intr_moder_en_set(self, 0U);
-		PHAL_ATLANTIC_B0->itr_tx = 0U;
-		PHAL_ATLANTIC_B0->itr_rx = 0U;
+		itr_tx = 0U;
+		itr_rx = 0U;
+		break;
 	}
 
 	for (i = HW_ATL_B0_RINGS_MAX; i--;) {
-		reg_tx_intr_moder_ctrl_set(self,
-					   PHAL_ATLANTIC_B0->itr_tx, i);
-		reg_rx_intr_moder_ctrl_set(self,
-					   PHAL_ATLANTIC_B0->itr_rx, i);
+		reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
+		reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
 	}
 
 	return aq_hw_err_from_flags(self);
@@ -939,6 +947,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
 	.hw_rss_set = hw_atl_b0_hw_rss_set,
 	.hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
 	.hw_get_regs = hw_atl_utils_hw_get_regs,
+	.hw_update_stats = hw_atl_utils_update_stats,
 	.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
 	.hw_get_fw_version = hw_atl_utils_get_fw_version,
 };
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index fcf89e25a773..9aa2c6edfca2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -139,6 +139,9 @@
 
 #define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U
 
+#define HW_ATL_INTR_MODER_MAX  0x1FF
+#define HW_ATL_INTR_MODER_MIN  0xFF
+
 /* Hardware tx descriptor */
 struct __packed hw_atl_txd_s {
 	u64 buf_addr;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index bf734b32e44b..1fe016fc4bc7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -255,6 +255,15 @@ err_exit:
255 return err; 255 return err;
256} 256}
257 257
258int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
259 struct hw_aq_atl_utils_mbox_header *pmbox)
260{
261 return hw_atl_utils_fw_downld_dwords(self,
262 PHAL_ATLANTIC->mbox_addr,
263 (u32 *)(void *)pmbox,
264 sizeof(*pmbox) / sizeof(u32));
265}
266
258void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, 267void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
259 struct hw_aq_atl_utils_mbox *pmbox) 268 struct hw_aq_atl_utils_mbox *pmbox)
260{ 269{
@@ -267,9 +276,6 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
 	if (err < 0)
 		goto err_exit;
 
-	if (pmbox != &PHAL_ATLANTIC->mbox)
-		memcpy(pmbox, &PHAL_ATLANTIC->mbox, sizeof(*pmbox));
-
 	if (IS_CHIP_FEATURE(REVISION_A0)) {
 		unsigned int mtu = self->aq_nic_cfg ?
 					self->aq_nic_cfg->mtu : 1514U;
@@ -299,17 +305,17 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
 {
 	int err = 0;
 	u32 transaction_id = 0;
+	struct hw_aq_atl_utils_mbox_header mbox;
 
 	if (state == MPI_RESET) {
-		hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);
+		hw_atl_utils_mpi_read_mbox(self, &mbox);
 
-		transaction_id = PHAL_ATLANTIC->mbox.transaction_id;
+		transaction_id = mbox.transaction_id;
 
 		AQ_HW_WAIT_FOR(transaction_id !=
-				(hw_atl_utils_mpi_read_stats
-					(self, &PHAL_ATLANTIC->mbox),
-					PHAL_ATLANTIC->mbox.transaction_id),
-			       1000U, 100U);
+				(hw_atl_utils_mpi_read_mbox(self, &mbox),
+				 mbox.transaction_id),
+			       1000U, 100U);
 		if (err < 0)
 			goto err_exit;
 	}
315 } 321 }
@@ -492,16 +498,51 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
 	return 0;
 }
 
+int hw_atl_utils_update_stats(struct aq_hw_s *self)
+{
+	struct hw_atl_s *hw_self = PHAL_ATLANTIC;
+	struct hw_aq_atl_utils_mbox mbox;
+
+	if (!self->aq_link_status.mbps)
+		return 0;
+
+	hw_atl_utils_mpi_read_stats(self, &mbox);
+
+#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
+			mbox.stats._N_ - hw_self->last_stats._N_)
+
+	AQ_SDELTA(uprc);
+	AQ_SDELTA(mprc);
+	AQ_SDELTA(bprc);
+	AQ_SDELTA(erpt);
+
+	AQ_SDELTA(uptc);
+	AQ_SDELTA(mptc);
+	AQ_SDELTA(bptc);
+	AQ_SDELTA(erpr);
+
+	AQ_SDELTA(ubrc);
+	AQ_SDELTA(ubtc);
+	AQ_SDELTA(mbrc);
+	AQ_SDELTA(mbtc);
+	AQ_SDELTA(bbrc);
+	AQ_SDELTA(bbtc);
+	AQ_SDELTA(dpc);
+
+#undef AQ_SDELTA
+
+	memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
+
+	return 0;
+}
+
 int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
 			      u64 *data, unsigned int *p_count)
 {
-	struct hw_atl_stats_s *stats = NULL;
+	struct hw_atl_s *hw_self = PHAL_ATLANTIC;
+	struct hw_atl_stats_s *stats = &hw_self->curr_stats;
 	int i = 0;
 
-	hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);
-
-	stats = &PHAL_ATLANTIC->mbox.stats;
-
 	data[i] = stats->uprc + stats->mprc + stats->bprc;
 	data[++i] = stats->uprc;
 	data[++i] = stats->mprc;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index e0360a6b2202..c99cc690e425 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -115,19 +115,22 @@ struct __packed hw_aq_atl_utils_fw_rpc {
115 }; 115 };
116}; 116};
117 117
118struct __packed hw_aq_atl_utils_mbox { 118struct __packed hw_aq_atl_utils_mbox_header {
119 u32 version; 119 u32 version;
120 u32 transaction_id; 120 u32 transaction_id;
121 int error; 121 u32 error;
122};
123
124struct __packed hw_aq_atl_utils_mbox {
125 struct hw_aq_atl_utils_mbox_header header;
122 struct hw_atl_stats_s stats; 126 struct hw_atl_stats_s stats;
123}; 127};
124 128
125struct __packed hw_atl_s { 129struct __packed hw_atl_s {
126 struct aq_hw_s base; 130 struct aq_hw_s base;
127 struct hw_aq_atl_utils_mbox mbox; 131 struct hw_atl_stats_s last_stats;
132 struct hw_atl_stats_s curr_stats;
128 u64 speed; 133 u64 speed;
129 u32 itr_tx;
130 u32 itr_rx;
131 unsigned int chip_features; 134 unsigned int chip_features;
132 u32 fw_ver_actual; 135 u32 fw_ver_actual;
133 atomic_t dpc; 136 atomic_t dpc;
@@ -170,6 +173,9 @@ enum hal_atl_utils_fw_state_e {
170 173
171void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p); 174void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
172 175
176int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
177 struct hw_aq_atl_utils_mbox_header *pmbox);
178
173void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, 179void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
174 struct hw_aq_atl_utils_mbox *pmbox); 180 struct hw_aq_atl_utils_mbox *pmbox);
175 181
@@ -199,6 +205,8 @@ int hw_atl_utils_hw_deinit(struct aq_hw_s *self);
199 205
200int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version); 206int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
201 207
208int hw_atl_utils_update_stats(struct aq_hw_s *self);
209
202int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, 210int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
203 u64 *data, 211 u64 *data,
204 unsigned int *p_count); 212 unsigned int *p_count);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b31bdec26fce..24d55724ceff 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -215,6 +215,8 @@ static const u16 bnxt_async_events_arr[] = {
215 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, 215 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
216}; 216};
217 217
218static struct workqueue_struct *bnxt_pf_wq;
219
218static bool bnxt_vf_pciid(enum board_idx idx) 220static bool bnxt_vf_pciid(enum board_idx idx)
219{ 221{
220 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF); 222 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
@@ -1025,12 +1027,28 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
1025 return 0; 1027 return 0;
1026} 1028}
1027 1029
1030static void bnxt_queue_sp_work(struct bnxt *bp)
1031{
1032 if (BNXT_PF(bp))
1033 queue_work(bnxt_pf_wq, &bp->sp_task);
1034 else
1035 schedule_work(&bp->sp_task);
1036}
1037
1038static void bnxt_cancel_sp_work(struct bnxt *bp)
1039{
1040 if (BNXT_PF(bp))
1041 flush_workqueue(bnxt_pf_wq);
1042 else
1043 cancel_work_sync(&bp->sp_task);
1044}
1045
1028static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 1046static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1029{ 1047{
1030 if (!rxr->bnapi->in_reset) { 1048 if (!rxr->bnapi->in_reset) {
1031 rxr->bnapi->in_reset = true; 1049 rxr->bnapi->in_reset = true;
1032 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 1050 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1033 schedule_work(&bp->sp_task); 1051 bnxt_queue_sp_work(bp);
1034 } 1052 }
1035 rxr->rx_next_cons = 0xffff; 1053 rxr->rx_next_cons = 0xffff;
1036} 1054}
@@ -1718,7 +1736,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
 	default:
 		goto async_event_process_exit;
 	}
-	schedule_work(&bp->sp_task);
+	bnxt_queue_sp_work(bp);
 async_event_process_exit:
 	bnxt_ulp_async_events(bp, cmpl);
 	return 0;
@@ -1752,7 +1770,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
 
 		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
 		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
-		schedule_work(&bp->sp_task);
+		bnxt_queue_sp_work(bp);
 		break;
 
 	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
@@ -3449,6 +3467,12 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
 	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
 }
 
+int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
+			      int timeout)
+{
+	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
+}
+
 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
 {
 	int rc;
@@ -6328,7 +6352,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 	}
 
 	if (link_re_init) {
+		mutex_lock(&bp->link_lock);
 		rc = bnxt_update_phy_setting(bp);
+		mutex_unlock(&bp->link_lock);
 		if (rc)
 			netdev_warn(bp->dev, "failed to update phy settings\n");
 	}
@@ -6648,7 +6674,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
 		vnic->rx_mask = mask;
 
 		set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
-		schedule_work(&bp->sp_task);
+		bnxt_queue_sp_work(bp);
 	}
 }
6654 6680
@@ -6921,7 +6947,7 @@ static void bnxt_tx_timeout(struct net_device *dev)
6921 6947
6922 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 6948 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
6923 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 6949 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
6924 schedule_work(&bp->sp_task); 6950 bnxt_queue_sp_work(bp);
6925} 6951}
6926 6952
6927#ifdef CONFIG_NET_POLL_CONTROLLER 6953#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -6953,7 +6979,7 @@ static void bnxt_timer(unsigned long data)
6953 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && 6979 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
6954 bp->stats_coal_ticks) { 6980 bp->stats_coal_ticks) {
6955 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); 6981 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
6956 schedule_work(&bp->sp_task); 6982 bnxt_queue_sp_work(bp);
6957 } 6983 }
6958bnxt_restart_timer: 6984bnxt_restart_timer:
6959 mod_timer(&bp->timer, jiffies + bp->current_interval); 6985 mod_timer(&bp->timer, jiffies + bp->current_interval);
@@ -7026,30 +7052,28 @@ static void bnxt_sp_task(struct work_struct *work)
7026 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) 7052 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
7027 bnxt_hwrm_port_qstats(bp); 7053 bnxt_hwrm_port_qstats(bp);
7028 7054
7029 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
7030 * must be the last functions to be called before exiting.
7031 */
7032 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 7055 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
7033 int rc = 0; 7056 int rc;
7034 7057
7058 mutex_lock(&bp->link_lock);
7035 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 7059 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
7036 &bp->sp_event)) 7060 &bp->sp_event))
7037 bnxt_hwrm_phy_qcaps(bp); 7061 bnxt_hwrm_phy_qcaps(bp);
7038 7062
7039 bnxt_rtnl_lock_sp(bp); 7063 rc = bnxt_update_link(bp, true);
7040 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 7064 mutex_unlock(&bp->link_lock);
7041 rc = bnxt_update_link(bp, true);
7042 bnxt_rtnl_unlock_sp(bp);
7043 if (rc) 7065 if (rc)
7044 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 7066 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
7045 rc); 7067 rc);
7046 } 7068 }
7047 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 7069 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
7048 bnxt_rtnl_lock_sp(bp); 7070 mutex_lock(&bp->link_lock);
7049 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 7071 bnxt_get_port_module_status(bp);
7050 bnxt_get_port_module_status(bp); 7072 mutex_unlock(&bp->link_lock);
7051 bnxt_rtnl_unlock_sp(bp);
7052 } 7073 }
7074 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
7075 * must be the last functions to be called before exiting.
7076 */
7053 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 7077 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
7054 bnxt_reset(bp, false); 7078 bnxt_reset(bp, false);
7055 7079
@@ -7457,7 +7481,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
7457 spin_unlock_bh(&bp->ntp_fltr_lock); 7481 spin_unlock_bh(&bp->ntp_fltr_lock);
7458 7482
7459 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); 7483 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
7460 schedule_work(&bp->sp_task); 7484 bnxt_queue_sp_work(bp);
7461 7485
7462 return new_fltr->sw_id; 7486 return new_fltr->sw_id;
7463 7487
@@ -7540,7 +7564,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
7540 if (bp->vxlan_port_cnt == 1) { 7564 if (bp->vxlan_port_cnt == 1) {
7541 bp->vxlan_port = ti->port; 7565 bp->vxlan_port = ti->port;
7542 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); 7566 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
7543 schedule_work(&bp->sp_task); 7567 bnxt_queue_sp_work(bp);
7544 } 7568 }
7545 break; 7569 break;
7546 case UDP_TUNNEL_TYPE_GENEVE: 7570 case UDP_TUNNEL_TYPE_GENEVE:
@@ -7557,7 +7581,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
7557 return; 7581 return;
7558 } 7582 }
7559 7583
7560 schedule_work(&bp->sp_task); 7584 bnxt_queue_sp_work(bp);
7561} 7585}
7562 7586
7563static void bnxt_udp_tunnel_del(struct net_device *dev, 7587static void bnxt_udp_tunnel_del(struct net_device *dev,
@@ -7596,7 +7620,7 @@ static void bnxt_udp_tunnel_del(struct net_device *dev,
7596 return; 7620 return;
7597 } 7621 }
7598 7622
7599 schedule_work(&bp->sp_task); 7623 bnxt_queue_sp_work(bp);
7600} 7624}
7601 7625
7602static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 7626static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
@@ -7744,7 +7768,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
7744 pci_disable_pcie_error_reporting(pdev); 7768 pci_disable_pcie_error_reporting(pdev);
7745 unregister_netdev(dev); 7769 unregister_netdev(dev);
7746 bnxt_shutdown_tc(bp); 7770 bnxt_shutdown_tc(bp);
7747 cancel_work_sync(&bp->sp_task); 7771 bnxt_cancel_sp_work(bp);
7748 bp->sp_event = 0; 7772 bp->sp_event = 0;
7749 7773
7750 bnxt_clear_int_mode(bp); 7774 bnxt_clear_int_mode(bp);
@@ -7772,6 +7796,7 @@ static int bnxt_probe_phy(struct bnxt *bp)
7772 rc); 7796 rc);
7773 return rc; 7797 return rc;
7774 } 7798 }
7799 mutex_init(&bp->link_lock);
7775 7800
7776 rc = bnxt_update_link(bp, false); 7801 rc = bnxt_update_link(bp, false);
7777 if (rc) { 7802 if (rc) {
@@ -7970,7 +7995,7 @@ static void bnxt_parse_log_pcie_link(struct bnxt *bp)
7970 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; 7995 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
7971 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; 7996 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
7972 7997
7973 if (pcie_get_minimum_link(bp->pdev, &speed, &width) || 7998 if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) ||
7974 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) 7999 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
7975 netdev_info(bp->dev, "Failed to determine PCIe Link Info\n"); 8000 netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
7976 else 8001 else
@@ -8162,8 +8187,17 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8162 else 8187 else
8163 device_set_wakeup_capable(&pdev->dev, false); 8188 device_set_wakeup_capable(&pdev->dev, false);
8164 8189
8165 if (BNXT_PF(bp)) 8190 if (BNXT_PF(bp)) {
8191 if (!bnxt_pf_wq) {
8192 bnxt_pf_wq =
8193 create_singlethread_workqueue("bnxt_pf_wq");
8194 if (!bnxt_pf_wq) {
8195 dev_err(&pdev->dev, "Unable to create workqueue.\n");
8196 goto init_err_pci_clean;
8197 }
8198 }
8166 bnxt_init_tc(bp); 8199 bnxt_init_tc(bp);
8200 }
8167 8201
8168 rc = register_netdev(dev); 8202 rc = register_netdev(dev);
8169 if (rc) 8203 if (rc)
@@ -8399,4 +8433,17 @@ static struct pci_driver bnxt_pci_driver = {
8399#endif 8433#endif
8400}; 8434};
8401 8435
8402module_pci_driver(bnxt_pci_driver); 8436static int __init bnxt_init(void)
8437{
8438 return pci_register_driver(&bnxt_pci_driver);
8439}
8440
8441static void __exit bnxt_exit(void)
8442{
8443 pci_unregister_driver(&bnxt_pci_driver);
8444 if (bnxt_pf_wq)
8445 destroy_workqueue(bnxt_pf_wq);
8446}
8447
8448module_init(bnxt_init);
8449module_exit(bnxt_exit);
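A note on the bnxt.c changes above: every schedule_work() call becomes bnxt_queue_sp_work(), which routes PF slow-path work onto the driver-private bnxt_pf_wq created at probe time, and module_pci_driver() is expanded so the workqueue can be destroyed only after the PCI driver is unregistered. The helper's body is not in these hunks; a plausible sketch of the pattern, keeping only bnxt_pf_wq, bp->sp_task and BNXT_PF() from the diff:

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/workqueue.h>

static struct workqueue_struct *bnxt_pf_wq;

/* Likely shape of the helper the hunks call: PF work goes to the
 * private queue, VF work can stay on the system workqueue.
 */
static void bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static int __init bnxt_init(void)
{
	return pci_register_driver(&bnxt_pci_driver);
}

static void __exit bnxt_exit(void)
{
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)			/* created lazily at PF probe time */
		destroy_workqueue(bnxt_pf_wq);
}

module_init(bnxt_init);
module_exit(bnxt_exit);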
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 7b888d4b2b55..c911e69ff25f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1290,6 +1290,10 @@ struct bnxt {
1290 unsigned long *ntp_fltr_bmap; 1290 unsigned long *ntp_fltr_bmap;
1291 int ntp_fltr_count; 1291 int ntp_fltr_count;
1292 1292
1293 /* To protect link related settings during link changes and
1294 * ethtool settings changes.
1295 */
1296 struct mutex link_lock;
1293 struct bnxt_link_info link_info; 1297 struct bnxt_link_info link_info;
1294 struct ethtool_eee eee; 1298 struct ethtool_eee eee;
1295 u32 lpi_tmr_lo; 1299 u32 lpi_tmr_lo;
@@ -1358,6 +1362,7 @@ void bnxt_set_ring_params(struct bnxt *);
1358int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); 1362int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
1359void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); 1363void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
1360int _hwrm_send_message(struct bnxt *, void *, u32, int); 1364int _hwrm_send_message(struct bnxt *, void *, u32, int);
1365int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout);
1361int hwrm_send_message(struct bnxt *, void *, u32, int); 1366int hwrm_send_message(struct bnxt *, void *, u32, int);
1362int hwrm_send_message_silent(struct bnxt *, void *, u32, int); 1367int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
1363int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, 1368int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index aa1f3a2c7a78..fed37cd9ae1d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -50,7 +50,9 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
50 50
51 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); 51 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
52 req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); 52 req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
53 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 53
54 mutex_lock(&bp->hwrm_cmd_lock);
55 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
54 if (!rc) { 56 if (!rc) {
55 u8 *pri2cos = &resp->pri0_cos_queue_id; 57 u8 *pri2cos = &resp->pri0_cos_queue_id;
56 int i, j; 58 int i, j;
@@ -66,6 +68,7 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
66 } 68 }
67 } 69 }
68 } 70 }
71 mutex_unlock(&bp->hwrm_cmd_lock);
69 return rc; 72 return rc;
70} 73}
71 74
@@ -119,9 +122,13 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
119 int rc, i; 122 int rc, i;
120 123
121 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1); 124 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
122 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 125
123 if (rc) 126 mutex_lock(&bp->hwrm_cmd_lock);
127 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
128 if (rc) {
129 mutex_unlock(&bp->hwrm_cmd_lock);
124 return rc; 130 return rc;
131 }
125 132
126 data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id); 133 data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
127 for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) { 134 for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
@@ -143,6 +150,7 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
143 } 150 }
144 } 151 }
145 } 152 }
153 mutex_unlock(&bp->hwrm_cmd_lock);
146 return 0; 154 return 0;
147} 155}
148 156
@@ -240,12 +248,17 @@ static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
240 int rc; 248 int rc;
241 249
242 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1); 250 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
243 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 251
244 if (rc) 252 mutex_lock(&bp->hwrm_cmd_lock);
253 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
254 if (rc) {
255 mutex_unlock(&bp->hwrm_cmd_lock);
245 return rc; 256 return rc;
257 }
246 258
247 pri_mask = le32_to_cpu(resp->flags); 259 pri_mask = le32_to_cpu(resp->flags);
248 pfc->pfc_en = pri_mask; 260 pfc->pfc_en = pri_mask;
261 mutex_unlock(&bp->hwrm_cmd_lock);
249 return 0; 262 return 0;
250} 263}
251 264
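All three DCB query helpers above make the same change: hold bp->hwrm_cmd_lock across _hwrm_send_message() and the response parse, because the firmware writes every reply into one shared response buffer. Condensed from the PFC hunk (the resp buffer name is assumed from the driver; the rest mirrors the diff):

static int bnxt_hwrm_queue_pfc_qcfg_sketch(struct bnxt *bp,
					   struct ieee_pfc *pfc)
{
	struct hwrm_queue_pfcenable_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_queue_pfcenable_qcfg_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		pfc->pfc_en = le32_to_cpu(resp->flags);
	mutex_unlock(&bp->hwrm_cmd_lock);	/* resp contents are stale
						 * once the lock is dropped */
	return rc;
}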
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8eff05a3e0e4..3cbe771b3352 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1052,6 +1052,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
1052 u32 ethtool_speed; 1052 u32 ethtool_speed;
1053 1053
1054 ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported); 1054 ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
1055 mutex_lock(&bp->link_lock);
1055 bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings); 1056 bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
1056 1057
1057 ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); 1058 ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
@@ -1099,6 +1100,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
1099 base->port = PORT_FIBRE; 1100 base->port = PORT_FIBRE;
1100 } 1101 }
1101 base->phy_address = link_info->phy_addr; 1102 base->phy_address = link_info->phy_addr;
1103 mutex_unlock(&bp->link_lock);
1102 1104
1103 return 0; 1105 return 0;
1104} 1106}
@@ -1190,6 +1192,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
1190 if (!BNXT_SINGLE_PF(bp)) 1192 if (!BNXT_SINGLE_PF(bp))
1191 return -EOPNOTSUPP; 1193 return -EOPNOTSUPP;
1192 1194
1195 mutex_lock(&bp->link_lock);
1193 if (base->autoneg == AUTONEG_ENABLE) { 1196 if (base->autoneg == AUTONEG_ENABLE) {
1194 BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings, 1197 BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
1195 advertising); 1198 advertising);
@@ -1234,6 +1237,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
1234 rc = bnxt_hwrm_set_link_setting(bp, set_pause, false); 1237 rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
1235 1238
1236set_setting_exit: 1239set_setting_exit:
1240 mutex_unlock(&bp->link_lock);
1237 return rc; 1241 return rc;
1238} 1242}
1239 1243
@@ -1805,7 +1809,8 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
1805 req.dir_ordinal = cpu_to_le16(ordinal); 1809 req.dir_ordinal = cpu_to_le16(ordinal);
1806 req.dir_ext = cpu_to_le16(ext); 1810 req.dir_ext = cpu_to_le16(ext);
1807 req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; 1811 req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
1808 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 1812 mutex_lock(&bp->hwrm_cmd_lock);
1813 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1809 if (rc == 0) { 1814 if (rc == 0) {
1810 if (index) 1815 if (index)
1811 *index = le16_to_cpu(output->dir_idx); 1816 *index = le16_to_cpu(output->dir_idx);
@@ -1814,6 +1819,7 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
1814 if (data_length) 1819 if (data_length)
1815 *data_length = le32_to_cpu(output->dir_data_length); 1820 *data_length = le32_to_cpu(output->dir_data_length);
1816 } 1821 }
1822 mutex_unlock(&bp->hwrm_cmd_lock);
1817 return rc; 1823 return rc;
1818} 1824}
1819 1825
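bnxt_get_link_ksettings() and bnxt_set_link_ksettings() now hold bp->link_lock for their entire read or read-modify-write of bp->link_info, so they cannot interleave with the SP task's link update. The get path, reduced to a skeleton (helper names from the hunks, everything else illustrative):

static int bnxt_get_link_ksettings_sketch(struct net_device *dev,
					  struct ethtool_link_ksettings *lk)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	/* bp->link_info is also written by bnxt_sp_task() and
	 * __bnxt_open_nic(); the mutex keeps this snapshot coherent.
	 */
	mutex_lock(&bp->link_lock);
	bnxt_fw_to_ethtool_support_spds(link_info, lk);
	lk->base.phy_address = link_info->phy_addr;
	mutex_unlock(&bp->link_lock);
	return 0;
}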
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index d37925a8a65b..5ee18660bc33 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -502,6 +502,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
502 int rc = 0, vfs_supported; 502 int rc = 0, vfs_supported;
503 int min_rx_rings, min_tx_rings, min_rss_ctxs; 503 int min_rx_rings, min_tx_rings, min_rss_ctxs;
504 int tx_ok = 0, rx_ok = 0, rss_ok = 0; 504 int tx_ok = 0, rx_ok = 0, rss_ok = 0;
505 int avail_cp, avail_stat;
505 506
506 /* Check if we can enable requested num of vf's. At a minimum 507 /* Check if we can enable requested num of vf's. At a minimum
507 * we require 1 RX 1 TX rings for each VF. In this minimum conf 508 * we require 1 RX 1 TX rings for each VF. In this minimum conf
@@ -509,6 +510,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
509 */ 510 */
510 vfs_supported = *num_vfs; 511 vfs_supported = *num_vfs;
511 512
513 avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings;
514 avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs;
515 avail_cp = min_t(int, avail_cp, avail_stat);
516
512 while (vfs_supported) { 517 while (vfs_supported) {
513 min_rx_rings = vfs_supported; 518 min_rx_rings = vfs_supported;
514 min_tx_rings = vfs_supported; 519 min_tx_rings = vfs_supported;
@@ -523,10 +528,12 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
523 min_rx_rings) 528 min_rx_rings)
524 rx_ok = 1; 529 rx_ok = 1;
525 } 530 }
526 if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings) 531 if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings ||
532 avail_cp < min_rx_rings)
527 rx_ok = 0; 533 rx_ok = 0;
528 534
529 if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings) 535 if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
536 avail_cp >= min_tx_rings)
530 tx_ok = 1; 537 tx_ok = 1;
531 538
532 if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs) 539 if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
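The SR-IOV sizing loop previously ignored completion rings and statistics contexts, so a VF count could pass the RX/TX checks and still fail to provision. The new code caps the budget by the scarcer of the two before the per-VF tests. Roughly, as a condensed and partly hypothetical helper (the real loop also accounts for RFS, VNICs and RSS contexts):

/* Highest VF count for which each VF still gets an RX ring, a TX ring,
 * and a completion ring backed by a stat context.
 */
static int bnxt_max_vfs_sketch(struct bnxt *bp, int requested)
{
	int avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings;
	int avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs;
	int avail_rx = bp->pf.max_rx_rings - bp->rx_nr_rings;
	int avail_tx = bp->pf.max_tx_rings - bp->tx_nr_rings;
	int vfs;

	avail_cp = min(avail_cp, avail_stat);	/* scarcer resource wins */

	for (vfs = requested; vfs > 0; vfs--)
		if (avail_rx >= vfs && avail_tx >= vfs && avail_cp >= vfs)
			return vfs;
	return 0;
}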
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 963803bc6633..eafae3eb4fed 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1847,7 +1847,7 @@ static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1847 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1847 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1848 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1848 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1849 1849
1850 ns = timespec_to_ns(ts); 1850 ns = timespec64_to_ns(ts);
1851 1851
1852 spin_lock_irqsave(&lio->ptp_lock, flags); 1852 spin_lock_irqsave(&lio->ptp_lock, flags);
1853 lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI); 1853 lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
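The liquidio one-liner swaps timespec_to_ns() for timespec64_to_ns(), matching the struct timespec64 that the PTP .settime64 callback receives. For reference, the conversion is simply:

#include <linux/time64.h>

/* What timespec64_to_ns() computes: 64-bit seconds scaled to
 * nanoseconds, so the value cannot wrap in 2038 the way a 32-bit
 * time_t based timespec could.
 */
static inline s64 timespec64_to_ns_sketch(const struct timespec64 *ts)
{
	return ((s64)ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
}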
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index b991703319f9..11eba8277132 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1110,11 +1110,12 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1110 * places them in a descriptor array, scrq_arr 1110 * places them in a descriptor array, scrq_arr
1111 */ 1111 */
1112 1112
1113static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, 1113static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1114 union sub_crq *scrq_arr) 1114 union sub_crq *scrq_arr)
1115{ 1115{
1116 union sub_crq hdr_desc; 1116 union sub_crq hdr_desc;
1117 int tmp_len = len; 1117 int tmp_len = len;
1118 int num_descs = 0;
1118 u8 *data, *cur; 1119 u8 *data, *cur;
1119 int tmp; 1120 int tmp;
1120 1121
@@ -1143,7 +1144,10 @@ static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1143 tmp_len -= tmp; 1144 tmp_len -= tmp;
1144 *scrq_arr = hdr_desc; 1145 *scrq_arr = hdr_desc;
1145 scrq_arr++; 1146 scrq_arr++;
1147 num_descs++;
1146 } 1148 }
1149
1150 return num_descs;
1147} 1151}
1148 1152
1149/** 1153/**
@@ -1161,16 +1165,12 @@ static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
1161 int *num_entries, u8 hdr_field) 1165 int *num_entries, u8 hdr_field)
1162{ 1166{
1163 int hdr_len[3] = {0, 0, 0}; 1167 int hdr_len[3] = {0, 0, 0};
1164 int tot_len, len; 1168 int tot_len;
1165 u8 *hdr_data = txbuff->hdr_data; 1169 u8 *hdr_data = txbuff->hdr_data;
1166 1170
1167 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len, 1171 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
1168 txbuff->hdr_data); 1172 txbuff->hdr_data);
1169 len = tot_len; 1173 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1170 len -= 24;
1171 if (len > 0)
1172 num_entries += len % 29 ? len / 29 + 1 : len / 29;
1173 create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1174 txbuff->indir_arr + 1); 1174 txbuff->indir_arr + 1);
1175} 1175}
1176 1176
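The ibmvnic bug was two-fold: the descriptor count was estimated with len / 29 arithmetic, and the estimate was applied to the int *num_entries pointer itself rather than to the value behind it. The fix has create_hdr_descs() count what it actually builds. The pattern, in a simplified and partly hypothetical reduction:

/* Producer counts what it emits; the caller accumulates the return
 * value instead of predicting it arithmetically.  Simplified: the
 * real code packs 24 bytes into the first descriptor and 29 into
 * each subsequent one.
 */
static int emit_descs_sketch(const u8 *data, int len, union sub_crq *out)
{
	int n = 0;

	while (len > 0) {
		int chunk = min(len, 29);

		/* ... copy chunk bytes of data into out[n] ... */
		data += chunk;
		len -= chunk;
		n++;
	}
	return n;
}

/* caller, as in build_hdr_descs_arr() above:
 *	*num_entries += emit_descs_sketch(hdr_data, tot_len,
 *					  txbuff->indir_arr + 1);
 */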
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 151d9cfb6ea4..0ccab0a5d717 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -298,7 +298,7 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
298} 298}
299 299
300/** 300/**
301 * __i40e_read_nvm_word - Reads nvm word, assumes called does the locking 301 * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
302 * @hw: pointer to the HW structure 302 * @hw: pointer to the HW structure
303 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) 303 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
304 * @data: word read from the Shadow RAM 304 * @data: word read from the Shadow RAM
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index a23306f04e00..edbc94c4353d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1038,6 +1038,32 @@ reset_latency:
1038} 1038}
1039 1039
1040/** 1040/**
1041 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1042 * @rx_ring: rx descriptor ring to store buffers on
1043 * @old_buff: donor buffer to have page reused
1044 *
1045 * Synchronizes page for reuse by the adapter
1046 **/
1047static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1048 struct i40e_rx_buffer *old_buff)
1049{
1050 struct i40e_rx_buffer *new_buff;
1051 u16 nta = rx_ring->next_to_alloc;
1052
1053 new_buff = &rx_ring->rx_bi[nta];
1054
1055 /* update, and store next to alloc */
1056 nta++;
1057 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1058
1059 /* transfer page from old buffer to new buffer */
1060 new_buff->dma = old_buff->dma;
1061 new_buff->page = old_buff->page;
1062 new_buff->page_offset = old_buff->page_offset;
1063 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1064}
1065
1066/**
1041 * i40e_rx_is_programming_status - check for programming status descriptor 1067 * i40e_rx_is_programming_status - check for programming status descriptor
1042 * @qw: qword representing status_error_len in CPU ordering 1068 * @qw: qword representing status_error_len in CPU ordering
1043 * 1069 *
@@ -1071,15 +1097,24 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
1071 union i40e_rx_desc *rx_desc, 1097 union i40e_rx_desc *rx_desc,
1072 u64 qw) 1098 u64 qw)
1073{ 1099{
1074 u32 ntc = rx_ring->next_to_clean + 1; 1100 struct i40e_rx_buffer *rx_buffer;
1101 u32 ntc = rx_ring->next_to_clean;
1075 u8 id; 1102 u8 id;
1076 1103
1077 /* fetch, update, and store next to clean */ 1104 /* fetch, update, and store next to clean */
1105 rx_buffer = &rx_ring->rx_bi[ntc++];
1078 ntc = (ntc < rx_ring->count) ? ntc : 0; 1106 ntc = (ntc < rx_ring->count) ? ntc : 0;
1079 rx_ring->next_to_clean = ntc; 1107 rx_ring->next_to_clean = ntc;
1080 1108
1081 prefetch(I40E_RX_DESC(rx_ring, ntc)); 1109 prefetch(I40E_RX_DESC(rx_ring, ntc));
1082 1110
1111 /* place unused page back on the ring */
1112 i40e_reuse_rx_page(rx_ring, rx_buffer);
1113 rx_ring->rx_stats.page_reuse_count++;
1114
1115 /* clear contents of buffer_info */
1116 rx_buffer->page = NULL;
1117
1083 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> 1118 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1084 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; 1119 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1085 1120
@@ -1648,32 +1683,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
1648} 1683}
1649 1684
1650/** 1685/**
1651 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1652 * @rx_ring: rx descriptor ring to store buffers on
1653 * @old_buff: donor buffer to have page reused
1654 *
1655 * Synchronizes page for reuse by the adapter
1656 **/
1657static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1658 struct i40e_rx_buffer *old_buff)
1659{
1660 struct i40e_rx_buffer *new_buff;
1661 u16 nta = rx_ring->next_to_alloc;
1662
1663 new_buff = &rx_ring->rx_bi[nta];
1664
1665 /* update, and store next to alloc */
1666 nta++;
1667 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1668
1669 /* transfer page from old buffer to new buffer */
1670 new_buff->dma = old_buff->dma;
1671 new_buff->page = old_buff->page;
1672 new_buff->page_offset = old_buff->page_offset;
1673 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1674}
1675
1676/**
1677 * i40e_page_is_reusable - check if any reuse is possible 1686 * i40e_page_is_reusable - check if any reuse is possible
1678 * @page: page struct to check 1687 * @page: page struct to check
1679 * 1688 *
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 9d5e7cf288be..f3315bc874ad 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -96,6 +96,7 @@ struct mlxsw_core {
96 const struct mlxsw_bus *bus; 96 const struct mlxsw_bus *bus;
97 void *bus_priv; 97 void *bus_priv;
98 const struct mlxsw_bus_info *bus_info; 98 const struct mlxsw_bus_info *bus_info;
99 struct workqueue_struct *emad_wq;
99 struct list_head rx_listener_list; 100 struct list_head rx_listener_list;
100 struct list_head event_listener_list; 101 struct list_head event_listener_list;
101 struct { 102 struct {
@@ -465,7 +466,7 @@ static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
465{ 466{
466 unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS); 467 unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
467 468
468 mlxsw_core_schedule_dw(&trans->timeout_dw, timeout); 469 queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
469} 470}
470 471
471static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, 472static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
@@ -587,12 +588,18 @@ static const struct mlxsw_listener mlxsw_emad_rx_listener =
587 588
588static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) 589static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
589{ 590{
591 struct workqueue_struct *emad_wq;
590 u64 tid; 592 u64 tid;
591 int err; 593 int err;
592 594
593 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) 595 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
594 return 0; 596 return 0;
595 597
598 emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
599 if (!emad_wq)
600 return -ENOMEM;
601 mlxsw_core->emad_wq = emad_wq;
602
596 /* Set the upper 32 bits of the transaction ID field to a random 603 /* Set the upper 32 bits of the transaction ID field to a random
597 * number. This allows us to discard EMADs addressed to other 604 * number. This allows us to discard EMADs addressed to other
598 * devices. 605 * devices.
@@ -619,6 +626,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
619err_emad_trap_set: 626err_emad_trap_set:
620 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, 627 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
621 mlxsw_core); 628 mlxsw_core);
629 destroy_workqueue(mlxsw_core->emad_wq);
622 return err; 630 return err;
623} 631}
624 632
@@ -631,6 +639,7 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
631 mlxsw_core->emad.use_emad = false; 639 mlxsw_core->emad.use_emad = false;
632 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, 640 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
633 mlxsw_core); 641 mlxsw_core);
642 destroy_workqueue(mlxsw_core->emad_wq);
634} 643}
635 644
636static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core, 645static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
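mlxsw gives EMAD timeout handling its own WQ_MEM_RECLAIM workqueue so register-access retries keep running when the system workqueue is stalled under memory pressure. The allocate and queue pair, sketched with the names the diff uses:

static int emad_wq_init_sketch(struct mlxsw_core *core)
{
	/* WQ_MEM_RECLAIM guarantees a rescuer thread, so the timeout
	 * work can still run while the kernel is reclaiming memory.
	 */
	core->emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
	return core->emad_wq ? 0 : -ENOMEM;
}

static void emad_arm_timeout_sketch(struct mlxsw_reg_trans *trans)
{
	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);

	queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
}

/* fini mirrors init: destroy_workqueue(core->emad_wq) after the trap
 * is unregistered, as in both hunks above.
 */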
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index d44e673a4c4e..a3f31f425550 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -6778,6 +6778,36 @@ static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index,
6778 mlxsw_reg_mgpc_opcode_set(payload, opcode); 6778 mlxsw_reg_mgpc_opcode_set(payload, opcode);
6779} 6779}
6780 6780
6781/* TIGCR - Tunneling IPinIP General Configuration Register
6782 * -------------------------------------------------------
6783 * The TIGCR register is used for setting up the IPinIP Tunnel configuration.
6784 */
6785#define MLXSW_REG_TIGCR_ID 0xA801
6786#define MLXSW_REG_TIGCR_LEN 0x10
6787
6788MLXSW_REG_DEFINE(tigcr, MLXSW_REG_TIGCR_ID, MLXSW_REG_TIGCR_LEN);
6789
6790/* reg_tigcr_ipip_ttlc
6791 * For IPinIP Tunnel encapsulation: whether to copy the ttl from the packet
6792 * header.
6793 * Access: RW
6794 */
6795MLXSW_ITEM32(reg, tigcr, ttlc, 0x04, 8, 1);
6796
6797/* reg_tigcr_ipip_ttl_uc
6798 * The TTL for IPinIP Tunnel encapsulation of unicast packets if
6799 * reg_tigcr_ipip_ttlc is unset.
6800 * Access: RW
6801 */
6802MLXSW_ITEM32(reg, tigcr, ttl_uc, 0x04, 0, 8);
6803
6804static inline void mlxsw_reg_tigcr_pack(char *payload, bool ttlc, u8 ttl_uc)
6805{
6806 MLXSW_REG_ZERO(tigcr, payload);
6807 mlxsw_reg_tigcr_ttlc_set(payload, ttlc);
6808 mlxsw_reg_tigcr_ttl_uc_set(payload, ttl_uc);
6809}
6810
6781/* SBPR - Shared Buffer Pools Register 6811/* SBPR - Shared Buffer Pools Register
6782 * ----------------------------------- 6812 * -----------------------------------
6783 * The SBPR configures and retrieves the shared buffer pools and configuration. 6813 * The SBPR configures and retrieves the shared buffer pools and configuration.
@@ -7262,6 +7292,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
7262 MLXSW_REG(mcc), 7292 MLXSW_REG(mcc),
7263 MLXSW_REG(mcda), 7293 MLXSW_REG(mcda),
7264 MLXSW_REG(mgpc), 7294 MLXSW_REG(mgpc),
7295 MLXSW_REG(tigcr),
7265 MLXSW_REG(sbpr), 7296 MLXSW_REG(sbpr),
7266 MLXSW_REG(sbcm), 7297 MLXSW_REG(sbcm),
7267 MLXSW_REG(sbpm), 7298 MLXSW_REG(sbpm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 12d471d2a90b..5f2d100e3718 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -6432,11 +6432,20 @@ static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
6432 kfree(mlxsw_sp->router->rifs); 6432 kfree(mlxsw_sp->router->rifs);
6433} 6433}
6434 6434
6435static int
6436mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
6437{
6438 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
6439
6440 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
6441 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
6442}
6443
6435static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp) 6444static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
6436{ 6445{
6437 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr; 6446 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
6438 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list); 6447 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
6439 return 0; 6448 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
6440} 6449}
6441 6450
6442static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp) 6451static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index d2f73feb8497..2c9109b09faf 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1180,10 +1180,14 @@ static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
1180{ 1180{
1181 void *frag; 1181 void *frag;
1182 1182
1183 if (!dp->xdp_prog) 1183 if (!dp->xdp_prog) {
1184 frag = netdev_alloc_frag(dp->fl_bufsz); 1184 frag = netdev_alloc_frag(dp->fl_bufsz);
1185 else 1185 } else {
1186 frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD)); 1186 struct page *page;
1187
1188 page = alloc_page(GFP_KERNEL | __GFP_COLD);
1189 frag = page ? page_address(page) : NULL;
1190 }
1187 if (!frag) { 1191 if (!frag) {
1188 nn_dp_warn(dp, "Failed to alloc receive page frag\n"); 1192 nn_dp_warn(dp, "Failed to alloc receive page frag\n");
1189 return NULL; 1193 return NULL;
@@ -1203,10 +1207,14 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
1203{ 1207{
1204 void *frag; 1208 void *frag;
1205 1209
1206 if (!dp->xdp_prog) 1210 if (!dp->xdp_prog) {
1207 frag = napi_alloc_frag(dp->fl_bufsz); 1211 frag = napi_alloc_frag(dp->fl_bufsz);
1208 else 1212 } else {
1209 frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD)); 1213 struct page *page;
1214
1215 page = alloc_page(GFP_ATOMIC | __GFP_COLD);
1216 frag = page ? page_address(page) : NULL;
1217 }
1210 if (!frag) { 1218 if (!frag) {
1211 nn_dp_warn(dp, "Failed to alloc receive page frag\n"); 1219 nn_dp_warn(dp, "Failed to alloc receive page frag\n");
1212 return NULL; 1220 return NULL;
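Both nfp allocation helpers had the same hazard: alloc_page()'s result went straight into page_address() with no NULL check. The guarded shape the fix introduces generalizes to:

/* Check alloc_page()'s result before page_address(): page_address(NULL)
 * does not return NULL, so the failure would otherwise surface later
 * as a bogus frag pointer.
 */
static void *alloc_xdp_frag_sketch(gfp_t gfp)
{
	struct page *page = alloc_page(gfp | __GFP_COLD);

	return page ? page_address(page) : NULL;
}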
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 07969f06df10..dc016dfec64d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -464,7 +464,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
464 464
465 do { 465 do {
466 start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync); 466 start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
467 *data++ = nn->r_vecs[i].rx_pkts; 467 data[0] = nn->r_vecs[i].rx_pkts;
468 tmp[0] = nn->r_vecs[i].hw_csum_rx_ok; 468 tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
469 tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok; 469 tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
470 tmp[2] = nn->r_vecs[i].hw_csum_rx_error; 470 tmp[2] = nn->r_vecs[i].hw_csum_rx_error;
@@ -472,14 +472,16 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
472 472
473 do { 473 do {
474 start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync); 474 start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
475 *data++ = nn->r_vecs[i].tx_pkts; 475 data[1] = nn->r_vecs[i].tx_pkts;
476 *data++ = nn->r_vecs[i].tx_busy; 476 data[2] = nn->r_vecs[i].tx_busy;
477 tmp[3] = nn->r_vecs[i].hw_csum_tx; 477 tmp[3] = nn->r_vecs[i].hw_csum_tx;
478 tmp[4] = nn->r_vecs[i].hw_csum_tx_inner; 478 tmp[4] = nn->r_vecs[i].hw_csum_tx_inner;
479 tmp[5] = nn->r_vecs[i].tx_gather; 479 tmp[5] = nn->r_vecs[i].tx_gather;
480 tmp[6] = nn->r_vecs[i].tx_lso; 480 tmp[6] = nn->r_vecs[i].tx_lso;
481 } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); 481 } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
482 482
483 data += 3;
484
483 for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) 485 for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
484 gathered_stats[j] += tmp[j]; 486 gathered_stats[j] += tmp[j];
485 } 487 }
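The nfp ethtool fix is about u64_stats retry semantics: the body of a fetch_begin()/fetch_retry() loop may execute more than once, so *data++ inside it advanced the output pointer once per retry and shifted every later statistic. Writing fixed slots and advancing once, after both loops, keeps the layout stable:

unsigned int start;

do {
	start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
	data[1] = nn->r_vecs[i].tx_pkts;	/* fixed slots are safe */
	data[2] = nn->r_vecs[i].tx_busy;	/* to rewrite on retry  */
	/* ... tmp[3..6] gathered as in the hunk ... */
} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));

data += 3;	/* rx_pkts + tx_pkts + tx_busy: advance exactly once */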
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index e03fcf914690..a3c949ea7d1a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -8491,8 +8491,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8491 rtl8168_driver_start(tp); 8491 rtl8168_driver_start(tp);
8492 } 8492 }
8493 8493
8494 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
8495
8496 if (pci_dev_run_wake(pdev)) 8494 if (pci_dev_run_wake(pdev))
8497 pm_runtime_put_noidle(&pdev->dev); 8495 pm_runtime_put_noidle(&pdev->dev);
8498 8496
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index e0ef02f9503b..4b286e27c4ca 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -275,7 +275,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
275 goto exit; 275 goto exit;
276 i++; 276 i++;
277 277
278 } while ((ret == 1) || (i < 10)); 278 } while ((ret == 1) && (i < 10));
279 279
280 if (i == 10) 280 if (i == 10)
281 ret = -EBUSY; 281 ret = -EBUSY;
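In the dwmac4 loop, '||' meant the loop could neither stop early once the status was definitive nor give up while it stayed pending; '&&' expresses the intended bounded retry:

/* Bounded retry: '&&' stops as soon as the status is no longer
 * pending (ret != 1) or the ten-attempt budget is spent.  The poll
 * step here is hypothetical.
 */
static int wait_rx_timestamp_sketch(void *desc)
{
	int i = 0, ret;

	do {
		ret = poll_timestamp_status(desc);	/* 1 == pending */
		i++;
	} while ((ret == 1) && (i < 10));

	if (i == 10)
		ret = -EBUSY;
	return ret;
}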
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 67af0bdd7f10..7516ca210855 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -34,7 +34,7 @@ int dwmac_dma_reset(void __iomem *ioaddr)
34 34
35 err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value, 35 err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
36 !(value & DMA_BUS_MODE_SFT_RESET), 36 !(value & DMA_BUS_MODE_SFT_RESET),
37 100000, 10000); 37 10000, 100000);
38 if (err) 38 if (err)
39 return -EBUSY; 39 return -EBUSY;
40 40
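readl_poll_timeout() takes its arguments as (addr, val, cond, sleep_us, timeout_us), the per-iteration sleep before the total budget; the old call had the last two swapped. Corrected usage, in isolation:

#include <linux/iopoll.h>

/* Re-check every 10 ms, give up after 100 ms total.  Swapped, the
 * first 100 ms sleep already overshot the 10 ms budget, degenerating
 * into a single late check.
 */
static int dma_reset_wait_sketch(void __iomem *ioaddr)
{
	u32 value;

	return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
				  !(value & DMA_BUS_MODE_SFT_RESET),
				  10000, 100000);
}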
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0e1b0a3d7b76..c7a894ead274 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -473,19 +473,18 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
473 struct dma_desc *np, struct sk_buff *skb) 473 struct dma_desc *np, struct sk_buff *skb)
474{ 474{
475 struct skb_shared_hwtstamps *shhwtstamp = NULL; 475 struct skb_shared_hwtstamps *shhwtstamp = NULL;
476 struct dma_desc *desc = p;
476 u64 ns; 477 u64 ns;
477 478
478 if (!priv->hwts_rx_en) 479 if (!priv->hwts_rx_en)
479 return; 480 return;
481 /* For GMAC4, the valid timestamp is from CTX next desc. */
482 if (priv->plat->has_gmac4)
483 desc = np;
480 484
481 /* Check if timestamp is available */ 485 /* Check if timestamp is available */
482 if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { 486 if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) {
483 /* For GMAC4, the valid timestamp is from CTX next desc. */ 487 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
484 if (priv->plat->has_gmac4)
485 ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
486 else
487 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
488
489 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); 488 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
490 shhwtstamp = skb_hwtstamps(skb); 489 shhwtstamp = skb_hwtstamps(skb);
491 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 490 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
@@ -1815,12 +1814,13 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1815{ 1814{
1816 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1815 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1817 unsigned int bytes_compl = 0, pkts_compl = 0; 1816 unsigned int bytes_compl = 0, pkts_compl = 0;
1818 unsigned int entry = tx_q->dirty_tx; 1817 unsigned int entry;
1819 1818
1820 netif_tx_lock(priv->dev); 1819 netif_tx_lock(priv->dev);
1821 1820
1822 priv->xstats.tx_clean++; 1821 priv->xstats.tx_clean++;
1823 1822
1823 entry = tx_q->dirty_tx;
1824 while (entry != tx_q->cur_tx) { 1824 while (entry != tx_q->cur_tx) {
1825 struct sk_buff *skb = tx_q->tx_skbuff[entry]; 1825 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1826 struct dma_desc *p; 1826 struct dma_desc *p;
@@ -3358,6 +3358,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3358 * them in stmmac_rx_refill() function so that 3358 * them in stmmac_rx_refill() function so that
3359 * device can reuse it. 3359 * device can reuse it.
3360 */ 3360 */
3361 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3361 rx_q->rx_skbuff[entry] = NULL; 3362 rx_q->rx_skbuff[entry] = NULL;
3362 dma_unmap_single(priv->device, 3363 dma_unmap_single(priv->device,
3363 rx_q->rx_skbuff_dma[entry], 3364 rx_q->rx_skbuff_dma[entry],
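The three stmmac fixes share one theme, using a value only once it is valid: a single desc pointer selects the GMAC4 context descriptor before any timestamp call, dirty_tx is sampled only after netif_tx_lock() is held, and the dropped RX skb is freed before its ring slot is cleared. The locking fix in miniature (ring-walk details elided; macro names from the driver):

static void stmmac_tx_clean_sketch(struct stmmac_priv *priv,
				   struct stmmac_tx_queue *tx_q)
{
	unsigned int entry;

	netif_tx_lock(priv->dev);

	/* Sample the shared index only under the lock; initializing it
	 * at declaration raced with the xmit path advancing dirty_tx.
	 */
	entry = tx_q->dirty_tx;
	while (entry != tx_q->cur_tx) {
		/* ... unmap and free tx_q->tx_skbuff[entry] ... */
		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
	}
	tx_q->dirty_tx = entry;

	netif_tx_unlock(priv->dev);
}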
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 01f7355ad277..5ec39f113127 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -113,13 +113,7 @@ static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
113 113
114static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni) 114static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni)
115{ 115{
116#ifdef __BIG_ENDIAN
117 return (vni[0] == tun_id[2]) &&
118 (vni[1] == tun_id[1]) &&
119 (vni[2] == tun_id[0]);
120#else
121 return !memcmp(vni, &tun_id[5], 3); 116 return !memcmp(vni, &tun_id[5], 3);
122#endif
123} 117}
124 118
125static sa_family_t geneve_get_sk_family(struct geneve_sock *gs) 119static sa_family_t geneve_get_sk_family(struct geneve_sock *gs)
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 8948b6adc0c5..2c98152d1e1b 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -743,6 +743,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
743 sg_init_table(sg, ret); 743 sg_init_table(sg, ret);
744 ret = skb_to_sgvec(skb, sg, 0, skb->len); 744 ret = skb_to_sgvec(skb, sg, 0, skb->len);
745 if (unlikely(ret < 0)) { 745 if (unlikely(ret < 0)) {
746 aead_request_free(req);
746 macsec_txsa_put(tx_sa); 747 macsec_txsa_put(tx_sa);
747 kfree_skb(skb); 748 kfree_skb(skb);
748 return ERR_PTR(ret); 749 return ERR_PTR(ret);
@@ -955,6 +956,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
955 sg_init_table(sg, ret); 956 sg_init_table(sg, ret);
956 ret = skb_to_sgvec(skb, sg, 0, skb->len); 957 ret = skb_to_sgvec(skb, sg, 0, skb->len);
957 if (unlikely(ret < 0)) { 958 if (unlikely(ret < 0)) {
959 aead_request_free(req);
958 kfree_skb(skb); 960 kfree_skb(skb);
959 return ERR_PTR(ret); 961 return ERR_PTR(ret);
960 } 962 }
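Both macsec paths leaked the AEAD request when skb_to_sgvec() failed after the request had already been allocated; the fix frees it on that branch. The usual rule, unwind in reverse order of acquisition, sketched with illustrative scaffolding:

static struct sk_buff *encrypt_step_sketch(struct crypto_aead *tfm,
					   struct scatterlist *sg,
					   struct sk_buff *skb)
{
	struct aead_request *req = aead_request_alloc(tfm, GFP_ATOMIC);
	int ret;

	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);	/* the leak the fix plugs */
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	/* ... set up and submit the request ... */
	return skb;
}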
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 2a2d058cdd40..ea29da91ea5a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2252,6 +2252,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2252 2252
2253 if (!dev) 2253 if (!dev)
2254 return -ENOMEM; 2254 return -ENOMEM;
2255 err = dev_get_valid_name(net, dev, name);
2256 if (err)
2257 goto err_free_dev;
2255 2258
2256 dev_net_set(dev, net); 2259 dev_net_set(dev, net);
2257 dev->rtnl_link_ops = &tun_link_ops; 2260 dev->rtnl_link_ops = &tun_link_ops;
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index c9c711dcd0e6..a89b5685e68b 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -652,7 +652,7 @@ static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk,
652 struct device *dev = i2400m_dev(i2400m); 652 struct device *dev = i2400m_dev(i2400m);
653 struct { 653 struct {
654 struct i2400m_bootrom_header cmd; 654 struct i2400m_bootrom_header cmd;
655 u8 cmd_payload[chunk_len]; 655 u8 cmd_payload[];
656 } __packed *buf; 656 } __packed *buf;
657 struct i2400m_bootrom_header ack; 657 struct i2400m_bootrom_header ack;
658 658
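The i2400m struct declared u8 cmd_payload[chunk_len] inside a struct type, a variable-length-array form that newer compilers reject; the flexible array member moves the sizing to the allocation. A sketch (names hypothetical):

struct bootrom_buf_sketch {
	struct i2400m_bootrom_header cmd;
	u8 cmd_payload[];	/* flexible array member: no size here */
} __packed;

static struct bootrom_buf_sketch *alloc_bootrom_buf_sketch(size_t chunk_len)
{
	/* sizeof() covers the header only; the payload bytes must be
	 * added explicitly now that the array has no compile-time size.
	 */
	return kzalloc(sizeof(struct bootrom_buf_sketch) + chunk_len,
		       GFP_KERNEL);
}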
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 5cbe0ae55a07..d6dff347f896 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -486,7 +486,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
486 486
487 dev->tx_queue_len = XENVIF_QUEUE_LENGTH; 487 dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
488 488
489 dev->min_mtu = 0; 489 dev->min_mtu = ETH_MIN_MTU;
490 dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN; 490 dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;
491 491
492 /* 492 /*
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 523387e71a80..8b8689c6d887 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1316,7 +1316,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1316 netdev->features |= netdev->hw_features; 1316 netdev->features |= netdev->hw_features;
1317 1317
1318 netdev->ethtool_ops = &xennet_ethtool_ops; 1318 netdev->ethtool_ops = &xennet_ethtool_ops;
1319 netdev->min_mtu = 0; 1319 netdev->min_mtu = ETH_MIN_MTU;
1320 netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE; 1320 netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
1321 SET_NETDEV_DEV(netdev, &dev->dev); 1321 SET_NETDEV_DEV(netdev, &dev->dev);
1322 1322