Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/can/cc770/cc770.c | 100
-rw-r--r--  drivers/net/can/cc770/cc770.h | 2
-rw-r--r--  drivers/net/can/ifi_canfd/ifi_canfd.c | 75
-rw-r--r--  drivers/net/can/m_can/m_can.c | 7
-rw-r--r--  drivers/net/can/peak_canfd/peak_canfd.c | 25
-rw-r--r--  drivers/net/can/peak_canfd/peak_pciefd_main.c | 8
-rw-r--r--  drivers/net/dsa/Makefile | 5
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 4
-rw-r--r--  drivers/net/ethernet/8390/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 22
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 15
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 7
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 11
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 66
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/ver.h | 2
-rw-r--r--  drivers/net/ethernet/arc/emac_rockchip.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 33
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 180
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 33
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 3
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_dtsec.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/defines.h | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 42
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.c | 25
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h | 3
-rw-r--r--  drivers/net/ethernet/natsemi/Kconfig | 6
-rw-r--r--  drivers/net/ethernet/natsemi/Makefile | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 17
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-mac.c | 23
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 4
-rw-r--r--  drivers/net/ethernet/socionext/sni_ave.c | 2
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 3
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 2
-rw-r--r--  drivers/net/hyperv/netvsc.c | 52
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 293
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 68
-rw-r--r--  drivers/net/macsec.c | 5
-rw-r--r--  drivers/net/macvlan.c | 2
-rw-r--r--  drivers/net/phy/bcm-phy-lib.c | 4
-rw-r--r--  drivers/net/phy/marvell.c | 4
-rw-r--r--  drivers/net/phy/micrel.c | 27
-rw-r--r--  drivers/net/phy/phy.c | 127
-rw-r--r--  drivers/net/phy/phy_device.c | 32
-rw-r--r--  drivers/net/phy/realtek.c | 2
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 26
-rw-r--r--  drivers/net/team/team.c | 4
-rw-r--r--  drivers/net/tun.c | 3
-rw-r--r--  drivers/net/usb/usbnet.c | 10
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 16
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 7
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_init.c | 1
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c | 10
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c | 3
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 24
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/Kconfig | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 13
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dbg.h | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/debugfs.h | 18
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/init.c | 12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/runtime.h | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 49
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 28
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 39
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 33
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/time-event.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 13
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 2
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 3
105 files changed, 1114 insertions, 751 deletions
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 1e37313054f3..6da69af103e6 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -390,37 +390,23 @@ static int cc770_get_berr_counter(const struct net_device *dev,
390 return 0; 390 return 0;
391} 391}
392 392
393static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) 393static void cc770_tx(struct net_device *dev, int mo)
394{ 394{
395 struct cc770_priv *priv = netdev_priv(dev); 395 struct cc770_priv *priv = netdev_priv(dev);
396 struct net_device_stats *stats = &dev->stats; 396 struct can_frame *cf = (struct can_frame *)priv->tx_skb->data;
397 struct can_frame *cf = (struct can_frame *)skb->data;
398 unsigned int mo = obj2msgobj(CC770_OBJ_TX);
399 u8 dlc, rtr; 397 u8 dlc, rtr;
400 u32 id; 398 u32 id;
401 int i; 399 int i;
402 400
403 if (can_dropped_invalid_skb(dev, skb))
404 return NETDEV_TX_OK;
405
406 if ((cc770_read_reg(priv,
407 msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
408 netdev_err(dev, "TX register is still occupied!\n");
409 return NETDEV_TX_BUSY;
410 }
411
412 netif_stop_queue(dev);
413
414 dlc = cf->can_dlc; 401 dlc = cf->can_dlc;
415 id = cf->can_id; 402 id = cf->can_id;
416 if (cf->can_id & CAN_RTR_FLAG) 403 rtr = cf->can_id & CAN_RTR_FLAG ? 0 : MSGCFG_DIR;
417 rtr = 0; 404
418 else 405 cc770_write_reg(priv, msgobj[mo].ctrl0,
419 rtr = MSGCFG_DIR; 406 MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
420 cc770_write_reg(priv, msgobj[mo].ctrl1, 407 cc770_write_reg(priv, msgobj[mo].ctrl1,
421 RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES); 408 RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES);
422 cc770_write_reg(priv, msgobj[mo].ctrl0, 409
423 MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES);
424 if (id & CAN_EFF_FLAG) { 410 if (id & CAN_EFF_FLAG) {
425 id &= CAN_EFF_MASK; 411 id &= CAN_EFF_MASK;
426 cc770_write_reg(priv, msgobj[mo].config, 412 cc770_write_reg(priv, msgobj[mo].config,
@@ -439,22 +425,30 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
439 for (i = 0; i < dlc; i++) 425 for (i = 0; i < dlc; i++)
440 cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]); 426 cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]);
441 427
442 /* Store echo skb before starting the transfer */
443 can_put_echo_skb(skb, dev, 0);
444
445 cc770_write_reg(priv, msgobj[mo].ctrl1, 428 cc770_write_reg(priv, msgobj[mo].ctrl1,
446 RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC); 429 RMTPND_UNC | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
430 cc770_write_reg(priv, msgobj[mo].ctrl0,
431 MSGVAL_SET | TXIE_SET | RXIE_SET | INTPND_UNC);
432}
447 433
448 stats->tx_bytes += dlc; 434static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
435{
436 struct cc770_priv *priv = netdev_priv(dev);
437 unsigned int mo = obj2msgobj(CC770_OBJ_TX);
449 438
439 if (can_dropped_invalid_skb(dev, skb))
440 return NETDEV_TX_OK;
450 441
451 /* 442 netif_stop_queue(dev);
452 * HM: We had some cases of repeated IRQs so make sure the 443
453 * INT is acknowledged I know it's already further up, but 444 if ((cc770_read_reg(priv,
454 * doing again fixed the issue 445 msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
455 */ 446 netdev_err(dev, "TX register is still occupied!\n");
456 cc770_write_reg(priv, msgobj[mo].ctrl0, 447 return NETDEV_TX_BUSY;
457 MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); 448 }
449
450 priv->tx_skb = skb;
451 cc770_tx(dev, mo);
458 452
459 return NETDEV_TX_OK; 453 return NETDEV_TX_OK;
460} 454}
@@ -680,19 +674,46 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
680 struct cc770_priv *priv = netdev_priv(dev); 674 struct cc770_priv *priv = netdev_priv(dev);
681 struct net_device_stats *stats = &dev->stats; 675 struct net_device_stats *stats = &dev->stats;
682 unsigned int mo = obj2msgobj(o); 676 unsigned int mo = obj2msgobj(o);
677 struct can_frame *cf;
678 u8 ctrl1;
679
680 ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
683 681
684 /* Nothing more to send, switch off interrupts */
685 cc770_write_reg(priv, msgobj[mo].ctrl0, 682 cc770_write_reg(priv, msgobj[mo].ctrl0,
686 MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); 683 MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
687 /* 684 cc770_write_reg(priv, msgobj[mo].ctrl1,
688 * We had some cases of repeated IRQ so make sure the 685 RMTPND_RES | TXRQST_RES | MSGLST_RES | NEWDAT_RES);
689 * INT is acknowledged 686
687 if (unlikely(!priv->tx_skb)) {
688 netdev_err(dev, "missing tx skb in tx interrupt\n");
689 return;
690 }
691
692 if (unlikely(ctrl1 & MSGLST_SET)) {
693 stats->rx_over_errors++;
694 stats->rx_errors++;
695 }
696
697 /* When the CC770 is sending an RTR message and it receives a regular
698 * message that matches the id of the RTR message, it will overwrite the
699 * outgoing message in the TX register. When this happens we must
700 * process the received message and try to transmit the outgoing skb
701 * again.
690 */ 702 */
691 cc770_write_reg(priv, msgobj[mo].ctrl0, 703 if (unlikely(ctrl1 & NEWDAT_SET)) {
692 MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); 704 cc770_rx(dev, mo, ctrl1);
705 cc770_tx(dev, mo);
706 return;
707 }
693 708
709 cf = (struct can_frame *)priv->tx_skb->data;
710 stats->tx_bytes += cf->can_dlc;
694 stats->tx_packets++; 711 stats->tx_packets++;
712
713 can_put_echo_skb(priv->tx_skb, dev, 0);
695 can_get_echo_skb(dev, 0); 714 can_get_echo_skb(dev, 0);
715 priv->tx_skb = NULL;
716
696 netif_wake_queue(dev); 717 netif_wake_queue(dev);
697} 718}
698 719
@@ -804,6 +825,7 @@ struct net_device *alloc_cc770dev(int sizeof_priv)
804 priv->can.do_set_bittiming = cc770_set_bittiming; 825 priv->can.do_set_bittiming = cc770_set_bittiming;
805 priv->can.do_set_mode = cc770_set_mode; 826 priv->can.do_set_mode = cc770_set_mode;
806 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; 827 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
828 priv->tx_skb = NULL;
807 829
808 memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags)); 830 memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags));
809 831
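The cc770 hunks above split transmission into a deferred path: cc770_start_xmit() only parks the skb in priv->tx_skb, and the TX interrupt either completes the echo or, if NEWDAT shows that a received frame overwrote a pending RTR transmit, receives that frame and retries the transmit. A minimal sketch of that flow (illustrative only, reusing the names from the diff above):

/* Sketch, not part of the patch; register helpers and msgobj[] as in cc770.c. */
static netdev_tx_t sketch_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cc770_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);		/* single TX object: one frame in flight */
	priv->tx_skb = skb;		/* keep the skb until the TX interrupt */
	cc770_tx(dev, obj2msgobj(CC770_OBJ_TX));
	return NETDEV_TX_OK;
}

static void sketch_tx_interrupt(struct net_device *dev, unsigned int mo)
{
	struct cc770_priv *priv = netdev_priv(dev);
	u8 ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);

	if (ctrl1 & NEWDAT_SET) {	/* RX frame overwrote the pending RTR TX */
		cc770_rx(dev, mo, ctrl1);
		cc770_tx(dev, mo);	/* retry the stored skb */
		return;
	}
	can_put_echo_skb(priv->tx_skb, dev, 0);	/* echo only after real completion */
	can_get_echo_skb(dev, 0);
	priv->tx_skb = NULL;
	netif_wake_queue(dev);
}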
diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h
index a1739db98d91..95752e1d1283 100644
--- a/drivers/net/can/cc770/cc770.h
+++ b/drivers/net/can/cc770/cc770.h
@@ -193,6 +193,8 @@ struct cc770_priv {
193 u8 cpu_interface; /* CPU interface register */ 193 u8 cpu_interface; /* CPU interface register */
194 u8 clkout; /* Clock out register */ 194 u8 clkout; /* Clock out register */
195 u8 bus_config; /* Bus conffiguration register */ 195 u8 bus_config; /* Bus conffiguration register */
196
197 struct sk_buff *tx_skb;
196}; 198};
197 199
198struct net_device *alloc_cc770dev(int sizeof_priv); 200struct net_device *alloc_cc770dev(int sizeof_priv);
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 2772d05ff11c..fedd927ba6ed 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -30,6 +30,7 @@
30#define IFI_CANFD_STCMD_ERROR_ACTIVE BIT(2) 30#define IFI_CANFD_STCMD_ERROR_ACTIVE BIT(2)
31#define IFI_CANFD_STCMD_ERROR_PASSIVE BIT(3) 31#define IFI_CANFD_STCMD_ERROR_PASSIVE BIT(3)
32#define IFI_CANFD_STCMD_BUSOFF BIT(4) 32#define IFI_CANFD_STCMD_BUSOFF BIT(4)
33#define IFI_CANFD_STCMD_ERROR_WARNING BIT(5)
33#define IFI_CANFD_STCMD_BUSMONITOR BIT(16) 34#define IFI_CANFD_STCMD_BUSMONITOR BIT(16)
34#define IFI_CANFD_STCMD_LOOPBACK BIT(18) 35#define IFI_CANFD_STCMD_LOOPBACK BIT(18)
35#define IFI_CANFD_STCMD_DISABLE_CANFD BIT(24) 36#define IFI_CANFD_STCMD_DISABLE_CANFD BIT(24)
@@ -52,7 +53,10 @@
52#define IFI_CANFD_TXSTCMD_OVERFLOW BIT(13) 53#define IFI_CANFD_TXSTCMD_OVERFLOW BIT(13)
53 54
54#define IFI_CANFD_INTERRUPT 0xc 55#define IFI_CANFD_INTERRUPT 0xc
56#define IFI_CANFD_INTERRUPT_ERROR_BUSOFF BIT(0)
55#define IFI_CANFD_INTERRUPT_ERROR_WARNING BIT(1) 57#define IFI_CANFD_INTERRUPT_ERROR_WARNING BIT(1)
58#define IFI_CANFD_INTERRUPT_ERROR_STATE_CHG BIT(2)
59#define IFI_CANFD_INTERRUPT_ERROR_REC_TEC_INC BIT(3)
56#define IFI_CANFD_INTERRUPT_ERROR_COUNTER BIT(10) 60#define IFI_CANFD_INTERRUPT_ERROR_COUNTER BIT(10)
57#define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY BIT(16) 61#define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY BIT(16)
58#define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE BIT(22) 62#define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE BIT(22)
@@ -61,6 +65,10 @@
61#define IFI_CANFD_INTERRUPT_SET_IRQ ((u32)BIT(31)) 65#define IFI_CANFD_INTERRUPT_SET_IRQ ((u32)BIT(31))
62 66
63#define IFI_CANFD_IRQMASK 0x10 67#define IFI_CANFD_IRQMASK 0x10
68#define IFI_CANFD_IRQMASK_ERROR_BUSOFF BIT(0)
69#define IFI_CANFD_IRQMASK_ERROR_WARNING BIT(1)
70#define IFI_CANFD_IRQMASK_ERROR_STATE_CHG BIT(2)
71#define IFI_CANFD_IRQMASK_ERROR_REC_TEC_INC BIT(3)
64#define IFI_CANFD_IRQMASK_SET_ERR BIT(7) 72#define IFI_CANFD_IRQMASK_SET_ERR BIT(7)
65#define IFI_CANFD_IRQMASK_SET_TS BIT(15) 73#define IFI_CANFD_IRQMASK_SET_TS BIT(15)
66#define IFI_CANFD_IRQMASK_TXFIFO_EMPTY BIT(16) 74#define IFI_CANFD_IRQMASK_TXFIFO_EMPTY BIT(16)
@@ -136,6 +144,8 @@
136#define IFI_CANFD_SYSCLOCK 0x50 144#define IFI_CANFD_SYSCLOCK 0x50
137 145
138#define IFI_CANFD_VER 0x54 146#define IFI_CANFD_VER 0x54
147#define IFI_CANFD_VER_REV_MASK 0xff
148#define IFI_CANFD_VER_REV_MIN_SUPPORTED 0x15
139 149
140#define IFI_CANFD_IP_ID 0x58 150#define IFI_CANFD_IP_ID 0x58
141#define IFI_CANFD_IP_ID_VALUE 0xD073CAFD 151#define IFI_CANFD_IP_ID_VALUE 0xD073CAFD
@@ -220,7 +230,10 @@ static void ifi_canfd_irq_enable(struct net_device *ndev, bool enable)
220 230
221 if (enable) { 231 if (enable) {
222 enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY | 232 enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY |
223 IFI_CANFD_IRQMASK_RXFIFO_NEMPTY; 233 IFI_CANFD_IRQMASK_RXFIFO_NEMPTY |
234 IFI_CANFD_IRQMASK_ERROR_STATE_CHG |
235 IFI_CANFD_IRQMASK_ERROR_WARNING |
236 IFI_CANFD_IRQMASK_ERROR_BUSOFF;
224 if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) 237 if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
225 enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER; 238 enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER;
226 } 239 }
@@ -361,12 +374,13 @@ static int ifi_canfd_handle_lost_msg(struct net_device *ndev)
361 return 1; 374 return 1;
362} 375}
363 376
364static int ifi_canfd_handle_lec_err(struct net_device *ndev, const u32 errctr) 377static int ifi_canfd_handle_lec_err(struct net_device *ndev)
365{ 378{
366 struct ifi_canfd_priv *priv = netdev_priv(ndev); 379 struct ifi_canfd_priv *priv = netdev_priv(ndev);
367 struct net_device_stats *stats = &ndev->stats; 380 struct net_device_stats *stats = &ndev->stats;
368 struct can_frame *cf; 381 struct can_frame *cf;
369 struct sk_buff *skb; 382 struct sk_buff *skb;
383 u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
370 const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST | 384 const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST |
371 IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST | 385 IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST |
372 IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST | 386 IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST |
@@ -449,6 +463,11 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
449 463
450 switch (new_state) { 464 switch (new_state) {
451 case CAN_STATE_ERROR_ACTIVE: 465 case CAN_STATE_ERROR_ACTIVE:
466 /* error active state */
467 priv->can.can_stats.error_warning++;
468 priv->can.state = CAN_STATE_ERROR_ACTIVE;
469 break;
470 case CAN_STATE_ERROR_WARNING:
452 /* error warning state */ 471 /* error warning state */
453 priv->can.can_stats.error_warning++; 472 priv->can.can_stats.error_warning++;
454 priv->can.state = CAN_STATE_ERROR_WARNING; 473 priv->can.state = CAN_STATE_ERROR_WARNING;
@@ -477,7 +496,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
477 ifi_canfd_get_berr_counter(ndev, &bec); 496 ifi_canfd_get_berr_counter(ndev, &bec);
478 497
479 switch (new_state) { 498 switch (new_state) {
480 case CAN_STATE_ERROR_ACTIVE: 499 case CAN_STATE_ERROR_WARNING:
481 /* error warning state */ 500 /* error warning state */
482 cf->can_id |= CAN_ERR_CRTL; 501 cf->can_id |= CAN_ERR_CRTL;
483 cf->data[1] = (bec.txerr > bec.rxerr) ? 502 cf->data[1] = (bec.txerr > bec.rxerr) ?
@@ -510,22 +529,21 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
510 return 1; 529 return 1;
511} 530}
512 531
513static int ifi_canfd_handle_state_errors(struct net_device *ndev, u32 stcmd) 532static int ifi_canfd_handle_state_errors(struct net_device *ndev)
514{ 533{
515 struct ifi_canfd_priv *priv = netdev_priv(ndev); 534 struct ifi_canfd_priv *priv = netdev_priv(ndev);
535 u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
516 int work_done = 0; 536 int work_done = 0;
517 u32 isr;
518 537
519 /* 538 if ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) &&
520 * The ErrWarn condition is a little special, since the bit is 539 (priv->can.state != CAN_STATE_ERROR_ACTIVE)) {
521 * located in the INTERRUPT register instead of STCMD register. 540 netdev_dbg(ndev, "Error, entered active state\n");
522 */ 541 work_done += ifi_canfd_handle_state_change(ndev,
523 isr = readl(priv->base + IFI_CANFD_INTERRUPT); 542 CAN_STATE_ERROR_ACTIVE);
524 if ((isr & IFI_CANFD_INTERRUPT_ERROR_WARNING) && 543 }
544
545 if ((stcmd & IFI_CANFD_STCMD_ERROR_WARNING) &&
525 (priv->can.state != CAN_STATE_ERROR_WARNING)) { 546 (priv->can.state != CAN_STATE_ERROR_WARNING)) {
526 /* Clear the interrupt */
527 writel(IFI_CANFD_INTERRUPT_ERROR_WARNING,
528 priv->base + IFI_CANFD_INTERRUPT);
529 netdev_dbg(ndev, "Error, entered warning state\n"); 547 netdev_dbg(ndev, "Error, entered warning state\n");
530 work_done += ifi_canfd_handle_state_change(ndev, 548 work_done += ifi_canfd_handle_state_change(ndev,
531 CAN_STATE_ERROR_WARNING); 549 CAN_STATE_ERROR_WARNING);
@@ -552,18 +570,11 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
552{ 570{
553 struct net_device *ndev = napi->dev; 571 struct net_device *ndev = napi->dev;
554 struct ifi_canfd_priv *priv = netdev_priv(ndev); 572 struct ifi_canfd_priv *priv = netdev_priv(ndev);
555 const u32 stcmd_state_mask = IFI_CANFD_STCMD_ERROR_PASSIVE |
556 IFI_CANFD_STCMD_BUSOFF;
557 int work_done = 0;
558
559 u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
560 u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD); 573 u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
561 u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR); 574 int work_done = 0;
562 575
563 /* Handle bus state changes */ 576 /* Handle bus state changes */
564 if ((stcmd & stcmd_state_mask) || 577 work_done += ifi_canfd_handle_state_errors(ndev);
565 ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) == 0))
566 work_done += ifi_canfd_handle_state_errors(ndev, stcmd);
567 578
568 /* Handle lost messages on RX */ 579 /* Handle lost messages on RX */
569 if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW) 580 if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW)
@@ -571,7 +582,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
571 582
572 /* Handle lec errors on the bus */ 583 /* Handle lec errors on the bus */
573 if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) 584 if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
574 work_done += ifi_canfd_handle_lec_err(ndev, errctr); 585 work_done += ifi_canfd_handle_lec_err(ndev);
575 586
576 /* Handle normal messages on RX */ 587 /* Handle normal messages on RX */
577 if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY)) 588 if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY))
@@ -592,12 +603,13 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
592 struct net_device_stats *stats = &ndev->stats; 603 struct net_device_stats *stats = &ndev->stats;
593 const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY | 604 const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY |
594 IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER | 605 IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER |
606 IFI_CANFD_INTERRUPT_ERROR_COUNTER |
607 IFI_CANFD_INTERRUPT_ERROR_STATE_CHG |
595 IFI_CANFD_INTERRUPT_ERROR_WARNING | 608 IFI_CANFD_INTERRUPT_ERROR_WARNING |
596 IFI_CANFD_INTERRUPT_ERROR_COUNTER; 609 IFI_CANFD_INTERRUPT_ERROR_BUSOFF;
597 const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY | 610 const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY |
598 IFI_CANFD_INTERRUPT_TXFIFO_REMOVE; 611 IFI_CANFD_INTERRUPT_TXFIFO_REMOVE;
599 const u32 clr_irq_mask = ~((u32)(IFI_CANFD_INTERRUPT_SET_IRQ | 612 const u32 clr_irq_mask = ~((u32)IFI_CANFD_INTERRUPT_SET_IRQ);
600 IFI_CANFD_INTERRUPT_ERROR_WARNING));
601 u32 isr; 613 u32 isr;
602 614
603 isr = readl(priv->base + IFI_CANFD_INTERRUPT); 615 isr = readl(priv->base + IFI_CANFD_INTERRUPT);
@@ -933,7 +945,7 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
933 struct resource *res; 945 struct resource *res;
934 void __iomem *addr; 946 void __iomem *addr;
935 int irq, ret; 947 int irq, ret;
936 u32 id; 948 u32 id, rev;
937 949
938 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 950 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
939 addr = devm_ioremap_resource(dev, res); 951 addr = devm_ioremap_resource(dev, res);
@@ -947,6 +959,13 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
947 return -EINVAL; 959 return -EINVAL;
948 } 960 }
949 961
962 rev = readl(addr + IFI_CANFD_VER) & IFI_CANFD_VER_REV_MASK;
963 if (rev < IFI_CANFD_VER_REV_MIN_SUPPORTED) {
964 dev_err(dev, "This block is too old (rev %i), minimum supported is rev %i\n",
965 rev, IFI_CANFD_VER_REV_MIN_SUPPORTED);
966 return -EINVAL;
967 }
968
950 ndev = alloc_candev(sizeof(*priv), 1); 969 ndev = alloc_candev(sizeof(*priv), 1);
951 if (!ndev) 970 if (!ndev)
952 return -ENOMEM; 971 return -ENOMEM;
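The ifi_canfd rework above stops deriving bus-state changes from the (now unconditionally cleared) interrupt flag and instead reads the STCMD register in the poll path. A compact sketch of that dispatch, using the register and helper names from the hunks; the error-passive and bus-off branches are assumed to follow the same shape against their own STCMD bits:

/* Sketch only; mirrors ifi_canfd_handle_state_errors() after this patch. */
static int sketch_handle_state_errors(struct net_device *ndev)
{
	struct ifi_canfd_priv *priv = netdev_priv(ndev);
	u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
	int work_done = 0;

	if ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) &&
	    priv->can.state != CAN_STATE_ERROR_ACTIVE)
		work_done += ifi_canfd_handle_state_change(ndev,
							   CAN_STATE_ERROR_ACTIVE);

	if ((stcmd & IFI_CANFD_STCMD_ERROR_WARNING) &&
	    priv->can.state != CAN_STATE_ERROR_WARNING)
		work_done += ifi_canfd_handle_state_change(ndev,
							   CAN_STATE_ERROR_WARNING);

	/* ERROR_PASSIVE and BUSOFF checks follow the same pattern. */
	return work_done;
}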
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 2594f7779c6f..b397a33f3d32 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -26,6 +26,7 @@
26#include <linux/pm_runtime.h> 26#include <linux/pm_runtime.h>
27#include <linux/iopoll.h> 27#include <linux/iopoll.h>
28#include <linux/can/dev.h> 28#include <linux/can/dev.h>
29#include <linux/pinctrl/consumer.h>
29 30
30/* napi related */ 31/* napi related */
31#define M_CAN_NAPI_WEIGHT 64 32#define M_CAN_NAPI_WEIGHT 64
@@ -253,7 +254,7 @@ enum m_can_mram_cfg {
253 254
254/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */ 255/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
255#define RXFC_FWM_SHIFT 24 256#define RXFC_FWM_SHIFT 24
256#define RXFC_FWM_MASK (0x7f < RXFC_FWM_SHIFT) 257#define RXFC_FWM_MASK (0x7f << RXFC_FWM_SHIFT)
257#define RXFC_FS_SHIFT 16 258#define RXFC_FS_SHIFT 16
258#define RXFC_FS_MASK (0x7f << RXFC_FS_SHIFT) 259#define RXFC_FS_MASK (0x7f << RXFC_FS_SHIFT)
259 260
@@ -1700,6 +1701,8 @@ static __maybe_unused int m_can_suspend(struct device *dev)
1700 m_can_clk_stop(priv); 1701 m_can_clk_stop(priv);
1701 } 1702 }
1702 1703
1704 pinctrl_pm_select_sleep_state(dev);
1705
1703 priv->can.state = CAN_STATE_SLEEPING; 1706 priv->can.state = CAN_STATE_SLEEPING;
1704 1707
1705 return 0; 1708 return 0;
@@ -1710,6 +1713,8 @@ static __maybe_unused int m_can_resume(struct device *dev)
1710 struct net_device *ndev = dev_get_drvdata(dev); 1713 struct net_device *ndev = dev_get_drvdata(dev);
1711 struct m_can_priv *priv = netdev_priv(ndev); 1714 struct m_can_priv *priv = netdev_priv(ndev);
1712 1715
1716 pinctrl_pm_select_default_state(dev);
1717
1713 m_can_init_ram(priv); 1718 m_can_init_ram(priv);
1714 1719
1715 priv->can.state = CAN_STATE_ERROR_ACTIVE; 1720 priv->can.state = CAN_STATE_ERROR_ACTIVE;
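The m_can hunks add pinctrl state switching around suspend/resume. A minimal sketch of the pattern for a generic driver whose device tree defines "default" and "sleep" pin states (driver names here are placeholders, not from the patch):

#include <linux/pinctrl/consumer.h>
#include <linux/pm.h>

/* Park the pins in their "sleep" state on suspend, restore "default" on resume. */
static int __maybe_unused sketch_suspend(struct device *dev)
{
	/* ... stop the controller and gate its clocks ... */
	pinctrl_pm_select_sleep_state(dev);
	return 0;
}

static int __maybe_unused sketch_resume(struct device *dev)
{
	pinctrl_pm_select_default_state(dev);
	/* ... re-init RAM/clocks and restart the controller ... */
	return 0;
}

static SIMPLE_DEV_PM_OPS(sketch_pm_ops, sketch_suspend, sketch_resume);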
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 55513411a82e..ed8561d4a90f 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -262,7 +262,6 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
262 262
263 spin_lock_irqsave(&priv->echo_lock, flags); 263 spin_lock_irqsave(&priv->echo_lock, flags);
264 can_get_echo_skb(priv->ndev, msg->client); 264 can_get_echo_skb(priv->ndev, msg->client);
265 spin_unlock_irqrestore(&priv->echo_lock, flags);
266 265
267 /* count bytes of the echo instead of skb */ 266 /* count bytes of the echo instead of skb */
268 stats->tx_bytes += cf_len; 267 stats->tx_bytes += cf_len;
@@ -271,6 +270,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
271 /* restart tx queue (a slot is free) */ 270 /* restart tx queue (a slot is free) */
272 netif_wake_queue(priv->ndev); 271 netif_wake_queue(priv->ndev);
273 272
273 spin_unlock_irqrestore(&priv->echo_lock, flags);
274 return 0; 274 return 0;
275 } 275 }
276 276
@@ -333,7 +333,6 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
333 333
334 /* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */ 334 /* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */
335 if (pucan_status_is_rx_barrier(msg)) { 335 if (pucan_status_is_rx_barrier(msg)) {
336 unsigned long flags;
337 336
338 if (priv->enable_tx_path) { 337 if (priv->enable_tx_path) {
339 int err = priv->enable_tx_path(priv); 338 int err = priv->enable_tx_path(priv);
@@ -342,16 +341,8 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
342 return err; 341 return err;
343 } 342 }
344 343
345 /* restart network queue only if echo skb array is free */ 344 /* start network queue (echo_skb array is empty) */
346 spin_lock_irqsave(&priv->echo_lock, flags); 345 netif_start_queue(ndev);
347
348 if (!priv->can.echo_skb[priv->echo_idx]) {
349 spin_unlock_irqrestore(&priv->echo_lock, flags);
350
351 netif_wake_queue(ndev);
352 } else {
353 spin_unlock_irqrestore(&priv->echo_lock, flags);
354 }
355 346
356 return 0; 347 return 0;
357 } 348 }
@@ -726,11 +717,6 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
726 */ 717 */
727 should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]); 718 should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]);
728 719
729 spin_unlock_irqrestore(&priv->echo_lock, flags);
730
731 /* write the skb on the interface */
732 priv->write_tx_msg(priv, msg);
733
734 /* stop network tx queue if not enough room to save one more msg too */ 720 /* stop network tx queue if not enough room to save one more msg too */
735 if (priv->can.ctrlmode & CAN_CTRLMODE_FD) 721 if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
736 should_stop_tx_queue |= (room_left < 722 should_stop_tx_queue |= (room_left <
@@ -742,6 +728,11 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
742 if (should_stop_tx_queue) 728 if (should_stop_tx_queue)
743 netif_stop_queue(ndev); 729 netif_stop_queue(ndev);
744 730
731 spin_unlock_irqrestore(&priv->echo_lock, flags);
732
733 /* write the skb on the interface */
734 priv->write_tx_msg(priv, msg);
735
745 return NETDEV_TX_OK; 736 return NETDEV_TX_OK;
746} 737}
747 738
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 788c3464a3b0..3c51a884db87 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -349,8 +349,12 @@ static irqreturn_t pciefd_irq_handler(int irq, void *arg)
349 priv->tx_pages_free++; 349 priv->tx_pages_free++;
350 spin_unlock_irqrestore(&priv->tx_lock, flags); 350 spin_unlock_irqrestore(&priv->tx_lock, flags);
351 351
352 /* wake producer up */ 352 /* wake producer up (only if enough room in echo_skb array) */
353 netif_wake_queue(priv->ucan.ndev); 353 spin_lock_irqsave(&priv->ucan.echo_lock, flags);
354 if (!priv->ucan.can.echo_skb[priv->ucan.echo_idx])
355 netif_wake_queue(priv->ucan.ndev);
356
357 spin_unlock_irqrestore(&priv->ucan.echo_lock, flags);
354 } 358 }
355 359
356 /* re-enable Rx DMA transfer for this CAN */ 360 /* re-enable Rx DMA transfer for this CAN */
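Both PEAK patches above move the queue wake/start decision under echo_lock, so the "is the next echo slot free?" check cannot race with start_xmit(). A hedged sketch of the IRQ-side wake-up (struct name assumed to be the driver's per-channel private; fields as shown in the hunks):

/* Sketch of the locked wake-up added in pciefd_irq_handler(). */
static void sketch_tx_done_irq(struct pciefd_can *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->ucan.echo_lock, flags);
	/* wake the producer only if the next echo slot is really free */
	if (!priv->ucan.can.echo_skb[priv->ucan.echo_idx])
		netif_wake_queue(priv->ucan.ndev);
	spin_unlock_irqrestore(&priv->ucan.echo_lock, flags);
}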
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index d040aeb45172..15c2a831edf1 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -1,7 +1,10 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o 2obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o
3bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o 3bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o
4obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o dsa_loop_bdinfo.o 4obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o
5ifdef CONFIG_NET_DSA_LOOP
6obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o
7endif
5obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o 8obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
6obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o 9obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
7obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o 10obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index cd16067265dd..78616787f2a3 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -814,8 +814,8 @@ void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
814 unsigned int i; 814 unsigned int i;
815 815
816 for (i = 0; i < mib_size; i++) 816 for (i = 0; i < mib_size; i++)
817 memcpy(data + i * ETH_GSTRING_LEN, 817 strlcpy(data + i * ETH_GSTRING_LEN,
818 mibs[i].name, ETH_GSTRING_LEN); 818 mibs[i].name, ETH_GSTRING_LEN);
819} 819}
820EXPORT_SYMBOL(b53_get_strings); 820EXPORT_SYMBOL(b53_get_strings);
821 821
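The b53 hunk swaps memcpy() for strlcpy() when filling the ethtool string table, so short MIB names no longer drag whatever follows their NUL into the reported strings. A tiny sketch of the idiom (helper name is illustrative):

#include <linux/ethtool.h>
#include <linux/string.h>

/* Copy each name into its own ETH_GSTRING_LEN slot, stopping at the NUL. */
static void sketch_get_strings(uint8_t *data, const char * const *names,
			       unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		strlcpy(data + i * ETH_GSTRING_LEN, names[i], ETH_GSTRING_LEN);
}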
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index 29c3075bfb05..fdc673484add 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5config NET_VENDOR_8390 5config NET_VENDOR_8390
6 bool "National Semi-conductor 8390 devices" 6 bool "National Semiconductor 8390 devices"
7 default y 7 default y
8 depends on NET_VENDOR_NATSEMI 8 depends on NET_VENDOR_NATSEMI
9 ---help--- 9 ---help---
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 0b49f1aeebd3..fc7383106946 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -36,6 +36,8 @@
36#define AQ_CFG_TX_FRAME_MAX (16U * 1024U) 36#define AQ_CFG_TX_FRAME_MAX (16U * 1024U)
37#define AQ_CFG_RX_FRAME_MAX (4U * 1024U) 37#define AQ_CFG_RX_FRAME_MAX (4U * 1024U)
38 38
39#define AQ_CFG_TX_CLEAN_BUDGET 256U
40
39/* LRO */ 41/* LRO */
40#define AQ_CFG_IS_LRO_DEF 1U 42#define AQ_CFG_IS_LRO_DEF 1U
41 43
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index ebbaf63eaf47..c96a92118b8b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -247,6 +247,8 @@ void aq_nic_ndev_init(struct aq_nic_s *self)
247 self->ndev->hw_features |= aq_hw_caps->hw_features; 247 self->ndev->hw_features |= aq_hw_caps->hw_features;
248 self->ndev->features = aq_hw_caps->hw_features; 248 self->ndev->features = aq_hw_caps->hw_features;
249 self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; 249 self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
250 self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
251
250 self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; 252 self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
251 self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN; 253 self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
252 254
@@ -937,3 +939,23 @@ err_exit:
937out: 939out:
938 return err; 940 return err;
939} 941}
942
943void aq_nic_shutdown(struct aq_nic_s *self)
944{
945 int err = 0;
946
947 if (!self->ndev)
948 return;
949
950 rtnl_lock();
951
952 netif_device_detach(self->ndev);
953
954 err = aq_nic_stop(self);
955 if (err < 0)
956 goto err_exit;
957 aq_nic_deinit(self);
958
959err_exit:
960 rtnl_unlock();
961} \ No newline at end of file
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index d16b0f1a95aa..219b550d1665 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -118,5 +118,6 @@ struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
118u32 aq_nic_get_fw_version(struct aq_nic_s *self); 118u32 aq_nic_get_fw_version(struct aq_nic_s *self);
119int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg); 119int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
120int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self); 120int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
121void aq_nic_shutdown(struct aq_nic_s *self);
121 122
122#endif /* AQ_NIC_H */ 123#endif /* AQ_NIC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 87c4308b52a7..ecc6306f940f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -323,6 +323,20 @@ static void aq_pci_remove(struct pci_dev *pdev)
323 pci_disable_device(pdev); 323 pci_disable_device(pdev);
324} 324}
325 325
326static void aq_pci_shutdown(struct pci_dev *pdev)
327{
328 struct aq_nic_s *self = pci_get_drvdata(pdev);
329
330 aq_nic_shutdown(self);
331
332 pci_disable_device(pdev);
333
334 if (system_state == SYSTEM_POWER_OFF) {
335 pci_wake_from_d3(pdev, false);
336 pci_set_power_state(pdev, PCI_D3hot);
337 }
338}
339
326static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg) 340static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
327{ 341{
328 struct aq_nic_s *self = pci_get_drvdata(pdev); 342 struct aq_nic_s *self = pci_get_drvdata(pdev);
@@ -345,6 +359,7 @@ static struct pci_driver aq_pci_ops = {
345 .remove = aq_pci_remove, 359 .remove = aq_pci_remove,
346 .suspend = aq_pci_suspend, 360 .suspend = aq_pci_suspend,
347 .resume = aq_pci_resume, 361 .resume = aq_pci_resume,
362 .shutdown = aq_pci_shutdown,
348}; 363};
349 364
350module_pci_driver(aq_pci_ops); 365module_pci_driver(aq_pci_ops);
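The aquantia hunks register a PCI .shutdown hook so a reboot or power-off detaches the netdev, stops the NIC and, on power-off, leaves the function in D3hot. A sketch of how that hook slots into the driver structure (a restatement of the shape above, not additional behaviour):

/* Sketch following aq_pci_shutdown() above. */
static void sketch_pci_shutdown(struct pci_dev *pdev)
{
	struct aq_nic_s *self = pci_get_drvdata(pdev);

	aq_nic_shutdown(self);		/* detach netdev, stop and deinit the NIC */
	pci_disable_device(pdev);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

static struct pci_driver sketch_pci_ops = {
	/* .id_table, .probe, .remove, .suspend, .resume as before */
	.shutdown = sketch_pci_shutdown,
};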
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 0be6a11370bb..b5f1f62e8e25 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -136,11 +136,12 @@ void aq_ring_queue_stop(struct aq_ring_s *ring)
136 netif_stop_subqueue(ndev, ring->idx); 136 netif_stop_subqueue(ndev, ring->idx);
137} 137}
138 138
139void aq_ring_tx_clean(struct aq_ring_s *self) 139bool aq_ring_tx_clean(struct aq_ring_s *self)
140{ 140{
141 struct device *dev = aq_nic_get_dev(self->aq_nic); 141 struct device *dev = aq_nic_get_dev(self->aq_nic);
142 unsigned int budget = AQ_CFG_TX_CLEAN_BUDGET;
142 143
143 for (; self->sw_head != self->hw_head; 144 for (; self->sw_head != self->hw_head && budget--;
144 self->sw_head = aq_ring_next_dx(self, self->sw_head)) { 145 self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
145 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; 146 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
146 147
@@ -167,6 +168,8 @@ void aq_ring_tx_clean(struct aq_ring_s *self)
167 buff->pa = 0U; 168 buff->pa = 0U;
168 buff->eop_index = 0xffffU; 169 buff->eop_index = 0xffffU;
169 } 170 }
171
172 return !!budget;
170} 173}
171 174
172#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) 175#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
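aq_ring_tx_clean() now stops after AQ_CFG_TX_CLEAN_BUDGET descriptors and reports whether any budget was left; the NAPI poll (see the aq_vec.c hunk further down) treats an exhausted budget as "full work done" so it gets polled again. A generic sketch of that contract, with a hypothetical ring type and ring_next() helper standing in for the driver's own:

/* Budgeted TX clean: returns true if it finished with budget to spare,
 * false if the budget ran out and more descriptors remain. */
static bool sketch_tx_clean(struct sketch_ring *self, unsigned int budget)
{
	for (; self->sw_head != self->hw_head && budget--;
	     self->sw_head = ring_next(self, self->sw_head)) {
		/* unmap and free the buffer at sw_head */
	}
	return !!budget;
}

/* Caller side (as in aq_vec_poll): if (!sketch_tx_clean(...)) work_done = budget; */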
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 965fae0fb6e0..ac1329f4051d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -153,7 +153,7 @@ void aq_ring_free(struct aq_ring_s *self);
153void aq_ring_update_queue_state(struct aq_ring_s *ring); 153void aq_ring_update_queue_state(struct aq_ring_s *ring);
154void aq_ring_queue_wake(struct aq_ring_s *ring); 154void aq_ring_queue_wake(struct aq_ring_s *ring);
155void aq_ring_queue_stop(struct aq_ring_s *ring); 155void aq_ring_queue_stop(struct aq_ring_s *ring);
156void aq_ring_tx_clean(struct aq_ring_s *self); 156bool aq_ring_tx_clean(struct aq_ring_s *self);
157int aq_ring_rx_clean(struct aq_ring_s *self, 157int aq_ring_rx_clean(struct aq_ring_s *self,
158 struct napi_struct *napi, 158 struct napi_struct *napi,
159 int *work_done, 159 int *work_done,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index f890b8a5a862..d335c334fa56 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -35,12 +35,12 @@ struct aq_vec_s {
35static int aq_vec_poll(struct napi_struct *napi, int budget) 35static int aq_vec_poll(struct napi_struct *napi, int budget)
36{ 36{
37 struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi); 37 struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
38 unsigned int sw_tail_old = 0U;
38 struct aq_ring_s *ring = NULL; 39 struct aq_ring_s *ring = NULL;
40 bool was_tx_cleaned = true;
41 unsigned int i = 0U;
39 int work_done = 0; 42 int work_done = 0;
40 int err = 0; 43 int err = 0;
41 unsigned int i = 0U;
42 unsigned int sw_tail_old = 0U;
43 bool was_tx_cleaned = false;
44 44
45 if (!self) { 45 if (!self) {
46 err = -EINVAL; 46 err = -EINVAL;
@@ -57,9 +57,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
57 57
58 if (ring[AQ_VEC_TX_ID].sw_head != 58 if (ring[AQ_VEC_TX_ID].sw_head !=
59 ring[AQ_VEC_TX_ID].hw_head) { 59 ring[AQ_VEC_TX_ID].hw_head) {
60 aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); 60 was_tx_cleaned = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
61 aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]); 61 aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
62 was_tx_cleaned = true;
63 } 62 }
64 63
65 err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw, 64 err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
@@ -90,7 +89,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
90 } 89 }
91 } 90 }
92 91
93 if (was_tx_cleaned) 92 if (!was_tx_cleaned)
94 work_done = budget; 93 work_done = budget;
95 94
96 if (work_done < budget) { 95 if (work_done < budget) {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 967f0fd07fcf..d3b847ec7465 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -21,6 +21,10 @@
21 21
22#define HW_ATL_UCP_0X370_REG 0x0370U 22#define HW_ATL_UCP_0X370_REG 0x0370U
23 23
24#define HW_ATL_MIF_CMD 0x0200U
25#define HW_ATL_MIF_ADDR 0x0208U
26#define HW_ATL_MIF_VAL 0x020CU
27
24#define HW_ATL_FW_SM_RAM 0x2U 28#define HW_ATL_FW_SM_RAM 0x2U
25#define HW_ATL_MPI_FW_VERSION 0x18 29#define HW_ATL_MPI_FW_VERSION 0x18
26#define HW_ATL_MPI_CONTROL_ADR 0x0368U 30#define HW_ATL_MPI_CONTROL_ADR 0x0368U
@@ -79,16 +83,15 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
79 83
80static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self) 84static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
81{ 85{
86 u32 gsr, val;
82 int k = 0; 87 int k = 0;
83 u32 gsr;
84 88
85 aq_hw_write_reg(self, 0x404, 0x40e1); 89 aq_hw_write_reg(self, 0x404, 0x40e1);
86 AQ_HW_SLEEP(50); 90 AQ_HW_SLEEP(50);
87 91
88 /* Cleanup SPI */ 92 /* Cleanup SPI */
89 aq_hw_write_reg(self, 0x534, 0xA0); 93 val = aq_hw_read_reg(self, 0x53C);
90 aq_hw_write_reg(self, 0x100, 0x9F); 94 aq_hw_write_reg(self, 0x53C, val | 0x10);
91 aq_hw_write_reg(self, 0x100, 0x809F);
92 95
93 gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR); 96 gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
94 aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000); 97 aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);
@@ -97,7 +100,14 @@ static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
97 aq_hw_write_reg(self, 0x404, 0x80e0); 100 aq_hw_write_reg(self, 0x404, 0x80e0);
98 aq_hw_write_reg(self, 0x32a8, 0x0); 101 aq_hw_write_reg(self, 0x32a8, 0x0);
99 aq_hw_write_reg(self, 0x520, 0x1); 102 aq_hw_write_reg(self, 0x520, 0x1);
103
104 /* Reset SPI again because of possible interrupted SPI burst */
105 val = aq_hw_read_reg(self, 0x53C);
106 aq_hw_write_reg(self, 0x53C, val | 0x10);
100 AQ_HW_SLEEP(10); 107 AQ_HW_SLEEP(10);
108 /* Clear SPI reset state */
109 aq_hw_write_reg(self, 0x53C, val & ~0x10);
110
101 aq_hw_write_reg(self, 0x404, 0x180e0); 111 aq_hw_write_reg(self, 0x404, 0x180e0);
102 112
103 for (k = 0; k < 1000; k++) { 113 for (k = 0; k < 1000; k++) {
@@ -141,13 +151,15 @@ static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
141 aq_pr_err("FW kickstart failed\n"); 151 aq_pr_err("FW kickstart failed\n");
142 return -EIO; 152 return -EIO;
143 } 153 }
154 /* Old FW requires fixed delay after init */
155 AQ_HW_SLEEP(15);
144 156
145 return 0; 157 return 0;
146} 158}
147 159
148static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self) 160static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
149{ 161{
150 u32 gsr, rbl_status; 162 u32 gsr, val, rbl_status;
151 int k; 163 int k;
152 164
153 aq_hw_write_reg(self, 0x404, 0x40e1); 165 aq_hw_write_reg(self, 0x404, 0x40e1);
@@ -157,6 +169,10 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
157 /* Alter RBL status */ 169 /* Alter RBL status */
158 aq_hw_write_reg(self, 0x388, 0xDEAD); 170 aq_hw_write_reg(self, 0x388, 0xDEAD);
159 171
172 /* Cleanup SPI */
173 val = aq_hw_read_reg(self, 0x53C);
174 aq_hw_write_reg(self, 0x53C, val | 0x10);
175
160 /* Global software reset*/ 176 /* Global software reset*/
161 hw_atl_rx_rx_reg_res_dis_set(self, 0U); 177 hw_atl_rx_rx_reg_res_dis_set(self, 0U);
162 hw_atl_tx_tx_reg_res_dis_set(self, 0U); 178 hw_atl_tx_tx_reg_res_dis_set(self, 0U);
@@ -204,6 +220,8 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
204 aq_pr_err("FW kickstart failed\n"); 220 aq_pr_err("FW kickstart failed\n");
205 return -EIO; 221 return -EIO;
206 } 222 }
223 /* Old FW requires fixed delay after init */
224 AQ_HW_SLEEP(15);
207 225
208 return 0; 226 return 0;
209} 227}
@@ -255,18 +273,22 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
255 } 273 }
256 } 274 }
257 275
258 aq_hw_write_reg(self, 0x00000208U, a); 276 aq_hw_write_reg(self, HW_ATL_MIF_ADDR, a);
259
260 for (++cnt; --cnt;) {
261 u32 i = 0U;
262 277
263 aq_hw_write_reg(self, 0x00000200U, 0x00008000U); 278 for (++cnt; --cnt && !err;) {
279 aq_hw_write_reg(self, HW_ATL_MIF_CMD, 0x00008000U);
264 280
265 for (i = 1024U; 281 if (IS_CHIP_FEATURE(REVISION_B1))
266 (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) { 282 AQ_HW_WAIT_FOR(a != aq_hw_read_reg(self,
267 } 283 HW_ATL_MIF_ADDR),
284 1, 1000U);
285 else
286 AQ_HW_WAIT_FOR(!(0x100 & aq_hw_read_reg(self,
287 HW_ATL_MIF_CMD)),
288 1, 1000U);
268 289
269 *(p++) = aq_hw_read_reg(self, 0x0000020CU); 290 *(p++) = aq_hw_read_reg(self, HW_ATL_MIF_VAL);
291 a += 4;
270 } 292 }
271 293
272 hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); 294 hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
@@ -662,14 +684,18 @@ void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
662 u32 val = hw_atl_reg_glb_mif_id_get(self); 684 u32 val = hw_atl_reg_glb_mif_id_get(self);
663 u32 mif_rev = val & 0xFFU; 685 u32 mif_rev = val & 0xFFU;
664 686
665 if ((3U & mif_rev) == 1U) { 687 if ((0xFU & mif_rev) == 1U) {
666 chip_features |= 688 chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
667 HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
668 HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | 689 HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
669 HAL_ATLANTIC_UTILS_CHIP_MIPS; 690 HAL_ATLANTIC_UTILS_CHIP_MIPS;
670 } else if ((3U & mif_rev) == 2U) { 691 } else if ((0xFU & mif_rev) == 2U) {
671 chip_features |= 692 chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 |
672 HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 | 693 HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
694 HAL_ATLANTIC_UTILS_CHIP_MIPS |
695 HAL_ATLANTIC_UTILS_CHIP_TPO2 |
696 HAL_ATLANTIC_UTILS_CHIP_RPF2;
697 } else if ((0xFU & mif_rev) == 0xAU) {
698 chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 |
673 HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | 699 HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
674 HAL_ATLANTIC_UTILS_CHIP_MIPS | 700 HAL_ATLANTIC_UTILS_CHIP_MIPS |
675 HAL_ATLANTIC_UTILS_CHIP_TPO2 | 701 HAL_ATLANTIC_UTILS_CHIP_TPO2 |
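The chip-feature hunk widens the MIF revision mask from 0x3 to 0xF and adds the B1 revision (0xA). A worked sketch of the decode using the flag names from this file; the B1 flag list past what the hunk shows is assumed to mirror B0:

/* Sketch of the revision decode after this patch. */
static u32 sketch_chip_features(u32 mif_id)
{
	u32 features = 0;

	switch (mif_id & 0xFU) {
	case 1U:	/* A0 */
		features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
			    HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
			    HAL_ATLANTIC_UTILS_CHIP_MIPS;
		break;
	case 2U:	/* B0 */
	case 0xAU:	/* B1: assumed same feature set, different revision flag */
		features |= ((mif_id & 0xFU) == 2U ?
			     HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 :
			     HAL_ATLANTIC_UTILS_CHIP_REVISION_B1) |
			    HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
			    HAL_ATLANTIC_UTILS_CHIP_MIPS |
			    HAL_ATLANTIC_UTILS_CHIP_TPO2 |
			    HAL_ATLANTIC_UTILS_CHIP_RPF2;
		break;
	}
	return features;
}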
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index 2c690947910a..cd8f18f39c61 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -161,6 +161,7 @@ struct __packed hw_aq_atl_utils_mbox {
161#define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U 161#define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U
162#define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U 162#define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U
163#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U 163#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U
164#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 0x04000000U
164 165
165#define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \ 166#define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \
166 self->chip_features) 167 self->chip_features)
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
index 5265b937677b..a445de6837a6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/ver.h
+++ b/drivers/net/ethernet/aquantia/atlantic/ver.h
@@ -13,7 +13,7 @@
13#define NIC_MAJOR_DRIVER_VERSION 2 13#define NIC_MAJOR_DRIVER_VERSION 2
14#define NIC_MINOR_DRIVER_VERSION 0 14#define NIC_MINOR_DRIVER_VERSION 0
15#define NIC_BUILD_DRIVER_VERSION 2 15#define NIC_BUILD_DRIVER_VERSION 2
16#define NIC_REVISION_DRIVER_VERSION 0 16#define NIC_REVISION_DRIVER_VERSION 1
17 17
18#define AQ_CFG_DRV_VERSION_SUFFIX "-kern" 18#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
19 19
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index 16f9bee992fe..0f6576802607 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -169,8 +169,10 @@ static int emac_rockchip_probe(struct platform_device *pdev)
169 /* Optional regulator for PHY */ 169 /* Optional regulator for PHY */
170 priv->regulator = devm_regulator_get_optional(dev, "phy"); 170 priv->regulator = devm_regulator_get_optional(dev, "phy");
171 if (IS_ERR(priv->regulator)) { 171 if (IS_ERR(priv->regulator)) {
172 if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) 172 if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) {
173 return -EPROBE_DEFER; 173 err = -EPROBE_DEFER;
174 goto out_clk_disable;
175 }
174 dev_err(dev, "no regulator found\n"); 176 dev_err(dev, "no regulator found\n");
175 priv->regulator = NULL; 177 priv->regulator = NULL;
176 } 178 }
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index f15a8fc6dfc9..3fc549b88c43 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -855,10 +855,12 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
855static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, 855static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
856 struct bcm_sysport_tx_ring *ring) 856 struct bcm_sysport_tx_ring *ring)
857{ 857{
858 unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
859 unsigned int pkts_compl = 0, bytes_compl = 0; 858 unsigned int pkts_compl = 0, bytes_compl = 0;
860 struct net_device *ndev = priv->netdev; 859 struct net_device *ndev = priv->netdev;
860 unsigned int txbds_processed = 0;
861 struct bcm_sysport_cb *cb; 861 struct bcm_sysport_cb *cb;
862 unsigned int txbds_ready;
863 unsigned int c_index;
862 u32 hw_ind; 864 u32 hw_ind;
863 865
864 /* Clear status before servicing to reduce spurious interrupts */ 866 /* Clear status before servicing to reduce spurious interrupts */
@@ -871,29 +873,23 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
871 /* Compute how many descriptors have been processed since last call */ 873 /* Compute how many descriptors have been processed since last call */
872 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); 874 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
873 c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; 875 c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
874 ring->p_index = (hw_ind & RING_PROD_INDEX_MASK); 876 txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;
875
876 last_c_index = ring->c_index;
877 num_tx_cbs = ring->size;
878
879 c_index &= (num_tx_cbs - 1);
880
881 if (c_index >= last_c_index)
882 last_tx_cn = c_index - last_c_index;
883 else
884 last_tx_cn = num_tx_cbs - last_c_index + c_index;
885 877
886 netif_dbg(priv, tx_done, ndev, 878 netif_dbg(priv, tx_done, ndev,
887 "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n", 879 "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
888 ring->index, c_index, last_tx_cn, last_c_index); 880 ring->index, ring->c_index, c_index, txbds_ready);
889 881
890 while (last_tx_cn-- > 0) { 882 while (txbds_processed < txbds_ready) {
891 cb = ring->cbs + last_c_index; 883 cb = &ring->cbs[ring->clean_index];
892 bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl); 884 bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
893 885
894 ring->desc_count++; 886 ring->desc_count++;
895 last_c_index++; 887 txbds_processed++;
896 last_c_index &= (num_tx_cbs - 1); 888
889 if (likely(ring->clean_index < ring->size - 1))
890 ring->clean_index++;
891 else
892 ring->clean_index = 0;
897 } 893 }
898 894
899 u64_stats_update_begin(&priv->syncp); 895 u64_stats_update_begin(&priv->syncp);
@@ -1394,6 +1390,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
1394 netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64); 1390 netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
1395 ring->index = index; 1391 ring->index = index;
1396 ring->size = size; 1392 ring->size = size;
1393 ring->clean_index = 0;
1397 ring->alloc_size = ring->size; 1394 ring->alloc_size = ring->size;
1398 ring->desc_cpu = p; 1395 ring->desc_cpu = p;
1399 ring->desc_count = ring->size; 1396 ring->desc_count = ring->size;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index f5a984c1c986..19c91c76e327 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -706,7 +706,7 @@ struct bcm_sysport_tx_ring {
706 unsigned int desc_count; /* Number of descriptors */ 706 unsigned int desc_count; /* Number of descriptors */
707 unsigned int curr_desc; /* Current descriptor */ 707 unsigned int curr_desc; /* Current descriptor */
708 unsigned int c_index; /* Last consumer index */ 708 unsigned int c_index; /* Last consumer index */
709 unsigned int p_index; /* Current producer index */ 709 unsigned int clean_index; /* Current clean index */
710 struct bcm_sysport_cb *cbs; /* Transmit control blocks */ 710 struct bcm_sysport_cb *cbs; /* Transmit control blocks */
711 struct dma_desc *desc_cpu; /* CPU view of the descriptor */ 711 struct dma_desc *desc_cpu; /* CPU view of the descriptor */
712 struct bcm_sysport_priv *priv; /* private context backpointer */ 712 struct bcm_sysport_priv *priv; /* private context backpointer */
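The SYSTEMPORT reclaim rewrite above derives the number of ready descriptors from the hardware consumer index with mask arithmetic and walks a separate software clean_index with explicit wraparound, instead of mixing the consumer index with the ring size. A compact sketch of that arithmetic, reusing the ring fields and index macros from the hunks:

/* Sketch of the reclaim arithmetic introduced above. */
static unsigned int sketch_tx_reclaim(struct bcm_sysport_tx_ring *ring, u32 hw_ind)
{
	unsigned int c_index, txbds_ready, txbds_processed = 0;

	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	/* distance from the last seen consumer index, modulo the index space */
	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;

	while (txbds_processed < txbds_ready) {
		/* reclaim ring->cbs[ring->clean_index] here */
		txbds_processed++;
		if (likely(ring->clean_index < ring->size - 1))
			ring->clean_index++;
		else
			ring->clean_index = 0;
	}

	ring->c_index = c_index;	/* remember where hardware got to */
	return txbds_processed;
}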
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 74fc9af4aadb..b8388e93520a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13913,7 +13913,7 @@ static void bnx2x_register_phc(struct bnx2x *bp)
13913 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev); 13913 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
13914 if (IS_ERR(bp->ptp_clock)) { 13914 if (IS_ERR(bp->ptp_clock)) {
13915 bp->ptp_clock = NULL; 13915 bp->ptp_clock = NULL;
13916 BNX2X_ERR("PTP clock registeration failed\n"); 13916 BNX2X_ERR("PTP clock registration failed\n");
13917 } 13917 }
13918} 13918}
13919 13919
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 1500243b9886..c7e5e6f09647 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1439,7 +1439,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1439 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 1439 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1440 u16 vlan_proto = tpa_info->metadata >> 1440 u16 vlan_proto = tpa_info->metadata >>
1441 RX_CMP_FLAGS2_METADATA_TPID_SFT; 1441 RX_CMP_FLAGS2_METADATA_TPID_SFT;
1442 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK; 1442 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1443 1443
1444 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1444 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1445 } 1445 }
@@ -1623,7 +1623,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
1623 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && 1623 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1624 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 1624 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1625 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); 1625 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1626 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK; 1626 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1627 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; 1627 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1628 1628
1629 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1629 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
@@ -3847,6 +3847,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
3847 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3847 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3848 struct hwrm_vnic_tpa_cfg_input req = {0}; 3848 struct hwrm_vnic_tpa_cfg_input req = {0};
3849 3849
3850 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
3851 return 0;
3852
3850 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); 3853 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
3851 3854
3852 if (tpa_flags) { 3855 if (tpa_flags) {
@@ -4558,18 +4561,17 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
4558 return rc; 4561 return rc;
4559} 4562}
4560 4563
4561static int 4564static void
4562bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 4565__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
4563 int ring_grps, int cp_rings, int vnics) 4566 int tx_rings, int rx_rings, int ring_grps,
4567 int cp_rings, int vnics)
4564{ 4568{
4565 struct hwrm_func_cfg_input req = {0};
4566 u32 enables = 0; 4569 u32 enables = 0;
4567 int rc;
4568 4570
4569 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 4571 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
4570 req.fid = cpu_to_le16(0xffff); 4572 req->fid = cpu_to_le16(0xffff);
4571 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 4573 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
4572 req.num_tx_rings = cpu_to_le16(tx_rings); 4574 req->num_tx_rings = cpu_to_le16(tx_rings);
4573 if (bp->flags & BNXT_FLAG_NEW_RM) { 4575 if (bp->flags & BNXT_FLAG_NEW_RM) {
4574 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; 4576 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
4575 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | 4577 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
@@ -4578,16 +4580,53 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4578 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 4580 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
4579 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; 4581 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
4580 4582
4581 req.num_rx_rings = cpu_to_le16(rx_rings); 4583 req->num_rx_rings = cpu_to_le16(rx_rings);
4582 req.num_hw_ring_grps = cpu_to_le16(ring_grps); 4584 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
4583 req.num_cmpl_rings = cpu_to_le16(cp_rings); 4585 req->num_cmpl_rings = cpu_to_le16(cp_rings);
4584 req.num_stat_ctxs = req.num_cmpl_rings; 4586 req->num_stat_ctxs = req->num_cmpl_rings;
4585 req.num_vnics = cpu_to_le16(vnics); 4587 req->num_vnics = cpu_to_le16(vnics);
4586 } 4588 }
4587 if (!enables) 4589 req->enables = cpu_to_le32(enables);
4590}
4591
4592static void
4593__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
4594 struct hwrm_func_vf_cfg_input *req, int tx_rings,
4595 int rx_rings, int ring_grps, int cp_rings,
4596 int vnics)
4597{
4598 u32 enables = 0;
4599
4600 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
4601 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
4602 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
4603 enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
4604 FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
4605 enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
4606 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
4607
4608 req->num_tx_rings = cpu_to_le16(tx_rings);
4609 req->num_rx_rings = cpu_to_le16(rx_rings);
4610 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
4611 req->num_cmpl_rings = cpu_to_le16(cp_rings);
4612 req->num_stat_ctxs = req->num_cmpl_rings;
4613 req->num_vnics = cpu_to_le16(vnics);
4614
4615 req->enables = cpu_to_le32(enables);
4616}
4617
4618static int
4619bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4620 int ring_grps, int cp_rings, int vnics)
4621{
4622 struct hwrm_func_cfg_input req = {0};
4623 int rc;
4624
4625 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
4626 cp_rings, vnics);
4627 if (!req.enables)
4588 return 0; 4628 return 0;
4589 4629
4590 req.enables = cpu_to_le32(enables);
4591 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4630 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4592 if (rc) 4631 if (rc)
4593 return -ENOMEM; 4632 return -ENOMEM;
@@ -4604,7 +4643,6 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4604 int ring_grps, int cp_rings, int vnics) 4643 int ring_grps, int cp_rings, int vnics)
4605{ 4644{
4606 struct hwrm_func_vf_cfg_input req = {0}; 4645 struct hwrm_func_vf_cfg_input req = {0};
4607 u32 enables = 0;
4608 int rc; 4646 int rc;
4609 4647
4610 if (!(bp->flags & BNXT_FLAG_NEW_RM)) { 4648 if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
@@ -4612,22 +4650,8 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4612 return 0; 4650 return 0;
4613 } 4651 }
4614 4652
4615 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); 4653 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
4616 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 4654 cp_rings, vnics);
4617 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
4618 enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
4619 FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
4620 enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
4621 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
4622
4623 req.num_tx_rings = cpu_to_le16(tx_rings);
4624 req.num_rx_rings = cpu_to_le16(rx_rings);
4625 req.num_hw_ring_grps = cpu_to_le16(ring_grps);
4626 req.num_cmpl_rings = cpu_to_le16(cp_rings);
4627 req.num_stat_ctxs = req.num_cmpl_rings;
4628 req.num_vnics = cpu_to_le16(vnics);
4629
4630 req.enables = cpu_to_le32(enables);
4631 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4655 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4632 if (rc) 4656 if (rc)
4633 return -ENOMEM; 4657 return -ENOMEM;
@@ -4743,39 +4767,25 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
4743} 4767}
4744 4768
4745static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 4769static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4746 int ring_grps, int cp_rings) 4770 int ring_grps, int cp_rings, int vnics)
4747{ 4771{
4748 struct hwrm_func_vf_cfg_input req = {0}; 4772 struct hwrm_func_vf_cfg_input req = {0};
4749 u32 flags, enables; 4773 u32 flags;
4750 int rc; 4774 int rc;
4751 4775
4752 if (!(bp->flags & BNXT_FLAG_NEW_RM)) 4776 if (!(bp->flags & BNXT_FLAG_NEW_RM))
4753 return 0; 4777 return 0;
4754 4778
4755 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); 4779 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
4780 cp_rings, vnics);
4756 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | 4781 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
4757 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | 4782 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
4758 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 4783 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
4759 FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | 4784 FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
4760 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 4785 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
4761 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 4786 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
4762 enables = FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS |
4763 FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
4764 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
4765 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
4766 FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS |
4767 FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS;
4768 4787
4769 req.flags = cpu_to_le32(flags); 4788 req.flags = cpu_to_le32(flags);
4770 req.enables = cpu_to_le32(enables);
4771 req.num_tx_rings = cpu_to_le16(tx_rings);
4772 req.num_rx_rings = cpu_to_le16(rx_rings);
4773 req.num_cmpl_rings = cpu_to_le16(cp_rings);
4774 req.num_hw_ring_grps = cpu_to_le16(ring_grps);
4775 req.num_stat_ctxs = cpu_to_le16(cp_rings);
4776 req.num_vnics = cpu_to_le16(1);
4777 if (bp->flags & BNXT_FLAG_RFS)
4778 req.num_vnics = cpu_to_le16(rx_rings + 1);
4779 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4789 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4780 if (rc) 4790 if (rc)
4781 return -ENOMEM; 4791 return -ENOMEM;
@@ -4783,38 +4793,23 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4783} 4793}
4784 4794
4785static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 4795static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4786 int ring_grps, int cp_rings) 4796 int ring_grps, int cp_rings, int vnics)
4787{ 4797{
4788 struct hwrm_func_cfg_input req = {0}; 4798 struct hwrm_func_cfg_input req = {0};
4789 u32 flags, enables; 4799 u32 flags;
4790 int rc; 4800 int rc;
4791 4801
4792 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 4802 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
4793 req.fid = cpu_to_le16(0xffff); 4803 cp_rings, vnics);
4794 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; 4804 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
4795 enables = FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS; 4805 if (bp->flags & BNXT_FLAG_NEW_RM)
4796 req.num_tx_rings = cpu_to_le16(tx_rings);
4797 if (bp->flags & BNXT_FLAG_NEW_RM) {
4798 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | 4806 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
4799 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 4807 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
4800 FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | 4808 FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
4801 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 4809 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
4802 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 4810 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
4803 enables |= FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | 4811
4804 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
4805 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
4806 FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
4807 FUNC_CFG_REQ_ENABLES_NUM_VNICS;
4808 req.num_rx_rings = cpu_to_le16(rx_rings);
4809 req.num_cmpl_rings = cpu_to_le16(cp_rings);
4810 req.num_hw_ring_grps = cpu_to_le16(ring_grps);
4811 req.num_stat_ctxs = cpu_to_le16(cp_rings);
4812 req.num_vnics = cpu_to_le16(1);
4813 if (bp->flags & BNXT_FLAG_RFS)
4814 req.num_vnics = cpu_to_le16(rx_rings + 1);
4815 }
4816 req.flags = cpu_to_le32(flags); 4812 req.flags = cpu_to_le32(flags);
4817 req.enables = cpu_to_le32(enables);
4818 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4813 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4819 if (rc) 4814 if (rc)
4820 return -ENOMEM; 4815 return -ENOMEM;
@@ -4822,17 +4817,17 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 }
 
 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
-				 int ring_grps, int cp_rings)
+				 int ring_grps, int cp_rings, int vnics)
 {
 	if (bp->hwrm_spec_code < 0x10801)
 		return 0;
 
 	if (BNXT_PF(bp))
 		return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
-						ring_grps, cp_rings);
+						ring_grps, cp_rings, vnics);
 
 	return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
-					cp_rings);
+					cp_rings, vnics);
 }
 
 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
@@ -5865,7 +5860,6 @@ static int bnxt_init_msix(struct bnxt *bp)
 	if (rc)
 		goto msix_setup_exit;
 
-	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
 	bp->cp_nr_rings = (min == 1) ?
 		max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
 		bp->tx_nr_rings + bp->rx_nr_rings;
@@ -5897,7 +5891,6 @@ static int bnxt_init_inta(struct bnxt *bp)
 	bp->rx_nr_rings = 1;
 	bp->tx_nr_rings = 1;
 	bp->cp_nr_rings = 1;
-	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
 	bp->flags |= BNXT_FLAG_SHARED_RINGS;
 	bp->irq_tbl[0].vector = bp->pdev->irq;
 	return 0;
@@ -7531,7 +7524,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
 	int max_rx, max_tx, tx_sets = 1;
 	int tx_rings_needed;
 	int rx_rings = rx;
-	int cp, rc;
+	int cp, vnics, rc;
 
 	if (tcs)
 		tx_sets = tcs;
@@ -7547,10 +7540,15 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
 	if (max_tx < tx_rings_needed)
 		return -ENOMEM;
 
+	vnics = 1;
+	if (bp->flags & BNXT_FLAG_RFS)
+		vnics += rx_rings;
+
 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
 		rx_rings <<= 1;
 	cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
-	return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp);
+	return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
+				     vnics);
 }
7555 7553
7556static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) 7554static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -8437,13 +8435,20 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
 		return 0;
 
 	bnxt_hwrm_func_qcaps(bp);
-	__bnxt_close_nic(bp, true, false);
+
+	if (netif_running(bp->dev))
+		__bnxt_close_nic(bp, true, false);
+
 	bnxt_clear_int_mode(bp);
 	rc = bnxt_init_int_mode(bp);
-	if (rc)
-		dev_close(bp->dev);
-	else
-		rc = bnxt_open_nic(bp, true, false);
+
+	if (netif_running(bp->dev)) {
+		if (rc)
+			dev_close(bp->dev);
+		else
+			rc = bnxt_open_nic(bp, true, false);
+	}
+
 	return rc;
 }
8449 8454
@@ -8664,6 +8669,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		goto init_err_pci_clean;
 
+	/* No TC has been set yet and rings may have been trimmed due to
+	 * limited MSIX, so we re-initialize the TX rings per TC.
+	 */
+	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+
 	bnxt_get_wol_settings(bp);
 	if (bp->flags & BNXT_FLAG_WOL_CAP)
 		device_set_wakeup_enable(&pdev->dev, bp->wol);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 1989c470172c..5e3d62189cab 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -189,6 +189,7 @@ struct rx_cmp_ext {
 	#define RX_CMP_FLAGS2_T_L4_CS_CALC		(0x1 << 3)
 	#define RX_CMP_FLAGS2_META_FORMAT_VLAN		(0x1 << 4)
 	__le32 rx_cmp_meta_data;
+	#define RX_CMP_FLAGS2_METADATA_TCI_MASK		0xffff
 	#define RX_CMP_FLAGS2_METADATA_VID_MASK		0xfff
 	#define RX_CMP_FLAGS2_METADATA_TPID_MASK	0xffff0000
 	#define RX_CMP_FLAGS2_METADATA_TPID_SFT		16
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index fbe6e208e17b..65c2cee35766 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -349,6 +349,9 @@ static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
 	if (rc)
 		netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
 			    __func__, flow_handle, rc);
+
+	if (rc)
+		rc = -EIO;
 	return rc;
 }
354 357
@@ -484,13 +487,15 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
 	req.action_flags = cpu_to_le16(action_flags);
 
 	mutex_lock(&bp->hwrm_cmd_lock);
-
 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (!rc)
 		*flow_handle = resp->flow_handle;
-
 	mutex_unlock(&bp->hwrm_cmd_lock);
 
+	if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
+		rc = -ENOSPC;
+	else if (rc)
+		rc = -EIO;
 	return rc;
 }
496 501
@@ -561,6 +566,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
 		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
 	mutex_unlock(&bp->hwrm_cmd_lock);
 
+	if (rc)
+		rc = -EIO;
 	return rc;
 }
566 573
@@ -576,6 +583,9 @@ static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc)
 		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+
+	if (rc)
+		rc = -EIO;
 	return rc;
 }
581 591
@@ -624,6 +634,8 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
 		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
 	mutex_unlock(&bp->hwrm_cmd_lock);
 
+	if (rc)
+		rc = -EIO;
 	return rc;
 }
629 641
@@ -639,6 +651,9 @@ static int hwrm_cfa_encap_record_free(struct bnxt *bp,
 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc)
 		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+
+	if (rc)
+		rc = -EIO;
 	return rc;
 }
644 659
@@ -1269,11 +1284,8 @@ static int bnxt_tc_del_flow(struct bnxt *bp,
 	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
 					   &tc_flow_cmd->cookie,
 					   tc_info->flow_ht_params);
-	if (!flow_node) {
-		netdev_info(bp->dev, "ERROR: no flow_node for cookie %lx",
-			    tc_flow_cmd->cookie);
+	if (!flow_node)
 		return -EINVAL;
-	}
 
 	return __bnxt_tc_del_flow(bp, flow_node);
 }
@@ -1290,11 +1302,8 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,
 	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
 					   &tc_flow_cmd->cookie,
 					   tc_info->flow_ht_params);
-	if (!flow_node) {
-		netdev_info(bp->dev, "Error: no flow_node for cookie %lx",
-			    tc_flow_cmd->cookie);
+	if (!flow_node)
 		return -1;
-	}
 
 	flow = &flow_node->flow;
 	curr_stats = &flow->stats;
@@ -1344,8 +1353,10 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
 	} else {
 		netdev_info(bp->dev, "error rc=%d", rc);
 	}
-
 	mutex_unlock(&bp->hwrm_cmd_lock);
+
+	if (rc)
+		rc = -EIO;
 	return rc;
 }
1351 1362
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index c1841db1b500..f2593978ae75 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -820,7 +820,7 @@ static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
 
 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 
-		usleep_range(10, 20);
+		udelay(10);
 		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
 	}
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 99c9b88d6d34..e880be8e3c45 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5234,7 +5234,6 @@ static void cxgb4_mgmt_setup(struct net_device *dev)
 	/* Initialize the device structure. */
 	dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
 	dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
-	dev->needs_free_netdev = true;
 }
 
 static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
@@ -5445,6 +5444,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	adapter->name = pci_name(pdev);
 	adapter->mbox = func;
 	adapter->pf = func;
+	adapter->params.chip = chip;
+	adapter->adap_idx = adap_idx;
 	adapter->msg_enable = DFLT_MSG_ENABLE;
 	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
 				    (sizeof(struct mbox_cmd) *
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 5eb999af2c40..bd3f6e4d1341 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -540,6 +540,7 @@ static int gmac_setup_txqs(struct net_device *netdev)
 
 	if (port->txq_dma_base & ~DMA_Q_BASE_MASK) {
 		dev_warn(geth->dev, "TX queue base it not aligned\n");
+		kfree(skb_tab);
 		return -ENOMEM;
 	}
 
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 159dc2df878d..fd43f98ddbe7 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2021,7 +2021,6 @@ static inline int dpaa_xmit(struct dpaa_priv *priv,
 	}
 
 	if (unlikely(err < 0)) {
-		percpu_stats->tx_errors++;
 		percpu_stats->tx_fifo_errors++;
 		return err;
 	}
@@ -2289,7 +2288,6 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
 	vaddr = phys_to_virt(addr);
 	prefetch(vaddr + qm_fd_get_offset(fd));
 
-	fd_format = qm_fd_get_format(fd);
 	/* The only FD types that we may receive are contig and S/G */
 	WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
 
@@ -2322,8 +2320,10 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
 
 	skb_len = skb->len;
 
-	if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
+	if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) {
+		percpu_stats->rx_dropped++;
 		return qman_cb_dqrr_consume;
+	}
 
 	percpu_stats->rx_packets++;
 	percpu_stats->rx_bytes += skb_len;
@@ -2871,7 +2871,7 @@ static int dpaa_remove(struct platform_device *pdev)
 	struct device *dev;
 	int err;
 
-	dev = &pdev->dev;
+	dev = pdev->dev.parent;
 	net_dev = dev_get_drvdata(dev);
 
 	priv = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 7a7f3a42b2aa..d4604bc8eb5b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3600,6 +3600,8 @@ fec_drv_remove(struct platform_device *pdev)
 	fec_enet_mii_remove(fep);
 	if (fep->reg_phy)
 		regulator_disable(fep->reg_phy);
+	pm_runtime_put(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
 	if (of_phy_is_fixed_link(np))
 		of_phy_deregister_fixed_link(np);
 	of_node_put(fep->phy_node);
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 9a581faaa742..57b1e2b47c0a 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -1100,7 +1100,7 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
 	set_bucket(dtsec->regs, bucket, true);
 
 	/* Create element to be added to the driver hash table */
-	hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
+	hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
 	if (!hash_entry)
 		return -ENOMEM;
 	hash_entry->addr = addr;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 86944bc3b273..74bd260ca02a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -666,7 +666,7 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data)
 
 static int hns_gmac_get_sset_count(int stringset)
 {
-	if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
+	if (stringset == ETH_SS_STATS)
 		return ARRAY_SIZE(g_gmac_stats_string);
 
 	return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index b62816c1574e..93e71e27401b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -422,7 +422,7 @@ void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb)
 
 int hns_ppe_get_sset_count(int stringset)
 {
-	if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
+	if (stringset == ETH_SS_STATS)
 		return ETH_PPE_STATIC_NUM;
 	return 0;
 }
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 6f3570cfb501..e2e28532e4dc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -876,7 +876,7 @@ void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
  */
 int hns_rcb_get_ring_sset_count(int stringset)
 {
-	if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
+	if (stringset == ETH_SS_STATS)
 		return HNS_RING_STATIC_REG_NUM;
 
 	return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 7ea7f8a4aa2a..2e14a3ae1d8b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -993,8 +993,10 @@ int hns_get_sset_count(struct net_device *netdev, int stringset)
 			cnt--;
 
 		return cnt;
-	} else {
+	} else if (stringset == ETH_SS_STATS) {
 		return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset));
+	} else {
+		return -EOPNOTSUPP;
 	}
 }
1000 1002
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index afb7ebe20b24..824fd44e25f0 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -400,6 +400,10 @@
 #define E1000_ICR_RXDMT0	0x00000010 /* Rx desc min. threshold (0) */
 #define E1000_ICR_RXO		0x00000040 /* Receiver Overrun */
 #define E1000_ICR_RXT0		0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_MDAC		0x00000200 /* MDIO Access Complete */
+#define E1000_ICR_SRPD		0x00010000 /* Small Receive Packet Detected */
+#define E1000_ICR_ACK		0x00020000 /* Receive ACK Frame Detected */
+#define E1000_ICR_MNG		0x00040000 /* Manageability Event Detected */
 #define E1000_ICR_ECCER		0x00400000 /* Uncorrectable ECC Error */
 /* If this bit asserted, the driver should claim the interrupt */
 #define E1000_ICR_INT_ASSERTED	0x80000000
@@ -407,7 +411,7 @@
 #define E1000_ICR_RXQ1		0x00200000 /* Rx Queue 1 Interrupt */
 #define E1000_ICR_TXQ0		0x00400000 /* Tx Queue 0 Interrupt */
 #define E1000_ICR_TXQ1		0x00800000 /* Tx Queue 1 Interrupt */
-#define E1000_ICR_OTHER		0x01000000 /* Other Interrupts */
+#define E1000_ICR_OTHER		0x01000000 /* Other Interrupt */
 
 /* PBA ECC Register */
 #define E1000_PBA_ECC_COUNTER_MASK	0xFFF00000 /* ECC counter mask */
@@ -431,12 +435,27 @@
 	E1000_IMS_RXSEQ |    \
 	E1000_IMS_LSC)
 
+/* These are all of the events related to the OTHER interrupt.
+ */
+#define IMS_OTHER_MASK ( \
+	E1000_IMS_LSC  | \
+	E1000_IMS_RXO  | \
+	E1000_IMS_MDAC | \
+	E1000_IMS_SRPD | \
+	E1000_IMS_ACK  | \
+	E1000_IMS_MNG)
+
 /* Interrupt Mask Set */
 #define E1000_IMS_TXDW		E1000_ICR_TXDW		/* Transmit desc written back */
 #define E1000_IMS_LSC		E1000_ICR_LSC		/* Link Status Change */
 #define E1000_IMS_RXSEQ		E1000_ICR_RXSEQ		/* Rx sequence error */
 #define E1000_IMS_RXDMT0	E1000_ICR_RXDMT0	/* Rx desc min. threshold */
+#define E1000_IMS_RXO		E1000_ICR_RXO		/* Receiver Overrun */
 #define E1000_IMS_RXT0		E1000_ICR_RXT0		/* Rx timer intr */
+#define E1000_IMS_MDAC		E1000_ICR_MDAC		/* MDIO Access Complete */
+#define E1000_IMS_SRPD		E1000_ICR_SRPD		/* Small Receive Packet */
+#define E1000_IMS_ACK		E1000_ICR_ACK		/* Receive ACK Frame Detected */
+#define E1000_IMS_MNG		E1000_ICR_MNG		/* Manageability Event */
 #define E1000_IMS_ECCER		E1000_ICR_ECCER		/* Uncorrectable ECC Error */
 #define E1000_IMS_RXQ0		E1000_ICR_RXQ0		/* Rx Queue 0 Interrupt */
 #define E1000_IMS_RXQ1		E1000_ICR_RXQ1		/* Rx Queue 1 Interrupt */
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 31277d3bb7dc..1dddfb7b2de6 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1367,9 +1367,6 @@ out:
  * Checks to see of the link status of the hardware has changed. If a
  * change in link status has been detected, then we read the PHY registers
  * to get the current speed/duplex if link exists.
- *
- * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
- * up).
  **/
 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 {
@@ -1385,7 +1382,8 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 	 * Change or Rx Sequence Error interrupt.
 	 */
 	if (!mac->get_link_status)
-		return 1;
+		return 0;
+	mac->get_link_status = false;
 
 	/* First we want to see if the MII Status Register reports
 	 * link. If so, then we want to get the current speed/duplex
@@ -1393,12 +1391,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1393 */ 1391 */
1394 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); 1392 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
1395 if (ret_val) 1393 if (ret_val)
1396 return ret_val; 1394 goto out;
1397 1395
1398 if (hw->mac.type == e1000_pchlan) { 1396 if (hw->mac.type == e1000_pchlan) {
1399 ret_val = e1000_k1_gig_workaround_hv(hw, link); 1397 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1400 if (ret_val) 1398 if (ret_val)
1401 return ret_val; 1399 goto out;
1402 } 1400 }
1403 1401
1404 /* When connected at 10Mbps half-duplex, some parts are excessively 1402 /* When connected at 10Mbps half-duplex, some parts are excessively
@@ -1431,7 +1429,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1431 1429
1432 ret_val = hw->phy.ops.acquire(hw); 1430 ret_val = hw->phy.ops.acquire(hw);
1433 if (ret_val) 1431 if (ret_val)
1434 return ret_val; 1432 goto out;
1435 1433
1436 if (hw->mac.type == e1000_pch2lan) 1434 if (hw->mac.type == e1000_pch2lan)
1437 emi_addr = I82579_RX_CONFIG; 1435 emi_addr = I82579_RX_CONFIG;
@@ -1453,7 +1451,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1453 hw->phy.ops.release(hw); 1451 hw->phy.ops.release(hw);
1454 1452
1455 if (ret_val) 1453 if (ret_val)
1456 return ret_val; 1454 goto out;
1457 1455
1458 if (hw->mac.type >= e1000_pch_spt) { 1456 if (hw->mac.type >= e1000_pch_spt) {
1459 u16 data; 1457 u16 data;
@@ -1462,14 +1460,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1462 if (speed == SPEED_1000) { 1460 if (speed == SPEED_1000) {
1463 ret_val = hw->phy.ops.acquire(hw); 1461 ret_val = hw->phy.ops.acquire(hw);
1464 if (ret_val) 1462 if (ret_val)
1465 return ret_val; 1463 goto out;
1466 1464
1467 ret_val = e1e_rphy_locked(hw, 1465 ret_val = e1e_rphy_locked(hw,
1468 PHY_REG(776, 20), 1466 PHY_REG(776, 20),
1469 &data); 1467 &data);
1470 if (ret_val) { 1468 if (ret_val) {
1471 hw->phy.ops.release(hw); 1469 hw->phy.ops.release(hw);
1472 return ret_val; 1470 goto out;
1473 } 1471 }
1474 1472
1475 ptr_gap = (data & (0x3FF << 2)) >> 2; 1473 ptr_gap = (data & (0x3FF << 2)) >> 2;
@@ -1483,18 +1481,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1483 } 1481 }
1484 hw->phy.ops.release(hw); 1482 hw->phy.ops.release(hw);
1485 if (ret_val) 1483 if (ret_val)
1486 return ret_val; 1484 goto out;
1487 } else { 1485 } else {
1488 ret_val = hw->phy.ops.acquire(hw); 1486 ret_val = hw->phy.ops.acquire(hw);
1489 if (ret_val) 1487 if (ret_val)
1490 return ret_val; 1488 goto out;
1491 1489
1492 ret_val = e1e_wphy_locked(hw, 1490 ret_val = e1e_wphy_locked(hw,
1493 PHY_REG(776, 20), 1491 PHY_REG(776, 20),
1494 0xC023); 1492 0xC023);
1495 hw->phy.ops.release(hw); 1493 hw->phy.ops.release(hw);
1496 if (ret_val) 1494 if (ret_val)
1497 return ret_val; 1495 goto out;
1498 1496
1499 } 1497 }
1500 } 1498 }
@@ -1521,7 +1519,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1521 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) { 1519 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
1522 ret_val = e1000_k1_workaround_lpt_lp(hw, link); 1520 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1523 if (ret_val) 1521 if (ret_val)
1524 return ret_val; 1522 goto out;
1525 } 1523 }
1526 if (hw->mac.type >= e1000_pch_lpt) { 1524 if (hw->mac.type >= e1000_pch_lpt) {
1527 /* Set platform power management values for 1525 /* Set platform power management values for
@@ -1529,7 +1527,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1529 */ 1527 */
1530 ret_val = e1000_platform_pm_pch_lpt(hw, link); 1528 ret_val = e1000_platform_pm_pch_lpt(hw, link);
1531 if (ret_val) 1529 if (ret_val)
1532 return ret_val; 1530 goto out;
1533 } 1531 }
1534 1532
1535 /* Clear link partner's EEE ability */ 1533 /* Clear link partner's EEE ability */
@@ -1552,9 +1550,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1552 } 1550 }
1553 1551
1554 if (!link) 1552 if (!link)
1555 return 0; /* No link detected */ 1553 goto out;
1556
1557 mac->get_link_status = false;
1558 1554
1559 switch (hw->mac.type) { 1555 switch (hw->mac.type) {
1560 case e1000_pch2lan: 1556 case e1000_pch2lan:
@@ -1616,12 +1612,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1616 * different link partner. 1612 * different link partner.
1617 */ 1613 */
1618 ret_val = e1000e_config_fc_after_link_up(hw); 1614 ret_val = e1000e_config_fc_after_link_up(hw);
1619 if (ret_val) { 1615 if (ret_val)
1620 e_dbg("Error configuring flow control\n"); 1616 e_dbg("Error configuring flow control\n");
1621 return ret_val;
1622 }
1623 1617
1624 return 1; 1618 return ret_val;
1619
1620out:
1621 mac->get_link_status = true;
1622 return ret_val;
1625} 1623}
1626 1624
1627static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) 1625static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index f457c5703d0c..5bdc3a2d4fd7 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -410,9 +410,6 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
  * Checks to see of the link status of the hardware has changed. If a
  * change in link status has been detected, then we read the PHY registers
  * to get the current speed/duplex if link exists.
- *
- * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
- * up).
  **/
 s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
 {
@@ -426,20 +423,16 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
426 * Change or Rx Sequence Error interrupt. 423 * Change or Rx Sequence Error interrupt.
427 */ 424 */
428 if (!mac->get_link_status) 425 if (!mac->get_link_status)
429 return 1; 426 return 0;
427 mac->get_link_status = false;
430 428
431 /* First we want to see if the MII Status Register reports 429 /* First we want to see if the MII Status Register reports
432 * link. If so, then we want to get the current speed/duplex 430 * link. If so, then we want to get the current speed/duplex
433 * of the PHY. 431 * of the PHY.
434 */ 432 */
435 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); 433 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
436 if (ret_val) 434 if (ret_val || !link)
437 return ret_val; 435 goto out;
438
439 if (!link)
440 return 0; /* No link detected */
441
442 mac->get_link_status = false;
443 436
444 /* Check if there was DownShift, must be checked 437 /* Check if there was DownShift, must be checked
445 * immediately after link-up 438 * immediately after link-up
@@ -464,12 +457,14 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
464 * different link partner. 457 * different link partner.
465 */ 458 */
466 ret_val = e1000e_config_fc_after_link_up(hw); 459 ret_val = e1000e_config_fc_after_link_up(hw);
467 if (ret_val) { 460 if (ret_val)
468 e_dbg("Error configuring flow control\n"); 461 e_dbg("Error configuring flow control\n");
469 return ret_val;
470 }
471 462
472 return 1; 463 return ret_val;
464
465out:
466 mac->get_link_status = true;
467 return ret_val;
473} 468}
474 469
475/** 470/**
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 1298b69f990b..dc853b0863af 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1914,30 +1914,20 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
 	struct net_device *netdev = data;
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	u32 icr;
-	bool enable = true;
-
-	icr = er32(ICR);
-	if (icr & E1000_ICR_RXO) {
-		ew32(ICR, E1000_ICR_RXO);
-		enable = false;
-		/* napi poll will re-enable Other, make sure it runs */
-		if (napi_schedule_prep(&adapter->napi)) {
-			adapter->total_rx_bytes = 0;
-			adapter->total_rx_packets = 0;
-			__napi_schedule(&adapter->napi);
-		}
-	}
+	u32 icr = er32(ICR);
+
+	if (icr & adapter->eiac_mask)
+		ew32(ICS, (icr & adapter->eiac_mask));
+
 	if (icr & E1000_ICR_LSC) {
-		ew32(ICR, E1000_ICR_LSC);
 		hw->mac.get_link_status = true;
 		/* guard against interrupt when we're going down */
 		if (!test_bit(__E1000_DOWN, &adapter->state))
 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-	if (enable && !test_bit(__E1000_DOWN, &adapter->state))
-		ew32(IMS, E1000_IMS_OTHER);
+	if (!test_bit(__E1000_DOWN, &adapter->state))
+		ew32(IMS, E1000_IMS_OTHER | IMS_OTHER_MASK);
 
 	return IRQ_HANDLED;
 }
@@ -2040,7 +2030,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
 		       hw->hw_addr + E1000_EITR_82574(vector));
 	else
 		writel(1, hw->hw_addr + E1000_EITR_82574(vector));
-	adapter->eiac_mask |= E1000_IMS_OTHER;
 
 	/* Cause Tx interrupts on every write back */
 	ivar |= BIT(31);
@@ -2265,7 +2254,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
 
 	if (adapter->msix_entries) {
 		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
-		ew32(IMS, adapter->eiac_mask | E1000_IMS_LSC);
+		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER |
+		     IMS_OTHER_MASK);
 	} else if (hw->mac.type >= e1000_pch_lpt) {
 		ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
 	} else {
@@ -2333,8 +2323,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
 {
 	struct pci_dev *pdev = adapter->pdev;
 
-	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
-					GFP_KERNEL);
+	ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma,
+					 GFP_KERNEL);
 	if (!ring->desc)
 		return -ENOMEM;
 
@@ -2707,8 +2697,7 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
 		napi_complete_done(napi, work_done);
 		if (!test_bit(__E1000_DOWN, &adapter->state)) {
 			if (adapter->msix_entries)
-				ew32(IMS, adapter->rx_ring->ims_val |
-					  E1000_IMS_OTHER);
+				ew32(IMS, adapter->rx_ring->ims_val);
 			else
 				e1000_irq_enable(adapter);
 		}
@@ -5101,7 +5090,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
 	case e1000_media_type_copper:
 		if (hw->mac.get_link_status) {
 			ret_val = hw->mac.ops.check_for_link(hw);
-			link_active = ret_val > 0;
+			link_active = !hw->mac.get_link_status;
 		} else {
 			link_active = true;
 		}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 21d29f7936f6..d39b0b7011b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -124,7 +124,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
 		trigger_cmd_completions(dev);
 	}
 
-	mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
+	mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1);
 	mlx5_core_err(dev, "end\n");
 
 unlock:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index ba338428ffd1..3c0d882ba183 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -443,6 +443,17 @@ int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id)
 }
 EXPORT_SYMBOL(mlxsw_afa_block_jump);
 
+int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block)
+{
+	if (block->finished)
+		return -EINVAL;
+	mlxsw_afa_set_goto_set(block->cur_set,
+			       MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
+	block->finished = true;
+	return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_terminate);
+
 static struct mlxsw_afa_fwd_entry *
 mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port)
 {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index 6dd601703c99..3a155d104384 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -67,6 +67,7 @@ char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block);
 u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block);
 int mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
 int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
+int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block);
 int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
 int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
 int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 92194a9b2caf..21bee8f19894 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -535,6 +535,7 @@ void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
 int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);
 int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
 				u16 group_id);
+int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei);
 int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
 int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
 int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 1c1601a43978..79b1fa27a9a4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -518,6 +518,11 @@ int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
 	return mlxsw_afa_block_jump(rulei->act_block, group_id);
 }
 
+int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei)
+{
+	return mlxsw_afa_block_terminate(rulei->act_block);
+}
+
 int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
 {
 	return mlxsw_afa_block_append_drop(rulei->act_block);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 93728c694e6d..0a9adc5962fb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -385,13 +385,13 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
 
 static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
 	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
 	MLXSW_SP_CPU_PORT_SB_CM,
-	MLXSW_SP_CPU_PORT_SB_CM,
-	MLXSW_SP_CPU_PORT_SB_CM,
-	MLXSW_SP_CPU_PORT_SB_CM,
-	MLXSW_SP_CPU_PORT_SB_CM,
-	MLXSW_SP_CPU_PORT_SB_CM,
-	MLXSW_SP_SB_CM(10000, 0, 0),
+	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
 	MLXSW_SP_CPU_PORT_SB_CM,
 	MLXSW_SP_CPU_PORT_SB_CM,
 	MLXSW_SP_CPU_PORT_SB_CM,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 6ce00e28d4ea..89dbf569dff5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -65,7 +65,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
 	tcf_exts_to_list(exts, &actions);
 	list_for_each_entry(a, &actions, list) {
 		if (is_tcf_gact_ok(a)) {
-			err = mlxsw_sp_acl_rulei_act_continue(rulei);
+			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
 			if (err)
 				return err;
 		} else if (is_tcf_gact_shot(a)) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index 948aceb512c5..4b87ec20e658 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -51,6 +51,9 @@ struct mlxsw_sp_span_inspected_port {
 	struct list_head list;
 	enum mlxsw_sp_span_type type;
 	u8 local_port;
+
+	/* Whether this is a directly bound mirror (port-to-port) or an ACL. */
+	bool bound;
 };
 
 struct mlxsw_sp_span_parms {
diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig
index a10ef50e4f12..017fb2322589 100644
--- a/drivers/net/ethernet/natsemi/Kconfig
+++ b/drivers/net/ethernet/natsemi/Kconfig
@@ -1,16 +1,16 @@
 #
-# National Semi-conductor device configuration
+# National Semiconductor device configuration
 #
 
 config NET_VENDOR_NATSEMI
-	bool "National Semi-conductor devices"
+	bool "National Semiconductor devices"
 	default y
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
-	  the questions about National Semi-conductor devices. If you say Y,
+	  the questions about National Semiconductor devices. If you say Y,
 	  you will be asked for your specific card in the following questions.
 
 if NET_VENDOR_NATSEMI
diff --git a/drivers/net/ethernet/natsemi/Makefile b/drivers/net/ethernet/natsemi/Makefile
index cc664977596e..a759aa09ef59 100644
--- a/drivers/net/ethernet/natsemi/Makefile
+++ b/drivers/net/ethernet/natsemi/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 #
-# Makefile for the National Semi-conductor Sonic devices.
+# Makefile for the National Semiconductor Sonic devices.
 #
 
 obj-$(CONFIG_MACSONIC) += macsonic.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 6f546e869d8d..00f41c145d4d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -2480,7 +2480,10 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
 	if (rc)
 		return rc;
 
-	/* Free Task CXT */
+	/* Free Task CXT ( Intentionally RoCE as task-id is shared between
+	 * RoCE and iWARP )
+	 */
+	proto = PROTOCOLID_ROCE;
 	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
 				    qed_cxt_get_proto_tid_count(p_hwfn, proto));
 	if (rc)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 03ad4eeac7f8..69051e98aff9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -1703,6 +1703,13 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
1703 iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen); 1703 iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);
1704 1704
1705 if (eth_type == ETH_P_IP) { 1705 if (eth_type == ETH_P_IP) {
1706 if (iph->protocol != IPPROTO_TCP) {
1707 DP_NOTICE(p_hwfn,
1708 "Unexpected ip protocol on ll2 %x\n",
1709 iph->protocol);
1710 return -EINVAL;
1711 }
1712
1706 cm_info->local_ip[0] = ntohl(iph->daddr); 1713 cm_info->local_ip[0] = ntohl(iph->daddr);
1707 cm_info->remote_ip[0] = ntohl(iph->saddr); 1714 cm_info->remote_ip[0] = ntohl(iph->saddr);
1708 cm_info->ip_version = TCP_IPV4; 1715 cm_info->ip_version = TCP_IPV4;
@@ -1711,6 +1718,14 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
1711 *payload_len = ntohs(iph->tot_len) - ip_hlen; 1718 *payload_len = ntohs(iph->tot_len) - ip_hlen;
1712 } else if (eth_type == ETH_P_IPV6) { 1719 } else if (eth_type == ETH_P_IPV6) {
1713 ip6h = (struct ipv6hdr *)iph; 1720 ip6h = (struct ipv6hdr *)iph;
1721
1722 if (ip6h->nexthdr != IPPROTO_TCP) {
1723 DP_NOTICE(p_hwfn,
1724 "Unexpected ip protocol on ll2 %x\n",
1725 iph->protocol);
1726 return -EINVAL;
1727 }
1728
1714 for (i = 0; i < 4; i++) { 1729 for (i = 0; i < 4; i++) {
1715 cm_info->local_ip[i] = 1730 cm_info->local_ip[i] =
1716 ntohl(ip6h->daddr.in6_u.u6_addr32[i]); 1731 ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
@@ -1928,8 +1943,8 @@ qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
1928 /* Missing lower byte is now available */ 1943 /* Missing lower byte is now available */
1929 mpa_len = fpdu->fpdu_length | *mpa_data; 1944 mpa_len = fpdu->fpdu_length | *mpa_data;
1930 fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len); 1945 fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
1931 fpdu->mpa_frag_len = fpdu->fpdu_length;
1932 /* one byte of hdr */ 1946 /* one byte of hdr */
1947 fpdu->mpa_frag_len = 1;
1933 fpdu->incomplete_bytes = fpdu->fpdu_length - 1; 1948 fpdu->incomplete_bytes = fpdu->fpdu_length - 1;
1934 DP_VERBOSE(p_hwfn, 1949 DP_VERBOSE(p_hwfn,
1935 QED_MSG_RDMA, 1950 QED_MSG_RDMA,
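The first two hunks above add an L4 sanity check before the ll2 receive path goes on to parse a TCP header behind the IP header. A minimal sketch of that check, condensed into a hypothetical helper (ll2_l4_is_tcp() is not a function in the driver, and eth_type is assumed to already be in host order, as in qed_iwarp_parse_rx_pkt()):

	/* Hypothetical helper mirroring the checks added above: anything
	 * other than TCP is rejected before a TCP header is dereferenced.
	 */
	static bool ll2_l4_is_tcp(u16 eth_type, const struct iphdr *iph)
	{
		if (eth_type == ETH_P_IP)
			return iph->protocol == IPPROTO_TCP;
		if (eth_type == ETH_P_IPV6)
			return ((const struct ipv6hdr *)iph)->nexthdr == IPPROTO_TCP;
		return false;
	}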
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 5d040b873137..a411f9c702a1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -379,6 +379,7 @@ static void qed_rdma_free(struct qed_hwfn *p_hwfn)
379 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n"); 379 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
380 380
381 qed_rdma_free_reserved_lkey(p_hwfn); 381 qed_rdma_free_reserved_lkey(p_hwfn);
382 qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto);
382 qed_rdma_resc_free(p_hwfn); 383 qed_rdma_resc_free(p_hwfn);
383} 384}
384 385
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 2db70eabddfe..a01e7d6e5442 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -288,7 +288,7 @@ int __init qede_init(void)
288 } 288 }
289 289
290 /* Must register notifier before pci ops, since we might miss 290 /* Must register notifier before pci ops, since we might miss
291 * interface rename after pci probe and netdev registeration. 291 * interface rename after pci probe and netdev registration.
292 */ 292 */
293 ret = register_netdevice_notifier(&qede_netdev_notifier); 293 ret = register_netdevice_notifier(&qede_netdev_notifier);
294 if (ret) { 294 if (ret) {
@@ -988,7 +988,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
988 if (rc) 988 if (rc)
989 goto err3; 989 goto err3;
990 990
991 /* Prepare the lock prior to the registeration of the netdev, 991 /* Prepare the lock prior to the registration of the netdev,
992 * as once it's registered we might reach flows requiring it 992 * as once it's registered we might reach flows requiring it
993 * [it's even possible to reach a flow needing it directly 993 * [it's even possible to reach a flow needing it directly
994 * from there, although it's unlikely]. 994 * from there, although it's unlikely].
@@ -2067,8 +2067,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
2067 link_params.link_up = true; 2067 link_params.link_up = true;
2068 edev->ops->common->set_link(edev->cdev, &link_params); 2068 edev->ops->common->set_link(edev->cdev, &link_params);
2069 2069
2070 qede_rdma_dev_event_open(edev);
2071
2072 edev->state = QEDE_STATE_OPEN; 2070 edev->state = QEDE_STATE_OPEN;
2073 2071
2074 DP_INFO(edev, "Ending successfully qede load\n"); 2072 DP_INFO(edev, "Ending successfully qede load\n");
@@ -2169,12 +2167,14 @@ static void qede_link_update(void *dev, struct qed_link_output *link)
2169 DP_NOTICE(edev, "Link is up\n"); 2167 DP_NOTICE(edev, "Link is up\n");
2170 netif_tx_start_all_queues(edev->ndev); 2168 netif_tx_start_all_queues(edev->ndev);
2171 netif_carrier_on(edev->ndev); 2169 netif_carrier_on(edev->ndev);
2170 qede_rdma_dev_event_open(edev);
2172 } 2171 }
2173 } else { 2172 } else {
2174 if (netif_carrier_ok(edev->ndev)) { 2173 if (netif_carrier_ok(edev->ndev)) {
2175 DP_NOTICE(edev, "Link is down\n"); 2174 DP_NOTICE(edev, "Link is down\n");
2176 netif_tx_disable(edev->ndev); 2175 netif_tx_disable(edev->ndev);
2177 netif_carrier_off(edev->ndev); 2176 netif_carrier_off(edev->ndev);
2177 qede_rdma_dev_event_close(edev);
2178 } 2178 }
2179 } 2179 }
2180} 2180}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 9b2280badaf7..02adb513f475 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -485,7 +485,7 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
485 ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev); 485 ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
486 if (IS_ERR(ptp->clock)) { 486 if (IS_ERR(ptp->clock)) {
487 rc = -EINVAL; 487 rc = -EINVAL;
488 DP_ERR(edev, "PTP clock registeration failed\n"); 488 DP_ERR(edev, "PTP clock registration failed\n");
489 goto err2; 489 goto err2;
490 } 490 }
491 491
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 9cbb27263742..d5a32b7c7dc5 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1194,9 +1194,9 @@ void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q)
1194 while (tx_q->tpd.consume_idx != hw_consume_idx) { 1194 while (tx_q->tpd.consume_idx != hw_consume_idx) {
1195 tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx); 1195 tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx);
1196 if (tpbuf->dma_addr) { 1196 if (tpbuf->dma_addr) {
1197 dma_unmap_single(adpt->netdev->dev.parent, 1197 dma_unmap_page(adpt->netdev->dev.parent,
1198 tpbuf->dma_addr, tpbuf->length, 1198 tpbuf->dma_addr, tpbuf->length,
1199 DMA_TO_DEVICE); 1199 DMA_TO_DEVICE);
1200 tpbuf->dma_addr = 0; 1200 tpbuf->dma_addr = 0;
1201 } 1201 }
1202 1202
@@ -1353,9 +1353,11 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
1353 1353
1354 tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx); 1354 tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
1355 tpbuf->length = mapped_len; 1355 tpbuf->length = mapped_len;
1356 tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent, 1356 tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
1357 skb->data, tpbuf->length, 1357 virt_to_page(skb->data),
1358 DMA_TO_DEVICE); 1358 offset_in_page(skb->data),
1359 tpbuf->length,
1360 DMA_TO_DEVICE);
1359 ret = dma_mapping_error(adpt->netdev->dev.parent, 1361 ret = dma_mapping_error(adpt->netdev->dev.parent,
1360 tpbuf->dma_addr); 1362 tpbuf->dma_addr);
1361 if (ret) 1363 if (ret)
@@ -1371,9 +1373,12 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
1371 if (mapped_len < len) { 1373 if (mapped_len < len) {
1372 tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx); 1374 tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
1373 tpbuf->length = len - mapped_len; 1375 tpbuf->length = len - mapped_len;
1374 tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent, 1376 tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
1375 skb->data + mapped_len, 1377 virt_to_page(skb->data +
1376 tpbuf->length, DMA_TO_DEVICE); 1378 mapped_len),
1379 offset_in_page(skb->data +
1380 mapped_len),
1381 tpbuf->length, DMA_TO_DEVICE);
1377 ret = dma_mapping_error(adpt->netdev->dev.parent, 1382 ret = dma_mapping_error(adpt->netdev->dev.parent,
1378 tpbuf->dma_addr); 1383 tpbuf->dma_addr);
1379 if (ret) 1384 if (ret)
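Both emac hunks above switch the TX mapping of the skb linear area from dma_map_single() to dma_map_page(), which appears to be so that emac_mac_tx_process() can unmap every TX buffer uniformly with dma_unmap_page(). A minimal sketch of the mapping pattern, assuming the buffer is a lowmem (linear) address such as skb->data; map_linear_buf() is a hypothetical helper, not part of the driver:

	static dma_addr_t map_linear_buf(struct device *dev, void *buf,
					 size_t len)
	{
		/* Same mapping the patch performs: translate the kernel
		 * virtual address into its backing page plus offset and map
		 * that page for device reads.
		 */
		return dma_map_page(dev, virt_to_page(buf),
				    offset_in_page(buf), len, DMA_TO_DEVICE);
	}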
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 012fb66eed8d..f0afb88d7bc2 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2335,14 +2335,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
2335 pdata = netdev_priv(dev); 2335 pdata = netdev_priv(dev);
2336 BUG_ON(!pdata); 2336 BUG_ON(!pdata);
2337 BUG_ON(!pdata->ioaddr); 2337 BUG_ON(!pdata->ioaddr);
2338 WARN_ON(dev->phydev);
2339 2338
2340 SMSC_TRACE(pdata, ifdown, "Stopping driver"); 2339 SMSC_TRACE(pdata, ifdown, "Stopping driver");
2341 2340
2341 unregister_netdev(dev);
2342
2342 mdiobus_unregister(pdata->mii_bus); 2343 mdiobus_unregister(pdata->mii_bus);
2343 mdiobus_free(pdata->mii_bus); 2344 mdiobus_free(pdata->mii_bus);
2344 2345
2345 unregister_netdev(dev);
2346 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 2346 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2347 "smsc911x-memory"); 2347 "smsc911x-memory");
2348 if (!res) 2348 if (!res)
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 111e7ca9df56..f5c5984afefb 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1295,7 +1295,7 @@ static int ave_open(struct net_device *ndev)
1295 val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16); 1295 val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16);
1296 writel(val, priv->base + AVE_IIRQC); 1296 writel(val, priv->base + AVE_IIRQC);
1297 1297
1298 val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX; 1298 val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX | AVE_GI_RXDROP;
1299 ave_irq_restore(ndev, val); 1299 ave_irq_restore(ndev, val);
1300 1300
1301 napi_enable(&priv->napi_rx); 1301 napi_enable(&priv->napi_rx);
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 63d3d6b215f3..a94f50442613 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -312,7 +312,7 @@ static struct vnet *vnet_new(const u64 *local_mac,
312 dev->ethtool_ops = &vnet_ethtool_ops; 312 dev->ethtool_ops = &vnet_ethtool_ops;
313 dev->watchdog_timeo = VNET_TX_TIMEOUT; 313 dev->watchdog_timeo = VNET_TX_TIMEOUT;
314 314
315 dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | 315 dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_ALL_TSO |
316 NETIF_F_HW_CSUM | NETIF_F_SG; 316 NETIF_F_HW_CSUM | NETIF_F_SG;
317 dev->features = dev->hw_features; 317 dev->features = dev->hw_features;
318 318
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 8af8891078e2..1b4af54a4968 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1075,7 +1075,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
1075 /* set speed_in input in case RMII mode is used in 100Mbps */ 1075 /* set speed_in input in case RMII mode is used in 100Mbps */
1076 if (phy->speed == 100) 1076 if (phy->speed == 100)
1077 mac_control |= BIT(15); 1077 mac_control |= BIT(15);
1078 else if (phy->speed == 10) 1078 /* in band mode only works in 10Mbps RGMII mode */
1079 else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
1079 mac_control |= BIT(18); /* In Band mode */ 1080 mac_control |= BIT(18); /* In Band mode */
1080 1081
1081 if (priv->rx_pause) 1082 if (priv->rx_pause)
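The cpsw hunk above narrows when the in-band bit is set: BIT(18) is now only applied at 10 Mbps on RGMII interfaces, where in-band mode is actually meaningful. A minimal sketch of the resulting speed handling, with the driver's own comments kept:

	/* set speed_in input in case RMII mode is used in 100Mbps */
	if (phy->speed == 100)
		mac_control |= BIT(15);
	/* in band mode only works in 10Mbps RGMII mode */
	else if (phy->speed == 10 && phy_interface_is_rgmii(phy))
		mac_control |= BIT(18);		/* In Band mode */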
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 0db3bd1ea06f..32861036c3fc 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -173,6 +173,7 @@ struct rndis_device {
173 struct list_head req_list; 173 struct list_head req_list;
174 174
175 struct work_struct mcast_work; 175 struct work_struct mcast_work;
176 u32 filter;
176 177
177 bool link_state; /* 0 - link up, 1 - link down */ 178 bool link_state; /* 0 - link up, 1 - link down */
178 179
@@ -211,7 +212,6 @@ void netvsc_channel_cb(void *context);
211int netvsc_poll(struct napi_struct *napi, int budget); 212int netvsc_poll(struct napi_struct *napi, int budget);
212 213
213void rndis_set_subchannel(struct work_struct *w); 214void rndis_set_subchannel(struct work_struct *w);
214bool rndis_filter_opened(const struct netvsc_device *nvdev);
215int rndis_filter_open(struct netvsc_device *nvdev); 215int rndis_filter_open(struct netvsc_device *nvdev);
216int rndis_filter_close(struct netvsc_device *nvdev); 216int rndis_filter_close(struct netvsc_device *nvdev);
217struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, 217struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index aa95e81af6e5..4123d081b1c7 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -93,6 +93,11 @@ static void free_netvsc_device(struct rcu_head *head)
93 = container_of(head, struct netvsc_device, rcu); 93 = container_of(head, struct netvsc_device, rcu);
94 int i; 94 int i;
95 95
96 kfree(nvdev->extension);
97 vfree(nvdev->recv_buf);
98 vfree(nvdev->send_buf);
99 kfree(nvdev->send_section_map);
100
96 for (i = 0; i < VRSS_CHANNEL_MAX; i++) 101 for (i = 0; i < VRSS_CHANNEL_MAX; i++)
97 vfree(nvdev->chan_table[i].mrc.slots); 102 vfree(nvdev->chan_table[i].mrc.slots);
98 103
@@ -218,12 +223,6 @@ static void netvsc_teardown_gpadl(struct hv_device *device,
218 net_device->recv_buf_gpadl_handle = 0; 223 net_device->recv_buf_gpadl_handle = 0;
219 } 224 }
220 225
221 if (net_device->recv_buf) {
222 /* Free up the receive buffer */
223 vfree(net_device->recv_buf);
224 net_device->recv_buf = NULL;
225 }
226
227 if (net_device->send_buf_gpadl_handle) { 226 if (net_device->send_buf_gpadl_handle) {
228 ret = vmbus_teardown_gpadl(device->channel, 227 ret = vmbus_teardown_gpadl(device->channel,
229 net_device->send_buf_gpadl_handle); 228 net_device->send_buf_gpadl_handle);
@@ -238,12 +237,6 @@ static void netvsc_teardown_gpadl(struct hv_device *device,
238 } 237 }
239 net_device->send_buf_gpadl_handle = 0; 238 net_device->send_buf_gpadl_handle = 0;
240 } 239 }
241 if (net_device->send_buf) {
242 /* Free up the send buffer */
243 vfree(net_device->send_buf);
244 net_device->send_buf = NULL;
245 }
246 kfree(net_device->send_section_map);
247} 240}
248 241
249int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) 242int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
@@ -580,26 +573,29 @@ void netvsc_device_remove(struct hv_device *device)
580 = rtnl_dereference(net_device_ctx->nvdev); 573 = rtnl_dereference(net_device_ctx->nvdev);
581 int i; 574 int i;
582 575
583 cancel_work_sync(&net_device->subchan_work);
584
585 netvsc_revoke_buf(device, net_device); 576 netvsc_revoke_buf(device, net_device);
586 577
587 RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); 578 RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
588 579
580 /* And disassociate NAPI context from device */
581 for (i = 0; i < net_device->num_chn; i++)
582 netif_napi_del(&net_device->chan_table[i].napi);
583
589 /* 584 /*
590 * At this point, no one should be accessing net_device 585 * At this point, no one should be accessing net_device
591 * except in here 586 * except in here
592 */ 587 */
593 netdev_dbg(ndev, "net device safe to remove\n"); 588 netdev_dbg(ndev, "net device safe to remove\n");
594 589
590 /* older versions require that buffer be revoked before close */
591 if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_4)
592 netvsc_teardown_gpadl(device, net_device);
593
595 /* Now, we can close the channel safely */ 594 /* Now, we can close the channel safely */
596 vmbus_close(device->channel); 595 vmbus_close(device->channel);
597 596
598 netvsc_teardown_gpadl(device, net_device); 597 if (net_device->nvsp_version >= NVSP_PROTOCOL_VERSION_4)
599 598 netvsc_teardown_gpadl(device, net_device);
600 /* And dissassociate NAPI context from device */
601 for (i = 0; i < net_device->num_chn; i++)
602 netif_napi_del(&net_device->chan_table[i].napi);
603 599
604 /* Release all resources */ 600 /* Release all resources */
605 free_netvsc_device_rcu(net_device); 601 free_netvsc_device_rcu(net_device);
@@ -663,14 +659,18 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
663 queue_sends = 659 queue_sends =
664 atomic_dec_return(&net_device->chan_table[q_idx].queue_sends); 660 atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
665 661
666 if (net_device->destroy && queue_sends == 0) 662 if (unlikely(net_device->destroy)) {
667 wake_up(&net_device->wait_drain); 663 if (queue_sends == 0)
664 wake_up(&net_device->wait_drain);
665 } else {
666 struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
668 667
669 if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) && 668 if (netif_tx_queue_stopped(txq) &&
670 (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || 669 (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
671 queue_sends < 1)) { 670 queue_sends < 1)) {
672 netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx)); 671 netif_tx_wake_queue(txq);
673 ndev_ctx->eth_stats.wake_queue++; 672 ndev_ctx->eth_stats.wake_queue++;
673 }
674 } 674 }
675} 675}
676 676
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index cdb78eefab67..f28c85d212ce 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -46,7 +46,10 @@
46 46
47#include "hyperv_net.h" 47#include "hyperv_net.h"
48 48
49#define RING_SIZE_MIN 64 49#define RING_SIZE_MIN 64
50#define RETRY_US_LO 5000
51#define RETRY_US_HI 10000
52#define RETRY_MAX 2000 /* >10 sec */
50 53
51#define LINKCHANGE_INT (2 * HZ) 54#define LINKCHANGE_INT (2 * HZ)
52#define VF_TAKEOVER_INT (HZ / 10) 55#define VF_TAKEOVER_INT (HZ / 10)
@@ -89,15 +92,20 @@ static void netvsc_change_rx_flags(struct net_device *net, int change)
89static void netvsc_set_rx_mode(struct net_device *net) 92static void netvsc_set_rx_mode(struct net_device *net)
90{ 93{
91 struct net_device_context *ndev_ctx = netdev_priv(net); 94 struct net_device_context *ndev_ctx = netdev_priv(net);
92 struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); 95 struct net_device *vf_netdev;
93 struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev); 96 struct netvsc_device *nvdev;
94 97
98 rcu_read_lock();
99 vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
95 if (vf_netdev) { 100 if (vf_netdev) {
96 dev_uc_sync(vf_netdev, net); 101 dev_uc_sync(vf_netdev, net);
97 dev_mc_sync(vf_netdev, net); 102 dev_mc_sync(vf_netdev, net);
98 } 103 }
99 104
100 rndis_filter_update(nvdev); 105 nvdev = rcu_dereference(ndev_ctx->nvdev);
106 if (nvdev)
107 rndis_filter_update(nvdev);
108 rcu_read_unlock();
101} 109}
102 110
103static int netvsc_open(struct net_device *net) 111static int netvsc_open(struct net_device *net)
@@ -118,10 +126,8 @@ static int netvsc_open(struct net_device *net)
118 } 126 }
119 127
120 rdev = nvdev->extension; 128 rdev = nvdev->extension;
121 if (!rdev->link_state) { 129 if (!rdev->link_state)
122 netif_carrier_on(net); 130 netif_carrier_on(net);
123 netif_tx_wake_all_queues(net);
124 }
125 131
126 if (vf_netdev) { 132 if (vf_netdev) {
127 /* Setting synthetic device up transparently sets 133 /* Setting synthetic device up transparently sets
@@ -137,36 +143,25 @@ static int netvsc_open(struct net_device *net)
137 return 0; 143 return 0;
138} 144}
139 145
140static int netvsc_close(struct net_device *net) 146static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
141{ 147{
142 struct net_device_context *net_device_ctx = netdev_priv(net); 148 unsigned int retry = 0;
143 struct net_device *vf_netdev 149 int i;
144 = rtnl_dereference(net_device_ctx->vf_netdev);
145 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
146 int ret = 0;
147 u32 aread, i, msec = 10, retry = 0, retry_max = 20;
148 struct vmbus_channel *chn;
149
150 netif_tx_disable(net);
151
152 /* No need to close rndis filter if it is removed already */
153 if (!nvdev)
154 goto out;
155
156 ret = rndis_filter_close(nvdev);
157 if (ret != 0) {
158 netdev_err(net, "unable to close device (ret %d).\n", ret);
159 return ret;
160 }
161 150
162 /* Ensure pending bytes in ring are read */ 151 /* Ensure pending bytes in ring are read */
163 while (true) { 152 for (;;) {
164 aread = 0; 153 u32 aread = 0;
154
165 for (i = 0; i < nvdev->num_chn; i++) { 155 for (i = 0; i < nvdev->num_chn; i++) {
166 chn = nvdev->chan_table[i].channel; 156 struct vmbus_channel *chn
157 = nvdev->chan_table[i].channel;
158
167 if (!chn) 159 if (!chn)
168 continue; 160 continue;
169 161
162 /* make sure receive not running now */
163 napi_synchronize(&nvdev->chan_table[i].napi);
164
170 aread = hv_get_bytes_to_read(&chn->inbound); 165 aread = hv_get_bytes_to_read(&chn->inbound);
171 if (aread) 166 if (aread)
172 break; 167 break;
@@ -176,22 +171,40 @@ static int netvsc_close(struct net_device *net)
176 break; 171 break;
177 } 172 }
178 173
179 retry++; 174 if (aread == 0)
180 if (retry > retry_max || aread == 0) 175 return 0;
181 break;
182 176
183 msleep(msec); 177 if (++retry > RETRY_MAX)
178 return -ETIMEDOUT;
184 179
185 if (msec < 1000) 180 usleep_range(RETRY_US_LO, RETRY_US_HI);
186 msec *= 2;
187 } 181 }
182}
188 183
189 if (aread) { 184static int netvsc_close(struct net_device *net)
190 netdev_err(net, "Ring buffer not empty after closing rndis\n"); 185{
191 ret = -ETIMEDOUT; 186 struct net_device_context *net_device_ctx = netdev_priv(net);
187 struct net_device *vf_netdev
188 = rtnl_dereference(net_device_ctx->vf_netdev);
189 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
190 int ret;
191
192 netif_tx_disable(net);
193
194 /* No need to close rndis filter if it is removed already */
195 if (!nvdev)
196 return 0;
197
198 ret = rndis_filter_close(nvdev);
199 if (ret != 0) {
200 netdev_err(net, "unable to close device (ret %d).\n", ret);
201 return ret;
192 } 202 }
193 203
194out: 204 ret = netvsc_wait_until_empty(nvdev);
205 if (ret)
206 netdev_err(net, "Ring buffer not empty after closing rndis\n");
207
195 if (vf_netdev) 208 if (vf_netdev)
196 dev_close(vf_netdev); 209 dev_close(vf_netdev);
197 210
@@ -840,16 +853,81 @@ static void netvsc_get_channels(struct net_device *net,
840 } 853 }
841} 854}
842 855
856static int netvsc_detach(struct net_device *ndev,
857 struct netvsc_device *nvdev)
858{
859 struct net_device_context *ndev_ctx = netdev_priv(ndev);
860 struct hv_device *hdev = ndev_ctx->device_ctx;
861 int ret;
862
863 /* Don't try continuing to try and setup sub channels */
864 if (cancel_work_sync(&nvdev->subchan_work))
865 nvdev->num_chn = 1;
866
867 /* If device was up (receiving) then shutdown */
868 if (netif_running(ndev)) {
869 netif_tx_disable(ndev);
870
871 ret = rndis_filter_close(nvdev);
872 if (ret) {
873 netdev_err(ndev,
874 "unable to close device (ret %d).\n", ret);
875 return ret;
876 }
877
878 ret = netvsc_wait_until_empty(nvdev);
879 if (ret) {
880 netdev_err(ndev,
881 "Ring buffer not empty after closing rndis\n");
882 return ret;
883 }
884 }
885
886 netif_device_detach(ndev);
887
888 rndis_filter_device_remove(hdev, nvdev);
889
890 return 0;
891}
892
893static int netvsc_attach(struct net_device *ndev,
894 struct netvsc_device_info *dev_info)
895{
896 struct net_device_context *ndev_ctx = netdev_priv(ndev);
897 struct hv_device *hdev = ndev_ctx->device_ctx;
898 struct netvsc_device *nvdev;
899 struct rndis_device *rdev;
900 int ret;
901
902 nvdev = rndis_filter_device_add(hdev, dev_info);
903 if (IS_ERR(nvdev))
904 return PTR_ERR(nvdev);
905
906 /* Note: enable and attach happen when sub-channels setup */
907
908 netif_carrier_off(ndev);
909
910 if (netif_running(ndev)) {
911 ret = rndis_filter_open(nvdev);
912 if (ret)
913 return ret;
914
915 rdev = nvdev->extension;
916 if (!rdev->link_state)
917 netif_carrier_on(ndev);
918 }
919
920 return 0;
921}
922
843static int netvsc_set_channels(struct net_device *net, 923static int netvsc_set_channels(struct net_device *net,
844 struct ethtool_channels *channels) 924 struct ethtool_channels *channels)
845{ 925{
846 struct net_device_context *net_device_ctx = netdev_priv(net); 926 struct net_device_context *net_device_ctx = netdev_priv(net);
847 struct hv_device *dev = net_device_ctx->device_ctx;
848 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); 927 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
849 unsigned int orig, count = channels->combined_count; 928 unsigned int orig, count = channels->combined_count;
850 struct netvsc_device_info device_info; 929 struct netvsc_device_info device_info;
851 bool was_opened; 930 int ret;
852 int ret = 0;
853 931
854 /* We do not support separate count for rx, tx, or other */ 932 /* We do not support separate count for rx, tx, or other */
855 if (count == 0 || 933 if (count == 0 ||
@@ -866,9 +944,6 @@ static int netvsc_set_channels(struct net_device *net,
866 return -EINVAL; 944 return -EINVAL;
867 945
868 orig = nvdev->num_chn; 946 orig = nvdev->num_chn;
869 was_opened = rndis_filter_opened(nvdev);
870 if (was_opened)
871 rndis_filter_close(nvdev);
872 947
873 memset(&device_info, 0, sizeof(device_info)); 948 memset(&device_info, 0, sizeof(device_info));
874 device_info.num_chn = count; 949 device_info.num_chn = count;
@@ -877,28 +952,17 @@ static int netvsc_set_channels(struct net_device *net,
877 device_info.recv_sections = nvdev->recv_section_cnt; 952 device_info.recv_sections = nvdev->recv_section_cnt;
878 device_info.recv_section_size = nvdev->recv_section_size; 953 device_info.recv_section_size = nvdev->recv_section_size;
879 954
880 rndis_filter_device_remove(dev, nvdev); 955 ret = netvsc_detach(net, nvdev);
956 if (ret)
957 return ret;
881 958
882 nvdev = rndis_filter_device_add(dev, &device_info); 959 ret = netvsc_attach(net, &device_info);
883 if (IS_ERR(nvdev)) { 960 if (ret) {
884 ret = PTR_ERR(nvdev);
885 device_info.num_chn = orig; 961 device_info.num_chn = orig;
886 nvdev = rndis_filter_device_add(dev, &device_info); 962 if (netvsc_attach(net, &device_info))
887 963 netdev_err(net, "restoring channel setting failed\n");
888 if (IS_ERR(nvdev)) {
889 netdev_err(net, "restoring channel setting failed: %ld\n",
890 PTR_ERR(nvdev));
891 return ret;
892 }
893 } 964 }
894 965
895 if (was_opened)
896 rndis_filter_open(nvdev);
897
898 /* We may have missed link change notifications */
899 net_device_ctx->last_reconfig = 0;
900 schedule_delayed_work(&net_device_ctx->dwork, 0);
901
902 return ret; 966 return ret;
903} 967}
904 968
@@ -964,10 +1028,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
964 struct net_device_context *ndevctx = netdev_priv(ndev); 1028 struct net_device_context *ndevctx = netdev_priv(ndev);
965 struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); 1029 struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
966 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1030 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
967 struct hv_device *hdev = ndevctx->device_ctx;
968 int orig_mtu = ndev->mtu; 1031 int orig_mtu = ndev->mtu;
969 struct netvsc_device_info device_info; 1032 struct netvsc_device_info device_info;
970 bool was_opened;
971 int ret = 0; 1033 int ret = 0;
972 1034
973 if (!nvdev || nvdev->destroy) 1035 if (!nvdev || nvdev->destroy)
@@ -980,11 +1042,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
980 return ret; 1042 return ret;
981 } 1043 }
982 1044
983 netif_device_detach(ndev);
984 was_opened = rndis_filter_opened(nvdev);
985 if (was_opened)
986 rndis_filter_close(nvdev);
987
988 memset(&device_info, 0, sizeof(device_info)); 1045 memset(&device_info, 0, sizeof(device_info));
989 device_info.num_chn = nvdev->num_chn; 1046 device_info.num_chn = nvdev->num_chn;
990 device_info.send_sections = nvdev->send_section_cnt; 1047 device_info.send_sections = nvdev->send_section_cnt;
@@ -992,35 +1049,27 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
992 device_info.recv_sections = nvdev->recv_section_cnt; 1049 device_info.recv_sections = nvdev->recv_section_cnt;
993 device_info.recv_section_size = nvdev->recv_section_size; 1050 device_info.recv_section_size = nvdev->recv_section_size;
994 1051
995 rndis_filter_device_remove(hdev, nvdev); 1052 ret = netvsc_detach(ndev, nvdev);
1053 if (ret)
1054 goto rollback_vf;
996 1055
997 ndev->mtu = mtu; 1056 ndev->mtu = mtu;
998 1057
999 nvdev = rndis_filter_device_add(hdev, &device_info); 1058 ret = netvsc_attach(ndev, &device_info);
1000 if (IS_ERR(nvdev)) { 1059 if (ret)
1001 ret = PTR_ERR(nvdev); 1060 goto rollback;
1002
1003 /* Attempt rollback to original MTU */
1004 ndev->mtu = orig_mtu;
1005 nvdev = rndis_filter_device_add(hdev, &device_info);
1006
1007 if (vf_netdev)
1008 dev_set_mtu(vf_netdev, orig_mtu);
1009
1010 if (IS_ERR(nvdev)) {
1011 netdev_err(ndev, "restoring mtu failed: %ld\n",
1012 PTR_ERR(nvdev));
1013 return ret;
1014 }
1015 }
1016 1061
1017 if (was_opened) 1062 return 0;
1018 rndis_filter_open(nvdev);
1019 1063
1020 netif_device_attach(ndev); 1064rollback:
1065 /* Attempt rollback to original MTU */
1066 ndev->mtu = orig_mtu;
1021 1067
1022 /* We may have missed link change notifications */ 1068 if (netvsc_attach(ndev, &device_info))
1023 schedule_delayed_work(&ndevctx->dwork, 0); 1069 netdev_err(ndev, "restoring mtu failed\n");
1070rollback_vf:
1071 if (vf_netdev)
1072 dev_set_mtu(vf_netdev, orig_mtu);
1024 1073
1025 return ret; 1074 return ret;
1026} 1075}
@@ -1526,11 +1575,9 @@ static int netvsc_set_ringparam(struct net_device *ndev,
1526{ 1575{
1527 struct net_device_context *ndevctx = netdev_priv(ndev); 1576 struct net_device_context *ndevctx = netdev_priv(ndev);
1528 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1577 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1529 struct hv_device *hdev = ndevctx->device_ctx;
1530 struct netvsc_device_info device_info; 1578 struct netvsc_device_info device_info;
1531 struct ethtool_ringparam orig; 1579 struct ethtool_ringparam orig;
1532 u32 new_tx, new_rx; 1580 u32 new_tx, new_rx;
1533 bool was_opened;
1534 int ret = 0; 1581 int ret = 0;
1535 1582
1536 if (!nvdev || nvdev->destroy) 1583 if (!nvdev || nvdev->destroy)
@@ -1555,34 +1602,18 @@ static int netvsc_set_ringparam(struct net_device *ndev,
1555 device_info.recv_sections = new_rx; 1602 device_info.recv_sections = new_rx;
1556 device_info.recv_section_size = nvdev->recv_section_size; 1603 device_info.recv_section_size = nvdev->recv_section_size;
1557 1604
1558 netif_device_detach(ndev); 1605 ret = netvsc_detach(ndev, nvdev);
1559 was_opened = rndis_filter_opened(nvdev); 1606 if (ret)
1560 if (was_opened) 1607 return ret;
1561 rndis_filter_close(nvdev);
1562
1563 rndis_filter_device_remove(hdev, nvdev);
1564
1565 nvdev = rndis_filter_device_add(hdev, &device_info);
1566 if (IS_ERR(nvdev)) {
1567 ret = PTR_ERR(nvdev);
1568 1608
1609 ret = netvsc_attach(ndev, &device_info);
1610 if (ret) {
1569 device_info.send_sections = orig.tx_pending; 1611 device_info.send_sections = orig.tx_pending;
1570 device_info.recv_sections = orig.rx_pending; 1612 device_info.recv_sections = orig.rx_pending;
1571 nvdev = rndis_filter_device_add(hdev, &device_info);
1572 if (IS_ERR(nvdev)) {
1573 netdev_err(ndev, "restoring ringparam failed: %ld\n",
1574 PTR_ERR(nvdev));
1575 return ret;
1576 }
1577 }
1578
1579 if (was_opened)
1580 rndis_filter_open(nvdev);
1581 netif_device_attach(ndev);
1582 1613
1583 /* We may have missed link change notifications */ 1614 if (netvsc_attach(ndev, &device_info))
1584 ndevctx->last_reconfig = 0; 1615 netdev_err(ndev, "restoring ringparam failed");
1585 schedule_delayed_work(&ndevctx->dwork, 0); 1616 }
1586 1617
1587 return ret; 1618 return ret;
1588} 1619}
@@ -1846,8 +1877,12 @@ static void __netvsc_vf_setup(struct net_device *ndev,
1846 1877
1847 /* set multicast etc flags on VF */ 1878 /* set multicast etc flags on VF */
1848 dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE); 1879 dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);
1880
1881 /* sync address list from ndev to VF */
1882 netif_addr_lock_bh(ndev);
1849 dev_uc_sync(vf_netdev, ndev); 1883 dev_uc_sync(vf_netdev, ndev);
1850 dev_mc_sync(vf_netdev, ndev); 1884 dev_mc_sync(vf_netdev, ndev);
1885 netif_addr_unlock_bh(ndev);
1851 1886
1852 if (netif_running(ndev)) { 1887 if (netif_running(ndev)) {
1853 ret = dev_open(vf_netdev); 1888 ret = dev_open(vf_netdev);
@@ -2063,8 +2098,8 @@ no_net:
2063static int netvsc_remove(struct hv_device *dev) 2098static int netvsc_remove(struct hv_device *dev)
2064{ 2099{
2065 struct net_device_context *ndev_ctx; 2100 struct net_device_context *ndev_ctx;
2066 struct net_device *vf_netdev; 2101 struct net_device *vf_netdev, *net;
2067 struct net_device *net; 2102 struct netvsc_device *nvdev;
2068 2103
2069 net = hv_get_drvdata(dev); 2104 net = hv_get_drvdata(dev);
2070 if (net == NULL) { 2105 if (net == NULL) {
@@ -2074,10 +2109,14 @@ static int netvsc_remove(struct hv_device *dev)
2074 2109
2075 ndev_ctx = netdev_priv(net); 2110 ndev_ctx = netdev_priv(net);
2076 2111
2077 netif_device_detach(net);
2078
2079 cancel_delayed_work_sync(&ndev_ctx->dwork); 2112 cancel_delayed_work_sync(&ndev_ctx->dwork);
2080 2113
2114 rcu_read_lock();
2115 nvdev = rcu_dereference(ndev_ctx->nvdev);
2116
2117 if (nvdev)
2118 cancel_work_sync(&nvdev->subchan_work);
2119
2081 /* 2120 /*
2082 * Call to the vsc driver to let it know that the device is being 2121 * Call to the vsc driver to let it know that the device is being
2083 * removed. Also blocks mtu and channel changes. 2122 * removed. Also blocks mtu and channel changes.
@@ -2087,11 +2126,13 @@ static int netvsc_remove(struct hv_device *dev)
2087 if (vf_netdev) 2126 if (vf_netdev)
2088 netvsc_unregister_vf(vf_netdev); 2127 netvsc_unregister_vf(vf_netdev);
2089 2128
2129 if (nvdev)
2130 rndis_filter_device_remove(dev, nvdev);
2131
2090 unregister_netdevice(net); 2132 unregister_netdevice(net);
2091 2133
2092 rndis_filter_device_remove(dev,
2093 rtnl_dereference(ndev_ctx->nvdev));
2094 rtnl_unlock(); 2134 rtnl_unlock();
2135 rcu_read_unlock();
2095 2136
2096 hv_set_drvdata(dev, NULL); 2137 hv_set_drvdata(dev, NULL);
2097 2138
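The netvsc_drv.c changes above centralize the stop/drain/teardown and re-add sequence into netvsc_detach() and netvsc_attach(), shared by the channel, MTU and ring-size reconfiguration paths. A minimal sketch of how the pair is intended to be used, with error handling shortened; orig_info is a hypothetical variable standing in for whatever settings the caller saved beforehand:

	ret = netvsc_detach(ndev, nvdev);	/* close, drain, detach, remove */
	if (ret)
		return ret;

	/* ...update device_info: num_chn / mtu / ring sizes... */

	ret = netvsc_attach(ndev, &device_info);
	if (ret) {
		/* try to come back up with the previous settings */
		if (netvsc_attach(ndev, &orig_info))
			netdev_err(ndev, "restoring settings failed\n");
	}
	return ret;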
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 2dc00f714482..020f8bc54386 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -267,13 +267,23 @@ static void rndis_set_link_state(struct rndis_device *rdev,
267 } 267 }
268} 268}
269 269
270static void rndis_filter_receive_response(struct rndis_device *dev, 270static void rndis_filter_receive_response(struct net_device *ndev,
271 struct rndis_message *resp) 271 struct netvsc_device *nvdev,
272 const struct rndis_message *resp)
272{ 273{
274 struct rndis_device *dev = nvdev->extension;
273 struct rndis_request *request = NULL; 275 struct rndis_request *request = NULL;
274 bool found = false; 276 bool found = false;
275 unsigned long flags; 277 unsigned long flags;
276 struct net_device *ndev = dev->ndev; 278
279 /* This should never happen, it means control message
280 * response received after device removed.
281 */
282 if (dev->state == RNDIS_DEV_UNINITIALIZED) {
283 netdev_err(ndev,
284 "got rndis message uninitialized\n");
285 return;
286 }
277 287
278 spin_lock_irqsave(&dev->request_lock, flags); 288 spin_lock_irqsave(&dev->request_lock, flags);
279 list_for_each_entry(request, &dev->req_list, list_ent) { 289 list_for_each_entry(request, &dev->req_list, list_ent) {
@@ -355,7 +365,6 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
355 365
356static int rndis_filter_receive_data(struct net_device *ndev, 366static int rndis_filter_receive_data(struct net_device *ndev,
357 struct netvsc_device *nvdev, 367 struct netvsc_device *nvdev,
358 struct rndis_device *dev,
359 struct rndis_message *msg, 368 struct rndis_message *msg,
360 struct vmbus_channel *channel, 369 struct vmbus_channel *channel,
361 void *data, u32 data_buflen) 370 void *data, u32 data_buflen)
@@ -375,7 +384,7 @@ static int rndis_filter_receive_data(struct net_device *ndev,
375 * should be the data packet size plus the trailer padding size 384 * should be the data packet size plus the trailer padding size
376 */ 385 */
377 if (unlikely(data_buflen < rndis_pkt->data_len)) { 386 if (unlikely(data_buflen < rndis_pkt->data_len)) {
378 netdev_err(dev->ndev, "rndis message buffer " 387 netdev_err(ndev, "rndis message buffer "
379 "overflow detected (got %u, min %u)" 388 "overflow detected (got %u, min %u)"
380 "...dropping this message!\n", 389 "...dropping this message!\n",
381 data_buflen, rndis_pkt->data_len); 390 data_buflen, rndis_pkt->data_len);
@@ -403,35 +412,20 @@ int rndis_filter_receive(struct net_device *ndev,
403 void *data, u32 buflen) 412 void *data, u32 buflen)
404{ 413{
405 struct net_device_context *net_device_ctx = netdev_priv(ndev); 414 struct net_device_context *net_device_ctx = netdev_priv(ndev);
406 struct rndis_device *rndis_dev = net_dev->extension;
407 struct rndis_message *rndis_msg = data; 415 struct rndis_message *rndis_msg = data;
408 416
409 /* Make sure the rndis device state is initialized */
410 if (unlikely(!rndis_dev)) {
411 netif_dbg(net_device_ctx, rx_err, ndev,
412 "got rndis message but no rndis device!\n");
413 return NVSP_STAT_FAIL;
414 }
415
416 if (unlikely(rndis_dev->state == RNDIS_DEV_UNINITIALIZED)) {
417 netif_dbg(net_device_ctx, rx_err, ndev,
418 "got rndis message uninitialized\n");
419 return NVSP_STAT_FAIL;
420 }
421
422 if (netif_msg_rx_status(net_device_ctx)) 417 if (netif_msg_rx_status(net_device_ctx))
423 dump_rndis_message(ndev, rndis_msg); 418 dump_rndis_message(ndev, rndis_msg);
424 419
425 switch (rndis_msg->ndis_msg_type) { 420 switch (rndis_msg->ndis_msg_type) {
426 case RNDIS_MSG_PACKET: 421 case RNDIS_MSG_PACKET:
427 return rndis_filter_receive_data(ndev, net_dev, 422 return rndis_filter_receive_data(ndev, net_dev, rndis_msg,
428 rndis_dev, rndis_msg,
429 channel, data, buflen); 423 channel, data, buflen);
430 case RNDIS_MSG_INIT_C: 424 case RNDIS_MSG_INIT_C:
431 case RNDIS_MSG_QUERY_C: 425 case RNDIS_MSG_QUERY_C:
432 case RNDIS_MSG_SET_C: 426 case RNDIS_MSG_SET_C:
433 /* completion msgs */ 427 /* completion msgs */
434 rndis_filter_receive_response(rndis_dev, rndis_msg); 428 rndis_filter_receive_response(ndev, net_dev, rndis_msg);
435 break; 429 break;
436 430
437 case RNDIS_MSG_INDICATE: 431 case RNDIS_MSG_INDICATE:
@@ -828,13 +822,15 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev,
828 struct rndis_set_request *set; 822 struct rndis_set_request *set;
829 int ret; 823 int ret;
830 824
825 if (dev->filter == new_filter)
826 return 0;
827
831 request = get_rndis_request(dev, RNDIS_MSG_SET, 828 request = get_rndis_request(dev, RNDIS_MSG_SET,
832 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + 829 RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
833 sizeof(u32)); 830 sizeof(u32));
834 if (!request) 831 if (!request)
835 return -ENOMEM; 832 return -ENOMEM;
836 833
837
838 /* Setup the rndis set */ 834 /* Setup the rndis set */
839 set = &request->request_msg.msg.set_req; 835 set = &request->request_msg.msg.set_req;
840 set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER; 836 set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
@@ -845,8 +841,10 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev,
845 &new_filter, sizeof(u32)); 841 &new_filter, sizeof(u32));
846 842
847 ret = rndis_filter_send_request(dev, request); 843 ret = rndis_filter_send_request(dev, request);
848 if (ret == 0) 844 if (ret == 0) {
849 wait_for_completion(&request->wait_event); 845 wait_for_completion(&request->wait_event);
846 dev->filter = new_filter;
847 }
850 848
851 put_rndis_request(dev, request); 849 put_rndis_request(dev, request);
852 850
@@ -864,9 +862,9 @@ static void rndis_set_multicast(struct work_struct *w)
864 filter = NDIS_PACKET_TYPE_PROMISCUOUS; 862 filter = NDIS_PACKET_TYPE_PROMISCUOUS;
865 } else { 863 } else {
866 if (flags & IFF_ALLMULTI) 864 if (flags & IFF_ALLMULTI)
867 flags |= NDIS_PACKET_TYPE_ALL_MULTICAST; 865 filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
868 if (flags & IFF_BROADCAST) 866 if (flags & IFF_BROADCAST)
869 flags |= NDIS_PACKET_TYPE_BROADCAST; 867 filter |= NDIS_PACKET_TYPE_BROADCAST;
870 } 868 }
871 869
872 rndis_filter_set_packet_filter(rdev, filter); 870 rndis_filter_set_packet_filter(rdev, filter);
@@ -1124,6 +1122,7 @@ void rndis_set_subchannel(struct work_struct *w)
1124 for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) 1122 for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
1125 ndev_ctx->tx_table[i] = i % nvdev->num_chn; 1123 ndev_ctx->tx_table[i] = i % nvdev->num_chn;
1126 1124
1125 netif_device_attach(ndev);
1127 rtnl_unlock(); 1126 rtnl_unlock();
1128 return; 1127 return;
1129 1128
@@ -1134,6 +1133,8 @@ failed:
1134 1133
1135 nvdev->max_chn = 1; 1134 nvdev->max_chn = 1;
1136 nvdev->num_chn = 1; 1135 nvdev->num_chn = 1;
1136
1137 netif_device_attach(ndev);
1137unlock: 1138unlock:
1138 rtnl_unlock(); 1139 rtnl_unlock();
1139} 1140}
@@ -1336,6 +1337,10 @@ out:
1336 net_device->num_chn = 1; 1337 net_device->num_chn = 1;
1337 } 1338 }
1338 1339
1340 /* No sub channels, device is ready */
1341 if (net_device->num_chn == 1)
1342 netif_device_attach(net);
1343
1339 return net_device; 1344 return net_device;
1340 1345
1341err_dev_remv: 1346err_dev_remv:
@@ -1348,16 +1353,12 @@ void rndis_filter_device_remove(struct hv_device *dev,
1348{ 1353{
1349 struct rndis_device *rndis_dev = net_dev->extension; 1354 struct rndis_device *rndis_dev = net_dev->extension;
1350 1355
1351 /* Don't try and setup sub channels if about to halt */
1352 cancel_work_sync(&net_dev->subchan_work);
1353
1354 /* Halt and release the rndis device */ 1356 /* Halt and release the rndis device */
1355 rndis_filter_halt_device(net_dev, rndis_dev); 1357 rndis_filter_halt_device(net_dev, rndis_dev);
1356 1358
1357 net_dev->extension = NULL; 1359 net_dev->extension = NULL;
1358 1360
1359 netvsc_device_remove(dev); 1361 netvsc_device_remove(dev);
1360 kfree(rndis_dev);
1361} 1362}
1362 1363
1363int rndis_filter_open(struct netvsc_device *nvdev) 1364int rndis_filter_open(struct netvsc_device *nvdev)
@@ -1375,10 +1376,3 @@ int rndis_filter_close(struct netvsc_device *nvdev)
1375 1376
1376 return rndis_filter_close_device(nvdev->extension); 1377 return rndis_filter_close_device(nvdev->extension);
1377} 1378}
1378
1379bool rndis_filter_opened(const struct netvsc_device *nvdev)
1380{
1381 const struct rndis_device *dev = nvdev->extension;
1382
1383 return dev->state == RNDIS_DEV_DATAINITIALIZED;
1384}
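Among the rndis_filter.c changes above, rndis_filter_set_packet_filter() now caches the last programmed filter in the new rndis_device::filter field, so rx-mode updates that change nothing no longer issue a redundant RNDIS SET. A minimal sketch of the resulting flow, with the request construction elided:

	if (dev->filter == new_filter)
		return 0;			/* nothing to reprogram */

	/* ...build the RNDIS_OID_GEN_CURRENT_PACKET_FILTER set request... */

	ret = rndis_filter_send_request(dev, request);
	if (ret == 0) {
		wait_for_completion(&request->wait_event);
		dev->filter = new_filter;	/* only cache on success */
	}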
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 7de88b33d5b9..9cbb0c8a896a 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3277,7 +3277,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
3277 3277
3278 err = netdev_upper_dev_link(real_dev, dev, extack); 3278 err = netdev_upper_dev_link(real_dev, dev, extack);
3279 if (err < 0) 3279 if (err < 0)
3280 goto unregister; 3280 goto put_dev;
3281 3281
3282 /* need to be already registered so that ->init has run and 3282 /* need to be already registered so that ->init has run and
3283 * the MAC addr is set 3283 * the MAC addr is set
@@ -3316,7 +3316,8 @@ del_dev:
3316 macsec_del_dev(macsec); 3316 macsec_del_dev(macsec);
3317unlink: 3317unlink:
3318 netdev_upper_dev_unlink(real_dev, dev); 3318 netdev_upper_dev_unlink(real_dev, dev);
3319unregister: 3319put_dev:
3320 dev_put(real_dev);
3320 unregister_netdevice(dev); 3321 unregister_netdevice(dev);
3321 return err; 3322 return err;
3322} 3323}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 8fc02d9db3d0..725f4b4afc6d 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1036,7 +1036,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
1036 lowerdev_features &= (features | ~NETIF_F_LRO); 1036 lowerdev_features &= (features | ~NETIF_F_LRO);
1037 features = netdev_increment_features(lowerdev_features, features, mask); 1037 features = netdev_increment_features(lowerdev_features, features, mask);
1038 features |= ALWAYS_ON_FEATURES; 1038 features |= ALWAYS_ON_FEATURES;
1039 features &= ~NETIF_F_NETNS_LOCAL; 1039 features &= (ALWAYS_ON_FEATURES | MACVLAN_FEATURES);
1040 1040
1041 return features; 1041 return features;
1042} 1042}
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index 171010eb4d9c..5ad130c3da43 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -341,8 +341,8 @@ void bcm_phy_get_strings(struct phy_device *phydev, u8 *data)
341 unsigned int i; 341 unsigned int i;
342 342
343 for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++) 343 for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++)
344 memcpy(data + i * ETH_GSTRING_LEN, 344 strlcpy(data + i * ETH_GSTRING_LEN,
345 bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN); 345 bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN);
346} 346}
347EXPORT_SYMBOL_GPL(bcm_phy_get_strings); 347EXPORT_SYMBOL_GPL(bcm_phy_get_strings);
348 348
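This change (repeated below for the marvell and micrel PHY drivers) replaces memcpy() with strlcpy() when filling ethtool stat names: memcpy() always copies ETH_GSTRING_LEN bytes and can read past the end of a short string literal, while strlcpy() stops at the terminating NUL and still NUL-terminates the destination. The common pattern, with hw_stats as a placeholder for each driver's stats table:

	for (i = 0; i < ARRAY_SIZE(hw_stats); i++)
		strlcpy(data + i * ETH_GSTRING_LEN,
			hw_stats[i].string, ETH_GSTRING_LEN);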
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 98fd6b7ceeec..a75c511950c3 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1452,8 +1452,8 @@ static void marvell_get_strings(struct phy_device *phydev, u8 *data)
1452 int i; 1452 int i;
1453 1453
1454 for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) { 1454 for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) {
1455 memcpy(data + i * ETH_GSTRING_LEN, 1455 strlcpy(data + i * ETH_GSTRING_LEN,
1456 marvell_hw_stats[i].string, ETH_GSTRING_LEN); 1456 marvell_hw_stats[i].string, ETH_GSTRING_LEN);
1457 } 1457 }
1458} 1458}
1459 1459
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 0f45310300f6..f41b224a9cdb 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -635,25 +635,6 @@ static int ksz8873mll_config_aneg(struct phy_device *phydev)
635 return 0; 635 return 0;
636} 636}
637 637
638/* This routine returns -1 as an indication to the caller that the
639 * Micrel ksz9021 10/100/1000 PHY does not support standard IEEE
640 * MMD extended PHY registers.
641 */
642static int
643ksz9021_rd_mmd_phyreg(struct phy_device *phydev, int devad, u16 regnum)
644{
645 return -1;
646}
647
648/* This routine does nothing since the Micrel ksz9021 does not support
649 * standard IEEE MMD extended PHY registers.
650 */
651static int
652ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int devad, u16 regnum, u16 val)
653{
654 return -1;
655}
656
657static int kszphy_get_sset_count(struct phy_device *phydev) 638static int kszphy_get_sset_count(struct phy_device *phydev)
658{ 639{
659 return ARRAY_SIZE(kszphy_hw_stats); 640 return ARRAY_SIZE(kszphy_hw_stats);
@@ -664,8 +645,8 @@ static void kszphy_get_strings(struct phy_device *phydev, u8 *data)
664 int i; 645 int i;
665 646
666 for (i = 0; i < ARRAY_SIZE(kszphy_hw_stats); i++) { 647 for (i = 0; i < ARRAY_SIZE(kszphy_hw_stats); i++) {
667 memcpy(data + i * ETH_GSTRING_LEN, 648 strlcpy(data + i * ETH_GSTRING_LEN,
668 kszphy_hw_stats[i].string, ETH_GSTRING_LEN); 649 kszphy_hw_stats[i].string, ETH_GSTRING_LEN);
669 } 650 }
670} 651}
671 652
@@ -946,8 +927,8 @@ static struct phy_driver ksphy_driver[] = {
946 .get_stats = kszphy_get_stats, 927 .get_stats = kszphy_get_stats,
947 .suspend = genphy_suspend, 928 .suspend = genphy_suspend,
948 .resume = genphy_resume, 929 .resume = genphy_resume,
949 .read_mmd = ksz9021_rd_mmd_phyreg, 930 .read_mmd = genphy_read_mmd_unsupported,
950 .write_mmd = ksz9021_wr_mmd_phyreg, 931 .write_mmd = genphy_write_mmd_unsupported,
951}, { 932}, {
952 .phy_id = PHY_ID_KSZ9031, 933 .phy_id = PHY_ID_KSZ9031,
953 .phy_id_mask = MICREL_PHY_ID_MASK, 934 .phy_id_mask = MICREL_PHY_ID_MASK,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index c2d9027be863..05c1e8ef15e6 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -618,6 +618,68 @@ static void phy_error(struct phy_device *phydev)
618} 618}
619 619
620/** 620/**
621 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
622 * @phydev: target phy_device struct
623 */
624static int phy_disable_interrupts(struct phy_device *phydev)
625{
626 int err;
627
628 /* Disable PHY interrupts */
629 err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
630 if (err)
631 return err;
632
633 /* Clear the interrupt */
634 return phy_clear_interrupt(phydev);
635}
636
637/**
638 * phy_change - Called by the phy_interrupt to handle PHY changes
639 * @phydev: phy_device struct that interrupted
640 */
641static irqreturn_t phy_change(struct phy_device *phydev)
642{
643 if (phy_interrupt_is_valid(phydev)) {
644 if (phydev->drv->did_interrupt &&
645 !phydev->drv->did_interrupt(phydev))
646 return IRQ_NONE;
647
648 if (phydev->state == PHY_HALTED)
649 if (phy_disable_interrupts(phydev))
650 goto phy_err;
651 }
652
653 mutex_lock(&phydev->lock);
654 if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
655 phydev->state = PHY_CHANGELINK;
656 mutex_unlock(&phydev->lock);
657
658 /* reschedule state queue work to run as soon as possible */
659 phy_trigger_machine(phydev, true);
660
661 if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
662 goto phy_err;
663 return IRQ_HANDLED;
664
665phy_err:
666 phy_error(phydev);
667 return IRQ_NONE;
668}
669
670/**
671 * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
672 * @work: work_struct that describes the work to be done
673 */
674void phy_change_work(struct work_struct *work)
675{
676 struct phy_device *phydev =
677 container_of(work, struct phy_device, phy_queue);
678
679 phy_change(phydev);
680}
681
682/**
621 * phy_interrupt - PHY interrupt handler 683 * phy_interrupt - PHY interrupt handler
622 * @irq: interrupt line 684 * @irq: interrupt line
623 * @phy_dat: phy_device pointer 685 * @phy_dat: phy_device pointer
@@ -632,9 +694,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
632 if (PHY_HALTED == phydev->state) 694 if (PHY_HALTED == phydev->state)
633 return IRQ_NONE; /* It can't be ours. */ 695 return IRQ_NONE; /* It can't be ours. */
634 696
635 phy_change(phydev); 697 return phy_change(phydev);
636
637 return IRQ_HANDLED;
638} 698}
639 699
640/** 700/**
@@ -652,23 +712,6 @@ static int phy_enable_interrupts(struct phy_device *phydev)
652} 712}
653 713
654/** 714/**
655 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
656 * @phydev: target phy_device struct
657 */
658static int phy_disable_interrupts(struct phy_device *phydev)
659{
660 int err;
661
662 /* Disable PHY interrupts */
663 err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
664 if (err)
665 return err;
666
667 /* Clear the interrupt */
668 return phy_clear_interrupt(phydev);
669}
670
671/**
672 * phy_start_interrupts - request and enable interrupts for a PHY device 715 * phy_start_interrupts - request and enable interrupts for a PHY device
673 * @phydev: target phy_device struct 716 * @phydev: target phy_device struct
674 * 717 *
@@ -711,50 +754,6 @@ int phy_stop_interrupts(struct phy_device *phydev)
711EXPORT_SYMBOL(phy_stop_interrupts); 754EXPORT_SYMBOL(phy_stop_interrupts);
712 755
713/** 756/**
714 * phy_change - Called by the phy_interrupt to handle PHY changes
715 * @phydev: phy_device struct that interrupted
716 */
717void phy_change(struct phy_device *phydev)
718{
719 if (phy_interrupt_is_valid(phydev)) {
720 if (phydev->drv->did_interrupt &&
721 !phydev->drv->did_interrupt(phydev))
722 return;
723
724 if (phydev->state == PHY_HALTED)
725 if (phy_disable_interrupts(phydev))
726 goto phy_err;
727 }
728
729 mutex_lock(&phydev->lock);
730 if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
731 phydev->state = PHY_CHANGELINK;
732 mutex_unlock(&phydev->lock);
733
734 /* reschedule state queue work to run as soon as possible */
735 phy_trigger_machine(phydev, true);
736
737 if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
738 goto phy_err;
739 return;
740
741phy_err:
742 phy_error(phydev);
743}
744
745/**
746 * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
747 * @work: work_struct that describes the work to be done
748 */
749void phy_change_work(struct work_struct *work)
750{
751 struct phy_device *phydev =
752 container_of(work, struct phy_device, phy_queue);
753
754 phy_change(phydev);
755}
756
757/**
758 * phy_stop - Bring down the PHY link, and stop checking the status 757 * phy_stop - Bring down the PHY link, and stop checking the status
759 * @phydev: target phy_device struct 758 * @phydev: target phy_device struct
760 */ 759 */
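The phy.c rework above moves phy_change() ahead of phy_interrupt() and gives it an irqreturn_t return value, so the interrupt handler can report IRQ_NONE when the PHY's did_interrupt() callback says the event was not ours, which matters on shared interrupt lines. The handler as it stands after the patch, restated for clarity:

	static irqreturn_t phy_interrupt(int irq, void *phy_dat)
	{
		struct phy_device *phydev = phy_dat;

		if (PHY_HALTED == phydev->state)
			return IRQ_NONE;	/* It can't be ours. */

		return phy_change(phydev);
	}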
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index b285323327c4..ac23322a32e1 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1012,10 +1012,17 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
1012 err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj, 1012 err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj,
1013 "attached_dev"); 1013 "attached_dev");
1014 if (!err) { 1014 if (!err) {
1015 err = sysfs_create_link(&dev->dev.kobj, &phydev->mdio.dev.kobj, 1015 err = sysfs_create_link_nowarn(&dev->dev.kobj,
1016 "phydev"); 1016 &phydev->mdio.dev.kobj,
1017 if (err) 1017 "phydev");
1018 goto error; 1018 if (err) {
1019 dev_err(&dev->dev, "could not add device link to %s err %d\n",
1020 kobject_name(&phydev->mdio.dev.kobj),
1021 err);
1022 /* non-fatal - some net drivers can use one netdevice
1023 * with more then one phy
1024 */
1025 }
1019 1026
1020 phydev->sysfs_links = true; 1027 phydev->sysfs_links = true;
1021 } 1028 }
@@ -1666,6 +1673,23 @@ int genphy_config_init(struct phy_device *phydev)
1666} 1673}
1667EXPORT_SYMBOL(genphy_config_init); 1674EXPORT_SYMBOL(genphy_config_init);
1668 1675
1676/* This is used for the phy device which doesn't support the MMD extended
1677 * register access, but it does have side effect when we are trying to access
1678 * the MMD register via indirect method.
1679 */
1680int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad, u16 regnum)
1681{
1682 return -EOPNOTSUPP;
1683}
1684EXPORT_SYMBOL(genphy_read_mmd_unsupported);
1685
1686int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
1687 u16 regnum, u16 val)
1688{
1689 return -EOPNOTSUPP;
1690}
1691EXPORT_SYMBOL(genphy_write_mmd_unsupported);
1692
1669int genphy_suspend(struct phy_device *phydev) 1693int genphy_suspend(struct phy_device *phydev)
1670{ 1694{
1671 return phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN); 1695 return phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN);
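With genphy_read_mmd_unsupported() and genphy_write_mmd_unsupported() exported here, a PHY driver whose hardware misbehaves on indirect MMD accesses (micrel KSZ9021 above, realtek RTL8211B below) can simply wire the stubs into its phy_driver entry. A minimal sketch with placeholder identifiers only (example_phy_driver, the phy_id values and the name are illustrative, not from any driver):

	static struct phy_driver example_phy_driver = {
		.phy_id		= 0x00000000,		/* placeholder */
		.phy_id_mask	= 0xffffffff,
		.name		= "example PHY",
		/* refuse indirect MMD register access */
		.read_mmd	= genphy_read_mmd_unsupported,
		.write_mmd	= genphy_write_mmd_unsupported,
	};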
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index ee3ca4a2f12b..9f48ecf9c627 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -172,6 +172,8 @@ static struct phy_driver realtek_drvs[] = {
172 .flags = PHY_HAS_INTERRUPT, 172 .flags = PHY_HAS_INTERRUPT,
173 .ack_interrupt = &rtl821x_ack_interrupt, 173 .ack_interrupt = &rtl821x_ack_interrupt,
174 .config_intr = &rtl8211b_config_intr, 174 .config_intr = &rtl8211b_config_intr,
175 .read_mmd = &genphy_read_mmd_unsupported,
176 .write_mmd = &genphy_write_mmd_unsupported,
175 }, { 177 }, {
176 .phy_id = 0x001cc914, 178 .phy_id = 0x001cc914,
177 .name = "RTL8211DN Gigabit Ethernet", 179 .name = "RTL8211DN Gigabit Ethernet",
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 7dc2f34e7229..926c2c322d43 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -257,7 +257,7 @@ struct ppp_net {
257/* Prototypes. */ 257/* Prototypes. */
258static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, 258static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
259 struct file *file, unsigned int cmd, unsigned long arg); 259 struct file *file, unsigned int cmd, unsigned long arg);
260static void ppp_xmit_process(struct ppp *ppp); 260static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);
261static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); 261static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
262static void ppp_push(struct ppp *ppp); 262static void ppp_push(struct ppp *ppp);
263static void ppp_channel_push(struct channel *pch); 263static void ppp_channel_push(struct channel *pch);
@@ -513,13 +513,12 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
513 goto out; 513 goto out;
514 } 514 }
515 515
516 skb_queue_tail(&pf->xq, skb);
517
518 switch (pf->kind) { 516 switch (pf->kind) {
519 case INTERFACE: 517 case INTERFACE:
520 ppp_xmit_process(PF_TO_PPP(pf)); 518 ppp_xmit_process(PF_TO_PPP(pf), skb);
521 break; 519 break;
522 case CHANNEL: 520 case CHANNEL:
521 skb_queue_tail(&pf->xq, skb);
523 ppp_channel_push(PF_TO_CHANNEL(pf)); 522 ppp_channel_push(PF_TO_CHANNEL(pf));
524 break; 523 break;
525 } 524 }
@@ -1268,8 +1267,8 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
1268 put_unaligned_be16(proto, pp); 1267 put_unaligned_be16(proto, pp);
1269 1268
1270 skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev))); 1269 skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
1271 skb_queue_tail(&ppp->file.xq, skb); 1270 ppp_xmit_process(ppp, skb);
1272 ppp_xmit_process(ppp); 1271
1273 return NETDEV_TX_OK; 1272 return NETDEV_TX_OK;
1274 1273
1275 outf: 1274 outf:
@@ -1421,13 +1420,14 @@ static void ppp_setup(struct net_device *dev)
1421 */ 1420 */
1422 1421
1423/* Called to do any work queued up on the transmit side that can now be done */ 1422/* Called to do any work queued up on the transmit side that can now be done */
1424static void __ppp_xmit_process(struct ppp *ppp) 1423static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
1425{ 1424{
1426 struct sk_buff *skb;
1427
1428 ppp_xmit_lock(ppp); 1425 ppp_xmit_lock(ppp);
1429 if (!ppp->closing) { 1426 if (!ppp->closing) {
1430 ppp_push(ppp); 1427 ppp_push(ppp);
1428
1429 if (skb)
1430 skb_queue_tail(&ppp->file.xq, skb);
1431 while (!ppp->xmit_pending && 1431 while (!ppp->xmit_pending &&
1432 (skb = skb_dequeue(&ppp->file.xq))) 1432 (skb = skb_dequeue(&ppp->file.xq)))
1433 ppp_send_frame(ppp, skb); 1433 ppp_send_frame(ppp, skb);
@@ -1441,7 +1441,7 @@ static void __ppp_xmit_process(struct ppp *ppp)
1441 ppp_xmit_unlock(ppp); 1441 ppp_xmit_unlock(ppp);
1442} 1442}
1443 1443
1444static void ppp_xmit_process(struct ppp *ppp) 1444static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
1445{ 1445{
1446 local_bh_disable(); 1446 local_bh_disable();
1447 1447
@@ -1449,7 +1449,7 @@ static void ppp_xmit_process(struct ppp *ppp)
1449 goto err; 1449 goto err;
1450 1450
1451 (*this_cpu_ptr(ppp->xmit_recursion))++; 1451 (*this_cpu_ptr(ppp->xmit_recursion))++;
1452 __ppp_xmit_process(ppp); 1452 __ppp_xmit_process(ppp, skb);
1453 (*this_cpu_ptr(ppp->xmit_recursion))--; 1453 (*this_cpu_ptr(ppp->xmit_recursion))--;
1454 1454
1455 local_bh_enable(); 1455 local_bh_enable();
@@ -1459,6 +1459,8 @@ static void ppp_xmit_process(struct ppp *ppp)
1459err: 1459err:
1460 local_bh_enable(); 1460 local_bh_enable();
1461 1461
1462 kfree_skb(skb);
1463
1462 if (net_ratelimit()) 1464 if (net_ratelimit())
1463 netdev_err(ppp->dev, "recursion detected\n"); 1465 netdev_err(ppp->dev, "recursion detected\n");
1464} 1466}
@@ -1943,7 +1945,7 @@ static void __ppp_channel_push(struct channel *pch)
1943 if (skb_queue_empty(&pch->file.xq)) { 1945 if (skb_queue_empty(&pch->file.xq)) {
1944 ppp = pch->ppp; 1946 ppp = pch->ppp;
1945 if (ppp) 1947 if (ppp)
1946 __ppp_xmit_process(ppp); 1948 __ppp_xmit_process(ppp, NULL);
1947 } 1949 }
1948} 1950}
1949 1951
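The ppp_generic changes boil down to an ownership rule: callers hand the skb to ppp_xmit_process() instead of queueing it first, so the frame is either enqueued on ppp->file.xq while ppp_xmit_lock is held or freed on the recursion-detected error path. A minimal sketch of a send path after this change (illustrative only, using the functions from the patch):

	static netdev_tx_t example_ppp_xmit(struct ppp *ppp, struct sk_buff *skb)
	{
		/* no skb_queue_tail() here any more: queueing now happens under
		 * ppp_xmit_lock inside __ppp_xmit_process(), and on the recursion
		 * error path ppp_xmit_process() frees the skb itself.
		 */
		ppp_xmit_process(ppp, skb);
		return NETDEV_TX_OK;
	}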
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 5dd781e65958..222093e878a8 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2401,7 +2401,7 @@ send_done:
2401 if (!nlh) { 2401 if (!nlh) {
2402 err = __send_and_alloc_skb(&skb, team, portid, send_func); 2402 err = __send_and_alloc_skb(&skb, team, portid, send_func);
2403 if (err) 2403 if (err)
2404 goto errout; 2404 return err;
2405 goto send_done; 2405 goto send_done;
2406 } 2406 }
2407 2407
@@ -2687,7 +2687,7 @@ send_done:
2687 if (!nlh) { 2687 if (!nlh) {
2688 err = __send_and_alloc_skb(&skb, team, portid, send_func); 2688 err = __send_and_alloc_skb(&skb, team, portid, send_func);
2689 if (err) 2689 if (err)
2690 goto errout; 2690 return err;
2691 goto send_done; 2691 goto send_done;
2692 } 2692 }
2693 2693
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index baeafa004463..a1ba262f40ad 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -656,7 +656,7 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
656 return tun; 656 return tun;
657} 657}
658 658
659static void tun_ptr_free(void *ptr) 659void tun_ptr_free(void *ptr)
660{ 660{
661 if (!ptr) 661 if (!ptr)
662 return; 662 return;
@@ -668,6 +668,7 @@ static void tun_ptr_free(void *ptr)
668 __skb_array_destroy_skb(ptr); 668 __skb_array_destroy_skb(ptr);
669 } 669 }
670} 670}
671EXPORT_SYMBOL_GPL(tun_ptr_free);
671 672
672static void tun_queue_purge(struct tun_file *tfile) 673static void tun_queue_purge(struct tun_file *tfile)
673{ 674{
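tun_ptr_free() is exported here, presumably so that code outside tun.c that drains the tun pointer ring can dispose of the mixed skb/XDP entries correctly. Assuming a matching declaration is made visible (for example in if_tun.h), a typical user would pass it as the ring destructor; example_drop_ring() below is a hypothetical name:

	#include <linux/ptr_ring.h>

	static void example_drop_ring(struct ptr_ring *ring)
	{
		/* each entry goes through tun_ptr_free(), which tells XDP
		 * frames apart from ordinary skbs before freeing them
		 */
		ptr_ring_cleanup(ring, tun_ptr_free);
	}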
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 8a22ff67b026..d9eea8cfe6cb 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -315,6 +315,7 @@ static void __usbnet_status_stop_force(struct usbnet *dev)
315void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) 315void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
316{ 316{
317 struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); 317 struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
318 unsigned long flags;
318 int status; 319 int status;
319 320
320 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) { 321 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
@@ -326,10 +327,10 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
326 if (skb->protocol == 0) 327 if (skb->protocol == 0)
327 skb->protocol = eth_type_trans (skb, dev->net); 328 skb->protocol = eth_type_trans (skb, dev->net);
328 329
329 u64_stats_update_begin(&stats64->syncp); 330 flags = u64_stats_update_begin_irqsave(&stats64->syncp);
330 stats64->rx_packets++; 331 stats64->rx_packets++;
331 stats64->rx_bytes += skb->len; 332 stats64->rx_bytes += skb->len;
332 u64_stats_update_end(&stats64->syncp); 333 u64_stats_update_end_irqrestore(&stats64->syncp, flags);
333 334
334 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n", 335 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
335 skb->len + sizeof (struct ethhdr), skb->protocol); 336 skb->len + sizeof (struct ethhdr), skb->protocol);
@@ -1248,11 +1249,12 @@ static void tx_complete (struct urb *urb)
1248 1249
1249 if (urb->status == 0) { 1250 if (urb->status == 0) {
1250 struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); 1251 struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
1252 unsigned long flags;
1251 1253
1252 u64_stats_update_begin(&stats64->syncp); 1254 flags = u64_stats_update_begin_irqsave(&stats64->syncp);
1253 stats64->tx_packets += entry->packets; 1255 stats64->tx_packets += entry->packets;
1254 stats64->tx_bytes += entry->length; 1256 stats64->tx_bytes += entry->length;
1255 u64_stats_update_end(&stats64->syncp); 1257 u64_stats_update_end_irqrestore(&stats64->syncp, flags);
1256 } else { 1258 } else {
1257 dev->net->stats.tx_errors++; 1259 dev->net->stats.tx_errors++;
1258 1260
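The switch to the _irqsave stats helpers covers the case where these completion paths run with interrupts disabled or in hard-irq context; on 32-bit, nesting a plain u64_stats writer inside an interrupted one on the same CPU could deadlock the seqcount, and the _irqsave/_irqrestore pair avoids that while compiling away on 64-bit. The reader side is unchanged; as a hedged sketch (illustrative helper name), a ndo_get_stats64 path would read the per-CPU counters like this:

	static void example_fetch_rx_stats(struct pcpu_sw_netstats *stats64,
					   u64 *packets, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats64->syncp);
			*packets = stats64->rx_packets;
			*bytes = stats64->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats64->syncp, start));
	}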
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 8b39c160743d..e04937f44f33 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -977,6 +977,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
977{ 977{
978 int ret; 978 int ret;
979 u32 count; 979 u32 count;
980 int num_pkts;
981 int tx_num_deferred;
980 unsigned long flags; 982 unsigned long flags;
981 struct vmxnet3_tx_ctx ctx; 983 struct vmxnet3_tx_ctx ctx;
982 union Vmxnet3_GenericDesc *gdesc; 984 union Vmxnet3_GenericDesc *gdesc;
@@ -1075,12 +1077,12 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1075#else 1077#else
1076 gdesc = ctx.sop_txd; 1078 gdesc = ctx.sop_txd;
1077#endif 1079#endif
1080 tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
1078 if (ctx.mss) { 1081 if (ctx.mss) {
1079 gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size; 1082 gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
1080 gdesc->txd.om = VMXNET3_OM_TSO; 1083 gdesc->txd.om = VMXNET3_OM_TSO;
1081 gdesc->txd.msscof = ctx.mss; 1084 gdesc->txd.msscof = ctx.mss;
1082 le32_add_cpu(&tq->shared->txNumDeferred, (skb->len - 1085 num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
1083 gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
1084 } else { 1086 } else {
1085 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1087 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1086 gdesc->txd.hlen = ctx.eth_ip_hdr_size; 1088 gdesc->txd.hlen = ctx.eth_ip_hdr_size;
@@ -1091,8 +1093,10 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1091 gdesc->txd.om = 0; 1093 gdesc->txd.om = 0;
1092 gdesc->txd.msscof = 0; 1094 gdesc->txd.msscof = 0;
1093 } 1095 }
1094 le32_add_cpu(&tq->shared->txNumDeferred, 1); 1096 num_pkts = 1;
1095 } 1097 }
1098 le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
1099 tx_num_deferred += num_pkts;
1096 1100
1097 if (skb_vlan_tag_present(skb)) { 1101 if (skb_vlan_tag_present(skb)) {
1098 gdesc->txd.ti = 1; 1102 gdesc->txd.ti = 1;
@@ -1118,8 +1122,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1118 1122
1119 spin_unlock_irqrestore(&tq->tx_lock, flags); 1123 spin_unlock_irqrestore(&tq->tx_lock, flags);
1120 1124
1121 if (le32_to_cpu(tq->shared->txNumDeferred) >= 1125 if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
1122 le32_to_cpu(tq->shared->txThreshold)) {
1123 tq->shared->txNumDeferred = 0; 1126 tq->shared->txNumDeferred = 0;
1124 VMXNET3_WRITE_BAR0_REG(adapter, 1127 VMXNET3_WRITE_BAR0_REG(adapter,
1125 VMXNET3_REG_TXPROD + tq->qid * 8, 1128 VMXNET3_REG_TXPROD + tq->qid * 8,
@@ -1470,7 +1473,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1470 vmxnet3_rx_csum(adapter, skb, 1473 vmxnet3_rx_csum(adapter, skb,
1471 (union Vmxnet3_GenericDesc *)rcd); 1474 (union Vmxnet3_GenericDesc *)rcd);
1472 skb->protocol = eth_type_trans(skb, adapter->netdev); 1475 skb->protocol = eth_type_trans(skb, adapter->netdev);
1473 if (!rcd->tcp || !adapter->lro) 1476 if (!rcd->tcp ||
1477 !(adapter->netdev->features & NETIF_F_LRO))
1474 goto not_lro; 1478 goto not_lro;
1475 1479
1476 if (segCnt != 0 && mss != 0) { 1480 if (segCnt != 0 && mss != 0) {
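In the TSO branch the deferred-packet count is just a round-up division of the TSO payload by the MSS: with illustrative numbers skb->len = 2974, hlen = 54 and mss = 1460, (2974 - 54 + 1460 - 1) / 1460 = 2 segments. The open-coded expression is equivalent to the kernel helper:

	/* equivalent to the expression used in the patch */
	num_pkts = DIV_ROUND_UP(skb->len - gdesc->txd.hlen, ctx.mss);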
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 5ba222920e80..59ec34052a65 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.4.11.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.4.13.0-k"
73 73
 74/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ 74/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
75#define VMXNET3_DRIVER_VERSION_NUM 0x01040b00 75#define VMXNET3_DRIVER_VERSION_NUM 0x01040d00
76 76
77#if defined(CONFIG_PCI_MSI) 77#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 78 /* RSS only makes sense if MSI-X is supported. */
@@ -342,9 +342,6 @@ struct vmxnet3_adapter {
342 u8 __iomem *hw_addr1; /* for BAR 1 */ 342 u8 __iomem *hw_addr1; /* for BAR 1 */
343 u8 version; 343 u8 version;
344 344
345 bool rxcsum;
346 bool lro;
347
348#ifdef VMXNET3_RSS 345#ifdef VMXNET3_RSS
349 struct UPT1_RSSConf *rss_conf; 346 struct UPT1_RSSConf *rss_conf;
350 bool rss; 347 bool rss;
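The version constants keep the usual one-byte-per-field packing, so 1.4.13.0 encodes as (1 << 24) | (4 << 16) | (13 << 8) | 0 = 0x01040d00, which matches the new VMXNET3_DRIVER_VERSION_NUM. Written as a hypothetical helper macro (not part of the driver):

	#define VMXNET3_MAKE_VERSION(a, b, c, d) \
		(((a) << 24) | ((b) << 16) | ((c) << 8) | (d))

	/* VMXNET3_MAKE_VERSION(1, 4, 13, 0) == 0x01040d00 */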
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index e89e5ef2c2a4..f246e9ed4a81 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -729,6 +729,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
729 ieee80211_hw_set(hw, SPECTRUM_MGMT); 729 ieee80211_hw_set(hw, SPECTRUM_MGMT);
730 ieee80211_hw_set(hw, SIGNAL_DBM); 730 ieee80211_hw_set(hw, SIGNAL_DBM);
731 ieee80211_hw_set(hw, AMPDU_AGGREGATION); 731 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
732 ieee80211_hw_set(hw, DOESNT_SUPPORT_QOS_NDP);
732 733
733 if (ath9k_ps_enable) 734 if (ath9k_ps_enable)
734 ieee80211_hw_set(hw, SUPPORTS_PS); 735 ieee80211_hw_set(hw, SUPPORTS_PS);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index df8a1ecb9924..232dcbb83311 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -181,6 +181,7 @@ enum brcmf_netif_stop_reason {
181 * @netif_stop_lock: spinlock for update netif_stop from multiple sources. 181 * @netif_stop_lock: spinlock for update netif_stop from multiple sources.
182 * @pend_8021x_cnt: tracks outstanding number of 802.1x frames. 182 * @pend_8021x_cnt: tracks outstanding number of 802.1x frames.
183 * @pend_8021x_wait: used for signalling change in count. 183 * @pend_8021x_wait: used for signalling change in count.
184 * @fwil_fwerr: flag indicating fwil layer should return firmware error codes.
184 */ 185 */
185struct brcmf_if { 186struct brcmf_if {
186 struct brcmf_pub *drvr; 187 struct brcmf_pub *drvr;
@@ -198,6 +199,7 @@ struct brcmf_if {
198 wait_queue_head_t pend_8021x_wait; 199 wait_queue_head_t pend_8021x_wait;
199 struct in6_addr ipv6_addr_tbl[NDOL_MAX_ENTRIES]; 200 struct in6_addr ipv6_addr_tbl[NDOL_MAX_ENTRIES];
200 u8 ipv6addr_idx; 201 u8 ipv6addr_idx;
202 bool fwil_fwerr;
201}; 203};
202 204
203int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp); 205int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 47de35a33853..bede7b7fd996 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -104,6 +104,9 @@ static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp,
104 u32 data; 104 u32 data;
105 int err; 105 int err;
106 106
107 /* we need to know firmware error */
108 ifp->fwil_fwerr = true;
109
107 err = brcmf_fil_iovar_int_get(ifp, name, &data); 110 err = brcmf_fil_iovar_int_get(ifp, name, &data);
108 if (err == 0) { 111 if (err == 0) {
109 brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]); 112 brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]);
@@ -112,6 +115,8 @@ static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp,
112 brcmf_dbg(TRACE, "%s feature check failed: %d\n", 115 brcmf_dbg(TRACE, "%s feature check failed: %d\n",
113 brcmf_feat_names[id], err); 116 brcmf_feat_names[id], err);
114 } 117 }
118
119 ifp->fwil_fwerr = false;
115} 120}
116 121
117static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp, 122static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,
@@ -120,6 +125,9 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,
120{ 125{
121 int err; 126 int err;
122 127
128 /* we need to know firmware error */
129 ifp->fwil_fwerr = true;
130
123 err = brcmf_fil_iovar_data_set(ifp, name, data, len); 131 err = brcmf_fil_iovar_data_set(ifp, name, data, len);
124 if (err != -BRCMF_FW_UNSUPPORTED) { 132 if (err != -BRCMF_FW_UNSUPPORTED) {
125 brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]); 133 brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]);
@@ -128,6 +136,8 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,
128 brcmf_dbg(TRACE, "%s feature check failed: %d\n", 136 brcmf_dbg(TRACE, "%s feature check failed: %d\n",
129 brcmf_feat_names[id], err); 137 brcmf_feat_names[id], err);
130 } 138 }
139
140 ifp->fwil_fwerr = false;
131} 141}
132 142
133#define MAX_CAPS_BUFFER_SIZE 512 143#define MAX_CAPS_BUFFER_SIZE 512
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
index f2cfdd3b2bf1..fc5751116d99 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
@@ -131,6 +131,9 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
131 brcmf_fil_get_errstr((u32)(-fwerr)), fwerr); 131 brcmf_fil_get_errstr((u32)(-fwerr)), fwerr);
132 err = -EBADE; 132 err = -EBADE;
133 } 133 }
134 if (ifp->fwil_fwerr)
135 return fwerr;
136
134 return err; 137 return err;
135} 138}
136 139
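Taken together, the brcmfmac hunks thread a per-interface flag through the firmware-interface layer so that feature probing sees the raw firmware error (for example -BRCMF_FW_UNSUPPORTED) instead of the generic -EBADE. A condensed sketch of the probe-time pattern, with a hypothetical wrapper name:

	static int example_probe_iovar(struct brcmf_if *ifp, char *name, u32 *val)
	{
		int err;

		ifp->fwil_fwerr = true;		/* ask fwil for raw firmware errors */
		err = brcmf_fil_iovar_int_get(ifp, name, val);
		ifp->fwil_fwerr = false;

		return err;			/* firmware code reaches the caller */
	}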
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 2ee54133efa1..82064e909784 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -462,25 +462,23 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)
462 * @dev_addr: optional device address. 462 * @dev_addr: optional device address.
463 * 463 *
464 * P2P needs mac addresses for P2P device and interface. If no device 464 * P2P needs mac addresses for P2P device and interface. If no device
 465 * address is specified, these are derived from the primary net device, ie. 465 * address is specified, these are derived from a random ethernet
466 * the permanent ethernet address of the device. 466 * address.
467 */ 467 */
468static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr) 468static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr)
469{ 469{
470 struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; 470 bool random_addr = false;
471 bool local_admin = false;
472 471
473 if (!dev_addr || is_zero_ether_addr(dev_addr)) { 472 if (!dev_addr || is_zero_ether_addr(dev_addr))
474 dev_addr = pri_ifp->mac_addr; 473 random_addr = true;
475 local_admin = true;
476 }
477 474
478 /* Generate the P2P Device Address. This consists of the device's 475 /* Generate the P2P Device Address obtaining a random ethernet
479 * primary MAC address with the locally administered bit set. 476 * address with the locally administered bit set.
480 */ 477 */
481 memcpy(p2p->dev_addr, dev_addr, ETH_ALEN); 478 if (random_addr)
482 if (local_admin) 479 eth_random_addr(p2p->dev_addr);
483 p2p->dev_addr[0] |= 0x02; 480 else
481 memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);
484 482
485 /* Generate the P2P Interface Address. If the discovery and connection 483 /* Generate the P2P Interface Address. If the discovery and connection
486 * BSSCFGs need to simultaneously co-exist, then this address must be 484 * BSSCFGs need to simultaneously co-exist, then this address must be
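The hand-rolled |= 0x02 can go away because eth_random_addr() already yields a locally administered, non-multicast address. Its helper in <linux/etherdevice.h> is essentially:

	static inline void eth_random_addr(u8 *addr)
	{
		get_random_bytes(addr, ETH_ALEN);
		addr[0] &= 0xfe;	/* clear the multicast bit */
		addr[0] |= 0x02;	/* set the locally administered bit */
	}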
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index c5f2ddf9b0fe..e5a2fc738ac3 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -91,7 +91,6 @@ config IWLWIFI_BCAST_FILTERING
91config IWLWIFI_PCIE_RTPM 91config IWLWIFI_PCIE_RTPM
92 bool "Enable runtime power management mode for PCIe devices" 92 bool "Enable runtime power management mode for PCIe devices"
93 depends on IWLMVM && PM && EXPERT 93 depends on IWLMVM && PM && EXPERT
94 default false
95 help 94 help
96 Say Y here to enable runtime power management for PCIe 95 Say Y here to enable runtime power management for PCIe
97 devices. If enabled, the device will go into low power mode 96 devices. If enabled, the device will go into low power mode
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
index 3721a3ed358b..f824bebceb06 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
@@ -211,7 +211,7 @@ enum {
211 * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end 211 * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end
212 * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use. 212 * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
213 * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use. 213 * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
214 * @T2_V2_START_IMMEDIATELY: start time event immediately 214 * @TE_V2_START_IMMEDIATELY: start time event immediately
215 * @TE_V2_DEP_OTHER: depends on another time event 215 * @TE_V2_DEP_OTHER: depends on another time event
216 * @TE_V2_DEP_TSF: depends on a specific time 216 * @TE_V2_DEP_TSF: depends on a specific time
 217 * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of the same MAC 217 * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of the same MAC
@@ -230,7 +230,7 @@ enum iwl_time_event_policy {
230 TE_V2_NOTIF_HOST_FRAG_END = BIT(5), 230 TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
231 TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6), 231 TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
232 TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7), 232 TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
233 T2_V2_START_IMMEDIATELY = BIT(11), 233 TE_V2_START_IMMEDIATELY = BIT(11),
234 234
235 /* placement characteristics */ 235 /* placement characteristics */
236 TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS), 236 TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 67aefc8fc9ac..7bd704a3e640 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -8,6 +8,7 @@
8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 10 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation
11 * 12 *
12 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as 14 * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
34 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
35 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 36 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
37 * Copyright(c) 2018 Intel Corporation
36 * All rights reserved. 38 * All rights reserved.
37 * 39 *
38 * Redistribution and use in source and binary forms, with or without 40 * Redistribution and use in source and binary forms, with or without
@@ -942,7 +944,6 @@ dump_trans_data:
942 944
943out: 945out:
944 iwl_fw_free_dump_desc(fwrt); 946 iwl_fw_free_dump_desc(fwrt);
945 fwrt->dump.trig = NULL;
946 clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status); 947 clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
947 IWL_DEBUG_INFO(fwrt, "WRT dump done\n"); 948 IWL_DEBUG_INFO(fwrt, "WRT dump done\n");
948} 949}
@@ -1112,6 +1113,14 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
1112 fwrt->ops->dump_start(fwrt->ops_ctx)) 1113 fwrt->ops->dump_start(fwrt->ops_ctx))
1113 return; 1114 return;
1114 1115
1116 if (fwrt->ops && fwrt->ops->fw_running &&
1117 !fwrt->ops->fw_running(fwrt->ops_ctx)) {
1118 IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
1119 iwl_fw_free_dump_desc(fwrt);
1120 clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
1121 goto out;
1122 }
1123
1115 if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { 1124 if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
1116 /* stop recording */ 1125 /* stop recording */
1117 iwl_fw_dbg_stop_recording(fwrt); 1126 iwl_fw_dbg_stop_recording(fwrt);
@@ -1145,7 +1154,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
1145 iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl); 1154 iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl);
1146 } 1155 }
1147 } 1156 }
1148 1157out:
1149 if (fwrt->ops && fwrt->ops->dump_end) 1158 if (fwrt->ops && fwrt->ops->dump_end)
1150 fwrt->ops->dump_end(fwrt->ops_ctx); 1159 fwrt->ops->dump_end(fwrt->ops_ctx);
1151} 1160}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 223fb77a3aa9..72259bff9922 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -8,6 +8,7 @@
8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 10 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation
11 * 12 *
12 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as 14 * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
34 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
35 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 36 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
37 * Copyright(c) 2018 Intel Corporation
36 * All rights reserved. 38 * All rights reserved.
37 * 39 *
38 * Redistribution and use in source and binary forms, with or without 40 * Redistribution and use in source and binary forms, with or without
@@ -91,6 +93,7 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
91 if (fwrt->dump.desc != &iwl_dump_desc_assert) 93 if (fwrt->dump.desc != &iwl_dump_desc_assert)
92 kfree(fwrt->dump.desc); 94 kfree(fwrt->dump.desc);
93 fwrt->dump.desc = NULL; 95 fwrt->dump.desc = NULL;
96 fwrt->dump.trig = NULL;
94} 97}
95 98
96void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt); 99void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
index e57ff92a68ae..3da468d2cc92 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
@@ -75,6 +75,20 @@ static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt)
75 cancel_delayed_work_sync(&fwrt->timestamp.wk); 75 cancel_delayed_work_sync(&fwrt->timestamp.wk);
76} 76}
77 77
78static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt)
79{
80 cancel_delayed_work_sync(&fwrt->timestamp.wk);
81}
82
83static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt)
84{
85 if (!fwrt->timestamp.delay)
86 return;
87
88 schedule_delayed_work(&fwrt->timestamp.wk,
89 round_jiffies_relative(fwrt->timestamp.delay));
90}
91
78#else 92#else
79static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, 93static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
80 struct dentry *dbgfs_dir) 94 struct dentry *dbgfs_dir)
@@ -84,4 +98,8 @@ static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
84 98
85static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {} 99static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {}
86 100
101static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) {}
102
103static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
104
87#endif /* CONFIG_IWLWIFI_DEBUGFS */ 105#endif /* CONFIG_IWLWIFI_DEBUGFS */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index c39fe84bb4c4..2efac307909e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -77,8 +77,14 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
77} 77}
78IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); 78IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
79 79
80void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt) 80void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt)
81{ 81{
82 iwl_fw_cancel_timestamp(fwrt); 82 iwl_fw_suspend_timestamp(fwrt);
83} 83}
84IWL_EXPORT_SYMBOL(iwl_fw_runtime_exit); 84IWL_EXPORT_SYMBOL(iwl_fw_runtime_suspend);
85
86void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt)
87{
88 iwl_fw_resume_timestamp(fwrt);
89}
90IWL_EXPORT_SYMBOL(iwl_fw_runtime_resume);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index e25c049f980f..3fb940ebd74a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -6,6 +6,7 @@
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2017 Intel Deutschland GmbH 8 * Copyright(c) 2017 Intel Deutschland GmbH
9 * Copyright(c) 2018 Intel Corporation
9 * 10 *
10 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 12 * it under the terms of version 2 of the GNU General Public License as
@@ -26,6 +27,7 @@
26 * BSD LICENSE 27 * BSD LICENSE
27 * 28 *
28 * Copyright(c) 2017 Intel Deutschland GmbH 29 * Copyright(c) 2017 Intel Deutschland GmbH
30 * Copyright(c) 2018 Intel Corporation
29 * All rights reserved. 31 * All rights reserved.
30 * 32 *
31 * Redistribution and use in source and binary forms, with or without 33 * Redistribution and use in source and binary forms, with or without
@@ -68,6 +70,7 @@
68struct iwl_fw_runtime_ops { 70struct iwl_fw_runtime_ops {
69 int (*dump_start)(void *ctx); 71 int (*dump_start)(void *ctx);
70 void (*dump_end)(void *ctx); 72 void (*dump_end)(void *ctx);
73 bool (*fw_running)(void *ctx);
71}; 74};
72 75
73#define MAX_NUM_LMAC 2 76#define MAX_NUM_LMAC 2
@@ -150,6 +153,10 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
150 153
151void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt); 154void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt);
152 155
156void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt);
157
158void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt);
159
153static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt, 160static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt,
154 enum iwl_ucode_type cur_fw_img) 161 enum iwl_ucode_type cur_fw_img)
155{ 162{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 0e6cf39285f4..2efe9b099556 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1098,6 +1098,8 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
1098 /* make sure the d0i3 exit work is not pending */ 1098 /* make sure the d0i3 exit work is not pending */
1099 flush_work(&mvm->d0i3_exit_work); 1099 flush_work(&mvm->d0i3_exit_work);
1100 1100
1101 iwl_fw_runtime_suspend(&mvm->fwrt);
1102
1101 ret = iwl_trans_suspend(trans); 1103 ret = iwl_trans_suspend(trans);
1102 if (ret) 1104 if (ret)
1103 return ret; 1105 return ret;
@@ -2012,6 +2014,8 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
2012 2014
2013 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; 2015 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
2014 2016
2017 iwl_fw_runtime_resume(&mvm->fwrt);
2018
2015 return ret; 2019 return ret;
2016} 2020}
2017 2021
@@ -2038,6 +2042,8 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
2038 2042
2039 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; 2043 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
2040 2044
2045 iwl_fw_runtime_suspend(&mvm->fwrt);
2046
2041 /* start pseudo D3 */ 2047 /* start pseudo D3 */
2042 rtnl_lock(); 2048 rtnl_lock();
2043 err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true); 2049 err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
@@ -2098,6 +2104,8 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
2098 __iwl_mvm_resume(mvm, true); 2104 __iwl_mvm_resume(mvm, true);
2099 rtnl_unlock(); 2105 rtnl_unlock();
2100 2106
2107 iwl_fw_runtime_resume(&mvm->fwrt);
2108
2101 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; 2109 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
2102 2110
2103 iwl_abort_notification_waits(&mvm->notif_wait); 2111 iwl_abort_notification_waits(&mvm->notif_wait);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index a7892c1254a2..9c436d8d001d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -8,6 +8,7 @@
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation
11 * 12 *
12 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as 14 * it under the terms of version 2 of the GNU General Public License as
@@ -35,6 +36,7 @@
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 36 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 37 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
37 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 38 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
39 * Copyright(c) 2018 Intel Corporation
38 * All rights reserved. 40 * All rights reserved.
39 * 41 *
40 * Redistribution and use in source and binary forms, with or without 42 * Redistribution and use in source and binary forms, with or without
@@ -1281,9 +1283,6 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
1281{ 1283{
1282 int ret; 1284 int ret;
1283 1285
1284 if (!iwl_mvm_firmware_running(mvm))
1285 return -EIO;
1286
1287 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE); 1286 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
1288 if (ret) 1287 if (ret)
1289 return ret; 1288 return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 2f22e14e00fe..8ba16fc24e3a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -438,7 +438,8 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
438 } 438 }
439 439
440 /* Allocate the CAB queue for softAP and GO interfaces */ 440 /* Allocate the CAB queue for softAP and GO interfaces */
441 if (vif->type == NL80211_IFTYPE_AP) { 441 if (vif->type == NL80211_IFTYPE_AP ||
442 vif->type == NL80211_IFTYPE_ADHOC) {
442 /* 443 /*
443 * For TVQM this will be overwritten later with the FW assigned 444 * For TVQM this will be overwritten later with the FW assigned
444 * queue value (when queue is enabled). 445 * queue value (when queue is enabled).
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 8aed40a8bc38..ebf511150f4d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -8,6 +8,7 @@
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation
11 * 12 *
12 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as 14 * it under the terms of version 2 of the GNU General Public License as
@@ -2106,15 +2107,40 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
2106 if (ret) 2107 if (ret)
2107 goto out_remove; 2108 goto out_remove;
2108 2109
2109 ret = iwl_mvm_add_mcast_sta(mvm, vif); 2110 /*
2110 if (ret) 2111 * This is not very nice, but the simplest:
2111 goto out_unbind; 2112 * For older FWs adding the mcast sta before the bcast station may
2112 2113 * cause assert 0x2b00.
2113 /* Send the bcast station. At this stage the TBTT and DTIM time events 2114 * This is fixed in later FW so make the order of removal depend on
2114 * are added and applied to the scheduler */ 2115 * the TLV
2115 ret = iwl_mvm_send_add_bcast_sta(mvm, vif); 2116 */
2116 if (ret) 2117 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2117 goto out_rm_mcast; 2118 ret = iwl_mvm_add_mcast_sta(mvm, vif);
2119 if (ret)
2120 goto out_unbind;
2121 /*
2122 * Send the bcast station. At this stage the TBTT and DTIM time
2123 * events are added and applied to the scheduler
2124 */
2125 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2126 if (ret) {
2127 iwl_mvm_rm_mcast_sta(mvm, vif);
2128 goto out_unbind;
2129 }
2130 } else {
2131 /*
2132 * Send the bcast station. At this stage the TBTT and DTIM time
2133 * events are added and applied to the scheduler
2134 */
 2135 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2136 if (ret)
2137 goto out_unbind;
 2138 ret = iwl_mvm_add_mcast_sta(mvm, vif);
2139 if (ret) {
2140 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2141 goto out_unbind;
2142 }
2143 }
2118 2144
2119 /* must be set before quota calculations */ 2145 /* must be set before quota calculations */
2120 mvmvif->ap_ibss_active = true; 2146 mvmvif->ap_ibss_active = true;
@@ -2144,7 +2170,6 @@ out_quota_failed:
2144 iwl_mvm_power_update_mac(mvm); 2170 iwl_mvm_power_update_mac(mvm);
2145 mvmvif->ap_ibss_active = false; 2171 mvmvif->ap_ibss_active = false;
2146 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2172 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2147out_rm_mcast:
2148 iwl_mvm_rm_mcast_sta(mvm, vif); 2173 iwl_mvm_rm_mcast_sta(mvm, vif);
2149out_unbind: 2174out_unbind:
2150 iwl_mvm_binding_remove_vif(mvm, vif); 2175 iwl_mvm_binding_remove_vif(mvm, vif);
@@ -2682,6 +2707,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2682 2707
2683 /* enable beacon filtering */ 2708 /* enable beacon filtering */
2684 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); 2709 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2710
2711 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
2712 false);
2713
2685 ret = 0; 2714 ret = 0;
2686 } else if (old_state == IEEE80211_STA_AUTHORIZED && 2715 } else if (old_state == IEEE80211_STA_AUTHORIZED &&
2687 new_state == IEEE80211_STA_ASSOC) { 2716 new_state == IEEE80211_STA_ASSOC) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 2d28e0804218..89ff02d7c876 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -90,6 +90,7 @@
90#include "fw/runtime.h" 90#include "fw/runtime.h"
91#include "fw/dbg.h" 91#include "fw/dbg.h"
92#include "fw/acpi.h" 92#include "fw/acpi.h"
93#include "fw/debugfs.h"
93 94
94#define IWL_MVM_MAX_ADDRESSES 5 95#define IWL_MVM_MAX_ADDRESSES 5
95/* RSSI offset for WkP */ 96/* RSSI offset for WkP */
@@ -1783,6 +1784,7 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
1783 1784
1784static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) 1785static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
1785{ 1786{
1787 iwl_fw_cancel_timestamp(&mvm->fwrt);
1786 iwl_free_fw_paging(&mvm->fwrt); 1788 iwl_free_fw_paging(&mvm->fwrt);
1787 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); 1789 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
1788 iwl_fw_dump_conf_clear(&mvm->fwrt); 1790 iwl_fw_dump_conf_clear(&mvm->fwrt);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 5d525a0023dc..ab7fb5aad984 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -8,6 +8,7 @@
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation
11 * 12 *
12 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as 14 * it under the terms of version 2 of the GNU General Public License as
@@ -35,6 +36,7 @@
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 36 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 37 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
37 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 38 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
39 * Copyright(c) 2018 Intel Corporation
38 * All rights reserved. 40 * All rights reserved.
39 * 41 *
40 * Redistribution and use in source and binary forms, with or without 42 * Redistribution and use in source and binary forms, with or without
@@ -552,9 +554,15 @@ static void iwl_mvm_fwrt_dump_end(void *ctx)
552 iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT); 554 iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
553} 555}
554 556
557static bool iwl_mvm_fwrt_fw_running(void *ctx)
558{
559 return iwl_mvm_firmware_running(ctx);
560}
561
555static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { 562static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
556 .dump_start = iwl_mvm_fwrt_dump_start, 563 .dump_start = iwl_mvm_fwrt_dump_start,
557 .dump_end = iwl_mvm_fwrt_dump_end, 564 .dump_end = iwl_mvm_fwrt_dump_end,
565 .fw_running = iwl_mvm_fwrt_fw_running,
558}; 566};
559 567
560static struct iwl_op_mode * 568static struct iwl_op_mode *
@@ -802,7 +810,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
802 iwl_mvm_leds_exit(mvm); 810 iwl_mvm_leds_exit(mvm);
803 iwl_mvm_thermal_exit(mvm); 811 iwl_mvm_thermal_exit(mvm);
804 out_free: 812 out_free:
805 iwl_fw_runtime_exit(&mvm->fwrt);
806 iwl_fw_flush_dump(&mvm->fwrt); 813 iwl_fw_flush_dump(&mvm->fwrt);
807 814
808 if (iwlmvm_mod_params.init_dbg) 815 if (iwlmvm_mod_params.init_dbg)
@@ -843,7 +850,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
843#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS) 850#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
844 kfree(mvm->d3_resume_sram); 851 kfree(mvm->d3_resume_sram);
845#endif 852#endif
846 iwl_fw_runtime_exit(&mvm->fwrt);
847 iwl_trans_op_mode_leave(mvm->trans); 853 iwl_trans_op_mode_leave(mvm->trans);
848 854
849 iwl_phy_db_free(mvm->phy_db); 855 iwl_phy_db_free(mvm->phy_db);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 60abb0084ee5..47f4c7a1d80d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -2684,7 +2684,8 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
2684 struct ieee80211_sta *sta, 2684 struct ieee80211_sta *sta,
2685 struct iwl_lq_sta *lq_sta, 2685 struct iwl_lq_sta *lq_sta,
2686 enum nl80211_band band, 2686 enum nl80211_band band,
2687 struct rs_rate *rate) 2687 struct rs_rate *rate,
2688 bool init)
2688{ 2689{
2689 int i, nentries; 2690 int i, nentries;
2690 unsigned long active_rate; 2691 unsigned long active_rate;
@@ -2738,14 +2739,25 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
2738 */ 2739 */
2739 if (sta->vht_cap.vht_supported && 2740 if (sta->vht_cap.vht_supported &&
2740 best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { 2741 best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
2741 switch (sta->bandwidth) { 2742 /*
2742 case IEEE80211_STA_RX_BW_160: 2743 * In AP mode, when a new station associates, rs is initialized
2743 case IEEE80211_STA_RX_BW_80: 2744 * immediately upon association completion, before the phy
2744 case IEEE80211_STA_RX_BW_40: 2745 * context is updated with the association parameters, so the
2746 * sta bandwidth might be wider than the phy context allows.
2747 * To avoid this issue, always initialize rs with 20mhz
2748 * bandwidth rate, and after authorization, when the phy context
2749 * is already up-to-date, re-init rs with the correct bw.
2750 */
2751 u32 bw = init ? RATE_MCS_CHAN_WIDTH_20 : rs_bw_from_sta_bw(sta);
2752
2753 switch (bw) {
2754 case RATE_MCS_CHAN_WIDTH_40:
2755 case RATE_MCS_CHAN_WIDTH_80:
2756 case RATE_MCS_CHAN_WIDTH_160:
2745 initial_rates = rs_optimal_rates_vht; 2757 initial_rates = rs_optimal_rates_vht;
2746 nentries = ARRAY_SIZE(rs_optimal_rates_vht); 2758 nentries = ARRAY_SIZE(rs_optimal_rates_vht);
2747 break; 2759 break;
2748 case IEEE80211_STA_RX_BW_20: 2760 case RATE_MCS_CHAN_WIDTH_20:
2749 initial_rates = rs_optimal_rates_vht_20mhz; 2761 initial_rates = rs_optimal_rates_vht_20mhz;
2750 nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz); 2762 nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
2751 break; 2763 break;
@@ -2756,7 +2768,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
2756 2768
2757 active_rate = lq_sta->active_siso_rate; 2769 active_rate = lq_sta->active_siso_rate;
2758 rate->type = LQ_VHT_SISO; 2770 rate->type = LQ_VHT_SISO;
2759 rate->bw = rs_bw_from_sta_bw(sta); 2771 rate->bw = bw;
2760 } else if (sta->ht_cap.ht_supported && 2772 } else if (sta->ht_cap.ht_supported &&
2761 best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { 2773 best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
2762 initial_rates = rs_optimal_rates_ht; 2774 initial_rates = rs_optimal_rates_ht;
@@ -2839,7 +2851,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
2839 tbl = &(lq_sta->lq_info[active_tbl]); 2851 tbl = &(lq_sta->lq_info[active_tbl]);
2840 rate = &tbl->rate; 2852 rate = &tbl->rate;
2841 2853
2842 rs_get_initial_rate(mvm, sta, lq_sta, band, rate); 2854 rs_get_initial_rate(mvm, sta, lq_sta, band, rate, init);
2843 rs_init_optimal_rate(mvm, sta, lq_sta); 2855 rs_init_optimal_rate(mvm, sta, lq_sta);
2844 2856
2845 WARN_ONCE(rate->ant != ANT_A && rate->ant != ANT_B, 2857 WARN_ONCE(rate->ant != ANT_A && rate->ant != ANT_B,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index a3f7c1bf3cc8..580de5851fc7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -71,6 +71,7 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
71 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 71 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
72 struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb); 72 struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
73 struct iwl_mvm_key_pn *ptk_pn; 73 struct iwl_mvm_key_pn *ptk_pn;
74 int res;
74 u8 tid, keyidx; 75 u8 tid, keyidx;
75 u8 pn[IEEE80211_CCMP_PN_LEN]; 76 u8 pn[IEEE80211_CCMP_PN_LEN];
76 u8 *extiv; 77 u8 *extiv;
@@ -127,12 +128,13 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
127 pn[4] = extiv[1]; 128 pn[4] = extiv[1];
128 pn[5] = extiv[0]; 129 pn[5] = extiv[0];
129 130
130 if (memcmp(pn, ptk_pn->q[queue].pn[tid], 131 res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
131 IEEE80211_CCMP_PN_LEN) <= 0) 132 if (res < 0)
133 return -1;
134 if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))
132 return -1; 135 return -1;
133 136
134 if (!(stats->flag & RX_FLAG_AMSDU_MORE)) 137 memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
135 memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
136 stats->flag |= RX_FLAG_PN_VALIDATED; 138 stats->flag |= RX_FLAG_PN_VALIDATED;
137 139
138 return 0; 140 return 0;
@@ -314,28 +316,21 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
314} 316}
315 317
316/* 318/*
317 * returns true if a packet outside BA session is a duplicate and 319 * returns true if a packet is a duplicate and should be dropped.
318 * should be dropped 320 * Updates AMSDU PN tracking info
319 */ 321 */
320static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue, 322static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
321 struct ieee80211_rx_status *rx_status, 323 struct ieee80211_rx_status *rx_status,
322 struct ieee80211_hdr *hdr, 324 struct ieee80211_hdr *hdr,
323 struct iwl_rx_mpdu_desc *desc) 325 struct iwl_rx_mpdu_desc *desc)
324{ 326{
325 struct iwl_mvm_sta *mvm_sta; 327 struct iwl_mvm_sta *mvm_sta;
326 struct iwl_mvm_rxq_dup_data *dup_data; 328 struct iwl_mvm_rxq_dup_data *dup_data;
327 u8 baid, tid, sub_frame_idx; 329 u8 tid, sub_frame_idx;
328 330
329 if (WARN_ON(IS_ERR_OR_NULL(sta))) 331 if (WARN_ON(IS_ERR_OR_NULL(sta)))
330 return false; 332 return false;
331 333
332 baid = (le32_to_cpu(desc->reorder_data) &
333 IWL_RX_MPDU_REORDER_BAID_MASK) >>
334 IWL_RX_MPDU_REORDER_BAID_SHIFT;
335
336 if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
337 return false;
338
339 mvm_sta = iwl_mvm_sta_from_mac80211(sta); 334 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
340 dup_data = &mvm_sta->dup_data[queue]; 335 dup_data = &mvm_sta->dup_data[queue];
341 336
@@ -365,6 +360,12 @@ static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue,
365 dup_data->last_sub_frame[tid] >= sub_frame_idx)) 360 dup_data->last_sub_frame[tid] >= sub_frame_idx))
366 return true; 361 return true;
367 362
363 /* Allow same PN as the first subframe for following sub frames */
364 if (dup_data->last_seq[tid] == hdr->seq_ctrl &&
365 sub_frame_idx > dup_data->last_sub_frame[tid] &&
366 desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU)
367 rx_status->flag |= RX_FLAG_ALLOW_SAME_PN;
368
368 dup_data->last_seq[tid] = hdr->seq_ctrl; 369 dup_data->last_seq[tid] = hdr->seq_ctrl;
369 dup_data->last_sub_frame[tid] = sub_frame_idx; 370 dup_data->last_sub_frame[tid] = sub_frame_idx;
370 371
@@ -971,7 +972,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
971 if (ieee80211_is_data(hdr->frame_control)) 972 if (ieee80211_is_data(hdr->frame_control))
972 iwl_mvm_rx_csum(sta, skb, desc); 973 iwl_mvm_rx_csum(sta, skb, desc);
973 974
974 if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) { 975 if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
975 kfree_skb(skb); 976 kfree_skb(skb);
976 goto out; 977 goto out;
977 } 978 }
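The rewritten PN check relies on memcmp() over the 6-byte big-endian PN behaving like a 48-bit unsigned compare, giving an explicit three-way rule: strictly older PNs are replays, equal PNs are accepted only when RX_FLAG_ALLOW_SAME_PN marks a later subframe of the same A-MSDU, and newer PNs are accepted and stored. A small illustration with made-up values:

	u8 stored[IEEE80211_CCMP_PN_LEN] = { 0, 0, 0, 0, 1, 5 };
	u8 rx_pn[IEEE80211_CCMP_PN_LEN] = { 0, 0, 0, 0, 1, 5 };
	int res;

	res = memcmp(rx_pn, stored, IEEE80211_CCMP_PN_LEN);
	/* res < 0  -> older PN, always dropped as a replay
	 * res == 0 -> dropped unless RX_FLAG_ALLOW_SAME_PN is set
	 * res > 0  -> newer PN, accepted; stored PN is overwritten
	 */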
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 6b2674e02606..630e23cb0ffb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2039,7 +2039,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2039 struct iwl_trans_txq_scd_cfg cfg = { 2039 struct iwl_trans_txq_scd_cfg cfg = {
2040 .fifo = IWL_MVM_TX_FIFO_MCAST, 2040 .fifo = IWL_MVM_TX_FIFO_MCAST,
2041 .sta_id = msta->sta_id, 2041 .sta_id = msta->sta_id,
2042 .tid = IWL_MAX_TID_COUNT, 2042 .tid = 0,
2043 .aggregate = false, 2043 .aggregate = false,
2044 .frame_limit = IWL_FRAME_LIMIT, 2044 .frame_limit = IWL_FRAME_LIMIT,
2045 }; 2045 };
@@ -2053,6 +2053,17 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2053 return -ENOTSUPP; 2053 return -ENOTSUPP;
2054 2054
2055 /* 2055 /*
2056 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2057 * invalid, so make sure we use the queue we want.
2058 * Note that this is done here as we want to avoid making DQA
2059 * changes in mac80211 layer.
2060 */
2061 if (vif->type == NL80211_IFTYPE_ADHOC) {
2062 vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2063 mvmvif->cab_queue = vif->cab_queue;
2064 }
2065
2066 /*
2056 * While in previous FWs we had to exclude cab queue from TFD queue 2067 * While in previous FWs we had to exclude cab queue from TFD queue
2057 * mask, now it is needed as any other queue. 2068 * mask, now it is needed as any other queue.
2058 */ 2069 */
@@ -2079,24 +2090,13 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2079 if (iwl_mvm_has_new_tx_api(mvm)) { 2090 if (iwl_mvm_has_new_tx_api(mvm)) {
2080 int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue, 2091 int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
2081 msta->sta_id, 2092 msta->sta_id,
2082 IWL_MAX_TID_COUNT, 2093 0,
2083 timeout); 2094 timeout);
2084 mvmvif->cab_queue = queue; 2095 mvmvif->cab_queue = queue;
2085 } else if (!fw_has_api(&mvm->fw->ucode_capa, 2096 } else if (!fw_has_api(&mvm->fw->ucode_capa,
2086 IWL_UCODE_TLV_API_STA_TYPE)) { 2097 IWL_UCODE_TLV_API_STA_TYPE))
2087 /*
2088 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2089 * invalid, so make sure we use the queue we want.
2090 * Note that this is done here as we want to avoid making DQA
2091 * changes in mac80211 layer.
2092 */
2093 if (vif->type == NL80211_IFTYPE_ADHOC) {
2094 vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2095 mvmvif->cab_queue = vif->cab_queue;
2096 }
2097 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, 2098 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2098 &cfg, timeout); 2099 &cfg, timeout);
2099 }
2100 2100
2101 return 0; 2101 return 0;
2102} 2102}
@@ -2115,7 +2115,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2115 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0); 2115 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
2116 2116
2117 iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue, 2117 iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
2118 IWL_MAX_TID_COUNT, 0); 2118 0, 0);
2119 2119
2120 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); 2120 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2121 if (ret) 2121 if (ret)
@@ -3170,8 +3170,9 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
3170 int ret, size; 3170 int ret, size;
3171 u32 status; 3171 u32 status;
3172 3172
3173 /* This is a valid situation for GTK removal */
3173 if (sta_id == IWL_MVM_INVALID_STA) 3174 if (sta_id == IWL_MVM_INVALID_STA)
3174 return -EINVAL; 3175 return 0;
3175 3176
3176 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & 3177 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
3177 STA_KEY_FLG_KEYID_MSK); 3178 STA_KEY_FLG_KEYID_MSK);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 200ab50ec86b..acb217e666db 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -616,7 +616,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
616 time_cmd.repeat = 1; 616 time_cmd.repeat = 1;
617 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | 617 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
618 TE_V2_NOTIF_HOST_EVENT_END | 618 TE_V2_NOTIF_HOST_EVENT_END |
619 T2_V2_START_IMMEDIATELY); 619 TE_V2_START_IMMEDIATELY);
620 620
621 if (!wait_for_notif) { 621 if (!wait_for_notif) {
622 iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); 622 iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
@@ -803,7 +803,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
803 time_cmd.repeat = 1; 803 time_cmd.repeat = 1;
804 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | 804 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
805 TE_V2_NOTIF_HOST_EVENT_END | 805 TE_V2_NOTIF_HOST_EVENT_END |
806 T2_V2_START_IMMEDIATELY); 806 TE_V2_START_IMMEDIATELY);
807 807
808 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); 808 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
809} 809}
@@ -913,6 +913,8 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
913 time_cmd.interval = cpu_to_le32(1); 913 time_cmd.interval = cpu_to_le32(1);
914 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | 914 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
915 TE_V2_ABSENCE); 915 TE_V2_ABSENCE);
916 if (!apply_time)
917 time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY);
916 918
917 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); 919 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
918} 920}
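The first two hunks replace the misspelled T2_V2_START_IMMEDIATELY with TE_V2_START_IMMEDIATELY so the session-protection and P2P ROC time events actually carry the intended start-immediately policy, and the channel-switch hunk requests an immediate start only when no explicit apply_time was supplied. A short sketch of how such a policy word is assembled; the bit values here are illustrative, the real TE_V2_* encoding lives in the firmware API headers:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative bit positions only. */
    #define TE_V2_NOTIF_HOST_EVENT_START (1u << 0)
    #define TE_V2_NOTIF_HOST_EVENT_END   (1u << 1)
    #define TE_V2_START_IMMEDIATELY      (1u << 2)
    #define TE_V2_ABSENCE                (1u << 3)

    /* Channel-switch case from the last hunk: start immediately only when
     * the caller did not supply an absolute apply_time. */
    static uint16_t csa_policy(uint32_t apply_time)
    {
        uint16_t policy = TE_V2_NOTIF_HOST_EVENT_START | TE_V2_ABSENCE;

        if (!apply_time)
            policy |= TE_V2_START_IMMEDIATELY;
        return policy;
    }

    int main(void)
    {
        printf("policy(apply_time == 0) = 0x%x\n", (unsigned)csa_policy(0));
        printf("policy(apply_time != 0) = 0x%x\n", (unsigned)csa_policy(1234));
        return 0;
    }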
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index dda77b327c98..af6dfceab6b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -419,11 +419,11 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
419{ 419{
420 struct ieee80211_key_conf *keyconf = info->control.hw_key; 420 struct ieee80211_key_conf *keyconf = info->control.hw_key;
421 u8 *crypto_hdr = skb_frag->data + hdrlen; 421 u8 *crypto_hdr = skb_frag->data + hdrlen;
422 enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;
422 u64 pn; 423 u64 pn;
423 424
424 switch (keyconf->cipher) { 425 switch (keyconf->cipher) {
425 case WLAN_CIPHER_SUITE_CCMP: 426 case WLAN_CIPHER_SUITE_CCMP:
426 case WLAN_CIPHER_SUITE_CCMP_256:
427 iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd); 427 iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
428 iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); 428 iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
429 break; 429 break;
@@ -447,13 +447,16 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
447 break; 447 break;
448 case WLAN_CIPHER_SUITE_GCMP: 448 case WLAN_CIPHER_SUITE_GCMP:
449 case WLAN_CIPHER_SUITE_GCMP_256: 449 case WLAN_CIPHER_SUITE_GCMP_256:
450 type = TX_CMD_SEC_GCMP;
451 /* Fall through */
452 case WLAN_CIPHER_SUITE_CCMP_256:
450 /* TODO: Taking the key from the table might introduce a race 453 /* TODO: Taking the key from the table might introduce a race
451 * when PTK rekeying is done, having an old packets with a PN 454 * when PTK rekeying is done, having an old packets with a PN
452 * based on the old key but the message encrypted with a new 455 * based on the old key but the message encrypted with a new
453 * one. 456 * one.
454 * Need to handle this. 457 * Need to handle this.
455 */ 458 */
456 tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TX_CMD_SEC_KEY_FROM_TABLE; 459 tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
457 tx_cmd->key[0] = keyconf->hw_key_idx; 460 tx_cmd->key[0] = keyconf->hw_key_idx;
458 iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); 461 iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
459 break; 462 break;
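This hunk introduces a sec-control type that defaults to the CCM value and is bumped to GCMP just before a deliberate fall-through, so CCMP-256 now joins GCMP/GCMP-256 on the path where the firmware fetches the key from its key table rather than taking it inline from the TX command. A compact model of that switch; the cipher ids and flag values are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative constants only. */
    enum cipher { CCMP, CCMP_256, GCMP, GCMP_256 };
    #define SEC_CCM            0x01
    #define SEC_GCMP           0x02
    #define SEC_KEY_FROM_TABLE 0x08

    static uint8_t sec_ctl_for(enum cipher c)
    {
        uint8_t type = SEC_CCM;   /* default, as in the added declaration */

        switch (c) {
        case CCMP:
            return SEC_CCM;       /* key carried inline in the TX command */
        case GCMP:
        case GCMP_256:
            type = SEC_GCMP;
            /* fall through */
        case CCMP_256:
            /* these ciphers let the firmware pull the key from its table */
            return type | SEC_KEY_FROM_TABLE;
        }
        return 0;
    }

    int main(void)
    {
        printf("CCMP-256 sec_ctl = 0x%x\n", (unsigned)sec_ctl_for(CCMP_256));
        printf("GCMP     sec_ctl = 0x%x\n", (unsigned)sec_ctl_for(GCMP));
        return 0;
    }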
@@ -645,7 +648,11 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
645 if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || 648 if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
646 info.control.vif->type == NL80211_IFTYPE_AP || 649 info.control.vif->type == NL80211_IFTYPE_AP ||
647 info.control.vif->type == NL80211_IFTYPE_ADHOC) { 650 info.control.vif->type == NL80211_IFTYPE_ADHOC) {
648 sta_id = mvmvif->bcast_sta.sta_id; 651 if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE)
652 sta_id = mvmvif->bcast_sta.sta_id;
653 else
654 sta_id = mvmvif->mcast_sta.sta_id;
655
649 queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, 656 queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
650 hdr->frame_control); 657 hdr->frame_control);
651 if (queue < 0) 658 if (queue < 0)
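For frames transmitted without a station entry, the hunk above stops routing everything through the broadcast station: only P2P device frames keep using bcast_sta, while AP and IBSS frames now use the per-vif multicast station, matching the CAB queue that iwl_mvm_add_mcast_sta() configures. A hedged sketch of the selection with simplified types (vif and pick_sta_id are illustrative, not driver structures):

    #include <stdio.h>

    enum vif_type { P2P_DEVICE, AP, ADHOC };

    struct vif {
        enum vif_type type;
        int bcast_sta_id;   /* broadcast/management station */
        int mcast_sta_id;   /* multicast (CAB) station */
    };

    /* Mirrors the new branch: P2P device frames still go out on the
     * broadcast station, AP/IBSS frames on the multicast station. */
    static int pick_sta_id(const struct vif *vif)
    {
        if (vif->type == P2P_DEVICE)
            return vif->bcast_sta_id;
        return vif->mcast_sta_id;
    }

    int main(void)
    {
        struct vif ap = { .type = AP, .bcast_sta_id = 1, .mcast_sta_id = 2 };

        printf("AP non-sta frames use sta %d\n", pick_sta_id(&ap));
        return 0;
    }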
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 6d0a907d5ba5..fabae0f60683 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -147,7 +147,7 @@ static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
147 /* Sanity check on number of chunks */ 147 /* Sanity check on number of chunks */
148 num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd); 148 num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);
149 149
150 if (num_tbs >= trans_pcie->max_tbs) { 150 if (num_tbs > trans_pcie->max_tbs) {
151 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); 151 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
152 return; 152 return;
153 } 153 }
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 3f85713c41dc..1a566287993d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -378,7 +378,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
378 /* Sanity check on number of chunks */ 378 /* Sanity check on number of chunks */
379 num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); 379 num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
380 380
381 if (num_tbs >= trans_pcie->max_tbs) { 381 if (num_tbs > trans_pcie->max_tbs) {
382 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); 382 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
383 /* @todo issue fatal error, it is quite serious situation */ 383 /* @todo issue fatal error, it is quite serious situation */
384 return; 384 return;
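Both PCIe unmap paths relax the sanity check by one: a TFD that uses exactly max_tbs chunks is valid, so only counts strictly above the limit are flagged as "Too many chunks". The boundary is easiest to see in isolation (the limit below is illustrative, the real one comes from the hardware configuration):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_TBS 20   /* illustrative limit */

    /* After the change a fully populated TFD (num_tbs == MAX_TBS) passes;
     * only num_tbs > MAX_TBS is treated as an error. */
    static bool tfd_count_ok(int num_tbs)
    {
        return num_tbs <= MAX_TBS;
    }

    int main(void)
    {
        printf("%d ok? %d\n", MAX_TBS,     tfd_count_ok(MAX_TBS));     /* 1 */
        printf("%d ok? %d\n", MAX_TBS + 1, tfd_count_ok(MAX_TBS + 1)); /* 0 */
        return 0;
    }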
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 7b6c3640a94f..100cf42db65d 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2742,6 +2742,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
2742 mutex_init(&data->mutex); 2742 mutex_init(&data->mutex);
2743 2743
2744 data->netgroup = hwsim_net_get_netgroup(net); 2744 data->netgroup = hwsim_net_get_netgroup(net);
2745 data->wmediumd = hwsim_net_get_wmediumd(net);
2745 2746
2746 /* Enable frame retransmissions for lossy channels */ 2747 /* Enable frame retransmissions for lossy channels */
2747 hw->max_rates = 4; 2748 hw->max_rates = 4;
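The single added line copies the namespace's wmediumd netlink portid into the new radio alongside the netgroup, presumably so that a radio created after wmediumd has attached to the namespace also forwards its frames there. A rough per-netns model of that hand-off; hwsim_net and hwsim_radio here are simplified stand-ins, not the driver's structures:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified per-namespace and per-radio state. */
    struct hwsim_net   { int netgroup; uint32_t wmediumd; /* portid */ };
    struct hwsim_radio { int netgroup; uint32_t wmediumd; };

    /* New-radio path after the change: both the netgroup and the wmediumd
     * portid are inherited from the owning namespace. */
    static void new_radio(struct hwsim_radio *r, const struct hwsim_net *net)
    {
        r->netgroup = net->netgroup;
        r->wmediumd = net->wmediumd;
    }

    int main(void)
    {
        struct hwsim_net net = { .netgroup = 1, .wmediumd = 4242 };
        struct hwsim_radio r;

        new_radio(&r, &net);
        printf("radio uses wmediumd portid %u\n", (unsigned)r.wmediumd);
        return 0;
    }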
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index f9ccd13c79f9..e7bbbc95cdb1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -1125,7 +1125,8 @@ static void _rtl8723be_enable_aspm_back_door(struct ieee80211_hw *hw)
1125 1125
1126 /* Configuration Space offset 0x70f BIT7 is used to control L0S */ 1126 /* Configuration Space offset 0x70f BIT7 is used to control L0S */
1127 tmp8 = _rtl8723be_dbi_read(rtlpriv, 0x70f); 1127 tmp8 = _rtl8723be_dbi_read(rtlpriv, 0x70f);
1128 _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7)); 1128 _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7) |
1129 ASPM_L1_LATENCY << 3);
1129 1130
1130 /* Configuration Space offset 0x719 Bit3 is for L1 1131 /* Configuration Space offset 0x719 Bit3 is for L1
1131 * BIT4 is for clock request 1132 * BIT4 is for clock request