Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c                    |   2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c            |   5
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c            |   3
-rw-r--r--  drivers/net/dsa/mv88e6xxx.c                          |   8
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c        |   9
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h          | 137
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c      |   9
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c      |  15
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c           |  18
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c        |  65
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.c           |   6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c       |  45
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c       |  29
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c          |  16
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c           |   3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h     |   5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h          |   1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c   | 244
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h        |   1
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h           |   1
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c      |   9
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c  |  42
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.c              |   1
-rw-r--r--  drivers/net/ethernet/rocker/rocker.c                 |   1
-rw-r--r--  drivers/net/ethernet/sfc/selftest.c                  |   2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h         |   5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000.h      |  51
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c |   5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c  |  26
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c   |   2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c    |  22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c|   4
-rw-r--r--  drivers/net/ifb.c                                    |   2
-rw-r--r--  drivers/net/macvtap.c                                |   2
-rw-r--r--  drivers/net/xen-netfront.c                           |   2
35 files changed, 508 insertions, 290 deletions
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 6791fd16272c..3ef0cf9f5c44 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -73,7 +73,7 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
 	c4iw_init_wr_wait(&wr_wait);
 	wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);
 
-	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(wr_len, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 657b89b1d291..915ad04a827e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -846,6 +846,11 @@ static int ipoib_get_iflink(const struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
+	/* parent interface */
+	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
+		return dev->ifindex;
+
+	/* child/vlan interface */
 	return priv->parent->ifindex;
 }
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 4dd1313056a4..fca1a882de27 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -58,6 +58,7 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
 	/* MTU will be reset when mcast join happens */
 	priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
 	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
+	priv->parent = ppriv->dev;
 	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
 
 	result = ipoib_set_dev_features(priv, ppriv->ca);
@@ -84,8 +85,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
 		goto register_failed;
 	}
 
-	priv->parent = ppriv->dev;
-
 	ipoib_create_debug_files(priv->dev);
 
 	/* RTNL childs don't need proprietary sysfs entries */
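The two ipoib hunks above are order-sensitive: ipoib_get_iflink() dereferences priv->parent for child interfaces, and the kernel may invoke the new ndo_get_iflink hook as soon as the child netdev is registered, so the assignment has to move ahead of the registration that previously preceded it. A minimal userspace sketch of the lookup logic (the struct and flag names below are simplified stand-ins, not the driver's types):

#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures involved. */
struct net_device { int ifindex; };

struct ipoib_priv {
	struct net_device *dev;
	struct net_device *parent;   /* NULL for a parent interface */
	int is_subinterface;         /* models IPOIB_FLAG_SUBINTERFACE */
};

/* Mirrors ipoib_get_iflink(): a parent reports itself, a child its parent. */
static int get_iflink(const struct ipoib_priv *priv)
{
	if (!priv->is_subinterface)
		return priv->dev->ifindex;
	return priv->parent->ifindex;   /* must be set before registration */
}

int main(void)
{
	struct net_device parent_dev = { .ifindex = 3 };
	struct net_device child_dev  = { .ifindex = 7 };
	struct ipoib_priv parent = { &parent_dev, NULL, 0 };
	struct ipoib_priv child  = { &child_dev, &parent_dev, 1 };

	printf("parent iflink=%d child iflink=%d\n",
	       get_iflink(&parent), get_iflink(&child));
	return 0;
}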
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index fc8d3b6ffe8e..9f0c2b9d58ae 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -602,8 +602,6 @@ static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
 	u32 high = 0;
 
 	if (s->reg >= 0x100) {
-		int ret;
-
 		ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
 					 s->reg - 0x100);
 		if (ret < 0)
@@ -902,14 +900,16 @@ static int _mv88e6xxx_flush_fid(struct dsa_switch *ds, int fid)
 static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int reg, ret;
+	int reg, ret = 0;
 	u8 oldstate;
 
 	mutex_lock(&ps->smi_mutex);
 
 	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
-	if (reg < 0)
+	if (reg < 0) {
+		ret = reg;
 		goto abort;
+	}
 
 	oldstate = reg & PORT_CONTROL_STATE_MASK;
 	if (oldstate != state) {
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 79ea35869e1e..90a76306ad0f 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -376,8 +376,13 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
 	u16 pktlength;
 	u16 pktstatus;
 
-	while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) &&
-	       (count < limit)) {
+	/* Check for count < limit first as get_rx_status is changing
+	 * the response-fifo so we must process the next packet
+	 * after calling get_rx_status if a response is pending.
+	 * (reading the last byte of the response pops the value from the fifo.)
+	 */
+	while ((count < limit) &&
+	       ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0)) {
 		pktstatus = rxstatus >> 16;
 		pktlength = rxstatus & 0xffff;
 
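The rewritten loop condition above is not a cosmetic swap: get_rx_status() has a side effect (reading the last byte of a response pops it off the response FIFO), and && evaluates left to right with short-circuiting, so the budget check must come first to avoid consuming a response that this poll round will never process. A small self-contained C sketch of the hazard, with a toy FIFO and hypothetical names:

#include <stdio.h>

/* Toy response FIFO: reading a status pops it, like the TSE response FIFO. */
static int fifo[4] = { 11, 22, 33, 0 };
static int pos;

static int get_rx_status(void)
{
	return fifo[pos++];   /* side effect: consumes the response */
}

int main(void)
{
	int limit = 2, count = 0, rxstatus;

	/* Budget check first: when count == limit, short-circuit &&
	 * guarantees get_rx_status() is NOT called, so no response is
	 * popped and lost before the next poll round.
	 */
	while ((count < limit) && ((rxstatus = get_rx_status()) != 0)) {
		printf("processed status %d\n", rxstatus);
		count++;
	}
	printf("next unconsumed response: fifo[%d] = %d\n", pos, fifo[pos]);
	return 0;
}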
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 4085c4b31047..355d5fea5be9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -531,20 +531,8 @@ struct bnx2x_fastpath {
 	struct napi_struct	napi;
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
-	unsigned int state;
-#define BNX2X_FP_STATE_IDLE		      0
-#define BNX2X_FP_STATE_NAPI		(1 << 0)    /* NAPI owns this FP */
-#define BNX2X_FP_STATE_POLL		(1 << 1)    /* poll owns this FP */
-#define BNX2X_FP_STATE_DISABLED		(1 << 2)
-#define BNX2X_FP_STATE_NAPI_YIELD	(1 << 3)    /* NAPI yielded this FP */
-#define BNX2X_FP_STATE_POLL_YIELD	(1 << 4)    /* poll yielded this FP */
-#define BNX2X_FP_OWNED	(BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
-#define BNX2X_FP_YIELD	(BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
-#define BNX2X_FP_LOCKED	(BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
-#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
-	/* protect state */
-	spinlock_t lock;
-#endif /* CONFIG_NET_RX_BUSY_POLL */
+	unsigned long		busy_poll_state;
+#endif
 
 	union host_hc_status_block	status_blk;
 	/* chip independent shortcuts into sb structure */
@@ -619,104 +607,83 @@ struct bnx2x_fastpath {
 #define bnx2x_fp_qstats(bp, fp)	(&((bp)->fp_stats[(fp)->index].eth_q_stats))
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
+
+enum bnx2x_fp_state {
+	BNX2X_STATE_FP_NAPI	= BIT(0), /* NAPI handler owns the queue */
+
+	BNX2X_STATE_FP_NAPI_REQ_BIT = 1, /* NAPI would like to own the queue */
+	BNX2X_STATE_FP_NAPI_REQ = BIT(1),
+
+	BNX2X_STATE_FP_POLL_BIT = 2,
+	BNX2X_STATE_FP_POLL     = BIT(2), /* busy_poll owns the queue */
+
+	BNX2X_STATE_FP_DISABLE_BIT = 3, /* queue is dismantled */
+};
+
+static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
 {
-	spin_lock_init(&fp->lock);
-	fp->state = BNX2X_FP_STATE_IDLE;
+	WRITE_ONCE(fp->busy_poll_state, 0);
 }
 
 /* called from the device poll routine to get ownership of a FP */
 static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
 {
-	bool rc = true;
+	unsigned long prev, old = READ_ONCE(fp->busy_poll_state);
 
-	spin_lock_bh(&fp->lock);
-	if (fp->state & BNX2X_FP_LOCKED) {
-		WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
-		fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
-		rc = false;
-	} else {
-		/* we don't care if someone yielded */
-		fp->state = BNX2X_FP_STATE_NAPI;
+	while (1) {
+		switch (old) {
+		case BNX2X_STATE_FP_POLL:
+			/* make sure bnx2x_fp_lock_poll() wont starve us */
+			set_bit(BNX2X_STATE_FP_NAPI_REQ_BIT,
+				&fp->busy_poll_state);
+			/* fallthrough */
+		case BNX2X_STATE_FP_POLL | BNX2X_STATE_FP_NAPI_REQ:
+			return false;
+		default:
+			break;
+		}
+		prev = cmpxchg(&fp->busy_poll_state, old, BNX2X_STATE_FP_NAPI);
+		if (unlikely(prev != old)) {
+			old = prev;
+			continue;
+		}
+		return true;
 	}
-	spin_unlock_bh(&fp->lock);
-	return rc;
 }
 
-/* returns true is someone tried to get the FP while napi had it */
-static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
 {
-	bool rc = false;
-
-	spin_lock_bh(&fp->lock);
-	WARN_ON(fp->state &
-		(BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
-
-	if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
-		rc = true;
-
-	/* state ==> idle, unless currently disabled */
-	fp->state &= BNX2X_FP_STATE_DISABLED;
-	spin_unlock_bh(&fp->lock);
-	return rc;
+	smp_wmb();
+	fp->busy_poll_state = 0;
 }
 
 /* called from bnx2x_low_latency_poll() */
 static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
 {
-	bool rc = true;
-
-	spin_lock_bh(&fp->lock);
-	if ((fp->state & BNX2X_FP_LOCKED)) {
-		fp->state |= BNX2X_FP_STATE_POLL_YIELD;
-		rc = false;
-	} else {
-		/* preserve yield marks */
-		fp->state |= BNX2X_FP_STATE_POLL;
-	}
-	spin_unlock_bh(&fp->lock);
-	return rc;
+	return cmpxchg(&fp->busy_poll_state, 0, BNX2X_STATE_FP_POLL) == 0;
 }
 
-/* returns true if someone tried to get the FP while it was locked */
-static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 {
-	bool rc = false;
-
-	spin_lock_bh(&fp->lock);
-	WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
-
-	if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
-		rc = true;
-
-	/* state ==> idle, unless currently disabled */
-	fp->state &= BNX2X_FP_STATE_DISABLED;
-	spin_unlock_bh(&fp->lock);
-	return rc;
+	smp_mb__before_atomic();
+	clear_bit(BNX2X_STATE_FP_POLL_BIT, &fp->busy_poll_state);
 }
 
-/* true if a socket is polling, even if it did not get the lock */
+/* true if a socket is polling */
 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
-	WARN_ON(!(fp->state & BNX2X_FP_OWNED));
-	return fp->state & BNX2X_FP_USER_PEND;
+	return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL;
 }
 
 /* false if fp is currently owned */
 static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
 {
-	int rc = true;
-
-	spin_lock_bh(&fp->lock);
-	if (fp->state & BNX2X_FP_OWNED)
-		rc = false;
-	fp->state |= BNX2X_FP_STATE_DISABLED;
-	spin_unlock_bh(&fp->lock);
+	set_bit(BNX2X_STATE_FP_DISABLE_BIT, &fp->busy_poll_state);
+	return !bnx2x_fp_ll_polling(fp);
 
-	return rc;
 }
 #else
-static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
 {
 }
 
@@ -725,9 +692,8 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
 	return true;
 }
 
-static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
 {
-	return false;
 }
 
 static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
@@ -735,9 +701,8 @@ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
 	return false;
 }
 
-static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 {
-	return false;
 }
 
 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
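The bnx2x.h rewrite above drops the per-queue spinlock and five-flag state word in favour of a single atomic busy_poll_state that NAPI and busy_poll hand over with compare-and-swap; a contender that finds the queue owned leaves a request bit behind instead of spinning. A simplified userspace model of the acquisition loop using C11 atomics — it mirrors bnx2x_fp_lock_napi() in shape only and omits the kernel version's memory-ordering details:

#include <stdatomic.h>
#include <stdio.h>

enum { ST_NAPI = 1 << 0, ST_NAPI_REQ = 1 << 1, ST_POLL = 1 << 2 };

static _Atomic unsigned int state;   /* models fp->busy_poll_state */

/* Try to take the queue for NAPI, like bnx2x_fp_lock_napi(). */
static int lock_napi(void)
{
	unsigned int old = atomic_load(&state);

	for (;;) {
		if (old & ST_POLL) {
			/* leave a request bit so poll hands the queue over */
			atomic_fetch_or(&state, ST_NAPI_REQ);
			return 0;
		}
		/* on CAS failure 'old' is reloaded and we retry */
		if (atomic_compare_exchange_weak(&state, &old, ST_NAPI))
			return 1;
	}
}

int main(void)
{
	printf("idle -> napi: %d\n", lock_napi());      /* 1: acquired */
	atomic_store(&state, ST_POLL);
	printf("poll -> napi: %d\n", lock_napi());      /* 0: deferred */
	printf("state now 0x%x (request bit set)\n", atomic_load(&state));
	return 0;
}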
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 0a9faa134a9a..2f63467bce46 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1849,7 +1849,7 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
 	int i;
 
 	for_each_rx_queue_cnic(bp, i) {
-		bnx2x_fp_init_lock(&bp->fp[i]);
+		bnx2x_fp_busy_poll_init(&bp->fp[i]);
 		napi_enable(&bnx2x_fp(bp, i, napi));
 	}
 }
@@ -1859,7 +1859,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
 	int i;
 
 	for_each_eth_queue(bp, i) {
-		bnx2x_fp_init_lock(&bp->fp[i]);
+		bnx2x_fp_busy_poll_init(&bp->fp[i]);
 		napi_enable(&bnx2x_fp(bp, i, napi));
 	}
 }
@@ -3191,9 +3191,10 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 		}
 	}
 
+	bnx2x_fp_unlock_napi(fp);
+
 	/* Fall out from the NAPI loop if needed */
-	if (!bnx2x_fp_unlock_napi(fp) &&
-	    !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+	if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
 
 		/* No need to update SB for FCoE L2 ring as long as
 		 * it's connected to the default SB and the SB
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6de054404156..803d91beec6f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1140,6 +1140,10 @@ static int set_filter_wr(struct adapter *adapter, int fidx)
 	struct fw_filter_wr *fwr;
 	unsigned int ftid;
 
+	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
 	/* If the new filter requires loopback Destination MAC and/or VLAN
 	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
 	 * the filter.
@@ -1147,19 +1151,21 @@ static int set_filter_wr(struct adapter *adapter, int fidx)
 	if (f->fs.newdmac || f->fs.newvlan) {
 		/* allocate L2T entry for new filter */
 		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
-		if (f->l2t == NULL)
+		if (f->l2t == NULL) {
+			kfree_skb(skb);
 			return -EAGAIN;
+		}
 		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
 					 f->fs.eport, f->fs.dmac)) {
 			cxgb4_l2t_release(f->l2t);
 			f->l2t = NULL;
+			kfree_skb(skb);
 			return -ENOMEM;
 		}
 	}
 
 	ftid = adapter->tids.ftid_base + fidx;
 
-	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
 	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
 	memset(fwr, 0, sizeof(*fwr));
 
@@ -1257,7 +1263,10 @@ static int del_filter_wr(struct adapter *adapter, int fidx)
 	len = sizeof(*fwr);
 	ftid = adapter->tids.ftid_base + fidx;
 
-	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(len, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
 	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
 	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
 
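Both cxgb4 hunks follow one pattern: allocate the skb first, before any other resource is claimed, and free it on each later error path, instead of papering over allocation failure with __GFP_NOFAIL (which can never return NULL, making the old !skb checks dead code). A compact sketch of that acquire-in-order / unwind-on-error shape, with hypothetical helpers standing in for alloc_skb() and the L2T calls:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical resource helpers standing in for alloc_skb()/l2t alloc. */
static void *buf_alloc(void)     { return malloc(64); }
static void *l2t_alloc(int fail) { return fail ? NULL : malloc(16); }

static int set_filter(int need_l2t, int l2t_fails)
{
	void *buf, *l2t = NULL;

	buf = buf_alloc();              /* allocate first, fail fast */
	if (!buf)
		return -1;              /* -ENOMEM in the driver */

	if (need_l2t) {
		l2t = l2t_alloc(l2t_fails);
		if (!l2t) {
			free(buf);      /* unwind what we already hold */
			return -2;      /* -EAGAIN in the driver */
		}
	}

	/* ... build and send the work request using buf ... */
	free(l2t);
	free(buf);
	return 0;
}

int main(void)
{
	printf("ok path: %d\n", set_filter(1, 0));
	printf("l2t failure path: %d\n", set_filter(1, 1));
	return 0;
}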
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index b72d238695d7..3b39fdddeb57 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -413,6 +413,15 @@ out:
 	return count;
 }
 
+static void hip04_start_tx_timer(struct hip04_priv *priv)
+{
+	unsigned long ns = priv->tx_coalesce_usecs * NSEC_PER_USEC / 2;
+
+	/* allow timer to fire after half the time at the earliest */
+	hrtimer_start_range_ns(&priv->tx_coalesce_timer, ns_to_ktime(ns),
+			       ns, HRTIMER_MODE_REL);
+}
+
 static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct hip04_priv *priv = netdev_priv(ndev);
@@ -466,8 +475,7 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		}
 	} else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
 		/* cleanup not pending yet, start a new timer */
-		hrtimer_start_expires(&priv->tx_coalesce_timer,
-				      HRTIMER_MODE_REL);
+		hip04_start_tx_timer(priv);
 	}
 
 	return NETDEV_TX_OK;
@@ -549,7 +557,7 @@ done:
 	/* clean up tx descriptors and start a new timer if necessary */
 	tx_remaining = hip04_tx_reclaim(ndev, false);
 	if (rx < budget && tx_remaining)
-		hrtimer_start_expires(&priv->tx_coalesce_timer, HRTIMER_MODE_REL);
+		hip04_start_tx_timer(priv);
 
 	return rx;
 }
@@ -809,7 +817,6 @@ static int hip04_mac_probe(struct platform_device *pdev)
 	struct hip04_priv *priv;
 	struct resource *res;
 	unsigned int irq;
-	ktime_t txtime;
 	int ret;
 
 	ndev = alloc_etherdev(sizeof(struct hip04_priv));
@@ -846,9 +853,6 @@ static int hip04_mac_probe(struct platform_device *pdev)
 	 */
 	priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
 	priv->tx_coalesce_usecs = 200;
-	/* allow timer to fire after half the time at the earliest */
-	txtime = ktime_set(0, priv->tx_coalesce_usecs * NSEC_PER_USEC / 2);
-	hrtimer_set_expires_range(&priv->tx_coalesce_timer, txtime, txtime);
 	priv->tx_coalesce_timer.function = tx_done;
 
 	priv->map = syscon_node_to_regmap(arg.np);
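hip04_start_tx_timer() folds the old probe-time hrtimer_set_expires_range() setup into each arming of the timer: hrtimer_start_range_ns() takes an earliest expiry plus a slack window, and with the slack equal to ns the timer may fire anywhere between half and the full coalesce interval, giving the timer subsystem room to merge expirations. The arithmetic for the driver's 200 us default, as a standalone check:

#include <stdio.h>

#define NSEC_PER_USEC 1000UL

int main(void)
{
	unsigned long tx_coalesce_usecs = 200;   /* driver default */
	unsigned long ns = tx_coalesce_usecs * NSEC_PER_USEC / 2;

	/* hrtimer_start_range_ns(timer, ns_to_ktime(ns), ns, HRTIMER_MODE_REL)
	 * arms a relative timer with earliest expiry 'ns' and slack 'ns',
	 * i.e. it may fire anywhere in [half, full] of the interval:
	 */
	printf("earliest: %lu ns, latest: %lu ns\n", ns, ns + ns);
	return 0;
}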
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index d596f6624025..0bae22da014d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -2397,6 +2397,7 @@ i40e_aq_erase_nvm_exit:
 #define I40E_DEV_FUNC_CAP_LED		0x61
 #define I40E_DEV_FUNC_CAP_SDP		0x62
 #define I40E_DEV_FUNC_CAP_MDIO		0x63
+#define I40E_DEV_FUNC_CAP_WR_CSR_PROT	0x64
 
 /**
  * i40e_parse_discover_capabilities
@@ -2541,11 +2542,18 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
 			p->fd_filters_guaranteed = number;
 			p->fd_filters_best_effort = logical_id;
 			break;
+		case I40E_DEV_FUNC_CAP_WR_CSR_PROT:
+			p->wr_csr_prot = (u64)number;
+			p->wr_csr_prot |= (u64)logical_id << 32;
+			break;
 		default:
 			break;
 		}
 	}
 
+	if (p->fcoe)
+		i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
+
 	/* Software override ensuring FCoE is disabled if npar or mfp
 	 * mode because it is not supported in these modes.
 	 */
@@ -3503,6 +3511,63 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
 }
 
 /**
+ * i40e_aq_debug_dump
+ * @hw: pointer to the hardware structure
+ * @cluster_id: specific cluster to dump
+ * @table_id: table id within cluster
+ * @start_index: index of line in the block to read
+ * @buff_size: dump buffer size
+ * @buff: dump buffer
+ * @ret_buff_size: actual buffer size returned
+ * @ret_next_table: next block to read
+ * @ret_next_index: next index to read
+ *
+ * Dump internal FW/HW data for debug purposes.
+ *
+ **/
+i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+			       u8 table_id, u32 start_index, u16 buff_size,
+			       void *buff, u16 *ret_buff_size,
+			       u8 *ret_next_table, u32 *ret_next_index,
+			       struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_debug_dump_internals *cmd =
+		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+	struct i40e_aqc_debug_dump_internals *resp =
+		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+	i40e_status status;
+
+	if (buff_size == 0 || !buff)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_debug_dump_internals);
+	/* Indirect Command */
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	if (buff_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+	cmd->cluster_id = cluster_id;
+	cmd->table_id = table_id;
+	cmd->idx = cpu_to_le32(start_index);
+
+	desc.datalen = cpu_to_le16(buff_size);
+
+	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+	if (!status) {
+		if (ret_buff_size)
+			*ret_buff_size = le16_to_cpu(desc.datalen);
+		if (ret_next_table)
+			*ret_next_table = resp->table_id;
+		if (ret_next_index)
+			*ret_next_index = le32_to_cpu(resp->idx);
+	}
+
+	return status;
+}
+
+/**
  * i40e_read_bw_from_alt_ram
  * @hw: pointer to the hardware structure
  * @max_bw: pointer for max_bw read
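i40e_aq_debug_dump() reads one chunk per admin-queue command and hands back ret_next_table/ret_next_index, which behave like a continuation cursor; a caller wanting a complete dump would presumably re-issue the command with the returned cursor until it stops advancing. A hedged sketch of such a loop against a stubbed dump function — the stop condition here is an assumption for illustration, not something the patch specifies:

#include <stdio.h>
#include <string.h>

/* Stub standing in for i40e_aq_debug_dump(): returns a chunk and a cursor.
 * The continuation semantics below are assumed, not taken from the patch.
 */
static int debug_dump(unsigned char tbl, unsigned idx, char *buf,
		      unsigned short *rlen, unsigned char *next_tbl,
		      unsigned *next_idx)
{
	static const char *chunks[] = { "chunk0 ", "chunk1 ", "" };
	unsigned short n = (unsigned short)strlen(chunks[idx]);

	memcpy(buf, chunks[idx], n);
	*rlen = n;
	*next_tbl = tbl;
	*next_idx = n ? idx + 1 : idx;   /* cursor stops when exhausted */
	return 0;
}

int main(void)
{
	unsigned char tbl = 0, next_tbl;
	unsigned idx = 0, next_idx;
	unsigned short rlen;
	char buf[64];

	for (;;) {
		if (debug_dump(tbl, idx, buf, &rlen, &next_tbl, &next_idx))
			break;                  /* AQ error */
		printf("got %u bytes at table %u index %u\n", rlen, tbl, idx);
		if (next_tbl == tbl && next_idx == idx)
			break;                  /* cursor no longer advances */
		tbl = next_tbl;
		idx = next_idx;
	}
	return 0;
}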
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 6e1466756760..2547aa21b2ca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -419,7 +419,7 @@ static void i40e_cee_to_dcb_v1_config(
 {
 	u16 status, tlv_status = le16_to_cpu(cee_cfg->tlv_status);
 	u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
-	u8 i, tc, err, sync, oper;
+	u8 i, tc, err;
 
 	/* CEE PG data to ETS config */
 	dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
@@ -456,9 +456,7 @@ static void i40e_cee_to_dcb_v1_config(
 	status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
 		  I40E_AQC_CEE_APP_STATUS_SHIFT;
 	err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
-	sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
-	oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
-	/* Add APPs if Error is False and Oper/Sync is True */
+	/* Add APPs if Error is False */
 	if (!err) {
 		/* CEE operating configuration supports FCoE/iSCSI/FIP only */
 		dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index daa88263af66..34170eabca7d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1388,6 +1388,50 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 				 r_cfg->app[i].selector,
 				 r_cfg->app[i].protocolid);
 		}
+	} else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) {
+		int cluster_id, table_id;
+		int index, ret;
+		u16 buff_len = 4096;
+		u32 next_index;
+		u8 next_table;
+		u8 *buff;
+		u16 rlen;
+
+		cnt = sscanf(&cmd_buf[18], "%i %i %i",
+			     &cluster_id, &table_id, &index);
+		if (cnt != 3) {
+			dev_info(&pf->pdev->dev,
+				 "dump debug fwdata <cluster_id> <table_id> <index>\n");
+			goto command_write_done;
+		}
+
+		dev_info(&pf->pdev->dev,
+			 "AQ debug dump fwdata params %x %x %x %x\n",
+			 cluster_id, table_id, index, buff_len);
+		buff = kzalloc(buff_len, GFP_KERNEL);
+		if (!buff)
+			goto command_write_done;
+
+		ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id,
+					 index, buff_len, buff, &rlen,
+					 &next_table, &next_index,
+					 NULL);
+		if (ret) {
+			dev_info(&pf->pdev->dev,
+				 "debug dump fwdata AQ Failed %d 0x%x\n",
+				 ret, pf->hw.aq.asq_last_status);
+			kfree(buff);
+			buff = NULL;
+			goto command_write_done;
+		}
+		dev_info(&pf->pdev->dev,
+			 "AQ debug dump fwdata rlen=0x%x next_table=0x%x next_index=0x%x\n",
+			 rlen, next_table, next_index);
+		print_hex_dump(KERN_INFO, "AQ buffer WB: ",
+			       DUMP_PREFIX_OFFSET, 16, 1,
+			       buff, rlen, true);
+		kfree(buff);
+		buff = NULL;
 	} else {
 		dev_info(&pf->pdev->dev,
 			 "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n");
@@ -1903,6 +1947,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		dev_info(&pf->pdev->dev, "  dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
 		dev_info(&pf->pdev->dev, "  dump desc aq\n");
 		dev_info(&pf->pdev->dev, "  dump reset stats\n");
+		dev_info(&pf->pdev->dev, "  dump debug fwdata <cluster_id> <table_id> <index>\n");
 		dev_info(&pf->pdev->dev, "  msg_enable [level]\n");
 		dev_info(&pf->pdev->dev, "  read <reg>\n");
 		dev_info(&pf->pdev->dev, "  write <reg> <value>\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index c848b1862512..4cbaaeb902c4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -356,8 +356,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
 	/* Set speed and duplex */
 	switch (link_speed) {
 	case I40E_LINK_SPEED_40GB:
-		/* need a SPEED_40000 in ethtool.h */
-		ethtool_cmd_speed_set(ecmd, 40000);
+		ethtool_cmd_speed_set(ecmd, SPEED_40000);
 		break;
 	case I40E_LINK_SPEED_20GB:
 		ethtool_cmd_speed_set(ecmd, SPEED_20000);
@@ -1914,6 +1913,16 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
 	else
 		fsp->ring_cookie = rule->q_index;
 
+	if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) {
+		struct i40e_vsi *vsi;
+
+		vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi);
+		if (vsi && vsi->type == I40E_VSI_SRIOV) {
+			fsp->h_ext.data[1] = htonl(vsi->vf_id);
+			fsp->m_ext.data[1] = htonl(0x1);
+		}
+	}
+
 	return 0;
 }
 
@@ -2207,6 +2216,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
 	struct i40e_fdir_filter *input;
 	struct i40e_pf *pf;
 	int ret = -EINVAL;
+	u16 vf_id;
 
 	if (!vsi)
 		return -EINVAL;
@@ -2267,7 +2277,22 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
 	input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
 	input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
 
+	if (ntohl(fsp->m_ext.data[1])) {
+		if (ntohl(fsp->h_ext.data[1]) >= pf->num_alloc_vfs) {
+			netif_info(pf, drv, vsi->netdev, "Invalid VF id\n");
+			goto free_input;
+		}
+		vf_id = ntohl(fsp->h_ext.data[1]);
+		/* Find vsi id from vf id and override dest vsi */
+		input->dest_vsi = pf->vf[vf_id].lan_vsi_id;
+		if (input->q_index >= pf->vf[vf_id].num_queue_pairs) {
+			netif_info(pf, drv, vsi->netdev, "Invalid queue id\n");
+			goto free_input;
+		}
+	}
+
 	ret = i40e_add_del_fdir(vsi, input, true);
+free_input:
 	if (ret)
 		kfree(input);
 	else
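The ethtool hunks round-trip a VF id through the flow-spec extension words: h_ext.data[1] carries the id and m_ext.data[1] acts as a presence mask, both in network byte order, and the set path validates the id against num_alloc_vfs before overriding the destination VSI. A small sketch of the encode/decode halves (the struct shape is simplified from struct ethtool_rx_flow_spec):

#include <arpa/inet.h>
#include <stdio.h>

struct ext { unsigned int data[2]; };   /* models fsp->h_ext / fsp->m_ext */

static void encode_vf(struct ext *h, struct ext *m, unsigned int vf_id)
{
	h->data[1] = htonl(vf_id);   /* value */
	m->data[1] = htonl(0x1);     /* mask: field is meaningful */
}

static int decode_vf(const struct ext *h, const struct ext *m,
		     unsigned int num_alloc_vfs, unsigned int *vf_id)
{
	if (!ntohl(m->data[1]))
		return 0;                      /* no VF redirect requested */
	*vf_id = ntohl(h->data[1]);
	if (*vf_id >= num_alloc_vfs)
		return -1;                     /* invalid VF id */
	return 1;
}

int main(void)
{
	struct ext h = { {0} }, m = { {0} };
	unsigned int vf = 0;
	int rc;

	encode_vf(&h, &m, 5);
	rc = decode_vf(&h, &m, 8, &vf);
	printf("decode: %d (vf %u)\n", rc, vf);

	encode_vf(&h, &m, 9);
	rc = decode_vf(&h, &m, 8, &vf);
	printf("decode: %d (out of range)\n", rc);
	return 0;
}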
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 63de3f4b7a94..24481cd7e59a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 1
+#define DRV_VERSION_BUILD 2
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -7301,7 +7301,7 @@ err_out:
  * i40e_init_interrupt_scheme - Determine proper interrupt scheme
  * @pf: board private structure to initialize
  **/
-static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
+static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
 {
 	int vectors = 0;
 	ssize_t size;
@@ -7343,11 +7343,17 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
 	/* set up vector assignment tracking */
 	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
 	pf->irq_pile = kzalloc(size, GFP_KERNEL);
+	if (!pf->irq_pile) {
+		dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
+		return -ENOMEM;
+	}
 	pf->irq_pile->num_entries = vectors;
 	pf->irq_pile->search_hint = 0;
 
-	/* track first vector for misc interrupts */
+	/* track first vector for misc interrupts, ignore return */
 	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
+
+	return 0;
 }
 
 /**
@@ -9827,7 +9833,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	/* set up the main switch operations */
 	i40e_determine_queue_usage(pf);
-	i40e_init_interrupt_scheme(pf);
+	err = i40e_init_interrupt_scheme(pf);
+	if (err)
+		goto err_switch_setup;
 
 	/* The number of VSIs reported by the FW is the minimum guaranteed
 	 * to us; HW supports far more and we share the remaining pool with
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index e49acd2accd3..554e49d02683 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -821,13 +821,12 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
 				 int *errno)
 {
 	enum i40e_nvmupd_cmd upd_cmd;
-	u8 transaction, module;
+	u8 transaction;
 
 	/* anything that doesn't match a recognized case is an error */
 	upd_cmd = I40E_NVMUPD_INVALID;
 
 	transaction = i40e_nvmupd_get_transaction(cmd->config);
-	module = i40e_nvmupd_get_module(cmd->config);
 
 	/* limits on data size */
 	if ((cmd->data_size < 1) ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index fea0d37ecc72..7b34f1e660ea 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -303,4 +303,9 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
 				u16 vsi_seid, u16 queue, bool is_add,
 				struct i40e_control_filter_stats *stats,
 				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+			       u8 table_id, u32 start_index, u16 buff_size,
+			       void *buff, u16 *ret_buff_size,
+			       u8 *ret_next_table, u32 *ret_next_index,
+			       struct i40e_asq_cmd_details *cmd_details);
 #endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 67c7bc9e9c21..568e855da0f3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -242,6 +242,7 @@ struct i40e_hw_capabilities {
 	u8 rx_buf_chain_len;
 	u32 enabled_tcmap;
 	u32 maxtc;
+	u64 wr_csr_prot;
 };
 
 struct i40e_mac_info {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 4d69e1f04901..78d1c4ff565e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -26,6 +26,129 @@
26 26
27#include "i40e.h" 27#include "i40e.h"
28 28
29/*********************notification routines***********************/
30
31/**
32 * i40e_vc_vf_broadcast
33 * @pf: pointer to the PF structure
34 * @opcode: operation code
35 * @retval: return value
36 * @msg: pointer to the msg buffer
37 * @msglen: msg length
38 *
39 * send a message to all VFs on a given PF
40 **/
41static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
42 enum i40e_virtchnl_ops v_opcode,
43 i40e_status v_retval, u8 *msg,
44 u16 msglen)
45{
46 struct i40e_hw *hw = &pf->hw;
47 struct i40e_vf *vf = pf->vf;
48 int i;
49
50 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
51 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
52 /* Not all vfs are enabled so skip the ones that are not */
53 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
54 !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
55 continue;
56
57 /* Ignore return value on purpose - a given VF may fail, but
58 * we need to keep going and send to all of them
59 */
60 i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
61 msg, msglen, NULL);
62 }
63}
64
65/**
66 * i40e_vc_notify_link_state
67 * @vf: pointer to the VF structure
68 *
69 * send a link status message to a single VF
70 **/
71static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
72{
73 struct i40e_virtchnl_pf_event pfe;
74 struct i40e_pf *pf = vf->pf;
75 struct i40e_hw *hw = &pf->hw;
76 struct i40e_link_status *ls = &pf->hw.phy.link_info;
77 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
78
79 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
80 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
81 if (vf->link_forced) {
82 pfe.event_data.link_event.link_status = vf->link_up;
83 pfe.event_data.link_event.link_speed =
84 (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
85 } else {
86 pfe.event_data.link_event.link_status =
87 ls->link_info & I40E_AQ_LINK_UP;
88 pfe.event_data.link_event.link_speed = ls->link_speed;
89 }
90 i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
91 0, (u8 *)&pfe, sizeof(pfe), NULL);
92}
93
94/**
95 * i40e_vc_notify_link_state
96 * @pf: pointer to the PF structure
97 *
98 * send a link status message to all VFs on a given PF
99 **/
100void i40e_vc_notify_link_state(struct i40e_pf *pf)
101{
102 int i;
103
104 for (i = 0; i < pf->num_alloc_vfs; i++)
105 i40e_vc_notify_vf_link_state(&pf->vf[i]);
106}
107
108/**
109 * i40e_vc_notify_reset
110 * @pf: pointer to the PF structure
111 *
112 * indicate a pending reset to all VFs on a given PF
113 **/
114void i40e_vc_notify_reset(struct i40e_pf *pf)
115{
116 struct i40e_virtchnl_pf_event pfe;
117
118 pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
119 pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
120 i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, 0,
121 (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
122}
123
124/**
125 * i40e_vc_notify_vf_reset
126 * @vf: pointer to the VF structure
127 *
128 * indicate a pending reset to the given VF
129 **/
130void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
131{
132 struct i40e_virtchnl_pf_event pfe;
133 int abs_vf_id;
134
135 /* validate the request */
136 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
137 return;
138
139 /* verify if the VF is in either init or active before proceeding */
140 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
141 !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
142 return;
143
144 abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
145
146 pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
147 pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
148 i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
149 0, (u8 *)&pfe,
150 sizeof(struct i40e_virtchnl_pf_event), NULL);
151}
29/***********************misc routines*****************************/ 152/***********************misc routines*****************************/
30 153
31/** 154/**
@@ -689,6 +812,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
 		}
 	}
 
+	if (flr)
+		usleep_range(10000, 20000);
+
 	if (!rsd)
 		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
 			vf->vf_id);
@@ -733,6 +859,11 @@ void i40e_free_vfs(struct i40e_pf *pf)
 	while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
 		usleep_range(1000, 2000);
 
+	for (i = 0; i < pf->num_alloc_vfs; i++)
+		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
+			i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
+					       false);
+
 	/* Disable IOV before freeing resources. This lets any VF drivers
 	 * running in the host get themselves cleaned up before we yank
 	 * the carpet out from underneath their feet.
@@ -1762,6 +1893,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
 		break;
 	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
 		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
+		i40e_vc_notify_vf_link_state(vf);
 		break;
 	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
 		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
@@ -1835,118 +1967,6 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
 }
 
 /**
- * i40e_vc_vf_broadcast
- * @pf: pointer to the PF structure
- * @opcode: operation code
- * @retval: return value
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- *
- * send a message to all VFs on a given PF
- **/
-static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
-				 enum i40e_virtchnl_ops v_opcode,
-				 i40e_status v_retval, u8 *msg,
-				 u16 msglen)
-{
-	struct i40e_hw *hw = &pf->hw;
-	struct i40e_vf *vf = pf->vf;
-	int i;
-
-	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
-		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
-		/* Not all VFs are enabled so skip the ones that are not */
-		if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
-		    !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
-			continue;
-
-		/* Ignore return value on purpose - a given VF may fail, but
-		 * we need to keep going and send to all of them
-		 */
-		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
-				       msg, msglen, NULL);
-	}
-}
-
-/**
- * i40e_vc_notify_link_state
- * @pf: pointer to the PF structure
- *
- * send a link status message to all VFs on a given PF
- **/
-void i40e_vc_notify_link_state(struct i40e_pf *pf)
-{
-	struct i40e_virtchnl_pf_event pfe;
-	struct i40e_hw *hw = &pf->hw;
-	struct i40e_vf *vf = pf->vf;
-	struct i40e_link_status *ls = &pf->hw.phy.link_info;
-	int i;
-
-	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
-	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
-	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
-		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
-		if (vf->link_forced) {
-			pfe.event_data.link_event.link_status = vf->link_up;
-			pfe.event_data.link_event.link_speed =
-				(vf->link_up ? I40E_LINK_SPEED_40GB : 0);
-		} else {
-			pfe.event_data.link_event.link_status =
-				ls->link_info & I40E_AQ_LINK_UP;
-			pfe.event_data.link_event.link_speed = ls->link_speed;
-		}
-		i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
-				       0, (u8 *)&pfe, sizeof(pfe),
-				       NULL);
-	}
-}
-
-/**
- * i40e_vc_notify_reset
- * @pf: pointer to the PF structure
- *
- * indicate a pending reset to all VFs on a given PF
- **/
-void i40e_vc_notify_reset(struct i40e_pf *pf)
-{
-	struct i40e_virtchnl_pf_event pfe;
-
-	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
-	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
-	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
-			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
-}
-
-/**
- * i40e_vc_notify_vf_reset
- * @vf: pointer to the VF structure
- *
- * indicate a pending reset to the given VF
- **/
-void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
-{
-	struct i40e_virtchnl_pf_event pfe;
-	int abs_vf_id;
-
-	/* validate the request */
-	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
-		return;
-
-	/* verify if the VF is in either init or active before proceeding */
-	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
-	    !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
-		return;
-
-	abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
-
-	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
-	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
-	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
-			       I40E_SUCCESS, (u8 *)&pfe,
-			       sizeof(struct i40e_virtchnl_pf_event), NULL);
-}
-
-/**
  * i40e_ndo_set_vf_mac
  * @netdev: network interface device structure
  * @vf_id: VF identifier
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 9c79cb6abb2b..ec9d83a93379 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -242,6 +242,7 @@ struct i40e_hw_capabilities {
 	u8 rx_buf_chain_len;
 	u32 enabled_tcmap;
 	u32 maxtc;
+	u64 wr_csr_prot;
 };
 
 struct i40e_mac_info {
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 34c8565031f6..1b98c25b3092 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -225,7 +225,6 @@ struct i40evf_adapter {
 #define I40E_FLAG_RX_CSUM_ENABLED	I40EVF_FLAG_RX_CSUM_ENABLED
 	/* flags for admin queue service task */
 	u32 aq_required;
-	u32 aq_pending;
 #define I40EVF_FLAG_AQ_ENABLE_QUEUES		(u32)(1)
 #define I40EVF_FLAG_AQ_DISABLE_QUEUES		(u32)(1 << 1)
 #define I40EVF_FLAG_AQ_ADD_MAC_FILTER		(u32)(1 << 2)
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 6d5f3b21c68a..7c53aca4b5a6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1008,7 +1008,6 @@ void i40evf_down(struct i40evf_adapter *adapter)
 	    adapter->state != __I40EVF_RESETTING) {
 		/* cancel any current operation */
 		adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
-		adapter->aq_pending = 0;
 		/* Schedule operations to close down the HW. Don't wait
 		 * here for this to complete. The watchdog is still running
 		 * and it will take care of this.
@@ -1335,7 +1334,6 @@ static void i40evf_watchdog_task(struct work_struct *work)
 		 */
 		return;
 	}
-	adapter->aq_pending = 0;
 	adapter->aq_required = 0;
 	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
 	goto watchdog_done;
@@ -1355,7 +1353,6 @@ static void i40evf_watchdog_task(struct work_struct *work)
 		adapter->flags |= I40EVF_FLAG_RESET_PENDING;
 		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
 		schedule_work(&adapter->reset_task);
-		adapter->aq_pending = 0;
 		adapter->aq_required = 0;
 		adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
 		goto watchdog_done;
@@ -1364,7 +1361,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
 	/* Process admin queue tasks. After init, everything gets done
 	 * here so we don't race on the admin queue.
 	 */
-	if (adapter->aq_pending) {
+	if (adapter->current_op) {
 		if (!i40evf_asq_done(hw)) {
 			dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
 			i40evf_send_api_ver(adapter);
@@ -2029,7 +2026,7 @@ static void i40evf_init_task(struct work_struct *work)
 	if (err) {
 		dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
 			err);
-			goto err;
+		goto err;
 	}
 	err = i40evf_check_reset_complete(hw);
 	if (err) {
@@ -2249,7 +2246,6 @@ static void i40evf_shutdown(struct pci_dev *pdev)
 	/* Prevent the watchdog from running. */
 	adapter->state = __I40EVF_REMOVE;
 	adapter->aq_required = 0;
-	adapter->aq_pending = 0;
 
 #ifdef CONFIG_PM
 	pci_save_state(pdev);
@@ -2467,7 +2463,6 @@ static void i40evf_remove(struct pci_dev *pdev)
 	/* Shut down all the garbage mashers on the detention level */
 	adapter->state = __I40EVF_REMOVE;
 	adapter->aq_required = 0;
-	adapter->aq_pending = 0;
 	i40evf_request_reset(adapter);
 	msleep(20);
 	/* If the FW isn't responding, kick it once, but only once. */
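The aq_pending removals across i40evf.h, i40evf_main.c and i40evf_virtchnl.c all serve one simplification: the VF only ever has a single admin-queue command outstanding, so current_op (set when a request is sent, cleared on completion) already encodes "something is pending", and the parallel bitmask merely duplicated that state and could drift out of sync. A toy model of the single-outstanding-op pattern, with hypothetical names:

#include <stdio.h>

enum op { OP_UNKNOWN = 0, OP_ENABLE_QUEUES, OP_ADD_FILTER };

static enum op current_op;        /* the only "pending" state we keep */
static unsigned int aq_required;  /* work the watchdog still has to send */

static int send(enum op op, unsigned int flag)
{
	if (current_op != OP_UNKNOWN)
		return -1;            /* busy: one command in flight at a time */
	current_op = op;
	aq_required &= ~flag;
	printf("sent op %d\n", op);
	return 0;
}

static void completion(enum op op)
{
	/* ... handle the reply ... */
	current_op = OP_UNKNOWN;      /* clearing this is what unblocks send() */
	printf("completed op %d\n", op);
}

int main(void)
{
	aq_required = 0x3;
	send(OP_ENABLE_QUEUES, 0x1);
	printf("busy: %d\n", send(OP_ADD_FILTER, 0x2));   /* rejected */
	completion(OP_ENABLE_QUEUES);
	send(OP_ADD_FILTER, 0x2);
	return 0;
}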
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 4240a496dc50..61e090558f31 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -250,7 +250,6 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
 		vqpi++;
 	}
 
-	adapter->aq_pending |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
 	adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
 			   (u8 *)vqci, len);
@@ -277,7 +276,6 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter)
 	vqs.vsi_id = adapter->vsi_res->vsi_id;
 	vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
 	vqs.rx_queues = vqs.tx_queues;
-	adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
 	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
 			   (u8 *)&vqs, sizeof(vqs));
@@ -303,7 +301,6 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
 	vqs.vsi_id = adapter->vsi_res->vsi_id;
 	vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
 	vqs.rx_queues = vqs.tx_queues;
-	adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
 	adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
 			   (u8 *)&vqs, sizeof(vqs));
@@ -354,7 +351,6 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
 	vimi->vecmap[v_idx].txq_map = 0;
 	vimi->vecmap[v_idx].rxq_map = 0;
 
-	adapter->aq_pending |= I40EVF_FLAG_AQ_MAP_VECTORS;
 	adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
 			   (u8 *)vimi, len);
@@ -415,7 +411,6 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
 			f->add = false;
 		}
 	}
-	adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
 	adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
 			   (u8 *)veal, len);
@@ -476,7 +471,6 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
 			kfree(f);
 		}
 	}
-	adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
 	adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
 			   (u8 *)veal, len);
@@ -537,7 +531,6 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
 			f->add = false;
 		}
 	}
-	adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
 	adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
 	kfree(vvfl);
@@ -598,7 +591,6 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
 			kfree(f);
 		}
 	}
-	adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
 	adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
 	kfree(vvfl);
@@ -720,9 +712,6 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
 			 __func__, v_retval, v_opcode);
 	}
 	switch (v_opcode) {
-	case I40E_VIRTCHNL_OP_VERSION:
-		/* no action, but also not an error */
-		break;
 	case I40E_VIRTCHNL_OP_GET_STATS: {
 		struct i40e_eth_stats *stats =
 			(struct i40e_eth_stats *)msg;
@@ -740,39 +729,30 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
 		adapter->current_stats = *stats;
 		}
 		break;
-	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
-		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_MAC_FILTER);
-		break;
-	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
-		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_MAC_FILTER);
-		break;
-	case I40E_VIRTCHNL_OP_ADD_VLAN:
-		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_VLAN_FILTER);
-		break;
-	case I40E_VIRTCHNL_OP_DEL_VLAN:
-		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_VLAN_FILTER);
-		break;
 	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
-		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ENABLE_QUEUES);
 		/* enable transmits */
 		i40evf_irq_enable(adapter, true);
 		netif_tx_start_all_queues(adapter->netdev);
 		netif_carrier_on(adapter->netdev);
 		break;
 	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
-		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DISABLE_QUEUES);
 		i40evf_free_all_tx_resources(adapter);
 		i40evf_free_all_rx_resources(adapter);
 		break;
-	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
-		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_CONFIGURE_QUEUES);
-		break;
+	case I40E_VIRTCHNL_OP_VERSION:
+	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
 	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
-		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS);
+		/* Don't display an error if we get these out of sequence.
+		 * If the firmware needed to get kicked, we'll get these and
+		 * it's no problem.
+		 */
+		if (v_opcode != adapter->current_op)
+			return;
 		break;
 	default:
-		dev_info(&adapter->pdev->dev, "Received unexpected message %d from PF\n",
-			 v_opcode);
+		if (v_opcode != adapter->current_op)
+			dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
+				 adapter->current_op, v_opcode);
 		break;
 	} /* switch v_opcode */
 	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
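
With aq_pending gone, adapter->current_op is the single serialization point for admin-queue traffic: a request may be issued only while current_op is I40E_VIRTCHNL_OP_UNKNOWN, and the completion handler above resets it once the PF responds. A minimal sketch of a caller gating on it (the helper name and dispatch order are illustrative, not part of this patch):

/* Illustrative only: issue at most one outstanding AQ request. */
static void i40evf_issue_next_aq_cmd(struct i40evf_adapter *adapter)
{
	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
		return;	/* previous request still awaiting its completion */

	if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES)
		i40evf_configure_queues(adapter);
	else if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES)
		i40evf_enable_queues(adapter);
	/* ...remaining I40EVF_FLAG_AQ_* bits handled the same way */
}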
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 4a42e960d331..f66641d961e3 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -41,7 +41,6 @@
 #include <linux/skbuff.h>
 #include <linux/spi/spi.h>
 #include <linux/types.h>
-#include <linux/version.h>
 
 #include "qca_7k.h"
 #include "qca_debug.h"
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index a87b177bd723..a570a60533be 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -4759,6 +4759,7 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
 		dev_err(&pdev->dev, "invalid PCI region size\n");
+		err = -EINVAL;
 		goto err_pci_resource_len_check;
 	}
 
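
The added assignment matters because the calls preceding this check in rocker_probe() leave err at 0 on success; jumping to the unwind label without setting it would make a failed probe return "success". A sketch of the pattern (surrounding code abbreviated and partly assumed, not copied from the driver):

err = pci_request_regions(pdev, "rocker");
if (err)
	goto err_pci_request_regions;

if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
	dev_err(&pdev->dev, "invalid PCI region size\n");
	err = -EINVAL;	/* record the failure before the goto */
	goto err_pci_resource_len_check;
}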
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 10b6173d557d..b605dfd5c7bc 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -46,7 +46,7 @@ struct efx_loopback_payload {
 	struct iphdr ip;
 	struct udphdr udp;
 	__be16 iteration;
-	const char msg[64];
+	char msg[64];
 } __packed;
 
 /* Loopback test source MAC address */
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index cd77289c3cfe..623c6ed8764a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -150,7 +150,7 @@ struct stmmac_extra_stats {
 #define MAC_CSR_H_FRQ_MASK 0x20
 
 #define HASH_TABLE_SIZE 64
-#define PAUSE_TIME 0x200
+#define PAUSE_TIME 0xffff
 
 /* Flow Control defines */
 #define FLOW_OFF 0
@@ -357,7 +357,8 @@ struct stmmac_dma_ops {
 	void (*dump_regs) (void __iomem *ioaddr);
 	/* Set tx/rx threshold in the csr6 register
 	 * An invalid value enables the store-and-forward mode */
-	void (*dma_mode) (void __iomem *ioaddr, int txmode, int rxmode);
+	void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode,
+			 int rxfifosz);
 	/* To track extra statistic (if supported) */
 	void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
 				   void __iomem *ioaddr);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 64d8f56a9c17..b3fe0575ff6b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -172,6 +172,7 @@ enum inter_frame_gap {
 /* GMAC FLOW CTRL defines */
 #define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
 #define GMAC_FLOW_CTRL_PT_SHIFT 16
+#define GMAC_FLOW_CTRL_UP 0x00000008 /* Unicast pause frame enable */
 #define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
 #define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
 #define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
@@ -246,6 +247,56 @@ enum ttc_control {
 #define DMA_CONTROL_FEF 0x00000080
 #define DMA_CONTROL_FUF 0x00000040
 
+/* Receive flow control activation field
+ * RFA field in DMA control register, bits 23,10:9
+ */
+#define DMA_CONTROL_RFA_MASK	0x00800600
+
+/* Receive flow control deactivation field
+ * RFD field in DMA control register, bits 22,12:11
+ */
+#define DMA_CONTROL_RFD_MASK	0x00401800
+
+/* RFD and RFA fields are encoded as follows
+ *
+ *  Bit Field
+ *  0,00 - Full minus 1KB (only valid when rxfifo >= 4KB and EFC enabled)
+ *  0,01 - Full minus 2KB (only valid when rxfifo >= 4KB and EFC enabled)
+ *  0,10 - Full minus 3KB (only valid when rxfifo >= 4KB and EFC enabled)
+ *  0,11 - Full minus 4KB (only valid when rxfifo > 4KB and EFC enabled)
+ *  1,00 - Full minus 5KB (only valid when rxfifo > 8KB and EFC enabled)
+ *  1,01 - Full minus 6KB (only valid when rxfifo > 8KB and EFC enabled)
+ *  1,10 - Full minus 7KB (only valid when rxfifo > 8KB and EFC enabled)
+ *  1,11 - Reserved
+ *
+ * RFD should always be > RFA for a given FIFO size. RFD == RFA may work,
+ * but packet throughput performance may not be as expected.
+ *
+ * Be sure that bit 3 in GMAC Register 6 is set for Unicast Pause frame
+ * detection (IEEE Specification Requirement, Annex 31B, 31B.1, Pause
+ * Description).
+ *
+ * Be sure that DZPA (bit 7 in Flow Control Register, GMAC Register 6),
+ * is set to 0. This allows pause frames with a quanta of 0 to be sent
+ * as an XOFF message to the link peer.
+ */
+
+#define RFA_FULL_MINUS_1K	0x00000000
+#define RFA_FULL_MINUS_2K	0x00000200
+#define RFA_FULL_MINUS_3K	0x00000400
+#define RFA_FULL_MINUS_4K	0x00000600
+#define RFA_FULL_MINUS_5K	0x00800000
+#define RFA_FULL_MINUS_6K	0x00800200
+#define RFA_FULL_MINUS_7K	0x00800400
+
+#define RFD_FULL_MINUS_1K	0x00000000
+#define RFD_FULL_MINUS_2K	0x00000800
+#define RFD_FULL_MINUS_3K	0x00001000
+#define RFD_FULL_MINUS_4K	0x00001800
+#define RFD_FULL_MINUS_5K	0x00400000
+#define RFD_FULL_MINUS_6K	0x00400800
+#define RFD_FULL_MINUS_7K	0x00401000
+
 enum rtc_control {
 	DMA_CONTROL_RTC_64 = 0x00000000,
 	DMA_CONTROL_RTC_32 = 0x00000008,
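
To make the encoding concrete: each named value already carries its field's split bit layout (RFA in bits 23,10:9; RFD in bits 22,12:11), so RFD_FULL_MINUS_5K (0x00400000) is bit 22 set with bits 12:11 clear, i.e. the "1,00" row of the table. Programming a threshold pair is then mask-and-or; an illustrative helper, not part of this patch:

/* Illustrative helper: program XOFF (RFA) and XON (RFD) thresholds. */
static u32 dwmac1000_set_rx_flow_thresholds(u32 csr6, u32 rfa, u32 rfd)
{
	csr6 &= ~(DMA_CONTROL_RFA_MASK | DMA_CONTROL_RFD_MASK);
	return csr6 | rfa | rfd; /* e.g. RFA_FULL_MINUS_1K | RFD_FULL_MINUS_2K */
}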
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 0adcf73cf722..371a669d69fd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -201,7 +201,10 @@ static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
 			       unsigned int fc, unsigned int pause_time)
 {
 	void __iomem *ioaddr = hw->pcsr;
-	unsigned int flow = 0;
+	/* Set flow such that DZPQ in Mac Register 6 is 0,
+	 * and unicast pause detect is enabled.
+	 */
+	unsigned int flow = GMAC_FLOW_CTRL_UP;
 
 	pr_debug("GMAC Flow-Control:\n");
 	if (fc & FLOW_RX) {
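
For context, the rest of dwmac1000_flow_ctrl() ORs in the direction enables and the pause time before writing GMAC Register 6, roughly as below (abbreviated sketch; assumes the usual GMAC_FLOW_CTRL register-offset define from dwmac1000.h):

if (fc & FLOW_RX)
	flow |= GMAC_FLOW_CTRL_RFE;	/* act on received pause frames */
if (fc & FLOW_TX)
	flow |= GMAC_FLOW_CTRL_TFE;	/* generate pause frames */
if (duplex)
	flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
writel(flow, ioaddr + GMAC_FLOW_CTRL);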
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 59d92e811750..0e8937c1184a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -106,8 +106,29 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
 	return 0;
 }
 
+static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
+{
+	csr6 &= ~DMA_CONTROL_RFA_MASK;
+	csr6 &= ~DMA_CONTROL_RFD_MASK;
+
+	/* Leave flow control disabled if receive fifo size is less than
+	 * 4K or 0. Otherwise, send XOFF when fifo is 1K less than full,
+	 * and send XON when 2K less than full.
+	 */
+	if (rxfifosz < 4096) {
+		csr6 &= ~DMA_CONTROL_EFC;
+		pr_debug("GMAC: disabling flow control, rxfifo too small(%d)\n",
+			 rxfifosz);
+	} else {
+		csr6 |= DMA_CONTROL_EFC;
+		csr6 |= RFA_FULL_MINUS_1K;
+		csr6 |= RFD_FULL_MINUS_2K;
+	}
+	return csr6;
+}
+
 static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
-					 int rxmode)
+					 int rxmode, int rxfifosz)
 {
 	u32 csr6 = readl(ioaddr + DMA_CONTROL);
 
@@ -153,6 +174,9 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
 		csr6 |= DMA_CONTROL_RTC_128;
 	}
 
+	/* Configure flow control based on rx fifo size */
+	csr6 = dwmac1000_configure_fc(csr6, rxfifosz);
+
 	writel(csr6, ioaddr + DMA_CONTROL);
 }
 
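
A worked example of the thresholds this picks: for a hypothetical GMAC with a 16 KB receive FIFO, dwmac1000_configure_fc() enables EFC, asserts XOFF at "full minus 1K" and releases with XON at "full minus 2K":

u32 csr6 = readl(ioaddr + DMA_CONTROL);
csr6 = dwmac1000_configure_fc(csr6, 16384);	/* 16 KB rx fifo */
writel(csr6, ioaddr + DMA_CONTROL);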
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index 7d1dce9e7ffc..9d0971c1c2ee 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -72,7 +72,7 @@ static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
  * control register.
  */
 static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode,
-					int rxmode)
+					int rxmode, int rxfifosz)
 {
 	u32 csr6 = readl(ioaddr + DMA_CONTROL);
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 06103cad7c77..05c146f718a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1277,8 +1277,10 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
  */
 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 {
+	int rxfifosz = priv->plat->rx_fifo_size;
+
 	if (priv->plat->force_thresh_dma_mode)
-		priv->hw->dma->dma_mode(priv->ioaddr, tc, tc);
+		priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
 	else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
 		/*
 		 * In case of GMAC, SF mode can be enabled
@@ -1287,10 +1289,12 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 		 * 2) There is no bugged Jumbo frame support
 		 * that needs to not insert csum in the TDES.
 		 */
-		priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE);
+		priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
+					rxfifosz);
 		priv->xstats.threshold = SF_DMA_MODE;
 	} else
-		priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
+		priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
+					rxfifosz);
 }
 
 /**
@@ -1442,6 +1446,7 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 {
 	int status;
+	int rxfifosz = priv->plat->rx_fifo_size;
 
 	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
 	if (likely((status & handle_rx)) || (status & handle_tx)) {
@@ -1456,10 +1461,11 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 		    (tc <= 256)) {
 			tc += 64;
 			if (priv->plat->force_thresh_dma_mode)
-				priv->hw->dma->dma_mode(priv->ioaddr, tc, tc);
+				priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
+							rxfifosz);
 			else
 				priv->hw->dma->dma_mode(priv->ioaddr, tc,
-							SF_DMA_MODE);
+							SF_DMA_MODE, rxfifosz);
 			priv->xstats.threshold = tc;
 		}
 	} else if (unlikely(status == tx_hard_error))
@@ -2970,15 +2976,15 @@ int stmmac_dvr_remove(struct net_device *ndev)
 	priv->hw->dma->stop_tx(priv->ioaddr);
 
 	stmmac_set_mac(priv->ioaddr, false);
-	if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
-	    priv->pcs != STMMAC_PCS_RTBI)
-		stmmac_mdio_unregister(ndev);
 	netif_carrier_off(ndev);
 	unregister_netdev(ndev);
 	if (priv->stmmac_rst)
 		reset_control_assert(priv->stmmac_rst);
 	clk_disable_unprepare(priv->pclk);
 	clk_disable_unprepare(priv->stmmac_clk);
+	if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
+	    priv->pcs != STMMAC_PCS_RTBI)
+		stmmac_mdio_unregister(ndev);
 	free_netdev(ndev);
 
 	return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index f9b42f11950f..705bbdf93940 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -181,6 +181,10 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
 					   sizeof(struct stmmac_mdio_bus_data),
 					   GFP_KERNEL);
 
+	of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
+
+	of_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size);
+
 	plat->force_sf_dma_mode =
 		of_property_read_bool(np, "snps,force_sf_dma_mode");
 
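
Since of_property_read_u32() leaves its destination untouched on failure, device trees that omit these properties keep the zeroed defaults of the plat allocation, and dwmac1000_configure_fc() then leaves hardware flow control off (rxfifosz == 0 falls into the "< 4096" branch). A sketch of that fallback, assuming plat comes from a zeroing allocator such as devm_kzalloc():

/* Assumed default path when "rx-fifo-depth" is absent from the DT. */
if (of_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size))
	plat->rx_fifo_size = 0;	/* 0 < 4096, so EFC stays disabled */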
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 34f846b4bd05..94570aace241 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -105,7 +105,7 @@ static void ri_tasklet(unsigned long dev)
 	if (from & AT_EGRESS) {
 		dev_queue_xmit(skb);
 	} else if (from & AT_INGRESS) {
-		skb_pull(skb, skb->dev->hard_header_len);
+		skb_pull(skb, skb->mac_len);
 		netif_receive_skb(skb);
 	} else
 		BUG();
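
The switch to skb->mac_len matters because mac_len records the length of the MAC header actually present on this skb (recorded when the header was parsed, a VLAN tag included), while hard_header_len is a static property of whichever device the skb currently points at. An illustrative case:

/* Packet redirected to ifb from a VLAN path: the on-wire header is
 * 18 bytes, but hard_header_len of the plain Ethernet device says 14.
 */
skb_pull(skb, skb->mac_len);	/* pulls what is really there */
netif_receive_skb(skb);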
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 9c91ff872485..8c350c5d54ad 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -313,7 +313,7 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
 	 */
 	if (q->flags & IFF_VNET_HDR)
 		features |= vlan->tap_features;
-	if (netif_needs_gso(dev, skb, features)) {
+	if (netif_needs_gso(skb, features)) {
 		struct sk_buff *segs = __skb_gso_segment(skb, features, false);
 
 		if (IS_ERR(segs))
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 4c08f98f4484..3f45afd4382e 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -560,7 +560,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (unlikely(!netif_carrier_ok(dev) ||
 		     (slots > 1 && !xennet_can_sg(dev)) ||
-		     netif_needs_gso(dev, skb, netif_skb_features(skb)))) {
+		     netif_needs_gso(skb, netif_skb_features(skb)))) {
 		spin_unlock_irqrestore(&queue->tx_lock, flags);
 		goto drop;
 	}
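
Both netif_needs_gso() call sites here track the same header change: the net_device argument is dropped because the decision depends only on the skb and the features mask the caller already computed. Usage as these hunks assume it:

/* skb must be segmented in software when the features mask cannot
 * handle its GSO type; the device is no longer consulted here.
 */
netdev_features_t features = netif_skb_features(skb);

if (netif_needs_gso(skb, features))
	/* fall back to __skb_gso_segment() or drop, as the caller requires */;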