author    Linus Torvalds <torvalds@linux-foundation.org>    2018-08-27 14:59:39 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-08-27 14:59:39 -0400
commit    050cdc6c9501abcd64720b8cc3e7941efee9547d (patch)
tree      7fe489cbe14f95c2d547011caea5fe6281ab3344 /drivers/net
parent    908946c4bee705542f38bc06c0203a6d83e3700c (diff)
parent    98c8f125fd8a6240ea343c1aa50a1be9047791b8 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) ICE, E1000, IGB, IXGBE, and I40E bug fixes from the Intel folks.

 2) Better fix for AB-BA deadlock in packet scheduler code, from Cong
    Wang.

 3) bpf sockmap fixes (zero sized key handling, etc.) from Daniel
    Borkmann.

 4) Send zero IPID in TCP resets and SYN-RECV state ACKs, to prevent
    attackers using it as a side-channel. From Eric Dumazet.

 5) Memory leak in mediatek bluetooth driver, from Gustavo A. R. Silva.

 6) Hook up rt->dst.input of ipv6 anycast routes properly, from Hangbin
    Liu.

 7) hns and hns3 bug fixes from Huazhong Tan.

 8) Fix RIF leak in mlxsw driver, from Ido Schimmel.

 9) iova range check fix in vhost, from Jason Wang.

10) Fix hang in do_tcp_sendpages() with tls, from John Fastabend.

11) More r8152 chips need to disable RX aggregation, from Kai-Heng Feng.

12) Memory exposure in TCA_U32_SEL handling, from Kees Cook.

13) TCP BBR congestion control fixes from Kevin Yang.

14) hv_netvsc, ignore non-PCI devices, from Stephen Hemminger.

15) qed driver fixes from Tomer Tayar.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (77 commits)
  net: sched: Fix memory exposure from short TCA_U32_SEL
  qed: fix spelling mistake "comparsion" -> "comparison"
  vhost: correctly check the iova range when waking virtqueue
  qlge: Fix netdev features configuration.
  net: macb: do not disable MDIO bus at open/close time
  Revert "net: stmmac: fix build failure due to missing COMMON_CLK dependency"
  net: macb: Fix regression breaking non-MDIO fixed-link PHYs
  mlxsw: spectrum_switchdev: Do not leak RIFs when removing bridge
  i40e: fix condition of WARN_ONCE for stat strings
  i40e: Fix for Tx timeouts when interface is brought up if DCB is enabled
  ixgbe: fix driver behaviour after issuing VFLR
  ixgbe: Prevent unsupported configurations with XDP
  ixgbe: Replace GFP_ATOMIC with GFP_KERNEL
  igb: Replace mdelay() with msleep() in igb_integrated_phy_loopback()
  igb: Replace GFP_ATOMIC with GFP_KERNEL in igb_sw_init()
  igb: Use an advanced ctx descriptor for launchtime
  e1000: ensure to free old tx/rx rings in set_ringparam()
  e1000: check on netif_running() before calling e1000_up()
  ixgb: use dma_zalloc_coherent instead of allocator/memset
  ice: Trivial formatting fixes
  ...
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 6
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 36
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.h | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 108
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 25
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 30
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 52
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 115
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 16
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 36
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 31
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 20
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 187
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h | 27
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_filter.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 23
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h | 5
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 5
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 13
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 5
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 5
-rw-r--r--  drivers/net/usb/r8152.c | 4
53 files changed, 565 insertions(+), 410 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 139d96c5a023..092c817f8f11 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -110,16 +110,14 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
                                struct tcf_exts *tc_exts)
 {
        const struct tc_action *tc_act;
-       LIST_HEAD(tc_actions);
-       int rc;
+       int i, rc;
 
        if (!tcf_exts_has_actions(tc_exts)) {
                netdev_info(bp->dev, "no actions");
                return -EINVAL;
        }
 
-       tcf_exts_to_list(tc_exts, &tc_actions);
-       list_for_each_entry(tc_act, &tc_actions, list) {
+       tcf_exts_for_each_action(i, tc_act, tc_exts) {
                /* Drop action */
                if (is_tcf_gact_shot(tc_act)) {
                        actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
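The bnxt hunk above (and the cxgb4 hunks below) all convert from the
tcf_exts_to_list()/list_for_each_entry() pair to the new
tcf_exts_for_each_action() iterator, part of the same series that removed the
list_head from struct tc_action. A minimal sketch of the new pattern as a
classifier-offload parser might use it; drv_parse_actions() and DRV_ACT_DROP
are hypothetical names for illustration, not from any driver in this diff:

    #define DRV_ACT_DROP    BIT(0)  /* hypothetical driver flag */

    static int drv_parse_actions(struct tcf_exts *exts, u32 *flags)
    {
            const struct tc_action *act;
            int i;

            if (!tcf_exts_has_actions(exts))
                    return -EINVAL;

            tcf_exts_for_each_action(i, act, exts) {
                    if (is_tcf_gact_shot(act))      /* drop action */
                            *flags |= DRV_ACT_DROP;
                    else
                            return -EOPNOTSUPP;     /* reject the rest */
            }
            return 0;
    }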
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index dc09f9a8a49b..c6707ea2d751 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -482,11 +482,6 @@ static int macb_mii_probe(struct net_device *dev)
 
        if (np) {
                if (of_phy_is_fixed_link(np)) {
-                       if (of_phy_register_fixed_link(np) < 0) {
-                               dev_err(&bp->pdev->dev,
-                                       "broken fixed-link specification\n");
-                               return -ENODEV;
-                       }
                        bp->phy_node = of_node_get(np);
                } else {
                        bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
@@ -569,7 +564,7 @@ static int macb_mii_init(struct macb *bp)
 {
        struct macb_platform_data *pdata;
        struct device_node *np;
-       int err;
+       int err = -ENXIO;
 
        /* Enable management port */
        macb_writel(bp, NCR, MACB_BIT(MPE));
@@ -592,12 +587,23 @@ static int macb_mii_init(struct macb *bp)
        dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
 
        np = bp->pdev->dev.of_node;
-       if (pdata)
-               bp->mii_bus->phy_mask = pdata->phy_mask;
+       if (np && of_phy_is_fixed_link(np)) {
+               if (of_phy_register_fixed_link(np) < 0) {
+                       dev_err(&bp->pdev->dev,
+                               "broken fixed-link specification %pOF\n", np);
+                       goto err_out_free_mdiobus;
+               }
+
+               err = mdiobus_register(bp->mii_bus);
+       } else {
+               if (pdata)
+                       bp->mii_bus->phy_mask = pdata->phy_mask;
+
+               err = of_mdiobus_register(bp->mii_bus, np);
+       }
 
-       err = of_mdiobus_register(bp->mii_bus, np);
        if (err)
-               goto err_out_free_mdiobus;
+               goto err_out_free_fixed_link;
 
        err = macb_mii_probe(bp->dev);
        if (err)
@@ -607,6 +613,7 @@ static int macb_mii_init(struct macb *bp)
 
 err_out_unregister_bus:
        mdiobus_unregister(bp->mii_bus);
+err_out_free_fixed_link:
        if (np && of_phy_is_fixed_link(np))
                of_phy_deregister_fixed_link(np);
 err_out_free_mdiobus:
@@ -2028,14 +2035,17 @@ static void macb_reset_hw(struct macb *bp)
 {
        struct macb_queue *queue;
        unsigned int q;
+       u32 ctrl = macb_readl(bp, NCR);
 
        /* Disable RX and TX (XXX: Should we halt the transmission
         * more gracefully?)
         */
-       macb_writel(bp, NCR, 0);
+       ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
 
        /* Clear the stats registers (XXX: Update stats first?) */
-       macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
+       ctrl |= MACB_BIT(CLRSTAT);
+
+       macb_writel(bp, NCR, ctrl);
 
        /* Clear all status flags */
        macb_writel(bp, TSR, -1);
@@ -2223,7 +2233,7 @@ static void macb_init_hw(struct macb *bp)
        }
 
        /* Enable TX and RX */
-       macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
+       macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
 }
 
 /* The hash address register is 64 bits long and takes up two
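A note on the macb_reset_hw()/macb_init_hw() hunks above: both now
read-modify-write the network control register (NCR) instead of overwriting
it, so bits unrelated to RX/TX enable, in particular MPE (management port
enable, which the "do not disable MDIO bus at open/close time" fix needs to
stay set), survive a reset. A sketch of the pattern, reusing the driver's
macb_readl()/macb_writel() accessors; the helper itself is hypothetical:

    /* Hypothetical helper showing the read-modify-write pattern:
     * touch only the requested NCR bits, leave the rest (e.g. MPE) alone.
     */
    static void macb_update_ncr(struct macb *bp, u32 clear, u32 set)
    {
            u32 ncr = macb_readl(bp, NCR);  /* current register value */

            ncr &= ~clear;                  /* clear only requested bits */
            ncr |= set;                     /* set only requested bits */
            macb_writel(bp, NCR, ncr);
    }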
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 623f73dd7738..c116f96956fe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -417,10 +417,9 @@ static void cxgb4_process_flow_actions(struct net_device *in,
                                      struct ch_filter_specification *fs)
 {
        const struct tc_action *a;
-       LIST_HEAD(actions);
+       int i;
 
-       tcf_exts_to_list(cls->exts, &actions);
-       list_for_each_entry(a, &actions, list) {
+       tcf_exts_for_each_action(i, a, cls->exts) {
                if (is_tcf_gact_ok(a)) {
                        fs->action = FILTER_PASS;
                } else if (is_tcf_gact_shot(a)) {
@@ -591,10 +590,9 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
        bool act_redir = false;
        bool act_pedit = false;
        bool act_vlan = false;
-       LIST_HEAD(actions);
+       int i;
 
-       tcf_exts_to_list(cls->exts, &actions);
-       list_for_each_entry(a, &actions, list) {
+       tcf_exts_for_each_action(i, a, cls->exts) {
                if (is_tcf_gact_ok(a)) {
                        /* Do nothing */
                } else if (is_tcf_gact_shot(a)) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index 18eb2aedd4cb..c7d2b4dc7568 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -93,14 +93,13 @@ static int fill_action_fields(struct adapter *adap,
        unsigned int num_actions = 0;
        const struct tc_action *a;
        struct tcf_exts *exts;
-       LIST_HEAD(actions);
+       int i;
 
        exts = cls->knode.exts;
        if (!tcf_exts_has_actions(exts))
                return -EINVAL;
 
-       tcf_exts_to_list(exts, &actions);
-       list_for_each_entry(a, &actions, list) {
+       tcf_exts_for_each_action(i, a, exts) {
                /* Don't allow more than one action per rule. */
                if (num_actions)
                        return -EINVAL;
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index fa5b30f547f6..cad52bd331f7 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -220,10 +220,10 @@ struct hnae_desc_cb {
 
        /* priv data for the desc, e.g. skb when use with ip stack*/
        void *priv;
-       u16 page_offset;
-       u16 reuse_flag;
+       u32 page_offset;
+       u32 length;     /* length of the buffer */
 
-       u16 length;     /* length of the buffer */
+       u16 reuse_flag;
 
        /* desc type, used by the ring user to mark the type of the priv data */
        u16 type;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 9f2b552aee33..02a0ba20fad5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -406,113 +406,13 @@ out_net_tx_busy:
        return NETDEV_TX_BUSY;
 }
 
-/**
- * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
- * @data: pointer to the start of the headers
- * @max: total length of section to find headers in
- *
- * This function is meant to determine the length of headers that will
- * be recognized by hardware for LRO, GRO, and RSC offloads. The main
- * motivation of doing this is to only perform one pull for IPv4 TCP
- * packets so that we can do basic things like calculating the gso_size
- * based on the average data per packet.
- **/
-static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
-                                       unsigned int max_size)
-{
-       unsigned char *network;
-       u8 hlen;
-
-       /* this should never happen, but better safe than sorry */
-       if (max_size < ETH_HLEN)
-               return max_size;
-
-       /* initialize network frame pointer */
-       network = data;
-
-       /* set first protocol and move network header forward */
-       network += ETH_HLEN;
-
-       /* handle any vlan tag if present */
-       if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
-           == HNS_RX_FLAG_VLAN_PRESENT) {
-               if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
-                       return max_size;
-
-               network += VLAN_HLEN;
-       }
-
-       /* handle L3 protocols */
-       if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
-           == HNS_RX_FLAG_L3ID_IPV4) {
-               if ((typeof(max_size))(network - data) >
-                   (max_size - sizeof(struct iphdr)))
-                       return max_size;
-
-               /* access ihl as a u8 to avoid unaligned access on ia64 */
-               hlen = (network[0] & 0x0F) << 2;
-
-               /* verify hlen meets minimum size requirements */
-               if (hlen < sizeof(struct iphdr))
-                       return network - data;
-
-               /* record next protocol if header is present */
-       } else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
-                  == HNS_RX_FLAG_L3ID_IPV6) {
-               if ((typeof(max_size))(network - data) >
-                   (max_size - sizeof(struct ipv6hdr)))
-                       return max_size;
-
-               /* record next protocol */
-               hlen = sizeof(struct ipv6hdr);
-       } else {
-               return network - data;
-       }
-
-       /* relocate pointer to start of L4 header */
-       network += hlen;
-
-       /* finally sort out TCP/UDP */
-       if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
-           == HNS_RX_FLAG_L4ID_TCP) {
-               if ((typeof(max_size))(network - data) >
-                   (max_size - sizeof(struct tcphdr)))
-                       return max_size;
-
-               /* access doff as a u8 to avoid unaligned access on ia64 */
-               hlen = (network[12] & 0xF0) >> 2;
-
-               /* verify hlen meets minimum size requirements */
-               if (hlen < sizeof(struct tcphdr))
-                       return network - data;
-
-               network += hlen;
-       } else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
-                  == HNS_RX_FLAG_L4ID_UDP) {
-               if ((typeof(max_size))(network - data) >
-                   (max_size - sizeof(struct udphdr)))
-                       return max_size;
-
-               network += sizeof(struct udphdr);
-       }
-
-       /* If everything has gone correctly network should be the
-        * data section of the packet and will be the end of the header.
-        * If not then it probably represents the end of the last recognized
-        * header.
-        */
-       if ((typeof(max_size))(network - data) < max_size)
-               return network - data;
-       else
-               return max_size;
-}
-
 static void hns_nic_reuse_page(struct sk_buff *skb, int i,
                               struct hnae_ring *ring, int pull_len,
                               struct hnae_desc_cb *desc_cb)
 {
        struct hnae_desc *desc;
-       int truesize, size;
+       u32 truesize;
+       int size;
        int last_offset;
        bool twobufs;
 
@@ -530,7 +430,7 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i,
        }
 
        skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
-                       size - pull_len, truesize - pull_len);
+                       size - pull_len, truesize);
 
        /* avoid re-using remote pages,flag default unreuse */
        if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
@@ -695,7 +595,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
        } else {
                ring->stats.seg_pkt_cnt++;
 
-               pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
+               pull_len = eth_get_headlen(va, HNS_RX_HEAD_SIZE);
                memcpy(__skb_put(skb, pull_len), va,
                       ALIGN(pull_len, sizeof(long)));
 
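The hns hunk above drops the driver's ~100-line private header parser in
favor of eth_get_headlen(), the generic helper backed by the kernel flow
dissector (which, at this point in the tree, takes just the buffer and a
length cap). A sketch of the surrounding copybreak pattern, with illustrative
names and sizes rather than the real ring structures:

    /* Sketch: pull only the protocol headers into the skb linear area and
     * leave the payload in the page as a fragment. The 256-byte cap, the
     * PAGE_SIZE truesize, and the function itself are illustrative. The
     * aligned over-copy mirrors the driver and relies on skb tailroom.
     */
    static void rx_copybreak_sketch(struct sk_buff *skb, unsigned char *va,
                                    struct page *page, u32 page_offset,
                                    int size)
    {
            unsigned int pull_len = eth_get_headlen(va, 256 /* head buf */);

            memcpy(__skb_put(skb, pull_len), va,
                   ALIGN(pull_len, sizeof(long)));
            skb_add_rx_frag(skb, 0, page, page_offset + pull_len,
                            size - pull_len, PAGE_SIZE /* truesize */);
    }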
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 3554dca7a680..955c4ab18b03 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2019,7 +2019,8 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
                                struct hns3_desc_cb *desc_cb)
 {
        struct hns3_desc *desc;
-       int truesize, size;
+       u32 truesize;
+       int size;
        int last_offset;
        bool twobufs;
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index a02a96aee2a2..cb450d7ec8c1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -284,11 +284,11 @@ struct hns3_desc_cb {
 
        /* priv data for the desc, e.g. skb when use with ip stack*/
        void *priv;
-       u16 page_offset;
-       u16 reuse_flag;
-
+       u32 page_offset;
        u32 length;     /* length of the buffer */
 
+       u16 reuse_flag;
+
        /* desc type, used by the ring user to mark the type of the priv data */
        u16 type;
 };
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index bdb3f8e65ed4..2569a168334c 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -624,14 +624,14 @@ static int e1000_set_ringparam(struct net_device *netdev,
                adapter->tx_ring = tx_old;
                e1000_free_all_rx_resources(adapter);
                e1000_free_all_tx_resources(adapter);
-               kfree(tx_old);
-               kfree(rx_old);
                adapter->rx_ring = rxdr;
                adapter->tx_ring = txdr;
                err = e1000_up(adapter);
                if (err)
                        goto err_setup;
        }
+       kfree(tx_old);
+       kfree(rx_old);
 
        clear_bit(__E1000_RESETTING, &adapter->flags);
        return 0;
@@ -644,7 +644,8 @@ err_setup_rx:
 err_alloc_rx:
        kfree(txdr);
 err_alloc_tx:
-       e1000_up(adapter);
+       if (netif_running(adapter->netdev))
+               e1000_up(adapter);
 err_setup:
        clear_bit(__E1000_RESETTING, &adapter->flags);
        return err;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index abcd096ede14..5ff6caa83948 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2013,7 +2013,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
        for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
                i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i);
 
-       WARN_ONCE(p - data != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
+       WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
                  "stat strings count mismatch!");
 }
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f2c622e78802..ac685ad4d877 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -5122,15 +5122,17 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
                                       u8 *bw_share)
 {
        struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+       struct i40e_pf *pf = vsi->back;
        i40e_status ret;
        int i;
 
-       if (vsi->back->flags & I40E_FLAG_TC_MQPRIO)
+       /* There is no need to reset BW when mqprio mode is on.  */
+       if (pf->flags & I40E_FLAG_TC_MQPRIO)
                return 0;
-       if (!vsi->mqprio_qopt.qopt.hw) {
+       if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
                ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
                if (ret)
-                       dev_info(&vsi->back->pdev->dev,
+                       dev_info(&pf->pdev->dev,
                                 "Failed to reset tx rate for vsi->seid %u\n",
                                 vsi->seid);
                return ret;
@@ -5139,12 +5141,11 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
                bw_data.tc_bw_credits[i] = bw_share[i];
 
-       ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
-                                      NULL);
+       ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
        if (ret) {
-               dev_info(&vsi->back->pdev->dev,
+               dev_info(&pf->pdev->dev,
                         "AQ command Config VSI BW allocation per TC failed = %d\n",
-                        vsi->back->hw.aq.asq_last_status);
+                        pf->hw.aq.asq_last_status);
                return -EINVAL;
        }
 
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index d8b5fff581e7..868f4a1d0f72 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -89,6 +89,13 @@ extern const char ice_drv_ver[];
 #define ice_for_each_rxq(vsi, i) \
        for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
 
+/* Macros for each allocated tx/rx ring whether used or not in a VSI */
+#define ice_for_each_alloc_txq(vsi, i) \
+       for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
+
+#define ice_for_each_alloc_rxq(vsi, i) \
+       for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)
+
 struct ice_tc_info {
        u16 qoffset;
        u16 qcount;
@@ -189,9 +196,9 @@ struct ice_vsi {
        struct list_head tmp_sync_list;         /* MAC filters to be synced */
        struct list_head tmp_unsync_list;       /* MAC filters to be unsynced */
 
-       bool irqs_ready;
-       bool current_isup;       /* Sync 'link up' logging */
-       bool stat_offsets_loaded;
+       u8 irqs_ready;
+       u8 current_isup;         /* Sync 'link up' logging */
+       u8 stat_offsets_loaded;
 
        /* queue information */
        u8 tx_mapping_mode;      /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -262,7 +269,7 @@ struct ice_pf {
        struct ice_hw_port_stats stats;
        struct ice_hw_port_stats stats_prev;
        struct ice_hw hw;
-       bool stat_prev_loaded;  /* has previous stats been loaded */
+       u8 stat_prev_loaded;    /* has previous stats been loaded */
        char int_name[ICE_INT_NAME_STR_LEN];
 };
 
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 7541ec2270b3..a0614f472658 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -329,19 +329,19 @@ struct ice_aqc_vsi_props {
        /* VLAN section */
        __le16 pvid; /* VLANS include priority bits */
        u8 pvlan_reserved[2];
-       u8 port_vlan_flags;
-#define ICE_AQ_VSI_PVLAN_MODE_S                0
-#define ICE_AQ_VSI_PVLAN_MODE_M                (0x3 << ICE_AQ_VSI_PVLAN_MODE_S)
-#define ICE_AQ_VSI_PVLAN_MODE_UNTAGGED 0x1
-#define ICE_AQ_VSI_PVLAN_MODE_TAGGED   0x2
-#define ICE_AQ_VSI_PVLAN_MODE_ALL      0x3
+       u8 vlan_flags;
+#define ICE_AQ_VSI_VLAN_MODE_S         0
+#define ICE_AQ_VSI_VLAN_MODE_M         (0x3 << ICE_AQ_VSI_VLAN_MODE_S)
+#define ICE_AQ_VSI_VLAN_MODE_UNTAGGED  0x1
+#define ICE_AQ_VSI_VLAN_MODE_TAGGED    0x2
+#define ICE_AQ_VSI_VLAN_MODE_ALL       0x3
 #define ICE_AQ_VSI_PVLAN_INSERT_PVID   BIT(2)
-#define ICE_AQ_VSI_PVLAN_EMOD_S                3
-#define ICE_AQ_VSI_PVLAN_EMOD_M                (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR_UP   (0x1 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR      (0x2 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_NOTHING  (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_S         3
+#define ICE_AQ_VSI_VLAN_EMOD_M         (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH  (0x0 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR_UP    (0x1 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR       (0x2 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_NOTHING   (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
        u8 pvlan_reserved2[3];
        /* ingress egress up sections */
        __le32 ingress_table; /* bitmap, 3 bits per up */
@@ -594,6 +594,7 @@ struct ice_sw_rule_lg_act {
 #define ICE_LG_ACT_GENERIC_OFFSET_M    (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S)
 #define ICE_LG_ACT_GENERIC_PRIORITY_S  22
 #define ICE_LG_ACT_GENERIC_PRIORITY_M  (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S)
+#define ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX        7
 
        /* Action = 7 - Set Stat count */
 #define ICE_LG_ACT_STAT_COUNT          0x7
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 71d032cc5fa7..661beea6af79 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -45,6 +45,9 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
 /**
  * ice_clear_pf_cfg - Clear PF configuration
  * @hw: pointer to the hardware structure
+ *
+ * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
+ * configuration, flow director filters, etc.).
  */
 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
 {
@@ -1483,7 +1486,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
        struct ice_phy_info *phy_info;
        enum ice_status status = 0;
 
-       if (!pi)
+       if (!pi || !link_up)
                return ICE_ERR_PARAM;
 
        phy_info = &pi->phy;
@@ -1619,20 +1622,23 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
        }
 
        /* LUT size is only valid for Global and PF table types */
-       if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128) {
-               flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
-                         ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
-                        ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-       } else if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512) {
+       switch (lut_size) {
+       case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
+               break;
+       case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
                flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
                          ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
                         ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-       } else if ((lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) &&
-                  (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF)) {
-               flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
-                         ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
-                        ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-       } else {
+               break;
+       case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
+               if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
+                       flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
+                                 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
+                                ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
+                       break;
+               }
+               /* fall-through */
+       default:
                status = ICE_ERR_PARAM;
                goto ice_aq_get_set_rss_lut_exit;
        }
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 7c511f144ed6..62be72fdc8f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -597,10 +597,14 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
        return 0;
 
 init_ctrlq_free_rq:
-       ice_shutdown_rq(hw, cq);
-       ice_shutdown_sq(hw, cq);
-       mutex_destroy(&cq->sq_lock);
-       mutex_destroy(&cq->rq_lock);
+       if (cq->rq.head) {
+               ice_shutdown_rq(hw, cq);
+               mutex_destroy(&cq->rq_lock);
+       }
+       if (cq->sq.head) {
+               ice_shutdown_sq(hw, cq);
+               mutex_destroy(&cq->sq_lock);
+       }
        return status;
 }
 
@@ -706,10 +710,14 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
                return;
        }
 
-       ice_shutdown_sq(hw, cq);
-       ice_shutdown_rq(hw, cq);
-       mutex_destroy(&cq->sq_lock);
-       mutex_destroy(&cq->rq_lock);
+       if (cq->sq.head) {
+               ice_shutdown_sq(hw, cq);
+               mutex_destroy(&cq->sq_lock);
+       }
+       if (cq->rq.head) {
+               ice_shutdown_rq(hw, cq);
+               mutex_destroy(&cq->rq_lock);
+       }
 }
 
 /**
@@ -1057,8 +1065,11 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 
 clean_rq_elem_out:
        /* Set pending if needed, unlock and return */
-       if (pending)
+       if (pending) {
+               /* re-read HW head to calculate actual pending messages */
+               ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
                *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
+       }
 clean_rq_elem_err:
        mutex_unlock(&cq->rq_lock);
 
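The *pending fix above re-reads the hardware head (ntu) right before
computing the occupancy, because firmware can post new receive-queue entries
between draining the last message and reporting the count. The expression
(ntc > ntu ? cq->rq.count : 0) + (ntu - ntc) is the usual circular-buffer
distance; a sketch of the equivalent modulo form, valid when both indices are
below the ring size:

    /* Sketch: entries outstanding in a circular queue of "count" slots,
     * between next-to-clean (ntc) and next-to-use (ntu). Equivalent to the
     * driver's conditional form for ntc, ntu < count.
     */
    static u16 ring_pending(u16 ntc, u16 ntu, u16 count)
    {
            return (u16)((ntu + count - ntc) % count);
    }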
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 1db304c01d10..c71a9b528d6d 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -26,7 +26,7 @@ static int ice_q_stats_len(struct net_device *netdev)
 {
        struct ice_netdev_priv *np = netdev_priv(netdev);
 
-       return ((np->vsi->num_txq + np->vsi->num_rxq) *
+       return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) *
                (sizeof(struct ice_q_stats) / sizeof(u64)));
 }
 
@@ -218,7 +218,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
                        p += ETH_GSTRING_LEN;
                }
 
-       ice_for_each_txq(vsi, i) {
+       ice_for_each_alloc_txq(vsi, i) {
                snprintf(p, ETH_GSTRING_LEN,
                         "tx-queue-%u.tx_packets", i);
                p += ETH_GSTRING_LEN;
@@ -226,7 +226,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
                p += ETH_GSTRING_LEN;
        }
 
-       ice_for_each_rxq(vsi, i) {
+       ice_for_each_alloc_rxq(vsi, i) {
                snprintf(p, ETH_GSTRING_LEN,
                         "rx-queue-%u.rx_packets", i);
                p += ETH_GSTRING_LEN;
@@ -253,6 +253,24 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
 {
        switch (sset) {
        case ETH_SS_STATS:
+               /* The number (and order) of strings reported *must* remain
+                * constant for a given netdevice. This function must not
+                * report a different number based on run time parameters
+                * (such as the number of queues in use, or the setting of
+                * a private ethtool flag). This is due to the nature of the
+                * ethtool stats API.
+                *
+                * User space programs such as ethtool must make 3 separate
+                * ioctl requests, one for size, one for the strings, and
+                * finally one for the stats. Since these cross into
+                * user space, changes to the number or size could result in
+                * undefined memory access or incorrect string<->value
+                * correlations for statistics.
+                *
+                * Even if it appears to be safe, changes to the size or
+                * order of strings will suffer from race conditions and are
+                * not safe.
+                */
                return ICE_ALL_STATS_LEN(netdev);
        default:
                return -EOPNOTSUPP;
@@ -280,18 +298,26 @@ ice_get_ethtool_stats(struct net_device *netdev,
        /* populate per queue stats */
        rcu_read_lock();
 
-       ice_for_each_txq(vsi, j) {
+       ice_for_each_alloc_txq(vsi, j) {
                ring = READ_ONCE(vsi->tx_rings[j]);
-               if (!ring)
-                       continue;
-               data[i++] = ring->stats.pkts;
-               data[i++] = ring->stats.bytes;
+               if (ring) {
+                       data[i++] = ring->stats.pkts;
+                       data[i++] = ring->stats.bytes;
+               } else {
+                       data[i++] = 0;
+                       data[i++] = 0;
+               }
        }
 
-       ice_for_each_rxq(vsi, j) {
+       ice_for_each_alloc_rxq(vsi, j) {
                ring = READ_ONCE(vsi->rx_rings[j]);
-               data[i++] = ring->stats.pkts;
-               data[i++] = ring->stats.bytes;
+               if (ring) {
+                       data[i++] = ring->stats.pkts;
+                       data[i++] = ring->stats.bytes;
+               } else {
+                       data[i++] = 0;
+                       data[i++] = 0;
+               }
        }
 
        rcu_read_unlock();
@@ -519,7 +545,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
                goto done;
        }
 
-       for (i = 0; i < vsi->num_txq; i++) {
+       for (i = 0; i < vsi->alloc_txq; i++) {
                /* clone ring and setup updated count */
                tx_rings[i] = *vsi->tx_rings[i];
                tx_rings[i].count = new_tx_cnt;
@@ -551,7 +577,7 @@ process_rx:
                goto done;
        }
 
-       for (i = 0; i < vsi->num_rxq; i++) {
+       for (i = 0; i < vsi->alloc_rxq; i++) {
                /* clone ring and setup updated count */
                rx_rings[i] = *vsi->rx_rings[i];
                rx_rings[i].count = new_rx_cnt;
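The long comment added to ice_get_sset_count() above is about the ethtool
stats contract: user space discovers the count, then fetches the names and
the values in separate ioctls, so all three must stay consistent. A userspace
sketch of that three-step sequence (error handling trimmed, "eth0" is a
placeholder; the kernel fills in the lengths for the strings/stats requests):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct {
                    struct ethtool_sset_info hdr;
                    __u32 count;            /* receives ETH_SS_STATS size */
            } si = { .hdr = { .cmd = ETHTOOL_GSSET_INFO,
                              .sset_mask = 1ULL << ETH_SS_STATS } };
            struct ethtool_gstrings *strs;
            struct ethtool_stats *vals;
            struct ifreq ifr = {0};
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            __u32 i, n;

            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* placeholder */

            ifr.ifr_data = (char *)&si;     /* ioctl 1: how many stats? */
            ioctl(fd, SIOCETHTOOL, &ifr);
            n = si.count;

            strs = calloc(1, sizeof(*strs) + n * ETH_GSTRING_LEN);
            strs->cmd = ETHTOOL_GSTRINGS;
            strs->string_set = ETH_SS_STATS;
            ifr.ifr_data = (char *)strs;    /* ioctl 2: the n names */
            ioctl(fd, SIOCETHTOOL, &ifr);

            vals = calloc(1, sizeof(*vals) + n * sizeof(__u64));
            vals->cmd = ETHTOOL_GSTATS;
            ifr.ifr_data = (char *)vals;    /* ioctl 3: the n values */
            ioctl(fd, SIOCETHTOOL, &ifr);

            for (i = 0; i < n; i++)         /* pair name i with value i */
                    printf("%.*s: %llu\n", ETH_GSTRING_LEN,
                           (char *)strs->data + i * ETH_GSTRING_LEN,
                           (unsigned long long)vals->data[i]);
            return 0;
    }

If the count from step 1 disagreed with what steps 2 and 3 return, the
name<->value pairing in the final loop would silently break, which is exactly
the race the driver comment warns against.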
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 499904874b3f..6076fc87df9d 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -121,10 +121,6 @@
 #define PFINT_FW_CTL_CAUSE_ENA_S       30
 #define PFINT_FW_CTL_CAUSE_ENA_M       BIT(PFINT_FW_CTL_CAUSE_ENA_S)
 #define PFINT_OICR                     0x0016CA00
-#define PFINT_OICR_HLP_RDY_S           14
-#define PFINT_OICR_HLP_RDY_M           BIT(PFINT_OICR_HLP_RDY_S)
-#define PFINT_OICR_CPM_RDY_S           15
-#define PFINT_OICR_CPM_RDY_M           BIT(PFINT_OICR_CPM_RDY_S)
 #define PFINT_OICR_ECC_ERR_S           16
 #define PFINT_OICR_ECC_ERR_M           BIT(PFINT_OICR_ECC_ERR_S)
 #define PFINT_OICR_MAL_DETECT_S        19
@@ -133,10 +129,6 @@
 #define PFINT_OICR_GRST_M              BIT(PFINT_OICR_GRST_S)
 #define PFINT_OICR_PCI_EXCEPTION_S     21
 #define PFINT_OICR_PCI_EXCEPTION_M     BIT(PFINT_OICR_PCI_EXCEPTION_S)
-#define PFINT_OICR_GPIO_S              22
-#define PFINT_OICR_GPIO_M              BIT(PFINT_OICR_GPIO_S)
-#define PFINT_OICR_STORM_DETECT_S      24
-#define PFINT_OICR_STORM_DETECT_M      BIT(PFINT_OICR_STORM_DETECT_S)
 #define PFINT_OICR_HMC_ERR_S           26
 #define PFINT_OICR_HMC_ERR_M           BIT(PFINT_OICR_HMC_ERR_S)
 #define PFINT_OICR_PE_CRITERR_S        28
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index d23a91665b46..068dbc740b76 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -265,6 +265,7 @@ enum ice_rx_flex_desc_status_error_0_bits {
 struct ice_rlan_ctx {
        u16 head;
        u16 cpuid; /* bigger than needed, see above for reason */
+#define ICE_RLAN_BASE_S 7
        u64 base;
        u16 qlen;
 #define ICE_RLAN_CTX_DBUF_S 7
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 5299caf55a7f..f1e80eed2fd6 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -901,7 +901,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
                case ice_aqc_opc_get_link_status:
                        if (ice_handle_link_event(pf))
                                dev_err(&pf->pdev->dev,
-                                       "Could not handle link event");
+                                       "Could not handle link event\n");
                        break;
                default:
                        dev_dbg(&pf->pdev->dev,
@@ -917,13 +917,27 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
 }
 
 /**
+ * ice_ctrlq_pending - check if there is a difference between ntc and ntu
+ * @hw: pointer to hardware info
+ * @cq: control queue information
+ *
+ * returns true if there are pending messages in a queue, false if there aren't
+ */
+static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+       u16 ntu;
+
+       ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
+       return cq->rq.next_to_clean != ntu;
+}
+
+/**
  * ice_clean_adminq_subtask - clean the AdminQ rings
  * @pf: board private structure
  */
 static void ice_clean_adminq_subtask(struct ice_pf *pf)
 {
        struct ice_hw *hw = &pf->hw;
-       u32 val;
 
        if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
                return;
@@ -933,9 +947,13 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf)
 
        clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
 
-       /* re-enable Admin queue interrupt causes */
-       val = rd32(hw, PFINT_FW_CTL);
-       wr32(hw, PFINT_FW_CTL, (val | PFINT_FW_CTL_CAUSE_ENA_M));
+       /* There might be a situation where new messages arrive to a control
+        * queue between processing the last message and clearing the
+        * EVENT_PENDING bit. So before exiting, check queue head again (using
+        * ice_ctrlq_pending) and process new messages if any.
+        */
+       if (ice_ctrlq_pending(hw, &hw->adminq))
+               __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
 
        ice_flush(hw);
 }
@@ -1295,11 +1313,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
                qcount = numq_tc;
        }
 
-       /* find higher power-of-2 of qcount */
-       pow = ilog2(qcount);
-
-       if (!is_power_of_2(qcount))
-               pow++;
+       /* find the (rounded up) power-of-2 of qcount */
+       pow = order_base_2(qcount);
 
        for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
                if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
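order_base_2() from <linux/log2.h> returns the exponent of the next power of
two, i.e. ceil(log2(n)), which is exactly what the removed ilog2()-plus-
conditional-increment sequence computed: order_base_2(5) == 3 (round 5 queues
up to 8), order_base_2(8) == 3 (already a power of two). A tiny sketch with
an illustrative helper name:

    #include <linux/log2.h>

    /* Sketch: round a queue count up to a power of two, as the q_map code
     * above now sizes per-TC queue groups. qcount_to_pow2() is illustrative,
     * not a driver function.
     */
    static u16 qcount_to_pow2(u16 qcount)
    {
            u8 pow = order_base_2(qcount);  /* ceil(log2(qcount)) */

            return 1 << pow;                /* rounded-up queue count */
    }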
@@ -1352,14 +1367,15 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
        ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
        /* Traffic from VSI can be sent to LAN */
        ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
-       /* Allow all packets untagged/tagged */
-       ctxt->info.port_vlan_flags = ((ICE_AQ_VSI_PVLAN_MODE_ALL &
-                                      ICE_AQ_VSI_PVLAN_MODE_M) >>
-                                     ICE_AQ_VSI_PVLAN_MODE_S);
-       /* Show VLAN/UP from packets in Rx descriptors */
-       ctxt->info.port_vlan_flags |= ((ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH &
-                                       ICE_AQ_VSI_PVLAN_EMOD_M) >>
-                                      ICE_AQ_VSI_PVLAN_EMOD_S);
+
+       /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
+        * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
+        * packets untagged/tagged.
+        */
+       ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
+                                 ICE_AQ_VSI_VLAN_MODE_M) >>
+                                ICE_AQ_VSI_VLAN_MODE_S);
+
        /* Have 1:1 UP mapping for both ingress/egress tables */
        table |= ICE_UP_TABLE_TRANSLATE(0, 0);
        table |= ICE_UP_TABLE_TRANSLATE(1, 1);
@@ -1688,15 +1704,12 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
        wr32(hw, PFINT_OICR_ENA, 0);    /* disable all */
        rd32(hw, PFINT_OICR);           /* read to clear */
 
-       val = (PFINT_OICR_HLP_RDY_M |
-              PFINT_OICR_CPM_RDY_M |
-              PFINT_OICR_ECC_ERR_M |
+       val = (PFINT_OICR_ECC_ERR_M |
               PFINT_OICR_MAL_DETECT_M |
               PFINT_OICR_GRST_M |
               PFINT_OICR_PCI_EXCEPTION_M |
-              PFINT_OICR_GPIO_M |
-              PFINT_OICR_STORM_DETECT_M |
-              PFINT_OICR_HMC_ERR_M);
+              PFINT_OICR_HMC_ERR_M |
+              PFINT_OICR_PE_CRITERR_M);
 
        wr32(hw, PFINT_OICR_ENA, val);
 
@@ -2058,15 +2071,13 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
 skip_req_irq:
        ice_ena_misc_vector(pf);
 
-       val = (pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
-             (ICE_RX_ITR & PFINT_OICR_CTL_ITR_INDX_M) |
-             PFINT_OICR_CTL_CAUSE_ENA_M;
+       val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
+              PFINT_OICR_CTL_CAUSE_ENA_M);
        wr32(hw, PFINT_OICR_CTL, val);
 
        /* This enables Admin queue Interrupt causes */
-       val = (pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
-             (ICE_RX_ITR & PFINT_FW_CTL_ITR_INDX_M) |
-             PFINT_FW_CTL_CAUSE_ENA_M;
+       val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
+              PFINT_FW_CTL_CAUSE_ENA_M);
        wr32(hw, PFINT_FW_CTL, val);
 
        itr_gran = hw->itr_gran_200;
@@ -3246,8 +3257,10 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf)
        if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
                ice_dis_msix(pf);
 
-       devm_kfree(&pf->pdev->dev, pf->irq_tracker);
-       pf->irq_tracker = NULL;
+       if (pf->irq_tracker) {
+               devm_kfree(&pf->pdev->dev, pf->irq_tracker);
+               pf->irq_tracker = NULL;
+       }
 }
 
 /**
@@ -3271,7 +3284,7 @@ static int ice_probe(struct pci_dev *pdev,
 
        err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
        if (err) {
-               dev_err(&pdev->dev, "I/O map error %d\n", err);
+               dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err);
                return err;
        }
 
@@ -3720,10 +3733,10 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
        enum ice_status status;
 
        /* Here we are configuring the VSI to let the driver add VLAN tags by
-        * setting port_vlan_flags to ICE_AQ_VSI_PVLAN_MODE_ALL. The actual VLAN
-        * tag insertion happens in the Tx hot path, in ice_tx_map.
+        * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
+        * insertion happens in the Tx hot path, in ice_tx_map.
         */
-       ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_MODE_ALL;
+       ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
 
        ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
        ctxt.vsi_num = vsi->vsi_num;
@@ -3735,7 +3748,7 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
                return -EIO;
        }
 
-       vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
+       vsi->info.vlan_flags = ctxt.info.vlan_flags;
        return 0;
 }
 
@@ -3757,12 +3770,15 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
         */
        if (ena) {
                /* Strip VLAN tag from Rx packet and put it in the desc */
-               ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+               ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
        } else {
                /* Disable stripping. Leave tag in packet */
-               ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_NOTHING;
+               ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
        }
 
+       /* Allow all packets untagged/tagged */
+       ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
+
        ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
        ctxt.vsi_num = vsi->vsi_num;
 
@@ -3773,7 +3789,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
                return -EIO;
        }
 
-       vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
+       vsi->info.vlan_flags = ctxt.info.vlan_flags;
        return 0;
 }
 
@@ -3986,7 +4002,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
        /* clear the context structure first */
        memset(&rlan_ctx, 0, sizeof(rlan_ctx));
 
-       rlan_ctx.base = ring->dma >> 7;
+       rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
 
        rlan_ctx.qlen = ring->count;
 
@@ -4098,11 +4114,12 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
 {
        int err;
 
-       ice_set_rx_mode(vsi->netdev);
-
-       err = ice_restore_vlan(vsi);
-       if (err)
-               return err;
+       if (vsi->netdev) {
+               ice_set_rx_mode(vsi->netdev);
+               err = ice_restore_vlan(vsi);
+               if (err)
+                       return err;
+       }
 
        err = ice_vsi_cfg_txqs(vsi);
        if (!err)
@@ -4868,7 +4885,7 @@ int ice_down(struct ice_vsi *vsi)
  */
 static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
 {
-       int i, err;
+       int i, err = 0;
 
        if (!vsi->num_txq) {
                dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
@@ -4893,7 +4910,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
  */
 static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
 {
-       int i, err;
+       int i, err = 0;
 
        if (!vsi->num_rxq) {
                dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
@@ -5235,7 +5252,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
        u8 count = 0;
 
        if (new_mtu == netdev->mtu) {
-               netdev_warn(netdev, "mtu is already %d\n", netdev->mtu);
+               netdev_warn(netdev, "mtu is already %u\n", netdev->mtu);
                return 0;
        }
 
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 92da0a626ce0..295a8cd87fc1 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -131,9 +131,8 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
  *
  * This function will request NVM ownership.
  */
-static enum
-ice_status ice_acquire_nvm(struct ice_hw *hw,
-                          enum ice_aq_res_access_type access)
+static enum ice_status
+ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
 {
        if (hw->nvm.blank_nvm_mode)
                return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 2e6c1d92cc88..eeae199469b6 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1576,8 +1576,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
                return status;
        }
 
-       if (owner == ICE_SCHED_NODE_OWNER_LAN)
-               vsi->max_lanq[tc] = new_numqs;
+       vsi->max_lanq[tc] = new_numqs;
 
        return status;
 }
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 723d15f1e90b..6b7ec2ae5ad6 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -645,14 +645,14 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
        act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
        lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);
 
-       act = (7 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M;
+       act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
+              ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
 
        /* Third action Marker value */
        act |= ICE_LG_ACT_GENERIC;
        act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
                ICE_LG_ACT_GENERIC_VALUE_M;
 
-       act |= (0 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M;
        lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
 
        /* call the fill switch rule to fill the lookup tx rx structure */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index 6f4a0d159dbf..9b8ec128ee31 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -17,7 +17,7 @@ struct ice_vsi_ctx {
        u16 vsis_unallocated;
        u16 flags;
        struct ice_aqc_vsi_props info;
-       bool alloc_from_pool;
+       u8 alloc_from_pool;
 };
 
 enum ice_sw_fwd_act_type {
@@ -94,8 +94,8 @@ struct ice_fltr_info {
94 u8 qgrp_size; 94 u8 qgrp_size;
95 95
96 /* Rule creations populate these indicators basing on the switch type */ 96 /* Rule creations populate these indicators basing on the switch type */
97 bool lb_en; /* Indicate if packet can be looped back */ 97 u8 lb_en; /* Indicate if packet can be looped back */
98 bool lan_en; /* Indicate if packet can be forwarded to the uplink */ 98 u8 lan_en; /* Indicate if packet can be forwarded to the uplink */
99}; 99};
100 100
101/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */ 101/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */
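Note: this hunk, together with the ice_txrx.h and ice_type.h hunks below, converts struct members from bool to u8. The diff itself states no rationale; a minimal sketch of the usual kernel coding-style argument (sizeof(bool) and its alignment are compiler-dependent, so fixed-width types keep structure layout predictable) — the struct names here are hypothetical, not from the patch:

	struct example_flags_bool {
		u16 id;
		bool enabled;	/* size and padding may vary with the ABI */
	};

	struct example_flags_u8 {
		u16 id;
		u8 enabled;	/* always exactly one byte */
	};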
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 567067b650c4..31bc998fe200 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -143,7 +143,7 @@ struct ice_ring {
 	u16 next_to_use;
 	u16 next_to_clean;
 
-	bool ring_active;	/* is ring online or not */
+	u8 ring_active;		/* is ring online or not */
 
 	/* stats structs */
 	struct ice_q_stats stats;
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 99c8a9a71b5e..97c366e0ca59 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -83,7 +83,7 @@ struct ice_link_status {
 	u64 phy_type_low;
 	u16 max_frame_size;
 	u16 link_speed;
-	bool lse_ena;	/* Link Status Event notification */
+	u8 lse_ena;	/* Link Status Event notification */
 	u8 link_info;
 	u8 an_info;
 	u8 ext_info;
@@ -101,7 +101,7 @@ struct ice_phy_info {
 	struct ice_link_status link_info_old;
 	u64 phy_type_low;
 	enum ice_media_type media_type;
-	bool get_link_info;
+	u8 get_link_info;
 };
 
 /* Common HW capabilities for SW use */
@@ -167,7 +167,7 @@ struct ice_nvm_info {
 	u32 oem_ver;	/* OEM version info */
 	u16 sr_words;	/* Shadow RAM size in words */
 	u16 ver;	/* NVM package version */
-	bool blank_nvm_mode;	/* is NVM empty (no FW present) */
+	u8 blank_nvm_mode;	/* is NVM empty (no FW present) */
 };
 
 /* Max number of port to queue branches w.r.t topology */
@@ -181,7 +181,7 @@ struct ice_sched_node {
 	struct ice_aqc_txsched_elem_data info;
 	u32 agg_id;	/* aggregator group id */
 	u16 vsi_id;
-	bool in_use;	/* suspended or in use */
+	u8 in_use;	/* suspended or in use */
 	u8 tx_sched_layer;	/* Logical Layer (1-9) */
 	u8 num_children;
 	u8 tc_num;
@@ -218,7 +218,7 @@ struct ice_sched_vsi_info {
 struct ice_sched_tx_policy {
 	u16 max_num_vsis;
 	u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS];
-	bool rdma_ena;
+	u8 rdma_ena;
 };
 
 struct ice_port_info {
@@ -243,7 +243,7 @@ struct ice_port_info {
 	struct list_head agg_list;	/* lists all aggregator */
 	u8 lport;
 #define ICE_LPORT_MASK		0xff
-	bool is_vf;
+	u8 is_vf;
 };
 
 struct ice_switch_info {
@@ -287,7 +287,7 @@ struct ice_hw {
 	u8 max_cgds;
 	u8 sw_entry_point_layer;
 
-	bool evb_veb;		/* true for VEB, false for VEPA */
+	u8 evb_veb;		/* true for VEB, false for VEPA */
 	struct ice_bus_info bus;
 	struct ice_nvm_info nvm;
 	struct ice_hw_dev_caps dev_caps;	/* device capabilities */
@@ -318,7 +318,7 @@ struct ice_hw {
 	u8 itr_gran_100;
 	u8 itr_gran_50;
 	u8 itr_gran_25;
-	bool ucast_shared;	/* true if VSIs can share unicast addr */
+	u8 ucast_shared;	/* true if VSIs can share unicast addr */
 
 };
 
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index f92f7918112d..5acf3b743876 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1649,7 +1649,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
 	if (hw->phy.type == e1000_phy_m88)
 		igb_phy_disable_receiver(adapter);
 
-	mdelay(500);
+	msleep(500);
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index d03c2f0d7592..a32c576c1e65 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3873,7 +3873,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
 
 	adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
 				     sizeof(struct igb_mac_addr),
-				     GFP_ATOMIC);
+				     GFP_KERNEL);
 	if (!adapter->mac_table)
 		return -ENOMEM;
 
@@ -3883,7 +3883,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
 
 	/* Setup and initialize a copy of the hw vlan table array */
 	adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
-				       GFP_ATOMIC);
+				       GFP_KERNEL);
 	if (!adapter->shadow_vfta)
 		return -ENOMEM;
 
@@ -5816,7 +5816,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
 csum_failed:
-		if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
+		if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) &&
+		    !tx_ring->launchtime_enable)
 			return;
 		goto no_csum;
 	}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 43664adf7a3c..d3e72d0f66ef 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -771,14 +771,13 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
 	rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
 	rxdr->size = ALIGN(rxdr->size, 4096);
 
-	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
-					GFP_KERNEL);
+	rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+					 GFP_KERNEL);
 
 	if (!rxdr->desc) {
 		vfree(rxdr->buffer_info);
 		return -ENOMEM;
 	}
-	memset(rxdr->desc, 0, rxdr->size);
 
 	rxdr->next_to_clean = 0;
 	rxdr->next_to_use = 0;
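The ixgb hunk above folds the allocate-then-memset pattern into a single dma_zalloc_coherent() call, which returns already-zeroed DMA-coherent memory. A sketch of the general pattern for kernels of this era, where dma_zalloc_coherent() exists (the variable names are illustrative, not from the patch):

	/* before: allocate, check, then clear by hand */
	desc = dma_alloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;
	memset(desc, 0, size);

	/* after: one call hands back zeroed memory */
	desc = dma_zalloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;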
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 94b3165ff543..ccd852ad62a4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -192,7 +192,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 	}
 
 	/* alloc the udl from per cpu ddp pool */
-	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
+	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp);
 	if (!ddp->udl) {
 		e_err(drv, "failed allocated ddp context\n");
 		goto out_noddp_unmap;
@@ -760,7 +760,7 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
 		return 0;
 
 	/* Extra buffer to be shared by all DDPs for HW work around */
-	buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+	buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_KERNEL);
 	if (!buffer)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 447098005490..9a23d33a47ed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6201,7 +6201,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
 
 	adapter->mac_table = kcalloc(hw->mac.num_rar_entries,
 				     sizeof(struct ixgbe_mac_addr),
-				     GFP_ATOMIC);
+				     GFP_KERNEL);
 	if (!adapter->mac_table)
 		return -ENOMEM;
 
@@ -6620,8 +6620,18 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
 	if (adapter->xdp_prog) {
-		e_warn(probe, "MTU cannot be changed while XDP program is loaded\n");
-		return -EPERM;
+		int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
+				     VLAN_HLEN;
+		int i;
+
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+			if (new_frame_size > ixgbe_rx_bufsz(ring)) {
+				e_warn(probe, "Requested MTU size is not supported with XDP\n");
+				return -EINVAL;
+			}
+		}
 	}
 
 	/*
@@ -8983,6 +8993,15 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 
 #ifdef CONFIG_IXGBE_DCB
 	if (tc) {
+		if (adapter->xdp_prog) {
+			e_warn(probe, "DCB is not supported with XDP\n");
+
+			ixgbe_init_interrupt_scheme(adapter);
+			if (netif_running(dev))
+				ixgbe_open(dev);
+			return -EINVAL;
+		}
+
 		netdev_set_num_tc(dev, tc);
 		ixgbe_set_prio_tc_map(adapter);
 
@@ -9171,14 +9190,12 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
 			    struct tcf_exts *exts, u64 *action, u8 *queue)
 {
 	const struct tc_action *a;
-	LIST_HEAD(actions);
+	int i;
 
 	if (!tcf_exts_has_actions(exts))
 		return -EINVAL;
 
-	tcf_exts_to_list(exts, &actions);
-	list_for_each_entry(a, &actions, list) {
-
+	tcf_exts_for_each_action(i, a, exts) {
 		/* Drop action */
 		if (is_tcf_gact_shot(a)) {
 			*action = IXGBE_FDIR_DROP_QUEUE;
@@ -9936,6 +9953,11 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
 	int tcs = adapter->hw_tcs ? : 1;
 	int pool, err;
 
+	if (adapter->xdp_prog) {
+		e_warn(probe, "L2FW offload is not supported with XDP\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	/* The hardware supported by ixgbe only filters on the destination MAC
 	 * address. In order to avoid issues we only support offloading modes
 	 * where the hardware can actually provide the functionality.
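The parse_tc_actions() hunk above is one of several conversions in this pull from the tcf_exts_to_list() / list_for_each_entry() pattern to the tcf_exts_for_each_action() iterator, which walks the action array inside struct tcf_exts directly instead of snapshotting it into a temporary list. A sketch of the new iterator in a hypothetical helper (illustrative only, not code from the patch):

	/* Hypothetical helper, shown only to illustrate the iterator:
	 * tcf_exts_for_each_action() visits each action by index, so no
	 * LIST_HEAD() snapshot is needed.
	 */
	static int example_count_drops(struct tcf_exts *exts)
	{
		const struct tc_action *a;
		int i, drops = 0;

		tcf_exts_for_each_action(i, a, exts) {
			if (is_tcf_gact_shot(a))	/* gact "drop" */
				drops++;
		}
		return drops;
	}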
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 6f59933cdff7..3c6f01c41b78 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -53,6 +53,11 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
 	struct ixgbe_hw *hw = &adapter->hw;
 	int i;
 
+	if (adapter->xdp_prog) {
+		e_warn(probe, "SRIOV is not supported with XDP\n");
+		return -EINVAL;
+	}
+
 	/* Enable VMDq flag so device will be set in VM mode */
 	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED |
 			  IXGBE_FLAG_VMDQ_ENABLED;
@@ -688,8 +693,13 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
 static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
 	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
 	u8 num_tcs = adapter->hw_tcs;
+	u32 reg_val;
+	u32 queue;
+	u32 word;
 
 	/* remove VLAN filters beloning to this VF */
 	ixgbe_clear_vf_vlans(adapter, vf);
@@ -726,6 +736,27 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 
 	/* reset VF api back to unknown */
 	adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
+
+	/* Restart each queue for given VF */
+	for (queue = 0; queue < q_per_pool; queue++) {
+		unsigned int reg_idx = (vf * q_per_pool) + queue;
+
+		reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx));
+
+		/* Re-enabling only configured queues */
+		if (reg_val) {
+			reg_val |= IXGBE_TXDCTL_ENABLE;
+			IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
+			reg_val &= ~IXGBE_TXDCTL_ENABLE;
+			IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
+		}
+	}
+
+	/* Clear VF's mailbox memory */
+	for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
+		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);
+
+	IXGBE_WRITE_FLUSH(hw);
 }
 
 static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 44cfb2021145..41bcbb337e83 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2518,6 +2518,7 @@ enum {
 /* Translated register #defines */
 #define IXGBE_PVFTDH(P)		(0x06010 + (0x40 * (P)))
 #define IXGBE_PVFTDT(P)		(0x06018 + (0x40 * (P)))
+#define IXGBE_PVFTXDCTL(P)	(0x06028 + (0x40 * (P)))
 #define IXGBE_PVFTDWBAL(P)	(0x06038 + (0x40 * (P)))
 #define IXGBE_PVFTDWBAH(P)	(0x0603C + (0x40 * (P)))
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9131a1376e7d..9fed54017659 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1982,14 +1982,15 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
 		goto out_ok;
 
 	modify_ip_header = false;
-	tcf_exts_to_list(exts, &actions);
-	list_for_each_entry(a, &actions, list) {
+	tcf_exts_for_each_action(i, a, exts) {
+		int k;
+
 		if (!is_tcf_pedit(a))
 			continue;
 
 		nkeys = tcf_pedit_nkeys(a);
-		for (i = 0; i < nkeys; i++) {
-			htype = tcf_pedit_htype(a, i);
+		for (k = 0; k < nkeys; k++) {
+			htype = tcf_pedit_htype(a, k);
 			if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
 			    htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
 				modify_ip_header = true;
@@ -2053,15 +2054,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 	const struct tc_action *a;
 	LIST_HEAD(actions);
 	u32 action = 0;
-	int err;
+	int err, i;
 
 	if (!tcf_exts_has_actions(exts))
 		return -EINVAL;
 
 	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
 
-	tcf_exts_to_list(exts, &actions);
-	list_for_each_entry(a, &actions, list) {
+	tcf_exts_for_each_action(i, a, exts) {
 		if (is_tcf_gact_shot(a)) {
 			action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
 			if (MLX5_CAP_FLOWTABLE(priv->mdev,
@@ -2666,7 +2666,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 	LIST_HEAD(actions);
 	bool encap = false;
 	u32 action = 0;
-	int err;
+	int err, i;
 
 	if (!tcf_exts_has_actions(exts))
 		return -EINVAL;
@@ -2674,8 +2674,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 	attr->in_rep = rpriv->rep;
 	attr->in_mdev = priv->mdev;
 
-	tcf_exts_to_list(exts, &actions);
-	list_for_each_entry(a, &actions, list) {
+	tcf_exts_for_each_action(i, a, exts) {
 		if (is_tcf_gact_shot(a)) {
 			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
 				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 6070d1591d1e..930700413b1d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1346,8 +1346,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 		return -ENOMEM;
 	mall_tc_entry->cookie = f->cookie;
 
-	tcf_exts_to_list(f->exts, &actions);
-	a = list_first_entry(&actions, struct tc_action, list);
+	a = tcf_exts_first_action(f->exts);
 
 	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
 		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 3ae930196741..3cdb7aca90b7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -414,6 +414,8 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
 void
 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
 void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
+void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
+				 struct net_device *dev);
 
 /* spectrum_kvdl.c */
 enum mlxsw_sp_kvdl_entry_type {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index ebd1b24ebaa5..8d211972c5e9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -21,8 +21,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
 				     struct netlink_ext_ack *extack)
 {
 	const struct tc_action *a;
-	LIST_HEAD(actions);
-	int err;
+	int err, i;
 
 	if (!tcf_exts_has_actions(exts))
 		return 0;
@@ -32,8 +31,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
 	if (err)
 		return err;
 
-	tcf_exts_to_list(exts, &actions);
-	list_for_each_entry(a, &actions, list) {
+	tcf_exts_for_each_action(i, a, exts) {
 		if (is_tcf_gact_ok(a)) {
 			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
 			if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 3a96307f51b0..2ab9cf25a08a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -6234,6 +6234,17 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
 	mlxsw_sp_vr_put(mlxsw_sp, vr);
 }
 
+void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
+				 struct net_device *dev)
+{
+	struct mlxsw_sp_rif *rif;
+
+	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+	if (!rif)
+		return;
+	mlxsw_sp_rif_destroy(rif);
+}
+
 static void
 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
 				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 0d8444aaba01..db715da7bab7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -127,6 +127,24 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
 	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
 }
 
+static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
+						    void *data)
+{
+	struct mlxsw_sp *mlxsw_sp = data;
+
+	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
+	return 0;
+}
+
+static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
+						struct net_device *dev)
+{
+	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
+	netdev_walk_all_upper_dev_rcu(dev,
+				      mlxsw_sp_bridge_device_upper_rif_destroy,
+				      mlxsw_sp);
+}
+
 static struct mlxsw_sp_bridge_device *
 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
 			      struct net_device *br_dev)
@@ -165,6 +183,8 @@ static void
 mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
 			       struct mlxsw_sp_bridge_device *bridge_device)
 {
+	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
+					    bridge_device->dev);
 	list_del(&bridge_device->list);
 	if (bridge_device->vlan_enabled)
 		bridge->vlan_enabled_exists = false;
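The switchdev hunk above plugs the RIF leak by destroying the RIF of the bridge device itself and of every device stacked on top of it when the bridge is torn down. netdev_walk_all_upper_dev_rcu() invokes the supplied callback once per upper device and stops early on a non-zero return; a usage sketch with hypothetical names (illustrative only):

	/* Illustrative only: the callback runs for each upper device
	 * (e.g. a VLAN on top of the bridge); returning non-zero aborts
	 * the walk.
	 */
	static int example_visit_upper(struct net_device *upper, void *data)
	{
		netdev_dbg(upper, "upper device of %s\n",
			   ((struct net_device *)data)->name);
		return 0;	/* keep walking */
	}

	static void example_walk_uppers(struct net_device *dev)
	{
		rcu_read_lock();
		netdev_walk_all_upper_dev_rcu(dev, example_visit_upper, dev);
		rcu_read_unlock();
	}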
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 0ba0356ec4e6..9044496803e6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -796,11 +796,10 @@ int nfp_flower_compile_action(struct nfp_app *app,
 			      struct net_device *netdev,
 			      struct nfp_fl_payload *nfp_flow)
 {
-	int act_len, act_cnt, err, tun_out_cnt, out_cnt;
+	int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
 	enum nfp_flower_tun_type tun_type;
 	const struct tc_action *a;
 	u32 csum_updated = 0;
-	LIST_HEAD(actions);
 
 	memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
 	nfp_flow->meta.act_len = 0;
@@ -810,8 +809,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
 	tun_out_cnt = 0;
 	out_cnt = 0;
 
-	tcf_exts_to_list(flow->exts, &actions);
-	list_for_each_entry(a, &actions, list) {
+	tcf_exts_for_each_action(i, a, flow->exts) {
 		err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
 					     netdev, &tun_type, &tun_out_cnt,
 					     &out_cnt, &csum_updated);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index d9ab5add27a8..34193c2f1699 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -407,7 +407,7 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
 
 	if (i == QED_INIT_MAX_POLL_COUNT) {
 		DP_ERR(p_hwfn,
-		       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
+		       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
 		       addr, le32_to_cpu(cmd->expected_val),
 		       val, le32_to_cpu(cmd->op_data));
 	}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index d89a0e22f6e4..5d37ec7e9b0b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -48,7 +48,7 @@
 #include "qed_reg_addr.h"
 #include "qed_sriov.h"
 
-#define CHIP_MCP_RESP_ITER_US	10
+#define QED_MCP_RESP_ITER_US	10
 
 #define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
 #define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */
@@ -183,18 +183,57 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn)
 	return 0;
 }
 
+/* Maximum of 1 sec to wait for the SHMEM ready indication */
+#define QED_MCP_SHMEM_RDY_MAX_RETRIES	20
+#define QED_MCP_SHMEM_RDY_ITER_MS	50
+
 static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
+	u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
+	u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
 	u32 drv_mb_offsize, mfw_mb_offsize;
 	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
 
 	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
-	if (!p_info->public_base)
-		return 0;
+	if (!p_info->public_base) {
+		DP_NOTICE(p_hwfn,
+			  "The address of the MCP scratch-pad is not configured\n");
+		return -EINVAL;
+	}
 
 	p_info->public_base |= GRCBASE_MCP;
 
+	/* Get the MFW MB address and number of supported messages */
+	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
+				SECTION_OFFSIZE_ADDR(p_info->public_base,
+						     PUBLIC_MFW_MB));
+	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
+					    p_info->mfw_mb_addr +
+					    offsetof(struct public_mfw_mb,
+						     sup_msgs));
+
+	/* The driver can notify that there was an MCP reset, and might read the
+	 * SHMEM values before the MFW has completed initializing them.
+	 * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
+	 * data ready indication.
+	 */
+	while (!p_info->mfw_mb_length && --cnt) {
+		msleep(msec);
+		p_info->mfw_mb_length =
+			(u16)qed_rd(p_hwfn, p_ptt,
+				    p_info->mfw_mb_addr +
+				    offsetof(struct public_mfw_mb, sup_msgs));
+	}
+
+	if (!cnt) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to get the SHMEM ready notification after %d msec\n",
+			  QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
+		return -EBUSY;
+	}
+
 	/* Calculate the driver and MFW mailbox address */
 	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
 				SECTION_OFFSIZE_ADDR(p_info->public_base,
@@ -204,13 +243,6 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 		   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
 		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
 
-	/* Set the MFW MB address */
-	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
-				SECTION_OFFSIZE_ADDR(p_info->public_base,
-						     PUBLIC_MFW_MB));
-	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
-	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
-
 	/* Get the current driver mailbox sequence before sending
 	 * the first command
 	 */
@@ -285,9 +317,15 @@ static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
 
 int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
+	u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
 	int rc = 0;
 
+	if (p_hwfn->mcp_info->b_block_cmd) {
+		DP_NOTICE(p_hwfn,
+			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
+		return -EBUSY;
+	}
+
 	/* Ensure that only a single thread is accessing the mailbox */
 	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 
@@ -413,14 +451,41 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		    (p_mb_params->cmd | seq_num), p_mb_params->param);
 }
 
+static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
+{
+	p_hwfn->mcp_info->b_block_cmd = block_cmd;
+
+	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
+		block_cmd ? "Block" : "Unblock");
+}
+
+static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
+				   struct qed_ptt *p_ptt)
+{
+	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
+	u32 delay = QED_MCP_RESP_ITER_US;
+
+	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+	cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+	udelay(delay);
+	cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+	udelay(delay);
+	cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+
+	DP_NOTICE(p_hwfn,
+		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
+		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
+}
+
 static int
 _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		       struct qed_ptt *p_ptt,
 		       struct qed_mcp_mb_params *p_mb_params,
-		       u32 max_retries, u32 delay)
+		       u32 max_retries, u32 usecs)
 {
+	u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
 	struct qed_mcp_cmd_elem *p_cmd_elem;
-	u32 cnt = 0;
 	u16 seq_num;
 	int rc = 0;
 
@@ -443,7 +508,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 			goto err;
 
 		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
-		udelay(delay);
+
+		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
+			msleep(msecs);
+		else
+			udelay(usecs);
 	} while (++cnt < max_retries);
 
 	if (cnt >= max_retries) {
@@ -472,7 +541,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 	 * The spinlock stays locked until the list element is removed.
 	 */
 
-	udelay(delay);
+	if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
+		msleep(msecs);
+	else
+		udelay(usecs);
+
 	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 
 	if (p_cmd_elem->b_is_completed)
@@ -491,11 +564,15 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		DP_NOTICE(p_hwfn,
 			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
 			  p_mb_params->cmd, p_mb_params->param);
+		qed_mcp_print_cpu_info(p_hwfn, p_ptt);
 
 		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 		qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
 		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 
+		if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
+			qed_mcp_cmd_set_blocking(p_hwfn, true);
+
 		return -EAGAIN;
 	}
 
@@ -507,7 +584,7 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
 		   p_mb_params->mcp_resp,
 		   p_mb_params->mcp_param,
-		   (cnt * delay) / 1000, (cnt * delay) % 1000);
+		   (cnt * usecs) / 1000, (cnt * usecs) % 1000);
 
 	/* Clear the sequence number from the MFW response */
 	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
@@ -525,7 +602,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 {
 	size_t union_data_size = sizeof(union drv_union_data);
 	u32 max_retries = QED_DRV_MB_MAX_RETRIES;
-	u32 delay = CHIP_MCP_RESP_ITER_US;
+	u32 usecs = QED_MCP_RESP_ITER_US;
 
 	/* MCP not initialized */
 	if (!qed_mcp_is_init(p_hwfn)) {
@@ -533,6 +610,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		return -EBUSY;
 	}
 
+	if (p_hwfn->mcp_info->b_block_cmd) {
+		DP_NOTICE(p_hwfn,
+			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
+			  p_mb_params->cmd, p_mb_params->param);
+		return -EBUSY;
+	}
+
 	if (p_mb_params->data_src_size > union_data_size ||
 	    p_mb_params->data_dst_size > union_data_size) {
 		DP_ERR(p_hwfn,
@@ -542,8 +626,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		return -EINVAL;
 	}
 
+	if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
+		max_retries = DIV_ROUND_UP(max_retries, 1000);
+		usecs *= 1000;
+	}
+
 	return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
-				      delay);
+				      usecs);
 }
 
 int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
@@ -761,6 +850,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hwfn,
 	mb_params.data_src_size = sizeof(load_req);
 	mb_params.p_data_dst = &load_rsp;
 	mb_params.data_dst_size = sizeof(load_rsp);
+	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
 		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
@@ -982,7 +1072,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
 
 int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 wol_param, mcp_resp, mcp_param;
+	struct qed_mcp_mb_params mb_params;
+	u32 wol_param;
 
 	switch (p_hwfn->cdev->wol_config) {
 	case QED_OV_WOL_DISABLED:
@@ -1000,8 +1091,12 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
 	}
 
-	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
-			   &mcp_resp, &mcp_param);
+	memset(&mb_params, 0, sizeof(mb_params));
+	mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
+	mb_params.param = wol_param;
+	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
+
+	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 }
 
 int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -2077,31 +2172,65 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
 	return rc;
 }
 
+/* A maximal 100 msec waiting time for the MCP to halt */
+#define QED_MCP_HALT_SLEEP_MS		10
+#define QED_MCP_HALT_MAX_RETRIES	10
+
 int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 resp = 0, param = 0;
+	u32 resp = 0, param = 0, cpu_state, cnt = 0;
 	int rc;
 
 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
 			 &param);
-	if (rc)
+	if (rc) {
 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+		return rc;
+	}
 
-	return rc;
+	do {
+		msleep(QED_MCP_HALT_SLEEP_MS);
+		cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+		if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
+			break;
+	} while (++cnt < QED_MCP_HALT_MAX_RETRIES);
+
+	if (cnt == QED_MCP_HALT_MAX_RETRIES) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+			  qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
+		return -EBUSY;
+	}
+
+	qed_mcp_cmd_set_blocking(p_hwfn, true);
+
+	return 0;
 }
 
+#define QED_MCP_RESUME_SLEEP_MS	10
+
 int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 value, cpu_mode;
+	u32 cpu_mode, cpu_state;
 
 	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
 
-	value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
-	value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
-	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
 	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+	cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
+	msleep(QED_MCP_RESUME_SLEEP_MS);
+	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
 
-	return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
+	if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+			  cpu_mode, cpu_state);
+		return -EBUSY;
+	}
+
+	qed_mcp_cmd_set_blocking(p_hwfn, false);
+
+	return 0;
 }
 
 int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
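The qed_mcp.c changes above add a b_block_cmd latch: once the MFW stops responding (or is halted deliberately), further mailbox commands fail fast with -EBUSY instead of each timing out on its own. A sketch of how the halt/resume pair brackets that state, using the functions from the patch (the surrounding flow is illustrative, not code from the tree):

	int rc;

	rc = qed_mcp_halt(p_hwfn, p_ptt);	/* sets b_block_cmd on success */
	if (rc)
		return rc;

	/* MCP halted: qed_mcp_cmd() and qed_mcp_reset() now return -EBUSY */

	rc = qed_mcp_resume(p_hwfn, p_ptt);	/* clears b_block_cmd on success */
	if (rc)
		return rc;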
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 047976d5c6e9..85e6b3989e7a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -635,11 +635,14 @@ struct qed_mcp_info {
 	 */
 	spinlock_t cmd_lock;
 
+	/* Flag to indicate whether sending a MFW mailbox command is blocked */
+	bool b_block_cmd;
+
 	/* Spinlock used for syncing SW link-changes and link-changes
 	 * originating from attention context.
 	 */
 	spinlock_t link_lock;
-	bool block_mb_sending;
+
 	u32 public_base;
 	u32 drv_mb_addr;
 	u32 mfw_mb_addr;
@@ -660,14 +663,20 @@ struct qed_mcp_info {
 };
 
 struct qed_mcp_mb_params {
 	u32 cmd;
 	u32 param;
 	void *p_data_src;
-	u8 data_src_size;
 	void *p_data_dst;
+	u8 data_src_size;
 	u8 data_dst_size;
 	u32 mcp_resp;
 	u32 mcp_param;
+	u32 flags;
+#define QED_MB_FLAG_CAN_SLEEP	(0x1 << 0)
+#define QED_MB_FLAG_AVOID_BLOCK	(0x1 << 1)
+#define QED_MB_FLAGS_IS_SET(params, flag) \
+	({ typeof(params) __params = (params); \
+	   (__params && (__params->flags & QED_MB_FLAG_ ## flag)); })
 };
 
 struct qed_drv_tlv_hdr {
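QED_MB_FLAGS_IS_SET() above is a GCC statement-expression macro that NULL-checks the params pointer before testing the flag bit, so it is safe to call with a possibly-NULL p_mb_params. A usage sketch (the caller context is hypothetical; the macro and flags are from the patch):

	struct qed_mcp_mb_params mb_params = {};

	mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;

	if (QED_MB_FLAGS_IS_SET(&mb_params, CAN_SLEEP))
		msleep(10);	/* process context: a sleeping wait is allowed */
	else
		udelay(10);	/* atomic context: must busy-wait */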
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index d8ad2dcad8d5..f736f70956fd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -562,8 +562,10 @@
 	0
 #define MCP_REG_CPU_STATE \
 	0xe05004UL
+#define MCP_REG_CPU_STATE_SOFT_HALTED	(0x1UL << 10)
 #define MCP_REG_CPU_EVENT_MASK \
 	0xe05008UL
+#define MCP_REG_CPU_PROGRAM_COUNTER	0xe0501cUL
 #define PGLUE_B_REG_PF_BAR0_SIZE \
 	0x2aae60UL
 #define PGLUE_B_REG_PF_BAR1_SIZE \
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index 9673d19308e6..b16ce7d93caf 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -2006,18 +2006,16 @@ unlock:
 static int qede_parse_actions(struct qede_dev *edev,
 			      struct tcf_exts *exts)
 {
-	int rc = -EINVAL, num_act = 0;
+	int rc = -EINVAL, num_act = 0, i;
 	const struct tc_action *a;
 	bool is_drop = false;
-	LIST_HEAD(actions);
 
 	if (!tcf_exts_has_actions(exts)) {
 		DP_NOTICE(edev, "No tc actions received\n");
 		return rc;
 	}
 
-	tcf_exts_to_list(exts, &actions);
-	list_for_each_entry(a, &actions, list) {
+	tcf_exts_for_each_action(i, a, exts) {
 		num_act++;
 
 		if (is_tcf_gact_shot(a))
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 353f1c129af1..059ba9429e51 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2384,26 +2384,20 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
 	return status;
 }
 
-static netdev_features_t qlge_fix_features(struct net_device *ndev,
-					   netdev_features_t features)
-{
-	int err;
-
-	/* Update the behavior of vlan accel in the adapter */
-	err = qlge_update_hw_vlan_features(ndev, features);
-	if (err)
-		return err;
-
-	return features;
-}
-
 static int qlge_set_features(struct net_device *ndev,
 			     netdev_features_t features)
 {
 	netdev_features_t changed = ndev->features ^ features;
+	int err;
+
+	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
+		/* Update the behavior of vlan accel in the adapter */
+		err = qlge_update_hw_vlan_features(ndev, features);
+		if (err)
+			return err;
 
-	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
 		qlge_vlan_mode(ndev, features);
+	}
 
 	return 0;
 }
@@ -4719,7 +4713,6 @@ static const struct net_device_ops qlge_netdev_ops = {
 	.ndo_set_mac_address	= qlge_set_mac_address,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_tx_timeout		= qlge_tx_timeout,
-	.ndo_fix_features	= qlge_fix_features,
 	.ndo_set_features	= qlge_set_features,
 	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index b81f4faf7b10..1470fc12282b 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Renesas Ethernet AVB device driver
  *
  * Copyright (C) 2014-2015 Renesas Electronics Corporation
@@ -5,10 +6,6 @@
  * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
  *
  * Based on the SuperH Ethernet driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
  */
 
 #ifndef __RAVB_H__
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index c06f2df895c2..aff5516b781e 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Renesas Ethernet AVB device driver
  *
  * Copyright (C) 2014-2015 Renesas Electronics Corporation
@@ -5,10 +6,6 @@
  * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
  *
  * Based on the SuperH Ethernet driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
  */
 
 #include <linux/cache.h>
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 5573199c4536..ad4433d59237 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* SuperH Ethernet device driver
  *
  * Copyright (C) 2014 Renesas Electronics Corporation
@@ -5,18 +6,6 @@
  * Copyright (C) 2008-2014 Renesas Solutions Corp.
  * Copyright (C) 2013-2017 Cogent Embedded, Inc.
  * Copyright (C) 2014 Codethink Limited
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
  */
 
 #include <linux/module.h>
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index f94be99cf400..0c18650bbfe6 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* SuperH Ethernet device driver
  *
  * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
  * Copyright (C) 2008-2012 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
  */
 
 #ifndef __SH_ETH_H__
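The four Renesas hunks above replace the verbose GPL notices with SPDX license identifiers on the first line of each file. By kernel convention, C source files use the C++-style comment form while headers keep the block-comment form; a minimal sketch of the two variants:

	// SPDX-License-Identifier: GPL-2.0
	/* ... first line of a .c file ... */

	/* SPDX-License-Identifier: GPL-2.0 */
	/* ... first line of a .h file ... */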
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index edf20361ea5f..bf4acebb6bcd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -33,7 +33,7 @@ config DWMAC_DWC_QOS_ETH
 	select PHYLIB
 	select CRC32
 	select MII
-	depends on OF && COMMON_CLK && HAS_DMA
+	depends on OF && HAS_DMA
 	help
 	  Support for chips using the snps,dwc-qos-ethernet.txt DT binding.
 
@@ -57,7 +57,7 @@ config DWMAC_ANARION
 config DWMAC_IPQ806X
 	tristate "QCA IPQ806x DWMAC support"
 	default ARCH_QCOM
-	depends on OF && COMMON_CLK && (ARCH_QCOM || COMPILE_TEST)
+	depends on OF && (ARCH_QCOM || COMPILE_TEST)
 	select MFD_SYSCON
 	help
 	  Support for QCA IPQ806X DWMAC Ethernet.
@@ -100,7 +100,7 @@ config DWMAC_OXNAS
 config DWMAC_ROCKCHIP
 	tristate "Rockchip dwmac support"
 	default ARCH_ROCKCHIP
-	depends on OF && COMMON_CLK && (ARCH_ROCKCHIP || COMPILE_TEST)
+	depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST)
 	select MFD_SYSCON
 	help
 	  Support for Ethernet controller on Rockchip RK3288 SoC.
@@ -123,7 +123,7 @@ config DWMAC_SOCFPGA
 config DWMAC_STI
 	tristate "STi GMAC support"
 	default ARCH_STI
-	depends on OF && COMMON_CLK && (ARCH_STI || COMPILE_TEST)
+	depends on OF && (ARCH_STI || COMPILE_TEST)
 	select MFD_SYSCON
 	---help---
 	  Support for ethernet controller on STi SOCs.
@@ -147,7 +147,7 @@ config DWMAC_STM32
 config DWMAC_SUNXI
 	tristate "Allwinner GMAC support"
 	default ARCH_SUNXI
-	depends on OF && COMMON_CLK && (ARCH_SUNXI || COMPILE_TEST)
+	depends on OF && (ARCH_SUNXI || COMPILE_TEST)
 	---help---
 	  Support for Allwinner A20/A31 GMAC ethernet controllers.
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 1a96dd9c1091..531294f4978b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -61,7 +61,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry,
 	struct stmmac_tc_entry *action_entry = entry;
 	const struct tc_action *act;
 	struct tcf_exts *exts;
-	LIST_HEAD(actions);
+	int i;
 
 	exts = cls->knode.exts;
 	if (!tcf_exts_has_actions(exts))
@@ -69,8 +69,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry,
 	if (frag)
 		action_entry = frag;
 
-	tcf_exts_to_list(exts, &actions);
-	list_for_each_entry(act, &actions, list) {
+	tcf_exts_for_each_action(i, act, exts) {
 		/* Accept */
 		if (is_tcf_gact_ok(act)) {
 			action_entry->val.af = 1;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 507f68190cb1..1121a1ec407c 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -29,6 +29,7 @@
 #include <linux/netdevice.h>
 #include <linux/inetdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/pci.h>
 #include <linux/skbuff.h>
 #include <linux/if_vlan.h>
 #include <linux/in.h>
@@ -2039,12 +2040,16 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
 {
 	struct net_device *ndev;
 	struct net_device_context *net_device_ctx;
+	struct device *pdev = vf_netdev->dev.parent;
 	struct netvsc_device *netvsc_dev;
 	int ret;
 
 	if (vf_netdev->addr_len != ETH_ALEN)
 		return NOTIFY_DONE;
 
+	if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev))
+		return NOTIFY_DONE;
+
 	/*
 	 * We will use the MAC address to locate the synthetic interface to
 	 * associate with the VF interface. If we don't find a matching
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 97742708460b..2cd71bdb6484 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -5217,8 +5217,8 @@ static int rtl8152_probe(struct usb_interface *intf,
 		netdev->hw_features &= ~NETIF_F_RXCSUM;
 	}
 
-	if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 &&
-	    udev->serial && !strcmp(udev->serial, "000001000000")) {
+	if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
+	    (!strcmp(udev->serial, "000001000000") || !strcmp(udev->serial, "000002000000"))) {
 		dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation");
 		set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
 	}