aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-09-11 17:33:16 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-09-11 17:33:16 -0400
commitbbda1baeeb2f4aff3addac3d086a1e56c3f2503e (patch)
treeaca955046da89c24f612e8c7ee177ef6bf6efbd5
parent2b76db6a0f649f5a54805807d36d51b6e9e49089 (diff)
parentf3ad857e3da1abaea780dc892b592cd86c541c52 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller: 1) Brown paper bag fix in HTB scheduler, class options set incorrectly due to a typoe. Fix from Vimalkumar. 2) It's possible for the ipv6 FIB garbage collector to run before all the necessary datastructure are setup during init, defer the notifier registry to avoid this problem. Fix from Michal Kubecek. 3) New i40e ethernet driver from the Intel folks. 4) Add new qmi wwan device IDs, from Bjørn Mork. 5) Doorbell lock in bnx2x driver is not initialized properly in some configurations, fix from Ariel Elior. 6) Revert an ipv6 packet option padding change that broke standardized ipv6 implementation test suites. From Jiri Pirko. 7) Fix synchronization of ARP information in bonding layer, from Nikolay Aleksandrov. 8) Fix missing error return resulting in illegal memory accesses in openvswitch, from Daniel Borkmann. 9) SCTP doesn't signal poll events properly due to mistaken operator precedence, fix also from Daniel Borkmann. 10) __netdev_pick_tx() passes wrong index to sk_tx_queue_set() which essentially disables caching of TX queue in sockets :-/ Fix from Eric Dumazet. 
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (29 commits) net_sched: htb: fix a typo in htb_change_class() net: qmi_wwan: add new Qualcomm devices ipv6: don't call fib6_run_gc() until routing is ready net: tilegx driver: avoid compiler warning fib6_rules: fix indentation irda: vlsi_ir: Remove casting the return value which is a void pointer irda: donauboe: Remove casting the return value which is a void pointer net: fix multiqueue selection net: sctp: fix smatch warning in sctp_send_asconf_del_ip net: sctp: fix bug in sctp_poll for SOCK_SELECT_ERR_QUEUE net: fib: fib6_add: fix potential NULL pointer dereference net: ovs: flow: fix potential illegal memory access in __parse_flow_nlattrs bcm63xx_enet: remove deprecated IRQF_DISABLED net: korina: remove deprecated IRQF_DISABLED macvlan: Move skb_clone check closer to call qlcnic: Fix warning reported by kbuild test robot. bonding: fix bond_arp_rcv setting and arp validate desync state bonding: fix store_arp_validate race with mode change ipv6/exthdrs: accept tlv which includes only padding bnx2x: avoid atomic allocations during initialization ...
-rw-r--r--Documentation/networking/00-INDEX2
-rw-r--r--Documentation/networking/i40e.txt115
-rw-r--r--MAINTAINERS3
-rw-r--r--drivers/net/bonding/bond_main.c4
-rw-r--r--drivers/net/bonding/bond_sysfs.c31
-rw-r--r--drivers/net/bonding/bonding.h1
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c38
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c3
-rw-r--r--drivers/net/ethernet/intel/Kconfig18
-rw-r--r--drivers/net/ethernet/intel/Makefile1
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile44
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h558
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c983
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h112
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h2076
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_alloc.h59
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c2041
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c2076
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.c131
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.h52
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c1449
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.c366
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.h245
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c1006
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h169
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c7375
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c391
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_osdep.h82
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h239
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h4688
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_status.h101
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c1817
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h259
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h1154
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl.h368
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c2335
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h120
-rw-r--r--drivers/net/ethernet/korina.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h2
-rw-r--r--drivers/net/ethernet/tile/tilegx.c6
-rw-r--r--drivers/net/irda/donauboe.c6
-rw-r--r--drivers/net/irda/vlsi_ir.c2
-rw-r--r--drivers/net/macvlan.c10
-rw-r--r--drivers/net/usb/qmi_wwan.c130
-rw-r--r--include/net/ndisc.h2
-rw-r--r--net/core/flow_dissector.c2
-rw-r--r--net/ipv6/af_inet6.c6
-rw-r--r--net/ipv6/exthdrs.c6
-rw-r--r--net/ipv6/fib6_rules.c4
-rw-r--r--net/ipv6/ip6_fib.c2
-rw-r--r--net/ipv6/ndisc.c18
-rw-r--r--net/openvswitch/flow.c1
-rw-r--r--net/sched/sch_htb.c2
-rw-r--r--net/sctp/socket.c5
56 files changed, 30666 insertions, 67 deletions
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index 18b64b2b8a68..f11580f8719a 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -86,6 +86,8 @@ generic_netlink.txt
86 - info on Generic Netlink 86 - info on Generic Netlink
87gianfar.txt 87gianfar.txt
88 - Gianfar Ethernet Driver. 88 - Gianfar Ethernet Driver.
89i40e.txt
90 - README for the Intel Ethernet Controller XL710 Driver (i40e).
89ieee802154.txt 91ieee802154.txt
90 - Linux IEEE 802.15.4 implementation, API and drivers 92 - Linux IEEE 802.15.4 implementation, API and drivers
91igb.txt 93igb.txt
diff --git a/Documentation/networking/i40e.txt b/Documentation/networking/i40e.txt
new file mode 100644
index 000000000000..f737273c6dc1
--- /dev/null
+++ b/Documentation/networking/i40e.txt
@@ -0,0 +1,115 @@
1Linux Base Driver for the Intel(R) Ethernet Controller XL710 Family
2===================================================================
3
4Intel i40e Linux driver.
5Copyright(c) 2013 Intel Corporation.
6
7Contents
8========
9
10- Identifying Your Adapter
11- Additional Configurations
12- Performance Tuning
13- Known Issues
14- Support
15
16
17Identifying Your Adapter
18========================
19
20The driver in this release is compatible with the Intel Ethernet
21Controller XL710 Family.
22
23For more information on how to identify your adapter, go to the Adapter &
24Driver ID Guide at:
25
26 http://support.intel.com/support/network/sb/CS-012904.htm
27
28
29Enabling the driver
30===================
31
32The driver is enabled via the standard kernel configuration system,
33using the make command:
34
35 Make oldconfig/silentoldconfig/menuconfig/etc.
36
37The driver is located in the menu structure at:
38
39 -> Device Drivers
40 -> Network device support (NETDEVICES [=y])
41 -> Ethernet driver support
42 -> Intel devices
43 -> Intel(R) Ethernet Controller XL710 Family
44
45Additional Configurations
46=========================
47
48 Generic Receive Offload (GRO)
49 -----------------------------
50 The driver supports the in-kernel software implementation of GRO. GRO has
51 shown that by coalescing Rx traffic into larger chunks of data, CPU
52 utilization can be significantly reduced when under large Rx load. GRO is
53 an evolution of the previously-used LRO interface. GRO is able to coalesce
54 other protocols besides TCP. It's also safe to use with configurations that
55 are problematic for LRO, namely bridging and iSCSI.
56
57 Ethtool
58 -------
59 The driver utilizes the ethtool interface for driver configuration and
60 diagnostics, as well as displaying statistical information. The latest
61 ethtool version is required for this functionality.
62
63 The latest release of ethtool can be found from
64 https://www.kernel.org/pub/software/network/ethtool
65
66 Data Center Bridging (DCB)
67 --------------------------
68 DCB configuration is not currently supported.
69
70 FCoE
71 ----
72 Fiber Channel over Ethernet (FCoE) hardware offload is not currently
73 supported.
74
75 MAC and VLAN anti-spoofing feature
76 ----------------------------------
77 When a malicious driver attempts to send a spoofed packet, it is dropped by
78 the hardware and not transmitted. An interrupt is sent to the PF driver
79 notifying it of the spoof attempt.
80
81 When a spoofed packet is detected the PF driver will send the following
82 message to the system log (displayed by the "dmesg" command):
83
84 Spoof event(s) detected on VF (n)
85
86 Where n=the VF that attempted to do the spoofing.
87
88
89Performance Tuning
90==================
91
92An excellent article on performance tuning can be found at:
93
94http://www.redhat.com/promo/summit/2008/downloads/pdf/Thursday/Mark_Wagner.pdf
95
96
97Known Issues
98============
99
100
101Support
102=======
103
104For general information, go to the Intel support website at:
105
106 http://support.intel.com
107
108or the Intel Wired Networking project hosted by Sourceforge at:
109
110 http://e1000.sourceforge.net
111
112If an issue is identified with the released source code on the supported
113kernel with a supported adapter, email the specific information related
114to the issue to e1000-devel@lists.sourceforge.net and copy
115netdev@vger.kernel.org.
diff --git a/MAINTAINERS b/MAINTAINERS
index d721af119ff9..be70759e51c5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4355,7 +4355,7 @@ M: Deepak Saxena <dsaxena@plexity.net>
4355S: Maintained 4355S: Maintained
4356F: drivers/char/hw_random/ixp4xx-rng.c 4356F: drivers/char/hw_random/ixp4xx-rng.c
4357 4357
4358INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf) 4358INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e)
4359M: Jeff Kirsher <jeffrey.t.kirsher@intel.com> 4359M: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
4360M: Jesse Brandeburg <jesse.brandeburg@intel.com> 4360M: Jesse Brandeburg <jesse.brandeburg@intel.com>
4361M: Bruce Allan <bruce.w.allan@intel.com> 4361M: Bruce Allan <bruce.w.allan@intel.com>
@@ -4380,6 +4380,7 @@ F: Documentation/networking/igbvf.txt
4380F: Documentation/networking/ixgb.txt 4380F: Documentation/networking/ixgb.txt
4381F: Documentation/networking/ixgbe.txt 4381F: Documentation/networking/ixgbe.txt
4382F: Documentation/networking/ixgbevf.txt 4382F: Documentation/networking/ixgbevf.txt
4383F: Documentation/networking/i40e.txt
4383F: drivers/net/ethernet/intel/ 4384F: drivers/net/ethernet/intel/
4384 4385
4385INTEL PRO/WIRELESS 2100, 2200BG, 2915ABG NETWORK CONNECTION SUPPORT 4386INTEL PRO/WIRELESS 2100, 2200BG, 2915ABG NETWORK CONNECTION SUPPORT
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 39e5b1c7ffe2..72df399c4ab3 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2404,8 +2404,8 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
2404 slave->target_last_arp_rx[i] = jiffies; 2404 slave->target_last_arp_rx[i] = jiffies;
2405} 2405}
2406 2406
2407static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, 2407int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
2408 struct slave *slave) 2408 struct slave *slave)
2409{ 2409{
2410 struct arphdr *arp = (struct arphdr *)skb->data; 2410 struct arphdr *arp = (struct arphdr *)skb->data;
2411 unsigned char *arp_ptr; 2411 unsigned char *arp_ptr;
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index ce4677668e2c..eeab40b01b7a 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -349,6 +349,8 @@ static ssize_t bonding_store_mode(struct device *d,
349 goto out; 349 goto out;
350 } 350 }
351 351
352 /* don't cache arp_validate between modes */
353 bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
352 bond->params.mode = new_value; 354 bond->params.mode = new_value;
353 bond_set_mode_ops(bond, bond->params.mode); 355 bond_set_mode_ops(bond, bond->params.mode);
354 pr_info("%s: setting mode to %s (%d).\n", 356 pr_info("%s: setting mode to %s (%d).\n",
@@ -419,27 +421,39 @@ static ssize_t bonding_store_arp_validate(struct device *d,
419 struct device_attribute *attr, 421 struct device_attribute *attr,
420 const char *buf, size_t count) 422 const char *buf, size_t count)
421{ 423{
422 int new_value;
423 struct bonding *bond = to_bond(d); 424 struct bonding *bond = to_bond(d);
425 int new_value, ret = count;
424 426
427 if (!rtnl_trylock())
428 return restart_syscall();
425 new_value = bond_parse_parm(buf, arp_validate_tbl); 429 new_value = bond_parse_parm(buf, arp_validate_tbl);
426 if (new_value < 0) { 430 if (new_value < 0) {
427 pr_err("%s: Ignoring invalid arp_validate value %s\n", 431 pr_err("%s: Ignoring invalid arp_validate value %s\n",
428 bond->dev->name, buf); 432 bond->dev->name, buf);
429 return -EINVAL; 433 ret = -EINVAL;
434 goto out;
430 } 435 }
431 if (new_value && (bond->params.mode != BOND_MODE_ACTIVEBACKUP)) { 436 if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
432 pr_err("%s: arp_validate only supported in active-backup mode.\n", 437 pr_err("%s: arp_validate only supported in active-backup mode.\n",
433 bond->dev->name); 438 bond->dev->name);
434 return -EINVAL; 439 ret = -EINVAL;
440 goto out;
435 } 441 }
436 pr_info("%s: setting arp_validate to %s (%d).\n", 442 pr_info("%s: setting arp_validate to %s (%d).\n",
437 bond->dev->name, arp_validate_tbl[new_value].modename, 443 bond->dev->name, arp_validate_tbl[new_value].modename,
438 new_value); 444 new_value);
439 445
446 if (bond->dev->flags & IFF_UP) {
447 if (!new_value)
448 bond->recv_probe = NULL;
449 else if (bond->params.arp_interval)
450 bond->recv_probe = bond_arp_rcv;
451 }
440 bond->params.arp_validate = new_value; 452 bond->params.arp_validate = new_value;
453out:
454 rtnl_unlock();
441 455
442 return count; 456 return ret;
443} 457}
444 458
445static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate, 459static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate,
@@ -555,8 +569,8 @@ static ssize_t bonding_store_arp_interval(struct device *d,
555 struct device_attribute *attr, 569 struct device_attribute *attr,
556 const char *buf, size_t count) 570 const char *buf, size_t count)
557{ 571{
558 int new_value, ret = count;
559 struct bonding *bond = to_bond(d); 572 struct bonding *bond = to_bond(d);
573 int new_value, ret = count;
560 574
561 if (!rtnl_trylock()) 575 if (!rtnl_trylock())
562 return restart_syscall(); 576 return restart_syscall();
@@ -599,8 +613,13 @@ static ssize_t bonding_store_arp_interval(struct device *d,
599 * is called. 613 * is called.
600 */ 614 */
601 if (!new_value) { 615 if (!new_value) {
616 if (bond->params.arp_validate)
617 bond->recv_probe = NULL;
602 cancel_delayed_work_sync(&bond->arp_work); 618 cancel_delayed_work_sync(&bond->arp_work);
603 } else { 619 } else {
620 /* arp_validate can be set only in active-backup mode */
621 if (bond->params.arp_validate)
622 bond->recv_probe = bond_arp_rcv;
604 cancel_delayed_work_sync(&bond->mii_work); 623 cancel_delayed_work_sync(&bond->mii_work);
605 queue_delayed_work(bond->wq, &bond->arp_work, 0); 624 queue_delayed_work(bond->wq, &bond->arp_work, 0);
606 } 625 }
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index f7ab16185f68..7ad8bd5cc947 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -430,6 +430,7 @@ static inline bool slave_can_tx(struct slave *slave)
430 430
431struct bond_net; 431struct bond_net;
432 432
433int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
433struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); 434struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
434int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); 435int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
435void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id); 436void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id);
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 8ac48fbf8a66..b9a5fb6400d3 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -926,13 +926,13 @@ static int bcm_enet_open(struct net_device *dev)
926 if (ret) 926 if (ret)
927 goto out_phy_disconnect; 927 goto out_phy_disconnect;
928 928
929 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, IRQF_DISABLED, 929 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
930 dev->name, dev); 930 dev->name, dev);
931 if (ret) 931 if (ret)
932 goto out_freeirq; 932 goto out_freeirq;
933 933
934 ret = request_irq(priv->irq_tx, bcm_enet_isr_dma, 934 ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
935 IRQF_DISABLED, dev->name, dev); 935 0, dev->name, dev);
936 if (ret) 936 if (ret)
937 goto out_freeirq_rx; 937 goto out_freeirq_rx;
938 938
@@ -2156,13 +2156,13 @@ static int bcm_enetsw_open(struct net_device *dev)
2156 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); 2156 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2157 2157
2158 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 2158 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
2159 IRQF_DISABLED, dev->name, dev); 2159 0, dev->name, dev);
2160 if (ret) 2160 if (ret)
2161 goto out_freeirq; 2161 goto out_freeirq;
2162 2162
2163 if (priv->irq_tx != -1) { 2163 if (priv->irq_tx != -1) {
2164 ret = request_irq(priv->irq_tx, bcm_enet_isr_dma, 2164 ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
2165 IRQF_DISABLED, dev->name, dev); 2165 0, dev->name, dev);
2166 if (ret) 2166 if (ret)
2167 goto out_freeirq_rx; 2167 goto out_freeirq_rx;
2168 } 2168 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 2361bf236ce3..90045c920d09 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -490,10 +490,10 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
490 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs; 490 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
491} 491}
492 492
493static int bnx2x_alloc_rx_sge(struct bnx2x *bp, 493static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
494 struct bnx2x_fastpath *fp, u16 index) 494 u16 index, gfp_t gfp_mask)
495{ 495{
496 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT); 496 struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
497 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; 497 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
498 struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; 498 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
499 dma_addr_t mapping; 499 dma_addr_t mapping;
@@ -572,7 +572,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
572 572
573 /* If we fail to allocate a substitute page, we simply stop 573 /* If we fail to allocate a substitute page, we simply stop
574 where we are and drop the whole packet */ 574 where we are and drop the whole packet */
575 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx); 575 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
576 if (unlikely(err)) { 576 if (unlikely(err)) {
577 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; 577 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
578 return err; 578 return err;
@@ -616,12 +616,17 @@ static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
616 kfree(data); 616 kfree(data);
617} 617}
618 618
619static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp) 619static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
620{ 620{
621 if (fp->rx_frag_size) 621 if (fp->rx_frag_size) {
622 /* GFP_KERNEL allocations are used only during initialization */
623 if (unlikely(gfp_mask & __GFP_WAIT))
624 return (void *)__get_free_page(gfp_mask);
625
622 return netdev_alloc_frag(fp->rx_frag_size); 626 return netdev_alloc_frag(fp->rx_frag_size);
627 }
623 628
624 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC); 629 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
625} 630}
626 631
627#ifdef CONFIG_INET 632#ifdef CONFIG_INET
@@ -701,7 +706,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
701 goto drop; 706 goto drop;
702 707
703 /* Try to allocate the new data */ 708 /* Try to allocate the new data */
704 new_data = bnx2x_frag_alloc(fp); 709 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
705 /* Unmap skb in the pool anyway, as we are going to change 710 /* Unmap skb in the pool anyway, as we are going to change
706 pool entry status to BNX2X_TPA_STOP even if new skb allocation 711 pool entry status to BNX2X_TPA_STOP even if new skb allocation
707 fails. */ 712 fails. */
@@ -752,15 +757,15 @@ drop:
752 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++; 757 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
753} 758}
754 759
755static int bnx2x_alloc_rx_data(struct bnx2x *bp, 760static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
756 struct bnx2x_fastpath *fp, u16 index) 761 u16 index, gfp_t gfp_mask)
757{ 762{
758 u8 *data; 763 u8 *data;
759 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; 764 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
760 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; 765 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
761 dma_addr_t mapping; 766 dma_addr_t mapping;
762 767
763 data = bnx2x_frag_alloc(fp); 768 data = bnx2x_frag_alloc(fp, gfp_mask);
764 if (unlikely(data == NULL)) 769 if (unlikely(data == NULL))
765 return -ENOMEM; 770 return -ENOMEM;
766 771
@@ -953,7 +958,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
953 memcpy(skb->data, data + pad, len); 958 memcpy(skb->data, data + pad, len);
954 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); 959 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
955 } else { 960 } else {
956 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) { 961 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
962 GFP_ATOMIC) == 0)) {
957 dma_unmap_single(&bp->pdev->dev, 963 dma_unmap_single(&bp->pdev->dev,
958 dma_unmap_addr(rx_buf, mapping), 964 dma_unmap_addr(rx_buf, mapping),
959 fp->rx_buf_size, 965 fp->rx_buf_size,
@@ -1313,7 +1319,8 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1313 struct sw_rx_bd *first_buf = 1319 struct sw_rx_bd *first_buf =
1314 &tpa_info->first_buf; 1320 &tpa_info->first_buf;
1315 1321
1316 first_buf->data = bnx2x_frag_alloc(fp); 1322 first_buf->data =
1323 bnx2x_frag_alloc(fp, GFP_KERNEL);
1317 if (!first_buf->data) { 1324 if (!first_buf->data) {
1318 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n", 1325 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1319 j); 1326 j);
@@ -1335,7 +1342,8 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1335 for (i = 0, ring_prod = 0; 1342 for (i = 0, ring_prod = 0;
1336 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) { 1343 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1337 1344
1338 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) { 1345 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1346 GFP_KERNEL) < 0) {
1339 BNX2X_ERR("was only able to allocate %d rx sges\n", 1347 BNX2X_ERR("was only able to allocate %d rx sges\n",
1340 i); 1348 i);
1341 BNX2X_ERR("disabling TPA for queue[%d]\n", 1349 BNX2X_ERR("disabling TPA for queue[%d]\n",
@@ -4221,7 +4229,7 @@ static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4221 * fp->eth_q_stats.rx_skb_alloc_failed = 0 4229 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4222 */ 4230 */
4223 for (i = 0; i < rx_ring_size; i++) { 4231 for (i = 0; i < rx_ring_size; i++) {
4224 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) { 4232 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4225 failure_cnt++; 4233 failure_cnt++;
4226 continue; 4234 continue;
4227 } 4235 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 634a793c1c46..2f8dbbbd7a86 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -7645,6 +7645,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
7645 7645
7646 bnx2x_init_block(bp, BLOCK_TM, init_phase); 7646 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7647 bnx2x_init_block(bp, BLOCK_DORQ, init_phase); 7647 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
7648 REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */
7648 7649
7649 bnx2x_iov_init_dq(bp); 7650 bnx2x_iov_init_dq(bp);
7650 7651
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index b26eb83069b6..2604b6204abe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1756,9 +1756,6 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
1756 REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); 1756 REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
1757 REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); 1757 REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
1758 1758
1759 /* set the number of VF allowed doorbells to the full DQ range */
1760 REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
1761
1762 /* set the VF doorbell threshold */ 1759 /* set the VF doorbell threshold */
1763 REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4); 1760 REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
1764} 1761}
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index f0e7ed20a750..149ac85b5f9e 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -241,4 +241,22 @@ config IXGBEVF
241 will be called ixgbevf. MSI-X interrupt support is required 241 will be called ixgbevf. MSI-X interrupt support is required
242 for this driver to work correctly. 242 for this driver to work correctly.
243 243
244config I40E
245 tristate "Intel(R) Ethernet Controller XL710 Family support"
246 depends on PCI
247 ---help---
248 This driver supports Intel(R) Ethernet Controller XL710 Family of
249 devices. For more information on how to identify your adapter, go
250 to the Adapter & Driver ID Guide at:
251
252 <http://support.intel.com/support/network/adapter/pro100/21397.htm>
253
254 For general information and support, go to the Intel support
255 website at:
256
257 <http://support.intel.com>
258
259 To compile this driver as a module, choose M here. The module
260 will be called i40e.
261
244endif # NET_VENDOR_INTEL 262endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index c8210e688669..5bae933efc7c 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_IGB) += igb/
9obj-$(CONFIG_IGBVF) += igbvf/ 9obj-$(CONFIG_IGBVF) += igbvf/
10obj-$(CONFIG_IXGBE) += ixgbe/ 10obj-$(CONFIG_IXGBE) += ixgbe/
11obj-$(CONFIG_IXGBEVF) += ixgbevf/ 11obj-$(CONFIG_IXGBEVF) += ixgbevf/
12obj-$(CONFIG_I40E) += i40e/
12obj-$(CONFIG_IXGB) += ixgb/ 13obj-$(CONFIG_IXGB) += ixgb/
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
new file mode 100644
index 000000000000..479b2c4e552d
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -0,0 +1,44 @@
1################################################################################
2#
3# Intel Ethernet Controller XL710 Family Linux Driver
4# Copyright(c) 2013 Intel Corporation.
5#
6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License,
8# version 2, as published by the Free Software Foundation.
9#
10# This program is distributed in the hope it will be useful, but WITHOUT
11# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13# more details.
14#
15# You should have received a copy of the GNU General Public License along with
16# this program; if not, write to the Free Software Foundation, Inc.,
17# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18#
19# The full GNU General Public License is included in this distribution in
20# the file called "COPYING".
21#
22# Contact Information:
23# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25#
26################################################################################
27
28#
29# Makefile for the Intel(R) Ethernet Connection XL710 (i40e.ko) driver
30#
31
32obj-$(CONFIG_I40E) += i40e.o
33
34i40e-objs := i40e_main.o \
35 i40e_ethtool.o \
36 i40e_adminq.o \
37 i40e_common.o \
38 i40e_hmc.o \
39 i40e_lan_hmc.o \
40 i40e_nvm.o \
41 i40e_debugfs.o \
42 i40e_diag.o \
43 i40e_txrx.o \
44 i40e_virtchnl_pf.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
new file mode 100644
index 000000000000..b5252eb8a6c7
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -0,0 +1,558 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#ifndef _I40E_H_
29#define _I40E_H_
30
31#include <net/tcp.h>
32#include <linux/init.h>
33#include <linux/types.h>
34#include <linux/errno.h>
35#include <linux/module.h>
36#include <linux/pci.h>
37#include <linux/aer.h>
38#include <linux/netdevice.h>
39#include <linux/ioport.h>
40#include <linux/slab.h>
41#include <linux/list.h>
42#include <linux/string.h>
43#include <linux/in.h>
44#include <linux/ip.h>
45#include <linux/tcp.h>
46#include <linux/sctp.h>
47#include <linux/pkt_sched.h>
48#include <linux/ipv6.h>
49#include <linux/version.h>
50#include <net/checksum.h>
51#include <net/ip6_checksum.h>
52#include <linux/ethtool.h>
53#include <linux/if_vlan.h>
54#include "i40e_type.h"
55#include "i40e_prototype.h"
56#include "i40e_virtchnl.h"
57#include "i40e_virtchnl_pf.h"
58#include "i40e_txrx.h"
59
60/* Useful i40e defaults */
61#define I40E_BASE_PF_SEID 16
62#define I40E_BASE_VSI_SEID 512
63#define I40E_BASE_VEB_SEID 288
64#define I40E_MAX_VEB 16
65
66#define I40E_MAX_NUM_DESCRIPTORS 4096
67#define I40E_MAX_REGISTER 0x0038FFFF
68#define I40E_DEFAULT_NUM_DESCRIPTORS 512
69#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
70#define I40E_MIN_NUM_DESCRIPTORS 64
71#define I40E_MIN_MSIX 2
72#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
73#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */
74#define I40E_DEFAULT_QUEUES_PER_VF 4
75#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
76#define I40E_FDIR_RING 0
77#define I40E_FDIR_RING_COUNT 32
78#define I40E_MAX_AQ_BUF_SIZE 4096
79#define I40E_AQ_LEN 32
80#define I40E_AQ_WORK_LIMIT 16
81#define I40E_MAX_USER_PRIORITY 8
82#define I40E_DEFAULT_MSG_ENABLE 4
83
84#define I40E_NVM_VERSION_LO_SHIFT 0
85#define I40E_NVM_VERSION_LO_MASK (0xf << I40E_NVM_VERSION_LO_SHIFT)
86#define I40E_NVM_VERSION_MID_SHIFT 4
87#define I40E_NVM_VERSION_MID_MASK (0xff << I40E_NVM_VERSION_MID_SHIFT)
88#define I40E_NVM_VERSION_HI_SHIFT 12
89#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
90
91/* magic for getting defines into strings */
92#define STRINGIFY(foo) #foo
93#define XSTRINGIFY(bar) STRINGIFY(bar)
94
95#ifndef ARCH_HAS_PREFETCH
96#define prefetch(X)
97#endif
98
99#define I40E_RX_DESC(R, i) \
100 ((ring_is_16byte_desc_enabled(R)) \
101 ? (union i40e_32byte_rx_desc *) \
102 (&(((union i40e_16byte_rx_desc *)((R)->desc))[i])) \
103 : (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])))
104#define I40E_TX_DESC(R, i) \
105 (&(((struct i40e_tx_desc *)((R)->desc))[i]))
106#define I40E_TX_CTXTDESC(R, i) \
107 (&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
108#define I40E_TX_FDIRDESC(R, i) \
109 (&(((struct i40e_filter_program_desc *)((R)->desc))[i]))
110
111/* default to trying for four seconds */
112#define I40E_TRY_LINK_TIMEOUT (4 * HZ)
113
114/* driver state flags */
115enum i40e_state_t {
116 __I40E_TESTING,
117 __I40E_CONFIG_BUSY,
118 __I40E_CONFIG_DONE,
119 __I40E_DOWN,
120 __I40E_NEEDS_RESTART,
121 __I40E_SERVICE_SCHED,
122 __I40E_ADMINQ_EVENT_PENDING,
123 __I40E_MDD_EVENT_PENDING,
124 __I40E_VFLR_EVENT_PENDING,
125 __I40E_RESET_RECOVERY_PENDING,
126 __I40E_RESET_INTR_RECEIVED,
127 __I40E_REINIT_REQUESTED,
128 __I40E_PF_RESET_REQUESTED,
129 __I40E_CORE_RESET_REQUESTED,
130 __I40E_GLOBAL_RESET_REQUESTED,
131 __I40E_FILTER_OVERFLOW_PROMISC,
132};
133
134enum i40e_interrupt_policy {
135 I40E_INTERRUPT_BEST_CASE,
136 I40E_INTERRUPT_MEDIUM,
137 I40E_INTERRUPT_LOWEST
138};
139
140struct i40e_lump_tracking {
141 u16 num_entries;
142 u16 search_hint;
143 u16 list[0];
144#define I40E_PILE_VALID_BIT 0x8000
145};
146
147#define I40E_DEFAULT_ATR_SAMPLE_RATE 20
148#define I40E_FDIR_MAX_RAW_PACKET_LOOKUP 512
149struct i40e_fdir_data {
150 u16 q_index;
151 u8 flex_off;
152 u8 pctype;
153 u16 dest_vsi;
154 u8 dest_ctl;
155 u8 fd_status;
156 u16 cnt_index;
157 u32 fd_id;
158 u8 *raw_packet;
159};
160
161#define I40E_DCB_PRIO_TYPE_STRICT 0
162#define I40E_DCB_PRIO_TYPE_ETS 1
163#define I40E_DCB_STRICT_PRIO_CREDITS 127
164#define I40E_MAX_USER_PRIORITY 8
165/* DCB per TC information data structure */
166struct i40e_tc_info {
167 u16 qoffset; /* Queue offset from base queue */
168 u16 qcount; /* Total Queues */
169 u8 netdev_tc; /* Netdev TC index if netdev associated */
170};
171
172/* TC configuration data structure */
173struct i40e_tc_configuration {
174 u8 numtc; /* Total number of enabled TCs */
175 u8 enabled_tc; /* TC map */
176 struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS];
177};
178
179/* struct that defines the Ethernet device */
180struct i40e_pf {
181 struct pci_dev *pdev;
182 struct i40e_hw hw;
183 unsigned long state;
184 unsigned long link_check_timeout;
185 struct msix_entry *msix_entries;
186 u16 num_msix_entries;
187 bool fc_autoneg_status;
188
189 u16 eeprom_version;
190 u16 num_vmdq_vsis; /* num vmdq pools this pf has set up */
191 u16 num_vmdq_qps; /* num queue pairs per vmdq pool */
192 u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
193 u16 num_req_vfs; /* num vfs requested for this vf */
194 u16 num_vf_qps; /* num queue pairs per vf */
195 u16 num_tc_qps; /* num queue pairs per TC */
196 u16 num_lan_qps; /* num lan queues this pf has set up */
197 u16 num_lan_msix; /* num queue vectors for the base pf vsi */
198 u16 rss_size; /* num queues in the RSS array */
199 u16 rss_size_max; /* HW defined max RSS queues */
200 u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
201 u8 atr_sample_rate;
202
203 enum i40e_interrupt_policy int_policy;
204 u16 rx_itr_default;
205 u16 tx_itr_default;
206 u16 msg_enable;
207 char misc_int_name[IFNAMSIZ + 9];
208 u16 adminq_work_limit; /* num of admin receive queue desc to process */
209 int service_timer_period;
210 struct timer_list service_timer;
211 struct work_struct service_task;
212
213 u64 flags;
214#define I40E_FLAG_RX_CSUM_ENABLED (u64)(1 << 1)
215#define I40E_FLAG_MSI_ENABLED (u64)(1 << 2)
216#define I40E_FLAG_MSIX_ENABLED (u64)(1 << 3)
217#define I40E_FLAG_RX_1BUF_ENABLED (u64)(1 << 4)
218#define I40E_FLAG_RX_PS_ENABLED (u64)(1 << 5)
219#define I40E_FLAG_RSS_ENABLED (u64)(1 << 6)
220#define I40E_FLAG_MQ_ENABLED (u64)(1 << 7)
221#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 8)
222#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 9)
223#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 10)
224#define I40E_FLAG_IN_NETPOLL (u64)(1 << 13)
225#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 14)
226#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 15)
227#define I40E_FLAG_FILTER_SYNC (u64)(1 << 16)
228#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 18)
229#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 19)
230#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 20)
231#define I40E_FLAG_DCB_ENABLED (u64)(1 << 21)
232#define I40E_FLAG_FDIR_ENABLED (u64)(1 << 22)
233#define I40E_FLAG_FDIR_ATR_ENABLED (u64)(1 << 23)
234#define I40E_FLAG_MFP_ENABLED (u64)(1 << 27)
235
236 u16 num_tx_queues;
237 u16 num_rx_queues;
238
239 bool stat_offsets_loaded;
240 struct i40e_hw_port_stats stats;
241 struct i40e_hw_port_stats stats_offsets;
242 u32 tx_timeout_count;
243 u32 tx_timeout_recovery_level;
244 unsigned long tx_timeout_last_recovery;
245 u32 hw_csum_rx_error;
246 u32 led_status;
247 u16 corer_count; /* Core reset count */
248 u16 globr_count; /* Global reset count */
249 u16 empr_count; /* EMP reset count */
250 u16 pfr_count; /* PF reset count */
251
252 struct mutex switch_mutex;
253 u16 lan_vsi; /* our default LAN VSI */
254 u16 lan_veb; /* initial relay, if exists */
255#define I40E_NO_VEB 0xffff
256#define I40E_NO_VSI 0xffff
257 u16 next_vsi; /* Next unallocated VSI - 0-based! */
258 struct i40e_vsi **vsi;
259 struct i40e_veb *veb[I40E_MAX_VEB];
260
261 struct i40e_lump_tracking *qp_pile;
262 struct i40e_lump_tracking *irq_pile;
263
264 /* switch config info */
265 u16 pf_seid;
266 u16 main_vsi_seid;
267 u16 mac_seid;
268 struct i40e_aqc_get_switch_config_data *sw_config;
269 struct kobject *switch_kobj;
270#ifdef CONFIG_DEBUG_FS
271 struct dentry *i40e_dbg_pf;
272#endif /* CONFIG_DEBUG_FS */
273
274 /* sr-iov config info */
275 struct i40e_vf *vf;
276 int num_alloc_vfs; /* actual number of VFs allocated */
277 u32 vf_aq_requests;
278
279 /* DCBx/DCBNL capability for PF that indicates
280 * whether DCBx is managed by firmware or host
281 * based agent (LLDPAD). Also, indicates what
282 * flavor of DCBx protocol (IEEE/CEE) is supported
283 * by the device. For now we're supporting IEEE
284 * mode only.
285 */
286 u16 dcbx_cap;
287
288 u32 fcoe_hmc_filt_num;
289 u32 fcoe_hmc_cntx_num;
290 struct i40e_filter_control_settings filter_settings;
291};
292
293struct i40e_mac_filter {
294 struct list_head list;
295 u8 macaddr[ETH_ALEN];
296#define I40E_VLAN_ANY -1
297 s16 vlan;
298 u8 counter; /* number of instances of this filter */
299 bool is_vf; /* filter belongs to a VF */
300 bool is_netdev; /* filter belongs to a netdev */
301 bool changed; /* filter needs to be sync'd to the HW */
302};
303
304struct i40e_veb {
305 struct i40e_pf *pf;
306 u16 idx;
307 u16 veb_idx; /* index of VEB parent */
308 u16 seid;
309 u16 uplink_seid;
310 u16 stats_idx; /* index of VEB parent */
311 u8 enabled_tc;
312 u16 flags;
313 u16 bw_limit;
314 u8 bw_max_quanta;
315 bool is_abs_credits;
316 u8 bw_tc_share_credits[I40E_MAX_TRAFFIC_CLASS];
317 u16 bw_tc_limit_credits[I40E_MAX_TRAFFIC_CLASS];
318 u8 bw_tc_max_quanta[I40E_MAX_TRAFFIC_CLASS];
319 struct kobject *kobj;
320 bool stat_offsets_loaded;
321 struct i40e_eth_stats stats;
322 struct i40e_eth_stats stats_offsets;
323};
324
325/* struct that defines a VSI, associated with a dev */
326struct i40e_vsi {
327 struct net_device *netdev;
328 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
329 bool netdev_registered;
330 bool stat_offsets_loaded;
331
332 u32 current_netdev_flags;
333 unsigned long state;
334#define I40E_VSI_FLAG_FILTER_CHANGED (1<<0)
335#define I40E_VSI_FLAG_VEB_OWNER (1<<1)
336 unsigned long flags;
337
338 struct list_head mac_filter_list;
339
340 /* VSI stats */
341 struct rtnl_link_stats64 net_stats;
342 struct rtnl_link_stats64 net_stats_offsets;
343 struct i40e_eth_stats eth_stats;
344 struct i40e_eth_stats eth_stats_offsets;
345 u32 tx_restart;
346 u32 tx_busy;
347 u32 rx_buf_failed;
348 u32 rx_page_failed;
349
350 /* These are arrays of rings, allocated at run-time */
351 struct i40e_ring *rx_rings;
352 struct i40e_ring *tx_rings;
353
354 u16 work_limit;
355 /* high bit set means dynamic, use accessor routines to read/write.
356 * hardware only supports 2us resolution for the ITR registers.
357 * these values always store the USER setting, and must be converted
358 * before programming to a register.
359 */
360 u16 rx_itr_setting;
361 u16 tx_itr_setting;
362
363 u16 max_frame;
364 u16 rx_hdr_len;
365 u16 rx_buf_len;
366 u8 dtype;
367
368 /* List of q_vectors allocated to this VSI */
369 struct i40e_q_vector *q_vectors;
370 int num_q_vectors;
371 int base_vector;
372
373 u16 seid; /* HW index of this VSI (absolute index) */
374 u16 id; /* VSI number */
375 u16 uplink_seid;
376
377 u16 base_queue; /* vsi's first queue in hw array */
378 u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */
379 u16 num_queue_pairs; /* Used tx and rx pairs */
380 u16 num_desc;
381 enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */
382 u16 vf_id; /* Virtual function ID for SRIOV VSIs */
383
384 struct i40e_tc_configuration tc_config;
385 struct i40e_aqc_vsi_properties_data info;
386
387 /* VSI BW limit (absolute across all TCs) */
388 u16 bw_limit; /* VSI BW Limit (0 = disabled) */
389 u8 bw_max_quanta; /* Max Quanta when BW limit is enabled */
390
391 /* Relative TC credits across VSIs */
392 u8 bw_ets_share_credits[I40E_MAX_TRAFFIC_CLASS];
393 /* TC BW limit credits within VSI */
394 u16 bw_ets_limit_credits[I40E_MAX_TRAFFIC_CLASS];
395 /* TC BW limit max quanta within VSI */
396 u8 bw_ets_max_quanta[I40E_MAX_TRAFFIC_CLASS];
397
398 struct i40e_pf *back; /* Backreference to associated PF */
399 u16 idx; /* index in pf->vsi[] */
400 u16 veb_idx; /* index of VEB parent */
401 struct kobject *kobj; /* sysfs object */
402
403 /* VSI specific handlers */
404 irqreturn_t (*irq_handler)(int irq, void *data);
405} ____cacheline_internodealigned_in_smp;
406
407struct i40e_netdev_priv {
408 struct i40e_vsi *vsi;
409};
410
411/* struct that defines an interrupt vector */
412struct i40e_q_vector {
413 struct i40e_vsi *vsi;
414
415 u16 v_idx; /* index in the vsi->q_vector array. */
416 u16 reg_idx; /* register index of the interrupt */
417
418 struct napi_struct napi;
419
420 struct i40e_ring_container rx;
421 struct i40e_ring_container tx;
422
423 u8 num_ringpairs; /* total number of ring pairs in vector */
424
425 char name[IFNAMSIZ + 9];
426 cpumask_t affinity_mask;
427} ____cacheline_internodealigned_in_smp;
428
429/* lan device */
430struct i40e_device {
431 struct list_head list;
432 struct i40e_pf *pf;
433};
434
435/**
436 * i40e_fw_version_str - format the FW and NVM version strings
437 * @hw: ptr to the hardware info
438 **/
439static inline char *i40e_fw_version_str(struct i40e_hw *hw)
440{
441 static char buf[32];
442
443 snprintf(buf, sizeof(buf),
444 "f%d.%d a%d.%d n%02d.%02d.%02d e%08x",
445 hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
446 hw->aq.api_maj_ver, hw->aq.api_min_ver,
447 (hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
448 >> I40E_NVM_VERSION_HI_SHIFT,
449 (hw->nvm.version & I40E_NVM_VERSION_MID_MASK)
450 >> I40E_NVM_VERSION_MID_SHIFT,
451 (hw->nvm.version & I40E_NVM_VERSION_LO_MASK)
452 >> I40E_NVM_VERSION_LO_SHIFT,
453 hw->nvm.eetrack);
454
455 return buf;
456}
457
458/**
459 * i40e_netdev_to_pf: Retrieve the PF struct for given netdev
460 * @netdev: the corresponding netdev
461 *
462 * Return the PF struct for the given netdev
463 **/
464static inline struct i40e_pf *i40e_netdev_to_pf(struct net_device *netdev)
465{
466 struct i40e_netdev_priv *np = netdev_priv(netdev);
467 struct i40e_vsi *vsi = np->vsi;
468
469 return vsi->back;
470}
471
472static inline void i40e_vsi_setup_irqhandler(struct i40e_vsi *vsi,
473 irqreturn_t (*irq_handler)(int, void *))
474{
475 vsi->irq_handler = irq_handler;
476}
477
478/**
479 * i40e_rx_is_programming_status - check for programming status descriptor
480 * @qw: the first quad word of the program status descriptor
481 *
482 * The value of in the descriptor length field indicate if this
483 * is a programming status descriptor for flow director or FCoE
484 * by the value of I40E_RX_PROG_STATUS_DESC_LENGTH, otherwise
485 * it is a packet descriptor.
486 **/
487static inline bool i40e_rx_is_programming_status(u64 qw)
488{
489 return I40E_RX_PROG_STATUS_DESC_LENGTH ==
490 (qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT);
491}
492
493/* needed by i40e_ethtool.c */
494int i40e_up(struct i40e_vsi *vsi);
495void i40e_down(struct i40e_vsi *vsi);
496extern const char i40e_driver_name[];
497extern const char i40e_driver_version_str[];
498void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
499void i40e_update_stats(struct i40e_vsi *vsi);
500void i40e_update_eth_stats(struct i40e_vsi *vsi);
501struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
502int i40e_fetch_switch_configuration(struct i40e_pf *pf,
503 bool printconfig);
504
505/* needed by i40e_main.c */
506void i40e_add_fdir_filter(struct i40e_fdir_data fdir_data,
507 struct i40e_ring *tx_ring);
508void i40e_add_remove_filter(struct i40e_fdir_data fdir_data,
509 struct i40e_ring *tx_ring);
510void i40e_update_fdir_filter(struct i40e_fdir_data fdir_data,
511 struct i40e_ring *tx_ring);
512int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
513 struct i40e_pf *pf, bool add);
514
515void i40e_set_ethtool_ops(struct net_device *netdev);
516struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
517 u8 *macaddr, s16 vlan,
518 bool is_vf, bool is_netdev);
519void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan,
520 bool is_vf, bool is_netdev);
521int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
522struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
523 u16 uplink, u32 param1);
524int i40e_vsi_release(struct i40e_vsi *vsi);
525struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type,
526 struct i40e_vsi *start_vsi);
527struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
528 u16 downlink_seid, u8 enabled_tc);
529void i40e_veb_release(struct i40e_veb *veb);
530
531i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid);
532void i40e_vsi_remove_pvid(struct i40e_vsi *vsi);
533void i40e_vsi_reset_stats(struct i40e_vsi *vsi);
534void i40e_pf_reset_stats(struct i40e_pf *pf);
535#ifdef CONFIG_DEBUG_FS
536void i40e_dbg_pf_init(struct i40e_pf *pf);
537void i40e_dbg_pf_exit(struct i40e_pf *pf);
538void i40e_dbg_init(void);
539void i40e_dbg_exit(void);
540#else
541static inline void i40e_dbg_pf_init(struct i40e_pf *pf) {}
542static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {}
543static inline void i40e_dbg_init(void) {}
544static inline void i40e_dbg_exit(void) {}
545#endif /* CONFIG_DEBUG_FS*/
546void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
547int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
548void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
549int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
550int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
551struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
552 bool is_vf, bool is_netdev);
553bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
554struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
555 bool is_vf, bool is_netdev);
556void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
557
558#endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
new file mode 100644
index 000000000000..0c524fa9f811
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -0,0 +1,983 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#include "i40e_status.h"
29#include "i40e_type.h"
30#include "i40e_register.h"
31#include "i40e_adminq.h"
32#include "i40e_prototype.h"
33
34/**
35 * i40e_adminq_init_regs - Initialize AdminQ registers
36 * @hw: pointer to the hardware structure
37 *
38 * This assumes the alloc_asq and alloc_arq functions have already been called
39 **/
40static void i40e_adminq_init_regs(struct i40e_hw *hw)
41{
42 /* set head and tail registers in our local struct */
43 if (hw->mac.type == I40E_MAC_VF) {
44 hw->aq.asq.tail = I40E_VF_ATQT1;
45 hw->aq.asq.head = I40E_VF_ATQH1;
46 hw->aq.arq.tail = I40E_VF_ARQT1;
47 hw->aq.arq.head = I40E_VF_ARQH1;
48 } else {
49 hw->aq.asq.tail = I40E_PF_ATQT;
50 hw->aq.asq.head = I40E_PF_ATQH;
51 hw->aq.arq.tail = I40E_PF_ARQT;
52 hw->aq.arq.head = I40E_PF_ARQH;
53 }
54}
55
56/**
57 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
58 * @hw: pointer to the hardware structure
59 **/
60static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
61{
62 i40e_status ret_code;
63 struct i40e_virt_mem mem;
64
65 ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem,
66 i40e_mem_atq_ring,
67 (hw->aq.num_asq_entries *
68 sizeof(struct i40e_aq_desc)),
69 I40E_ADMINQ_DESC_ALIGNMENT);
70 if (ret_code)
71 return ret_code;
72
73 hw->aq.asq.desc = hw->aq.asq_mem.va;
74 hw->aq.asq.dma_addr = hw->aq.asq_mem.pa;
75
76 ret_code = i40e_allocate_virt_mem(hw, &mem,
77 (hw->aq.num_asq_entries *
78 sizeof(struct i40e_asq_cmd_details)));
79 if (ret_code) {
80 i40e_free_dma_mem(hw, &hw->aq.asq_mem);
81 hw->aq.asq_mem.va = NULL;
82 hw->aq.asq_mem.pa = 0;
83 return ret_code;
84 }
85
86 hw->aq.asq.details = mem.va;
87
88 return ret_code;
89}
90
91/**
92 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
93 * @hw: pointer to the hardware structure
94 **/
95static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
96{
97 i40e_status ret_code;
98
99 ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem,
100 i40e_mem_arq_ring,
101 (hw->aq.num_arq_entries *
102 sizeof(struct i40e_aq_desc)),
103 I40E_ADMINQ_DESC_ALIGNMENT);
104 if (ret_code)
105 return ret_code;
106
107 hw->aq.arq.desc = hw->aq.arq_mem.va;
108 hw->aq.arq.dma_addr = hw->aq.arq_mem.pa;
109
110 return ret_code;
111}
112
113/**
114 * i40e_free_adminq_asq - Free Admin Queue send rings
115 * @hw: pointer to the hardware structure
116 *
117 * This assumes the posted send buffers have already been cleaned
118 * and de-allocated
119 **/
120static void i40e_free_adminq_asq(struct i40e_hw *hw)
121{
122 struct i40e_virt_mem mem;
123
124 i40e_free_dma_mem(hw, &hw->aq.asq_mem);
125 hw->aq.asq_mem.va = NULL;
126 hw->aq.asq_mem.pa = 0;
127 mem.va = hw->aq.asq.details;
128 i40e_free_virt_mem(hw, &mem);
129 hw->aq.asq.details = NULL;
130}
131
132/**
133 * i40e_free_adminq_arq - Free Admin Queue receive rings
134 * @hw: pointer to the hardware structure
135 *
136 * This assumes the posted receive buffers have already been cleaned
137 * and de-allocated
138 **/
139static void i40e_free_adminq_arq(struct i40e_hw *hw)
140{
141 i40e_free_dma_mem(hw, &hw->aq.arq_mem);
142 hw->aq.arq_mem.va = NULL;
143 hw->aq.arq_mem.pa = 0;
144}
145
146/**
147 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
148 * @hw: pointer to the hardware structure
149 **/
150static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
151{
152 i40e_status ret_code;
153 struct i40e_aq_desc *desc;
154 struct i40e_virt_mem mem;
155 struct i40e_dma_mem *bi;
156 int i;
157
158 /* We'll be allocating the buffer info memory first, then we can
159 * allocate the mapped buffers for the event processing
160 */
161
162 /* buffer_info structures do not need alignment */
163 ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries *
164 sizeof(struct i40e_dma_mem)));
165 if (ret_code)
166 goto alloc_arq_bufs;
167 hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va;
168
169 /* allocate the mapped buffers */
170 for (i = 0; i < hw->aq.num_arq_entries; i++) {
171 bi = &hw->aq.arq.r.arq_bi[i];
172 ret_code = i40e_allocate_dma_mem(hw, bi,
173 i40e_mem_arq_buf,
174 hw->aq.arq_buf_size,
175 I40E_ADMINQ_DESC_ALIGNMENT);
176 if (ret_code)
177 goto unwind_alloc_arq_bufs;
178
179 /* now configure the descriptors for use */
180 desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
181
182 desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
183 if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
184 desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
185 desc->opcode = 0;
186 /* This is in accordance with Admin queue design, there is no
187 * register for buffer size configuration
188 */
189 desc->datalen = cpu_to_le16((u16)bi->size);
190 desc->retval = 0;
191 desc->cookie_high = 0;
192 desc->cookie_low = 0;
193 desc->params.external.addr_high =
194 cpu_to_le32(upper_32_bits(bi->pa));
195 desc->params.external.addr_low =
196 cpu_to_le32(lower_32_bits(bi->pa));
197 desc->params.external.param0 = 0;
198 desc->params.external.param1 = 0;
199 }
200
201alloc_arq_bufs:
202 return ret_code;
203
204unwind_alloc_arq_bufs:
205 /* don't try to free the one that failed... */
206 i--;
207 for (; i >= 0; i--)
208 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
209 mem.va = hw->aq.arq.r.arq_bi;
210 i40e_free_virt_mem(hw, &mem);
211
212 return ret_code;
213}
214
215/**
216 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
217 * @hw: pointer to the hardware structure
218 **/
219static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
220{
221 i40e_status ret_code;
222 struct i40e_virt_mem mem;
223 struct i40e_dma_mem *bi;
224 int i;
225
226 /* No mapped memory needed yet, just the buffer info structures */
227 ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries *
228 sizeof(struct i40e_dma_mem)));
229 if (ret_code)
230 goto alloc_asq_bufs;
231 hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va;
232
233 /* allocate the mapped buffers */
234 for (i = 0; i < hw->aq.num_asq_entries; i++) {
235 bi = &hw->aq.asq.r.asq_bi[i];
236 ret_code = i40e_allocate_dma_mem(hw, bi,
237 i40e_mem_asq_buf,
238 hw->aq.asq_buf_size,
239 I40E_ADMINQ_DESC_ALIGNMENT);
240 if (ret_code)
241 goto unwind_alloc_asq_bufs;
242 }
243alloc_asq_bufs:
244 return ret_code;
245
246unwind_alloc_asq_bufs:
247 /* don't try to free the one that failed... */
248 i--;
249 for (; i >= 0; i--)
250 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
251 mem.va = hw->aq.asq.r.asq_bi;
252 i40e_free_virt_mem(hw, &mem);
253
254 return ret_code;
255}
256
257/**
258 * i40e_free_arq_bufs - Free receive queue buffer info elements
259 * @hw: pointer to the hardware structure
260 **/
261static void i40e_free_arq_bufs(struct i40e_hw *hw)
262{
263 struct i40e_virt_mem mem;
264 int i;
265
266 for (i = 0; i < hw->aq.num_arq_entries; i++)
267 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
268
269 mem.va = hw->aq.arq.r.arq_bi;
270 i40e_free_virt_mem(hw, &mem);
271}
272
273/**
274 * i40e_free_asq_bufs - Free send queue buffer info elements
275 * @hw: pointer to the hardware structure
276 **/
277static void i40e_free_asq_bufs(struct i40e_hw *hw)
278{
279 struct i40e_virt_mem mem;
280 int i;
281
282 /* only unmap if the address is non-NULL */
283 for (i = 0; i < hw->aq.num_asq_entries; i++)
284 if (hw->aq.asq.r.asq_bi[i].pa)
285 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
286
287 /* now free the buffer info list */
288 mem.va = hw->aq.asq.r.asq_bi;
289 i40e_free_virt_mem(hw, &mem);
290}
291
292/**
293 * i40e_config_asq_regs - configure ASQ registers
294 * @hw: pointer to the hardware structure
295 *
296 * Configure base address and length registers for the transmit queue
297 **/
298static void i40e_config_asq_regs(struct i40e_hw *hw)
299{
300 if (hw->mac.type == I40E_MAC_VF) {
301 /* configure the transmit queue */
302 wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr));
303 wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr));
304 wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
305 I40E_VF_ATQLEN1_ATQENABLE_MASK));
306 } else {
307 /* configure the transmit queue */
308 wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr));
309 wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr));
310 wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
311 I40E_PF_ATQLEN_ATQENABLE_MASK));
312 }
313}
314
315/**
316 * i40e_config_arq_regs - ARQ register configuration
317 * @hw: pointer to the hardware structure
318 *
319 * Configure base address and length registers for the receive (event queue)
320 **/
321static void i40e_config_arq_regs(struct i40e_hw *hw)
322{
323 if (hw->mac.type == I40E_MAC_VF) {
324 /* configure the receive queue */
325 wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr));
326 wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr));
327 wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
328 I40E_VF_ARQLEN1_ARQENABLE_MASK));
329 } else {
330 /* configure the receive queue */
331 wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr));
332 wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr));
333 wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
334 I40E_PF_ARQLEN_ARQENABLE_MASK));
335 }
336
337 /* Update tail in the HW to post pre-allocated buffers */
338 wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
339}
340
341/**
342 * i40e_init_asq - main initialization routine for ASQ
343 * @hw: pointer to the hardware structure
344 *
345 * This is the main initialization routine for the Admin Send Queue
346 * Prior to calling this function, drivers *MUST* set the following fields
347 * in the hw->aq structure:
348 * - hw->aq.num_asq_entries
349 * - hw->aq.arq_buf_size
350 *
351 * Do *NOT* hold the lock when calling this as the memory allocation routines
352 * called are not going to be atomic context safe
353 **/
354static i40e_status i40e_init_asq(struct i40e_hw *hw)
355{
356 i40e_status ret_code = 0;
357
358 if (hw->aq.asq.count > 0) {
359 /* queue already initialized */
360 ret_code = I40E_ERR_NOT_READY;
361 goto init_adminq_exit;
362 }
363
364 /* verify input for valid configuration */
365 if ((hw->aq.num_asq_entries == 0) ||
366 (hw->aq.asq_buf_size == 0)) {
367 ret_code = I40E_ERR_CONFIG;
368 goto init_adminq_exit;
369 }
370
371 hw->aq.asq.next_to_use = 0;
372 hw->aq.asq.next_to_clean = 0;
373 hw->aq.asq.count = hw->aq.num_asq_entries;
374
375 /* allocate the ring memory */
376 ret_code = i40e_alloc_adminq_asq_ring(hw);
377 if (ret_code)
378 goto init_adminq_exit;
379
380 /* allocate buffers in the rings */
381 ret_code = i40e_alloc_asq_bufs(hw);
382 if (ret_code)
383 goto init_adminq_free_rings;
384
385 /* initialize base registers */
386 i40e_config_asq_regs(hw);
387
388 /* success! */
389 goto init_adminq_exit;
390
391init_adminq_free_rings:
392 i40e_free_adminq_asq(hw);
393
394init_adminq_exit:
395 return ret_code;
396}
397
398/**
399 * i40e_init_arq - initialize ARQ
400 * @hw: pointer to the hardware structure
401 *
402 * The main initialization routine for the Admin Receive (Event) Queue.
403 * Prior to calling this function, drivers *MUST* set the following fields
404 * in the hw->aq structure:
405 * - hw->aq.num_asq_entries
406 * - hw->aq.arq_buf_size
407 *
408 * Do *NOT* hold the lock when calling this as the memory allocation routines
409 * called are not going to be atomic context safe
410 **/
411static i40e_status i40e_init_arq(struct i40e_hw *hw)
412{
413 i40e_status ret_code = 0;
414
415 if (hw->aq.arq.count > 0) {
416 /* queue already initialized */
417 ret_code = I40E_ERR_NOT_READY;
418 goto init_adminq_exit;
419 }
420
421 /* verify input for valid configuration */
422 if ((hw->aq.num_arq_entries == 0) ||
423 (hw->aq.arq_buf_size == 0)) {
424 ret_code = I40E_ERR_CONFIG;
425 goto init_adminq_exit;
426 }
427
428 hw->aq.arq.next_to_use = 0;
429 hw->aq.arq.next_to_clean = 0;
430 hw->aq.arq.count = hw->aq.num_arq_entries;
431
432 /* allocate the ring memory */
433 ret_code = i40e_alloc_adminq_arq_ring(hw);
434 if (ret_code)
435 goto init_adminq_exit;
436
437 /* allocate buffers in the rings */
438 ret_code = i40e_alloc_arq_bufs(hw);
439 if (ret_code)
440 goto init_adminq_free_rings;
441
442 /* initialize base registers */
443 i40e_config_arq_regs(hw);
444
445 /* success! */
446 goto init_adminq_exit;
447
448init_adminq_free_rings:
449 i40e_free_adminq_arq(hw);
450
451init_adminq_exit:
452 return ret_code;
453}
454
455/**
456 * i40e_shutdown_asq - shutdown the ASQ
457 * @hw: pointer to the hardware structure
458 *
459 * The main shutdown routine for the Admin Send Queue
460 **/
461static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
462{
463 i40e_status ret_code = 0;
464
465 if (hw->aq.asq.count == 0)
466 return I40E_ERR_NOT_READY;
467
468 /* Stop firmware AdminQ processing */
469 if (hw->mac.type == I40E_MAC_VF)
470 wr32(hw, I40E_VF_ATQLEN1, 0);
471 else
472 wr32(hw, I40E_PF_ATQLEN, 0);
473
474 /* make sure lock is available */
475 mutex_lock(&hw->aq.asq_mutex);
476
477 hw->aq.asq.count = 0; /* to indicate uninitialized queue */
478
479 /* free ring buffers */
480 i40e_free_asq_bufs(hw);
481 /* free the ring descriptors */
482 i40e_free_adminq_asq(hw);
483
484 mutex_unlock(&hw->aq.asq_mutex);
485
486 return ret_code;
487}
488
489/**
490 * i40e_shutdown_arq - shutdown ARQ
491 * @hw: pointer to the hardware structure
492 *
493 * The main shutdown routine for the Admin Receive Queue
494 **/
495static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
496{
497 i40e_status ret_code = 0;
498
499 if (hw->aq.arq.count == 0)
500 return I40E_ERR_NOT_READY;
501
502 /* Stop firmware AdminQ processing */
503 if (hw->mac.type == I40E_MAC_VF)
504 wr32(hw, I40E_VF_ARQLEN1, 0);
505 else
506 wr32(hw, I40E_PF_ARQLEN, 0);
507
508 /* make sure lock is available */
509 mutex_lock(&hw->aq.arq_mutex);
510
511 hw->aq.arq.count = 0; /* to indicate uninitialized queue */
512
513 /* free ring buffers */
514 i40e_free_arq_bufs(hw);
515 /* free the ring descriptors */
516 i40e_free_adminq_arq(hw);
517
518 mutex_unlock(&hw->aq.arq_mutex);
519
520 return ret_code;
521}
522
523/**
524 * i40e_init_adminq - main initialization routine for Admin Queue
525 * @hw: pointer to the hardware structure
526 *
527 * Prior to calling this function, drivers *MUST* set the following fields
528 * in the hw->aq structure:
529 * - hw->aq.num_asq_entries
530 * - hw->aq.num_arq_entries
531 * - hw->aq.arq_buf_size
532 * - hw->aq.asq_buf_size
533 **/
534i40e_status i40e_init_adminq(struct i40e_hw *hw)
535{
536 u16 eetrack_lo, eetrack_hi;
537 i40e_status ret_code;
538
539 /* verify input for valid configuration */
540 if ((hw->aq.num_arq_entries == 0) ||
541 (hw->aq.num_asq_entries == 0) ||
542 (hw->aq.arq_buf_size == 0) ||
543 (hw->aq.asq_buf_size == 0)) {
544 ret_code = I40E_ERR_CONFIG;
545 goto init_adminq_exit;
546 }
547
548 /* initialize locks */
549 mutex_init(&hw->aq.asq_mutex);
550 mutex_init(&hw->aq.arq_mutex);
551
552 /* Set up register offsets */
553 i40e_adminq_init_regs(hw);
554
555 /* allocate the ASQ */
556 ret_code = i40e_init_asq(hw);
557 if (ret_code)
558 goto init_adminq_destroy_locks;
559
560 /* allocate the ARQ */
561 ret_code = i40e_init_arq(hw);
562 if (ret_code)
563 goto init_adminq_free_asq;
564
565 ret_code = i40e_aq_get_firmware_version(hw,
566 &hw->aq.fw_maj_ver, &hw->aq.fw_min_ver,
567 &hw->aq.api_maj_ver, &hw->aq.api_min_ver,
568 NULL);
569 if (ret_code)
570 goto init_adminq_free_arq;
571
572 if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
573 hw->aq.api_min_ver != I40E_FW_API_VERSION_MINOR) {
574 ret_code = I40E_ERR_FIRMWARE_API_VERSION;
575 goto init_adminq_free_arq;
576 }
577 i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
578 i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
579 i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
580 hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
581
582 ret_code = i40e_aq_set_hmc_resource_profile(hw,
583 I40E_HMC_PROFILE_DEFAULT,
584 0,
585 NULL);
586 ret_code = 0;
587
588 /* success! */
589 goto init_adminq_exit;
590
591init_adminq_free_arq:
592 i40e_shutdown_arq(hw);
593init_adminq_free_asq:
594 i40e_shutdown_asq(hw);
595init_adminq_destroy_locks:
596
597init_adminq_exit:
598 return ret_code;
599}
600
601/**
602 * i40e_shutdown_adminq - shutdown routine for the Admin Queue
603 * @hw: pointer to the hardware structure
604 **/
605i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
606{
607 i40e_status ret_code = 0;
608
609 i40e_shutdown_asq(hw);
610 i40e_shutdown_arq(hw);
611
612 /* destroy the locks */
613
614 return ret_code;
615}
616
617/**
618 * i40e_clean_asq - cleans Admin send queue
619 * @asq: pointer to the adminq send ring
620 *
621 * returns the number of free desc
622 **/
623static u16 i40e_clean_asq(struct i40e_hw *hw)
624{
625 struct i40e_adminq_ring *asq = &(hw->aq.asq);
626 struct i40e_asq_cmd_details *details;
627 u16 ntc = asq->next_to_clean;
628 struct i40e_aq_desc desc_cb;
629 struct i40e_aq_desc *desc;
630
631 desc = I40E_ADMINQ_DESC(*asq, ntc);
632 details = I40E_ADMINQ_DETAILS(*asq, ntc);
633 while (rd32(hw, hw->aq.asq.head) != ntc) {
634 if (details->callback) {
635 I40E_ADMINQ_CALLBACK cb_func =
636 (I40E_ADMINQ_CALLBACK)details->callback;
637 desc_cb = *desc;
638 cb_func(hw, &desc_cb);
639 }
640 memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
641 memset((void *)details, 0,
642 sizeof(struct i40e_asq_cmd_details));
643 ntc++;
644 if (ntc == asq->count)
645 ntc = 0;
646 desc = I40E_ADMINQ_DESC(*asq, ntc);
647 details = I40E_ADMINQ_DETAILS(*asq, ntc);
648 }
649
650 asq->next_to_clean = ntc;
651
652 return I40E_DESC_UNUSED(asq);
653}
654
655/**
656 * i40e_asq_done - check if FW has processed the Admin Send Queue
657 * @hw: pointer to the hw struct
658 *
659 * Returns true if the firmware has processed all descriptors on the
660 * admin send queue. Returns false if there are still requests pending.
661 **/
662bool i40e_asq_done(struct i40e_hw *hw)
663{
664 /* AQ designers suggest use of head for better
665 * timing reliability than DD bit
666 */
667 return (rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use);
668
669}
670
671/**
672 * i40e_asq_send_command - send command to Admin Queue
673 * @hw: pointer to the hw struct
674 * @desc: prefilled descriptor describing the command (non DMA mem)
675 * @buff: buffer to use for indirect commands
676 * @buff_size: size of buffer for indirect commands
677 * @opaque: pointer to info to be used in async cleanup
678 *
679 * This is the main send command driver routine for the Admin Queue send
680 * queue. It runs the queue, cleans the queue, etc
681 **/
682i40e_status i40e_asq_send_command(struct i40e_hw *hw,
683 struct i40e_aq_desc *desc,
684 void *buff, /* can be NULL */
685 u16 buff_size,
686 struct i40e_asq_cmd_details *cmd_details)
687{
688 i40e_status status = 0;
689 struct i40e_dma_mem *dma_buff = NULL;
690 struct i40e_asq_cmd_details *details;
691 struct i40e_aq_desc *desc_on_ring;
692 bool cmd_completed = false;
693 u16 retval = 0;
694
695 if (hw->aq.asq.count == 0) {
696 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
697 "AQTX: Admin queue not initialized.\n");
698 status = I40E_ERR_QUEUE_EMPTY;
699 goto asq_send_command_exit;
700 }
701
702 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
703 if (cmd_details) {
704 memcpy(details, cmd_details,
705 sizeof(struct i40e_asq_cmd_details));
706
707 /* If the cmd_details are defined copy the cookie. The
708 * cpu_to_le32 is not needed here because the data is ignored
709 * by the FW, only used by the driver
710 */
711 if (details->cookie) {
712 desc->cookie_high =
713 cpu_to_le32(upper_32_bits(details->cookie));
714 desc->cookie_low =
715 cpu_to_le32(lower_32_bits(details->cookie));
716 }
717 } else {
718 memset(details, 0, sizeof(struct i40e_asq_cmd_details));
719 }
720
721 /* clear requested flags and then set additional flags if defined */
722 desc->flags &= ~cpu_to_le16(details->flags_dis);
723 desc->flags |= cpu_to_le16(details->flags_ena);
724
725 mutex_lock(&hw->aq.asq_mutex);
726
727 if (buff_size > hw->aq.asq_buf_size) {
728 i40e_debug(hw,
729 I40E_DEBUG_AQ_MESSAGE,
730 "AQTX: Invalid buffer size: %d.\n",
731 buff_size);
732 status = I40E_ERR_INVALID_SIZE;
733 goto asq_send_command_error;
734 }
735
736 if (details->postpone && !details->async) {
737 i40e_debug(hw,
738 I40E_DEBUG_AQ_MESSAGE,
739 "AQTX: Async flag not set along with postpone flag");
740 status = I40E_ERR_PARAM;
741 goto asq_send_command_error;
742 }
743
744 /* call clean and check queue available function to reclaim the
745 * descriptors that were processed by FW, the function returns the
746 * number of desc available
747 */
748 /* the clean function called here could be called in a separate thread
749 * in case of asynchronous completions
750 */
751 if (i40e_clean_asq(hw) == 0) {
752 i40e_debug(hw,
753 I40E_DEBUG_AQ_MESSAGE,
754 "AQTX: Error queue is full.\n");
755 status = I40E_ERR_ADMIN_QUEUE_FULL;
756 goto asq_send_command_error;
757 }
758
759 /* initialize the temp desc pointer with the right desc */
760 desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
761
762 /* if the desc is available copy the temp desc to the right place */
763 memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc));
764
765 /* if buff is not NULL assume indirect command */
766 if (buff != NULL) {
767 dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
768 /* copy the user buff into the respective DMA buff */
769 memcpy(dma_buff->va, buff, buff_size);
770 desc_on_ring->datalen = cpu_to_le16(buff_size);
771
772 /* Update the address values in the desc with the pa value
773 * for respective buffer
774 */
775 desc_on_ring->params.external.addr_high =
776 cpu_to_le32(upper_32_bits(dma_buff->pa));
777 desc_on_ring->params.external.addr_low =
778 cpu_to_le32(lower_32_bits(dma_buff->pa));
779 }
780
781 /* bump the tail */
782 i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
783 (hw->aq.asq.next_to_use)++;
784 if (hw->aq.asq.next_to_use == hw->aq.asq.count)
785 hw->aq.asq.next_to_use = 0;
786 if (!details->postpone)
787 wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
788
789 /* if cmd_details are not defined or async flag is not set,
790 * we need to wait for desc write back
791 */
792 if (!details->async && !details->postpone) {
793 u32 total_delay = 0;
794 u32 delay_len = 10;
795
796 do {
797 /* AQ designers suggest use of head for better
798 * timing reliability than DD bit
799 */
800 if (i40e_asq_done(hw))
801 break;
802 /* ugh! delay while spin_lock */
803 udelay(delay_len);
804 total_delay += delay_len;
805 } while (total_delay < I40E_ASQ_CMD_TIMEOUT);
806 }
807
808 /* if ready, copy the desc back to temp */
809 if (i40e_asq_done(hw)) {
810 memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc));
811 if (buff != NULL)
812 memcpy(buff, dma_buff->va, buff_size);
813 retval = le16_to_cpu(desc->retval);
814 if (retval != 0) {
815 i40e_debug(hw,
816 I40E_DEBUG_AQ_MESSAGE,
817 "AQTX: Command completed with error 0x%X.\n",
818 retval);
819 /* strip off FW internal code */
820 retval &= 0xff;
821 }
822 cmd_completed = true;
823 if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
824 status = 0;
825 else
826 status = I40E_ERR_ADMIN_QUEUE_ERROR;
827 hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
828 }
829
830 /* update the error if time out occurred */
831 if ((!cmd_completed) &&
832 (!details->async && !details->postpone)) {
833 i40e_debug(hw,
834 I40E_DEBUG_AQ_MESSAGE,
835 "AQTX: Writeback timeout.\n");
836 status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
837 }
838
839asq_send_command_error:
840 mutex_unlock(&hw->aq.asq_mutex);
841asq_send_command_exit:
842 return status;
843}
844
845/**
846 * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
847 * @desc: pointer to the temp descriptor (non DMA mem)
848 * @opcode: the opcode can be used to decide which flags to turn off or on
849 *
850 * Fill the desc with default values
851 **/
852void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
853 u16 opcode)
854{
855 /* zero out the desc */
856 memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
857 desc->opcode = cpu_to_le16(opcode);
858 desc->flags = cpu_to_le16(I40E_AQ_FLAG_EI | I40E_AQ_FLAG_SI);
859}
860
/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e. It can also return how many events are
 *  left to process through 'pending'
 *
 *  NOTE(review): e->msg_size is in/out -- on entry it is the capacity of
 *  e->msg_buf, and on success it is clamped to the received message length.
 **/
i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	i40e_status ret_code = 0;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	/* set next_to_use to head */
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Queue is empty.\n");
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;
	i40e_debug_aq(hw,
		      I40E_DEBUG_AQ_COMMAND,
		      (void *)desc,
		      hw->aq.arq.r.arq_bi[desc_idx].va);

	flags = le16_to_cpu(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		/* firmware flagged the event bad: record the code, don't
		 * copy the message out
		 */
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	} else {
		/* copy the descriptor and as much of the message as fits */
		memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc));
		datalen = le16_to_cpu(desc->datalen);
		e->msg_size = min(datalen, e->msg_size);
		if (e->msg_buf != NULL && (e->msg_size != 0))
			memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
			       e->msg_size);
	}

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return.  The expression handles
	 * the wrapped case (ntc > ntu) by adding a full ring's worth.
	 */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}
950
951void i40e_resume_aq(struct i40e_hw *hw)
952{
953 u32 reg = 0;
954
955 /* Registers are reset after PF reset */
956 hw->aq.asq.next_to_use = 0;
957 hw->aq.asq.next_to_clean = 0;
958
959 i40e_config_asq_regs(hw);
960 reg = hw->aq.num_asq_entries;
961
962 if (hw->mac.type == I40E_MAC_VF) {
963 reg |= I40E_VF_ATQLEN_ATQENABLE_MASK;
964 wr32(hw, I40E_VF_ATQLEN1, reg);
965 } else {
966 reg |= I40E_PF_ATQLEN_ATQENABLE_MASK;
967 wr32(hw, I40E_PF_ATQLEN, reg);
968 }
969
970 hw->aq.arq.next_to_use = 0;
971 hw->aq.arq.next_to_clean = 0;
972
973 i40e_config_arq_regs(hw);
974 reg = hw->aq.num_arq_entries;
975
976 if (hw->mac.type == I40E_MAC_VF) {
977 reg |= I40E_VF_ATQLEN_ATQENABLE_MASK;
978 wr32(hw, I40E_VF_ARQLEN1, reg);
979 } else {
980 reg |= I40E_PF_ATQLEN_ATQENABLE_MASK;
981 wr32(hw, I40E_PF_ARQLEN, reg);
982 }
983}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
new file mode 100644
index 000000000000..22e5ed683e47
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -0,0 +1,112 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#ifndef _I40E_ADMINQ_H_
29#define _I40E_ADMINQ_H_
30
31#include "i40e_osdep.h"
32#include "i40e_adminq_cmd.h"
33
/* Return a pointer to descriptor i of admin queue ring R */
#define I40E_ADMINQ_DESC(R, i) \
	(&(((struct i40e_aq_desc *)((R).desc))[i]))

/* alignment (bytes) for the descriptor ring -- presumably enforced by the
 * ring allocator; TODO confirm against i40e_alloc_adminq_*_ring()
 */
#define I40E_ADMINQ_DESC_ALIGNMENT 4096
38
/* One admin queue ring; used for both the send (ASQ) and receive (ARQ)
 * queues in struct i40e_adminq_info.
 */
struct i40e_adminq_ring {
	void *desc;		/* Descriptor ring memory */
	void *details;		/* ASQ details (i40e_asq_cmd_details array) */

	union {
		struct i40e_dma_mem *asq_bi;	/* per-descriptor send buffers */
		struct i40e_dma_mem *arq_bi;	/* per-descriptor receive buffers */
	} r;

	u64 dma_addr;		/* Physical address of the ring */
	u16 count;		/* Number of descriptors; 0 == uninitialized */
	u16 rx_buf_len;		/* Admin Receive Queue buffer length */

	/* used for interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	/* used for queue tracking; register offsets passed to rd32()/wr32() */
	u32 head;
	u32 tail;
};
60
/* ASQ transaction details: one entry per send-queue descriptor, written by
 * i40e_asq_send_command() and consumed by i40e_clean_asq().
 */
struct i40e_asq_cmd_details {
	void *callback; /* cast from type I40E_ADMINQ_CALLBACK; invoked on completion */
	u64 cookie;	/* opaque driver data copied into the descriptor */
	u16 flags_ena;	/* flags OR'd into desc->flags before sending */
	u16 flags_dis;	/* flags cleared from desc->flags before sending */
	bool async;	/* don't wait for descriptor write-back */
	bool postpone;	/* queue the command but don't bump the tail yet */
};

/* Return a pointer to details entry i of admin queue ring R */
#define I40E_ADMINQ_DETAILS(R, i) \
	(&(((struct i40e_asq_cmd_details *)((R).details))[i]))
73
/* ARQ event information returned by i40e_clean_arq_element().
 * msg_size is in/out: the caller sets it to the capacity of msg_buf and
 * it is clamped to the received message length on return.
 */
struct i40e_arq_event_info {
	struct i40e_aq_desc desc;
	u16 msg_size;
	u8 *msg_buf;
};
80
/* Admin Queue information: all per-device AQ state, embedded in i40e_hw.
 * num_*_entries and *_buf_size must be filled in by the driver before
 * i40e_init_adminq() is called.
 */
struct i40e_adminq_info {
	struct i40e_adminq_ring arq;    /* receive queue */
	struct i40e_adminq_ring asq;    /* send queue */
	u16 num_arq_entries;            /* receive queue depth */
	u16 num_asq_entries;            /* send queue depth */
	u16 arq_buf_size;               /* receive queue buffer size */
	u16 asq_buf_size;               /* send queue buffer size */
	u16 fw_maj_ver;                 /* firmware major version */
	u16 fw_min_ver;                 /* firmware minor version */
	u16 api_maj_ver;                /* api major version */
	u16 api_min_ver;                /* api minor version */

	struct mutex asq_mutex;         /* Send queue lock */
	struct mutex arq_mutex;         /* Receive queue lock */

	struct i40e_dma_mem asq_mem;    /* send queue dynamic memory */
	struct i40e_dma_mem arq_mem;    /* receive queue dynamic memory */

	/* last status values on send and receive queues */
	enum i40e_admin_queue_err asq_last_status;
	enum i40e_admin_queue_err arq_last_status;
};
104
/* general information */
#define I40E_AQ_LARGE_BUF	512
#define I40E_ASQ_CMD_TIMEOUT	100000  /* usecs */

/* Zero desc, then set opcode and the default EI|SI flags */
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode);
111
112#endif /* _I40E_ADMINQ_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
new file mode 100644
index 000000000000..e61ebdd5a5f9
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -0,0 +1,2076 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#ifndef _I40E_ADMINQ_CMD_H_
29#define _I40E_ADMINQ_CMD_H_
30
31/* This header file defines the i40e Admin Queue commands and is shared between
32 * i40e Firmware and Software.
33 *
34 * This file needs to comply with the Linux Kernel coding style.
35 */
36
#define I40E_FW_API_VERSION_MAJOR  0x0001
#define I40E_FW_API_VERSION_MINOR  0x0000

/* General admin queue descriptor (2*8 + 16 = 32 bytes).
 * Direct commands overlay their arguments on params.internal/raw;
 * indirect commands carry a DMA buffer address in params.external.
 */
struct i40e_aq_desc {
	__le16 flags;
	__le16 opcode;
	__le16 datalen;		/* length of the indirect buffer, if any */
	__le16 retval;		/* i40e_admin_queue_err reported by firmware */
	__le32 cookie_high;	/* opaque driver data; ignored by firmware */
	__le32 cookie_low;
	union {
		struct {
			__le32 param0;
			__le32 param1;
			__le32 param2;
			__le32 param3;
		} internal;
		struct {
			__le32 param0;
			__le32 param1;
			__le32 addr_high;	/* upper 32 bits of buffer PA */
			__le32 addr_low;	/* lower 32 bits of buffer PA */
		} external;
		u8 raw[16];
	} params;
};
63
/* Flags sub-structure
 * |0  |1  |2  |3  |4  |5  |6  |7  |8  |9  |10 |11 |12 |13 |14 |15 |
 * |DD |CMP|ERR|VFE| * *  RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
 */

/* command flags and offsets (bit positions within i40e_aq_desc.flags) */
#define I40E_AQ_FLAG_DD_SHIFT	0
#define I40E_AQ_FLAG_CMP_SHIFT	1
#define I40E_AQ_FLAG_ERR_SHIFT	2
#define I40E_AQ_FLAG_VFE_SHIFT	3
#define I40E_AQ_FLAG_LB_SHIFT	9
#define I40E_AQ_FLAG_RD_SHIFT	10
#define I40E_AQ_FLAG_VFC_SHIFT	11
#define I40E_AQ_FLAG_BUF_SHIFT	12
#define I40E_AQ_FLAG_SI_SHIFT	13
#define I40E_AQ_FLAG_EI_SHIFT	14
#define I40E_AQ_FLAG_FE_SHIFT	15

#define I40E_AQ_FLAG_DD		(1 << I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
#define I40E_AQ_FLAG_CMP	(1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
#define I40E_AQ_FLAG_ERR	(1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
#define I40E_AQ_FLAG_VFE	(1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
#define I40E_AQ_FLAG_LB		(1 << I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
#define I40E_AQ_FLAG_RD		(1 << I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
#define I40E_AQ_FLAG_VFC	(1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
#define I40E_AQ_FLAG_BUF	(1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
#define I40E_AQ_FLAG_SI		(1 << I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
#define I40E_AQ_FLAG_EI		(1 << I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
#define I40E_AQ_FLAG_FE		(1 << I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
93
/* error codes: AQ completion codes returned by firmware in desc->retval,
 * loosely modeled on errno values
 */
enum i40e_admin_queue_err {
	I40E_AQ_RC_OK       = 0,    /* success */
	I40E_AQ_RC_EPERM    = 1,    /* Operation not permitted */
	I40E_AQ_RC_ENOENT   = 2,    /* No such element */
	I40E_AQ_RC_ESRCH    = 3,    /* Bad opcode */
	I40E_AQ_RC_EINTR    = 4,    /* operation interrupted */
	I40E_AQ_RC_EIO      = 5,    /* I/O error */
	I40E_AQ_RC_ENXIO    = 6,    /* No such resource */
	I40E_AQ_RC_E2BIG    = 7,    /* Arg too long */
	I40E_AQ_RC_EAGAIN   = 8,    /* Try again */
	I40E_AQ_RC_ENOMEM   = 9,    /* Out of memory */
	I40E_AQ_RC_EACCES   = 10,   /* Permission denied */
	I40E_AQ_RC_EFAULT   = 11,   /* Bad address */
	I40E_AQ_RC_EBUSY    = 12,   /* Device or resource busy */
	I40E_AQ_RC_EEXIST   = 13,   /* object already exists */
	I40E_AQ_RC_EINVAL   = 14,   /* Invalid argument */
	I40E_AQ_RC_ENOTTY   = 15,   /* Not a typewriter */
	I40E_AQ_RC_ENOSPC   = 16,   /* No space left or alloc failure */
	I40E_AQ_RC_ENOSYS   = 17,   /* Function not implemented */
	I40E_AQ_RC_ERANGE   = 18,   /* Parameter out of range */
	I40E_AQ_RC_EFLUSHED = 19,   /* Cmd flushed because of prev cmd error */
	I40E_AQ_RC_BAD_ADDR = 20,   /* Descriptor contains a bad pointer */
	I40E_AQ_RC_EMODE    = 21,   /* Op not allowed in current dev mode */
	I40E_AQ_RC_EFBIG    = 22,   /* File too large */
};
120
/* Admin Queue command opcodes, placed in struct i40e_aq_desc.opcode;
 * shared between driver and firmware
 */
enum i40e_admin_queue_opc {
	/* aq commands */
	i40e_aqc_opc_get_version      = 0x0001,
	i40e_aqc_opc_driver_version   = 0x0002,
	i40e_aqc_opc_queue_shutdown   = 0x0003,

	/* resource ownership */
	i40e_aqc_opc_request_resource = 0x0008,
	i40e_aqc_opc_release_resource = 0x0009,

	i40e_aqc_opc_list_func_capabilities = 0x000A,
	i40e_aqc_opc_list_dev_capabilities  = 0x000B,

	i40e_aqc_opc_set_cppm_configuration = 0x0103,
	i40e_aqc_opc_set_arp_proxy_entry    = 0x0104,
	i40e_aqc_opc_set_ns_proxy_entry     = 0x0105,

	/* LAA */
	i40e_aqc_opc_mng_laa           = 0x0106,
	i40e_aqc_opc_mac_address_read  = 0x0107,
	i40e_aqc_opc_mac_address_write = 0x0108,

	/* internal switch commands */
	i40e_aqc_opc_get_switch_config         = 0x0200,
	i40e_aqc_opc_add_statistics            = 0x0201,
	i40e_aqc_opc_remove_statistics         = 0x0202,
	i40e_aqc_opc_set_port_parameters       = 0x0203,
	i40e_aqc_opc_get_switch_resource_alloc = 0x0204,

	i40e_aqc_opc_add_vsi               = 0x0210,
	i40e_aqc_opc_update_vsi_parameters = 0x0211,
	i40e_aqc_opc_get_vsi_parameters    = 0x0212,

	i40e_aqc_opc_add_pv               = 0x0220,
	i40e_aqc_opc_update_pv_parameters = 0x0221,
	i40e_aqc_opc_get_pv_parameters    = 0x0222,

	i40e_aqc_opc_add_veb               = 0x0230,
	i40e_aqc_opc_update_veb_parameters = 0x0231,
	i40e_aqc_opc_get_veb_parameters    = 0x0232,

	i40e_aqc_opc_delete_element = 0x0243,

	i40e_aqc_opc_add_macvlan                  = 0x0250,
	i40e_aqc_opc_remove_macvlan               = 0x0251,
	i40e_aqc_opc_add_vlan                     = 0x0252,
	i40e_aqc_opc_remove_vlan                  = 0x0253,
	i40e_aqc_opc_set_vsi_promiscuous_modes    = 0x0254,
	i40e_aqc_opc_add_tag                      = 0x0255,
	i40e_aqc_opc_remove_tag                   = 0x0256,
	i40e_aqc_opc_add_multicast_etag           = 0x0257,
	i40e_aqc_opc_remove_multicast_etag        = 0x0258,
	i40e_aqc_opc_update_tag                   = 0x0259,
	i40e_aqc_opc_add_control_packet_filter    = 0x025A,
	i40e_aqc_opc_remove_control_packet_filter = 0x025B,
	i40e_aqc_opc_add_cloud_filters            = 0x025C,
	i40e_aqc_opc_remove_cloud_filters         = 0x025D,

	i40e_aqc_opc_add_mirror_rule    = 0x0260,
	i40e_aqc_opc_delete_mirror_rule = 0x0261,

	i40e_aqc_opc_set_storm_control_config = 0x0280,
	i40e_aqc_opc_get_storm_control_config = 0x0281,

	/* DCB commands */
	i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
	i40e_aqc_opc_dcb_updated    = 0x0302,

	/* TX scheduler */
	i40e_aqc_opc_configure_vsi_bw_limit            = 0x0400,
	i40e_aqc_opc_configure_vsi_ets_sla_bw_limit    = 0x0406,
	i40e_aqc_opc_configure_vsi_tc_bw               = 0x0407,
	i40e_aqc_opc_query_vsi_bw_config               = 0x0408,
	i40e_aqc_opc_query_vsi_ets_sla_config          = 0x040A,
	i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,

	i40e_aqc_opc_enable_switching_comp_ets             = 0x0413,
	i40e_aqc_opc_modify_switching_comp_ets             = 0x0414,
	i40e_aqc_opc_disable_switching_comp_ets            = 0x0415,
	i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
	i40e_aqc_opc_configure_switching_comp_bw_config    = 0x0417,
	i40e_aqc_opc_query_switching_comp_ets_config       = 0x0418,
	i40e_aqc_opc_query_port_ets_config                 = 0x0419,
	i40e_aqc_opc_query_switching_comp_bw_config        = 0x041A,
	i40e_aqc_opc_suspend_port_tx                       = 0x041B,
	i40e_aqc_opc_resume_port_tx                        = 0x041C,

	/* hmc */
	i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
	i40e_aqc_opc_set_hmc_resource_profile   = 0x0501,

	/* phy commands*/
	i40e_aqc_opc_get_phy_abilities   = 0x0600,
	i40e_aqc_opc_set_phy_config      = 0x0601,
	i40e_aqc_opc_set_mac_config      = 0x0603,
	i40e_aqc_opc_set_link_restart_an = 0x0605,
	i40e_aqc_opc_get_link_status     = 0x0607,
	i40e_aqc_opc_set_phy_int_mask    = 0x0613,
	i40e_aqc_opc_get_local_advt_reg  = 0x0614,
	i40e_aqc_opc_set_local_advt_reg  = 0x0615,
	i40e_aqc_opc_get_partner_advt    = 0x0616,
	i40e_aqc_opc_set_lb_modes        = 0x0618,
	i40e_aqc_opc_get_phy_wol_caps    = 0x0621,
	i40e_aqc_opc_set_phy_reset       = 0x0622,
	i40e_aqc_opc_upload_ext_phy_fm   = 0x0625,

	/* NVM commands */
	i40e_aqc_opc_nvm_read   = 0x0701,
	i40e_aqc_opc_nvm_erase  = 0x0702,
	i40e_aqc_opc_nvm_update = 0x0703,

	/* virtualization commands */
	i40e_aqc_opc_send_msg_to_pf   = 0x0801,
	i40e_aqc_opc_send_msg_to_vf   = 0x0802,
	i40e_aqc_opc_send_msg_to_peer = 0x0803,

	/* alternate structure */
	i40e_aqc_opc_alternate_write          = 0x0900,
	i40e_aqc_opc_alternate_write_indirect = 0x0901,
	i40e_aqc_opc_alternate_read           = 0x0902,
	i40e_aqc_opc_alternate_read_indirect  = 0x0903,
	i40e_aqc_opc_alternate_write_done     = 0x0904,
	i40e_aqc_opc_alternate_set_mode       = 0x0905,
	i40e_aqc_opc_alternate_clear_port     = 0x0906,

	/* LLDP commands */
	i40e_aqc_opc_lldp_get_mib    = 0x0A00,
	i40e_aqc_opc_lldp_update_mib = 0x0A01,
	i40e_aqc_opc_lldp_add_tlv    = 0x0A02,
	i40e_aqc_opc_lldp_update_tlv = 0x0A03,
	i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
	i40e_aqc_opc_lldp_stop       = 0x0A05,
	i40e_aqc_opc_lldp_start      = 0x0A06,

	/* Tunnel commands */
	i40e_aqc_opc_add_udp_tunnel       = 0x0B00,
	i40e_aqc_opc_del_udp_tunnel       = 0x0B01,
	i40e_aqc_opc_tunnel_key_structure = 0x0B10,

	/* Async Events */
	i40e_aqc_opc_event_lan_overflow = 0x1001,

	/* OEM commands */
	i40e_aqc_opc_oem_parameter_change     = 0xFE00,
	i40e_aqc_opc_oem_device_status_change = 0xFE01,

	/* debug commands */
	i40e_aqc_opc_debug_get_deviceid     = 0xFF00,
	i40e_aqc_opc_debug_set_mode         = 0xFF01,
	i40e_aqc_opc_debug_read_reg         = 0xFF03,
	i40e_aqc_opc_debug_write_reg        = 0xFF04,
	i40e_aqc_opc_debug_read_reg_sg      = 0xFF05,
	i40e_aqc_opc_debug_write_reg_sg     = 0xFF06,
	i40e_aqc_opc_debug_modify_reg       = 0xFF07,
	i40e_aqc_opc_debug_dump_internals   = 0xFF08,
	i40e_aqc_opc_debug_modify_internals = 0xFF09,
};
279
280/* command structures and indirect data structures */
281
282/* Structure naming conventions:
283 * - no suffix for direct command descriptor structures
284 * - _data for indirect sent data
285 * - _resp for indirect return data (data which is both will use _data)
286 * - _completion for direct return data
287 * - _element_ for repeated elements (may also be _data or _resp)
288 *
289 * Command structures are expected to overlay the params.raw member of the basic
290 * descriptor, and as such cannot exceed 16 bytes in length.
291 */
292
/* This macro is used to generate a compilation error if a structure
 * is not exactly the correct length. It gives a divide by zero error if the
 * structure is not of the correct size, otherwise it creates an enum that is
 * never used.  (A pre-C11 stand-in for _Static_assert.)
 */
#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
	{ i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }

/* This macro is used extensively to ensure that command structures are 16
 * bytes in length as they have to map to the raw array of that size.
 */
#define I40E_CHECK_CMD_LENGTH(X)	I40E_CHECK_STRUCT_LEN(16, X)
305
/* internal (0x00XX) commands */

/* Get version (direct 0x0001); returned in the completion descriptor */
struct i40e_aqc_get_version {
	__le32 rom_ver;
	__le32 fw_build;
	__le16 fw_major;
	__le16 fw_minor;
	__le16 api_major;
	__le16 api_minor;
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
319
320/* Send driver version (direct 0x0002) */
321struct i40e_aqc_driver_version {
322 u8 driver_major_ver;
323 u8 driver_minor_ver;
324 u8 driver_build_ver;
325 u8 driver_subbuild_ver;
326 u8 reserved[12];
327};
328
329I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
330
331/* Queue Shutdown (direct 0x0003) */
332struct i40e_aqc_queue_shutdown {
333 __le32 driver_unloading;
334#define I40E_AQ_DRIVER_UNLOADING 0x1
335 u8 reserved[12];
336};
337
338I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
339
340/* Request resource ownership (direct 0x0008)
341 * Release resource ownership (direct 0x0009)
342 */
343#define I40E_AQ_RESOURCE_NVM 1
344#define I40E_AQ_RESOURCE_SDP 2
345#define I40E_AQ_RESOURCE_ACCESS_READ 1
346#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
347#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
348#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
349
350struct i40e_aqc_request_resource {
351 __le16 resource_id;
352 __le16 access_type;
353 __le32 timeout;
354 __le32 resource_number;
355 u8 reserved[4];
356};
357
358I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
359
/* Get function capabilities (indirect 0x000A)
 * Get device capabilities (indirect 0x000B)
 *
 * NOTE: "capabilites" (sic) - the misspelled tag is part of the existing
 * API surface and is kept as-is.
 */
struct i40e_aqc_list_capabilites {
	u8 command_flags;
#define I40E_AQ_LIST_CAP_PF_INDEX_EN	1
	u8 pf_index;
	u8 reserved[2];
	__le32 count;
	__le32 addr_high;	/* buffer address, upper 32 bits */
	__le32 addr_low;	/* buffer address, lower 32 bits */
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);

/* one element of the indirect response buffer; see cap IDs below */
struct i40e_aqc_list_capabilities_element_resp {
	__le16 id;		/* I40E_AQ_CAP_ID_* */
	u8 major_rev;
	u8 minor_rev;
	__le32 number;
	__le32 logical_id;
	__le32 phys_id;
	u8 reserved[16];
};

/* list of caps */

#define I40E_AQ_CAP_ID_SWITCH_MODE	0x0001
#define I40E_AQ_CAP_ID_MNG_MODE		0x0002
#define I40E_AQ_CAP_ID_NPAR_ACTIVE	0x0003
#define I40E_AQ_CAP_ID_OS2BMC_CAP	0x0004
#define I40E_AQ_CAP_ID_FUNCTIONS_VALID	0x0005
#define I40E_AQ_CAP_ID_ALTERNATE_RAM	0x0006
#define I40E_AQ_CAP_ID_SRIOV		0x0012
#define I40E_AQ_CAP_ID_VF		0x0013
#define I40E_AQ_CAP_ID_VMDQ		0x0014
#define I40E_AQ_CAP_ID_8021QBG		0x0015
#define I40E_AQ_CAP_ID_8021QBR		0x0016
#define I40E_AQ_CAP_ID_VSI		0x0017
#define I40E_AQ_CAP_ID_DCB		0x0018
#define I40E_AQ_CAP_ID_FCOE		0x0021
#define I40E_AQ_CAP_ID_RSS		0x0040
#define I40E_AQ_CAP_ID_RXQ		0x0041
#define I40E_AQ_CAP_ID_TXQ		0x0042
#define I40E_AQ_CAP_ID_MSIX		0x0043
#define I40E_AQ_CAP_ID_VF_MSIX		0x0044
#define I40E_AQ_CAP_ID_FLOW_DIRECTOR	0x0045
#define I40E_AQ_CAP_ID_1588		0x0046
#define I40E_AQ_CAP_ID_IWARP		0x0051
#define I40E_AQ_CAP_ID_LED		0x0061
#define I40E_AQ_CAP_ID_SDP		0x0062
#define I40E_AQ_CAP_ID_MDIO		0x0063
#define I40E_AQ_CAP_ID_FLEX10		0x00F1
#define I40E_AQ_CAP_ID_CEM		0x00F2
414
/* Set CPPM Configuration (direct 0x0103) */
struct i40e_aqc_cppm_configuration {
	__le16 command_flags;
#define I40E_AQ_CPPM_EN_LTRC	0x0800
#define I40E_AQ_CPPM_EN_DMCTH	0x1000
#define I40E_AQ_CPPM_EN_DMCTLX	0x2000
#define I40E_AQ_CPPM_EN_HPTC	0x4000
#define I40E_AQ_CPPM_EN_DMARC	0x8000
	__le16 ttlx;
	__le32 dmacr;
	__le16 dmcth;
	u8 hptc;
	u8 reserved;
	__le32 pfltrc;
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);

/* Set ARP Proxy command / response (indirect 0x0104) */
struct i40e_aqc_arp_proxy_data {
	__le16 command_flags;
#define I40E_AQ_ARP_INIT_IPV4	0x0008
#define I40E_AQ_ARP_UNSUP_CTL	0x0010
#define I40E_AQ_ARP_ENA		0x0020
#define I40E_AQ_ARP_ADD_IPV4	0x0040
#define I40E_AQ_ARP_DEL_IPV4	0x0080
	__le16 table_id;
	__le32 pfpm_proxyfc;
	__le32 ip_addr;
	u8 mac_addr[6];
};

/* Set NS Proxy Table Entry Command (indirect 0x0105) */
struct i40e_aqc_ns_proxy_data {
	__le16 table_idx_mac_addr_0;
	__le16 table_idx_mac_addr_1;
	__le16 table_idx_ipv6_0;
	__le16 table_idx_ipv6_1;
	__le16 control;
#define I40E_AQ_NS_PROXY_ADD_0		0x0100
#define I40E_AQ_NS_PROXY_DEL_0		0x0200
#define I40E_AQ_NS_PROXY_ADD_1		0x0400
#define I40E_AQ_NS_PROXY_DEL_1		0x0800
#define I40E_AQ_NS_PROXY_ADD_IPV6_0	0x1000
#define I40E_AQ_NS_PROXY_DEL_IPV6_0	0x2000
#define I40E_AQ_NS_PROXY_ADD_IPV6_1	0x4000
#define I40E_AQ_NS_PROXY_DEL_IPV6_1	0x8000
#define I40E_AQ_NS_PROXY_COMMAND_SEQ	0x0001
#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL	0x0002
#define I40E_AQ_NS_PROXY_INIT_MAC_TBL	0x0004
	u8 mac_addr_0[6];
	u8 mac_addr_1[6];
	u8 local_mac_addr[6];
	u8 ipv6_addr_0[16];	/* Warning! spec specifies BE byte order */
	u8 ipv6_addr_1[16];
};

/* Manage LAA Command (0x0106) - obsolete */
struct i40e_aqc_mng_laa {
	__le16 command_flags;
#define I40E_AQ_LAA_FLAG_WR	0x8000
	u8 reserved[2];
	__le32 sal;	/* station address, low 32 bits */
	__le16 sah;	/* station address, high 16 bits */
	u8 reserved2[6];
};
481
/* Manage MAC Address Read Command (0x0107) */
struct i40e_aqc_mac_address_read {
	__le16 command_flags;
	/* response flags: which addresses in the data buffer are valid */
#define I40E_AQC_LAN_ADDR_VALID		0x10
#define I40E_AQC_SAN_ADDR_VALID		0x20
#define I40E_AQC_PORT_ADDR_VALID	0x40
#define I40E_AQC_WOL_ADDR_VALID		0x80
#define I40E_AQC_ADDR_VALID_MASK	0xf0
	u8 reserved[6];
	__le32 addr_high;	/* buffer address, upper 32 bits */
	__le32 addr_low;	/* buffer address, lower 32 bits */
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);

/* indirect data buffer returned by MAC Address Read (0x0107) */
struct i40e_aqc_mac_address_read_data {
	u8 pf_lan_mac[6];
	u8 pf_san_mac[6];
	u8 port_mac[6];
	u8 pf_wol_mac[6];
};

I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);

/* Manage MAC Address Write Command (0x0108) */
struct i40e_aqc_mac_address_write {
	__le16 command_flags;
#define I40E_AQC_WRITE_TYPE_LAA_ONLY	0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL	0x4000
#define I40E_AQC_WRITE_TYPE_PORT	0x8000
#define I40E_AQC_WRITE_TYPE_MASK	0xc000
	__le16 mac_sah;	/* address high 16 bits */
	__le32 mac_sal;	/* address low 32 bits */
	u8 reserved[8];
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
519
/* Switch configuration commands (0x02xx) */

/* Used by many indirect commands that only pass an seid and a buffer in the
 * command
 */
struct i40e_aqc_switch_seid {
	__le16 seid;
	u8 reserved[6];
	__le32 addr_high;	/* buffer address, upper 32 bits */
	__le32 addr_low;	/* buffer address, lower 32 bits */
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);

/* Get Switch Configuration command (indirect 0x0200)
 * uses i40e_aqc_switch_seid for the descriptor
 */
struct i40e_aqc_get_switch_config_header_resp {
	__le16 num_reported;	/* elements in this response buffer */
	__le16 num_total;	/* total elements in the switch config */
	u8 reserved[12];
};

struct i40e_aqc_switch_config_element_resp {
	u8 element_type;
#define I40E_AQ_SW_ELEM_TYPE_MAC	1
#define I40E_AQ_SW_ELEM_TYPE_PF		2
#define I40E_AQ_SW_ELEM_TYPE_VF		3
#define I40E_AQ_SW_ELEM_TYPE_EMP	4
#define I40E_AQ_SW_ELEM_TYPE_BMC	5
#define I40E_AQ_SW_ELEM_TYPE_PV		16
#define I40E_AQ_SW_ELEM_TYPE_VEB	17
#define I40E_AQ_SW_ELEM_TYPE_PA		18
#define I40E_AQ_SW_ELEM_TYPE_VSI	19
	u8 revision;
#define I40E_AQ_SW_ELEM_REV_1		1
	__le16 seid;
	__le16 uplink_seid;
	__le16 downlink_seid;
	u8 reserved[3];
	u8 connection_type;
#define I40E_AQ_CONN_TYPE_REGULAR	0x1
#define I40E_AQ_CONN_TYPE_DEFAULT	0x2
#define I40E_AQ_CONN_TYPE_CASCADED	0x3
	__le16 scheduler_id;
	__le16 element_info;
};

/* Get Switch Configuration (indirect 0x0200)
 * an array of elements are returned in the response buffer
 * the first in the array is the header, remainder are elements
 *
 * NOTE(review): element[1] is a pre-C99 variable-length-array idiom; the
 * real buffer holds header.num_reported elements.
 */
struct i40e_aqc_get_switch_config_resp {
	struct i40e_aqc_get_switch_config_header_resp header;
	struct i40e_aqc_switch_config_element_resp element[1];
};
576
/* Add Statistics (direct 0x0201)
 * Remove Statistics (direct 0x0202)
 */
struct i40e_aqc_add_remove_statistics {
	__le16 seid;
	__le16 vlan;
	__le16 stat_index;
	u8 reserved[10];
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);

/* Set Port Parameters command (direct 0x0203) */
struct i40e_aqc_set_port_parameters {
	__le16 command_flags;
#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS	1
#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS	2 /* must set! */
#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA	4
	__le16 bad_frame_vsi;
	__le16 default_seid;	/* reserved for command */
	u8 reserved[10];
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);

/* Get Switch Resource Allocation (indirect 0x0204) */
struct i40e_aqc_get_switch_resource_alloc {
	u8 num_entries;		/* reserved for command */
	u8 reserved[7];
	__le32 addr_high;	/* buffer address, upper 32 bits */
	__le32 addr_low;	/* buffer address, lower 32 bits */
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);

/* expect an array of these structs in the response buffer */
struct i40e_aqc_switch_resource_alloc_element_resp {
	u8 resource_type;
#define I40E_AQ_RESOURCE_TYPE_VEB		0x0
#define I40E_AQ_RESOURCE_TYPE_VSI		0x1
#define I40E_AQ_RESOURCE_TYPE_MACADDR		0x2
#define I40E_AQ_RESOURCE_TYPE_STAG		0x3
#define I40E_AQ_RESOURCE_TYPE_ETAG		0x4
#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH	0x5
#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH	0x6
#define I40E_AQ_RESOURCE_TYPE_VLAN		0x7
#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY	0x8
#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY	0x9
#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL	0xA
#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE	0xB
#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS	0xC
#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS	0xD
#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS	0xF
#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS	0x10
#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS	0x11
#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS		0x12
#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS	0x13
	u8 reserved1;
	__le16 guaranteed;
	__le16 total;
	__le16 used;
	__le16 total_unalloced;
	u8 reserved2[6];
};
641
/* Add VSI (indirect 0x210)
 * this indirect command uses struct i40e_aqc_vsi_properties_data
 * as the indirect buffer (128 bytes)
 *
 * Update VSI (indirect 0x211) Get VSI (indirect 0x0212)
 * use the generic i40e_aqc_switch_seid descriptor format
 * use the same completion and data structure as Add VSI
 */
struct i40e_aqc_add_get_update_vsi {
	__le16 uplink_seid;
	u8 connection_type;
#define I40E_AQ_VSI_CONN_TYPE_NORMAL	0x1
#define I40E_AQ_VSI_CONN_TYPE_DEFAULT	0x2
#define I40E_AQ_VSI_CONN_TYPE_CASCADED	0x3
	u8 reserved1;
	u8 vf_id;
	u8 reserved2;
	__le16 vsi_flags;
#define I40E_AQ_VSI_TYPE_SHIFT		0x0
#define I40E_AQ_VSI_TYPE_MASK		(0x3 << I40E_AQ_VSI_TYPE_SHIFT)
#define I40E_AQ_VSI_TYPE_VF		0x0
#define I40E_AQ_VSI_TYPE_VMDQ2		0x1
#define I40E_AQ_VSI_TYPE_PF		0x2
#define I40E_AQ_VSI_TYPE_EMP_MNG	0x3
#define I40E_AQ_VSI_FLAG_CASCADED_PV	0x4
#define I40E_AQ_VSI_FLAG_CLOUD_VSI	0x8
	__le32 addr_high;	/* buffer address, upper 32 bits */
	__le32 addr_low;	/* buffer address, lower 32 bits */
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);

struct i40e_aqc_add_get_update_vsi_completion {
	__le16 seid;
	__le16 vsi_number;
	__le16 vsi_used;
	__le16 vsi_free;
	__le32 addr_high;
	__le32 addr_low;
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
684
/* 128-byte indirect buffer for Add/Update/Get VSI (0x0210-0x0212).
 * Layout is firmware ABI - do not reorder or resize fields.
 */
struct i40e_aqc_vsi_properties_data {
	/* first 96 byte are written by SW */
	__le16 valid_sections;	/* which sections below firmware should apply */
#define I40E_AQ_VSI_PROP_SWITCH_VALID		0x0001
#define I40E_AQ_VSI_PROP_SECURITY_VALID		0x0002
#define I40E_AQ_VSI_PROP_VLAN_VALID		0x0004
#define I40E_AQ_VSI_PROP_CAS_PV_VALID		0x0008
#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID	0x0010
#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID	0x0020
#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID	0x0040
#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID	0x0080
#define I40E_AQ_VSI_PROP_OUTER_UP_VALID		0x0100
#define I40E_AQ_VSI_PROP_SCHED_VALID		0x0200
	/* switch section */
	__le16 switch_id; /* 12bit id combined with flags below */
#define I40E_AQ_VSI_SW_ID_SHIFT		0x0000
#define I40E_AQ_VSI_SW_ID_MASK		(0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG	0x1000
#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB	0x2000
#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB	0x4000
	u8 sw_reserved[2];
	/* security section */
	u8 sec_flags;
#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD	0x01
#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK	0x02
#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK	0x04
	u8 sec_reserved;
	/* VLAN section */
	__le16 pvid; /* VLANS include priority bits */
	__le16 fcoe_pvid;
	u8 port_vlan_flags;
#define I40E_AQ_VSI_PVLAN_MODE_SHIFT	0x00
#define I40E_AQ_VSI_PVLAN_MODE_MASK	(0x03 << \
					 I40E_AQ_VSI_PVLAN_MODE_SHIFT)
#define I40E_AQ_VSI_PVLAN_MODE_TAGGED	0x01
#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED	0x02
#define I40E_AQ_VSI_PVLAN_MODE_ALL	0x03
#define I40E_AQ_VSI_PVLAN_INSERT_PVID	0x04
#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT	0x03
#define I40E_AQ_VSI_PVLAN_EMOD_MASK	(0x3 << \
					 I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH	0x0
#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP	0x08
#define I40E_AQ_VSI_PVLAN_EMOD_STR	0x10
#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING	0x18
	u8 pvlan_reserved[3];
	/* ingress egress up sections */
	__le32 ingress_table; /* bitmap, 3 bits per up */
#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT	0
#define I40E_AQ_VSI_UP_TABLE_UP0_MASK	(0x7 << \
					 I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT	3
#define I40E_AQ_VSI_UP_TABLE_UP1_MASK	(0x7 << \
					 I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT	6
#define I40E_AQ_VSI_UP_TABLE_UP2_MASK	(0x7 << \
					 I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT	9
#define I40E_AQ_VSI_UP_TABLE_UP3_MASK	(0x7 << \
					 I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT	12
#define I40E_AQ_VSI_UP_TABLE_UP4_MASK	(0x7 << \
					 I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT	15
#define I40E_AQ_VSI_UP_TABLE_UP5_MASK	(0x7 << \
					 I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT	18
#define I40E_AQ_VSI_UP_TABLE_UP6_MASK	(0x7 << \
					 I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT	21
#define I40E_AQ_VSI_UP_TABLE_UP7_MASK	(0x7 << \
					 I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
	__le32 egress_table;   /* same defines as for ingress table */
	/* cascaded PV section */
	__le16 cas_pv_tag;
	u8 cas_pv_flags;
#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT		0x00
#define I40E_AQ_VSI_CAS_PV_TAGX_MASK		(0x03 << \
						 I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE		0x00
#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE		0x01
#define I40E_AQ_VSI_CAS_PV_TAGX_COPY		0x02
#define I40E_AQ_VSI_CAS_PV_INSERT_TAG		0x10
#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE		0x20
#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG	0x40
	u8 cas_pv_reserved;
	/* queue mapping section */
	__le16 mapping_flags;
#define I40E_AQ_VSI_QUE_MAP_CONTIG	0x0
#define I40E_AQ_VSI_QUE_MAP_NONCONTIG	0x1
	__le16 queue_mapping[16];
#define I40E_AQ_VSI_QUEUE_SHIFT		0x0
#define I40E_AQ_VSI_QUEUE_MASK		(0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
	__le16 tc_mapping[8];
#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT	0
#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK	(0x1FF << \
					 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT	9
#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK	(0x7 << \
					 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
	/* queueing option section */
	u8 queueing_opt_flags;
#define I40E_AQ_VSI_QUE_OPT_TCP_ENA	0x10
#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA	0x20
	u8 queueing_opt_reserved[3];
	/* scheduler section */
	u8 up_enable_bits;
	u8 sched_reserved;
	/* outer up section */
	__le32 outer_up_table; /* same structure and defines as ingress table */
	u8 cmd_reserved[8];
	/* last 32 bytes are written by FW */
	__le16 qs_handle[8];
#define I40E_AQ_VSI_QS_HANDLE_INVALID	0xFFFF
	__le16 stat_counter_idx;
	__le16 sched_id;
	u8 resp_reserved[12];
};

I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
805
/* Add Port Virtualizer (direct 0x0220)
 * also used for update PV (direct 0x0221) but only flags are used
 * (IS_CTRL_PORT only works on add PV)
 */
struct i40e_aqc_add_update_pv {
	__le16 command_flags;
#define I40E_AQC_PV_FLAG_PV_TYPE		0x1
#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN	0x2
#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN	0x4
#define I40E_AQC_PV_FLAG_IS_CTRL_PORT		0x8
	__le16 uplink_seid;
	__le16 connected_seid;
	u8 reserved[10];
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);

struct i40e_aqc_add_update_pv_completion {
	/* reserved for update; for add also encodes error if rc == ENOSPC */
	__le16 pv_seid;
#define I40E_AQC_PV_ERR_FLAG_NO_PV	0x1
#define I40E_AQC_PV_ERR_FLAG_NO_SCHED	0x2
#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER	0x4
#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY	0x8
	u8 reserved[14];
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);

/* Get PV Params (direct 0x0222)
 * uses i40e_aqc_switch_seid for the descriptor
 */

struct i40e_aqc_get_pv_params_completion {
	__le16 seid;
	__le16 default_stag;
	__le16 pv_flags; /* same flags as add_pv */
#define I40E_AQC_GET_PV_PV_TYPE			0x1
#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG	0x2
#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG	0x4
	u8 reserved[8];
	__le16 default_port_seid;
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
851
/* Add VEB (direct 0x0230) */
struct i40e_aqc_add_veb {
	__le16 uplink_seid;
	__le16 downlink_seid;
	__le16 veb_flags;
#define I40E_AQC_ADD_VEB_FLOATING		0x1
#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT	1
#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK		(0x3 << \
					I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT	0x2
#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA		0x4
#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER	0x8
	u8 enable_tcs;	/* bitmap of traffic classes to enable */
	u8 reserved[9];
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);

struct i40e_aqc_add_veb_completion {
	u8 reserved[6];
	__le16 switch_seid;
	/* also encodes error if rc == ENOSPC; codes are the same as add_pv */
	__le16 veb_seid;
#define I40E_AQC_VEB_ERR_FLAG_NO_VEB		0x1
#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED		0x2
#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER	0x4
#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY		0x8
	__le16 statistic_index;
	__le16 vebs_used;
	__le16 vebs_free;
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);

/* Get VEB Parameters (direct 0x0232)
 * uses i40e_aqc_switch_seid for the descriptor
 */
struct i40e_aqc_get_veb_parameters_completion {
	__le16 seid;
	__le16 switch_id;
	__le16 veb_flags; /* only the first/last flags from 0x0230 is valid */
	__le16 statistic_index;
	__le16 vebs_used;
	__le16 vebs_free;
	u8 reserved[4];
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
900
/* Delete Element (direct 0x0243)
 * uses the generic i40e_aqc_switch_seid
 */

/* Add MAC-VLAN (indirect 0x0250) */

/* used for the command for most vlan commands */
struct i40e_aqc_macvlan {
	__le16 num_addresses;	/* number of elements in the indirect buffer */
	__le16 seid[3];		/* only seid[0] carries the SEID + valid bit */
#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT	0
#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK	(0x3FF << \
					I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
#define I40E_AQC_MACVLAN_CMD_SEID_VALID		0x8000
	__le32 addr_high;	/* buffer address, upper 32 bits */
	__le32 addr_low;	/* buffer address, lower 32 bits */
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
920
921/* indirect data for command and response */
922struct i40e_aqc_add_macvlan_element_data {
923 u8 mac_addr[6];
924 __le16 vlan_tag;
925 __le16 flags;
926#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
927#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
928#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
929#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
930 __le16 queue_number;
931#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
932#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
933 I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
934 /* response section */
935 u8 match_method;
936#define I40E_AQC_MM_PERFECT_MATCH 0x01
937#define I40E_AQC_MM_HASH_MATCH 0x02
938#define I40E_AQC_MM_ERR_NO_RES 0xFF
939 u8 reserved1[3];
940};
941
/* completion for Add/Remove MAC-VLAN (0x0250/0x0251) */
struct i40e_aqc_add_remove_macvlan_completion {
	__le16 perfect_mac_used;
	__le16 perfect_mac_free;
	__le16 unicast_hash_free;
	__le16 multicast_hash_free;
	__le32 addr_high;
	__le32 addr_low;
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);

/* Remove MAC-VLAN (indirect 0x0251)
 * uses i40e_aqc_macvlan for the descriptor
 * data points to an array of num_addresses of elements
 */

struct i40e_aqc_remove_macvlan_element_data {
	u8 mac_addr[6];
	__le16 vlan_tag;
	u8 flags;
#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH	0x01
#define I40E_AQC_MACVLAN_DEL_HASH_MATCH		0x02
#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN	0x08
#define I40E_AQC_MACVLAN_DEL_ALL_VSIS		0x10
	u8 reserved[3];
	/* reply section */
	u8 error_code;
#define I40E_AQC_REMOVE_MACVLAN_SUCCESS		0x0
#define I40E_AQC_REMOVE_MACVLAN_FAIL		0xFF
	u8 reply_reserved[3];
};
973
/* Add VLAN (indirect 0x0252)
 * Remove VLAN (indirect 0x0253)
 * use the generic i40e_aqc_macvlan for the command
 */
struct i40e_aqc_add_remove_vlan_element_data {
	__le16 vlan_tag;
	u8 vlan_flags;
/* flags for add VLAN */
#define I40E_AQC_ADD_VLAN_LOCAL			0x1
#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT		1
#define I40E_AQC_ADD_PVLAN_TYPE_MASK	(0x3 << \
						I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR		0x0
#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY		0x2
#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY	0x4
#define I40E_AQC_VLAN_PTYPE_SHIFT		3
#define I40E_AQC_VLAN_PTYPE_MASK	(0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI		0x0
#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI		0x8
#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI	0x10
#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI	0x18
/* flags for remove VLAN */
#define I40E_AQC_REMOVE_VLAN_ALL	0x1
	u8 reserved;
	u8 result;
/* flags for add VLAN */
#define I40E_AQC_ADD_VLAN_SUCCESS	0x0
#define I40E_AQC_ADD_VLAN_FAIL_REQUEST	0xFE
#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE	0xFF
/* flags for remove VLAN */
#define I40E_AQC_REMOVE_VLAN_SUCCESS	0x0
#define I40E_AQC_REMOVE_VLAN_FAIL	0xFF
	u8 reserved1[3];
};

struct i40e_aqc_add_remove_vlan_completion {
	u8 reserved[4];
	__le16 vlans_used;
	__le16 vlans_free;
	__le32 addr_high;
	__le32 addr_low;
};

/* Set VSI Promiscuous Modes (direct 0x0254) */
struct i40e_aqc_set_vsi_promiscuous_modes {
	__le16 promiscuous_flags;	/* desired state of each mode */
	__le16 valid_flags;		/* which promiscuous_flags bits to apply */
/* flags used for both fields above */
#define I40E_AQC_SET_VSI_PROMISC_UNICAST	0x01
#define I40E_AQC_SET_VSI_PROMISC_MULTICAST	0x02
#define I40E_AQC_SET_VSI_PROMISC_BROADCAST	0x04
#define I40E_AQC_SET_VSI_DEFAULT		0x08
#define I40E_AQC_SET_VSI_PROMISC_VLAN		0x10
	__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK		0x3FF
	u8 reserved[10];
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
1033
/* Add S/E-tag command (direct 0x0255)
 * Uses generic i40e_aqc_add_remove_tag_completion for completion
 */
struct i40e_aqc_add_tag {
	__le16 flags;
#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE		0x0001
	__le16 seid;
#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT	0
#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK	(0x3FF << \
					I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
	__le16 tag;
	__le16 queue_number;
	u8 reserved[8];
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);

struct i40e_aqc_add_remove_tag_completion {
	u8 reserved[12];
	__le16 tags_used;
	__le16 tags_free;
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
1058
1059/* Remove S/E-tag command (direct 0x0256)
1060 * Uses generic i40e_aqc_add_remove_tag_completion for completion
1061 */
1062struct i40e_aqc_remove_tag {
1063 __le16 seid;
1064#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
1065#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
1066 I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
1067 __le16 tag;
1068 u8 reserved[12];
1069};
1070
/* Add multicast E-Tag (direct 0x0257)
 * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
 * and no external data
 */
struct i40e_aqc_add_remove_mcast_etag {
	__le16 pv_seid;
	__le16 etag;
	u8 num_unicast_etags;
	u8 reserved[3];
	__le32 addr_high;          /* address of array of 2-byte s-tags */
	__le32 addr_low;
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);

struct i40e_aqc_add_remove_mcast_etag_completion {
	u8 reserved[4];
	__le16 mcast_etags_used;
	__le16 mcast_etags_free;
	__le32 addr_high;
	__le32 addr_low;

};

I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);

/* Update S/E-Tag (direct 0x0259) */
struct i40e_aqc_update_tag {
	__le16 seid;
#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT	0
#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK	(0x3FF << \
					I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
	__le16 old_tag;
	__le16 new_tag;
	u8 reserved[10];
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);

struct i40e_aqc_update_tag_completion {
	u8 reserved[12];
	__le16 tags_used;
	__le16 tags_free;
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
1117
/* Add Control Packet filter (direct 0x025A)
 * Remove Control Packet filter (direct 0x025B)
 * uses the i40e_aqc_add_oveb_cloud,
 * and the generic direct completion structure
 */
struct i40e_aqc_add_remove_control_packet_filter {
	u8 mac[6];
	__le16 etype;	/* ethertype to match */
	__le16 flags;
#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC	0x0001
#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP		0x0002
#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE	0x0004
#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX		0x0008
#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX		0x0000
	__le16 seid;
#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT	0
#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK	(0x3FF << \
				I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
	__le16 queue;
	u8 reserved[2];
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);

struct i40e_aqc_add_remove_control_packet_filter_completion {
	__le16 mac_etype_used;
	__le16 etype_used;
	__le16 mac_etype_free;
	__le16 etype_free;
	u8 reserved[8];
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
1151
/* Add Cloud filters (indirect 0x025C)
 * Remove Cloud filters (indirect 0x025D)
 * uses the i40e_aqc_add_remove_cloud_filters,
 * and the generic indirect completion structure
 */
struct i40e_aqc_add_remove_cloud_filters {
	u8 num_filters;		/* elements in the indirect buffer */
	u8 reserved;
	__le16 seid;
#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT	0
#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK	(0x3FF << \
					I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
	u8 reserved2[4];
	__le32 addr_high;	/* buffer address, upper 32 bits */
	__le32 addr_low;	/* buffer address, lower 32 bits */
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);

struct i40e_aqc_add_remove_cloud_filters_element_data {
	u8 outer_mac[6];
	u8 inner_mac[6];
	__le16 inner_vlan;
	union {
		struct {
			u8 reserved[12];
			u8 data[4];
		} v4;
		struct {
			u8 data[16];
		} v6;
	} ipaddr;
	__le16 flags;
#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT		0
#define I40E_AQC_ADD_CLOUD_FILTER_MASK	(0x3F << \
					I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
#define I40E_AQC_ADD_CLOUD_FILTER_OIP		0x0001
#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE	0x0002
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN	0x0003
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE	0x0004
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID	0x0006
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL	0x0007
/* 0x0008 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_OMAC		0x0009
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC		0x000A
#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE	0x0080
#define I40E_AQC_ADD_CLOUD_VNK_SHIFT		6
#define I40E_AQC_ADD_CLOUD_VNK_MASK		0x00C0
#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4		0
#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6		0x0100
	__le32 key_low;
	__le32 key_high;
	__le16 queue_number;
#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT		0
#define I40E_AQC_ADD_CLOUD_QUEUE_MASK		(0x3F << \
					I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
	u8 reserved[14];
	/* response section */
	u8 allocation_result;
#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS	0x0
#define I40E_AQC_ADD_CLOUD_FILTER_FAIL		0xFF
	u8 response_reserved[7];
};

struct i40e_aqc_remove_cloud_filters_completion {
	__le16 perfect_ovlan_used;
	__le16 perfect_ovlan_free;
	__le16 vlan_used;
	__le16 vlan_free;
	__le32 addr_high;
	__le32 addr_low;
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
1226
/* Add Mirror Rule (indirect or direct 0x0260)
 * Delete Mirror Rule (indirect or direct 0x0261)
 * note: some rule types (4,5) do not use an external buffer.
 *       take care to set the flags correctly.
 */
struct i40e_aqc_add_delete_mirror_rule {
	__le16 seid;
	__le16 rule_type;
#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT		0
#define I40E_AQC_MIRROR_RULE_TYPE_MASK		(0x7 << \
						I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS	1
#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS	2
#define I40E_AQC_MIRROR_RULE_TYPE_VLAN		3
#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS	4
#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS	5
	__le16 num_entries;
	__le16 destination;  /* VSI for add, rule id for delete */
	__le32 addr_high;    /* address of array of 2-byte VSI or VLAN ids */
	__le32 addr_low;
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);

struct i40e_aqc_add_delete_mirror_rule_completion {
	u8 reserved[2];
	__le16 rule_id;  /* only used on add */
	__le16 mirror_rules_used;
	__le16 mirror_rules_free;
	__le32 addr_high;
	__le32 addr_low;
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
1261
/* Set Storm Control Configuration (direct 0x0280)
 * Get Storm Control Configuration (direct 0x0281)
 *    the command and response use the same descriptor structure
 */
struct i40e_aqc_set_get_storm_control_config {
	__le32 broadcast_threshold;
	__le32 multicast_threshold;
	__le32 control_flags;
#define I40E_AQC_STORM_CONTROL_MDIPW		0x01
#define I40E_AQC_STORM_CONTROL_MDICW		0x02
#define I40E_AQC_STORM_CONTROL_BDIPW		0x04
#define I40E_AQC_STORM_CONTROL_BDICW		0x08
#define I40E_AQC_STORM_CONTROL_BIDU		0x10
#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT	8
#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK	(0x3FF << \
					I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
	u8 reserved[4];
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
1282
/* DCB 0x03xx*/

/* PFC Ignore (direct 0x0301)
 *    the command and response use the same descriptor structure
 */
struct i40e_aqc_pfc_ignore {
	u8 tc_bitmap;		/* traffic classes affected */
	u8 command_flags;	/* unused on response */
#define I40E_AQC_PFC_IGNORE_SET		0x80
#define I40E_AQC_PFC_IGNORE_CLEAR	0x0
	u8 reserved[14];
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);

/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
 * with no parameters
 */
1301
1302/* TX scheduler 0x04xx */
1303
1304/* Almost all the indirect commands use
1305 * this generic struct to pass the SEID in param0
1306 */
1307struct i40e_aqc_tx_sched_ind {
1308 __le16 vsi_seid;
1309 u8 reserved[6];
1310 __le32 addr_high;
1311 __le32 addr_low;
1312};
1313
1314I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
1315
1316/* Several commands respond with a set of queue set handles */
1317struct i40e_aqc_qs_handles_resp {
1318 __le16 qs_handles[8];
1319};
1320
1321/* Configure VSI BW limits (direct 0x0400) */
1322struct i40e_aqc_configure_vsi_bw_limit {
1323 __le16 vsi_seid;
1324 u8 reserved[2];
1325 __le16 credit;
1326 u8 reserved1[2];
1327 u8 max_credit; /* 0-3, limit = 2^max */
1328 u8 reserved2[7];
1329};
1330
1331I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
1332
1333/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
1334 * responds with i40e_aqc_qs_handles_resp
1335 */
1336struct i40e_aqc_configure_vsi_ets_sla_bw_data {
1337 u8 tc_valid_bits;
1338 u8 reserved[15];
1339 __le16 tc_bw_credits[8]; /* FW writesback QS handles here */
1340
1341 /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
1342 __le16 tc_bw_max[2];
1343 u8 reserved1[28];
1344};
1345
1346/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
1347 * responds with i40e_aqc_qs_handles_resp
1348 */
1349struct i40e_aqc_configure_vsi_tc_bw_data {
1350 u8 tc_valid_bits;
1351 u8 reserved[3];
1352 u8 tc_bw_credits[8];
1353 u8 reserved1[4];
1354 __le16 qs_handles[8];
1355};
1356
1357/* Query vsi bw configuration (indirect 0x0408) */
1358struct i40e_aqc_query_vsi_bw_config_resp {
1359 u8 tc_valid_bits;
1360 u8 tc_suspended_bits;
1361 u8 reserved[14];
1362 __le16 qs_handles[8];
1363 u8 reserved1[4];
1364 __le16 port_bw_limit;
1365 u8 reserved2[2];
1366 u8 max_bw; /* 0-3, limit = 2^max */
1367 u8 reserved3[23];
1368};
1369
1370/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
1371struct i40e_aqc_query_vsi_ets_sla_config_resp {
1372 u8 tc_valid_bits;
1373 u8 reserved[3];
1374 u8 share_credits[8];
1375 __le16 credits[8];
1376
1377 /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
1378 __le16 tc_bw_max[2];
1379};
1380
1381/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
1382struct i40e_aqc_configure_switching_comp_bw_limit {
1383 __le16 seid;
1384 u8 reserved[2];
1385 __le16 credit;
1386 u8 reserved1[2];
1387 u8 max_bw; /* 0-3, limit = 2^max */
1388 u8 reserved2[7];
1389};
1390
1391I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
1392
1393/* Enable Physical Port ETS (indirect 0x0413)
1394 * Modify Physical Port ETS (indirect 0x0414)
1395 * Disable Physical Port ETS (indirect 0x0415)
1396 */
1397struct i40e_aqc_configure_switching_comp_ets_data {
1398 u8 reserved[4];
1399 u8 tc_valid_bits;
1400 u8 reserved1;
1401 u8 tc_strict_priority_flags;
1402 u8 reserved2[17];
1403 u8 tc_bw_share_credits[8];
1404 u8 reserved3[96];
1405};
1406
1407/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
1408struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
1409 u8 tc_valid_bits;
1410 u8 reserved[15];
1411 __le16 tc_bw_credit[8];
1412
1413 /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
1414 __le16 tc_bw_max[2];
1415 u8 reserved1[28];
1416};
1417
1418/* Configure Switching Component Bandwidth Allocation per Tc
1419 * (indirect 0x0417)
1420 */
1421struct i40e_aqc_configure_switching_comp_bw_config_data {
1422 u8 tc_valid_bits;
1423 u8 reserved[2];
1424 u8 absolute_credits; /* bool */
1425 u8 tc_bw_share_credits[8];
1426 u8 reserved1[20];
1427};
1428
1429/* Query Switching Component Configuration (indirect 0x0418) */
1430struct i40e_aqc_query_switching_comp_ets_config_resp {
1431 u8 tc_valid_bits;
1432 u8 reserved[35];
1433 __le16 port_bw_limit;
1434 u8 reserved1[2];
1435 u8 tc_bw_max; /* 0-3, limit = 2^max */
1436 u8 reserved2[23];
1437};
1438
1439/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
1440struct i40e_aqc_query_port_ets_config_resp {
1441 u8 reserved[4];
1442 u8 tc_valid_bits;
1443 u8 reserved1;
1444 u8 tc_strict_priority_bits;
1445 u8 reserved2;
1446 u8 tc_bw_share_credits[8];
1447 __le16 tc_bw_limits[8];
1448
1449 /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
1450 __le16 tc_bw_max[2];
1451 u8 reserved3[32];
1452};
1453
1454/* Query Switching Component Bandwidth Allocation per Traffic Type
1455 * (indirect 0x041A)
1456 */
1457struct i40e_aqc_query_switching_comp_bw_config_resp {
1458 u8 tc_valid_bits;
1459 u8 reserved[2];
1460 u8 absolute_credits_enable; /* bool */
1461 u8 tc_bw_share_credits[8];
1462 __le16 tc_bw_limits[8];
1463
1464 /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
1465 __le16 tc_bw_max[2];
1466};
1467
1468/* Suspend/resume port TX traffic
1469 * (direct 0x041B and 0x041C) uses the generic SEID struct
1470 */
1471
1472/* Get and set the active HMC resource profile and status.
1473 * (direct 0x0500) and (direct 0x0501)
1474 */
1475struct i40e_aq_get_set_hmc_resource_profile {
1476 u8 pm_profile;
1477 u8 pe_vf_enabled;
1478 u8 reserved[14];
1479};
1480
1481I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
1482
1483enum i40e_aq_hmc_profile {
1484 /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
1485 I40E_HMC_PROFILE_DEFAULT = 1,
1486 I40E_HMC_PROFILE_FAVOR_VF = 2,
1487 I40E_HMC_PROFILE_EQUAL = 3,
1488};
1489
1490#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
1491#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
1492
1493/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
1494
1495/* set in param0 for get phy abilities to report qualified modules */
1496#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
1497#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
1498
1499enum i40e_aq_phy_type {
1500 I40E_PHY_TYPE_SGMII = 0x0,
1501 I40E_PHY_TYPE_1000BASE_KX = 0x1,
1502 I40E_PHY_TYPE_10GBASE_KX4 = 0x2,
1503 I40E_PHY_TYPE_10GBASE_KR = 0x3,
1504 I40E_PHY_TYPE_40GBASE_KR4 = 0x4,
1505 I40E_PHY_TYPE_XAUI = 0x5,
1506 I40E_PHY_TYPE_XFI = 0x6,
1507 I40E_PHY_TYPE_SFI = 0x7,
1508 I40E_PHY_TYPE_XLAUI = 0x8,
1509 I40E_PHY_TYPE_XLPPI = 0x9,
1510 I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
1511 I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
1512 I40E_PHY_TYPE_100BASE_TX = 0x11,
1513 I40E_PHY_TYPE_1000BASE_T = 0x12,
1514 I40E_PHY_TYPE_10GBASE_T = 0x13,
1515 I40E_PHY_TYPE_10GBASE_SR = 0x14,
1516 I40E_PHY_TYPE_10GBASE_LR = 0x15,
1517 I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16,
1518 I40E_PHY_TYPE_10GBASE_CR1 = 0x17,
1519 I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
1520 I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
1521 I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
1522 I40E_PHY_TYPE_20GBASE_KR2 = 0x1B,
1523 I40E_PHY_TYPE_MAX
1524};
1525
1526#define I40E_LINK_SPEED_100MB_SHIFT 0x1
1527#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
1528#define I40E_LINK_SPEED_10GB_SHIFT 0x3
1529#define I40E_LINK_SPEED_40GB_SHIFT 0x4
1530#define I40E_LINK_SPEED_20GB_SHIFT 0x5
1531
1532enum i40e_aq_link_speed {
1533 I40E_LINK_SPEED_UNKNOWN = 0,
1534 I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
1535 I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
1536 I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
1537 I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
1538 I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
1539};
1540
1541struct i40e_aqc_module_desc {
1542 u8 oui[3];
1543 u8 reserved1;
1544 u8 part_number[16];
1545 u8 revision[4];
1546 u8 reserved2[8];
1547};
1548
1549struct i40e_aq_get_phy_abilities_resp {
1550 __le32 phy_type; /* bitmap using the above enum for offsets */
1551 u8 link_speed; /* bitmap using the above enum */
1552 u8 abilities;
1553#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
1554#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
1555#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
1556#define I40E_AQ_PHY_FLAG_AN_SHIFT 3
1557#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
1558#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
1559#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
1560#define I40E_AQ_PHY_FLAG_AN_ON 0x02
1561#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
1562 __le16 eee_capability;
1563#define I40E_AQ_EEE_100BASE_TX 0x0002
1564#define I40E_AQ_EEE_1000BASE_T 0x0004
1565#define I40E_AQ_EEE_10GBASE_T 0x0008
1566#define I40E_AQ_EEE_1000BASE_KX 0x0010
1567#define I40E_AQ_EEE_10GBASE_KX4 0x0020
1568#define I40E_AQ_EEE_10GBASE_KR 0x0040
1569 __le32 eeer_val;
1570 u8 d3_lpan;
1571#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
1572 u8 reserved[3];
1573 u8 phy_id[4];
1574 u8 module_type[3];
1575 u8 qualified_module_count;
1576#define I40E_AQ_PHY_MAX_QMS 16
1577 struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
1578};
1579
1580/* Set PHY Config (direct 0x0601) */
1581struct i40e_aq_set_phy_config { /* same bits as above in all */
1582 __le32 phy_type;
1583 u8 link_speed;
1584 u8 abilities;
1585 __le16 eee_capability;
1586 __le32 eeer;
1587 u8 low_power_ctrl;
1588 u8 reserved[3];
1589};
1590
1591I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
1592
1593/* Set MAC Config command data structure (direct 0x0603) */
1594struct i40e_aq_set_mac_config {
1595 __le16 max_frame_size;
1596 u8 params;
1597#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
1598#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
1599#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
1600#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
1601#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
1602#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
1603#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
1604#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
1605#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
1606#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
1607#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
1608#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
1609#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
1610#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
1611 u8 tx_timer_priority; /* bitmap */
1612 __le16 tx_timer_value;
1613 __le16 fc_refresh_threshold;
1614 u8 reserved[8];
1615};
1616
1617I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
1618
1619/* Restart Auto-Negotiation (direct 0x605) */
1620struct i40e_aqc_set_link_restart_an {
1621 u8 command;
1622#define I40E_AQ_PHY_RESTART_AN 0x02
1623#define I40E_AQ_PHY_LINK_ENABLE 0x04
1624 u8 reserved[15];
1625};
1626
1627I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
1628
1629/* Get Link Status cmd & response data structure (direct 0x0607) */
1630struct i40e_aqc_get_link_status {
1631 __le16 command_flags; /* only field set on command */
1632#define I40E_AQ_LSE_MASK 0x3
1633#define I40E_AQ_LSE_NOP 0x0
1634#define I40E_AQ_LSE_DISABLE 0x2
1635#define I40E_AQ_LSE_ENABLE 0x3
1636/* only response uses this flag */
1637#define I40E_AQ_LSE_IS_ENABLED 0x1
1638 u8 phy_type; /* i40e_aq_phy_type */
1639 u8 link_speed; /* i40e_aq_link_speed */
1640 u8 link_info;
1641#define I40E_AQ_LINK_UP 0x01
1642#define I40E_AQ_LINK_FAULT 0x02
1643#define I40E_AQ_LINK_FAULT_TX 0x04
1644#define I40E_AQ_LINK_FAULT_RX 0x08
1645#define I40E_AQ_LINK_FAULT_REMOTE 0x10
1646#define I40E_AQ_MEDIA_AVAILABLE 0x40
1647#define I40E_AQ_SIGNAL_DETECT 0x80
1648 u8 an_info;
1649#define I40E_AQ_AN_COMPLETED 0x01
1650#define I40E_AQ_LP_AN_ABILITY 0x02
1651#define I40E_AQ_PD_FAULT 0x04
1652#define I40E_AQ_FEC_EN 0x08
1653#define I40E_AQ_PHY_LOW_POWER 0x10
1654#define I40E_AQ_LINK_PAUSE_TX 0x20
1655#define I40E_AQ_LINK_PAUSE_RX 0x40
1656#define I40E_AQ_QUALIFIED_MODULE 0x80
1657 u8 ext_info;
1658#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
1659#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
1660#define I40E_AQ_LINK_TX_SHIFT 0x02
1661#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
1662#define I40E_AQ_LINK_TX_ACTIVE 0x00
1663#define I40E_AQ_LINK_TX_DRAINED 0x01
1664#define I40E_AQ_LINK_TX_FLUSHED 0x03
1665 u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
1666 __le16 max_frame_size;
1667 u8 config;
1668#define I40E_AQ_CONFIG_CRC_ENA 0x04
1669#define I40E_AQ_CONFIG_PACING_MASK 0x78
1670 u8 reserved[5];
1671};
1672
1673I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
1674
1675/* Set event mask command (direct 0x613) */
1676struct i40e_aqc_set_phy_int_mask {
1677 u8 reserved[8];
1678 __le16 event_mask;
1679#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
1680#define I40E_AQ_EVENT_MEDIA_NA 0x0004
1681#define I40E_AQ_EVENT_LINK_FAULT 0x0008
1682#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
1683#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
1684#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
1685#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
1686#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
1687#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
1688 u8 reserved1[6];
1689};
1690
1691I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
1692
1693/* Get Local AN advt register (direct 0x0614)
1694 * Set Local AN advt register (direct 0x0615)
1695 * Get Link Partner AN advt register (direct 0x0616)
1696 */
1697struct i40e_aqc_an_advt_reg {
1698 __le32 local_an_reg0;
1699 __le16 local_an_reg1;
1700 u8 reserved[10];
1701};
1702
1703I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
1704
1705/* Set Loopback mode (0x0618) */
1706struct i40e_aqc_set_lb_mode {
1707 __le16 lb_mode;
1708#define I40E_AQ_LB_PHY_LOCAL 0x01
1709#define I40E_AQ_LB_PHY_REMOTE 0x02
1710#define I40E_AQ_LB_MAC_LOCAL 0x04
1711 u8 reserved[14];
1712};
1713
1714I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
1715
1716/* Set PHY Reset command (0x0622) */
1717struct i40e_aqc_set_phy_reset {
1718 u8 reset_flags;
1719#define I40E_AQ_PHY_RESET_REQUEST 0x02
1720 u8 reserved[15];
1721};
1722
1723I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset);
1724
/* Selector for which PHY register bank an AQ register access targets —
 * presumably consumed by the PHY register read/write commands; confirm
 * against the command that uses it.
 */
1725enum i40e_aq_phy_reg_type {
1726 I40E_AQC_PHY_REG_INTERNAL = 0x1,
 /* NOTE(review): "EXERNAL" in the two names below looks like a typo for
  * "EXTERNAL"; the identifiers are part of the driver API, so renaming
  * them requires updating every user at once — flagging only.
  */
1727 I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
1728 I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
1729};
1730
1731/* NVM Read command (indirect 0x0701)
1732 * NVM Erase commands (direct 0x0702)
1733 * NVM Update commands (indirect 0x0703)
1734 */
1735struct i40e_aqc_nvm_update {
1736 u8 command_flags;
1737#define I40E_AQ_NVM_LAST_CMD 0x01
1738#define I40E_AQ_NVM_FLASH_ONLY 0x80
1739 u8 module_pointer;
1740 __le16 length; /* transfer length — presumably bytes; confirm vs FW spec */
1741 __le32 offset;
 /* high/low halves of the indirect data buffer address, matching the
  * addr_high/addr_low convention used throughout this file
  */
1742 __le32 addr_high;
1743 __le32 addr_low;
1744};
1745
1746I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
1747
1748/* Send to PF command (indirect 0x0801) id is only used by PF
1749 * Send to VF command (indirect 0x0802) id is only used by PF
1750 * Send to Peer PF command (indirect 0x0803)
1751 */
1752struct i40e_aqc_pf_vf_message {
1753 __le32 id;
1754 u8 reserved[4];
1755 __le32 addr_high;
1756 __le32 addr_low;
1757};
1758
1759I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
1760
1761/* Alternate structure */
1762
1763/* Direct write (direct 0x0900)
1764 * Direct read (direct 0x0902)
1765 */
1766struct i40e_aqc_alternate_write {
1767 __le32 address0;
1768 __le32 data0;
1769 __le32 address1;
1770 __le32 data1;
1771};
1772
1773I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
1774
1775/* Indirect write (indirect 0x0901)
1776 * Indirect read (indirect 0x0903)
1777 */
1778
1779struct i40e_aqc_alternate_ind_write {
1780 __le32 address;
1781 __le32 length;
1782 __le32 addr_high;
1783 __le32 addr_low;
1784};
1785
1786I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
1787
1788/* Done alternate write (direct 0x0904)
1789 * uses i40e_aq_desc
1790 */
1791struct i40e_aqc_alternate_write_done {
1792 __le16 cmd_flags;
1793#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
1794#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
1795#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
1796#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
1797 u8 reserved[14];
1798};
1799
1800I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
1801
1802/* Set OEM mode (direct 0x0905) */
1803struct i40e_aqc_alternate_set_mode {
1804 __le32 mode;
1805#define I40E_AQ_ALTERNATE_MODE_NONE 0
1806#define I40E_AQ_ALTERNATE_MODE_OEM 1
1807 u8 reserved[12];
1808};
1809
1810I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
1811
1812/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
1813
1814/* async events 0x10xx */
1815
1816/* Lan Queue Overflow Event (direct, 0x1001) */
1817struct i40e_aqc_lan_overflow {
1818 __le32 prtdcb_rupto;
1819 __le32 otx_ctl;
1820 u8 reserved[8];
1821};
1822
1823I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
1824
1825/* Get LLDP MIB (indirect 0x0A00) */
1826struct i40e_aqc_lldp_get_mib {
1827 u8 type;
1828 u8 reserved1;
1829#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
1830#define I40E_AQ_LLDP_MIB_LOCAL 0x0
1831#define I40E_AQ_LLDP_MIB_REMOTE 0x1
1832#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
1833#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
1834#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
1835#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
1836#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
1837#define I40E_AQ_LLDP_TX_SHIFT 0x4
1838#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
1839/* TX pause flags use I40E_AQ_LINK_TX_* above */
1840 __le16 local_len;
1841 __le16 remote_len;
1842 u8 reserved2[2];
1843 __le32 addr_high;
1844 __le32 addr_low;
1845};
1846
1847I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
1848
1849/* Configure LLDP MIB Change Event (direct 0x0A01)
1850 * also used for the event (with type in the command field)
1851 */
1852struct i40e_aqc_lldp_update_mib {
1853 u8 command;
1854#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
1855#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
1856 u8 reserved[7];
1857 __le32 addr_high;
1858 __le32 addr_low;
1859};
1860
1861I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
1862
1863/* Add LLDP TLV (indirect 0x0A02)
1864 * Delete LLDP TLV (indirect 0x0A04)
1865 */
1866struct i40e_aqc_lldp_add_tlv {
1867 u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
1868 u8 reserved1[1];
1869 __le16 len;
1870 u8 reserved2[4];
1871 __le32 addr_high;
1872 __le32 addr_low;
1873};
1874
1875I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
1876
1877/* Update LLDP TLV (indirect 0x0A03) */
1878struct i40e_aqc_lldp_update_tlv {
1879 u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
1880 u8 reserved;
1881 __le16 old_len;
1882 __le16 new_offset;
1883 __le16 new_len;
1884 __le32 addr_high;
1885 __le32 addr_low;
1886};
1887
1888I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
1889
1890/* Stop LLDP (direct 0x0A05) */
1891struct i40e_aqc_lldp_stop {
1892 u8 command;
1893#define I40E_AQ_LLDP_AGENT_STOP 0x0
1894#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
1895 u8 reserved[15];
1896};
1897
1898I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
1899
1900/* Start LLDP (direct 0x0A06) */
1901
1902struct i40e_aqc_lldp_start {
1903 u8 command;
1904#define I40E_AQ_LLDP_AGENT_START 0x1
1905 u8 reserved[15];
1906};
1907
1908I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
1909
1910/* Apply MIB changes (0x0A07)
1911 * uses the generic struc as it contains no data
1912 */
1913
1914/* Add Udp Tunnel command and completion (direct 0x0B00) */
1915struct i40e_aqc_add_udp_tunnel {
1916 __le16 udp_port;
1917 u8 header_len; /* in DWords, 1 to 15 */
1918 u8 protocol_index;
1919#define I40E_AQC_TUNNEL_TYPE_MAC 0x0
1920#define I40E_AQC_TUNNEL_TYPE_UDP 0x1
1921 u8 reserved[12];
1922};
1923
1924I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
1925
1926/* remove UDP Tunnel command (0x0B01) */
1927struct i40e_aqc_remove_udp_tunnel {
1928 u8 reserved[2];
1929 u8 index; /* 0 to 15 */
1930 u8 pf_filters;
1931 u8 total_filters;
1932 u8 reserved2[11];
1933};
1934
1935I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
1936
1937struct i40e_aqc_del_udp_tunnel_completion {
1938 __le16 udp_port;
1939 u8 index; /* 0 to 15 */
1940 u8 multiple_entries;
1941 u8 tunnels_used;
1942 u8 reserved;
1943 u8 tunnels_free;
1944 u8 reserved1[9];
1945};
1946
1947I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
1948
1949/* tunnel key structure 0x0B10 */
1950struct i40e_aqc_tunnel_key_structure {
 /* key1/key2 off+len pairs — presumably byte offset and length of the
  * two tunnel key fields within the packet; confirm against the FW spec
  */
1951 __le16 key1_off;
1952 __le16 key1_len;
1953 __le16 key2_off;
1954 __le16 key2_len;
1955 __le16 flags;
1956#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
1957/* response flags */
1958#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
1959#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
1960#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
 /* NOTE(review): "resreved" is a typo for "reserved"; the member is only
  * padding, but renaming still needs a sweep for users of the field name.
  */
1961 u8 resreved[6];
1962};
1963
1964I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
1965
1966/* OEM mode commands (direct 0xFE0x) */
1967struct i40e_aqc_oem_param_change {
1968 __le32 param_type;
1969#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
1970#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
1971#define I40E_AQ_OEM_PARAM_MAC 2
1972 __le32 param_value1;
1973 u8 param_value2[8];
1974};
1975
1976I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
1977
1978struct i40e_aqc_oem_state_change {
1979 __le32 state;
1980#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
1981#define I40E_AQ_OEM_STATE_LINK_UP 0x1
1982 u8 reserved[12];
1983};
1984
1985I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
1986
1987/* debug commands */
1988
1989/* get device id (0xFF00) uses the generic structure */
1990
1991/* set test more (0xFF01, internal) */
1992
1993struct i40e_acq_set_test_mode {
1994 u8 mode;
1995#define I40E_AQ_TEST_PARTIAL 0
1996#define I40E_AQ_TEST_FULL 1
1997#define I40E_AQ_TEST_NVM 2
1998 u8 reserved[3];
1999 u8 command;
2000#define I40E_AQ_TEST_OPEN 0
2001#define I40E_AQ_TEST_CLOSE 1
2002#define I40E_AQ_TEST_INC 2
2003 u8 reserved2[3];
2004 __le32 address_high;
2005 __le32 address_low;
2006};
2007
2008I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
2009
2010/* Debug Read Register command (0xFF03)
2011 * Debug Write Register command (0xFF04)
2012 */
2013struct i40e_aqc_debug_reg_read_write {
2014 __le32 reserved;
2015 __le32 address;
2016 __le32 value_high;
2017 __le32 value_low;
2018};
2019
2020I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
2021
2022/* Scatter/gather Reg Read (indirect 0xFF05)
2023 * Scatter/gather Reg Write (indirect 0xFF06)
2024 */
2025
2026/* i40e_aq_desc is used for the command */
2027struct i40e_aqc_debug_reg_sg_element_data {
2028 __le32 address;
2029 __le32 value;
2030};
2031
2032/* Debug Modify register (direct 0xFF07) */
2033struct i40e_aqc_debug_modify_reg {
2034 __le32 address;
2035 __le32 value;
2036 __le32 clear_mask;
2037 __le32 set_mask;
2038};
2039
2040I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
2041
2042/* dump internal data (0xFF08, indirect) */
2043
2044#define I40E_AQ_CLUSTER_ID_AUX 0
2045#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1
2046#define I40E_AQ_CLUSTER_ID_TXSCHED 2
2047#define I40E_AQ_CLUSTER_ID_HMC 3
2048#define I40E_AQ_CLUSTER_ID_MAC0 4
2049#define I40E_AQ_CLUSTER_ID_MAC1 5
2050#define I40E_AQ_CLUSTER_ID_MAC2 6
2051#define I40E_AQ_CLUSTER_ID_MAC3 7
2052#define I40E_AQ_CLUSTER_ID_DCB 8
2053#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
2054#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
2055
2056struct i40e_aqc_debug_dump_internals {
2057 u8 cluster_id;
2058 u8 table_id;
2059 __le16 data_size;
2060 __le32 idx;
2061 __le32 address_high;
2062 __le32 address_low;
2063};
2064
2065I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
2066
2067struct i40e_aqc_debug_modify_internals {
2068 u8 cluster_id;
2069 u8 cluster_specific_params[7];
2070 __le32 address_high;
2071 __le32 address_low;
2072};
2073
2074I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
2075
2076#endif
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
new file mode 100644
index 000000000000..3b1cc214f9dc
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -0,0 +1,59 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#ifndef _I40E_ALLOC_H_
29#define _I40E_ALLOC_H_
30
31struct i40e_hw;
32
33/* Memory allocation types */
34enum i40e_memory_type {
35 i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
36 i40e_mem_asq_buf = 1, /* presumably ASQ indirect command buffer — confirm */
37 i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
38 i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
39 i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
40 i40e_mem_pd = 5, /* Page Descriptor */
41 i40e_mem_bp = 6, /* Backing Page - 4KB */
42 i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
43 i40e_mem_reserved
44};
45
46/* prototype for functions used for dynamic memory allocation */
47i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
48 struct i40e_dma_mem *mem,
49 enum i40e_memory_type type,
50 u64 size, u32 alignment);
51i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
52 struct i40e_dma_mem *mem);
53i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
54 struct i40e_virt_mem *mem,
55 u32 size);
56i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
57 struct i40e_virt_mem *mem);
58
59#endif /* _I40E_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
new file mode 100644
index 000000000000..c21df7bc3b1d
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -0,0 +1,2041 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#include "i40e_type.h"
29#include "i40e_adminq.h"
30#include "i40e_prototype.h"
31#include "i40e_virtchnl.h"
32
33/**
34 * i40e_set_mac_type - Sets MAC type
35 * @hw: pointer to the HW structure
36 *
37 * This function sets the mac type of the adapter based on the
38 * vendor ID and device ID stored in the hw structure.
39 **/
40static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
41{
42 i40e_status status = 0;
43
44 if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
45 switch (hw->device_id) {
46 case I40E_SFP_XL710_DEVICE_ID:
47 case I40E_SFP_X710_DEVICE_ID:
48 case I40E_QEMU_DEVICE_ID:
49 case I40E_KX_A_DEVICE_ID:
50 case I40E_KX_B_DEVICE_ID:
51 case I40E_KX_C_DEVICE_ID:
52 case I40E_KX_D_DEVICE_ID:
53 case I40E_QSFP_A_DEVICE_ID:
54 case I40E_QSFP_B_DEVICE_ID:
55 case I40E_QSFP_C_DEVICE_ID:
56 hw->mac.type = I40E_MAC_XL710;
57 break;
58 case I40E_VF_DEVICE_ID:
59 case I40E_VF_HV_DEVICE_ID:
60 hw->mac.type = I40E_MAC_VF;
61 break;
62 default:
63 hw->mac.type = I40E_MAC_GENERIC;
64 break;
65 }
66 } else {
67 status = I40E_ERR_DEVICE_NOT_SUPPORTED;
68 }
69
70 hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
71 hw->mac.type, status);
72 return status;
73}
74
75/**
76 * i40e_debug_aq
77 * @hw: debug mask related to admin queue
78 * @cap: pointer to adminq command descriptor
79 * @buffer: pointer to command buffer
80 *
81 * Dumps debug log about adminq command with descriptor contents.
82 **/
83void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
84 void *buffer)
85{
86 struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
87 u8 *aq_buffer = (u8 *)buffer;
88 u32 data[4];
89 u32 i = 0;
90
91 if ((!(mask & hw->debug_mask)) || (desc == NULL))
92 return;
93
94 i40e_debug(hw, mask,
95 "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
96 aq_desc->opcode, aq_desc->flags, aq_desc->datalen,
97 aq_desc->retval);
98 i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
99 aq_desc->cookie_high, aq_desc->cookie_low);
100 i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
101 aq_desc->params.internal.param0,
102 aq_desc->params.internal.param1);
103 i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
104 aq_desc->params.external.addr_high,
105 aq_desc->params.external.addr_low);
106
107 if ((buffer != NULL) && (aq_desc->datalen != 0)) {
108 memset(data, 0, sizeof(data));
109 i40e_debug(hw, mask, "AQ CMD Buffer:\n");
110 for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) {
111 data[((i % 16) / 4)] |=
112 ((u32)aq_buffer[i]) << (8 * (i % 4));
113 if ((i % 16) == 15) {
114 i40e_debug(hw, mask,
115 "\t0x%04X %08X %08X %08X %08X\n",
116 i - 15, data[0], data[1], data[2],
117 data[3]);
118 memset(data, 0, sizeof(data));
119 }
120 }
121 if ((i % 16) != 0)
122 i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n",
123 i - (i % 16), data[0], data[1], data[2],
124 data[3]);
125 }
126}
127
128/**
129 * i40e_init_shared_code - Initialize the shared code
130 * @hw: pointer to hardware structure
131 *
132 * This assigns the MAC type and PHY code and inits the NVM.
133 * Does not touch the hardware. This function must be called prior to any
134 * other function in the shared code. The i40e_hw structure should be
135 * memset to 0 prior to calling this function. The following fields in
136 * hw structure should be filled in prior to calling this function:
137 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
138 * subsystem_vendor_id, and revision_id
139 **/
140i40e_status i40e_init_shared_code(struct i40e_hw *hw)
141{
142 i40e_status status = 0;
143 u32 reg;
144
145 hw->phy.get_link_info = true;
146
147 /* Determine port number */
148 reg = rd32(hw, I40E_PFGEN_PORTNUM);
149 reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
150 I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
151 hw->port = (u8)reg;
152
153 i40e_set_mac_type(hw);
154
155 switch (hw->mac.type) {
156 case I40E_MAC_XL710:
157 break;
158 default:
159 return I40E_ERR_DEVICE_NOT_SUPPORTED;
160 break;
161 }
162
163 status = i40e_init_nvm(hw);
164 return status;
165}
166
/**
 * i40e_aq_mac_address_read - Retrieve the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: a return indicator of what addresses were added to the addr store
 * @addrs: the requestor's mac addr store
 * @cmd_details: pointer to command details structure or NULL
 *
 * Issues the mac_address_read AdminQ command; the addresses are returned
 * in the indirect buffer @addrs and the validity flags in the descriptor.
 * NOTE: @flags is written from the response descriptor even when the
 * command fails - callers must check the return status before trusting it.
 **/
static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
				   u16 *flags,
				   struct i40e_aqc_mac_address_read_data *addrs,
				   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	/* command/response fields live in the raw descriptor parameter bytes */
	struct i40e_aqc_mac_address_read *cmd_data =
		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
	/* addresses are returned via the attached buffer */
	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);

	status = i40e_asq_send_command(hw, &desc, addrs,
				       sizeof(*addrs), cmd_details);
	*flags = le16_to_cpu(cmd_data->command_flags);

	return status;
}
193
194/**
195 * i40e_aq_mac_address_write - Change the MAC addresses
196 * @hw: pointer to the hw struct
197 * @flags: indicates which MAC to be written
198 * @mac_addr: address to write
199 * @cmd_details: pointer to command details structure or NULL
200 **/
201i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
202 u16 flags, u8 *mac_addr,
203 struct i40e_asq_cmd_details *cmd_details)
204{
205 struct i40e_aq_desc desc;
206 struct i40e_aqc_mac_address_write *cmd_data =
207 (struct i40e_aqc_mac_address_write *)&desc.params.raw;
208 i40e_status status;
209
210 i40e_fill_default_direct_cmd_desc(&desc,
211 i40e_aqc_opc_mac_address_write);
212 cmd_data->command_flags = cpu_to_le16(flags);
213 memcpy(&cmd_data->mac_sal, &mac_addr[0], 4);
214 memcpy(&cmd_data->mac_sah, &mac_addr[4], 2);
215
216 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
217
218 return status;
219}
220
221/**
222 * i40e_get_mac_addr - get MAC address
223 * @hw: pointer to the HW structure
224 * @mac_addr: pointer to MAC address
225 *
226 * Reads the adapter's MAC address from register
227 **/
228i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
229{
230 struct i40e_aqc_mac_address_read_data addrs;
231 i40e_status status;
232 u16 flags = 0;
233
234 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
235
236 if (flags & I40E_AQC_LAN_ADDR_VALID)
237 memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
238
239 return status;
240}
241
/**
 * i40e_validate_mac_addr - Validate MAC address
 * @mac_addr: pointer to MAC address
 *
 * Tests a MAC address to ensure it is a valid Individual Address:
 * rejects multicast, broadcast and the all-zeros address.
 *
 * NOTE(review): hw_dbg() is passed "hw" although no hw variable is in
 * scope in this function - presumably the debug macro discards its first
 * argument; confirm before enabling hw_dbg output here.
 **/
i40e_status i40e_validate_mac_addr(u8 *mac_addr)
{
	i40e_status status = 0;

	/* Make sure it is not a multicast address */
	if (I40E_IS_MULTICAST(mac_addr)) {
		hw_dbg(hw, "MAC address is multicast\n");
		status = I40E_ERR_INVALID_MAC_ADDR;
	/* Not a broadcast address */
	} else if (I40E_IS_BROADCAST(mac_addr)) {
		hw_dbg(hw, "MAC address is broadcast\n");
		status = I40E_ERR_INVALID_MAC_ADDR;
	/* Reject the zero address */
	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
		hw_dbg(hw, "MAC address is all zeros\n");
		status = I40E_ERR_INVALID_MAC_ADDR;
	}
	return status;
}
268
269/**
270 * i40e_pf_reset - Reset the PF
271 * @hw: pointer to the hardware structure
272 *
273 * Assuming someone else has triggered a global reset,
274 * assure the global reset is complete and then reset the PF
275 **/
276i40e_status i40e_pf_reset(struct i40e_hw *hw)
277{
278 u32 wait_cnt = 0;
279 u32 reg = 0;
280 u32 grst_del;
281
282 /* Poll for Global Reset steady state in case of recent GRST.
283 * The grst delay value is in 100ms units, and we'll wait a
284 * couple counts longer to be sure we don't just miss the end.
285 */
286 grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK
287 >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
288 for (wait_cnt = 0; wait_cnt < grst_del + 2; wait_cnt++) {
289 reg = rd32(hw, I40E_GLGEN_RSTAT);
290 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
291 break;
292 msleep(100);
293 }
294 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
295 hw_dbg(hw, "Global reset polling failed to complete.\n");
296 return I40E_ERR_RESET_FAILED;
297 }
298
299 /* Determine the PF number based on the PCI fn */
300 hw->pf_id = (u8)hw->bus.func;
301
302 /* If there was a Global Reset in progress when we got here,
303 * we don't need to do the PF Reset
304 */
305 if (!wait_cnt) {
306 reg = rd32(hw, I40E_PFGEN_CTRL);
307 wr32(hw, I40E_PFGEN_CTRL,
308 (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
309 for (wait_cnt = 0; wait_cnt < 10; wait_cnt++) {
310 reg = rd32(hw, I40E_PFGEN_CTRL);
311 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
312 break;
313 usleep_range(1000, 2000);
314 }
315 if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
316 hw_dbg(hw, "PF reset polling failed to complete.\n");
317 return I40E_ERR_RESET_FAILED;
318 }
319 }
320
321 i40e_clear_pxe_mode(hw);
322 return 0;
323}
324
/**
 * i40e_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 **/
void i40e_clear_pxe_mode(struct i40e_hw *hw)
{
	u32 reg;

	/* Clear single descriptor fetch/write-back mode by setting the
	 * PXE_MODE bit in GLLAN_RCTL_0 (read-modify-write).
	 */
	reg = rd32(hw, I40E_GLLAN_RCTL_0);
	wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
}
340
341/**
342 * i40e_led_get - return current on/off mode
343 * @hw: pointer to the hw struct
344 *
345 * The value returned is the 'mode' field as defined in the
346 * GPIO register definitions: 0x0 = off, 0xf = on, and other
347 * values are variations of possible behaviors relating to
348 * blink, link, and wire.
349 **/
350u32 i40e_led_get(struct i40e_hw *hw)
351{
352 u32 gpio_val = 0;
353 u32 mode = 0;
354 u32 port;
355 int i;
356
357 for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) {
358 if (!hw->func_caps.led[i])
359 continue;
360
361 gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
362 port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK)
363 >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
364
365 if (port != hw->port)
366 continue;
367
368 mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
369 >> I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT;
370 break;
371 }
372
373 return mode;
374}
375
376/**
377 * i40e_led_set - set new on/off mode
378 * @hw: pointer to the hw struct
379 * @mode: 0=off, else on (see EAS for mode details)
380 **/
381void i40e_led_set(struct i40e_hw *hw, u32 mode)
382{
383 u32 gpio_val = 0;
384 u32 led_mode = 0;
385 u32 port;
386 int i;
387
388 for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) {
389 if (!hw->func_caps.led[i])
390 continue;
391
392 gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
393 port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK)
394 >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
395
396 if (port != hw->port)
397 continue;
398
399 led_mode = (mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
400 I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
401 gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
402 gpio_val |= led_mode;
403 wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
404 }
405}
406
407/* Admin command wrappers */
408/**
409 * i40e_aq_queue_shutdown
410 * @hw: pointer to the hw struct
411 * @unloading: is the driver unloading itself
412 *
413 * Tell the Firmware that we're shutting down the AdminQ and whether
414 * or not the driver is unloading as well.
415 **/
416i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
417 bool unloading)
418{
419 struct i40e_aq_desc desc;
420 struct i40e_aqc_queue_shutdown *cmd =
421 (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
422 i40e_status status;
423
424 i40e_fill_default_direct_cmd_desc(&desc,
425 i40e_aqc_opc_queue_shutdown);
426
427 if (unloading)
428 cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
429 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
430
431 return status;
432}
433
434/**
435 * i40e_aq_set_link_restart_an
436 * @hw: pointer to the hw struct
437 * @cmd_details: pointer to command details structure or NULL
438 *
439 * Sets up the link and restarts the Auto-Negotiation over the link.
440 **/
441i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
442 struct i40e_asq_cmd_details *cmd_details)
443{
444 struct i40e_aq_desc desc;
445 struct i40e_aqc_set_link_restart_an *cmd =
446 (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
447 i40e_status status;
448
449 i40e_fill_default_direct_cmd_desc(&desc,
450 i40e_aqc_opc_set_link_restart_an);
451
452 cmd->command = I40E_AQ_PHY_RESTART_AN;
453
454 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
455
456 return status;
457}
458
/**
 * i40e_aq_get_link_info
 * @hw: pointer to the hw struct
 * @enable_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cmd_details: pointer to command details structure or NULL
 *
 * Returns the link status of the adapter.  On success the result is
 * cached in hw->phy.link_info (the previous contents are saved in
 * hw->phy.link_info_old), optionally copied to @link, and
 * hw->phy.get_link_info is cleared so helpers skip further AQ calls.
 **/
i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
				bool enable_lse, struct i40e_link_status *link,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_link_status *resp =
		(struct i40e_aqc_get_link_status *)&desc.params.raw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
	i40e_status status;
	u16 command_flags;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);

	/* ask firmware to enable or disable Link Status Event reporting */
	if (enable_lse)
		command_flags = I40E_AQ_LSE_ENABLE;
	else
		command_flags = I40E_AQ_LSE_DISABLE;
	resp->command_flags = cpu_to_le16(command_flags);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	if (status)
		goto aq_get_link_info_exit;

	/* save off old link status information */
	memcpy(&hw->phy.link_info_old, hw_link_info,
	       sizeof(struct i40e_link_status));

	/* update link status from the response descriptor */
	hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
	hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
	hw_link_info->link_info = resp->link_info;
	hw_link_info->an_info = resp->an_info;
	hw_link_info->ext_info = resp->ext_info;

	/* firmware echoes back whether LSE actually got enabled */
	if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
		hw_link_info->lse_enable = true;
	else
		hw_link_info->lse_enable = false;

	/* save link status information */
	if (link)
		memcpy(link, hw_link_info, sizeof(struct i40e_link_status));

	/* flag cleared so helper functions don't call AQ again */
	hw->phy.get_link_info = false;

aq_get_link_info_exit:
	return status;
}
518
/**
 * i40e_aq_add_vsi
 * @hw: pointer to the hw struct
 * @vsi_ctx: pointer to a vsi context struct
 * @cmd_details: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware.  On success the SEID, VSI number
 * and allocation counters reported by firmware are written back into
 * @vsi_ctx.
 **/
i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
				struct i40e_vsi_context *vsi_ctx,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	/* cmd and resp deliberately overlay the same descriptor params:
	 * cmd is the request layout, resp the completion layout.
	 */
	struct i40e_aqc_add_get_update_vsi *cmd =
		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
	struct i40e_aqc_add_get_update_vsi_completion *resp =
		(struct i40e_aqc_add_get_update_vsi_completion *)
		&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_add_vsi);

	cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
	cmd->connection_type = vsi_ctx->connection_type;
	cmd->vf_id = vsi_ctx->vf_num;
	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);

	/* VSI info is sent as an indirect (read) buffer */
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
				       sizeof(vsi_ctx->info), cmd_details);

	if (status)
		goto aq_add_vsi_exit;

	vsi_ctx->seid = le16_to_cpu(resp->seid);
	vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);

aq_add_vsi_exit:
	return status;
}
565
566/**
567 * i40e_aq_set_vsi_unicast_promiscuous
568 * @hw: pointer to the hw struct
569 * @seid: vsi number
570 * @set: set unicast promiscuous enable/disable
571 * @cmd_details: pointer to command details structure or NULL
572 **/
573i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
574 u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
575{
576 struct i40e_aq_desc desc;
577 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
578 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
579 i40e_status status;
580 u16 flags = 0;
581
582 i40e_fill_default_direct_cmd_desc(&desc,
583 i40e_aqc_opc_set_vsi_promiscuous_modes);
584
585 if (set)
586 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
587
588 cmd->promiscuous_flags = cpu_to_le16(flags);
589
590 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
591
592 cmd->seid = cpu_to_le16(seid);
593 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
594
595 return status;
596}
597
598/**
599 * i40e_aq_set_vsi_multicast_promiscuous
600 * @hw: pointer to the hw struct
601 * @seid: vsi number
602 * @set: set multicast promiscuous enable/disable
603 * @cmd_details: pointer to command details structure or NULL
604 **/
605i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
606 u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
607{
608 struct i40e_aq_desc desc;
609 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
610 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
611 i40e_status status;
612 u16 flags = 0;
613
614 i40e_fill_default_direct_cmd_desc(&desc,
615 i40e_aqc_opc_set_vsi_promiscuous_modes);
616
617 if (set)
618 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
619
620 cmd->promiscuous_flags = cpu_to_le16(flags);
621
622 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
623
624 cmd->seid = cpu_to_le16(seid);
625 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
626
627 return status;
628}
629
/**
 * i40e_aq_set_vsi_broadcast
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @set_filter: true to set filter, false to clear filter
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
 **/
i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
				u16 seid, bool set_filter,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					i40e_aqc_opc_set_vsi_promiscuous_modes);

	/* NOTE(review): |= and &= on a descriptor just initialized by
	 * i40e_fill_default_direct_cmd_desc - presumably that helper zeroes
	 * the params area, making these equivalent to plain assignments;
	 * confirm before relying on prior contents.
	 */
	if (set_filter)
		cmd->promiscuous_flags
			|= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
	else
		cmd->promiscuous_flags
			&= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);

	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
	cmd->seid = cpu_to_le16(seid);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}
664
665/**
666 * i40e_get_vsi_params - get VSI configuration info
667 * @hw: pointer to the hw struct
668 * @vsi: pointer to a vsi context struct
669 * @cmd_details: pointer to command details structure or NULL
670 **/
671i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
672 struct i40e_vsi_context *vsi_ctx,
673 struct i40e_asq_cmd_details *cmd_details)
674{
675 struct i40e_aq_desc desc;
676 struct i40e_aqc_switch_seid *cmd =
677 (struct i40e_aqc_switch_seid *)&desc.params.raw;
678 struct i40e_aqc_add_get_update_vsi_completion *resp =
679 (struct i40e_aqc_add_get_update_vsi_completion *)
680 &desc.params.raw;
681 i40e_status status;
682
683 i40e_fill_default_direct_cmd_desc(&desc,
684 i40e_aqc_opc_get_vsi_parameters);
685
686 cmd->seid = cpu_to_le16(vsi_ctx->seid);
687
688 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
689 if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
690 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
691
692 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
693 sizeof(vsi_ctx->info), NULL);
694
695 if (status)
696 goto aq_get_vsi_params_exit;
697
698 vsi_ctx->seid = le16_to_cpu(resp->seid);
699 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
700 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
701 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
702
703aq_get_vsi_params_exit:
704 return status;
705}
706
707/**
708 * i40e_aq_update_vsi_params
709 * @hw: pointer to the hw struct
710 * @vsi: pointer to a vsi context struct
711 * @cmd_details: pointer to command details structure or NULL
712 *
713 * Update a VSI context.
714 **/
715i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
716 struct i40e_vsi_context *vsi_ctx,
717 struct i40e_asq_cmd_details *cmd_details)
718{
719 struct i40e_aq_desc desc;
720 struct i40e_aqc_switch_seid *cmd =
721 (struct i40e_aqc_switch_seid *)&desc.params.raw;
722 i40e_status status;
723
724 i40e_fill_default_direct_cmd_desc(&desc,
725 i40e_aqc_opc_update_vsi_parameters);
726 cmd->seid = cpu_to_le16(vsi_ctx->seid);
727
728 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
729 if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
730 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
731
732 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
733 sizeof(vsi_ctx->info), cmd_details);
734
735 return status;
736}
737
738/**
739 * i40e_aq_get_switch_config
740 * @hw: pointer to the hardware structure
741 * @buf: pointer to the result buffer
742 * @buf_size: length of input buffer
743 * @start_seid: seid to start for the report, 0 == beginning
744 * @cmd_details: pointer to command details structure or NULL
745 *
746 * Fill the buf with switch configuration returned from AdminQ command
747 **/
748i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
749 struct i40e_aqc_get_switch_config_resp *buf,
750 u16 buf_size, u16 *start_seid,
751 struct i40e_asq_cmd_details *cmd_details)
752{
753 struct i40e_aq_desc desc;
754 struct i40e_aqc_switch_seid *scfg =
755 (struct i40e_aqc_switch_seid *)&desc.params.raw;
756 i40e_status status;
757
758 i40e_fill_default_direct_cmd_desc(&desc,
759 i40e_aqc_opc_get_switch_config);
760 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
761 if (buf_size > I40E_AQ_LARGE_BUF)
762 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
763 scfg->seid = cpu_to_le16(*start_seid);
764
765 status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
766 *start_seid = le16_to_cpu(scfg->seid);
767
768 return status;
769}
770
771/**
772 * i40e_aq_get_firmware_version
773 * @hw: pointer to the hw struct
774 * @fw_major_version: firmware major version
775 * @fw_minor_version: firmware minor version
776 * @api_major_version: major queue version
777 * @api_minor_version: minor queue version
778 * @cmd_details: pointer to command details structure or NULL
779 *
780 * Get the firmware version from the admin queue commands
781 **/
782i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
783 u16 *fw_major_version, u16 *fw_minor_version,
784 u16 *api_major_version, u16 *api_minor_version,
785 struct i40e_asq_cmd_details *cmd_details)
786{
787 struct i40e_aq_desc desc;
788 struct i40e_aqc_get_version *resp =
789 (struct i40e_aqc_get_version *)&desc.params.raw;
790 i40e_status status;
791
792 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
793
794 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
795
796 if (!status) {
797 if (fw_major_version != NULL)
798 *fw_major_version = le16_to_cpu(resp->fw_major);
799 if (fw_minor_version != NULL)
800 *fw_minor_version = le16_to_cpu(resp->fw_minor);
801 if (api_major_version != NULL)
802 *api_major_version = le16_to_cpu(resp->api_major);
803 if (api_minor_version != NULL)
804 *api_minor_version = le16_to_cpu(resp->api_minor);
805 }
806
807 return status;
808}
809
/**
 * i40e_aq_send_driver_version
 * @hw: pointer to the hw struct
 * @dv: driver's major, minor, build and subbuild version
 * @cmd_details: pointer to command details structure or NULL
 *
 * Send the driver version to the firmware.
 * (The previous kernel-doc listed an "@event" parameter that does not
 * exist in the signature.)
 **/
i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
				struct i40e_driver_version *dv,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_driver_version *cmd =
		(struct i40e_aqc_driver_version *)&desc.params.raw;
	i40e_status status;

	if (dv == NULL)
		return I40E_ERR_PARAM;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);

	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_SI);
	cmd->driver_major_ver = dv->major_version;
	cmd->driver_minor_ver = dv->minor_version;
	cmd->driver_build_ver = dv->build_version;
	cmd->driver_subbuild_ver = dv->subbuild_version;
	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}
842
843/**
844 * i40e_get_link_status - get status of the HW network link
845 * @hw: pointer to the hw struct
846 *
847 * Returns true if link is up, false if link is down.
848 *
849 * Side effect: LinkStatusEvent reporting becomes enabled
850 **/
851bool i40e_get_link_status(struct i40e_hw *hw)
852{
853 i40e_status status = 0;
854 bool link_status = false;
855
856 if (hw->phy.get_link_info) {
857 status = i40e_aq_get_link_info(hw, true, NULL, NULL);
858
859 if (status)
860 goto i40e_get_link_status_exit;
861 }
862
863 link_status = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
864
865i40e_get_link_status_exit:
866 return link_status;
867}
868
/**
 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
 * @hw: pointer to the hw struct
 * @uplink_seid: the MAC or other gizmo SEID
 * @downlink_seid: the VSI SEID
 * @enabled_tc: bitmap of TCs to be enabled
 * @default_port: true for default port VSI, false for control port
 * @veb_seid: pointer to where to put the resulting VEB SEID (optional)
 * @cmd_details: pointer to command details structure or NULL
 *
 * This asks the FW to add a VEB between the uplink and downlink
 * elements.  If the uplink SEID is 0, this will be a floating VEB.
 **/
i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
				u16 downlink_seid, u8 enabled_tc,
				bool default_port, u16 *veb_seid,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	/* cmd and resp overlay the same descriptor parameter bytes */
	struct i40e_aqc_add_veb *cmd =
		(struct i40e_aqc_add_veb *)&desc.params.raw;
	struct i40e_aqc_add_veb_completion *resp =
		(struct i40e_aqc_add_veb_completion *)&desc.params.raw;
	i40e_status status;
	u16 veb_flags = 0;

	/* SEIDs need to either both be set or both be 0 for floating VEB */
	if (!!uplink_seid != !!downlink_seid)
		return I40E_ERR_PARAM;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);

	cmd->uplink_seid = cpu_to_le16(uplink_seid);
	cmd->downlink_seid = cpu_to_le16(downlink_seid);
	cmd->enable_tcs = enabled_tc;
	if (!uplink_seid)
		veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
	if (default_port)
		veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
	else
		veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
	cmd->veb_flags = cpu_to_le16(veb_flags);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	/* return the new VEB's SEID only on success */
	if (!status && veb_seid)
		*veb_seid = le16_to_cpu(resp->veb_seid);

	return status;
}
919
/**
 * i40e_aq_get_veb_parameters - Retrieve VEB parameters
 * @hw: pointer to the hw struct
 * @veb_seid: the SEID of the VEB to query
 * @switch_id: the uplink switch id (optional)
 * @floating: set to true if the VEB is floating (optional)
 * @statistic_index: index of the stats counter block for this VEB (optional)
 * @vebs_used: number of VEB's used by function (optional)
 * @vebs_free: total VEB's not reserved by any function (optional)
 * @cmd_details: pointer to command details structure or NULL
 *
 * This retrieves the parameters for a particular VEB, specified by
 * uplink_seid, and returns them to the caller.  Every output pointer
 * may be NULL if the caller does not need that value.
 **/
i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
				u16 veb_seid, u16 *switch_id,
				bool *floating, u16 *statistic_index,
				u16 *vebs_used, u16 *vebs_free,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	/* same struct is used for the request seid and the response fields */
	struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
		(struct i40e_aqc_get_veb_parameters_completion *)
		&desc.params.raw;
	i40e_status status;

	if (veb_seid == 0)
		return I40E_ERR_PARAM;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_veb_parameters);
	cmd_resp->seid = cpu_to_le16(veb_seid);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
	if (status)
		goto get_veb_exit;

	if (switch_id)
		*switch_id = le16_to_cpu(cmd_resp->switch_id);
	if (statistic_index)
		*statistic_index = le16_to_cpu(cmd_resp->statistic_index);
	if (vebs_used)
		*vebs_used = le16_to_cpu(cmd_resp->vebs_used);
	if (vebs_free)
		*vebs_free = le16_to_cpu(cmd_resp->vebs_free);
	if (floating) {
		u16 flags = le16_to_cpu(cmd_resp->veb_flags);
		if (flags & I40E_AQC_ADD_VEB_FLOATING)
			*floating = true;
		else
			*floating = false;
	}

get_veb_exit:
	return status;
}
976
977/**
978 * i40e_aq_add_macvlan
979 * @hw: pointer to the hw struct
980 * @seid: VSI for the mac address
981 * @mv_list: list of macvlans to be added
982 * @count: length of the list
983 * @cmd_details: pointer to command details structure or NULL
984 *
985 * Add MAC/VLAN addresses to the HW filtering
986 **/
987i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
988 struct i40e_aqc_add_macvlan_element_data *mv_list,
989 u16 count, struct i40e_asq_cmd_details *cmd_details)
990{
991 struct i40e_aq_desc desc;
992 struct i40e_aqc_macvlan *cmd =
993 (struct i40e_aqc_macvlan *)&desc.params.raw;
994 i40e_status status;
995 u16 buf_size;
996
997 if (count == 0 || !mv_list || !hw)
998 return I40E_ERR_PARAM;
999
1000 buf_size = count * sizeof(struct i40e_aqc_add_macvlan_element_data);
1001
1002 /* prep the rest of the request */
1003 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
1004 cmd->num_addresses = cpu_to_le16(count);
1005 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
1006 cmd->seid[1] = 0;
1007 cmd->seid[2] = 0;
1008
1009 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
1010 if (buf_size > I40E_AQ_LARGE_BUF)
1011 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1012
1013 status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
1014 cmd_details);
1015
1016 return status;
1017}
1018
/**
 * i40e_aq_remove_macvlan
 * @hw: pointer to the hw struct
 * @seid: VSI for the mac address
 * @mv_list: list of macvlans to be removed
 * @count: length of the list (must be non-zero)
 * @cmd_details: pointer to command details structure or NULL
 *
 * Remove MAC/VLAN addresses from the HW filtering
 **/
i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
			struct i40e_aqc_remove_macvlan_element_data *mv_list,
			u16 count, struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_macvlan *cmd =
		(struct i40e_aqc_macvlan *)&desc.params.raw;
	i40e_status status;
	u16 buf_size;

	if (count == 0 || !mv_list || !hw)
		return I40E_ERR_PARAM;

	buf_size = count * sizeof(struct i40e_aqc_remove_macvlan_element_data);

	/* prep the rest of the request */
	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
	cmd->num_addresses = cpu_to_le16(count);
	/* only the first SEID slot is used; mark it valid */
	cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
	cmd->seid[1] = 0;
	cmd->seid[2] = 0;

	/* element list travels as an indirect (read) buffer */
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	if (buf_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
				       cmd_details);

	return status;
}
1060
/**
 * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
 * @hw: pointer to the hw struct
 * @seid: VSI for the vlan filters
 * @v_list: list of vlan filters to be added
 * @count: length of the list (must be non-zero)
 * @cmd_details: pointer to command details structure or NULL
 **/
i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid,
			struct i40e_aqc_add_remove_vlan_element_data *v_list,
			u8 count, struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_macvlan *cmd =
		(struct i40e_aqc_macvlan *)&desc.params.raw;
	i40e_status status;
	u16 buf_size;

	if (count == 0 || !v_list || !hw)
		return I40E_ERR_PARAM;

	buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);

	/* prep the rest of the request */
	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan);
	cmd->num_addresses = cpu_to_le16(count);
	/* only the first SEID slot is used; mark it valid */
	cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
	cmd->seid[1] = 0;
	cmd->seid[2] = 0;

	/* filter list travels as an indirect (read) buffer */
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	if (buf_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

	status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
				       cmd_details);

	return status;
}
1100
/**
 * i40e_aq_remove_vlan - Remove VLANs from the HW filtering
 * @hw: pointer to the hw struct
 * @seid: VSI for the vlan filters
 * @v_list: list of vlan filters to be removed
 * @count: length of the list (must be non-zero)
 * @cmd_details: pointer to command details structure or NULL
 **/
i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid,
			struct i40e_aqc_add_remove_vlan_element_data *v_list,
			u8 count, struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_macvlan *cmd =
		(struct i40e_aqc_macvlan *)&desc.params.raw;
	i40e_status status;
	u16 buf_size;

	if (count == 0 || !v_list || !hw)
		return I40E_ERR_PARAM;

	buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);

	/* prep the rest of the request */
	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan);
	cmd->num_addresses = cpu_to_le16(count);
	/* only the first SEID slot is used; mark it valid */
	cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
	cmd->seid[1] = 0;
	cmd->seid[2] = 0;

	/* filter list travels as an indirect (read) buffer */
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	if (buf_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

	status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
				       cmd_details);

	return status;
}
1140
/**
 * i40e_aq_send_msg_to_vf
 * @hw: pointer to the hardware structure
 * @vfid: vf id to send msg
 * @v_opcode: virtchnl opcode placed in the descriptor's cookie_high
 * @v_retval: virtchnl return value placed in the descriptor's cookie_low
 * @msg: pointer to the msg buffer (optional when @msglen is 0)
 * @msglen: msg length
 * @cmd_details: pointer to command details
 *
 * send msg to vf
 **/
i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
				u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_pf_vf_message *cmd =
		(struct i40e_aqc_pf_vf_message *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
	cmd->id = cpu_to_le32(vfid);
	/* opcode/retval ride in the descriptor cookie fields */
	desc.cookie_high = cpu_to_le32(v_opcode);
	desc.cookie_low = cpu_to_le32(v_retval);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
	if (msglen) {
		/* attach the message as an indirect (read) buffer */
		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
						I40E_AQ_FLAG_RD));
		if (msglen > I40E_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
		desc.datalen = cpu_to_le16(msglen);
	}
	status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);

	return status;
}
1176
1177/**
1178 * i40e_aq_set_hmc_resource_profile
1179 * @hw: pointer to the hw struct
1180 * @profile: type of profile the HMC is to be set as
1181 * @pe_vf_enabled_count: the number of PE enabled VFs the system has
1182 * @cmd_details: pointer to command details structure or NULL
1183 *
1184 * set the HMC profile of the device.
1185 **/
1186i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
1187 enum i40e_aq_hmc_profile profile,
1188 u8 pe_vf_enabled_count,
1189 struct i40e_asq_cmd_details *cmd_details)
1190{
1191 struct i40e_aq_desc desc;
1192 struct i40e_aq_get_set_hmc_resource_profile *cmd =
1193 (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
1194 i40e_status status;
1195
1196 i40e_fill_default_direct_cmd_desc(&desc,
1197 i40e_aqc_opc_set_hmc_resource_profile);
1198
1199 cmd->pm_profile = (u8)profile;
1200 cmd->pe_vf_enabled = pe_vf_enabled_count;
1201
1202 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1203
1204 return status;
1205}
1206
/**
 * i40e_aq_request_resource
 * @hw: pointer to the hw struct
 * @resource: resource id
 * @access: access type
 * @sdp_number: resource number
 * @timeout: (output) the maximum time in ms that the driver may hold the
 *           resource; on a busy completion, the time the current owner has
 *           left to free it
 * @cmd_details: pointer to command details structure or NULL
 *
 * requests common resource using the admin queue commands
 **/
i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
				enum i40e_aq_resources_ids resource,
				enum i40e_aq_resource_access_type access,
				u8 sdp_number, u64 *timeout,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	/* same descriptor layout is used for the command and the response */
	struct i40e_aqc_request_resource *cmd_resp =
		(struct i40e_aqc_request_resource *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);

	cmd_resp->resource_id = cpu_to_le16(resource);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->resource_number = cpu_to_le32(sdp_number);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 * If the resource is held by someone else, the command completes with
	 * busy return value and the timeout field indicates the maximum time
	 * the current owner of the resource has to free it.
	 */
	if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}
1247
1248/**
1249 * i40e_aq_release_resource
1250 * @hw: pointer to the hw struct
1251 * @resource: resource id
1252 * @sdp_number: resource number
1253 * @cmd_details: pointer to command details structure or NULL
1254 *
1255 * release common resource using the admin queue commands
1256 **/
1257i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
1258 enum i40e_aq_resources_ids resource,
1259 u8 sdp_number,
1260 struct i40e_asq_cmd_details *cmd_details)
1261{
1262 struct i40e_aq_desc desc;
1263 struct i40e_aqc_request_resource *cmd =
1264 (struct i40e_aqc_request_resource *)&desc.params.raw;
1265 i40e_status status;
1266
1267 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
1268
1269 cmd->resource_id = cpu_to_le16(resource);
1270 cmd->resource_number = cpu_to_le32(sdp_number);
1271
1272 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1273
1274 return status;
1275}
1276
1277/**
1278 * i40e_aq_read_nvm
1279 * @hw: pointer to the hw struct
1280 * @module_pointer: module pointer location in words from the NVM beginning
1281 * @offset: byte offset from the module beginning
1282 * @length: length of the section to be read (in bytes from the offset)
1283 * @data: command buffer (size [bytes] = length)
1284 * @last_command: tells if this is the last command in a series
1285 * @cmd_details: pointer to command details structure or NULL
1286 *
1287 * Read the NVM using the admin queue commands
1288 **/
1289i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
1290 u32 offset, u16 length, void *data,
1291 bool last_command,
1292 struct i40e_asq_cmd_details *cmd_details)
1293{
1294 struct i40e_aq_desc desc;
1295 struct i40e_aqc_nvm_update *cmd =
1296 (struct i40e_aqc_nvm_update *)&desc.params.raw;
1297 i40e_status status;
1298
1299 /* In offset the highest byte must be zeroed. */
1300 if (offset & 0xFF000000) {
1301 status = I40E_ERR_PARAM;
1302 goto i40e_aq_read_nvm_exit;
1303 }
1304
1305 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
1306
1307 /* If this is the last command in a series, set the proper flag. */
1308 if (last_command)
1309 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
1310 cmd->module_pointer = module_pointer;
1311 cmd->offset = cpu_to_le32(offset);
1312 cmd->length = cpu_to_le16(length);
1313
1314 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1315 if (length > I40E_AQ_LARGE_BUF)
1316 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1317
1318 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
1319
1320i40e_aq_read_nvm_exit:
1321 return status;
1322}
1323
/* Capability record ids returned by the list_dev/list_func_capabilities
 * admin queue commands; interpreted by i40e_parse_discover_capabilities().
 * NOTE: the list is not fully sorted -- 0x51..0x63 appear after 0xF1/0xF2.
 */
#define I40E_DEV_FUNC_CAP_SWITCH_MODE	0x01
#define I40E_DEV_FUNC_CAP_MGMT_MODE	0x02
#define I40E_DEV_FUNC_CAP_NPAR		0x03
#define I40E_DEV_FUNC_CAP_OS2BMC	0x04
#define I40E_DEV_FUNC_CAP_VALID_FUNC	0x05
#define I40E_DEV_FUNC_CAP_SRIOV_1_1	0x12
#define I40E_DEV_FUNC_CAP_VF		0x13
#define I40E_DEV_FUNC_CAP_VMDQ		0x14
#define I40E_DEV_FUNC_CAP_802_1_QBG	0x15
#define I40E_DEV_FUNC_CAP_802_1_QBH	0x16
#define I40E_DEV_FUNC_CAP_VSI		0x17
#define I40E_DEV_FUNC_CAP_DCB		0x18
#define I40E_DEV_FUNC_CAP_FCOE		0x21
#define I40E_DEV_FUNC_CAP_RSS		0x40
#define I40E_DEV_FUNC_CAP_RX_QUEUES	0x41
#define I40E_DEV_FUNC_CAP_TX_QUEUES	0x42
#define I40E_DEV_FUNC_CAP_MSIX		0x43
#define I40E_DEV_FUNC_CAP_MSIX_VF	0x44
#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR	0x45
#define I40E_DEV_FUNC_CAP_IEEE_1588	0x46
#define I40E_DEV_FUNC_CAP_MFP_MODE_1	0xF1
#define I40E_DEV_FUNC_CAP_CEM		0xF2
#define I40E_DEV_FUNC_CAP_IWARP		0x51
#define I40E_DEV_FUNC_CAP_LED		0x61
#define I40E_DEV_FUNC_CAP_SDP		0x62
#define I40E_DEV_FUNC_CAP_MDIO		0x63
1350
/**
 * i40e_parse_discover_capabilities
 * @hw: pointer to the hw struct
 * @buff: pointer to a buffer containing device/function capability records
 * @cap_count: number of capability records in the list
 * @list_type_opc: type of capabilities list to parse
 *
 * Parse the device/function capabilities list.
 *
 * Each record carries an id plus three values (number, logical_id, phys_id);
 * which of the three is meaningful depends on the capability id.  Records
 * with an unrecognized id are silently skipped.  Results are stored into
 * hw->dev_caps or hw->func_caps depending on @list_type_opc; any other
 * opcode makes this function a no-op.
 **/
static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
				     u32 cap_count,
				     enum i40e_admin_queue_opc list_type_opc)
{
	struct i40e_aqc_list_capabilities_element_resp *cap;
	u32 number, logical_id, phys_id;
	struct i40e_hw_capabilities *p;
	u32 reg_val;
	u32 i = 0;
	u16 id;

	cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;

	/* choose the destination capability struct based on the list type */
	if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
		p = (struct i40e_hw_capabilities *)&hw->dev_caps;
	else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
		p = (struct i40e_hw_capabilities *)&hw->func_caps;
	else
		return;

	for (i = 0; i < cap_count; i++, cap++) {
		/* records are little-endian on the wire */
		id = le16_to_cpu(cap->id);
		number = le32_to_cpu(cap->number);
		logical_id = le32_to_cpu(cap->logical_id);
		phys_id = le32_to_cpu(cap->phys_id);

		switch (id) {
		case I40E_DEV_FUNC_CAP_SWITCH_MODE:
			p->switch_mode = number;
			break;
		case I40E_DEV_FUNC_CAP_MGMT_MODE:
			p->management_mode = number;
			break;
		case I40E_DEV_FUNC_CAP_NPAR:
			p->npar_enable = number;
			break;
		case I40E_DEV_FUNC_CAP_OS2BMC:
			p->os2bmc = number;
			break;
		case I40E_DEV_FUNC_CAP_VALID_FUNC:
			p->valid_functions = number;
			break;
		case I40E_DEV_FUNC_CAP_SRIOV_1_1:
			if (number == 1)
				p->sr_iov_1_1 = true;
			break;
		case I40E_DEV_FUNC_CAP_VF:
			p->num_vfs = number;
			p->vf_base_id = logical_id;
			break;
		case I40E_DEV_FUNC_CAP_VMDQ:
			if (number == 1)
				p->vmdq = true;
			break;
		case I40E_DEV_FUNC_CAP_802_1_QBG:
			if (number == 1)
				p->evb_802_1_qbg = true;
			break;
		case I40E_DEV_FUNC_CAP_802_1_QBH:
			if (number == 1)
				p->evb_802_1_qbh = true;
			break;
		case I40E_DEV_FUNC_CAP_VSI:
			p->num_vsis = number;
			break;
		case I40E_DEV_FUNC_CAP_DCB:
			if (number == 1) {
				p->dcb = true;
				p->enabled_tcmap = logical_id;
				p->maxtc = phys_id;
			}
			break;
		case I40E_DEV_FUNC_CAP_FCOE:
			if (number == 1)
				p->fcoe = true;
			break;
		case I40E_DEV_FUNC_CAP_RSS:
			p->rss = true;
			/* the register's HASHLUTSIZE field decides whether the
			 * reported table size applies or the 128-entry default
			 */
			reg_val = rd32(hw, I40E_PFQF_CTL_0);
			if (reg_val & I40E_PFQF_CTL_0_HASHLUTSIZE_MASK)
				p->rss_table_size = number;
			else
				p->rss_table_size = 128;
			p->rss_table_entry_width = logical_id;
			break;
		case I40E_DEV_FUNC_CAP_RX_QUEUES:
			p->num_rx_qp = number;
			p->base_queue = phys_id;
			break;
		case I40E_DEV_FUNC_CAP_TX_QUEUES:
			p->num_tx_qp = number;
			p->base_queue = phys_id;
			break;
		case I40E_DEV_FUNC_CAP_MSIX:
			p->num_msix_vectors = number;
			break;
		case I40E_DEV_FUNC_CAP_MSIX_VF:
			p->num_msix_vectors_vf = number;
			break;
		case I40E_DEV_FUNC_CAP_MFP_MODE_1:
			if (number == 1)
				p->mfp_mode_1 = true;
			break;
		case I40E_DEV_FUNC_CAP_CEM:
			if (number == 1)
				p->mgmt_cem = true;
			break;
		case I40E_DEV_FUNC_CAP_IWARP:
			if (number == 1)
				p->iwarp = true;
			break;
		case I40E_DEV_FUNC_CAP_LED:
			/* phys_id indexes the GPIO arrays; bound-check it */
			if (phys_id < I40E_HW_CAP_MAX_GPIO)
				p->led[phys_id] = true;
			break;
		case I40E_DEV_FUNC_CAP_SDP:
			if (phys_id < I40E_HW_CAP_MAX_GPIO)
				p->sdp[phys_id] = true;
			break;
		case I40E_DEV_FUNC_CAP_MDIO:
			if (number == 1) {
				p->mdio_port_num = phys_id;
				p->mdio_port_mode = logical_id;
			}
			break;
		case I40E_DEV_FUNC_CAP_IEEE_1588:
			if (number == 1)
				p->ieee_1588 = true;
			break;
		case I40E_DEV_FUNC_CAP_FLOW_DIRECTOR:
			p->fd = true;
			p->fd_filters_guaranteed = number;
			p->fd_filters_best_effort = logical_id;
			break;
		default:
			break;
		}
	}

	/* additional HW specific goodies that might
	 * someday be HW version specific
	 */
	p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
}
1504
/**
 * i40e_aq_discover_capabilities
 * @hw: pointer to the hw struct
 * @buff: a virtual buffer to hold the capabilities
 * @buff_size: Size of the virtual buffer
 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
 * @list_type_opc: capabilities type to discover - pass in the command opcode
 * @cmd_details: pointer to command details structure or NULL
 *
 * Get the device capabilities descriptions from the firmware
 **/
i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
				void *buff, u16 buff_size, u16 *data_size,
				enum i40e_admin_queue_opc list_type_opc,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aqc_list_capabilites *cmd;
	i40e_status status = 0;
	struct i40e_aq_desc desc;

	cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;

	/* only the two capability-list opcodes are valid here */
	if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
	    list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
		status = I40E_ERR_PARAM;
		goto exit;
	}

	i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);

	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	if (buff_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
	/* report datalen even on failure: on an ENOMEM AQ error it tells the
	 * caller how large a buffer to retry with
	 */
	*data_size = le16_to_cpu(desc.datalen);

	if (status)
		goto exit;

	i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
					 list_type_opc);

exit:
	return status;
}
1551
1552/**
1553 * i40e_aq_get_lldp_mib
1554 * @hw: pointer to the hw struct
1555 * @bridge_type: type of bridge requested
1556 * @mib_type: Local, Remote or both Local and Remote MIBs
1557 * @buff: pointer to a user supplied buffer to store the MIB block
1558 * @buff_size: size of the buffer (in bytes)
1559 * @local_len : length of the returned Local LLDP MIB
1560 * @remote_len: length of the returned Remote LLDP MIB
1561 * @cmd_details: pointer to command details structure or NULL
1562 *
1563 * Requests the complete LLDP MIB (entire packet).
1564 **/
1565i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
1566 u8 mib_type, void *buff, u16 buff_size,
1567 u16 *local_len, u16 *remote_len,
1568 struct i40e_asq_cmd_details *cmd_details)
1569{
1570 struct i40e_aq_desc desc;
1571 struct i40e_aqc_lldp_get_mib *cmd =
1572 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
1573 struct i40e_aqc_lldp_get_mib *resp =
1574 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
1575 i40e_status status;
1576
1577 if (buff_size == 0 || !buff)
1578 return I40E_ERR_PARAM;
1579
1580 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
1581 /* Indirect Command */
1582 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1583
1584 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
1585 cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
1586 I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
1587
1588 desc.datalen = cpu_to_le16(buff_size);
1589
1590 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1591 if (buff_size > I40E_AQ_LARGE_BUF)
1592 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1593
1594 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
1595 if (!status) {
1596 if (local_len != NULL)
1597 *local_len = le16_to_cpu(resp->local_len);
1598 if (remote_len != NULL)
1599 *remote_len = le16_to_cpu(resp->remote_len);
1600 }
1601
1602 return status;
1603}
1604
1605/**
1606 * i40e_aq_cfg_lldp_mib_change_event
1607 * @hw: pointer to the hw struct
1608 * @enable_update: Enable or Disable event posting
1609 * @cmd_details: pointer to command details structure or NULL
1610 *
1611 * Enable or Disable posting of an event on ARQ when LLDP MIB
1612 * associated with the interface changes
1613 **/
1614i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
1615 bool enable_update,
1616 struct i40e_asq_cmd_details *cmd_details)
1617{
1618 struct i40e_aq_desc desc;
1619 struct i40e_aqc_lldp_update_mib *cmd =
1620 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
1621 i40e_status status;
1622
1623 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
1624
1625 if (!enable_update)
1626 cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
1627
1628 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1629
1630 return status;
1631}
1632
1633/**
1634 * i40e_aq_stop_lldp
1635 * @hw: pointer to the hw struct
1636 * @shutdown_agent: True if LLDP Agent needs to be Shutdown
1637 * @cmd_details: pointer to command details structure or NULL
1638 *
1639 * Stop or Shutdown the embedded LLDP Agent
1640 **/
1641i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
1642 struct i40e_asq_cmd_details *cmd_details)
1643{
1644 struct i40e_aq_desc desc;
1645 struct i40e_aqc_lldp_stop *cmd =
1646 (struct i40e_aqc_lldp_stop *)&desc.params.raw;
1647 i40e_status status;
1648
1649 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
1650
1651 if (shutdown_agent)
1652 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
1653
1654 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1655
1656 return status;
1657}
1658
1659/**
1660 * i40e_aq_start_lldp
1661 * @hw: pointer to the hw struct
1662 * @cmd_details: pointer to command details structure or NULL
1663 *
1664 * Start the embedded LLDP Agent on all ports.
1665 **/
1666i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
1667 struct i40e_asq_cmd_details *cmd_details)
1668{
1669 struct i40e_aq_desc desc;
1670 struct i40e_aqc_lldp_start *cmd =
1671 (struct i40e_aqc_lldp_start *)&desc.params.raw;
1672 i40e_status status;
1673
1674 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
1675
1676 cmd->command = I40E_AQ_LLDP_AGENT_START;
1677
1678 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1679
1680 return status;
1681}
1682
1683/**
1684 * i40e_aq_delete_element - Delete switch element
1685 * @hw: pointer to the hw struct
1686 * @seid: the SEID to delete from the switch
1687 * @cmd_details: pointer to command details structure or NULL
1688 *
1689 * This deletes a switch element from the switch.
1690 **/
1691i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
1692 struct i40e_asq_cmd_details *cmd_details)
1693{
1694 struct i40e_aq_desc desc;
1695 struct i40e_aqc_switch_seid *cmd =
1696 (struct i40e_aqc_switch_seid *)&desc.params.raw;
1697 i40e_status status;
1698
1699 if (seid == 0)
1700 return I40E_ERR_PARAM;
1701
1702 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
1703
1704 cmd->seid = cpu_to_le16(seid);
1705
1706 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1707
1708 return status;
1709}
1710
/**
 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
 * @hw: pointer to the hw struct
 * @seid: seid for the physical port/switching component/vsi
 * @buff: Indirect buffer to hold data parameters and response
 * @buff_size: Indirect buffer size
 * @opcode: Tx scheduler AQ command opcode
 * @cmd_details: pointer to command details structure or NULL
 *
 * Generic command handler for Tx scheduler AQ commands.  The opcode decides
 * the buffer direction: configure/enable/modify/disable opcodes send @buff
 * to the device (RD flag set), query opcodes only receive into it.  Any
 * other opcode is rejected with I40E_ERR_PARAM.
 **/
static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
				void *buff, u16 buff_size,
				 enum i40e_admin_queue_opc opcode,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_tx_sched_ind *cmd =
		(struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
	i40e_status status;
	bool cmd_param_flag = false;

	switch (opcode) {
	/* these opcodes write parameters from the buffer to the device */
	case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
	case i40e_aqc_opc_configure_vsi_tc_bw:
	case i40e_aqc_opc_enable_switching_comp_ets:
	case i40e_aqc_opc_modify_switching_comp_ets:
	case i40e_aqc_opc_disable_switching_comp_ets:
	case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
	case i40e_aqc_opc_configure_switching_comp_bw_config:
		cmd_param_flag = true;
		break;
	/* these opcodes only read a response back into the buffer */
	case i40e_aqc_opc_query_vsi_bw_config:
	case i40e_aqc_opc_query_vsi_ets_sla_config:
	case i40e_aqc_opc_query_switching_comp_ets_config:
	case i40e_aqc_opc_query_port_ets_config:
	case i40e_aqc_opc_query_switching_comp_bw_config:
		cmd_param_flag = false;
		break;
	default:
		return I40E_ERR_PARAM;
	}

	i40e_fill_default_direct_cmd_desc(&desc, opcode);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	if (cmd_param_flag)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
	if (buff_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

	desc.datalen = cpu_to_le16(buff_size);

	cmd->vsi_seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);

	return status;
}
1771
1772/**
1773 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
1774 * @hw: pointer to the hw struct
1775 * @seid: VSI seid
1776 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
1777 * @cmd_details: pointer to command details structure or NULL
1778 **/
1779i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
1780 u16 seid,
1781 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
1782 struct i40e_asq_cmd_details *cmd_details)
1783{
1784 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
1785 i40e_aqc_opc_configure_vsi_tc_bw,
1786 cmd_details);
1787}
1788
1789/**
1790 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
1791 * @hw: pointer to the hw struct
1792 * @seid: seid of the VSI
1793 * @bw_data: Buffer to hold VSI BW configuration
1794 * @cmd_details: pointer to command details structure or NULL
1795 **/
1796i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
1797 u16 seid,
1798 struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
1799 struct i40e_asq_cmd_details *cmd_details)
1800{
1801 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
1802 i40e_aqc_opc_query_vsi_bw_config,
1803 cmd_details);
1804}
1805
1806/**
1807 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
1808 * @hw: pointer to the hw struct
1809 * @seid: seid of the VSI
1810 * @bw_data: Buffer to hold VSI BW configuration per TC
1811 * @cmd_details: pointer to command details structure or NULL
1812 **/
1813i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
1814 u16 seid,
1815 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
1816 struct i40e_asq_cmd_details *cmd_details)
1817{
1818 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
1819 i40e_aqc_opc_query_vsi_ets_sla_config,
1820 cmd_details);
1821}
1822
1823/**
1824 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
1825 * @hw: pointer to the hw struct
1826 * @seid: seid of the switching component
1827 * @bw_data: Buffer to hold switching component's per TC BW config
1828 * @cmd_details: pointer to command details structure or NULL
1829 **/
1830i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
1831 u16 seid,
1832 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
1833 struct i40e_asq_cmd_details *cmd_details)
1834{
1835 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
1836 i40e_aqc_opc_query_switching_comp_ets_config,
1837 cmd_details);
1838}
1839
1840/**
1841 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
1842 * @hw: pointer to the hw struct
1843 * @seid: seid of the VSI or switching component connected to Physical Port
1844 * @bw_data: Buffer to hold current ETS configuration for the Physical Port
1845 * @cmd_details: pointer to command details structure or NULL
1846 **/
1847i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
1848 u16 seid,
1849 struct i40e_aqc_query_port_ets_config_resp *bw_data,
1850 struct i40e_asq_cmd_details *cmd_details)
1851{
1852 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
1853 i40e_aqc_opc_query_port_ets_config,
1854 cmd_details);
1855}
1856
1857/**
1858 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
1859 * @hw: pointer to the hw struct
1860 * @seid: seid of the switching component
1861 * @bw_data: Buffer to hold switching component's BW configuration
1862 * @cmd_details: pointer to command details structure or NULL
1863 **/
1864i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
1865 u16 seid,
1866 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
1867 struct i40e_asq_cmd_details *cmd_details)
1868{
1869 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
1870 i40e_aqc_opc_query_switching_comp_bw_config,
1871 cmd_details);
1872}
1873
/**
 * i40e_validate_filter_settings
 * @hw: pointer to the hardware structure
 * @settings: Filter control settings
 *
 * Check and validate the filter control settings passed.
 * The function checks for the valid filter/context sizes being
 * passed for FCoE and PE.
 *
 * Returns 0 if the values passed are valid and within
 * range else returns an error.
 **/
static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
				struct i40e_filter_control_settings *settings)
{
	u32 fcoe_cntx_size, fcoe_filt_size;
	u32 pe_cntx_size, pe_filt_size;
	u32 fcoe_fmax, pe_fmax;
	u32 val;

	/* Validate FCoE settings passed.
	 * The *_SIZE_* enums are shift amounts applied to the base size,
	 * so each stacked case computes BASE << enum_value.
	 */
	switch (settings->fcoe_filt_num) {
	case I40E_HASH_FILTER_SIZE_1K:
	case I40E_HASH_FILTER_SIZE_2K:
	case I40E_HASH_FILTER_SIZE_4K:
	case I40E_HASH_FILTER_SIZE_8K:
	case I40E_HASH_FILTER_SIZE_16K:
	case I40E_HASH_FILTER_SIZE_32K:
		fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
		fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
		break;
	default:
		return I40E_ERR_PARAM;
	}

	switch (settings->fcoe_cntx_num) {
	case I40E_DMA_CNTX_SIZE_512:
	case I40E_DMA_CNTX_SIZE_1K:
	case I40E_DMA_CNTX_SIZE_2K:
	case I40E_DMA_CNTX_SIZE_4K:
		fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
		fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
		break;
	default:
		return I40E_ERR_PARAM;
	}

	/* Validate PE settings passed */
	switch (settings->pe_filt_num) {
	case I40E_HASH_FILTER_SIZE_1K:
	case I40E_HASH_FILTER_SIZE_2K:
	case I40E_HASH_FILTER_SIZE_4K:
	case I40E_HASH_FILTER_SIZE_8K:
	case I40E_HASH_FILTER_SIZE_16K:
	case I40E_HASH_FILTER_SIZE_32K:
	case I40E_HASH_FILTER_SIZE_64K:
	case I40E_HASH_FILTER_SIZE_128K:
	case I40E_HASH_FILTER_SIZE_256K:
	case I40E_HASH_FILTER_SIZE_512K:
	case I40E_HASH_FILTER_SIZE_1M:
		pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
		pe_filt_size <<= (u32)settings->pe_filt_num;
		break;
	default:
		return I40E_ERR_PARAM;
	}

	switch (settings->pe_cntx_num) {
	case I40E_DMA_CNTX_SIZE_512:
	case I40E_DMA_CNTX_SIZE_1K:
	case I40E_DMA_CNTX_SIZE_2K:
	case I40E_DMA_CNTX_SIZE_4K:
	case I40E_DMA_CNTX_SIZE_8K:
	case I40E_DMA_CNTX_SIZE_16K:
	case I40E_DMA_CNTX_SIZE_32K:
	case I40E_DMA_CNTX_SIZE_64K:
	case I40E_DMA_CNTX_SIZE_128K:
	case I40E_DMA_CNTX_SIZE_256K:
		pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
		pe_cntx_size <<= (u32)settings->pe_cntx_num;
		break;
	default:
		return I40E_ERR_PARAM;
	}

	/* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
	val = rd32(hw, I40E_GLHMC_FCOEFMAX);
	fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
		     >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
	if (fcoe_filt_size + fcoe_cntx_size >  fcoe_fmax)
		return I40E_ERR_INVALID_SIZE;

	/* PEHSIZE + PEDSIZE should not be greater than PMPEXFMAX */
	val = rd32(hw, I40E_GLHMC_PEXFMAX);
	pe_fmax = (val & I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK)
		   >> I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT;
	if (pe_filt_size + pe_cntx_size >  pe_fmax)
		return I40E_ERR_INVALID_SIZE;

	return 0;
}
1975
/**
 * i40e_set_filter_control
 * @hw: pointer to the hardware structure
 * @settings: Filter control settings
 *
 * Set the Queue Filters for PE/FCoE and enable filters required
 * for a single PF. It is expected that these settings are programmed
 * at the driver initialization time.
 *
 * Validates @settings first; on success programs the PFQF_CTL_0 register
 * with the requested hash/context sizes, LUT size and filter enables.
 **/
i40e_status i40e_set_filter_control(struct i40e_hw *hw,
				struct i40e_filter_control_settings *settings)
{
	i40e_status ret = 0;
	u32 hash_lut_size = 0;
	u32 val;

	if (!settings)
		return I40E_ERR_PARAM;

	/* Validate the input settings */
	ret = i40e_validate_filter_settings(hw, settings);
	if (ret)
		return ret;

	/* Read the PF Queue Filter control register */
	val = rd32(hw, I40E_PFQF_CTL_0);

	/* Program required PE hash buckets for the PF */
	val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
	val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
		I40E_PFQF_CTL_0_PEHSIZE_MASK;
	/* Program required PE contexts for the PF */
	val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
	val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
		I40E_PFQF_CTL_0_PEDSIZE_MASK;

	/* Program required FCoE hash buckets for the PF */
	val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
	val |= ((u32)settings->fcoe_filt_num <<
			I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
		I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
	/* Program required FCoE DDP contexts for the PF */
	val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
	val |= ((u32)settings->fcoe_cntx_num <<
			I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
		I40E_PFQF_CTL_0_PFFCDSIZE_MASK;

	/* Program Hash LUT size for the PF: field value 0 = 128 entries,
	 * 1 = 512 entries
	 */
	val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
	if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
		hash_lut_size = 1;
	val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
		I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;

	/* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
	if (settings->enable_fdir)
		val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
	if (settings->enable_ethtype)
		val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
	if (settings->enable_macvlan)
		val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;

	wr32(hw, I40E_PFQF_CTL_0, val);

	return 0;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
new file mode 100644
index 000000000000..8dbd91f64b74
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -0,0 +1,2076 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#ifdef CONFIG_DEBUG_FS
29
30#include <linux/fs.h>
31#include <linux/debugfs.h>
32
33#include "i40e.h"
34
35static struct dentry *i40e_dbg_root;
36
37/**
38 * i40e_dbg_find_vsi - searches for the vsi with the given seid
39 * @pf - the pf structure to search for the vsi
40 * @seid - seid of the vsi it is searching for
41 **/
42static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
43{
44 int i;
45
46 if (seid < 0)
47 dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
48 else
49 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
50 if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
51 return pf->vsi[i];
52
53 return NULL;
54}
55
56/**
57 * i40e_dbg_find_veb - searches for the veb with the given seid
58 * @pf - the pf structure to search for the veb
59 * @seid - seid of the veb it is searching for
60 **/
61static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
62{
63 int i;
64
65 if ((seid < I40E_BASE_VEB_SEID) ||
66 (seid > (I40E_BASE_VEB_SEID + I40E_MAX_VEB)))
67 dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
68 else
69 for (i = 0; i < I40E_MAX_VEB; i++)
70 if (pf->veb[i] && pf->veb[i]->seid == seid)
71 return pf->veb[i];
72 return NULL;
73}
74
/**************************************************************
 * dump
 * The dump entry in debugfs is for getting a data snapshot of
 * the driver's current configuration and runtime details.
 * When the filesystem entry is written, a snapshot is taken.
 * When the entry is read, the most recent snapshot data is dumped.
 **************************************************************/
/* snapshot buffer, allocated on demand by i40e_dbg_prep_dump_buf() */
static char *i40e_dbg_dump_buf;
/* number of bytes of valid snapshot data currently in the buffer */
static ssize_t i40e_dbg_dump_data_len;
/* allocated size of the buffer (>= data_len) */
static ssize_t i40e_dbg_dump_buffer_len;
85
86/**
87 * i40e_dbg_dump_read - read the dump data
88 * @filp: the opened file
89 * @buffer: where to write the data for the user to read
90 * @count: the size of the user's buffer
91 * @ppos: file position offset
92 **/
93static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer,
94 size_t count, loff_t *ppos)
95{
96 int bytes_not_copied;
97 int len;
98
99 /* is *ppos bigger than the available data? */
100 if (*ppos >= i40e_dbg_dump_data_len || !i40e_dbg_dump_buf)
101 return 0;
102
103 /* be sure to not read beyond the end of available data */
104 len = min_t(int, count, (i40e_dbg_dump_data_len - *ppos));
105
106 bytes_not_copied = copy_to_user(buffer, &i40e_dbg_dump_buf[*ppos], len);
107 if (bytes_not_copied < 0)
108 return bytes_not_copied;
109
110 *ppos += len;
111 return len;
112}
113
114/**
115 * i40e_dbg_prep_dump_buf
116 * @pf: the pf we're working with
117 * @buflen: the desired buffer length
118 *
119 * Return positive if success, 0 if failed
120 **/
121static int i40e_dbg_prep_dump_buf(struct i40e_pf *pf, int buflen)
122{
123 /* if not already big enough, prep for re alloc */
124 if (i40e_dbg_dump_buffer_len && i40e_dbg_dump_buffer_len < buflen) {
125 kfree(i40e_dbg_dump_buf);
126 i40e_dbg_dump_buffer_len = 0;
127 i40e_dbg_dump_buf = NULL;
128 }
129
130 /* get a new buffer if needed */
131 if (!i40e_dbg_dump_buf) {
132 i40e_dbg_dump_buf = kzalloc(buflen, GFP_KERNEL);
133 if (i40e_dbg_dump_buf != NULL)
134 i40e_dbg_dump_buffer_len = buflen;
135 }
136
137 return i40e_dbg_dump_buffer_len;
138}
139
140/**
141 * i40e_dbg_dump_write - trigger a datadump snapshot
142 * @filp: the opened file
143 * @buffer: where to find the user's data
144 * @count: the length of the user's data
145 * @ppos: file position offset
146 *
147 * Any write clears the stats
148 **/
149static ssize_t i40e_dbg_dump_write(struct file *filp,
150 const char __user *buffer,
151 size_t count, loff_t *ppos)
152{
153 struct i40e_pf *pf = filp->private_data;
154 char dump_request_buf[16];
155 bool seid_found = false;
156 int bytes_not_copied;
157 long seid = -1;
158 int buflen = 0;
159 int i, ret;
160 int len;
161 u8 *p;
162
163 /* don't allow partial writes */
164 if (*ppos != 0)
165 return 0;
166 if (count >= sizeof(dump_request_buf))
167 return -ENOSPC;
168
169 bytes_not_copied = copy_from_user(dump_request_buf, buffer, count);
170 if (bytes_not_copied < 0)
171 return bytes_not_copied;
172 if (bytes_not_copied > 0)
173 count -= bytes_not_copied;
174 dump_request_buf[count] = '\0';
175
176 /* decode the SEID given to be dumped */
177 ret = kstrtol(dump_request_buf, 0, &seid);
178 if (ret < 0) {
179 dev_info(&pf->pdev->dev, "bad seid value '%s'\n",
180 dump_request_buf);
181 } else if (seid == 0) {
182 seid_found = true;
183
184 kfree(i40e_dbg_dump_buf);
185 i40e_dbg_dump_buffer_len = 0;
186 i40e_dbg_dump_data_len = 0;
187 i40e_dbg_dump_buf = NULL;
188 dev_info(&pf->pdev->dev, "debug buffer freed\n");
189
190 } else if (seid == pf->pf_seid || seid == 1) {
191 seid_found = true;
192
193 buflen = sizeof(struct i40e_pf);
194 buflen += (sizeof(struct i40e_aq_desc)
195 * (pf->hw.aq.num_arq_entries + pf->hw.aq.num_asq_entries));
196
197 if (i40e_dbg_prep_dump_buf(pf, buflen)) {
198 p = i40e_dbg_dump_buf;
199
200 len = sizeof(struct i40e_pf);
201 memcpy(p, pf, len);
202 p += len;
203
204 len = (sizeof(struct i40e_aq_desc)
205 * pf->hw.aq.num_asq_entries);
206 memcpy(p, pf->hw.aq.asq.desc, len);
207 p += len;
208
209 len = (sizeof(struct i40e_aq_desc)
210 * pf->hw.aq.num_arq_entries);
211 memcpy(p, pf->hw.aq.arq.desc, len);
212 p += len;
213
214 i40e_dbg_dump_data_len = buflen;
215 dev_info(&pf->pdev->dev,
216 "PF seid %ld dumped %d bytes\n",
217 seid, (int)i40e_dbg_dump_data_len);
218 }
219 } else if (seid >= I40E_BASE_VSI_SEID) {
220 struct i40e_vsi *vsi = NULL;
221 struct i40e_mac_filter *f;
222 int filter_count = 0;
223
224 mutex_lock(&pf->switch_mutex);
225 vsi = i40e_dbg_find_vsi(pf, seid);
226 if (!vsi) {
227 mutex_unlock(&pf->switch_mutex);
228 goto write_exit;
229 }
230
231 buflen = sizeof(struct i40e_vsi);
232 buflen += sizeof(struct i40e_q_vector) * vsi->num_q_vectors;
233 buflen += sizeof(struct i40e_ring) * 2 * vsi->num_queue_pairs;
234 buflen += sizeof(struct i40e_tx_buffer) * vsi->num_queue_pairs;
235 buflen += sizeof(struct i40e_rx_buffer) * vsi->num_queue_pairs;
236 list_for_each_entry(f, &vsi->mac_filter_list, list)
237 filter_count++;
238 buflen += sizeof(struct i40e_mac_filter) * filter_count;
239
240 if (i40e_dbg_prep_dump_buf(pf, buflen)) {
241 p = i40e_dbg_dump_buf;
242 seid_found = true;
243
244 len = sizeof(struct i40e_vsi);
245 memcpy(p, vsi, len);
246 p += len;
247
248 len = (sizeof(struct i40e_q_vector)
249 * vsi->num_q_vectors);
250 memcpy(p, vsi->q_vectors, len);
251 p += len;
252
253 len = (sizeof(struct i40e_ring) * vsi->num_queue_pairs);
254 memcpy(p, vsi->tx_rings, len);
255 p += len;
256 memcpy(p, vsi->rx_rings, len);
257 p += len;
258
259 for (i = 0; i < vsi->num_queue_pairs; i++) {
260 len = sizeof(struct i40e_tx_buffer);
261 memcpy(p, vsi->tx_rings[i].tx_bi, len);
262 p += len;
263 }
264 for (i = 0; i < vsi->num_queue_pairs; i++) {
265 len = sizeof(struct i40e_rx_buffer);
266 memcpy(p, vsi->rx_rings[i].rx_bi, len);
267 p += len;
268 }
269
270 /* macvlan filter list */
271 len = sizeof(struct i40e_mac_filter);
272 list_for_each_entry(f, &vsi->mac_filter_list, list) {
273 memcpy(p, f, len);
274 p += len;
275 }
276
277 i40e_dbg_dump_data_len = buflen;
278 dev_info(&pf->pdev->dev,
279 "VSI seid %ld dumped %d bytes\n",
280 seid, (int)i40e_dbg_dump_data_len);
281 }
282 mutex_unlock(&pf->switch_mutex);
283 } else if (seid >= I40E_BASE_VEB_SEID) {
284 struct i40e_veb *veb = NULL;
285
286 mutex_lock(&pf->switch_mutex);
287 veb = i40e_dbg_find_veb(pf, seid);
288 if (!veb) {
289 mutex_unlock(&pf->switch_mutex);
290 goto write_exit;
291 }
292
293 buflen = sizeof(struct i40e_veb);
294 if (i40e_dbg_prep_dump_buf(pf, buflen)) {
295 seid_found = true;
296 memcpy(i40e_dbg_dump_buf, veb, buflen);
297 i40e_dbg_dump_data_len = buflen;
298 dev_info(&pf->pdev->dev,
299 "VEB seid %ld dumped %d bytes\n",
300 seid, (int)i40e_dbg_dump_data_len);
301 }
302 mutex_unlock(&pf->switch_mutex);
303 }
304
305write_exit:
306 if (!seid_found)
307 dev_info(&pf->pdev->dev, "unknown seid %ld\n", seid);
308
309 return count;
310}
311
/* file_operations for the debugfs "dump" entry; simple_open stashes the
 * i40e_pf pointer in filp->private_data for the read/write handlers
 */
static const struct file_operations i40e_dbg_dump_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i40e_dbg_dump_read,
	.write = i40e_dbg_dump_write,
};
318
/**************************************************************
 * command
 * The command entry in debugfs is for giving the driver commands
 * to be executed - these may be for changing the internal switch
 * setup, adding or removing filters, or other things. Many of
 * these will be useful for some forms of unit testing.
 **************************************************************/
/* reply text echoed back by i40e_dbg_command_read; command handlers may
 * overwrite it (initialized to a placeholder)
 */
static char i40e_dbg_command_buf[256] = "hello world";
327
328/**
329 * i40e_dbg_command_read - read for command datum
330 * @filp: the opened file
331 * @buffer: where to write the data for the user to read
332 * @count: the size of the user's buffer
333 * @ppos: file position offset
334 **/
335static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
336 size_t count, loff_t *ppos)
337{
338 struct i40e_pf *pf = filp->private_data;
339 int bytes_not_copied;
340 int buf_size = 256;
341 char *buf;
342 int len;
343
344 /* don't allow partial reads */
345 if (*ppos != 0)
346 return 0;
347 if (count < buf_size)
348 return -ENOSPC;
349
350 buf = kzalloc(buf_size, GFP_KERNEL);
351 if (!buf)
352 return -ENOSPC;
353
354 len = snprintf(buf, buf_size, "%s: %s\n",
355 pf->vsi[pf->lan_vsi]->netdev->name,
356 i40e_dbg_command_buf);
357
358 bytes_not_copied = copy_to_user(buffer, buf, len);
359 kfree(buf);
360
361 if (bytes_not_copied < 0)
362 return bytes_not_copied;
363
364 *ppos = len;
365 return len;
366}
367
/**
 * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
 * @pf: the i40e_pf created in command write
 * @seid: the seid the user put in
 *
 * Prints the full runtime state of one VSI to the kernel log via
 * dev_info: netdev/filter state, net_stats and their offsets, every
 * rx/tx ring, q_vector layout, the aq VSI context (info) fields, and
 * per-TC bandwidth configuration.
 **/
static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
{
	struct rtnl_link_stats64 *nstat;
	struct i40e_mac_filter *f;
	struct i40e_vsi *vsi;
	int i;

	vsi = i40e_dbg_find_vsi(pf, seid);
	if (!vsi) {
		dev_info(&pf->pdev->dev,
			 "dump %d: seid not found\n", seid);
		return;
	}
	dev_info(&pf->pdev->dev, "vsi seid %d\n", seid);
	if (vsi->netdev)
		dev_info(&pf->pdev->dev,
			 " netdev: name = %s\n",
			 vsi->netdev->name);
	if (vsi->active_vlans)
		dev_info(&pf->pdev->dev,
			 " vlgrp: & = %p\n", vsi->active_vlans);
	dev_info(&pf->pdev->dev,
		 " netdev_registered = %i, current_netdev_flags = 0x%04x, state = %li flags = 0x%08lx\n",
		 vsi->netdev_registered,
		 vsi->current_netdev_flags, vsi->state, vsi->flags);
	/* MAC/VLAN filters attached to this VSI */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		dev_info(&pf->pdev->dev,
			 " mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d\n",
			 f->macaddr, f->vlan, f->is_netdev, f->is_vf,
			 f->counter);
	}
	/* cumulative netdev stats, then the offsets used for delta math */
	nstat = i40e_get_vsi_stats_struct(vsi);
	dev_info(&pf->pdev->dev,
		 " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
		 (long unsigned int)nstat->rx_packets,
		 (long unsigned int)nstat->rx_bytes,
		 (long unsigned int)nstat->rx_errors,
		 (long unsigned int)nstat->rx_dropped);
	dev_info(&pf->pdev->dev,
		 " net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
		 (long unsigned int)nstat->tx_packets,
		 (long unsigned int)nstat->tx_bytes,
		 (long unsigned int)nstat->tx_errors,
		 (long unsigned int)nstat->tx_dropped);
	dev_info(&pf->pdev->dev,
		 " net_stats: multicast = %lu, collisions = %lu\n",
		 (long unsigned int)nstat->multicast,
		 (long unsigned int)nstat->collisions);
	dev_info(&pf->pdev->dev,
		 " net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
		 (long unsigned int)nstat->rx_length_errors,
		 (long unsigned int)nstat->rx_over_errors,
		 (long unsigned int)nstat->rx_crc_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
		 (long unsigned int)nstat->rx_frame_errors,
		 (long unsigned int)nstat->rx_fifo_errors,
		 (long unsigned int)nstat->rx_missed_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
		 (long unsigned int)nstat->tx_aborted_errors,
		 (long unsigned int)nstat->tx_carrier_errors,
		 (long unsigned int)nstat->tx_fifo_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
		 (long unsigned int)nstat->tx_heartbeat_errors,
		 (long unsigned int)nstat->tx_window_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
		 (long unsigned int)nstat->rx_compressed,
		 (long unsigned int)nstat->tx_compressed);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
		 (long unsigned int)vsi->net_stats_offsets.rx_packets,
		 (long unsigned int)vsi->net_stats_offsets.rx_bytes,
		 (long unsigned int)vsi->net_stats_offsets.rx_errors,
		 (long unsigned int)vsi->net_stats_offsets.rx_dropped);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
		 (long unsigned int)vsi->net_stats_offsets.tx_packets,
		 (long unsigned int)vsi->net_stats_offsets.tx_bytes,
		 (long unsigned int)vsi->net_stats_offsets.tx_errors,
		 (long unsigned int)vsi->net_stats_offsets.tx_dropped);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: multicast = %lu, collisions = %lu\n",
		 (long unsigned int)vsi->net_stats_offsets.multicast,
		 (long unsigned int)vsi->net_stats_offsets.collisions);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
		 (long unsigned int)vsi->net_stats_offsets.rx_length_errors,
		 (long unsigned int)vsi->net_stats_offsets.rx_over_errors,
		 (long unsigned int)vsi->net_stats_offsets.rx_crc_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
		 (long unsigned int)vsi->net_stats_offsets.rx_frame_errors,
		 (long unsigned int)vsi->net_stats_offsets.rx_fifo_errors,
		 (long unsigned int)vsi->net_stats_offsets.rx_missed_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
		 (long unsigned int)vsi->net_stats_offsets.tx_aborted_errors,
		 (long unsigned int)vsi->net_stats_offsets.tx_carrier_errors,
		 (long unsigned int)vsi->net_stats_offsets.tx_fifo_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
		 (long unsigned int)vsi->net_stats_offsets.tx_heartbeat_errors,
		 (long unsigned int)vsi->net_stats_offsets.tx_window_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
		 (long unsigned int)vsi->net_stats_offsets.rx_compressed,
		 (long unsigned int)vsi->net_stats_offsets.tx_compressed);
	dev_info(&pf->pdev->dev,
		 " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
		 vsi->tx_restart, vsi->tx_busy,
		 vsi->rx_buf_failed, vsi->rx_page_failed);
	/* per-queue rx ring state */
	if (vsi->rx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			dev_info(&pf->pdev->dev,
				 " rx_rings[%i]: desc = %p\n",
				 i, vsi->rx_rings[i].desc);
			dev_info(&pf->pdev->dev,
				 " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
				 i, vsi->rx_rings[i].dev,
				 vsi->rx_rings[i].netdev,
				 vsi->rx_rings[i].rx_bi);
			dev_info(&pf->pdev->dev,
				 " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
				 i, vsi->rx_rings[i].state,
				 vsi->rx_rings[i].queue_index,
				 vsi->rx_rings[i].reg_idx);
			dev_info(&pf->pdev->dev,
				 " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
				 i, vsi->rx_rings[i].rx_hdr_len,
				 vsi->rx_rings[i].rx_buf_len,
				 vsi->rx_rings[i].dtype);
			dev_info(&pf->pdev->dev,
				 " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
				 i, vsi->rx_rings[i].hsplit,
				 vsi->rx_rings[i].next_to_use,
				 vsi->rx_rings[i].next_to_clean,
				 vsi->rx_rings[i].ring_active);
			dev_info(&pf->pdev->dev,
				 " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
				 i, vsi->rx_rings[i].rx_stats.packets,
				 vsi->rx_rings[i].rx_stats.bytes,
				 vsi->rx_rings[i].rx_stats.non_eop_descs);
			dev_info(&pf->pdev->dev,
				 " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
				 i,
				 vsi->rx_rings[i].rx_stats.alloc_rx_page_failed,
				 vsi->rx_rings[i].rx_stats.alloc_rx_buff_failed);
			dev_info(&pf->pdev->dev,
				 " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
				 i, vsi->rx_rings[i].size,
				 (long unsigned int)vsi->rx_rings[i].dma);
			dev_info(&pf->pdev->dev,
				 " rx_rings[%i]: vsi = %p, q_vector = %p\n",
				 i, vsi->rx_rings[i].vsi,
				 vsi->rx_rings[i].q_vector);
		}
	}
	/* per-queue tx ring state */
	if (vsi->tx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			dev_info(&pf->pdev->dev,
				 " tx_rings[%i]: desc = %p\n",
				 i, vsi->tx_rings[i].desc);
			dev_info(&pf->pdev->dev,
				 " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
				 i, vsi->tx_rings[i].dev,
				 vsi->tx_rings[i].netdev,
				 vsi->tx_rings[i].tx_bi);
			dev_info(&pf->pdev->dev,
				 " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
				 i, vsi->tx_rings[i].state,
				 vsi->tx_rings[i].queue_index,
				 vsi->tx_rings[i].reg_idx);
			dev_info(&pf->pdev->dev,
				 " tx_rings[%i]: dtype = %d\n",
				 i, vsi->tx_rings[i].dtype);
			dev_info(&pf->pdev->dev,
				 " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
				 i, vsi->tx_rings[i].hsplit,
				 vsi->tx_rings[i].next_to_use,
				 vsi->tx_rings[i].next_to_clean,
				 vsi->tx_rings[i].ring_active);
			dev_info(&pf->pdev->dev,
				 " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
				 i, vsi->tx_rings[i].tx_stats.packets,
				 vsi->tx_rings[i].tx_stats.bytes,
				 vsi->tx_rings[i].tx_stats.restart_queue);
			dev_info(&pf->pdev->dev,
				 " tx_rings[%i]: tx_stats: tx_busy = %lld, completed = %lld, tx_done_old = %lld\n",
				 i,
				 vsi->tx_rings[i].tx_stats.tx_busy,
				 vsi->tx_rings[i].tx_stats.completed,
				 vsi->tx_rings[i].tx_stats.tx_done_old);
			dev_info(&pf->pdev->dev,
				 " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
				 i, vsi->tx_rings[i].size,
				 (long unsigned int)vsi->tx_rings[i].dma);
			dev_info(&pf->pdev->dev,
				 " tx_rings[%i]: vsi = %p, q_vector = %p\n",
				 i, vsi->tx_rings[i].vsi,
				 vsi->tx_rings[i].q_vector);
			dev_info(&pf->pdev->dev,
				 " tx_rings[%i]: DCB tc = %d\n",
				 i, vsi->tx_rings[i].dcb_tc);
		}
	}
	dev_info(&pf->pdev->dev,
		 " work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",
		 vsi->work_limit, vsi->rx_itr_setting,
		 ITR_IS_DYNAMIC(vsi->rx_itr_setting) ? "dynamic" : "fixed",
		 vsi->tx_itr_setting,
		 ITR_IS_DYNAMIC(vsi->tx_itr_setting) ? "dynamic" : "fixed");
	dev_info(&pf->pdev->dev,
		 " max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
		 vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
	if (vsi->q_vectors) {
		for (i = 0; i < vsi->num_q_vectors; i++) {
			/* NOTE(review): this prints each q_vector's ring's
			 * element index relative to q_vectors[0] via raw
			 * pointer arithmetic — looks fragile; confirm the
			 * cast/dereference against struct i40e_q_vector
			 */
			dev_info(&pf->pdev->dev,
				 " q_vectors[%i]: base index = %ld\n",
				 i, ((long int)*vsi->q_vectors[i].rx.ring-
				 (long int)*vsi->q_vectors[0].rx.ring)/
				 sizeof(struct i40e_ring));
		}
	}
	dev_info(&pf->pdev->dev,
		 " num_q_vectors = %i, base_vector = %i\n",
		 vsi->num_q_vectors, vsi->base_vector);
	dev_info(&pf->pdev->dev,
		 " seid = %d, id = %d, uplink_seid = %d\n",
		 vsi->seid, vsi->id, vsi->uplink_seid);
	dev_info(&pf->pdev->dev,
		 " base_queue = %d, num_queue_pairs = %d, num_desc = %d\n",
		 vsi->base_queue, vsi->num_queue_pairs, vsi->num_desc);
	dev_info(&pf->pdev->dev, " type = %i\n", vsi->type);
	/* aq VSI context (struct i40e_aqc_vsi_properties_data) fields */
	dev_info(&pf->pdev->dev,
		 " info: valid_sections = 0x%04x, switch_id = 0x%04x\n",
		 vsi->info.valid_sections, vsi->info.switch_id);
	dev_info(&pf->pdev->dev,
		 " info: sw_reserved[] = 0x%02x 0x%02x\n",
		 vsi->info.sw_reserved[0], vsi->info.sw_reserved[1]);
	dev_info(&pf->pdev->dev,
		 " info: sec_flags = 0x%02x, sec_reserved = 0x%02x\n",
		 vsi->info.sec_flags, vsi->info.sec_reserved);
	dev_info(&pf->pdev->dev,
		 " info: pvid = 0x%04x, fcoe_pvid = 0x%04x, port_vlan_flags = 0x%02x\n",
		 vsi->info.pvid, vsi->info.fcoe_pvid,
		 vsi->info.port_vlan_flags);
	dev_info(&pf->pdev->dev,
		 " info: pvlan_reserved[] = 0x%02x 0x%02x 0x%02x\n",
		 vsi->info.pvlan_reserved[0], vsi->info.pvlan_reserved[1],
		 vsi->info.pvlan_reserved[2]);
	dev_info(&pf->pdev->dev,
		 " info: ingress_table = 0x%08x, egress_table = 0x%08x\n",
		 vsi->info.ingress_table, vsi->info.egress_table);
	dev_info(&pf->pdev->dev,
		 " info: cas_pv_stag = 0x%04x, cas_pv_flags= 0x%02x, cas_pv_reserved = 0x%02x\n",
		 vsi->info.cas_pv_tag, vsi->info.cas_pv_flags,
		 vsi->info.cas_pv_reserved);
	dev_info(&pf->pdev->dev,
		 " info: queue_mapping[0..7 ] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
		 vsi->info.queue_mapping[0], vsi->info.queue_mapping[1],
		 vsi->info.queue_mapping[2], vsi->info.queue_mapping[3],
		 vsi->info.queue_mapping[4], vsi->info.queue_mapping[5],
		 vsi->info.queue_mapping[6], vsi->info.queue_mapping[7]);
	dev_info(&pf->pdev->dev,
		 " info: queue_mapping[8..15] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
		 vsi->info.queue_mapping[8], vsi->info.queue_mapping[9],
		 vsi->info.queue_mapping[10], vsi->info.queue_mapping[11],
		 vsi->info.queue_mapping[12], vsi->info.queue_mapping[13],
		 vsi->info.queue_mapping[14], vsi->info.queue_mapping[15]);
	dev_info(&pf->pdev->dev,
		 " info: tc_mapping[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
		 vsi->info.tc_mapping[0], vsi->info.tc_mapping[1],
		 vsi->info.tc_mapping[2], vsi->info.tc_mapping[3],
		 vsi->info.tc_mapping[4], vsi->info.tc_mapping[5],
		 vsi->info.tc_mapping[6], vsi->info.tc_mapping[7]);
	dev_info(&pf->pdev->dev,
		 " info: queueing_opt_flags = 0x%02x queueing_opt_reserved[0..2] = 0x%02x 0x%02x 0x%02x\n",
		 vsi->info.queueing_opt_flags,
		 vsi->info.queueing_opt_reserved[0],
		 vsi->info.queueing_opt_reserved[1],
		 vsi->info.queueing_opt_reserved[2]);
	dev_info(&pf->pdev->dev,
		 " info: up_enable_bits = 0x%02x\n",
		 vsi->info.up_enable_bits);
	dev_info(&pf->pdev->dev,
		 " info: sched_reserved = 0x%02x, outer_up_table = 0x%04x\n",
		 vsi->info.sched_reserved, vsi->info.outer_up_table);
	dev_info(&pf->pdev->dev,
		 " info: cmd_reserved[] = 0x%02x 0x%02x 0x%02x 0x0%02x 0x%02x 0x%02x 0x%02x 0x0%02x\n",
		 vsi->info.cmd_reserved[0], vsi->info.cmd_reserved[1],
		 vsi->info.cmd_reserved[2], vsi->info.cmd_reserved[3],
		 vsi->info.cmd_reserved[4], vsi->info.cmd_reserved[5],
		 vsi->info.cmd_reserved[6], vsi->info.cmd_reserved[7]);
	dev_info(&pf->pdev->dev,
		 " info: qs_handle[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
		 vsi->info.qs_handle[0], vsi->info.qs_handle[1],
		 vsi->info.qs_handle[2], vsi->info.qs_handle[3],
		 vsi->info.qs_handle[4], vsi->info.qs_handle[5],
		 vsi->info.qs_handle[6], vsi->info.qs_handle[7]);
	dev_info(&pf->pdev->dev,
		 " info: stat_counter_idx = 0x%04x, sched_id = 0x%04x\n",
		 vsi->info.stat_counter_idx, vsi->info.sched_id);
	dev_info(&pf->pdev->dev,
		 " info: resp_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
		 vsi->info.resp_reserved[0], vsi->info.resp_reserved[1],
		 vsi->info.resp_reserved[2], vsi->info.resp_reserved[3],
		 vsi->info.resp_reserved[4], vsi->info.resp_reserved[5],
		 vsi->info.resp_reserved[6], vsi->info.resp_reserved[7],
		 vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
		 vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
	if (vsi->back)
		dev_info(&pf->pdev->dev, " pf = %p\n", vsi->back);
	dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx);
	dev_info(&pf->pdev->dev,
		 " tc_config: numtc = %d, enabled_tc = 0x%x\n",
		 vsi->tc_config.numtc, vsi->tc_config.enabled_tc);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		dev_info(&pf->pdev->dev,
			 " tc_config: tc = %d, qoffset = %d, qcount = %d, netdev_tc = %d\n",
			 i, vsi->tc_config.tc_info[i].qoffset,
			 vsi->tc_config.tc_info[i].qcount,
			 vsi->tc_config.tc_info[i].netdev_tc);
	}
	dev_info(&pf->pdev->dev,
		 " bw: bw_limit = %d, bw_max_quanta = %d\n",
		 vsi->bw_limit, vsi->bw_max_quanta);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		dev_info(&pf->pdev->dev,
			 " bw[%d]: ets_share_credits = %d, ets_limit_credits = %d, max_quanta = %d\n",
			 i, vsi->bw_ets_share_credits[i],
			 vsi->bw_ets_limit_credits[i],
			 vsi->bw_ets_max_quanta[i]);
	}
}
710
711/**
712 * i40e_dbg_dump_aq_desc - handles dump aq_desc write into command datum
713 * @pf: the i40e_pf created in command write
714 **/
715static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
716{
717 struct i40e_adminq_ring *ring;
718 struct i40e_hw *hw = &pf->hw;
719 int i;
720
721 /* first the send (command) ring, then the receive (event) ring */
722 dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
723 ring = &(hw->aq.asq);
724 for (i = 0; i < ring->count; i++) {
725 struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
726 dev_info(&pf->pdev->dev,
727 " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
728 i, d->flags, d->opcode, d->datalen, d->retval,
729 d->cookie_high, d->cookie_low);
730 dev_info(&pf->pdev->dev,
731 " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
732 d->params.raw[0], d->params.raw[1], d->params.raw[2],
733 d->params.raw[3], d->params.raw[4], d->params.raw[5],
734 d->params.raw[6], d->params.raw[7], d->params.raw[8],
735 d->params.raw[9], d->params.raw[10], d->params.raw[11],
736 d->params.raw[12], d->params.raw[13],
737 d->params.raw[14], d->params.raw[15]);
738 }
739
740 dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
741 ring = &(hw->aq.arq);
742 for (i = 0; i < ring->count; i++) {
743 struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
744 dev_info(&pf->pdev->dev,
745 " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
746 i, d->flags, d->opcode, d->datalen, d->retval,
747 d->cookie_high, d->cookie_low);
748 dev_info(&pf->pdev->dev,
749 " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
750 d->params.raw[0], d->params.raw[1], d->params.raw[2],
751 d->params.raw[3], d->params.raw[4], d->params.raw[5],
752 d->params.raw[6], d->params.raw[7], d->params.raw[8],
753 d->params.raw[9], d->params.raw[10], d->params.raw[11],
754 d->params.raw[12], d->params.raw[13],
755 d->params.raw[14], d->params.raw[15]);
756 }
757}
758
759/**
760 * i40e_dbg_dump_desc - handles dump desc write into command datum
761 * @cnt: number of arguments that the user supplied
762 * @vsi_seid: vsi id entered by user
763 * @ring_id: ring id entered by user
764 * @desc_n: descriptor number entered by user
765 * @pf: the i40e_pf created in command write
766 * @is_rx_ring: true if rx, false if tx
767 **/
768static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
769 struct i40e_pf *pf, bool is_rx_ring)
770{
771 union i40e_rx_desc *ds;
772 struct i40e_ring ring;
773 struct i40e_vsi *vsi;
774 int i;
775
776 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
777 if (!vsi) {
778 dev_info(&pf->pdev->dev,
779 "vsi %d not found\n", vsi_seid);
780 if (is_rx_ring)
781 dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
782 else
783 dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
784 return;
785 }
786 if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
787 dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
788 if (is_rx_ring)
789 dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
790 else
791 dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
792 return;
793 }
794 if (is_rx_ring)
795 ring = vsi->rx_rings[ring_id];
796 else
797 ring = vsi->tx_rings[ring_id];
798 if (cnt == 2) {
799 dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
800 vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
801 for (i = 0; i < ring.count; i++) {
802 if (is_rx_ring)
803 ds = I40E_RX_DESC(&ring, i);
804 else
805 ds = (union i40e_rx_desc *)
806 I40E_TX_DESC(&ring, i);
807 if ((sizeof(union i40e_rx_desc) ==
808 sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring))
809 dev_info(&pf->pdev->dev,
810 " d[%03i] = 0x%016llx 0x%016llx\n", i,
811 ds->read.pkt_addr, ds->read.hdr_addr);
812 else
813 dev_info(&pf->pdev->dev,
814 " d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
815 i, ds->read.pkt_addr,
816 ds->read.hdr_addr,
817 ds->read.rsvd1, ds->read.rsvd2);
818 }
819 } else if (cnt == 3) {
820 if (desc_n >= ring.count || desc_n < 0) {
821 dev_info(&pf->pdev->dev,
822 "descriptor %d not found\n", desc_n);
823 return;
824 }
825 if (is_rx_ring)
826 ds = I40E_RX_DESC(&ring, desc_n);
827 else
828 ds = (union i40e_rx_desc *)I40E_TX_DESC(&ring, desc_n);
829 if ((sizeof(union i40e_rx_desc) ==
830 sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring))
831 dev_info(&pf->pdev->dev,
832 "vsi = %02i %s ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
833 vsi_seid, is_rx_ring ? "rx" : "tx", ring_id,
834 desc_n, ds->read.pkt_addr, ds->read.hdr_addr);
835 else
836 dev_info(&pf->pdev->dev,
837 "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
838 vsi_seid, ring_id,
839 desc_n, ds->read.pkt_addr, ds->read.hdr_addr,
840 ds->read.rsvd1, ds->read.rsvd2);
841 } else {
842 if (is_rx_ring)
843 dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
844 else
845 dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
846 }
847}
848
849/**
850 * i40e_dbg_dump_vsi_no_seid - handles dump vsi write into command datum
851 * @pf: the i40e_pf created in command write
852 **/
853static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
854{
855 int i;
856
857 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
858 if (pf->vsi[i])
859 dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
860 i, pf->vsi[i]->seid);
861}
862
/**
 * i40e_dbg_dump_eth_stats - handles dump of eth stats into command datum
 * @pf: the i40e_pf created in command write
 * @estats: the eth stats structure to be dumped
 *
 * Logs every counter in a struct i40e_eth_stats, three per line.
 **/
static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
				    struct i40e_eth_stats *estats)
{
	dev_info(&pf->pdev->dev, " ethstats:\n");
	dev_info(&pf->pdev->dev,
		 " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
		 estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
	dev_info(&pf->pdev->dev,
		 " rx_broadcast = \t%lld \trx_discards = \t\t%lld \trx_errors = \t%lld\n",
		 estats->rx_broadcast, estats->rx_discards, estats->rx_errors);
	dev_info(&pf->pdev->dev,
		 " rx_missed = \t%lld \trx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
		 estats->rx_missed, estats->rx_unknown_protocol,
		 estats->tx_bytes);
	dev_info(&pf->pdev->dev,
		 " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
		 estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
	dev_info(&pf->pdev->dev,
		 " tx_discards = \t%lld \ttx_errors = \t\t%lld\n",
		 estats->tx_discards, estats->tx_errors);
}
889
890/**
891 * i40e_dbg_dump_stats - handles dump stats write into command datum
892 * @pf: the i40e_pf created in command write
893 * @stats: the stats structure to be dumped
894 **/
895static void i40e_dbg_dump_stats(struct i40e_pf *pf,
896 struct i40e_hw_port_stats *stats)
897{
898 int i;
899
900 dev_info(&pf->pdev->dev, " stats:\n");
901 dev_info(&pf->pdev->dev,
902 " crc_errors = \t\t%lld \tillegal_bytes = \t%lld \terror_bytes = \t\t%lld\n",
903 stats->crc_errors, stats->illegal_bytes, stats->error_bytes);
904 dev_info(&pf->pdev->dev,
905 " mac_local_faults = \t%lld \tmac_remote_faults = \t%lld \trx_length_errors = \t%lld\n",
906 stats->mac_local_faults, stats->mac_remote_faults,
907 stats->rx_length_errors);
908 dev_info(&pf->pdev->dev,
909 " link_xon_rx = \t\t%lld \tlink_xoff_rx = \t\t%lld \tlink_xon_tx = \t\t%lld\n",
910 stats->link_xon_rx, stats->link_xoff_rx, stats->link_xon_tx);
911 dev_info(&pf->pdev->dev,
912 " link_xoff_tx = \t\t%lld \trx_size_64 = \t\t%lld \trx_size_127 = \t\t%lld\n",
913 stats->link_xoff_tx, stats->rx_size_64, stats->rx_size_127);
914 dev_info(&pf->pdev->dev,
915 " rx_size_255 = \t\t%lld \trx_size_511 = \t\t%lld \trx_size_1023 = \t\t%lld\n",
916 stats->rx_size_255, stats->rx_size_511, stats->rx_size_1023);
917 dev_info(&pf->pdev->dev,
918 " rx_size_big = \t\t%lld \trx_undersize = \t\t%lld \trx_jabber = \t\t%lld\n",
919 stats->rx_size_big, stats->rx_undersize, stats->rx_jabber);
920 dev_info(&pf->pdev->dev,
921 " rx_fragments = \t\t%lld \trx_oversize = \t\t%lld \ttx_size_64 = \t\t%lld\n",
922 stats->rx_fragments, stats->rx_oversize, stats->tx_size_64);
923 dev_info(&pf->pdev->dev,
924 " tx_size_127 = \t\t%lld \ttx_size_255 = \t\t%lld \ttx_size_511 = \t\t%lld\n",
925 stats->tx_size_127, stats->tx_size_255, stats->tx_size_511);
926 dev_info(&pf->pdev->dev,
927 " tx_size_1023 = \t\t%lld \ttx_size_big = \t\t%lld \tmac_short_packet_dropped = \t%lld\n",
928 stats->tx_size_1023, stats->tx_size_big,
929 stats->mac_short_packet_dropped);
930 for (i = 0; i < 8; i += 4) {
931 dev_info(&pf->pdev->dev,
932 " priority_xon_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
933 i, stats->priority_xon_rx[i],
934 i+1, stats->priority_xon_rx[i+1],
935 i+2, stats->priority_xon_rx[i+2],
936 i+3, stats->priority_xon_rx[i+3]);
937 }
938 for (i = 0; i < 8; i += 4) {
939 dev_info(&pf->pdev->dev,
940 " priority_xoff_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
941 i, stats->priority_xoff_rx[i],
942 i+1, stats->priority_xoff_rx[i+1],
943 i+2, stats->priority_xoff_rx[i+2],
944 i+3, stats->priority_xoff_rx[i+3]);
945 }
946 for (i = 0; i < 8; i += 4) {
947 dev_info(&pf->pdev->dev,
948 " priority_xon_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
949 i, stats->priority_xon_tx[i],
950 i+1, stats->priority_xon_tx[i+1],
951 i+2, stats->priority_xon_tx[i+2],
952 i+3, stats->priority_xon_rx[i+3]);
953 }
954 for (i = 0; i < 8; i += 4) {
955 dev_info(&pf->pdev->dev,
956 " priority_xoff_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
957 i, stats->priority_xoff_tx[i],
958 i+1, stats->priority_xoff_tx[i+1],
959 i+2, stats->priority_xoff_tx[i+2],
960 i+3, stats->priority_xoff_tx[i+3]);
961 }
962 for (i = 0; i < 8; i += 4) {
963 dev_info(&pf->pdev->dev,
964 " priority_xon_2_xoff[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
965 i, stats->priority_xon_2_xoff[i],
966 i+1, stats->priority_xon_2_xoff[i+1],
967 i+2, stats->priority_xon_2_xoff[i+2],
968 i+3, stats->priority_xon_2_xoff[i+3]);
969 }
970
971 i40e_dbg_dump_eth_stats(pf, &stats->eth);
972}
973
974/**
975 * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb
976 * @pf: the i40e_pf created in command write
977 * @seid: the seid the user put in
978 **/
979static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
980{
981 struct i40e_veb *veb;
982
983 if ((seid < I40E_BASE_VEB_SEID) ||
984 (seid >= (I40E_MAX_VEB + I40E_BASE_VEB_SEID))) {
985 dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
986 return;
987 }
988
989 veb = i40e_dbg_find_veb(pf, seid);
990 if (!veb) {
991 dev_info(&pf->pdev->dev,
992 "%d: can't find veb\n", seid);
993 return;
994 }
995 dev_info(&pf->pdev->dev,
996 "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d\n",
997 veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
998 veb->uplink_seid);
999 i40e_dbg_dump_eth_stats(pf, &veb->stats);
1000}
1001
1002/**
1003 * i40e_dbg_dump_veb_all - dumps all known veb's stats
1004 * @pf: the i40e_pf created in command write
1005 **/
1006static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
1007{
1008 struct i40e_veb *veb;
1009 int i;
1010
1011 for (i = 0; i < I40E_MAX_VEB; i++) {
1012 veb = pf->veb[i];
1013 if (veb)
1014 i40e_dbg_dump_veb_seid(pf, veb->seid);
1015 }
1016}
1017
1018#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
1019/**
1020 * i40e_dbg_command_write - write into command datum
1021 * @filp: the opened file
1022 * @buffer: where to find the user's data
1023 * @count: the length of the user's data
1024 * @ppos: file position offset
1025 **/
1026static ssize_t i40e_dbg_command_write(struct file *filp,
1027 const char __user *buffer,
1028 size_t count, loff_t *ppos)
1029{
1030 struct i40e_pf *pf = filp->private_data;
1031 int bytes_not_copied;
1032 struct i40e_vsi *vsi;
1033 u8 *print_buf_start;
1034 u8 *print_buf;
1035 char *cmd_buf;
1036 int vsi_seid;
1037 int veb_seid;
1038 int cnt;
1039
1040 /* don't allow partial writes */
1041 if (*ppos != 0)
1042 return 0;
1043
1044 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1045 if (!cmd_buf)
1046 return count;
1047 bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
1048 if (bytes_not_copied < 0)
1049 return bytes_not_copied;
1050 if (bytes_not_copied > 0)
1051 count -= bytes_not_copied;
1052 cmd_buf[count] = '\0';
1053
1054 print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL);
1055 if (!print_buf_start)
1056 goto command_write_done;
1057 print_buf = print_buf_start;
1058
1059 if (strncmp(cmd_buf, "add vsi", 7) == 0) {
1060 vsi_seid = -1;
1061 cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
1062 if (cnt == 0) {
1063 /* default to PF VSI */
1064 vsi_seid = pf->vsi[pf->lan_vsi]->seid;
1065 } else if (vsi_seid < 0) {
1066 dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n",
1067 vsi_seid);
1068 goto command_write_done;
1069 }
1070
1071 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
1072 if (vsi)
1073 dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
1074 vsi->seid, vsi->uplink_seid);
1075 else
1076 dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);
1077
1078 } else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
1079 sscanf(&cmd_buf[7], "%i", &vsi_seid);
1080 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1081 if (!vsi) {
1082 dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
1083 vsi_seid);
1084 goto command_write_done;
1085 }
1086
1087 dev_info(&pf->pdev->dev, "deleting VSI %d\n", vsi_seid);
1088 i40e_vsi_release(vsi);
1089
1090 } else if (strncmp(cmd_buf, "add relay", 9) == 0) {
1091 struct i40e_veb *veb;
1092 int uplink_seid, i;
1093
1094 cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid);
1095 if (cnt != 2) {
1096 dev_info(&pf->pdev->dev,
1097 "add relay: bad command string, cnt=%d\n",
1098 cnt);
1099 goto command_write_done;
1100 } else if (uplink_seid < 0) {
1101 dev_info(&pf->pdev->dev,
1102 "add relay %d: bad uplink seid\n",
1103 uplink_seid);
1104 goto command_write_done;
1105 }
1106
1107 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1108 if (!vsi) {
1109 dev_info(&pf->pdev->dev,
1110 "add relay: vsi VSI %d not found\n", vsi_seid);
1111 goto command_write_done;
1112 }
1113
1114 for (i = 0; i < I40E_MAX_VEB; i++)
1115 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid)
1116 break;
1117 if (i >= I40E_MAX_VEB && uplink_seid != 0 &&
1118 uplink_seid != pf->mac_seid) {
1119 dev_info(&pf->pdev->dev,
1120 "add relay: relay uplink %d not found\n",
1121 uplink_seid);
1122 goto command_write_done;
1123 }
1124
1125 veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid,
1126 vsi->tc_config.enabled_tc);
1127 if (veb)
1128 dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
1129 else
1130 dev_info(&pf->pdev->dev, "add relay failed\n");
1131
1132 } else if (strncmp(cmd_buf, "del relay", 9) == 0) {
1133 int i;
1134 cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
1135 if (cnt != 1) {
1136 dev_info(&pf->pdev->dev,
1137 "del relay: bad command string, cnt=%d\n",
1138 cnt);
1139 goto command_write_done;
1140 } else if (veb_seid < 0) {
1141 dev_info(&pf->pdev->dev,
1142 "del relay %d: bad relay seid\n", veb_seid);
1143 goto command_write_done;
1144 }
1145
1146 /* find the veb */
1147 for (i = 0; i < I40E_MAX_VEB; i++)
1148 if (pf->veb[i] && pf->veb[i]->seid == veb_seid)
1149 break;
1150 if (i >= I40E_MAX_VEB) {
1151 dev_info(&pf->pdev->dev,
1152 "del relay: relay %d not found\n", veb_seid);
1153 goto command_write_done;
1154 }
1155
1156 dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
1157 i40e_veb_release(pf->veb[i]);
1158
1159 } else if (strncmp(cmd_buf, "add macaddr", 11) == 0) {
1160 u8 ma[6];
1161 int vlan = 0;
1162 struct i40e_mac_filter *f;
1163 int ret;
1164
1165 cnt = sscanf(&cmd_buf[11],
1166 "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i",
1167 &vsi_seid,
1168 &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5],
1169 &vlan);
1170 if (cnt == 7) {
1171 vlan = 0;
1172 } else if (cnt != 8) {
1173 dev_info(&pf->pdev->dev,
1174 "add macaddr: bad command string, cnt=%d\n",
1175 cnt);
1176 goto command_write_done;
1177 }
1178
1179 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1180 if (!vsi) {
1181 dev_info(&pf->pdev->dev,
1182 "add macaddr: VSI %d not found\n", vsi_seid);
1183 goto command_write_done;
1184 }
1185
1186 f = i40e_add_filter(vsi, ma, vlan, false, false);
1187 ret = i40e_sync_vsi_filters(vsi);
1188 if (f && !ret)
1189 dev_info(&pf->pdev->dev,
1190 "add macaddr: %pM vlan=%d added to VSI %d\n",
1191 ma, vlan, vsi_seid);
1192 else
1193 dev_info(&pf->pdev->dev,
1194 "add macaddr: %pM vlan=%d to VSI %d failed, f=%p ret=%d\n",
1195 ma, vlan, vsi_seid, f, ret);
1196
1197 } else if (strncmp(cmd_buf, "del macaddr", 11) == 0) {
1198 u8 ma[6];
1199 int vlan = 0;
1200 int ret;
1201
1202 cnt = sscanf(&cmd_buf[11],
1203 "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i",
1204 &vsi_seid,
1205 &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5],
1206 &vlan);
1207 if (cnt == 7) {
1208 vlan = 0;
1209 } else if (cnt != 8) {
1210 dev_info(&pf->pdev->dev,
1211 "del macaddr: bad command string, cnt=%d\n",
1212 cnt);
1213 goto command_write_done;
1214 }
1215
1216 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1217 if (!vsi) {
1218 dev_info(&pf->pdev->dev,
1219 "del macaddr: VSI %d not found\n", vsi_seid);
1220 goto command_write_done;
1221 }
1222
1223 i40e_del_filter(vsi, ma, vlan, false, false);
1224 ret = i40e_sync_vsi_filters(vsi);
1225 if (!ret)
1226 dev_info(&pf->pdev->dev,
1227 "del macaddr: %pM vlan=%d removed from VSI %d\n",
1228 ma, vlan, vsi_seid);
1229 else
1230 dev_info(&pf->pdev->dev,
1231 "del macaddr: %pM vlan=%d from VSI %d failed, ret=%d\n",
1232 ma, vlan, vsi_seid, ret);
1233
1234 } else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
1235 int v;
1236 u16 vid;
1237 i40e_status ret;
1238
1239 cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
1240 if (cnt != 2) {
1241 dev_info(&pf->pdev->dev,
1242 "add pvid: bad command string, cnt=%d\n", cnt);
1243 goto command_write_done;
1244 }
1245
1246 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1247 if (!vsi) {
1248 dev_info(&pf->pdev->dev, "add pvid: VSI %d not found\n",
1249 vsi_seid);
1250 goto command_write_done;
1251 }
1252
1253 vid = (unsigned)v;
1254 ret = i40e_vsi_add_pvid(vsi, vid);
1255 if (!ret)
1256 dev_info(&pf->pdev->dev,
1257 "add pvid: %d added to VSI %d\n",
1258 vid, vsi_seid);
1259 else
1260 dev_info(&pf->pdev->dev,
1261 "add pvid: %d to VSI %d failed, ret=%d\n",
1262 vid, vsi_seid, ret);
1263
1264 } else if (strncmp(cmd_buf, "del pvid", 8) == 0) {
1265
1266 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
1267 if (cnt != 1) {
1268 dev_info(&pf->pdev->dev,
1269 "del pvid: bad command string, cnt=%d\n",
1270 cnt);
1271 goto command_write_done;
1272 }
1273
1274 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1275 if (!vsi) {
1276 dev_info(&pf->pdev->dev,
1277 "del pvid: VSI %d not found\n", vsi_seid);
1278 goto command_write_done;
1279 }
1280
1281 i40e_vsi_remove_pvid(vsi);
1282 dev_info(&pf->pdev->dev,
1283 "del pvid: removed from VSI %d\n", vsi_seid);
1284
1285 } else if (strncmp(cmd_buf, "dump", 4) == 0) {
1286 if (strncmp(&cmd_buf[5], "switch", 6) == 0) {
1287 i40e_fetch_switch_configuration(pf, true);
1288 } else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) {
1289 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
1290 if (cnt > 0)
1291 i40e_dbg_dump_vsi_seid(pf, vsi_seid);
1292 else
1293 i40e_dbg_dump_vsi_no_seid(pf);
1294 } else if (strncmp(&cmd_buf[5], "veb", 3) == 0) {
1295 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
1296 if (cnt > 0)
1297 i40e_dbg_dump_veb_seid(pf, vsi_seid);
1298 else
1299 i40e_dbg_dump_veb_all(pf);
1300 } else if (strncmp(&cmd_buf[5], "desc", 4) == 0) {
1301 int ring_id, desc_n;
1302 if (strncmp(&cmd_buf[10], "rx", 2) == 0) {
1303 cnt = sscanf(&cmd_buf[12], "%i %i %i",
1304 &vsi_seid, &ring_id, &desc_n);
1305 i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
1306 desc_n, pf, true);
1307 } else if (strncmp(&cmd_buf[10], "tx", 2)
1308 == 0) {
1309 cnt = sscanf(&cmd_buf[12], "%i %i %i",
1310 &vsi_seid, &ring_id, &desc_n);
1311 i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
1312 desc_n, pf, false);
1313 } else if (strncmp(&cmd_buf[10], "aq", 2) == 0) {
1314 i40e_dbg_dump_aq_desc(pf);
1315 } else {
1316 dev_info(&pf->pdev->dev,
1317 "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
1318 dev_info(&pf->pdev->dev,
1319 "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
1320 dev_info(&pf->pdev->dev, "dump desc aq\n");
1321 }
1322 } else if (strncmp(&cmd_buf[5], "stats", 5) == 0) {
1323 dev_info(&pf->pdev->dev, "pf stats:\n");
1324 i40e_dbg_dump_stats(pf, &pf->stats);
1325 dev_info(&pf->pdev->dev, "pf stats_offsets:\n");
1326 i40e_dbg_dump_stats(pf, &pf->stats_offsets);
1327 } else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) {
1328 dev_info(&pf->pdev->dev,
1329 "core reset count: %d\n", pf->corer_count);
1330 dev_info(&pf->pdev->dev,
1331 "global reset count: %d\n", pf->globr_count);
1332 dev_info(&pf->pdev->dev,
1333 "emp reset count: %d\n", pf->empr_count);
1334 dev_info(&pf->pdev->dev,
1335 "pf reset count: %d\n", pf->pfr_count);
1336 } else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
1337 struct i40e_aqc_query_port_ets_config_resp *bw_data;
1338 struct i40e_dcbx_config *cfg =
1339 &pf->hw.local_dcbx_config;
1340 struct i40e_dcbx_config *r_cfg =
1341 &pf->hw.remote_dcbx_config;
1342 int i, ret;
1343
1344 bw_data = kzalloc(sizeof(
1345 struct i40e_aqc_query_port_ets_config_resp),
1346 GFP_KERNEL);
1347 if (!bw_data) {
1348 ret = -ENOMEM;
1349 goto command_write_done;
1350 }
1351
1352 ret = i40e_aq_query_port_ets_config(&pf->hw,
1353 pf->mac_seid,
1354 bw_data, NULL);
1355 if (ret) {
1356 dev_info(&pf->pdev->dev,
1357 "Query Port ETS Config AQ command failed =0x%x\n",
1358 pf->hw.aq.asq_last_status);
1359 kfree(bw_data);
1360 bw_data = NULL;
1361 goto command_write_done;
1362 }
1363 dev_info(&pf->pdev->dev,
1364 "port bw: tc_valid=0x%x tc_strict_prio=0x%x, tc_bw_max=0x%04x,0x%04x\n",
1365 bw_data->tc_valid_bits,
1366 bw_data->tc_strict_priority_bits,
1367 le16_to_cpu(bw_data->tc_bw_max[0]),
1368 le16_to_cpu(bw_data->tc_bw_max[1]));
1369 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1370 dev_info(&pf->pdev->dev, "port bw: tc_bw_share=%d tc_bw_limit=%d\n",
1371 bw_data->tc_bw_share_credits[i],
1372 le16_to_cpu(bw_data->tc_bw_limits[i]));
1373 }
1374
1375 kfree(bw_data);
1376 bw_data = NULL;
1377
1378 dev_info(&pf->pdev->dev,
1379 "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
1380 cfg->etscfg.willing, cfg->etscfg.cbs,
1381 cfg->etscfg.maxtcs);
1382 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1383 dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1384 i, cfg->etscfg.prioritytable[i],
1385 cfg->etscfg.tcbwtable[i],
1386 cfg->etscfg.tsatable[i]);
1387 }
1388 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1389 dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1390 i, cfg->etsrec.prioritytable[i],
1391 cfg->etsrec.tcbwtable[i],
1392 cfg->etsrec.tsatable[i]);
1393 }
1394 dev_info(&pf->pdev->dev,
1395 "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
1396 cfg->pfc.willing, cfg->pfc.mbc,
1397 cfg->pfc.pfccap, cfg->pfc.pfcenable);
1398 dev_info(&pf->pdev->dev,
1399 "port app_table: num_apps=%d\n", cfg->numapps);
1400 for (i = 0; i < cfg->numapps; i++) {
1401 dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n",
1402 i, cfg->app[i].priority,
1403 cfg->app[i].selector,
1404 cfg->app[i].protocolid);
1405 }
1406 /* Peer TLV DCBX data */
1407 dev_info(&pf->pdev->dev,
1408 "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
1409 r_cfg->etscfg.willing,
1410 r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs);
1411 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1412 dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1413 i, r_cfg->etscfg.prioritytable[i],
1414 r_cfg->etscfg.tcbwtable[i],
1415 r_cfg->etscfg.tsatable[i]);
1416 }
1417 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1418 dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1419 i, r_cfg->etsrec.prioritytable[i],
1420 r_cfg->etsrec.tcbwtable[i],
1421 r_cfg->etsrec.tsatable[i]);
1422 }
1423 dev_info(&pf->pdev->dev,
1424 "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
1425 r_cfg->pfc.willing,
1426 r_cfg->pfc.mbc,
1427 r_cfg->pfc.pfccap,
1428 r_cfg->pfc.pfcenable);
1429 dev_info(&pf->pdev->dev,
1430 "remote port app_table: num_apps=%d\n",
1431 r_cfg->numapps);
1432 for (i = 0; i < r_cfg->numapps; i++) {
1433 dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n",
1434 i, r_cfg->app[i].priority,
1435 r_cfg->app[i].selector,
1436 r_cfg->app[i].protocolid);
1437 }
1438 } else {
1439 dev_info(&pf->pdev->dev,
1440 "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n");
1441 dev_info(&pf->pdev->dev, "dump switch, dump vsi [seid] or\n");
1442 dev_info(&pf->pdev->dev, "dump stats\n");
1443 dev_info(&pf->pdev->dev, "dump reset stats\n");
1444 dev_info(&pf->pdev->dev, "dump port\n");
1445 dev_info(&pf->pdev->dev,
1446 "dump debug fwdata <cluster_id> <table_id> <index>\n");
1447 }
1448
1449 } else if (strncmp(cmd_buf, "msg_enable", 10) == 0) {
1450 u32 level;
1451 cnt = sscanf(&cmd_buf[10], "%i", &level);
1452 if (cnt) {
1453 if (I40E_DEBUG_USER & level) {
1454 pf->hw.debug_mask = level;
1455 dev_info(&pf->pdev->dev,
1456 "set hw.debug_mask = 0x%08x\n",
1457 pf->hw.debug_mask);
1458 }
1459 pf->msg_enable = level;
1460 dev_info(&pf->pdev->dev, "set msg_enable = 0x%08x\n",
1461 pf->msg_enable);
1462 } else {
1463 dev_info(&pf->pdev->dev, "msg_enable = 0x%08x\n",
1464 pf->msg_enable);
1465 }
1466 } else if (strncmp(cmd_buf, "pfr", 3) == 0) {
1467 dev_info(&pf->pdev->dev, "forcing PFR\n");
1468 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
1469
1470 } else if (strncmp(cmd_buf, "corer", 5) == 0) {
1471 dev_info(&pf->pdev->dev, "forcing CoreR\n");
1472 i40e_do_reset(pf, (1 << __I40E_CORE_RESET_REQUESTED));
1473
1474 } else if (strncmp(cmd_buf, "globr", 5) == 0) {
1475 dev_info(&pf->pdev->dev, "forcing GlobR\n");
1476 i40e_do_reset(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
1477
1478 } else if (strncmp(cmd_buf, "read", 4) == 0) {
1479 u32 address;
1480 u32 value;
1481 cnt = sscanf(&cmd_buf[4], "%x", &address);
1482 if (cnt != 1) {
1483 dev_info(&pf->pdev->dev, "read <reg>\n");
1484 goto command_write_done;
1485 }
1486
1487 /* check the range on address */
1488 if (address >= I40E_MAX_REGISTER) {
1489 dev_info(&pf->pdev->dev, "read reg address 0x%08x too large\n",
1490 address);
1491 goto command_write_done;
1492 }
1493
1494 value = rd32(&pf->hw, address);
1495 dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n",
1496 address, value);
1497
1498 } else if (strncmp(cmd_buf, "write", 5) == 0) {
1499 u32 address, value;
1500 cnt = sscanf(&cmd_buf[5], "%x %x", &address, &value);
1501 if (cnt != 2) {
1502 dev_info(&pf->pdev->dev, "write <reg> <value>\n");
1503 goto command_write_done;
1504 }
1505
1506 /* check the range on address */
1507 if (address >= I40E_MAX_REGISTER) {
1508 dev_info(&pf->pdev->dev, "write reg address 0x%08x too large\n",
1509 address);
1510 goto command_write_done;
1511 }
1512 wr32(&pf->hw, address, value);
1513 value = rd32(&pf->hw, address);
1514 dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n",
1515 address, value);
1516 } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
1517 if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
1518 cnt = sscanf(&cmd_buf[15], "%d", &vsi_seid);
1519 if (cnt == 0) {
1520 int i;
1521 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
1522 i40e_vsi_reset_stats(pf->vsi[i]);
1523 dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
1524 } else if (cnt == 1) {
1525 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1526 if (!vsi) {
1527 dev_info(&pf->pdev->dev,
1528 "clear_stats vsi: bad vsi %d\n",
1529 vsi_seid);
1530 goto command_write_done;
1531 }
1532 i40e_vsi_reset_stats(vsi);
1533 dev_info(&pf->pdev->dev,
1534 "vsi clear stats called for vsi %d\n",
1535 vsi_seid);
1536 } else {
1537 dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n");
1538 }
1539 } else if (strncmp(&cmd_buf[12], "pf", 2) == 0) {
1540 i40e_pf_reset_stats(pf);
1541 dev_info(&pf->pdev->dev, "pf clear stats called\n");
1542 } else {
1543 dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n");
1544 }
1545 } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
1546 (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
1547 struct i40e_fdir_data fd_data;
1548 int ret;
1549 u16 packet_len, i, j = 0;
1550 char *asc_packet;
1551 bool add = false;
1552
1553 asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
1554 GFP_KERNEL);
1555 if (!asc_packet)
1556 goto command_write_done;
1557
1558 fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
1559 GFP_KERNEL);
1560
1561 if (!fd_data.raw_packet) {
1562 kfree(asc_packet);
1563 asc_packet = NULL;
1564 goto command_write_done;
1565 }
1566
1567 if (strncmp(cmd_buf, "add", 3) == 0)
1568 add = true;
1569 cnt = sscanf(&cmd_buf[13],
1570 "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %512s",
1571 &fd_data.q_index,
1572 &fd_data.flex_off, &fd_data.pctype,
1573 &fd_data.dest_vsi, &fd_data.dest_ctl,
1574 &fd_data.fd_status, &fd_data.cnt_index,
1575 &fd_data.fd_id, &packet_len, asc_packet);
1576 if (cnt != 10) {
1577 dev_info(&pf->pdev->dev,
1578 "program fd_filter: bad command string, cnt=%d\n",
1579 cnt);
1580 kfree(asc_packet);
1581 asc_packet = NULL;
1582 kfree(fd_data.raw_packet);
1583 goto command_write_done;
1584 }
1585
1586 /* fix packet length if user entered 0 */
1587 if (packet_len == 0)
1588 packet_len = I40E_FDIR_MAX_RAW_PACKET_LOOKUP;
1589
1590 /* make sure to check the max as well */
1591 packet_len = min_t(u16,
1592 packet_len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
1593
1594 dev_info(&pf->pdev->dev, "FD raw packet:\n");
1595 for (i = 0; i < packet_len; i++) {
1596 sscanf(&asc_packet[j], "%2hhx ",
1597 &fd_data.raw_packet[i]);
1598 j += 3;
1599 snprintf(print_buf, 3, "%02x ", fd_data.raw_packet[i]);
1600 print_buf += 3;
1601 if ((i % 16) == 15) {
1602 snprintf(print_buf, 1, "\n");
1603 print_buf++;
1604 }
1605 }
1606 dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
1607 ret = i40e_program_fdir_filter(&fd_data, pf, add);
1608 if (!ret) {
1609 dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
1610 } else {
1611 dev_info(&pf->pdev->dev,
1612 "Filter command send failed %d\n", ret);
1613 }
1614 kfree(fd_data.raw_packet);
1615 fd_data.raw_packet = NULL;
1616 kfree(asc_packet);
1617 asc_packet = NULL;
1618 } else if (strncmp(cmd_buf, "lldp", 4) == 0) {
1619 if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
1620 int ret;
1621 ret = i40e_aq_stop_lldp(&pf->hw, false, NULL);
1622 if (ret) {
1623 dev_info(&pf->pdev->dev,
1624 "Stop LLDP AQ command failed =0x%x\n",
1625 pf->hw.aq.asq_last_status);
1626 goto command_write_done;
1627 }
1628 } else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
1629 int ret;
1630 ret = i40e_aq_start_lldp(&pf->hw, NULL);
1631 if (ret) {
1632 dev_info(&pf->pdev->dev,
1633 "Start LLDP AQ command failed =0x%x\n",
1634 pf->hw.aq.asq_last_status);
1635 goto command_write_done;
1636 }
1637 } else if (strncmp(&cmd_buf[5],
1638 "get local", 9) == 0) {
1639 int ret, i;
1640 u8 *buff;
1641 u16 llen, rlen;
1642 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
1643 if (!buff)
1644 goto command_write_done;
1645
1646 ret = i40e_aq_get_lldp_mib(&pf->hw, 0,
1647 I40E_AQ_LLDP_MIB_LOCAL,
1648 buff, I40E_LLDPDU_SIZE,
1649 &llen, &rlen, NULL);
1650 if (ret) {
1651 dev_info(&pf->pdev->dev,
1652 "Get LLDP MIB (local) AQ command failed =0x%x\n",
1653 pf->hw.aq.asq_last_status);
1654 kfree(buff);
1655 buff = NULL;
1656 goto command_write_done;
1657 }
1658 dev_info(&pf->pdev->dev,
1659 "Get LLDP MIB (local) AQ buffer written back:\n");
1660 for (i = 0; i < I40E_LLDPDU_SIZE; i++) {
1661 snprintf(print_buf, 3, "%02x ", buff[i]);
1662 print_buf += 3;
1663 if ((i % 16) == 15) {
1664 snprintf(print_buf, 1, "\n");
1665 print_buf++;
1666 }
1667 }
1668 dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
1669 kfree(buff);
1670 buff = NULL;
1671 } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
1672 int ret, i;
1673 u8 *buff;
1674 u16 llen, rlen;
1675 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
1676 if (!buff)
1677 goto command_write_done;
1678
1679 ret = i40e_aq_get_lldp_mib(&pf->hw,
1680 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
1681 I40E_AQ_LLDP_MIB_LOCAL,
1682 buff, I40E_LLDPDU_SIZE,
1683 &llen, &rlen, NULL);
1684 if (ret) {
1685 dev_info(&pf->pdev->dev,
1686 "Get LLDP MIB (remote) AQ command failed =0x%x\n",
1687 pf->hw.aq.asq_last_status);
1688 kfree(buff);
1689 buff = NULL;
1690 goto command_write_done;
1691 }
1692 dev_info(&pf->pdev->dev,
1693 "Get LLDP MIB (remote) AQ buffer written back:\n");
1694 for (i = 0; i < I40E_LLDPDU_SIZE; i++) {
1695 snprintf(print_buf, 3, "%02x ", buff[i]);
1696 print_buf += 3;
1697 if ((i % 16) == 15) {
1698 snprintf(print_buf, 1, "\n");
1699 print_buf++;
1700 }
1701 }
1702 dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
1703 kfree(buff);
1704 buff = NULL;
1705 } else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
1706 int ret;
1707 ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
1708 true, NULL);
1709 if (ret) {
1710 dev_info(&pf->pdev->dev,
1711 "Config LLDP MIB Change Event (on) AQ command failed =0x%x\n",
1712 pf->hw.aq.asq_last_status);
1713 goto command_write_done;
1714 }
1715 } else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
1716 int ret;
1717 ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
1718 false, NULL);
1719 if (ret) {
1720 dev_info(&pf->pdev->dev,
1721 "Config LLDP MIB Change Event (off) AQ command failed =0x%x\n",
1722 pf->hw.aq.asq_last_status);
1723 goto command_write_done;
1724 }
1725 }
1726 } else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
1727 u16 buffer_len, i, bytes;
1728 u16 module;
1729 u32 offset;
1730 u16 *buff;
1731 int ret;
1732
1733 cnt = sscanf(&cmd_buf[8], "%hx %x %hx",
1734 &module, &offset, &buffer_len);
1735 if (cnt == 0) {
1736 module = 0;
1737 offset = 0;
1738 buffer_len = 0;
1739 } else if (cnt == 1) {
1740 offset = 0;
1741 buffer_len = 0;
1742 } else if (cnt == 2) {
1743 buffer_len = 0;
1744 } else if (cnt > 3) {
1745 dev_info(&pf->pdev->dev,
1746 "nvm read: bad command string, cnt=%d\n", cnt);
1747 goto command_write_done;
1748 }
1749
1750 /* Read at least 512 words */
1751 if (buffer_len == 0)
1752 buffer_len = 512;
1753
1754 bytes = 2 * buffer_len;
1755 buff = kzalloc(bytes, GFP_KERNEL);
1756 if (!buff)
1757 goto command_write_done;
1758
1759 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
1760 if (ret) {
1761 dev_info(&pf->pdev->dev,
1762 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
1763 ret, pf->hw.aq.asq_last_status);
1764 kfree(buff);
1765 goto command_write_done;
1766 }
1767
1768 ret = i40e_aq_read_nvm(&pf->hw, module, (2 * offset),
1769 bytes, (u8 *)buff, true, NULL);
1770 i40e_release_nvm(&pf->hw);
1771 if (ret) {
1772 dev_info(&pf->pdev->dev,
1773 "Read NVM AQ failed err=%d status=0x%x\n",
1774 ret, pf->hw.aq.asq_last_status);
1775 } else {
1776 dev_info(&pf->pdev->dev,
1777 "Read NVM module=0x%x offset=0x%x words=%d\n",
1778 module, offset, buffer_len);
1779 for (i = 0; i < buffer_len; i++) {
1780 if ((i % 16) == 0) {
1781 snprintf(print_buf, 11, "\n0x%08x: ",
1782 offset + i);
1783 print_buf += 11;
1784 }
1785 snprintf(print_buf, 5, "%04x ", buff[i]);
1786 print_buf += 5;
1787 }
1788 dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
1789 }
1790 kfree(buff);
1791 buff = NULL;
1792 } else {
1793 dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
1794 dev_info(&pf->pdev->dev, "available commands\n");
1795 dev_info(&pf->pdev->dev, " add vsi [relay_seid]\n");
1796 dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n");
1797 dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n");
1798 dev_info(&pf->pdev->dev, " del relay <relay_seid>\n");
1799 dev_info(&pf->pdev->dev, " add macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n");
1800 dev_info(&pf->pdev->dev, " del macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n");
1801 dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n");
1802 dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n");
1803 dev_info(&pf->pdev->dev, " dump switch\n");
1804 dev_info(&pf->pdev->dev, " dump vsi [seid]\n");
1805 dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
1806 dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
1807 dev_info(&pf->pdev->dev, " dump desc aq\n");
1808 dev_info(&pf->pdev->dev, " dump stats\n");
1809 dev_info(&pf->pdev->dev, " dump reset stats\n");
1810 dev_info(&pf->pdev->dev, " msg_enable [level]\n");
1811 dev_info(&pf->pdev->dev, " read <reg>\n");
1812 dev_info(&pf->pdev->dev, " write <reg> <value>\n");
1813 dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n");
1814 dev_info(&pf->pdev->dev, " clear_stats pf\n");
1815 dev_info(&pf->pdev->dev, " pfr\n");
1816 dev_info(&pf->pdev->dev, " corer\n");
1817 dev_info(&pf->pdev->dev, " globr\n");
1818 dev_info(&pf->pdev->dev, " add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
1819 dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
1820 dev_info(&pf->pdev->dev, " lldp start\n");
1821 dev_info(&pf->pdev->dev, " lldp stop\n");
1822 dev_info(&pf->pdev->dev, " lldp get local\n");
1823 dev_info(&pf->pdev->dev, " lldp get remote\n");
1824 dev_info(&pf->pdev->dev, " lldp event on\n");
1825 dev_info(&pf->pdev->dev, " lldp event off\n");
1826 dev_info(&pf->pdev->dev, " nvm read [module] [word_offset] [word_count]\n");
1827 }
1828
1829command_write_done:
1830 kfree(cmd_buf);
1831 cmd_buf = NULL;
1832 kfree(print_buf_start);
1833 print_buf = NULL;
1834 print_buf_start = NULL;
1835 return count;
1836}
1837
/* debugfs "command" file: read back output from, and write commands to,
 * the driver's debug command interpreter (i40e_dbg_command_write above).
 */
static const struct file_operations i40e_dbg_command_fops = {
	.owner = THIS_MODULE,
	.open =  simple_open,
	.read =  i40e_dbg_command_read,
	.write = i40e_dbg_command_write,
};
1844
1845/**************************************************************
1846 * netdev_ops
1847 * The netdev_ops entry in debugfs is for giving the driver commands
1848 * to be executed from the netdev operations.
1849 **************************************************************/
1850static char i40e_dbg_netdev_ops_buf[256] = "hello world";
1851
1852/**
1853 * i40e_dbg_netdev_ops - read for netdev_ops datum
1854 * @filp: the opened file
1855 * @buffer: where to write the data for the user to read
1856 * @count: the size of the user's buffer
1857 * @ppos: file position offset
1858 **/
1859static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
1860 size_t count, loff_t *ppos)
1861{
1862 struct i40e_pf *pf = filp->private_data;
1863 int bytes_not_copied;
1864 int buf_size = 256;
1865 char *buf;
1866 int len;
1867
1868 /* don't allow partal reads */
1869 if (*ppos != 0)
1870 return 0;
1871 if (count < buf_size)
1872 return -ENOSPC;
1873
1874 buf = kzalloc(buf_size, GFP_KERNEL);
1875 if (!buf)
1876 return -ENOSPC;
1877
1878 len = snprintf(buf, buf_size, "%s: %s\n",
1879 pf->vsi[pf->lan_vsi]->netdev->name,
1880 i40e_dbg_netdev_ops_buf);
1881
1882 bytes_not_copied = copy_to_user(buffer, buf, len);
1883 kfree(buf);
1884
1885 if (bytes_not_copied < 0)
1886 return bytes_not_copied;
1887
1888 *ppos = len;
1889 return len;
1890}
1891
1892/**
1893 * i40e_dbg_netdev_ops_write - write into netdev_ops datum
1894 * @filp: the opened file
1895 * @buffer: where to find the user's data
1896 * @count: the length of the user's data
1897 * @ppos: file position offset
1898 **/
1899static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
1900 const char __user *buffer,
1901 size_t count, loff_t *ppos)
1902{
1903 struct i40e_pf *pf = filp->private_data;
1904 int bytes_not_copied;
1905 struct i40e_vsi *vsi;
1906 int vsi_seid;
1907 int i, cnt;
1908
1909 /* don't allow partial writes */
1910 if (*ppos != 0)
1911 return 0;
1912 if (count >= sizeof(i40e_dbg_netdev_ops_buf))
1913 return -ENOSPC;
1914
1915 memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf));
1916 bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf,
1917 buffer, count);
1918 if (bytes_not_copied < 0)
1919 return bytes_not_copied;
1920 else if (bytes_not_copied > 0)
1921 count -= bytes_not_copied;
1922 i40e_dbg_netdev_ops_buf[count] = '\0';
1923
1924 if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
1925 cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
1926 if (cnt != 1) {
1927 dev_info(&pf->pdev->dev, "tx_timeout <vsi_seid>\n");
1928 goto netdev_ops_write_done;
1929 }
1930 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1931 if (!vsi) {
1932 dev_info(&pf->pdev->dev,
1933 "tx_timeout: VSI %d not found\n", vsi_seid);
1934 goto netdev_ops_write_done;
1935 }
1936 if (rtnl_trylock()) {
1937 vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev);
1938 rtnl_unlock();
1939 dev_info(&pf->pdev->dev, "tx_timeout called\n");
1940 } else {
1941 dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
1942 }
1943 } else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
1944 int mtu;
1945 cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
1946 &vsi_seid, &mtu);
1947 if (cnt != 2) {
1948 dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n");
1949 goto netdev_ops_write_done;
1950 }
1951 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1952 if (!vsi) {
1953 dev_info(&pf->pdev->dev,
1954 "change_mtu: VSI %d not found\n", vsi_seid);
1955 goto netdev_ops_write_done;
1956 }
1957 if (rtnl_trylock()) {
1958 vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
1959 mtu);
1960 rtnl_unlock();
1961 dev_info(&pf->pdev->dev, "change_mtu called\n");
1962 } else {
1963 dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
1964 }
1965
1966 } else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) {
1967 cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
1968 if (cnt != 1) {
1969 dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n");
1970 goto netdev_ops_write_done;
1971 }
1972 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1973 if (!vsi) {
1974 dev_info(&pf->pdev->dev,
1975 "set_rx_mode: VSI %d not found\n", vsi_seid);
1976 goto netdev_ops_write_done;
1977 }
1978 if (rtnl_trylock()) {
1979 vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);
1980 rtnl_unlock();
1981 dev_info(&pf->pdev->dev, "set_rx_mode called\n");
1982 } else {
1983 dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
1984 }
1985
1986 } else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) {
1987 cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid);
1988 if (cnt != 1) {
1989 dev_info(&pf->pdev->dev, "napi <vsi_seid>\n");
1990 goto netdev_ops_write_done;
1991 }
1992 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1993 if (!vsi) {
1994 dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",
1995 vsi_seid);
1996 goto netdev_ops_write_done;
1997 }
1998 for (i = 0; i < vsi->num_q_vectors; i++)
1999 napi_schedule(&vsi->q_vectors[i].napi);
2000 dev_info(&pf->pdev->dev, "napi called\n");
2001 } else {
2002 dev_info(&pf->pdev->dev, "unknown command '%s'\n",
2003 i40e_dbg_netdev_ops_buf);
2004 dev_info(&pf->pdev->dev, "available commands\n");
2005 dev_info(&pf->pdev->dev, " tx_timeout <vsi_seid>\n");
2006 dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
2007 dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
2008 dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
2009 }
2010netdev_ops_write_done:
2011 return count;
2012}
2013
/* debugfs "netdev_ops" file: echo back the last command on read, accept
 * netdev-op commands on write (see i40e_dbg_netdev_ops_write above).
 */
static const struct file_operations i40e_dbg_netdev_ops_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i40e_dbg_netdev_ops_read,
	.write = i40e_dbg_netdev_ops_write,
};
2020
2021/**
2022 * i40e_dbg_pf_init - setup the debugfs directory for the pf
2023 * @pf: the pf that is starting up
2024 **/
2025void i40e_dbg_pf_init(struct i40e_pf *pf)
2026{
2027 struct dentry *pfile __attribute__((unused));
2028 const char *name = pci_name(pf->pdev);
2029
2030 pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
2031 if (pf->i40e_dbg_pf) {
2032 pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf,
2033 pf, &i40e_dbg_command_fops);
2034 pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
2035 &i40e_dbg_dump_fops);
2036 pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf,
2037 pf, &i40e_dbg_netdev_ops_fops);
2038 } else {
2039 dev_info(&pf->pdev->dev,
2040 "debugfs entry for %s failed\n", name);
2041 }
2042}
2043
/**
 * i40e_dbg_pf_exit - clear out the pf's debugfs entries
 * @pf: the pf that is stopping
 *
 * Removes the whole per-device debugfs tree, then releases the shared
 * dump buffer so a stale snapshot doesn't outlive the device.
 **/
void i40e_dbg_pf_exit(struct i40e_pf *pf)
{
	debugfs_remove_recursive(pf->i40e_dbg_pf);
	pf->i40e_dbg_pf = NULL;

	kfree(i40e_dbg_dump_buf);
	i40e_dbg_dump_buf = NULL;
}
2056
/**
 * i40e_dbg_init - start up debugfs for the driver
 *
 * Creates the driver-wide debugfs root directory; per-device entries
 * hang below it (see i40e_dbg_pf_init).  Failure is logged only.
 **/
void i40e_dbg_init(void)
{
	i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL);
	if (!i40e_dbg_root)
		pr_info("init of debugfs failed\n");
}
2066
/**
 * i40e_dbg_exit - clean out the driver's debugfs entries
 *
 * Tears down the whole debugfs tree created by i40e_dbg_init; safe to
 * call with a NULL root (debugfs_remove_recursive tolerates it).
 **/
void i40e_dbg_exit(void)
{
	debugfs_remove_recursive(i40e_dbg_root);
	i40e_dbg_root = NULL;
}
2075
2076#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
new file mode 100644
index 000000000000..de255143bde6
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -0,0 +1,131 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#include "i40e_diag.h"
29#include "i40e_prototype.h"
30
31/**
32 * i40e_diag_reg_pattern_test
33 * @hw: pointer to the hw struct
34 * @reg: reg to be tested
35 * @mask: bits to be touched
36 **/
37static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
38 u32 reg, u32 mask)
39{
40 const u32 patterns[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
41 u32 pat, val, orig_val;
42 int i;
43
44 orig_val = rd32(hw, reg);
45 for (i = 0; i < ARRAY_SIZE(patterns); i++) {
46 pat = patterns[i];
47 wr32(hw, reg, (pat & mask));
48 val = rd32(hw, reg);
49 if ((val & mask) != (pat & mask)) {
50 i40e_debug(hw, I40E_DEBUG_DIAG,
51 "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n",
52 __func__, reg, pat, val);
53 return I40E_ERR_DIAG_TEST_FAILED;
54 }
55 }
56
57 wr32(hw, reg, orig_val);
58 val = rd32(hw, reg);
59 if (val != orig_val) {
60 i40e_debug(hw, I40E_DEBUG_DIAG,
61 "%s: reg restore test failed - reg 0x%08x orig_val 0x%08x val 0x%08x\n",
62 __func__, reg, orig_val, val);
63 return I40E_ERR_DIAG_TEST_FAILED;
64 }
65
66 return 0;
67}
68
/* Table of register ranges exercised by the register diagnostic; a
 * zero-offset entry terminates the list (see i40e_diag_reg_test).
 */
struct i40e_diag_reg_test_info i40e_reg_list[] = {
	/* offset               mask         elements stride */
	{I40E_QTX_CTL(0),       0x0000FFBF,  64, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
	{I40E_PFINT_ITR0(0),    0x00000FFF,  3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
	{I40E_PFINT_ITRN(0, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
	{I40E_PFINT_ITRN(1, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
	{I40E_PFINT_ITRN(2, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
	{I40E_PFINT_STAT_CTL0,  0x0000000C, 1, 0},
	{I40E_PFINT_LNKLST0,    0x00001FFF, 1, 0},
	{I40E_PFINT_LNKLSTN(0), 0x000007FF, 511, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
	{I40E_QINT_TQCTL(0),    0x000000FF, I40E_QINT_TQCTL_MAX_INDEX + 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
	{I40E_QINT_RQCTL(0),    0x000000FF, I40E_QINT_RQCTL_MAX_INDEX + 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
	{I40E_PFINT_ICR0_ENA,   0xF7F20000, 1, 0},
	{ 0 }	/* terminator */
};
84
85/**
86 * i40e_diag_reg_test
87 * @hw: pointer to the hw struct
88 *
89 * Perform registers diagnostic test
90 **/
91i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
92{
93 i40e_status ret_code = 0;
94 u32 reg, mask;
95 u32 i, j;
96
97 for (i = 0; (i40e_reg_list[i].offset != 0) && !ret_code; i++) {
98 mask = i40e_reg_list[i].mask;
99 for (j = 0; (j < i40e_reg_list[i].elements) && !ret_code; j++) {
100 reg = i40e_reg_list[i].offset +
101 (j * i40e_reg_list[i].stride);
102 ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
103 }
104 }
105
106 return ret_code;
107}
108
109/**
110 * i40e_diag_eeprom_test
111 * @hw: pointer to the hw struct
112 *
113 * Perform EEPROM diagnostic test
114 **/
115i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
116{
117 i40e_status ret_code;
118 u16 reg_val;
119
120 /* read NVM control word and if NVM valid, validate EEPROM checksum*/
121 ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
122 if ((!ret_code) &&
123 ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
124 (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
125 ret_code = i40e_validate_nvm_checksum(hw, NULL);
126 } else {
127 ret_code = I40E_ERR_DIAG_TEST_FAILED;
128 }
129
130 return ret_code;
131}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
new file mode 100644
index 000000000000..3d98277f4526
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -0,0 +1,52 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#ifndef _I40E_DIAG_H_
29#define _I40E_DIAG_H_
30
31#include "i40e_type.h"
32
/* loopback mode selection for diagnostics; exact wiring of each mode is
 * defined by the firmware/PHY - NOTE(review): semantics assumed from the
 * names, confirm against the adminq loopback command spec
 */
enum i40e_lb_mode {
	I40E_LB_MODE_NONE = 0,		/* loopback disabled */
	I40E_LB_MODE_PHY_LOCAL,		/* loop at the local PHY */
	I40E_LB_MODE_PHY_REMOTE,	/* loop at the link partner PHY */
	I40E_LB_MODE_MAC_LOCAL,		/* loop at the local MAC */
};
39
/* describes one register (or register array) to be pattern-tested by
 * i40e_diag_reg_test; a zero offset terminates a table of these
 */
struct i40e_diag_reg_test_info {
	u32 offset;	/* the base register */
	u32 mask;	/* bits that can be tested */
	u32 elements;	/* number of elements if array */
	u32 stride;	/* bytes between each element */
};
46
47extern struct i40e_diag_reg_test_info i40e_reg_list[];
48
49i40e_status i40e_diag_reg_test(struct i40e_hw *hw);
50i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw);
51
52#endif /* _I40E_DIAG_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
new file mode 100644
index 000000000000..9a76b8cec76c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -0,0 +1,1449 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28/* ethtool support for i40e */
29
30#include "i40e.h"
31#include "i40e_diag.h"
32
/* one ethtool statistic: its display name plus the size and byte offset
 * of the backing field inside the stats-owning structure
 */
struct i40e_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name shown by ethtool -S */
	int sizeof_stat;			/* field size (u32 vs u64) */
	int stat_offset;			/* offsetof() into the struct */
};
38
/* helpers to build an i40e_stats entry for a field of a given struct;
 * the _NETDEV/_PF/_VSI variants just fix the containing type
 */
#define I40E_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = FIELD_SIZEOF(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}
#define I40E_NETDEV_STAT(_net_stat) \
		I40E_STAT(struct net_device_stats, #_net_stat, _net_stat)
#define I40E_PF_STAT(_name, _stat) \
		I40E_STAT(struct i40e_pf, _name, _stat)
#define I40E_VSI_STAT(_name, _stat) \
		I40E_STAT(struct i40e_vsi, _name, _stat)
50
/* per-netdev statistics, sourced from the rtnl_link_stats64 of the VSI
 * (see i40e_get_ethtool_stats); reported for every i40e netdev
 */
static const struct i40e_stats i40e_gstrings_net_stats[] = {
	I40E_NETDEV_STAT(rx_packets),
	I40E_NETDEV_STAT(tx_packets),
	I40E_NETDEV_STAT(rx_bytes),
	I40E_NETDEV_STAT(tx_bytes),
	I40E_NETDEV_STAT(rx_errors),
	I40E_NETDEV_STAT(tx_errors),
	I40E_NETDEV_STAT(rx_dropped),
	I40E_NETDEV_STAT(tx_dropped),
	I40E_NETDEV_STAT(multicast),
	I40E_NETDEV_STAT(collisions),
	I40E_NETDEV_STAT(rx_length_errors),
	I40E_NETDEV_STAT(rx_crc_errors),
};
65
66/* These PF_STATs might look like duplicates of some NETDEV_STATs,
67 * but they are separate. This device supports Virtualization, and
68 * as such might have several netdevs supporting VMDq and FCoE going
69 * through a single port. The NETDEV_STATs are for individual netdevs
70 * seen at the top of the stack, and the PF_STATs are for the physical
71 * function at the bottom of the stack hosting those netdevs.
72 *
73 * The PF_STATs are appended to the netdev stats only when ethtool -S
74 * is queried on the base PF netdev, not on the VMDq or FCoE netdev.
75 */
/* physical-function statistics, sourced from struct i40e_pf (see the
 * explanatory comment above); appended only for the base PF netdev
 */
static struct i40e_stats i40e_gstrings_stats[] = {
	I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
	I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
	I40E_PF_STAT("rx_errors", stats.eth.rx_errors),
	I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
	I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
	I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
	I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
	I40E_PF_STAT("crc_errors", stats.crc_errors),
	I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
	I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
	I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
	I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
	I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
	I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
	I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
	I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
	I40E_PF_STAT("rx_size_64", stats.rx_size_64),
	I40E_PF_STAT("rx_size_127", stats.rx_size_127),
	I40E_PF_STAT("rx_size_255", stats.rx_size_255),
	I40E_PF_STAT("rx_size_511", stats.rx_size_511),
	I40E_PF_STAT("rx_size_1023", stats.rx_size_1023),
	I40E_PF_STAT("rx_size_1522", stats.rx_size_1522),
	I40E_PF_STAT("rx_size_big", stats.rx_size_big),
	I40E_PF_STAT("tx_size_64", stats.tx_size_64),
	I40E_PF_STAT("tx_size_127", stats.tx_size_127),
	I40E_PF_STAT("tx_size_255", stats.tx_size_255),
	I40E_PF_STAT("tx_size_511", stats.tx_size_511),
	I40E_PF_STAT("tx_size_1023", stats.tx_size_1023),
	I40E_PF_STAT("tx_size_1522", stats.tx_size_1522),
	I40E_PF_STAT("tx_size_big", stats.tx_size_big),
	I40E_PF_STAT("rx_undersize", stats.rx_undersize),
	I40E_PF_STAT("rx_fragments", stats.rx_fragments),
	I40E_PF_STAT("rx_oversize", stats.rx_oversize),
	I40E_PF_STAT("rx_jabber", stats.rx_jabber),
	I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
};
113
/* number of per-queue stats: 2 values for each of the Tx and Rx rings
 * of every queue pair - assumes packets+bytes per ring, matching what
 * i40e_get_ethtool_stats emits (TODO confirm against the strings table)
 */
#define I40E_QUEUE_STATS_LEN(n) \
  ((((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs + \
    ((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs) * 2)
#define I40E_GLOBAL_STATS_LEN	ARRAY_SIZE(i40e_gstrings_stats)
#define I40E_NETDEV_STATS_LEN	ARRAY_SIZE(i40e_gstrings_net_stats)
/* stats reported for any VSI netdev: netdev stats plus queue stats */
#define I40E_VSI_STATS_LEN(n)	(I40E_NETDEV_STATS_LEN + \
				 I40E_QUEUE_STATS_LEN((n)))
/* priority flow-control counters, expressed as a u64 count */
#define I40E_PFC_STATS_LEN ( \
		(FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
		 / sizeof(u64))
/* stats reported only for the base PF netdev: PF + PFC + VSI stats */
#define I40E_PF_STATS_LEN(n)	(I40E_GLOBAL_STATS_LEN + \
				 I40E_PFC_STATS_LEN + \
				 I40E_VSI_STATS_LEN((n)))
131
/* indices into the self-test result array; order must stay in sync with
 * the i40e_gstrings_test name table below
 */
enum i40e_ethtool_test_id {
	I40E_ETH_TEST_REG = 0,
	I40E_ETH_TEST_EEPROM,
	I40E_ETH_TEST_INTR,
	I40E_ETH_TEST_LOOPBACK,
	I40E_ETH_TEST_LINK,
};
139
/* self-test names shown by ethtool; one entry per i40e_ethtool_test_id,
 * in the same order
 */
static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test  (offline)",
	"Eeprom test    (offline)",
	"Interrupt test (offline)",
	"Loopback test  (offline)",
	"Link test   (on/offline)"
};

/* number of self-tests advertised via ETH_SS_TEST */
#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
149
/**
 * i40e_get_settings - Get Link Speed and Duplex settings
 * @netdev: network interface device structure
 * @ecmd: ethtool command
 *
 * Reports speed/duplex settings based on media_type.  Supported and
 * advertised link modes come from the PHY type, port/transceiver info
 * from the media type, and speed/duplex from the current link status.
 **/
static int i40e_get_settings(struct net_device *netdev,
			     struct ethtool_cmd *ecmd)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
	bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
	u32 link_speed = hw_link_info->link_speed;

	/* hardware is either in 40G mode or 10G mode
	 * NOTE: this section initializes supported and advertising
	 */
	switch (hw_link_info->phy_type) {
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
		ecmd->supported = SUPPORTED_40000baseCR4_Full;
		ecmd->advertising = ADVERTISED_40000baseCR4_Full;
		break;
	case I40E_PHY_TYPE_40GBASE_KR4:
		ecmd->supported = SUPPORTED_40000baseKR4_Full;
		ecmd->advertising = ADVERTISED_40000baseKR4_Full;
		break;
	case I40E_PHY_TYPE_40GBASE_SR4:
		ecmd->supported = SUPPORTED_40000baseSR4_Full;
		ecmd->advertising = ADVERTISED_40000baseSR4_Full;
		break;
	case I40E_PHY_TYPE_40GBASE_LR4:
		ecmd->supported = SUPPORTED_40000baseLR4_Full;
		ecmd->advertising = ADVERTISED_40000baseLR4_Full;
		break;
	case I40E_PHY_TYPE_10GBASE_KX4:
		ecmd->supported = SUPPORTED_10000baseKX4_Full;
		ecmd->advertising = ADVERTISED_10000baseKX4_Full;
		break;
	case I40E_PHY_TYPE_10GBASE_KR:
		ecmd->supported = SUPPORTED_10000baseKR_Full;
		ecmd->advertising = ADVERTISED_10000baseKR_Full;
		break;
	case I40E_PHY_TYPE_10GBASE_T:
	default:
		/* unknown PHY types fall back to 10GBase-T */
		ecmd->supported = SUPPORTED_10000baseT_Full;
		ecmd->advertising = ADVERTISED_10000baseT_Full;
		break;
	}

	/* for now just say autoneg all the time */
	ecmd->supported |= SUPPORTED_Autoneg;

	/* map the media type onto ethtool's port/connector notion */
	if (hw->phy.media_type == I40E_MEDIA_TYPE_BACKPLANE) {
		ecmd->supported |= SUPPORTED_Backplane;
		ecmd->advertising |= ADVERTISED_Backplane;
		ecmd->port = PORT_NONE;
	} else if (hw->phy.media_type == I40E_MEDIA_TYPE_BASET) {
		ecmd->supported |= SUPPORTED_TP;
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->port = PORT_TP;
	} else {
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_FIBRE;
	}

	ecmd->transceiver = XCVR_EXTERNAL;

	if (link_up) {
		switch (link_speed) {
		case I40E_LINK_SPEED_40GB:
			/* need a SPEED_40000 in ethtool.h */
			ethtool_cmd_speed_set(ecmd, 40000);
			break;
		case I40E_LINK_SPEED_10GB:
			ethtool_cmd_speed_set(ecmd, SPEED_10000);
			break;
		default:
			/* leave speed unset for unrecognized values */
			break;
		}
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
		ecmd->duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}
242
243/**
244 * i40e_get_pauseparam - Get Flow Control status
245 * Return tx/rx-pause status
246 **/
247static void i40e_get_pauseparam(struct net_device *netdev,
248 struct ethtool_pauseparam *pause)
249{
250 struct i40e_netdev_priv *np = netdev_priv(netdev);
251 struct i40e_pf *pf = np->vsi->back;
252 struct i40e_hw *hw = &pf->hw;
253 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
254
255 pause->autoneg =
256 ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
257 AUTONEG_ENABLE : AUTONEG_DISABLE);
258
259 pause->rx_pause = 0;
260 pause->tx_pause = 0;
261 if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_RX)
262 pause->rx_pause = 1;
263 if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_TX)
264 pause->tx_pause = 1;
265}
266
267static u32 i40e_get_msglevel(struct net_device *netdev)
268{
269 struct i40e_netdev_priv *np = netdev_priv(netdev);
270 struct i40e_pf *pf = np->vsi->back;
271
272 return pf->msg_enable;
273}
274
275static void i40e_set_msglevel(struct net_device *netdev, u32 data)
276{
277 struct i40e_netdev_priv *np = netdev_priv(netdev);
278 struct i40e_pf *pf = np->vsi->back;
279
280 if (I40E_DEBUG_USER & data)
281 pf->hw.debug_mask = data;
282 pf->msg_enable = data;
283}
284
285static int i40e_get_regs_len(struct net_device *netdev)
286{
287 int reg_count = 0;
288 int i;
289
290 for (i = 0; i40e_reg_list[i].offset != 0; i++)
291 reg_count += i40e_reg_list[i].elements;
292
293 return reg_count * sizeof(u32);
294}
295
296static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
297 void *p)
298{
299 struct i40e_netdev_priv *np = netdev_priv(netdev);
300 struct i40e_pf *pf = np->vsi->back;
301 struct i40e_hw *hw = &pf->hw;
302 u32 *reg_buf = p;
303 int i, j, ri;
304 u32 reg;
305
306 /* Tell ethtool which driver-version-specific regs output we have.
307 *
308 * At some point, if we have ethtool doing special formatting of
309 * this data, it will rely on this version number to know how to
310 * interpret things. Hence, this needs to be updated if/when the
311 * diags register table is changed.
312 */
313 regs->version = 1;
314
315 /* loop through the diags reg table for what to print */
316 ri = 0;
317 for (i = 0; i40e_reg_list[i].offset != 0; i++) {
318 for (j = 0; j < i40e_reg_list[i].elements; j++) {
319 reg = i40e_reg_list[i].offset
320 + (j * i40e_reg_list[i].stride);
321 reg_buf[ri++] = rd32(hw, reg);
322 }
323 }
324
325}
326
/* read a span of the device EEPROM/NVM for ethtool -e; the NVM is word
 * addressable, so the request is widened to whole words and the byte
 * offset is re-applied on copy-out
 */
static int i40e_get_eeprom(struct net_device *netdev,
			   struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_hw *hw = &np->vsi->back->hw;
	int first_word, last_word;
	u16 i, eeprom_len;
	u16 *eeprom_buff;
	int ret_val = 0;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	/* round the byte range out to whole 16-bit words */
	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_len = last_word - first_word + 1;

	eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	/* NOTE(review): only eeprom_len==0 is treated as failure here and
	 * ret_val is otherwise passed straight through; presumably
	 * i40e_read_nvm_buffer zeroes the count on error - confirm, else
	 * a partial read could be returned as success
	 */
	ret_val = i40e_read_nvm_buffer(hw, first_word, &eeprom_len,
					   eeprom_buff);
	if (eeprom_len == 0) {
		kfree(eeprom_buff);
		return -EACCES;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < eeprom_len; i++)
		le16_to_cpus(&eeprom_buff[i]);

	/* skip the leading pad byte if the request started mid-word */
	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}
366
367static int i40e_get_eeprom_len(struct net_device *netdev)
368{
369 struct i40e_netdev_priv *np = netdev_priv(netdev);
370 struct i40e_hw *hw = &np->vsi->back->hw;
371
372 return hw->nvm.sr_size * 2;
373}
374
375static void i40e_get_drvinfo(struct net_device *netdev,
376 struct ethtool_drvinfo *drvinfo)
377{
378 struct i40e_netdev_priv *np = netdev_priv(netdev);
379 struct i40e_vsi *vsi = np->vsi;
380 struct i40e_pf *pf = vsi->back;
381
382 strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
383 strlcpy(drvinfo->version, i40e_driver_version_str,
384 sizeof(drvinfo->version));
385 strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw),
386 sizeof(drvinfo->fw_version));
387 strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
388 sizeof(drvinfo->bus_info));
389}
390
391static void i40e_get_ringparam(struct net_device *netdev,
392 struct ethtool_ringparam *ring)
393{
394 struct i40e_netdev_priv *np = netdev_priv(netdev);
395 struct i40e_pf *pf = np->vsi->back;
396 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
397
398 ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
399 ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
400 ring->rx_mini_max_pending = 0;
401 ring->rx_jumbo_max_pending = 0;
402 ring->rx_pending = vsi->rx_rings[0].count;
403 ring->tx_pending = vsi->tx_rings[0].count;
404 ring->rx_mini_pending = 0;
405 ring->rx_jumbo_pending = 0;
406}
407
/* change Tx/Rx descriptor-ring sizes for ethtool -G: new rings are
 * allocated first, then the interface is bounced and the old rings are
 * swapped out; on any allocation failure the old rings stay in place
 */
static int i40e_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u32 new_rx_count, new_tx_count;
	int i, err = 0;

	/* mini/jumbo rings are not supported */
	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	/* clamp and align the requested counts to hardware limits */
	new_tx_count = clamp_t(u32, ring->tx_pending,
			       I40E_MIN_NUM_DESCRIPTORS,
			       I40E_MAX_NUM_DESCRIPTORS);
	new_tx_count = ALIGN(new_tx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       I40E_MIN_NUM_DESCRIPTORS,
			       I40E_MAX_NUM_DESCRIPTORS);
	new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);

	/* if nothing to do return success */
	if ((new_tx_count == vsi->tx_rings[0].count) &&
	    (new_rx_count == vsi->rx_rings[0].count))
		return 0;

	/* serialize against other reconfiguration paths */
	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
		usleep_range(1000, 2000);

	if (!netif_running(vsi->netdev)) {
		/* simple case - set for the next time the netdev is started */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			vsi->tx_rings[i].count = new_tx_count;
			vsi->rx_rings[i].count = new_rx_count;
		}
		goto done;
	}

	/* We can't just free everything and then setup again,
	 * because the ISRs in MSI-X mode get passed pointers
	 * to the Tx and Rx ring structs.
	 */

	/* alloc updated Tx resources */
	if (new_tx_count != vsi->tx_rings[0].count) {
		netdev_info(netdev,
			    "Changing Tx descriptor count from %d to %d.\n",
			    vsi->tx_rings[0].count, new_tx_count);
		tx_rings = kcalloc(vsi->alloc_queue_pairs,
				   sizeof(struct i40e_ring), GFP_KERNEL);
		if (!tx_rings) {
			err = -ENOMEM;
			goto done;
		}

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			/* clone ring and setup updated count */
			tx_rings[i] = vsi->tx_rings[i];
			tx_rings[i].count = new_tx_count;
			err = i40e_setup_tx_descriptors(&tx_rings[i]);
			if (err) {
				/* unwind the rings allocated so far */
				while (i) {
					i--;
					i40e_free_tx_resources(&tx_rings[i]);
				}
				kfree(tx_rings);
				tx_rings = NULL;

				goto done;
			}
		}
	}

	/* alloc updated Rx resources */
	if (new_rx_count != vsi->rx_rings[0].count) {
		netdev_info(netdev,
			    "Changing Rx descriptor count from %d to %d\n",
			    vsi->rx_rings[0].count, new_rx_count);
		rx_rings = kcalloc(vsi->alloc_queue_pairs,
				   sizeof(struct i40e_ring), GFP_KERNEL);
		if (!rx_rings) {
			err = -ENOMEM;
			goto free_tx;
		}

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			/* clone ring and setup updated count */
			rx_rings[i] = vsi->rx_rings[i];
			rx_rings[i].count = new_rx_count;
			err = i40e_setup_rx_descriptors(&rx_rings[i]);
			if (err) {
				/* unwind the rings allocated so far */
				while (i) {
					i--;
					i40e_free_rx_resources(&rx_rings[i]);
				}
				kfree(rx_rings);
				rx_rings = NULL;

				goto free_tx;
			}
		}
	}

	/* Bring interface down, copy in the new ring info,
	 * then restore the interface
	 */
	i40e_down(vsi);

	if (tx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			i40e_free_tx_resources(&vsi->tx_rings[i]);
			vsi->tx_rings[i] = tx_rings[i];
		}
		kfree(tx_rings);
		tx_rings = NULL;
	}

	if (rx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			i40e_free_rx_resources(&vsi->rx_rings[i]);
			vsi->rx_rings[i] = rx_rings[i];
		}
		kfree(rx_rings);
		rx_rings = NULL;
	}

	i40e_up(vsi);

free_tx:
	/* error cleanup if the Rx allocations failed after getting Tx */
	if (tx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++)
			i40e_free_tx_resources(&tx_rings[i]);
		kfree(tx_rings);
		tx_rings = NULL;
	}

done:
	clear_bit(__I40E_CONFIG_BUSY, &pf->state);

	return err;
}
552
553static int i40e_get_sset_count(struct net_device *netdev, int sset)
554{
555 struct i40e_netdev_priv *np = netdev_priv(netdev);
556 struct i40e_vsi *vsi = np->vsi;
557 struct i40e_pf *pf = vsi->back;
558
559 switch (sset) {
560 case ETH_SS_TEST:
561 return I40E_TEST_LEN;
562 case ETH_SS_STATS:
563 if (vsi == pf->vsi[pf->lan_vsi])
564 return I40E_PF_STATS_LEN(netdev);
565 else
566 return I40E_VSI_STATS_LEN(netdev);
567 default:
568 return -EOPNOTSUPP;
569 }
570}
571
/**
 * i40e_get_ethtool_stats - fill the data buffer for ETHTOOL_GSTATS
 * @netdev: network interface device structure
 * @stats: ethtool stats command structure (unused here)
 * @data: output array; the value order here must stay in sync with the
 *        string order produced by i40e_get_strings(ETH_SS_STATS)
 */
static void i40e_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int i = 0; /* write index into data[] */
	char *p;
	int j;
	struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);

	/* refresh the counters before reporting them */
	i40e_update_stats(vsi);

	/* generic netdev stats; each entry is read as u64 or u32
	 * depending on its declared sizeof_stat
	 */
	for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) {
		p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset;
		data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	/* per-queue Tx counters */
	for (j = 0; j < vsi->num_queue_pairs; j++) {
		data[i++] = vsi->tx_rings[j].tx_stats.packets;
		data[i++] = vsi->tx_rings[j].tx_stats.bytes;
	}
	/* per-queue Rx counters */
	for (j = 0; j < vsi->num_queue_pairs; j++) {
		data[i++] = vsi->rx_rings[j].rx_stats.packets;
		data[i++] = vsi->rx_rings[j].rx_stats.bytes;
	}
	/* port-wide statistics are only reported on the main LAN VSI */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
			p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
			data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
				sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
		}
		/* per-priority flow control counters */
		for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
			data[i++] = pf->stats.priority_xon_tx[j];
			data[i++] = pf->stats.priority_xoff_tx[j];
		}
		for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
			data[i++] = pf->stats.priority_xon_rx[j];
			data[i++] = pf->stats.priority_xoff_rx[j];
		}
		for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
			data[i++] = pf->stats.priority_xon_2_xoff[j];
	}
}
616
/**
 * i40e_get_strings - fill the string buffers for ethtool stringsets
 * @netdev: network interface device structure
 * @stringset: which set to report (ETH_SS_TEST or ETH_SS_STATS)
 * @data: output buffer, one ETH_GSTRING_LEN slot per string
 *
 * The ETH_SS_STATS ordering here must stay in sync with the value
 * ordering in i40e_get_ethtool_stats().
 */
static void i40e_get_strings(struct net_device *netdev, u32 stringset,
			     u8 *data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		/* self-test names are fixed-size preformatted strings */
		for (i = 0; i < I40E_TEST_LEN; i++) {
			memcpy(data, i40e_gstrings_test[i], ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_STATS:
		/* generic netdev statistic names */
		for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
			snprintf(p, ETH_GSTRING_LEN, "%s",
				 i40e_gstrings_net_stats[i].stat_string);
			p += ETH_GSTRING_LEN;
		}
		/* per-queue Tx names */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		/* per-queue Rx names */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		/* port-wide names only exist on the main LAN VSI */
		if (vsi == pf->vsi[pf->lan_vsi]) {
			for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
				snprintf(p, ETH_GSTRING_LEN, "port.%s",
					 i40e_gstrings_stats[i].stat_string);
				p += ETH_GSTRING_LEN;
			}
			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "port.tx_priority_%u_xon", i);
				p += ETH_GSTRING_LEN;
				snprintf(p, ETH_GSTRING_LEN,
					 "port.tx_priority_%u_xoff", i);
				p += ETH_GSTRING_LEN;
			}
			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "port.rx_priority_%u_xon", i);
				p += ETH_GSTRING_LEN;
				snprintf(p, ETH_GSTRING_LEN,
					 "port.rx_priority_%u_xoff", i);
				p += ETH_GSTRING_LEN;
			}
			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "port.rx_priority_%u_xon_2_xoff", i);
				p += ETH_GSTRING_LEN;
			}
		}
		/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	}
}
683
/**
 * i40e_get_ts_info - report timestamping capabilities
 * @dev: network interface device structure
 * @info: filled in with the supported timestamping modes
 *
 * Delegates to the generic ethtool helper; no PTP hardware clock is
 * registered here.
 */
static int i40e_get_ts_info(struct net_device *dev,
			    struct ethtool_ts_info *info)
{
	return ethtool_op_get_ts_info(dev, info);
}
689
690static int i40e_link_test(struct i40e_pf *pf, u64 *data)
691{
692 if (i40e_get_link_status(&pf->hw))
693 *data = 0;
694 else
695 *data = 1;
696
697 return *data;
698}
699
700static int i40e_reg_test(struct i40e_pf *pf, u64 *data)
701{
702 i40e_status ret;
703
704 ret = i40e_diag_reg_test(&pf->hw);
705 *data = ret;
706
707 return ret;
708}
709
710static int i40e_eeprom_test(struct i40e_pf *pf, u64 *data)
711{
712 i40e_status ret;
713
714 ret = i40e_diag_eeprom_test(&pf->hw);
715 *data = ret;
716
717 return ret;
718}
719
/**
 * i40e_intr_test - ethtool interrupt self-test
 * @pf: the physical function to test (unused)
 * @data: result slot, written with -ENOSYS
 *
 * Not implemented yet; always reports failure with -ENOSYS.
 */
static int i40e_intr_test(struct i40e_pf *pf, u64 *data)
{
	*data = -ENOSYS;

	return *data;
}
726
/**
 * i40e_loopback_test - ethtool loopback self-test
 * @pf: the physical function to test (unused)
 * @data: result slot, written with -ENOSYS
 *
 * Not implemented yet; always reports failure with -ENOSYS.
 */
static int i40e_loopback_test(struct i40e_pf *pf, u64 *data)
{
	*data = -ENOSYS;

	return *data;
}
733
734static void i40e_diag_test(struct net_device *netdev,
735 struct ethtool_test *eth_test, u64 *data)
736{
737 struct i40e_netdev_priv *np = netdev_priv(netdev);
738 struct i40e_pf *pf = np->vsi->back;
739
740 set_bit(__I40E_TESTING, &pf->state);
741 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
742 /* Offline tests */
743
744 netdev_info(netdev, "offline testing starting\n");
745
746 /* Link test performed before hardware reset
747 * so autoneg doesn't interfere with test result
748 */
749 netdev_info(netdev, "link test starting\n");
750 if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK]))
751 eth_test->flags |= ETH_TEST_FL_FAILED;
752
753 netdev_info(netdev, "register test starting\n");
754 if (i40e_reg_test(pf, &data[I40E_ETH_TEST_REG]))
755 eth_test->flags |= ETH_TEST_FL_FAILED;
756
757 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
758 netdev_info(netdev, "eeprom test starting\n");
759 if (i40e_eeprom_test(pf, &data[I40E_ETH_TEST_EEPROM]))
760 eth_test->flags |= ETH_TEST_FL_FAILED;
761
762 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
763 netdev_info(netdev, "interrupt test starting\n");
764 if (i40e_intr_test(pf, &data[I40E_ETH_TEST_INTR]))
765 eth_test->flags |= ETH_TEST_FL_FAILED;
766
767 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
768 netdev_info(netdev, "loopback test starting\n");
769 if (i40e_loopback_test(pf, &data[I40E_ETH_TEST_LOOPBACK]))
770 eth_test->flags |= ETH_TEST_FL_FAILED;
771
772 } else {
773 netdev_info(netdev, "online test starting\n");
774 /* Online tests */
775 if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK]))
776 eth_test->flags |= ETH_TEST_FL_FAILED;
777
778 /* Offline only tests, not run in online; pass by default */
779 data[I40E_ETH_TEST_REG] = 0;
780 data[I40E_ETH_TEST_EEPROM] = 0;
781 data[I40E_ETH_TEST_INTR] = 0;
782 data[I40E_ETH_TEST_LOOPBACK] = 0;
783
784 clear_bit(__I40E_TESTING, &pf->state);
785 }
786}
787
788static void i40e_get_wol(struct net_device *netdev,
789 struct ethtool_wolinfo *wol)
790{
791 wol->supported = 0;
792 wol->wolopts = 0;
793}
794
795static int i40e_nway_reset(struct net_device *netdev)
796{
797 /* restart autonegotiation */
798 struct i40e_netdev_priv *np = netdev_priv(netdev);
799 struct i40e_pf *pf = np->vsi->back;
800 struct i40e_hw *hw = &pf->hw;
801 i40e_status ret = 0;
802
803 ret = i40e_aq_set_link_restart_an(hw, NULL);
804 if (ret) {
805 netdev_info(netdev, "link restart failed, aq_err=%d\n",
806 pf->hw.aq.asq_last_status);
807 return -EIO;
808 }
809
810 return 0;
811}
812
813static int i40e_set_phys_id(struct net_device *netdev,
814 enum ethtool_phys_id_state state)
815{
816 struct i40e_netdev_priv *np = netdev_priv(netdev);
817 struct i40e_pf *pf = np->vsi->back;
818 struct i40e_hw *hw = &pf->hw;
819 int blink_freq = 2;
820
821 switch (state) {
822 case ETHTOOL_ID_ACTIVE:
823 pf->led_status = i40e_led_get(hw);
824 return blink_freq;
825 case ETHTOOL_ID_ON:
826 i40e_led_set(hw, 0xF);
827 break;
828 case ETHTOOL_ID_OFF:
829 i40e_led_set(hw, 0x0);
830 break;
831 case ETHTOOL_ID_INACTIVE:
832 i40e_led_set(hw, pf->led_status);
833 break;
834 }
835
836 return 0;
837}
838
/* NOTE: i40e hardware uses a conversion factor of 2 for the Interrupt
 * Throttle Rate (ITR), i.e. ITR(1) = 2us, ITR(10) = 20us; likewise
 * 125us (8000 interrupts per second) == ITR(62)
 */
843
844static int i40e_get_coalesce(struct net_device *netdev,
845 struct ethtool_coalesce *ec)
846{
847 struct i40e_netdev_priv *np = netdev_priv(netdev);
848 struct i40e_vsi *vsi = np->vsi;
849
850 ec->tx_max_coalesced_frames_irq = vsi->work_limit;
851 ec->rx_max_coalesced_frames_irq = vsi->work_limit;
852
853 if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
854 ec->rx_coalesce_usecs = 1;
855 else
856 ec->rx_coalesce_usecs = vsi->rx_itr_setting;
857
858 if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
859 ec->tx_coalesce_usecs = 1;
860 else
861 ec->tx_coalesce_usecs = vsi->tx_itr_setting;
862
863 return 0;
864}
865
/**
 * i40e_set_coalesce - set interrupt coalescing for the VSI
 * @netdev: network interface device structure
 * @ec: requested coalescing parameters
 *
 * rx/tx_coalesce_usecs semantics: 0 disables the ITR, 1 selects the
 * dynamic (adaptive) ITR, any other value is taken as microseconds and
 * must lie within [I40E_MIN_ITR << 1, I40E_MAX_ITR << 1].
 */
static int i40e_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_q_vector *q_vector;
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i;

	/* NOTE(review): if only rx_max_coalesced_frames_irq is non-zero,
	 * the shared work limit is still taken from the (possibly zero)
	 * Tx value — confirm this asymmetry is intended.
	 */
	if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
		vsi->work_limit = ec->tx_max_coalesced_frames_irq;

	switch (ec->rx_coalesce_usecs) {
	case 0: /* disable Rx interrupt throttling */
		vsi->rx_itr_setting = 0;
		break;
	case 1: /* enable dynamic (adaptive) Rx ITR at the default rate */
		vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
				       ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
		break;
	default:
		/* fixed usec value, range-checked against hardware limits */
		if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
		    (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
			return -EINVAL;
		vsi->rx_itr_setting = ec->rx_coalesce_usecs;
		break;
	}

	switch (ec->tx_coalesce_usecs) {
	case 0: /* disable Tx interrupt throttling */
		vsi->tx_itr_setting = 0;
		break;
	case 1: /* enable dynamic (adaptive) Tx ITR at the default rate */
		vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
				       ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
		break;
	default:
		if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
		    (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
			return -EINVAL;
		vsi->tx_itr_setting = ec->tx_coalesce_usecs;
		break;
	}

	/* program the new ITR values into every interrupt vector used by
	 * this VSI; the "vector - 1" register index presumably reflects
	 * 0-based ITRN registers vs. 1-based vector numbering — confirm
	 */
	vector = vsi->base_vector;
	q_vector = vsi->q_vectors;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++, q_vector++) {
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
		wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
		wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
		i40e_flush(hw);
	}

	return 0;
}
924
925/**
926 * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
927 * @pf: pointer to the physical function struct
928 * @cmd: ethtool rxnfc command
929 *
930 * Returns Success if the flow is supported, else Invalid Input.
931 **/
932static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
933{
934 cmd->data = 0;
935
936 /* Report default options for RSS on i40e */
937 switch (cmd->flow_type) {
938 case TCP_V4_FLOW:
939 case UDP_V4_FLOW:
940 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
941 /* fall through to add IP fields */
942 case SCTP_V4_FLOW:
943 case AH_ESP_V4_FLOW:
944 case AH_V4_FLOW:
945 case ESP_V4_FLOW:
946 case IPV4_FLOW:
947 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
948 break;
949 case TCP_V6_FLOW:
950 case UDP_V6_FLOW:
951 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
952 /* fall through to add IP fields */
953 case SCTP_V6_FLOW:
954 case AH_ESP_V6_FLOW:
955 case AH_V6_FLOW:
956 case ESP_V6_FLOW:
957 case IPV6_FLOW:
958 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
959 break;
960 default:
961 return -EINVAL;
962 }
963
964 return 0;
965}
966
967/**
968 * i40e_get_rxnfc - command to get RX flow classification rules
969 * @netdev: network interface device structure
970 * @cmd: ethtool rxnfc command
971 *
972 * Returns Success if the command is supported.
973 **/
974static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
975 u32 *rule_locs)
976{
977 struct i40e_netdev_priv *np = netdev_priv(netdev);
978 struct i40e_vsi *vsi = np->vsi;
979 struct i40e_pf *pf = vsi->back;
980 int ret = -EOPNOTSUPP;
981
982 switch (cmd->cmd) {
983 case ETHTOOL_GRXRINGS:
984 cmd->data = vsi->alloc_queue_pairs;
985 ret = 0;
986 break;
987 case ETHTOOL_GRXFH:
988 ret = i40e_get_rss_hash_opts(pf, cmd);
989 break;
990 case ETHTOOL_GRXCLSRLCNT:
991 ret = 0;
992 break;
993 case ETHTOOL_GRXCLSRULE:
994 ret = 0;
995 break;
996 case ETHTOOL_GRXCLSRLALL:
997 cmd->data = 500;
998 ret = 0;
999 default:
1000 break;
1001 }
1002
1003 return ret;
1004}
1005
/**
 * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
 * @pf: pointer to the physical function struct
 * @nfc: ethtool rxnfc command describing the flow type and hash fields
 *
 * Returns Success if the flow input set is supported.
 **/
static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
{
	struct i40e_hw *hw = &pf->hw;
	/* current hash-enable bitmap, read from two 32-bit registers */
	u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
		   ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	/* We need at least the IP SRC and DEST fields for hashing */
	if (!(nfc->data & RXH_IP_SRC) ||
	    !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	/* each supported flow type maps to one or more packet classifier
	 * type (PCTYPE) bits in the hash-enable bitmap; L4 port hashing
	 * is all-or-nothing (both port halves or neither)
	 */
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case TCP_V6_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V4_FLOW:
		/* UDP covers unicast, multicast and the IPv4 fragment bit */
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &=
			~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |=
			(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &=
			~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |=
			(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
		/* these flow types cannot hash on L4 ports at all */
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
		break;
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
		break;
	case IPV4_FLOW:
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
		break;
	case IPV6_FLOW:
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
		break;
	default:
		return -EINVAL;
	}

	/* commit the updated bitmap to hardware */
	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
	i40e_flush(hw);

	return 0;
}
1128
1129#define IP_HEADER_OFFSET 14
1130/**
1131 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for
1132 * a specific flow spec
1133 * @vsi: pointer to the targeted VSI
1134 * @fd_data: the flow director data required from the FDir descriptor
1135 * @ethtool_rx_flow_spec: the flow spec
1136 * @add: true adds a filter, false removes it
1137 *
1138 * Returns 0 if the filters were successfully added or removed
1139 **/
1140static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
1141 struct i40e_fdir_data *fd_data,
1142 struct ethtool_rx_flow_spec *fsp, bool add)
1143{
1144 struct i40e_pf *pf = vsi->back;
1145 struct udphdr *udp;
1146 struct iphdr *ip;
1147 bool err = false;
1148 int ret;
1149 int i;
1150
1151 ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
1152 udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
1153 + sizeof(struct iphdr));
1154
1155 ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
1156 ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
1157 udp->source = fsp->h_u.tcp_ip4_spec.psrc;
1158 udp->dest = fsp->h_u.tcp_ip4_spec.pdst;
1159
1160 for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
1161 i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
1162 fd_data->pctype = i;
1163 ret = i40e_program_fdir_filter(fd_data, pf, add);
1164
1165 if (ret) {
1166 dev_info(&pf->pdev->dev,
1167 "Filter command send failed for PCTYPE %d (ret = %d)\n",
1168 fd_data->pctype, ret);
1169 err = true;
1170 } else {
1171 dev_info(&pf->pdev->dev,
1172 "Filter OK for PCTYPE %d (ret = %d)\n",
1173 fd_data->pctype, ret);
1174 }
1175 }
1176
1177 return err ? -EOPNOTSUPP : 0;
1178}
1179
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required from the FDir descriptor
 * @fsp: the ethtool flow spec
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_data *fd_data,
				   struct ethtool_rx_flow_spec *fsp, bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	bool err = false;
	int ret;

	/* headers live in the preallocated raw packet template, after
	 * the 14-byte Ethernet header (IP_HEADER_OFFSET)
	 */
	ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	/* first pass: program the TCP SYN pctype with only the
	 * destination fields filled in — presumably SYN matching is
	 * destination-only by design; confirm against datasheet
	 */
	ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
	tcp->dest = fsp->h_u.tcp_ip4_spec.pdst;

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
	ret = i40e_program_fdir_filter(fd_data, pf, add);

	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Filter command send failed for PCTYPE %d (ret = %d)\n",
			 fd_data->pctype, ret);
		err = true;
	} else {
		dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
			 fd_data->pctype, ret);
	}

	/* second pass: add the source fields and program the full TCP
	 * pctype
	 */
	ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
	tcp->source = fsp->h_u.tcp_ip4_spec.psrc;

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;

	ret = i40e_program_fdir_filter(fd_data, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Filter command send failed for PCTYPE %d (ret = %d)\n",
			 fd_data->pctype, ret);
		err = true;
	} else {
		dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
			 fd_data->pctype, ret);
	}

	/* fail the whole request if either programming step failed */
	return err ? -EOPNOTSUPP : 0;
}
1238
/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI (unused)
 * @fd_data: the flow director data required from the FDir descriptor (unused)
 * @fsp: the ethtool flow spec (unused)
 * @add: true adds a filter, false removes it (unused)
 *
 * SCTP flow director filters are not implemented; always returns
 * -EOPNOTSUPP.
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_data *fd_data,
				    struct ethtool_rx_flow_spec *fsp, bool add)
{
	return -EOPNOTSUPP;
}
1255
1256/**
1257 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
1258 * a specific flow spec
1259 * @vsi: pointer to the targeted VSI
1260 * @fd_data: the flow director data required for the FDir descriptor
1261 * @fsp: the ethtool flow spec
1262 * @add: true adds a filter, false removes it
1263 *
1264 * Returns 0 if the filters were successfully added or removed
1265 **/
1266static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
1267 struct i40e_fdir_data *fd_data,
1268 struct ethtool_rx_flow_spec *fsp, bool add)
1269{
1270 struct i40e_pf *pf = vsi->back;
1271 struct iphdr *ip;
1272 bool err = false;
1273 int ret;
1274 int i;
1275
1276 ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
1277
1278 ip->saddr = fsp->h_u.usr_ip4_spec.ip4src;
1279 ip->daddr = fsp->h_u.usr_ip4_spec.ip4dst;
1280 ip->protocol = fsp->h_u.usr_ip4_spec.proto;
1281
1282 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
1283 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
1284 fd_data->pctype = i;
1285 ret = i40e_program_fdir_filter(fd_data, pf, add);
1286
1287 if (ret) {
1288 dev_info(&pf->pdev->dev,
1289 "Filter command send failed for PCTYPE %d (ret = %d)\n",
1290 fd_data->pctype, ret);
1291 err = true;
1292 } else {
1293 dev_info(&pf->pdev->dev,
1294 "Filter OK for PCTYPE %d (ret = %d)\n",
1295 fd_data->pctype, ret);
1296 }
1297 }
1298
1299 return err ? -EOPNOTSUPP : 0;
1300}
1301
/**
 * i40e_add_del_fdir_ethtool - Add/Remove Flow Director filters for
 * a specific flow spec based on their protocol
 * @vsi: pointer to the targeted VSI
 * @cmd: command to get or set RX flow classification rules
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
				     struct ethtool_rxnfc *cmd, bool add)
{
	struct i40e_fdir_data fd_data;
	int ret = -EINVAL;
	struct i40e_pf *pf;
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;

	if (!vsi)
		return -EINVAL;

	pf = vsi->back;

	/* the target queue must exist unless the rule is a drop rule */
	if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
	    (fsp->ring_cookie >= vsi->num_queue_pairs))
		return -EINVAL;

	/* Populate the Flow Director that we have at the moment
	 * and allocate the raw packet buffer for the calling functions
	 */
	fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
				     GFP_KERNEL);

	if (!fd_data.raw_packet) {
		dev_info(&pf->pdev->dev, "Could not allocate memory\n");
		return -ENOMEM;
	}

	/* NOTE(review): ring_cookie is 64-bit and is truncated into
	 * q_index here; for RX_CLS_FLOW_DISC (drop) this stores a
	 * truncated sentinel while dest_ctl stays 0 — confirm the drop
	 * case is handled as intended downstream.
	 */
	fd_data.q_index = fsp->ring_cookie;
	fd_data.flex_off = 0;
	fd_data.pctype = 0;
	fd_data.dest_vsi = vsi->id;
	fd_data.dest_ctl = 0;
	fd_data.fd_status = 0;
	fd_data.cnt_index = 0;
	fd_data.fd_id = 0;

	/* dispatch on flow type; IP_USER_FLOW additionally selects the
	 * handler by the L4 protocol embedded in the spec
	 */
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
		break;
	case IPV4_FLOW:
		ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
		break;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
			break;
		default:
			ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
			break;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Could not specify spec type\n");
		ret = -EINVAL;
	}

	kfree(fd_data.raw_packet);
	fd_data.raw_packet = NULL;

	return ret;
}
1388/**
1389 * i40e_set_rxnfc - command to set RX flow classification rules
1390 * @netdev: network interface device structure
1391 * @cmd: ethtool rxnfc command
1392 *
1393 * Returns Success if the command is supported.
1394 **/
1395static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1396{
1397 struct i40e_netdev_priv *np = netdev_priv(netdev);
1398 struct i40e_vsi *vsi = np->vsi;
1399 struct i40e_pf *pf = vsi->back;
1400 int ret = -EOPNOTSUPP;
1401
1402 switch (cmd->cmd) {
1403 case ETHTOOL_SRXFH:
1404 ret = i40e_set_rss_hash_opt(pf, cmd);
1405 break;
1406 case ETHTOOL_SRXCLSRLINS:
1407 ret = i40e_add_del_fdir_ethtool(vsi, cmd, true);
1408 break;
1409 case ETHTOOL_SRXCLSRLDEL:
1410 ret = i40e_add_del_fdir_ethtool(vsi, cmd, false);
1411 break;
1412 default:
1413 break;
1414 }
1415
1416 return ret;
1417}
1418
/* ethtool callback table for i40e netdevs */
static const struct ethtool_ops i40e_ethtool_ops = {
	.get_settings		= i40e_get_settings,
	.get_drvinfo		= i40e_get_drvinfo,
	.get_regs_len		= i40e_get_regs_len,
	.get_regs		= i40e_get_regs,
	.nway_reset		= i40e_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= i40e_get_wol,
	.get_eeprom_len		= i40e_get_eeprom_len,
	.get_eeprom		= i40e_get_eeprom,
	.get_ringparam		= i40e_get_ringparam,
	.set_ringparam		= i40e_set_ringparam,
	.get_pauseparam		= i40e_get_pauseparam,
	.get_msglevel		= i40e_get_msglevel,
	.set_msglevel		= i40e_set_msglevel,
	.get_rxnfc		= i40e_get_rxnfc,
	.set_rxnfc		= i40e_set_rxnfc,
	.self_test		= i40e_diag_test,
	.get_strings		= i40e_get_strings,
	.set_phys_id		= i40e_set_phys_id,
	.get_sset_count		= i40e_get_sset_count,
	.get_ethtool_stats	= i40e_get_ethtool_stats,
	.get_coalesce		= i40e_get_coalesce,
	.set_coalesce		= i40e_set_coalesce,
	.get_ts_info		= i40e_get_ts_info,
};
1445
/**
 * i40e_set_ethtool_ops - attach the i40e ethtool callbacks to a netdev
 * @netdev: network interface device structure
 */
void i40e_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &i40e_ethtool_ops);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
new file mode 100644
index 000000000000..901804af8b0e
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -0,0 +1,366 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#include "i40e_osdep.h"
29#include "i40e_register.h"
30#include "i40e_status.h"
31#include "i40e_alloc.h"
32#include "i40e_hmc.h"
33#include "i40e_type.h"
34
35/**
36 * i40e_add_sd_table_entry - Adds a segment descriptor to the table
37 * @hw: pointer to our hw struct
38 * @hmc_info: pointer to the HMC configuration information struct
39 * @sd_index: segment descriptor index to manipulate
40 * @type: what type of segment descriptor we're manipulating
41 * @direct_mode_sz: size to alloc in direct mode
42 **/
i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
					    struct i40e_hmc_info *hmc_info,
					    u32 sd_index,
					    enum i40e_sd_entry_type type,
					    u64 direct_mode_sz)
{
	/* mem_type is passed to i40e_allocate_dma_mem(); marked unused
	 * presumably because that call can be a macro that drops the
	 * argument on some OS layers -- TODO confirm in i40e_osdep.h
	 */
	enum i40e_memory_type mem_type __attribute__((unused));
	i40e_status ret_code = 0;
	struct i40e_hmc_sd_entry *sd_entry;
	bool dma_mem_alloc_done = false;
	struct i40e_dma_mem mem;
	u64 alloc_len;

	if (NULL == hmc_info->sd_table.sd_entry) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_entry\n");
		goto exit;
	}

	if (sd_index >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_index\n");
		goto exit;
	}

	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
	if (!sd_entry->valid) {
		/* first user of this SD slot: choose allocation size/type */
		if (I40E_SD_TYPE_PAGED == type) {
			mem_type = i40e_mem_pd;
			alloc_len = I40E_HMC_PAGED_BP_SIZE;
		} else {
			mem_type = i40e_mem_bp_jumbo;
			alloc_len = direct_mode_sz;
		}

		/* allocate a 4K pd page or 2M backing page */
		ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
						 I40E_HMC_PD_BP_BUF_ALIGNMENT);
		if (ret_code)
			goto exit;
		dma_mem_alloc_done = true;
		if (I40E_SD_TYPE_PAGED == type) {
			/* paged mode also needs sw bookkeeping for the
			 * 512 pd entries behind this sd
			 */
			ret_code = i40e_allocate_virt_mem(hw,
					&sd_entry->u.pd_table.pd_entry_virt_mem,
					sizeof(struct i40e_hmc_pd_entry) * 512);
			if (ret_code)
				goto exit;
			sd_entry->u.pd_table.pd_entry =
				(struct i40e_hmc_pd_entry *)
				sd_entry->u.pd_table.pd_entry_virt_mem.va;
			memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem,
			       sizeof(struct i40e_dma_mem));
		} else {
			/* direct mode: the DMA buffer itself is the
			 * backing page
			 */
			memcpy(&sd_entry->u.bp.addr, &mem,
			       sizeof(struct i40e_dma_mem));
			sd_entry->u.bp.sd_pd_index = sd_index;
		}
		/* initialize the sd entry */
		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;

		/* increment the ref count */
		I40E_INC_SD_REFCNT(&hmc_info->sd_table);
	}
	/* Increment backing page reference count */
	if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
		I40E_INC_BP_REFCNT(&sd_entry->u.bp);
exit:
	/* on any failure, release the DMA buffer allocated above */
	if (ret_code)
		if (dma_mem_alloc_done)
			i40e_free_dma_mem(hw, &mem);

	return ret_code;
}
116
117/**
118 * i40e_add_pd_table_entry - Adds page descriptor to the specified table
119 * @hw: pointer to our HW structure
120 * @hmc_info: pointer to the HMC configuration information structure
121 * @pd_index: which page descriptor index to manipulate
122 *
123 * This function:
124 * 1. Initializes the pd entry
125 * 2. Adds pd_entry in the pd_table
126 * 3. Mark the entry valid in i40e_hmc_pd_entry structure
127 * 4. Initializes the pd_entry's ref count to 1
128 * assumptions:
129 * 1. The memory for pd should be pinned down, physically contiguous and
130 * aligned on 4K boundary and zeroed memory.
131 * 2. It should be 4K in size.
132 **/
i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
					    struct i40e_hmc_info *hmc_info,
					    u32 pd_index)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_dma_mem mem;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;
	u64 page_desc;

	/* reject indexes beyond the configured segment descriptor count */
	if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		hw_dbg(hw, "i40e_add_pd_table_entry: bad pd_index\n");
		goto exit;
	}

	/* find corresponding sd */
	sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
	/* NOTE: a non-paged owning sd silently returns success (ret_code
	 * stays 0) rather than an error
	 */
	if (I40E_SD_TYPE_PAGED !=
	    hmc_info->sd_table.sd_entry[sd_idx].entry_type)
		goto exit;

	/* pd slot relative to the start of its owning sd */
	rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (!pd_entry->valid) {
		/* allocate a 4K backing page */
		ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp,
						 I40E_HMC_PAGED_BP_SIZE,
						 I40E_HMC_PD_BP_BUF_ALIGNMENT);
		if (ret_code)
			goto exit;

		memcpy(&pd_entry->bp.addr, &mem, sizeof(struct i40e_dma_mem));
		pd_entry->bp.sd_pd_index = pd_index;
		pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
		/* Set page address and valid bit */
		page_desc = mem.pa | 0x1;

		pd_addr = (u64 *)pd_table->pd_page_addr.va;
		pd_addr += rel_pd_idx;

		/* Add the backing page physical address in the pd entry */
		memcpy(pd_addr, &page_desc, sizeof(u64));

		pd_entry->sd_index = sd_idx;
		pd_entry->valid = true;
		I40E_INC_PD_REFCNT(pd_table);
	}
	/* every caller holds one reference on the backing page */
	I40E_INC_BP_REFCNT(&pd_entry->bp);
exit:
	return ret_code;
}
188
189/**
190 * i40e_remove_pd_bp - remove a backing page from a page descriptor
191 * @hw: pointer to our HW structure
192 * @hmc_info: pointer to the HMC configuration information structure
193 * @idx: the page index
194 * @is_pf: distinguishes a VF from a PF
195 *
196 * This function:
197 * 1. Marks the entry in pd tabe (for paged address mode) or in sd table
198 * (for direct address mode) invalid.
199 * 2. Write to register PMPDINV to invalidate the backing page in FV cache
200 * 3. Decrement the ref count for the pd _entry
201 * assumptions:
202 * 1. Caller can deallocate the memory used by backing storage after this
203 * function returns.
204 **/
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
				      struct i40e_hmc_info *hmc_info,
				      u32 idx, bool is_pf)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;

	/* calculate index */
	sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		hw_dbg(hw, "i40e_remove_pd_bp: bad idx\n");
		goto exit;
	}
	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		hw_dbg(hw, "i40e_remove_pd_bp: wrong sd_entry type\n");
		goto exit;
	}
	/* get the entry and decrease its ref counter */
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	I40E_DEC_BP_REFCNT(&pd_entry->bp);
	/* other users still hold this backing page; done (returns 0) */
	if (pd_entry->bp.ref_cnt)
		goto exit;

	/* mark the entry invalid */
	pd_entry->valid = false;
	I40E_DEC_PD_REFCNT(pd_table);
	/* zero the 64-bit page descriptor slot inside the pd page */
	pd_addr = (u64 *)pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	memset(pd_addr, 0, sizeof(u64));
	/* tell hardware to drop its cached copy of this pd */
	if (is_pf)
		I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
	else
		I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx, hmc_info->hmc_fn_id);

	/* free memory here */
	ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
	if (ret_code)
		goto exit;
	/* last pd in the table gone: release the sw bookkeeping array too */
	if (!pd_table->ref_cnt)
		i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
exit:
	return ret_code;
}
257
258/**
259 * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
260 * @hmc_info: pointer to the HMC configuration information structure
261 * @idx: the page index
262 **/
263i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
264 u32 idx)
265{
266 i40e_status ret_code = 0;
267 struct i40e_hmc_sd_entry *sd_entry;
268
269 /* get the entry and decrease its ref counter */
270 sd_entry = &hmc_info->sd_table.sd_entry[idx];
271 I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
272 if (sd_entry->u.bp.ref_cnt) {
273 ret_code = I40E_ERR_NOT_READY;
274 goto exit;
275 }
276 I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
277
278 /* mark the entry invalid */
279 sd_entry->valid = false;
280exit:
281 return ret_code;
282}
283
284/**
285 * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
286 * @hw: pointer to our hw struct
287 * @hmc_info: pointer to the HMC configuration information structure
288 * @idx: the page index
289 * @is_pf: used to distinguish between VF and PF
290 **/
291i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
292 struct i40e_hmc_info *hmc_info,
293 u32 idx, bool is_pf)
294{
295 struct i40e_hmc_sd_entry *sd_entry;
296 i40e_status ret_code = 0;
297
298 /* get the entry and decrease its ref counter */
299 sd_entry = &hmc_info->sd_table.sd_entry[idx];
300 if (is_pf) {
301 I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
302 } else {
303 ret_code = I40E_NOT_SUPPORTED;
304 goto exit;
305 }
306 ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
307 if (ret_code)
308 goto exit;
309exit:
310 return ret_code;
311}
312
313/**
314 * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
315 * @hmc_info: pointer to the HMC configuration information structure
316 * @idx: segment descriptor index to find the relevant page descriptor
317 **/
318i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
319 u32 idx)
320{
321 i40e_status ret_code = 0;
322 struct i40e_hmc_sd_entry *sd_entry;
323
324 sd_entry = &hmc_info->sd_table.sd_entry[idx];
325
326 if (sd_entry->u.pd_table.ref_cnt) {
327 ret_code = I40E_ERR_NOT_READY;
328 goto exit;
329 }
330
331 /* mark the entry invalid */
332 sd_entry->valid = false;
333
334 I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
335exit:
336 return ret_code;
337}
338
339/**
340 * i40e_remove_pd_page_new - Removes a PD page from sd entry.
341 * @hw: pointer to our hw struct
342 * @hmc_info: pointer to the HMC configuration information structure
343 * @idx: segment descriptor index to find the relevant page descriptor
344 * @is_pf: used to distinguish between VF and PF
345 **/
346i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
347 struct i40e_hmc_info *hmc_info,
348 u32 idx, bool is_pf)
349{
350 i40e_status ret_code = 0;
351 struct i40e_hmc_sd_entry *sd_entry;
352
353 sd_entry = &hmc_info->sd_table.sd_entry[idx];
354 if (is_pf) {
355 I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
356 } else {
357 ret_code = I40E_NOT_SUPPORTED;
358 goto exit;
359 }
360 /* free memory here */
361 ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
362 if (ret_code)
363 goto exit;
364exit:
365 return ret_code;
366}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
new file mode 100644
index 000000000000..aacd42a261e9
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -0,0 +1,245 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#ifndef _I40E_HMC_H_
29#define _I40E_HMC_H_
30
/* max backing pages behind one segment descriptor */
#define I40E_HMC_MAX_BP_COUNT 512

/* forward-declare the HW struct for the compiler */
struct i40e_hw;

#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
#define I40E_HMC_PD_CNT_IN_SD 512 /* page descriptors per segment descriptor */
#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
#define I40E_HMC_PAGED_BP_SIZE 4096
#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096
/* subtracted from hmc_fn_id to index the per-VF register array */
#define I40E_FIRST_VF_FPM_ID 16

/* bookkeeping for one class of HMC-backed objects: its location in
 * function-private memory (FPM) and how many entries are configured
 */
struct i40e_hmc_obj_info {
	u64 base; /* base addr in FPM */
	u32 max_cnt; /* max count available for this hmc func */
	u32 cnt; /* count of objects driver actually wants to create */
	u64 size; /* size in bytes of one object */
};

/* how a segment descriptor maps its 2M region: one direct backing
 * page, or a page of 512 page descriptors
 */
enum i40e_sd_entry_type {
	I40E_SD_TYPE_INVALID = 0,
	I40E_SD_TYPE_PAGED = 1,
	I40E_SD_TYPE_DIRECT = 2
};

/* one DMA backing page plus its reference count */
struct i40e_hmc_bp {
	enum i40e_sd_entry_type entry_type;
	struct i40e_dma_mem addr; /* populate to be used by hw */
	u32 sd_pd_index;
	u32 ref_cnt;
};

/* one page descriptor: a 4K backing page owned by a paged sd */
struct i40e_hmc_pd_entry {
	struct i40e_hmc_bp bp;
	u32 sd_index;
	bool valid;
};

/* paged-mode sd payload: the hw-visible pd page plus sw bookkeeping */
struct i40e_hmc_pd_table {
	struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
	struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */
	struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */

	u32 ref_cnt;
	u32 sd_index;
};

/* one segment descriptor slot; payload depends on entry_type */
struct i40e_hmc_sd_entry {
	enum i40e_sd_entry_type entry_type;
	bool valid;

	union {
		struct i40e_hmc_pd_table pd_table;
		struct i40e_hmc_bp bp;
	} u;
};

/* the full table of segment descriptors for one HMC function */
struct i40e_hmc_sd_table {
	struct i40e_virt_mem addr; /* used to track sd_entry allocations */
	u32 sd_cnt;
	u32 ref_cnt;
	struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
};

struct i40e_hmc_info {
	u32 signature;
	/* equals to pci func num for PF and dynamically allocated for VFs */
	u8 hmc_fn_id;
	u16 first_sd_index; /* index of the first available SD */

	/* hmc objects */
	struct i40e_hmc_obj_info *hmc_obj;
	struct i40e_virt_mem hmc_obj_virt_mem;
	struct i40e_hmc_sd_table sd_table;
};

/* simple reference-count helpers for the structures above */
#define I40E_INC_SD_REFCNT(sd_table)	((sd_table)->ref_cnt++)
#define I40E_INC_PD_REFCNT(pd_table)	((pd_table)->ref_cnt++)
#define I40E_INC_BP_REFCNT(bp)		((bp)->ref_cnt++)

#define I40E_DEC_SD_REFCNT(sd_table)	((sd_table)->ref_cnt--)
#define I40E_DEC_PD_REFCNT(pd_table)	((pd_table)->ref_cnt--)
#define I40E_DEC_BP_REFCNT(bp)		((bp)->ref_cnt--)
114
/**
 * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
 * @hw: pointer to our hw struct
 * @pa: physical address of the backing page / pd page
 * @sd_index: segment descriptor index
 * @type: if sd entry is direct or paged
 *
 * Wrapped in do { } while (0) so the multi-statement body expands to a
 * single statement and is safe in unbraced if/else bodies (per kernel
 * coding style); the bare { } form would break such call sites.
 **/
#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type)			\
do {									\
	u32 val1, val2, val3;						\
	val1 = (u32)(upper_32_bits(pa));				\
	val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT <<			\
		 I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |			\
		(1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);		\
	val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
	wr32((hw), I40E_PFHMC_SDDATAHIGH, val1);			\
	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
} while (0)
137
/**
 * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
 * @hw: pointer to our hw struct
 * @sd_index: segment descriptor index
 * @type: if sd entry is direct or paged
 *
 * Wrapped in do { } while (0) so the multi-statement body expands to a
 * single statement and is safe in unbraced if/else bodies.
 **/
#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type)			\
do {									\
	u32 val2, val3;							\
	val2 = (I40E_HMC_MAX_BP_COUNT <<				\
		I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);			\
	val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
	wr32((hw), I40E_PFHMC_SDDATAHIGH, 0);				\
	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
} while (0)
157
/**
 * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
 * @hw: pointer to our hw struct
 * @sd_idx: segment descriptor index
 * @pd_idx: page descriptor index
 **/
#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
	wr32((hw), I40E_PFHMC_PDINV, \
	    (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
	     ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))

/**
 * I40E_INVALIDATE_VF_HMC_PD - Invalidates a VF's pd cache in the hardware
 * @hw: pointer to our hw struct
 * @sd_idx: segment descriptor index
 * @pd_idx: page descriptor index
 * @hmc_fn_id: hmc function id, used to locate the VF's invalidate register
 **/
#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
	wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
	    (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
	     ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
174
/**
 * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
 * @hmc_info: pointer to the HMC configuration information structure
 * @type: type of HMC resources we're searching
 * @index: starting index for the object
 * @cnt: number of objects we're trying to create
 * @sd_idx: pointer to return index of the segment descriptor in question
 * @sd_limit: pointer to return the maximum number of segment descriptors
 *
 * This function calculates the segment descriptor index and index limit
 * for the resource defined by i40e_hmc_rsrc_type.
 * Wrapped in do { } while (0) so the multi-statement body expands to a
 * single statement and is safe in unbraced if/else bodies.
 **/
#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
do {									\
	u64 fpm_addr, fpm_limit;					\
	fpm_addr = (hmc_info)->hmc_obj[(type)].base +			\
		   (hmc_info)->hmc_obj[(type)].size * (index);		\
	fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
	*(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE);		\
	*(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE);	\
	/* add one more to the limit to correct our range */		\
	*(sd_limit) += 1;						\
} while (0)
198
/**
 * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
 * @hmc_info: pointer to the HMC configuration information struct
 * @type: HMC resource type we're examining
 * @idx: starting index for the object
 * @cnt: number of objects we're trying to create
 * @pd_index: pointer to return page descriptor index
 * @pd_limit: pointer to return page descriptor index limit
 *
 * Calculates the page descriptor index and index limit for the resource
 * defined by i40e_hmc_rsrc_type.
 * Wrapped in do { } while (0) so the multi-statement body expands to a
 * single statement and is safe in unbraced if/else bodies.
 **/
#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
do {									\
	u64 fpm_adr, fpm_limit;						\
	fpm_adr = (hmc_info)->hmc_obj[(type)].base +			\
		  (hmc_info)->hmc_obj[(type)].size * (idx);		\
	fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt);	\
	*(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE);		\
	*(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE);	\
	/* add one more to the limit to correct our range */		\
	*(pd_limit) += 1;						\
} while (0)
/* segment/page descriptor manipulation -- implemented in i40e_hmc.c */
i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
					    struct i40e_hmc_info *hmc_info,
					    u32 sd_index,
					    enum i40e_sd_entry_type type,
					    u64 direct_mode_sz);

i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
					    struct i40e_hmc_info *hmc_info,
					    u32 pd_index);
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
				      struct i40e_hmc_info *hmc_info,
				      u32 idx, bool is_pf);
i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
					   u32 idx);
i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
					  struct i40e_hmc_info *hmc_info,
					  u32 idx, bool is_pf);
i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
					     u32 idx);
i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
					    struct i40e_hmc_info *hmc_info,
					    u32 idx, bool is_pf);

#endif /* _I40E_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
new file mode 100644
index 000000000000..a695b91c9c79
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -0,0 +1,1006 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#include "i40e_osdep.h"
29#include "i40e_register.h"
30#include "i40e_type.h"
31#include "i40e_hmc.h"
32#include "i40e_lan_hmc.h"
33#include "i40e_prototype.h"
34
35/* lan specific interface functions */
36
37/**
38 * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
39 * @offset: base address offset needing alignment
40 *
41 * Aligns the layer 2 function private memory so it's 512-byte aligned.
42 **/
43static u64 i40e_align_l2obj_base(u64 offset)
44{
45 u64 aligned_offset = offset;
46
47 if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
48 aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
49 (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));
50
51 return aligned_offset;
52}
53
54/**
55 * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
56 * @txq_num: number of Tx queues needing backing context
57 * @rxq_num: number of Rx queues needing backing context
58 * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context
59 * @fcoe_filt_num: number of FCoE filters needing backing context
60 *
61 * Calculates the maximum amount of memory for the function required, based
62 * on the number of resources it must provide context for.
63 **/
64static u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
65 u32 fcoe_cntx_num, u32 fcoe_filt_num)
66{
67 u64 fpm_size = 0;
68
69 fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
70 fpm_size = i40e_align_l2obj_base(fpm_size);
71
72 fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
73 fpm_size = i40e_align_l2obj_base(fpm_size);
74
75 fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
76 fpm_size = i40e_align_l2obj_base(fpm_size);
77
78 fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
79 fpm_size = i40e_align_l2obj_base(fpm_size);
80
81 return fpm_size;
82}
83
84/**
85 * i40e_init_lan_hmc - initialize i40e_hmc_info struct
86 * @hw: pointer to the HW structure
87 * @txq_num: number of Tx queues needing backing context
88 * @rxq_num: number of Rx queues needing backing context
89 * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context
90 * @fcoe_filt_num: number of FCoE filters needing backing context
91 *
92 * This function will be called once per physical function initialization.
93 * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
94 * the driver's provided input, as well as information from the HMC itself
95 * loaded from NVRAM.
96 *
97 * Assumptions:
98 * - HMC Resource Profile has been selected before calling this function.
99 **/
i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
					u32 rxq_num, u32 fcoe_cntx_num,
					u32 fcoe_filt_num)
{
	struct i40e_hmc_obj_info *obj, *full_obj;
	i40e_status ret_code = 0;
	u64 l2fpm_size;
	u32 size_exp;

	hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
	hw->hmc.hmc_fn_id = hw->pf_id;

	/* allocate memory for hmc_obj */
	ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
			sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
	if (ret_code)
		goto init_lan_hmc_out;
	hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
			  hw->hmc.hmc_obj_virt_mem.va;

	/* The full object will be used to create the LAN HMC SD */
	full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
	full_obj->max_cnt = 0;
	full_obj->cnt = 0;
	full_obj->base = 0;
	full_obj->size = 0;

	/* Tx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = txq_num;
	obj->base = 0;
	/* hardware advertises object size as a power-of-two exponent */
	size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
	obj->size = (u64)1 << size_exp;

	/* validate values requested by driver don't exceed HMC capacity */
	if (txq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
		       txq_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* Rx queue context information: placed in FPM directly after the
	 * Tx contexts, realigned via i40e_align_l2obj_base()
	 */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = rxq_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
	obj->size = (u64)1 << size_exp;

	/* validate values requested by driver don't exceed HMC capacity */
	if (rxq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
		       rxq_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
	obj->cnt = fcoe_cntx_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
	obj->size = (u64)1 << size_exp;

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_cntx_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
		       fcoe_cntx_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE filter information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
	obj->cnt = fcoe_filt_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
	obj->size = (u64)1 << size_exp;

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_filt_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
		       fcoe_filt_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	hw->hmc.first_sd_index = 0;
	hw->hmc.sd_table.ref_cnt = 0;
	l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
					       fcoe_filt_num);
	/* size and allocate the sd_table only on the first call */
	if (NULL == hw->hmc.sd_table.sd_entry) {
		/* one sd per 2M (rounded up) of total FPM footprint */
		hw->hmc.sd_table.sd_cnt = (u32)
				   (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
				   I40E_HMC_DIRECT_BP_SIZE;

		/* allocate the sd_entry members in the sd_table */
		ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
					  (sizeof(struct i40e_hmc_sd_entry) *
					  hw->hmc.sd_table.sd_cnt));
		if (ret_code)
			goto init_lan_hmc_out;
		hw->hmc.sd_table.sd_entry =
			(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
	}
	/* store in the LAN full object for later */
	full_obj->size = l2fpm_size;

init_lan_hmc_out:
	return ret_code;
}
240
241/**
242 * i40e_remove_pd_page - Remove a page from the page descriptor table
243 * @hw: pointer to the HW structure
244 * @hmc_info: pointer to the HMC configuration information structure
245 * @idx: segment descriptor index to find the relevant page descriptor
246 *
247 * This function:
248 * 1. Marks the entry in pd table (for paged address mode) invalid
249 * 2. write to register PMPDINV to invalidate the backing page in FV cache
250 * 3. Decrement the ref count for pd_entry
251 * assumptions:
252 * 1. caller can deallocate the memory used by pd after this function
253 * returns.
254 **/
255static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
256 struct i40e_hmc_info *hmc_info,
257 u32 idx)
258{
259 i40e_status ret_code = 0;
260
261 if (!i40e_prep_remove_pd_page(hmc_info, idx))
262 ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);
263
264 return ret_code;
265}
266
267/**
268 * i40e_remove_sd_bp - remove a backing page from a segment descriptor
269 * @hw: pointer to our HW structure
270 * @hmc_info: pointer to the HMC configuration information structure
271 * @idx: the page index
272 *
273 * This function:
274 * 1. Marks the entry in sd table (for direct address mode) invalid
275 * 2. write to register PMSDCMD, PMSDDATALOW(PMSDDATALOW.PMSDVALID set
276 * to 0) and PMSDDATAHIGH to invalidate the sd page
277 * 3. Decrement the ref count for the sd_entry
278 * assumptions:
279 * 1. caller can deallocate the memory used by backing storage after this
280 * function returns.
281 **/
282static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
283 struct i40e_hmc_info *hmc_info,
284 u32 idx)
285{
286 i40e_status ret_code = 0;
287
288 if (!i40e_prep_remove_sd_bp(hmc_info, idx))
289 ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
290
291 return ret_code;
292}
293
/**
 * i40e_create_lan_hmc_object - allocate backing store for hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.  On any allocation failure, every SD/PD entry
 * created so far is torn down again before returning the error.
 **/
static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_create_obj_info *info)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 pd_idx = 0, pd_lmt = 0;
	bool pd_error = false;
	u32 sd_idx, sd_lmt;
	u64 sd_size;
	u32 i, j;

	/* argument sanity checks before touching any tables */
	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_create_lan_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_create_lan_hmc_object: bad hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_create_lan_hmc_object: bad signature\n");
		goto exit;
	}

	/* the requested [start_idx, start_idx + count) range must fit
	 * inside the object count for this resource type
	 */
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}
	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}
	/* find pd index */
	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	/* This is to cover for cases where you may not want to have an SD with
	 * the full 2M memory but something smaller. By not filling out any
	 * size, the function will default the SD size to be 2M.
	 */
	if (info->direct_mode_sz == 0)
		sd_size = I40E_HMC_DIRECT_BP_SIZE;
	else
		sd_size = info->direct_mode_sz;

	/* check if all the sds are valid. If not, allocate a page and
	 * initialize it.
	 */
	for (j = sd_idx; j < sd_lmt; j++) {
		/* update the sd table entry */
		ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
						   info->entry_type,
						   sd_size);
		if (ret_code)
			goto exit_sd_error;
		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
		if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
			/* check if all the pds in this sd are valid. If not,
			 * allocate a page and initialize it.
			 */

			/* find pd_idx and pd_lmt in this sd */
			pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt,
				      ((j + 1) * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = i40e_add_pd_table_entry(hw,
								   info->hmc_info,
								   i);
				if (ret_code) {
					pd_error = true;
					break;
				}
			}
			if (pd_error) {
				/* remove the backing pages from pd_idx1 to i;
				 * SDs already built are unwound in
				 * exit_sd_error below (this loop handles only
				 * the partially-filled current SD)
				 */
				while (i && (i > pd_idx1)) {
					i40e_remove_pd_bp(hw, info->hmc_info,
							  (i - 1), true);
					i--;
				}
			}
		}
		if (!sd_entry->valid) {
			sd_entry->valid = true;
			/* program the SD entry into the PF's FPM registers */
			switch (sd_entry->entry_type) {
			case I40E_SD_TYPE_PAGED:
				I40E_SET_PF_SD_ENTRY(hw,
				     sd_entry->u.pd_table.pd_page_addr.pa,
				     j, sd_entry->entry_type);
				break;
			case I40E_SD_TYPE_DIRECT:
				I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
						     j, sd_entry->entry_type);
				break;
			default:
				ret_code = I40E_ERR_INVALID_SD_TYPE;
				goto exit;
				break; /* unreachable after goto; kept for style */
			}
		}
	}
	goto exit;

exit_sd_error:
	/* cleanup for sd entries from j to sd_idx (walks backwards, freeing
	 * PDs within each paged SD before removing the SD itself)
	 */
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case I40E_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx,
				      ((j - 1) * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++) {
				i40e_remove_pd_bp(
					hw,
					info->hmc_info,
					i,
					true);
			}
			i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
			break;
		case I40E_SD_TYPE_DIRECT:
			i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
			break;
		default:
			ret_code = I40E_ERR_INVALID_SD_TYPE;
			break;
		}
		j--;
	}
exit:
	return ret_code;
}
457
/**
 * i40e_configure_lan_hmc - prepare the HMC backing store
 * @hw: pointer to the hw structure
 * @model: the model for the layout of the SD/PD tables
 *
 * - This function will be called once per physical function initialization.
 * - This function will be called after i40e_init_lan_hmc() and before
 *   any LAN/FCoE HMC objects can be created.
 **/
i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
				   enum i40e_hmc_model model)
{
	struct i40e_hmc_lan_create_obj_info info;
	i40e_status ret_code = 0;
	u8 hmc_fn_id = hw->hmc.hmc_fn_id;
	struct i40e_hmc_obj_info *obj;

	/* Initialize part of the create object info struct */
	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;

	/* Build the SD entry for the LAN objects */
	switch (model) {
	case I40E_HMC_MODEL_DIRECT_PREFERRED:
	case I40E_HMC_MODEL_DIRECT_ONLY:
		info.entry_type = I40E_SD_TYPE_DIRECT;
		/* Make one big object, a single SD */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		/* DIRECT_PREFERRED falls back to the paged layout on
		 * failure via the cross-case goto below; DIRECT_ONLY
		 * reports the error as-is
		 */
		if ((ret_code) &&
		    (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
			goto try_type_paged;
		else if (ret_code)
			goto configure_lan_hmc_out;
		/* else clause falls through the break */
		break;
	case I40E_HMC_MODEL_PAGED_ONLY:
try_type_paged:
		info.entry_type = I40E_SD_TYPE_PAGED;
		/* Make one big object in the PD table */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if (ret_code)
			goto configure_lan_hmc_out;
		break;
	default:
		/* unsupported type */
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		/* NOTE(review): message says "SD type" but prints ret_code,
		 * not the offending model value -- worth verifying intent
		 */
		hw_dbg(hw, "i40e_configure_lan_hmc: Unknown SD type: %d\n",
			  ret_code);
		goto configure_lan_hmc_out;
		break;
	}

	/* Configure and program the FPM registers so objects can be created;
	 * bases are programmed in units of 512 bytes (see
	 * I40E_HMC_L2OBJ_BASE_ALIGNMENT), hence the divide by 512
	 */

	/* Tx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);

	/* Rx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);

	/* FCoE contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
	 (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);

	/* FCoE filters */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);

configure_lan_hmc_out:
	return ret_code;
}
543
/**
 * i40e_delete_lan_hmc_object - remove hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_delete_obj_info struct
 *
 * This will de-populate the SDs and PDs.  It frees
 * the memory for PDS and backing storage.  After this function is returned,
 * caller should deallocate memory allocated previously for
 * book-keeping information about PDs and backing storage.
 **/
static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_delete_obj_info *info)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_pd_table *pd_table;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 sd_idx, sd_lmt;
	u32 i, j;

	/* argument sanity checks, mirroring i40e_create_lan_hmc_object() */
	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad info->hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->signature\n");
		goto exit;
	}

	if (NULL == info->hmc_info->sd_table.sd_entry) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad sd_entry\n");
		goto exit;
	}

	if (NULL == info->hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
		goto exit;
	}
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	/* first pass: remove valid backing pages behind every paged SD in
	 * the PD range (direct SDs have no PDs and are skipped)
	 */
	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / I40E_HMC_PD_CNT_IN_SD;

		if (I40E_SD_TYPE_PAGED !=
		    info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
			continue;

		/* index of this PD within its SD's pd_entry[] array */
		rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;

		pd_table =
			&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = i40e_remove_pd_bp(hw, info->hmc_info,
						     j, true);
			if (ret_code)
				goto exit;
		}
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}

	/* second pass: tear down the SD entries themselves */
	for (i = sd_idx; i < sd_lmt; i++) {
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case I40E_SD_TYPE_DIRECT:
			ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
			if (ret_code)
				goto exit;
			break;
		case I40E_SD_TYPE_PAGED:
			ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
			if (ret_code)
				goto exit;
			break;
		default:
			break;
		}
	}
exit:
	return ret_code;
}
659
660/**
661 * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
662 * @hw: pointer to the hw structure
663 *
664 * This must be called by drivers as they are shutting down and being
665 * removed from the OS.
666 **/
667i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw)
668{
669 struct i40e_hmc_lan_delete_obj_info info;
670 i40e_status ret_code;
671
672 info.hmc_info = &hw->hmc;
673 info.rsrc_type = I40E_HMC_LAN_FULL;
674 info.start_idx = 0;
675 info.count = 1;
676
677 /* delete the object */
678 ret_code = i40e_delete_lan_hmc_object(hw, &info);
679
680 /* free the SD table entry for LAN */
681 i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
682 hw->hmc.sd_table.sd_cnt = 0;
683 hw->hmc.sd_table.sd_entry = NULL;
684
685 /* free memory used for hmc_obj */
686 i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
687 hw->hmc.hmc_obj = NULL;
688
689 return ret_code;
690}
691
/* I40E_HMC_STORE - expands to the (offset, size) initializer pair for one
 * struct member; used to build the i40e_context_ele tables below.
 */
#define I40E_HMC_STORE(_struct, _ele)		\
	offsetof(struct _struct, _ele),		\
	FIELD_SIZEOF(struct _struct, _ele)

/* describes how one host-struct member maps into the HMC context image */
struct i40e_context_ele {
	u16 offset;	/* byte offset of the member in the host struct */
	u16 size_of;	/* sizeof() the member: 1, 2, 4 or 8 bytes */
	u16 width;	/* field width in bits within the context image */
	u16 lsb;	/* bit position of the field's LSB in the image */
};
702
/* LAN Tx Queue Context
 * Maps each i40e_hmc_obj_txq member to its bit field in the context
 * image; the "+ 128" / "+ (7 * 128)" LSB offsets place fields on later
 * 128-bit lines.  Terminated by the zero entry (width == 0).
 */
static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
					     /* Field      Width    LSB */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head),           13,      0 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, new_context),     1,     30 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, base),           57,     32 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena),          1,     89 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena),    1,     90 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena),          1,     91 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena),    1,     92 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid),           8,     96 },
/* line 1 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb),       13,  0 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena),     1, 32 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, qlen),           13, 33 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena),    1, 46 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena),  1, 47 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena),    1, 48 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr),   64, 64 + 128 },
/* line 7 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, crc),            32,  0 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist),        10, 84 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act),     1, 94 + (7 * 128) },
	{ 0 }
};
728
/* LAN Rx Queue Context
 * Maps each i40e_hmc_obj_rxq member to its bit field in the context
 * image; consumed by i40e_set_hmc_context().  Terminated by the zero
 * entry (width == 0).
 */
static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
					 /* Field      Width    LSB */
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, head),        13,	0   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid),        8,	13  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, base),        57,	32  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen),        13,	89  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff),        7,	102 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff),        5,	109 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype),        2,	114 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize),        1,	116 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip),     1,	117 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena),       1,	118 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel),       1,	119 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0),     4,	120 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1),     2,	124 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv),       1,	127 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax),       14,	174 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1,	193 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1,	194 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena),  1,	195 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena),  1,	196 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh),   3,	198 },
	{ 0 }
};
754
755/**
756 * i40e_clear_hmc_context - zero out the HMC context bits
757 * @hw: the hardware struct
758 * @context_bytes: pointer to the context bit array (DMA memory)
759 * @hmc_type: the type of HMC resource
760 **/
761static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
762 u8 *context_bytes,
763 enum i40e_hmc_lan_rsrc_type hmc_type)
764{
765 /* clean the bit array */
766 memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size);
767
768 return 0;
769}
770
/**
 * i40e_set_hmc_context - replace HMC context bits
 * @context_bytes: pointer to the context bit array
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 *
 * Packs every member described by @ce_info (read from the host struct at
 * @dest) into its bit field in the context image at @context_bytes.  The
 * @ce_info array is terminated by an entry with width == 0.
 **/
static i40e_status i40e_set_hmc_context(u8 *context_bytes,
					struct i40e_context_ele *ce_info,
					u8 *dest)
{
	u16 shift_width;
	u64 bitfield;
	u8 hi_byte;
	u8 hi_mask;
	u64 t_bits;
	u64 mask;
	u8 *p;
	int f;

	for (f = 0; ce_info[f].width != 0; f++) {
		/* clear out the field */
		bitfield = 0;

		/* copy from the next struct field */
		p = dest + ce_info[f].offset;
		/* NOTE(review): the value is byte-swapped to little-endian
		 * before being shifted as a u64, and the read-modify-write
		 * below goes through memcpy of host-order u64s; this appears
		 * to assume a little-endian host -- confirm on big-endian.
		 */
		switch (ce_info[f].size_of) {
		case 1:
			bitfield = *p;
			break;
		case 2:
			bitfield = cpu_to_le16(*(u16 *)p);
			break;
		case 4:
			bitfield = cpu_to_le32(*(u32 *)p);
			break;
		case 8:
			bitfield = cpu_to_le64(*(u64 *)p);
			break;
		}

		/* prepare the bits and mask */
		shift_width = ce_info[f].lsb % 8;
		mask = ((u64)1 << ce_info[f].width) - 1;

		/* save upper bytes for special case: the left shift below can
		 * push up to 7 bits of a wide field out of the u64 window
		 */
		hi_mask = (u8)((mask >> 56) & 0xff);
		hi_byte = (u8)((bitfield >> 56) & 0xff);

		/* shift to correct alignment */
		mask <<= shift_width;
		bitfield <<= shift_width;

		/* get the current bits from the target bit string */
		p = context_bytes + (ce_info[f].lsb / 8);
		memcpy(&t_bits, p, sizeof(u64));

		t_bits &= ~mask; /* get the bits not changing */
		t_bits |= bitfield; /* add in the new bits */

		/* put it all back */
		memcpy(p, &t_bits, sizeof(u64));

		/* deal with the special case if needed
		 * example: 62 bit field that starts in bit 5 of first byte
		 * will overlap 3 bits into byte 9
		 */
		if ((shift_width + ce_info[f].width) > 64) {
			u8 byte;

			/* realign the saved top bits into a 9th byte */
			hi_mask >>= (8 - shift_width);
			hi_byte >>= (8 - shift_width);
			byte = p[8] & ~hi_mask; /* get the bits not changing */
			byte |= hi_byte; /* add in the new bits */
			p[8] = byte; /* put it back */
		}
	}

	return 0;
}
850
851/**
852 * i40e_hmc_get_object_va - retrieves an object's virtual address
853 * @hmc_info: pointer to i40e_hmc_info struct
854 * @object_base: pointer to u64 to get the va
855 * @rsrc_type: the hmc resource type
856 * @obj_idx: hmc object index
857 *
858 * This function retrieves the object's virtual address from the object
859 * base pointer. This function is used for LAN Queue contexts.
860 **/
861static
862i40e_status i40e_hmc_get_object_va(struct i40e_hmc_info *hmc_info,
863 u8 **object_base,
864 enum i40e_hmc_lan_rsrc_type rsrc_type,
865 u32 obj_idx)
866{
867 u32 obj_offset_in_sd, obj_offset_in_pd;
868 i40e_status ret_code = 0;
869 struct i40e_hmc_sd_entry *sd_entry;
870 struct i40e_hmc_pd_entry *pd_entry;
871 u32 pd_idx, pd_lmt, rel_pd_idx;
872 u64 obj_offset_in_fpm;
873 u32 sd_idx, sd_lmt;
874
875 if (NULL == hmc_info) {
876 ret_code = I40E_ERR_BAD_PTR;
877 hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info ptr\n");
878 goto exit;
879 }
880 if (NULL == hmc_info->hmc_obj) {
881 ret_code = I40E_ERR_BAD_PTR;
882 hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
883 goto exit;
884 }
885 if (NULL == object_base) {
886 ret_code = I40E_ERR_BAD_PTR;
887 hw_dbg(hw, "i40e_hmc_get_object_va: bad object_base ptr\n");
888 goto exit;
889 }
890 if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
891 ret_code = I40E_ERR_BAD_PTR;
892 hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->signature\n");
893 goto exit;
894 }
895 if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
896 hw_dbg(hw, "i40e_hmc_get_object_va: returns error %d\n",
897 ret_code);
898 ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
899 goto exit;
900 }
901 /* find sd index and limit */
902 I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
903 &sd_idx, &sd_lmt);
904
905 sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
906 obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
907 hmc_info->hmc_obj[rsrc_type].size * obj_idx;
908
909 if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
910 I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
911 &pd_idx, &pd_lmt);
912 rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
913 pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
914 obj_offset_in_pd = (u32)(obj_offset_in_fpm %
915 I40E_HMC_PAGED_BP_SIZE);
916 *object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
917 } else {
918 obj_offset_in_sd = (u32)(obj_offset_in_fpm %
919 I40E_HMC_DIRECT_BP_SIZE);
920 *object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
921 }
922exit:
923 return ret_code;
924}
925
926/**
927 * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
928 * @hw: the hardware struct
929 * @queue: the queue we care about
930 **/
931i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
932 u16 queue)
933{
934 i40e_status err;
935 u8 *context_bytes;
936
937 err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
938 I40E_HMC_LAN_TX, queue);
939 if (err < 0)
940 return err;
941
942 return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
943}
944
945/**
946 * i40e_set_lan_tx_queue_context - set the HMC context for the queue
947 * @hw: the hardware struct
948 * @queue: the queue we care about
949 * @s: the struct to be filled
950 **/
951i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
952 u16 queue,
953 struct i40e_hmc_obj_txq *s)
954{
955 i40e_status err;
956 u8 *context_bytes;
957
958 err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
959 I40E_HMC_LAN_TX, queue);
960 if (err < 0)
961 return err;
962
963 return i40e_set_hmc_context(context_bytes,
964 i40e_hmc_txq_ce_info, (u8 *)s);
965}
966
967/**
968 * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
969 * @hw: the hardware struct
970 * @queue: the queue we care about
971 **/
972i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
973 u16 queue)
974{
975 i40e_status err;
976 u8 *context_bytes;
977
978 err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
979 I40E_HMC_LAN_RX, queue);
980 if (err < 0)
981 return err;
982
983 return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
984}
985
986/**
987 * i40e_set_lan_rx_queue_context - set the HMC context for the queue
988 * @hw: the hardware struct
989 * @queue: the queue we care about
990 * @s: the struct to be filled
991 **/
992i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
993 u16 queue,
994 struct i40e_hmc_obj_rxq *s)
995{
996 i40e_status err;
997 u8 *context_bytes;
998
999 err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
1000 I40E_HMC_LAN_RX, queue);
1001 if (err < 0)
1002 return err;
1003
1004 return i40e_set_hmc_context(context_bytes,
1005 i40e_hmc_rxq_ce_info, (u8 *)s);
1006}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
new file mode 100644
index 000000000000..00ff35006077
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -0,0 +1,169 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#ifndef _I40E_LAN_HMC_H_
29#define _I40E_LAN_HMC_H_
30
31/* forward-declare the HW struct for the compiler */
32struct i40e_hw;
33
34/* HMC element context information */
35
/* Rx queue context data
 *
 * Values are held in host-native form; the bit widths and positions used
 * in the hardware context image are defined by i40e_hmc_rxq_ce_info in
 * i40e_lan_hmc.c.
 */
struct i40e_hmc_obj_rxq {
	u16 head;
	u8  cpuid;
	u64 base;
	u16 qlen;
#define I40E_RXQ_CTX_DBUFF_SHIFT 7
	u8  dbuff;	/* presumably buffer size >> DBUFF_SHIFT -- confirm in caller */
#define I40E_RXQ_CTX_HBUFF_SHIFT 6
	u8  hbuff;	/* presumably buffer size >> HBUFF_SHIFT -- confirm in caller */
	u8  dtype;
	u8  dsize;
	u8  crcstrip;
	u8  fc_ena;
	u8  l2tsel;
	u8  hsplit_0;	/* see enum i40e_hmc_obj_rx_hsplit_0 */
	u8  hsplit_1;
	u8  showiv;
	u16 rxmax;
	u8  tphrdesc_ena;
	u8  tphwdesc_ena;
	u8  tphdata_ena;
	u8  tphhead_ena;
	u8  lrxqthresh;
};
61
/* Tx queue context data
 *
 * Values are held in host-native form; the bit widths and positions used
 * in the hardware context image are defined by i40e_hmc_txq_ce_info in
 * i40e_lan_hmc.c.
 */
struct i40e_hmc_obj_txq {
	u16 head;
	u8  new_context;
	u64 base;
	u8  fc_ena;
	u8  timesync_ena;
	u8  fd_ena;
	u8  alt_vlan_ena;
	u16 thead_wb;
	u16 cpuid;
	u8  head_wb_ena;
	u16 qlen;
	u8  tphrdesc_ena;
	u8  tphrpacket_ena;
	u8  tphwdesc_ena;
	u64 head_wb_addr;
	u32 crc;
	u16 rdylist;
	u8  rdylist_act;
};
83
/* for hsplit_0 field of Rx HMC context
 * (values are distinct bits, presumably OR-able to split on multiple
 * header types -- confirm against hardware documentation)
 */
enum i40e_hmc_obj_rx_hsplit_0 {
	I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT      = 0,
	I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2      = 1,
	I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP      = 2,
	I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
	I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP    = 8,
};
92
/* fcoe_cntx and fcoe_filt are for debugging purpose only */
/* 32 * 4 = 128 bytes, matching I40E_HMC_OBJ_SIZE_FCOE_CNTX */
struct i40e_hmc_obj_fcoe_cntx {
	u32 rsv[32];
};

/* 8 * 4 = 32 bytes, matching I40E_HMC_OBJ_SIZE_FCOE_FILT */
struct i40e_hmc_obj_fcoe_filt {
	u32 rsv[8];
};
101
/* Context sizes for LAN objects: each enum value is log2 of the object
 * size in bytes (e.g. I40E_HMC_LAN_OBJ_SZ_128 == 0x7, 1 << 7 == 128).
 */
enum i40e_hmc_lan_object_size {
	I40E_HMC_LAN_OBJ_SZ_8   = 0x3,
	I40E_HMC_LAN_OBJ_SZ_16  = 0x4,
	I40E_HMC_LAN_OBJ_SZ_32  = 0x5,
	I40E_HMC_LAN_OBJ_SZ_64  = 0x6,
	I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
	I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
	I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
};

/* object bases must be 512-byte aligned in FPM */
#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
#define I40E_HMC_OBJ_SIZE_TXQ         128	/* bytes per Tx queue context */
#define I40E_HMC_OBJ_SIZE_RXQ         32	/* bytes per Rx queue context */
#define I40E_HMC_OBJ_SIZE_FCOE_CNTX   128
#define I40E_HMC_OBJ_SIZE_FCOE_FILT   32
118
/* HMC object types for the LAN function; the values double as indices
 * into the hmc_obj[] info array.
 */
enum i40e_hmc_lan_rsrc_type {
	I40E_HMC_LAN_FULL  = 0,
	I40E_HMC_LAN_TX    = 1,
	I40E_HMC_LAN_RX    = 2,
	I40E_HMC_FCOE_CTX  = 3,
	I40E_HMC_FCOE_FILT = 4,
	I40E_HMC_LAN_MAX   = 5
};

/* SD/PD table layout requested by i40e_configure_lan_hmc();
 * DIRECT_PREFERRED falls back to the paged layout when the direct
 * allocation fails.
 */
enum i40e_hmc_model {
	I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
	I40E_HMC_MODEL_DIRECT_ONLY      = 1,
	I40E_HMC_MODEL_PAGED_ONLY       = 2,
	I40E_HMC_MODEL_UNKNOWN,
};
134
/* request descriptor for i40e_create_lan_hmc_object() */
struct i40e_hmc_lan_create_obj_info {
	struct i40e_hmc_info *hmc_info;
	u32 rsrc_type;			/* enum i40e_hmc_lan_rsrc_type */
	u32 start_idx;			/* first object index to create */
	u32 count;			/* number of objects */
	enum i40e_sd_entry_type entry_type;	/* direct vs paged SD */
	u64 direct_mode_sz;		/* SD size; 0 selects the 2M default */
};

/* request descriptor for i40e_delete_lan_hmc_object() */
struct i40e_hmc_lan_delete_obj_info {
	struct i40e_hmc_info *hmc_info;
	u32 rsrc_type;			/* enum i40e_hmc_lan_rsrc_type */
	u32 start_idx;			/* first object index to delete */
	u32 count;			/* number of objects */
};
150
151i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
152 u32 rxq_num, u32 fcoe_cntx_num,
153 u32 fcoe_filt_num);
154i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
155 enum i40e_hmc_model model);
156i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
157
158i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
159 u16 queue);
160i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
161 u16 queue,
162 struct i40e_hmc_obj_txq *s);
163i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
164 u16 queue);
165i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
166 u16 queue,
167 struct i40e_hmc_obj_rxq *s);
168
169#endif /* _I40E_LAN_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
new file mode 100644
index 000000000000..601d482694ea
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -0,0 +1,7375 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
/* Local includes */
#include "i40e.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

/* "-k" marks this as the in-kernel driver build */
#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
#define DRV_VERSION_BUILD 9
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 Intel Corporation.";

/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 *
 * NOTE(review): DEFINE_PCI_DEVICE_TABLE is deprecated upstream in favor of
 * plain "static const struct pci_device_id" — confirm tree policy.
 */
static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
	{PCI_VDEVICE(INTEL, I40E_SFP_XL710_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_SFP_X710_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_QEMU_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_KX_A_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_KX_B_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_KX_C_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_KX_D_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_QSFP_A_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_QSFP_B_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_QSFP_C_DEVICE_ID), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
/* -1 means "use the driver's default message level" */
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
88
89/**
90 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
91 * @hw: pointer to the HW structure
92 * @mem: ptr to mem struct to fill out
93 * @size: size of memory requested
94 * @alignment: what to align the allocation to
95 **/
96int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
97 u64 size, u32 alignment)
98{
99 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
100
101 mem->size = ALIGN(size, alignment);
102 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
103 &mem->pa, GFP_KERNEL);
104 if (mem->va)
105 return 0;
106
107 return -ENOMEM;
108}
109
110/**
111 * i40e_free_dma_mem_d - OS specific memory free for shared code
112 * @hw: pointer to the HW structure
113 * @mem: ptr to mem struct to free
114 **/
115int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
116{
117 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
118
119 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
120 mem->va = NULL;
121 mem->pa = 0;
122 mem->size = 0;
123
124 return 0;
125}
126
127/**
128 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
129 * @hw: pointer to the HW structure
130 * @mem: ptr to mem struct to fill out
131 * @size: size of memory requested
132 **/
133int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
134 u32 size)
135{
136 mem->size = size;
137 mem->va = kzalloc(size, GFP_KERNEL);
138
139 if (mem->va)
140 return 0;
141
142 return -ENOMEM;
143}
144
145/**
146 * i40e_free_virt_mem_d - OS specific memory free for shared code
147 * @hw: pointer to the HW structure
148 * @mem: ptr to mem struct to free
149 **/
150int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
151{
152 /* it's ok to kfree a NULL pointer */
153 kfree(mem->va);
154 mem->va = NULL;
155 mem->size = 0;
156
157 return 0;
158}
159
160/**
161 * i40e_get_lump - find a lump of free generic resource
162 * @pf: board private structure
163 * @pile: the pile of resource to search
164 * @needed: the number of items needed
165 * @id: an owner id to stick on the items assigned
166 *
167 * Returns the base item index of the lump, or negative for error
168 *
169 * The search_hint trick and lack of advanced fit-finding only work
170 * because we're highly likely to have all the same size lump requests.
171 * Linear search time and any fragmentation should be minimal.
172 **/
173static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
174 u16 needed, u16 id)
175{
176 int ret = -ENOMEM;
177 int i = 0;
178 int j = 0;
179
180 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
181 dev_info(&pf->pdev->dev,
182 "param err: pile=%p needed=%d id=0x%04x\n",
183 pile, needed, id);
184 return -EINVAL;
185 }
186
187 /* start the linear search with an imperfect hint */
188 i = pile->search_hint;
189 while (i < pile->num_entries && ret < 0) {
190 /* skip already allocated entries */
191 if (pile->list[i] & I40E_PILE_VALID_BIT) {
192 i++;
193 continue;
194 }
195
196 /* do we have enough in this lump? */
197 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
198 if (pile->list[i+j] & I40E_PILE_VALID_BIT)
199 break;
200 }
201
202 if (j == needed) {
203 /* there was enough, so assign it to the requestor */
204 for (j = 0; j < needed; j++)
205 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
206 ret = i;
207 pile->search_hint = i + j;
208 } else {
209 /* not enough, so skip over it and continue looking */
210 i += j;
211 }
212 }
213
214 return ret;
215}
216
217/**
218 * i40e_put_lump - return a lump of generic resource
219 * @pile: the pile of resource to search
220 * @index: the base item index
221 * @id: the owner id of the items assigned
222 *
223 * Returns the count of items in the lump
224 **/
225static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
226{
227 int valid_id = (id | I40E_PILE_VALID_BIT);
228 int count = 0;
229 int i;
230
231 if (!pile || index >= pile->num_entries)
232 return -EINVAL;
233
234 for (i = index;
235 i < pile->num_entries && pile->list[i] == valid_id;
236 i++) {
237 pile->list[i] = 0;
238 count++;
239 }
240
241 if (count && index < pile->search_hint)
242 pile->search_hint = index;
243
244 return count;
245}
246
247/**
248 * i40e_service_event_schedule - Schedule the service task to wake up
249 * @pf: board private structure
250 *
251 * If not already scheduled, this puts the task into the work queue
252 **/
253static void i40e_service_event_schedule(struct i40e_pf *pf)
254{
255 if (!test_bit(__I40E_DOWN, &pf->state) &&
256 !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
257 !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
258 schedule_work(&pf->service_task);
259}
260
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 *
 * Recovery escalates one level per timeout (VSI reinit -> PF reset ->
 * core reset -> global reset -> give up and down the VSI), and drops
 * back to level 0 if the last recovery was more than 20 seconds ago.
 **/
static void i40e_tx_timeout(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	pf->tx_timeout_count++;

	/* been quiet for a while: restart the escalation ladder */
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 0;
	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d\n",
		    pf->tx_timeout_recovery_level);

	switch (pf->tx_timeout_recovery_level) {
	case 0:
		/* disable and re-enable queues for the VSI; in interrupt
		 * context we can only flag the work for the service task
		 */
		if (in_interrupt()) {
			set_bit(__I40E_REINIT_REQUESTED, &pf->state);
			set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
		} else {
			i40e_vsi_reinit_locked(vsi);
		}
		break;
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		/* nothing stronger left to try */
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		i40e_down(vsi);
		break;
	}
	/* the service task performs whichever reset was flagged above */
	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
310
/**
 * i40e_release_rx_desc - Store the new tail value
 * @rx_ring: ring to bump
 * @val: new tail index (next descriptor the driver will use)
 *
 * Note: the original kernel-doc said "new head index"; the value is
 * written to next_to_use and the ring's tail register.
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
328
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}
340
341/**
342 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
343 * @netdev: network interface device structure
344 *
345 * Returns the address of the device statistics structure.
346 * The statistics are actually updated from the service task.
347 **/
348static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
349 struct net_device *netdev,
350 struct rtnl_link_stats64 *storage)
351{
352 struct i40e_netdev_priv *np = netdev_priv(netdev);
353 struct i40e_vsi *vsi = np->vsi;
354
355 *storage = *i40e_get_vsi_stats_struct(vsi);
356
357 return storage;
358}
359
360/**
361 * i40e_vsi_reset_stats - Resets all stats of the given vsi
362 * @vsi: the VSI to have its stats reset
363 **/
364void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
365{
366 struct rtnl_link_stats64 *ns;
367 int i;
368
369 if (!vsi)
370 return;
371
372 ns = i40e_get_vsi_stats_struct(vsi);
373 memset(ns, 0, sizeof(*ns));
374 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
375 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
376 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
377 if (vsi->rx_rings)
378 for (i = 0; i < vsi->num_queue_pairs; i++) {
379 memset(&vsi->rx_rings[i].rx_stats, 0 ,
380 sizeof(vsi->rx_rings[i].rx_stats));
381 memset(&vsi->tx_rings[i].tx_stats, 0,
382 sizeof(vsi->tx_rings[i].tx_stats));
383 }
384 vsi->stat_offsets_loaded = false;
385}
386
/**
 * i40e_pf_reset_stats - Reset all of the stats for the given pf
 * @pf: the PF to be reset
 *
 * Zeroes the port-level stats and their offsets, and forces the offsets
 * to be reloaded from hardware on the next update.
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;
}
397
/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_QEMU_DEVICE_ID) {
		/* QEMU models the counter as two 32-bit registers; only
		 * the low 16 bits of the high register are significant
		 * for a 48-bit counter
		 */
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		/* counter wrapped since the offset was taken */
		*stat = (new_data + ((u64)1 << 48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
432
433/**
434 * i40e_stat_update32 - read and update a 32 bit stat from the chip
435 * @hw: ptr to the hardware info
436 * @reg: the hw reg to read
437 * @offset_loaded: has the initial offset been loaded yet
438 * @offset: ptr to current offset value
439 * @stat: ptr to the stat
440 **/
441static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
442 bool offset_loaded, u64 *offset, u64 *stat)
443{
444 u32 new_data;
445
446 new_data = rd32(hw, reg);
447 if (!offset_loaded)
448 *offset = new_data;
449 if (likely(new_data >= *offset))
450 *stat = (u32)(new_data - *offset);
451 else
452 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
453}
454
/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 *
 * Reads every per-VSI hardware counter (indexed by the VSI's assigned
 * stat counter slot) through the offset helpers, then marks the offsets
 * as loaded so subsequent calls report deltas from first read.
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);

	/* Rx byte/packet counters (48-bit, split hi/lo registers) */
	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	/* Tx byte/packet counters */
	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}
513
/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 *
 * Same scheme as the VSI eth stats, but reading the switch (GLSW)
 * counters indexed by the VEB's stats slot.
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	int idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	/* Rx counters */
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	/* Tx counters */
	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	veb->stat_offsets_loaded = true;
}
565
/**
 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode.
 * When new XOFF frames arrived since the last read, the Tx hang-check
 * arming bit is cleared on every ring so a pause-induced stall is not
 * misdiagnosed as a Tx hang.
 **/
static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u64 xoff = 0;
	u16 i, v;

	/* only relevant when link-level flow control can pause Rx */
	if ((hw->fc.current_mode != I40E_FC_FULL) &&
	    (hw->fc.current_mode != I40E_FC_RX_PAUSE))
		return;

	xoff = nsd->link_xoff_rx;	/* value before this refresh */
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);

	/* No new LFC xoff rx */
	if (!(nsd->link_xoff_rx - xoff))
		return;

	/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi)
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = &vsi->tx_rings[i];
			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
		}
	}
}
606
/**
 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in PFC mode.  Falls back to
 * the link-level variant when DCB/PFC is not active.  For each priority
 * that saw new XOFF frames, the hang-check arming bit is cleared only on
 * the Tx rings mapped to the paused traffic class.
 **/
static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_hw *hw = &pf->hw;
	u16 i, v;
	u8 tc;

	dcb_cfg = &hw->local_dcbx_config;

	/* See if DCB enabled with PFC TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
	    !(dcb_cfg->pfc.pfcenable)) {
		i40e_update_link_xoff_rx(pf);
		return;
	}

	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		u64 prio_xoff = nsd->priority_xoff_rx[i];
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);

		/* No new PFC xoff rx */
		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
			continue;
		/* Get the TC for given priority */
		tc = dcb_cfg->etscfg.prioritytable[i];
		xoff[tc] = true;
	}

	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi)
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = &vsi->tx_rings[i];

			tc = ring->dcb_tc;
			if (xoff[tc])
				clear_bit(__I40E_HANG_CHECK_ARMED,
					  &ring->state);
		}
	}
}
664
/**
 * i40e_update_stats - Update the board statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it all out here in a central place.
 *
 * Three phases: (1) sum the driver-maintained per-ring counters into
 * the VSI/netdev stats, (2) refresh the hardware VSI eth stats, and
 * (3) for the main LAN VSI only, refresh the full set of port-level
 * (GLPRT) counters into pf->stats.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	u32 rx_page, rx_buf;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	int i;
	u16 q;

	/* don't race a reset or teardown */
	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = 0;
	rx_page = 0;
	rx_buf = 0;
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		struct i40e_ring *p;

		p = &vsi->rx_rings[q];
		rx_b += p->rx_stats.bytes;
		rx_p += p->rx_stats.packets;
		rx_buf += p->rx_stats.alloc_rx_buff_failed;
		rx_page += p->rx_stats.alloc_rx_page_failed;

		p = &vsi->tx_rings[q];
		tx_b += p->tx_stats.bytes;
		tx_p += p->tx_stats.packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
	}
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	i40e_update_eth_stats(vsi);
	/* update netdev stats from eth stats */
	ons->rx_errors = oes->rx_errors;
	ns->rx_errors = es->rx_errors;
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* Get the port data only if this is the main PF VSI */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		struct i40e_hw_port_stats *nsd = &pf->stats;
		struct i40e_hw_port_stats *osd = &pf->stats_offsets;

		/* port-level byte/discard/multicast counters */
		i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
				   I40E_GLPRT_GORCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
		i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
				   I40E_GLPRT_GOTCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
		i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_discards,
				   &nsd->eth.rx_discards);
		i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.tx_discards,
				   &nsd->eth.tx_discards);
		i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
				   I40E_GLPRT_MPRCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_multicast,
				   &nsd->eth.rx_multicast);

		i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_dropped_link_down,
				   &nsd->tx_dropped_link_down);

		/* error counters; some also feed the netdev stats */
		i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->crc_errors, &nsd->crc_errors);
		ns->rx_crc_errors = nsd->crc_errors;

		i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->illegal_bytes, &nsd->illegal_bytes);
		ns->rx_errors = nsd->crc_errors
				+ nsd->illegal_bytes;

		i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->mac_local_faults,
				   &nsd->mac_local_faults);
		i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->mac_remote_faults,
				   &nsd->mac_remote_faults);

		i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_length_errors,
				   &nsd->rx_length_errors);
		ns->rx_length_errors = nsd->rx_length_errors;

		/* link-level flow control counters */
		i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xon_rx, &nsd->link_xon_rx);
		i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xon_tx, &nsd->link_xon_tx);
		i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
		i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xoff_tx, &nsd->link_xoff_tx);

		/* per-priority (PFC) flow control counters */
		for (i = 0; i < 8; i++) {
			i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_rx[i],
					   &nsd->priority_xon_rx[i]);
			i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_tx[i],
					   &nsd->priority_xon_tx[i]);
			i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xoff_tx[i],
					   &nsd->priority_xoff_tx[i]);
			i40e_stat_update32(hw,
					   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_2_xoff[i],
					   &nsd->priority_xon_2_xoff[i]);
		}

		/* Rx size histogram */
		i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
				   I40E_GLPRT_PRC64L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_64, &nsd->rx_size_64);
		i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
				   I40E_GLPRT_PRC127L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_127, &nsd->rx_size_127);
		i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
				   I40E_GLPRT_PRC255L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_255, &nsd->rx_size_255);
		i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
				   I40E_GLPRT_PRC511L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_511, &nsd->rx_size_511);
		i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
				   I40E_GLPRT_PRC1023L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_1023, &nsd->rx_size_1023);
		i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
				   I40E_GLPRT_PRC1522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_1522, &nsd->rx_size_1522);
		i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
				   I40E_GLPRT_PRC9522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_big, &nsd->rx_size_big);

		/* Tx size histogram */
		i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
				   I40E_GLPRT_PTC64L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_64, &nsd->tx_size_64);
		i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
				   I40E_GLPRT_PTC127L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_127, &nsd->tx_size_127);
		i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
				   I40E_GLPRT_PTC255L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_255, &nsd->tx_size_255);
		i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
				   I40E_GLPRT_PTC511L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_511, &nsd->tx_size_511);
		i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
				   I40E_GLPRT_PTC1023L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_1023, &nsd->tx_size_1023);
		i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
				   I40E_GLPRT_PTC1522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_1522, &nsd->tx_size_1522);
		i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
				   I40E_GLPRT_PTC9522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_big, &nsd->tx_size_big);

		/* malformed-frame counters */
		i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_undersize, &nsd->rx_undersize);
		i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_fragments, &nsd->rx_fragments);
		i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_oversize, &nsd->rx_oversize);
		i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_jabber, &nsd->rx_jabber);
	}

	pf->stat_offsets_loaded = true;
}
906
907/**
908 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
909 * @vsi: the VSI to be searched
910 * @macaddr: the MAC address
911 * @vlan: the vlan
912 * @is_vf: make sure its a vf filter, else doesn't matter
913 * @is_netdev: make sure its a netdev filter, else doesn't matter
914 *
915 * Returns ptr to the filter object or NULL
916 **/
917static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
918 u8 *macaddr, s16 vlan,
919 bool is_vf, bool is_netdev)
920{
921 struct i40e_mac_filter *f;
922
923 if (!vsi || !macaddr)
924 return NULL;
925
926 list_for_each_entry(f, &vsi->mac_filter_list, list) {
927 if ((ether_addr_equal(macaddr, f->macaddr)) &&
928 (vlan == f->vlan) &&
929 (!is_vf || f->is_vf) &&
930 (!is_netdev || f->is_netdev))
931 return f;
932 }
933 return NULL;
934}
935
936/**
937 * i40e_find_mac - Find a mac addr in the macvlan filters list
938 * @vsi: the VSI to be searched
939 * @macaddr: the MAC address we are searching for
940 * @is_vf: make sure its a vf filter, else doesn't matter
941 * @is_netdev: make sure its a netdev filter, else doesn't matter
942 *
943 * Returns the first filter with the provided MAC address or NULL if
944 * MAC address was not found
945 **/
946struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
947 bool is_vf, bool is_netdev)
948{
949 struct i40e_mac_filter *f;
950
951 if (!vsi || !macaddr)
952 return NULL;
953
954 list_for_each_entry(f, &vsi->mac_filter_list, list) {
955 if ((ether_addr_equal(macaddr, f->macaddr)) &&
956 (!is_vf || f->is_vf) &&
957 (!is_netdev || f->is_netdev))
958 return f;
959 }
960 return NULL;
961}
962
963/**
964 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
965 * @vsi: the VSI to be searched
966 *
967 * Returns true if VSI is in vlan mode or false otherwise
968 **/
969bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
970{
971 struct i40e_mac_filter *f;
972
973 /* Only -1 for all the filters denotes not in vlan mode
974 * so we have to go through all the list in order to make sure
975 */
976 list_for_each_entry(f, &vsi->mac_filter_list, list) {
977 if (f->vlan >= 0)
978 return true;
979 }
980
981 return false;
982}
983
/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a vf
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 *
 * NOTE(review): i40e_add_filter() inserts into mac_filter_list while we
 * iterate it; list_add() prepends, so new entries are presumably not
 * revisited by this traversal — confirm before restructuring.
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		/* add @macaddr for this vlan only if not already present;
		 * an add failure (NULL) aborts the whole operation
		 */
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}
1013
1014/**
1015 * i40e_add_filter - Add a mac/vlan filter to the VSI
1016 * @vsi: the VSI to be searched
1017 * @macaddr: the MAC address
1018 * @vlan: the vlan
1019 * @is_vf: make sure its a vf filter, else doesn't matter
1020 * @is_netdev: make sure its a netdev filter, else doesn't matter
1021 *
1022 * Returns ptr to the filter object or NULL when no memory available.
1023 **/
1024struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1025 u8 *macaddr, s16 vlan,
1026 bool is_vf, bool is_netdev)
1027{
1028 struct i40e_mac_filter *f;
1029
1030 if (!vsi || !macaddr)
1031 return NULL;
1032
1033 f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1034 if (!f) {
1035 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1036 if (!f)
1037 goto add_filter_out;
1038
1039 memcpy(f->macaddr, macaddr, ETH_ALEN);
1040 f->vlan = vlan;
1041 f->changed = true;
1042
1043 INIT_LIST_HEAD(&f->list);
1044 list_add(&f->list, &vsi->mac_filter_list);
1045 }
1046
1047 /* increment counter and add a new flag if needed */
1048 if (is_vf) {
1049 if (!f->is_vf) {
1050 f->is_vf = true;
1051 f->counter++;
1052 }
1053 } else if (is_netdev) {
1054 if (!f->is_netdev) {
1055 f->is_netdev = true;
1056 f->counter++;
1057 }
1058 } else {
1059 f->counter++;
1060 }
1061
1062 /* changed tells sync_filters_subtask to
1063 * push the filter down to the firmware
1064 */
1065 if (f->changed) {
1066 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1067 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1068 }
1069
1070add_filter_out:
1071 return f;
1072}
1073
/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Drops one reference on the matching filter; the filter is only
 * marked for removal from the firmware once its counter hits zero.
 **/
void i40e_del_filter(struct i40e_vsi *vsi,
		     u8 *macaddr, s16 vlan,
		     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	/* nothing to do without a VSI or a MAC to look up */
	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f || f->counter == 0)
		return;

	if (is_vf) {
		/* release the single reference held on behalf of the vf */
		if (f->is_vf) {
			f->is_vf = false;
			f->counter--;
		}
	} else if (is_netdev) {
		/* release the single reference held on behalf of the netdev */
		if (f->is_netdev) {
			f->is_netdev = false;
			f->counter--;
		}
	} else {
		/* make sure we don't remove a filter in use by vf or netdev */
		int min_f = 0;
		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);

		if (f->counter > min_f)
			f->counter--;
	}

	/* counter == 0 tells sync_filters_subtask to
	 * remove the filter from the firmware's list
	 */
	if (f->counter == 0) {
		f->changed = true;
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}
1124
/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct sockaddr *addr = p;
	struct i40e_mac_filter *f;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);

	/* nothing to do when the address is unchanged */
	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	/* for the main VSI, also push the new LAA to the HW through
	 * the admin queue before touching the filter list
	 */
	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;
		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_ONLY,
						addr->sa_data, NULL);
		if (ret) {
			netdev_info(netdev,
				    "Addr change for Main VSI failed: %d\n",
				    ret);
			return -EADDRNOTAVAIL;
		}

		memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
	}

	/* In order to be sure to not drop any packets, add the new address
	 * then delete the old one.
	 */
	f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false);
	if (!f)
		return -ENOMEM;

	/* push the new filter to HW before removing the old address,
	 * then sync again so the removal also reaches the HW
	 */
	i40e_sync_vsi_filters(vsi);
	i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
	i40e_sync_vsi_filters(vsi);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return 0;
}
1177
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	/* determine how many TCs to lay queues out for */
	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & (1 << i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	/* fall back to TC0 when no TC was requested */
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
			int pow, num_qps;

			vsi->tc_config.tc_info[i].qoffset = offset;
			/* queue count per TC depends on the VSI type */
			switch (vsi->type) {
			case I40E_VSI_MAIN:
				if (i == 0)
					qcount = pf->rss_size;
				else
					qcount = pf->num_tc_qps;
				vsi->tc_config.tc_info[i].qcount = qcount;
				break;
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				/* these VSI types only expect a single TC */
				qcount = vsi->alloc_queue_pairs;
				vsi->tc_config.tc_info[i].qcount = qcount;
				WARN_ON(i != 0);
				break;
			}

			/* find the power-of-2 of the number of queue pairs */
			num_qps = vsi->tc_config.tc_info[i].qcount;
			pow = 0;
			while (num_qps &&
			      ((1 << pow) < vsi->tc_config.tc_info[i].qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			/* encode this TC's first queue and queue count
			 * (as a power of 2) into the HW qmap word
			 */
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += vsi->tc_config.tc_info[i].qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	/* SRIOV VSIs get an explicit per-queue mapping, everything else
	 * uses a contiguous range starting at base_queue
	 */
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					       cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
1298
/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 *
 * Reconciles the driver's MAC filter list with the netdev's unicast
 * and multicast address lists: missing addresses are added, and
 * netdev-owned filters no longer present on the netdev are removed.
 **/
static void i40e_set_rx_mode(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			/* replicate across every known vlan when the VSI
			 * is in vlan mode, otherwise use the wildcard vlan
			 */
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		bool found = false;

		/* only netdev-owned filters are reconciled here */
		if (!f->is_netdev)
			continue;

		if (is_multicast_ether_addr(f->macaddr)) {
			netdev_for_each_mc_addr(mca, netdev) {
				if (ether_addr_equal(mca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		} else {
			/* unicast filters may live either in the uc list
			 * or in the device's own address list
			 */
			netdev_for_each_uc_addr(uca, netdev) {
				if (ether_addr_equal(uca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}

			for_each_dev_addr(netdev, ha) {
				if (ether_addr_equal(ha->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		}
		if (!found)
			i40e_del_filter(
			   vsi, f->macaddr, I40E_VLAN_ANY, false, true);
	}

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}
1375
1376/**
1377 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1378 * @vsi: ptr to the VSI
1379 *
1380 * Push any outstanding VSI filter changes through the AdminQ.
1381 *
1382 * Returns 0 or error value
1383 **/
1384int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1385{
1386 struct i40e_mac_filter *f, *ftmp;
1387 bool promisc_forced_on = false;
1388 bool add_happened = false;
1389 int filter_list_len = 0;
1390 u32 changed_flags = 0;
1391 i40e_status ret = 0;
1392 struct i40e_pf *pf;
1393 int num_add = 0;
1394 int num_del = 0;
1395 u16 cmd_flags;
1396
1397 /* empty array typed pointers, kcalloc later */
1398 struct i40e_aqc_add_macvlan_element_data *add_list;
1399 struct i40e_aqc_remove_macvlan_element_data *del_list;
1400
1401 while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
1402 usleep_range(1000, 2000);
1403 pf = vsi->back;
1404
1405 if (vsi->netdev) {
1406 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
1407 vsi->current_netdev_flags = vsi->netdev->flags;
1408 }
1409
1410 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1411 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1412
1413 filter_list_len = pf->hw.aq.asq_buf_size /
1414 sizeof(struct i40e_aqc_remove_macvlan_element_data);
1415 del_list = kcalloc(filter_list_len,
1416 sizeof(struct i40e_aqc_remove_macvlan_element_data),
1417 GFP_KERNEL);
1418 if (!del_list)
1419 return -ENOMEM;
1420
1421 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1422 if (!f->changed)
1423 continue;
1424
1425 if (f->counter != 0)
1426 continue;
1427 f->changed = false;
1428 cmd_flags = 0;
1429
1430 /* add to delete list */
1431 memcpy(del_list[num_del].mac_addr,
1432 f->macaddr, ETH_ALEN);
1433 del_list[num_del].vlan_tag =
1434 cpu_to_le16((u16)(f->vlan ==
1435 I40E_VLAN_ANY ? 0 : f->vlan));
1436
1437 /* vlan0 as wild card to allow packets from all vlans */
1438 if (f->vlan == I40E_VLAN_ANY ||
1439 (vsi->netdev && !(vsi->netdev->features &
1440 NETIF_F_HW_VLAN_CTAG_FILTER)))
1441 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1442 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1443 del_list[num_del].flags = cmd_flags;
1444 num_del++;
1445
1446 /* unlink from filter list */
1447 list_del(&f->list);
1448 kfree(f);
1449
1450 /* flush a full buffer */
1451 if (num_del == filter_list_len) {
1452 ret = i40e_aq_remove_macvlan(&pf->hw,
1453 vsi->seid, del_list, num_del,
1454 NULL);
1455 num_del = 0;
1456 memset(del_list, 0, sizeof(*del_list));
1457
1458 if (ret)
1459 dev_info(&pf->pdev->dev,
1460 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
1461 ret,
1462 pf->hw.aq.asq_last_status);
1463 }
1464 }
1465 if (num_del) {
1466 ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1467 del_list, num_del, NULL);
1468 num_del = 0;
1469
1470 if (ret)
1471 dev_info(&pf->pdev->dev,
1472 "ignoring delete macvlan error, err %d, aq_err %d\n",
1473 ret, pf->hw.aq.asq_last_status);
1474 }
1475
1476 kfree(del_list);
1477 del_list = NULL;
1478
1479 /* do all the adds now */
1480 filter_list_len = pf->hw.aq.asq_buf_size /
1481 sizeof(struct i40e_aqc_add_macvlan_element_data),
1482 add_list = kcalloc(filter_list_len,
1483 sizeof(struct i40e_aqc_add_macvlan_element_data),
1484 GFP_KERNEL);
1485 if (!add_list)
1486 return -ENOMEM;
1487
1488 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1489 if (!f->changed)
1490 continue;
1491
1492 if (f->counter == 0)
1493 continue;
1494 f->changed = false;
1495 add_happened = true;
1496 cmd_flags = 0;
1497
1498 /* add to add array */
1499 memcpy(add_list[num_add].mac_addr,
1500 f->macaddr, ETH_ALEN);
1501 add_list[num_add].vlan_tag =
1502 cpu_to_le16(
1503 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
1504 add_list[num_add].queue_number = 0;
1505
1506 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1507
1508 /* vlan0 as wild card to allow packets from all vlans */
1509 if (f->vlan == I40E_VLAN_ANY || (vsi->netdev &&
1510 !(vsi->netdev->features &
1511 NETIF_F_HW_VLAN_CTAG_FILTER)))
1512 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
1513 add_list[num_add].flags = cpu_to_le16(cmd_flags);
1514 num_add++;
1515
1516 /* flush a full buffer */
1517 if (num_add == filter_list_len) {
1518 ret = i40e_aq_add_macvlan(&pf->hw,
1519 vsi->seid,
1520 add_list,
1521 num_add,
1522 NULL);
1523 num_add = 0;
1524
1525 if (ret)
1526 break;
1527 memset(add_list, 0, sizeof(*add_list));
1528 }
1529 }
1530 if (num_add) {
1531 ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1532 add_list, num_add, NULL);
1533 num_add = 0;
1534 }
1535 kfree(add_list);
1536 add_list = NULL;
1537
1538 if (add_happened && (!ret)) {
1539 /* do nothing */;
1540 } else if (add_happened && (ret)) {
1541 dev_info(&pf->pdev->dev,
1542 "add filter failed, err %d, aq_err %d\n",
1543 ret, pf->hw.aq.asq_last_status);
1544 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1545 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1546 &vsi->state)) {
1547 promisc_forced_on = true;
1548 set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1549 &vsi->state);
1550 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
1551 }
1552 }
1553 }
1554
1555 /* check for changes in promiscuous modes */
1556 if (changed_flags & IFF_ALLMULTI) {
1557 bool cur_multipromisc;
1558 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
1559 ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1560 vsi->seid,
1561 cur_multipromisc,
1562 NULL);
1563 if (ret)
1564 dev_info(&pf->pdev->dev,
1565 "set multi promisc failed, err %d, aq_err %d\n",
1566 ret, pf->hw.aq.asq_last_status);
1567 }
1568 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1569 bool cur_promisc;
1570 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1571 test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1572 &vsi->state));
1573 ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
1574 vsi->seid,
1575 cur_promisc,
1576 NULL);
1577 if (ret)
1578 dev_info(&pf->pdev->dev,
1579 "set uni promisc failed, err %d, aq_err %d\n",
1580 ret, pf->hw.aq.asq_last_status);
1581 }
1582
1583 clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
1584 return 0;
1585}
1586
1587/**
1588 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
1589 * @pf: board private structure
1590 **/
1591static void i40e_sync_filters_subtask(struct i40e_pf *pf)
1592{
1593 int v;
1594
1595 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
1596 return;
1597 pf->flags &= ~I40E_FLAG_FILTER_SYNC;
1598
1599 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
1600 if (pf->vsi[v] &&
1601 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
1602 i40e_sync_vsi_filters(pf->vsi[v]);
1603 }
1604}
1605
1606/**
1607 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
1608 * @netdev: network interface device structure
1609 * @new_mtu: new value for maximum frame size
1610 *
1611 * Returns 0 on success, negative on failure
1612 **/
1613static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
1614{
1615 struct i40e_netdev_priv *np = netdev_priv(netdev);
1616 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
1617 struct i40e_vsi *vsi = np->vsi;
1618
1619 /* MTU < 68 is an error and causes problems on some kernels */
1620 if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
1621 return -EINVAL;
1622
1623 netdev_info(netdev, "changing MTU from %d to %d\n",
1624 netdev->mtu, new_mtu);
1625 netdev->mtu = new_mtu;
1626 if (netif_running(netdev))
1627 i40e_vsi_reinit_locked(vsi);
1628
1629 return 0;
1630}
1631
1632/**
1633 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
1634 * @vsi: the vsi being adjusted
1635 **/
1636void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
1637{
1638 struct i40e_vsi_context ctxt;
1639 i40e_status ret;
1640
1641 if ((vsi->info.valid_sections &
1642 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1643 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
1644 return; /* already enabled */
1645
1646 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1647 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1648 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1649
1650 ctxt.seid = vsi->seid;
1651 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1652 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1653 if (ret) {
1654 dev_info(&vsi->back->pdev->dev,
1655 "%s: update vsi failed, aq_err=%d\n",
1656 __func__, vsi->back->hw.aq.asq_last_status);
1657 }
1658}
1659
1660/**
1661 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
1662 * @vsi: the vsi being adjusted
1663 **/
1664void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
1665{
1666 struct i40e_vsi_context ctxt;
1667 i40e_status ret;
1668
1669 if ((vsi->info.valid_sections &
1670 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1671 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
1672 I40E_AQ_VSI_PVLAN_EMOD_MASK))
1673 return; /* already disabled */
1674
1675 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1676 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1677 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
1678
1679 ctxt.seid = vsi->seid;
1680 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1681 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1682 if (ret) {
1683 dev_info(&vsi->back->pdev->dev,
1684 "%s: update vsi failed, aq_err=%d\n",
1685 __func__, vsi->back->hw.aq.asq_last_status);
1686 }
1687}
1688
1689/**
1690 * i40e_vlan_rx_register - Setup or shutdown vlan offload
1691 * @netdev: network interface to be adjusted
1692 * @features: netdev features to test if VLAN offload is enabled or not
1693 **/
1694static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
1695{
1696 struct i40e_netdev_priv *np = netdev_priv(netdev);
1697 struct i40e_vsi *vsi = np->vsi;
1698
1699 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1700 i40e_vlan_stripping_enable(vsi);
1701 else
1702 i40e_vlan_stripping_disable(vsi);
1703}
1704
/**
 * i40e_vsi_add_vlan - Add vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only , -1 = any)
 *
 * Returns 0 on success, -ENOMEM on filter allocation failure, or the
 * error from syncing the filters to the HW.
 **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f, *add_f;
	bool is_netdev, is_vf;
	int ret;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(vsi->netdev);

	/* the netdev's own address gets a filter on the new vlan first */
	if (is_netdev) {
		add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
					is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, vsi->netdev->dev_addr);
			return -ENOMEM;
		}
	}

	/* replicate every known MAC onto the new vlan
	 * NOTE(review): i40e_add_filter can insert new entries at the
	 * list head while we iterate - presumably safe because new head
	 * entries are behind the cursor, but confirm
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			return -ENOMEM;
		}
	}

	ret = i40e_sync_vsi_filters(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Could not sync filters for vid %d\n", vid);
		return ret;
	}

	/* Now if we add a vlan tag, make sure to check if it is the first
	 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
	 * with 0, so we now accept untagged and specified tagged traffic
	 * (and not any taged and untagged)
	 */
	if (vid > 0) {
		/* swap the netdev address's wildcard (-1) filter for an
		 * untagged-only (0) filter
		 */
		if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
						  I40E_VLAN_ANY,
						  is_vf, is_netdev)) {
			i40e_del_filter(vsi, vsi->netdev->dev_addr,
					I40E_VLAN_ANY, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
						is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					 vsi->netdev->dev_addr);
				return -ENOMEM;
			}
		}

		/* do the same wildcard-to-untagged swap for all other MACs */
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					     is_vf, is_netdev)) {
				i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
						is_vf, is_netdev);
				add_f = i40e_add_filter(vsi, f->macaddr,
							0, is_vf, is_netdev);
				if (!add_f) {
					dev_info(&vsi->back->pdev->dev,
						 "Could not add filter 0 for %pM\n",
						 f->macaddr);
					return -ENOMEM;
				}
			}
		}
		ret = i40e_sync_vsi_filters(vsi);
	}

	return ret;
}
1788
/**
 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
 *
 * Returns 0 on success, -ENOMEM on filter allocation failure, or the
 * error from syncing the filters to the HW.
 **/
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_mac_filter *f, *add_f;
	bool is_vf, is_netdev;
	int filter_count = 0;
	int ret;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(netdev);

	/* drop the filters carrying this vid from every known MAC */
	if (is_netdev)
		i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);

	list_for_each_entry(f, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);

	ret = i40e_sync_vsi_filters(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev, "Could not sync filters\n");
		return ret;
	}

	/* go through all the filters for this VSI and if there is only
	 * vid == 0 it means there are no other filters, so vid 0 must
	 * be replaced with -1. This signifies that we should from now
	 * on accept any traffic (with any tag present, or untagged)
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (is_netdev) {
			if (f->vlan &&
			    ether_addr_equal(netdev->dev_addr, f->macaddr))
				filter_count++;
		}

		/* a netdev-address filter with a vlan is counted twice,
		 * but filter_count is only ever tested against zero below
		 * so the double count does not change the outcome
		 */
		if (f->vlan)
			filter_count++;
	}

	/* no tagged filters left: swap the netdev address's untagged-only
	 * (0) filter back to the wildcard (-1) filter
	 */
	if (!filter_count && is_netdev) {
		i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
		f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
				    is_vf, is_netdev);
		if (!f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add filter %d for %pM\n",
				 I40E_VLAN_ANY, netdev->dev_addr);
			return -ENOMEM;
		}
	}

	/* and do the same 0-to-wildcard swap for all other MACs */
	if (!filter_count) {
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
						is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter %d for %pM\n",
					 I40E_VLAN_ANY, f->macaddr);
				return -ENOMEM;
			}
		}
	}

	return i40e_sync_vsi_filters(vsi);
}
1861
1862/**
1863 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
1864 * @netdev: network interface to be adjusted
1865 * @vid: vlan id to be added
1866 **/
1867static int i40e_vlan_rx_add_vid(struct net_device *netdev,
1868 __always_unused __be16 proto, u16 vid)
1869{
1870 struct i40e_netdev_priv *np = netdev_priv(netdev);
1871 struct i40e_vsi *vsi = np->vsi;
1872 int ret;
1873
1874 if (vid > 4095)
1875 return 0;
1876
1877 netdev_info(vsi->netdev, "adding %pM vid=%d\n",
1878 netdev->dev_addr, vid);
1879 /* If the network stack called us with vid = 0, we should
1880 * indicate to i40e_vsi_add_vlan() that we want to receive
1881 * any traffic (i.e. with any vlan tag, or untagged)
1882 */
1883 ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
1884
1885 if (!ret) {
1886 if (vid < VLAN_N_VID)
1887 set_bit(vid, vsi->active_vlans);
1888 }
1889
1890 return 0;
1891}
1892
1893/**
1894 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
1895 * @netdev: network interface to be adjusted
1896 * @vid: vlan id to be removed
1897 **/
1898static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
1899 __always_unused __be16 proto, u16 vid)
1900{
1901 struct i40e_netdev_priv *np = netdev_priv(netdev);
1902 struct i40e_vsi *vsi = np->vsi;
1903
1904 netdev_info(vsi->netdev, "removing %pM vid=%d\n",
1905 netdev->dev_addr, vid);
1906 /* return code is ignored as there is nothing a user
1907 * can do about failure to remove and a log message was
1908 * already printed from another function
1909 */
1910 i40e_vsi_kill_vlan(vsi, vid);
1911
1912 clear_bit(vid, vsi->active_vlans);
1913 return 0;
1914}
1915
1916/**
1917 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
1918 * @vsi: the vsi being brought back up
1919 **/
1920static void i40e_restore_vlan(struct i40e_vsi *vsi)
1921{
1922 u16 vid;
1923
1924 if (!vsi->netdev)
1925 return;
1926
1927 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
1928
1929 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
1930 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
1931 vid);
1932}
1933
/**
 * i40e_vsi_add_pvid - Add pvid for the VSI
 * @vsi: the vsi being adjusted
 * @vid: the vlan id to set as a PVID
 *
 * Returns 0 on success or the AQ update error on failure.
 **/
i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	/* record the PVID and the insert/untagged mode flags in the
	 * cached VSI info before pushing it to the HW
	 */
	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.pvid = cpu_to_le16(vid);
	vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
	vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;

	/* NOTE(review): only .seid and .info are initialized here; the
	 * remaining ctxt fields are stack garbage - presumably ignored
	 * by i40e_aq_update_vsi_params, confirm
	 */
	ctxt.seid = vsi->seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "%s: update vsi failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}

	return ret;
}
1960
1961/**
1962 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
1963 * @vsi: the vsi being adjusted
1964 *
1965 * Just use the vlan_rx_register() service to put it back to normal
1966 **/
1967void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
1968{
1969 vsi->info.pvid = 0;
1970 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
1971}
1972
1973/**
1974 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
1975 * @vsi: ptr to the VSI
1976 *
1977 * If this function returns with an error, then it's possible one or
1978 * more of the rings is populated (while the rest are not). It is the
1979 * callers duty to clean those orphaned rings.
1980 *
1981 * Return 0 on success, negative on failure
1982 **/
1983static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
1984{
1985 int i, err = 0;
1986
1987 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
1988 err = i40e_setup_tx_descriptors(&vsi->tx_rings[i]);
1989
1990 return err;
1991}
1992
1993/**
1994 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
1995 * @vsi: ptr to the VSI
1996 *
1997 * Free VSI's transmit software resources
1998 **/
1999static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2000{
2001 int i;
2002
2003 for (i = 0; i < vsi->num_queue_pairs; i++)
2004 if (vsi->tx_rings[i].desc)
2005 i40e_free_tx_resources(&vsi->tx_rings[i]);
2006}
2007
2008/**
2009 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2010 * @vsi: ptr to the VSI
2011 *
2012 * If this function returns with an error, then it's possible one or
2013 * more of the rings is populated (while the rest are not). It is the
2014 * callers duty to clean those orphaned rings.
2015 *
2016 * Return 0 on success, negative on failure
2017 **/
2018static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2019{
2020 int i, err = 0;
2021
2022 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2023 err = i40e_setup_rx_descriptors(&vsi->rx_rings[i]);
2024 return err;
2025}
2026
2027/**
2028 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2029 * @vsi: ptr to the VSI
2030 *
2031 * Free all receive software resources
2032 **/
2033static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2034{
2035 int i;
2036
2037 for (i = 0; i < vsi->num_queue_pairs; i++)
2038 if (vsi->rx_rings[i].desc)
2039 i40e_free_rx_resources(&vsi->rx_rings[i]);
2040}
2041
/**
 * i40e_configure_tx_ring - Configure a transmit ring context and rest
 * @ring: The Tx ring to configure
 *
 * Configure the Tx descriptor ring in the HMC context.
 *
 * Returns 0 on success, -ENOMEM when the HMC context could not be
 * cleared or written.
 **/
static int i40e_configure_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	i40e_status err = 0;
	u32 qtx_ctl = 0;

	/* some ATR related tx ring init */
	if (vsi->back->flags & I40E_FLAG_FDIR_ATR_ENABLED) {
		ring->atr_sample_rate = vsi->back->atr_sample_rate;
		ring->atr_count = 0;
	} else {
		ring->atr_sample_rate = 0;
	}

	/* initialize XPS */
	if (ring->q_vector && ring->netdev &&
	    !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
		netif_set_xps_queue(ring->netdev,
				    &ring->q_vector->affinity_mask,
				    ring->queue_index);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));

	tx_ctx.new_context = 1;
	/* base is the DMA address in 128-byte units */
	tx_ctx.base = (ring->dma / 128);
	tx_ctx.qlen = ring->count;
	/* enable flow director when either FD mode is turned on */
	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FDIR_ENABLED |
					       I40E_FLAG_FDIR_ATR_ENABLED));

	/* As part of VSI creation/update, FW allocates certain
	 * Tx arbitration queue sets for each TC enabled for
	 * the VSI. The FW returns the handles to these queue
	 * sets as part of the response buffer to Add VSI,
	 * Update VSI, etc. AQ commands. It is expected that
	 * these queue set handles be associated with the Tx
	 * queues by the driver as part of the TX queue context
	 * initialization. This has to be done regardless of
	 * DCB as by default everything is mapped to TC0.
	 */
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
	if (err) {
		/* NOTE(review): format string is missing a closing paren
		 * after "pf_q %d" - left as-is here, runtime string
		 */
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* Now associate this queue with this PCI function */
	qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
	qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
						& I40E_QTX_CTL_PF_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
	i40e_flush(hw);

	clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);

	/* cache tail off for easier writes later */
	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);

	return 0;
}
2126
2127/**
2128 * i40e_configure_rx_ring - Configure a receive ring context
2129 * @ring: The Rx ring to configure
2130 *
2131 * Configure the Rx descriptor ring in the HMC context.
2132 **/
static int i40e_configure_rx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	i40e_status err = 0;

	/* reset all per-ring state flags before reprogramming */
	ring->state = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(rx_ctx));

	/* adopt the buffer sizing chosen VSI-wide in i40e_vsi_configure_rx() */
	ring->rx_buf_len = vsi->rx_buf_len;
	ring->rx_hdr_len = vsi->rx_hdr_len;

	/* buffer sizes are programmed right-shifted into the context fields */
	rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;

	/* descriptor base address is programmed in 128-byte units */
	rx_ctx.base = (ring->dma / 128);
	rx_ctx.qlen = ring->count;

	/* select 16-byte vs 32-byte Rx descriptor format */
	if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
		set_ring_16byte_desc_enabled(ring);
		rx_ctx.dsize = 0;
	} else {
		rx_ctx.dsize = 1;
	}

	/* a non-zero dtype means some form of packet split is in use */
	rx_ctx.dtype = vsi->dtype;
	if (vsi->dtype) {
		set_ring_ps_enabled(ring);
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
				  I40E_RX_SPLIT_IP |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
	} else {
		rx_ctx.hsplit_0 = 0;
	}

	/* max receive size: bounded by the frame size and the buffer chain */
	rx_ctx.rxmax = min_t(u16, vsi->max_frame,
			     (chain_len * ring->rx_buf_len));
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;
	rx_ctx.l2tsel = 1;
	rx_ctx.showiv = 1;

	/* clear the context in the HMC */
	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* cache tail for quicker writes, and clear the reg before use */
	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
	writel(0, ring->tail);

	/* pre-fill the ring with receive buffers */
	i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));

	return 0;
}
2211
2212/**
2213 * i40e_vsi_configure_tx - Configure the VSI for Tx
2214 * @vsi: VSI structure describing this set of rings and resources
2215 *
2216 * Configure the Tx VSI for operation.
2217 **/
2218static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2219{
2220 int err = 0;
2221 u16 i;
2222
2223 for (i = 0; (i < vsi->num_queue_pairs) && (!err); i++)
2224 err = i40e_configure_tx_ring(&vsi->tx_rings[i]);
2225
2226 return err;
2227}
2228
2229/**
2230 * i40e_vsi_configure_rx - Configure the VSI for Rx
2231 * @vsi: the VSI being configured
2232 *
2233 * Configure the Rx VSI for operation.
2234 **/
2235static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2236{
2237 int err = 0;
2238 u16 i;
2239
2240 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
2241 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
2242 + ETH_FCS_LEN + VLAN_HLEN;
2243 else
2244 vsi->max_frame = I40E_RXBUFFER_2048;
2245
2246 /* figure out correct receive buffer length */
2247 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
2248 I40E_FLAG_RX_PS_ENABLED)) {
2249 case I40E_FLAG_RX_1BUF_ENABLED:
2250 vsi->rx_hdr_len = 0;
2251 vsi->rx_buf_len = vsi->max_frame;
2252 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2253 break;
2254 case I40E_FLAG_RX_PS_ENABLED:
2255 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2256 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2257 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
2258 break;
2259 default:
2260 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2261 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2262 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
2263 break;
2264 }
2265
2266 /* round up for the chip's needs */
2267 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
2268 (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
2269 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
2270 (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
2271
2272 /* set up individual rings */
2273 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2274 err = i40e_configure_rx_ring(&vsi->rx_rings[i]);
2275
2276 return err;
2277}
2278
2279/**
2280 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2281 * @vsi: ptr to the VSI
2282 **/
2283static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2284{
2285 u16 qoffset, qcount;
2286 int i, n;
2287
2288 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2289 return;
2290
2291 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2292 if (!(vsi->tc_config.enabled_tc & (1 << n)))
2293 continue;
2294
2295 qoffset = vsi->tc_config.tc_info[n].qoffset;
2296 qcount = vsi->tc_config.tc_info[n].qcount;
2297 for (i = qoffset; i < (qoffset + qcount); i++) {
2298 struct i40e_ring *rx_ring = &vsi->rx_rings[i];
2299 struct i40e_ring *tx_ring = &vsi->tx_rings[i];
2300 rx_ring->dcb_tc = n;
2301 tx_ring->dcb_tc = n;
2302 }
2303 }
2304}
2305
2306/**
2307 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2308 * @vsi: ptr to the VSI
2309 **/
2310static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2311{
2312 if (vsi->netdev)
2313 i40e_set_rx_mode(vsi->netdev);
2314}
2315
2316/**
2317 * i40e_vsi_configure - Set up the VSI for action
2318 * @vsi: the VSI being configured
2319 **/
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int ret;

	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);

	/* Tx rings must be set up before Rx; bail on the first failure */
	ret = i40e_vsi_configure_tx(vsi);
	if (ret)
		return ret;

	return i40e_vsi_configure_rx(vsi);
}
2333
2334/**
2335 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
2336 * @vsi: the VSI being configured
2337 **/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_q_vector *q_vector;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i, q;
	u32 val;
	u32 qp;

	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
	 * and PFINT_LNKLSTn registers, e.g.:
	 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
	 */
	qp = vsi->base_queue;
	vector = vsi->base_vector;
	q_vector = vsi->q_vectors;
	for (i = 0; i < vsi->num_q_vectors; i++, q_vector++, vector++) {
		/* program the Rx and Tx interrupt throttling rates */
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
		q_vector->rx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
		     q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
		q_vector->tx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
		     q_vector->tx.itr);

		/* Linked list for the queuepairs assigned to this vector */
		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
		for (q = 0; q < q_vector->num_ringpairs; q++) {
			/* Rx cause: the list entry after Rx queue qp is the
			 * Tx queue of the same pair
			 */
			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			      (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
			      (I40E_QUEUE_TYPE_TX
				      << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);

			wr32(hw, I40E_QINT_RQCTL(qp), val);

			/* Tx cause: next entry is the Rx queue of the
			 * following pair (qp+1)
			 */
			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
			      ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
			      (I40E_QUEUE_TYPE_RX
				      << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

			/* Terminate the linked list */
			if (q == (q_vector->num_ringpairs - 1))
				val |= (I40E_QUEUE_END_OF_LIST
					<< I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

			wr32(hw, I40E_QINT_TQCTL(qp), val);
			qp++;
		}
	}

	i40e_flush(hw);
}
2396
2397/**
2398 * i40e_enable_misc_int_causes - enable the non-queue interrupts
2399 * @hw: ptr to the hardware info
2400 **/
static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
{
	u32 val;

	/* clear things first */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	/* enable only the non-queue causes the driver handles in i40e_intr() */
	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	      I40E_PFINT_ICR0_ENA_GRST_MASK |
	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
	      I40E_PFINT_ICR0_ENA_GPIO_MASK |
	      I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK |
	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	      I40E_PFINT_ICR0_ENA_VFLR_MASK |
	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
	     I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);

	/* OTHER_ITR_IDX = 0 */
	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
2428
2429/**
2430 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
2431 * @vsi: the VSI being configured
2432 **/
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* set the ITR configuration (single vector: q_vector 0, ITR0 regs) */
	q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
	q_vector->rx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
	q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
	q_vector->tx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);

	i40e_enable_misc_int_causes(hw);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the q int.
	 * NOTE(review): the RQCTL value uses the TQCTL NEXTQ_TYPE shift;
	 * presumably the two registers share the field layout -- confirm
	 * against the register spec.
	 */
	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
	      (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

	wr32(hw, I40E_QINT_RQCTL(0), val);

	/* Tx cause is the last entry in the single-pair list */
	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

	wr32(hw, I40E_QINT_TQCTL(0), val);
	i40e_flush(hw);
}
2467
2468/**
2469 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
2470 * @pf: board private structure
2471 **/
2472static void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
2473{
2474 struct i40e_hw *hw = &pf->hw;
2475 u32 val;
2476
2477 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2478 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2479 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2480
2481 wr32(hw, I40E_PFINT_DYN_CTL0, val);
2482 i40e_flush(hw);
2483}
2484
2485/**
2486 * i40e_irq_dynamic_enable - Enable default interrupt generation settings
2487 * @vsi: pointer to a vsi
2488 * @vector: enable a particular Hw Interrupt vector
2489 **/
2490void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
2491{
2492 struct i40e_pf *pf = vsi->back;
2493 struct i40e_hw *hw = &pf->hw;
2494 u32 val;
2495
2496 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2497 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2498 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2499 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2500 i40e_flush(hw);
2501}
2502
2503/**
2504 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
2505 * @irq: interrupt number
2506 * @data: pointer to a q_vector
2507 **/
2508static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
2509{
2510 struct i40e_q_vector *q_vector = data;
2511
2512 if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
2513 return IRQ_HANDLED;
2514
2515 napi_schedule(&q_vector->napi);
2516
2517 return IRQ_HANDLED;
2518}
2519
2520/**
2521 * i40e_fdir_clean_rings - Interrupt Handler for FDIR rings
2522 * @irq: interrupt number
2523 * @data: pointer to a q_vector
2524 **/
2525static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
2526{
2527 struct i40e_q_vector *q_vector = data;
2528
2529 if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
2530 return IRQ_HANDLED;
2531
2532 pr_info("fdir ring cleaning needed\n");
2533
2534 return IRQ_HANDLED;
2535}
2536
2537/**
2538 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
2539 * @vsi: the VSI being configured
2540 * @basename: name for the vector
2541 *
2542 * Allocates MSI-X vectors and requests interrupts from the kernel.
2543 **/
static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct i40e_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;

	for (vector = 0; vector < q_vectors; vector++) {
		struct i40e_q_vector *q_vector = &(vsi->q_vectors[vector]);

		/* name the IRQ after the ring types this vector services;
		 * the per-type counters keep the names unique
		 */
		if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring[0]) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.ring[0]) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(pf->msix_entries[base + vector].vector,
				  vsi->irq_handler,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "%s: request_irq failed, error: %d\n",
				 __func__, err);
			goto free_queue_irqs;
		}
		/* assign the mask for this irq */
		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
				      &q_vector->affinity_mask);
	}

	return 0;

free_queue_irqs:
	/* unwind: release every IRQ requested before the failure.
	 * NOTE(review): this also calls free_irq on vectors that were
	 * skipped as unused above -- verify that cannot happen in
	 * practice or that the spurious free is harmless.
	 */
	while (vector) {
		vector--;
		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
				      NULL);
		free_irq(pf->msix_entries[base + vector].vector,
			 &(vsi->q_vectors[vector]));
	}
	return err;
}
2598
2599/**
2600 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
2601 * @vsi: the VSI being un-configured
2602 **/
static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	int i;

	/* zero the per-queue interrupt cause controls so the queues can
	 * no longer raise interrupts
	 */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i].reg_idx), 0);
		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i].reg_idx), 0);
	}

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		/* DYN_CTLN uses the offset-by-1 vector indexing (see
		 * i40e_vsi_configure_msix), hence "i - 1"
		 */
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);

		i40e_flush(hw);
		/* wait for any in-flight handlers on other CPUs to finish */
		for (i = 0; i < vsi->num_q_vectors; i++)
			synchronize_irq(pf->msix_entries[i + base].vector);
	} else {
		/* Legacy and MSI mode - this stops all interrupt handling */
		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
		i40e_flush(hw);
		synchronize_irq(pf->pdev->irq);
	}
}
2631
2632/**
2633 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
2634 * @vsi: the VSI being configured
2635 **/
2636static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
2637{
2638 struct i40e_pf *pf = vsi->back;
2639 int i;
2640
2641 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2642 for (i = vsi->base_vector;
2643 i < (vsi->num_q_vectors + vsi->base_vector); i++)
2644 i40e_irq_dynamic_enable(vsi, i);
2645 } else {
2646 i40e_irq_dynamic_enable_icr0(pf);
2647 }
2648
2649 return 0;
2650}
2651
2652/**
2653 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
2654 * @pf: board private structure
2655 **/
2656static void i40e_stop_misc_vector(struct i40e_pf *pf)
2657{
2658 /* Disable ICR 0 */
2659 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
2660 i40e_flush(&pf->hw);
2661}
2662
2663/**
2664 * i40e_intr - MSI/Legacy and non-queue interrupt handler
2665 * @irq: interrupt number
2666 * @data: pointer to a q_vector
2667 *
2668 * This is the handler used for all MSI/Legacy interrupts, and deals
2669 * with both queue and non-queue interrupts. This is also used in
2670 * MSIX mode to handle the non-queue interrupts.
2671 **/
static irqreturn_t i40e_intr(int irq, void *data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;
	struct i40e_hw *hw = &pf->hw;
	u32 icr0, icr0_remaining;
	u32 val, ena_mask;

	icr0 = rd32(hw, I40E_PFINT_ICR0);

	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
		return IRQ_NONE;

	/* acknowledge the interrupt by setting CLEARPBA */
	val = rd32(hw, I40E_PFINT_DYN_CTL0);
	val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, val);

	/* ena_mask tracks which causes stay enabled when we exit */
	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {

		/* temporarily disable queue cause for NAPI processing */
		u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
		qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_RQCTL(0), qval);

		qval = rd32(hw, I40E_QINT_TQCTL(0));
		qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_TQCTL(0), qval);
		i40e_flush(hw);

		if (!test_bit(__I40E_DOWN, &pf->state))
			napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0].napi);
	}

	/* the events below are flagged in pf->state and handled later by
	 * the service task; their causes are masked until then
	 */
	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
			set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		/* count which flavor of reset the hardware reported */
		val = rd32(hw, I40E_GLGEN_RSTAT);
		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		if (val & I40E_RESET_CORER)
			pf->corer_count++;
		else if (val & I40E_RESET_GLOBR)
			pf->globr_count++;
		else if (val & I40E_RESET_EMPR)
			pf->empr_count++;
	}

	/* If a critical error is pending we have no choice but to reset the
	 * device.
	 * Report and mask out any remaining unexpected interrupts.
	 */
	icr0_remaining = icr0 & ena_mask;
	if (icr0_remaining) {
		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
			 icr0_remaining);
		if ((icr0_remaining & I40E_PFINT_ICR0_HMC_ERR_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
			if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
				dev_info(&pf->pdev->dev, "HMC error interrupt\n");
			} else {
				dev_info(&pf->pdev->dev, "device will be reset\n");
				set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
				i40e_service_event_schedule(pf);
			}
		}
		ena_mask &= ~icr0_remaining;
	}

	/* re-enable interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
	i40e_flush(hw);
	if (!test_bit(__I40E_DOWN, &pf->state)) {
		i40e_service_event_schedule(pf);
		i40e_irq_dynamic_enable_icr0(pf);
	}

	return IRQ_HANDLED;
}
2772
2773/**
2774 * i40e_map_vector_to_rxq - Assigns the Rx queue to the vector
2775 * @vsi: the VSI being configured
2776 * @v_idx: vector index
2777 * @r_idx: rx queue index
2778 **/
2779static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx)
2780{
2781 struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
2782 struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]);
2783
2784 rx_ring->q_vector = q_vector;
2785 q_vector->rx.ring[q_vector->rx.count] = rx_ring;
2786 q_vector->rx.count++;
2787 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2788 q_vector->vsi = vsi;
2789}
2790
2791/**
2792 * i40e_map_vector_to_txq - Assigns the Tx queue to the vector
2793 * @vsi: the VSI being configured
2794 * @v_idx: vector index
2795 * @t_idx: tx queue index
2796 **/
2797static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx)
2798{
2799 struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
2800 struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]);
2801
2802 tx_ring->q_vector = q_vector;
2803 q_vector->tx.ring[q_vector->tx.count] = tx_ring;
2804 q_vector->tx.count++;
2805 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2806 q_vector->num_ringpairs++;
2807 q_vector->vsi = vsi;
2808}
2809
2810/**
2811 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
2812 * @vsi: the VSI being configured
2813 *
2814 * This function maps descriptor rings to the queue-specific vectors
2815 * we were allotted through the MSI-X enabling code. Ideally, we'd have
2816 * one vector per queue pair, but on a constrained vector budget, we
2817 * group the queue pairs as "efficiently" as possible.
2818 **/
2819static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
2820{
2821 int qp_remaining = vsi->num_queue_pairs;
2822 int q_vectors = vsi->num_q_vectors;
2823 int qp_per_vector;
2824 int v_start = 0;
2825 int qp_idx = 0;
2826
2827 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
2828 * group them so there are multiple queues per vector.
2829 */
2830 for (; v_start < q_vectors && qp_remaining; v_start++) {
2831 qp_per_vector = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
2832 for (; qp_per_vector;
2833 qp_per_vector--, qp_idx++, qp_remaining--) {
2834 map_vector_to_rxq(vsi, v_start, qp_idx);
2835 map_vector_to_txq(vsi, v_start, qp_idx);
2836 }
2837 }
2838}
2839
2840/**
2841 * i40e_vsi_request_irq - Request IRQ from the OS
2842 * @vsi: the VSI being configured
2843 * @basename: name for the vector
2844 **/
2845static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
2846{
2847 struct i40e_pf *pf = vsi->back;
2848 int err;
2849
2850 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
2851 err = i40e_vsi_request_irq_msix(vsi, basename);
2852 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
2853 err = request_irq(pf->pdev->irq, i40e_intr, 0,
2854 pf->misc_int_name, pf);
2855 else
2856 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
2857 pf->misc_int_name, pf);
2858
2859 if (err)
2860 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
2861
2862 return err;
2863}
2864
2865#ifdef CONFIG_NET_POLL_CONTROLLER
2866/**
2867 * i40e_netpoll - A Polling 'interrupt'handler
2868 * @netdev: network interface device structure
2869 *
2870 * This is used by netconsole to send skbs without having to re-enable
2871 * interrupts. It's not called while the normal interrupt routine is executing.
2872 **/
2873static void i40e_netpoll(struct net_device *netdev)
2874{
2875 struct i40e_netdev_priv *np = netdev_priv(netdev);
2876 struct i40e_vsi *vsi = np->vsi;
2877 struct i40e_pf *pf = vsi->back;
2878 int i;
2879
2880 /* if interface is down do nothing */
2881 if (test_bit(__I40E_DOWN, &vsi->state))
2882 return;
2883
2884 pf->flags |= I40E_FLAG_IN_NETPOLL;
2885 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2886 for (i = 0; i < vsi->num_q_vectors; i++)
2887 i40e_msix_clean_rings(0, &vsi->q_vectors[i]);
2888 } else {
2889 i40e_intr(pf->pdev->irq, netdev);
2890 }
2891 pf->flags &= ~I40E_FLAG_IN_NETPOLL;
2892}
2893#endif
2894
2895/**
2896 * i40e_vsi_control_tx - Start or stop a VSI's rings
2897 * @vsi: the VSI being configured
2898 * @enable: start or stop the rings
2899 **/
static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q;
	u32 tx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* wait until any previous request has been acted on, i.e.
		 * the QENA_REQ and QENA_STAT bits agree
		 */
		j = 1000;
		do {
			usleep_range(1000, 2000);
			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
		} while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
			       ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);

		if (enable) {
			/* is STAT set ? */
			if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
				dev_info(&pf->pdev->dev,
					 "Tx %d already enabled\n", i);
				continue;
			}
		} else {
			/* is !STAT set ? */
			if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
				dev_info(&pf->pdev->dev,
					 "Tx %d already disabled\n", i);
				continue;
			}
		}

		/* turn on/off the queue */
		if (enable)
			tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
				  I40E_QTX_ENA_QENA_STAT_MASK;
		else
			tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;

		wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);

		/* wait for the change to finish */
		for (j = 0; j < 10; j++) {
			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
			if (enable) {
				if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
					break;
			} else {
				if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
					break;
			}

			udelay(10);
		}
		/* hardware never acknowledged the request: give up */
		if (j >= 10) {
			dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n",
				 pf_q, (enable ? "en" : "dis"));
			return -ETIMEDOUT;
		}
	}

	return 0;
}
2963
2964/**
2965 * i40e_vsi_control_rx - Start or stop a VSI's rings
2966 * @vsi: the VSI being configured
2967 * @enable: start or stop the rings
2968 **/
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q;
	u32 rx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* wait until any previous request has been acted on, i.e.
		 * the QENA_REQ and QENA_STAT bits agree
		 */
		j = 1000;
		do {
			usleep_range(1000, 2000);
			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
		} while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT)
			       ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1);

		if (enable) {
			/* is STAT set ? */
			if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
				continue;
		} else {
			/* is !STAT set ? */
			if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
				continue;
		}

		/* turn on/off the queue */
		if (enable)
			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK |
				  I40E_QRX_ENA_QENA_STAT_MASK;
		else
			rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK |
				  I40E_QRX_ENA_QENA_STAT_MASK);
		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);

		/* wait for the change to finish */
		for (j = 0; j < 10; j++) {
			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));

			if (enable) {
				if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
					break;
			} else {
				if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
					break;
			}

			udelay(10);
		}
		/* hardware never acknowledged the request: give up */
		if (j >= 10) {
			dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n",
				 pf_q, (enable ? "en" : "dis"));
			return -ETIMEDOUT;
		}
	}

	return 0;
}
3027
3028/**
3029 * i40e_vsi_control_rings - Start or stop a VSI's rings
3030 * @vsi: the VSI being configured
3031 * @enable: start or stop the rings
3032 **/
3033static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3034{
3035 int ret;
3036
3037 /* do rx first for enable and last for disable */
3038 if (request) {
3039 ret = i40e_vsi_control_rx(vsi, request);
3040 if (ret)
3041 return ret;
3042 ret = i40e_vsi_control_tx(vsi, request);
3043 } else {
3044 ret = i40e_vsi_control_tx(vsi, request);
3045 if (ret)
3046 return ret;
3047 ret = i40e_vsi_control_rx(vsi, request);
3048 }
3049
3050 return ret;
3051}
3052
3053/**
3054 * i40e_vsi_free_irq - Free the irq association with the OS
3055 * @vsi: the VSI being configured
3056 **/
static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u32 val, qp;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		if (!vsi->q_vectors)
			return;

		for (i = 0; i < vsi->num_q_vectors; i++) {
			u16 vector = i + base;

			/* free only the irqs that were actually requested */
			if (vsi->q_vectors[i].num_ringpairs == 0)
				continue;

			/* clear the affinity_mask in the IRQ descriptor */
			irq_set_affinity_hint(pf->msix_entries[vector].vector,
					      NULL);
			free_irq(pf->msix_entries[vector].vector,
				 &vsi->q_vectors[i]);

			/* Tear down the interrupt queue link list
			 *
			 * We know that they come in pairs and always
			 * the Rx first, then the Tx. To clear the
			 * link list, stick the EOL value into the
			 * next_q field of the registers.
			 */
			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
			     >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			val |= I40E_QUEUE_END_OF_LIST
			       << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);

			/* walk the list, disabling each queue's interrupt
			 * cause and pointing its next_q at EOL
			 */
			while (qp != I40E_QUEUE_END_OF_LIST) {
				u32 next;

				val = rd32(hw, I40E_QINT_RQCTL(qp));

				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
					 I40E_QINT_RQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
					 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_RQCTL(qp), val);

				val = rd32(hw, I40E_QINT_TQCTL(qp));

				/* remember the following pair before we
				 * overwrite next_q with EOL
				 */
				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;

				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
					 I40E_QINT_TQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
					 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_TQCTL(qp), val);
				qp = next;
			}
		}
	} else {
		free_irq(pf->pdev->irq, pf);

		/* single queue pair in MSI/legacy mode; same teardown as
		 * above but rooted at LNKLST0.
		 * NOTE(review): the LNKLSTN FIRSTQ mask/shift are used on
		 * the LNKLST0 value -- presumably the field layout is
		 * identical; confirm against the register spec.
		 */
		val = rd32(hw, I40E_PFINT_LNKLST0);
		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
		     >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
		val |= I40E_QUEUE_END_OF_LIST
		       << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
		wr32(hw, I40E_PFINT_LNKLST0, val);

		val = rd32(hw, I40E_QINT_RQCTL(qp));
		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			 I40E_QINT_RQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_RQCTL(qp), val);

		val = rd32(hw, I40E_QINT_TQCTL(qp));

		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			 I40E_QINT_TQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_TQCTL(qp), val);
	}
}
3162
3163/**
3164 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3165 * @vsi: the VSI being un-configured
3166 *
3167 * This frees the memory allocated to the q_vectors and
3168 * deletes references to the NAPI struct.
3169 **/
3170static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3171{
3172 int v_idx;
3173
3174 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
3175 struct i40e_q_vector *q_vector = &vsi->q_vectors[v_idx];
3176 int r_idx;
3177
3178 if (!q_vector)
3179 continue;
3180
3181 /* disassociate q_vector from rings */
3182 for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
3183 q_vector->tx.ring[r_idx]->q_vector = NULL;
3184 for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
3185 q_vector->rx.ring[r_idx]->q_vector = NULL;
3186
3187 /* only VSI w/ an associated netdev is set up w/ NAPI */
3188 if (vsi->netdev)
3189 netif_napi_del(&q_vector->napi);
3190 }
3191 kfree(vsi->q_vectors);
3192}
3193
/**
 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
 * @pf: board private structure
 *
 * Releases MSI-X or MSI resources back to the PCI layer and clears
 * the corresponding capability flags in pf->flags.
 **/
static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
{
	/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		pci_disable_msix(pf->pdev);
		kfree(pf->msix_entries);
		/* NULL after free so a second call cannot double-free */
		pf->msix_entries = NULL;
	} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
		pci_disable_msi(pf->pdev);
	}
	/* whichever mode was active, neither is any more */
	pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
}
3210
3211/**
3212 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
3213 * @pf: board private structure
3214 *
3215 * We go through and clear interrupt specific resources and reset the structure
3216 * to pre-load conditions
3217 **/
3218static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3219{
3220 int i;
3221
3222 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3223 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
3224 if (pf->vsi[i])
3225 i40e_vsi_free_q_vectors(pf->vsi[i]);
3226 i40e_reset_interrupt_capability(pf);
3227}
3228
3229/**
3230 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3231 * @vsi: the VSI being configured
3232 **/
3233static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3234{
3235 int q_idx;
3236
3237 if (!vsi->netdev)
3238 return;
3239
3240 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3241 napi_enable(&vsi->q_vectors[q_idx].napi);
3242}
3243
3244/**
3245 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
3246 * @vsi: the VSI being configured
3247 **/
3248static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3249{
3250 int q_idx;
3251
3252 if (!vsi->netdev)
3253 return;
3254
3255 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3256 napi_disable(&vsi->q_vectors[q_idx].napi);
3257}
3258
3259/**
3260 * i40e_quiesce_vsi - Pause a given VSI
3261 * @vsi: the VSI being paused
3262 **/
3263static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
3264{
3265 if (test_bit(__I40E_DOWN, &vsi->state))
3266 return;
3267
3268 set_bit(__I40E_NEEDS_RESTART, &vsi->state);
3269 if (vsi->netdev && netif_running(vsi->netdev)) {
3270 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3271 } else {
3272 set_bit(__I40E_DOWN, &vsi->state);
3273 i40e_down(vsi);
3274 }
3275}
3276
3277/**
3278 * i40e_unquiesce_vsi - Resume a given VSI
3279 * @vsi: the VSI being resumed
3280 **/
3281static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
3282{
3283 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
3284 return;
3285
3286 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
3287 if (vsi->netdev && netif_running(vsi->netdev))
3288 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
3289 else
3290 i40e_up(vsi); /* this clears the DOWN bit */
3291}
3292
3293/**
3294 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
3295 * @pf: the PF
3296 **/
3297static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
3298{
3299 int v;
3300
3301 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
3302 if (pf->vsi[v])
3303 i40e_quiesce_vsi(pf->vsi[v]);
3304 }
3305}
3306
3307/**
3308 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
3309 * @pf: the PF
3310 **/
3311static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3312{
3313 int v;
3314
3315 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
3316 if (pf->vsi[v])
3317 i40e_unquiesce_vsi(pf->vsi[v]);
3318 }
3319}
3320
3321/**
3322 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
3323 * @dcbcfg: the corresponding DCBx configuration structure
3324 *
3325 * Return the number of TCs from given DCBx configuration
3326 **/
3327static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
3328{
3329 int num_tc = 0, i;
3330
3331 /* Scan the ETS Config Priority Table to find
3332 * traffic class enabled for a given priority
3333 * and use the traffic class index to get the
3334 * number of traffic classes enabled
3335 */
3336 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
3337 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
3338 num_tc = dcbcfg->etscfg.prioritytable[i];
3339 }
3340
3341 /* Traffic class index starts from zero so
3342 * increment to return the actual count
3343 */
3344 num_tc++;
3345
3346 return num_tc;
3347}
3348
3349/**
3350 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
3351 * @dcbcfg: the corresponding DCBx configuration structure
3352 *
3353 * Query the current DCB configuration and return the number of
3354 * traffic classes enabled from the given DCBX config
3355 **/
3356static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
3357{
3358 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
3359 u8 enabled_tc = 1;
3360 u8 i;
3361
3362 for (i = 0; i < num_tc; i++)
3363 enabled_tc |= 1 << i;
3364
3365 return enabled_tc;
3366}
3367
3368/**
3369 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
3370 * @pf: PF being queried
3371 *
3372 * Return number of traffic classes enabled for the given PF
3373 **/
3374static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
3375{
3376 struct i40e_hw *hw = &pf->hw;
3377 u8 i, enabled_tc;
3378 u8 num_tc = 0;
3379 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
3380
3381 /* If DCB is not enabled then always in single TC */
3382 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3383 return 1;
3384
3385 /* MFP mode return count of enabled TCs for this PF */
3386 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
3387 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3388 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3389 if (enabled_tc & (1 << i))
3390 num_tc++;
3391 }
3392 return num_tc;
3393 }
3394
3395 /* SFP mode will be enabled for all TCs on port */
3396 return i40e_dcb_get_num_tc(dcbcfg);
3397}
3398
3399/**
3400 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
3401 * @pf: PF being queried
3402 *
3403 * Return a bitmap for first enabled traffic class for this PF.
3404 **/
3405static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
3406{
3407 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3408 u8 i = 0;
3409
3410 if (!enabled_tc)
3411 return 0x1; /* TC0 */
3412
3413 /* Find the first enabled TC */
3414 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3415 if (enabled_tc & (1 << i))
3416 break;
3417 }
3418
3419 return 1 << i;
3420}
3421
3422/**
3423 * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
3424 * @pf: PF being queried
3425 *
3426 * Return a bitmap for enabled traffic classes for this PF.
3427 **/
3428static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
3429{
3430 /* If DCB is not enabled for this PF then just return default TC */
3431 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3432 return i40e_pf_get_default_tc(pf);
3433
3434 /* MFP mode will have enabled TCs set by FW */
3435 if (pf->flags & I40E_FLAG_MFP_ENABLED)
3436 return pf->hw.func_caps.enabled_tcmap;
3437
3438 /* SFP mode we want PF to be enabled for all TCs */
3439 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
3440}
3441
/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Reads the VSI-level bandwidth configuration and the per-TC ETS
 * credits from the admin queue, then caches them in the vsi struct
 * (bw_limit, bw_max_quanta and the bw_ets_* arrays).
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u32 tc_bw_max;
	int ret;
	int i;

	/* Get the VSI level BW configuration */
	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
			 ret, pf->hw.aq.asq_last_status);
		return ret;
	}

	/* Get the VSI level BW configuration per TC */
	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
					       &bw_ets_config,
					       NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
			 ret, pf->hw.aq.asq_last_status);
		return ret;
	}

	/* both queries should agree on which TCs are valid; a mismatch
	 * is logged but deliberately treated as non-fatal
	 */
	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	/* combine the two little-endian 16-bit words into one 32-bit
	 * value holding a 4-bit field per TC
	 */
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
			le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
	}
	return ret;
}
3499
3500/**
3501 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
3502 * @vsi: the VSI being configured
3503 * @enabled_tc: TC bitmap
3504 * @bw_credits: BW shared credits per TC
3505 *
3506 * Returns 0 on success, negative value on failure
3507 **/
3508static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi,
3509 u8 enabled_tc,
3510 u8 *bw_share)
3511{
3512 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
3513 int i, ret = 0;
3514
3515 bw_data.tc_valid_bits = enabled_tc;
3516 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3517 bw_data.tc_bw_credits[i] = bw_share[i];
3518
3519 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid,
3520 &bw_data, NULL);
3521 if (ret) {
3522 dev_info(&vsi->back->pdev->dev,
3523 "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
3524 __func__, vsi->back->hw.aq.asq_last_status);
3525 return ret;
3526 }
3527
3528 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3529 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
3530
3531 return ret;
3532}
3533
/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 *
 * Mirrors the VSI's TC configuration (per-TC queue counts/offsets and
 * the user-priority-to-TC mapping) into the netdev's mqprio state.
 **/
static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u8 netdev_tc = 0;
	int i;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	/* nothing to mirror on a netdev-less VSI */
	if (!netdev)
		return;

	/* an empty map means TC offloading is being turned off */
	if (!enabled_tc) {
		netdev_reset_tc(netdev);
		return;
	}

	/* Set up actual enabled TCs on the VSI */
	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
		return;

	/* set per TC queues for the VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* Only set TC queues for enabled tcs
		 *
		 * e.g. For a VSI that has TC0 and TC3 enabled the
		 * enabled_tc bitmap would be 0x00001001; the driver
		 * will set the numtc for netdev as 2 that will be
		 * referenced by the netdev layer as TC 0 and 1.
		 */
		if (vsi->tc_config.enabled_tc & (1 << i))
			netdev_set_tc_queue(netdev,
					vsi->tc_config.tc_info[i].netdev_tc,
					vsi->tc_config.tc_info[i].qcount,
					vsi->tc_config.tc_info[i].qoffset);
	}

	/* Assign UP2TC map for the VSI */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		/* Get the actual TC# for the UP */
		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
		/* Get the mapped netdev TC# for the UP
		 * NOTE(review): ets_tc originates from the DCBX peer and
		 * indexes tc_info[] without a bounds check here — confirm
		 * the DCBX parser guarantees table values are < 8.
		 */
		netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
		netdev_set_prio_tc_map(netdev, i, netdev_tc);
	}
}
3586
3587/**
3588 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
3589 * @vsi: the VSI being configured
3590 * @ctxt: the ctxt buffer returned from AQ VSI update param command
3591 **/
3592static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
3593 struct i40e_vsi_context *ctxt)
3594{
3595 /* copy just the sections touched not the entire info
3596 * since not all sections are valid as returned by
3597 * update vsi params
3598 */
3599 vsi->info.mapping_flags = ctxt->info.mapping_flags;
3600 memcpy(&vsi->info.queue_mapping,
3601 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
3602 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
3603 sizeof(vsi->info.tc_mapping));
3604}
3605
/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * NOTE:
 * It is expected that the VSI queues have been quiesced before calling
 * this function.
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->tc_config.enabled_tc == enabled_tc)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & (1 << i))
			bw_share[i] = 1;
	}

	/* program the per-TC credits first; on failure the VSI's queue
	 * mapping is left untouched
	 */
	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		goto out;
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vsi failed, aq_err=%d\n",
			 vsi->back->hw.aq.asq_last_status);
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Update current VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed updating vsi bw info, aq_err=%d\n",
			 vsi->back->hw.aq.asq_last_status);
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}
3678
/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 *
 * Programs the interrupt mapping, starts the HW rings, enables NAPI
 * and interrupts, and opens the netdev queues if link is already up.
 *
 * Returns 0 on success, or the error from starting the rings.
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	/* program vector mapping to match whichever interrupt mode
	 * was granted at probe time
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_control_rings(vsi, true);
	if (err)
		return err;

	/* clear DOWN before enabling NAPI/irq so handlers that fire
	 * immediately see a live VSI
	 */
	clear_bit(__I40E_DOWN, &vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	/* only report carrier when the MAC already sees link */
	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	}
	i40e_service_event_schedule(pf);

	return 0;
}
3711
/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.  Serializes against other
 * reconfiguration via the __I40E_CONFIG_BUSY state bit.
 **/
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	/* the sleeps below are illegal in interrupt context */
	WARN_ON(in_interrupt());
	/* politely spin until we own the config-busy "lock" */
	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
		usleep_range(1000, 2000);
	i40e_down(vsi);

	/* Give a VF some time to respond to the reset. The
	 * two second wait is based upon the watchdog cycle in
	 * the VF driver.
	 */
	if (vsi->type == I40E_VSI_SRIOV)
		msleep(2000);
	i40e_up(vsi);
	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
}
3737
/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 *
 * Reprograms the VSI and finishes the bring-up sequence; returns the
 * first error encountered, or 0 on success.
 **/
int i40e_up(struct i40e_vsi *vsi)
{
	int err = i40e_vsi_configure(vsi);

	if (err)
		return err;

	return i40e_up_complete(vsi);
}
3752
/**
 * i40e_down - Shutdown the connection processing
 * @vsi: the VSI being stopped
 *
 * Stops the netdev queues, masks interrupts, halts the HW rings,
 * quiesces NAPI, and finally drains the ring buffers.
 **/
void i40e_down(struct i40e_vsi *vsi)
{
	int i;

	/* It is assumed that the caller of this function
	 * sets the vsi->state __I40E_DOWN bit.
	 */
	if (vsi->netdev) {
		netif_carrier_off(vsi->netdev);
		netif_tx_disable(vsi->netdev);
	}
	i40e_vsi_disable_irq(vsi);
	i40e_vsi_control_rings(vsi, false);
	i40e_napi_disable_all(vsi);

	/* rings are stopped; safe to reclaim any pending buffers */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		i40e_clean_tx_ring(&vsi->tx_rings[i]);
		i40e_clean_rx_ring(&vsi->rx_rings[i]);
	}
}
3777
3778/**
3779 * i40e_setup_tc - configure multiple traffic classes
3780 * @netdev: net device to configure
3781 * @tc: number of traffic classes to enable
3782 **/
3783static int i40e_setup_tc(struct net_device *netdev, u8 tc)
3784{
3785 struct i40e_netdev_priv *np = netdev_priv(netdev);
3786 struct i40e_vsi *vsi = np->vsi;
3787 struct i40e_pf *pf = vsi->back;
3788 u8 enabled_tc = 0;
3789 int ret = -EINVAL;
3790 int i;
3791
3792 /* Check if DCB enabled to continue */
3793 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
3794 netdev_info(netdev, "DCB is not enabled for adapter\n");
3795 goto exit;
3796 }
3797
3798 /* Check if MFP enabled */
3799 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
3800 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
3801 goto exit;
3802 }
3803
3804 /* Check whether tc count is within enabled limit */
3805 if (tc > i40e_pf_get_num_tc(pf)) {
3806 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
3807 goto exit;
3808 }
3809
3810 /* Generate TC map for number of tc requested */
3811 for (i = 0; i < tc; i++)
3812 enabled_tc |= (1 << i);
3813
3814 /* Requesting same TC configuration as already enabled */
3815 if (enabled_tc == vsi->tc_config.enabled_tc)
3816 return 0;
3817
3818 /* Quiesce VSI queues */
3819 i40e_quiesce_vsi(vsi);
3820
3821 /* Configure VSI for enabled TCs */
3822 ret = i40e_vsi_config_tc(vsi, enabled_tc);
3823 if (ret) {
3824 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
3825 vsi->seid);
3826 goto exit;
3827 }
3828
3829 /* Unquiesce VSI */
3830 i40e_unquiesce_vsi(vsi);
3831
3832exit:
3833 return ret;
3834}
3835
3836/**
3837 * i40e_open - Called when a network interface is made active
3838 * @netdev: network interface device structure
3839 *
3840 * The open entry point is called when a network interface is made
3841 * active by the system (IFF_UP). At this point all resources needed
3842 * for transmit and receive operations are allocated, the interrupt
3843 * handler is registered with the OS, the netdev watchdog subtask is
3844 * enabled, and the stack is notified that the interface is ready.
3845 *
3846 * Returns 0 on success, negative value on failure
3847 **/
3848static int i40e_open(struct net_device *netdev)
3849{
3850 struct i40e_netdev_priv *np = netdev_priv(netdev);
3851 struct i40e_vsi *vsi = np->vsi;
3852 struct i40e_pf *pf = vsi->back;
3853 char int_name[IFNAMSIZ];
3854 int err;
3855
3856 /* disallow open during test */
3857 if (test_bit(__I40E_TESTING, &pf->state))
3858 return -EBUSY;
3859
3860 netif_carrier_off(netdev);
3861
3862 /* allocate descriptors */
3863 err = i40e_vsi_setup_tx_resources(vsi);
3864 if (err)
3865 goto err_setup_tx;
3866 err = i40e_vsi_setup_rx_resources(vsi);
3867 if (err)
3868 goto err_setup_rx;
3869
3870 err = i40e_vsi_configure(vsi);
3871 if (err)
3872 goto err_setup_rx;
3873
3874 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
3875 dev_driver_string(&pf->pdev->dev), netdev->name);
3876 err = i40e_vsi_request_irq(vsi, int_name);
3877 if (err)
3878 goto err_setup_rx;
3879
3880 err = i40e_up_complete(vsi);
3881 if (err)
3882 goto err_up_complete;
3883
3884 if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
3885 err = i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, true, NULL);
3886 if (err)
3887 netdev_info(netdev,
3888 "couldn't set broadcast err %d aq_err %d\n",
3889 err, pf->hw.aq.asq_last_status);
3890 }
3891
3892 return 0;
3893
3894err_up_complete:
3895 i40e_down(vsi);
3896 i40e_vsi_free_irq(vsi);
3897err_setup_rx:
3898 i40e_vsi_free_rx_resources(vsi);
3899err_setup_tx:
3900 i40e_vsi_free_tx_resources(vsi);
3901 if (vsi == pf->vsi[pf->lan_vsi])
3902 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
3903
3904 return err;
3905}
3906
3907/**
3908 * i40e_close - Disables a network interface
3909 * @netdev: network interface device structure
3910 *
3911 * The close entry point is called when an interface is de-activated
3912 * by the OS. The hardware is still under the driver's control, but
3913 * this netdev interface is disabled.
3914 *
3915 * Returns 0, this is not allowed to fail
3916 **/
3917static int i40e_close(struct net_device *netdev)
3918{
3919 struct i40e_netdev_priv *np = netdev_priv(netdev);
3920 struct i40e_vsi *vsi = np->vsi;
3921
3922 if (test_and_set_bit(__I40E_DOWN, &vsi->state))
3923 return 0;
3924
3925 i40e_down(vsi);
3926 i40e_vsi_free_irq(vsi);
3927
3928 i40e_vsi_free_tx_resources(vsi);
3929 i40e_vsi_free_rx_resources(vsi);
3930
3931 return 0;
3932}
3933
/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 **/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
{
	u32 val;

	/* the VSI-reinit path below may sleep */
	WARN_ON(in_interrupt());

	/* do the biggest reset indicated */
	if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {

		/* Request a Global Reset
		 *
		 * This will start the chip's countdown to the actual full
		 * chip reset event, and a warning interrupt to be sent
		 * to all PFs, including the requestor. Our handler
		 * for the warning interrupt will deal with the shutdown
		 * and recovery of the switch setup.
		 */
		dev_info(&pf->pdev->dev, "GlobalR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
		/* NOTE(review): the CoreR path below follows its write
		 * with i40e_flush() but this path does not — confirm
		 * whether the posted write needs flushing here too.
		 */
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);

	} else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {

		/* Request a Core Reset
		 *
		 * Same as Global Reset, except does *not* include the MAC/PHY
		 */
		dev_info(&pf->pdev->dev, "CoreR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_CORER_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
		i40e_flush(&pf->hw);

	} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {

		/* Request a PF Reset
		 *
		 * Resets only the PF-specific registers
		 *
		 * This goes directly to the tear-down and rebuild of
		 * the switch, since we need to do all the recovery as
		 * for the Core Reset.
		 */
		dev_info(&pf->pdev->dev, "PFR requested\n");
		i40e_handle_reset_warning(pf);

	} else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
		int v;

		/* Find the VSI(s) that requested a re-init */
		dev_info(&pf->pdev->dev,
			 "VSI reinit requested\n");
		for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];
			if (vsi != NULL &&
			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
				i40e_vsi_reinit_locked(pf->vsi[v]);
				clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
			}
		}

		/* no further action needed, so return now */
		return;
	} else {
		dev_info(&pf->pdev->dev,
			 "bad reset request 0x%08x\n", reset_flags);
		return;
	}
}
4013
/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VF queues
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
					   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lan_overflow *data =
		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
	u32 queue = le32_to_cpu(data->prtdcb_rupto);
	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u16 vf_id;

	dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n",
		 __func__, queue, qtx_ctl);

	/* Queue belongs to VF, find the VF and issue VF reset */
	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
			 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
		vf_id -= hw->func_caps.vf_base_id;
		/* NOTE(review): vf_id comes straight from a HW register
		 * and is used to index pf->vf without a range check —
		 * confirm the firmware can only report queues belonging
		 * to VFs this PF actually allocated.
		 */
		vf = &pf->vf[vf_id];
		i40e_vc_notify_vf_reset(vf);
		/* Allow VF to process pending reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}
4049
/**
 * i40e_service_event_complete - Finish up the service event
 * @pf: board private structure
 *
 * Clears the service-task "scheduled" bit so the task can be queued
 * again.  Must only be called while the bit is set.
 **/
static void i40e_service_event_complete(struct i40e_pf *pf)
{
	/* completing an event that was never scheduled is a logic bug */
	BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_clear_bit();
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}
4062
4063/**
4064 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
4065 * @pf: board private structure
4066 **/
4067static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
4068{
4069 if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
4070 return;
4071
4072 pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
4073
4074 /* if interface is down do nothing */
4075 if (test_bit(__I40E_DOWN, &pf->state))
4076 return;
4077}
4078
4079/**
4080 * i40e_vsi_link_event - notify VSI of a link event
4081 * @vsi: vsi to be notified
4082 * @link_up: link up or down
4083 **/
4084static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
4085{
4086 if (!vsi)
4087 return;
4088
4089 switch (vsi->type) {
4090 case I40E_VSI_MAIN:
4091 if (!vsi->netdev || !vsi->netdev_registered)
4092 break;
4093
4094 if (link_up) {
4095 netif_carrier_on(vsi->netdev);
4096 netif_tx_wake_all_queues(vsi->netdev);
4097 } else {
4098 netif_carrier_off(vsi->netdev);
4099 netif_tx_stop_all_queues(vsi->netdev);
4100 }
4101 break;
4102
4103 case I40E_VSI_SRIOV:
4104 break;
4105
4106 case I40E_VSI_VMDQ2:
4107 case I40E_VSI_CTRL:
4108 case I40E_VSI_MIRROR:
4109 default:
4110 /* there is no notification for other VSIs */
4111 break;
4112 }
4113}
4114
4115/**
4116 * i40e_veb_link_event - notify elements on the veb of a link event
4117 * @veb: veb to be notified
4118 * @link_up: link up or down
4119 **/
4120static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
4121{
4122 struct i40e_pf *pf;
4123 int i;
4124
4125 if (!veb || !veb->pf)
4126 return;
4127 pf = veb->pf;
4128
4129 /* depth first... */
4130 for (i = 0; i < I40E_MAX_VEB; i++)
4131 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
4132 i40e_veb_link_event(pf->veb[i], link_up);
4133
4134 /* ... now the local VSIs */
4135 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4136 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
4137 i40e_vsi_link_event(pf->vsi[i], link_up);
4138}
4139
4140/**
4141 * i40e_link_event - Update netif_carrier status
4142 * @pf: board private structure
4143 **/
4144static void i40e_link_event(struct i40e_pf *pf)
4145{
4146 bool new_link, old_link;
4147
4148 new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP);
4149 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
4150
4151 if (new_link == old_link)
4152 return;
4153
4154 netdev_info(pf->vsi[pf->lan_vsi]->netdev,
4155 "NIC Link is %s\n", (new_link ? "Up" : "Down"));
4156
4157 /* Notify the base of the switch tree connected to
4158 * the link. Floating VEBs are not notified.
4159 */
4160 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
4161 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
4162 else
4163 i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link);
4164
4165 if (pf->vf)
4166 i40e_vc_notify_link_state(pf);
4167}
4168
/**
 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
 * @pf: board private structure
 *
 * Set the per-queue flags to request a check for stuck queues in the irq
 * clean functions, then force interrupts to be sure the irq clean is called.
 **/
static void i40e_check_hang_subtask(struct i40e_pf *pf)
{
	int i, v;

	/* If we're down or resetting, just bail */
	if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	/* for each VSI/netdev
	 *     for each Tx queue
	 *         set the check flag
	 *     for each q_vector
	 *         force an interrupt
	 */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];
		int armed = 0;

		/* skip absent, down, or carrier-off VSIs */
		if (!pf->vsi[v] ||
		    test_bit(__I40E_DOWN, &vsi->state) ||
		    (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
			continue;

		/* arm the hang check on every Tx ring and count how
		 * many rings are armed
		 */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			set_check_for_tx_hang(&vsi->tx_rings[i]);
			if (test_bit(__I40E_HANG_CHECK_ARMED,
				     &vsi->tx_rings[i].state))
				armed++;
		}

		/* fire a software interrupt on every vector of any VSI
		 * with at least one armed ring so the clean routines run
		 */
		if (armed) {
			if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
				wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
				     (I40E_PFINT_DYN_CTL0_INTENA_MASK |
				      I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
			} else {
				u16 vec = vsi->base_vector - 1;
				u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
					   I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
				for (i = 0; i < vsi->num_q_vectors; i++, vec++)
					wr32(&vsi->back->hw,
					     I40E_PFINT_DYN_CTLN(vec), val);
			}
			i40e_flush(&vsi->back->hw);
		}
	}
}
4223
4224/**
4225 * i40e_watchdog_subtask - Check and bring link up
4226 * @pf: board private structure
4227 **/
4228static void i40e_watchdog_subtask(struct i40e_pf *pf)
4229{
4230 int i;
4231
4232 /* if interface is down do nothing */
4233 if (test_bit(__I40E_DOWN, &pf->state) ||
4234 test_bit(__I40E_CONFIG_BUSY, &pf->state))
4235 return;
4236
4237 /* Update the stats for active netdevs so the network stack
4238 * can look at updated numbers whenever it cares to
4239 */
4240 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4241 if (pf->vsi[i] && pf->vsi[i]->netdev)
4242 i40e_update_stats(pf->vsi[i]);
4243
4244 /* Update the stats for the active switching components */
4245 for (i = 0; i < I40E_MAX_VEB; i++)
4246 if (pf->veb[i])
4247 i40e_update_veb_stats(pf->veb[i]);
4248}
4249
/**
 * i40e_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 *
 * Collects any pending reset-request bits into a single flags word
 * (clearing the request bits as it goes), then either services an
 * interrupt-delivered reset warning or kicks off the requested reset.
 **/
static void i40e_reset_subtask(struct i40e_pf *pf)
{
	u32 reset_flags = 0;

	/* gather and clear each outstanding request so a request that
	 * arrives while we reset is seen on the next service pass
	 */
	if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_REINIT_REQUESTED);
		clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
		clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
		clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
	}

	/* If there's a recovery already waiting, it takes
	 * precedence before starting a new reset sequence.
	 */
	if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
		i40e_handle_reset_warning(pf);
		return;
	}

	/* only start a new reset when something asked for one and we're
	 * neither down nor already busy with a config change
	 */
	if (reset_flags &&
	    !test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_CONFIG_BUSY, &pf->state))
		i40e_do_reset(pf, reset_flags);
}
4289
/**
 * i40e_handle_link_event - Handle link event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Copies the link-status fields out of the AdminQ event descriptor into
 * the hw struct, processes the change, then re-requests link info so
 * Link Status Event (LSE) reporting stays enabled and a status change
 * during processing is not missed.
 **/
static void i40e_handle_link_event(struct i40e_pf *pf,
				   struct i40e_arq_event_info *e)
{
	struct i40e_hw *hw = &pf->hw;
	/* the descriptor's raw params area carries the link status payload */
	struct i40e_aqc_get_link_status *status =
		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;

	/* save off old link status information */
	memcpy(&pf->hw.phy.link_info_old, hw_link_info,
	       sizeof(pf->hw.phy.link_info_old));

	/* update link status */
	hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
	hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
	hw_link_info->link_info = status->link_info;
	hw_link_info->an_info = status->an_info;
	hw_link_info->ext_info = status->ext_info;
	hw_link_info->lse_enable =
		le16_to_cpu(status->command_flags) &
		I40E_AQ_LSE_ENABLE;

	/* process the event */
	i40e_link_event(pf);

	/* Do a new status request to re-enable LSE reporting
	 * and load new status information into the hw struct,
	 * then see if the status changed while processing the
	 * initial event.
	 */
	i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
	i40e_link_event(pf);
}
4328
4329/**
4330 * i40e_clean_adminq_subtask - Clean the AdminQ rings
4331 * @pf: board private structure
4332 **/
4333static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
4334{
4335 struct i40e_arq_event_info event;
4336 struct i40e_hw *hw = &pf->hw;
4337 u16 pending, i = 0;
4338 i40e_status ret;
4339 u16 opcode;
4340 u32 val;
4341
4342 if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
4343 return;
4344
4345 event.msg_size = I40E_MAX_AQ_BUF_SIZE;
4346 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
4347 if (!event.msg_buf)
4348 return;
4349
4350 do {
4351 ret = i40e_clean_arq_element(hw, &event, &pending);
4352 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
4353 dev_info(&pf->pdev->dev, "No ARQ event found\n");
4354 break;
4355 } else if (ret) {
4356 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
4357 break;
4358 }
4359
4360 opcode = le16_to_cpu(event.desc.opcode);
4361 switch (opcode) {
4362
4363 case i40e_aqc_opc_get_link_status:
4364 i40e_handle_link_event(pf, &event);
4365 break;
4366 case i40e_aqc_opc_send_msg_to_pf:
4367 ret = i40e_vc_process_vf_msg(pf,
4368 le16_to_cpu(event.desc.retval),
4369 le32_to_cpu(event.desc.cookie_high),
4370 le32_to_cpu(event.desc.cookie_low),
4371 event.msg_buf,
4372 event.msg_size);
4373 break;
4374 case i40e_aqc_opc_lldp_update_mib:
4375 dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
4376 break;
4377 case i40e_aqc_opc_event_lan_overflow:
4378 dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
4379 i40e_handle_lan_overflow_event(pf, &event);
4380 break;
4381 default:
4382 dev_info(&pf->pdev->dev,
4383 "ARQ Error: Unknown event %d received\n",
4384 event.desc.opcode);
4385 break;
4386 }
4387 } while (pending && (i++ < pf->adminq_work_limit));
4388
4389 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
4390 /* re-enable Admin queue interrupt cause */
4391 val = rd32(hw, I40E_PFINT_ICR0_ENA);
4392 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4393 wr32(hw, I40E_PFINT_ICR0_ENA, val);
4394 i40e_flush(hw);
4395
4396 kfree(event.msg_buf);
4397}
4398
/**
 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses in to build the next layer of VEB.  We track the connections
 * through our own index numbers because the seid's from the HW could
 * change across the reset.
 **/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
	struct i40e_vsi *ctl_vsi = NULL;
	struct i40e_pf *pf = veb->pf;
	int v, veb_idx;
	int ret;

	/* build VSI that owns this VEB, temporarily attached to base VEB */
	for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
			ctl_vsi = pf->vsi[v];
			break;
		}
	}
	if (!ctl_vsi) {
		/* a VEB without an owner VSI cannot be rebuilt */
		dev_info(&pf->pdev->dev,
			 "missing owner VSI for veb_idx %d\n", veb->idx);
		ret = -ENOENT;
		goto end_reconstitute;
	}
	/* non-main owners hang off the main LAN VSI's uplink for now;
	 * they are moved onto the new VEB by i40e_add_veb() below
	 */
	if (ctl_vsi != pf->vsi[pf->lan_vsi])
		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
	ret = i40e_add_vsi(ctl_vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "rebuild of owner VSI failed: %d\n", ret);
		goto end_reconstitute;
	}
	i40e_vsi_reset_stats(ctl_vsi);

	/* create the VEB in the switch and move the VSI onto the VEB */
	ret = i40e_add_veb(veb, ctl_vsi);
	if (ret)
		goto end_reconstitute;

	/* create the remaining VSIs attached to this VEB */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
			continue;

		if (pf->vsi[v]->veb_idx == veb->idx) {
			struct i40e_vsi *vsi = pf->vsi[v];
			/* point the child at the VEB's fresh HW seid */
			vsi->uplink_seid = veb->seid;
			ret = i40e_add_vsi(vsi);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "rebuild of vsi_idx %d failed: %d\n",
					 v, ret);
				goto end_reconstitute;
			}
			i40e_vsi_reset_stats(vsi);
		}
	}

	/* create any VEBs attached to this VEB - RECURSION */
	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
			pf->veb[veb_idx]->uplink_seid = veb->seid;
			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
			if (ret)
				break;
		}
	}

end_reconstitute:
	return ret;
}
4477
/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 *
 * Queries the function capabilities via the AdminQ; the AQ layer fills
 * in pf->hw.func_caps as a side effect.  If the firmware reports the
 * buffer was too small (AQ ENOMEM) the request is retried with the size
 * the firmware asked for.
 **/
static int i40e_get_capabilities(struct i40e_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
	u16 data_size;
	int buf_len;
	int err;

	/* initial guess: room for 40 capability records */
	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	do {
		cap_buf = kzalloc(buf_len, GFP_KERNEL);
		if (!cap_buf)
			return -ENOMEM;

		/* this loads the data into the hw struct for us */
		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
					    &data_size,
					    i40e_aqc_opc_list_func_capabilities,
					    NULL);
		/* data loaded, buffer no longer needed */
		kfree(cap_buf);

		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
			/* retry with a larger buffer; data_size is the size
			 * the firmware says it needs
			 */
			buf_len = data_size;
		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
			dev_info(&pf->pdev->dev,
				 "capability discovery failed: aq=%d\n",
				 pf->hw.aq.asq_last_status);
			return -ENODEV;
		}
	} while (err);

	/* optional debug dump of the discovered capabilities */
	if (pf->hw.debug_mask & I40E_DEBUG_USER)
		dev_info(&pf->pdev->dev,
			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
			 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
			 pf->hw.func_caps.num_msix_vectors,
			 pf->hw.func_caps.num_msix_vectors_vf,
			 pf->hw.func_caps.fd_filters_guaranteed,
			 pf->hw.func_caps.fd_filters_best_effort,
			 pf->hw.func_caps.num_tx_qp,
			 pf->hw.func_caps.num_vsis);

	return 0;
}
4527
4528/**
4529 * i40e_fdir_setup - initialize the Flow Director resources
4530 * @pf: board private structure
4531 **/
4532static void i40e_fdir_setup(struct i40e_pf *pf)
4533{
4534 struct i40e_vsi *vsi;
4535 bool new_vsi = false;
4536 int err, i;
4537
4538 if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED|I40E_FLAG_FDIR_ATR_ENABLED)))
4539 return;
4540
4541 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
4542
4543 /* find existing or make new FDIR VSI */
4544 vsi = NULL;
4545 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4546 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
4547 vsi = pf->vsi[i];
4548 if (!vsi) {
4549 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->mac_seid, 0);
4550 if (!vsi) {
4551 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
4552 pf->flags &= ~I40E_FLAG_FDIR_ENABLED;
4553 return;
4554 }
4555 new_vsi = true;
4556 }
4557 WARN_ON(vsi->base_queue != I40E_FDIR_RING);
4558 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_rings);
4559
4560 err = i40e_vsi_setup_tx_resources(vsi);
4561 if (!err)
4562 err = i40e_vsi_setup_rx_resources(vsi);
4563 if (!err)
4564 err = i40e_vsi_configure(vsi);
4565 if (!err && new_vsi) {
4566 char int_name[IFNAMSIZ + 9];
4567 snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
4568 dev_driver_string(&pf->pdev->dev));
4569 err = i40e_vsi_request_irq(vsi, int_name);
4570 }
4571 if (!err)
4572 err = i40e_up_complete(vsi);
4573
4574 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
4575}
4576
4577/**
4578 * i40e_fdir_teardown - release the Flow Director resources
4579 * @pf: board private structure
4580 **/
4581static void i40e_fdir_teardown(struct i40e_pf *pf)
4582{
4583 int i;
4584
4585 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
4586 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
4587 i40e_vsi_release(pf->vsi[i]);
4588 break;
4589 }
4590 }
4591}
4592
/**
 * i40e_handle_reset_warning - prep for the core to reset
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/
static void i40e_handle_reset_warning(struct i40e_pf *pf)
{
	struct i40e_driver_version dv;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 v;

	/* consume the warning; if a recovery is already in flight, bail */
	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
		return;

	dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	i40e_vc_notify_reset(pf);

	/* quiesce the VSIs and their queues that are not already DOWN */
	i40e_pf_quiesce_all_vsi(pf);

	/* the reset invalidates HW seids; zero them so nothing stale is
	 * used before the rebuild assigns new ones
	 */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_pf_reset(hw);
	if (ret)
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
	pf->pfr_count++;

	/* if the driver went down while we reset, don't rebuild */
	if (test_bit(__I40E_DOWN, &pf->state))
		goto end_core_reset;
	dev_info(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
		goto end_core_reset;
	}

	ret = i40e_get_capabilities(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
			 ret);
		goto end_core_reset;
	}

	/* call shutdown HMC */
	ret = i40e_shutdown_lan_hmc(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

	/* re-create the HMC with the (possibly changed) capability counts */
	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp,
				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

	/* do basic switch setup */
	ret = i40e_setup_pf_switch(pf);
	if (ret)
		goto end_core_reset;

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * try to recover minimal use by getting the basic PF VSI working.
	 */
	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
		dev_info(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					pf->vsi[pf->lan_vsi]->uplink_seid
								= pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}

	if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
		dev_info(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
		/* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_core_reset;
		}
	}

	/* reinit the misc interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		ret = i40e_setup_misc_vector(pf);

	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);

	/* tell the firmware that we're starting */
	dv.major_version = DRV_VERSION_MAJOR;
	dv.minor_version = DRV_VERSION_MINOR;
	dv.build_version = DRV_VERSION_BUILD;
	dv.subbuild_version = 0;
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);

	dev_info(&pf->pdev->dev, "PF reset done\n");

end_core_reset:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
}
4750
4751/**
4752 * i40e_handle_mdd_event
4753 * @pf: pointer to the pf structure
4754 *
4755 * Called from the MDD irq handler to identify possibly malicious vfs
4756 **/
4757static void i40e_handle_mdd_event(struct i40e_pf *pf)
4758{
4759 struct i40e_hw *hw = &pf->hw;
4760 bool mdd_detected = false;
4761 struct i40e_vf *vf;
4762 u32 reg;
4763 int i;
4764
4765 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
4766 return;
4767
4768 /* find what triggered the MDD event */
4769 reg = rd32(hw, I40E_GL_MDET_TX);
4770 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
4771 u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK)
4772 >> I40E_GL_MDET_TX_FUNCTION_SHIFT;
4773 u8 event = (reg & I40E_GL_MDET_TX_EVENT_SHIFT)
4774 >> I40E_GL_MDET_TX_EVENT_SHIFT;
4775 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
4776 >> I40E_GL_MDET_TX_QUEUE_SHIFT;
4777 dev_info(&pf->pdev->dev,
4778 "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n",
4779 event, queue, func);
4780 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
4781 mdd_detected = true;
4782 }
4783 reg = rd32(hw, I40E_GL_MDET_RX);
4784 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
4785 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK)
4786 >> I40E_GL_MDET_RX_FUNCTION_SHIFT;
4787 u8 event = (reg & I40E_GL_MDET_RX_EVENT_SHIFT)
4788 >> I40E_GL_MDET_RX_EVENT_SHIFT;
4789 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
4790 >> I40E_GL_MDET_RX_QUEUE_SHIFT;
4791 dev_info(&pf->pdev->dev,
4792 "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n",
4793 event, queue, func);
4794 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
4795 mdd_detected = true;
4796 }
4797
4798 /* see if one of the VFs needs its hand slapped */
4799 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
4800 vf = &(pf->vf[i]);
4801 reg = rd32(hw, I40E_VP_MDET_TX(i));
4802 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
4803 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
4804 vf->num_mdd_events++;
4805 dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i);
4806 }
4807
4808 reg = rd32(hw, I40E_VP_MDET_RX(i));
4809 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
4810 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
4811 vf->num_mdd_events++;
4812 dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i);
4813 }
4814
4815 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
4816 dev_info(&pf->pdev->dev,
4817 "Too many MDD events on VF %d, disabled\n", i);
4818 dev_info(&pf->pdev->dev,
4819 "Use PF Control I/F to re-enable the VF\n");
4820 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
4821 }
4822 }
4823
4824 /* re-enable mdd interrupt cause */
4825 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
4826 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4827 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4828 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4829 i40e_flush(hw);
4830}
4831
/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 *
 * Runs the periodic maintenance subtasks in order (reset handling first,
 * AdminQ drain last), then reschedules itself immediately if the pass
 * took too long or any event-pending bit remains set.
 **/
static void i40e_service_task(struct work_struct *work)
{
	struct i40e_pf *pf = container_of(work,
					  struct i40e_pf,
					  service_task);
	unsigned long start_time = jiffies;

	/* subtask order is deliberate: resets are serviced before any
	 * other work that might touch hardware state
	 */
	i40e_reset_subtask(pf);
	i40e_handle_mdd_event(pf);
	i40e_vc_process_vflr_event(pf);
	i40e_watchdog_subtask(pf);
	i40e_fdir_reinit_subtask(pf);
	i40e_check_hang_subtask(pf);
	i40e_sync_filters_subtask(pf);
	i40e_clean_adminq_subtask(pf);

	i40e_service_event_complete(pf);

	/* If the tasks have taken longer than one timer cycle or there
	 * is more work to be done, reschedule the service task now
	 * rather than wait for the timer to tick again.
	 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
	    test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
	    test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
	    test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		i40e_service_event_schedule(pf);
}
4864
4865/**
4866 * i40e_service_timer - timer callback
4867 * @data: pointer to PF struct
4868 **/
4869static void i40e_service_timer(unsigned long data)
4870{
4871 struct i40e_pf *pf = (struct i40e_pf *)data;
4872
4873 mod_timer(&pf->service_timer,
4874 round_jiffies(jiffies + pf->service_timer_period));
4875 i40e_service_event_schedule(pf);
4876}
4877
4878/**
4879 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
4880 * @vsi: the VSI being configured
4881 **/
4882static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
4883{
4884 struct i40e_pf *pf = vsi->back;
4885
4886 switch (vsi->type) {
4887 case I40E_VSI_MAIN:
4888 vsi->alloc_queue_pairs = pf->num_lan_qps;
4889 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
4890 I40E_REQ_DESCRIPTOR_MULTIPLE);
4891 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4892 vsi->num_q_vectors = pf->num_lan_msix;
4893 else
4894 vsi->num_q_vectors = 1;
4895
4896 break;
4897
4898 case I40E_VSI_FDIR:
4899 vsi->alloc_queue_pairs = 1;
4900 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
4901 I40E_REQ_DESCRIPTOR_MULTIPLE);
4902 vsi->num_q_vectors = 1;
4903 break;
4904
4905 case I40E_VSI_VMDQ2:
4906 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
4907 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
4908 I40E_REQ_DESCRIPTOR_MULTIPLE);
4909 vsi->num_q_vectors = pf->num_vmdq_msix;
4910 break;
4911
4912 case I40E_VSI_SRIOV:
4913 vsi->alloc_queue_pairs = pf->num_vf_qps;
4914 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
4915 I40E_REQ_DESCRIPTOR_MULTIPLE);
4916 break;
4917
4918 default:
4919 WARN_ON(1);
4920 return -ENODATA;
4921 }
4922
4923 return 0;
4924}
4925
/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
		i++;
	if (i >= pf->hw.func_caps.num_vsis) {
		/* wrapped past the end: rescan from the start up to where
		 * the forward scan began
		 */
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto err_alloc_vsi;  /* out of VSI slots! */
	}
	/* remember where to start the next search */
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto err_alloc_vsi;
	}
	vsi->type = type;
	vsi->back = pf;
	/* new VSIs start life marked down until explicitly brought up */
	set_bit(__I40E_DOWN, &vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->rx_itr_setting = pf->rx_itr_default;
	vsi->tx_itr_setting = pf->tx_itr_default;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	INIT_LIST_HEAD(&vsi->mac_filter_list);

	i40e_set_num_rings_in_vsi(vsi);

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
err_alloc_vsi:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
4994
/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 *
 * Validates the VSI against the PF's table under the switch mutex,
 * returns its queue/irq lumps to the piles, clears the table slot,
 * and frees the structure.  Always returns 0.
 **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	/* a VSI with no back-pointer was never registered in pf->vsi[],
	 * so there is nothing to unhook -- just free it
	 */
	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
			vsi->idx, vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* sanity check: the slot must point back at this exact VSI */
	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx],
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* updates the pf for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	pf->vsi[vsi->idx] = NULL;
	/* let the next allocation reuse this freed slot */
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}
5042
5043/**
5044 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
5045 * @vsi: the VSI being configured
5046 **/
5047static int i40e_alloc_rings(struct i40e_vsi *vsi)
5048{
5049 struct i40e_pf *pf = vsi->back;
5050 int ret = 0;
5051 int i;
5052
5053 vsi->rx_rings = kcalloc(vsi->alloc_queue_pairs,
5054 sizeof(struct i40e_ring), GFP_KERNEL);
5055 if (!vsi->rx_rings) {
5056 ret = -ENOMEM;
5057 goto err_alloc_rings;
5058 }
5059
5060 vsi->tx_rings = kcalloc(vsi->alloc_queue_pairs,
5061 sizeof(struct i40e_ring), GFP_KERNEL);
5062 if (!vsi->tx_rings) {
5063 ret = -ENOMEM;
5064 kfree(vsi->rx_rings);
5065 goto err_alloc_rings;
5066 }
5067
5068 /* Set basic values in the rings to be used later during open() */
5069 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
5070 struct i40e_ring *rx_ring = &vsi->rx_rings[i];
5071 struct i40e_ring *tx_ring = &vsi->tx_rings[i];
5072
5073 tx_ring->queue_index = i;
5074 tx_ring->reg_idx = vsi->base_queue + i;
5075 tx_ring->ring_active = false;
5076 tx_ring->vsi = vsi;
5077 tx_ring->netdev = vsi->netdev;
5078 tx_ring->dev = &pf->pdev->dev;
5079 tx_ring->count = vsi->num_desc;
5080 tx_ring->size = 0;
5081 tx_ring->dcb_tc = 0;
5082
5083 rx_ring->queue_index = i;
5084 rx_ring->reg_idx = vsi->base_queue + i;
5085 rx_ring->ring_active = false;
5086 rx_ring->vsi = vsi;
5087 rx_ring->netdev = vsi->netdev;
5088 rx_ring->dev = &pf->pdev->dev;
5089 rx_ring->count = vsi->num_desc;
5090 rx_ring->size = 0;
5091 rx_ring->dcb_tc = 0;
5092 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
5093 set_ring_16byte_desc_enabled(rx_ring);
5094 else
5095 clear_ring_16byte_desc_enabled(rx_ring);
5096 }
5097
5098err_alloc_rings:
5099 return ret;
5100}
5101
5102/**
5103 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
5104 * @vsi: the VSI being cleaned
5105 **/
5106static int i40e_vsi_clear_rings(struct i40e_vsi *vsi)
5107{
5108 if (vsi) {
5109 kfree(vsi->rx_rings);
5110 kfree(vsi->tx_rings);
5111 }
5112
5113 return 0;
5114}
5115
5116/**
5117 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
5118 * @pf: board private structure
5119 * @vectors: the number of MSI-X vectors to request
5120 *
5121 * Returns the number of vectors reserved, or error
5122 **/
5123static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
5124{
5125 int err = 0;
5126
5127 pf->num_msix_entries = 0;
5128 while (vectors >= I40E_MIN_MSIX) {
5129 err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
5130 if (err == 0) {
5131 /* good to go */
5132 pf->num_msix_entries = vectors;
5133 break;
5134 } else if (err < 0) {
5135 /* total failure */
5136 dev_info(&pf->pdev->dev,
5137 "MSI-X vector reservation failed: %d\n", err);
5138 vectors = 0;
5139 break;
5140 } else {
5141 /* err > 0 is the hint for retry */
5142 dev_info(&pf->pdev->dev,
5143 "MSI-X vectors wanted %d, retrying with %d\n",
5144 vectors, err);
5145 vectors = err;
5146 }
5147 }
5148
5149 if (vectors > 0 && vectors < I40E_MIN_MSIX) {
5150 dev_info(&pf->pdev->dev,
5151 "Couldn't get enough vectors, only %d available\n",
5152 vectors);
5153 vectors = 0;
5154 }
5155
5156 return vectors;
5157}
5158
/**
 * i40e_init_msix - Setup the MSIX capability
 * @pf: board private structure
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_init_msix(struct i40e_pf *pf)
{
	i40e_status err = 0;
	struct i40e_hw *hw = &pf->hw;
	int v_budget, i;
	int vec;

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return -ENODEV;

	/* The number of vectors we'll request will be comprised of:
	 *  - Add 1 for "other" cause for Admin Queue events, etc.
	 *  - The number of LAN queue pairs
	 *        already adjusted for the NUMA node
	 *        assumes symmetric Tx/Rx pairing
	 *  - The number of VMDq pairs
	 * Once we count this up, try the request.
	 *
	 * If we can't get what we want, we'll simplify to nearly nothing
	 * and try again.  If that still fails, we punt.
	 */
	pf->num_lan_msix = pf->num_lan_qps;
	pf->num_vmdq_msix = pf->num_vmdq_qps;
	v_budget = 1 + pf->num_lan_msix;
	v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
	if (pf->flags & I40E_FLAG_FDIR_ENABLED)
		v_budget++;

	/* Scale down if necessary, and the rings will share vectors */
	v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);

	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!pf->msix_entries)
		return -ENOMEM;

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;
	vec = i40e_reserve_msix_vectors(pf, v_budget);
	if (vec < I40E_MIN_MSIX) {
		/* couldn't even get the minimum: fall back to non-MSIX */
		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		return -ENODEV;

	} else if (vec == I40E_MIN_MSIX) {
		/* Adjust for minimal MSIX use */
		dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
		pf->num_vmdq_vsis = 0;
		pf->num_vmdq_qps = 0;
		pf->num_vmdq_msix = 0;
		pf->num_lan_qps = 1;
		pf->num_lan_msix = 1;

	} else if (vec != v_budget) {
		/* Scale vector usage down */
		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
		vec--;                    /* reserve the misc vector */

		/* partition out the remaining vectors */
		switch (vec) {
		case 2:
			pf->num_vmdq_vsis = 1;
			pf->num_lan_msix = 1;
			break;
		case 3:
			pf->num_vmdq_vsis = 1;
			pf->num_lan_msix = 2;
			break;
		default:
			/* split roughly half to LAN, remainder to VMDq */
			pf->num_lan_msix = min_t(int, (vec / 2),
						 pf->num_lan_qps);
			pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
						  I40E_DEFAULT_NUM_VMDQ_VSI);
			break;
		}
	}

	return err;
}
5248
5249/**
5250 * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
5251 * @vsi: the VSI being configured
5252 *
5253 * We allocate one q_vector per queue interrupt. If allocation fails we
5254 * return -ENOMEM.
5255 **/
5256static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
5257{
5258 struct i40e_pf *pf = vsi->back;
5259 int v_idx, num_q_vectors;
5260
5261 /* if not MSIX, give the one vector only to the LAN VSI */
5262 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5263 num_q_vectors = vsi->num_q_vectors;
5264 else if (vsi == pf->vsi[pf->lan_vsi])
5265 num_q_vectors = 1;
5266 else
5267 return -EINVAL;
5268
5269 vsi->q_vectors = kcalloc(num_q_vectors,
5270 sizeof(struct i40e_q_vector),
5271 GFP_KERNEL);
5272 if (!vsi->q_vectors)
5273 return -ENOMEM;
5274
5275 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
5276 vsi->q_vectors[v_idx].vsi = vsi;
5277 vsi->q_vectors[v_idx].v_idx = v_idx;
5278 cpumask_set_cpu(v_idx, &vsi->q_vectors[v_idx].affinity_mask);
5279 if (vsi->netdev)
5280 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx].napi,
5281 i40e_napi_poll, vsi->work_limit);
5282 }
5283
5284 return 0;
5285}
5286
/**
 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 *
 * Tries MSI-X first; on failure strips the feature flags that require
 * multiple vectors and falls back to MSI, then to legacy interrupts.
 * Finally reserves irq_pile slot 0 for the misc/other interrupt.
 **/
static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
	int err = 0;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_init_msix(pf);
		if (err) {
			/* without MSI-X none of the multi-vector features
			 * can work, so turn them all off
			 */
			pf->flags &= ~(I40E_FLAG_RSS_ENABLED	   |
					I40E_FLAG_MQ_ENABLED	   |
					I40E_FLAG_DCB_ENABLED	   |
					I40E_FLAG_SRIOV_ENABLED	   |
					I40E_FLAG_FDIR_ENABLED	   |
					I40E_FLAG_FDIR_ATR_ENABLED |
					I40E_FLAG_VMDQ_ENABLED);

			/* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
		err = pci_enable_msi(pf->pdev);
		if (err) {
			/* fall through to legacy INTx */
			dev_info(&pf->pdev->dev,
				 "MSI init failed (%d), trying legacy.\n", err);
			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
		}
	}

	/* track first vector for misc interrupts */
	err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
}
5324
/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors. This is not used
 * when in MSI or Legacy interrupt mode.
 *
 * Returns 0 on success, -EFAULT if the IRQ could not be requested.
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
    struct i40e_hw *hw = &pf->hw;
    int err = 0;

    /* Only request the irq if this is the first time through, and
     * not when we're rebuilding after a Reset
     */
    if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
        err = request_irq(pf->msix_entries[0].vector,
                          i40e_intr, 0, pf->misc_int_name, pf);
        if (err) {
            dev_info(&pf->pdev->dev,
                     "request_irq for msix_misc failed: %d\n", err);
            /* NOTE(review): the real request_irq() errno is folded
             * into -EFAULT here — confirm callers don't need it.
             */
            return -EFAULT;
        }
    }

    i40e_enable_misc_int_causes(hw);

    /* associate no queues to the misc vector */
    wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
    /* program a default throttle rate on interrupt cause 0 */
    wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);

    /* flush the register writes before enabling the interrupt */
    i40e_flush(hw);

    i40e_irq_dynamic_enable_icr0(pf);

    return err;
}
5363
/**
 * i40e_config_rss - Prepare for RSS if used
 * @pf: board private structure
 *
 * Programs the RSS hash key, enables hashing for the TCP/UDP v4/v6 and
 * fragment packet types, and fills the redirection LUT with the active
 * queues in round-robin order.  Always returns 0.
 **/
static int i40e_config_rss(struct i40e_pf *pf)
{
    struct i40e_hw *hw = &pf->hw;
    u32 lut = 0;
    int i, j;
    u64 hena;
    /* Set of random keys generated using kernel random number generator */
    static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
        0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
        0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
        0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};

    /* Fill out hash function seed */
    for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
        wr32(hw, I40E_PFQF_HKEY(i), seed[i]);

    /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
    /* HENA is a 64-bit enable mask split across two 32-bit registers;
     * OR the new ptypes into whatever is already enabled.
     */
    hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
           ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
    hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
            ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
            ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
            ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
            ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
            ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
            ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
            ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
            ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)|
            ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
    wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
    wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

    /* Populate the LUT with max no. of queues in round robin fashion */
    for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {

        /* The assumption is that lan qp count will be the highest
         * qp count for any PF VSI that needs RSS.
         * If multiple VSIs need RSS support, all the qp counts
         * for those VSIs should be a power of 2 for RSS to work.
         * If LAN VSI is the only consumer for RSS then this requirement
         * is not necessary.
         */
        if (j == pf->rss_size)
            j = 0;
        /* lut = 4-byte sliding window of 4 lut entries */
        lut = (lut << 8) | (j &
                 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
        /* On i = 3, we have 4 entries in lut; write to the register */
        if ((i & 3) == 3)
            wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
    }
    i40e_flush(hw);

    return 0;
}
5423
5424/**
5425 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
5426 * @pf: board private structure to initialize
5427 *
5428 * i40e_sw_init initializes the Adapter private data structure.
5429 * Fields are initialized based on PCI device information and
5430 * OS network device settings (MTU size).
5431 **/
5432static int i40e_sw_init(struct i40e_pf *pf)
5433{
5434 int err = 0;
5435 int size;
5436
5437 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
5438 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
5439 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
5440 if (I40E_DEBUG_USER & debug)
5441 pf->hw.debug_mask = debug;
5442 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
5443 I40E_DEFAULT_MSG_ENABLE);
5444 }
5445
5446 /* Set default capability flags */
5447 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
5448 I40E_FLAG_MSI_ENABLED |
5449 I40E_FLAG_MSIX_ENABLED |
5450 I40E_FLAG_RX_PS_ENABLED |
5451 I40E_FLAG_MQ_ENABLED |
5452 I40E_FLAG_RX_1BUF_ENABLED;
5453
5454 pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
5455 if (pf->hw.func_caps.rss) {
5456 pf->flags |= I40E_FLAG_RSS_ENABLED;
5457 pf->rss_size = min_t(int, pf->rss_size_max,
5458 nr_cpus_node(numa_node_id()));
5459 } else {
5460 pf->rss_size = 1;
5461 }
5462
5463 if (pf->hw.func_caps.dcb)
5464 pf->num_tc_qps = I40E_DEFAULT_QUEUES_PER_TC;
5465 else
5466 pf->num_tc_qps = 0;
5467
5468 if (pf->hw.func_caps.fd) {
5469 /* FW/NVM is not yet fixed in this regard */
5470 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
5471 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
5472 pf->flags |= I40E_FLAG_FDIR_ATR_ENABLED;
5473 dev_info(&pf->pdev->dev,
5474 "Flow Director ATR mode Enabled\n");
5475 pf->flags |= I40E_FLAG_FDIR_ENABLED;
5476 dev_info(&pf->pdev->dev,
5477 "Flow Director Side Band mode Enabled\n");
5478 pf->fdir_pf_filter_count =
5479 pf->hw.func_caps.fd_filters_guaranteed;
5480 }
5481 } else {
5482 pf->fdir_pf_filter_count = 0;
5483 }
5484
5485 if (pf->hw.func_caps.vmdq) {
5486 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
5487 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
5488 pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
5489 }
5490
5491 /* MFP mode enabled */
5492 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
5493 pf->flags |= I40E_FLAG_MFP_ENABLED;
5494 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
5495 }
5496
5497#ifdef CONFIG_PCI_IOV
5498 if (pf->hw.func_caps.num_vfs) {
5499 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
5500 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
5501 pf->num_req_vfs = min_t(int,
5502 pf->hw.func_caps.num_vfs,
5503 I40E_MAX_VF_COUNT);
5504 }
5505#endif /* CONFIG_PCI_IOV */
5506 pf->eeprom_version = 0xDEAD;
5507 pf->lan_veb = I40E_NO_VEB;
5508 pf->lan_vsi = I40E_NO_VSI;
5509
5510 /* set up queue assignment tracking */
5511 size = sizeof(struct i40e_lump_tracking)
5512 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
5513 pf->qp_pile = kzalloc(size, GFP_KERNEL);
5514 if (!pf->qp_pile) {
5515 err = -ENOMEM;
5516 goto sw_init_done;
5517 }
5518 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
5519 pf->qp_pile->search_hint = 0;
5520
5521 /* set up vector assignment tracking */
5522 size = sizeof(struct i40e_lump_tracking)
5523 + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
5524 pf->irq_pile = kzalloc(size, GFP_KERNEL);
5525 if (!pf->irq_pile) {
5526 kfree(pf->qp_pile);
5527 err = -ENOMEM;
5528 goto sw_init_done;
5529 }
5530 pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
5531 pf->irq_pile->search_hint = 0;
5532
5533 mutex_init(&pf->switch_mutex);
5534
5535sw_init_done:
5536 return err;
5537}
5538
5539/**
5540 * i40e_set_features - set the netdev feature flags
5541 * @netdev: ptr to the netdev being adjusted
5542 * @features: the feature set that the stack is suggesting
5543 **/
5544static int i40e_set_features(struct net_device *netdev,
5545 netdev_features_t features)
5546{
5547 struct i40e_netdev_priv *np = netdev_priv(netdev);
5548 struct i40e_vsi *vsi = np->vsi;
5549
5550 if (features & NETIF_F_HW_VLAN_CTAG_RX)
5551 i40e_vlan_stripping_enable(vsi);
5552 else
5553 i40e_vlan_stripping_disable(vsi);
5554
5555 return 0;
5556}
5557
/* netdev callbacks shared by the PF LAN netdev and the VMDq netdevs */
static const struct net_device_ops i40e_netdev_ops = {
    .ndo_open       = i40e_open,
    .ndo_stop       = i40e_close,
    .ndo_start_xmit     = i40e_lan_xmit_frame,
    .ndo_get_stats64    = i40e_get_netdev_stats_struct,
    .ndo_set_rx_mode    = i40e_set_rx_mode,
    .ndo_validate_addr  = eth_validate_addr,
    .ndo_set_mac_address    = i40e_set_mac,
    .ndo_change_mtu     = i40e_change_mtu,
    .ndo_tx_timeout     = i40e_tx_timeout,
    .ndo_vlan_rx_add_vid    = i40e_vlan_rx_add_vid,
    .ndo_vlan_rx_kill_vid   = i40e_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller    = i40e_netpoll,
#endif
    .ndo_setup_tc       = i40e_setup_tc,
    .ndo_set_features   = i40e_set_features,
    /* SR-IOV VF management hooks */
    .ndo_set_vf_mac     = i40e_ndo_set_vf_mac,
    .ndo_set_vf_vlan    = i40e_ndo_set_vf_port_vlan,
    .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
    .ndo_get_vf_config  = i40e_ndo_get_vf_config,
};
5580
/**
 * i40e_config_netdev - Setup the netdev flags
 * @vsi: the VSI being configured
 *
 * Allocates the netdev, wires it to the VSI, programs the offload
 * feature sets, and assigns a MAC address (the permanent address for
 * the main VSI, a random one for VMDq VSIs).
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
    struct i40e_pf *pf = vsi->back;
    struct i40e_hw *hw = &pf->hw;
    struct i40e_netdev_priv *np;
    struct net_device *netdev;
    u8 mac_addr[ETH_ALEN];
    int etherdev_size;

    etherdev_size = sizeof(struct i40e_netdev_priv);
    netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
    if (!netdev)
        return -ENOMEM;

    /* link the netdev and the VSI both ways */
    vsi->netdev = netdev;
    np = netdev_priv(netdev);
    np->vsi = vsi;

    /* offloads supported on encapsulated (tunnel) traffic */
    netdev->hw_enc_features = NETIF_F_IP_CSUM    |
                  NETIF_F_GSO_UDP_TUNNEL |
                  NETIF_F_TSO        |
                  NETIF_F_SG;

    netdev->features = NETIF_F_SG          |
               NETIF_F_IP_CSUM         |
               NETIF_F_SCTP_CSUM       |
               NETIF_F_HIGHDMA         |
               NETIF_F_GSO_UDP_TUNNEL      |
               NETIF_F_HW_VLAN_CTAG_TX     |
               NETIF_F_HW_VLAN_CTAG_RX     |
               NETIF_F_HW_VLAN_CTAG_FILTER |
               NETIF_F_IPV6_CSUM       |
               NETIF_F_TSO             |
               NETIF_F_TSO6            |
               NETIF_F_RXCSUM          |
               NETIF_F_RXHASH          |
               0;

    /* copy netdev features into list of user selectable features */
    netdev->hw_features |= netdev->features;

    if (vsi->type == I40E_VSI_MAIN) {
        SET_NETDEV_DEV(netdev, &pf->pdev->dev);
        memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
    } else {
        /* relate the VSI_VMDQ name to the VSI_MAIN name */
        snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
             pf->vsi[pf->lan_vsi]->netdev->name);
        random_ether_addr(mac_addr);
        i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
    }

    memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
    memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
    /* vlan gets same features (except vlan offload)
     * after any tweaks for specific VSI types
     */
    netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
                             NETIF_F_HW_VLAN_CTAG_RX |
                           NETIF_F_HW_VLAN_CTAG_FILTER);
    netdev->priv_flags |= IFF_UNICAST_FLT;
    netdev->priv_flags |= IFF_SUPP_NOFCS;
    /* Setup netdev TC information */
    i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

    netdev->netdev_ops = &i40e_netdev_ops;
    netdev->watchdog_timeo = 5 * HZ;
    i40e_set_ethtool_ops(netdev);

    return 0;
}
5658
5659/**
5660 * i40e_vsi_delete - Delete a VSI from the switch
5661 * @vsi: the VSI being removed
5662 *
5663 * Returns 0 on success, negative value on failure
5664 **/
5665static void i40e_vsi_delete(struct i40e_vsi *vsi)
5666{
5667 /* remove default VSI is not allowed */
5668 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
5669 return;
5670
5671 /* there is no HW VSI for FDIR */
5672 if (vsi->type == I40E_VSI_FDIR)
5673 return;
5674
5675 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
5676 return;
5677}
5678
5679/**
5680 * i40e_add_vsi - Add a VSI to the switch
5681 * @vsi: the VSI being configured
5682 *
5683 * This initializes a VSI context depending on the VSI type to be added and
5684 * passes it down to the add_vsi aq command.
5685 **/
5686static int i40e_add_vsi(struct i40e_vsi *vsi)
5687{
5688 int ret = -ENODEV;
5689 struct i40e_mac_filter *f, *ftmp;
5690 struct i40e_pf *pf = vsi->back;
5691 struct i40e_hw *hw = &pf->hw;
5692 struct i40e_vsi_context ctxt;
5693 u8 enabled_tc = 0x1; /* TC0 enabled */
5694 int f_count = 0;
5695
5696 memset(&ctxt, 0, sizeof(ctxt));
5697 switch (vsi->type) {
5698 case I40E_VSI_MAIN:
5699 /* The PF's main VSI is already setup as part of the
5700 * device initialization, so we'll not bother with
5701 * the add_vsi call, but we will retrieve the current
5702 * VSI context.
5703 */
5704 ctxt.seid = pf->main_vsi_seid;
5705 ctxt.pf_num = pf->hw.pf_id;
5706 ctxt.vf_num = 0;
5707 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
5708 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5709 if (ret) {
5710 dev_info(&pf->pdev->dev,
5711 "couldn't get pf vsi config, err %d, aq_err %d\n",
5712 ret, pf->hw.aq.asq_last_status);
5713 return -ENOENT;
5714 }
5715 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5716 vsi->info.valid_sections = 0;
5717
5718 vsi->seid = ctxt.seid;
5719 vsi->id = ctxt.vsi_number;
5720
5721 enabled_tc = i40e_pf_get_tc_map(pf);
5722
5723 /* MFP mode setup queue map and update VSI */
5724 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
5725 memset(&ctxt, 0, sizeof(ctxt));
5726 ctxt.seid = pf->main_vsi_seid;
5727 ctxt.pf_num = pf->hw.pf_id;
5728 ctxt.vf_num = 0;
5729 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5730 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5731 if (ret) {
5732 dev_info(&pf->pdev->dev,
5733 "update vsi failed, aq_err=%d\n",
5734 pf->hw.aq.asq_last_status);
5735 ret = -ENOENT;
5736 goto err;
5737 }
5738 /* update the local VSI info queue map */
5739 i40e_vsi_update_queue_map(vsi, &ctxt);
5740 vsi->info.valid_sections = 0;
5741 } else {
5742 /* Default/Main VSI is only enabled for TC0
5743 * reconfigure it to enable all TCs that are
5744 * available on the port in SFP mode.
5745 */
5746 ret = i40e_vsi_config_tc(vsi, enabled_tc);
5747 if (ret) {
5748 dev_info(&pf->pdev->dev,
5749 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
5750 enabled_tc, ret,
5751 pf->hw.aq.asq_last_status);
5752 ret = -ENOENT;
5753 }
5754 }
5755 break;
5756
5757 case I40E_VSI_FDIR:
5758 /* no queue mapping or actual HW VSI needed */
5759 vsi->info.valid_sections = 0;
5760 vsi->seid = 0;
5761 vsi->id = 0;
5762 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
5763 return 0;
5764 break;
5765
5766 case I40E_VSI_VMDQ2:
5767 ctxt.pf_num = hw->pf_id;
5768 ctxt.vf_num = 0;
5769 ctxt.uplink_seid = vsi->uplink_seid;
5770 ctxt.connection_type = 0x1; /* regular data port */
5771 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5772
5773 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5774
5775 /* This VSI is connected to VEB so the switch_id
5776 * should be set to zero by default.
5777 */
5778 ctxt.info.switch_id = 0;
5779 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5780 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5781
5782 /* Setup the VSI tx/rx queue map for TC0 only for now */
5783 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
5784 break;
5785
5786 case I40E_VSI_SRIOV:
5787 ctxt.pf_num = hw->pf_id;
5788 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
5789 ctxt.uplink_seid = vsi->uplink_seid;
5790 ctxt.connection_type = 0x1; /* regular data port */
5791 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5792
5793 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5794
5795 /* This VSI is connected to VEB so the switch_id
5796 * should be set to zero by default.
5797 */
5798 ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5799
5800 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
5801 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5802 /* Setup the VSI tx/rx queue map for TC0 only for now */
5803 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
5804 break;
5805
5806 default:
5807 return -ENODEV;
5808 }
5809
5810 if (vsi->type != I40E_VSI_MAIN) {
5811 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5812 if (ret) {
5813 dev_info(&vsi->back->pdev->dev,
5814 "add vsi failed, aq_err=%d\n",
5815 vsi->back->hw.aq.asq_last_status);
5816 ret = -ENOENT;
5817 goto err;
5818 }
5819 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5820 vsi->info.valid_sections = 0;
5821 vsi->seid = ctxt.seid;
5822 vsi->id = ctxt.vsi_number;
5823 }
5824
5825 /* If macvlan filters already exist, force them to get loaded */
5826 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
5827 f->changed = true;
5828 f_count++;
5829 }
5830 if (f_count) {
5831 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
5832 pf->flags |= I40E_FLAG_FILTER_SYNC;
5833 }
5834
5835 /* Update VSI BW information */
5836 ret = i40e_vsi_get_bw_info(vsi);
5837 if (ret) {
5838 dev_info(&pf->pdev->dev,
5839 "couldn't get vsi bw info, err %d, aq_err %d\n",
5840 ret, pf->hw.aq.asq_last_status);
5841 /* VSI is already added so not tearing that up */
5842 ret = 0;
5843 }
5844
5845err:
5846 return ret;
5847}
5848
/**
 * i40e_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Tears down the netdev (if any), removes the MAC filters, deletes the
 * HW switch element, and frees the SW state.  If this VSI was the last
 * consumer of its uplink VEB (other than the VEB's owner), the VEB is
 * released as well.
 *
 * Returns 0 on success or < 0 on error
 **/
int i40e_vsi_release(struct i40e_vsi *vsi)
{
    struct i40e_mac_filter *f, *ftmp;
    struct i40e_veb *veb = NULL;
    struct i40e_pf *pf;
    u16 uplink_seid;
    int i, n;

    pf = vsi->back;

    /* release of a VEB-owner or last VSI is not allowed */
    if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
        dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
             vsi->seid, vsi->uplink_seid);
        return -ENODEV;
    }
    if (vsi == pf->vsi[pf->lan_vsi] &&
        !test_bit(__I40E_DOWN, &pf->state)) {
        dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
        return -ENODEV;
    }

    /* remember the uplink before the VSI struct is freed below */
    uplink_seid = vsi->uplink_seid;
    if (vsi->type != I40E_VSI_SRIOV) {
        if (vsi->netdev_registered) {
            vsi->netdev_registered = false;
            if (vsi->netdev) {
                /* results in a call to i40e_close() */
                unregister_netdev(vsi->netdev);
                free_netdev(vsi->netdev);
                vsi->netdev = NULL;
            }
        } else {
            /* no netdev, so quiesce and free rings directly */
            if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
                i40e_down(vsi);
            i40e_vsi_free_irq(vsi);
            i40e_vsi_free_tx_resources(vsi);
            i40e_vsi_free_rx_resources(vsi);
        }
        i40e_vsi_disable_irq(vsi);
    }

    /* drop all MAC filters, then push the deletions to the HW */
    list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
        i40e_del_filter(vsi, f->macaddr, f->vlan,
                f->is_vf, f->is_netdev);
    i40e_sync_vsi_filters(vsi);

    i40e_vsi_delete(vsi);
    i40e_vsi_free_q_vectors(vsi);
    i40e_vsi_clear_rings(vsi);
    i40e_vsi_clear(vsi);

    /* If this was the last thing on the VEB, except for the
     * controlling VSI, remove the VEB, which puts the controlling
     * VSI onto the next level down in the switch.
     *
     * Well, okay, there's one more exception here: don't remove
     * the orphan VEBs yet.  We'll wait for an explicit remove request
     * from up the network stack.
     */
    for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
        if (pf->vsi[i] &&
            pf->vsi[i]->uplink_seid == uplink_seid &&
            (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
            n++;      /* count the VSIs */
        }
    }
    for (i = 0; i < I40E_MAX_VEB; i++) {
        if (!pf->veb[i])
            continue;
        if (pf->veb[i]->uplink_seid == uplink_seid)
            n++;     /* count the VEBs */
        if (pf->veb[i]->seid == uplink_seid)
            veb = pf->veb[i];
    }
    /* uplink_seid == 0 marks an orphan VEB, which stays put */
    if (n == 0 && veb && veb->uplink_seid != 0)
        i40e_veb_release(veb);

    return 0;
}
5935
/**
 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after i40e_vsi_mem_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 **/
static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
{
    int ret = -ENOENT;
    struct i40e_pf *pf = vsi->back;

    /* guard against double setup */
    if (vsi->q_vectors) {
        dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
             vsi->seid);
        return -EEXIST;
    }

    if (vsi->base_vector) {
        dev_info(&pf->pdev->dev,
             "VSI %d has non-zero base vector %d\n",
             vsi->seid, vsi->base_vector);
        return -EEXIST;
    }

    ret = i40e_alloc_q_vectors(vsi);
    if (ret) {
        dev_info(&pf->pdev->dev,
             "failed to allocate %d q_vector for VSI %d, ret=%d\n",
             vsi->num_q_vectors, vsi->seid, ret);
        vsi->num_q_vectors = 0;
        goto vector_setup_out;
    }

    /* reserve a contiguous range of IRQ vectors from the PF pile */
    vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
                     vsi->num_q_vectors, vsi->idx);
    /* NOTE(review): this error check relies on base_vector being a
     * signed type; if struct i40e_vsi declares it unsigned (not
     * visible here), "< 0" can never be true and get_lump failures
     * would go undetected — verify against the struct definition.
     */
    if (vsi->base_vector < 0) {
        dev_info(&pf->pdev->dev,
             "failed to get q tracking for VSI %d, err=%d\n",
             vsi->seid, vsi->base_vector);
        i40e_vsi_free_q_vectors(vsi);
        ret = -ENOENT;
        goto vector_setup_out;
    }

vector_setup_out:
    return ret;
}
5987
/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then add a VSI
 * to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configure VSI sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
                u16 uplink_seid, u32 param1)
{
    struct i40e_vsi *vsi = NULL;
    struct i40e_veb *veb = NULL;
    int ret, i;
    int v_idx;

    /* The requested uplink_seid must be either
     *     - the PF's port seid
     *              no VEB is needed because this is the PF
     *              or this is a Flow Director special case VSI
     *     - seid of an existing VEB
     *     - seid of a VSI that owns an existing VEB
     *     - seid of a VSI that doesn't own a VEB
     *              a new VEB is created and the VSI becomes the owner
     *     - seid of the PF VSI, which is what creates the first VEB
     *              this is a special case of the previous
     *
     * Find which uplink_seid we were given and create a new VEB if needed
     */
    for (i = 0; i < I40E_MAX_VEB; i++) {
        if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
            veb = pf->veb[i];
            break;
        }
    }

    if (!veb && uplink_seid != pf->mac_seid) {

        /* the uplink must then be a VSI; find it */
        for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
            if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
                vsi = pf->vsi[i];
                break;
            }
        }
        if (!vsi) {
            dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
                 uplink_seid);
            return NULL;
        }

        /* create a VEB on that VSI's uplink unless it already owns one */
        if (vsi->uplink_seid == pf->mac_seid)
            veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
                         vsi->tc_config.enabled_tc);
        else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
            veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
                         vsi->tc_config.enabled_tc);

        /* i40e_veb_setup may have found a pre-existing VEB instead;
         * locate whichever VEB now fronts this VSI's uplink
         */
        for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
            if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
                veb = pf->veb[i];
        }
        if (!veb) {
            dev_info(&pf->pdev->dev, "couldn't add VEB\n");
            return NULL;
        }

        vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
        uplink_seid = veb->seid;
    }

    /* get vsi sw struct */
    v_idx = i40e_vsi_mem_alloc(pf, type);
    if (v_idx < 0)
        goto err_alloc;
    vsi = pf->vsi[v_idx];
    vsi->type = type;
    vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);

    if (type == I40E_VSI_MAIN)
        pf->lan_vsi = v_idx;
    else if (type == I40E_VSI_SRIOV)
        vsi->vf_id = param1;
    /* assign it some queues */
    ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
    if (ret < 0) {
        dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
             vsi->seid, ret);
        goto err_vsi;
    }
    vsi->base_queue = ret;

    /* get a VSI from the hardware */
    vsi->uplink_seid = uplink_seid;
    ret = i40e_add_vsi(vsi);
    if (ret)
        goto err_vsi;

    switch (vsi->type) {
    /* setup the netdev if needed */
    case I40E_VSI_MAIN:
    case I40E_VSI_VMDQ2:
        ret = i40e_config_netdev(vsi);
        if (ret)
            goto err_netdev;
        ret = register_netdev(vsi->netdev);
        if (ret)
            goto err_netdev;
        vsi->netdev_registered = true;
        netif_carrier_off(vsi->netdev);
        /* fall through */

    case I40E_VSI_FDIR:
        /* set up vectors and rings if needed */
        ret = i40e_vsi_setup_vectors(vsi);
        if (ret)
            goto err_msix;

        ret = i40e_alloc_rings(vsi);
        if (ret)
            goto err_rings;

        /* map all of the rings to the q_vectors */
        i40e_vsi_map_rings_to_vectors(vsi);

        i40e_vsi_reset_stats(vsi);
        break;

    default:
        /* no netdev or rings for the other VSI types */
        break;
    }

    return vsi;

/* unwind in reverse order of what succeeded above */
err_rings:
    i40e_vsi_free_q_vectors(vsi);
err_msix:
    if (vsi->netdev_registered) {
        vsi->netdev_registered = false;
        unregister_netdev(vsi->netdev);
        free_netdev(vsi->netdev);
        vsi->netdev = NULL;
    }
err_netdev:
    i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
    i40e_vsi_clear(vsi);
err_alloc:
    return NULL;
}
6143
/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for given VEB and cache
 * the per-TC share/limit credits and max quanta in the veb struct.
 *
 * Returns 0 on success, or the AQ error code on failure.
 **/
static int i40e_veb_get_bw_info(struct i40e_veb *veb)
{
    struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
    struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
    struct i40e_pf *pf = veb->pf;
    struct i40e_hw *hw = &pf->hw;
    u32 tc_bw_max;
    int ret = 0;
    int i;

    ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
                          &bw_data, NULL);
    if (ret) {
        dev_info(&pf->pdev->dev,
             "query veb bw config failed, aq_err=%d\n",
             hw->aq.asq_last_status);
        goto out;
    }

    ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
                           &ets_data, NULL);
    if (ret) {
        dev_info(&pf->pdev->dev,
             "query veb bw ets config failed, aq_err=%d\n",
             hw->aq.asq_last_status);
        goto out;
    }

    veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
    veb->bw_max_quanta = ets_data.tc_bw_max;
    veb->is_abs_credits = bw_data.absolute_credits_enable;
    /* tc_bw_max packs eight 4-bit per-TC quanta into two LE16 words */
    tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
            (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
    for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
        veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
        veb->bw_tc_limit_credits[i] =
                    le16_to_cpu(bw_data.tc_bw_limits[i]);
        /* extract this TC's 4-bit quanta field (low 3 bits used) */
        veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
    }

out:
    return ret;
}
6193
/**
 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
 * @pf: board private structure
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_veb_mem_alloc(struct i40e_pf *pf)
{
    int ret = -ENOENT;
    struct i40e_veb *veb;
    int i;

    /* Need to protect the allocation of switch elements at the PF level */
    mutex_lock(&pf->switch_mutex);

    /* VEB list may be fragmented if VEB creation/destruction has
     * been happening.  We can afford to do a quick scan to look
     * for any free slots in the list.
     *
     * find the first empty veb slot with a single linear scan
     */
    i = 0;
    while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
        i++;
    if (i >= I40E_MAX_VEB) {
        ret = -ENOMEM;
        goto err_alloc_veb;  /* out of VEB slots! */
    }

    veb = kzalloc(sizeof(*veb), GFP_KERNEL);
    if (!veb) {
        ret = -ENOMEM;
        goto err_alloc_veb;
    }
    veb->pf = pf;
    veb->idx = i;
    veb->enabled_tc = 1;    /* TC0 only by default */

    pf->veb[i] = veb;
    ret = i;
err_alloc_veb:
    mutex_unlock(&pf->switch_mutex);
    return ret;
}
6239
/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
    struct i40e_pf *pf = branch->pf;
    /* cache seid/idx locally: releasing the last VSI below may free
     * *branch as a side effect, so the struct can't be used after that
     */
    u16 branch_seid = branch->seid;
    u16 veb_idx = branch->idx;
    int i;

    /* release any VEBs on this VEB - RECURSION */
    for (i = 0; i < I40E_MAX_VEB; i++) {
        if (!pf->veb[i])
            continue;
        if (pf->veb[i]->uplink_seid == branch->seid)
            i40e_switch_branch_release(pf->veb[i]);
    }

    /* Release the VSIs on this VEB, but not the owner VSI.
     *
     * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
     *       the VEB itself, so don't use (*branch) after this loop.
     */
    for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
        if (!pf->vsi[i])
            continue;
        if (pf->vsi[i]->uplink_seid == branch_seid &&
           (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
            i40e_vsi_release(pf->vsi[i]);
        }
    }

    /* There's one corner case where the VEB might not have been
     * removed, so double check it here and remove it if needed.
     * This case happens if the veb was created from the debugfs
     * commands and no VSIs were added to it.
     */
    if (pf->veb[veb_idx])
        i40e_veb_release(pf->veb[veb_idx]);
}
6284
6285/**
6286 * i40e_veb_clear - remove veb struct
6287 * @veb: the veb to remove
6288 **/
6289static void i40e_veb_clear(struct i40e_veb *veb)
6290{
6291 if (!veb)
6292 return;
6293
6294 if (veb->pf) {
6295 struct i40e_pf *pf = veb->pf;
6296
6297 mutex_lock(&pf->switch_mutex);
6298 if (pf->veb[veb->idx] == veb)
6299 pf->veb[veb->idx] = NULL;
6300 mutex_unlock(&pf->switch_mutex);
6301 }
6302
6303 kfree(veb);
6304}
6305
/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 *
 * Refuses the removal unless exactly one VSI still hangs off this VEB;
 * that last VSI is re-homed onto the VEB's uplink before the switch
 * element is deleted in firmware and the sw struct is freed.
 **/
void i40e_veb_release(struct i40e_veb *veb)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_pf *pf;
	int i, n = 0;

	pf = veb->pf;

	/* find the remaining VSI and check for extras */
	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
			n++;
			vsi = pf->vsi[i];
		}
	}
	/* zero or multiple VSIs attached: caller must release the extra
	 * VSIs first (or there is nothing to re-home), so bail out
	 */
	if (n != 1) {
		dev_info(&pf->pdev->dev,
			 "can't remove VEB %d with %d VSIs left\n",
			 veb->seid, n);
		return;
	}

	/* move the remaining VSI to uplink veb */
	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
	if (veb->uplink_seid) {
		vsi->uplink_seid = veb->uplink_seid;
		/* no sw VEB struct to point at when uplinked to the MAC */
		if (veb->uplink_seid == pf->mac_seid)
			vsi->veb_idx = I40E_NO_VEB;
		else
			vsi->veb_idx = veb->veb_idx;
	} else {
		/* floating VEB: re-home onto the LAN VSI's uplink */
		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
	}

	/* delete the switch element in firmware, then drop the sw struct */
	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
	i40e_veb_clear(veb);

	return;
}
6351
/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 *
 * Asks the firmware (admin queue) to instantiate the VEB, fetches its
 * statistics index and bandwidth info, and then points @vsi at the new
 * VEB as its uplink.
 *
 * Returns 0 on success, -EPERM or -ENOENT on admin queue failures.
 **/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
	/* flagged as the default element when the controlling VSI is the
	 * PF's main LAN VSI
	 */
	bool is_default = (vsi->idx == vsi->back->lan_vsi);
	int ret;

	/* get a VEB from the hardware */
	ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, is_default, &veb->seid, NULL);
	if (ret) {
		dev_info(&veb->pf->pdev->dev,
			 "couldn't add VEB, err %d, aq_err %d\n",
			 ret, veb->pf->hw.aq.asq_last_status);
		return -EPERM;
	}

	/* get statistics counter */
	ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
					 &veb->stats_idx, NULL, NULL, NULL);
	if (ret) {
		dev_info(&veb->pf->pdev->dev,
			 "couldn't get VEB statistics idx, err %d, aq_err %d\n",
			 ret, veb->pf->hw.aq.asq_last_status);
		return -EPERM;
	}
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&veb->pf->pdev->dev,
			 "couldn't get VEB bw info, err %d, aq_err %d\n",
			 ret, veb->pf->hw.aq.asq_last_status);
		/* undo the add so firmware and sw state stay in sync */
		i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
		return -ENOENT;
	}

	/* hook the controlling VSI up to the freshly created VEB */
	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

	return 0;
}
6396
6397/**
6398 * i40e_veb_setup - Set up a VEB
6399 * @pf: board private structure
6400 * @flags: VEB setup flags
6401 * @uplink_seid: the switch element to link to
6402 * @vsi_seid: the initial VSI seid
6403 * @enabled_tc: Enabled TC bit-map
6404 *
6405 * This allocates the sw VEB structure and links it into the switch
6406 * It is possible and legal for this to be a duplicate of an already
6407 * existing VEB. It is also possible for both uplink and vsi seids
6408 * to be zero, in order to create a floating VEB.
6409 *
6410 * Returns pointer to the successfully allocated VEB sw struct on
6411 * success, otherwise returns NULL on failure.
6412 **/
6413struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
6414 u16 uplink_seid, u16 vsi_seid,
6415 u8 enabled_tc)
6416{
6417 struct i40e_veb *veb, *uplink_veb = NULL;
6418 int vsi_idx, veb_idx;
6419 int ret;
6420
6421 /* if one seid is 0, the other must be 0 to create a floating relay */
6422 if ((uplink_seid == 0 || vsi_seid == 0) &&
6423 (uplink_seid + vsi_seid != 0)) {
6424 dev_info(&pf->pdev->dev,
6425 "one, not both seid's are 0: uplink=%d vsi=%d\n",
6426 uplink_seid, vsi_seid);
6427 return NULL;
6428 }
6429
6430 /* make sure there is such a vsi and uplink */
6431 for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
6432 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
6433 break;
6434 if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
6435 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
6436 vsi_seid);
6437 return NULL;
6438 }
6439
6440 if (uplink_seid && uplink_seid != pf->mac_seid) {
6441 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6442 if (pf->veb[veb_idx] &&
6443 pf->veb[veb_idx]->seid == uplink_seid) {
6444 uplink_veb = pf->veb[veb_idx];
6445 break;
6446 }
6447 }
6448 if (!uplink_veb) {
6449 dev_info(&pf->pdev->dev,
6450 "uplink seid %d not found\n", uplink_seid);
6451 return NULL;
6452 }
6453 }
6454
6455 /* get veb sw struct */
6456 veb_idx = i40e_veb_mem_alloc(pf);
6457 if (veb_idx < 0)
6458 goto err_alloc;
6459 veb = pf->veb[veb_idx];
6460 veb->flags = flags;
6461 veb->uplink_seid = uplink_seid;
6462 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
6463 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
6464
6465 /* create the VEB in the switch */
6466 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
6467 if (ret)
6468 goto err_veb;
6469
6470 return veb;
6471
6472err_veb:
6473 i40e_veb_clear(veb);
6474err_alloc:
6475 return NULL;
6476}
6477
/**
 * i40e_setup_pf_switch_element - set pf vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
				struct i40e_aqc_switch_config_element_resp *ele,
				u16 num_reported, bool printconfig)
{
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? only the VEB directly below the MAC is tracked
		 * as pf->lan_veb here; any other VEB is ignored
		 */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb == I40E_NO_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb == I40E_NO_VEB) {
				/* no match found: allocate a fresh sw VEB
				 * slot to track it; on failure just skip
				 */
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					break;
				pf->lan_veb = v;
			}
		}

		/* record what firmware told us about the LAN VEB */
		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
		break;
	}
}
6560
/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.  Also caches the raw response
 * in pf->sw_config (best effort - caching is skipped if the
 * allocation fails).
 *
 * Returns 0 on success, -ENOMEM/-ENOENT on failure.
 **/
int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
{
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u16 next_seid = 0;
	int ret = 0;
	u8 *aq_buf;
	int i;

	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
	if (!aq_buf)
		return -ENOMEM;

	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	/* firmware may return the config in several chunks; next_seid is
	 * the continuation cookie and goes back to zero when done
	 */
	do {
		u16 num_reported, num_total;

		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
						I40E_AQ_LARGE_BUF,
						&next_seid, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "get switch config failed %d aq_err=%x\n",
				 ret, pf->hw.aq.asq_last_status);
			kfree(aq_buf);
			return -ENOENT;
		}

		num_reported = le16_to_cpu(sw_config->header.num_reported);
		num_total = le16_to_cpu(sw_config->header.num_total);

		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "header: %d reported %d total\n",
				 num_reported, num_total);

		/* keep a copy of the latest chunk for later inspection;
		 * NOTE: each chunk replaces the previous cached copy
		 */
		if (num_reported) {
			int sz = sizeof(*sw_config) * num_reported;

			kfree(pf->sw_config);
			pf->sw_config = kzalloc(sz, GFP_KERNEL);
			if (pf->sw_config)
				memcpy(pf->sw_config, sw_config, sz);
		}

		/* pull the interesting SEIDs out of each element */
		for (i = 0; i < num_reported; i++) {
			struct i40e_aqc_switch_config_element_resp *ele =
				&sw_config->element[i];

			i40e_setup_pf_switch_element(pf, ele, num_reported,
						     printconfig);
		}
	} while (next_seid != 0);

	kfree(aq_buf);
	return ret;
}
6625
/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 *
 * Queries the existing switch layout, sets up the FDIR and main LAN
 * VSIs as needed, programs static filter control, enables RSS, and
 * seeds the link and flow-control state from the current PHY info.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf)
{
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %d, aq_err %d\n",
			 ret, pf->hw.aq.asq_last_status);
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* fdir VSI must happen first to be sure it gets queue 0, but only
	 * if there is enough room for the fdir VSI
	 */
	if (pf->num_lan_qps > 1)
		i40e_fdir_setup(pf);

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;

		vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			/* undo the fdir VSI set up above */
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
		/* accommodate kcompat by copying the main VSI queue count
		 * into the pf, since this newer code pushes the pf queue
		 * info down a level into a VSI
		 */
		pf->num_rx_queues = vsi->alloc_queue_pairs;
		pf->num_tx_queues = vsi->alloc_queue_pairs;
	} else {
		/* force a reset of TC and queue layout configurations */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
	i40e_link_event(pf);

	/* Initialize user-specifics link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);
	pf->hw.fc.requested_mode = I40E_FC_DEFAULT;
	/* derive the current pause mode from the negotiated link info */
	if (pf->hw.phy.link_info.an_info &
	    (I40E_AQ_LINK_PAUSE_TX | I40E_AQ_LINK_PAUSE_RX))
		pf->hw.fc.current_mode = I40E_FC_FULL;
	else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
	else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
	else
		pf->hw.fc.current_mode = I40E_FC_DEFAULT;

	return ret;
}
6720
6721/**
6722 * i40e_set_rss_size - helper to set rss_size
6723 * @pf: board private structure
6724 * @queues_left: how many queues
6725 */
6726static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
6727{
6728 int num_tc0;
6729
6730 num_tc0 = min_t(int, queues_left, pf->rss_size_max);
6731 num_tc0 = min_t(int, num_tc0, nr_cpus_node(numa_node_id()));
6732 num_tc0 = rounddown_pow_of_two(num_tc0);
6733
6734 return num_tc0;
6735}
6736
/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 *
 * Splits the device's queue pairs between LAN/RSS, DCB traffic
 * classes, Flow Director, SR-IOV VFs and VMDq, depending on which
 * feature flags are enabled.  On an impossible combination the
 * relevant dev_info is printed and the function returns early,
 * leaving pf->num_lan_qps as set up to that point.
 **/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
	int accum_tc_size;
	int queues_left;

	pf->num_lan_qps = 0;
	pf->num_tc_qps = rounddown_pow_of_two(pf->num_tc_qps);
	/* queues reserved for TCs 1..7 (TC0 gets the RSS set instead) */
	accum_tc_size = (I40E_MAX_TRAFFIC_CLASS - 1) * pf->num_tc_qps;

	/* Find the max queues to be put into basic use.  We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big RSS set.
	 */
	queues_left = pf->hw.func_caps.num_tx_qp;

	if (!((pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	      (pf->flags & I40E_FLAG_MQ_ENABLED)) ||
	    !(pf->flags & (I40E_FLAG_RSS_ENABLED |
			   I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) ||
	    (queues_left == 1)) {

		/* one qp for PF, no queues for anything else */
		queues_left = 0;
		pf->rss_size = pf->num_lan_qps = 1;

		/* make sure all the fancies are disabled */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
			       I40E_FLAG_MQ_ENABLED	|
			       I40E_FLAG_FDIR_ENABLED	|
			       I40E_FLAG_FDIR_ATR_ENABLED |
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_SRIOV_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);

	} else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
		   !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
		   !(pf->flags & I40E_FLAG_DCB_ENABLED)) {

		/* RSS only: everything goes to the TC0 RSS set */
		pf->rss_size = i40e_set_rss_size(pf, queues_left);

		queues_left -= pf->rss_size;
		pf->num_lan_qps = pf->rss_size;

	} else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
		   !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
		   (pf->flags & I40E_FLAG_DCB_ENABLED)) {

		/* save num_tc_qps queues for TCs 1 thru 7 and the rest
		 * are set up for RSS in TC0
		 */
		queues_left -= accum_tc_size;

		pf->rss_size = i40e_set_rss_size(pf, queues_left);

		queues_left -= pf->rss_size;
		if (queues_left < 0) {
			dev_info(&pf->pdev->dev, "not enough queues for DCB\n");
			return;
		}

		pf->num_lan_qps = pf->rss_size + accum_tc_size;

	} else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
		   (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
		   !(pf->flags & I40E_FLAG_DCB_ENABLED)) {

		/* RSS + Flow Director, no DCB */
		queues_left -= 1; /* save 1 queue for FD */

		pf->rss_size = i40e_set_rss_size(pf, queues_left);

		queues_left -= pf->rss_size;
		if (queues_left < 0) {
			dev_info(&pf->pdev->dev, "not enough queues for Flow Director\n");
			return;
		}

		pf->num_lan_qps = pf->rss_size;

	} else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
		   (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
		   (pf->flags & I40E_FLAG_DCB_ENABLED)) {

		/* save 1 queue for TCs 1 thru 7,
		 * 1 queue for flow director,
		 * and the rest are set up for RSS in TC0
		 */
		queues_left -= 1;
		queues_left -= accum_tc_size;

		pf->rss_size = i40e_set_rss_size(pf, queues_left);
		queues_left -= pf->rss_size;
		if (queues_left < 0) {
			dev_info(&pf->pdev->dev, "not enough queues for DCB and Flow Director\n");
			return;
		}

		pf->num_lan_qps = pf->rss_size + accum_tc_size;

	} else {
		dev_info(&pf->pdev->dev,
			 "Invalid configuration, flags=0x%08llx\n", pf->flags);
		return;
	}

	/* hand any leftovers to SR-IOV VFs, clamping the VF count to
	 * what the remaining queues can support
	 */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left /
					pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	/* then whatever is still left goes to VMDq VSIs */
	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}

	return;
}
6861
6862/**
6863 * i40e_setup_pf_filter_control - Setup PF static filter control
6864 * @pf: PF to be setup
6865 *
6866 * i40e_setup_pf_filter_control sets up a pf's initial filter control
6867 * settings. If PE/FCoE are enabled then it will also set the per PF
6868 * based filter sizes required for them. It also enables Flow director,
6869 * ethertype and macvlan type filter settings for the pf.
6870 *
6871 * Returns 0 on success, negative on failure
6872 **/
6873static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
6874{
6875 struct i40e_filter_control_settings *settings = &pf->filter_settings;
6876
6877 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
6878
6879 /* Flow Director is enabled */
6880 if (pf->flags & (I40E_FLAG_FDIR_ENABLED | I40E_FLAG_FDIR_ATR_ENABLED))
6881 settings->enable_fdir = true;
6882
6883 /* Ethtype and MACVLAN filters enabled for PF */
6884 settings->enable_ethtype = true;
6885 settings->enable_macvlan = true;
6886
6887 if (i40e_set_filter_control(&pf->hw, settings))
6888 return -ENOENT;
6889
6890 return 0;
6891}
6892
6893/**
6894 * i40e_probe - Device initialization routine
6895 * @pdev: PCI device information struct
6896 * @ent: entry in i40e_pci_tbl
6897 *
6898 * i40e_probe initializes a pf identified by a pci_dev structure.
6899 * The OS initialization, configuring of the pf private structure,
6900 * and a hardware reset occur.
6901 *
6902 * Returns 0 on success, negative on failure
6903 **/
6904static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6905{
6906 struct i40e_driver_version dv;
6907 struct i40e_pf *pf;
6908 struct i40e_hw *hw;
6909 int err = 0;
6910 u32 len;
6911
6912 err = pci_enable_device_mem(pdev);
6913 if (err)
6914 return err;
6915
6916 /* set up for high or low dma */
6917 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
6918 /* coherent mask for the same size will always succeed if
6919 * dma_set_mask does
6920 */
6921 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
6922 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
6923 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
6924 } else {
6925 dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
6926 err = -EIO;
6927 goto err_dma;
6928 }
6929
6930 /* set up pci connections */
6931 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
6932 IORESOURCE_MEM), i40e_driver_name);
6933 if (err) {
6934 dev_info(&pdev->dev,
6935 "pci_request_selected_regions failed %d\n", err);
6936 goto err_pci_reg;
6937 }
6938
6939 pci_enable_pcie_error_reporting(pdev);
6940 pci_set_master(pdev);
6941
6942 /* Now that we have a PCI connection, we need to do the
6943 * low level device setup. This is primarily setting up
6944 * the Admin Queue structures and then querying for the
6945 * device's current profile information.
6946 */
6947 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
6948 if (!pf) {
6949 err = -ENOMEM;
6950 goto err_pf_alloc;
6951 }
6952 pf->next_vsi = 0;
6953 pf->pdev = pdev;
6954 set_bit(__I40E_DOWN, &pf->state);
6955
6956 hw = &pf->hw;
6957 hw->back = pf;
6958 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
6959 pci_resource_len(pdev, 0));
6960 if (!hw->hw_addr) {
6961 err = -EIO;
6962 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
6963 (unsigned int)pci_resource_start(pdev, 0),
6964 (unsigned int)pci_resource_len(pdev, 0), err);
6965 goto err_ioremap;
6966 }
6967 hw->vendor_id = pdev->vendor;
6968 hw->device_id = pdev->device;
6969 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
6970 hw->subsystem_vendor_id = pdev->subsystem_vendor;
6971 hw->subsystem_device_id = pdev->subsystem_device;
6972 hw->bus.device = PCI_SLOT(pdev->devfn);
6973 hw->bus.func = PCI_FUNC(pdev->devfn);
6974
6975 /* Reset here to make sure all is clean and to define PF 'n' */
6976 err = i40e_pf_reset(hw);
6977 if (err) {
6978 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
6979 goto err_pf_reset;
6980 }
6981 pf->pfr_count++;
6982
6983 hw->aq.num_arq_entries = I40E_AQ_LEN;
6984 hw->aq.num_asq_entries = I40E_AQ_LEN;
6985 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
6986 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
6987 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
6988 snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1,
6989 "%s-pf%d:misc",
6990 dev_driver_string(&pf->pdev->dev), pf->hw.pf_id);
6991
6992 err = i40e_init_shared_code(hw);
6993 if (err) {
6994 dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
6995 goto err_pf_reset;
6996 }
6997
6998 err = i40e_init_adminq(hw);
6999 dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
7000 if (err) {
7001 dev_info(&pdev->dev,
7002 "init_adminq failed: %d expecting API %02x.%02x\n",
7003 err,
7004 I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
7005 goto err_pf_reset;
7006 }
7007
7008 err = i40e_get_capabilities(pf);
7009 if (err)
7010 goto err_adminq_setup;
7011
7012 err = i40e_sw_init(pf);
7013 if (err) {
7014 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
7015 goto err_sw_init;
7016 }
7017
7018 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
7019 hw->func_caps.num_rx_qp,
7020 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
7021 if (err) {
7022 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
7023 goto err_init_lan_hmc;
7024 }
7025
7026 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
7027 if (err) {
7028 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
7029 err = -ENOENT;
7030 goto err_configure_lan_hmc;
7031 }
7032
7033 i40e_get_mac_addr(hw, hw->mac.addr);
7034 if (i40e_validate_mac_addr(hw->mac.addr)) {
7035 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
7036 err = -EIO;
7037 goto err_mac_addr;
7038 }
7039 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
7040 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
7041
7042 pci_set_drvdata(pdev, pf);
7043 pci_save_state(pdev);
7044
7045 /* set up periodic task facility */
7046 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
7047 pf->service_timer_period = HZ;
7048
7049 INIT_WORK(&pf->service_task, i40e_service_task);
7050 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
7051 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
7052 pf->link_check_timeout = jiffies;
7053
7054 /* set up the main switch operations */
7055 i40e_determine_queue_usage(pf);
7056 i40e_init_interrupt_scheme(pf);
7057
7058 /* Set up the *vsi struct based on the number of VSIs in the HW,
7059 * and set up our local tracking of the MAIN PF vsi.
7060 */
7061 len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
7062 pf->vsi = kzalloc(len, GFP_KERNEL);
7063 if (!pf->vsi)
7064 goto err_switch_setup;
7065
7066 err = i40e_setup_pf_switch(pf);
7067 if (err) {
7068 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
7069 goto err_vsis;
7070 }
7071
7072 /* The main driver is (mostly) up and happy. We need to set this state
7073 * before setting up the misc vector or we get a race and the vector
7074 * ends up disabled forever.
7075 */
7076 clear_bit(__I40E_DOWN, &pf->state);
7077
7078 /* In case of MSIX we are going to setup the misc vector right here
7079 * to handle admin queue events etc. In case of legacy and MSI
7080 * the misc functionality and queue processing is combined in
7081 * the same vector and that gets setup at open.
7082 */
7083 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
7084 err = i40e_setup_misc_vector(pf);
7085 if (err) {
7086 dev_info(&pdev->dev,
7087 "setup of misc vector failed: %d\n", err);
7088 goto err_vsis;
7089 }
7090 }
7091
7092 /* prep for VF support */
7093 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
7094 (pf->flags & I40E_FLAG_MSIX_ENABLED)) {
7095 u32 val;
7096
7097 /* disable link interrupts for VFs */
7098 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
7099 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
7100 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
7101 i40e_flush(hw);
7102 }
7103
7104 i40e_dbg_pf_init(pf);
7105
7106 /* tell the firmware that we're starting */
7107 dv.major_version = DRV_VERSION_MAJOR;
7108 dv.minor_version = DRV_VERSION_MINOR;
7109 dv.build_version = DRV_VERSION_BUILD;
7110 dv.subbuild_version = 0;
7111 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
7112
7113 /* since everything's happy, start the service_task timer */
7114 mod_timer(&pf->service_timer,
7115 round_jiffies(jiffies + pf->service_timer_period));
7116
7117 return 0;
7118
7119 /* Unwind what we've done if something failed in the setup */
7120err_vsis:
7121 set_bit(__I40E_DOWN, &pf->state);
7122err_switch_setup:
7123 i40e_clear_interrupt_scheme(pf);
7124 kfree(pf->vsi);
7125 del_timer_sync(&pf->service_timer);
7126err_mac_addr:
7127err_configure_lan_hmc:
7128 (void)i40e_shutdown_lan_hmc(hw);
7129err_init_lan_hmc:
7130 kfree(pf->qp_pile);
7131 kfree(pf->irq_pile);
7132err_sw_init:
7133err_adminq_setup:
7134 (void)i40e_shutdown_adminq(hw);
7135err_pf_reset:
7136 iounmap(hw->hw_addr);
7137err_ioremap:
7138 kfree(pf);
7139err_pf_alloc:
7140 pci_disable_pcie_error_reporting(pdev);
7141 pci_release_selected_regions(pdev,
7142 pci_select_bars(pdev, IORESOURCE_MEM));
7143err_pci_reg:
7144err_dma:
7145 pci_disable_device(pdev);
7146 return err;
7147}
7148
/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that is should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 *
 * Teardown order matters here: tasks and VFs first, then the switch
 * branches, the PF's VSI, the HMC and admin queue, and finally the
 * local sw bookkeeping and PCI resources.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	i40e_status ret_code;
	u32 reg;
	int i;

	i40e_dbg_pf_exit(pf);

	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	/* no more scheduling of any task */
	set_bit(__I40E_DOWN, &pf->state);
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		/* release top-level branches: VEBs on the MAC or floating */
		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	/* release the misc (admin queue) interrupt */
	i40e_stop_misc_vector(pf);
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		synchronize_irq(pf->msix_entries[0].vector);
		free_irq(pf->msix_entries[0].vector, pf);
	}

	/* shutdown and destroy the HMC */
	ret_code = i40e_shutdown_lan_hmc(&pf->hw);
	if (ret_code)
		dev_warn(&pdev->dev,
			 "Failed to destroy the HMC resources: %d\n", ret_code);

	/* shutdown the adminq */
	i40e_aq_queue_shutdown(&pf->hw, true);
	ret_code = i40e_shutdown_adminq(&pf->hw);
	if (ret_code)
		dev_warn(&pdev->dev,
			 "Failed to destroy the Admin Queue resources: %d\n",
			 ret_code);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->irq_pile);
	kfree(pf->sw_config);
	kfree(pf->vsi);

	/* force a PF reset to clean anything leftover */
	reg = rd32(&pf->hw, I40E_PFGEN_CTRL);
	wr32(&pf->hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
	i40e_flush(&pf->hw);

	iounmap(pf->hw.hw_addr);
	kfree(pf);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
7250
7251/**
7252 * i40e_pci_error_detected - warning that something funky happened in PCI land
7253 * @pdev: PCI device information struct
7254 *
7255 * Called to warn that something happened and the error handling steps
7256 * are in progress. Allows the driver to quiesce things, be ready for
7257 * remediation.
7258 **/
7259static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
7260 enum pci_channel_state error)
7261{
7262 struct i40e_pf *pf = pci_get_drvdata(pdev);
7263
7264 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
7265
7266 /* shutdown all operations */
7267 i40e_pf_quiesce_all_vsi(pf);
7268
7269 /* Request a slot reset */
7270 return PCI_ERS_RESULT_NEED_RESET;
7271}
7272
7273/**
7274 * i40e_pci_error_slot_reset - a PCI slot reset just happened
7275 * @pdev: PCI device information struct
7276 *
7277 * Called to find if the driver can work with the device now that
7278 * the pci slot has been reset. If a basic connection seems good
7279 * (registers are readable and have sane content) then return a
7280 * happy little PCI_ERS_RESULT_xxx.
7281 **/
7282static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
7283{
7284 struct i40e_pf *pf = pci_get_drvdata(pdev);
7285 pci_ers_result_t result;
7286 int err;
7287 u32 reg;
7288
7289 dev_info(&pdev->dev, "%s\n", __func__);
7290 if (pci_enable_device_mem(pdev)) {
7291 dev_info(&pdev->dev,
7292 "Cannot re-enable PCI device after reset.\n");
7293 result = PCI_ERS_RESULT_DISCONNECT;
7294 } else {
7295 pci_set_master(pdev);
7296 pci_restore_state(pdev);
7297 pci_save_state(pdev);
7298 pci_wake_from_d3(pdev, false);
7299
7300 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
7301 if (reg == 0)
7302 result = PCI_ERS_RESULT_RECOVERED;
7303 else
7304 result = PCI_ERS_RESULT_DISCONNECT;
7305 }
7306
7307 err = pci_cleanup_aer_uncorrect_error_status(pdev);
7308 if (err) {
7309 dev_info(&pdev->dev,
7310 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
7311 err);
7312 /* non-fatal, continue */
7313 }
7314
7315 return result;
7316}
7317
7318/**
7319 * i40e_pci_error_resume - restart operations after PCI error recovery
7320 * @pdev: PCI device information struct
7321 *
7322 * Called to allow the driver to bring things back up after PCI error
7323 * and/or reset recovery has finished.
7324 **/
7325static void i40e_pci_error_resume(struct pci_dev *pdev)
7326{
7327 struct i40e_pf *pf = pci_get_drvdata(pdev);
7328
7329 dev_info(&pdev->dev, "%s\n", __func__);
7330 i40e_handle_reset_warning(pf);
7331}
7332
/* PCI error-recovery (AER) callbacks for this driver */
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.resume = i40e_pci_error_resume,
};
7338
/* PCI driver glue: probe/remove entry points plus AER and SR-IOV hooks */
static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};
7347
7348/**
7349 * i40e_init_module - Driver registration routine
7350 *
7351 * i40e_init_module is the first routine called when the driver is
7352 * loaded. All it does is register with the PCI subsystem.
7353 **/
7354static int __init i40e_init_module(void)
7355{
7356 pr_info("%s: %s - version %s\n", i40e_driver_name,
7357 i40e_driver_string, i40e_driver_version_str);
7358 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
7359 i40e_dbg_init();
7360 return pci_register_driver(&i40e_driver);
7361}
7362module_init(i40e_init_module);
7363
/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.  Unregisters from the PCI subsystem first so no new
 * probes can race with the debugfs teardown.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
new file mode 100644
index 000000000000..97e1bb30ef8a
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -0,0 +1,391 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#include "i40e_prototype.h"
29
30/**
31 * i40e_init_nvm_ops - Initialize NVM function pointers.
32 * @hw: pointer to the HW structure.
33 *
34 * Setups the function pointers and the NVM info structure. Should be called
35 * once per NVM initialization, e.g. inside the i40e_init_shared_code().
36 * Please notice that the NVM term is used here (& in all methods covered
37 * in this file) as an equivalent of the FLASH part mapped into the SR.
38 * We are accessing FLASH always thru the Shadow RAM.
39 **/
40i40e_status i40e_init_nvm(struct i40e_hw *hw)
41{
42 struct i40e_nvm_info *nvm = &hw->nvm;
43 i40e_status ret_code = 0;
44 u32 fla, gens;
45 u8 sr_size;
46
47 /* The SR size is stored regardless of the nvm programming mode
48 * as the blank mode may be used in the factory line.
49 */
50 gens = rd32(hw, I40E_GLNVM_GENS);
51 sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
52 I40E_GLNVM_GENS_SR_SIZE_SHIFT);
53 /* Switching to words (sr_size contains power of 2KB). */
54 nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
55
56 /* Check if we are in the normal or blank NVM programming mode. */
57 fla = rd32(hw, I40E_GLNVM_FLA);
58 if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode. */
59 /* Max NVM timeout. */
60 nvm->timeout = I40E_MAX_NVM_TIMEOUT;
61 nvm->blank_nvm_mode = false;
62 } else { /* Blank programming mode. */
63 nvm->blank_nvm_mode = true;
64 ret_code = I40E_ERR_NVM_BLANK_MODE;
65 hw_dbg(hw, "NVM init error: unsupported blank mode.\n");
66 }
67
68 return ret_code;
69}
70
/**
 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership.
 * @hw: pointer to the HW structure.
 * @access: NVM access type (read or write).
 *
 * This function will request NVM ownership for reading
 * via the proper Admin Command.  If the resource is currently held by
 * another owner, it polls (sleeping between attempts) until either the
 * request succeeds or the current owner's grant window expires.
 **/
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
			     enum i40e_aq_resource_access_type access)
{
	i40e_status ret_code = 0;
	u64 gtime, timeout;
	u64 time = 0;	/* grant/wait duration from FW, in ms per I40E_MS_TO_GTIME usage */

	/* Blank mode never takes the HW semaphore; nothing to acquire. */
	if (hw->nvm.blank_nvm_mode)
		goto i40e_i40e_acquire_nvm_exit;

	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
					    0, &time, NULL);
	/* Reading the Global Device Timer. */
	gtime = rd32(hw, I40E_GLVFGEN_TIMER);

	/* Store the timeout: absolute GTIME at which the grant expires. */
	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime;

	if (ret_code) {
		/* Request failed: 'time' now reports how long the current
		 * owner may still hold the resource.  Set the polling
		 * timeout, capped at I40E_MAX_NVM_TIMEOUT.
		 */
		if (time > I40E_MAX_NVM_TIMEOUT)
			timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT)
				  + gtime;
		else
			timeout = hw->nvm.hw_semaphore_timeout;
		/* Poll until the current NVM owner timeouts. */
		while (gtime < timeout) {
			usleep_range(10000, 20000);
			ret_code = i40e_aq_request_resource(hw,
							I40E_NVM_RESOURCE_ID,
							access, 0, &time,
							NULL);
			if (!ret_code) {
				/* Acquired: record our own expiry GTIME. */
				hw->nvm.hw_semaphore_timeout =
						I40E_MS_TO_GTIME(time) + gtime;
				break;
			}
			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
		}
		if (ret_code) {
			/* Gave up: remember when the current owner's grant
			 * ends so callers know how long to wait.
			 */
			hw->nvm.hw_semaphore_timeout = 0;
			hw->nvm.hw_semaphore_wait =
						I40E_MS_TO_GTIME(time) + gtime;
			hw_dbg(hw, "NVM acquire timed out, wait %llu ms before trying again.\n",
			       time);
		}
	}

/* NOTE(review): the doubled "i40e_" in this label looks like a typo,
 * but it is purely internal to this function.
 */
i40e_i40e_acquire_nvm_exit:
	return ret_code;
}
130
131/**
132 * i40e_release_nvm - Generic request for releasing the NVM ownership.
133 * @hw: pointer to the HW structure.
134 *
135 * This function will release NVM resource via the proper Admin Command.
136 **/
137void i40e_release_nvm(struct i40e_hw *hw)
138{
139 if (!hw->nvm.blank_nvm_mode)
140 i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
141}
142
143/**
144 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit.
145 * @hw: pointer to the HW structure.
146 *
147 * Polls the SRCTL Shadow RAM register done bit.
148 **/
149static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
150{
151 i40e_status ret_code = I40E_ERR_TIMEOUT;
152 u32 srctl, wait_cnt;
153
154 /* Poll the I40E_GLNVM_SRCTL until the done bit is set. */
155 for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
156 srctl = rd32(hw, I40E_GLNVM_SRCTL);
157 if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
158 ret_code = 0;
159 break;
160 }
161 udelay(5);
162 }
163 if (ret_code == I40E_ERR_TIMEOUT)
164 hw_dbg(hw, "Done bit in GLNVM_SRCTL not set");
165 return ret_code;
166}
167
168/**
169 * i40e_read_nvm_srctl - Reads Shadow RAM.
170 * @hw: pointer to the HW structure.
171 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
172 * @data: word read from the Shadow RAM.
173 *
174 * Reads 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
175 **/
176static i40e_status i40e_read_nvm_srctl(struct i40e_hw *hw, u16 offset,
177 u16 *data)
178{
179 i40e_status ret_code = I40E_ERR_TIMEOUT;
180 u32 sr_reg;
181
182 if (offset >= hw->nvm.sr_size) {
183 hw_dbg(hw, "NVM read error: Offset beyond Shadow RAM limit.\n");
184 ret_code = I40E_ERR_PARAM;
185 goto read_nvm_exit;
186 }
187
188 /* Poll the done bit first. */
189 ret_code = i40e_poll_sr_srctl_done_bit(hw);
190 if (!ret_code) {
191 /* Write the address and start reading. */
192 sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
193 (1 << I40E_GLNVM_SRCTL_START_SHIFT);
194 wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
195
196 /* Poll I40E_GLNVM_SRCTL until the done bit is set. */
197 ret_code = i40e_poll_sr_srctl_done_bit(hw);
198 if (!ret_code) {
199 sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
200 *data = (u16)((sr_reg &
201 I40E_GLNVM_SRDATA_RDDATA_MASK)
202 >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
203 }
204 }
205 if (ret_code)
206 hw_dbg(hw, "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
207 offset);
208
209read_nvm_exit:
210 return ret_code;
211}
212
213/**
214 * i40e_read_nvm_word - Reads Shadow RAM word.
215 * @hw: pointer to the HW structure.
216 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
217 * @data: word read from the Shadow RAM.
218 *
219 * Reads 16 bit word from the Shadow RAM. Each read is preceded
220 * with the NVM ownership taking and followed by the release.
221 **/
222i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
223 u16 *data)
224{
225 i40e_status ret_code = 0;
226
227 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
228 if (!ret_code) {
229 ret_code = i40e_read_nvm_srctl(hw, offset, data);
230 i40e_release_nvm(hw);
231 }
232
233 return ret_code;
234}
235
/**
 * i40e_read_nvm_buffer - Reads Shadow RAM buffer.
 * @hw: pointer to the HW structure.
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: number of words to read (in) &
 *         number of words read before the NVM ownership timeout (out).
 * @data: words read from the Shadow RAM.
 *
 * Reads 16 bit words (data buffer) from the SR using the
 * i40e_read_nvm_srctl() method. The buffer read is preceded by the NVM
 * ownership take and followed by the release.
 **/
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
				 u16 *words, u16 *data)
{
	i40e_status ret_code = 0;
	u16 index, word;
	u32 time;

	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (!ret_code) {
		/* Loop thru the selected region. */
		for (word = 0; word < *words; word++) {
			index = offset + word;
			ret_code = i40e_read_nvm_srctl(hw, index, &data[word]);
			if (ret_code)
				break;
			/* Check that we haven't exceeded the ownership
			 * window recorded by i40e_acquire_nvm(); stop
			 * reading once that grant has expired.
			 */
			time = rd32(hw, I40E_GLVFGEN_TIMER);
			if (time >= hw->nvm.hw_semaphore_timeout) {
				ret_code = I40E_ERR_TIMEOUT;
				hw_dbg(hw, "NVM read error: timeout.\n");
				break;
			}
		}
		/* Update the number of words read from the Shadow RAM. */
		*words = word;
		/* Release the NVM ownership. */
		i40e_release_nvm(hw);
	}

	return ret_code;
}
279
/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: where the computed checksum word is stored on success
 *
 * This function calculates a SW checksum that covers the whole 64kB shadow RAM
 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
 * is customer specific and unknown. Therefore, this function skips all maximum
 * possible size of VPD (1kB).
 **/
static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
					  u16 *checksum)
{
	i40e_status ret_code = 0;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 word = 0;
	u32 i = 0;

	/* read pointer to VPD area */
	ret_code = i40e_read_nvm_srctl(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = i40e_read_nvm_srctl(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
				       &pcie_alt_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Skip Checksum word: it must not contribute to itself.
		 * NOTE(review): the skips below assume a module pointer
		 * never coincides with i after this increment - presumably
		 * guaranteed by the NVM layout; confirm against the
		 * datasheet.
		 */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			i++;
		/* Skip VPD module (convert byte size to word count) */
		if (i == (u32)vpd_module) {
			i += (I40E_SR_VPD_MODULE_MAX_SIZE / 2);
			if (i >= hw->nvm.sr_size)
				break;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if (i == (u32)pcie_alt_module) {
			i += (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2);
			if (i >= hw->nvm.sr_size)
				break;
		}

		ret_code = i40e_read_nvm_srctl(hw, (u16)i, &word);
		if (ret_code) {
			ret_code = I40E_ERR_NVM_CHECKSUM;
			goto i40e_calc_nvm_checksum_exit;
		}
		checksum_local += word;
	}

	/* The stored checksum is BASE minus the sum of everything else. */
	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	return ret_code;
}
347
348/**
349 * i40e_validate_nvm_checksum - Validate EEPROM checksum
350 * @hw: pointer to hardware structure
351 * @checksum: calculated checksum
352 *
353 * Performs checksum calculation and validates the NVM SW checksum. If the
354 * caller does not need checksum, the value can be NULL.
355 **/
356i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
357 u16 *checksum)
358{
359 i40e_status ret_code = 0;
360 u16 checksum_sr = 0;
361 u16 checksum_local;
362
363 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
364 if (ret_code)
365 goto i40e_validate_nvm_checksum_exit;
366
367 ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
368 if (ret_code)
369 goto i40e_validate_nvm_checksum_free;
370
371 /* Do not use i40e_read_nvm_word() because we do not want to take
372 * the synchronization semaphores twice here.
373 */
374 i40e_read_nvm_srctl(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
375
376 /* Verify read checksum from EEPROM is the same as
377 * calculated checksum
378 */
379 if (checksum_local != checksum_sr)
380 ret_code = I40E_ERR_NVM_CHECKSUM;
381
382 /* If the user cares, return the calculated checksum */
383 if (checksum)
384 *checksum = checksum_local;
385
386i40e_validate_nvm_checksum_free:
387 i40e_release_nvm(hw);
388
389i40e_validate_nvm_checksum_exit:
390 return ret_code;
391}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
new file mode 100644
index 000000000000..702c81ba86e3
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -0,0 +1,82 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
#ifndef _I40E_OSDEP_H_
#define _I40E_OSDEP_H_

#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/pci.h>
#include <linux/highuid.h>

/* get readq/writeq support for 32 bit kernels, use the low-first version */
#include <asm-generic/io-64-nonatomic-lo-hi.h>

/* File to be the magic between shared code and
 * actual OS primitives
 */

/* Debug output is compiled out by default. */
#define hw_dbg(hw, S, A...) do {} while (0)

/* 32-bit MMIO accessors; 'a' must provide an hw_addr base pointer. */
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
#define rd32(a, reg) readl((a)->hw_addr + (reg))

/* 64-bit MMIO accessors (readq/writeq from io-64-nonatomic-lo-hi above). */
#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
#define rd64(a, reg) readq((a)->hw_addr + (reg))
/* Flush posted writes by reading back a register. */
#define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT)

/* memory allocation tracking */
struct i40e_dma_mem {
	void *va;	/* virtual address of the mapping */
	dma_addr_t pa;	/* DMA (bus) address */
	u32 size;	/* allocation size in bytes */
} __packed;

/* The third argument of the shared-code allocator is unused on Linux. */
#define i40e_allocate_dma_mem(h, m, unused, s, a) \
			i40e_allocate_dma_mem_d(h, m, s, a)
#define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m)

struct i40e_virt_mem {
	void *va;	/* virtual address of the allocation */
	u32 size;	/* allocation size in bytes */
} __packed;

#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s)
#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m)

/* Mask-gated logging keyed off the hw debug_mask. */
#define i40e_debug(h, m, s, ...)				\
do {								\
	if (((m) & (h)->debug_mask))				\
		pr_info("i40e %02x.%x " s,			\
			(h)->bus.device, (h)->bus.func,		\
			##__VA_ARGS__);				\
} while (0)

typedef enum i40e_status_code i40e_status;
#endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
new file mode 100644
index 000000000000..f75bb9ccc900
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -0,0 +1,239 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#ifndef _I40E_PROTOTYPE_H_
29#define _I40E_PROTOTYPE_H_
30
31#include "i40e_type.h"
32#include "i40e_alloc.h"
33#include "i40e_virtchnl.h"
34
35/* Prototypes for shared code functions that are not in
36 * the standard function pointer structures. These are
37 * mostly because they are needed even before the init
38 * has happened and will assist in the early SW and FW
39 * setup.
40 */
41
42/* adminq functions */
43i40e_status i40e_init_adminq(struct i40e_hw *hw);
44i40e_status i40e_shutdown_adminq(struct i40e_hw *hw);
45void i40e_adminq_init_ring_data(struct i40e_hw *hw);
46i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
47 struct i40e_arq_event_info *e,
48 u16 *events_pending);
49i40e_status i40e_asq_send_command(struct i40e_hw *hw,
50 struct i40e_aq_desc *desc,
51 void *buff, /* can be NULL */
52 u16 buff_size,
53 struct i40e_asq_cmd_details *cmd_details);
54bool i40e_asq_done(struct i40e_hw *hw);
55
56/* debug function for adminq */
57void i40e_debug_aq(struct i40e_hw *hw,
58 enum i40e_debug_mask mask,
59 void *desc,
60 void *buffer);
61
62void i40e_idle_aq(struct i40e_hw *hw);
63void i40e_resume_aq(struct i40e_hw *hw);
64
65u32 i40e_led_get(struct i40e_hw *hw);
66void i40e_led_set(struct i40e_hw *hw, u32 mode);
67
68/* admin send queue commands */
69
70i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
71 u16 *fw_major_version, u16 *fw_minor_version,
72 u16 *api_major_version, u16 *api_minor_version,
73 struct i40e_asq_cmd_details *cmd_details);
74i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
75 bool unloading);
76i40e_status i40e_aq_set_phy_reset(struct i40e_hw *hw,
77 struct i40e_asq_cmd_details *cmd_details);
78i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
79 struct i40e_asq_cmd_details *cmd_details);
80i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
81 struct i40e_asq_cmd_details *cmd_details);
82i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
83 bool enable_lse, struct i40e_link_status *link,
84 struct i40e_asq_cmd_details *cmd_details);
85i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
86 u64 advt_reg,
87 struct i40e_asq_cmd_details *cmd_details);
88i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
89 struct i40e_driver_version *dv,
90 struct i40e_asq_cmd_details *cmd_details);
91i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
92 struct i40e_vsi_context *vsi_ctx,
93 struct i40e_asq_cmd_details *cmd_details);
94i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
95 u16 vsi_id, bool set_filter,
96 struct i40e_asq_cmd_details *cmd_details);
97i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
98 u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
99i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
100 u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
101i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
102 struct i40e_vsi_context *vsi_ctx,
103 struct i40e_asq_cmd_details *cmd_details);
104i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
105 struct i40e_vsi_context *vsi_ctx,
106 struct i40e_asq_cmd_details *cmd_details);
107i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
108 u16 downlink_seid, u8 enabled_tc,
109 bool default_port, u16 *pveb_seid,
110 struct i40e_asq_cmd_details *cmd_details);
111i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
112 u16 veb_seid, u16 *switch_id, bool *floating,
113 u16 *statistic_index, u16 *vebs_used,
114 u16 *vebs_free,
115 struct i40e_asq_cmd_details *cmd_details);
116i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
117 struct i40e_aqc_add_macvlan_element_data *mv_list,
118 u16 count, struct i40e_asq_cmd_details *cmd_details);
119i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
120 struct i40e_aqc_remove_macvlan_element_data *mv_list,
121 u16 count, struct i40e_asq_cmd_details *cmd_details);
122i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
123 struct i40e_aqc_add_remove_vlan_element_data *v_list,
124 u8 count, struct i40e_asq_cmd_details *cmd_details);
125i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id,
126 struct i40e_aqc_add_remove_vlan_element_data *v_list,
127 u8 count, struct i40e_asq_cmd_details *cmd_details);
128i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
129 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
130 struct i40e_asq_cmd_details *cmd_details);
131i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
132 struct i40e_aqc_get_switch_config_resp *buf,
133 u16 buf_size, u16 *start_seid,
134 struct i40e_asq_cmd_details *cmd_details);
135i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
136 enum i40e_aq_resources_ids resource,
137 enum i40e_aq_resource_access_type access,
138 u8 sdp_number, u64 *timeout,
139 struct i40e_asq_cmd_details *cmd_details);
140i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
141 enum i40e_aq_resources_ids resource,
142 u8 sdp_number,
143 struct i40e_asq_cmd_details *cmd_details);
144i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
145 u32 offset, u16 length, void *data,
146 bool last_command,
147 struct i40e_asq_cmd_details *cmd_details);
148i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
149 void *buff, u16 buff_size, u16 *data_size,
150 enum i40e_admin_queue_opc list_type_opc,
151 struct i40e_asq_cmd_details *cmd_details);
152i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
153 u32 offset, u16 length, void *data,
154 bool last_command,
155 struct i40e_asq_cmd_details *cmd_details);
156i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
157 u8 mib_type, void *buff, u16 buff_size,
158 u16 *local_len, u16 *remote_len,
159 struct i40e_asq_cmd_details *cmd_details);
160i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
161 bool enable_update,
162 struct i40e_asq_cmd_details *cmd_details);
163i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
164 struct i40e_asq_cmd_details *cmd_details);
165i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
166 struct i40e_asq_cmd_details *cmd_details);
167i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
168 struct i40e_asq_cmd_details *cmd_details);
169i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
170 u16 flags, u8 *mac_addr,
171 struct i40e_asq_cmd_details *cmd_details);
172i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
173 enum i40e_aq_hmc_profile profile,
174 u8 pe_vf_enabled_count,
175 struct i40e_asq_cmd_details *cmd_details);
176i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
177 u16 seid, u16 credit, u8 max_bw,
178 struct i40e_asq_cmd_details *cmd_details);
179i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
180 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
181 struct i40e_asq_cmd_details *cmd_details);
182i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
183 u16 seid,
184 struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
185 struct i40e_asq_cmd_details *cmd_details);
186i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
187 u16 seid,
188 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
189 struct i40e_asq_cmd_details *cmd_details);
190i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
191 u16 seid,
192 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
193 struct i40e_asq_cmd_details *cmd_details);
194i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
195 u16 seid,
196 struct i40e_aqc_query_port_ets_config_resp *bw_data,
197 struct i40e_asq_cmd_details *cmd_details);
198i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
199 u16 seid,
200 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
201 struct i40e_asq_cmd_details *cmd_details);
202/* i40e_common */
203i40e_status i40e_init_shared_code(struct i40e_hw *hw);
204i40e_status i40e_pf_reset(struct i40e_hw *hw);
205void i40e_clear_pxe_mode(struct i40e_hw *hw);
206bool i40e_get_link_status(struct i40e_hw *hw);
207i40e_status i40e_get_mac_addr(struct i40e_hw *hw,
208 u8 *mac_addr);
209i40e_status i40e_validate_mac_addr(u8 *mac_addr);
210i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
211 struct i40e_lldp_variables *lldp_cfg);
212/* prototype for functions used for NVM access */
213i40e_status i40e_init_nvm(struct i40e_hw *hw);
214i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
215 enum i40e_aq_resource_access_type access);
216void i40e_release_nvm(struct i40e_hw *hw);
217i40e_status i40e_read_nvm_srrd(struct i40e_hw *hw, u16 offset,
218 u16 *data);
219i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
220 u16 *data);
221i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
222 u16 *words, u16 *data);
223i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
224 u16 *checksum);
225
226/* prototype for functions used for SW locks */
227
228/* i40e_common for VF drivers*/
229void i40e_vf_parse_hw_config(struct i40e_hw *hw,
230 struct i40e_virtchnl_vf_resource *msg);
231i40e_status i40e_vf_reset(struct i40e_hw *hw);
232i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
233 enum i40e_virtchnl_ops v_opcode,
234 i40e_status v_retval,
235 u8 *msg, u16 msglen,
236 struct i40e_asq_cmd_details *cmd_details);
237i40e_status i40e_set_filter_control(struct i40e_hw *hw,
238 struct i40e_filter_control_settings *settings);
239#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
new file mode 100644
index 000000000000..6bd333cde28b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -0,0 +1,4688 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#ifndef _I40E_REGISTER_H_
29#define _I40E_REGISTER_H_
30
/* PCI power-management mux and spare-bits registers, plus per-function
 * flush-done status. All *_MASK constants use unsigned literals so the
 * mask value always has unsigned (u32-compatible) type, matching the
 * 32-bit registers they describe and avoiding signed-shift pitfalls. */
#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4
#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7u << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK (0x1Fu << I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0
#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1Fu << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7u << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8
#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFFu << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC
#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK (0xFFFFFFFFu << I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
/* Flush-done flags for the PF itself, its VFs, and its VMs */
#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800
#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK (0x1u << I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
#define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600
#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1u << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880
#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1u << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
/* PF Admin Queue registers: receive (ARQ) and transmit (ATQ) rings —
 * base address high/low, head, length/status, tail.
 * Fix: the original masks used signed literals, so e.g.
 * (0x1 << 31) for *_ENABLE left-shifted into the sign bit of a signed
 * int, which is undefined behavior in C. Unsigned literals give every
 * mask a well-defined u32-compatible value. */
#define I40E_PF_ARQBAH 0x00080180
#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
#define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFFu << I40E_PF_ARQBAH_ARQBAH_SHIFT)
#define I40E_PF_ARQBAL 0x00080080
#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
#define I40E_PF_ARQBAL_ARQBAL_MASK (0xFFFFFFFFu << I40E_PF_ARQBAL_ARQBAL_SHIFT)
#define I40E_PF_ARQH 0x00080380
#define I40E_PF_ARQH_ARQH_SHIFT 0
#define I40E_PF_ARQH_ARQH_MASK (0x3FFu << I40E_PF_ARQH_ARQH_SHIFT)
#define I40E_PF_ARQLEN 0x00080280
#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
#define I40E_PF_ARQLEN_ARQLEN_MASK (0x3FFu << I40E_PF_ARQLEN_ARQLEN_SHIFT)
#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
#define I40E_PF_ARQLEN_ARQVFE_MASK (0x1u << I40E_PF_ARQLEN_ARQVFE_SHIFT)
#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
#define I40E_PF_ARQLEN_ARQOVFL_MASK (0x1u << I40E_PF_ARQLEN_ARQOVFL_SHIFT)
#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
#define I40E_PF_ARQLEN_ARQCRIT_MASK (0x1u << I40E_PF_ARQLEN_ARQCRIT_SHIFT)
#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
#define I40E_PF_ARQLEN_ARQENABLE_MASK (0x1u << I40E_PF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_PF_ARQT 0x00080480
#define I40E_PF_ARQT_ARQT_SHIFT 0
#define I40E_PF_ARQT_ARQT_MASK (0x3FFu << I40E_PF_ARQT_ARQT_SHIFT)
#define I40E_PF_ATQBAH 0x00080100
#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
#define I40E_PF_ATQBAH_ATQBAH_MASK (0xFFFFFFFFu << I40E_PF_ATQBAH_ATQBAH_SHIFT)
#define I40E_PF_ATQBAL 0x00080000
#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
#define I40E_PF_ATQBAL_ATQBAL_MASK (0xFFFFFFFFu << I40E_PF_ATQBAL_ATQBAL_SHIFT)
#define I40E_PF_ATQH 0x00080300
#define I40E_PF_ATQH_ATQH_SHIFT 0
#define I40E_PF_ATQH_ATQH_MASK (0x3FFu << I40E_PF_ATQH_ATQH_SHIFT)
#define I40E_PF_ATQLEN 0x00080200
#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
#define I40E_PF_ATQLEN_ATQLEN_MASK (0x3FFu << I40E_PF_ATQLEN_ATQLEN_SHIFT)
#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
#define I40E_PF_ATQLEN_ATQVFE_MASK (0x1u << I40E_PF_ATQLEN_ATQVFE_SHIFT)
#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
#define I40E_PF_ATQLEN_ATQOVFL_MASK (0x1u << I40E_PF_ATQLEN_ATQOVFL_SHIFT)
#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
#define I40E_PF_ATQLEN_ATQCRIT_MASK (0x1u << I40E_PF_ATQLEN_ATQCRIT_SHIFT)
#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
#define I40E_PF_ATQLEN_ATQENABLE_MASK (0x1u << I40E_PF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_PF_ATQT 0x00080400
#define I40E_PF_ATQT_ATQT_SHIFT 0
#define I40E_PF_ATQT_ATQT_MASK (0x3FFu << I40E_PF_ATQT_ATQT_SHIFT)
/* Per-VF Admin Queue registers, one 32-bit slot per VF (_VF = 0..127).
 * Fix: *_ENABLE masks used (0x1 << 31), which left-shifts into the sign
 * bit of a signed int — undefined behavior in C. All mask literals are
 * now unsigned so each *_MASK is a well-defined u32-compatible value. */
#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VF_ARQBAH_MAX_INDEX 127
#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
#define I40E_VF_ARQBAH_ARQBAH_MASK (0xFFFFFFFFu << I40E_VF_ARQBAH_ARQBAH_SHIFT)
#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VF_ARQBAL_MAX_INDEX 127
#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
#define I40E_VF_ARQBAL_ARQBAL_MASK (0xFFFFFFFFu << I40E_VF_ARQBAL_ARQBAL_SHIFT)
#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VF_ARQH_MAX_INDEX 127
#define I40E_VF_ARQH_ARQH_SHIFT 0
#define I40E_VF_ARQH_ARQH_MASK (0x3FFu << I40E_VF_ARQH_ARQH_SHIFT)
#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VF_ARQLEN_MAX_INDEX 127
#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
#define I40E_VF_ARQLEN_ARQLEN_MASK (0x3FFu << I40E_VF_ARQLEN_ARQLEN_SHIFT)
#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
#define I40E_VF_ARQLEN_ARQVFE_MASK (0x1u << I40E_VF_ARQLEN_ARQVFE_SHIFT)
#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
#define I40E_VF_ARQLEN_ARQOVFL_MASK (0x1u << I40E_VF_ARQLEN_ARQOVFL_SHIFT)
#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
#define I40E_VF_ARQLEN_ARQCRIT_MASK (0x1u << I40E_VF_ARQLEN_ARQCRIT_SHIFT)
#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
#define I40E_VF_ARQLEN_ARQENABLE_MASK (0x1u << I40E_VF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VF_ARQT_MAX_INDEX 127
#define I40E_VF_ARQT_ARQT_SHIFT 0
#define I40E_VF_ARQT_ARQT_MASK (0x3FFu << I40E_VF_ARQT_ARQT_SHIFT)
#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VF_ATQBAH_MAX_INDEX 127
#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
#define I40E_VF_ATQBAH_ATQBAH_MASK (0xFFFFFFFFu << I40E_VF_ATQBAH_ATQBAH_SHIFT)
#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VF_ATQBAL_MAX_INDEX 127
#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
#define I40E_VF_ATQBAL_ATQBAL_MASK (0xFFFFFFFFu << I40E_VF_ATQBAL_ATQBAL_SHIFT)
#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VF_ATQH_MAX_INDEX 127
#define I40E_VF_ATQH_ATQH_SHIFT 0
#define I40E_VF_ATQH_ATQH_MASK (0x3FFu << I40E_VF_ATQH_ATQH_SHIFT)
#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VF_ATQLEN_MAX_INDEX 127
#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
#define I40E_VF_ATQLEN_ATQLEN_MASK (0x3FFu << I40E_VF_ATQLEN_ATQLEN_SHIFT)
#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
#define I40E_VF_ATQLEN_ATQVFE_MASK (0x1u << I40E_VF_ATQLEN_ATQVFE_SHIFT)
#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
#define I40E_VF_ATQLEN_ATQOVFL_MASK (0x1u << I40E_VF_ATQLEN_ATQOVFL_SHIFT)
#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
#define I40E_VF_ATQLEN_ATQCRIT_MASK (0x1u << I40E_VF_ATQLEN_ATQCRIT_SHIFT)
#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
#define I40E_VF_ATQLEN_ATQENABLE_MASK (0x1u << I40E_VF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VF_ATQT_MAX_INDEX 127
#define I40E_VF_ATQT_ATQT_SHIFT 0
#define I40E_VF_ATQT_ATQT_MASK (0x3FFu << I40E_VF_ATQT_ATQT_SHIFT)
/* Port L2-tag enable plus context-manager (CM) error/context registers
 * for the LAN and PE engines, with per-VF variants (_VF = 0..127).
 * Fix: masks like (0xFF << 24) shifted a signed int into bit 31 —
 * undefined behavior in C. All mask literals are now unsigned so each
 * *_MASK has a well-defined u32-compatible value. */
#define I40E_PRT_L2TAGSEN 0x001C0B20
#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
#define I40E_PRT_L2TAGSEN_ENABLE_MASK (0xFFu << I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
#define I40E_PFCM_LAN_ERRDATA 0x0010C080
#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK (0xFu << I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK (0x7u << I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK (0xFFFu << I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
#define I40E_PFCM_LAN_ERRINFO 0x0010C000
#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK (0x1u << I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK (0x7u << I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK (0xFFu << I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK (0xFFu << I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK (0xFFu << I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
/* LAN context control/data/status: 4 data words, 128 bytes apart */
#define I40E_PFCM_LANCTXCTL 0x0010C300
#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK (0xFFFu << I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK (0x7u << I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK (0x3u << I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK (0x3u << I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */
#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
#define I40E_PFCM_LANCTXDATA_DATA_MASK (0xFFFFFFFFu << I40E_PFCM_LANCTXDATA_DATA_SHIFT)
#define I40E_PFCM_LANCTXSTAT 0x0010C380
#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK (0x1u << I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK (0x1u << I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
#define I40E_PFCM_PE_ERRDATA 0x00138D00
#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK (0xFu << I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK (0x7u << I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFFu << I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
#define I40E_PFCM_PE_ERRINFO 0x00138C80
#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1u << I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK (0x7u << I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFFu << I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFFu << I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFFu << I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK (0xFu << I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK (0x7u << I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK (0x3FFFFu << I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK (0x1u << I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK (0x7u << I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK (0xFFu << I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK (0xFFu << I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK (0xFFu << I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
/* DCB (Data Center Bridging) registers, part 1: global DCB config,
 * per-port flow-control config/timers, ETS scheduling, and user
 * priority to traffic class mapping.
 * Fix: masks such as (0xFFFF << 16) and (0x1 << 31) shifted a signed
 * int into bit 31 — undefined behavior in C. All mask literals are now
 * unsigned so each *_MASK has a well-defined u32-compatible value. */
#define I40E_GLDCB_GENC 0x00083044
#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
#define I40E_GLDCB_GENC_PCIRTT_MASK (0xFFFFu << I40E_GLDCB_GENC_PCIRTT_SHIFT)
#define I40E_GLDCB_RUPTI 0x00122618
#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK (0xFFFFFFFFu << I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
#define I40E_PRTDCB_FCCFG 0x001E4640
#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
#define I40E_PRTDCB_FCCFG_TFCE_MASK (0x3u << I40E_PRTDCB_FCCFG_TFCE_SHIFT)
#define I40E_PRTDCB_FCRTV 0x001E4600
#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK (0xFFFFu << I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
/* Two 16-bit transmit timer values per register (_i = 0..3) */
#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */
#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK (0xFFFFu << I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK (0xFFFFu << I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
#define I40E_PRTDCB_GENC 0x00083000
#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
#define I40E_PRTDCB_GENC_RESERVED_1_MASK (0x3u << I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
#define I40E_PRTDCB_GENC_NUMTC_MASK (0xFu << I40E_PRTDCB_GENC_NUMTC_SHIFT)
#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
#define I40E_PRTDCB_GENC_FCOEUP_MASK (0x7u << I40E_PRTDCB_GENC_FCOEUP_SHIFT)
#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK (0x1u << I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
#define I40E_PRTDCB_GENC_PFCLDA_MASK (0xFFFFu << I40E_PRTDCB_GENC_PFCLDA_SHIFT)
#define I40E_PRTDCB_GENS 0x00083020
#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK (0x7u << I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
#define I40E_PRTDCB_MFLCN 0x001E2400
#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
#define I40E_PRTDCB_MFLCN_PMCF_MASK (0x1u << I40E_PRTDCB_MFLCN_PMCF_SHIFT)
#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
#define I40E_PRTDCB_MFLCN_DPF_MASK (0x1u << I40E_PRTDCB_MFLCN_DPF_SHIFT)
#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
#define I40E_PRTDCB_MFLCN_RPFCM_MASK (0x1u << I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
#define I40E_PRTDCB_MFLCN_RFCE_MASK (0x1u << I40E_PRTDCB_MFLCN_RFCE_SHIFT)
#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
#define I40E_PRTDCB_MFLCN_RPFCE_MASK (0xFFu << I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
#define I40E_PRTDCB_RETSC 0x001223E0
#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
#define I40E_PRTDCB_RETSC_ETS_MODE_MASK (0x1u << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK (0x1u << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK (0xFu << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
#define I40E_PRTDCB_RETSC_LLTC_MASK (0xFFu << I40E_PRTDCB_RETSC_LLTC_SHIFT)
/* One bandwidth-share entry per traffic class (_i = 0..7) */
#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */
#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK (0x7Fu << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK (0x1u << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
#define I40E_PRTDCB_RETSTCC_ETSTC_MASK (0x1u << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
#define I40E_PRTDCB_RPPMC 0x001223A0
#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
#define I40E_PRTDCB_RPPMC_LANRPPM_MASK (0xFFu << I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK (0xFFu << I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK (0xFFu << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
#define I40E_PRTDCB_RUP 0x001C0B00
#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
#define I40E_PRTDCB_RUP_NOVLANUP_MASK (0x7u << I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
/* 3-bit TC index per user priority, UP0..UP7 packed in one register */
#define I40E_PRTDCB_RUP2TC 0x001C09A0
#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
#define I40E_PRTDCB_RUP2TC_UP0TC_MASK (0x7u << I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
#define I40E_PRTDCB_RUP2TC_UP1TC_MASK (0x7u << I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
#define I40E_PRTDCB_RUP2TC_UP2TC_MASK (0x7u << I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
#define I40E_PRTDCB_RUP2TC_UP3TC_MASK (0x7u << I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
#define I40E_PRTDCB_RUP2TC_UP4TC_MASK (0x7u << I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
#define I40E_PRTDCB_RUP2TC_UP5TC_MASK (0x7u << I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
#define I40E_PRTDCB_RUP2TC_UP6TC_MASK (0x7u << I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
#define I40E_PRTDCB_RUP2TC_UP7TC_MASK (0x7u << I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
/* DCB registers, part 2: TC-to-PFC mapping, transmit scheduling /
 * pipe monitors, per-TC credit registers, and PFC transmit status.
 * All *_MASK constants use unsigned literals so every mask has a
 * well-defined u32-compatible value, consistent with the rest of the
 * register definitions (mirrors the kernel's later I40E_MASK() which
 * casts to u32). */
#define I40E_PRTDCB_TC2PFC 0x001C0980
#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK (0xFFu << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
#define I40E_PRTDCB_TCPMC 0x000A21A0
#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
#define I40E_PRTDCB_TCPMC_CPM_MASK (0x1FFFu << I40E_PRTDCB_TCPMC_CPM_SHIFT)
#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
#define I40E_PRTDCB_TCPMC_LLTC_MASK (0xFFu << I40E_PRTDCB_TCPMC_LLTC_SHIFT)
#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK (0x1u << I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
/* One entry per traffic class (_i = 0..7) */
#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */
#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
#define I40E_PRTDCB_TCWSTC_MSTC_MASK (0xFFFFFu << I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
#define I40E_PRTDCB_TDPMC 0x000A0180
#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
#define I40E_PRTDCB_TDPMC_DPM_MASK (0xFFu << I40E_PRTDCB_TDPMC_DPM_SHIFT)
#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK (0x1u << I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
#define I40E_PRTDCB_TDPUC 0x00044100
#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT 0
#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_MASK (0xFFFFu << I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT)
#define I40E_PRTDCB_TETSC_TCB 0x000AE060
#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK (0x1u << I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK (0xFFu << I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
#define I40E_PRTDCB_TETSC_TPB 0x00098060
#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK (0x1u << I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK (0xFFu << I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
/* Transmit flow-control status: global TXOFF plus one bit per TC */
#define I40E_PRTDCB_TFCS 0x001E4560
#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
#define I40E_PRTDCB_TFCS_TXOFF_MASK (0x1u << I40E_PRTDCB_TFCS_TXOFF_SHIFT)
#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
#define I40E_PRTDCB_TFCS_TXOFF0_MASK (0x1u << I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
#define I40E_PRTDCB_TFCS_TXOFF1_MASK (0x1u << I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
#define I40E_PRTDCB_TFCS_TXOFF2_MASK (0x1u << I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
#define I40E_PRTDCB_TFCS_TXOFF3_MASK (0x1u << I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
#define I40E_PRTDCB_TFCS_TXOFF4_MASK (0x1u << I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
#define I40E_PRTDCB_TFCS_TXOFF5_MASK (0x1u << I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
#define I40E_PRTDCB_TFCS_TXOFF6_MASK (0x1u << I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
#define I40E_PRTDCB_TFCS_TXOFF7_MASK (0x1u << I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
#define I40E_PRTDCB_TFWSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */
#define I40E_PRTDCB_TFWSTC_MAX_INDEX 7
#define I40E_PRTDCB_TFWSTC_MSTC_SHIFT 0
#define I40E_PRTDCB_TFWSTC_MSTC_MASK (0xFFFFFu << I40E_PRTDCB_TFWSTC_MSTC_SHIFT)
#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */
#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK (0x3FFFu << I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
/* FCoE receive control, firmware status, clock status, and GPIO/SDP
 * control registers. All *_MASK constants use unsigned literals so
 * every mask has a well-defined u32-compatible value, consistent with
 * the rest of the register definitions. */
#define I40E_GLFCOE_RCTL 0x00269B94
#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
#define I40E_GLFCOE_RCTL_FCOEVER_MASK (0xFu << I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
#define I40E_GLFCOE_RCTL_SAVBAD_MASK (0x1u << I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
#define I40E_GLFCOE_RCTL_ICRC_MASK (0x1u << I40E_GLFCOE_RCTL_ICRC_SHIFT)
#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK (0x3FFFu << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
#define I40E_GL_FWSTS 0x00083048
#define I40E_GL_FWSTS_FWS0B_SHIFT 0
#define I40E_GL_FWSTS_FWS0B_MASK (0xFFu << I40E_GL_FWSTS_FWS0B_SHIFT)
#define I40E_GL_FWSTS_FWRI_SHIFT 9
#define I40E_GL_FWSTS_FWRI_MASK (0x1u << I40E_GL_FWSTS_FWRI_SHIFT)
#define I40E_GL_FWSTS_FWS1B_SHIFT 16
#define I40E_GL_FWSTS_FWS1B_MASK (0xFFu << I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_GLGEN_CLKSTAT 0x000B8184
#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK (0x1u << I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK (0x3u << I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK (0x7u << I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK (0x7u << I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK (0x7u << I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK (0x7u << I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
/* One control register per GPIO pin (_i = 0..29) */
#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */
#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK (0x3u << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK (0x1u << I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK (0x1u << I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK (0x1u << I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK (0x1u << I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK (0x7u << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK (0x1u << I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK (0x1u << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK (0xFu << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK (0x3u << I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK (0x1u << I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK (0x3Fu << I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
#define I40E_GLGEN_GPIO_SET 0x00088184
#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK (0x1Fu << I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK (0x1u << I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK (0x1u << I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
#define I40E_GLGEN_GPIO_STAT 0x0008817C
#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK (0x3FFFFFFFu << I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
#define I40E_GLGEN_GPIO_TRANSIT 0x00088180
#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK (0x3FFFFFFFu << I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
449#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */
450#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
451#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
452#define I40E_GLGEN_I2CCMD_DATA_MASK (0xFFFF << I40E_GLGEN_I2CCMD_DATA_SHIFT)
453#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
454#define I40E_GLGEN_I2CCMD_REGADD_MASK (0xFF << I40E_GLGEN_I2CCMD_REGADD_SHIFT)
455#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
456#define I40E_GLGEN_I2CCMD_PHYADD_MASK (0x7 << I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
457#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
458#define I40E_GLGEN_I2CCMD_OP_MASK (0x1 << I40E_GLGEN_I2CCMD_OP_SHIFT)
459#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
460#define I40E_GLGEN_I2CCMD_RESET_MASK (0x1 << I40E_GLGEN_I2CCMD_RESET_SHIFT)
461#define I40E_GLGEN_I2CCMD_R_SHIFT 29
462#define I40E_GLGEN_I2CCMD_R_MASK (0x1 << I40E_GLGEN_I2CCMD_R_SHIFT)
463#define I40E_GLGEN_I2CCMD_E_SHIFT 31
464#define I40E_GLGEN_I2CCMD_E_MASK (0x1 << I40E_GLGEN_I2CCMD_E_SHIFT)
465#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */
466#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
467#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
468#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK (0x1F << I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
469#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
470#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK (0x7 << I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
471#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
472#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
473#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
474#define I40E_GLGEN_I2CPARAMS_CLK_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
475#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
476#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
477#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
478#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
479#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
480#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
481#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
482#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
483#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
484#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
485#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
/* GLGEN: global general-purpose registers (I2C parameters, LED control,
 * MDIO control/select, MSCA MDIO command).  Masks whose shifted value
 * reaches bit 31 use unsigned constants: left-shifting a signed int into
 * the sign bit is undefined behavior in C (C11 6.5.7).
 */
#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK (0x1u << I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
#define I40E_GLGEN_LED_CTL 0x00088178
#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK (0x1 << I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */
#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK (0x1FFFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK (0x1 << I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK (0x3FFFu << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */
#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK (0x1u << I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */
#define I40E_GLGEN_MSCA_MAX_INDEX 3
#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
#define I40E_GLGEN_MSCA_MDIADD_MASK (0xFFFF << I40E_GLGEN_MSCA_MDIADD_SHIFT)
#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
#define I40E_GLGEN_MSCA_DEVADD_MASK (0x1F << I40E_GLGEN_MSCA_DEVADD_SHIFT)
#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
#define I40E_GLGEN_MSCA_PHYADD_MASK (0x1F << I40E_GLGEN_MSCA_PHYADD_SHIFT)
#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
#define I40E_GLGEN_MSCA_OPCODE_MASK (0x3 << I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
#define I40E_GLGEN_MSCA_STCODE_MASK (0x3 << I40E_GLGEN_MSCA_STCODE_SHIFT)
#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
#define I40E_GLGEN_MSCA_MDICMD_MASK (0x1 << I40E_GLGEN_MSCA_MDICMD_SHIFT)
#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK (0x1u << I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
/* GLGEN: MDIO read/write data, PCI function counters, PE enable,
 * device reset status/control/trigger, capability status, VFLR status,
 * and the global VF timer.  Masks reaching bit 31 use unsigned
 * constants to avoid signed-shift undefined behavior.
 */
#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */
#define I40E_GLGEN_MSRWD_MAX_INDEX 3
#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK (0xFFFFu << I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4
#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK (0x1F << I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK (0xFF << I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
#define I40E_GLGEN_PE_ENA 0x000B81A0
#define I40E_GLGEN_PE_ENA_PE_ENA_SHIFT 0
#define I40E_GLGEN_PE_ENA_PE_ENA_MASK (0x1 << I40E_GLGEN_PE_ENA_PE_ENA_SHIFT)
#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT 1
#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_MASK (0x3 << I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT)
#define I40E_GLGEN_RSTAT 0x000B8188
#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
#define I40E_GLGEN_RSTAT_DEVSTATE_MASK (0x3 << I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK (0x3 << I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
#define I40E_GLGEN_RSTAT_CORERCNT_MASK (0x3 << I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
#define I40E_GLGEN_RSTAT_EMPRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK (0x3F << I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
#define I40E_GLGEN_RSTCTL 0x000B8180
#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK (0x3F << I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
#define I40E_GLGEN_RSTENA_EMP 0x000B818C
#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
#define I40E_GLGEN_RTRIG 0x000B8190
#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
#define I40E_GLGEN_RTRIG_CORER_MASK (0x1 << I40E_GLGEN_RTRIG_CORER_SHIFT)
#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
#define I40E_GLGEN_RTRIG_GLOBR_MASK (0x1 << I40E_GLGEN_RTRIG_GLOBR_SHIFT)
#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
#define I40E_GLGEN_RTRIG_EMPFWR_MASK (0x1 << I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
#define I40E_GLGEN_STAT 0x000B612C
#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
#define I40E_GLGEN_STAT_HWRSVD0_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD0_SHIFT)
#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
#define I40E_GLGEN_STAT_DCBEN_MASK (0x1 << I40E_GLGEN_STAT_DCBEN_SHIFT)
#define I40E_GLGEN_STAT_VTEN_SHIFT 3
#define I40E_GLGEN_STAT_VTEN_MASK (0x1 << I40E_GLGEN_STAT_VTEN_SHIFT)
#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
#define I40E_GLGEN_STAT_FCOEN_MASK (0x1 << I40E_GLGEN_STAT_FCOEN_SHIFT)
#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
#define I40E_GLGEN_STAT_EVBEN_MASK (0x1 << I40E_GLGEN_STAT_EVBEN_SHIFT)
#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
#define I40E_GLGEN_STAT_HWRSVD1_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD1_SHIFT)
#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */
#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK (0xFFFFFFFF << I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
#define I40E_GLVFGEN_TIMER 0x000881BC
#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
#define I40E_GLVFGEN_TIMER_GTIME_MASK (0xFFFFFFFF << I40E_GLVFGEN_TIMER_GTIME_SHIFT)
/* Per-PF, per-port, per-VF and per-VSI general registers: software
 * resets, driver-unload flag, port numbering/state, and VF/VSI reset
 * status and trigger (arrays indexed by VF 0..127 or VSI 0..383).
 */
#define I40E_PFGEN_CTRL 0x00092400
#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
#define I40E_PFGEN_CTRL_PFSWR_MASK (0x1 << I40E_PFGEN_CTRL_PFSWR_SHIFT)
#define I40E_PFGEN_DRUN 0x00092500
#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
#define I40E_PFGEN_DRUN_DRVUNLD_MASK (0x1 << I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
#define I40E_PFGEN_PORTNUM 0x001C0480
#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
#define I40E_PFGEN_STATE 0x00088000
#define I40E_PFGEN_STATE_PFPEEN_SHIFT 0
#define I40E_PFGEN_STATE_PFPEEN_MASK (0x1 << I40E_PFGEN_STATE_PFPEEN_SHIFT)
#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
#define I40E_PFGEN_STATE_PFFCEN_MASK (0x1 << I40E_PFGEN_STATE_PFFCEN_SHIFT)
#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
#define I40E_PFGEN_STATE_PFLINKEN_MASK (0x1 << I40E_PFGEN_STATE_PFLINKEN_SHIFT)
#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
#define I40E_PFGEN_STATE_PFSCEN_MASK (0x1 << I40E_PFGEN_STATE_PFSCEN_SHIFT)
#define I40E_PRTGEN_CNF 0x000B8120
#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
#define I40E_PRTGEN_CNF_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
#define I40E_PRTGEN_CNF2 0x000B8160
#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK (0x1 << I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
#define I40E_PRTGEN_STATUS 0x000B8100
#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
#define I40E_PRTGEN_STATUS_PORT_VALID_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
#define I40E_VPGEN_VFRSTAT_VFRD_MASK (0x1 << I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
#define I40E_VPGEN_VFRTRIG_VFSWR_MASK (0x1 << I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */
#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
#define I40E_VSIGEN_RSTAT_VMRD_MASK (0x1 << I40E_VSIGEN_RSTAT_VMRD_SHIFT)
#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */
#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
#define I40E_VSIGEN_RTRIG_VMSWR_MASK (0x1 << I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
/* GLHMC: Host Memory Cache object base/count/size registers, PF scope
 * (per-PF arrays, _i=0...15): FCoE, FSI, LAN queues, protocol-engine
 * (PE) objects, PF assignment and segment-descriptor partitioning.
 * Reserved-field masks whose value sets bit 31 use unsigned constants
 * to avoid signed-shift undefined behavior.
 */
#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4))
#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_CEQPART_MAX_INDEX 15
#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK (0xFFFFF << I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010
#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK (0xF << I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK (0x7FFFFF << I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
#define I40E_GLHMC_FCOEFMAX 0x000C20D0
#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK (0xFFFF << I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018
#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK (0xF << I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
#define I40E_GLHMC_FCOEMAX 0x000C2014
#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK (0x1FFF << I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
#define I40E_GLHMC_FSIAVCNT_RSVD_MASK (0x7u << I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
#define I40E_GLHMC_FSIAVMAX 0x000C2068
#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK (0x1FFFF << I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064
#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK (0xF << I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK (0x1FFFFFFF << I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
#define I40E_GLHMC_FSIMCMAX 0x000C2060
#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK (0x3FFF << I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c
#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK (0xF << I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
#define I40E_GLHMC_LANQMAX 0x000C2008
#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK (0x7FF << I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK (0x7FF << I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
#define I40E_GLHMC_LANRXOBJSZ 0x000C200c
#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK (0xF << I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
#define I40E_GLHMC_LANTXBASE_RSVD_MASK (0xFFu << I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK (0x7FF << I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
#define I40E_GLHMC_LANTXOBJSZ 0x000C2004
#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK (0xF << I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
#define I40E_GLHMC_PEARPMAX 0x000C2038
#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK (0x1FFFF << I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
#define I40E_GLHMC_PEARPOBJSZ 0x000C2034
#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK (0x7 << I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
#define I40E_GLHMC_PECQOBJSZ 0x000C2020
#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK (0xF << I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c
#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK (0xF << I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
#define I40E_GLHMC_PEHTMAX 0x000C2030
#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK (0x1FFFFF << I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
#define I40E_GLHMC_PEMRMAX 0x000C2040
#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK (0x7FFFFF << I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
#define I40E_GLHMC_PEMROBJSZ 0x000C203c
#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK (0xF << I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
#define I40E_GLHMC_PEPBLMAX 0x000C206c
#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
#define I40E_GLHMC_PEQ1FLCNT(_i) (0x000C5500 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PEQ1FLCNT_MAX_INDEX 15
#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
#define I40E_GLHMC_PEQ1FLMAX 0x000C2058
#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
#define I40E_GLHMC_PEQ1MAX 0x000C2054
#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050
#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK (0xF << I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
#define I40E_GLHMC_PEQPOBJSZ 0x000C201c
#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK (0xF << I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
#define I40E_GLHMC_PESRQMAX 0x000C2028
#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK (0xFFFF << I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
#define I40E_GLHMC_PESRQOBJSZ 0x000C2024
#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK (0xF << I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
#define I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT 4
#define I40E_GLHMC_PESRQOBJSZ_RSVD_MASK (0xFFFFFFFu << I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT)
#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
#define I40E_GLHMC_PETIMERMAX 0x000C2084
#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080
#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK (0xF << I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
#define I40E_GLHMC_PEXFFLCNT(_i) (0x000C5100 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PEXFFLCNT_MAX_INDEX 15
#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT)
#define I40E_GLHMC_PEXFFLMAX 0x000C204c
#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
#define I40E_GLHMC_PEXFMAX 0x000C2048
#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
#define I40E_GLHMC_PEXFOBJSZ 0x000C2044
#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK (0xF << I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
#define I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT 4
#define I40E_GLHMC_PEXFOBJSZ_RSVD_MASK (0xFFFFFFFu << I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT)
#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK (0xF << I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLHMC_SDPART_MAX_INDEX 15
#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
#define I40E_GLHMC_SDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
#define I40E_GLHMC_SDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
/* GLHMC: Host Memory Cache registers, VF scope (per-VF arrays,
 * _i=0...31): CEQ/CQ/QP partitioning, FSI, page-descriptor invalidate,
 * and protocol-engine (PE) object base/count registers.  The reserved
 * field reaching bit 31 uses an unsigned constant to avoid
 * signed-shift undefined behavior.
 */
#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4))
#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
#define I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT 29
#define I40E_GLHMC_VFFSIAVCNT_RSVD_MASK (0x7u << I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT)
#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK (0xFFF << I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK (0x1FF << I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
#define I40E_GLHMC_VFPEQ1FLCNT(_i) (0x000Cd500 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLHMC_VFPEQ1FLCNT_MAX_INDEX 31
#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */
1024#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
1025#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
1026#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
1027#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */
1028#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
1029#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
1030#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
1031#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */
1032#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
1033#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
1034#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
1035#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */
1036#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
1037#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
1038#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
1039#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */
1040#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
1041#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
1042#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
1043#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */
1044#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
1045#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
1046#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
1047#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */
1048#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
1049#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
1050#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
1051#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */
1052#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
1053#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
1054#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
1055#define I40E_GLHMC_VFPEXFFLCNT(_i) (0x000Cd100 + ((_i) * 4)) /* _i=0...31 */
1056#define I40E_GLHMC_VFPEXFFLCNT_MAX_INDEX 31
1057#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
1058#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT)
1059#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */
1060#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
1061#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
1062#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
1063#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
1064#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
/*
 * PF HMC control registers (error reporting, page-descriptor
 * invalidate, segment-descriptor command/data) and the GL_UFUSE fuse
 * register.  Single-instance registers: plain offsets plus shift/mask
 * pairs for each bit field.  Mask constants are unsigned so the
 * shift-by-31 masks avoid undefined behavior on a signed int.
 */
#define I40E_PFHMC_ERRORDATA 0x000C0500
#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK (0x3FFFFFFFu << I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
#define I40E_PFHMC_ERRORINFO 0x000C0400
#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK (0x1Fu << I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK (0x1u << I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK (0xFu << I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK (0x1Fu << I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK (0x1u << I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
#define I40E_PFHMC_PDINV 0x000C0300
#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
#define I40E_PFHMC_PDINV_PMSDIDX_MASK (0xFFFu << I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
#define I40E_PFHMC_PDINV_PMPDIDX_MASK (0x1FFu << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
#define I40E_PFHMC_SDCMD 0x000C0000
#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
#define I40E_PFHMC_SDCMD_PMSDIDX_MASK (0xFFFu << I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
#define I40E_PFHMC_SDCMD_PMSDWR_MASK (0x1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
#define I40E_PFHMC_SDDATAHIGH 0x000C0200
#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFFu << I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
#define I40E_PFHMC_SDDATALOW 0x000C0100
#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK (0x1u << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK (0x1u << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK (0x3FFu << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK (0xFFFFFu << I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
#define I40E_GL_UFUSE 0x00094008
#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK (0x1u << I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
#define I40E_GL_UFUSE_NIC_ID_MASK (0x1u << I40E_GL_UFUSE_NIC_ID_SHIFT)
#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK (0x1u << I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK (0x1u << I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
/*
 * EMP interrupt GPIO enable: one enable bit per GPIO line (bits 0..29
 * map directly to GPIO0..GPIO29), followed by the per-PF port/MDIO
 * number register.  Mask constants are unsigned for consistency with
 * the other field masks in this file.
 */
#define I40E_EMPINT_GPIO_ENA 0x00088188
#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK (0x1u << I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100
#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK (0x3u << I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK (0x1u << I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
/*
 * PF interrupt cause-control and dynamic-control registers.
 * AEQCTL/CEQCTL route admin/completion event-queue causes to an MSI-X
 * vector; DYN_CTL0 is the control for vector 0, DYN_CTLN(_INTPF) the
 * array for the remaining PF vectors (_INTPF = 0..511, stride 4).
 * Mask constants are unsigned so the bit-31 masks (INTEVENT,
 * INTENA_MSK) are well-defined.
 */
#define I40E_PFINT_AEQCTL 0x00038700
#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK (0xFFu << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
#define I40E_PFINT_AEQCTL_ITR_INDX_MASK (0x3u << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK (0x7u << I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK (0x1u << I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
#define I40E_PFINT_AEQCTL_INTEVENT_MASK (0x1u << I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */
#define I40E_PFINT_CEQCTL_MAX_INDEX 511
#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK (0xFFu << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
#define I40E_PFINT_CEQCTL_ITR_INDX_MASK (0x3u << I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK (0x7u << I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK (0x7FFu << I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK (0x3u << I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK (0x1u << I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
#define I40E_PFINT_CEQCTL_INTEVENT_MASK (0x1u << I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
#define I40E_PFINT_DYN_CTL0 0x00038480
#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
#define I40E_PFINT_DYN_CTL0_INTENA_MASK (0x1u << I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK (0x1u << I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1u << I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK (0x3u << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK (0xFFFu << I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1u << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3u << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK (0x1u << I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */
#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1u << I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK (0x1u << I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1u << I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3u << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK (0xFFFu << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1u << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3u << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK (0x1u << I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
/*
 * PF interrupt GPIO enable: one enable bit per GPIO line (bits 0..29
 * map directly to GPIO0..GPIO29); same layout as EMPINT_GPIO_ENA.
 */
#define I40E_PFINT_GPIO_ENA 0x00088080
#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK (0x1u << I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
/*
 * PF interrupt cause (ICR0) and its enable register (ICR0_ENA, same
 * bit layout for the enableable causes), followed by the interrupt
 * throttle (ITR), linked-list head (LNKLST) and rate-limit (RATE)
 * registers for vector 0 and the per-vector arrays.  Mask constants
 * are unsigned so the bit-31 masks (SWINT, RSVD) are well-defined.
 */
#define I40E_PFINT_ICR0 0x00038780
#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
#define I40E_PFINT_ICR0_INTEVENT_MASK (0x1u << I40E_PFINT_ICR0_INTEVENT_SHIFT)
#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
#define I40E_PFINT_ICR0_QUEUE_0_MASK (0x1u << I40E_PFINT_ICR0_QUEUE_0_SHIFT)
#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
#define I40E_PFINT_ICR0_QUEUE_1_MASK (0x1u << I40E_PFINT_ICR0_QUEUE_1_SHIFT)
#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
#define I40E_PFINT_ICR0_QUEUE_2_MASK (0x1u << I40E_PFINT_ICR0_QUEUE_2_SHIFT)
#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
#define I40E_PFINT_ICR0_QUEUE_3_MASK (0x1u << I40E_PFINT_ICR0_QUEUE_3_SHIFT)
#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
#define I40E_PFINT_ICR0_QUEUE_4_MASK (0x1u << I40E_PFINT_ICR0_QUEUE_4_SHIFT)
#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
#define I40E_PFINT_ICR0_QUEUE_5_MASK (0x1u << I40E_PFINT_ICR0_QUEUE_5_SHIFT)
#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
#define I40E_PFINT_ICR0_QUEUE_6_MASK (0x1u << I40E_PFINT_ICR0_QUEUE_6_SHIFT)
#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
#define I40E_PFINT_ICR0_QUEUE_7_MASK (0x1u << I40E_PFINT_ICR0_QUEUE_7_SHIFT)
#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
#define I40E_PFINT_ICR0_ECC_ERR_MASK (0x1u << I40E_PFINT_ICR0_ECC_ERR_SHIFT)
#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
#define I40E_PFINT_ICR0_MAL_DETECT_MASK (0x1u << I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
#define I40E_PFINT_ICR0_GRST_SHIFT 20
#define I40E_PFINT_ICR0_GRST_MASK (0x1u << I40E_PFINT_ICR0_GRST_SHIFT)
#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK (0x1u << I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
#define I40E_PFINT_ICR0_GPIO_SHIFT 22
#define I40E_PFINT_ICR0_GPIO_MASK (0x1u << I40E_PFINT_ICR0_GPIO_SHIFT)
#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1u << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1u << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1u << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
#define I40E_PFINT_ICR0_HMC_ERR_MASK (0x1u << I40E_PFINT_ICR0_HMC_ERR_SHIFT)
#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
#define I40E_PFINT_ICR0_PE_CRITERR_MASK (0x1u << I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
#define I40E_PFINT_ICR0_VFLR_SHIFT 29
#define I40E_PFINT_ICR0_VFLR_MASK (0x1u << I40E_PFINT_ICR0_VFLR_SHIFT)
#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
#define I40E_PFINT_ICR0_ADMINQ_MASK (0x1u << I40E_PFINT_ICR0_ADMINQ_SHIFT)
#define I40E_PFINT_ICR0_SWINT_SHIFT 31
#define I40E_PFINT_ICR0_SWINT_MASK (0x1u << I40E_PFINT_ICR0_SWINT_SHIFT)
#define I40E_PFINT_ICR0_ENA 0x00038800
#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK (0x1u << I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK (0x1u << I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
#define I40E_PFINT_ICR0_ENA_GRST_MASK (0x1u << I40E_PFINT_ICR0_ENA_GRST_SHIFT)
#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK (0x1u << I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1u << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1u << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1u << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1u << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK (0x1u << I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK (0x1u << I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
#define I40E_PFINT_ICR0_ENA_VFLR_MASK (0x1u << I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK (0x1u << I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
#define I40E_PFINT_ICR0_ENA_RSVD_MASK (0x1u << I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */
#define I40E_PFINT_ITR0_MAX_INDEX 2
#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
#define I40E_PFINT_ITR0_INTERVAL_MASK (0xFFFu << I40E_PFINT_ITR0_INTERVAL_SHIFT)
#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4))
#define I40E_PFINT_ITRN_MAX_INDEX 2
#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
#define I40E_PFINT_ITRN_INTERVAL_MASK (0xFFFu << I40E_PFINT_ITRN_INTERVAL_SHIFT)
#define I40E_PFINT_LNKLST0 0x00038500
#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FFu << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3u << I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */
#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FFu << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3u << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
#define I40E_PFINT_RATE0 0x00038580
#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
#define I40E_PFINT_RATE0_INTERVAL_MASK (0x3Fu << I40E_PFINT_RATE0_INTERVAL_SHIFT)
#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
#define I40E_PFINT_RATE0_INTRL_ENA_MASK (0x1u << I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */
#define I40E_PFINT_RATEN_MAX_INDEX 511
#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
#define I40E_PFINT_RATEN_INTERVAL_MASK (0x3Fu << I40E_PFINT_RATEN_INTERVAL_SHIFT)
#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
#define I40E_PFINT_RATEN_INTRL_ENA_MASK (0x1u << I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
#define I40E_PFINT_STAT_CTL0 0x00038400
#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3u << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
/*
 * Per-queue interrupt cause-control registers: RQCTL for RX queues,
 * TQCTL for TX queues (_Q = queue index, 0..1535; stride 4).  Both
 * share the same field layout.  Mask constants are unsigned so the
 * bit-31 INTEVENT mask is well-defined.
 */
#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */
#define I40E_QINT_RQCTL_MAX_INDEX 1535
#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
#define I40E_QINT_RQCTL_MSIX_INDX_MASK (0xFFu << I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
#define I40E_QINT_RQCTL_ITR_INDX_MASK (0x3u << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
#define I40E_QINT_RQCTL_MSIX0_INDX_MASK (0x7u << I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK (0x7FFu << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK (0x3u << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
#define I40E_QINT_RQCTL_CAUSE_ENA_MASK (0x1u << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
#define I40E_QINT_RQCTL_INTEVENT_MASK (0x1u << I40E_QINT_RQCTL_INTEVENT_SHIFT)
#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */
#define I40E_QINT_TQCTL_MAX_INDEX 1535
#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
#define I40E_QINT_TQCTL_MSIX_INDX_MASK (0xFFu << I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
#define I40E_QINT_TQCTL_ITR_INDX_MASK (0x3u << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
#define I40E_QINT_TQCTL_MSIX0_INDX_MASK (0x7u << I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK (0x7FFu << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK (0x3u << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
#define I40E_QINT_TQCTL_CAUSE_ENA_MASK (0x1u << I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
#define I40E_QINT_TQCTL_INTEVENT_MASK (0x1u << I40E_QINT_TQCTL_INTEVENT_SHIFT)
1436#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */
1437#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
1438#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
1439#define I40E_VFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
1440#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
1441#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
1442#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
1443#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
1444#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
1445#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
1446#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
1447#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
1448#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
1449#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
1450#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
1451#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
1452#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
1453#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
1454#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */
1455#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
1456#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
1457#define I40E_VFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
1458#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
1459#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
1460#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
1461#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
1462#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
1463#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
1464#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
1465#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
1466#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
1467#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
1468#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
1469#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
/* 0x1u: left-shifting a signed 0x1 by 31 is undefined behavior (C11 6.5.7). */
#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK (0x1u << I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
1472#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */
1473#define I40E_VFINT_ICR0_MAX_INDEX 127
1474#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
1475#define I40E_VFINT_ICR0_INTEVENT_MASK (0x1 << I40E_VFINT_ICR0_INTEVENT_SHIFT)
1476#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
1477#define I40E_VFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_0_SHIFT)
1478#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
1479#define I40E_VFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_1_SHIFT)
1480#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
1481#define I40E_VFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_2_SHIFT)
1482#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
1483#define I40E_VFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_3_SHIFT)
1484#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
1485#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
1486#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
1487#define I40E_VFINT_ICR0_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ADMINQ_SHIFT)
#define I40E_VFINT_ICR0_SWINT_SHIFT 31
/* 0x1u: left-shifting a signed 0x1 by 31 is undefined behavior (C11 6.5.7). */
#define I40E_VFINT_ICR0_SWINT_MASK (0x1u << I40E_VFINT_ICR0_SWINT_SHIFT)
1490#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */
1491#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
1492#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
1493#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
1494#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
1495#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
/* 0x1u: left-shifting a signed 0x1 by 31 is undefined behavior (C11 6.5.7). */
#define I40E_VFINT_ICR0_ENA_RSVD_MASK (0x1u << I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
1498#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */
1499#define I40E_VFINT_ITR0_MAX_INDEX 2
1500#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
1501#define I40E_VFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR0_INTERVAL_SHIFT)
1502#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4))
1503#define I40E_VFINT_ITRN_MAX_INDEX 2
1504#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
1505#define I40E_VFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN_INTERVAL_SHIFT)
1506#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */
1507#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
1508#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
1509#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
1510#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */
1511#define I40E_VPINT_AEQCTL_MAX_INDEX 127
1512#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
1513#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
1514#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
1515#define I40E_VPINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
1516#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
1517#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
1518#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
1519#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
/* 0x1u: left-shifting a signed 0x1 by 31 is undefined behavior (C11 6.5.7). */
#define I40E_VPINT_AEQCTL_INTEVENT_MASK (0x1u << I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
1522#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */
1523#define I40E_VPINT_CEQCTL_MAX_INDEX 511
1524#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
1525#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
1526#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
1527#define I40E_VPINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
1528#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
1529#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
1530#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
1531#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
1532#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
1533#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
1534#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
1535#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
/* 0x1u: left-shifting a signed 0x1 by 31 is undefined behavior (C11 6.5.7). */
#define I40E_VPINT_CEQCTL_INTEVENT_MASK (0x1u << I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
1538#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */
1539#define I40E_VPINT_LNKLST0_MAX_INDEX 127
1540#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
1541#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
1542#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
1543#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
1544#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */
1545#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
1546#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
1547#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
1548#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
1549#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
1550#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */
1551#define I40E_VPINT_RATE0_MAX_INDEX 127
1552#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
1553#define I40E_VPINT_RATE0_INTERVAL_MASK (0x3F << I40E_VPINT_RATE0_INTERVAL_SHIFT)
1554#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
1555#define I40E_VPINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
1556#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */
1557#define I40E_VPINT_RATEN_MAX_INDEX 511
1558#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
1559#define I40E_VPINT_RATEN_INTERVAL_MASK (0x3F << I40E_VPINT_RATEN_INTERVAL_SHIFT)
1560#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
1561#define I40E_VPINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
1562#define I40E_GL_RDPU_CNTRL 0x00051060
1563#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
1564#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK (0x1 << I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
/* 0x7FFFFFFFu: (0x7FFFFFFF << 1) overflows signed int — undefined behavior
 * (C11 6.5.7); the unsigned shift yields the intended 0xFFFFFFFEu. */
#define I40E_GL_RDPU_CNTRL_ECO_MASK (0x7FFFFFFFu << I40E_GL_RDPU_CNTRL_ECO_SHIFT)
1567#define I40E_GLLAN_RCTL_0 0x0012A500
1568#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
1569#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK (0x1 << I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
1570#define I40E_GLLAN_TSOMSK_F 0x000442D8
1571#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
1572#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK (0xFFF << I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
1573#define I40E_GLLAN_TSOMSK_L 0x000442E0
1574#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
1575#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK (0xFFF << I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
1576#define I40E_GLLAN_TSOMSK_M 0x000442DC
1577#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
1578#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
1579#define I40E_PFLAN_QALLOC 0x001C0400
1580#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
1581#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
1582#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
1583#define I40E_PFLAN_QALLOC_LASTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_LASTQ_SHIFT)
#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
/* 0x1u: left-shifting a signed 0x1 by 31 is undefined behavior (C11 6.5.7). */
#define I40E_PFLAN_QALLOC_VALID_MASK (0x1u << I40E_PFLAN_QALLOC_VALID_SHIFT)
1586#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */
1587#define I40E_QRX_ENA_MAX_INDEX 1535
1588#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
1589#define I40E_QRX_ENA_QENA_REQ_MASK (0x1 << I40E_QRX_ENA_QENA_REQ_SHIFT)
1590#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
1591#define I40E_QRX_ENA_FAST_QDIS_MASK (0x1 << I40E_QRX_ENA_FAST_QDIS_SHIFT)
1592#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
1593#define I40E_QRX_ENA_QENA_STAT_MASK (0x1 << I40E_QRX_ENA_QENA_STAT_SHIFT)
1594#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */
1595#define I40E_QRX_TAIL_MAX_INDEX 1535
1596#define I40E_QRX_TAIL_TAIL_SHIFT 0
1597#define I40E_QRX_TAIL_TAIL_MASK (0x1FFF << I40E_QRX_TAIL_TAIL_SHIFT)
1598#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */
1599#define I40E_QTX_CTL_MAX_INDEX 1535
1600#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
1601#define I40E_QTX_CTL_PFVF_Q_MASK (0x3 << I40E_QTX_CTL_PFVF_Q_SHIFT)
1602#define I40E_QTX_CTL_PF_INDX_SHIFT 2
1603#define I40E_QTX_CTL_PF_INDX_MASK (0xF << I40E_QTX_CTL_PF_INDX_SHIFT)
1604#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
1605#define I40E_QTX_CTL_VFVM_INDX_MASK (0x1FF << I40E_QTX_CTL_VFVM_INDX_SHIFT)
1606#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */
1607#define I40E_QTX_ENA_MAX_INDEX 1535
1608#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
1609#define I40E_QTX_ENA_QENA_REQ_MASK (0x1 << I40E_QTX_ENA_QENA_REQ_SHIFT)
1610#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
1611#define I40E_QTX_ENA_FAST_QDIS_MASK (0x1 << I40E_QTX_ENA_FAST_QDIS_SHIFT)
1612#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
1613#define I40E_QTX_ENA_QENA_STAT_MASK (0x1 << I40E_QTX_ENA_QENA_STAT_SHIFT)
1614#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */
1615#define I40E_QTX_HEAD_MAX_INDEX 1535
1616#define I40E_QTX_HEAD_HEAD_SHIFT 0
1617#define I40E_QTX_HEAD_HEAD_MASK (0x1FFF << I40E_QTX_HEAD_HEAD_SHIFT)
1618#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
1619#define I40E_QTX_HEAD_RS_PENDING_MASK (0x1 << I40E_QTX_HEAD_RS_PENDING_SHIFT)
1620#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */
1621#define I40E_QTX_TAIL_MAX_INDEX 1535
1622#define I40E_QTX_TAIL_TAIL_SHIFT 0
1623#define I40E_QTX_TAIL_TAIL_MASK (0x1FFF << I40E_QTX_TAIL_TAIL_SHIFT)
1624#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */
1625#define I40E_VPLAN_MAPENA_MAX_INDEX 127
1626#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
1627#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK (0x1 << I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
1628#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
1629#define I40E_VPLAN_QTABLE_MAX_INDEX 15
1630#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
1631#define I40E_VPLAN_QTABLE_QINDEX_MASK (0x7FF << I40E_VPLAN_QTABLE_QINDEX_SHIFT)
1632#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */
1633#define I40E_VSILAN_QBASE_MAX_INDEX 383
1634#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
1635#define I40E_VSILAN_QBASE_VSIBASE_MASK (0x7FF << I40E_VSILAN_QBASE_VSIBASE_SHIFT)
1636#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
1637#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
1638#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4))
1639#define I40E_VSILAN_QTABLE_MAX_INDEX 15
1640#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
1641#define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
1642#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
1643#define I40E_VSILAN_QTABLE_QINDEX_1_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
1644#define I40E_PRTGL_SAH 0x001E2140
1645#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
1646#define I40E_PRTGL_SAH_FC_SAH_MASK (0xFFFF << I40E_PRTGL_SAH_FC_SAH_SHIFT)
#define I40E_PRTGL_SAH_MFS_SHIFT 16
/* 0xFFFFu: (0xFFFF << 16) does not fit in signed int — undefined behavior
 * (C11 6.5.7); the unsigned shift yields the intended 0xFFFF0000u. */
#define I40E_PRTGL_SAH_MFS_MASK (0xFFFFu << I40E_PRTGL_SAH_MFS_SHIFT)
1649#define I40E_PRTGL_SAL 0x001E2120
1650#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
1651#define I40E_PRTGL_SAL_FC_SAL_MASK (0xFFFFFFFF << I40E_PRTGL_SAL_FC_SAL_SHIFT)
1652#define I40E_PRTMAC_HLCTLA 0x001E4760
1653#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT 0
1654#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT)
1655#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT 1
1656#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_MASK (0x1 << I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT)
1657#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT 2
1658#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT)
1659#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT 4
1660#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT)
1661#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT 7
1662#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT)
1663#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP 0x001E3130
1664#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT 0
1665#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT)
1666#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP 0x001E3290
1667#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT 0
1668#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT)
1669#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP 0x001E3310
1670#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT 0
1671#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT)
1672#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP 0x001E3100
1673#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT 0
1674#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT)
1675#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP 0x001E3280
1676#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT 0
1677#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT)
1678#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP 0x001E3300
1679#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT 0
1680#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT)
1681#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0
1682#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
1683#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
1684#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260
1685#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
1686#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
1687#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0
1688#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
1689#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
1690#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360
1691#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
1692#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
1693#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110
1694#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
1695#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
1696#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120
1697#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
1698#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
1699#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0
1700#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
1701#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
1702#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140
1703#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
1704#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
1705#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150
1706#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
1707#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
1708#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE 0x001E3000
1709#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT 0
1710#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT)
1711#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0
1712#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
1713#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
1714#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16))
1715#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
1716#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
1717#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
1718#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16))
1719#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
1720#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
1721#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
1722#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0
1723#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
1724#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
1725#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0
1726#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
1727#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
1728#define I40E_PRTMAC_HSECTL1 0x001E3560
1729#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT 0
1730#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT)
1731#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT 3
1732#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT)
1733#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT 4
1734#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT)
1735#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT 7
1736#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT)
1737#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT 30
1738#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT)
#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT 31
/* 0x1u: left-shifting a signed 0x1 by 31 is undefined behavior (C11 6.5.7). */
#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_MASK (0x1u << I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT)
1741#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480
1742#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
1743#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
1744#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
1745#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
1746#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
1747#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
1748#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
1749#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
1750#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
1751#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
1752#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
1753#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
1754#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
1755#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
1756#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
1757#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
1758#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484
1759#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
1760#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
1761#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
1762#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
1763#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
1764#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
1765#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
1766#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
1767#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
1768#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
1769#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
1770#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
1771#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
1772#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
1773#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
1774#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
1775#define I40E_GL_MNG_FWSM 0x000B6134
1776#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
1777#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x3FF << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
1778#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
1779#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
1780#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
1781#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
1782#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
1783#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
1784#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
1785#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
1786#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
1787#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
1788#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
1789#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
1790#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
1791#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
1792#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
1793#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
1794#define I40E_GL_MNG_HWARB_CTRL 0x000B6130
1795#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
1796#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK (0x1 << I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
1797#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */
1798#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
1799#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
1800#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
1801#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260
1802#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
1803#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK (0xFF << I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
1804#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */
1805#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
1806#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
1807#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK (0xFFFF << I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
1808#define I40E_PRT_MNG_MANC 0x00256A20
1809#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
1810#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
1811#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
1812#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
1813#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
1814#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
1815#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
1816#define I40E_PRT_MNG_MANC_RCV_ALL_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
1817#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
1818#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
1819#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
1820#define I40E_PRT_MNG_MANC_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
1821#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
1822#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
1823#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
1824#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
1825#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */
1826#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
1827#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
1828#define I40E_PRT_MNG_MAVTV_VID_MASK (0xFFF << I40E_PRT_MNG_MAVTV_VID_SHIFT)
1829#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32))
1830#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
1831#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
1832#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
1833#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
1834#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
1835#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
1836#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK (0xFF << I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
1837#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
1838#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
1839#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
1840#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
1841#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
1842#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
1843#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
1844#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
1845#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
1846#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
1847#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
1848#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
1849#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
1850#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
1851#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
1852#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
1853#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
1854#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
/* 0x1u: left-shifting a signed 0x1 by 31 is undefined behavior (C11 6.5.7). */
#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK (0x1u << I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
1857#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32))
1858#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
1859#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
1860#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
1861#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
1862#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
1863#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
1864#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK (0xFFFF << I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
1865#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
1866#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
1867#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
1868#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
1869#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
1870#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
1871#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
1872#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
1873#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
1874#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
1875#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
1876#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
1877#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
1878#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
/* 0x1u: left-shifting a signed 0x1 by 31 is undefined behavior (C11 6.5.7). */
#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK (0x1u << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
1881#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */
1882#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
1883#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
1884#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
/* 0xFFFFu: (0xFFFF << 16) does not fit in signed int — undefined behavior
 * (C11 6.5.7); the unsigned shift yields the intended 0xFFFF0000u. */
#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK (0xFFFFu << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
1887#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */
1888#define I40E_PRT_MNG_METF_MAX_INDEX 3
1889#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
1890#define I40E_PRT_MNG_METF_ETYPE_MASK (0xFFFF << I40E_PRT_MNG_METF_ETYPE_SHIFT)
1891#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
1892#define I40E_PRT_MNG_METF_POLARITY_MASK (0x1 << I40E_PRT_MNG_METF_POLARITY_SHIFT)
1893#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */
1894#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
1895#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
1896#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK (0xFFFF << I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
1897#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
1898#define I40E_PRT_MNG_MFUTP_UDP_MASK (0x1 << I40E_PRT_MNG_MFUTP_UDP_SHIFT)
1899#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
1900#define I40E_PRT_MNG_MFUTP_TCP_MASK (0x1 << I40E_PRT_MNG_MFUTP_TCP_SHIFT)
1901#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
1902#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK (0x1 << I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
1903#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */
1904#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
1905#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
1906#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
1907#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */
1908#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
1909#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
1910#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
1911#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */
1912#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
1913#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
1914#define I40E_PRT_MNG_MMAH_MMAH_MASK (0xFFFF << I40E_PRT_MNG_MMAH_MMAH_SHIFT)
1915#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */
1916#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
1917#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
1918#define I40E_PRT_MNG_MMAL_MMAL_MASK (0xFFFFFFFF << I40E_PRT_MNG_MMAL_MMAL_SHIFT)
1919#define I40E_PRT_MNG_MNGONLY 0x00256A60
1920#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
1921#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK (0xFF << I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
1922#define I40E_PRT_MNG_MSFM 0x00256AA0
1923#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
1924#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
1925#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
1926#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
1927#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
1928#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
1929#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
1930#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
1931#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
1932#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
1933#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
1934#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
1935#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
1936#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
1937#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
1938#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
/* PF and VF MSI-X table registers: pending-bit array, table address
 * low/high, message data and per-vector control.  The TADD address field
 * mask uses an unsigned literal: 0x3FFFFFFF << 2 would overflow signed int.
 */
#define I40E_MSIX_PBA(_i) (0x00004900 + ((_i) * 4)) /* _i=0...5 */
#define I40E_MSIX_PBA_MAX_INDEX 5
#define I40E_MSIX_PBA_PENBIT_SHIFT 0
#define I40E_MSIX_PBA_PENBIT_MASK (0xFFFFFFFFu << I40E_MSIX_PBA_PENBIT_SHIFT)
#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */
#define I40E_MSIX_TADD_MAX_INDEX 128
#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
#define I40E_MSIX_TADD_MSIXTADD10_MASK (0x3u << I40E_MSIX_TADD_MSIXTADD10_SHIFT)
#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
#define I40E_MSIX_TADD_MSIXTADD_MASK (0x3FFFFFFFu << I40E_MSIX_TADD_MSIXTADD_SHIFT)
#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */
#define I40E_MSIX_TMSG_MAX_INDEX 128
#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
#define I40E_MSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFFu << I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */
#define I40E_MSIX_TUADD_MAX_INDEX 128
#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
#define I40E_MSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFFu << I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */
#define I40E_MSIX_TVCTRL_MAX_INDEX 128
#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
#define I40E_MSIX_TVCTRL_MASK_MASK (0x1u << I40E_MSIX_TVCTRL_MASK_SHIFT)
#define I40E_VFMSIX_PBA1(_i) (0x00004944 + ((_i) * 4)) /* _i=0...19 */
#define I40E_VFMSIX_PBA1_MAX_INDEX 19
#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
#define I40E_VFMSIX_PBA1_PENBIT_MASK (0xFFFFFFFFu << I40E_VFMSIX_PBA1_PENBIT_SHIFT)
#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */
#define I40E_VFMSIX_TADD1_MAX_INDEX 639
#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK (0x3u << I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
#define I40E_VFMSIX_TADD1_MSIXTADD_MASK (0x3FFFFFFFu << I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */
#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK (0xFFFFFFFFu << I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */
#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK (0xFFFFFFFFu << I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */
#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
#define I40E_VFMSIX_TVCTRL1_MASK_MASK (0x1u << I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
/* Global NVM registers: flash access (FLA), flash ID, general status
 * (GENS), sector protection, and shadow-RAM control/data.  Unsigned
 * literals keep bit-31 masks (e.g. FL_DER, SRCTL_DONE) out of
 * signed-shift undefined behavior.
 */
#define I40E_GLNVM_FLA 0x000B6108
#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
#define I40E_GLNVM_FLA_FL_SCK_MASK (0x1u << I40E_GLNVM_FLA_FL_SCK_SHIFT)
#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
#define I40E_GLNVM_FLA_FL_CE_MASK (0x1u << I40E_GLNVM_FLA_FL_CE_SHIFT)
#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
#define I40E_GLNVM_FLA_FL_SI_MASK (0x1u << I40E_GLNVM_FLA_FL_SI_SHIFT)
#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
#define I40E_GLNVM_FLA_FL_SO_MASK (0x1u << I40E_GLNVM_FLA_FL_SO_SHIFT)
#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
#define I40E_GLNVM_FLA_FL_REQ_MASK (0x1u << I40E_GLNVM_FLA_FL_REQ_SHIFT)
#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
#define I40E_GLNVM_FLA_FL_GNT_MASK (0x1u << I40E_GLNVM_FLA_FL_GNT_SHIFT)
#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
#define I40E_GLNVM_FLA_LOCKED_MASK (0x1u << I40E_GLNVM_FLA_LOCKED_SHIFT)
#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
#define I40E_GLNVM_FLA_FL_SADDR_MASK (0x7FFu << I40E_GLNVM_FLA_FL_SADDR_SHIFT)
#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
#define I40E_GLNVM_FLA_FL_BUSY_MASK (0x1u << I40E_GLNVM_FLA_FL_BUSY_SHIFT)
#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
#define I40E_GLNVM_FLA_FL_DER_MASK (0x1u << I40E_GLNVM_FLA_FL_DER_SHIFT)
#define I40E_GLNVM_FLASHID 0x000B6104
#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
#define I40E_GLNVM_FLASHID_FLASHID_MASK (0xFFFFFFu << I40E_GLNVM_FLASHID_FLASHID_SHIFT)
#define I40E_GLNVM_GENS 0x000B6100
#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
#define I40E_GLNVM_GENS_NVM_PRES_MASK (0x1u << I40E_GLNVM_GENS_NVM_PRES_SHIFT)
#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
#define I40E_GLNVM_GENS_SR_SIZE_MASK (0x7u << I40E_GLNVM_GENS_SR_SIZE_SHIFT)
#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
#define I40E_GLNVM_GENS_BANK1VAL_MASK (0x1u << I40E_GLNVM_GENS_BANK1VAL_SHIFT)
#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
#define I40E_GLNVM_GENS_ALT_PRST_MASK (0x1u << I40E_GLNVM_GENS_ALT_PRST_SHIFT)
#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK (0x1u << I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */
#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK (0xFFFFFFu << I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
#define I40E_GLNVM_SRCTL 0x000B6110
#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
#define I40E_GLNVM_SRCTL_SRBUSY_MASK (0x1u << I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
#define I40E_GLNVM_SRCTL_ADDR_MASK (0x7FFFu << I40E_GLNVM_SRCTL_ADDR_SHIFT)
#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
#define I40E_GLNVM_SRCTL_WRITE_MASK (0x1u << I40E_GLNVM_SRCTL_WRITE_SHIFT)
#define I40E_GLNVM_SRCTL_START_SHIFT 30
#define I40E_GLNVM_SRCTL_START_MASK (0x1u << I40E_GLNVM_SRCTL_START_SHIFT)
#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
#define I40E_GLNVM_SRCTL_DONE_MASK (0x1u << I40E_GLNVM_SRCTL_DONE_SHIFT)
#define I40E_GLNVM_SRDATA 0x000B6114
#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
#define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFFu << I40E_GLNVM_SRDATA_WRDATA_SHIFT)
#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
#define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFFu << I40E_GLNVM_SRDATA_RDDATA_SHIFT)
/* Global PCI registers, part 1: byte counters, capability control/support,
 * configuration words, default revision ID, and the GSCL_1/GSCL_2
 * statistic-counter control registers.  Masks at bits 30/31 (e.g.
 * LOAD_DEV_ID, GIO_COUNT_START) use unsigned literals to avoid
 * signed-shift UB.
 */
#define I40E_GLPCI_BYTCTH 0x0009C484
#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFFu << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
#define I40E_GLPCI_BYTCTL 0x0009C488
#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFFu << I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
#define I40E_GLPCI_CAPCTRL 0x000BE4A4
#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK (0x1u << I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
#define I40E_GLPCI_CAPSUP 0x000BE4A8
#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK (0x1u << I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
#define I40E_GLPCI_CAPSUP_LTR_EN_MASK (0x1u << I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
#define I40E_GLPCI_CAPSUP_TPH_EN_MASK (0x1u << I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
#define I40E_GLPCI_CAPSUP_ARI_EN_MASK (0x1u << I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
#define I40E_GLPCI_CAPSUP_IOV_EN_MASK (0x1u << I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
#define I40E_GLPCI_CAPSUP_ACS_EN_MASK (0x1u << I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
#define I40E_GLPCI_CAPSUP_SEC_EN_MASK (0x1u << I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK (0x1u << I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK (0x1u << I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
#define I40E_GLPCI_CAPSUP_IDO_EN_MASK (0x1u << I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK (0x1u << I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK (0x1u << I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK (0x1u << I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK (0x1u << I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
#define I40E_GLPCI_CNF 0x000BE4C0
#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
#define I40E_GLPCI_CNF_FLEX10_MASK (0x1u << I40E_GLPCI_CNF_FLEX10_SHIFT)
#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK (0x1u << I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
#define I40E_GLPCI_CNF2 0x000BE494
#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
#define I40E_GLPCI_CNF2_RO_DIS_MASK (0x1u << I40E_GLPCI_CNF2_RO_DIS_SHIFT)
#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK (0x1u << I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK (0x7FFu << I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK (0x7FFu << I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
#define I40E_GLPCI_DREVID 0x0009C480
#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK (0xFFu << I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
#define I40E_GLPCI_GSCL_1 0x0009C48C
#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK (0x1u << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK (0x1u << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK (0x1u << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK (0x1u << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK (0x1u << I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK (0x1u << I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK (0x1u << I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK (0x1u << I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK (0x1u << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK (0x1Fu << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK (0x1u << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK (0x1Fu << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK (0x1u << I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK (0x1u << I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK (0x1u << I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK (0x1u << I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
#define I40E_GLPCI_GSCL_2 0x0009C490
#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK (0xFFu << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK (0xFFu << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK (0xFFu << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK (0xFFu << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
/* Global PCI registers, part 2: statistic counters, BAR/link/power
 * configuration, serial number, subsystem ID and VF support.  The UPADD
 * address mask needs an unsigned literal: 0x7FFFFFFF << 1 overflows
 * signed int (undefined behavior).
 */
#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */
#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK (0xFFFFu << I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK (0xFFFFu << I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */
#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK (0xFFFFFFFFu << I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
#define I40E_GLPCI_LATCT 0x0009C4B4
#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK (0xFFFFFFFFu << I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
#define I40E_GLPCI_LBARCTRL 0x000BE484
#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK (0x1u << I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
#define I40E_GLPCI_LBARCTRL_BAR32_MASK (0x1u << I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK (0x1u << I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK (0x3u << I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK (0x7u << I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK (0x1u << I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK (0x7u << I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
#define I40E_GLPCI_LINKCAP 0x000BE4AC
#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK (0x3Fu << I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK (0x7u << I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK (0xFu << I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
#define I40E_GLPCI_PCIERR 0x000BE4FC
#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFFu << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
#define I40E_GLPCI_PKTCT 0x0009C4BC
#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFFu << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
#define I40E_GLPCI_PMSUP 0x000BE4B0
#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK (0x3u << I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK (0x7u << I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK (0x7u << I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK (0x7u << I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK (0x7u << I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK (0x1u << I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK (0x3u << I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
#define I40E_GLPCI_PWRDATA 0x000BE490
#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
#define I40E_GLPCI_PWRDATA_D0_POWER_MASK (0xFFu << I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK (0xFFu << I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
#define I40E_GLPCI_PWRDATA_D3_POWER_MASK (0xFFu << I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK (0x3u << I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
#define I40E_GLPCI_REVID 0x000BE4B4
#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
#define I40E_GLPCI_REVID_NVM_REVID_MASK (0xFFu << I40E_GLPCI_REVID_NVM_REVID_SHIFT)
#define I40E_GLPCI_SERH 0x000BE49C
#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
#define I40E_GLPCI_SERH_SER_NUM_H_MASK (0xFFFFu << I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
#define I40E_GLPCI_SERL 0x000BE498
#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
#define I40E_GLPCI_SERL_SER_NUM_L_MASK (0xFFFFFFFFu << I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
#define I40E_GLPCI_SUBSYSID 0x000BE48C
#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT 0
#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_MASK (0xFFFFu << I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT)
#define I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT 16
#define I40E_GLPCI_SUBSYSID_SUB_ID_MASK (0xFFFFu << I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT)
#define I40E_GLPCI_UPADD 0x000BE4F8
#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
#define I40E_GLPCI_UPADD_ADDRESS_MASK (0x7FFFFFFFu << I40E_GLPCI_UPADD_ADDRESS_SHIFT)
#define I40E_GLPCI_VFSUP 0x000BE4B8
#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK (0x1u << I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK (0x1u << I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
/* PF-scoped PCI registers: routing ID (bus/device/function), VF config
 * access (CIAA/CIAD), per-function class/config/power-state/disable
 * controls, device IDs, error cause/enable, and VM index/pending.
 */
#define I40E_PF_FUNC_RID 0x0009C000
#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK (0x7u << I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK (0x1Fu << I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK (0xFFu << I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
#define I40E_PF_PCI_CIAA 0x0009C080
#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
#define I40E_PF_PCI_CIAA_ADDRESS_MASK (0xFFFu << I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
#define I40E_PF_PCI_CIAA_VF_NUM_MASK (0x7Fu << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
#define I40E_PF_PCI_CIAD 0x0009C100
#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
#define I40E_PF_PCI_CIAD_DATA_MASK (0xFFFFFFFFu << I40E_PF_PCI_CIAD_DATA_SHIFT)
#define I40E_PFPCI_CLASS 0x000BE400
#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK (0x1u << I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
#define I40E_PFPCI_CNF 0x000BE000
#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
#define I40E_PFPCI_CNF_MSI_EN_MASK (0x1u << I40E_PFPCI_CNF_MSI_EN_SHIFT)
#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
#define I40E_PFPCI_CNF_EXROM_DIS_MASK (0x1u << I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
#define I40E_PFPCI_CNF_IO_BAR_MASK (0x1u << I40E_PFPCI_CNF_IO_BAR_SHIFT)
#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
#define I40E_PFPCI_CNF_INT_PIN_MASK (0x3u << I40E_PFPCI_CNF_INT_PIN_SHIFT)
#define I40E_PFPCI_FACTPS 0x0009C180
#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK (0x3u << I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK (0x1u << I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
#define I40E_PFPCI_FUNC 0x000BE200
#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
#define I40E_PFPCI_FUNC_FUNC_DIS_MASK (0x1u << I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK (0x1u << I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK (0x1u << I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
#define I40E_PFPCI_FUNC2 0x000BE180
#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK (0x1u << I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
#define I40E_PFPCI_ICAUSE 0x0009C200
#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK (0xFFFFFFFFu << I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
#define I40E_PFPCI_IENA 0x0009C280
#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK (0xFFFFFFFFu << I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
#define I40E_PFPCI_PFDEVID 0x000BE080
#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT 0
#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_MASK (0xFFFFu << I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT)
#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT 16
#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_MASK (0xFFFFu << I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT)
#define I40E_PFPCI_PM 0x000BE300
#define I40E_PFPCI_PM_PME_EN_SHIFT 0
#define I40E_PFPCI_PM_PME_EN_MASK (0x1u << I40E_PFPCI_PM_PME_EN_SHIFT)
#define I40E_PFPCI_STATUS1 0x000BE280
#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK (0x1u << I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
#define I40E_PFPCI_VFDEVID 0x000BE100
#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT 0
#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_MASK (0xFFFFu << I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT)
#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT 16
#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_MASK (0xFFFFu << I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT)
#define I40E_PFPCI_VMINDEX 0x0009C300
#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
#define I40E_PFPCI_VMINDEX_VMINDEX_MASK (0x1FFu << I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
#define I40E_PFPCI_VMPEND 0x0009C380
#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
#define I40E_PFPCI_VMPEND_PENDING_MASK (0x1u << I40E_PFPCI_VMPEND_PENDING_SHIFT)
/* Protocol engine (RDMA) registers: global PE CPU status, per-PF/per-VF
 * FLM object control and allocation-error counters, UDA control, and the
 * PF PE queue-pair/CQP doorbell and status registers.  Bit-31 masks
 * (VFUDAUCFBQPN_VALID, CCQPSTATUS_CCQP_ERR, CQPTAIL_CQP_OP_ERR) use
 * unsigned literals to avoid shifting into the sign bit of int.
 */
#define I40E_GLPE_CPUSTATUS0 0x0000D040
#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK (0xFFFFFFFFu << I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
#define I40E_GLPE_CPUSTATUS1 0x0000D044
#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK (0xFFFFFFFFu << I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
#define I40E_GLPE_CPUSTATUS2 0x0000D048
#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK (0xFFFFFFFFu << I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
#define I40E_GLPE_PFFLMOBJCTRL(_i) (0x0000D480 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLPE_PFFLMOBJCTRL_MAX_INDEX 15
#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7u << I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7u << I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7u << I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7u << I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFFu << I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFFu << I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK (0x1u << I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK (0x1u << I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK (0x1u << I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK (0x1u << I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK (0x1u << I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK (0x3FFFFu << I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK (0x1u << I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
#define I40E_PFPE_AEQALLOC 0x00131180
#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
#define I40E_PFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFFu << I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
#define I40E_PFPE_CCQPHIGH 0x00008200
#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFFu << I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
#define I40E_PFPE_CCQPLOW 0x00008180
#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFFu << I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
#define I40E_PFPE_CCQPSTATUS 0x00008100
#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1u << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1u << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
#define I40E_PFPE_CQACK 0x00131100
#define I40E_PFPE_CQACK_PECQID_SHIFT 0
#define I40E_PFPE_CQACK_PECQID_MASK (0x1FFFFu << I40E_PFPE_CQACK_PECQID_SHIFT)
#define I40E_PFPE_CQARM 0x00131080
#define I40E_PFPE_CQARM_PECQID_SHIFT 0
#define I40E_PFPE_CQARM_PECQID_MASK (0x1FFFFu << I40E_PFPE_CQARM_PECQID_SHIFT)
#define I40E_PFPE_CQPDB 0x00008000
#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
#define I40E_PFPE_CQPDB_WQHEAD_MASK (0x7FFu << I40E_PFPE_CQPDB_WQHEAD_SHIFT)
#define I40E_PFPE_CQPERRCODES 0x00008880
#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFFu << I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFFu << I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
#define I40E_PFPE_CQPTAIL 0x00008080
#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
#define I40E_PFPE_CQPTAIL_WQTAIL_MASK (0x7FFu << I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1u << I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980
#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFFu << I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
#define I40E_PFPE_FLMXMITALLOCERR 0x00008900
#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFFu << I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
#define I40E_PFPE_IPCONFIG0 0x00008280
#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFFu << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1u << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17
#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1u << I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)
#define I40E_PFPE_MRTEIDXMASK 0x00008600
#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1Fu << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680
#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFFu << I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
#define I40E_PFPE_TCPNOWTIMER 0x00008580
#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFFu << I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
#define I40E_PFPE_UDACTRL 0x00008700
#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK (0x1u << I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK (0x1u << I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
2399#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
2400#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
2401#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
2402#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
2403#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
2404#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
2405#define I40E_PFPE_UDAUCFBQPN 0x00008780
2406#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
2407#define I40E_PFPE_UDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
2408#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
2409#define I40E_PFPE_UDAUCFBQPN_VALID_MASK (0x1 << I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
2410#define I40E_PFPE_WQEALLOC 0x00138C00
2411#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
2412#define I40E_PFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
2413#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
2414#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
/*
 * Per-VF ProtocolEngine (VFPE) register arrays (one slot per VF, stride 4
 * bytes, indices 0..127) and their field shift/mask pairs.
 * NOTE(review): mask literals carry a 'u' suffix so bit-31 masks such as
 * CCQP_ERR/CQP_OP_ERR and 0xFFF << 20 are formed in unsigned arithmetic
 * (signed shift into the sign bit is UB, C11 6.5.7p4); values unchanged.
 */
#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
#define I40E_VFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFFu << I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFFu << I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFFu << I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1u << I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1u << I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VFPE_CQACK_MAX_INDEX 127
#define I40E_VFPE_CQACK_PECQID_SHIFT 0
#define I40E_VFPE_CQACK_PECQID_MASK (0x1FFFFu << I40E_VFPE_CQACK_PECQID_SHIFT)
#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VFPE_CQARM_MAX_INDEX 127
#define I40E_VFPE_CQARM_PECQID_SHIFT 0
#define I40E_VFPE_CQARM_PECQID_MASK (0x1FFFFu << I40E_VFPE_CQARM_PECQID_SHIFT)
#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VFPE_CQPDB_MAX_INDEX 127
#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
#define I40E_VFPE_CQPDB_WQHEAD_MASK (0x7FFu << I40E_VFPE_CQPDB_WQHEAD_SHIFT)
#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFFu << I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFFu << I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
#define I40E_VFPE_CQPTAIL_WQTAIL_MASK (0x7FFu << I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1u << I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFFu << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1u << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17
#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1u << I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)
#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1Fu << I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFFu << I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFFu << I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
#define I40E_VFPE_WQEALLOC_PEQPID_MASK (0x3FFFFu << I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFFu << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
/*
 * Global PE per-PF IPv4/IPv6 statistics registers (GLPES), one slot per PF
 * (_i = 0..15).  64-bit counters are split into LO (stride-8 base) and HI
 * (base + 4) halves; 32-bit counters use stride 4.
 * NOTE(review): mask literals carry a 'u' suffix so mask expressions are
 * formed in unsigned arithmetic (avoids UB signed shifts, C11 6.5.7p4);
 * values unchanged.  Range comments normalized to /" _i=0...15 "/ on every
 * array register for consistency.
 */
#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFFu << I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFFu << I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFFu << I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFFu << I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFFu << I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFFu << I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFFu << I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFFu << I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFFu << I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFFu << I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFFu << I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFFu << I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFFu << I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFFu << I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFFu << I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFFu << I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFFu << I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFFu << I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFFu << I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFFu << I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFFu << I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFFu << I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
/*
 * Global PE per-PF RDMA, VLAN-error, TCP and UDP statistics registers
 * (GLPES), one slot per PF (_i = 0..15); 64-bit counters split LO/HI.
 * NOTE(review): the PFRDMATX* registers reuse RDMARX* field-name suffixes
 * (e.g. I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT) — preserved as-is,
 * since callers reference these exact identifiers.
 * Mask literals carry a 'u' suffix so mask expressions are formed in
 * unsigned arithmetic (avoids UB signed shifts, C11 6.5.7p4); values
 * unchanged.
 */
#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFFu << I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFFu << I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFFu << I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFFu << I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFFu << I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFFu << I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFFu << I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFFu << I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK (0xFFFFFFu << I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFFu << I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFFu << I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */
#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFFu << I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFFu << I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFFu << I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFFu << I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFFu << I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFFu << I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
/*
 * Global (device-wide) PE RDMA and TCP statistics registers (GLPES),
 * single instance each; 64-bit counters split into LO/HI halves.
 * NOTE(review): historical naming quirks preserved byte-for-byte because
 * callers use these exact identifiers: RDMRXAUNALIGN (transposed "RX"),
 * TCPRXPUREACKHI vs. TCPRXPUREACKSLO, and the PUREACKSHI/PUREACKLO field
 * suffixes.  Mask literals carry a 'u' suffix so mask expressions are
 * formed in unsigned arithmetic (avoids UB signed shifts, C11 6.5.7p4);
 * values unchanged.
 */
#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014
#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK (0xFFFFFFu << I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010
#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK (0xFFFFFFFFu << I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C
#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK (0xFFFFFFu << I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018
#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK (0xFFFFFFFFu << I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004
#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK (0xFFFFFFFFu << I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
#define I40E_GLPES_RDMARXUNALIGN 0x0001E000
#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK (0xFFFFFFFFu << I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044
#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK (0xFFFFFFu << I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040
#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK (0xFFFFFFFFu << I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C
#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK (0xFFFFFFu << I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028
#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK (0xFFFFFFFFu << I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024
#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK (0xFFFFFFu << I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020
#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK (0xFFFFFFFFu << I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C
#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK (0xFFFFFFu << I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038
#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK (0xFFFFFFFFu << I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034
#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK (0xFFFFFFu << I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030
#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK (0xFFFFFFFFu << I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
#define I40E_GLPES_TCPRXUNEXPERR 0x0001E008
#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT 0
#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_MASK (0xFFFFFFu << I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT)
2830#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C
2831#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
2832#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
2833#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048
2834#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
2835#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
2836#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054
2837#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
2838#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
2839#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050
2840#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
2841#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
2842#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C
2843#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
2844#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
2845#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058
2846#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
2847#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
/* GLPES per-VF IPv4/IPv6 statistics register arrays, one entry per VF
 * (_i = 0...31, 4-byte stride). 64-bit counters are split into a 32-bit
 * ...LO register and a 16-bit ...HI register.
 */
#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
/* GLPES per-VF RDMA, TCP and UDP statistics register arrays, one entry per
 * VF (_i = 0...31, 4-byte stride). 64-bit counters are split into a 32-bit
 * ...LO register and a 16-bit ...HI register.
 * NOTE(review): the VFRDMATX* field macros reuse the RDMARX* field names —
 * kept as-is to preserve the existing interface.
 */
#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 4)) /* _i=0...31 */
#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
/* Power-management registers: global DMA coalescing (GLPM_DMACR), latency
 * tolerance reporting (GLPM_LTRC), and per-port Energy Efficient Ethernet
 * and power-state control/status (PRTPM_*).
 */
#define I40E_GLPM_DMACR 0x000881F4
#define I40E_GLPM_DMACR_DMACWT_SHIFT 0
#define I40E_GLPM_DMACR_DMACWT_MASK (0xFFFF << I40E_GLPM_DMACR_DMACWT_SHIFT)
#define I40E_GLPM_DMACR_EXIT_DC_SHIFT 29
#define I40E_GLPM_DMACR_EXIT_DC_MASK (0x1 << I40E_GLPM_DMACR_EXIT_DC_SHIFT)
#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT 30
#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_MASK (0x1 << I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT)
#define I40E_GLPM_DMACR_DMAC_EN_SHIFT 31
#define I40E_GLPM_DMACR_DMAC_EN_MASK (0x1 << I40E_GLPM_DMACR_DMAC_EN_SHIFT)
#define I40E_GLPM_LTRC 0x000BE500
#define I40E_GLPM_LTRC_SLTRV_SHIFT 0
#define I40E_GLPM_LTRC_SLTRV_MASK (0x3FF << I40E_GLPM_LTRC_SLTRV_SHIFT)
#define I40E_GLPM_LTRC_SSCALE_SHIFT 10
#define I40E_GLPM_LTRC_SSCALE_MASK (0x7 << I40E_GLPM_LTRC_SSCALE_SHIFT)
#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT 15
#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT)
#define I40E_GLPM_LTRC_NSLTRV_SHIFT 16
#define I40E_GLPM_LTRC_NSLTRV_MASK (0x3FF << I40E_GLPM_LTRC_NSLTRV_SHIFT)
#define I40E_GLPM_LTRC_NSSCALE_SHIFT 26
#define I40E_GLPM_LTRC_NSSCALE_MASK (0x7 << I40E_GLPM_LTRC_NSSCALE_SHIFT)
#define I40E_GLPM_LTRC_LTR_SEND_SHIFT 30
#define I40E_GLPM_LTRC_LTR_SEND_MASK (0x1 << I40E_GLPM_LTRC_LTR_SEND_SHIFT)
#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT 31
#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT)
#define I40E_PRTPM_EEE_STAT 0x001E4320
#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
#define I40E_PRTPM_EEEC 0x001E4380
#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK (0x3F << I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK (0x3 << I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
#define I40E_PRTPM_EEEC_TEEE_DLY_MASK (0x3F << I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
#define I40E_PRTPM_EEEFWD 0x001E4400
#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK (0x1 << I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
#define I40E_PRTPM_EEER 0x001E4360
#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
#define I40E_PRTPM_EEER_TW_SYSTEM_MASK (0xFFFF << I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
#define I40E_PRTPM_EEER_TX_LPI_EN_MASK (0x1 << I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
#define I40E_PRTPM_EEETXC 0x001E43E0
#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
#define I40E_PRTPM_EEETXC_TW_PHY_MASK (0xFFFF << I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
#define I40E_PRTPM_GC 0x000B8140
#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
#define I40E_PRTPM_GC_EMP_LINK_ON_MASK (0x1 << I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
#define I40E_PRTPM_GC_MNG_VETO_MASK (0x1 << I40E_PRTPM_GC_MNG_VETO_SHIFT)
#define I40E_PRTPM_GC_RATD_SHIFT 2
#define I40E_PRTPM_GC_RATD_MASK (0x1 << I40E_PRTPM_GC_RATD_SHIFT)
#define I40E_PRTPM_GC_LCDMP_SHIFT 3
#define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT)
#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
#define I40E_PRTPM_HPTC 0x000AC800
#define I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT 0
#define I40E_PRTPM_HPTC_HIGH_PRI_TC_MASK (0xFF << I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT)
#define I40E_PRTPM_RLPIC 0x001E43A0
#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
#define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
#define I40E_PRTPM_TLPIC 0x001E43C0
#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
#define I40E_PRTPM_TLPIC_ETLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
/* Receive packet buffer registers: global (GLRPB) and per-port, per-TC
 * (PRTRPB, _i = 0...7 traffic classes where indexed) high/low watermarks
 * and pool sizes. All fields are 20-bit values at bit 0.
 */
#define I40E_GLRPB_DPSS 0x000AC828
#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
#define I40E_GLRPB_DPSS_DPS_TCN_MASK (0xFFFFF << I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
#define I40E_GLRPB_GHW 0x000AC830
#define I40E_GLRPB_GHW_GHW_SHIFT 0
#define I40E_GLRPB_GHW_GHW_MASK (0xFFFFF << I40E_GLRPB_GHW_GHW_SHIFT)
#define I40E_GLRPB_GLW 0x000AC834
#define I40E_GLRPB_GLW_GLW_SHIFT 0
#define I40E_GLRPB_GLW_GLW_MASK (0xFFFFF << I40E_GLRPB_GLW_GLW_SHIFT)
#define I40E_GLRPB_PHW 0x000AC844
#define I40E_GLRPB_PHW_PHW_SHIFT 0
#define I40E_GLRPB_PHW_PHW_MASK (0xFFFFF << I40E_GLRPB_PHW_PHW_SHIFT)
#define I40E_GLRPB_PLW 0x000AC848
#define I40E_GLRPB_PLW_PLW_SHIFT 0
#define I40E_GLRPB_PLW_PLW_MASK (0xFFFFF << I40E_GLRPB_PLW_PLW_SHIFT)
#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */
#define I40E_PRTRPB_DHW_MAX_INDEX 7
#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
#define I40E_PRTRPB_DHW_DHW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */
#define I40E_PRTRPB_DLW_MAX_INDEX 7
#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
#define I40E_PRTRPB_DLW_DLW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */
#define I40E_PRTRPB_DPS_MAX_INDEX 7
#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
#define I40E_PRTRPB_DPS_DPS_TCN_MASK (0xFFFFF << I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */
#define I40E_PRTRPB_SHT_MAX_INDEX 7
#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
#define I40E_PRTRPB_SHT_SHT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
#define I40E_PRTRPB_SHW 0x000AC580
#define I40E_PRTRPB_SHW_SHW_SHIFT 0
#define I40E_PRTRPB_SHW_SHW_MASK (0xFFFFF << I40E_PRTRPB_SHW_SHW_SHIFT)
#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */
#define I40E_PRTRPB_SLT_MAX_INDEX 7
#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
#define I40E_PRTRPB_SLT_SLT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
#define I40E_PRTRPB_SLW 0x000AC6A0
#define I40E_PRTRPB_SLW_SLW_SHIFT 0
#define I40E_PRTRPB_SLW_SLW_MASK (0xFFFFF << I40E_PRTRPB_SLW_SLW_SHIFT)
#define I40E_PRTRPB_SPS 0x000AC7C0
#define I40E_PRTRPB_SPS_SPS_SHIFT 0
#define I40E_PRTRPB_SPS_SPS_MASK (0xFFFFF << I40E_PRTRPB_SPS_SPS_SHIFT)
3257#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */
3258#define I40E_GLQF_APBVT_MAX_INDEX 2047
3259#define I40E_GLQF_APBVT_APBVT_SHIFT 0
3260#define I40E_GLQF_APBVT_APBVT_MASK (0xFFFFFFFF << I40E_GLQF_APBVT_APBVT_SHIFT)
3261#define I40E_GLQF_CTL 0x00269BA4
3262#define I40E_GLQF_CTL_HTOEP_SHIFT 1
3263#define I40E_GLQF_CTL_HTOEP_MASK (0x1 << I40E_GLQF_CTL_HTOEP_SHIFT)
3264#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
3265#define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
3266#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
3267#define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
3268#define I40E_GLQF_CTL_DDPLPEN_SHIFT 7
3269#define I40E_GLQF_CTL_DDPLPEN_MASK (0x1 << I40E_GLQF_CTL_DDPLPEN_SHIFT)
3270#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
3271#define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
3272#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
3273#define I40E_GLQF_CTL_MAXFCBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
3274#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
3275#define I40E_GLQF_CTL_MAXFDBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
3276#define I40E_GLQF_CTL_FDBEST_SHIFT 17
3277#define I40E_GLQF_CTL_FDBEST_MASK (0xFF << I40E_GLQF_CTL_FDBEST_SHIFT)
3278#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
3279#define I40E_GLQF_CTL_PROGPRIO_MASK (0x1 << I40E_GLQF_CTL_PROGPRIO_SHIFT)
3280#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
3281#define I40E_GLQF_CTL_INVALPRIO_MASK (0x1 << I40E_GLQF_CTL_INVALPRIO_SHIFT)
3282#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
3283#define I40E_GLQF_CTL_IGNORE_IP_MASK (0x1 << I40E_GLQF_CTL_IGNORE_IP_SHIFT)
3284#define I40E_GLQF_FDCNT_0 0x00269BAC
3285#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
3286#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
3287#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
3288#define I40E_GLQF_FDCNT_0_BESTCNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
3289#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */
3290#define I40E_GLQF_HSYM_MAX_INDEX 63
3291#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
3292#define I40E_GLQF_HSYM_SYMH_ENA_MASK (0x1 << I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
3293#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */
3294#define I40E_GLQF_PCNT_MAX_INDEX 511
3295#define I40E_GLQF_PCNT_PCNT_SHIFT 0
3296#define I40E_GLQF_PCNT_PCNT_MASK (0xFFFFFFFF << I40E_GLQF_PCNT_PCNT_SHIFT)
3297#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */
3298#define I40E_GLQF_SWAP_MAX_INDEX 1
3299#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
3300#define I40E_GLQF_SWAP_OFF0_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
3301#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
3302#define I40E_GLQF_SWAP_OFF0_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
3303#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
3304#define I40E_GLQF_SWAP_FLEN0_MASK (0xF << I40E_GLQF_SWAP_FLEN0_SHIFT)
3305#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
3306#define I40E_GLQF_SWAP_OFF1_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
3307#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
3308#define I40E_GLQF_SWAP_OFF1_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
/* Field length 1 occupies bits 31:28.  Use an unsigned literal: a signed
 * 0xF << 28 would shift into the sign bit of a 32-bit int, which is
 * undefined behavior in C (C99 6.5.7p4).
 */
#define I40E_GLQF_SWAP_FLEN1_MASK (0xFU << I40E_GLQF_SWAP_FLEN1_SHIFT)
3311#define I40E_PFQF_CTL_0 0x001C0AC0
3312#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
3313#define I40E_PFQF_CTL_0_PEHSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
3314#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
3315#define I40E_PFQF_CTL_0_PEDSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
3316#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
3317#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
3318#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
3319#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
3320#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
3321#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK (0x1 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
3322#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
3323#define I40E_PFQF_CTL_0_FD_ENA_MASK (0x1 << I40E_PFQF_CTL_0_FD_ENA_SHIFT)
3324#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
3325#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK (0x1 << I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
3326#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
3327#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK (0x1 << I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
3328#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
3329#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
3330#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
3331#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
3332#define I40E_PFQF_CTL_1 0x00245D80
3333#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
3334#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK (0x1 << I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
3335#define I40E_PFQF_FDALLOC 0x00246280
3336#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
3337#define I40E_PFQF_FDALLOC_FDALLOC_MASK (0xFF << I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
3338#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
3339#define I40E_PFQF_FDALLOC_FDBEST_MASK (0xFF << I40E_PFQF_FDALLOC_FDBEST_SHIFT)
3340#define I40E_PFQF_FDSTAT 0x00246380
3341#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
3342#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
3343#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
3344#define I40E_PFQF_FDSTAT_BEST_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
3345#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */
3346#define I40E_PFQF_HENA_MAX_INDEX 1
3347#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
3348#define I40E_PFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
3349#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */
3350#define I40E_PFQF_HKEY_MAX_INDEX 12
3351#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
3352#define I40E_PFQF_HKEY_KEY_0_MASK (0xFF << I40E_PFQF_HKEY_KEY_0_SHIFT)
3353#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
3354#define I40E_PFQF_HKEY_KEY_1_MASK (0xFF << I40E_PFQF_HKEY_KEY_1_SHIFT)
3355#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
3356#define I40E_PFQF_HKEY_KEY_2_MASK (0xFF << I40E_PFQF_HKEY_KEY_2_SHIFT)
#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
/* Hash-key byte 3 occupies bits 31:24.  Unsigned literal required:
 * 0xFF << 24 is not representable in a 32-bit signed int, making the
 * signed shift undefined behavior (C99 6.5.7p4).
 */
#define I40E_PFQF_HKEY_KEY_3_MASK (0xFFU << I40E_PFQF_HKEY_KEY_3_SHIFT)
3359#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */
3360#define I40E_PFQF_HLUT_MAX_INDEX 127
3361#define I40E_PFQF_HLUT_LUT0_SHIFT 0
3362#define I40E_PFQF_HLUT_LUT0_MASK (0x3F << I40E_PFQF_HLUT_LUT0_SHIFT)
3363#define I40E_PFQF_HLUT_LUT1_SHIFT 8
3364#define I40E_PFQF_HLUT_LUT1_MASK (0x3F << I40E_PFQF_HLUT_LUT1_SHIFT)
3365#define I40E_PFQF_HLUT_LUT2_SHIFT 16
3366#define I40E_PFQF_HLUT_LUT2_MASK (0x3F << I40E_PFQF_HLUT_LUT2_SHIFT)
3367#define I40E_PFQF_HLUT_LUT3_SHIFT 24
3368#define I40E_PFQF_HLUT_LUT3_MASK (0x3F << I40E_PFQF_HLUT_LUT3_SHIFT)
3369#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */
3370#define I40E_PFQF_HREGION_MAX_INDEX 7
3371#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
3372#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
3373#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
3374#define I40E_PFQF_HREGION_REGION_0_MASK (0x7 << I40E_PFQF_HREGION_REGION_0_SHIFT)
3375#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
3376#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
3377#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
3378#define I40E_PFQF_HREGION_REGION_1_MASK (0x7 << I40E_PFQF_HREGION_REGION_1_SHIFT)
3379#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
3380#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
3381#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
3382#define I40E_PFQF_HREGION_REGION_2_MASK (0x7 << I40E_PFQF_HREGION_REGION_2_SHIFT)
3383#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
3384#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
3385#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
3386#define I40E_PFQF_HREGION_REGION_3_MASK (0x7 << I40E_PFQF_HREGION_REGION_3_SHIFT)
3387#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
3388#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
3389#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
3390#define I40E_PFQF_HREGION_REGION_4_MASK (0x7 << I40E_PFQF_HREGION_REGION_4_SHIFT)
3391#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
3392#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
3393#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
3394#define I40E_PFQF_HREGION_REGION_5_MASK (0x7 << I40E_PFQF_HREGION_REGION_5_SHIFT)
3395#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
3396#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
3397#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
3398#define I40E_PFQF_HREGION_REGION_6_MASK (0x7 << I40E_PFQF_HREGION_REGION_6_SHIFT)
3399#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
3400#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
/* Region 7 occupies bits 31:29.  Unsigned literal required: 0x7 << 29
 * sets bit 31 of a signed int, which is undefined behavior (C99 6.5.7p4).
 */
#define I40E_PFQF_HREGION_REGION_7_MASK (0x7U << I40E_PFQF_HREGION_REGION_7_SHIFT)
3403#define I40E_PRTQF_CTL_0 0x00256E60
3404#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
3405#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK (0x1 << I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
3406#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */
3407#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
3408#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
3409#define I40E_PRTQF_FD_FLXINSET_INSET_MASK (0xFF << I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
3410#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */
3411#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
3412#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
3413#define I40E_PRTQF_FD_MSK_MASK_MASK (0xFFFF << I40E_PRTQF_FD_MSK_MASK_SHIFT)
3414#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
3415#define I40E_PRTQF_FD_MSK_OFFSET_MASK (0x3F << I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
3416#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */
3417#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
3418#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
3419#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
3420#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 6
3421#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0xF << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
3422#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
3423#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
3424#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4))
3425#define I40E_VFQF_HENA1_MAX_INDEX 1
3426#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
3427#define I40E_VFQF_HENA1_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
3428#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */
3429#define I40E_VFQF_HKEY1_MAX_INDEX 12
3430#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
3431#define I40E_VFQF_HKEY1_KEY_0_MASK (0xFF << I40E_VFQF_HKEY1_KEY_0_SHIFT)
3432#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
3433#define I40E_VFQF_HKEY1_KEY_1_MASK (0xFF << I40E_VFQF_HKEY1_KEY_1_SHIFT)
3434#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
3435#define I40E_VFQF_HKEY1_KEY_2_MASK (0xFF << I40E_VFQF_HKEY1_KEY_2_SHIFT)
#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
/* VF hash-key byte 3 occupies bits 31:24.  Unsigned literal required:
 * 0xFF << 24 overflows a 32-bit signed int, so the signed shift is
 * undefined behavior (C99 6.5.7p4).
 */
#define I40E_VFQF_HKEY1_KEY_3_MASK (0xFFU << I40E_VFQF_HKEY1_KEY_3_SHIFT)
3438#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
3439#define I40E_VFQF_HLUT1_MAX_INDEX 15
3440#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
3441#define I40E_VFQF_HLUT1_LUT0_MASK (0xF << I40E_VFQF_HLUT1_LUT0_SHIFT)
3442#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
3443#define I40E_VFQF_HLUT1_LUT1_MASK (0xF << I40E_VFQF_HLUT1_LUT1_SHIFT)
3444#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
3445#define I40E_VFQF_HLUT1_LUT2_MASK (0xF << I40E_VFQF_HLUT1_LUT2_SHIFT)
3446#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
3447#define I40E_VFQF_HLUT1_LUT3_MASK (0xF << I40E_VFQF_HLUT1_LUT3_SHIFT)
3448#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4))
3449#define I40E_VFQF_HREGION1_MAX_INDEX 7
3450#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
3451#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
3452#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
3453#define I40E_VFQF_HREGION1_REGION_0_MASK (0x7 << I40E_VFQF_HREGION1_REGION_0_SHIFT)
3454#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
3455#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
3456#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
3457#define I40E_VFQF_HREGION1_REGION_1_MASK (0x7 << I40E_VFQF_HREGION1_REGION_1_SHIFT)
3458#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
3459#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
3460#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
3461#define I40E_VFQF_HREGION1_REGION_2_MASK (0x7 << I40E_VFQF_HREGION1_REGION_2_SHIFT)
3462#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
3463#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
3464#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
3465#define I40E_VFQF_HREGION1_REGION_3_MASK (0x7 << I40E_VFQF_HREGION1_REGION_3_SHIFT)
3466#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
3467#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
3468#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
3469#define I40E_VFQF_HREGION1_REGION_4_MASK (0x7 << I40E_VFQF_HREGION1_REGION_4_SHIFT)
3470#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
3471#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
3472#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
3473#define I40E_VFQF_HREGION1_REGION_5_MASK (0x7 << I40E_VFQF_HREGION1_REGION_5_SHIFT)
3474#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
3475#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
3476#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
3477#define I40E_VFQF_HREGION1_REGION_6_MASK (0x7 << I40E_VFQF_HREGION1_REGION_6_SHIFT)
3478#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
3479#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
/* VF region 7 occupies bits 31:29.  Unsigned literal required: 0x7 << 29
 * sets bit 31 of a signed int, which is undefined behavior (C99 6.5.7p4).
 */
#define I40E_VFQF_HREGION1_REGION_7_MASK (0x7U << I40E_VFQF_HREGION1_REGION_7_SHIFT)
3482#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */
3483#define I40E_VPQF_CTL_MAX_INDEX 127
3484#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
3485#define I40E_VPQF_CTL_PEHSIZE_MASK (0x1F << I40E_VPQF_CTL_PEHSIZE_SHIFT)
3486#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
3487#define I40E_VPQF_CTL_PEDSIZE_MASK (0x1F << I40E_VPQF_CTL_PEDSIZE_SHIFT)
3488#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
3489#define I40E_VPQF_CTL_FCHSIZE_MASK (0xF << I40E_VPQF_CTL_FCHSIZE_SHIFT)
3490#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
3491#define I40E_VPQF_CTL_FCDSIZE_MASK (0x3 << I40E_VPQF_CTL_FCDSIZE_SHIFT)
3492#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */
3493#define I40E_VSIQF_CTL_MAX_INDEX 383
3494#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
3495#define I40E_VSIQF_CTL_FCOE_ENA_MASK (0x1 << I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
3496#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
3497#define I40E_VSIQF_CTL_PETCP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
3498#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
3499#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
3500#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
3501#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
3502#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
3503#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
3504#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
3505#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
3506#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4))
3507#define I40E_VSIQF_TCREGION_MAX_INDEX 7
3508#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
3509#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
3510#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
3511#define I40E_VSIQF_TCREGION_TC_SIZE_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
3512#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
3513#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
3514#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
3515#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
3516#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */
3517#define I40E_GL_FCOECRC_MAX_INDEX 143
3518#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
3519#define I40E_GL_FCOECRC_FCOECRC_MASK (0xFFFFFFFF << I40E_GL_FCOECRC_FCOECRC_SHIFT)
3520#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */
3521#define I40E_GL_FCOEDDPC_MAX_INDEX 143
3522#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
3523#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
3524#define I40E_GL_FCOEDDPEC(_i) (0x00314900 + ((_i) * 8)) /* _i=0...143 */
3525#define I40E_GL_FCOEDDPEC_MAX_INDEX 143
3526#define I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT 0
3527#define I40E_GL_FCOEDDPEC_CFOEDDPEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT)
3528#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */
3529#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
3530#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
3531#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
3532#define I40E_GL_FCOEDIFRC(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */
3533#define I40E_GL_FCOEDIFRC_MAX_INDEX 143
3534#define I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT 0
3535#define I40E_GL_FCOEDIFRC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT)
3536#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */
3537#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
3538#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
3539#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
3540#define I40E_GL_FCOEDIXAC(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */
3541#define I40E_GL_FCOEDIXAC_MAX_INDEX 143
3542#define I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT 0
3543#define I40E_GL_FCOEDIXAC_FCOEDIXAC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT)
3544#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */
3545#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
3546#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
3547#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
3548#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */
3549#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
3550#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
3551#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
3552#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */
3553#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
3554#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
3555#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK (0xFFFF << I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
3556#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */
3557#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
3558#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
3559#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
3560#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */
3561#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
3562#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
3563#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK (0xFFFF << I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
3564#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */
3565#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
3566#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
3567#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
3568#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */
3569#define I40E_GL_FCOELAST_MAX_INDEX 143
3570#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
3571#define I40E_GL_FCOELAST_FCOELAST_MASK (0xFFFFFFFF << I40E_GL_FCOELAST_FCOELAST_SHIFT)
3572#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */
3573#define I40E_GL_FCOEPRC_MAX_INDEX 143
3574#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
3575#define I40E_GL_FCOEPRC_FCOEPRC_MASK (0xFFFFFFFF << I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
3576#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */
3577#define I40E_GL_FCOEPTC_MAX_INDEX 143
3578#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
3579#define I40E_GL_FCOEPTC_FCOEPTC_MASK (0xFFFFFFFF << I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
3580#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */
3581#define I40E_GL_FCOERPDC_MAX_INDEX 143
3582#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
3583#define I40E_GL_FCOERPDC_FCOERPDC_MASK (0xFFFFFFFF << I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
3584#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */
3585#define I40E_GLPRT_BPRCH_MAX_INDEX 3
3586#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
3587#define I40E_GLPRT_BPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPRCH_UPRCH_SHIFT)
3588#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */
3589#define I40E_GLPRT_BPRCL_MAX_INDEX 3
3590#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
3591#define I40E_GLPRT_BPRCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPRCL_UPRCH_SHIFT)
3592#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */
3593#define I40E_GLPRT_BPTCH_MAX_INDEX 3
3594#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
3595#define I40E_GLPRT_BPTCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPTCH_UPRCH_SHIFT)
3596#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */
3597#define I40E_GLPRT_BPTCL_MAX_INDEX 3
3598#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
3599#define I40E_GLPRT_BPTCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPTCL_UPRCH_SHIFT)
3600#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */
3601#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
3602#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
3603#define I40E_GLPRT_CRCERRS_CRCERRS_MASK (0xFFFFFFFF << I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
3604#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */
3605#define I40E_GLPRT_GORCH_MAX_INDEX 3
3606#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
3607#define I40E_GLPRT_GORCH_GORCH_MASK (0xFFFF << I40E_GLPRT_GORCH_GORCH_SHIFT)
3608#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */
3609#define I40E_GLPRT_GORCL_MAX_INDEX 3
3610#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
3611#define I40E_GLPRT_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLPRT_GORCL_GORCL_SHIFT)
3612#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */
3613#define I40E_GLPRT_GOTCH_MAX_INDEX 3
3614#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
3615#define I40E_GLPRT_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLPRT_GOTCH_GOTCH_SHIFT)
3616#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */
3617#define I40E_GLPRT_GOTCL_MAX_INDEX 3
3618#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
3619#define I40E_GLPRT_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLPRT_GOTCL_GOTCL_SHIFT)
3620#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */
3621#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
3622#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
3623#define I40E_GLPRT_ILLERRC_ILLERRC_MASK (0xFFFFFFFF << I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
3624#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */
3625#define I40E_GLPRT_LDPC_MAX_INDEX 3
3626#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
3627#define I40E_GLPRT_LDPC_LDPC_MASK (0xFFFFFFFF << I40E_GLPRT_LDPC_LDPC_SHIFT)
3628#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */
3629#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
3630#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
3631#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
3632#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */
3633#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
3634#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
3635#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
3636#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */
3637#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
3638#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
3639#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
3640#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */
3641#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
3642#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
3643#define I40E_GLPRT_LXONTXC_LXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
3644#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */
3645#define I40E_GLPRT_MLFC_MAX_INDEX 3
3646#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
3647#define I40E_GLPRT_MLFC_MLFC_MASK (0xFFFFFFFF << I40E_GLPRT_MLFC_MLFC_SHIFT)
3648#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */
3649#define I40E_GLPRT_MPRCH_MAX_INDEX 3
3650#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
3651#define I40E_GLPRT_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLPRT_MPRCH_MPRCH_SHIFT)
3652#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */
3653#define I40E_GLPRT_MPRCL_MAX_INDEX 3
3654#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
3655#define I40E_GLPRT_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPRCL_MPRCL_SHIFT)
3656#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */
3657#define I40E_GLPRT_MPTCH_MAX_INDEX 3
3658#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
3659#define I40E_GLPRT_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLPRT_MPTCH_MPTCH_SHIFT)
3660#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */
3661#define I40E_GLPRT_MPTCL_MAX_INDEX 3
3662#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
3663#define I40E_GLPRT_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPTCL_MPTCL_SHIFT)
3664#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */
3665#define I40E_GLPRT_MRFC_MAX_INDEX 3
3666#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
3667#define I40E_GLPRT_MRFC_MRFC_MASK (0xFFFFFFFF << I40E_GLPRT_MRFC_MRFC_SHIFT)
3668#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */
3669#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
3670#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
3671#define I40E_GLPRT_PRC1023H_PRC1023H_MASK (0xFFFF << I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
3672#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */
3673#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
3674#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
3675#define I40E_GLPRT_PRC1023L_PRC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
3676#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */
3677#define I40E_GLPRT_PRC127H_MAX_INDEX 3
3678#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
3679#define I40E_GLPRT_PRC127H_PRC127H_MASK (0xFFFF << I40E_GLPRT_PRC127H_PRC127H_SHIFT)
3680#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */
3681#define I40E_GLPRT_PRC127L_MAX_INDEX 3
3682#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
3683#define I40E_GLPRT_PRC127L_PRC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC127L_PRC127L_SHIFT)
3684#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */
3685#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
3686#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
3687#define I40E_GLPRT_PRC1522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
3688#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */
3689#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
3690#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
3691#define I40E_GLPRT_PRC1522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
3692#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */
3693#define I40E_GLPRT_PRC255H_MAX_INDEX 3
3694#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
3695#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK (0xFFFF << I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
3696#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */
3697#define I40E_GLPRT_PRC255L_MAX_INDEX 3
3698#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
3699#define I40E_GLPRT_PRC255L_PRC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC255L_PRC255L_SHIFT)
3700#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */
3701#define I40E_GLPRT_PRC511H_MAX_INDEX 3
3702#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
3703#define I40E_GLPRT_PRC511H_PRC511H_MASK (0xFFFF << I40E_GLPRT_PRC511H_PRC511H_SHIFT)
3704#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */
3705#define I40E_GLPRT_PRC511L_MAX_INDEX 3
3706#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
3707#define I40E_GLPRT_PRC511L_PRC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC511L_PRC511L_SHIFT)
3708#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */
3709#define I40E_GLPRT_PRC64H_MAX_INDEX 3
3710#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
3711#define I40E_GLPRT_PRC64H_PRC64H_MASK (0xFFFF << I40E_GLPRT_PRC64H_PRC64H_SHIFT)
3712#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */
3713#define I40E_GLPRT_PRC64L_MAX_INDEX 3
3714#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
3715#define I40E_GLPRT_PRC64L_PRC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC64L_PRC64L_SHIFT)
3716#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */
3717#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
3718#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
3719#define I40E_GLPRT_PRC9522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
3720#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */
3721#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
3722#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
3723#define I40E_GLPRT_PRC9522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
3724#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */
3725#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
3726#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
3727#define I40E_GLPRT_PTC1023H_PTC1023H_MASK (0xFFFF << I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
3728#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */
3729#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
3730#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
3731#define I40E_GLPRT_PTC1023L_PTC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
3732#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */
3733#define I40E_GLPRT_PTC127H_MAX_INDEX 3
3734#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
3735#define I40E_GLPRT_PTC127H_PTC127H_MASK (0xFFFF << I40E_GLPRT_PTC127H_PTC127H_SHIFT)
3736#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */
3737#define I40E_GLPRT_PTC127L_MAX_INDEX 3
3738#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
3739#define I40E_GLPRT_PTC127L_PTC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC127L_PTC127L_SHIFT)
3740#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */
3741#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
3742#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
3743#define I40E_GLPRT_PTC1522H_PTC1522H_MASK (0xFFFF << I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
3744#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */
3745#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
3746#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
3747#define I40E_GLPRT_PTC1522L_PTC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
3748#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */
3749#define I40E_GLPRT_PTC255H_MAX_INDEX 3
3750#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
3751#define I40E_GLPRT_PTC255H_PTC255H_MASK (0xFFFF << I40E_GLPRT_PTC255H_PTC255H_SHIFT)
3752#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */
3753#define I40E_GLPRT_PTC255L_MAX_INDEX 3
3754#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
3755#define I40E_GLPRT_PTC255L_PTC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC255L_PTC255L_SHIFT)
3756#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */
3757#define I40E_GLPRT_PTC511H_MAX_INDEX 3
3758#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
3759#define I40E_GLPRT_PTC511H_PTC511H_MASK (0xFFFF << I40E_GLPRT_PTC511H_PTC511H_SHIFT)
3760#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */
3761#define I40E_GLPRT_PTC511L_MAX_INDEX 3
3762#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
3763#define I40E_GLPRT_PTC511L_PTC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC511L_PTC511L_SHIFT)
3764#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */
3765#define I40E_GLPRT_PTC64H_MAX_INDEX 3
3766#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
3767#define I40E_GLPRT_PTC64H_PTC64H_MASK (0xFFFF << I40E_GLPRT_PTC64H_PTC64H_SHIFT)
3768#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */
3769#define I40E_GLPRT_PTC64L_MAX_INDEX 3
3770#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
3771#define I40E_GLPRT_PTC64L_PTC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC64L_PTC64L_SHIFT)
3772#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */
3773#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
3774#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
3775#define I40E_GLPRT_PTC9522H_PTC9522H_MASK (0xFFFF << I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
3776#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */
3777#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
3778#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
3779#define I40E_GLPRT_PTC9522L_PTC9522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
3780#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32))
3781#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
3782#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
3783#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
3784#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32))
3785#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
3786#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
3787#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
3788#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32))
3789#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
3790#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
3791#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
3792#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32))
3793#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
3794#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
3795#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
3796#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */
3797#define I40E_GLPRT_RDPC_MAX_INDEX 3
3798#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
3799#define I40E_GLPRT_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLPRT_RDPC_RDPC_SHIFT)
3800#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */
3801#define I40E_GLPRT_RFC_MAX_INDEX 3
3802#define I40E_GLPRT_RFC_RFC_SHIFT 0
3803#define I40E_GLPRT_RFC_RFC_MASK (0xFFFFFFFF << I40E_GLPRT_RFC_RFC_SHIFT)
3804#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */
3805#define I40E_GLPRT_RJC_MAX_INDEX 3
3806#define I40E_GLPRT_RJC_RJC_SHIFT 0
3807#define I40E_GLPRT_RJC_RJC_MASK (0xFFFFFFFF << I40E_GLPRT_RJC_RJC_SHIFT)
3808#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */
3809#define I40E_GLPRT_RLEC_MAX_INDEX 3
3810#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
3811#define I40E_GLPRT_RLEC_RLEC_MASK (0xFFFFFFFF << I40E_GLPRT_RLEC_RLEC_SHIFT)
3812#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */
3813#define I40E_GLPRT_ROC_MAX_INDEX 3
3814#define I40E_GLPRT_ROC_ROC_SHIFT 0
3815#define I40E_GLPRT_ROC_ROC_MASK (0xFFFFFFFF << I40E_GLPRT_ROC_ROC_SHIFT)
3816#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */
3817#define I40E_GLPRT_RUC_MAX_INDEX 3
3818#define I40E_GLPRT_RUC_RUC_SHIFT 0
3819#define I40E_GLPRT_RUC_RUC_MASK (0xFFFFFFFF << I40E_GLPRT_RUC_RUC_SHIFT)
3820#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */
3821#define I40E_GLPRT_RUPP_MAX_INDEX 3
3822#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
3823#define I40E_GLPRT_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLPRT_RUPP_RUPP_SHIFT)
3824#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32))
3825#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
3826#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
3827#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK (0xFFFFFFFF << I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
3828#define I40E_GLPRT_STDC(_i) (0x00300640 + ((_i) * 8)) /* _i=0...3 */
3829#define I40E_GLPRT_STDC_MAX_INDEX 3
3830#define I40E_GLPRT_STDC_STDC_SHIFT 0
3831#define I40E_GLPRT_STDC_STDC_MASK (0xFFFFFFFF << I40E_GLPRT_STDC_STDC_SHIFT)
3832#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */
3833#define I40E_GLPRT_TDOLD_MAX_INDEX 3
3834#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
3835#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK (0xFFFFFFFF << I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
3836#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */
3837#define I40E_GLPRT_TDPC_MAX_INDEX 3
3838#define I40E_GLPRT_TDPC_TDPC_SHIFT 0
3839#define I40E_GLPRT_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLPRT_TDPC_TDPC_SHIFT)
3840#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */
3841#define I40E_GLPRT_UPRCH_MAX_INDEX 3
3842#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
3843#define I40E_GLPRT_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_UPRCH_UPRCH_SHIFT)
3844#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */
3845#define I40E_GLPRT_UPRCL_MAX_INDEX 3
3846#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
3847#define I40E_GLPRT_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_UPRCL_UPRCL_SHIFT)
3848#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */
3849#define I40E_GLPRT_UPTCH_MAX_INDEX 3
3850#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
3851#define I40E_GLPRT_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLPRT_UPTCH_UPTCH_SHIFT)
3852#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */
3853#define I40E_GLPRT_UPTCL_MAX_INDEX 3
3854#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
3855#define I40E_GLPRT_UPTCL_VUPTCH_MASK (0xFFFFFFFF << I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
3856#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */
3857#define I40E_GLSW_BPRCH_MAX_INDEX 15
3858#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
3859#define I40E_GLSW_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLSW_BPRCH_BPRCH_SHIFT)
3860#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */
3861#define I40E_GLSW_BPRCL_MAX_INDEX 15
3862#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
3863#define I40E_GLSW_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLSW_BPRCL_BPRCL_SHIFT)
3864#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */
3865#define I40E_GLSW_BPTCH_MAX_INDEX 15
3866#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
3867#define I40E_GLSW_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLSW_BPTCH_BPTCH_SHIFT)
3868#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */
3869#define I40E_GLSW_BPTCL_MAX_INDEX 15
3870#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
3871#define I40E_GLSW_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLSW_BPTCL_BPTCL_SHIFT)
3872#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */
3873#define I40E_GLSW_GORCH_MAX_INDEX 15
3874#define I40E_GLSW_GORCH_GORCH_SHIFT 0
3875#define I40E_GLSW_GORCH_GORCH_MASK (0xFFFF << I40E_GLSW_GORCH_GORCH_SHIFT)
3876#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */
3877#define I40E_GLSW_GORCL_MAX_INDEX 15
3878#define I40E_GLSW_GORCL_GORCL_SHIFT 0
3879#define I40E_GLSW_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLSW_GORCL_GORCL_SHIFT)
3880#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */
3881#define I40E_GLSW_GOTCH_MAX_INDEX 15
3882#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
3883#define I40E_GLSW_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLSW_GOTCH_GOTCH_SHIFT)
3884#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */
3885#define I40E_GLSW_GOTCL_MAX_INDEX 15
3886#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
3887#define I40E_GLSW_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLSW_GOTCL_GOTCL_SHIFT)
3888#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */
3889#define I40E_GLSW_MPRCH_MAX_INDEX 15
3890#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
3891#define I40E_GLSW_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLSW_MPRCH_MPRCH_SHIFT)
3892#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */
3893#define I40E_GLSW_MPRCL_MAX_INDEX 15
3894#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
3895#define I40E_GLSW_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLSW_MPRCL_MPRCL_SHIFT)
3896#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */
3897#define I40E_GLSW_MPTCH_MAX_INDEX 15
3898#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
3899#define I40E_GLSW_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLSW_MPTCH_MPTCH_SHIFT)
3900#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */
3901#define I40E_GLSW_MPTCL_MAX_INDEX 15
3902#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
3903#define I40E_GLSW_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLSW_MPTCL_MPTCL_SHIFT)
3904#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */
3905#define I40E_GLSW_RUPP_MAX_INDEX 15
3906#define I40E_GLSW_RUPP_RUPP_SHIFT 0
3907#define I40E_GLSW_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLSW_RUPP_RUPP_SHIFT)
3908#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */
3909#define I40E_GLSW_TDPC_MAX_INDEX 15
3910#define I40E_GLSW_TDPC_TDPC_SHIFT 0
3911#define I40E_GLSW_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLSW_TDPC_TDPC_SHIFT)
3912#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */
3913#define I40E_GLSW_UPRCH_MAX_INDEX 15
3914#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
3915#define I40E_GLSW_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLSW_UPRCH_UPRCH_SHIFT)
3916#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */
3917#define I40E_GLSW_UPRCL_MAX_INDEX 15
3918#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
3919#define I40E_GLSW_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLSW_UPRCL_UPRCL_SHIFT)
3920#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */
3921#define I40E_GLSW_UPTCH_MAX_INDEX 15
3922#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
3923#define I40E_GLSW_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLSW_UPTCH_UPTCH_SHIFT)
3924#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */
3925#define I40E_GLSW_UPTCL_MAX_INDEX 15
3926#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
3927#define I40E_GLSW_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLSW_UPTCL_UPTCL_SHIFT)
3928#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */
3929#define I40E_GLV_BPRCH_MAX_INDEX 383
3930#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
3931#define I40E_GLV_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLV_BPRCH_BPRCH_SHIFT)
3932#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */
3933#define I40E_GLV_BPRCL_MAX_INDEX 383
3934#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
3935#define I40E_GLV_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLV_BPRCL_BPRCL_SHIFT)
3936#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */
3937#define I40E_GLV_BPTCH_MAX_INDEX 383
3938#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
3939#define I40E_GLV_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLV_BPTCH_BPTCH_SHIFT)
3940#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */
3941#define I40E_GLV_BPTCL_MAX_INDEX 383
3942#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
3943#define I40E_GLV_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLV_BPTCL_BPTCL_SHIFT)
3944#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */
3945#define I40E_GLV_GORCH_MAX_INDEX 383
3946#define I40E_GLV_GORCH_GORCH_SHIFT 0
3947#define I40E_GLV_GORCH_GORCH_MASK (0xFFFF << I40E_GLV_GORCH_GORCH_SHIFT)
3948#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */
3949#define I40E_GLV_GORCL_MAX_INDEX 383
3950#define I40E_GLV_GORCL_GORCL_SHIFT 0
3951#define I40E_GLV_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLV_GORCL_GORCL_SHIFT)
3952#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */
3953#define I40E_GLV_GOTCH_MAX_INDEX 383
3954#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
3955#define I40E_GLV_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLV_GOTCH_GOTCH_SHIFT)
3956#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */
3957#define I40E_GLV_GOTCL_MAX_INDEX 383
3958#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
3959#define I40E_GLV_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLV_GOTCL_GOTCL_SHIFT)
3960#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */
3961#define I40E_GLV_MPRCH_MAX_INDEX 383
3962#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
3963#define I40E_GLV_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLV_MPRCH_MPRCH_SHIFT)
3964#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */
3965#define I40E_GLV_MPRCL_MAX_INDEX 383
3966#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
3967#define I40E_GLV_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLV_MPRCL_MPRCL_SHIFT)
3968#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */
3969#define I40E_GLV_MPTCH_MAX_INDEX 383
3970#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
3971#define I40E_GLV_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLV_MPTCH_MPTCH_SHIFT)
3972#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */
3973#define I40E_GLV_MPTCL_MAX_INDEX 383
3974#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
3975#define I40E_GLV_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLV_MPTCL_MPTCL_SHIFT)
3976#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */
3977#define I40E_GLV_RDPC_MAX_INDEX 383
3978#define I40E_GLV_RDPC_RDPC_SHIFT 0
3979#define I40E_GLV_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLV_RDPC_RDPC_SHIFT)
3980#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */
3981#define I40E_GLV_RUPP_MAX_INDEX 383
3982#define I40E_GLV_RUPP_RUPP_SHIFT 0
3983#define I40E_GLV_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLV_RUPP_RUPP_SHIFT)
3984#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 8)) /* _i=0...383 */
3985#define I40E_GLV_TEPC_MAX_INDEX 383
3986#define I40E_GLV_TEPC_TEPC_SHIFT 0
3987#define I40E_GLV_TEPC_TEPC_MASK (0xFFFFFFFF << I40E_GLV_TEPC_TEPC_SHIFT)
3988#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */
3989#define I40E_GLV_UPRCH_MAX_INDEX 383
3990#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
3991#define I40E_GLV_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLV_UPRCH_UPRCH_SHIFT)
3992#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */
3993#define I40E_GLV_UPRCL_MAX_INDEX 383
3994#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
3995#define I40E_GLV_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLV_UPRCL_UPRCL_SHIFT)
3996#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */
3997#define I40E_GLV_UPTCH_MAX_INDEX 383
3998#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
3999#define I40E_GLV_UPTCH_GLVUPTCH_MASK (0xFFFF << I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
4000#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */
4001#define I40E_GLV_UPTCL_MAX_INDEX 383
4002#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
4003#define I40E_GLV_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLV_UPTCL_UPTCL_SHIFT)
4004#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
4005#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
4006#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
4007#define I40E_GLVEBTC_RBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
4008#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
4009#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
4010#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
4011#define I40E_GLVEBTC_RBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
4012#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
4013#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
4014#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
4015#define I40E_GLVEBTC_RPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
4016#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
4017#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
4018#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
4019#define I40E_GLVEBTC_RPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
4020#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
4021#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
4022#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
4023#define I40E_GLVEBTC_TBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
4024#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
4025#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
4026#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
4027#define I40E_GLVEBTC_TBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
4028#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
4029#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
4030#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
4031#define I40E_GLVEBTC_TPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
4032#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
4033#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
4034#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
4035#define I40E_GLVEBTC_TPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
4036#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */
4037#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
4038#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
4039#define I40E_GLVEBVL_BPCH_VLBPCH_MASK (0xFFFF << I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
4040#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */
4041#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
4042#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
4043#define I40E_GLVEBVL_BPCL_VLBPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
4044#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */
4045#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
4046#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
4047#define I40E_GLVEBVL_GORCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
4048#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */
4049#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
4050#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
4051#define I40E_GLVEBVL_GORCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
4052#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */
4053#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
4054#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
4055#define I40E_GLVEBVL_GOTCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
4056#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */
4057#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
4058#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
4059#define I40E_GLVEBVL_GOTCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
4060#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */
4061#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
4062#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
4063#define I40E_GLVEBVL_MPCH_VLMPCH_MASK (0xFFFF << I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
4064#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */
4065#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
4066#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
4067#define I40E_GLVEBVL_MPCL_VLMPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
4068#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */
4069#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
4070#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
4071#define I40E_GLVEBVL_UPCH_VLUPCH_MASK (0xFFFF << I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
4072#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */
4073#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
4074#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
4075#define I40E_GLVEBVL_UPCL_VLUPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
4076#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C
4077#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
4078#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK (0xFFFF << I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
4079#define I40E_GL_MTG_FLU_MSK_L 0x00269F44
4080#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT 0
4081#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_MASK (0xFFFFFFFF << I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT)
4082#define I40E_GL_SWR_DEF_ACT(_i) (0x0026CF00 + ((_i) * 4)) /* _i=0...25 */
4083#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 25
4084#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
4085#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
4086#define I40E_GL_SWR_DEF_ACT_EN 0x0026CF84
4087#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
4088#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
4089#define I40E_PRT_MSCCNT 0x00256BA0
4090#define I40E_PRT_MSCCNT_CCOUNT_SHIFT 0
4091#define I40E_PRT_MSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_MSCCNT_CCOUNT_SHIFT)
4092#define I40E_PRT_SCSTS 0x00256C20
4093#define I40E_PRT_SCSTS_BSCA_SHIFT 0
4094#define I40E_PRT_SCSTS_BSCA_MASK (0x1 << I40E_PRT_SCSTS_BSCA_SHIFT)
4095#define I40E_PRT_SCSTS_BSCAP_SHIFT 1
4096#define I40E_PRT_SCSTS_BSCAP_MASK (0x1 << I40E_PRT_SCSTS_BSCAP_SHIFT)
4097#define I40E_PRT_SCSTS_MSCA_SHIFT 2
4098#define I40E_PRT_SCSTS_MSCA_MASK (0x1 << I40E_PRT_SCSTS_MSCA_SHIFT)
4099#define I40E_PRT_SCSTS_MSCAP_SHIFT 3
4100#define I40E_PRT_SCSTS_MSCAP_MASK (0x1 << I40E_PRT_SCSTS_MSCAP_SHIFT)
4101#define I40E_PRT_SWT_BSCCNT 0x00256C60
4102#define I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT 0
4103#define I40E_PRT_SWT_BSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT)
/* PRTTSYN_ADJ: per-port timesync clock adjustment (magnitude + sign bit). */
#define I40E_PRTTSYN_ADJ 0x001E4280
#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK (0x7FFFFFFF << I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
/* 0x1u: left-shifting a signed 1 into bit 31 is undefined behavior in C;
 * the unsigned literal yields the intended 0x80000000 mask portably.
 */
#define I40E_PRTTSYN_ADJ_SIGN_MASK (0x1u << I40E_PRTTSYN_ADJ_SIGN_SHIFT)
4109#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */
4110#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
4111#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
4112#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
4113#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
4114#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK (0x3 << I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
4115#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
4116#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
4117#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
4118#define I40E_PRTTSYN_AUX_0_PULSEW_MASK (0xF << I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
4119#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
4120#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK (0x3 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
4121#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */
4122#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
4123#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
4124#define I40E_PRTTSYN_AUX_1_INSTNT_MASK (0x1 << I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
4125#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
4126#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK (0x1 << I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
4127#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */
4128#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
4129#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
4130#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK (0xFFFFFFFF << I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
/* PRTTSYN_CTL0: per-port timesync control register 0 (timer clear,
 * interrupt enables, owning-PF id, action select, enable bit).
 */
#define I40E_PRTTSYN_CTL0 0x001E4200
#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK (0x1 << I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
#define I40E_PRTTSYN_CTL0_PF_ID_MASK (0xF << I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
#define I40E_PRTTSYN_CTL0_TSYNACT_MASK (0x3 << I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
/* 0x1u: left-shifting a signed 1 into bit 31 is undefined behavior in C. */
#define I40E_PRTTSYN_CTL0_TSYNENA_MASK (0x1u << I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
/* PRTTSYN_CTL1: per-port timesync control register 1 (v1/v2 message-type
 * filters, timestamp mode, UDP enable, Rx-side enable bit).
 */
#define I40E_PRTTSYN_CTL1 0x00085020
#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK (0x3 << I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK (0x3 << I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
/* 0x1u: left-shifting a signed 1 into bit 31 is undefined behavior in C. */
#define I40E_PRTTSYN_CTL1_TSYNENA_MASK (0x1u << I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
4161#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */
4162#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
4163#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
4164#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
4165#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */
4166#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
4167#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
4168#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
4169#define I40E_PRTTSYN_INC_H 0x001E4060
4170#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
4171#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK (0x3F << I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
4172#define I40E_PRTTSYN_INC_L 0x001E4040
4173#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
4174#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
4175#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */
4176#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
4177#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
4178#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
4179#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */
4180#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
4181#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
4182#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
4183#define I40E_PRTTSYN_STAT_0 0x001E4220
4184#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
4185#define I40E_PRTTSYN_STAT_0_EVENT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
4186#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
4187#define I40E_PRTTSYN_STAT_0_EVENT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
4188#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
4189#define I40E_PRTTSYN_STAT_0_TGT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
4190#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
4191#define I40E_PRTTSYN_STAT_0_TGT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
4192#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
4193#define I40E_PRTTSYN_STAT_0_TXTIME_MASK (0x1 << I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
4194#define I40E_PRTTSYN_STAT_1 0x00085140
4195#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
4196#define I40E_PRTTSYN_STAT_1_RXT0_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
4197#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
4198#define I40E_PRTTSYN_STAT_1_RXT1_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
4199#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
4200#define I40E_PRTTSYN_STAT_1_RXT2_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
4201#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
4202#define I40E_PRTTSYN_STAT_1_RXT3_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
4203#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */
4204#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
4205#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
4206#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
4207#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */
4208#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
4209#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
4210#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
4211#define I40E_PRTTSYN_TIME_H 0x001E4120
4212#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
4213#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
4214#define I40E_PRTTSYN_TIME_L 0x001E4100
4215#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
4216#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
4217#define I40E_PRTTSYN_TXTIME_H 0x001E41E0
4218#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
4219#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
4220#define I40E_PRTTSYN_TXTIME_L 0x001E41C0
4221#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
4222#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
4223#define I40E_GLSCD_QUANTA 0x000B2080
4224#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
4225#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK (0x7 << I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
/* GL_MDET_RX: global Rx-path MDET status — reports the offending
 * function/event/queue plus a valid flag.
 * NOTE(review): MDET presumably stands for "malicious driver detect"
 * per i40e naming — confirm against the datasheet.
 */
#define I40E_GL_MDET_RX 0x0012A510
#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
#define I40E_GL_MDET_RX_FUNCTION_MASK (0xFF << I40E_GL_MDET_RX_FUNCTION_SHIFT)
#define I40E_GL_MDET_RX_EVENT_SHIFT 8
#define I40E_GL_MDET_RX_EVENT_MASK (0x1FF << I40E_GL_MDET_RX_EVENT_SHIFT)
#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
#define I40E_GL_MDET_RX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_RX_QUEUE_SHIFT)
#define I40E_GL_MDET_RX_VALID_SHIFT 31
/* 0x1u: left-shifting a signed 1 into bit 31 is undefined behavior in C. */
#define I40E_GL_MDET_RX_VALID_MASK (0x1u << I40E_GL_MDET_RX_VALID_SHIFT)
/* GL_MDET_TX: global Tx-path MDET status — same layout as GL_MDET_RX
 * (function/event/queue fields plus a valid flag).
 */
#define I40E_GL_MDET_TX 0x000E6480
#define I40E_GL_MDET_TX_FUNCTION_SHIFT 0
#define I40E_GL_MDET_TX_FUNCTION_MASK (0xFF << I40E_GL_MDET_TX_FUNCTION_SHIFT)
#define I40E_GL_MDET_TX_EVENT_SHIFT 8
#define I40E_GL_MDET_TX_EVENT_MASK (0x1FF << I40E_GL_MDET_TX_EVENT_SHIFT)
#define I40E_GL_MDET_TX_QUEUE_SHIFT 17
#define I40E_GL_MDET_TX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_TX_QUEUE_SHIFT)
#define I40E_GL_MDET_TX_VALID_SHIFT 31
/* 0x1u: left-shifting a signed 1 into bit 31 is undefined behavior in C. */
#define I40E_GL_MDET_TX_VALID_MASK (0x1u << I40E_GL_MDET_TX_VALID_SHIFT)
4244#define I40E_PF_MDET_RX 0x0012A400
4245#define I40E_PF_MDET_RX_VALID_SHIFT 0
4246#define I40E_PF_MDET_RX_VALID_MASK (0x1 << I40E_PF_MDET_RX_VALID_SHIFT)
4247#define I40E_PF_MDET_TX 0x000E6400
4248#define I40E_PF_MDET_TX_VALID_SHIFT 0
4249#define I40E_PF_MDET_TX_VALID_MASK (0x1 << I40E_PF_MDET_TX_VALID_SHIFT)
/* PF_VT_PFALLOC: per-PF virtualization allocation — first/last VF ids
 * assigned to this PF plus a valid flag.
 */
#define I40E_PF_VT_PFALLOC 0x001C0500
#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
#define I40E_PF_VT_PFALLOC_LASTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
/* 0x1u: left-shifting a signed 1 into bit 31 is undefined behavior in C. */
#define I40E_PF_VT_PFALLOC_VALID_MASK (0x1u << I40E_PF_VT_PFALLOC_VALID_SHIFT)
4257#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */
4258#define I40E_VP_MDET_RX_MAX_INDEX 127
4259#define I40E_VP_MDET_RX_VALID_SHIFT 0
4260#define I40E_VP_MDET_RX_VALID_MASK (0x1 << I40E_VP_MDET_RX_VALID_SHIFT)
4261#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */
4262#define I40E_VP_MDET_TX_MAX_INDEX 127
4263#define I40E_VP_MDET_TX_VALID_SHIFT 0
4264#define I40E_VP_MDET_TX_VALID_MASK (0x1 << I40E_VP_MDET_TX_VALID_SHIFT)
4265#define I40E_GLPM_WUMC 0x0006C800
4266#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
4267#define I40E_GLPM_WUMC_NOTCO_MASK (0x1 << I40E_GLPM_WUMC_NOTCO_SHIFT)
4268#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
4269#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK (0x1 << I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
4270#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
4271#define I40E_GLPM_WUMC_ROL_MODE_MASK (0x1 << I40E_GLPM_WUMC_ROL_MODE_SHIFT)
4272#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
4273#define I40E_GLPM_WUMC_RESERVED_4_MASK (0x1FFF << I40E_GLPM_WUMC_RESERVED_4_SHIFT)
4274#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
4275#define I40E_GLPM_WUMC_MNG_WU_PF_MASK (0xFFFF << I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
4276#define I40E_PFPM_APM 0x000B8080
4277#define I40E_PFPM_APM_APME_SHIFT 0
4278#define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT)
4279#define I40E_PFPM_FHFT_DATA(_i, _j) (0x00060000 + ((_i) * 4096 + (_j) * 128))
4280#define I40E_PFPM_FHFT_DATA_MAX_INDEX 7
4281#define I40E_PFPM_FHFT_DATA_DWORD_SHIFT 0
4282#define I40E_PFPM_FHFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PFPM_FHFT_DATA_DWORD_SHIFT)
4283#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */
4284#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
4285#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
4286#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
4287#define I40E_PFPM_FHFT_MASK(_i, _j) (0x00068000 + ((_i) * 1024 + (_j) * 128))
4288#define I40E_PFPM_FHFT_MASK_MAX_INDEX 7
4289#define I40E_PFPM_FHFT_MASK_MASK_SHIFT 0
4290#define I40E_PFPM_FHFT_MASK_MASK_MASK (0xFFFF << I40E_PFPM_FHFT_MASK_MASK_SHIFT)
4291#define I40E_PFPM_PROXYFC 0x00245A80
4292#define I40E_PFPM_PROXYFC_PPROXYE_SHIFT 0
4293#define I40E_PFPM_PROXYFC_PPROXYE_MASK (0x1 << I40E_PFPM_PROXYFC_PPROXYE_SHIFT)
4294#define I40E_PFPM_PROXYFC_EX_SHIFT 1
4295#define I40E_PFPM_PROXYFC_EX_MASK (0x1 << I40E_PFPM_PROXYFC_EX_SHIFT)
4296#define I40E_PFPM_PROXYFC_ARP_SHIFT 4
4297#define I40E_PFPM_PROXYFC_ARP_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_SHIFT)
4298#define I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT 5
4299#define I40E_PFPM_PROXYFC_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT)
4300#define I40E_PFPM_PROXYFC_NS_SHIFT 9
4301#define I40E_PFPM_PROXYFC_NS_MASK (0x1 << I40E_PFPM_PROXYFC_NS_SHIFT)
4302#define I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT 10
4303#define I40E_PFPM_PROXYFC_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT)
4304#define I40E_PFPM_PROXYFC_MLD_SHIFT 12
4305#define I40E_PFPM_PROXYFC_MLD_MASK (0x1 << I40E_PFPM_PROXYFC_MLD_SHIFT)
4306#define I40E_PFPM_PROXYS 0x00245B80
4307#define I40E_PFPM_PROXYS_EX_SHIFT 1
4308#define I40E_PFPM_PROXYS_EX_MASK (0x1 << I40E_PFPM_PROXYS_EX_SHIFT)
4309#define I40E_PFPM_PROXYS_ARP_SHIFT 4
4310#define I40E_PFPM_PROXYS_ARP_MASK (0x1 << I40E_PFPM_PROXYS_ARP_SHIFT)
4311#define I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT 5
4312#define I40E_PFPM_PROXYS_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT)
4313#define I40E_PFPM_PROXYS_NS_SHIFT 9
4314#define I40E_PFPM_PROXYS_NS_MASK (0x1 << I40E_PFPM_PROXYS_NS_SHIFT)
4315#define I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT 10
4316#define I40E_PFPM_PROXYS_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT)
4317#define I40E_PFPM_PROXYS_MLD_SHIFT 12
4318#define I40E_PFPM_PROXYS_MLD_MASK (0x1 << I40E_PFPM_PROXYS_MLD_SHIFT)
4319#define I40E_PFPM_WUC 0x0006B200
4320#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
4321#define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT)
4322#define I40E_PFPM_WUFC 0x0006B400
4323#define I40E_PFPM_WUFC_LNKC_SHIFT 0
4324#define I40E_PFPM_WUFC_LNKC_MASK (0x1 << I40E_PFPM_WUFC_LNKC_SHIFT)
4325#define I40E_PFPM_WUFC_MAG_SHIFT 1
4326#define I40E_PFPM_WUFC_MAG_MASK (0x1 << I40E_PFPM_WUFC_MAG_SHIFT)
4327#define I40E_PFPM_WUFC_MNG_SHIFT 3
4328#define I40E_PFPM_WUFC_MNG_MASK (0x1 << I40E_PFPM_WUFC_MNG_SHIFT)
4329#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
4330#define I40E_PFPM_WUFC_FLX0_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
4331#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
4332#define I40E_PFPM_WUFC_FLX1_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
4333#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
4334#define I40E_PFPM_WUFC_FLX2_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
4335#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
4336#define I40E_PFPM_WUFC_FLX3_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
4337#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
4338#define I40E_PFPM_WUFC_FLX4_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
4339#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
4340#define I40E_PFPM_WUFC_FLX5_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
4341#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
4342#define I40E_PFPM_WUFC_FLX6_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
4343#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
4344#define I40E_PFPM_WUFC_FLX7_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
4345#define I40E_PFPM_WUFC_FLX0_SHIFT 16
4346#define I40E_PFPM_WUFC_FLX0_MASK (0x1 << I40E_PFPM_WUFC_FLX0_SHIFT)
4347#define I40E_PFPM_WUFC_FLX1_SHIFT 17
4348#define I40E_PFPM_WUFC_FLX1_MASK (0x1 << I40E_PFPM_WUFC_FLX1_SHIFT)
4349#define I40E_PFPM_WUFC_FLX2_SHIFT 18
4350#define I40E_PFPM_WUFC_FLX2_MASK (0x1 << I40E_PFPM_WUFC_FLX2_SHIFT)
4351#define I40E_PFPM_WUFC_FLX3_SHIFT 19
4352#define I40E_PFPM_WUFC_FLX3_MASK (0x1 << I40E_PFPM_WUFC_FLX3_SHIFT)
4353#define I40E_PFPM_WUFC_FLX4_SHIFT 20
4354#define I40E_PFPM_WUFC_FLX4_MASK (0x1 << I40E_PFPM_WUFC_FLX4_SHIFT)
4355#define I40E_PFPM_WUFC_FLX5_SHIFT 21
4356#define I40E_PFPM_WUFC_FLX5_MASK (0x1 << I40E_PFPM_WUFC_FLX5_SHIFT)
4357#define I40E_PFPM_WUFC_FLX6_SHIFT 22
4358#define I40E_PFPM_WUFC_FLX6_MASK (0x1 << I40E_PFPM_WUFC_FLX6_SHIFT)
4359#define I40E_PFPM_WUFC_FLX7_SHIFT 23
4360#define I40E_PFPM_WUFC_FLX7_MASK (0x1 << I40E_PFPM_WUFC_FLX7_SHIFT)
4361#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
4362#define I40E_PFPM_WUFC_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
4363#define I40E_PFPM_WUS 0x0006B600
4364#define I40E_PFPM_WUS_LNKC_SHIFT 0
4365#define I40E_PFPM_WUS_LNKC_MASK (0x1 << I40E_PFPM_WUS_LNKC_SHIFT)
4366#define I40E_PFPM_WUS_MAG_SHIFT 1
4367#define I40E_PFPM_WUS_MAG_MASK (0x1 << I40E_PFPM_WUS_MAG_SHIFT)
4368#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
4369#define I40E_PFPM_WUS_PME_STATUS_MASK (0x1 << I40E_PFPM_WUS_PME_STATUS_SHIFT)
4370#define I40E_PFPM_WUS_MNG_SHIFT 3
4371#define I40E_PFPM_WUS_MNG_MASK (0x1 << I40E_PFPM_WUS_MNG_SHIFT)
4372#define I40E_PFPM_WUS_FLX0_SHIFT 16
4373#define I40E_PFPM_WUS_FLX0_MASK (0x1 << I40E_PFPM_WUS_FLX0_SHIFT)
4374#define I40E_PFPM_WUS_FLX1_SHIFT 17
4375#define I40E_PFPM_WUS_FLX1_MASK (0x1 << I40E_PFPM_WUS_FLX1_SHIFT)
4376#define I40E_PFPM_WUS_FLX2_SHIFT 18
4377#define I40E_PFPM_WUS_FLX2_MASK (0x1 << I40E_PFPM_WUS_FLX2_SHIFT)
4378#define I40E_PFPM_WUS_FLX3_SHIFT 19
4379#define I40E_PFPM_WUS_FLX3_MASK (0x1 << I40E_PFPM_WUS_FLX3_SHIFT)
4380#define I40E_PFPM_WUS_FLX4_SHIFT 20
4381#define I40E_PFPM_WUS_FLX4_MASK (0x1 << I40E_PFPM_WUS_FLX4_SHIFT)
4382#define I40E_PFPM_WUS_FLX5_SHIFT 21
4383#define I40E_PFPM_WUS_FLX5_MASK (0x1 << I40E_PFPM_WUS_FLX5_SHIFT)
4384#define I40E_PFPM_WUS_FLX6_SHIFT 22
4385#define I40E_PFPM_WUS_FLX6_MASK (0x1 << I40E_PFPM_WUS_FLX6_SHIFT)
4386#define I40E_PFPM_WUS_FLX7_SHIFT 23
4387#define I40E_PFPM_WUS_FLX7_MASK (0x1 << I40E_PFPM_WUS_FLX7_SHIFT)
4388#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
4389#define I40E_PFPM_WUS_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUS_FW_RST_WK_SHIFT)
4390#define I40E_PRTPM_FHFHR 0x0006C000
4391#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
4392#define I40E_PRTPM_FHFHR_UNICAST_MASK (0x1 << I40E_PRTPM_FHFHR_UNICAST_SHIFT)
4393#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
4394#define I40E_PRTPM_FHFHR_MULTICAST_MASK (0x1 << I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
4395#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */
4396#define I40E_PRTPM_SAH_MAX_INDEX 3
4397#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
4398#define I40E_PRTPM_SAH_PFPM_SAH_MASK (0xFFFF << I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
4399#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
4400#define I40E_PRTPM_SAH_PF_NUM_MASK (0xF << I40E_PRTPM_SAH_PF_NUM_SHIFT)
4401#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
4402#define I40E_PRTPM_SAH_MC_MAG_EN_MASK (0x1 << I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
4403#define I40E_PRTPM_SAH_AV_SHIFT 31
4404#define I40E_PRTPM_SAH_AV_MASK (0x1 << I40E_PRTPM_SAH_AV_SHIFT)
4405#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */
4406#define I40E_PRTPM_SAL_MAX_INDEX 3
4407#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
4408#define I40E_PRTPM_SAL_PFPM_SAL_MASK (0xFFFFFFFF << I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
4409#define I40E_VF_ARQBAH1 0x00006000
4410#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
4411#define I40E_VF_ARQBAH1_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH1_ARQBAH_SHIFT)
4412#define I40E_VF_ARQBAL1 0x00006C00
4413#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
4414#define I40E_VF_ARQBAL1_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL1_ARQBAL_SHIFT)
4415#define I40E_VF_ARQH1 0x00007400
4416#define I40E_VF_ARQH1_ARQH_SHIFT 0
4417#define I40E_VF_ARQH1_ARQH_MASK (0x3FF << I40E_VF_ARQH1_ARQH_SHIFT)
4418#define I40E_VF_ARQLEN1 0x00008000
4419#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
4420#define I40E_VF_ARQLEN1_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN1_ARQLEN_SHIFT)
4421#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
4422#define I40E_VF_ARQLEN1_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN1_ARQVFE_SHIFT)
4423#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
4424#define I40E_VF_ARQLEN1_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
4425#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
4426#define I40E_VF_ARQLEN1_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
4427#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
4428#define I40E_VF_ARQLEN1_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
4429#define I40E_VF_ARQT1 0x00007000
4430#define I40E_VF_ARQT1_ARQT_SHIFT 0
4431#define I40E_VF_ARQT1_ARQT_MASK (0x3FF << I40E_VF_ARQT1_ARQT_SHIFT)
4432#define I40E_VF_ATQBAH1 0x00007800
4433#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
4434#define I40E_VF_ATQBAH1_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH1_ATQBAH_SHIFT)
4435#define I40E_VF_ATQBAL1 0x00007C00
4436#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
4437#define I40E_VF_ATQBAL1_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL1_ATQBAL_SHIFT)
4438#define I40E_VF_ATQH1 0x00006400
4439#define I40E_VF_ATQH1_ATQH_SHIFT 0
4440#define I40E_VF_ATQH1_ATQH_MASK (0x3FF << I40E_VF_ATQH1_ATQH_SHIFT)
4441#define I40E_VF_ATQLEN1 0x00006800
4442#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
4443#define I40E_VF_ATQLEN1_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN1_ATQLEN_SHIFT)
4444#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
4445#define I40E_VF_ATQLEN1_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN1_ATQVFE_SHIFT)
4446#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
4447#define I40E_VF_ATQLEN1_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
4448#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
4449#define I40E_VF_ATQLEN1_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
4450#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
4451#define I40E_VF_ATQLEN1_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
4452#define I40E_VF_ATQT1 0x00008400
4453#define I40E_VF_ATQT1_ATQT_SHIFT 0
4454#define I40E_VF_ATQT1_ATQT_MASK (0x3FF << I40E_VF_ATQT1_ATQT_SHIFT)
4455#define I40E_VFGEN_RSTAT 0x00008800
4456#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
4457#define I40E_VFGEN_RSTAT_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
4458#define I40E_VFINT_DYN_CTL01 0x00005C00
4459#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
4460#define I40E_VFINT_DYN_CTL01_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
4461#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
4462#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
4463#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
4464#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
4465#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
4466#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
4467#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
4468#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
4469#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
4470#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
4471#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
4472#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
4473#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
4474#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
4475#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4))
4476#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
4477#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
4478#define I40E_VFINT_DYN_CTLN1_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
4479#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
4480#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
4481#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
4482#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
4483#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
4484#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
4485#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
4486#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
4487#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
4488#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
4489#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
4490#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
4491#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
4492#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
4493#define I40E_VFINT_ICR0_ENA1 0x00005000
4494#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
4495#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
4496#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
4497#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
4498#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
4499#define I40E_VFINT_ICR0_ENA1_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
4500#define I40E_VFINT_ICR01 0x00004800
4501#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
4502#define I40E_VFINT_ICR01_INTEVENT_MASK (0x1 << I40E_VFINT_ICR01_INTEVENT_SHIFT)
4503#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
4504#define I40E_VFINT_ICR01_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_0_SHIFT)
4505#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
4506#define I40E_VFINT_ICR01_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_1_SHIFT)
4507#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
4508#define I40E_VFINT_ICR01_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_2_SHIFT)
4509#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
4510#define I40E_VFINT_ICR01_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_3_SHIFT)
4511#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
4512#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
4513#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
4514#define I40E_VFINT_ICR01_ADMINQ_MASK (0x1 << I40E_VFINT_ICR01_ADMINQ_SHIFT)
4515#define I40E_VFINT_ICR01_SWINT_SHIFT 31
4516#define I40E_VFINT_ICR01_SWINT_MASK (0x1 << I40E_VFINT_ICR01_SWINT_SHIFT)
4517#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */
4518#define I40E_VFINT_ITR01_MAX_INDEX 2
4519#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
4520#define I40E_VFINT_ITR01_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR01_INTERVAL_SHIFT)
4521#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4))
4522#define I40E_VFINT_ITRN1_MAX_INDEX 2
4523#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
4524#define I40E_VFINT_ITRN1_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN1_INTERVAL_SHIFT)
4525#define I40E_VFINT_STAT_CTL01 0x00005400
4526#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
4527#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
4528#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */
4529#define I40E_QRX_TAIL1_MAX_INDEX 15
4530#define I40E_QRX_TAIL1_TAIL_SHIFT 0
4531#define I40E_QRX_TAIL1_TAIL_MASK (0x1FFF << I40E_QRX_TAIL1_TAIL_SHIFT)
4532#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */
4533#define I40E_QTX_TAIL1_MAX_INDEX 15
4534#define I40E_QTX_TAIL1_TAIL_SHIFT 0
4535#define I40E_QTX_TAIL1_TAIL_MASK (0x1FFF << I40E_QTX_TAIL1_TAIL_SHIFT)
4536#define I40E_VFMSIX_PBA 0x00002000
4537#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
4538#define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT)
4539#define I40E_VFMSIX_TADD(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
4540#define I40E_VFMSIX_TADD_MAX_INDEX 16
4541#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
4542#define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
4543#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
4544#define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
4545#define I40E_VFMSIX_TMSG(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
4546#define I40E_VFMSIX_TMSG_MAX_INDEX 16
4547#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
4548#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
4549#define I40E_VFMSIX_TUADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
4550#define I40E_VFMSIX_TUADD_MAX_INDEX 16
4551#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
4552#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
4553#define I40E_VFMSIX_TVCTRL(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
4554#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
4555#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
4556#define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT)
4557#define I40E_VFCM_PE_ERRDATA 0x0000DC00
4558#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
4559#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
4560#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
4561#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
4562#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
4563#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
4564#define I40E_VFCM_PE_ERRINFO 0x0000D800
4565#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
4566#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
4567#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
4568#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
4569#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
4570#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
4571#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
4572#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
4573#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
4574#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
4575#define I40E_VFPE_AEQALLOC1 0x0000A400
4576#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
4577#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
4578#define I40E_VFPE_CCQPHIGH1 0x00009800
4579#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
4580#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
4581#define I40E_VFPE_CCQPLOW1 0x0000AC00
4582#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
4583#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
4584#define I40E_VFPE_CCQPSTATUS1 0x0000B800
4585#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
4586#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
4587#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
4588#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
4589#define I40E_VFPE_CQACK1 0x0000B000
4590#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
4591#define I40E_VFPE_CQACK1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK1_PECQID_SHIFT)
4592#define I40E_VFPE_CQARM1 0x0000B400
4593#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
4594#define I40E_VFPE_CQARM1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM1_PECQID_SHIFT)
4595#define I40E_VFPE_CQPDB1 0x0000BC00
4596#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
4597#define I40E_VFPE_CQPDB1_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
4598#define I40E_VFPE_CQPERRCODES1 0x00009C00
4599#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
4600#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
4601#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
4602#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
4603#define I40E_VFPE_CQPTAIL1 0x0000A000
4604#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
4605#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
4606#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
4607#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
4608#define I40E_VFPE_IPCONFIG01 0x00008C00
4609#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
4610#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
4611#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
4612#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
4613#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT 17
4614#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT)
4615#define I40E_VFPE_MRTEIDXMASK1 0x00009000
4616#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
4617#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
4618#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400
4619#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
4620#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
4621#define I40E_VFPE_TCPNOWTIMER1 0x0000A800
4622#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
4623#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
4624#define I40E_VFPE_WQEALLOC1 0x0000C000
4625#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
4626#define I40E_VFPE_WQEALLOC1_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
4627#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
4628#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
4629#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */
4630#define I40E_VFQF_HENA_MAX_INDEX 1
4631#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
4632#define I40E_VFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
4633#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */
4634#define I40E_VFQF_HKEY_MAX_INDEX 12
4635#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
4636#define I40E_VFQF_HKEY_KEY_0_MASK (0xFF << I40E_VFQF_HKEY_KEY_0_SHIFT)
4637#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
4638#define I40E_VFQF_HKEY_KEY_1_MASK (0xFF << I40E_VFQF_HKEY_KEY_1_SHIFT)
4639#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
4640#define I40E_VFQF_HKEY_KEY_2_MASK (0xFF << I40E_VFQF_HKEY_KEY_2_SHIFT)
4641#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
4642#define I40E_VFQF_HKEY_KEY_3_MASK (0xFF << I40E_VFQF_HKEY_KEY_3_SHIFT)
4643#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */
4644#define I40E_VFQF_HLUT_MAX_INDEX 15
4645#define I40E_VFQF_HLUT_LUT0_SHIFT 0
4646#define I40E_VFQF_HLUT_LUT0_MASK (0xF << I40E_VFQF_HLUT_LUT0_SHIFT)
4647#define I40E_VFQF_HLUT_LUT1_SHIFT 8
4648#define I40E_VFQF_HLUT_LUT1_MASK (0xF << I40E_VFQF_HLUT_LUT1_SHIFT)
4649#define I40E_VFQF_HLUT_LUT2_SHIFT 16
4650#define I40E_VFQF_HLUT_LUT2_MASK (0xF << I40E_VFQF_HLUT_LUT2_SHIFT)
4651#define I40E_VFQF_HLUT_LUT3_SHIFT 24
4652#define I40E_VFQF_HLUT_LUT3_MASK (0xF << I40E_VFQF_HLUT_LUT3_SHIFT)
4653#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */
4654#define I40E_VFQF_HREGION_MAX_INDEX 7
4655#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
4656#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
4657#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
4658#define I40E_VFQF_HREGION_REGION_0_MASK (0x7 << I40E_VFQF_HREGION_REGION_0_SHIFT)
4659#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
4660#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
4661#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
4662#define I40E_VFQF_HREGION_REGION_1_MASK (0x7 << I40E_VFQF_HREGION_REGION_1_SHIFT)
4663#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
4664#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
4665#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
4666#define I40E_VFQF_HREGION_REGION_2_MASK (0x7 << I40E_VFQF_HREGION_REGION_2_SHIFT)
4667#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
4668#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
4669#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
4670#define I40E_VFQF_HREGION_REGION_3_MASK (0x7 << I40E_VFQF_HREGION_REGION_3_SHIFT)
4671#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
4672#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
4673#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
4674#define I40E_VFQF_HREGION_REGION_4_MASK (0x7 << I40E_VFQF_HREGION_REGION_4_SHIFT)
4675#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
4676#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
4677#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
4678#define I40E_VFQF_HREGION_REGION_5_MASK (0x7 << I40E_VFQF_HREGION_REGION_5_SHIFT)
4679#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
4680#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
4681#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
4682#define I40E_VFQF_HREGION_REGION_6_MASK (0x7 << I40E_VFQF_HREGION_REGION_6_SHIFT)
4683#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
4684#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
4685#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
4686#define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT)
4687
4688#endif
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
new file mode 100644
index 000000000000..5e5bcddac573
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -0,0 +1,101 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
#ifndef _I40E_STATUS_H_
#define _I40E_STATUS_H_

/* Error codes shared across the i40e driver.
 *
 * I40E_SUCCESS is zero; every failure is a distinct negative value so a
 * plain "if (status)" check catches all errors.  The numeric values are
 * part of the driver's internal ABI between components — do not renumber.
 */
enum i40e_status_code {
	I40E_SUCCESS				= 0,
	/* NVM / PHY / basic configuration */
	I40E_ERR_NVM				= -1,
	I40E_ERR_NVM_CHECKSUM			= -2,
	I40E_ERR_PHY				= -3,
	I40E_ERR_CONFIG				= -4,
	I40E_ERR_PARAM				= -5,
	I40E_ERR_MAC_TYPE			= -6,
	I40E_ERR_UNKNOWN_PHY			= -7,
	I40E_ERR_LINK_SETUP			= -8,
	I40E_ERR_ADAPTER_STOPPED		= -9,
	I40E_ERR_INVALID_MAC_ADDR		= -10,
	I40E_ERR_DEVICE_NOT_SUPPORTED		= -11,
	I40E_ERR_MASTER_REQUESTS_PENDING	= -12,
	I40E_ERR_INVALID_LINK_SETTINGS		= -13,
	I40E_ERR_AUTONEG_NOT_COMPLETE		= -14,
	I40E_ERR_RESET_FAILED			= -15,
	I40E_ERR_SWFW_SYNC			= -16,
	I40E_ERR_NO_AVAILABLE_VSI		= -17,
	I40E_ERR_NO_MEMORY			= -18,
	I40E_ERR_BAD_PTR			= -19,
	I40E_ERR_RING_FULL			= -20,
	/* protocol-engine object identifiers and queue operations */
	I40E_ERR_INVALID_PD_ID			= -21,
	I40E_ERR_INVALID_QP_ID			= -22,
	I40E_ERR_INVALID_CQ_ID			= -23,
	I40E_ERR_INVALID_CEQ_ID			= -24,
	I40E_ERR_INVALID_AEQ_ID			= -25,
	I40E_ERR_INVALID_SIZE			= -26,
	I40E_ERR_INVALID_ARP_INDEX		= -27,
	I40E_ERR_INVALID_FPM_FUNC_ID		= -28,
	I40E_ERR_QP_INVALID_MSG_SIZE		= -29,
	I40E_ERR_QP_TOOMANY_WRS_POSTED		= -30,
	I40E_ERR_INVALID_FRAG_COUNT		= -31,
	I40E_ERR_QUEUE_EMPTY			= -32,
	I40E_ERR_INVALID_ALIGNMENT		= -33,
	I40E_ERR_FLUSHED_QUEUE			= -34,
	I40E_ERR_INVALID_PUSH_PAGE_INDEX	= -35,
	I40E_ERR_INVALID_IMM_DATA_SIZE		= -36,
	I40E_ERR_TIMEOUT			= -37,
	I40E_ERR_OPCODE_MISMATCH		= -38,
	I40E_ERR_CQP_COMPL_ERROR		= -39,
	I40E_ERR_INVALID_VF_ID			= -40,
	I40E_ERR_INVALID_HMCFN_ID		= -41,
	/* host memory cache (HMC) and page/block management */
	I40E_ERR_BACKING_PAGE_ERROR		= -42,
	I40E_ERR_NO_PBLCHUNKS_AVAILABLE		= -43,
	I40E_ERR_INVALID_PBLE_INDEX		= -44,
	I40E_ERR_INVALID_SD_INDEX		= -45,
	I40E_ERR_INVALID_PAGE_DESC_INDEX	= -46,
	I40E_ERR_INVALID_SD_TYPE		= -47,
	I40E_ERR_MEMCPY_FAILED			= -48,
	I40E_ERR_INVALID_HMC_OBJ_INDEX		= -49,
	I40E_ERR_INVALID_HMC_OBJ_COUNT		= -50,
	I40E_ERR_INVALID_SRQ_ARM_LIMIT		= -51,
	I40E_ERR_SRQ_ENABLED			= -52,
	/* admin queue (firmware mailbox) */
	I40E_ERR_ADMIN_QUEUE_ERROR		= -53,
	I40E_ERR_ADMIN_QUEUE_TIMEOUT		= -54,
	I40E_ERR_BUF_TOO_SHORT			= -55,
	I40E_ERR_ADMIN_QUEUE_FULL		= -56,
	I40E_ERR_ADMIN_QUEUE_NO_WORK		= -57,
	/* miscellaneous */
	I40E_ERR_BAD_IWARP_CQE			= -58,
	I40E_ERR_NVM_BLANK_MODE			= -59,
	I40E_ERR_NOT_IMPLEMENTED		= -60,
	I40E_ERR_PE_DOORBELL_NOT_ENABLED	= -61,
	I40E_ERR_DIAG_TEST_FAILED		= -62,
	I40E_ERR_NOT_READY			= -63,
	I40E_NOT_SUPPORTED			= -64,
	I40E_ERR_FIRMWARE_API_VERSION		= -65,
};

#endif /* _I40E_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
new file mode 100644
index 000000000000..49d2cfa9b0cc
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -0,0 +1,1817 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#include "i40e.h"
29
30static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
31 u32 td_tag)
32{
33 return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
34 ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
35 ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
36 ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
37 ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
38}
39
40/**
41 * i40e_program_fdir_filter - Program a Flow Director filter
42 * @fdir_input: Packet data that will be filter parameters
43 * @pf: The pf pointer
44 * @add: True for add/update, False for remove
45 **/
46int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
47 struct i40e_pf *pf, bool add)
48{
49 struct i40e_filter_program_desc *fdir_desc;
50 struct i40e_tx_buffer *tx_buf;
51 struct i40e_tx_desc *tx_desc;
52 struct i40e_ring *tx_ring;
53 struct i40e_vsi *vsi;
54 struct device *dev;
55 dma_addr_t dma;
56 u32 td_cmd = 0;
57 u16 i;
58
59 /* find existing FDIR VSI */
60 vsi = NULL;
61 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
62 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
63 vsi = pf->vsi[i];
64 if (!vsi)
65 return -ENOENT;
66
67 tx_ring = &vsi->tx_rings[0];
68 dev = tx_ring->dev;
69
70 dma = dma_map_single(dev, fdir_data->raw_packet,
71 I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
72 if (dma_mapping_error(dev, dma))
73 goto dma_fail;
74
75 /* grab the next descriptor */
76 fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
77 tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
78 tx_ring->next_to_use++;
79 if (tx_ring->next_to_use == tx_ring->count)
80 tx_ring->next_to_use = 0;
81
82 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index
83 << I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
84 & I40E_TXD_FLTR_QW0_QINDEX_MASK);
85
86 fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->flex_off
87 << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
88 & I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
89
90 fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->pctype
91 << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
92 & I40E_TXD_FLTR_QW0_PCTYPE_MASK);
93
94 /* Use LAN VSI Id if not programmed by user */
95 if (fdir_data->dest_vsi == 0)
96 fdir_desc->qindex_flex_ptype_vsi |=
97 cpu_to_le32((pf->vsi[pf->lan_vsi]->id)
98 << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
99 else
100 fdir_desc->qindex_flex_ptype_vsi |=
101 cpu_to_le32((fdir_data->dest_vsi
102 << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
103 & I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
104
105 fdir_desc->dtype_cmd_cntindex =
106 cpu_to_le32(I40E_TX_DESC_DTYPE_FILTER_PROG);
107
108 if (add)
109 fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
110 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE
111 << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
112 else
113 fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
114 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE
115 << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
116
117 fdir_desc->dtype_cmd_cntindex |= cpu_to_le32((fdir_data->dest_ctl
118 << I40E_TXD_FLTR_QW1_DEST_SHIFT)
119 & I40E_TXD_FLTR_QW1_DEST_MASK);
120
121 fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
122 (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
123 & I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
124
125 if (fdir_data->cnt_index != 0) {
126 fdir_desc->dtype_cmd_cntindex |=
127 cpu_to_le32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
128 fdir_desc->dtype_cmd_cntindex |=
129 cpu_to_le32((fdir_data->cnt_index
130 << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
131 & I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
132 }
133
134 fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
135
136 /* Now program a dummy descriptor */
137 tx_desc = I40E_TX_DESC(tx_ring, tx_ring->next_to_use);
138 tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
139 tx_ring->next_to_use++;
140 if (tx_ring->next_to_use == tx_ring->count)
141 tx_ring->next_to_use = 0;
142
143 tx_desc->buffer_addr = cpu_to_le64(dma);
144 td_cmd = I40E_TX_DESC_CMD_EOP |
145 I40E_TX_DESC_CMD_RS |
146 I40E_TX_DESC_CMD_DUMMY;
147
148 tx_desc->cmd_type_offset_bsz =
149 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
150
151 /* Mark the data descriptor to be watched */
152 tx_buf->next_to_watch = tx_desc;
153
154 /* Force memory writes to complete before letting h/w
155 * know there are new descriptors to fetch. (Only
156 * applicable for weak-ordered memory model archs,
157 * such as IA-64).
158 */
159 wmb();
160
161 writel(tx_ring->next_to_use, tx_ring->tail);
162 return 0;
163
164dma_fail:
165 return -1;
166}
167
168/**
169 * i40e_fd_handle_status - check the Programming Status for FD
170 * @rx_ring: the Rx ring for this descriptor
171 * @qw: the descriptor data
172 * @prog_id: the id originally used for programming
173 *
174 * This is used to verify if the FD programming or invalidation
175 * requested by SW to the HW is successful or not and take actions accordingly.
176 **/
177static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id)
178{
179 struct pci_dev *pdev = rx_ring->vsi->back->pdev;
180 u32 error;
181
182 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
183 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
184
185 /* for now just print the Status */
186 dev_info(&pdev->dev, "FD programming id %02x, Status %08x\n",
187 prog_id, error);
188}
189
190/**
191 * i40e_unmap_tx_resource - Release a Tx buffer
192 * @ring: the ring that owns the buffer
193 * @tx_buffer: the buffer to free
194 **/
195static inline void i40e_unmap_tx_resource(struct i40e_ring *ring,
196 struct i40e_tx_buffer *tx_buffer)
197{
198 if (tx_buffer->dma) {
199 if (tx_buffer->tx_flags & I40E_TX_FLAGS_MAPPED_AS_PAGE)
200 dma_unmap_page(ring->dev,
201 tx_buffer->dma,
202 tx_buffer->length,
203 DMA_TO_DEVICE);
204 else
205 dma_unmap_single(ring->dev,
206 tx_buffer->dma,
207 tx_buffer->length,
208 DMA_TO_DEVICE);
209 }
210 tx_buffer->dma = 0;
211 tx_buffer->time_stamp = 0;
212}
213
214/**
215 * i40e_clean_tx_ring - Free any empty Tx buffers
216 * @tx_ring: ring to be cleaned
217 **/
218void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
219{
220 struct i40e_tx_buffer *tx_buffer;
221 unsigned long bi_size;
222 u16 i;
223
224 /* ring already cleared, nothing to do */
225 if (!tx_ring->tx_bi)
226 return;
227
228 /* Free all the Tx ring sk_buffs */
229 for (i = 0; i < tx_ring->count; i++) {
230 tx_buffer = &tx_ring->tx_bi[i];
231 i40e_unmap_tx_resource(tx_ring, tx_buffer);
232 if (tx_buffer->skb)
233 dev_kfree_skb_any(tx_buffer->skb);
234 tx_buffer->skb = NULL;
235 }
236
237 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
238 memset(tx_ring->tx_bi, 0, bi_size);
239
240 /* Zero out the descriptor ring */
241 memset(tx_ring->desc, 0, tx_ring->size);
242
243 tx_ring->next_to_use = 0;
244 tx_ring->next_to_clean = 0;
245}
246
247/**
248 * i40e_free_tx_resources - Free Tx resources per queue
249 * @tx_ring: Tx descriptor ring for a specific queue
250 *
251 * Free all transmit software resources
252 **/
253void i40e_free_tx_resources(struct i40e_ring *tx_ring)
254{
255 i40e_clean_tx_ring(tx_ring);
256 kfree(tx_ring->tx_bi);
257 tx_ring->tx_bi = NULL;
258
259 if (tx_ring->desc) {
260 dma_free_coherent(tx_ring->dev, tx_ring->size,
261 tx_ring->desc, tx_ring->dma);
262 tx_ring->desc = NULL;
263 }
264}
265
266/**
267 * i40e_get_tx_pending - how many tx descriptors not processed
268 * @tx_ring: the ring of descriptors
269 *
270 * Since there is no access to the ring head register
271 * in XL710, we need to use our local copies
272 **/
273static u32 i40e_get_tx_pending(struct i40e_ring *ring)
274{
275 u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
276 ? ring->next_to_use
277 : ring->next_to_use + ring->count);
278 return ntu - ring->next_to_clean;
279}
280
/**
 * i40e_check_tx_hang - Is there a hang in the Tx queue
 * @tx_ring: the ring of descriptors
 *
 * Returns true only when no completions have happened since the last
 * call AND work is still pending AND the ARMED bit was already set by
 * a previous call (i.e. the condition held for two checks in a row).
 **/
static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
{
	u32 tx_pending = i40e_get_tx_pending(tx_ring);
	bool ret = false;

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames. By
	 * requiring this to fail twice we avoid races with
	 * PFC clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if ((tx_ring->tx_stats.tx_done_old == tx_ring->tx_stats.packets) &&
	    tx_pending) {
		/* make sure it is true for two checks in a row */
		ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
				       &tx_ring->state);
	} else {
		/* update completed stats and disarm the hang check */
		tx_ring->tx_stats.tx_done_old = tx_ring->tx_stats.packets;
		clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
	}

	return ret;
}
316
/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: tx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Walks completed packets (each spanning from its first descriptor up
 * to the descriptor recorded in next_to_watch), unmapping buffers and
 * freeing skbs, then updates ring/q_vector statistics, runs the hang
 * check, and restarts the netdev subqueue if enough descriptors freed.
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_packets = 0;
	unsigned int total_bytes = 0;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);

	/* one outer iteration retires one complete packet */
	for (; budget; budget--) {
		struct i40e_tx_desc *eop_desc;

		eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* count the packet as being completed */
		tx_ring->tx_stats.completed++;
		tx_buf->next_to_watch = NULL;
		tx_buf->time_stamp = 0;

		/* set memory barrier before eop_desc is verified */
		rmb();

		/* unmap every buffer up to and including eop_desc */
		do {
			i40e_unmap_tx_resource(tx_ring, tx_buf);

			/* clear dtype status */
			tx_desc->cmd_type_offset_bsz &=
				~cpu_to_le64(I40E_TXD_QW1_DTYPE_MASK);

			if (likely(tx_desc == eop_desc)) {
				eop_desc = NULL;

				dev_kfree_skb_any(tx_buf->skb);
				tx_buf->skb = NULL;

				total_bytes += tx_buf->bytecount;
				total_packets += tx_buf->gso_segs;
			}

			/* advance buffer/descriptor, wrapping at ring end */
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}
		} while (eop_desc);
	}

	tx_ring->next_to_clean = i;
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;
	if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
			 " VSI <%d>\n"
			 " Tx Queue <%d>\n"
			 " next_to_use <%x>\n"
			 " next_to_clean <%x>\n",
			 tx_ring->vsi->seid,
			 tx_ring->queue_index,
			 tx_ring->next_to_use, i);
		dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
			 " time_stamp <%lx>\n"
			 " jiffies <%lx>\n",
			 tx_ring->tx_bi[i].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		dev_info(tx_ring->dev,
			 "tx hang detected on queue %d, resetting adapter\n",
			 tx_ring->queue_index);

		tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);

		/* the adapter is about to reset, no point in enabling stuff */
		return true;
	}

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return budget > 0;
}
435
436/**
437 * i40e_set_new_dynamic_itr - Find new ITR level
438 * @rc: structure containing ring performance data
439 *
440 * Stores a new ITR value based on packets and byte counts during
441 * the last interrupt. The advantage of per interrupt computation
442 * is faster updates and more accurate ITR for the current traffic
443 * pattern. Constants in this function were computed based on
444 * theoretical maximum wire speed and thresholds were set based on
445 * testing data as well as attempting to minimize response time
446 * while increasing bulk throughput.
447 **/
448static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
449{
450 enum i40e_latency_range new_latency_range = rc->latency_range;
451 u32 new_itr = rc->itr;
452 int bytes_per_int;
453
454 if (rc->total_packets == 0 || !rc->itr)
455 return;
456
457 /* simple throttlerate management
458 * 0-10MB/s lowest (100000 ints/s)
459 * 10-20MB/s low (20000 ints/s)
460 * 20-1249MB/s bulk (8000 ints/s)
461 */
462 bytes_per_int = rc->total_bytes / rc->itr;
463 switch (rc->itr) {
464 case I40E_LOWEST_LATENCY:
465 if (bytes_per_int > 10)
466 new_latency_range = I40E_LOW_LATENCY;
467 break;
468 case I40E_LOW_LATENCY:
469 if (bytes_per_int > 20)
470 new_latency_range = I40E_BULK_LATENCY;
471 else if (bytes_per_int <= 10)
472 new_latency_range = I40E_LOWEST_LATENCY;
473 break;
474 case I40E_BULK_LATENCY:
475 if (bytes_per_int <= 20)
476 rc->latency_range = I40E_LOW_LATENCY;
477 break;
478 }
479
480 switch (new_latency_range) {
481 case I40E_LOWEST_LATENCY:
482 new_itr = I40E_ITR_100K;
483 break;
484 case I40E_LOW_LATENCY:
485 new_itr = I40E_ITR_20K;
486 break;
487 case I40E_BULK_LATENCY:
488 new_itr = I40E_ITR_8K;
489 break;
490 default:
491 break;
492 }
493
494 if (new_itr != rc->itr) {
495 /* do an exponential smoothing */
496 new_itr = (10 * new_itr * rc->itr) /
497 ((9 * new_itr) + rc->itr);
498 rc->itr = new_itr & I40E_MAX_ITR;
499 }
500
501 rc->total_bytes = 0;
502 rc->total_packets = 0;
503}
504
505/**
506 * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
507 * @q_vector: the vector to adjust
508 **/
509static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
510{
511 u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
512 struct i40e_hw *hw = &q_vector->vsi->back->hw;
513 u32 reg_addr;
514 u16 old_itr;
515
516 reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
517 old_itr = q_vector->rx.itr;
518 i40e_set_new_dynamic_itr(&q_vector->rx);
519 if (old_itr != q_vector->rx.itr)
520 wr32(hw, reg_addr, q_vector->rx.itr);
521
522 reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
523 old_itr = q_vector->tx.itr;
524 i40e_set_new_dynamic_itr(&q_vector->tx);
525 if (old_itr != q_vector->tx.itr)
526 wr32(hw, reg_addr, q_vector->tx.itr);
527
528 i40e_flush(hw);
529}
530
531/**
532 * i40e_clean_programming_status - clean the programming status descriptor
533 * @rx_ring: the rx ring that has this descriptor
534 * @rx_desc: the rx descriptor written back by HW
535 *
536 * Flow director should handle FD_FILTER_STATUS to check its filter programming
537 * status being successful or not and take actions accordingly. FCoE should
538 * handle its context/filter programming/invalidation status and take actions.
539 *
540 **/
541static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
542 union i40e_rx_desc *rx_desc)
543{
544 u64 qw;
545 u8 id;
546
547 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
548 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
549 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
550
551 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
552 i40e_fd_handle_status(rx_ring, qw, id);
553}
554
555/**
556 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
557 * @tx_ring: the tx ring to set up
558 *
559 * Return 0 on success, negative on error
560 **/
561int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
562{
563 struct device *dev = tx_ring->dev;
564 int bi_size;
565
566 if (!dev)
567 return -ENOMEM;
568
569 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
570 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
571 if (!tx_ring->tx_bi)
572 goto err;
573
574 /* round up to nearest 4K */
575 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
576 tx_ring->size = ALIGN(tx_ring->size, 4096);
577 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
578 &tx_ring->dma, GFP_KERNEL);
579 if (!tx_ring->desc) {
580 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
581 tx_ring->size);
582 goto err;
583 }
584
585 tx_ring->next_to_use = 0;
586 tx_ring->next_to_clean = 0;
587 return 0;
588
589err:
590 kfree(tx_ring->tx_bi);
591 tx_ring->tx_bi = NULL;
592 return -ENOMEM;
593}
594
595/**
596 * i40e_clean_rx_ring - Free Rx buffers
597 * @rx_ring: ring to be cleaned
598 **/
599void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
600{
601 struct device *dev = rx_ring->dev;
602 struct i40e_rx_buffer *rx_bi;
603 unsigned long bi_size;
604 u16 i;
605
606 /* ring already cleared, nothing to do */
607 if (!rx_ring->rx_bi)
608 return;
609
610 /* Free all the Rx ring sk_buffs */
611 for (i = 0; i < rx_ring->count; i++) {
612 rx_bi = &rx_ring->rx_bi[i];
613 if (rx_bi->dma) {
614 dma_unmap_single(dev,
615 rx_bi->dma,
616 rx_ring->rx_buf_len,
617 DMA_FROM_DEVICE);
618 rx_bi->dma = 0;
619 }
620 if (rx_bi->skb) {
621 dev_kfree_skb(rx_bi->skb);
622 rx_bi->skb = NULL;
623 }
624 if (rx_bi->page) {
625 if (rx_bi->page_dma) {
626 dma_unmap_page(dev,
627 rx_bi->page_dma,
628 PAGE_SIZE / 2,
629 DMA_FROM_DEVICE);
630 rx_bi->page_dma = 0;
631 }
632 __free_page(rx_bi->page);
633 rx_bi->page = NULL;
634 rx_bi->page_offset = 0;
635 }
636 }
637
638 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
639 memset(rx_ring->rx_bi, 0, bi_size);
640
641 /* Zero out the descriptor ring */
642 memset(rx_ring->desc, 0, rx_ring->size);
643
644 rx_ring->next_to_clean = 0;
645 rx_ring->next_to_use = 0;
646}
647
648/**
649 * i40e_free_rx_resources - Free Rx resources
650 * @rx_ring: ring to clean the resources from
651 *
652 * Free all receive software resources
653 **/
654void i40e_free_rx_resources(struct i40e_ring *rx_ring)
655{
656 i40e_clean_rx_ring(rx_ring);
657 kfree(rx_ring->rx_bi);
658 rx_ring->rx_bi = NULL;
659
660 if (rx_ring->desc) {
661 dma_free_coherent(rx_ring->dev, rx_ring->size,
662 rx_ring->desc, rx_ring->dma);
663 rx_ring->desc = NULL;
664 }
665}
666
667/**
668 * i40e_setup_rx_descriptors - Allocate Rx descriptors
669 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
670 *
671 * Returns 0 on success, negative on failure
672 **/
673int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
674{
675 struct device *dev = rx_ring->dev;
676 int bi_size;
677
678 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
679 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
680 if (!rx_ring->rx_bi)
681 goto err;
682
683 /* Round up to nearest 4K */
684 rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
685 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
686 : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
687 rx_ring->size = ALIGN(rx_ring->size, 4096);
688 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
689 &rx_ring->dma, GFP_KERNEL);
690
691 if (!rx_ring->desc) {
692 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
693 rx_ring->size);
694 goto err;
695 }
696
697 rx_ring->next_to_clean = 0;
698 rx_ring->next_to_use = 0;
699
700 return 0;
701err:
702 kfree(rx_ring->rx_bi);
703 rx_ring->rx_bi = NULL;
704 return -ENOMEM;
705}
706
/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new next_to_use value, also written to the hardware tail register
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
723
/**
 * i40e_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * For each slot: (re)allocate and map an skb for the header/single
 * buffer, and when packet-split is enabled also (re)allocate and map a
 * half page for the payload, then write the addresses into the
 * descriptor.  On any allocation or mapping failure, stop early and
 * hand whatever was refilled so far to the hardware.
 **/
void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;
	struct sk_buff *skb;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return;

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];
		skb = bi->skb;

		/* reuse the skb left in the slot, else allocate a fresh one */
		if (!skb) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				rx_ring->rx_stats.alloc_rx_buff_failed++;
				goto no_buffers;
			}
			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			bi->skb = skb;
		}

		/* map the skb data area unless a mapping is still live */
		if (!bi->dma) {
			bi->dma = dma_map_single(rx_ring->dev,
						 skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				rx_ring->rx_stats.alloc_rx_buff_failed++;
				bi->dma = 0;
				goto no_buffers;
			}
		}

		if (ring_is_ps_enabled(rx_ring)) {
			if (!bi->page) {
				bi->page = alloc_page(GFP_ATOMIC);
				if (!bi->page) {
					rx_ring->rx_stats.alloc_rx_page_failed++;
					goto no_buffers;
				}
			}

			if (!bi->page_dma) {
				/* use a half page if we're re-using */
				bi->page_offset ^= PAGE_SIZE / 2;
				bi->page_dma = dma_map_page(rx_ring->dev,
							    bi->page,
							    bi->page_offset,
							    PAGE_SIZE / 2,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(rx_ring->dev,
						      bi->page_dma)) {
					rx_ring->rx_stats.alloc_rx_page_failed++;
					bi->page_dma = 0;
					goto no_buffers;
				}
			}

			/* Refresh the desc even if buffer_addrs didn't change
			 * because each write-back erases this info.
			 */
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
			rx_desc->read.hdr_addr = 0;
		}
		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	/* publish the refilled slots (if any) to the hardware */
	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);
}
812
813/**
814 * i40e_receive_skb - Send a completed packet up the stack
815 * @rx_ring: rx ring in play
816 * @skb: packet to send up
817 * @vlan_tag: vlan tag for packet
818 **/
819static void i40e_receive_skb(struct i40e_ring *rx_ring,
820 struct sk_buff *skb, u16 vlan_tag)
821{
822 struct i40e_q_vector *q_vector = rx_ring->q_vector;
823 struct i40e_vsi *vsi = rx_ring->vsi;
824 u64 flags = vsi->back->flags;
825
826 if (vlan_tag & VLAN_VID_MASK)
827 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
828
829 if (flags & I40E_FLAG_IN_NETPOLL)
830 netif_rx(skb);
831 else
832 napi_gro_receive(&q_vector->napi, skb);
833}
834
835/**
836 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
837 * @vsi: the VSI we care about
838 * @skb: skb currently being received and modified
839 * @rx_status: status value of last descriptor in packet
840 * @rx_error: error value of last descriptor in packet
841 **/
842static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
843 struct sk_buff *skb,
844 u32 rx_status,
845 u32 rx_error)
846{
847 skb->ip_summed = CHECKSUM_NONE;
848
849 /* Rx csum enabled and ip headers found? */
850 if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
851 rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
852 return;
853
854 /* IP or L4 checksum error */
855 if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
856 (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) {
857 vsi->back->hw_csum_rx_error++;
858 return;
859 }
860
861 skb->ip_summed = CHECKSUM_UNNECESSARY;
862}
863
864/**
865 * i40e_rx_hash - returns the hash value from the Rx descriptor
866 * @ring: descriptor ring
867 * @rx_desc: specific descriptor
868 **/
869static inline u32 i40e_rx_hash(struct i40e_ring *ring,
870 union i40e_rx_desc *rx_desc)
871{
872 if (ring->netdev->features & NETIF_F_RXHASH) {
873 if ((le64_to_cpu(rx_desc->wb.qword1.status_error_len) >>
874 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
875 I40E_RX_DESC_FLTSTAT_RSS_HASH)
876 return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
877 }
878 return 0;
879}
880
/**
 * i40e_clean_rx_irq - Reclaim resources after receive completes
 * @rx_ring: rx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Walks write-back descriptors with the DD bit set, reassembling
 * header-split and multi-descriptor packets, performing checksum/hash/
 * VLAN fixups, and handing completed skbs to the stack.  Consumed
 * buffers are periodically returned to hardware via
 * i40e_alloc_rx_buffers().
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	const int current_node = numa_node_id();
	struct i40e_vsi *vsi = rx_ring->vsi;
	u16 i = rx_ring->next_to_clean;
	union i40e_rx_desc *rx_desc;
	u32 rx_error, rx_status;
	u64 qword;

	rx_desc = I40E_RX_DESC(rx_ring, i);
	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
		    >> I40E_RXD_QW1_STATUS_SHIFT;

	while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
		union i40e_rx_desc *next_rxd;
		struct i40e_rx_buffer *rx_bi;
		struct sk_buff *skb;
		u16 vlan_tag;
		/* programming-status descriptors carry no packet data */
		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring, rx_desc);
			I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
			goto next_desc;
		}
		rx_bi = &rx_ring->rx_bi[i];
		skb = rx_bi->skb;
		prefetch(skb->data);

		/* extract the length fields from qword1 */
		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
				>> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK)
				>> I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
		rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK)
			 >> I40E_RXD_QW1_LENGTH_SPH_SHIFT;

		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK)
			   >> I40E_RXD_QW1_ERROR_SHIFT;
		/* HBO (header buffer overflow) is handled via the length
		 * choice below, so mask it out of the error bits
		 */
		rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);

		rx_bi->skb = NULL;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * STATUS_DD bit is set
		 */
		rmb();

		/* Get the header and possibly the whole packet
		 * If this is an skb from previous receive dma will be 0
		 */
		if (rx_bi->dma) {
			u16 len;

			if (rx_hbo)
				len = I40E_RX_HDR_SIZE;
			else if (rx_sph)
				len = rx_header_len;
			else if (rx_packet_len)
				len = rx_packet_len; /* 1buf/no split found */
			else
				len = rx_header_len; /* split always mode */

			skb_put(skb, len);
			dma_unmap_single(rx_ring->dev,
					 rx_bi->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_bi->dma = 0;
		}

		/* Get the rest of the data if this was a header split */
		if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_bi->page,
					   rx_bi->page_offset,
					   rx_packet_len);

			skb->len += rx_packet_len;
			skb->data_len += rx_packet_len;
			skb->truesize += rx_packet_len;

			/* keep the page for reuse only if we hold the sole
			 * reference and it is local to this NUMA node
			 */
			if ((page_count(rx_bi->page) == 1) &&
			    (page_to_nid(rx_bi->page) == current_node))
				get_page(rx_bi->page);
			else
				rx_bi->page = NULL;

			dma_unmap_page(rx_ring->dev,
				       rx_bi->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			rx_bi->page_dma = 0;
		}
		I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);

		/* non-EOP: chain state into the next buffer and continue */
		if (unlikely(
		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
			struct i40e_rx_buffer *next_buffer;

			next_buffer = &rx_ring->rx_bi[i];

			if (ring_is_ps_enabled(rx_ring)) {
				rx_bi->skb = next_buffer->skb;
				rx_bi->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			}
			rx_ring->rx_stats.non_eop_descs++;
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			dev_kfree_skb_any(skb);
			goto next_desc;
		}

		skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
		i40e_rx_checksum(vsi, skb, rx_status, rx_error);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
			 : 0;
		i40e_receive_skb(rx_ring, skb, vlan_tag);

		rx_ring->netdev->last_rx = jiffies;
		budget--;
next_desc:
		rx_desc->wb.qword1.status_error_len = 0;
		if (!budget)
			break;

		cleaned_count++;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			i40e_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
			    >> I40E_RXD_QW1_STATUS_SHIFT;
	}

	rx_ring->next_to_clean = i;
	rx_ring->rx_stats.packets += total_rx_packets;
	rx_ring->rx_stats.bytes += total_rx_bytes;
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	if (cleaned_count)
		i40e_alloc_rx_buffers(rx_ring, cleaned_count);

	return budget > 0;
}
1055
/**
 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/
int i40e_napi_poll(struct napi_struct *napi, int budget)
{
	struct i40e_q_vector *q_vector =
			       container_of(napi, struct i40e_q_vector, napi);
	struct i40e_vsi *vsi = q_vector->vsi;
	bool clean_complete = true;
	int budget_per_ring;
	int i;

	/* VSI going down: stop polling immediately */
	if (test_bit(__I40E_DOWN, &vsi->state)) {
		napi_complete(napi);
		return 0;
	}

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 * Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
	for (i = 0; i < q_vector->num_ringpairs; i++) {
		clean_complete &= i40e_clean_tx_irq(q_vector->tx.ring[i],
						    vsi->work_limit);
		clean_complete &= i40e_clean_rx_irq(q_vector->rx.ring[i],
						    budget_per_ring);
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete)
		return budget;

	/* Work is done so exit the polling mode and re-enable the interrupt */
	napi_complete(napi);
	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
	    ITR_IS_DYNAMIC(vsi->tx_itr_setting))
		i40e_update_dynamic_itr(q_vector);

	if (!test_bit(__I40E_DOWN, &vsi->state)) {
		if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
			i40e_irq_dynamic_enable(vsi,
					q_vector->v_idx + vsi->base_vector);
		} else {
			struct i40e_hw *hw = &vsi->back->hw;
			/* We re-enable the queue 0 cause, but
			 * don't worry about dynamic_enable
			 * because we left it on for the other
			 * possible interrupts during napi
			 */
			u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
			qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
			wr32(hw, I40E_QINT_RQCTL(0), qval);

			qval = rd32(hw, I40E_QINT_TQCTL(0));
			qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
			wr32(hw, I40E_QINT_TQCTL(0), qval);
			i40e_flush(hw);
		}
	}

	return 0;
}
1126
/**
 * i40e_atr - Add a Flow Director ATR filter
 * @tx_ring: ring to add programming descriptor to
 * @skb: send buffer
 * @flags: send flags
 * @protocol: wire protocol
 *
 * Application Targeted Routing: sample transmitted TCP flows and program a
 * hardware Flow Director filter so matching receive traffic is steered to
 * this queue.  The filter-programming descriptor is written into the Tx
 * ring, consuming one descriptor slot.
 **/
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 flags, __be16 protocol)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	/* overlay views of the L3 header, so IPv4/IPv6 share one pointer */
	union {
		unsigned char *network;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	struct tcphdr *th;
	unsigned int hlen;
	u32 flex_ptype, dtype_cmd;

	/* make sure ATR is enabled */
	if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED))
		return;

	/* if sampling is disabled do nothing */
	if (!tx_ring->atr_sample_rate)
		return;

	tx_ring->atr_count++;

	/* snag network header to get L4 type and address */
	hdr.network = skb_network_header(skb);

	/* Currently only IPv4/IPv6 with TCP is supported */
	if (protocol == htons(ETH_P_IP)) {
		if (hdr.ipv4->protocol != IPPROTO_TCP)
			return;

		/* access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (hdr.network[0] & 0x0F) << 2;
	} else if (protocol == htons(ETH_P_IPV6)) {
		/* NOTE(review): IPv6 extension headers are not walked, so a
		 * TCP segment behind an extension header is simply not
		 * sampled -- confirm this is intentional.
		 */
		if (hdr.ipv6->nexthdr != IPPROTO_TCP)
			return;

		hlen = sizeof(struct ipv6hdr);
	} else {
		return;
	}

	th = (struct tcphdr *)(hdr.network + hlen);

	/* sample on all syn/fin packets or once every atr sample rate */
	if (!th->fin && !th->syn && (tx_ring->atr_count < tx_ring->atr_sample_rate))
		return;

	tx_ring->atr_count = 0;

	/* grab the next descriptor */
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
	tx_ring->next_to_use++;
	if (tx_ring->next_to_use == tx_ring->count)
		tx_ring->next_to_use = 0;

	/* queue index + packet classifier type for the filter */
	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
		      I40E_TXD_FLTR_QW0_QINDEX_MASK;
	flex_ptype |= (protocol == htons(ETH_P_IP)) ?
		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	/* a FIN tears the flow down, so remove the filter; otherwise
	 * add/refresh it
	 */
	dtype_cmd |= th->fin ?
		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
		     I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
}
1218
/* bits set on the last data descriptor of every frame */
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 * @flags: the tx flags to be set
 *
 * Checks the skb and set up correspondingly several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns error code indicate the frame should be dropped upon error and the
 * otherwise returns 0 to indicate the flags has been set properly.
 **/
static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
				      struct i40e_ring *tx_ring,
				      u32 *flags)
{
	__be16 protocol = skb->protocol;
	u32 tx_flags = 0;

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (vlan_tx_tag_present(skb)) {
		/* tag lives in the upper 16 bits of tx_flags */
		tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN, check the next protocol and store the tag */
	} else if (protocol == __constant_htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;
		/* may copy into _vhdr if the header is not in linear data */
		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			return -EINVAL;

		protocol = vhdr->h_vlan_encapsulated_proto;
		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
	}

	/* Insert 802.1p priority into VLAN header */
	if ((tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED) &&
	    ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
	     (skb->priority != TC_PRIO_CONTROL))) {
		/* replace any existing priority bits with skb->priority */
		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
		tx_flags |= (skb->priority & 0x7) <<
				I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;
			/* the in-packet tag is rewritten, so the header must
			 * be writable (un-clone before touching it)
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
				return -ENOMEM;
			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(tx_flags >>
						 I40E_TX_FLAGS_VLAN_SHIFT);
		} else {
			/* no tag present yet: have hardware insert one */
			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
		}
	}
	*flags = tx_flags;
	return 0;
}
1277
1278/**
1279 * i40e_tx_csum - is checksum offload requested
1280 * @tx_ring: ptr to the ring to send
1281 * @skb: ptr to the skb we're sending
1282 * @tx_flags: the collected send information
1283 * @protocol: the send protocol
1284 *
1285 * Returns true if checksum offload is requested
1286 **/
1287static bool i40e_tx_csum(struct i40e_ring *tx_ring, struct sk_buff *skb,
1288 u32 tx_flags, __be16 protocol)
1289{
1290 if ((skb->ip_summed != CHECKSUM_PARTIAL) &&
1291 !(tx_flags & I40E_TX_FLAGS_TXSW)) {
1292 if (!(tx_flags & I40E_TX_FLAGS_HW_VLAN))
1293 return false;
1294 }
1295
1296 return skb->ip_summed == CHECKSUM_PARTIAL;
1297}
1298
/**
 * i40e_tso - set up the tso context descriptor
 * @tx_ring: ptr to the ring to send
 * @skb: ptr to the skb we're sending
 * @tx_flags: the collected send information
 * @protocol: the send protocol
 * @hdr_len: ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: ptr to Quad Word 1 of the context descriptor
 * @cd_tunneling: ptr to context descriptor bits
 *
 * Prepares the packet headers for hardware TCP segmentation: zeroes the
 * length/checksum fields that the hardware recomputes per segment and seeds
 * the TCP checksum with the pseudo-header sum, then fills in the TSO
 * command, length and MSS bits of the context descriptor.
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
		    u32 tx_flags, __be16 protocol, u8 *hdr_len,
		    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
{
	u32 cd_cmd, cd_tso_len, cd_mss;
	struct tcphdr *tcph;
	struct iphdr *iph;
	u32 l4len;
	int err;
	struct ipv6hdr *ipv6h;

	if (!skb_is_gso(skb))
		return 0;

	/* headers are rewritten below, so they must not be shared */
	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	if (protocol == __constant_htons(ETH_P_IP)) {
		/* for tunnels, operate on the inner headers */
		iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		/* seed tcph->check with the pseudo-header sum (length 0);
		 * hardware folds in the per-segment payload checksum
		 */
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						 0, IPPROTO_TCP, 0);
	} else if (skb_is_gso_v6(skb)) {

		ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
					   : ipv6_hdr(skb);
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		ipv6h->payload_len = 0;
		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					       0, IPPROTO_TCP, 0);
	}

	/* header length = L2+L3 headers up to and including the TCP header */
	l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
	*hdr_len = (skb->encapsulation
		    ? (skb_inner_transport_header(skb) - skb->data)
		    : skb_transport_offset(skb)) + l4len;

	/* find the field values */
	cd_cmd = I40E_TX_CTX_DESC_TSO;
	cd_tso_len = skb->len - *hdr_len;
	cd_mss = skb_shinfo(skb)->gso_size;
	*cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT)
			     | ((u64)cd_tso_len
				<< I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
			     | ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
	return 1;
}
1362
1363/**
1364 * i40e_tx_enable_csum - Enable Tx checksum offloads
1365 * @skb: send buffer
1366 * @tx_flags: Tx flags currently set
1367 * @td_cmd: Tx descriptor command bits to set
1368 * @td_offset: Tx descriptor header offsets to set
1369 * @cd_tunneling: ptr to context desc bits
1370 **/
1371static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
1372 u32 *td_cmd, u32 *td_offset,
1373 struct i40e_ring *tx_ring,
1374 u32 *cd_tunneling)
1375{
1376 struct ipv6hdr *this_ipv6_hdr;
1377 unsigned int this_tcp_hdrlen;
1378 struct iphdr *this_ip_hdr;
1379 u32 network_hdr_len;
1380 u8 l4_hdr = 0;
1381
1382 if (skb->encapsulation) {
1383 network_hdr_len = skb_inner_network_header_len(skb);
1384 this_ip_hdr = inner_ip_hdr(skb);
1385 this_ipv6_hdr = inner_ipv6_hdr(skb);
1386 this_tcp_hdrlen = inner_tcp_hdrlen(skb);
1387
1388 if (tx_flags & I40E_TX_FLAGS_IPV4) {
1389
1390 if (tx_flags & I40E_TX_FLAGS_TSO) {
1391 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
1392 ip_hdr(skb)->check = 0;
1393 } else {
1394 *cd_tunneling |=
1395 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1396 }
1397 } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
1398 if (tx_flags & I40E_TX_FLAGS_TSO) {
1399 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
1400 ip_hdr(skb)->check = 0;
1401 } else {
1402 *cd_tunneling |=
1403 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1404 }
1405 }
1406
1407 /* Now set the ctx descriptor fields */
1408 *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
1409 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
1410 I40E_TXD_CTX_UDP_TUNNELING |
1411 ((skb_inner_network_offset(skb) -
1412 skb_transport_offset(skb)) >> 1) <<
1413 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
1414
1415 } else {
1416 network_hdr_len = skb_network_header_len(skb);
1417 this_ip_hdr = ip_hdr(skb);
1418 this_ipv6_hdr = ipv6_hdr(skb);
1419 this_tcp_hdrlen = tcp_hdrlen(skb);
1420 }
1421
1422 /* Enable IP checksum offloads */
1423 if (tx_flags & I40E_TX_FLAGS_IPV4) {
1424 l4_hdr = this_ip_hdr->protocol;
1425 /* the stack computes the IP header already, the only time we
1426 * need the hardware to recompute it is in the case of TSO.
1427 */
1428 if (tx_flags & I40E_TX_FLAGS_TSO) {
1429 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
1430 this_ip_hdr->check = 0;
1431 } else {
1432 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
1433 }
1434 /* Now set the td_offset for IP header length */
1435 *td_offset = (network_hdr_len >> 2) <<
1436 I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
1437 } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
1438 l4_hdr = this_ipv6_hdr->nexthdr;
1439 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
1440 /* Now set the td_offset for IP header length */
1441 *td_offset = (network_hdr_len >> 2) <<
1442 I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
1443 }
1444 /* words in MACLEN + dwords in IPLEN + dwords in L4Len */
1445 *td_offset |= (skb_network_offset(skb) >> 1) <<
1446 I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
1447
1448 /* Enable L4 checksum offloads */
1449 switch (l4_hdr) {
1450 case IPPROTO_TCP:
1451 /* enable checksum offloads */
1452 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
1453 *td_offset |= (this_tcp_hdrlen >> 2) <<
1454 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1455 break;
1456 case IPPROTO_SCTP:
1457 /* enable SCTP checksum offload */
1458 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
1459 *td_offset |= (sizeof(struct sctphdr) >> 2) <<
1460 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1461 break;
1462 case IPPROTO_UDP:
1463 /* enable UDP checksum offload */
1464 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
1465 *td_offset |= (sizeof(struct udphdr) >> 2) <<
1466 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1467 break;
1468 default:
1469 break;
1470 }
1471}
1472
1473/**
1474 * i40e_create_tx_ctx Build the Tx context descriptor
1475 * @tx_ring: ring to create the descriptor on
1476 * @cd_type_cmd_tso_mss: Quad Word 1
1477 * @cd_tunneling: Quad Word 0 - bits 0-31
1478 * @cd_l2tag2: Quad Word 0 - bits 32-63
1479 **/
1480static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1481 const u64 cd_type_cmd_tso_mss,
1482 const u32 cd_tunneling, const u32 cd_l2tag2)
1483{
1484 struct i40e_tx_context_desc *context_desc;
1485
1486 if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
1487 return;
1488
1489 /* grab the next descriptor */
1490 context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use);
1491 tx_ring->next_to_use++;
1492 if (tx_ring->next_to_use == tx_ring->count)
1493 tx_ring->next_to_use = 0;
1494
1495 /* cpu_to_le32 and assign to struct fields */
1496 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
1497 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
1498 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
1499}
1500
/**
 * i40e_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @skb: send buffer
 * @first: first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len: size of the packet header
 * @td_cmd: the command field in the descriptor
 * @td_offset: offset for checksum or crc
 *
 * DMA-maps the skb head and each page fragment and emits one data
 * descriptor per mapped chunk (splitting chunks larger than
 * I40E_MAX_DATA_PER_TXD), then bumps the ring tail to hand the frame to
 * hardware.  On a mapping failure all already-created mappings are undone
 * and the skb is freed.
 **/
static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			struct i40e_tx_buffer *first, u32 tx_flags,
			const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	struct device *dev = tx_ring->dev;
	u32 paylen = skb->len - hdr_len;
	u16 i = tx_ring->next_to_use;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	u32 buf_offset = 0;
	u32 td_tag = 0;
	dma_addr_t dma;
	u16 gso_segs;

	/* map the linear (head) portion first */
	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_error;

	/* have hardware insert the VLAN tag taken from tx_flags */
	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
			 I40E_TX_FLAGS_VLAN_SHIFT;
	}

	tx_desc = I40E_TX_DESC(tx_ring, i);
	/* outer loop: one iteration per mapped chunk (head, then frags) */
	for (;;) {
		/* a chunk larger than the per-descriptor limit is split
		 * across several descriptors sharing one DMA mapping
		 */
		while (size > I40E_MAX_DATA_PER_TXD) {
			tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   I40E_MAX_DATA_PER_TXD, td_tag);

			buf_offset += I40E_MAX_DATA_PER_TXD;
			size -= I40E_MAX_DATA_PER_TXD;

			tx_desc++;
			i++;
			if (i == tx_ring->count) {
				tx_desc = I40E_TX_DESC(tx_ring, 0);
				i = 0;
			}
		}

		/* record the mapping on the buffer that owns its last
		 * descriptor so cleanup can unmap it
		 */
		tx_bi = &tx_ring->tx_bi[i];
		tx_bi->length = buf_offset + size;
		tx_bi->tx_flags = tx_flags;
		tx_bi->dma = dma;

		tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		/* no fragment data left: this was the final chunk */
		if (likely(!data_len))
			break;

		size = skb_frag_size(frag);
		data_len -= size;
		buf_offset = 0;
		tx_flags |= I40E_TX_FLAGS_MAPPED_AS_PAGE;

		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto dma_error;

		tx_desc++;
		i++;
		if (i == tx_ring->count) {
			tx_desc = I40E_TX_DESC(tx_ring, 0);
			i = 0;
		}

		frag++;
	}

	/* mark the last descriptor of the frame: EOP + request status */
	tx_desc->cmd_type_offset_bsz |=
		       cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
		gso_segs = skb_shinfo(skb)->gso_segs;
	else
		gso_segs = 1;

	/* multiply data chunks by size of headers */
	/* NOTE: skb ownership and byte accounting are stored on the buffer
	 * of the LAST descriptor (tx_bi), while the timestamp and
	 * next_to_watch go on the FIRST -- cleanup relies on this split.
	 */
	tx_bi->bytecount = paylen + (gso_segs * hdr_len);
	tx_bi->gso_segs = gso_segs;
	tx_bi->skb = skb;

	/* set the timestamp and next to watch values */
	first->time_stamp = jiffies;
	first->next_to_watch = tx_desc;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	/* notify hardware of the new tail position */
	writel(i, tx_ring->tail);
	return;

dma_error:
	dev_info(dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	/* walk backwards from the failing buffer to the first one */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	dev_kfree_skb_any(skb);

	/* leave next_to_use at the first reclaimed slot */
	tx_ring->next_to_use = i;
}
1639
1640/**
1641 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
1642 * @tx_ring: the ring to be checked
1643 * @size: the size buffer we want to assure is available
1644 *
1645 * Returns -EBUSY if a stop is needed, else 0
1646 **/
1647static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
1648{
1649 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
1650 smp_mb();
1651
1652 /* Check again in a case another CPU has just made room available. */
1653 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
1654 return -EBUSY;
1655
1656 /* A reprieve! - use start_queue because it doesn't call schedule */
1657 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
1658 ++tx_ring->tx_stats.restart_queue;
1659 return 0;
1660}
1661
/**
 * i40e_maybe_stop_tx - 1st level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Fast path: when enough descriptors are free, do nothing.  Otherwise fall
 * through to the slow path that stops the queue.
 *
 * Returns 0 if stop is not needed
 **/
static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (unlikely(I40E_DESC_UNUSED(tx_ring) < size))
		return __i40e_maybe_stop_tx(tx_ring, size);

	return 0;
}
1675
1676/**
1677 * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
1678 * @skb: send buffer
1679 * @tx_ring: ring to send buffer on
1680 *
1681 * Returns number of data descriptors needed for this skb. Returns 0 to indicate
1682 * there is not enough descriptors available in this ring since we need at least
1683 * one descriptor.
1684 **/
1685static int i40e_xmit_descriptor_count(struct sk_buff *skb,
1686 struct i40e_ring *tx_ring)
1687{
1688#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
1689 unsigned int f;
1690#endif
1691 int count = 0;
1692
1693 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
1694 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
1695 * + 2 desc gap to keep tail from touching head,
1696 * + 1 desc for context descriptor,
1697 * otherwise try next time
1698 */
1699#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
1700 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1701 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
1702#else
1703 count += skb_shinfo(skb)->nr_frags;
1704#endif
1705 count += TXD_USE_COUNT(skb_headlen(skb));
1706 if (i40e_maybe_stop_tx(tx_ring, count + 3)) {
1707 tx_ring->tx_stats.tx_busy++;
1708 return 0;
1709 }
1710 return count;
1711}
1712
/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Collects offload flags (VLAN, L3 protocol, TSO, checksum), emits the
 * context and optional ATR filter descriptors, then maps and queues the
 * data descriptors.
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tso;
	/* reserve ring space up front; NETDEV_TX_BUSY asks the stack to
	 * requeue the skb
	 */
	if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
		return NETDEV_TX_BUSY;

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* obtain protocol of skb */
	protocol = skb->protocol;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];

	/* setup IPv4/IPv6 offloads */
	if (protocol == __constant_htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (protocol == __constant_htons(ETH_P_IPV6))
		tx_flags |= I40E_TX_FLAGS_IPV6;

	tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
		       &cd_type_cmd_tso_mss, &cd_tunneling);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

	skb_tx_timestamp(skb);

	/* Always offload the checksum, since it's in the data descriptor */
	if (i40e_tx_csum(tx_ring, skb, tx_flags, protocol))
		tx_flags |= I40E_TX_FLAGS_CSUM;

	/* always enable offload insertion */
	td_cmd |= I40E_TX_DESC_CMD_ICRC;

	if (tx_flags & I40E_TX_FLAGS_CSUM)
		i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
				    tx_ring, &cd_tunneling);

	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	/* Add Flow Director ATR if it's enabled.
	 *
	 * NOTE: this must always be directly before the data descriptor.
	 */
	i40e_atr(tx_ring, skb, tx_flags, protocol);

	/* emit data descriptors and ring the doorbell */
	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
		    td_cmd, td_offset);

	/* stop the queue now if the next frame could not fit */
	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;

out_drop:
	/* skb is consumed (dropped) even on error, per xmit convention */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
1792
1793/**
1794 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
1795 * @skb: send buffer
1796 * @netdev: network interface device structure
1797 *
1798 * Returns NETDEV_TX_OK if sent, else an error code
1799 **/
1800netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1801{
1802 struct i40e_netdev_priv *np = netdev_priv(netdev);
1803 struct i40e_vsi *vsi = np->vsi;
1804 struct i40e_ring *tx_ring = &vsi->tx_rings[skb->queue_mapping];
1805
1806 /* hardware can't handle really short frames, hardware padding works
1807 * beyond this point
1808 */
1809 if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
1810 if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
1811 return NETDEV_TX_OK;
1812 skb->len = I40E_MIN_TX_LEN;
1813 skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
1814 }
1815
1816 return i40e_xmit_frame_ring(skb, tx_ring);
1817}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
new file mode 100644
index 000000000000..b1d7722d98a7
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -0,0 +1,259 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
/* Interrupt Throttling and Rate Limiting (storm control) Goodies */

#define I40E_MAX_ITR               0x07FF
#define I40E_MIN_ITR               0x0001
#define I40E_ITR_USEC_RESOLUTION   2
#define I40E_MAX_IRATE             0x03F
#define I40E_MIN_IRATE             0x001
#define I40E_IRATE_USEC_RESOLUTION 4
/* canned ITR values, named after their approximate interrupt rate */
#define I40E_ITR_100K              0x0005
#define I40E_ITR_20K               0x0019
#define I40E_ITR_8K                0x003E
#define I40E_ITR_4K                0x007A
#define I40E_ITR_RX_DEF            I40E_ITR_8K
#define I40E_ITR_TX_DEF            I40E_ITR_4K
#define I40E_ITR_DYNAMIC           0x8000 /* use top bit as a flag */
#define I40E_MIN_INT_RATE          250    /* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE          500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
#define I40E_DEFAULT_IRQ_WORK      256
/* ITR settings carry the dynamic flag; strip/halve to get the register value */
#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)

#define I40E_QUEUE_END_OF_LIST 0x7FF

/* per-vector ITR indices */
#define I40E_ITR_NONE  3
#define I40E_RX_ITR    0
#define I40E_TX_ITR    1
#define I40E_PE_ITR    2
/* Supported Rx Buffer Sizes */
#define I40E_RXBUFFER_512   512    /* Used for packet split */
#define I40E_RXBUFFER_2048  2048
#define I40E_RXBUFFER_3072  3072   /* For FCoE MTU of 2158 */
#define I40E_RXBUFFER_4096  4096
#define I40E_RXBUFFER_8192  8192
#define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
 * this adds up to 512 bytes of extra data meaning the smallest allocation
 * we could have is 1K.
 * i.e. RXBUFFER_512 --> size-1024 slab
 */
#define I40E_RX_HDR_SIZE  I40E_RXBUFFER_512

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE	16	/* Must be power of 2 */
/* advance index i in ring r with wrap-around, fetch descriptor into n */
#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(n) = I40E_RX_DESC((r), (i));	\
	} while (0)

#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)		\
	do {						\
		I40E_RX_NEXT_DESC((r), (i), (n));	\
		prefetch((n));				\
	} while (0)

#define i40e_rx_desc i40e_32byte_rx_desc

#define I40E_MIN_TX_LEN		17
#define I40E_MAX_DATA_PER_TXD	16383	/* aka 16kB - 1 */

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)

/* tx_flags bit layout: offload flags in the low half, VLAN TCI in the high */
#define I40E_TX_FLAGS_CSUM		(u32)(1)
#define I40E_TX_FLAGS_HW_VLAN		(u32)(1 << 1)
#define I40E_TX_FLAGS_SW_VLAN		(u32)(1 << 2)
#define I40E_TX_FLAGS_TSO		(u32)(1 << 3)
#define I40E_TX_FLAGS_IPV4		(u32)(1 << 4)
#define I40E_TX_FLAGS_IPV6		(u32)(1 << 5)
#define I40E_TX_FLAGS_FCCRC		(u32)(1 << 6)
#define I40E_TX_FLAGS_FSO		(u32)(1 << 7)
#define I40E_TX_FLAGS_TXSW		(u32)(1 << 8)
#define I40E_TX_FLAGS_MAPPED_AS_PAGE	(u32)(1 << 9)
#define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
#define I40E_TX_FLAGS_VLAN_SHIFT	16
111
/* per-descriptor Tx bookkeeping; skb/bytecount live on the buffer of the
 * frame's last descriptor, time_stamp/next_to_watch on the first
 */
struct i40e_tx_buffer {
	struct sk_buff *skb;		/* skb to free after transmit */
	dma_addr_t dma;			/* DMA address of the mapped chunk */
	unsigned long time_stamp;	/* jiffies when queued (hang check) */
	u16 length;			/* bytes covered by this mapping */
	u32 tx_flags;			/* I40E_TX_FLAGS_* for this buffer */
	struct i40e_tx_desc *next_to_watch; /* last descriptor of the frame */
	unsigned int bytecount;		/* bytes on the wire, incl. headers */
	u16 gso_segs;			/* segments hardware will emit */
	u8 mapped_as_page;
};
123
/* per-descriptor Rx bookkeeping: a header buffer (skb/dma) and, for
 * packet-split mode, a half-page data buffer (page/page_dma/page_offset)
 */
struct i40e_rx_buffer {
	struct sk_buff *skb;
	dma_addr_t dma;
	struct page *page;
	dma_addr_t page_dma;
	unsigned int page_offset;
};
131
/* per-Tx-queue statistics */
struct i40e_tx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 restart_queue;	/* times the stopped queue was restarted */
	u64 tx_busy;		/* xmit attempts with too few descriptors */
	u64 completed;
	u64 tx_done_old;
};

/* per-Rx-queue statistics */
struct i40e_rx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 non_eop_descs;
	u64 alloc_rx_page_failed;
	u64 alloc_rx_buff_failed;
};

/* bit numbers for i40e_ring.state */
enum i40e_ring_state_t {
	__I40E_TX_FDIR_INIT_DONE,
	__I40E_TX_XPS_INIT_DONE,
	__I40E_TX_DETECT_HANG,
	__I40E_HANG_CHECK_ARMED,
	__I40E_RX_PS_ENABLED,
	__I40E_RX_LRO_ENABLED,
	__I40E_RX_16BYTE_DESC_ENABLED,
};
158
/* accessors for the ring->state bits declared in i40e_ring_state_t */
#define ring_is_ps_enabled(ring) \
	test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
#define set_ring_ps_enabled(ring) \
	set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
#define clear_ring_ps_enabled(ring) \
	clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
#define check_for_tx_hang(ring) \
	test_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
	set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
	clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
#define ring_is_lro_enabled(ring) \
	test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
#define set_ring_lro_enabled(ring) \
	set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
#define clear_ring_lro_enabled(ring) \
	clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
#define ring_is_16byte_desc_enabled(ring) \
	test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
#define set_ring_16byte_desc_enabled(ring) \
	set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
#define clear_ring_16byte_desc_enabled(ring) \
	clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
183
/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	union {
		struct i40e_tx_buffer *tx_bi;
		struct i40e_rx_buffer *rx_bi;
	};
	unsigned long state;		/* i40e_ring_state_t bits */
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;		/* doorbell register for this ring */

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_hdr_len;
	u16 rx_buf_len;
	u8 dtype;			/* Rx descriptor/buffer layout */
#define I40E_RX_DTYPE_NO_SPLIT      0
#define I40E_RX_DTYPE_SPLIT_ALWAYS  1
#define I40E_RX_DTYPE_HEADER_SPLIT  2
	u8 hsplit;			/* header-split trigger bitmask */
#define I40E_RX_SPLIT_L2      0x1
#define I40E_RX_SPLIT_IP      0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP    0x8

	/* used in interrupt processing */
	u16 next_to_use;	/* next descriptor slot the driver fills */
	u16 next_to_clean;	/* next descriptor slot to be reclaimed */

	/* Flow Director ATR sampling: sample 1 in atr_sample_rate packets */
	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */

	/* stats structs */
	union {
		struct i40e_tx_queue_stats tx_stats;
		struct i40e_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */
} ____cacheline_internodealigned_in_smp;
233
/* coarse traffic classification used for dynamic ITR adjustment */
enum i40e_latency_range {
	I40E_LOWEST_LATENCY = 0,
	I40E_LOW_LATENCY = 1,
	I40E_BULK_LATENCY = 2,
};

/* the set of Rx or Tx rings serviced by one interrupt vector */
struct i40e_ring_container {
#define I40E_MAX_RINGPAIR_PER_VECTOR 8
	/* array of pointers to rings */
	struct i40e_ring *ring[I40E_MAX_RINGPAIR_PER_VECTOR];
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 count;			/* number of valid entries in ring[] */
	enum i40e_latency_range latency_range;
	u16 itr;			/* current ITR setting for this side */
};
250
/* ring setup/teardown and datapath entry points (defined in i40e_txrx.c) */
void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
new file mode 100644
index 000000000000..f3f22b20f02f
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -0,0 +1,1154 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#ifndef _I40E_TYPE_H_
29#define _I40E_TYPE_H_
30
31#include "i40e_status.h"
32#include "i40e_osdep.h"
33#include "i40e_register.h"
34#include "i40e_adminq.h"
35#include "i40e_hmc.h"
36#include "i40e_lan_hmc.h"
37
38/* Device IDs */
39#define I40E_SFP_XL710_DEVICE_ID 0x1572
40#define I40E_SFP_X710_DEVICE_ID 0x1573
41#define I40E_QEMU_DEVICE_ID 0x1574
42#define I40E_KX_A_DEVICE_ID 0x157F
43#define I40E_KX_B_DEVICE_ID 0x1580
44#define I40E_KX_C_DEVICE_ID 0x1581
45#define I40E_KX_D_DEVICE_ID 0x1582
46#define I40E_QSFP_A_DEVICE_ID 0x1583
47#define I40E_QSFP_B_DEVICE_ID 0x1584
48#define I40E_QSFP_C_DEVICE_ID 0x1585
49#define I40E_VF_DEVICE_ID 0x154C
50#define I40E_VF_HV_DEVICE_ID 0x1571
51
52#define I40E_FW_API_VERSION_MAJOR 0x0001
53#define I40E_FW_API_VERSION_MINOR 0x0000
54
55#define I40E_MAX_VSI_QP 16
56#define I40E_MAX_VF_VSI 3
57#define I40E_MAX_CHAINED_RX_BUFFERS 5
58
59/* Max default timeout in ms, */
60#define I40E_MAX_NVM_TIMEOUT 18000
61
/* Check whether address is multicast. This is little-endian specific check.*/
#define I40E_IS_MULTICAST(address) \
	(bool)(((u8 *)(address))[0] & ((u8)0x01))

/* Check whether an address is broadcast.
 * NOTE(review): only the first two octets are compared, so this is a
 * cheap heuristic rather than a full 6-byte ff:ff:ff:ff:ff:ff test.
 */
#define I40E_IS_BROADCAST(address) \
	((((u8 *)(address))[0] == ((u8)0xff)) && \
	(((u8 *)(address))[1] == ((u8)0xff)))

/* Convert milliseconds to the 2usec global time base (GTIME resolution):
 * ms * 1000 yields usec, divided by 2 to get 2usec ticks.
 */
#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2)
73
74/* forward declaration */
75struct i40e_hw;
76typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
77
78#define I40E_ETH_LENGTH_OF_ADDRESS 6
79
80/* Data type manipulation macros. */
81
/* Number of free descriptors in ring R.  Handles wraparound: when
 * next_to_clean has not passed next_to_use, add the ring size before
 * subtracting.  The trailing -1 keeps one slot permanently unused so a
 * full ring can be distinguished from an empty one.
 */
#define I40E_DESC_UNUSED(R)	\
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	(R)->next_to_clean - (R)->next_to_use - 1)
85
86/* bitfields for Tx queue mapping in QTX_CTL */
87#define I40E_QTX_CTL_VF_QUEUE 0x0
88#define I40E_QTX_CTL_PF_QUEUE 0x2
89
90/* debug masks */
/* Bit flags for the driver debug mask; each bit enables one category
 * of debug output.  Bits 24-26 trace AdminQ traffic (note that
 * I40E_DEBUG_AQ_COMMAND = 0x06000000 is DESCRIPTOR | DESC_BUFFER, and
 * I40E_DEBUG_AQ masks the whole AQ nibble), while the top nibble is
 * reserved for user-defined categories.
 */
enum i40e_debug_mask {
	I40E_DEBUG_INIT			= 0x00000001,
	I40E_DEBUG_RELEASE		= 0x00000002,

	I40E_DEBUG_LINK			= 0x00000010,
	I40E_DEBUG_PHY			= 0x00000020,
	I40E_DEBUG_HMC			= 0x00000040,
	I40E_DEBUG_NVM			= 0x00000080,
	I40E_DEBUG_LAN			= 0x00000100,
	I40E_DEBUG_FLOW			= 0x00000200,
	I40E_DEBUG_DCB			= 0x00000400,
	I40E_DEBUG_DIAG			= 0x00000800,

	I40E_DEBUG_AQ_MESSAGE		= 0x01000000, /* for i40e_debug() */
	I40E_DEBUG_AQ_DESCRIPTOR	= 0x02000000,
	I40E_DEBUG_AQ_DESC_BUFFER	= 0x04000000,
	I40E_DEBUG_AQ_COMMAND		= 0x06000000, /* for i40e_debug_aq() */
	I40E_DEBUG_AQ			= 0x0F000000,

	I40E_DEBUG_USER			= 0xF0000000,

	I40E_DEBUG_ALL			= 0xFFFFFFFF
};
114
115/* These are structs for managing the hardware information and the operations.
116 * The structures of function pointers are filled out at init time when we
117 * know for sure exactly which hardware we're working with. This gives us the
118 * flexibility of using the same main driver code but adapting to slightly
119 * different hardware needs as new parts are developed. For this architecture,
120 * the Firmware and AdminQ are intended to insulate the driver from most of the
121 * future changes, but these structures will also do part of the job.
122 */
123enum i40e_mac_type {
124 I40E_MAC_UNKNOWN = 0,
125 I40E_MAC_X710,
126 I40E_MAC_XL710,
127 I40E_MAC_VF,
128 I40E_MAC_GENERIC,
129};
130
131enum i40e_media_type {
132 I40E_MEDIA_TYPE_UNKNOWN = 0,
133 I40E_MEDIA_TYPE_FIBER,
134 I40E_MEDIA_TYPE_BASET,
135 I40E_MEDIA_TYPE_BACKPLANE,
136 I40E_MEDIA_TYPE_CX4,
137 I40E_MEDIA_TYPE_VIRTUAL
138};
139
140enum i40e_fc_mode {
141 I40E_FC_NONE = 0,
142 I40E_FC_RX_PAUSE,
143 I40E_FC_TX_PAUSE,
144 I40E_FC_FULL,
145 I40E_FC_PFC,
146 I40E_FC_DEFAULT
147};
148
149enum i40e_vsi_type {
150 I40E_VSI_MAIN = 0,
151 I40E_VSI_VMDQ1,
152 I40E_VSI_VMDQ2,
153 I40E_VSI_CTRL,
154 I40E_VSI_FCOE,
155 I40E_VSI_MIRROR,
156 I40E_VSI_SRIOV,
157 I40E_VSI_FDIR,
158 I40E_VSI_TYPE_UNKNOWN
159};
160
161enum i40e_queue_type {
162 I40E_QUEUE_TYPE_RX = 0,
163 I40E_QUEUE_TYPE_TX,
164 I40E_QUEUE_TYPE_PE_CEQ,
165 I40E_QUEUE_TYPE_UNKNOWN
166};
167
168struct i40e_link_status {
169 enum i40e_aq_phy_type phy_type;
170 enum i40e_aq_link_speed link_speed;
171 u8 link_info;
172 u8 an_info;
173 u8 ext_info;
174 /* is Link Status Event notification to SW enabled */
175 bool lse_enable;
176};
177
178struct i40e_phy_info {
179 struct i40e_link_status link_info;
180 struct i40e_link_status link_info_old;
181 u32 autoneg_advertised;
182 u32 phy_id;
183 u32 module_type;
184 bool get_link_info;
185 enum i40e_media_type media_type;
186};
187
188#define I40E_HW_CAP_MAX_GPIO 30
189/* Capabilities of a PF or a VF or the whole device */
190struct i40e_hw_capabilities {
191 u32 switch_mode;
192#define I40E_NVM_IMAGE_TYPE_EVB 0x0
193#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
194#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
195
196 u32 management_mode;
197 u32 npar_enable;
198 u32 os2bmc;
199 u32 valid_functions;
200 bool sr_iov_1_1;
201 bool vmdq;
202 bool evb_802_1_qbg; /* Edge Virtual Bridging */
203 bool evb_802_1_qbh; /* Bridge Port Extension */
204 bool dcb;
205 bool fcoe;
206 bool mfp_mode_1;
207 bool mgmt_cem;
208 bool ieee_1588;
209 bool iwarp;
210 bool fd;
211 u32 fd_filters_guaranteed;
212 u32 fd_filters_best_effort;
213 bool rss;
214 u32 rss_table_size;
215 u32 rss_table_entry_width;
216 bool led[I40E_HW_CAP_MAX_GPIO];
217 bool sdp[I40E_HW_CAP_MAX_GPIO];
218 u32 nvm_image_type;
219 u32 num_flow_director_filters;
220 u32 num_vfs;
221 u32 vf_base_id;
222 u32 num_vsis;
223 u32 num_rx_qp;
224 u32 num_tx_qp;
225 u32 base_queue;
226 u32 num_msix_vectors;
227 u32 num_msix_vectors_vf;
228 u32 led_pin_num;
229 u32 sdp_pin_num;
230 u32 mdio_port_num;
231 u32 mdio_port_mode;
232 u8 rx_buf_chain_len;
233 u32 enabled_tcmap;
234 u32 maxtc;
235};
236
237struct i40e_mac_info {
238 enum i40e_mac_type type;
239 u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
240 u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS];
241 u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS];
242 u16 max_fcoeq;
243};
244
245enum i40e_aq_resources_ids {
246 I40E_NVM_RESOURCE_ID = 1
247};
248
249enum i40e_aq_resource_access_type {
250 I40E_RESOURCE_READ = 1,
251 I40E_RESOURCE_WRITE
252};
253
254struct i40e_nvm_info {
255 u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */
256 u64 hw_semaphore_wait; /* - || - */
257 u32 timeout; /* [ms] */
258 u16 sr_size; /* Shadow RAM size in words */
259 bool blank_nvm_mode; /* is NVM empty (no FW present)*/
260 u16 version; /* NVM package version */
261 u32 eetrack; /* NVM data version */
262};
263
264/* PCI bus types */
265enum i40e_bus_type {
266 i40e_bus_type_unknown = 0,
267 i40e_bus_type_pci,
268 i40e_bus_type_pcix,
269 i40e_bus_type_pci_express,
270 i40e_bus_type_reserved
271};
272
273/* PCI bus speeds */
274enum i40e_bus_speed {
275 i40e_bus_speed_unknown = 0,
276 i40e_bus_speed_33 = 33,
277 i40e_bus_speed_66 = 66,
278 i40e_bus_speed_100 = 100,
279 i40e_bus_speed_120 = 120,
280 i40e_bus_speed_133 = 133,
281 i40e_bus_speed_2500 = 2500,
282 i40e_bus_speed_5000 = 5000,
283 i40e_bus_speed_8000 = 8000,
284 i40e_bus_speed_reserved
285};
286
287/* PCI bus widths */
288enum i40e_bus_width {
289 i40e_bus_width_unknown = 0,
290 i40e_bus_width_pcie_x1 = 1,
291 i40e_bus_width_pcie_x2 = 2,
292 i40e_bus_width_pcie_x4 = 4,
293 i40e_bus_width_pcie_x8 = 8,
294 i40e_bus_width_32 = 32,
295 i40e_bus_width_64 = 64,
296 i40e_bus_width_reserved
297};
298
299/* Bus parameters */
300struct i40e_bus_info {
301 enum i40e_bus_speed speed;
302 enum i40e_bus_width width;
303 enum i40e_bus_type type;
304
305 u16 func;
306 u16 device;
307 u16 lan_id;
308};
309
310/* Flow control (FC) parameters */
311struct i40e_fc_info {
312 enum i40e_fc_mode current_mode; /* FC mode in effect */
313 enum i40e_fc_mode requested_mode; /* FC mode requested by caller */
314};
315
316#define I40E_MAX_TRAFFIC_CLASS 8
317#define I40E_MAX_USER_PRIORITY 8
318#define I40E_DCBX_MAX_APPS 32
319#define I40E_LLDPDU_SIZE 1500
320
321/* IEEE 802.1Qaz ETS Configuration data */
322struct i40e_ieee_ets_config {
323 u8 willing;
324 u8 cbs;
325 u8 maxtcs;
326 u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
327 u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
328 u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
329};
330
331/* IEEE 802.1Qaz ETS Recommendation data */
332struct i40e_ieee_ets_recommend {
333 u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
334 u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
335 u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
336};
337
338/* IEEE 802.1Qaz PFC Configuration data */
339struct i40e_ieee_pfc_config {
340 u8 willing;
341 u8 mbc;
342 u8 pfccap;
343 u8 pfcenable;
344};
345
346/* IEEE 802.1Qaz Application Priority data */
347struct i40e_ieee_app_priority_table {
348 u8 priority;
349 u8 selector;
350 u16 protocolid;
351};
352
353struct i40e_dcbx_config {
354 u32 numapps;
355 struct i40e_ieee_ets_config etscfg;
356 struct i40e_ieee_ets_recommend etsrec;
357 struct i40e_ieee_pfc_config pfc;
358 struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS];
359};
360
361/* Port hardware description */
362struct i40e_hw {
363 u8 __iomem *hw_addr;
364 void *back;
365
366 /* function pointer structs */
367 struct i40e_phy_info phy;
368 struct i40e_mac_info mac;
369 struct i40e_bus_info bus;
370 struct i40e_nvm_info nvm;
371 struct i40e_fc_info fc;
372
373 /* pci info */
374 u16 device_id;
375 u16 vendor_id;
376 u16 subsystem_device_id;
377 u16 subsystem_vendor_id;
378 u8 revision_id;
379 u8 port;
380 bool adapter_stopped;
381
382 /* capabilities for entire device and PCI func */
383 struct i40e_hw_capabilities dev_caps;
384 struct i40e_hw_capabilities func_caps;
385
386 /* Flow Director shared filter space */
387 u16 fdir_shared_filter_count;
388
389 /* device profile info */
390 u8 pf_id;
391 u16 main_vsi_seid;
392
393 /* Closest numa node to the device */
394 u16 numa_node;
395
396 /* Admin Queue info */
397 struct i40e_adminq_info aq;
398
399 /* HMC info */
400 struct i40e_hmc_info hmc; /* HMC info struct */
401
402 /* LLDP/DCBX Status */
403 u16 dcbx_status;
404
405 /* DCBX info */
406 struct i40e_dcbx_config local_dcbx_config;
407 struct i40e_dcbx_config remote_dcbx_config;
408
409 /* debug mask */
410 u32 debug_mask;
411};
412
413struct i40e_driver_version {
414 u8 major_version;
415 u8 minor_version;
416 u8 build_version;
417 u8 subbuild_version;
418};
419
420/* RX Descriptors */
421union i40e_16byte_rx_desc {
422 struct {
423 __le64 pkt_addr; /* Packet buffer address */
424 __le64 hdr_addr; /* Header buffer address */
425 } read;
426 struct {
427 struct {
428 struct {
429 union {
430 __le16 mirroring_status;
431 __le16 fcoe_ctx_id;
432 } mirr_fcoe;
433 __le16 l2tag1;
434 } lo_dword;
435 union {
436 __le32 rss; /* RSS Hash */
437 __le32 fd_id; /* Flow director filter id */
438 __le32 fcoe_param; /* FCoE DDP Context id */
439 } hi_dword;
440 } qword0;
441 struct {
442 /* ext status/error/pktype/length */
443 __le64 status_error_len;
444 } qword1;
445 } wb; /* writeback */
446};
447
448union i40e_32byte_rx_desc {
449 struct {
450 __le64 pkt_addr; /* Packet buffer address */
451 __le64 hdr_addr; /* Header buffer address */
452 /* bit 0 of hdr_buffer_addr is DD bit */
453 __le64 rsvd1;
454 __le64 rsvd2;
455 } read;
456 struct {
457 struct {
458 struct {
459 union {
460 __le16 mirroring_status;
461 __le16 fcoe_ctx_id;
462 } mirr_fcoe;
463 __le16 l2tag1;
464 } lo_dword;
465 union {
466 __le32 rss; /* RSS Hash */
467 __le32 fcoe_param; /* FCoE DDP Context id */
468 } hi_dword;
469 } qword0;
470 struct {
471 /* status/error/pktype/length */
472 __le64 status_error_len;
473 } qword1;
474 struct {
475 __le16 ext_status; /* extended status */
476 __le16 rsvd;
477 __le16 l2tag2_1;
478 __le16 l2tag2_2;
479 } qword2;
480 struct {
481 union {
482 __le32 flex_bytes_lo;
483 __le32 pe_status;
484 } lo_dword;
485 union {
486 __le32 flex_bytes_hi;
487 __le32 fd_id;
488 } hi_dword;
489 } qword3;
490 } wb; /* writeback */
491};
492
493#define I40E_RXD_QW1_STATUS_SHIFT 0
494#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
495
496enum i40e_rx_desc_status_bits {
497 /* Note: These are predefined bit offsets */
498 I40E_RX_DESC_STATUS_DD_SHIFT = 0,
499 I40E_RX_DESC_STATUS_EOF_SHIFT = 1,
500 I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
501 I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
502 I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
503 I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 3 BITS */
504 I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
505 I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
506 I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
507 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
508 I40E_RX_DESC_STATUS_LPBK_SHIFT = 14
509};
510
511#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
512#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x7UL << \
513 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
514
515enum i40e_rx_desc_fltstat_values {
516 I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
517 I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
518 I40E_RX_DESC_FLTSTAT_RSV = 2,
519 I40E_RX_DESC_FLTSTAT_RSS_HASH = 3,
520};
521
522#define I40E_RXD_QW1_ERROR_SHIFT 19
523#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
524
525enum i40e_rx_desc_error_bits {
526 /* Note: These are predefined bit offsets */
527 I40E_RX_DESC_ERROR_RXE_SHIFT = 0,
528 I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1,
529 I40E_RX_DESC_ERROR_HBO_SHIFT = 2,
530 I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
531 I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
532 I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
533 I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
534 I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6
535};
536
537enum i40e_rx_desc_error_l3l4e_fcoe_masks {
538 I40E_RX_DESC_ERROR_L3L4E_NONE = 0,
539 I40E_RX_DESC_ERROR_L3L4E_PROT = 1,
540 I40E_RX_DESC_ERROR_L3L4E_FC = 2,
541 I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
542 I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
543};
544
545#define I40E_RXD_QW1_PTYPE_SHIFT 30
546#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
547
548/* Packet type non-ip values */
549enum i40e_rx_l2_ptype {
550 I40E_RX_PTYPE_L2_RESERVED = 0,
551 I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
552 I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
553 I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
554 I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
555 I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
556 I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
557 I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
558 I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
559 I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
560 I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
561 I40E_RX_PTYPE_L2_ARP = 11,
562 I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
563 I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
564 I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
565 I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
566 I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
567 I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
568 I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
569 I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
570 I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
571 I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21
572};
573
/* Decoded view of the 8-bit Rx descriptor packet-type (ptype) field,
 * expanded into bitfields (8+1+1+1+1+3+2+1+4+3 = 25 bits of a u32).
 * Field values come from the i40e_rx_ptype_* enums below.
 */
struct i40e_rx_ptype_decoded {
	u32 ptype:8;		/* raw hardware ptype value */
	u32 known:1;		/* nonzero when the ptype is recognized */
	u32 outer_ip:1;		/* enum i40e_rx_ptype_outer_ip */
	u32 outer_ip_ver:1;	/* enum i40e_rx_ptype_outer_ip_ver */
	u32 outer_frag:1;	/* enum i40e_rx_ptype_outer_fragmented */
	u32 tunnel_type:3;	/* enum i40e_rx_ptype_tunnel_type */
	u32 tunnel_end_prot:2;	/* enum i40e_rx_ptype_tunnel_end_prot */
	u32 tunnel_end_frag:1;
	u32 inner_prot:4;	/* enum i40e_rx_ptype_inner_prot */
	u32 payload_layer:3;	/* enum i40e_rx_ptype_payload_layer */
};
586
587enum i40e_rx_ptype_outer_ip {
588 I40E_RX_PTYPE_OUTER_L2 = 0,
589 I40E_RX_PTYPE_OUTER_IP = 1
590};
591
592enum i40e_rx_ptype_outer_ip_ver {
593 I40E_RX_PTYPE_OUTER_NONE = 0,
594 I40E_RX_PTYPE_OUTER_IPV4 = 0,
595 I40E_RX_PTYPE_OUTER_IPV6 = 1
596};
597
598enum i40e_rx_ptype_outer_fragmented {
599 I40E_RX_PTYPE_NOT_FRAG = 0,
600 I40E_RX_PTYPE_FRAG = 1
601};
602
603enum i40e_rx_ptype_tunnel_type {
604 I40E_RX_PTYPE_TUNNEL_NONE = 0,
605 I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
606 I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
607 I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
608 I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
609};
610
611enum i40e_rx_ptype_tunnel_end_prot {
612 I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
613 I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
614 I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
615};
616
617enum i40e_rx_ptype_inner_prot {
618 I40E_RX_PTYPE_INNER_PROT_NONE = 0,
619 I40E_RX_PTYPE_INNER_PROT_UDP = 1,
620 I40E_RX_PTYPE_INNER_PROT_TCP = 2,
621 I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
622 I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
623 I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
624};
625
626enum i40e_rx_ptype_payload_layer {
627 I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
628 I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
629 I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
630 I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
631};
632
633#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
634#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
635 I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
636
637#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52
638#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
639 I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
640
641#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
642#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \
643 I40E_RXD_QW1_LENGTH_SPH_SHIFT)
644
645enum i40e_rx_desc_ext_status_bits {
646 /* Note: These are predefined bit offsets */
647 I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
648 I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
649 I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
650 I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
651 I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
652 I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
653 I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
654 I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
655};
656
657enum i40e_rx_desc_pe_status_bits {
658 /* Note: These are predefined bit offsets */
659 I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
660 I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
661 I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
662 I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
663 I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
664 I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
665 I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27,
666 I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
667 I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
668};
669
670#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
671#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000
672
673#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
674#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
675 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
676
677#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
678#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
679 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
680
681enum i40e_rx_prog_status_desc_status_bits {
682 /* Note: These are predefined bit offsets */
683 I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
684 I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
685};
686
687enum i40e_rx_prog_status_desc_prog_id_masks {
688 I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
689 I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
690 I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
691};
692
693enum i40e_rx_prog_status_desc_error_bits {
694 /* Note: These are predefined bit offsets */
695 I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
696 I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1,
697 I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
698 I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
699};
700
701/* TX Descriptor */
702struct i40e_tx_desc {
703 __le64 buffer_addr; /* Address of descriptor's data buf */
704 __le64 cmd_type_offset_bsz;
705};
706
707#define I40E_TXD_QW1_DTYPE_SHIFT 0
708#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
709
710enum i40e_tx_desc_dtype_value {
711 I40E_TX_DESC_DTYPE_DATA = 0x0,
712 I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
713 I40E_TX_DESC_DTYPE_CONTEXT = 0x1,
714 I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2,
715 I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8,
716 I40E_TX_DESC_DTYPE_DDP_CTX = 0x9,
717 I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB,
718 I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
719 I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
720 I40E_TX_DESC_DTYPE_DESC_DONE = 0xF
721};
722
723#define I40E_TXD_QW1_CMD_SHIFT 4
724#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
725
726enum i40e_tx_desc_cmd_bits {
727 I40E_TX_DESC_CMD_EOP = 0x0001,
728 I40E_TX_DESC_CMD_RS = 0x0002,
729 I40E_TX_DESC_CMD_ICRC = 0x0004,
730 I40E_TX_DESC_CMD_IL2TAG1 = 0x0008,
731 I40E_TX_DESC_CMD_DUMMY = 0x0010,
732 I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
733 I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
734 I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
735 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
736 I40E_TX_DESC_CMD_FCOET = 0x0080,
737 I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
738 I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
739 I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
740 I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
741 I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
742 I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
743 I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
744 I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
745};
746
747#define I40E_TXD_QW1_OFFSET_SHIFT 16
748#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
749 I40E_TXD_QW1_OFFSET_SHIFT)
750
751enum i40e_tx_desc_length_fields {
752 /* Note: These are predefined bit offsets */
753 I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
754 I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
755 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
756};
757
758#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34
759#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
760 I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
761
762#define I40E_TXD_QW1_L2TAG1_SHIFT 48
763#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
764
765/* Context descriptors */
766struct i40e_tx_context_desc {
767 __le32 tunneling_params;
768 __le16 l2tag2;
769 __le16 rsvd;
770 __le64 type_cmd_tso_mss;
771};
772
773#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0
774#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
775
776#define I40E_TXD_CTX_QW1_CMD_SHIFT 4
777#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
778
779enum i40e_tx_ctx_desc_cmd_bits {
780 I40E_TX_CTX_DESC_TSO = 0x01,
781 I40E_TX_CTX_DESC_TSYN = 0x02,
782 I40E_TX_CTX_DESC_IL2TAG2 = 0x04,
783 I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
784 I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
785 I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
786 I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
787 I40E_TX_CTX_DESC_SWTCH_VSI = 0x30,
788 I40E_TX_CTX_DESC_SWPE = 0x40
789};
790
791#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30
792#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
793 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
794
795#define I40E_TXD_CTX_QW1_MSS_SHIFT 50
796#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
797 I40E_TXD_CTX_QW1_MSS_SHIFT)
798
799#define I40E_TXD_CTX_QW1_VSI_SHIFT 50
800#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
801
802#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0
803#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
804 I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
805
806enum i40e_tx_ctx_desc_eipt_offload {
807 I40E_TX_CTX_EXT_IP_NONE = 0x0,
808 I40E_TX_CTX_EXT_IP_IPV6 = 0x1,
809 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
810 I40E_TX_CTX_EXT_IP_IPV4 = 0x3
811};
812
813#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
814#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
815 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
816
817#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
818#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
819
820#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
821#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
822
823#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
824#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \
825 I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
826
827#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
828
/* NAT/tunneling header length field in Tx context descriptor QW0
 * (7-bit field at bit 12).  Hex prefix normalized to lowercase 0x for
 * consistency with every other constant in this file; value unchanged.
 */
#define I40E_TXD_CTX_QW0_NATLEN_SHIFT	12
#define I40E_TXD_CTX_QW0_NATLEN_MASK	(0x7FULL << \
					 I40E_TXD_CTX_QW0_NATLEN_SHIFT)
832
833#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19
834#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
835 I40E_TXD_CTX_QW0_DECTTL_SHIFT)
836
837struct i40e_filter_program_desc {
838 __le32 qindex_flex_ptype_vsi;
839 __le32 rsvd;
840 __le32 dtype_cmd_cntindex;
841 __le32 fd_id;
842};
843#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0
844#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \
845 I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
846#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11
847#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \
848 I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
849#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17
850#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
851 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
852
853/* Packet Classifier Types for filters */
854enum i40e_filter_pctype {
855 /* Note: Value 0-25 are reserved for future use */
856 I40E_FILTER_PCTYPE_IPV4_TEREDO_UDP = 26,
857 I40E_FILTER_PCTYPE_IPV6_TEREDO_UDP = 27,
858 I40E_FILTER_PCTYPE_NONF_IPV4_1588_UDP = 28,
859 I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
860 I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
861 I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
862 I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32,
863 I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
864 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
865 I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
866 I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
867 /* Note: Value 37 is reserved for future use */
868 I40E_FILTER_PCTYPE_NONF_IPV6_1588_UDP = 38,
869 I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
870 I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
871 I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
872 I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
873 I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
874 I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
875 I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
876 I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
877 /* Note: Value 47 is reserved for future use */
878 I40E_FILTER_PCTYPE_FCOE_OX = 48,
879 I40E_FILTER_PCTYPE_FCOE_RX = 49,
880 /* Note: Value 50-62 are reserved for future use */
881 I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
882};
883
884enum i40e_filter_program_desc_dest {
885 I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
886 I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
887 I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2,
888};
889
890enum i40e_filter_program_desc_fd_status {
891 I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0,
892 I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1,
893 I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2,
894 I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3,
895};
896
897#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
898#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
899 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
900
901#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
902#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
903 I40E_TXD_FLTR_QW1_CMD_SHIFT)
904
905#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
906#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
907
908enum i40e_filter_program_desc_pcmd {
909 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1,
910 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2,
911};
912
913#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
914#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
915
916#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
917#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
918 I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
919
920#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
921 I40E_TXD_FLTR_QW1_CMD_SHIFT)
922#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
923 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
924
925#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
926#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
927 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
928
929enum i40e_filter_type {
930 I40E_FLOW_DIRECTOR_FLTR = 0,
931 I40E_PE_QUAD_HASH_FLTR = 1,
932 I40E_ETHERTYPE_FLTR,
933 I40E_FCOE_CTX_FLTR,
934 I40E_MAC_VLAN_FLTR,
935 I40E_HASH_FLTR
936};
937
938struct i40e_vsi_context {
939 u16 seid;
940 u16 uplink_seid;
941 u16 vsi_number;
942 u16 vsis_allocated;
943 u16 vsis_unallocated;
944 u16 flags;
945 u8 pf_num;
946 u8 vf_num;
947 u8 connection_type;
948 struct i40e_aqc_vsi_properties_data info;
949};
950
951/* Statistics collected by each port, VSI, VEB, and S-channel */
952struct i40e_eth_stats {
953 u64 rx_bytes; /* gorc */
954 u64 rx_unicast; /* uprc */
955 u64 rx_multicast; /* mprc */
956 u64 rx_broadcast; /* bprc */
957 u64 rx_discards; /* rdpc */
958 u64 rx_errors; /* repc */
959 u64 rx_missed; /* rmpc */
960 u64 rx_unknown_protocol; /* rupp */
961 u64 tx_bytes; /* gotc */
962 u64 tx_unicast; /* uptc */
963 u64 tx_multicast; /* mptc */
964 u64 tx_broadcast; /* bptc */
965 u64 tx_discards; /* tdpc */
966 u64 tx_errors; /* tepc */
967};
968
969/* Statistics collected by the MAC */
970struct i40e_hw_port_stats {
971 /* eth stats collected by the port */
972 struct i40e_eth_stats eth;
973
974 /* additional port specific stats */
975 u64 tx_dropped_link_down; /* tdold */
976 u64 crc_errors; /* crcerrs */
977 u64 illegal_bytes; /* illerrc */
978 u64 error_bytes; /* errbc */
979 u64 mac_local_faults; /* mlfc */
980 u64 mac_remote_faults; /* mrfc */
981 u64 rx_length_errors; /* rlec */
982 u64 link_xon_rx; /* lxonrxc */
983 u64 link_xoff_rx; /* lxoffrxc */
984 u64 priority_xon_rx[8]; /* pxonrxc[8] */
985 u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
986 u64 link_xon_tx; /* lxontxc */
987 u64 link_xoff_tx; /* lxofftxc */
988 u64 priority_xon_tx[8]; /* pxontxc[8] */
989 u64 priority_xoff_tx[8]; /* pxofftxc[8] */
990 u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
991 u64 rx_size_64; /* prc64 */
992 u64 rx_size_127; /* prc127 */
993 u64 rx_size_255; /* prc255 */
994 u64 rx_size_511; /* prc511 */
995 u64 rx_size_1023; /* prc1023 */
996 u64 rx_size_1522; /* prc1522 */
997 u64 rx_size_big; /* prc9522 */
998 u64 rx_undersize; /* ruc */
999 u64 rx_fragments; /* rfc */
1000 u64 rx_oversize; /* roc */
1001 u64 rx_jabber; /* rjc */
1002 u64 tx_size_64; /* ptc64 */
1003 u64 tx_size_127; /* ptc127 */
1004 u64 tx_size_255; /* ptc255 */
1005 u64 tx_size_511; /* ptc511 */
1006 u64 tx_size_1023; /* ptc1023 */
1007 u64 tx_size_1522; /* ptc1522 */
1008 u64 tx_size_big; /* ptc9522 */
1009 u64 mac_short_packet_dropped; /* mspdc */
1010 u64 checksum_error; /* xec */
1011};
1012
1013/* Checksum and Shadow RAM pointers */
1014#define I40E_SR_NVM_CONTROL_WORD 0x00
1015#define I40E_SR_EMP_MODULE_PTR 0x0F
1016#define I40E_SR_NVM_IMAGE_VERSION 0x18
1017#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
1018#define I40E_SR_NVM_EETRACK_LO 0x2D
1019#define I40E_SR_NVM_EETRACK_HI 0x2E
1020#define I40E_SR_VPD_PTR 0x2F
1021#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
1022#define I40E_SR_SW_CHECKSUM_WORD 0x3F
1023
1024/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
1025#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
1026#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
1027#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
1028#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
1029
1030/* Shadow RAM related */
1031#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
1032#define I40E_SR_WORDS_IN_1KB 512
1033/* Checksum should be calculated such that after adding all the words,
1034 * including the checksum word itself, the sum should be 0xBABA.
1035 */
1036#define I40E_SR_SW_CHECKSUM_BASE 0xBABA
1037
1038#define I40E_SRRD_SRCTL_ATTEMPTS 100000
1039
/* Types of switch elements reported in the switch configuration */
enum i40e_switch_element_types {
	I40E_SWITCH_ELEMENT_TYPE_MAC	= 1,
	I40E_SWITCH_ELEMENT_TYPE_PF	= 2,
	I40E_SWITCH_ELEMENT_TYPE_VF	= 3,
	I40E_SWITCH_ELEMENT_TYPE_EMP	= 4,
	I40E_SWITCH_ELEMENT_TYPE_BMC	= 6,
	I40E_SWITCH_ELEMENT_TYPE_PE	= 16,
	I40E_SWITCH_ELEMENT_TYPE_VEB	= 17,
	I40E_SWITCH_ELEMENT_TYPE_PA	= 18,
	I40E_SWITCH_ELEMENT_TYPE_VSI	= 19,
};
1051
/* Supported EtherType filters; values index the device's EtherType
 * filter table (note values 5 and 6 are gaps in the protocol names,
 * and 10/11 are reserved entries)
 */
enum i40e_ether_type_index {
	I40E_ETHER_TYPE_1588		= 0,
	I40E_ETHER_TYPE_FIP		= 1,
	I40E_ETHER_TYPE_OUI_EXTENDED	= 2,
	I40E_ETHER_TYPE_MAC_CONTROL	= 3,
	I40E_ETHER_TYPE_LLDP		= 4,
	I40E_ETHER_TYPE_EVB_PROTOCOL1	= 5,
	I40E_ETHER_TYPE_EVB_PROTOCOL2	= 6,
	I40E_ETHER_TYPE_QCN_CNM		= 7,
	I40E_ETHER_TYPE_8021X		= 8,
	I40E_ETHER_TYPE_ARP		= 9,
	I40E_ETHER_TYPE_RSV1		= 10,
	I40E_ETHER_TYPE_RSV2		= 11,
};
1067
/* Filter context base size is 1K */
#define I40E_HASH_FILTER_BASE_SIZE	1024
/* Supported Hash filter values; the encoded bucket count is
 * I40E_HASH_FILTER_BASE_SIZE << value (1K .. 1M)
 */
enum i40e_hash_filter_size {
	I40E_HASH_FILTER_SIZE_1K	= 0,
	I40E_HASH_FILTER_SIZE_2K	= 1,
	I40E_HASH_FILTER_SIZE_4K	= 2,
	I40E_HASH_FILTER_SIZE_8K	= 3,
	I40E_HASH_FILTER_SIZE_16K	= 4,
	I40E_HASH_FILTER_SIZE_32K	= 5,
	I40E_HASH_FILTER_SIZE_64K	= 6,
	I40E_HASH_FILTER_SIZE_128K	= 7,
	I40E_HASH_FILTER_SIZE_256K	= 8,
	I40E_HASH_FILTER_SIZE_512K	= 9,
	I40E_HASH_FILTER_SIZE_1M	= 10,
};
1084
/* DMA context base size is 0.5K */
#define I40E_DMA_CNTX_BASE_SIZE		512
/* Supported DMA context values; the encoded context count is
 * I40E_DMA_CNTX_BASE_SIZE << value (512 .. 256K)
 */
enum i40e_dma_cntx_size {
	I40E_DMA_CNTX_SIZE_512		= 0,
	I40E_DMA_CNTX_SIZE_1K		= 1,
	I40E_DMA_CNTX_SIZE_2K		= 2,
	I40E_DMA_CNTX_SIZE_4K		= 3,
	I40E_DMA_CNTX_SIZE_8K		= 4,
	I40E_DMA_CNTX_SIZE_16K		= 5,
	I40E_DMA_CNTX_SIZE_32K		= 6,
	I40E_DMA_CNTX_SIZE_64K		= 7,
	I40E_DMA_CNTX_SIZE_128K		= 8,
	I40E_DMA_CNTX_SIZE_256K		= 9,
};
1100
/* Supported Hash look up table (LUT) sizes, in entries */
enum i40e_hash_lut_size {
	I40E_HASH_LUT_SIZE_128	= 0,
	I40E_HASH_LUT_SIZE_512	= 1,
};
1106
/* Structure to hold a per PF filter control settings; passed to the
 * filter-control setup to partition hash filter and DMA context space
 * and to enable the per-function filter classes
 */
struct i40e_filter_control_settings {
	/* number of PE Quad Hash filter buckets */
	enum i40e_hash_filter_size pe_filt_num;
	/* number of PE Quad Hash contexts */
	enum i40e_dma_cntx_size pe_cntx_num;
	/* number of FCoE filter buckets */
	enum i40e_hash_filter_size fcoe_filt_num;
	/* number of FCoE DDP contexts */
	enum i40e_dma_cntx_size fcoe_cntx_num;
	/* size of the Hash LUT */
	enum i40e_hash_lut_size	hash_lut_size;
	/* enable FDIR filters for PF and its VFs */
	bool enable_fdir;
	/* enable Ethertype filters for PF and its VFs */
	bool enable_ethtype;
	/* enable MAC/VLAN filters for PF and its VFs */
	bool enable_macvlan;
};
1126
/* Structure to hold device level control filter counts */
struct i40e_control_filter_stats {
	u16 mac_etype_used;   /* Used perfect match MAC/EtherType filters */
	u16 etype_used;       /* Used perfect EtherType filters */
	u16 mac_etype_free;   /* Un-used perfect match MAC/EtherType filters */
	u16 etype_free;       /* Un-used perfect EtherType filters */
};
1134
/* Hardware reset levels, from power-on reset down to EMP reset */
enum i40e_reset_type {
	I40E_RESET_POR		= 0,	/* power-on reset */
	I40E_RESET_CORER	= 1,	/* core reset */
	I40E_RESET_GLOBR	= 2,	/* global reset */
	I40E_RESET_EMPR		= 3,	/* EMP (firmware) reset */
};
1141
/* IEEE 802.1AB LLDP Agent Variables from NVM; layout mirrors the NVM
 * module located via the I40E_NVM_LLDP_CFG_PTR pointer word
 */
#define I40E_NVM_LLDP_CFG_PTR	0xF
struct i40e_lldp_variables {
	u16 length;		/* size of the LLDP config module */
	u16 adminstatus;	/* LLDP admin status */
	u16 msgfasttx;		/* fast-transmit message interval */
	u16 msgtxinterval;	/* normal transmit interval */
	u16 txparams;		/* transmit parameters */
	u16 timers;		/* agent timers */
	u16 crc8;		/* CRC over the module contents */
};
1153
1154#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
new file mode 100644
index 000000000000..cc6654f1dac7
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -0,0 +1,368 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#ifndef _I40E_VIRTCHNL_H_
29#define _I40E_VIRTCHNL_H_
30
31#include "i40e_type.h"
32
33/* Description:
34 * This header file describes the VF-PF communication protocol used
35 * by the various i40e drivers.
36 *
37 * Admin queue buffer usage:
38 * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
39 * flags, retval, datalen, and data addr are all used normally.
40 * Firmware copies the cookie fields when sending messages between the PF and
41 * VF, but uses all other fields internally. Due to this limitation, we
42 * must send all messages as "indirect", i.e. using an external buffer.
43 *
44 * All the vsi indexes are relative to the VF. Each VF can have maximum of
45 * three VSIs. All the queue indexes are relative to the VSI. Each VF can
46 * have a maximum of sixteen queues for all of its VSIs.
47 *
48 * The PF is required to return a status code in v_retval for all messages
49 * except RESET_VF, which does not require any response. The return value is of
50 * i40e_status_code type, defined in the i40e_type.h.
51 *
52 * In general, VF driver initialization should roughly follow the order of these
53 * opcodes. The VF driver must first validate the API version of the PF driver,
54 * then request a reset, then get resources, then configure queues and
55 * interrupts. After these operations are complete, the VF driver may start
56 * its queues, optionally add MAC and VLAN filters, and process traffic.
57 */
58
/* Opcodes for VF-PF communication. These are placed in the v_opcode field
 * of the virtchnl_msg structure.
 */
enum i40e_virtchnl_ops {
/* VF sends req. to pf for the following
 * ops.
 */
	I40E_VIRTCHNL_OP_UNKNOWN = 0,
	I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
	I40E_VIRTCHNL_OP_RESET_VF,
	I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
	I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
	I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
	I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
	I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
	I40E_VIRTCHNL_OP_ENABLE_QUEUES,
	I40E_VIRTCHNL_OP_DISABLE_QUEUES,
	I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
	I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
	I40E_VIRTCHNL_OP_ADD_VLAN,
	I40E_VIRTCHNL_OP_DEL_VLAN,
	I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
	I40E_VIRTCHNL_OP_GET_STATS,
	I40E_VIRTCHNL_OP_FCOE,
/* PF sends status change events to vfs using
 * the following op.
 */
	I40E_VIRTCHNL_OP_EVENT,
};
88
/* Virtual channel message descriptor. This overlays the admin queue
 * descriptor. All other data is passed in external buffers.
 */

struct i40e_virtchnl_msg {
	u8 pad[8];			 /* AQ flags/opcode/len/retval fields */
	enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
	i40e_status v_retval;		 /* ditto for desc->retval */
	u32 vfid;			 /* used by PF when sending to VF */
};
99
100/* Message descriptions and data structures.*/
101
/* I40E_VIRTCHNL_OP_VERSION
 * VF posts its version number to the PF. PF responds with its version number
 * in the same format, along with a return code.
 * Reply from PF has its major/minor versions also in param0 and param1.
 * If there is a major version mismatch, then the VF cannot operate.
 * If there is a minor version mismatch, then the VF can operate but should
 * add a warning to the system log.
 *
 * This enum element MUST always be specified as == 1, regardless of other
 * changes in the API. The PF must always respond to this message without
 * error regardless of version mismatch.
 */
#define I40E_VIRTCHNL_VERSION_MAJOR	1
#define I40E_VIRTCHNL_VERSION_MINOR	0
struct i40e_virtchnl_version_info {
	u32 major;	/* API major version */
	u32 minor;	/* API minor version */
};
120
121/* I40E_VIRTCHNL_OP_RESET_VF
122 * VF sends this request to PF with no parameters
123 * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
124 * until reset completion is indicated. The admin queue must be reinitialized
125 * after this operation.
126 *
127 * When reset is complete, PF must ensure that all queues in all VSIs associated
128 * with the VF are stopped, all queue configurations in the HMC are set to 0,
129 * and all MAC and VLAN filters (except the default MAC address) on all VSIs
130 * are cleared.
131 */
132
/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
 * VF sends this request to PF with no parameters
 * PF responds with an indirect message containing
 * i40e_virtchnl_vf_resource and one or more
 * i40e_virtchnl_vsi_resource structures.
 */

/* Per-VSI resource description; vsi_id and queue counts are VF-relative */
struct i40e_virtchnl_vsi_resource {
	u16 vsi_id;
	u16 num_queue_pairs;
	enum i40e_vsi_type vsi_type;
	u16 qset_handle;
	u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
};
/* VF offload flags (bitmask carried in vf_offload_flags) */
#define I40E_VIRTCHNL_VF_OFFLOAD_L2	0x00000001
#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE	0x00000004
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN	0x00010000

/* Aggregate VF resource report; followed inline by num_vsis
 * i40e_virtchnl_vsi_resource entries in vsi_res[]
 */
struct i40e_virtchnl_vf_resource {
	u16 num_vsis;
	u16 num_queue_pairs;
	u16 max_vectors;
	u16 max_mtu;

	u32 vf_offload_flags;
	u32 max_fcoe_contexts;
	u32 max_fcoe_filters;

	struct i40e_virtchnl_vsi_resource vsi_res[1];
};
164
/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
 * VF sends this message to set up parameters for one TX queue.
 * External data buffer contains one instance of i40e_virtchnl_txq_info.
 * PF configures requested queue and returns a status code.
 */

/* Tx queue config info */
struct i40e_virtchnl_txq_info {
	u16 vsi_id;
	u16 queue_id;
	u16 ring_len;		/* number of descriptors, multiple of 8 */
	u16 headwb_enabled;	/* head writeback enable flag */
	u64 dma_ring_addr;	/* DMA address of the descriptor ring */
	u64 dma_headwb_addr;	/* DMA address for head writeback */
};
180
/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
 * VF sends this message to set up parameters for one RX queue.
 * External data buffer contains one instance of i40e_virtchnl_rxq_info.
 * PF configures requested queue and returns a status code.
 */

/* Rx queue config info */
struct i40e_virtchnl_rxq_info {
	u16 vsi_id;
	u16 queue_id;
	u32 ring_len;		/* number of descriptors, multiple of 32 */
	u16 hdr_size;		/* header buffer size (split mode) */
	u16 splithdr_enabled;	/* enable header split */
	u32 databuffer_size;	/* data buffer size */
	u32 max_pkt_size;	/* maximum receive packet length */
	u64 dma_ring_addr;	/* DMA address of the descriptor ring */
	enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
};
199
/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
 * VF sends this message to set parameters for all active TX and RX queues
 * associated with the specified VSI.
 * PF configures queues and returns status.
 * If the number of queues specified is greater than the number of queues
 * associated with the VSI, an error is returned and no queues are configured.
 */
struct i40e_virtchnl_queue_pair_info {
	/* NOTE: vsi_id and queue_id should be identical for both queues. */
	struct i40e_virtchnl_txq_info txq;
	struct i40e_virtchnl_rxq_info rxq;
};

/* Followed inline by num_queue_pairs qpair[] entries */
struct i40e_virtchnl_vsi_queue_config_info {
	u16 vsi_id;
	u16 num_queue_pairs;
	struct i40e_virtchnl_queue_pair_info qpair[1];
};
218
/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
 * VF uses this message to map vectors to queues.
 * The rxq_map and txq_map fields are bitmaps used to indicate which queues
 * are to be associated with the specified vector.
 * The "other" causes are always mapped to vector 0.
 * PF configures interrupt mapping and returns status.
 */
struct i40e_virtchnl_vector_map {
	u16 vsi_id;
	u16 vector_id;
	u16 rxq_map;	/* bitmap of rx queues for this vector */
	u16 txq_map;	/* bitmap of tx queues for this vector */
	u16 rxitr_idx;	/* rx interrupt throttling register index */
	u16 txitr_idx;	/* tx interrupt throttling register index */
};

/* Followed inline by num_vectors vecmap[] entries */
struct i40e_virtchnl_irq_map_info {
	u16 num_vectors;
	struct i40e_virtchnl_vector_map vecmap[1];
};
239
/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
 * I40E_VIRTCHNL_OP_DISABLE_QUEUES
 * VF sends these messages to enable or disable TX/RX queue pairs.
 * The queues fields are bitmaps indicating which queues to act upon.
 * (Currently, we only support 16 queues per VF, but we make the field
 * u32 to allow for expansion.)
 * PF performs requested action and returns status.
 */
struct i40e_virtchnl_queue_select {
	u16 vsi_id;
	u16 pad;
	u32 rx_queues;	/* bitmap of rx queues to enable/disable */
	u32 tx_queues;	/* bitmap of tx queues to enable/disable */
};
254
/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
 * VF sends this message in order to add one or more unicast or multicast
 * address filters for the specified VSI.
 * PF adds the filters and returns status.
 */

/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
 * VF sends this message in order to remove one or more unicast or multicast
 * filters for the specified VSI.
 * PF removes the filters and returns status.
 */

struct i40e_virtchnl_ether_addr {
	u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
	u8 pad[2];	/* pad struct to a 4-byte multiple */
};

/* Followed inline by num_elements list[] entries */
struct i40e_virtchnl_ether_addr_list {
	u16 vsi_id;
	u16 num_elements;
	struct i40e_virtchnl_ether_addr list[1];
};
277
/* I40E_VIRTCHNL_OP_ADD_VLAN
 * VF sends this message to add one or more VLAN tag filters for receives.
 * PF adds the filters and returns status.
 * If a port VLAN is configured by the PF, this operation will return an
 * error to the VF.
 */

/* I40E_VIRTCHNL_OP_DEL_VLAN
 * VF sends this message to remove one or more VLAN tag filters for receives.
 * PF removes the filters and returns status.
 * If a port VLAN is configured by the PF, this operation will return an
 * error to the VF.
 */

/* Followed inline by num_elements vlan_id[] entries */
struct i40e_virtchnl_vlan_filter_list {
	u16 vsi_id;
	u16 num_elements;
	u16 vlan_id[1];
};
297
/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
 * VF sends VSI id and flags.
 * PF returns status code in retval.
 * Note: we assume that broadcast accept mode is always enabled.
 */
struct i40e_virtchnl_promisc_info {
	u16 vsi_id;
	u16 flags;	/* combination of I40E_FLAG_VF_*_PROMISC bits below */
};

#define I40E_FLAG_VF_UNICAST_PROMISC	0x00000001
#define I40E_FLAG_VF_MULTICAST_PROMISC	0x00000002
310
311/* I40E_VIRTCHNL_OP_GET_STATS
312 * VF sends this message to request stats for the selected VSI. VF uses
313 * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
314 * field is ignored by the PF.
315 *
316 * PF replies with struct i40e_eth_stats in an external buffer.
317 */
318
/* I40E_VIRTCHNL_OP_EVENT
 * PF sends this message to inform the VF driver of events that may affect it.
 * No direct response is expected from the VF, though it may generate other
 * messages in response to this one.
 */
enum i40e_virtchnl_event_codes {
	I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
	I40E_VIRTCHNL_EVENT_LINK_CHANGE,
	I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
	I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
};
/* severity values carried in i40e_virtchnl_pf_event.severity */
#define I40E_PF_EVENT_SEVERITY_INFO		0
#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM	255
332
/* Event notification sent from PF to VF; event_data is valid only for
 * the event codes that define a payload (currently link_event)
 */
struct i40e_virtchnl_pf_event {
	enum i40e_virtchnl_event_codes event;
	union {
		struct {
			enum i40e_aq_link_speed link_speed;
			bool link_status;	/* true when link is up */
		} link_event;
	} event_data;

	int severity;	/* I40E_PF_EVENT_SEVERITY_* */
};
344
345/* The following are TBD, not necessary for LAN functionality.
346 * I40E_VIRTCHNL_OP_FCOE
347 */
348
/* VF reset states - these are written into the RSTAT register:
 * I40E_VFGEN_RSTAT1 on the PF
 * I40E_VFGEN_RSTAT on the VF
 * When the PF initiates a reset, it writes 0
 * When the reset is complete, it writes 1
 * When the PF detects that the VF has recovered, it writes 2
 * VF checks this register periodically to determine if a reset has occurred,
 * then polls it to know when the reset is complete.
 * If either the PF or VF reads the register while the hardware
 * is in a reset state, it will return DEADBEEF, which, when masked
 * will result in 3.
 */
enum i40e_vfr_states {
	I40E_VFR_INPROGRESS = 0,	/* PF has started the reset */
	I40E_VFR_COMPLETED,		/* hardware reset is complete */
	I40E_VFR_VFACTIVE,		/* PF saw the VF recover */
	I40E_VFR_UNKNOWN,		/* register read during reset */
};
367
368#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
new file mode 100644
index 000000000000..8967e58e2408
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -0,0 +1,2335 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#include "i40e.h"
29
30/***********************misc routines*****************************/
31
/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the vf info
 * @vsi_id: vf relative vsi id
 *
 * check for the valid vsi id: the id is valid when the VSI at that slot
 * in the PF's VSI array belongs to this VF.
 *
 * NOTE(review): @vsi_id arrives from the VF and is neither range-checked
 * against the pf->vsi[] array size nor NULL-checked before dereference;
 * a malicious VF could trigger an out-of-bounds read or NULL deref here —
 * confirm all callers sanitize vsi_id or add a bounds/NULL check.
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
{
	struct i40e_pf *pf = vf->pf;

	return pf->vsi[vsi_id]->vf_id == vf->vf_id;
}
45
46/**
47 * i40e_vc_isvalid_queue_id
48 * @vf: pointer to the vf info
49 * @vsi_id: vsi id
50 * @qid: vsi relative queue id
51 *
52 * check for the valid queue id
53 **/
54static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
55 u8 qid)
56{
57 struct i40e_pf *pf = vf->pf;
58
59 return qid < pf->vsi[vsi_id]->num_queue_pairs;
60}
61
62/**
63 * i40e_vc_isvalid_vector_id
64 * @vf: pointer to the vf info
65 * @vector_id: vf relative vector id
66 *
67 * check for the valid vector id
68 **/
69static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
70{
71 struct i40e_pf *pf = vf->pf;
72
73 return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
74}
75
76/***********************vf resource mgmt routines*****************/
77
78/**
79 * i40e_vc_get_pf_queue_id
80 * @vf: pointer to the vf info
81 * @vsi_idx: index of VSI in PF struct
82 * @vsi_queue_id: vsi relative queue id
83 *
84 * return pf relative queue id
85 **/
86static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
87 u8 vsi_queue_id)
88{
89 struct i40e_pf *pf = vf->pf;
90 struct i40e_vsi *vsi = pf->vsi[vsi_idx];
91 u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
92
93 if (le16_to_cpu(vsi->info.mapping_flags) &
94 I40E_AQ_VSI_QUE_MAP_NONCONTIG)
95 pf_queue_id =
96 le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
97 else
98 pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
99 vsi_queue_id;
100
101 return pf_queue_id;
102}
103
/**
 * i40e_ctrl_vsi_tx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @ctrl: control flags
 *
 * Enable, disable, fast-disable, or check the state of one VF Tx queue by
 * manipulating its QTX_ENA register.
 *
 * Returns 0 on success, -EPERM when a *CHECK operation finds the queue has
 * not yet reached the requested state, -EINVAL for an unknown @ctrl.
 **/
static int i40e_ctrl_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
				  u16 vsi_queue_id,
				  enum i40e_queue_ctrl ctrl)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool writeback = false;	/* set when reg must be written back to hw */
	u16 pf_queue_id;
	int ret = 0;
	u32 reg;

	/* translate the VSI-relative queue id to the PF-global queue id */
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
	reg = rd32(hw, I40E_QTX_ENA(pf_queue_id));

	switch (ctrl) {
	case I40E_QUEUE_CTRL_ENABLE:
		/* request the queue be enabled */
		reg |= I40E_QTX_ENA_QENA_REQ_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_ENABLECHECK:
		/* QENA_STAT reflects whether the enable has taken effect */
		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
		break;
	case I40E_QUEUE_CTRL_DISABLE:
		/* clear the enable request */
		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_DISABLECHECK:
		/* disabled when QENA_STAT has dropped */
		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
		break;
	case I40E_QUEUE_CTRL_FASTDISABLE:
		/* request an immediate (fast) queue disable */
		reg |= I40E_QTX_ENA_FAST_QDIS_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_FASTDISABLECHECK:
		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
		if (!ret) {
			/* fast disable completed; clear the request bit */
			reg &= ~I40E_QTX_ENA_FAST_QDIS_MASK;
			writeback = true;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (writeback) {
		wr32(hw, I40E_QTX_ENA(pf_queue_id), reg);
		i40e_flush(hw);
	}

	return ret;
}
165
/**
 * i40e_ctrl_vsi_rx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @ctrl: control flags
 *
 * Enable, disable, fast-disable, or check the state of one VF Rx queue by
 * manipulating its QRX_ENA register (the Rx mirror of
 * i40e_ctrl_vsi_tx_queue).
 *
 * Returns 0 on success, -EPERM when a *CHECK operation finds the queue has
 * not yet reached the requested state, -EINVAL for an unknown @ctrl.
 **/
static int i40e_ctrl_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
				  u16 vsi_queue_id,
				  enum i40e_queue_ctrl ctrl)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool writeback = false;	/* set when reg must be written back to hw */
	u16 pf_queue_id;
	int ret = 0;
	u32 reg;

	/* translate the VSI-relative queue id to the PF-global queue id */
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
	reg = rd32(hw, I40E_QRX_ENA(pf_queue_id));

	switch (ctrl) {
	case I40E_QUEUE_CTRL_ENABLE:
		/* request the queue be enabled */
		reg |= I40E_QRX_ENA_QENA_REQ_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_ENABLECHECK:
		/* QENA_STAT reflects whether the enable has taken effect */
		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
		break;
	case I40E_QUEUE_CTRL_DISABLE:
		/* clear the enable request */
		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_DISABLECHECK:
		/* disabled when QENA_STAT has dropped */
		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
		break;
	case I40E_QUEUE_CTRL_FASTDISABLE:
		/* request an immediate (fast) queue disable */
		reg |= I40E_QRX_ENA_FAST_QDIS_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_FASTDISABLECHECK:
		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
		if (!ret) {
			/* fast disable completed; clear the request bit */
			reg &= ~I40E_QRX_ENA_FAST_QDIS_MASK;
			writeback = true;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (writeback) {
		wr32(hw, I40E_QRX_ENA(pf_queue_id), reg);
		i40e_flush(hw);
	}

	return ret;
}
227
/**
 * i40e_config_irq_link_list
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vecmap: irq map info
 *
 * Program the hardware interrupt-cause linked list for one VF vector:
 * the list head goes into VPINT_LNKLST0/LNKLSTN and each queue's
 * QINT_RQCTL/QINT_TQCTL register is chained to the next queue in the list.
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
				      struct i40e_virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head: vector 0 uses LNKLST0; the remaining vectors use
	 * the per-VF LNKLSTN block indexed by
	 * (msix-vectors-per-vf - 1) * vf_id + (vector_id - 1)
	 */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1)
					  * vf->vf_id) + (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	/* merge the rx bitmap into linklistmap; each VSI queue occupies
	 * I40E_VIRTCHNL_SUPPORTED_QTYPES consecutive bits, rx first
	 */
	tempmap = vecmap->rxq_map;
	vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (vsi_queue_id < I40E_MAX_VSI_QP) {
		linklistmap |= (1 <<
				(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				 vsi_queue_id));
		vsi_queue_id =
		    find_next_bit(&tempmap, I40E_MAX_VSI_QP, vsi_queue_id + 1);
	}

	/* tx bit for a queue sits one position above its rx bit */
	tempmap = vecmap->txq_map;
	vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (vsi_queue_id < I40E_MAX_VSI_QP) {
		linklistmap |= (1 <<
				(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
				 + 1));
		vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					     vsi_queue_id + 1);
	}

	/* the first set bit determines the list head: decode it back into
	 * a (queue, type) pair and write it to the LNKLST head register
	 */
	next_q = find_first_bit(&linklistmap,
				(I40E_MAX_VSI_QP *
				 I40E_VIRTCHNL_SUPPORTED_QTYPES));
	vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	/* walk the remaining bits, chaining each queue's cause-control
	 * register to the next list element (or END_OF_LIST for the last)
	 */
	while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap,
				       (I40E_MAX_VSI_QP *
					I40E_VIRTCHNL_SUPPORTED_QTYPES),
				       next_q + 1);
		if (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
							      vsi_queue_id);
		} else {
			/* no more queues: terminate the list */
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

irq_list_done:
	i40e_flush(hw);
}
332
/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * Configure one VF Tx queue: build its HMC context from @info, program it
 * into the HMC, and bind the queue to the VF's PCI function via QTX_CTL.
 *
 * Returns 0 on success, -ENOENT when an HMC context operation fails.
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	/* hardware takes the ring base in 128-byte units */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}
397
/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * Configure one VF Rx queue: validate the VF-supplied buffer/packet sizes,
 * build the queue's HMC context from @info, and program it into the HMC.
 *
 * Returns 0 on success, -EINVAL for out-of-range sizes, -ENOENT when an
 * HMC context operation fails.
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	/* hardware takes the ring base in 128-byte units */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		/* split headers for L2/IP/TCP-UDP/SCTP into the header buf */
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation: at most 2KB - 64 */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set splitalways mode 10b */
		rx_ctx.dtype = 0x2;
	}

	/* databuffer length validation: at most 16KB - 128 */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation: within [64, 16K) */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}
490
491/**
492 * i40e_alloc_vsi_res
493 * @vf: pointer to the vf info
494 * @type: type of VSI to allocate
495 *
496 * alloc vf vsi context & resources
497 **/
498static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
499{
500 struct i40e_mac_filter *f = NULL;
501 struct i40e_pf *pf = vf->pf;
502 struct i40e_hw *hw = &pf->hw;
503 struct i40e_vsi *vsi;
504 int ret = 0;
505
506 vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);
507
508 if (!vsi) {
509 dev_err(&pf->pdev->dev,
510 "add vsi failed for vf %d, aq_err %d\n",
511 vf->vf_id, pf->hw.aq.asq_last_status);
512 ret = -ENOENT;
513 goto error_alloc_vsi_res;
514 }
515 if (type == I40E_VSI_SRIOV) {
516 vf->lan_vsi_index = vsi->idx;
517 vf->lan_vsi_id = vsi->id;
518 dev_info(&pf->pdev->dev,
519 "LAN VSI index %d, VSI id %d\n",
520 vsi->idx, vsi->id);
521 f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
522 0, true, false);
523 }
524 if (!f) {
525 dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
526 ret = -ENOMEM;
527 goto error_alloc_vsi_res;
528 }
529
530 /* program mac filter */
531 ret = i40e_sync_vsi_filters(vsi);
532 if (ret) {
533 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
534 goto error_alloc_vsi_res;
535 }
536
537 /* accept bcast pkts. by default */
538 ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
539 if (ret) {
540 dev_err(&pf->pdev->dev,
541 "set vsi bcast failed for vf %d, vsi %d, aq_err %d\n",
542 vf->vf_id, vsi->idx, pf->hw.aq.asq_last_status);
543 ret = -EINVAL;
544 }
545
546error_alloc_vsi_res:
547 return ret;
548}
549
550/**
551 * i40e_reset_vf
552 * @vf: pointer to the vf structure
553 * @flr: VFLR was issued or not
554 *
555 * reset the vf
556 **/
557int i40e_reset_vf(struct i40e_vf *vf, bool flr)
558{
559 int ret = -ENOENT;
560 struct i40e_pf *pf = vf->pf;
561 struct i40e_hw *hw = &pf->hw;
562 u32 reg, reg_idx, msix_vf;
563 bool rsd = false;
564 u16 pf_queue_id;
565 int i, j;
566
567 /* warn the VF */
568 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_INPROGRESS);
569
570 clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
571
572 /* PF triggers VFR only when VF requests, in case of
573 * VFLR, HW triggers VFR
574 */
575 if (!flr) {
576 /* reset vf using VPGEN_VFRTRIG reg */
577 reg = I40E_VPGEN_VFRTRIG_VFSWR_MASK;
578 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
579 i40e_flush(hw);
580 }
581
582 /* poll VPGEN_VFRSTAT reg to make sure
583 * that reset is complete
584 */
585 for (i = 0; i < 4; i++) {
586 /* vf reset requires driver to first reset the
587 * vf & than poll the status register to make sure
588 * that the requested op was completed
589 * successfully
590 */
591 udelay(10);
592 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
593 if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
594 rsd = true;
595 break;
596 }
597 }
598
599 if (!rsd)
600 dev_err(&pf->pdev->dev, "VF reset check timeout %d\n",
601 vf->vf_id);
602
603 /* fast disable qps */
604 for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
605 ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
606 I40E_QUEUE_CTRL_FASTDISABLE);
607 ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
608 I40E_QUEUE_CTRL_FASTDISABLE);
609 }
610
611 /* Queue enable/disable requires driver to
612 * first reset the vf & than poll the status register
613 * to make sure that the requested op was completed
614 * successfully
615 */
616 udelay(10);
617 for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
618 ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
619 I40E_QUEUE_CTRL_FASTDISABLECHECK);
620 if (ret)
621 dev_info(&pf->pdev->dev,
622 "Queue control check failed on Tx queue %d of VSI %d VF %d\n",
623 vf->lan_vsi_index, j, vf->vf_id);
624 ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
625 I40E_QUEUE_CTRL_FASTDISABLECHECK);
626 if (ret)
627 dev_info(&pf->pdev->dev,
628 "Queue control check failed on Rx queue %d of VSI %d VF %d\n",
629 vf->lan_vsi_index, j, vf->vf_id);
630 }
631
632 /* clear the irq settings */
633 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
634 for (i = 0; i < msix_vf; i++) {
635 /* format is same for both registers */
636 if (0 == i)
637 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
638 else
639 reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
640 (vf->vf_id))
641 + (i - 1));
642 reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
643 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
644 wr32(hw, reg_idx, reg);
645 i40e_flush(hw);
646 }
647 /* disable interrupts so the VF starts in a known state */
648 for (i = 0; i < msix_vf; i++) {
649 /* format is same for both registers */
650 if (0 == i)
651 reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
652 else
653 reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
654 (vf->vf_id))
655 + (i - 1));
656 wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
657 i40e_flush(hw);
658 }
659
660 /* set the defaults for the rqctl & tqctl registers */
661 reg = (I40E_QINT_RQCTL_NEXTQ_INDX_MASK | I40E_QINT_RQCTL_ITR_INDX_MASK |
662 I40E_QINT_RQCTL_NEXTQ_TYPE_MASK);
663 for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
664 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
665 wr32(hw, I40E_QINT_RQCTL(pf_queue_id), reg);
666 wr32(hw, I40E_QINT_TQCTL(pf_queue_id), reg);
667 }
668
669 /* clear the reset bit in the VPGEN_VFRTRIG reg */
670 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
671 reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
672 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
673 /* tell the VF the reset is done */
674 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
675 i40e_flush(hw);
676
677 return ret;
678}
679
680/**
681 * i40e_enable_vf_mappings
682 * @vf: pointer to the vf info
683 *
684 * enable vf mappings
685 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_queue_pairs = 0;
	int j;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	/* map PF queues to VF queues: one VPLAN_QTABLE entry per
	 * queue pair of the VF's LAN VSI
	 */
	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
		total_queue_pairs++;
	}

	/* map PF queues to VSI; each VSILAN_QTABLE register packs two
	 * queue ids (low/high 16 bits), so 7 registers cover 14 queues
	 */
	for (j = 0; j < 7; j++) {
		if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) {
			reg = 0x07FF07FF;	/* unused */
		} else {
			u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
							  j * 2);
			reg = qid;
			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
						      (j * 2) + 1);
			reg |= qid << 16;
		}
		wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
	}

	i40e_flush(hw);
}
729
730/**
731 * i40e_disable_vf_mappings
732 * @vf: pointer to the vf info
733 *
734 * disable vf mappings
735 **/
736static void i40e_disable_vf_mappings(struct i40e_vf *vf)
737{
738 struct i40e_pf *pf = vf->pf;
739 struct i40e_hw *hw = &pf->hw;
740 int i;
741
742 /* disable qp mappings */
743 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
744 for (i = 0; i < I40E_MAX_VSI_QP; i++)
745 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
746 I40E_QUEUE_END_OF_LIST);
747 i40e_flush(hw);
748}
749
750/**
751 * i40e_free_vf_res
752 * @vf: pointer to the vf info
753 *
754 * free vf resources
755 **/
756static void i40e_free_vf_res(struct i40e_vf *vf)
757{
758 struct i40e_pf *pf = vf->pf;
759
760 /* free vsi & disconnect it from the parent uplink */
761 if (vf->lan_vsi_index) {
762 i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
763 vf->lan_vsi_index = 0;
764 vf->lan_vsi_id = 0;
765 }
766 /* reset some of the state varibles keeping
767 * track of the resources
768 */
769 vf->num_queue_pairs = 0;
770 vf->vf_states = 0;
771}
772
773/**
774 * i40e_alloc_vf_res
775 * @vf: pointer to the vf info
776 *
777 * allocate vf resources
778 **/
779static int i40e_alloc_vf_res(struct i40e_vf *vf)
780{
781 struct i40e_pf *pf = vf->pf;
782 int total_queue_pairs = 0;
783 int ret;
784
785 /* allocate hw vsi context & associated resources */
786 ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
787 if (ret)
788 goto error_alloc;
789 total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
790 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
791
792 /* store the total qps number for the runtime
793 * vf req validation
794 */
795 vf->num_queue_pairs = total_queue_pairs;
796
797 /* vf is now completely initialized */
798 set_bit(I40E_VF_STAT_INIT, &vf->vf_states);
799
800error_alloc:
801 if (ret)
802 i40e_free_vf_res(vf);
803
804 return ret;
805}
806
807/**
808 * i40e_vfs_are_assigned
809 * @pf: pointer to the pf structure
810 *
811 * Determine if any VFs are assigned to VMs
812 **/
813static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
814{
815 struct pci_dev *pdev = pf->pdev;
816 struct pci_dev *vfdev;
817
818 /* loop through all the VFs to see if we own any that are assigned */
819 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_VF_DEVICE_ID , NULL);
820 while (vfdev) {
821 /* if we don't own it we don't care */
822 if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
823 /* if it is assigned we cannot release it */
824 if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
825 return true;
826 }
827
828 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
829 I40E_VF_DEVICE_ID,
830 vfdev);
831 }
832
833 return false;
834}
835
836/**
837 * i40e_free_vfs
838 * @pf: pointer to the pf structure
839 *
840 * free vf resources
841 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* nothing to do if no VFs were ever allocated */
	if (!pf->vf)
		return;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, I40E_PFINT_DYN_CTL0, 0);
	i40e_flush(hw);

	/* free up vf resources; only fully-initialized VFs own resources */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;
	pf->num_alloc_vfs = 0;

	/* SR-IOV can only be turned off if no VF is passed through to a VM */
	if (!i40e_vfs_are_assigned(pf))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev,
			 "unable to disable SR-IOV because VFs are assigned.\n");

	/* Re-enable interrupt 0. */
	wr32(hw, I40E_PFINT_DYN_CTL0,
	     I40E_PFINT_DYN_CTL0_INTENA_MASK |
	     I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	     (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
	i40e_flush(hw);
}
879
880#ifdef CONFIG_PCI_IOV
881/**
882 * i40e_alloc_vfs
883 * @pf: pointer to the pf structure
884 * @num_alloc_vfs: number of vfs to allocate
885 *
886 * allocate vf resources
887 **/
888static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
889{
890 struct i40e_vf *vfs;
891 int i, ret = 0;
892
893 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
894 if (ret) {
895 dev_err(&pf->pdev->dev,
896 "pci_enable_sriov failed with error %d!\n", ret);
897 pf->num_alloc_vfs = 0;
898 goto err_iov;
899 }
900
901 /* allocate memory */
902 vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL);
903 if (!vfs) {
904 ret = -ENOMEM;
905 goto err_alloc;
906 }
907
908 /* apply default profile */
909 for (i = 0; i < num_alloc_vfs; i++) {
910 vfs[i].pf = pf;
911 vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
912 vfs[i].vf_id = i;
913
914 /* assign default capabilities */
915 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
916
917 ret = i40e_alloc_vf_res(&vfs[i]);
918 i40e_reset_vf(&vfs[i], true);
919 if (ret)
920 break;
921
922 /* enable vf vplan_qtable mappings */
923 i40e_enable_vf_mappings(&vfs[i]);
924 }
925 pf->vf = vfs;
926 pf->num_alloc_vfs = num_alloc_vfs;
927
928err_alloc:
929 if (ret)
930 i40e_free_vfs(pf);
931err_iov:
932 return ret;
933}
934
935#endif
936/**
937 * i40e_pci_sriov_enable
938 * @pdev: pointer to a pci_dev structure
939 * @num_vfs: number of vfs to allocate
940 *
941 * Enable or change the number of VFs
942 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	/* a different VF count means tear down and start over;
	 * the same count means there is nothing to do
	 */
	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	/* never hand out more VFs than were requested/provisioned */
	if (num_vfs > pf->num_req_vfs) {
		err = -EPERM;
		goto err_out;
	}

	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	/* non-CONFIG_PCI_IOV builds: SR-IOV is a no-op.
	 * NOTE(review): with CONFIG_PCI_IOV set this return is
	 * unreachable (both paths above return first) — confirm the
	 * #endif placement is intentional.
	 */
	return 0;
}
975
976/**
977 * i40e_pci_sriov_configure
978 * @pdev: pointer to a pci_dev structure
979 * @num_vfs: number of vfs to allocate
980 *
981 * Enable or change the number of VFs. Called when the user updates the number
982 * of VFs in sysfs.
983 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	/* a zero count from sysfs means tear all VFs down;
	 * any nonzero count (re)enables that many VFs
	 */
	if (num_vfs == 0) {
		i40e_free_vfs(pf);
		return 0;
	}

	return i40e_pci_sriov_enable(pdev, num_vfs);
}
994
995/***********************virtual channel routines******************/
996
997/**
998 * i40e_vc_send_msg_to_vf
999 * @vf: pointer to the vf info
1000 * @v_opcode: virtual channel opcode
1001 * @v_retval: virtual channel return value
1002 * @msg: pointer to the msg buffer
1003 * @msglen: msg length
1004 *
1005 * send msg to vf
1006 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	i40e_status aq_ret;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		/* track failures; a VF that keeps sending bad requests
		 * is eventually disabled until re-enabled via the PF
		 */
		vf->num_invalid_msgs++;
		dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
			v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
	}

	/* deliver the reply over the admin queue */
	aq_ret = i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_err(&pf->pdev->dev,
			"Unable to send the message to VF %d aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}
1042
1043/**
1044 * i40e_vc_send_resp_to_vf
1045 * @vf: pointer to the vf info
1046 * @opcode: operation code
1047 * @retval: return value
1048 *
1049 * send resp msg to vf
1050 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum i40e_virtchnl_ops opcode,
				   i40e_status retval)
{
	/* convenience wrapper for replies that carry no payload */
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}
1057
1058/**
1059 * i40e_vc_get_version_msg
1060 * @vf: pointer to the vf info
1061 *
1062 * called from the vf to request the API version used by the PF
1063 **/
1064static int i40e_vc_get_version_msg(struct i40e_vf *vf)
1065{
1066 struct i40e_virtchnl_version_info info = {
1067 I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
1068 };
1069
1070 return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
1071 I40E_SUCCESS, (u8 *)&info,
1072 sizeof(struct
1073 i40e_virtchnl_version_info));
1074}
1075
1076/**
1077 * i40e_vc_get_vf_resources_msg
1078 * @vf: pointer to the vf info
1079 * @msg: pointer to the msg buffer
1080 * @msglen: msg length
1081 *
1082 * called from the vf to request its resources
1083 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
{
	struct i40e_virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int i = 0, len = 0;
	int num_vsis = 1;
	int ret;

	/* the VF must have completed initialization first */
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	/* resource struct plus one trailing vsi_resource entry per VSI */
	len = (sizeof(struct i40e_virtchnl_vf_resource) +
	       sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		/* len = 0 so the error reply carries no payload */
		len = 0;
		goto err;
	}

	vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_index];
	/* VLAN offload is only offered when no port VLAN is set */
	if (!vsi->info.pvid)
		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	if (vf->lan_vsi_index) {
		vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
		vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
		vfres->vsi_res[i].num_queue_pairs =
		    pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
		memcpy(vfres->vsi_res[i].default_mac_addr,
		       vf->default_lan_addr.addr, ETH_ALEN);
		i++;
	}
	/* resources reported: the VF may now issue runtime requests */
	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the vf */
	ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}
1136
1137/**
1138 * i40e_vc_reset_vf_msg
1139 * @vf: pointer to the vf info
1140 * @msg: pointer to the msg buffer
1141 * @msglen: msg length
1142 *
1143 * called from the vf to reset itself,
1144 * unlike other virtchnl messages, pf driver
1145 * doesn't send the response back to the vf
1146 **/
1147static int i40e_vc_reset_vf_msg(struct i40e_vf *vf)
1148{
1149 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
1150 return -ENOENT;
1151
1152 return i40e_reset_vf(vf, false);
1153}
1154
1155/**
1156 * i40e_vc_config_promiscuous_mode_msg
1157 * @vf: pointer to the vf info
1158 * @msg: pointer to the msg buffer
1159 * @msglen: msg length
1160 *
1161 * called from the vf to configure the promiscuous mode of
1162 * vf vsis
1163 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
					       u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_promisc_info *info =
	    (struct i40e_virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool allmulti = false;
	bool promisc = false;
	i40e_status aq_ret;

	/* VF must be active, privileged, and own a valid VSI.
	 * NOTE(review): the last clause rejects every VSI type except
	 * I40E_VSI_FCOE — confirm promiscuous mode is really meant to
	 * be FCOE-only here.
	 */
	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
	    (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* apply unicast promiscuous per the VF's requested flags */
	if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
		promisc = true;
	aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, info->vsi_id,
						     promisc, NULL);
	if (aq_ret)
		goto error_param;

	/* then multicast promiscuous */
	if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;
	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, info->vsi_id,
						       allmulti, NULL);

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf,
				       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}
1201
1202/**
1203 * i40e_vc_config_queues_msg
1204 * @vf: pointer to the vf info
1205 * @msg: pointer to the msg buffer
1206 * @msglen: msg length
1207 *
1208 * called from the vf to configure the rx/tx
1209 * queues
1210 **/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vsi_queue_config_info *qci =
	    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
	struct i40e_virtchnl_queue_pair_info *qpi;
	u16 vsi_id, vsi_queue_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi_id = qci->vsi_id;
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];
		vsi_queue_id = qpi->txq.queue_id;
		/* tx and rx halves of a pair must agree on VSI and
		 * queue id, and the queue must belong to this VF
		 */
		if ((qpi->txq.vsi_id != vsi_id) ||
		    (qpi->rxq.vsi_id != vsi_id) ||
		    (qpi->rxq.queue_id != vsi_queue_id) ||
		    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* program both directions; any failure aborts the whole
		 * request
		 */
		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}
1255
1256/**
1257 * i40e_vc_config_irq_map_msg
1258 * @vf: pointer to the vf info
1259 * @msg: pointer to the msg buffer
1260 * @msglen: msg length
1261 *
1262 * called from the vf to configure the irq to
1263 * queue map
1264 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_irq_map_info *irqmap_info =
	    (struct i40e_virtchnl_irq_map_info *)msg;
	struct i40e_virtchnl_vector_map *map;
	u16 vsi_id, vsi_queue_id, vector_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* lookout for the invalid queue index: every bit set in
		 * the rx queue bitmap must name a queue this VF owns
		 */
		tempmap = map->rxq_map;
		vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
		while (vsi_queue_id < I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
			vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
						     vsi_queue_id + 1);
		}

		/* same validation for the tx queue bitmap */
		tempmap = map->txq_map;
		vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
		while (vsi_queue_id < I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
			vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
						     vsi_queue_id + 1);
		}

		/* all queues validated: program the vector's linked list */
		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}
1324
1325/**
1326 * i40e_vc_enable_queues_msg
1327 * @vf: pointer to the vf info
1328 * @msg: pointer to the msg buffer
1329 * @msglen: msg length
1330 *
1331 * called from the vf to enable all or specific queue(s)
1332 **/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	u16 queue_id;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* at least one rx or tx queue must be selected */
	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* enable every rx queue whose bit is set in the bitmap */
	tempmap = vqs->rx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
		i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
				       I40E_QUEUE_CTRL_ENABLE);

		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	/* likewise for the selected tx queues */
	tempmap = vqs->tx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
		i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
				       I40E_QUEUE_CTRL_ENABLE);

		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	/* Poll the status register to make sure that the
	 * requested op was completed successfully
	 */
	udelay(10);

	/* verify the enables took effect; failures are logged only */
	tempmap = vqs->rx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
					   I40E_QUEUE_CTRL_ENABLECHECK)) {
			dev_err(&pf->pdev->dev,
				"Queue control check failed on RX queue %d of VSI %d VF %d\n",
				queue_id, vsi_id, vf->vf_id);
		}
		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	tempmap = vqs->tx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
					   I40E_QUEUE_CTRL_ENABLECHECK)) {
			dev_err(&pf->pdev->dev,
				"Queue control check failed on TX queue %d of VSI %d VF %d\n",
				queue_id, vsi_id, vf->vf_id);
		}
		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
				       aq_ret);
}
1422
1423/**
1424 * i40e_vc_disable_queues_msg
1425 * @vf: pointer to the vf info
1426 * @msg: pointer to the msg buffer
1427 * @msglen: msg length
1428 *
1429 * called from the vf to disable all or specific
1430 * queue(s)
1431 **/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	u16 queue_id;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* at least one rx or tx queue must be selected */
	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* disable every rx queue whose bit is set in the bitmap */
	tempmap = vqs->rx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
		i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
				       I40E_QUEUE_CTRL_DISABLE);

		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	/* likewise for the selected tx queues */
	tempmap = vqs->tx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
		i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
				       I40E_QUEUE_CTRL_DISABLE);

		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	/* Poll the status register to make sure that the
	 * requested op was completed successfully
	 */
	udelay(10);

	/* verify the disables took effect; failures are logged only */
	tempmap = vqs->rx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
					   I40E_QUEUE_CTRL_DISABLECHECK)) {
			dev_err(&pf->pdev->dev,
				"Queue control check failed on RX queue %d of VSI %d VF %d\n",
				queue_id, vsi_id, vf->vf_id);
		}
		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	tempmap = vqs->tx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
					   I40E_QUEUE_CTRL_DISABLECHECK)) {
			dev_err(&pf->pdev->dev,
				"Queue control check failed on TX queue %d of VSI %d VF %d\n",
				queue_id, vsi_id, vf->vf_id);
		}
		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
				       aq_ret);
}
1521
1522/**
1523 * i40e_vc_get_stats_msg
1524 * @vf: pointer to the vf info
1525 * @msg: pointer to the msg buffer
1526 * @msglen: msg length
1527 *
1528 * called from the vf to get vsi stats
1529 **/
1530static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1531{
1532 struct i40e_virtchnl_queue_select *vqs =
1533 (struct i40e_virtchnl_queue_select *)msg;
1534 struct i40e_pf *pf = vf->pf;
1535 struct i40e_eth_stats stats;
1536 i40e_status aq_ret = 0;
1537 struct i40e_vsi *vsi;
1538
1539 memset(&stats, 0, sizeof(struct i40e_eth_stats));
1540
1541 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1542 aq_ret = I40E_ERR_PARAM;
1543 goto error_param;
1544 }
1545
1546 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1547 aq_ret = I40E_ERR_PARAM;
1548 goto error_param;
1549 }
1550
1551 vsi = pf->vsi[vqs->vsi_id];
1552 if (!vsi) {
1553 aq_ret = I40E_ERR_PARAM;
1554 goto error_param;
1555 }
1556 i40e_update_eth_stats(vsi);
1557 memcpy(&stats, &vsi->eth_stats, sizeof(struct i40e_eth_stats));
1558
1559error_param:
1560 /* send the response back to the vf */
1561 return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
1562 (u8 *)&stats, sizeof(stats));
1563}
1564
1565/**
1566 * i40e_vc_add_mac_addr_msg
1567 * @vf: pointer to the vf info
1568 * @msg: pointer to the msg buffer
1569 * @msglen: msg length
1570 *
1571 * add guest mac address filter
1572 **/
1573static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1574{
1575 struct i40e_virtchnl_ether_addr_list *al =
1576 (struct i40e_virtchnl_ether_addr_list *)msg;
1577 struct i40e_pf *pf = vf->pf;
1578 struct i40e_vsi *vsi = NULL;
1579 u16 vsi_id = al->vsi_id;
1580 i40e_status aq_ret = 0;
1581 int i;
1582
1583 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1584 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1585 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1586 aq_ret = I40E_ERR_PARAM;
1587 goto error_param;
1588 }
1589
1590 for (i = 0; i < al->num_elements; i++) {
1591 if (is_broadcast_ether_addr(al->list[i].addr) ||
1592 is_zero_ether_addr(al->list[i].addr)) {
1593 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pMAC\n",
1594 al->list[i].addr);
1595 aq_ret = I40E_ERR_PARAM;
1596 goto error_param;
1597 }
1598 }
1599 vsi = pf->vsi[vsi_id];
1600
1601 /* add new addresses to the list */
1602 for (i = 0; i < al->num_elements; i++) {
1603 struct i40e_mac_filter *f;
1604
1605 f = i40e_find_mac(vsi, al->list[i].addr, true, false);
1606 if (f) {
1607 if (i40e_is_vsi_in_vlan(vsi))
1608 f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
1609 true, false);
1610 else
1611 f = i40e_add_filter(vsi, al->list[i].addr, -1,
1612 true, false);
1613 }
1614
1615 if (!f) {
1616 dev_err(&pf->pdev->dev,
1617 "Unable to add VF MAC filter\n");
1618 aq_ret = I40E_ERR_PARAM;
1619 goto error_param;
1620 }
1621 }
1622
1623 /* program the updated filter list */
1624 if (i40e_sync_vsi_filters(vsi))
1625 dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
1626
1627error_param:
1628 /* send the response to the vf */
1629 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
1630 aq_ret);
1631}
1632
1633/**
1634 * i40e_vc_del_mac_addr_msg
1635 * @vf: pointer to the vf info
1636 * @msg: pointer to the msg buffer
1637 * @msglen: msg length
1638 *
1639 * remove guest mac address filter
1640 **/
1641static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1642{
1643 struct i40e_virtchnl_ether_addr_list *al =
1644 (struct i40e_virtchnl_ether_addr_list *)msg;
1645 struct i40e_pf *pf = vf->pf;
1646 struct i40e_vsi *vsi = NULL;
1647 u16 vsi_id = al->vsi_id;
1648 i40e_status aq_ret = 0;
1649 int i;
1650
1651 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1652 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1653 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1654 aq_ret = I40E_ERR_PARAM;
1655 goto error_param;
1656 }
1657 vsi = pf->vsi[vsi_id];
1658
1659 /* delete addresses from the list */
1660 for (i = 0; i < al->num_elements; i++)
1661 i40e_del_filter(vsi, al->list[i].addr,
1662 I40E_VLAN_ANY, true, false);
1663
1664 /* program the updated filter list */
1665 if (i40e_sync_vsi_filters(vsi))
1666 dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
1667
1668error_param:
1669 /* send the response to the vf */
1670 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
1671 aq_ret);
1672}
1673
/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * program guest vlan id
 **/
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vlan_filter_list *vfl =
	    (struct i40e_virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	/* VF must be active, privileged, and own the VSI it names */
	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* validate every requested id before programming any of them */
	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			dev_err(&pf->pdev->dev,
				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
			goto error_param;
		}
	}
	vsi = pf->vsi[vsi_id];
	/* a host-configured port VLAN (pvid) takes precedence; the VF is
	 * not allowed to manage its own VLANs in that case
	 */
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_vlan_stripping_enable(vsi);
	for (i = 0; i < vfl->num_elements; i++) {
		/* add new VLAN filter */
		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
		/* best effort: log and continue so the remaining ids are
		 * still programmed
		 */
		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to add VF vlan filter %d, error %d\n",
				vfl->vlan_id[i], ret);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
}
1727
1728/**
1729 * i40e_vc_remove_vlan_msg
1730 * @vf: pointer to the vf info
1731 * @msg: pointer to the msg buffer
1732 * @msglen: msg length
1733 *
1734 * remove programmed guest vlan id
1735 **/
1736static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1737{
1738 struct i40e_virtchnl_vlan_filter_list *vfl =
1739 (struct i40e_virtchnl_vlan_filter_list *)msg;
1740 struct i40e_pf *pf = vf->pf;
1741 struct i40e_vsi *vsi = NULL;
1742 u16 vsi_id = vfl->vsi_id;
1743 i40e_status aq_ret = 0;
1744 int i;
1745
1746 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1747 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1748 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1749 aq_ret = I40E_ERR_PARAM;
1750 goto error_param;
1751 }
1752
1753 for (i = 0; i < vfl->num_elements; i++) {
1754 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
1755 aq_ret = I40E_ERR_PARAM;
1756 goto error_param;
1757 }
1758 }
1759
1760 vsi = pf->vsi[vsi_id];
1761 if (vsi->info.pvid) {
1762 aq_ret = I40E_ERR_PARAM;
1763 goto error_param;
1764 }
1765
1766 for (i = 0; i < vfl->num_elements; i++) {
1767 int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
1768 if (ret)
1769 dev_err(&pf->pdev->dev,
1770 "Unable to delete VF vlan filter %d, error %d\n",
1771 vfl->vlan_id[i], ret);
1772 }
1773
1774error_param:
1775 /* send the response to the vf */
1776 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
1777}
1778
1779/**
1780 * i40e_vc_fcoe_msg
1781 * @vf: pointer to the vf info
1782 * @msg: pointer to the msg buffer
1783 * @msglen: msg length
1784 *
1785 * called from the vf for the fcoe msgs
1786 **/
1787static int i40e_vc_fcoe_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1788{
1789 i40e_status aq_ret = 0;
1790
1791 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1792 !test_bit(I40E_VF_STAT_FCOEENA, &vf->vf_states)) {
1793 aq_ret = I40E_ERR_PARAM;
1794 goto error_param;
1795 }
1796 aq_ret = I40E_ERR_NOT_IMPLEMENTED;
1797
1798error_param:
1799 /* send the response to the vf */
1800 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_FCOE, aq_ret);
1801}
1802
1803/**
1804 * i40e_vc_validate_vf_msg
1805 * @vf: pointer to the vf info
1806 * @msg: pointer to the msg buffer
1807 * @msglen: msg length
1808 * @msghndl: msg handle
1809 *
1810 * validate msg
1811 **/
1812static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
1813 u32 v_retval, u8 *msg, u16 msglen)
1814{
1815 bool err_msg_format = false;
1816 int valid_len;
1817
1818 /* Check if VF is disabled. */
1819 if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
1820 return I40E_ERR_PARAM;
1821
1822 /* Validate message length. */
1823 switch (v_opcode) {
1824 case I40E_VIRTCHNL_OP_VERSION:
1825 valid_len = sizeof(struct i40e_virtchnl_version_info);
1826 break;
1827 case I40E_VIRTCHNL_OP_RESET_VF:
1828 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
1829 valid_len = 0;
1830 break;
1831 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
1832 valid_len = sizeof(struct i40e_virtchnl_txq_info);
1833 break;
1834 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
1835 valid_len = sizeof(struct i40e_virtchnl_rxq_info);
1836 break;
1837 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1838 valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
1839 if (msglen >= valid_len) {
1840 struct i40e_virtchnl_vsi_queue_config_info *vqc =
1841 (struct i40e_virtchnl_vsi_queue_config_info *)msg;
1842 valid_len += (vqc->num_queue_pairs *
1843 sizeof(struct
1844 i40e_virtchnl_queue_pair_info));
1845 if (vqc->num_queue_pairs == 0)
1846 err_msg_format = true;
1847 }
1848 break;
1849 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
1850 valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
1851 if (msglen >= valid_len) {
1852 struct i40e_virtchnl_irq_map_info *vimi =
1853 (struct i40e_virtchnl_irq_map_info *)msg;
1854 valid_len += (vimi->num_vectors *
1855 sizeof(struct i40e_virtchnl_vector_map));
1856 if (vimi->num_vectors == 0)
1857 err_msg_format = true;
1858 }
1859 break;
1860 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
1861 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
1862 valid_len = sizeof(struct i40e_virtchnl_queue_select);
1863 break;
1864 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
1865 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
1866 valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
1867 if (msglen >= valid_len) {
1868 struct i40e_virtchnl_ether_addr_list *veal =
1869 (struct i40e_virtchnl_ether_addr_list *)msg;
1870 valid_len += veal->num_elements *
1871 sizeof(struct i40e_virtchnl_ether_addr);
1872 if (veal->num_elements == 0)
1873 err_msg_format = true;
1874 }
1875 break;
1876 case I40E_VIRTCHNL_OP_ADD_VLAN:
1877 case I40E_VIRTCHNL_OP_DEL_VLAN:
1878 valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
1879 if (msglen >= valid_len) {
1880 struct i40e_virtchnl_vlan_filter_list *vfl =
1881 (struct i40e_virtchnl_vlan_filter_list *)msg;
1882 valid_len += vfl->num_elements * sizeof(u16);
1883 if (vfl->num_elements == 0)
1884 err_msg_format = true;
1885 }
1886 break;
1887 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1888 valid_len = sizeof(struct i40e_virtchnl_promisc_info);
1889 break;
1890 case I40E_VIRTCHNL_OP_GET_STATS:
1891 valid_len = sizeof(struct i40e_virtchnl_queue_select);
1892 break;
1893 /* These are always errors coming from the VF. */
1894 case I40E_VIRTCHNL_OP_EVENT:
1895 case I40E_VIRTCHNL_OP_UNKNOWN:
1896 default:
1897 return -EPERM;
1898 break;
1899 }
1900 /* few more checks */
1901 if ((valid_len != msglen) || (err_msg_format)) {
1902 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
1903 return -EINVAL;
1904 } else {
1905 return 0;
1906 }
1907}
1908
/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the pf structure
 * @vf_id: source vf id
 * @v_opcode: operation code sent by the vf
 * @v_retval: return value carried in the message header
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the common aeq/arq handler to
 * process request from vf
 **/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
			   u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_vf *vf = &(pf->vf[vf_id]);
	struct i40e_hw *hw = &pf->hw;
	int ret;

	pf->vf_aq_requests++;
	/* perform basic checks on the msg */
	ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);

	if (ret) {
		dev_err(&pf->pdev->dev, "invalid message from vf %d\n", vf_id);
		return ret;
	}
	/* message is well formed; mark the VF active before dispatching */
	wr32(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
	/* dispatch to the opcode-specific handler; each handler sends its
	 * own response back to the vf
	 */
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		ret = i40e_vc_get_version_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		ret = i40e_vc_get_vf_resources_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
		ret = i40e_vc_reset_vf_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_FCOE:
		ret = i40e_vc_fcoe_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		/* validated length, but no handler for this opcode */
		dev_err(&pf->pdev->dev,
			"Unsupported opcode %d from vf %d\n", v_opcode, vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      I40E_ERR_NOT_IMPLEMENTED);
		break;
	}

	return ret;
}
1990
/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the pf structure
 *
 * called from the vlfr irq handler to
 * free up vf resources and state variables
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	u32 reg, reg_idx, bit_idx, vf_id;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;

	/* nothing to do unless the interrupt handler flagged a VFLR */
	if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		return 0;

	clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		/* GLGEN_VFLRSTAT is a bit array indexed by absolute VF id
		 * (vf_base_id + vf_id), 32 bits per register
		 */
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr vfs */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & (1 << bit_idx)) {
			/* clear the bit in GLGEN_VFLRSTAT */
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));

			if (i40e_reset_vf(vf, true))
				dev_err(&pf->pdev->dev,
					"Unable to reset the VF %d\n", vf_id);
			/* free up vf resources to destroy vsi state */
			i40e_free_vf_res(vf);

			/* allocate new vf resources with the default state */
			if (i40e_alloc_vf_res(vf))
				dev_err(&pf->pdev->dev,
					"Unable to allocate VF resources %d\n",
					vf_id);

			i40e_enable_vf_mappings(vf);
		}
	}

	/* re-enable vflr interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	return 0;
}
2042
2043/**
2044 * i40e_vc_vf_broadcast
2045 * @pf: pointer to the pf structure
2046 * @opcode: operation code
2047 * @retval: return value
2048 * @msg: pointer to the msg buffer
2049 * @msglen: msg length
2050 *
2051 * send a message to all VFs on a given PF
2052 **/
2053static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
2054 enum i40e_virtchnl_ops v_opcode,
2055 i40e_status v_retval, u8 *msg,
2056 u16 msglen)
2057{
2058 struct i40e_hw *hw = &pf->hw;
2059 struct i40e_vf *vf = pf->vf;
2060 int i;
2061
2062 for (i = 0; i < pf->num_alloc_vfs; i++) {
2063 /* Ignore return value on purpose - a given VF may fail, but
2064 * we need to keep going and send to all of them
2065 */
2066 i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
2067 msg, msglen, NULL);
2068 vf++;
2069 }
2070}
2071
2072/**
2073 * i40e_vc_notify_link_state
2074 * @pf: pointer to the pf structure
2075 *
2076 * send a link status message to all VFs on a given PF
2077 **/
2078void i40e_vc_notify_link_state(struct i40e_pf *pf)
2079{
2080 struct i40e_virtchnl_pf_event pfe;
2081
2082 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
2083 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
2084 pfe.event_data.link_event.link_status =
2085 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
2086 pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed;
2087
2088 i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
2089 (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
2090}
2091
2092/**
2093 * i40e_vc_notify_reset
2094 * @pf: pointer to the pf structure
2095 *
2096 * indicate a pending reset to all VFs on a given PF
2097 **/
2098void i40e_vc_notify_reset(struct i40e_pf *pf)
2099{
2100 struct i40e_virtchnl_pf_event pfe;
2101
2102 pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
2103 pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
2104 i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
2105 (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
2106}
2107
2108/**
2109 * i40e_vc_notify_vf_reset
2110 * @vf: pointer to the vf structure
2111 *
2112 * indicate a pending reset to the given VF
2113 **/
2114void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
2115{
2116 struct i40e_virtchnl_pf_event pfe;
2117
2118 pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
2119 pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
2120 i40e_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
2121 I40E_SUCCESS, (u8 *)&pfe,
2122 sizeof(struct i40e_virtchnl_pf_event), NULL);
2123}
2124
/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @mac: mac address
 *
 * program vf mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	/* switch to the VF's LAN VSI, not the PF netdev's own VSI */
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev,
			"Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	if (!is_valid_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid VF ethernet address\n");
		ret = -EINVAL;
		goto error_param;
	}

	/* delete the temporary mac address */
	i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);

	/* add the new mac address */
	f = i40e_add_filter(vsi, mac, 0, true, false);
	if (!f) {
		dev_err(&pf->pdev->dev,
			"Unable to add VF ucast filter\n");
		ret = -ENOMEM;
		goto error_param;
	}

	dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	/* update the cached default LAN address only after the hardware
	 * filter programming succeeded
	 */
	memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
	ret = 0;

error_param:
	return ret;
}
2192
2193/**
2194 * i40e_ndo_set_vf_port_vlan
2195 * @netdev: network interface device structure
2196 * @vf_id: vf identifier
2197 * @vlan_id: mac address
2198 * @qos: priority setting
2199 *
2200 * program vf vlan id and/or qos
2201 **/
2202int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
2203 int vf_id, u16 vlan_id, u8 qos)
2204{
2205 struct i40e_netdev_priv *np = netdev_priv(netdev);
2206 struct i40e_pf *pf = np->vsi->back;
2207 struct i40e_vsi *vsi;
2208 struct i40e_vf *vf;
2209 int ret = 0;
2210
2211 /* validate the request */
2212 if (vf_id >= pf->num_alloc_vfs) {
2213 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2214 ret = -EINVAL;
2215 goto error_pvid;
2216 }
2217
2218 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
2219 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
2220 ret = -EINVAL;
2221 goto error_pvid;
2222 }
2223
2224 vf = &(pf->vf[vf_id]);
2225 vsi = pf->vsi[vf->lan_vsi_index];
2226 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2227 dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
2228 ret = -EINVAL;
2229 goto error_pvid;
2230 }
2231
2232 if (vsi->info.pvid) {
2233 /* kill old VLAN */
2234 ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
2235 VLAN_VID_MASK));
2236 if (ret) {
2237 dev_info(&vsi->back->pdev->dev,
2238 "remove VLAN failed, ret=%d, aq_err=%d\n",
2239 ret, pf->hw.aq.asq_last_status);
2240 }
2241 }
2242 if (vlan_id || qos)
2243 ret = i40e_vsi_add_pvid(vsi,
2244 vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
2245 else
2246 i40e_vlan_stripping_disable(vsi);
2247
2248 if (vlan_id) {
2249 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
2250 vlan_id, qos, vf_id);
2251
2252 /* add new VLAN filter */
2253 ret = i40e_vsi_add_vlan(vsi, vlan_id);
2254 if (ret) {
2255 dev_info(&vsi->back->pdev->dev,
2256 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
2257 vsi->back->hw.aq.asq_last_status);
2258 goto error_pvid;
2259 }
2260 }
2261
2262 if (ret) {
2263 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
2264 goto error_pvid;
2265 }
2266 ret = 0;
2267
2268error_pvid:
2269 return ret;
2270}
2271
/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @tx_rate: tx rate
 *
 * configure vf tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
{
	/* Per-VF Tx rate limiting is not implemented in this driver yet. */
	return -EOPNOTSUPP;
}
2284
/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @ivi: vf configuration structure
 *
 * return vf configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	ivi->vf = vf_id;

	/* first entry of the list is the default ethernet address */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		memcpy(&ivi->mac, f->macaddr, I40E_ETH_LENGTH_OF_ADDRESS);
		break;	/* only the first (default) entry is reported */
	}

	/* tx rate limiting is not implemented (see i40e_ndo_set_vf_bw) */
	ivi->tx_rate = 0;
	/* report the port VLAN id and 802.1p priority packed in pvid */
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
		   I40E_VLAN_PRIORITY_SHIFT;
	ret = 0;

error_param:
	return ret;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
new file mode 100644
index 000000000000..360382cf3040
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -0,0 +1,120 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#ifndef _I40E_VIRTCHNL_PF_H_
29#define _I40E_VIRTCHNL_PF_H_
30
31#include "i40e.h"
32
33#define I40E_MAX_MACVLAN_FILTERS 256
34#define I40E_MAX_VLAN_FILTERS 256
35#define I40E_MAX_VLANID 4095
36
37#define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
38
39#define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED 3
40#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10
41
42#define I40E_VLAN_PRIORITY_SHIFT 12
43#define I40E_VLAN_MASK 0xFFF
44#define I40E_PRIORITY_MASK 0x7000
45
/* Various queue ctrls. NOTE(review): the *CHECK variants appear to verify
 * completion of the matching enable/disable request - inferred from the
 * names; confirm against the queue-control implementation (not visible
 * in this file).
 */
enum i40e_queue_ctrl {
	I40E_QUEUE_CTRL_UNKNOWN = 0,
	I40E_QUEUE_CTRL_ENABLE,
	I40E_QUEUE_CTRL_ENABLECHECK,
	I40E_QUEUE_CTRL_DISABLE,
	I40E_QUEUE_CTRL_DISABLECHECK,
	I40E_QUEUE_CTRL_FASTDISABLE,
	I40E_QUEUE_CTRL_FASTDISABLECHECK,
};
56
/* VF states: bit numbers for struct i40e_vf::vf_states, manipulated with
 * test_bit/set_bit (e.g. STAT_ACTIVE gates virtchnl request handling and
 * STAT_DISABLED rejects messages in i40e_vc_validate_vf_msg)
 */
enum i40e_vf_states {
	I40E_VF_STAT_INIT = 0,
	I40E_VF_STAT_ACTIVE,
	I40E_VF_STAT_FCOEENA,
	I40E_VF_STAT_DISABLED,
};
64
/* VF capabilities: bit numbers for struct i40e_vf::vf_caps; CAP_PRIVILEGE
 * is required for MAC/VLAN filter manipulation over virtchnl
 */
enum i40e_vf_capabilities {
	I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
	I40E_VIRTCHNL_VF_CAP_L2,
};
70
/* VF information structure */
struct i40e_vf {
	/* backpointer to the owning PF */
	struct i40e_pf *pf;

	/* vf id in the pf space */
	u16 vf_id;
	/* all vf vsis connect to the same parent */
	enum i40e_switch_element_types parent_type;

	/* vf Port Extender (PE) stag if used */
	u16 stag;

	/* default MAC addresses for the VF's LAN and FCoE functions */
	struct i40e_virtchnl_ether_addr default_lan_addr;
	struct i40e_virtchnl_ether_addr default_fcoe_addr;

	/* VSI indices - actual VSI pointers are maintained in the PF structure
	 * When assigned, these will be non-zero, because VSI 0 is always
	 * the main LAN VSI for the PF.
	 */
	u8 lan_vsi_index;	/* index into PF struct */
	u8 lan_vsi_id;		/* ID as used by firmware */

	u8 num_queue_pairs;	/* num of qps assigned to vf vsis */
	u64 num_mdd_events;	/* num of mdd events detected */
	u64 num_invalid_msgs;	/* num of malformed or invalid msgs detected */
	u64 num_valid_msgs;	/* num of valid msgs detected */

	/* bit arrays indexed by enum i40e_vf_capabilities and
	 * enum i40e_vf_states respectively; accessed via test_bit/set_bit
	 */
	unsigned long vf_caps;	/* vf's adv. capabilities */
	unsigned long vf_states;	/* vf's runtime states */
};
101
102void i40e_free_vfs(struct i40e_pf *pf);
103int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
104int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
105 u32 v_retval, u8 *msg, u16 msglen);
106int i40e_vc_process_vflr_event(struct i40e_pf *pf);
107int i40e_reset_vf(struct i40e_vf *vf, bool flr);
108void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
109
110/* vf configuration related iplink handlers */
111int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
112int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
113 int vf_id, u16 vlan_id, u8 qos);
114int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
115int i40e_ndo_get_vf_config(struct net_device *netdev,
116 int vf_id, struct ifla_vf_info *ivi);
117void i40e_vc_notify_link_state(struct i40e_pf *pf);
118void i40e_vc_notify_reset(struct i40e_pf *pf);
119
120#endif /* _I40E_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 270e65f21102..a36fa80968eb 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -996,14 +996,14 @@ static int korina_open(struct net_device *dev)
996 * that handles the Done Finished 996 * that handles the Done Finished
997 * Ovr and Und Events */ 997 * Ovr and Und Events */
998 ret = request_irq(lp->rx_irq, korina_rx_dma_interrupt, 998 ret = request_irq(lp->rx_irq, korina_rx_dma_interrupt,
999 IRQF_DISABLED, "Korina ethernet Rx", dev); 999 0, "Korina ethernet Rx", dev);
1000 if (ret < 0) { 1000 if (ret < 0) {
1001 printk(KERN_ERR "%s: unable to get Rx DMA IRQ %d\n", 1001 printk(KERN_ERR "%s: unable to get Rx DMA IRQ %d\n",
1002 dev->name, lp->rx_irq); 1002 dev->name, lp->rx_irq);
1003 goto err_release; 1003 goto err_release;
1004 } 1004 }
1005 ret = request_irq(lp->tx_irq, korina_tx_dma_interrupt, 1005 ret = request_irq(lp->tx_irq, korina_tx_dma_interrupt,
1006 IRQF_DISABLED, "Korina ethernet Tx", dev); 1006 0, "Korina ethernet Tx", dev);
1007 if (ret < 0) { 1007 if (ret < 0) {
1008 printk(KERN_ERR "%s: unable to get Tx DMA IRQ %d\n", 1008 printk(KERN_ERR "%s: unable to get Tx DMA IRQ %d\n",
1009 dev->name, lp->tx_irq); 1009 dev->name, lp->tx_irq);
@@ -1012,7 +1012,7 @@ static int korina_open(struct net_device *dev)
1012 1012
1013 /* Install handler for overrun error. */ 1013 /* Install handler for overrun error. */
1014 ret = request_irq(lp->ovr_irq, korina_ovr_interrupt, 1014 ret = request_irq(lp->ovr_irq, korina_ovr_interrupt,
1015 IRQF_DISABLED, "Ethernet Overflow", dev); 1015 0, "Ethernet Overflow", dev);
1016 if (ret < 0) { 1016 if (ret < 0) {
1017 printk(KERN_ERR "%s: unable to get OVR IRQ %d\n", 1017 printk(KERN_ERR "%s: unable to get OVR IRQ %d\n",
1018 dev->name, lp->ovr_irq); 1018 dev->name, lp->ovr_irq);
@@ -1021,7 +1021,7 @@ static int korina_open(struct net_device *dev)
1021 1021
1022 /* Install handler for underflow error. */ 1022 /* Install handler for underflow error. */
1023 ret = request_irq(lp->und_irq, korina_und_interrupt, 1023 ret = request_irq(lp->und_irq, korina_und_interrupt,
1024 IRQF_DISABLED, "Ethernet Underflow", dev); 1024 0, "Ethernet Underflow", dev);
1025 if (ret < 0) { 1025 if (ret < 0) {
1026 printk(KERN_ERR "%s: unable to get UND IRQ %d\n", 1026 printk(KERN_ERR "%s: unable to get UND IRQ %d\n",
1027 dev->name, lp->und_irq); 1027 dev->name, lp->und_irq);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 88349b8fa39a..81bf83604c4f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -430,7 +430,7 @@ struct qlcnic_hardware_context {
430 u8 diag_test; 430 u8 diag_test;
431 u8 num_msix; 431 u8 num_msix;
432 u8 nic_mode; 432 u8 nic_mode;
433 char diag_cnt; 433 int diag_cnt;
434 434
435 u16 max_uc_count; 435 u16 max_uc_count;
436 u16 port_type; 436 u16 port_type;
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 949076f4e6ae..13e6fff8ca23 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -1734,7 +1734,8 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
1734 unsigned int data_len = skb->len - sh_len; 1734 unsigned int data_len = skb->len - sh_len;
1735 unsigned char *data = skb->data; 1735 unsigned char *data = skb->data;
1736 unsigned int ih_off, th_off, p_len; 1736 unsigned int ih_off, th_off, p_len;
1737 unsigned int isum_seed, tsum_seed, id, seq; 1737 unsigned int isum_seed, tsum_seed, seq;
1738 unsigned int uninitialized_var(id);
1738 int is_ipv6; 1739 int is_ipv6;
1739 long f_id = -1; /* id of the current fragment */ 1740 long f_id = -1; /* id of the current fragment */
1740 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ 1741 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
@@ -1781,7 +1782,7 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
1781 } else { 1782 } else {
1782 ih = (struct iphdr *)(buf + ih_off); 1783 ih = (struct iphdr *)(buf + ih_off);
1783 ih->tot_len = htons(sh_len + p_len - ih_off); 1784 ih->tot_len = htons(sh_len + p_len - ih_off);
1784 ih->id = htons(id); 1785 ih->id = htons(id++);
1785 ih->check = csum_long(isum_seed + ih->tot_len + 1786 ih->check = csum_long(isum_seed + ih->tot_len +
1786 ih->id) ^ 0xffff; 1787 ih->id) ^ 0xffff;
1787 } 1788 }
@@ -1818,7 +1819,6 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
1818 slot++; 1819 slot++;
1819 } 1820 }
1820 1821
1821 id++;
1822 seq += p_len; 1822 seq += p_len;
1823 1823
1824 /* The last segment may be less than gso_size. */ 1824 /* The last segment may be less than gso_size. */
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 510b9c8d23a9..31bcb98ef356 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1488,7 +1488,7 @@ static void
1488toshoboe_close (struct pci_dev *pci_dev) 1488toshoboe_close (struct pci_dev *pci_dev)
1489{ 1489{
1490 int i; 1490 int i;
1491 struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev); 1491 struct toshoboe_cb *self = pci_get_drvdata(pci_dev);
1492 1492
1493 IRDA_DEBUG (4, "%s()\n", __func__); 1493 IRDA_DEBUG (4, "%s()\n", __func__);
1494 1494
@@ -1696,7 +1696,7 @@ freeself:
1696static int 1696static int
1697toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap) 1697toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap)
1698{ 1698{
1699 struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev); 1699 struct toshoboe_cb *self = pci_get_drvdata(pci_dev);
1700 unsigned long flags; 1700 unsigned long flags;
1701 int i = 10; 1701 int i = 10;
1702 1702
@@ -1725,7 +1725,7 @@ toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap)
1725static int 1725static int
1726toshoboe_wakeup (struct pci_dev *pci_dev) 1726toshoboe_wakeup (struct pci_dev *pci_dev)
1727{ 1727{
1728 struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev); 1728 struct toshoboe_cb *self = pci_get_drvdata(pci_dev);
1729 unsigned long flags; 1729 unsigned long flags;
1730 1730
1731 IRDA_DEBUG (4, "%s()\n", __func__); 1731 IRDA_DEBUG (4, "%s()\n", __func__);
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 5f4758492e4c..c5bd58b4d8a8 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -543,7 +543,7 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
543 int crclen, len = 0; 543 int crclen, len = 0;
544 struct sk_buff *skb; 544 struct sk_buff *skb;
545 int ret = 0; 545 int ret = 0;
546 struct net_device *ndev = (struct net_device *)pci_get_drvdata(r->pdev); 546 struct net_device *ndev = pci_get_drvdata(r->pdev);
547 vlsi_irda_dev_t *idev = netdev_priv(ndev); 547 vlsi_irda_dev_t *idev = netdev_priv(ndev);
548 548
549 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir); 549 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 64dfaa303dcc..9bf46bd19b87 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -118,8 +118,6 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
118 const struct ethhdr *eth, bool local) 118 const struct ethhdr *eth, bool local)
119{ 119{
120 struct net_device *dev = vlan->dev; 120 struct net_device *dev = vlan->dev;
121 if (!skb)
122 return NET_RX_DROP;
123 121
124 if (local) 122 if (local)
125 return vlan->forward(dev, skb); 123 return vlan->forward(dev, skb);
@@ -171,9 +169,13 @@ static void macvlan_broadcast(struct sk_buff *skb,
171 hash = mc_hash(vlan, eth->h_dest); 169 hash = mc_hash(vlan, eth->h_dest);
172 if (!test_bit(hash, vlan->mc_filter)) 170 if (!test_bit(hash, vlan->mc_filter))
173 continue; 171 continue;
172
173 err = NET_RX_DROP;
174 nskb = skb_clone(skb, GFP_ATOMIC); 174 nskb = skb_clone(skb, GFP_ATOMIC);
175 err = macvlan_broadcast_one(nskb, vlan, eth, 175 if (likely(nskb))
176 mode == MACVLAN_MODE_BRIDGE); 176 err = macvlan_broadcast_one(
177 nskb, vlan, eth,
178 mode == MACVLAN_MODE_BRIDGE);
177 macvlan_count_rx(vlan, skb->len + ETH_HLEN, 179 macvlan_count_rx(vlan, skb->len + ETH_HLEN,
178 err == NET_RX_SUCCESS, 1); 180 err == NET_RX_SUCCESS, 1);
179 } 181 }
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3a8131582e75..6312332afeba 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -518,6 +518,135 @@ static const struct usb_device_id products[] = {
518 518
519 /* 3. Combined interface devices matching on interface number */ 519 /* 3. Combined interface devices matching on interface number */
520 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ 520 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
521 {QMI_FIXED_INTF(0x05c6, 0x7000, 0)},
522 {QMI_FIXED_INTF(0x05c6, 0x7001, 1)},
523 {QMI_FIXED_INTF(0x05c6, 0x7002, 1)},
524 {QMI_FIXED_INTF(0x05c6, 0x7101, 1)},
525 {QMI_FIXED_INTF(0x05c6, 0x7101, 2)},
526 {QMI_FIXED_INTF(0x05c6, 0x7101, 3)},
527 {QMI_FIXED_INTF(0x05c6, 0x7102, 1)},
528 {QMI_FIXED_INTF(0x05c6, 0x7102, 2)},
529 {QMI_FIXED_INTF(0x05c6, 0x7102, 3)},
530 {QMI_FIXED_INTF(0x05c6, 0x8000, 7)},
531 {QMI_FIXED_INTF(0x05c6, 0x8001, 6)},
532 {QMI_FIXED_INTF(0x05c6, 0x9000, 4)},
533 {QMI_FIXED_INTF(0x05c6, 0x9003, 4)},
534 {QMI_FIXED_INTF(0x05c6, 0x9005, 2)},
535 {QMI_FIXED_INTF(0x05c6, 0x900a, 4)},
536 {QMI_FIXED_INTF(0x05c6, 0x900b, 2)},
537 {QMI_FIXED_INTF(0x05c6, 0x900c, 4)},
538 {QMI_FIXED_INTF(0x05c6, 0x900c, 5)},
539 {QMI_FIXED_INTF(0x05c6, 0x900c, 6)},
540 {QMI_FIXED_INTF(0x05c6, 0x900d, 5)},
541 {QMI_FIXED_INTF(0x05c6, 0x900f, 3)},
542 {QMI_FIXED_INTF(0x05c6, 0x900f, 4)},
543 {QMI_FIXED_INTF(0x05c6, 0x900f, 5)},
544 {QMI_FIXED_INTF(0x05c6, 0x9010, 4)},
545 {QMI_FIXED_INTF(0x05c6, 0x9010, 5)},
546 {QMI_FIXED_INTF(0x05c6, 0x9011, 3)},
547 {QMI_FIXED_INTF(0x05c6, 0x9011, 4)},
548 {QMI_FIXED_INTF(0x05c6, 0x9021, 1)},
549 {QMI_FIXED_INTF(0x05c6, 0x9022, 2)},
550 {QMI_FIXED_INTF(0x05c6, 0x9025, 4)}, /* Alcatel-sbell ASB TL131 TDD LTE (China Mobile) */
551 {QMI_FIXED_INTF(0x05c6, 0x9026, 3)},
552 {QMI_FIXED_INTF(0x05c6, 0x902e, 5)},
553 {QMI_FIXED_INTF(0x05c6, 0x9031, 5)},
554 {QMI_FIXED_INTF(0x05c6, 0x9032, 4)},
555 {QMI_FIXED_INTF(0x05c6, 0x9033, 3)},
556 {QMI_FIXED_INTF(0x05c6, 0x9033, 4)},
557 {QMI_FIXED_INTF(0x05c6, 0x9033, 5)},
558 {QMI_FIXED_INTF(0x05c6, 0x9033, 6)},
559 {QMI_FIXED_INTF(0x05c6, 0x9034, 3)},
560 {QMI_FIXED_INTF(0x05c6, 0x9034, 4)},
561 {QMI_FIXED_INTF(0x05c6, 0x9034, 5)},
562 {QMI_FIXED_INTF(0x05c6, 0x9034, 6)},
563 {QMI_FIXED_INTF(0x05c6, 0x9034, 7)},
564 {QMI_FIXED_INTF(0x05c6, 0x9035, 4)},
565 {QMI_FIXED_INTF(0x05c6, 0x9036, 3)},
566 {QMI_FIXED_INTF(0x05c6, 0x9037, 5)},
567 {QMI_FIXED_INTF(0x05c6, 0x9038, 4)},
568 {QMI_FIXED_INTF(0x05c6, 0x903b, 7)},
569 {QMI_FIXED_INTF(0x05c6, 0x903c, 6)},
570 {QMI_FIXED_INTF(0x05c6, 0x903d, 6)},
571 {QMI_FIXED_INTF(0x05c6, 0x903e, 5)},
572 {QMI_FIXED_INTF(0x05c6, 0x9043, 3)},
573 {QMI_FIXED_INTF(0x05c6, 0x9046, 3)},
574 {QMI_FIXED_INTF(0x05c6, 0x9046, 4)},
575 {QMI_FIXED_INTF(0x05c6, 0x9046, 5)},
576 {QMI_FIXED_INTF(0x05c6, 0x9047, 2)},
577 {QMI_FIXED_INTF(0x05c6, 0x9047, 3)},
578 {QMI_FIXED_INTF(0x05c6, 0x9047, 4)},
579 {QMI_FIXED_INTF(0x05c6, 0x9048, 4)},
580 {QMI_FIXED_INTF(0x05c6, 0x9048, 5)},
581 {QMI_FIXED_INTF(0x05c6, 0x9048, 6)},
582 {QMI_FIXED_INTF(0x05c6, 0x9048, 7)},
583 {QMI_FIXED_INTF(0x05c6, 0x9048, 8)},
584 {QMI_FIXED_INTF(0x05c6, 0x904c, 5)},
585 {QMI_FIXED_INTF(0x05c6, 0x904c, 6)},
586 {QMI_FIXED_INTF(0x05c6, 0x904c, 7)},
587 {QMI_FIXED_INTF(0x05c6, 0x904c, 8)},
588 {QMI_FIXED_INTF(0x05c6, 0x9050, 3)},
589 {QMI_FIXED_INTF(0x05c6, 0x9052, 4)},
590 {QMI_FIXED_INTF(0x05c6, 0x9053, 6)},
591 {QMI_FIXED_INTF(0x05c6, 0x9053, 7)},
592 {QMI_FIXED_INTF(0x05c6, 0x9054, 5)},
593 {QMI_FIXED_INTF(0x05c6, 0x9054, 6)},
594 {QMI_FIXED_INTF(0x05c6, 0x9055, 3)},
595 {QMI_FIXED_INTF(0x05c6, 0x9055, 4)},
596 {QMI_FIXED_INTF(0x05c6, 0x9055, 5)},
597 {QMI_FIXED_INTF(0x05c6, 0x9055, 6)},
598 {QMI_FIXED_INTF(0x05c6, 0x9055, 7)},
599 {QMI_FIXED_INTF(0x05c6, 0x9056, 3)},
600 {QMI_FIXED_INTF(0x05c6, 0x9062, 2)},
601 {QMI_FIXED_INTF(0x05c6, 0x9062, 3)},
602 {QMI_FIXED_INTF(0x05c6, 0x9062, 4)},
603 {QMI_FIXED_INTF(0x05c6, 0x9062, 5)},
604 {QMI_FIXED_INTF(0x05c6, 0x9062, 6)},
605 {QMI_FIXED_INTF(0x05c6, 0x9062, 7)},
606 {QMI_FIXED_INTF(0x05c6, 0x9062, 8)},
607 {QMI_FIXED_INTF(0x05c6, 0x9062, 9)},
608 {QMI_FIXED_INTF(0x05c6, 0x9064, 3)},
609 {QMI_FIXED_INTF(0x05c6, 0x9065, 6)},
610 {QMI_FIXED_INTF(0x05c6, 0x9065, 7)},
611 {QMI_FIXED_INTF(0x05c6, 0x9066, 5)},
612 {QMI_FIXED_INTF(0x05c6, 0x9066, 6)},
613 {QMI_FIXED_INTF(0x05c6, 0x9067, 1)},
614 {QMI_FIXED_INTF(0x05c6, 0x9068, 2)},
615 {QMI_FIXED_INTF(0x05c6, 0x9068, 3)},
616 {QMI_FIXED_INTF(0x05c6, 0x9068, 4)},
617 {QMI_FIXED_INTF(0x05c6, 0x9068, 5)},
618 {QMI_FIXED_INTF(0x05c6, 0x9068, 6)},
619 {QMI_FIXED_INTF(0x05c6, 0x9068, 7)},
620 {QMI_FIXED_INTF(0x05c6, 0x9069, 5)},
621 {QMI_FIXED_INTF(0x05c6, 0x9069, 6)},
622 {QMI_FIXED_INTF(0x05c6, 0x9069, 7)},
623 {QMI_FIXED_INTF(0x05c6, 0x9069, 8)},
624 {QMI_FIXED_INTF(0x05c6, 0x9070, 4)},
625 {QMI_FIXED_INTF(0x05c6, 0x9070, 5)},
626 {QMI_FIXED_INTF(0x05c6, 0x9075, 5)},
627 {QMI_FIXED_INTF(0x05c6, 0x9076, 4)},
628 {QMI_FIXED_INTF(0x05c6, 0x9076, 5)},
629 {QMI_FIXED_INTF(0x05c6, 0x9076, 6)},
630 {QMI_FIXED_INTF(0x05c6, 0x9076, 7)},
631 {QMI_FIXED_INTF(0x05c6, 0x9076, 8)},
632 {QMI_FIXED_INTF(0x05c6, 0x9077, 3)},
633 {QMI_FIXED_INTF(0x05c6, 0x9077, 4)},
634 {QMI_FIXED_INTF(0x05c6, 0x9077, 5)},
635 {QMI_FIXED_INTF(0x05c6, 0x9077, 6)},
636 {QMI_FIXED_INTF(0x05c6, 0x9078, 3)},
637 {QMI_FIXED_INTF(0x05c6, 0x9079, 4)},
638 {QMI_FIXED_INTF(0x05c6, 0x9079, 5)},
639 {QMI_FIXED_INTF(0x05c6, 0x9079, 6)},
640 {QMI_FIXED_INTF(0x05c6, 0x9079, 7)},
641 {QMI_FIXED_INTF(0x05c6, 0x9079, 8)},
642 {QMI_FIXED_INTF(0x05c6, 0x9080, 5)},
643 {QMI_FIXED_INTF(0x05c6, 0x9080, 6)},
644 {QMI_FIXED_INTF(0x05c6, 0x9080, 7)},
645 {QMI_FIXED_INTF(0x05c6, 0x9080, 8)},
646 {QMI_FIXED_INTF(0x05c6, 0x9083, 3)},
647 {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
648 {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
649 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
521 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ 650 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
522 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ 651 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
523 {QMI_FIXED_INTF(0x19d2, 0x0002, 1)}, 652 {QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
@@ -612,7 +741,6 @@ static const struct usb_device_id products[] = {
612 {QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */ 741 {QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
613 {QMI_GOBI_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */ 742 {QMI_GOBI_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
614 {QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */ 743 {QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */
615 {QMI_GOBI_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */
616 {QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */ 744 {QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
617 {QMI_GOBI_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */ 745 {QMI_GOBI_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */
618 {QMI_GOBI_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */ 746 {QMI_GOBI_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 3c4211f0bed6..ea0cc26ab70e 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -190,7 +190,9 @@ static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, cons
190} 190}
191 191
192extern int ndisc_init(void); 192extern int ndisc_init(void);
193extern int ndisc_late_init(void);
193 194
195extern void ndisc_late_cleanup(void);
194extern void ndisc_cleanup(void); 196extern void ndisc_cleanup(void);
195 197
196extern int ndisc_rcv(struct sk_buff *skb); 198extern int ndisc_rcv(struct sk_buff *skb);
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 0ff42f029ace..1929af87b260 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -352,7 +352,7 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
352 352
353 if (queue_index != new_index && sk && 353 if (queue_index != new_index && sk &&
354 rcu_access_pointer(sk->sk_dst_cache)) 354 rcu_access_pointer(sk->sk_dst_cache))
355 sk_tx_queue_set(sk, queue_index); 355 sk_tx_queue_set(sk, new_index);
356 356
357 queue_index = new_index; 357 queue_index = new_index;
358 } 358 }
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 136fe55c1a47..7c96100b021e 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -915,6 +915,9 @@ static int __init inet6_init(void)
915 err = ip6_route_init(); 915 err = ip6_route_init();
916 if (err) 916 if (err)
917 goto ip6_route_fail; 917 goto ip6_route_fail;
918 err = ndisc_late_init();
919 if (err)
920 goto ndisc_late_fail;
918 err = ip6_flowlabel_init(); 921 err = ip6_flowlabel_init();
919 if (err) 922 if (err)
920 goto ip6_flowlabel_fail; 923 goto ip6_flowlabel_fail;
@@ -981,6 +984,8 @@ ipv6_exthdrs_fail:
981addrconf_fail: 984addrconf_fail:
982 ip6_flowlabel_cleanup(); 985 ip6_flowlabel_cleanup();
983ip6_flowlabel_fail: 986ip6_flowlabel_fail:
987 ndisc_late_cleanup();
988ndisc_late_fail:
984 ip6_route_cleanup(); 989 ip6_route_cleanup();
985ip6_route_fail: 990ip6_route_fail:
986#ifdef CONFIG_PROC_FS 991#ifdef CONFIG_PROC_FS
@@ -1043,6 +1048,7 @@ static void __exit inet6_exit(void)
1043 ipv6_exthdrs_exit(); 1048 ipv6_exthdrs_exit();
1044 addrconf_cleanup(); 1049 addrconf_cleanup();
1045 ip6_flowlabel_cleanup(); 1050 ip6_flowlabel_cleanup();
1051 ndisc_late_cleanup();
1046 ip6_route_cleanup(); 1052 ip6_route_cleanup();
1047#ifdef CONFIG_PROC_FS 1053#ifdef CONFIG_PROC_FS
1048 1054
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 07a7d65a7cb6..8d67900aa003 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -162,12 +162,6 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb)
162 off += optlen; 162 off += optlen;
163 len -= optlen; 163 len -= optlen;
164 } 164 }
165 /* This case will not be caught by above check since its padding
166 * length is smaller than 7:
167 * 1 byte NH + 1 byte Length + 6 bytes Padding
168 */
169 if ((padlen == 6) && ((off - skb_network_header_len(skb)) == 8))
170 goto bad;
171 165
172 if (len == 0) 166 if (len == 0)
173 return true; 167 return true;
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index a6c58ce43d34..e27591635f92 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -138,8 +138,8 @@ static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg
138 return false; 138 return false;
139 139
140suppress_route: 140suppress_route:
141 ip6_rt_put(rt); 141 ip6_rt_put(rt);
142 return true; 142 return true;
143} 143}
144 144
145static int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) 145static int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 73db48eba1c4..5bec666aba61 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -825,9 +825,9 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
825 fn = fib6_add_1(root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen, 825 fn = fib6_add_1(root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
826 offsetof(struct rt6_info, rt6i_dst), allow_create, 826 offsetof(struct rt6_info, rt6i_dst), allow_create,
827 replace_required); 827 replace_required);
828
829 if (IS_ERR(fn)) { 828 if (IS_ERR(fn)) {
830 err = PTR_ERR(fn); 829 err = PTR_ERR(fn);
830 fn = NULL;
831 goto out; 831 goto out;
832 } 832 }
833 833
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 12179457b2cd..f8a55ff1971b 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1727,24 +1727,28 @@ int __init ndisc_init(void)
1727 if (err) 1727 if (err)
1728 goto out_unregister_pernet; 1728 goto out_unregister_pernet;
1729#endif 1729#endif
1730 err = register_netdevice_notifier(&ndisc_netdev_notifier);
1731 if (err)
1732 goto out_unregister_sysctl;
1733out: 1730out:
1734 return err; 1731 return err;
1735 1732
1736out_unregister_sysctl:
1737#ifdef CONFIG_SYSCTL 1733#ifdef CONFIG_SYSCTL
1738 neigh_sysctl_unregister(&nd_tbl.parms);
1739out_unregister_pernet: 1734out_unregister_pernet:
1740#endif
1741 unregister_pernet_subsys(&ndisc_net_ops); 1735 unregister_pernet_subsys(&ndisc_net_ops);
1742 goto out; 1736 goto out;
1737#endif
1743} 1738}
1744 1739
1745void ndisc_cleanup(void) 1740int __init ndisc_late_init(void)
1741{
1742 return register_netdevice_notifier(&ndisc_netdev_notifier);
1743}
1744
1745void ndisc_late_cleanup(void)
1746{ 1746{
1747 unregister_netdevice_notifier(&ndisc_netdev_notifier); 1747 unregister_netdevice_notifier(&ndisc_netdev_notifier);
1748}
1749
1750void ndisc_cleanup(void)
1751{
1748#ifdef CONFIG_SYSCTL 1752#ifdef CONFIG_SYSCTL
1749 neigh_sysctl_unregister(&nd_tbl.parms); 1753 neigh_sysctl_unregister(&nd_tbl.parms);
1750#endif 1754#endif
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index fb36f8565161..410db90db73d 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -1178,6 +1178,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
1178 if (type > OVS_KEY_ATTR_MAX) { 1178 if (type > OVS_KEY_ATTR_MAX) {
1179 OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n", 1179 OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n",
1180 type, OVS_KEY_ATTR_MAX); 1180 type, OVS_KEY_ATTR_MAX);
1181 return -EINVAL;
1181 } 1182 }
1182 1183
1183 if (attrs & (1 << type)) { 1184 if (attrs & (1 << type)) {
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index c2178b15ca6e..863846cc5513 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1495,7 +1495,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1495 psched_ratecfg_precompute(&cl->ceil, &hopt->ceil); 1495 psched_ratecfg_precompute(&cl->ceil, &hopt->ceil);
1496 1496
1497 cl->buffer = PSCHED_TICKS2NS(hopt->buffer); 1497 cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
1498 cl->cbuffer = PSCHED_TICKS2NS(hopt->buffer); 1498 cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
1499 1499
1500 sch_tree_unlock(sch); 1500 sch_tree_unlock(sch);
1501 1501
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index d5d5882a2891..911b71b26b0e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -806,6 +806,9 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
806 goto skip_mkasconf; 806 goto skip_mkasconf;
807 } 807 }
808 808
809 if (laddr == NULL)
810 return -EINVAL;
811
809 /* We do not need RCU protection throughout this loop 812 /* We do not need RCU protection throughout this loop
810 * because this is done under a socket lock from the 813 * because this is done under a socket lock from the
811 * setsockopt call. 814 * setsockopt call.
@@ -6176,7 +6179,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
6176 /* Is there any exceptional events? */ 6179 /* Is there any exceptional events? */
6177 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 6180 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
6178 mask |= POLLERR | 6181 mask |= POLLERR |
6179 sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0; 6182 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
6180 if (sk->sk_shutdown & RCV_SHUTDOWN) 6183 if (sk->sk_shutdown & RCV_SHUTDOWN)
6181 mask |= POLLRDHUP | POLLIN | POLLRDNORM; 6184 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
6182 if (sk->sk_shutdown == SHUTDOWN_MASK) 6185 if (sk->sk_shutdown == SHUTDOWN_MASK)