author     David S. Miller <davem@davemloft.net>  2015-05-27 14:19:44 -0400
committer  David S. Miller <davem@davemloft.net>  2015-05-27 14:19:44 -0400
commit     516782aca2e71835c6498006ad1cac6d96665c7c (patch)
tree       02debc8fb06d2d5f94b9c71eb9bc093a75f86e76
parent     bde28bc6ad0c575f8b4eebe8cd27e36d6c3b09c6 (diff)
parent     4863dea3fab01734768c9f7fc2c1590a8f1f6266 (diff)
Merge branch 'thunderx'
Aleksey Makarov says:
====================
Adding support for Cavium ThunderX network controller
This patchset adds support for the Cavium ThunderX network controller.
changes in v6:
* unused preprocessor symbols were removed
* reduced the number of atomic operations in SQ maintenance
* added support for TCP segmentation at the driver level
* reset the RBDR if the FIFO state is FAIL
* fixed an issue with the link state mailbox message
changes in v5:
* __packed attributes were removed; we now rely on the C language ABI
* nic_dbg() -> netdev_dbg()
* fixes for a typo, constant spelling, and BIT_ULL usage
* use print_hex_dump()
* unnecessary conditions in a long if() chain were removed
changes in v4:
* the patch "pci: Add Cavium PCI vendor id" was attributed correctly
* a note that the Cavium vendor ID is used in many drivers was added
* the license comments now match MODULE_LICENSE
* a comment explaining usage of writeq_relaxed()/readq_relaxed() was added
changes in v3:
* code cleanup
* issues discovered by reviewers were addressed
changes in v2:
* non-generic module parameters removed
* ethtool support added (nicvf_set_rxnfc())
v5: https://lkml.kernel.org/g/<1432344498-17131-1-git-send-email-aleksey.makarov@caviumnetworks.com>
v4: https://lkml.kernel.org/g/<1432000757-28700-1-git-send-email-aleksey.makarov@auriga.com>
v3: https://lkml.kernel.org/g/<1431747401-20847-1-git-send-email-aleksey.makarov@auriga.com>
v2: https://lkml.kernel.org/g/<1415596445-10061-1-git-send-email-rric@kernel.org>
v1: https://lkml.kernel.org/g/<20141030165434.GW20170@rric.localhost>
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  MAINTAINERS                                              7
-rw-r--r--  drivers/net/ethernet/Kconfig                             1
-rw-r--r--  drivers/net/ethernet/Makefile                            1
-rw-r--r--  drivers/net/ethernet/cavium/Kconfig                     40
-rw-r--r--  drivers/net/ethernet/cavium/Makefile                     5
-rw-r--r--  drivers/net/ethernet/cavium/thunder/Makefile            11
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h              414
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c         940
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_reg.h          213
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c    601
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c      1332
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c    1544
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h     381
-rw-r--r--  drivers/net/ethernet/cavium/thunder/q_struct.h         701
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c      966
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h      223
-rw-r--r--  include/linux/pci_ids.h                                  2
17 files changed, 7382 insertions, 0 deletions
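
For anyone who wants to build and try this series, the three new drivers are gated by the Kconfig symbols introduced in drivers/net/ethernet/cavium/Kconfig below. A config fragment along these lines should enable them; this is an illustrative sketch, not part of the submitted patches:

    # Illustrative kernel config fragment (not part of this patch set).
    # Symbol names come from the new drivers/net/ethernet/cavium/Kconfig;
    # PCI support is required, and =m builds the PF, VF and BGX drivers
    # as the nicpf, nicvf and thunder_bgx modules named in its Makefile.
    CONFIG_PCI=y
    CONFIG_NET_VENDOR_CAVIUM=y
    CONFIG_THUNDER_NIC_PF=m
    CONFIG_THUNDER_NIC_VF=m
    CONFIG_THUNDER_NIC_BGX=m
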
diff --git a/MAINTAINERS b/MAINTAINERS
index df106f87a3ba..d1b1d2294b6a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -921,6 +921,13 @@ M: Krzysztof Halasa <khalasa@piap.pl>
 S: Maintained
 F: arch/arm/mach-cns3xxx/
 
+ARM/CAVIUM THUNDER NETWORK DRIVER
+M: Sunil Goutham <sgoutham@cavium.com>
+M: Robert Richter <rric@kernel.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Supported
+F: drivers/net/ethernet/cavium/
+
 ARM/CIRRUS LOGIC CLPS711X ARM ARCHITECTURE
 M: Alexander Shiyan <shc_work@mail.ru>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index eadcb053807e..9a8308553520 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -34,6 +34,7 @@ source "drivers/net/ethernet/adi/Kconfig"
 source "drivers/net/ethernet/broadcom/Kconfig"
 source "drivers/net/ethernet/brocade/Kconfig"
 source "drivers/net/ethernet/calxeda/Kconfig"
+source "drivers/net/ethernet/cavium/Kconfig"
 source "drivers/net/ethernet/chelsio/Kconfig"
 source "drivers/net/ethernet/cirrus/Kconfig"
 source "drivers/net/ethernet/cisco/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 1367afcd0a8b..4395d99115a0 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_NET_BFIN) += adi/
 obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
 obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
 obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += cavium/
 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
 obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
 obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
new file mode 100644
index 000000000000..6365fb4242be
--- /dev/null
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -0,0 +1,40 @@
+#
+# Cavium ethernet device configuration
+#
+
+config NET_VENDOR_CAVIUM
+	tristate "Cavium ethernet drivers"
+	depends on PCI
+	---help---
+	  Enable support for the Cavium ThunderX Network Interface
+	  Controller (NIC). The NIC provides the controller and DMA
+	  engines to move network traffic to/from the memory. The NIC
+	  works closely with TNS, BGX and SerDes to implement the
+	  functions replacing and virtualizing those of a typical
+	  standalone PCIe NIC chip.
+
+	  If you have a Cavium Thunder board, say Y.
+
+if NET_VENDOR_CAVIUM
+
+config THUNDER_NIC_PF
+	tristate "Thunder Physical function driver"
+	default NET_VENDOR_CAVIUM
+	select THUNDER_NIC_BGX
+	---help---
+	  This driver supports Thunder's NIC physical function.
+
+config THUNDER_NIC_VF
+	tristate "Thunder Virtual function driver"
+	default NET_VENDOR_CAVIUM
+	---help---
+	  This driver supports Thunder's NIC virtual function
+
+config THUNDER_NIC_BGX
+	tristate "Thunder MAC interface driver (BGX)"
+	default NET_VENDOR_CAVIUM
+	---help---
+	  This driver supports programming and controlling of MAC
+	  interface from NIC physical function driver.
+
+endif # NET_VENDOR_CAVIUM
diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile
new file mode 100644
index 000000000000..7aac4780d050
--- /dev/null
+++ b/drivers/net/ethernet/cavium/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Cavium ethernet device drivers.
+#
+
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/
diff --git a/drivers/net/ethernet/cavium/thunder/Makefile b/drivers/net/ethernet/cavium/thunder/Makefile
new file mode 100644
index 000000000000..5c4615ccaa14
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for Cavium's Thunder ethernet device
+#
+
+obj-$(CONFIG_THUNDER_NIC_BGX) += thunder_bgx.o
+obj-$(CONFIG_THUNDER_NIC_PF) += nicpf.o
+obj-$(CONFIG_THUNDER_NIC_VF) += nicvf.o
+
+nicpf-y := nic_main.o
+nicvf-y := nicvf_main.o nicvf_queues.o
+nicvf-y += nicvf_ethtool.o
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
new file mode 100644
index 000000000000..9b0be527909b
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -0,0 +1,414 @@
1 | /* | ||
2 | * Copyright (C) 2015 Cavium, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of version 2 of the GNU General Public License | ||
6 | * as published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #ifndef NIC_H | ||
10 | #define NIC_H | ||
11 | |||
12 | #include <linux/netdevice.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include "thunder_bgx.h" | ||
15 | |||
16 | /* PCI device IDs */ | ||
17 | #define PCI_DEVICE_ID_THUNDER_NIC_PF 0xA01E | ||
18 | #define PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF 0x0011 | ||
19 | #define PCI_DEVICE_ID_THUNDER_NIC_VF 0xA034 | ||
20 | #define PCI_DEVICE_ID_THUNDER_BGX 0xA026 | ||
21 | |||
22 | /* PCI BAR nos */ | ||
23 | #define PCI_CFG_REG_BAR_NUM 0 | ||
24 | #define PCI_MSIX_REG_BAR_NUM 4 | ||
25 | |||
26 | /* NIC SRIOV VF count */ | ||
27 | #define MAX_NUM_VFS_SUPPORTED 128 | ||
28 | #define DEFAULT_NUM_VF_ENABLED 8 | ||
29 | |||
30 | #define NIC_TNS_BYPASS_MODE 0 | ||
31 | #define NIC_TNS_MODE 1 | ||
32 | |||
33 | /* NIC priv flags */ | ||
34 | #define NIC_SRIOV_ENABLED BIT(0) | ||
35 | |||
36 | /* Min/Max packet size */ | ||
37 | #define NIC_HW_MIN_FRS 64 | ||
38 | #define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */ | ||
39 | |||
40 | /* Max pkinds */ | ||
41 | #define NIC_MAX_PKIND 16 | ||
42 | |||
43 | /* Rx Channels */ | ||
44 | /* Receive channel configuration in TNS bypass mode | ||
45 | * Below is configuration in TNS bypass mode | ||
46 | * BGX0-LMAC0-CHAN0 - VNIC CHAN0 | ||
47 | * BGX0-LMAC1-CHAN0 - VNIC CHAN16 | ||
48 | * ... | ||
49 | * BGX1-LMAC0-CHAN0 - VNIC CHAN128 | ||
50 | * ... | ||
51 | * BGX1-LMAC3-CHAN0 - VNIC CHAN174 | ||
52 | */ | ||
53 | #define NIC_INTF_COUNT 2 /* Interfaces btw VNIC and TNS/BGX */ | ||
54 | #define NIC_CHANS_PER_INF 128 | ||
55 | #define NIC_MAX_CHANS (NIC_INTF_COUNT * NIC_CHANS_PER_INF) | ||
56 | #define NIC_CPI_COUNT 2048 /* No of channel parse indices */ | ||
57 | |||
58 | /* TNS bypass mode: 1-1 mapping between VNIC and BGX:LMAC */ | ||
59 | #define NIC_MAX_BGX MAX_BGX_PER_CN88XX | ||
60 | #define NIC_CPI_PER_BGX (NIC_CPI_COUNT / NIC_MAX_BGX) | ||
61 | #define NIC_MAX_CPI_PER_LMAC 64 /* Max when CPI_ALG is IP diffserv */ | ||
62 | #define NIC_RSSI_PER_BGX (NIC_RSSI_COUNT / NIC_MAX_BGX) | ||
63 | |||
64 | /* Tx scheduling */ | ||
65 | #define NIC_MAX_TL4 1024 | ||
66 | #define NIC_MAX_TL4_SHAPERS 256 /* 1 shaper for 4 TL4s */ | ||
67 | #define NIC_MAX_TL3 256 | ||
68 | #define NIC_MAX_TL3_SHAPERS 64 /* 1 shaper for 4 TL3s */ | ||
69 | #define NIC_MAX_TL2 64 | ||
70 | #define NIC_MAX_TL2_SHAPERS 2 /* 1 shaper for 32 TL2s */ | ||
71 | #define NIC_MAX_TL1 2 | ||
72 | |||
73 | /* TNS bypass mode */ | ||
74 | #define NIC_TL2_PER_BGX 32 | ||
75 | #define NIC_TL4_PER_BGX (NIC_MAX_TL4 / NIC_MAX_BGX) | ||
76 | #define NIC_TL4_PER_LMAC (NIC_MAX_TL4 / NIC_CHANS_PER_INF) | ||
77 | |||
78 | /* NIC VF Interrupts */ | ||
79 | #define NICVF_INTR_CQ 0 | ||
80 | #define NICVF_INTR_SQ 1 | ||
81 | #define NICVF_INTR_RBDR 2 | ||
82 | #define NICVF_INTR_PKT_DROP 3 | ||
83 | #define NICVF_INTR_TCP_TIMER 4 | ||
84 | #define NICVF_INTR_MBOX 5 | ||
85 | #define NICVF_INTR_QS_ERR 6 | ||
86 | |||
87 | #define NICVF_INTR_CQ_SHIFT 0 | ||
88 | #define NICVF_INTR_SQ_SHIFT 8 | ||
89 | #define NICVF_INTR_RBDR_SHIFT 16 | ||
90 | #define NICVF_INTR_PKT_DROP_SHIFT 20 | ||
91 | #define NICVF_INTR_TCP_TIMER_SHIFT 21 | ||
92 | #define NICVF_INTR_MBOX_SHIFT 22 | ||
93 | #define NICVF_INTR_QS_ERR_SHIFT 23 | ||
94 | |||
95 | #define NICVF_INTR_CQ_MASK (0xFF << NICVF_INTR_CQ_SHIFT) | ||
96 | #define NICVF_INTR_SQ_MASK (0xFF << NICVF_INTR_SQ_SHIFT) | ||
97 | #define NICVF_INTR_RBDR_MASK (0x03 << NICVF_INTR_RBDR_SHIFT) | ||
98 | #define NICVF_INTR_PKT_DROP_MASK BIT(NICVF_INTR_PKT_DROP_SHIFT) | ||
99 | #define NICVF_INTR_TCP_TIMER_MASK BIT(NICVF_INTR_TCP_TIMER_SHIFT) | ||
100 | #define NICVF_INTR_MBOX_MASK BIT(NICVF_INTR_MBOX_SHIFT) | ||
101 | #define NICVF_INTR_QS_ERR_MASK BIT(NICVF_INTR_QS_ERR_SHIFT) | ||
102 | |||
103 | /* MSI-X interrupts */ | ||
104 | #define NIC_PF_MSIX_VECTORS 10 | ||
105 | #define NIC_VF_MSIX_VECTORS 20 | ||
106 | |||
107 | #define NIC_PF_INTR_ID_ECC0_SBE 0 | ||
108 | #define NIC_PF_INTR_ID_ECC0_DBE 1 | ||
109 | #define NIC_PF_INTR_ID_ECC1_SBE 2 | ||
110 | #define NIC_PF_INTR_ID_ECC1_DBE 3 | ||
111 | #define NIC_PF_INTR_ID_ECC2_SBE 4 | ||
112 | #define NIC_PF_INTR_ID_ECC2_DBE 5 | ||
113 | #define NIC_PF_INTR_ID_ECC3_SBE 6 | ||
114 | #define NIC_PF_INTR_ID_ECC3_DBE 7 | ||
115 | #define NIC_PF_INTR_ID_MBOX0 8 | ||
116 | #define NIC_PF_INTR_ID_MBOX1 9 | ||
117 | |||
118 | /* Global timer for CQ timer thresh interrupts | ||
119 | * Calculated for SCLK of 700Mhz | ||
120 | * value written should be a 1/16th of what is expected | ||
121 | * | ||
122 | * 1 tick per 0.05usec = value of 2.2 | ||
123 | * This 10% would be covered in CQ timer thresh value | ||
124 | */ | ||
125 | #define NICPF_CLK_PER_INT_TICK 2 | ||
126 | |||
127 | struct nicvf_cq_poll { | ||
128 | u8 cq_idx; /* Completion queue index */ | ||
129 | struct napi_struct napi; | ||
130 | }; | ||
131 | |||
132 | #define NIC_RSSI_COUNT 4096 /* Total no of RSS indices */ | ||
133 | #define NIC_MAX_RSS_HASH_BITS 8 | ||
134 | #define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS) | ||
135 | #define RSS_HASH_KEY_SIZE 5 /* 320 bit key */ | ||
136 | |||
137 | struct nicvf_rss_info { | ||
138 | bool enable; | ||
139 | #define RSS_L2_EXTENDED_HASH_ENA BIT(0) | ||
140 | #define RSS_IP_HASH_ENA BIT(1) | ||
141 | #define RSS_TCP_HASH_ENA BIT(2) | ||
142 | #define RSS_TCP_SYN_DIS BIT(3) | ||
143 | #define RSS_UDP_HASH_ENA BIT(4) | ||
144 | #define RSS_L4_EXTENDED_HASH_ENA BIT(5) | ||
145 | #define RSS_ROCE_ENA BIT(6) | ||
146 | #define RSS_L3_BI_DIRECTION_ENA BIT(7) | ||
147 | #define RSS_L4_BI_DIRECTION_ENA BIT(8) | ||
148 | u64 cfg; | ||
149 | u8 hash_bits; | ||
150 | u16 rss_size; | ||
151 | u8 ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE]; | ||
152 | u64 key[RSS_HASH_KEY_SIZE]; | ||
153 | } ____cacheline_aligned_in_smp; | ||
154 | |||
155 | enum rx_stats_reg_offset { | ||
156 | RX_OCTS = 0x0, | ||
157 | RX_UCAST = 0x1, | ||
158 | RX_BCAST = 0x2, | ||
159 | RX_MCAST = 0x3, | ||
160 | RX_RED = 0x4, | ||
161 | RX_RED_OCTS = 0x5, | ||
162 | RX_ORUN = 0x6, | ||
163 | RX_ORUN_OCTS = 0x7, | ||
164 | RX_FCS = 0x8, | ||
165 | RX_L2ERR = 0x9, | ||
166 | RX_DRP_BCAST = 0xa, | ||
167 | RX_DRP_MCAST = 0xb, | ||
168 | RX_DRP_L3BCAST = 0xc, | ||
169 | RX_DRP_L3MCAST = 0xd, | ||
170 | RX_STATS_ENUM_LAST, | ||
171 | }; | ||
172 | |||
173 | enum tx_stats_reg_offset { | ||
174 | TX_OCTS = 0x0, | ||
175 | TX_UCAST = 0x1, | ||
176 | TX_BCAST = 0x2, | ||
177 | TX_MCAST = 0x3, | ||
178 | TX_DROP = 0x4, | ||
179 | TX_STATS_ENUM_LAST, | ||
180 | }; | ||
181 | |||
182 | struct nicvf_hw_stats { | ||
183 | u64 rx_bytes_ok; | ||
184 | u64 rx_ucast_frames_ok; | ||
185 | u64 rx_bcast_frames_ok; | ||
186 | u64 rx_mcast_frames_ok; | ||
187 | u64 rx_fcs_errors; | ||
188 | u64 rx_l2_errors; | ||
189 | u64 rx_drop_red; | ||
190 | u64 rx_drop_red_bytes; | ||
191 | u64 rx_drop_overrun; | ||
192 | u64 rx_drop_overrun_bytes; | ||
193 | u64 rx_drop_bcast; | ||
194 | u64 rx_drop_mcast; | ||
195 | u64 rx_drop_l3_bcast; | ||
196 | u64 rx_drop_l3_mcast; | ||
197 | u64 tx_bytes_ok; | ||
198 | u64 tx_ucast_frames_ok; | ||
199 | u64 tx_bcast_frames_ok; | ||
200 | u64 tx_mcast_frames_ok; | ||
201 | u64 tx_drops; | ||
202 | }; | ||
203 | |||
204 | struct nicvf_drv_stats { | ||
205 | /* Rx */ | ||
206 | u64 rx_frames_ok; | ||
207 | u64 rx_frames_64; | ||
208 | u64 rx_frames_127; | ||
209 | u64 rx_frames_255; | ||
210 | u64 rx_frames_511; | ||
211 | u64 rx_frames_1023; | ||
212 | u64 rx_frames_1518; | ||
213 | u64 rx_frames_jumbo; | ||
214 | u64 rx_drops; | ||
215 | /* Tx */ | ||
216 | u64 tx_frames_ok; | ||
217 | u64 tx_drops; | ||
218 | u64 tx_busy; | ||
219 | u64 tx_tso; | ||
220 | }; | ||
221 | |||
222 | struct nicvf { | ||
223 | struct net_device *netdev; | ||
224 | struct pci_dev *pdev; | ||
225 | u8 vf_id; | ||
226 | u8 node; | ||
227 | u8 tns_mode; | ||
228 | u16 mtu; | ||
229 | struct queue_set *qs; | ||
230 | void __iomem *reg_base; | ||
231 | bool link_up; | ||
232 | u8 duplex; | ||
233 | u32 speed; | ||
234 | struct page *rb_page; | ||
235 | u32 rb_page_offset; | ||
236 | bool rb_alloc_fail; | ||
237 | bool rb_work_scheduled; | ||
238 | struct delayed_work rbdr_work; | ||
239 | struct tasklet_struct rbdr_task; | ||
240 | struct tasklet_struct qs_err_task; | ||
241 | struct tasklet_struct cq_task; | ||
242 | struct nicvf_cq_poll *napi[8]; | ||
243 | struct nicvf_rss_info rss_info; | ||
244 | u8 cpi_alg; | ||
245 | /* Interrupt coalescing settings */ | ||
246 | u32 cq_coalesce_usecs; | ||
247 | |||
248 | u32 msg_enable; | ||
249 | struct nicvf_hw_stats stats; | ||
250 | struct nicvf_drv_stats drv_stats; | ||
251 | struct bgx_stats bgx_stats; | ||
252 | struct work_struct reset_task; | ||
253 | |||
254 | /* MSI-X */ | ||
255 | bool msix_enabled; | ||
256 | u8 num_vec; | ||
257 | struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS]; | ||
258 | char irq_name[NIC_VF_MSIX_VECTORS][20]; | ||
259 | bool irq_allocated[NIC_VF_MSIX_VECTORS]; | ||
260 | |||
261 | bool pf_ready_to_rcv_msg; | ||
262 | bool pf_acked; | ||
263 | bool pf_nacked; | ||
264 | bool bgx_stats_acked; | ||
265 | } ____cacheline_aligned_in_smp; | ||
266 | |||
267 | /* PF <--> VF Mailbox communication | ||
268 | * Eight 64bit registers are shared between PF and VF. | ||
269 | * Separate set for each VF. | ||
270 | * Writing '1' into last register mbx7 means end of message. | ||
271 | */ | ||
272 | |||
273 | /* PF <--> VF mailbox communication */ | ||
274 | #define NIC_PF_VF_MAILBOX_SIZE 2 | ||
275 | #define NIC_MBOX_MSG_TIMEOUT 2000 /* ms */ | ||
276 | |||
277 | /* Mailbox message types */ | ||
278 | #define NIC_MBOX_MSG_READY 0x01 /* Is PF ready to rcv msgs */ | ||
279 | #define NIC_MBOX_MSG_ACK 0x02 /* ACK the message received */ | ||
280 | #define NIC_MBOX_MSG_NACK 0x03 /* NACK the message received */ | ||
281 | #define NIC_MBOX_MSG_QS_CFG 0x04 /* Configure Qset */ | ||
282 | #define NIC_MBOX_MSG_RQ_CFG 0x05 /* Configure receive queue */ | ||
283 | #define NIC_MBOX_MSG_SQ_CFG 0x06 /* Configure Send queue */ | ||
284 | #define NIC_MBOX_MSG_RQ_DROP_CFG 0x07 /* Configure receive queue */ | ||
285 | #define NIC_MBOX_MSG_SET_MAC 0x08 /* Add MAC ID to DMAC filter */ | ||
286 | #define NIC_MBOX_MSG_SET_MAX_FRS 0x09 /* Set max frame size */ | ||
287 | #define NIC_MBOX_MSG_CPI_CFG 0x0A /* Config CPI, RSSI */ | ||
288 | #define NIC_MBOX_MSG_RSS_SIZE 0x0B /* Get RSS indir_tbl size */ | ||
289 | #define NIC_MBOX_MSG_RSS_CFG 0x0C /* Config RSS table */ | ||
290 | #define NIC_MBOX_MSG_RSS_CFG_CONT 0x0D /* RSS config continuation */ | ||
291 | #define NIC_MBOX_MSG_RQ_BP_CFG 0x0E /* RQ backpressure config */ | ||
292 | #define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */ | ||
293 | #define NIC_MBOX_MSG_BGX_STATS 0x10 /* Get stats from BGX */ | ||
294 | #define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */ | ||
295 | #define NIC_MBOX_MSG_CFG_DONE 0x12 /* VF configuration done */ | ||
296 | #define NIC_MBOX_MSG_SHUTDOWN 0x13 /* VF is being shutdown */ | ||
297 | |||
298 | struct nic_cfg_msg { | ||
299 | u8 msg; | ||
300 | u8 vf_id; | ||
301 | u8 tns_mode; | ||
302 | u8 node_id; | ||
303 | u64 mac_addr; | ||
304 | }; | ||
305 | |||
306 | /* Qset configuration */ | ||
307 | struct qs_cfg_msg { | ||
308 | u8 msg; | ||
309 | u8 num; | ||
310 | u64 cfg; | ||
311 | }; | ||
312 | |||
313 | /* Receive queue configuration */ | ||
314 | struct rq_cfg_msg { | ||
315 | u8 msg; | ||
316 | u8 qs_num; | ||
317 | u8 rq_num; | ||
318 | u64 cfg; | ||
319 | }; | ||
320 | |||
321 | /* Send queue configuration */ | ||
322 | struct sq_cfg_msg { | ||
323 | u8 msg; | ||
324 | u8 qs_num; | ||
325 | u8 sq_num; | ||
326 | u64 cfg; | ||
327 | }; | ||
328 | |||
329 | /* Set VF's MAC address */ | ||
330 | struct set_mac_msg { | ||
331 | u8 msg; | ||
332 | u8 vf_id; | ||
333 | u64 addr; | ||
334 | }; | ||
335 | |||
336 | /* Set Maximum frame size */ | ||
337 | struct set_frs_msg { | ||
338 | u8 msg; | ||
339 | u8 vf_id; | ||
340 | u16 max_frs; | ||
341 | }; | ||
342 | |||
343 | /* Set CPI algorithm type */ | ||
344 | struct cpi_cfg_msg { | ||
345 | u8 msg; | ||
346 | u8 vf_id; | ||
347 | u8 rq_cnt; | ||
348 | u8 cpi_alg; | ||
349 | }; | ||
350 | |||
351 | /* Get RSS table size */ | ||
352 | struct rss_sz_msg { | ||
353 | u8 msg; | ||
354 | u8 vf_id; | ||
355 | u16 ind_tbl_size; | ||
356 | }; | ||
357 | |||
358 | /* Set RSS configuration */ | ||
359 | struct rss_cfg_msg { | ||
360 | u8 msg; | ||
361 | u8 vf_id; | ||
362 | u8 hash_bits; | ||
363 | u8 tbl_len; | ||
364 | u8 tbl_offset; | ||
365 | #define RSS_IND_TBL_LEN_PER_MBX_MSG 8 | ||
366 | u8 ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG]; | ||
367 | }; | ||
368 | |||
369 | struct bgx_stats_msg { | ||
370 | u8 msg; | ||
371 | u8 vf_id; | ||
372 | u8 rx; | ||
373 | u8 idx; | ||
374 | u64 stats; | ||
375 | }; | ||
376 | |||
377 | /* Physical interface link status */ | ||
378 | struct bgx_link_status { | ||
379 | u8 msg; | ||
380 | u8 link_up; | ||
381 | u8 duplex; | ||
382 | u32 speed; | ||
383 | }; | ||
384 | |||
385 | /* 128 bit shared memory between PF and each VF */ | ||
386 | union nic_mbx { | ||
387 | struct { u8 msg; } msg; | ||
388 | struct nic_cfg_msg nic_cfg; | ||
389 | struct qs_cfg_msg qs; | ||
390 | struct rq_cfg_msg rq; | ||
391 | struct sq_cfg_msg sq; | ||
392 | struct set_mac_msg mac; | ||
393 | struct set_frs_msg frs; | ||
394 | struct cpi_cfg_msg cpi_cfg; | ||
395 | struct rss_sz_msg rss_size; | ||
396 | struct rss_cfg_msg rss_cfg; | ||
397 | struct bgx_stats_msg bgx_stats; | ||
398 | struct bgx_link_status link_status; | ||
399 | }; | ||
400 | |||
401 | int nicvf_set_real_num_queues(struct net_device *netdev, | ||
402 | int tx_queues, int rx_queues); | ||
403 | int nicvf_open(struct net_device *netdev); | ||
404 | int nicvf_stop(struct net_device *netdev); | ||
405 | int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx); | ||
406 | void nicvf_config_cpi(struct nicvf *nic); | ||
407 | void nicvf_config_rss(struct nicvf *nic); | ||
408 | void nicvf_set_rss_key(struct nicvf *nic); | ||
409 | void nicvf_free_skb(struct nicvf *nic, struct sk_buff *skb); | ||
410 | void nicvf_set_ethtool_ops(struct net_device *netdev); | ||
411 | void nicvf_update_stats(struct nicvf *nic); | ||
412 | void nicvf_update_lmac_stats(struct nicvf *nic); | ||
413 | |||
414 | #endif /* NIC_H */ | ||
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
new file mode 100644
index 000000000000..0f1f58b54bf1
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -0,0 +1,940 @@
1 | /* | ||
2 | * Copyright (C) 2015 Cavium, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of version 2 of the GNU General Public License | ||
6 | * as published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <linux/interrupt.h> | ||
11 | #include <linux/pci.h> | ||
12 | #include <linux/etherdevice.h> | ||
13 | #include <linux/of.h> | ||
14 | |||
15 | #include "nic_reg.h" | ||
16 | #include "nic.h" | ||
17 | #include "q_struct.h" | ||
18 | #include "thunder_bgx.h" | ||
19 | |||
20 | #define DRV_NAME "thunder-nic" | ||
21 | #define DRV_VERSION "1.0" | ||
22 | |||
23 | struct nicpf { | ||
24 | struct pci_dev *pdev; | ||
25 | u8 rev_id; | ||
26 | #define NIC_NODE_ID_MASK 0x300000000000 | ||
27 | #define NIC_NODE_ID(x) ((x & NODE_ID_MASK) >> 44) | ||
28 | u8 node; | ||
29 | unsigned int flags; | ||
30 | u8 num_vf_en; /* No of VF enabled */ | ||
31 | bool vf_enabled[MAX_NUM_VFS_SUPPORTED]; | ||
32 | void __iomem *reg_base; /* Register start address */ | ||
33 | struct pkind_cfg pkind; | ||
34 | #define NIC_SET_VF_LMAC_MAP(bgx, lmac) (((bgx & 0xF) << 4) | (lmac & 0xF)) | ||
35 | #define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) | ||
36 | #define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) | ||
37 | u8 vf_lmac_map[MAX_LMAC]; | ||
38 | struct delayed_work dwork; | ||
39 | struct workqueue_struct *check_link; | ||
40 | u8 link[MAX_LMAC]; | ||
41 | u8 duplex[MAX_LMAC]; | ||
42 | u32 speed[MAX_LMAC]; | ||
43 | u16 cpi_base[MAX_NUM_VFS_SUPPORTED]; | ||
44 | u16 rss_ind_tbl_size; | ||
45 | bool mbx_lock[MAX_NUM_VFS_SUPPORTED]; | ||
46 | |||
47 | /* MSI-X */ | ||
48 | bool msix_enabled; | ||
49 | u8 num_vec; | ||
50 | struct msix_entry msix_entries[NIC_PF_MSIX_VECTORS]; | ||
51 | bool irq_allocated[NIC_PF_MSIX_VECTORS]; | ||
52 | }; | ||
53 | |||
54 | /* Supported devices */ | ||
55 | static const struct pci_device_id nic_id_table[] = { | ||
56 | { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) }, | ||
57 | { 0, } /* end of table */ | ||
58 | }; | ||
59 | |||
60 | MODULE_AUTHOR("Sunil Goutham"); | ||
61 | MODULE_DESCRIPTION("Cavium Thunder NIC Physical Function Driver"); | ||
62 | MODULE_LICENSE("GPL v2"); | ||
63 | MODULE_VERSION(DRV_VERSION); | ||
64 | MODULE_DEVICE_TABLE(pci, nic_id_table); | ||
65 | |||
66 | /* The Cavium ThunderX network controller can *only* be found in SoCs | ||
67 | * containing the ThunderX ARM64 CPU implementation. All accesses to the device | ||
68 | * registers on this platform are implicitly strongly ordered with respect | ||
69 | * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use | ||
70 | * with no memory barriers in this driver. The readq()/writeq() functions add | ||
71 | * explicit ordering operation which in this case are redundant, and only | ||
72 | * add overhead. | ||
73 | */ | ||
74 | |||
75 | /* Register read/write APIs */ | ||
76 | static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val) | ||
77 | { | ||
78 | writeq_relaxed(val, nic->reg_base + offset); | ||
79 | } | ||
80 | |||
81 | static u64 nic_reg_read(struct nicpf *nic, u64 offset) | ||
82 | { | ||
83 | return readq_relaxed(nic->reg_base + offset); | ||
84 | } | ||
85 | |||
86 | /* PF -> VF mailbox communication APIs */ | ||
87 | static void nic_enable_mbx_intr(struct nicpf *nic) | ||
88 | { | ||
89 | /* Enable mailbox interrupt for all 128 VFs */ | ||
90 | nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0ull); | ||
91 | nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64), ~0ull); | ||
92 | } | ||
93 | |||
94 | static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg) | ||
95 | { | ||
96 | nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), BIT_ULL(vf)); | ||
97 | } | ||
98 | |||
99 | static u64 nic_get_mbx_addr(int vf) | ||
100 | { | ||
101 | return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT); | ||
102 | } | ||
103 | |||
104 | /* Send a mailbox message to VF | ||
105 | * @vf: vf to which this message to be sent | ||
106 | * @mbx: Message to be sent | ||
107 | */ | ||
108 | static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx) | ||
109 | { | ||
110 | void __iomem *mbx_addr = nic->reg_base + nic_get_mbx_addr(vf); | ||
111 | u64 *msg = (u64 *)mbx; | ||
112 | |||
113 | /* In first revision HW, mbox interrupt is triggered | ||
114 | * when PF writes to MBOX(1), in next revisions when | ||
115 | * PF writes to MBOX(0) | ||
116 | */ | ||
117 | if (nic->rev_id == 0) { | ||
118 | /* see the comment for nic_reg_write()/nic_reg_read() | ||
119 | * functions above | ||
120 | */ | ||
121 | writeq_relaxed(msg[0], mbx_addr); | ||
122 | writeq_relaxed(msg[1], mbx_addr + 8); | ||
123 | } else { | ||
124 | writeq_relaxed(msg[1], mbx_addr + 8); | ||
125 | writeq_relaxed(msg[0], mbx_addr); | ||
126 | } | ||
127 | } | ||
128 | |||
129 | /* Responds to VF's READY message with VF's | ||
130 | * ID, node, MAC address etc. | ||
131 | * @vf: VF which sent READY message | ||
132 | */ | ||
133 | static void nic_mbx_send_ready(struct nicpf *nic, int vf) | ||
134 | { | ||
135 | union nic_mbx mbx = {}; | ||
136 | int bgx_idx, lmac; | ||
137 | const char *mac; | ||
138 | |||
139 | mbx.nic_cfg.msg = NIC_MBOX_MSG_READY; | ||
140 | mbx.nic_cfg.vf_id = vf; | ||
141 | |||
142 | mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE; | ||
143 | |||
144 | bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | ||
145 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | ||
146 | |||
147 | mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac); | ||
148 | if (mac) | ||
149 | ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac); | ||
150 | |||
151 | mbx.nic_cfg.node_id = nic->node; | ||
152 | nic_send_msg_to_vf(nic, vf, &mbx); | ||
153 | } | ||
154 | |||
155 | /* ACKs VF's mailbox message | ||
156 | * @vf: VF to which ACK to be sent | ||
157 | */ | ||
158 | static void nic_mbx_send_ack(struct nicpf *nic, int vf) | ||
159 | { | ||
160 | union nic_mbx mbx = {}; | ||
161 | |||
162 | mbx.msg.msg = NIC_MBOX_MSG_ACK; | ||
163 | nic_send_msg_to_vf(nic, vf, &mbx); | ||
164 | } | ||
165 | |||
166 | /* NACKs VF's mailbox message that PF is not able to | ||
167 | * complete the action | ||
168 | * @vf: VF to which ACK to be sent | ||
169 | */ | ||
170 | static void nic_mbx_send_nack(struct nicpf *nic, int vf) | ||
171 | { | ||
172 | union nic_mbx mbx = {}; | ||
173 | |||
174 | mbx.msg.msg = NIC_MBOX_MSG_NACK; | ||
175 | nic_send_msg_to_vf(nic, vf, &mbx); | ||
176 | } | ||
177 | |||
178 | /* Flush all in flight receive packets to memory and | ||
179 | * bring down an active RQ | ||
180 | */ | ||
181 | static int nic_rcv_queue_sw_sync(struct nicpf *nic) | ||
182 | { | ||
183 | u16 timeout = ~0x00; | ||
184 | |||
185 | nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01); | ||
186 | /* Wait till sync cycle is finished */ | ||
187 | while (timeout) { | ||
188 | if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1) | ||
189 | break; | ||
190 | timeout--; | ||
191 | } | ||
192 | nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00); | ||
193 | if (!timeout) { | ||
194 | dev_err(&nic->pdev->dev, "Receive queue software sync failed"); | ||
195 | return 1; | ||
196 | } | ||
197 | return 0; | ||
198 | } | ||
199 | |||
200 | /* Get BGX Rx/Tx stats and respond to VF's request */ | ||
201 | static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx) | ||
202 | { | ||
203 | int bgx_idx, lmac; | ||
204 | union nic_mbx mbx = {}; | ||
205 | |||
206 | bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]); | ||
207 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]); | ||
208 | |||
209 | mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS; | ||
210 | mbx.bgx_stats.vf_id = bgx->vf_id; | ||
211 | mbx.bgx_stats.rx = bgx->rx; | ||
212 | mbx.bgx_stats.idx = bgx->idx; | ||
213 | if (bgx->rx) | ||
214 | mbx.bgx_stats.stats = bgx_get_rx_stats(nic->node, bgx_idx, | ||
215 | lmac, bgx->idx); | ||
216 | else | ||
217 | mbx.bgx_stats.stats = bgx_get_tx_stats(nic->node, bgx_idx, | ||
218 | lmac, bgx->idx); | ||
219 | nic_send_msg_to_vf(nic, bgx->vf_id, &mbx); | ||
220 | } | ||
221 | |||
222 | /* Update hardware min/max frame size */ | ||
223 | static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf) | ||
224 | { | ||
225 | if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) { | ||
226 | dev_err(&nic->pdev->dev, | ||
227 | "Invalid MTU setting from VF%d rejected, should be between %d and %d\n", | ||
228 | vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS); | ||
229 | return 1; | ||
230 | } | ||
231 | new_frs += ETH_HLEN; | ||
232 | if (new_frs <= nic->pkind.maxlen) | ||
233 | return 0; | ||
234 | |||
235 | nic->pkind.maxlen = new_frs; | ||
236 | nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind); | ||
237 | return 0; | ||
238 | } | ||
239 | |||
240 | /* Set minimum transmit packet size */ | ||
241 | static void nic_set_tx_pkt_pad(struct nicpf *nic, int size) | ||
242 | { | ||
243 | int lmac; | ||
244 | u64 lmac_cfg; | ||
245 | |||
246 | /* Max value that can be set is 60 */ | ||
247 | if (size > 60) | ||
248 | size = 60; | ||
249 | |||
250 | for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) { | ||
251 | lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3)); | ||
252 | lmac_cfg &= ~(0xF << 2); | ||
253 | lmac_cfg |= ((size / 4) << 2); | ||
254 | nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg); | ||
255 | } | ||
256 | } | ||
257 | |||
258 | /* Function to check number of LMACs present and set VF::LMAC mapping. | ||
259 | * Mapping will be used while initializing channels. | ||
260 | */ | ||
261 | static void nic_set_lmac_vf_mapping(struct nicpf *nic) | ||
262 | { | ||
263 | unsigned bgx_map = bgx_get_map(nic->node); | ||
264 | int bgx, next_bgx_lmac = 0; | ||
265 | int lmac, lmac_cnt = 0; | ||
266 | u64 lmac_credit; | ||
267 | |||
268 | nic->num_vf_en = 0; | ||
269 | |||
270 | for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) { | ||
271 | if (!(bgx_map & (1 << bgx))) | ||
272 | continue; | ||
273 | lmac_cnt = bgx_get_lmac_count(nic->node, bgx); | ||
274 | for (lmac = 0; lmac < lmac_cnt; lmac++) | ||
275 | nic->vf_lmac_map[next_bgx_lmac++] = | ||
276 | NIC_SET_VF_LMAC_MAP(bgx, lmac); | ||
277 | nic->num_vf_en += lmac_cnt; | ||
278 | |||
279 | /* Program LMAC credits */ | ||
280 | lmac_credit = (1ull << 1); /* channel credit enable */ | ||
281 | lmac_credit |= (0x1ff << 2); /* Max outstanding pkt count */ | ||
282 | /* 48KB BGX Tx buffer size, each unit is of size 16bytes */ | ||
283 | lmac_credit |= (((((48 * 1024) / lmac_cnt) - | ||
284 | NIC_HW_MAX_FRS) / 16) << 12); | ||
285 | lmac = bgx * MAX_LMAC_PER_BGX; | ||
286 | for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++) | ||
287 | nic_reg_write(nic, | ||
288 | NIC_PF_LMAC_0_7_CREDIT + (lmac * 8), | ||
289 | lmac_credit); | ||
290 | } | ||
291 | } | ||
292 | |||
293 | #define BGX0_BLOCK 8 | ||
294 | #define BGX1_BLOCK 9 | ||
295 | |||
296 | static void nic_init_hw(struct nicpf *nic) | ||
297 | { | ||
298 | int i; | ||
299 | |||
300 | /* Reset NIC, in case the driver is repeatedly inserted and removed */ | ||
301 | nic_reg_write(nic, NIC_PF_SOFT_RESET, 1); | ||
302 | |||
303 | /* Enable NIC HW block */ | ||
304 | nic_reg_write(nic, NIC_PF_CFG, 0x3); | ||
305 | |||
306 | /* Enable backpressure */ | ||
307 | nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03); | ||
308 | |||
309 | /* Disable TNS mode on both interfaces */ | ||
310 | nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG, | ||
311 | (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK); | ||
312 | nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8), | ||
313 | (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK); | ||
314 | nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG, | ||
315 | (1ULL << 63) | BGX0_BLOCK); | ||
316 | nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8), | ||
317 | (1ULL << 63) | BGX1_BLOCK); | ||
318 | |||
319 | /* PKIND configuration */ | ||
320 | nic->pkind.minlen = 0; | ||
321 | nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN; | ||
322 | nic->pkind.lenerr_en = 1; | ||
323 | nic->pkind.rx_hdr = 0; | ||
324 | nic->pkind.hdr_sl = 0; | ||
325 | |||
326 | for (i = 0; i < NIC_MAX_PKIND; i++) | ||
327 | nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3), | ||
328 | *(u64 *)&nic->pkind); | ||
329 | |||
330 | nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS); | ||
331 | |||
332 | /* Timer config */ | ||
333 | nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK); | ||
334 | } | ||
335 | |||
336 | /* Channel parse index configuration */ | ||
337 | static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg) | ||
338 | { | ||
339 | u32 vnic, bgx, lmac, chan; | ||
340 | u32 padd, cpi_count = 0; | ||
341 | u64 cpi_base, cpi, rssi_base, rssi; | ||
342 | u8 qset, rq_idx = 0; | ||
343 | |||
344 | vnic = cfg->vf_id; | ||
345 | bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]); | ||
346 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]); | ||
347 | |||
348 | chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF); | ||
349 | cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX); | ||
350 | rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX); | ||
351 | |||
352 | /* Rx channel configuration */ | ||
353 | nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3), | ||
354 | (1ull << 63) | (vnic << 0)); | ||
355 | nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3), | ||
356 | ((u64)cfg->cpi_alg << 62) | (cpi_base << 48)); | ||
357 | |||
358 | if (cfg->cpi_alg == CPI_ALG_NONE) | ||
359 | cpi_count = 1; | ||
360 | else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */ | ||
361 | cpi_count = 8; | ||
362 | else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */ | ||
363 | cpi_count = 16; | ||
364 | else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6bits DSCP */ | ||
365 | cpi_count = NIC_MAX_CPI_PER_LMAC; | ||
366 | |||
367 | /* RSS Qset, Qidx mapping */ | ||
368 | qset = cfg->vf_id; | ||
369 | rssi = rssi_base; | ||
370 | for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) { | ||
371 | nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3), | ||
372 | (qset << 3) | rq_idx); | ||
373 | rq_idx++; | ||
374 | } | ||
375 | |||
376 | rssi = 0; | ||
377 | cpi = cpi_base; | ||
378 | for (; cpi < (cpi_base + cpi_count); cpi++) { | ||
379 | /* Determine port to channel adder */ | ||
380 | if (cfg->cpi_alg != CPI_ALG_DIFF) | ||
381 | padd = cpi % cpi_count; | ||
382 | else | ||
383 | padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */ | ||
384 | |||
385 | /* Leave RSS_SIZE as '0' to disable RSS */ | ||
386 | nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3), | ||
387 | (vnic << 24) | (padd << 16) | (rssi_base + rssi)); | ||
388 | |||
389 | if ((rssi + 1) >= cfg->rq_cnt) | ||
390 | continue; | ||
391 | |||
392 | if (cfg->cpi_alg == CPI_ALG_VLAN) | ||
393 | rssi++; | ||
394 | else if (cfg->cpi_alg == CPI_ALG_VLAN16) | ||
395 | rssi = ((cpi - cpi_base) & 0xe) >> 1; | ||
396 | else if (cfg->cpi_alg == CPI_ALG_DIFF) | ||
397 | rssi = ((cpi - cpi_base) & 0x38) >> 3; | ||
398 | } | ||
399 | nic->cpi_base[cfg->vf_id] = cpi_base; | ||
400 | } | ||
401 | |||
402 | /* Responds to VF with its RSS indirection table size */ | ||
403 | static void nic_send_rss_size(struct nicpf *nic, int vf) | ||
404 | { | ||
405 | union nic_mbx mbx = {}; | ||
406 | u64 *msg; | ||
407 | |||
408 | msg = (u64 *)&mbx; | ||
409 | |||
410 | mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE; | ||
411 | mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size; | ||
412 | nic_send_msg_to_vf(nic, vf, &mbx); | ||
413 | } | ||
414 | |||
415 | /* Receive side scaling configuration | ||
416 | * configure: | ||
417 | * - RSS index | ||
418 | * - indir table i.e hash::RQ mapping | ||
419 | * - no of hash bits to consider | ||
420 | */ | ||
421 | static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg) | ||
422 | { | ||
423 | u8 qset, idx = 0; | ||
424 | u64 cpi_cfg, cpi_base, rssi_base, rssi; | ||
425 | |||
426 | cpi_base = nic->cpi_base[cfg->vf_id]; | ||
427 | cpi_cfg = nic_reg_read(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3)); | ||
428 | rssi_base = (cpi_cfg & 0x0FFF) + cfg->tbl_offset; | ||
429 | |||
430 | rssi = rssi_base; | ||
431 | qset = cfg->vf_id; | ||
432 | |||
433 | for (; rssi < (rssi_base + cfg->tbl_len); rssi++) { | ||
434 | nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3), | ||
435 | (qset << 3) | (cfg->ind_tbl[idx] & 0x7)); | ||
436 | idx++; | ||
437 | } | ||
438 | |||
439 | cpi_cfg &= ~(0xFULL << 20); | ||
440 | cpi_cfg |= (cfg->hash_bits << 20); | ||
441 | nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3), cpi_cfg); | ||
442 | } | ||
443 | |||
444 | /* 4 level transmit side scheduler configuration | ||
445 | * for TNS bypass mode | ||
446 | * | ||
447 | * Sample configuration for SQ0 | ||
448 | * VNIC0-SQ0 -> TL4(0) -> TL3[0] -> TL2[0] -> TL1[0] -> BGX0 | ||
449 | * VNIC1-SQ0 -> TL4(8) -> TL3[2] -> TL2[0] -> TL1[0] -> BGX0 | ||
450 | * VNIC2-SQ0 -> TL4(16) -> TL3[4] -> TL2[1] -> TL1[0] -> BGX0 | ||
451 | * VNIC3-SQ0 -> TL4(24) -> TL3[6] -> TL2[1] -> TL1[0] -> BGX0 | ||
452 | * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1 | ||
453 | * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1 | ||
454 | * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1 | ||
455 | * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1 | ||
456 | */ | ||
457 | static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, u8 sq_idx) | ||
458 | { | ||
459 | u32 bgx, lmac, chan; | ||
460 | u32 tl2, tl3, tl4; | ||
461 | u32 rr_quantum; | ||
462 | |||
463 | bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]); | ||
464 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]); | ||
465 | /* 24 bytes for FCS, IPG and preamble */ | ||
466 | rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4); | ||
467 | |||
468 | tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX); | ||
469 | tl4 += sq_idx; | ||
470 | tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3); | ||
471 | nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 | | ||
472 | ((u64)vnic << NIC_QS_ID_SHIFT) | | ||
473 | ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4); | ||
474 | nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3), | ||
475 | ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum); | ||
476 | |||
477 | nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum); | ||
478 | chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF); | ||
479 | nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan); | ||
480 | /* Enable backpressure on the channel */ | ||
481 | nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1); | ||
482 | |||
483 | tl2 = tl3 >> 2; | ||
484 | nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2); | ||
485 | nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum); | ||
486 | /* No priorities as of now */ | ||
487 | nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00); | ||
488 | } | ||
489 | |||
490 | /* Interrupt handler to handle mailbox messages from VFs */ | ||
491 | static void nic_handle_mbx_intr(struct nicpf *nic, int vf) | ||
492 | { | ||
493 | union nic_mbx mbx = {}; | ||
494 | u64 *mbx_data; | ||
495 | u64 mbx_addr; | ||
496 | u64 reg_addr; | ||
497 | u64 mac_addr; | ||
498 | int bgx, lmac; | ||
499 | int i; | ||
500 | int ret = 0; | ||
501 | |||
502 | nic->mbx_lock[vf] = true; | ||
503 | |||
504 | mbx_addr = nic_get_mbx_addr(vf); | ||
505 | mbx_data = (u64 *)&mbx; | ||
506 | |||
507 | for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) { | ||
508 | *mbx_data = nic_reg_read(nic, mbx_addr); | ||
509 | mbx_data++; | ||
510 | mbx_addr += sizeof(u64); | ||
511 | } | ||
512 | |||
513 | dev_dbg(&nic->pdev->dev, "%s: Mailbox msg %d from VF%d\n", | ||
514 | __func__, mbx.msg.msg, vf); | ||
515 | switch (mbx.msg.msg) { | ||
516 | case NIC_MBOX_MSG_READY: | ||
517 | nic_mbx_send_ready(nic, vf); | ||
518 | nic->link[vf] = 0; | ||
519 | nic->duplex[vf] = 0; | ||
520 | nic->speed[vf] = 0; | ||
521 | ret = 1; | ||
522 | break; | ||
523 | case NIC_MBOX_MSG_QS_CFG: | ||
524 | reg_addr = NIC_PF_QSET_0_127_CFG | | ||
525 | (mbx.qs.num << NIC_QS_ID_SHIFT); | ||
526 | nic_reg_write(nic, reg_addr, mbx.qs.cfg); | ||
527 | break; | ||
528 | case NIC_MBOX_MSG_RQ_CFG: | ||
529 | reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG | | ||
530 | (mbx.rq.qs_num << NIC_QS_ID_SHIFT) | | ||
531 | (mbx.rq.rq_num << NIC_Q_NUM_SHIFT); | ||
532 | nic_reg_write(nic, reg_addr, mbx.rq.cfg); | ||
533 | break; | ||
534 | case NIC_MBOX_MSG_RQ_BP_CFG: | ||
535 | reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG | | ||
536 | (mbx.rq.qs_num << NIC_QS_ID_SHIFT) | | ||
537 | (mbx.rq.rq_num << NIC_Q_NUM_SHIFT); | ||
538 | nic_reg_write(nic, reg_addr, mbx.rq.cfg); | ||
539 | break; | ||
540 | case NIC_MBOX_MSG_RQ_SW_SYNC: | ||
541 | ret = nic_rcv_queue_sw_sync(nic); | ||
542 | break; | ||
543 | case NIC_MBOX_MSG_RQ_DROP_CFG: | ||
544 | reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG | | ||
545 | (mbx.rq.qs_num << NIC_QS_ID_SHIFT) | | ||
546 | (mbx.rq.rq_num << NIC_Q_NUM_SHIFT); | ||
547 | nic_reg_write(nic, reg_addr, mbx.rq.cfg); | ||
548 | break; | ||
549 | case NIC_MBOX_MSG_SQ_CFG: | ||
550 | reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG | | ||
551 | (mbx.sq.qs_num << NIC_QS_ID_SHIFT) | | ||
552 | (mbx.sq.sq_num << NIC_Q_NUM_SHIFT); | ||
553 | nic_reg_write(nic, reg_addr, mbx.sq.cfg); | ||
554 | nic_tx_channel_cfg(nic, mbx.qs.num, mbx.sq.sq_num); | ||
555 | break; | ||
556 | case NIC_MBOX_MSG_SET_MAC: | ||
557 | lmac = mbx.mac.vf_id; | ||
558 | bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]); | ||
559 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]); | ||
560 | #ifdef __BIG_ENDIAN | ||
561 | mac_addr = cpu_to_be64(mbx.nic_cfg.mac_addr) << 16; | ||
562 | #else | ||
563 | mac_addr = cpu_to_be64(mbx.nic_cfg.mac_addr) >> 16; | ||
564 | #endif | ||
565 | bgx_set_lmac_mac(nic->node, bgx, lmac, (u8 *)&mac_addr); | ||
566 | break; | ||
567 | case NIC_MBOX_MSG_SET_MAX_FRS: | ||
568 | ret = nic_update_hw_frs(nic, mbx.frs.max_frs, | ||
569 | mbx.frs.vf_id); | ||
570 | break; | ||
571 | case NIC_MBOX_MSG_CPI_CFG: | ||
572 | nic_config_cpi(nic, &mbx.cpi_cfg); | ||
573 | break; | ||
574 | case NIC_MBOX_MSG_RSS_SIZE: | ||
575 | nic_send_rss_size(nic, vf); | ||
576 | goto unlock; | ||
577 | case NIC_MBOX_MSG_RSS_CFG: | ||
578 | case NIC_MBOX_MSG_RSS_CFG_CONT: | ||
579 | nic_config_rss(nic, &mbx.rss_cfg); | ||
580 | break; | ||
581 | case NIC_MBOX_MSG_CFG_DONE: | ||
582 | /* Last message of VF config msg sequence */ | ||
583 | nic->vf_enabled[vf] = true; | ||
584 | goto unlock; | ||
585 | case NIC_MBOX_MSG_SHUTDOWN: | ||
586 | /* First msg in VF teardown sequence */ | ||
587 | nic->vf_enabled[vf] = false; | ||
588 | break; | ||
589 | case NIC_MBOX_MSG_BGX_STATS: | ||
590 | nic_get_bgx_stats(nic, &mbx.bgx_stats); | ||
591 | goto unlock; | ||
592 | default: | ||
593 | dev_err(&nic->pdev->dev, | ||
594 | "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg); | ||
595 | break; | ||
596 | } | ||
597 | |||
598 | if (!ret) | ||
599 | nic_mbx_send_ack(nic, vf); | ||
600 | else if (mbx.msg.msg != NIC_MBOX_MSG_READY) | ||
601 | nic_mbx_send_nack(nic, vf); | ||
602 | unlock: | ||
603 | nic->mbx_lock[vf] = false; | ||
604 | } | ||
605 | |||
606 | static void nic_mbx_intr_handler (struct nicpf *nic, int mbx) | ||
607 | { | ||
608 | u64 intr; | ||
609 | u8 vf, vf_per_mbx_reg = 64; | ||
610 | |||
611 | intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3)); | ||
612 | dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr); | ||
613 | for (vf = 0; vf < vf_per_mbx_reg; vf++) { | ||
614 | if (intr & (1ULL << vf)) { | ||
615 | dev_dbg(&nic->pdev->dev, "Intr from VF %d\n", | ||
616 | vf + (mbx * vf_per_mbx_reg)); | ||
617 | if ((vf + (mbx * vf_per_mbx_reg)) > nic->num_vf_en) | ||
618 | break; | ||
619 | nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg)); | ||
620 | nic_clear_mbx_intr(nic, vf, mbx); | ||
621 | } | ||
622 | } | ||
623 | } | ||
624 | |||
625 | static irqreturn_t nic_mbx0_intr_handler (int irq, void *nic_irq) | ||
626 | { | ||
627 | struct nicpf *nic = (struct nicpf *)nic_irq; | ||
628 | |||
629 | nic_mbx_intr_handler(nic, 0); | ||
630 | |||
631 | return IRQ_HANDLED; | ||
632 | } | ||
633 | |||
634 | static irqreturn_t nic_mbx1_intr_handler (int irq, void *nic_irq) | ||
635 | { | ||
636 | struct nicpf *nic = (struct nicpf *)nic_irq; | ||
637 | |||
638 | nic_mbx_intr_handler(nic, 1); | ||
639 | |||
640 | return IRQ_HANDLED; | ||
641 | } | ||
642 | |||
643 | static int nic_enable_msix(struct nicpf *nic) | ||
644 | { | ||
645 | int i, ret; | ||
646 | |||
647 | nic->num_vec = NIC_PF_MSIX_VECTORS; | ||
648 | |||
649 | for (i = 0; i < nic->num_vec; i++) | ||
650 | nic->msix_entries[i].entry = i; | ||
651 | |||
652 | ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec); | ||
653 | if (ret) { | ||
654 | dev_err(&nic->pdev->dev, | ||
655 | "Request for #%d msix vectors failed\n", | ||
656 | nic->num_vec); | ||
657 | return ret; | ||
658 | } | ||
659 | |||
660 | nic->msix_enabled = 1; | ||
661 | return 0; | ||
662 | } | ||
663 | |||
664 | static void nic_disable_msix(struct nicpf *nic) | ||
665 | { | ||
666 | if (nic->msix_enabled) { | ||
667 | pci_disable_msix(nic->pdev); | ||
668 | nic->msix_enabled = 0; | ||
669 | nic->num_vec = 0; | ||
670 | } | ||
671 | } | ||
672 | |||
673 | static void nic_free_all_interrupts(struct nicpf *nic) | ||
674 | { | ||
675 | int irq; | ||
676 | |||
677 | for (irq = 0; irq < nic->num_vec; irq++) { | ||
678 | if (nic->irq_allocated[irq]) | ||
679 | free_irq(nic->msix_entries[irq].vector, nic); | ||
680 | nic->irq_allocated[irq] = false; | ||
681 | } | ||
682 | } | ||
683 | |||
684 | static int nic_register_interrupts(struct nicpf *nic) | ||
685 | { | ||
686 | int ret; | ||
687 | |||
688 | /* Enable MSI-X */ | ||
689 | ret = nic_enable_msix(nic); | ||
690 | if (ret) | ||
691 | return ret; | ||
692 | |||
693 | /* Register mailbox interrupt handlers */ | ||
694 | ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector, | ||
695 | nic_mbx0_intr_handler, 0, "NIC Mbox0", nic); | ||
696 | if (ret) | ||
697 | goto fail; | ||
698 | |||
699 | nic->irq_allocated[NIC_PF_INTR_ID_MBOX0] = true; | ||
700 | |||
701 | ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX1].vector, | ||
702 | nic_mbx1_intr_handler, 0, "NIC Mbox1", nic); | ||
703 | if (ret) | ||
704 | goto fail; | ||
705 | |||
706 | nic->irq_allocated[NIC_PF_INTR_ID_MBOX1] = true; | ||
707 | |||
708 | /* Enable mailbox interrupt */ | ||
709 | nic_enable_mbx_intr(nic); | ||
710 | return 0; | ||
711 | |||
712 | fail: | ||
713 | dev_err(&nic->pdev->dev, "Request irq failed\n"); | ||
714 | nic_free_all_interrupts(nic); | ||
715 | return ret; | ||
716 | } | ||
717 | |||
718 | static void nic_unregister_interrupts(struct nicpf *nic) | ||
719 | { | ||
720 | nic_free_all_interrupts(nic); | ||
721 | nic_disable_msix(nic); | ||
722 | } | ||
723 | |||
724 | static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic) | ||
725 | { | ||
726 | int pos = 0; | ||
727 | int err; | ||
728 | u16 total_vf_cnt; | ||
729 | |||
730 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); | ||
731 | if (!pos) { | ||
732 | dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n"); | ||
733 | return -ENODEV; | ||
734 | } | ||
735 | |||
736 | pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt); | ||
737 | if (total_vf_cnt < nic->num_vf_en) | ||
738 | nic->num_vf_en = total_vf_cnt; | ||
739 | |||
740 | if (!total_vf_cnt) | ||
741 | return 0; | ||
742 | |||
743 | err = pci_enable_sriov(pdev, nic->num_vf_en); | ||
744 | if (err) { | ||
745 | dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n", | ||
746 | nic->num_vf_en); | ||
747 | nic->num_vf_en = 0; | ||
748 | return err; | ||
749 | } | ||
750 | |||
751 | dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n", | ||
752 | nic->num_vf_en); | ||
753 | |||
754 | nic->flags |= NIC_SRIOV_ENABLED; | ||
755 | return 0; | ||
756 | } | ||
757 | |||
758 | /* Poll for BGX LMAC link status and update corresponding VF | ||
759 | * if there is a change, valid only if internal L2 switch | ||
760 | * is not present otherwise VF link is always treated as up | ||
761 | */ | ||
762 | static void nic_poll_for_link(struct work_struct *work) | ||
763 | { | ||
764 | union nic_mbx mbx = {}; | ||
765 | struct nicpf *nic; | ||
766 | struct bgx_link_status link; | ||
767 | u8 vf, bgx, lmac; | ||
768 | |||
769 | nic = container_of(work, struct nicpf, dwork.work); | ||
770 | |||
771 | mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; | ||
772 | |||
773 | for (vf = 0; vf < nic->num_vf_en; vf++) { | ||
774 | /* Poll only if VF is UP */ | ||
775 | if (!nic->vf_enabled[vf]) | ||
776 | continue; | ||
777 | |||
778 | /* Get BGX, LMAC indices for the VF */ | ||
779 | bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | ||
780 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | ||
781 | /* Get interface link status */ | ||
782 | bgx_get_lmac_link_state(nic->node, bgx, lmac, &link); | ||
783 | |||
784 | /* Inform VF only if link status changed */ | ||
785 | if (nic->link[vf] == link.link_up) | ||
786 | continue; | ||
787 | |||
788 | if (!nic->mbx_lock[vf]) { | ||
789 | nic->link[vf] = link.link_up; | ||
790 | nic->duplex[vf] = link.duplex; | ||
791 | nic->speed[vf] = link.speed; | ||
792 | |||
793 | /* Send a mbox message to VF with current link status */ | ||
794 | mbx.link_status.link_up = link.link_up; | ||
795 | mbx.link_status.duplex = link.duplex; | ||
796 | mbx.link_status.speed = link.speed; | ||
797 | nic_send_msg_to_vf(nic, vf, &mbx); | ||
798 | } | ||
799 | } | ||
800 | queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2); | ||
801 | } | ||
802 | |||
803 | static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
804 | { | ||
805 | struct device *dev = &pdev->dev; | ||
806 | struct nicpf *nic; | ||
807 | int err; | ||
808 | |||
809 | BUILD_BUG_ON(sizeof(union nic_mbx) > 16); | ||
810 | |||
811 | nic = devm_kzalloc(dev, sizeof(*nic), GFP_KERNEL); | ||
812 | if (!nic) | ||
813 | return -ENOMEM; | ||
814 | |||
815 | pci_set_drvdata(pdev, nic); | ||
816 | |||
817 | nic->pdev = pdev; | ||
818 | |||
819 | err = pci_enable_device(pdev); | ||
820 | if (err) { | ||
821 | dev_err(dev, "Failed to enable PCI device\n"); | ||
822 | pci_set_drvdata(pdev, NULL); | ||
823 | return err; | ||
824 | } | ||
825 | |||
826 | err = pci_request_regions(pdev, DRV_NAME); | ||
827 | if (err) { | ||
828 | dev_err(dev, "PCI request regions failed 0x%x\n", err); | ||
829 | goto err_disable_device; | ||
830 | } | ||
831 | |||
832 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48)); | ||
833 | if (err) { | ||
834 | dev_err(dev, "Unable to get usable DMA configuration\n"); | ||
835 | goto err_release_regions; | ||
836 | } | ||
837 | |||
838 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48)); | ||
839 | if (err) { | ||
840 | dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n"); | ||
841 | goto err_release_regions; | ||
842 | } | ||
843 | |||
844 | /* MAP PF's configuration registers */ | ||
845 | nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); | ||
846 | if (!nic->reg_base) { | ||
847 | dev_err(dev, "Cannot map config register space, aborting\n"); | ||
848 | err = -ENOMEM; | ||
849 | goto err_release_regions; | ||
850 | } | ||
851 | |||
852 | pci_read_config_byte(pdev, PCI_REVISION_ID, &nic->rev_id); | ||
853 | |||
854 | nic->node = NIC_NODE_ID(pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM)); | ||
855 | |||
856 | nic_set_lmac_vf_mapping(nic); | ||
857 | |||
858 | /* Initialize hardware */ | ||
859 | nic_init_hw(nic); | ||
860 | |||
861 | /* Set RSS TBL size for each VF */ | ||
862 | nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE; | ||
863 | |||
864 | /* Register interrupts */ | ||
865 | err = nic_register_interrupts(nic); | ||
866 | if (err) | ||
867 | goto err_release_regions; | ||
868 | |||
869 | /* Configure SRIOV */ | ||
870 | err = nic_sriov_init(pdev, nic); | ||
871 | if (err) | ||
872 | goto err_unregister_interrupts; | ||
873 | |||
874 | /* Register a physical link status poll fn() */ | ||
875 | nic->check_link = alloc_workqueue("check_link_status", | ||
876 | WQ_UNBOUND | WQ_MEM_RECLAIM, 1); | ||
877 | if (!nic->check_link) { | ||
878 | err = -ENOMEM; | ||
879 | goto err_disable_sriov; | ||
880 | } | ||
881 | |||
882 | INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link); | ||
883 | queue_delayed_work(nic->check_link, &nic->dwork, 0); | ||
884 | |||
885 | return 0; | ||
886 | |||
887 | err_disable_sriov: | ||
888 | if (nic->flags & NIC_SRIOV_ENABLED) | ||
889 | pci_disable_sriov(pdev); | ||
890 | err_unregister_interrupts: | ||
891 | nic_unregister_interrupts(nic); | ||
892 | err_release_regions: | ||
893 | pci_release_regions(pdev); | ||
894 | err_disable_device: | ||
895 | pci_disable_device(pdev); | ||
896 | pci_set_drvdata(pdev, NULL); | ||
897 | return err; | ||
898 | } | ||
899 | |||
900 | static void nic_remove(struct pci_dev *pdev) | ||
901 | { | ||
902 | struct nicpf *nic = pci_get_drvdata(pdev); | ||
903 | |||
904 | if (nic->flags & NIC_SRIOV_ENABLED) | ||
905 | pci_disable_sriov(pdev); | ||
906 | |||
907 | if (nic->check_link) { | ||
908 | /* Destroy work Queue */ | ||
909 | cancel_delayed_work(&nic->dwork); | ||
910 | flush_workqueue(nic->check_link); | ||
911 | destroy_workqueue(nic->check_link); | ||
912 | } | ||
913 | |||
914 | nic_unregister_interrupts(nic); | ||
915 | pci_release_regions(pdev); | ||
916 | pci_disable_device(pdev); | ||
917 | pci_set_drvdata(pdev, NULL); | ||
918 | } | ||
919 | |||
920 | static struct pci_driver nic_driver = { | ||
921 | .name = DRV_NAME, | ||
922 | .id_table = nic_id_table, | ||
923 | .probe = nic_probe, | ||
924 | .remove = nic_remove, | ||
925 | }; | ||
926 | |||
927 | static int __init nic_init_module(void) | ||
928 | { | ||
929 | pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); | ||
930 | |||
931 | return pci_register_driver(&nic_driver); | ||
932 | } | ||
933 | |||
934 | static void __exit nic_cleanup_module(void) | ||
935 | { | ||
936 | pci_unregister_driver(&nic_driver); | ||
937 | } | ||
938 | |||
939 | module_init(nic_init_module); | ||
940 | module_exit(nic_cleanup_module); | ||
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h new file mode 100644 index 000000000000..58197bb2f805 --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h | |||
@@ -0,0 +1,213 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2015 Cavium, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of version 2 of the GNU General Public License | ||
6 | * as published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #ifndef NIC_REG_H | ||
10 | #define NIC_REG_H | ||
11 | |||
12 | #define NIC_PF_REG_COUNT 29573 | ||
13 | #define NIC_VF_REG_COUNT 249 | ||
14 | |||
15 | /* Physical function register offsets */ | ||
16 | #define NIC_PF_CFG (0x0000) | ||
17 | #define NIC_PF_STATUS (0x0010) | ||
18 | #define NIC_PF_INTR_TIMER_CFG (0x0030) | ||
19 | #define NIC_PF_BIST_STATUS (0x0040) | ||
20 | #define NIC_PF_SOFT_RESET (0x0050) | ||
21 | #define NIC_PF_TCP_TIMER (0x0060) | ||
22 | #define NIC_PF_BP_CFG (0x0080) | ||
23 | #define NIC_PF_RRM_CFG (0x0088) | ||
24 | #define NIC_PF_CQM_CF (0x00A0) | ||
25 | #define NIC_PF_CNM_CF (0x00A8) | ||
26 | #define NIC_PF_CNM_STATUS (0x00B0) | ||
27 | #define NIC_PF_CQ_AVG_CFG (0x00C0) | ||
28 | #define NIC_PF_RRM_AVG_CFG (0x00C8) | ||
29 | #define NIC_PF_INTF_0_1_SEND_CFG (0x0200) | ||
30 | #define NIC_PF_INTF_0_1_BP_CFG (0x0208) | ||
31 | #define NIC_PF_INTF_0_1_BP_DIS_0_1 (0x0210) | ||
32 | #define NIC_PF_INTF_0_1_BP_SW_0_1 (0x0220) | ||
33 | #define NIC_PF_RBDR_BP_STATE_0_3 (0x0240) | ||
34 | #define NIC_PF_MAILBOX_INT (0x0410) | ||
35 | #define NIC_PF_MAILBOX_INT_W1S (0x0430) | ||
36 | #define NIC_PF_MAILBOX_ENA_W1C (0x0450) | ||
37 | #define NIC_PF_MAILBOX_ENA_W1S (0x0470) | ||
38 | #define NIC_PF_RX_ETYPE_0_7 (0x0500) | ||
39 | #define NIC_PF_PKIND_0_15_CFG (0x0600) | ||
40 | #define NIC_PF_ECC0_FLIP0 (0x1000) | ||
41 | #define NIC_PF_ECC1_FLIP0 (0x1008) | ||
42 | #define NIC_PF_ECC2_FLIP0 (0x1010) | ||
43 | #define NIC_PF_ECC3_FLIP0 (0x1018) | ||
44 | #define NIC_PF_ECC0_FLIP1 (0x1080) | ||
45 | #define NIC_PF_ECC1_FLIP1 (0x1088) | ||
46 | #define NIC_PF_ECC2_FLIP1 (0x1090) | ||
47 | #define NIC_PF_ECC3_FLIP1 (0x1098) | ||
48 | #define NIC_PF_ECC0_CDIS (0x1100) | ||
49 | #define NIC_PF_ECC1_CDIS (0x1108) | ||
50 | #define NIC_PF_ECC2_CDIS (0x1110) | ||
51 | #define NIC_PF_ECC3_CDIS (0x1118) | ||
52 | #define NIC_PF_BIST0_STATUS (0x1280) | ||
53 | #define NIC_PF_BIST1_STATUS (0x1288) | ||
54 | #define NIC_PF_BIST2_STATUS (0x1290) | ||
55 | #define NIC_PF_BIST3_STATUS (0x1298) | ||
56 | #define NIC_PF_ECC0_SBE_INT (0x2000) | ||
57 | #define NIC_PF_ECC0_SBE_INT_W1S (0x2008) | ||
58 | #define NIC_PF_ECC0_SBE_ENA_W1C (0x2010) | ||
59 | #define NIC_PF_ECC0_SBE_ENA_W1S (0x2018) | ||
60 | #define NIC_PF_ECC0_DBE_INT (0x2100) | ||
61 | #define NIC_PF_ECC0_DBE_INT_W1S (0x2108) | ||
62 | #define NIC_PF_ECC0_DBE_ENA_W1C (0x2110) | ||
63 | #define NIC_PF_ECC0_DBE_ENA_W1S (0x2118) | ||
64 | #define NIC_PF_ECC1_SBE_INT (0x2200) | ||
65 | #define NIC_PF_ECC1_SBE_INT_W1S (0x2208) | ||
66 | #define NIC_PF_ECC1_SBE_ENA_W1C (0x2210) | ||
67 | #define NIC_PF_ECC1_SBE_ENA_W1S (0x2218) | ||
68 | #define NIC_PF_ECC1_DBE_INT (0x2300) | ||
69 | #define NIC_PF_ECC1_DBE_INT_W1S (0x2308) | ||
70 | #define NIC_PF_ECC1_DBE_ENA_W1C (0x2310) | ||
71 | #define NIC_PF_ECC1_DBE_ENA_W1S (0x2318) | ||
72 | #define NIC_PF_ECC2_SBE_INT (0x2400) | ||
73 | #define NIC_PF_ECC2_SBE_INT_W1S (0x2408) | ||
74 | #define NIC_PF_ECC2_SBE_ENA_W1C (0x2410) | ||
75 | #define NIC_PF_ECC2_SBE_ENA_W1S (0x2418) | ||
76 | #define NIC_PF_ECC2_DBE_INT (0x2500) | ||
77 | #define NIC_PF_ECC2_DBE_INT_W1S (0x2508) | ||
78 | #define NIC_PF_ECC2_DBE_ENA_W1C (0x2510) | ||
79 | #define NIC_PF_ECC2_DBE_ENA_W1S (0x2518) | ||
80 | #define NIC_PF_ECC3_SBE_INT (0x2600) | ||
81 | #define NIC_PF_ECC3_SBE_INT_W1S (0x2608) | ||
82 | #define NIC_PF_ECC3_SBE_ENA_W1C (0x2610) | ||
83 | #define NIC_PF_ECC3_SBE_ENA_W1S (0x2618) | ||
84 | #define NIC_PF_ECC3_DBE_INT (0x2700) | ||
85 | #define NIC_PF_ECC3_DBE_INT_W1S (0x2708) | ||
86 | #define NIC_PF_ECC3_DBE_ENA_W1C (0x2710) | ||
87 | #define NIC_PF_ECC3_DBE_ENA_W1S (0x2718) | ||
88 | #define NIC_PF_CPI_0_2047_CFG (0x200000) | ||
89 | #define NIC_PF_RSSI_0_4097_RQ (0x220000) | ||
90 | #define NIC_PF_LMAC_0_7_CFG (0x240000) | ||
91 | #define NIC_PF_LMAC_0_7_SW_XOFF (0x242000) | ||
92 | #define NIC_PF_LMAC_0_7_CREDIT (0x244000) | ||
93 | #define NIC_PF_CHAN_0_255_TX_CFG (0x400000) | ||
94 | #define NIC_PF_CHAN_0_255_RX_CFG (0x420000) | ||
95 | #define NIC_PF_CHAN_0_255_SW_XOFF (0x440000) | ||
96 | #define NIC_PF_CHAN_0_255_CREDIT (0x460000) | ||
97 | #define NIC_PF_CHAN_0_255_RX_BP_CFG (0x480000) | ||
98 | #define NIC_PF_SW_SYNC_RX (0x490000) | ||
99 | #define NIC_PF_SW_SYNC_RX_DONE (0x490008) | ||
100 | #define NIC_PF_TL2_0_63_CFG (0x500000) | ||
101 | #define NIC_PF_TL2_0_63_PRI (0x520000) | ||
102 | #define NIC_PF_TL2_0_63_SH_STATUS (0x580000) | ||
103 | #define NIC_PF_TL3A_0_63_CFG (0x5F0000) | ||
104 | #define NIC_PF_TL3_0_255_CFG (0x600000) | ||
105 | #define NIC_PF_TL3_0_255_CHAN (0x620000) | ||
106 | #define NIC_PF_TL3_0_255_PIR (0x640000) | ||
107 | #define NIC_PF_TL3_0_255_SW_XOFF (0x660000) | ||
108 | #define NIC_PF_TL3_0_255_CNM_RATE (0x680000) | ||
109 | #define NIC_PF_TL3_0_255_SH_STATUS (0x6A0000) | ||
110 | #define NIC_PF_TL4A_0_255_CFG (0x6F0000) | ||
111 | #define NIC_PF_TL4_0_1023_CFG (0x800000) | ||
112 | #define NIC_PF_TL4_0_1023_SW_XOFF (0x820000) | ||
113 | #define NIC_PF_TL4_0_1023_SH_STATUS (0x840000) | ||
114 | #define NIC_PF_TL4A_0_1023_CNM_RATE (0x880000) | ||
115 | #define NIC_PF_TL4A_0_1023_CNM_STATUS (0x8A0000) | ||
116 | #define NIC_PF_VF_0_127_MAILBOX_0_1 (0x20002030) | ||
117 | #define NIC_PF_VNIC_0_127_TX_STAT_0_4 (0x20004000) | ||
118 | #define NIC_PF_VNIC_0_127_RX_STAT_0_13 (0x20004100) | ||
119 | #define NIC_PF_QSET_0_127_LOCK_0_15 (0x20006000) | ||
120 | #define NIC_PF_QSET_0_127_CFG (0x20010000) | ||
121 | #define NIC_PF_QSET_0_127_RQ_0_7_CFG (0x20010400) | ||
122 | #define NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG (0x20010420) | ||
123 | #define NIC_PF_QSET_0_127_RQ_0_7_BP_CFG (0x20010500) | ||
124 | #define NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1 (0x20010600) | ||
125 | #define NIC_PF_QSET_0_127_SQ_0_7_CFG (0x20010C00) | ||
126 | #define NIC_PF_QSET_0_127_SQ_0_7_CFG2 (0x20010C08) | ||
127 | #define NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1 (0x20010D00) | ||
128 | |||
129 | #define NIC_PF_MSIX_VEC_0_18_ADDR (0x000000) | ||
130 | #define NIC_PF_MSIX_VEC_0_CTL (0x000008) | ||
131 | #define NIC_PF_MSIX_PBA_0 (0x0F0000) | ||
132 | |||
133 | /* Virtual function register offsets */ | ||
134 | #define NIC_VNIC_CFG (0x000020) | ||
135 | #define NIC_VF_PF_MAILBOX_0_1 (0x000130) | ||
136 | #define NIC_VF_INT (0x000200) | ||
137 | #define NIC_VF_INT_W1S (0x000220) | ||
138 | #define NIC_VF_ENA_W1C (0x000240) | ||
139 | #define NIC_VF_ENA_W1S (0x000260) | ||
140 | |||
141 | #define NIC_VNIC_RSS_CFG (0x0020E0) | ||
142 | #define NIC_VNIC_RSS_KEY_0_4 (0x002200) | ||
143 | #define NIC_VNIC_TX_STAT_0_4 (0x004000) | ||
144 | #define NIC_VNIC_RX_STAT_0_13 (0x004100) | ||
145 | #define NIC_QSET_RQ_GEN_CFG (0x010010) | ||
146 | |||
147 | #define NIC_QSET_CQ_0_7_CFG (0x010400) | ||
148 | #define NIC_QSET_CQ_0_7_CFG2 (0x010408) | ||
149 | #define NIC_QSET_CQ_0_7_THRESH (0x010410) | ||
150 | #define NIC_QSET_CQ_0_7_BASE (0x010420) | ||
151 | #define NIC_QSET_CQ_0_7_HEAD (0x010428) | ||
152 | #define NIC_QSET_CQ_0_7_TAIL (0x010430) | ||
153 | #define NIC_QSET_CQ_0_7_DOOR (0x010438) | ||
154 | #define NIC_QSET_CQ_0_7_STATUS (0x010440) | ||
155 | #define NIC_QSET_CQ_0_7_STATUS2 (0x010448) | ||
156 | #define NIC_QSET_CQ_0_7_DEBUG (0x010450) | ||
157 | |||
158 | #define NIC_QSET_RQ_0_7_CFG (0x010600) | ||
159 | #define NIC_QSET_RQ_0_7_STAT_0_1 (0x010700) | ||
160 | |||
161 | #define NIC_QSET_SQ_0_7_CFG (0x010800) | ||
162 | #define NIC_QSET_SQ_0_7_THRESH (0x010810) | ||
163 | #define NIC_QSET_SQ_0_7_BASE (0x010820) | ||
164 | #define NIC_QSET_SQ_0_7_HEAD (0x010828) | ||
165 | #define NIC_QSET_SQ_0_7_TAIL (0x010830) | ||
166 | #define NIC_QSET_SQ_0_7_DOOR (0x010838) | ||
167 | #define NIC_QSET_SQ_0_7_STATUS (0x010840) | ||
168 | #define NIC_QSET_SQ_0_7_DEBUG (0x010848) | ||
169 | #define NIC_QSET_SQ_0_7_CNM_CHG (0x010860) | ||
170 | #define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900) | ||
171 | |||
172 | #define NIC_QSET_RBDR_0_1_CFG (0x010C00) | ||
173 | #define NIC_QSET_RBDR_0_1_THRESH (0x010C10) | ||
174 | #define NIC_QSET_RBDR_0_1_BASE (0x010C20) | ||
175 | #define NIC_QSET_RBDR_0_1_HEAD (0x010C28) | ||
176 | #define NIC_QSET_RBDR_0_1_TAIL (0x010C30) | ||
177 | #define NIC_QSET_RBDR_0_1_DOOR (0x010C38) | ||
178 | #define NIC_QSET_RBDR_0_1_STATUS0 (0x010C40) | ||
179 | #define NIC_QSET_RBDR_0_1_STATUS1 (0x010C48) | ||
180 | #define NIC_QSET_RBDR_0_1_PREFETCH_STATUS (0x010C50) | ||
181 | |||
182 | #define NIC_VF_MSIX_VECTOR_0_19_ADDR (0x000000) | ||
183 | #define NIC_VF_MSIX_VECTOR_0_19_CTL (0x000008) | ||
184 | #define NIC_VF_MSIX_PBA (0x0F0000) | ||
185 | |||
186 | /* Offsets within registers */ | ||
187 | #define NIC_MSIX_VEC_SHIFT 4 | ||
188 | #define NIC_Q_NUM_SHIFT 18 | ||
189 | #define NIC_QS_ID_SHIFT 21 | ||
190 | #define NIC_VF_NUM_SHIFT 21 | ||
191 | |||
192 | /* Port kind configuration register */ | ||
193 | struct pkind_cfg { | ||
194 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
195 | u64 reserved_42_63:22; | ||
196 | u64 hdr_sl:5; /* Header skip length */ | ||
197 | u64 rx_hdr:3; /* TNS Receive header present */ | ||
198 | u64 lenerr_en:1;/* L2 length error check enable */ | ||
199 | u64 reserved_32_32:1; | ||
200 | u64 maxlen:16; /* Max frame size */ | ||
201 | u64 minlen:16; /* Min frame size */ | ||
202 | #elif defined(__LITTLE_ENDIAN_BITFIELD) | ||
203 | u64 minlen:16; | ||
204 | u64 maxlen:16; | ||
205 | u64 reserved_32_32:1; | ||
206 | u64 lenerr_en:1; | ||
207 | u64 rx_hdr:3; | ||
208 | u64 hdr_sl:5; | ||
209 | u64 reserved_42_63:22; | ||
210 | #endif | ||
211 | }; | ||
212 | |||
213 | #endif /* NIC_REG_H */ | ||
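The pkind_cfg bitfield above packs the port-kind parameters into one 64-bit register, with the field order flipped between big- and little-endian builds. Below is a minimal standalone sketch (not driver code) of how the little-endian layout maps named fields onto a raw u64 value; it assumes a little-endian host with GCC-style LSB-first bitfield allocation, and the field values are hypothetical.

#include <stdint.h>
#include <stdio.h>

union pkind_reg {			/* mirrors the __LITTLE_ENDIAN_BITFIELD branch */
	uint64_t u;
	struct {
		uint64_t minlen:16;		/* Min frame size */
		uint64_t maxlen:16;		/* Max frame size */
		uint64_t reserved_32_32:1;
		uint64_t lenerr_en:1;		/* L2 length error check enable */
		uint64_t rx_hdr:3;		/* TNS Receive header present */
		uint64_t hdr_sl:5;		/* Header skip length */
		uint64_t reserved_42_63:22;
	} s;
};

int main(void)
{
	union pkind_reg r = { .u = 0 };

	r.s.minlen = 64;			/* hypothetical example values */
	r.s.maxlen = 9200;
	r.s.lenerr_en = 1;
	printf("raw PKIND value: 0x%016llx\n", (unsigned long long)r.u);
	return 0;
}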
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c new file mode 100644 index 000000000000..0fc4a536afc9 --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | |||
@@ -0,0 +1,601 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2015 Cavium, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of version 2 of the GNU General Public License | ||
6 | * as published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | /* ETHTOOL Support for VNIC_VF Device */ | ||
10 | |||
11 | #include <linux/pci.h> | ||
12 | |||
13 | #include "nic_reg.h" | ||
14 | #include "nic.h" | ||
15 | #include "nicvf_queues.h" | ||
16 | #include "q_struct.h" | ||
17 | #include "thunder_bgx.h" | ||
18 | |||
19 | #define DRV_NAME "thunder-nicvf" | ||
20 | #define DRV_VERSION "1.0" | ||
21 | |||
22 | struct nicvf_stat { | ||
23 | char name[ETH_GSTRING_LEN]; | ||
24 | unsigned int index; | ||
25 | }; | ||
26 | |||
27 | #define NICVF_HW_STAT(stat) { \ | ||
28 | .name = #stat, \ | ||
29 | .index = offsetof(struct nicvf_hw_stats, stat) / sizeof(u64), \ | ||
30 | } | ||
31 | |||
32 | #define NICVF_DRV_STAT(stat) { \ | ||
33 | .name = #stat, \ | ||
34 | .index = offsetof(struct nicvf_drv_stats, stat) / sizeof(u64), \ | ||
35 | } | ||
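The NICVF_HW_STAT/NICVF_DRV_STAT macros above record each counter's position as a flat u64 index, which nicvf_get_ethtool_stats() later uses to walk the stats structs as plain u64 arrays. Here is a small standalone sketch (not driver code) of that offsetof-based indexing, using a hypothetical stand-in struct:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_stats {		/* hypothetical stand-in for nicvf_hw_stats */
	uint64_t rx_bytes_ok;
	uint64_t rx_ucast_frames_ok;
	uint64_t rx_fcs_errors;
};

#define DEMO_STAT_INDEX(stat) \
	(offsetof(struct demo_stats, stat) / sizeof(uint64_t))

int main(void)
{
	struct demo_stats s = { 100, 2, 1 };
	uint64_t *flat = (uint64_t *)&s;

	/* Index 2 picks out rx_fcs_errors, just as the driver walks
	 * (u64 *)&nic->stats by stat index in nicvf_get_ethtool_stats().
	 */
	printf("rx_fcs_errors = %llu\n",
	       (unsigned long long)flat[DEMO_STAT_INDEX(rx_fcs_errors)]);
	return 0;
}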
36 | |||
37 | static const struct nicvf_stat nicvf_hw_stats[] = { | ||
38 | NICVF_HW_STAT(rx_bytes_ok), | ||
39 | NICVF_HW_STAT(rx_ucast_frames_ok), | ||
40 | NICVF_HW_STAT(rx_bcast_frames_ok), | ||
41 | NICVF_HW_STAT(rx_mcast_frames_ok), | ||
42 | NICVF_HW_STAT(rx_fcs_errors), | ||
43 | NICVF_HW_STAT(rx_l2_errors), | ||
44 | NICVF_HW_STAT(rx_drop_red), | ||
45 | NICVF_HW_STAT(rx_drop_red_bytes), | ||
46 | NICVF_HW_STAT(rx_drop_overrun), | ||
47 | NICVF_HW_STAT(rx_drop_overrun_bytes), | ||
48 | NICVF_HW_STAT(rx_drop_bcast), | ||
49 | NICVF_HW_STAT(rx_drop_mcast), | ||
50 | NICVF_HW_STAT(rx_drop_l3_bcast), | ||
51 | NICVF_HW_STAT(rx_drop_l3_mcast), | ||
52 | NICVF_HW_STAT(tx_bytes_ok), | ||
53 | NICVF_HW_STAT(tx_ucast_frames_ok), | ||
54 | NICVF_HW_STAT(tx_bcast_frames_ok), | ||
55 | NICVF_HW_STAT(tx_mcast_frames_ok), | ||
56 | }; | ||
57 | |||
58 | static const struct nicvf_stat nicvf_drv_stats[] = { | ||
59 | NICVF_DRV_STAT(rx_frames_ok), | ||
60 | NICVF_DRV_STAT(rx_frames_64), | ||
61 | NICVF_DRV_STAT(rx_frames_127), | ||
62 | NICVF_DRV_STAT(rx_frames_255), | ||
63 | NICVF_DRV_STAT(rx_frames_511), | ||
64 | NICVF_DRV_STAT(rx_frames_1023), | ||
65 | NICVF_DRV_STAT(rx_frames_1518), | ||
66 | NICVF_DRV_STAT(rx_frames_jumbo), | ||
67 | NICVF_DRV_STAT(rx_drops), | ||
68 | NICVF_DRV_STAT(tx_frames_ok), | ||
69 | NICVF_DRV_STAT(tx_busy), | ||
70 | NICVF_DRV_STAT(tx_tso), | ||
71 | NICVF_DRV_STAT(tx_drops), | ||
72 | }; | ||
73 | |||
74 | static const struct nicvf_stat nicvf_queue_stats[] = { | ||
75 | { "bytes", 0 }, | ||
76 | { "frames", 1 }, | ||
77 | }; | ||
78 | |||
79 | static const unsigned int nicvf_n_hw_stats = ARRAY_SIZE(nicvf_hw_stats); | ||
80 | static const unsigned int nicvf_n_drv_stats = ARRAY_SIZE(nicvf_drv_stats); | ||
81 | static const unsigned int nicvf_n_queue_stats = ARRAY_SIZE(nicvf_queue_stats); | ||
82 | |||
83 | static int nicvf_get_settings(struct net_device *netdev, | ||
84 | struct ethtool_cmd *cmd) | ||
85 | { | ||
86 | struct nicvf *nic = netdev_priv(netdev); | ||
87 | |||
88 | cmd->supported = 0; | ||
89 | cmd->transceiver = XCVR_EXTERNAL; | ||
90 | if (nic->speed <= 1000) { | ||
91 | cmd->port = PORT_MII; | ||
92 | cmd->autoneg = AUTONEG_ENABLE; | ||
93 | } else { | ||
94 | cmd->port = PORT_FIBRE; | ||
95 | cmd->autoneg = AUTONEG_DISABLE; | ||
96 | } | ||
97 | cmd->duplex = nic->duplex; | ||
98 | ethtool_cmd_speed_set(cmd, nic->speed); | ||
99 | |||
100 | return 0; | ||
101 | } | ||
102 | |||
103 | static void nicvf_get_drvinfo(struct net_device *netdev, | ||
104 | struct ethtool_drvinfo *info) | ||
105 | { | ||
106 | struct nicvf *nic = netdev_priv(netdev); | ||
107 | |||
108 | strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); | ||
109 | strlcpy(info->version, DRV_VERSION, sizeof(info->version)); | ||
110 | strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info)); | ||
111 | } | ||
112 | |||
113 | static u32 nicvf_get_msglevel(struct net_device *netdev) | ||
114 | { | ||
115 | struct nicvf *nic = netdev_priv(netdev); | ||
116 | |||
117 | return nic->msg_enable; | ||
118 | } | ||
119 | |||
120 | static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl) | ||
121 | { | ||
122 | struct nicvf *nic = netdev_priv(netdev); | ||
123 | |||
124 | nic->msg_enable = lvl; | ||
125 | } | ||
126 | |||
127 | static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data) | ||
128 | { | ||
129 | int stats, qidx; | ||
130 | |||
131 | if (sset != ETH_SS_STATS) | ||
132 | return; | ||
133 | |||
134 | for (stats = 0; stats < nicvf_n_hw_stats; stats++) { | ||
135 | memcpy(data, nicvf_hw_stats[stats].name, ETH_GSTRING_LEN); | ||
136 | data += ETH_GSTRING_LEN; | ||
137 | } | ||
138 | |||
139 | for (stats = 0; stats < nicvf_n_drv_stats; stats++) { | ||
140 | memcpy(data, nicvf_drv_stats[stats].name, ETH_GSTRING_LEN); | ||
141 | data += ETH_GSTRING_LEN; | ||
142 | } | ||
143 | |||
144 | for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) { | ||
145 | for (stats = 0; stats < nicvf_n_queue_stats; stats++) { | ||
146 | sprintf(data, "rxq%d: %s", qidx, | ||
147 | nicvf_queue_stats[stats].name); | ||
148 | data += ETH_GSTRING_LEN; | ||
149 | } | ||
150 | } | ||
151 | |||
152 | for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) { | ||
153 | for (stats = 0; stats < nicvf_n_queue_stats; stats++) { | ||
154 | sprintf(data, "txq%d: %s", qidx, | ||
155 | nicvf_queue_stats[stats].name); | ||
156 | data += ETH_GSTRING_LEN; | ||
157 | } | ||
158 | } | ||
159 | |||
160 | for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) { | ||
161 | sprintf(data, "bgx_rxstat%d: ", stats); | ||
162 | data += ETH_GSTRING_LEN; | ||
163 | } | ||
164 | |||
165 | for (stats = 0; stats < BGX_TX_STATS_COUNT; stats++) { | ||
166 | sprintf(data, "bgx_txstat%d: ", stats); | ||
167 | data += ETH_GSTRING_LEN; | ||
168 | } | ||
169 | } | ||
170 | |||
171 | static int nicvf_get_sset_count(struct net_device *netdev, int sset) | ||
172 | { | ||
173 | if (sset != ETH_SS_STATS) | ||
174 | return -EINVAL; | ||
175 | |||
176 | return nicvf_n_hw_stats + nicvf_n_drv_stats + | ||
177 | (nicvf_n_queue_stats * | ||
178 | (MAX_RCV_QUEUES_PER_QS + MAX_SND_QUEUES_PER_QS)) + | ||
179 | BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT; | ||
180 | } | ||
181 | |||
182 | static void nicvf_get_ethtool_stats(struct net_device *netdev, | ||
183 | struct ethtool_stats *stats, u64 *data) | ||
184 | { | ||
185 | struct nicvf *nic = netdev_priv(netdev); | ||
186 | int stat, qidx; | ||
187 | |||
188 | nicvf_update_stats(nic); | ||
189 | |||
190 | /* Update LMAC stats */ | ||
191 | nicvf_update_lmac_stats(nic); | ||
192 | |||
193 | for (stat = 0; stat < nicvf_n_hw_stats; stat++) | ||
194 | *(data++) = ((u64 *)&nic->stats) | ||
195 | [nicvf_hw_stats[stat].index]; | ||
196 | for (stat = 0; stat < nicvf_n_drv_stats; stat++) | ||
197 | *(data++) = ((u64 *)&nic->drv_stats) | ||
198 | [nicvf_drv_stats[stat].index]; | ||
199 | |||
200 | for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) { | ||
201 | for (stat = 0; stat < nicvf_n_queue_stats; stat++) | ||
202 | *(data++) = ((u64 *)&nic->qs->rq[qidx].stats) | ||
203 | [nicvf_queue_stats[stat].index]; | ||
204 | } | ||
205 | |||
206 | for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) { | ||
207 | for (stat = 0; stat < nicvf_n_queue_stats; stat++) | ||
208 | *(data++) = ((u64 *)&nic->qs->sq[qidx].stats) | ||
209 | [nicvf_queue_stats[stat].index]; | ||
210 | } | ||
211 | |||
212 | for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++) | ||
213 | *(data++) = nic->bgx_stats.rx_stats[stat]; | ||
214 | for (stat = 0; stat < BGX_TX_STATS_COUNT; stat++) | ||
215 | *(data++) = nic->bgx_stats.tx_stats[stat]; | ||
216 | } | ||
217 | |||
218 | static int nicvf_get_regs_len(struct net_device *dev) | ||
219 | { | ||
220 | return sizeof(u64) * NIC_VF_REG_COUNT; | ||
221 | } | ||
222 | |||
223 | static void nicvf_get_regs(struct net_device *dev, | ||
224 | struct ethtool_regs *regs, void *reg) | ||
225 | { | ||
226 | struct nicvf *nic = netdev_priv(dev); | ||
227 | u64 *p = (u64 *)reg; | ||
228 | u64 reg_offset; | ||
229 | int mbox, key, stat, q; | ||
230 | int i = 0; | ||
231 | |||
232 | regs->version = 0; | ||
233 | memset(p, 0, NIC_VF_REG_COUNT * sizeof(u64)); | ||
234 | |||
235 | p[i++] = nicvf_reg_read(nic, NIC_VNIC_CFG); | ||
236 | /* Mailbox registers */ | ||
237 | for (mbox = 0; mbox < NIC_PF_VF_MAILBOX_SIZE; mbox++) | ||
238 | p[i++] = nicvf_reg_read(nic, | ||
239 | NIC_VF_PF_MAILBOX_0_1 | (mbox << 3)); | ||
240 | |||
241 | p[i++] = nicvf_reg_read(nic, NIC_VF_INT); | ||
242 | p[i++] = nicvf_reg_read(nic, NIC_VF_INT_W1S); | ||
243 | p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1C); | ||
244 | p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1S); | ||
245 | p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG); | ||
246 | |||
247 | for (key = 0; key < RSS_HASH_KEY_SIZE; key++) | ||
248 | p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_KEY_0_4 | (key << 3)); | ||
249 | |||
250 | /* Tx/Rx statistics */ | ||
251 | for (stat = 0; stat < TX_STATS_ENUM_LAST; stat++) | ||
252 | p[i++] = nicvf_reg_read(nic, | ||
253 | NIC_VNIC_TX_STAT_0_4 | (stat << 3)); | ||
254 | |||
255 | for (stat = 0; stat < RX_STATS_ENUM_LAST; stat++) | ||
256 | p[i++] = nicvf_reg_read(nic, | ||
257 | NIC_VNIC_RX_STAT_0_13 | (stat << 3)); | ||
258 | |||
259 | p[i++] = nicvf_reg_read(nic, NIC_QSET_RQ_GEN_CFG); | ||
260 | |||
261 | /* All completion queue's registers */ | ||
262 | for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) { | ||
263 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q); | ||
264 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q); | ||
265 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q); | ||
266 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q); | ||
267 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q); | ||
268 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q); | ||
269 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q); | ||
270 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q); | ||
271 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS2, q); | ||
272 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DEBUG, q); | ||
273 | } | ||
274 | |||
275 | /* All receive queue's registers */ | ||
276 | for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++) { | ||
277 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_CFG, q); | ||
278 | p[i++] = nicvf_queue_reg_read(nic, | ||
279 | NIC_QSET_RQ_0_7_STAT_0_1, q); | ||
280 | reg_offset = NIC_QSET_RQ_0_7_STAT_0_1 | (1 << 3); | ||
281 | p[i++] = nicvf_queue_reg_read(nic, reg_offset, q); | ||
282 | } | ||
283 | |||
284 | for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++) { | ||
285 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, q); | ||
286 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_THRESH, q); | ||
287 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_BASE, q); | ||
288 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, q); | ||
289 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, q); | ||
290 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q); | ||
291 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q); | ||
292 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q); | ||
293 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q); | ||
294 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q); | ||
295 | reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3); | ||
296 | p[i++] = nicvf_queue_reg_read(nic, reg_offset, q); | ||
297 | } | ||
298 | |||
299 | for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++) { | ||
300 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_CFG, q); | ||
301 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_THRESH, q); | ||
302 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_BASE, q); | ||
303 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, q); | ||
304 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, q); | ||
305 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_DOOR, q); | ||
306 | p[i++] = nicvf_queue_reg_read(nic, | ||
307 | NIC_QSET_RBDR_0_1_STATUS0, q); | ||
308 | p[i++] = nicvf_queue_reg_read(nic, | ||
309 | NIC_QSET_RBDR_0_1_STATUS1, q); | ||
310 | reg_offset = NIC_QSET_RBDR_0_1_PREFETCH_STATUS; | ||
311 | p[i++] = nicvf_queue_reg_read(nic, reg_offset, q); | ||
312 | } | ||
313 | } | ||
314 | |||
315 | static int nicvf_get_coalesce(struct net_device *netdev, | ||
316 | struct ethtool_coalesce *cmd) | ||
317 | { | ||
318 | struct nicvf *nic = netdev_priv(netdev); | ||
319 | |||
320 | cmd->rx_coalesce_usecs = nic->cq_coalesce_usecs; | ||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | static void nicvf_get_ringparam(struct net_device *netdev, | ||
325 | struct ethtool_ringparam *ring) | ||
326 | { | ||
327 | struct nicvf *nic = netdev_priv(netdev); | ||
328 | struct queue_set *qs = nic->qs; | ||
329 | |||
330 | ring->rx_max_pending = MAX_RCV_BUF_COUNT; | ||
331 | ring->rx_pending = qs->rbdr_len; | ||
332 | ring->tx_max_pending = MAX_SND_QUEUE_LEN; | ||
333 | ring->tx_pending = qs->sq_len; | ||
334 | } | ||
335 | |||
336 | static int nicvf_get_rss_hash_opts(struct nicvf *nic, | ||
337 | struct ethtool_rxnfc *info) | ||
338 | { | ||
339 | info->data = 0; | ||
340 | |||
341 | switch (info->flow_type) { | ||
342 | case TCP_V4_FLOW: | ||
343 | case TCP_V6_FLOW: | ||
344 | case UDP_V4_FLOW: | ||
345 | case UDP_V6_FLOW: | ||
346 | case SCTP_V4_FLOW: | ||
347 | case SCTP_V6_FLOW: | ||
348 | info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; /* fall through */ | ||
349 | case IPV4_FLOW: | ||
350 | case IPV6_FLOW: | ||
351 | info->data |= RXH_IP_SRC | RXH_IP_DST; | ||
352 | break; | ||
353 | default: | ||
354 | return -EINVAL; | ||
355 | } | ||
356 | |||
357 | return 0; | ||
358 | } | ||
359 | |||
360 | static int nicvf_get_rxnfc(struct net_device *dev, | ||
361 | struct ethtool_rxnfc *info, u32 *rules) | ||
362 | { | ||
363 | struct nicvf *nic = netdev_priv(dev); | ||
364 | int ret = -EOPNOTSUPP; | ||
365 | |||
366 | switch (info->cmd) { | ||
367 | case ETHTOOL_GRXRINGS: | ||
368 | info->data = nic->qs->rq_cnt; | ||
369 | ret = 0; | ||
370 | break; | ||
371 | case ETHTOOL_GRXFH: | ||
372 | return nicvf_get_rss_hash_opts(nic, info); | ||
373 | default: | ||
374 | break; | ||
375 | } | ||
376 | return ret; | ||
377 | } | ||
378 | |||
379 | static int nicvf_set_rss_hash_opts(struct nicvf *nic, | ||
380 | struct ethtool_rxnfc *info) | ||
381 | { | ||
382 | struct nicvf_rss_info *rss = &nic->rss_info; | ||
383 | u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG); | ||
384 | |||
385 | if (!rss->enable) | ||
386 | netdev_err(nic->netdev, | ||
387 | "RSS is disabled, hash cannot be set\n"); | ||
388 | |||
389 | netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n", | ||
390 | info->flow_type, info->data); | ||
391 | |||
392 | if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST)) | ||
393 | return -EINVAL; | ||
394 | |||
395 | switch (info->flow_type) { | ||
396 | case TCP_V4_FLOW: | ||
397 | case TCP_V6_FLOW: | ||
398 | switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { | ||
399 | case 0: | ||
400 | rss_cfg &= ~(1ULL << RSS_HASH_TCP); | ||
401 | break; | ||
402 | case (RXH_L4_B_0_1 | RXH_L4_B_2_3): | ||
403 | rss_cfg |= (1ULL << RSS_HASH_TCP); | ||
404 | break; | ||
405 | default: | ||
406 | return -EINVAL; | ||
407 | } | ||
408 | break; | ||
409 | case UDP_V4_FLOW: | ||
410 | case UDP_V6_FLOW: | ||
411 | switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { | ||
412 | case 0: | ||
413 | rss_cfg &= ~(1ULL << RSS_HASH_UDP); | ||
414 | break; | ||
415 | case (RXH_L4_B_0_1 | RXH_L4_B_2_3): | ||
416 | rss_cfg |= (1ULL << RSS_HASH_UDP); | ||
417 | break; | ||
418 | default: | ||
419 | return -EINVAL; | ||
420 | } | ||
421 | break; | ||
422 | case SCTP_V4_FLOW: | ||
423 | case SCTP_V6_FLOW: | ||
424 | switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { | ||
425 | case 0: | ||
426 | rss_cfg &= ~(1ULL << RSS_HASH_L4ETC); | ||
427 | break; | ||
428 | case (RXH_L4_B_0_1 | RXH_L4_B_2_3): | ||
429 | rss_cfg |= (1ULL << RSS_HASH_L4ETC); | ||
430 | break; | ||
431 | default: | ||
432 | return -EINVAL; | ||
433 | } | ||
434 | break; | ||
435 | case IPV4_FLOW: | ||
436 | case IPV6_FLOW: | ||
437 | rss_cfg = RSS_HASH_IP; | ||
438 | break; | ||
439 | default: | ||
440 | return -EINVAL; | ||
441 | } | ||
442 | |||
443 | nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss_cfg); | ||
444 | return 0; | ||
445 | } | ||
446 | |||
447 | static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) | ||
448 | { | ||
449 | struct nicvf *nic = netdev_priv(dev); | ||
450 | |||
451 | switch (info->cmd) { | ||
452 | case ETHTOOL_SRXFH: | ||
453 | return nicvf_set_rss_hash_opts(nic, info); | ||
454 | default: | ||
455 | break; | ||
456 | } | ||
457 | return -EOPNOTSUPP; | ||
458 | } | ||
459 | |||
460 | static u32 nicvf_get_rxfh_key_size(struct net_device *netdev) | ||
461 | { | ||
462 | return RSS_HASH_KEY_SIZE * sizeof(u64); | ||
463 | } | ||
464 | |||
465 | static u32 nicvf_get_rxfh_indir_size(struct net_device *dev) | ||
466 | { | ||
467 | struct nicvf *nic = netdev_priv(dev); | ||
468 | |||
469 | return nic->rss_info.rss_size; | ||
470 | } | ||
471 | |||
472 | static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey, | ||
473 | u8 *hfunc) | ||
474 | { | ||
475 | struct nicvf *nic = netdev_priv(dev); | ||
476 | struct nicvf_rss_info *rss = &nic->rss_info; | ||
477 | int idx; | ||
478 | |||
479 | if (indir) { | ||
480 | for (idx = 0; idx < rss->rss_size; idx++) | ||
481 | indir[idx] = rss->ind_tbl[idx]; | ||
482 | } | ||
483 | |||
484 | if (hkey) | ||
485 | memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64)); | ||
486 | |||
487 | if (hfunc) | ||
488 | *hfunc = ETH_RSS_HASH_TOP; | ||
489 | |||
490 | return 0; | ||
491 | } | ||
492 | |||
493 | static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir, | ||
494 | const u8 *hkey, u8 hfunc) | ||
495 | { | ||
496 | struct nicvf *nic = netdev_priv(dev); | ||
497 | struct nicvf_rss_info *rss = &nic->rss_info; | ||
498 | int idx; | ||
499 | |||
500 | if ((nic->qs->rq_cnt <= 1) || (nic->cpi_alg != CPI_ALG_NONE)) { | ||
501 | rss->enable = false; | ||
502 | rss->hash_bits = 0; | ||
503 | return -EIO; | ||
504 | } | ||
505 | |||
506 | /* We do not allow change in unsupported parameters */ | ||
507 | if (hkey || | ||
508 | (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) | ||
509 | return -EOPNOTSUPP; | ||
510 | |||
511 | rss->enable = true; | ||
512 | if (indir) { | ||
513 | for (idx = 0; idx < rss->rss_size; idx++) | ||
514 | rss->ind_tbl[idx] = indir[idx]; | ||
515 | } | ||
516 | |||
517 | if (hkey) { | ||
518 | memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64)); | ||
519 | nicvf_set_rss_key(nic); | ||
520 | } | ||
521 | |||
522 | nicvf_config_rss(nic); | ||
523 | return 0; | ||
524 | } | ||
525 | |||
526 | /* Get no of queues device supports and current queue count */ | ||
527 | static void nicvf_get_channels(struct net_device *dev, | ||
528 | struct ethtool_channels *channel) | ||
529 | { | ||
530 | struct nicvf *nic = netdev_priv(dev); | ||
531 | |||
532 | memset(channel, 0, sizeof(*channel)); | ||
533 | |||
534 | channel->max_rx = MAX_RCV_QUEUES_PER_QS; | ||
535 | channel->max_tx = MAX_SND_QUEUES_PER_QS; | ||
536 | |||
537 | channel->rx_count = nic->qs->rq_cnt; | ||
538 | channel->tx_count = nic->qs->sq_cnt; | ||
539 | } | ||
540 | |||
541 | /* Set no of Tx, Rx queues to be used */ | ||
542 | static int nicvf_set_channels(struct net_device *dev, | ||
543 | struct ethtool_channels *channel) | ||
544 | { | ||
545 | struct nicvf *nic = netdev_priv(dev); | ||
546 | int err = 0; | ||
547 | |||
548 | if (!channel->rx_count || !channel->tx_count) | ||
549 | return -EINVAL; | ||
550 | if (channel->rx_count > MAX_RCV_QUEUES_PER_QS) | ||
551 | return -EINVAL; | ||
552 | if (channel->tx_count > MAX_SND_QUEUES_PER_QS) | ||
553 | return -EINVAL; | ||
554 | |||
555 | nic->qs->rq_cnt = channel->rx_count; | ||
556 | nic->qs->sq_cnt = channel->tx_count; | ||
557 | nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt); | ||
558 | |||
559 | err = nicvf_set_real_num_queues(dev, nic->qs->sq_cnt, nic->qs->rq_cnt); | ||
560 | if (err) | ||
561 | return err; | ||
562 | |||
563 | if (!netif_running(dev)) | ||
564 | return err; | ||
565 | |||
566 | nicvf_stop(dev); | ||
567 | nicvf_open(dev); | ||
568 | netdev_info(dev, "Set num Tx rings to %d, Rx rings to %d\n", | ||
569 | nic->qs->sq_cnt, nic->qs->rq_cnt); | ||
570 | |||
571 | return err; | ||
572 | } | ||
573 | |||
574 | static const struct ethtool_ops nicvf_ethtool_ops = { | ||
575 | .get_settings = nicvf_get_settings, | ||
576 | .get_link = ethtool_op_get_link, | ||
577 | .get_drvinfo = nicvf_get_drvinfo, | ||
578 | .get_msglevel = nicvf_get_msglevel, | ||
579 | .set_msglevel = nicvf_set_msglevel, | ||
580 | .get_strings = nicvf_get_strings, | ||
581 | .get_sset_count = nicvf_get_sset_count, | ||
582 | .get_ethtool_stats = nicvf_get_ethtool_stats, | ||
583 | .get_regs_len = nicvf_get_regs_len, | ||
584 | .get_regs = nicvf_get_regs, | ||
585 | .get_coalesce = nicvf_get_coalesce, | ||
586 | .get_ringparam = nicvf_get_ringparam, | ||
587 | .get_rxnfc = nicvf_get_rxnfc, | ||
588 | .set_rxnfc = nicvf_set_rxnfc, | ||
589 | .get_rxfh_key_size = nicvf_get_rxfh_key_size, | ||
590 | .get_rxfh_indir_size = nicvf_get_rxfh_indir_size, | ||
591 | .get_rxfh = nicvf_get_rxfh, | ||
592 | .set_rxfh = nicvf_set_rxfh, | ||
593 | .get_channels = nicvf_get_channels, | ||
594 | .set_channels = nicvf_set_channels, | ||
595 | .get_ts_info = ethtool_op_get_ts_info, | ||
596 | }; | ||
597 | |||
598 | void nicvf_set_ethtool_ops(struct net_device *netdev) | ||
599 | { | ||
600 | netdev->ethtool_ops = &nicvf_ethtool_ops; | ||
601 | } | ||
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c new file mode 100644 index 000000000000..abd446e6155b --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c | |||
@@ -0,0 +1,1332 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2015 Cavium, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of version 2 of the GNU General Public License | ||
6 | * as published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <linux/interrupt.h> | ||
11 | #include <linux/pci.h> | ||
12 | #include <linux/netdevice.h> | ||
13 | #include <linux/etherdevice.h> | ||
14 | #include <linux/ethtool.h> | ||
15 | #include <linux/log2.h> | ||
16 | #include <linux/prefetch.h> | ||
17 | #include <linux/irq.h> | ||
18 | |||
19 | #include "nic_reg.h" | ||
20 | #include "nic.h" | ||
21 | #include "nicvf_queues.h" | ||
22 | #include "thunder_bgx.h" | ||
23 | |||
24 | #define DRV_NAME "thunder-nicvf" | ||
25 | #define DRV_VERSION "1.0" | ||
26 | |||
27 | /* Supported devices */ | ||
28 | static const struct pci_device_id nicvf_id_table[] = { | ||
29 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, | ||
30 | PCI_DEVICE_ID_THUNDER_NIC_VF, | ||
31 | PCI_VENDOR_ID_CAVIUM, 0xA11E) }, | ||
32 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, | ||
33 | PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF, | ||
34 | PCI_VENDOR_ID_CAVIUM, 0xA11E) }, | ||
35 | { 0, } /* end of table */ | ||
36 | }; | ||
37 | |||
38 | MODULE_AUTHOR("Sunil Goutham"); | ||
39 | MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver"); | ||
40 | MODULE_LICENSE("GPL v2"); | ||
41 | MODULE_VERSION(DRV_VERSION); | ||
42 | MODULE_DEVICE_TABLE(pci, nicvf_id_table); | ||
43 | |||
44 | static int debug = 0x00; | ||
45 | module_param(debug, int, 0644); | ||
46 | MODULE_PARM_DESC(debug, "Debug message level bitmap"); | ||
47 | |||
48 | static int cpi_alg = CPI_ALG_NONE; | ||
49 | module_param(cpi_alg, int, S_IRUGO); | ||
50 | MODULE_PARM_DESC(cpi_alg, | ||
51 | "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); | ||
52 | |||
53 | static int nicvf_enable_msix(struct nicvf *nic); | ||
54 | static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev); | ||
55 | static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx); | ||
56 | |||
57 | static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic, | ||
58 | struct sk_buff *skb) | ||
59 | { | ||
60 | if (skb->len <= 64) | ||
61 | nic->drv_stats.rx_frames_64++; | ||
62 | else if (skb->len <= 127) | ||
63 | nic->drv_stats.rx_frames_127++; | ||
64 | else if (skb->len <= 255) | ||
65 | nic->drv_stats.rx_frames_255++; | ||
66 | else if (skb->len <= 511) | ||
67 | nic->drv_stats.rx_frames_511++; | ||
68 | else if (skb->len <= 1023) | ||
69 | nic->drv_stats.rx_frames_1023++; | ||
70 | else if (skb->len <= 1518) | ||
71 | nic->drv_stats.rx_frames_1518++; | ||
72 | else | ||
73 | nic->drv_stats.rx_frames_jumbo++; | ||
74 | } | ||
75 | |||
76 | /* The Cavium ThunderX network controller can *only* be found in SoCs | ||
77 | * containing the ThunderX ARM64 CPU implementation. All accesses to the device | ||
78 | * registers on this platform are implicitly strongly ordered with respect | ||
79 | * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use | ||
80 | * with no memory barriers in this driver. The readq()/writeq() functions add | ||
81 | * explicit ordering operation which in this case are redundant, and only | ||
82 | * add overhead. | ||
83 | */ | ||
84 | |||
85 | /* Register read/write APIs */ | ||
86 | void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val) | ||
87 | { | ||
88 | writeq_relaxed(val, nic->reg_base + offset); | ||
89 | } | ||
90 | |||
91 | u64 nicvf_reg_read(struct nicvf *nic, u64 offset) | ||
92 | { | ||
93 | return readq_relaxed(nic->reg_base + offset); | ||
94 | } | ||
95 | |||
96 | void nicvf_queue_reg_write(struct nicvf *nic, u64 offset, | ||
97 | u64 qidx, u64 val) | ||
98 | { | ||
99 | void __iomem *addr = nic->reg_base + offset; | ||
100 | |||
101 | writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT)); | ||
102 | } | ||
103 | |||
104 | u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx) | ||
105 | { | ||
106 | void __iomem *addr = nic->reg_base + offset; | ||
107 | |||
108 | return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT)); | ||
109 | } | ||
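nicvf_queue_reg_read()/nicvf_queue_reg_write() above derive a per-queue register address by shifting the queue index by NIC_Q_NUM_SHIFT (18, from nic_reg.h) and adding it to the base offset. A tiny standalone sketch of that arithmetic (illustrative only, not driver code):

#include <stdint.h>
#include <stdio.h>

#define NIC_Q_NUM_SHIFT		18
#define NIC_QSET_CQ_0_7_DOOR	(0x010438)

int main(void)
{
	uint64_t qidx = 3;	/* example queue index */
	uint64_t off  = NIC_QSET_CQ_0_7_DOOR + (qidx << NIC_Q_NUM_SHIFT);

	/* Prints 0xd0438: the doorbell register of completion queue 3 */
	printf("CQ%llu doorbell offset: 0x%llx\n",
	       (unsigned long long)qidx, (unsigned long long)off);
	return 0;
}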
110 | |||
111 | /* VF -> PF mailbox communication */ | ||
112 | |||
113 | int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) | ||
114 | { | ||
115 | int timeout = NIC_MBOX_MSG_TIMEOUT; | ||
116 | int sleep = 10; | ||
117 | u64 *msg = (u64 *)mbx; | ||
118 | |||
119 | nic->pf_acked = false; | ||
120 | nic->pf_nacked = false; | ||
121 | |||
122 | nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]); | ||
123 | nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]); | ||
124 | |||
125 | /* Wait for previous message to be acked, timeout 2sec */ | ||
126 | while (!nic->pf_acked) { | ||
127 | if (nic->pf_nacked) | ||
128 | return -EINVAL; | ||
129 | msleep(sleep); | ||
130 | if (nic->pf_acked) | ||
131 | break; | ||
132 | timeout -= sleep; | ||
133 | if (!timeout) { | ||
134 | netdev_err(nic->netdev, | ||
135 | "PF didn't ack to mbox msg %d from VF%d\n", | ||
136 | (mbx->msg.msg & 0xFF), nic->vf_id); | ||
137 | return -EBUSY; | ||
138 | } | ||
139 | } | ||
140 | return 0; | ||
141 | } | ||
142 | |||
143 | /* Checks if VF is able to communicate with PF | ||
144 | * and also gets the VNIC number this VF is associated to. | ||
145 | */ | ||
146 | static int nicvf_check_pf_ready(struct nicvf *nic) | ||
147 | { | ||
148 | int timeout = 5000, sleep = 20; | ||
149 | |||
150 | nic->pf_ready_to_rcv_msg = false; | ||
151 | |||
152 | nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, | ||
153 | le64_to_cpu(NIC_MBOX_MSG_READY)); | ||
154 | nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, 1ULL); | ||
155 | |||
156 | while (!nic->pf_ready_to_rcv_msg) { | ||
157 | msleep(sleep); | ||
158 | if (nic->pf_ready_to_rcv_msg) | ||
159 | break; | ||
160 | timeout -= sleep; | ||
161 | if (!timeout) { | ||
162 | netdev_err(nic->netdev, | ||
163 | "PF didn't respond to READY msg\n"); | ||
164 | return 0; | ||
165 | } | ||
166 | } | ||
167 | return 1; | ||
168 | } | ||
169 | |||
170 | static void nicvf_handle_mbx_intr(struct nicvf *nic) | ||
171 | { | ||
172 | union nic_mbx mbx = {}; | ||
173 | u64 *mbx_data; | ||
174 | u64 mbx_addr; | ||
175 | int i; | ||
176 | |||
177 | mbx_addr = NIC_VF_PF_MAILBOX_0_1; | ||
178 | mbx_data = (u64 *)&mbx; | ||
179 | |||
180 | for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) { | ||
181 | *mbx_data = nicvf_reg_read(nic, mbx_addr); | ||
182 | mbx_data++; | ||
183 | mbx_addr += sizeof(u64); | ||
184 | } | ||
185 | |||
186 | netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg); | ||
187 | switch (mbx.msg.msg) { | ||
188 | case NIC_MBOX_MSG_READY: | ||
189 | nic->pf_ready_to_rcv_msg = true; | ||
190 | nic->vf_id = mbx.nic_cfg.vf_id & 0x7F; | ||
191 | nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F; | ||
192 | nic->node = mbx.nic_cfg.node_id; | ||
193 | ether_addr_copy(nic->netdev->dev_addr, | ||
194 | (u8 *)&mbx.nic_cfg.mac_addr); | ||
195 | nic->link_up = false; | ||
196 | nic->duplex = 0; | ||
197 | nic->speed = 0; | ||
198 | break; | ||
199 | case NIC_MBOX_MSG_ACK: | ||
200 | nic->pf_acked = true; | ||
201 | break; | ||
202 | case NIC_MBOX_MSG_NACK: | ||
203 | nic->pf_nacked = true; | ||
204 | break; | ||
205 | case NIC_MBOX_MSG_RSS_SIZE: | ||
206 | nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size; | ||
207 | nic->pf_acked = true; | ||
208 | break; | ||
209 | case NIC_MBOX_MSG_BGX_STATS: | ||
210 | nicvf_read_bgx_stats(nic, &mbx.bgx_stats); | ||
211 | nic->pf_acked = true; | ||
212 | nic->bgx_stats_acked = true; | ||
213 | break; | ||
214 | case NIC_MBOX_MSG_BGX_LINK_CHANGE: | ||
215 | nic->pf_acked = true; | ||
216 | nic->link_up = mbx.link_status.link_up; | ||
217 | nic->duplex = mbx.link_status.duplex; | ||
218 | nic->speed = mbx.link_status.speed; | ||
219 | if (nic->link_up) { | ||
220 | netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n", | ||
221 | nic->netdev->name, nic->speed, | ||
222 | nic->duplex == DUPLEX_FULL ? | ||
223 | "Full duplex" : "Half duplex"); | ||
224 | netif_carrier_on(nic->netdev); | ||
225 | netif_tx_wake_all_queues(nic->netdev); | ||
226 | } else { | ||
227 | netdev_info(nic->netdev, "%s: Link is Down\n", | ||
228 | nic->netdev->name); | ||
229 | netif_carrier_off(nic->netdev); | ||
230 | netif_tx_stop_all_queues(nic->netdev); | ||
231 | } | ||
232 | break; | ||
233 | default: | ||
234 | netdev_err(nic->netdev, | ||
235 | "Invalid message from PF, msg 0x%x\n", mbx.msg.msg); | ||
236 | break; | ||
237 | } | ||
238 | nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0); | ||
239 | } | ||
240 | |||
241 | static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev) | ||
242 | { | ||
243 | union nic_mbx mbx = {}; | ||
244 | int i; | ||
245 | |||
246 | mbx.mac.msg = NIC_MBOX_MSG_SET_MAC; | ||
247 | mbx.mac.vf_id = nic->vf_id; | ||
248 | for (i = 0; i < ETH_ALEN; i++) | ||
249 | mbx.mac.addr = (mbx.mac.addr << 8) | | ||
250 | netdev->dev_addr[i]; | ||
251 | |||
252 | return nicvf_send_msg_to_pf(nic, &mbx); | ||
253 | } | ||
254 | |||
255 | void nicvf_config_cpi(struct nicvf *nic) | ||
256 | { | ||
257 | union nic_mbx mbx = {}; | ||
258 | |||
259 | mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG; | ||
260 | mbx.cpi_cfg.vf_id = nic->vf_id; | ||
261 | mbx.cpi_cfg.cpi_alg = nic->cpi_alg; | ||
262 | mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt; | ||
263 | |||
264 | nicvf_send_msg_to_pf(nic, &mbx); | ||
265 | } | ||
266 | |||
267 | void nicvf_get_rss_size(struct nicvf *nic) | ||
268 | { | ||
269 | union nic_mbx mbx = {}; | ||
270 | |||
271 | mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE; | ||
272 | mbx.rss_size.vf_id = nic->vf_id; | ||
273 | nicvf_send_msg_to_pf(nic, &mbx); | ||
274 | } | ||
275 | |||
276 | void nicvf_config_rss(struct nicvf *nic) | ||
277 | { | ||
278 | union nic_mbx mbx = {}; | ||
279 | struct nicvf_rss_info *rss = &nic->rss_info; | ||
280 | int ind_tbl_len = rss->rss_size; | ||
281 | int i, nextq = 0; | ||
282 | |||
283 | mbx.rss_cfg.vf_id = nic->vf_id; | ||
284 | mbx.rss_cfg.hash_bits = rss->hash_bits; | ||
285 | while (ind_tbl_len) { | ||
286 | mbx.rss_cfg.tbl_offset = nextq; | ||
287 | mbx.rss_cfg.tbl_len = min(ind_tbl_len, | ||
288 | RSS_IND_TBL_LEN_PER_MBX_MSG); | ||
289 | mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ? | ||
290 | NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG; | ||
291 | |||
292 | for (i = 0; i < mbx.rss_cfg.tbl_len; i++) | ||
293 | mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++]; | ||
294 | |||
295 | nicvf_send_msg_to_pf(nic, &mbx); | ||
296 | |||
297 | ind_tbl_len -= mbx.rss_cfg.tbl_len; | ||
298 | } | ||
299 | } | ||
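nicvf_config_rss() above pushes the RSS indirection table to the PF in mailbox-sized chunks, flagging the first chunk as RSS_CFG and the rest as RSS_CFG_CONT. The standalone sketch below (not driver code) walks through that chunking; the per-message limit of 8 is illustrative only, since the real RSS_IND_TBL_LEN_PER_MBX_MSG value is defined elsewhere.

#include <stdio.h>

#define CHUNK 8			/* illustrative per-message limit */

int main(void)
{
	int rss_size = 128;	/* example indirection table size */
	int remaining = rss_size, nextq = 0, msg = 0;

	while (remaining) {
		int len = remaining < CHUNK ? remaining : CHUNK;

		/* offset 0 would use NIC_MBOX_MSG_RSS_CFG, later chunks
		 * NIC_MBOX_MSG_RSS_CFG_CONT, matching the driver's test on
		 * tbl_offset.
		 */
		printf("msg %d: offset %d, len %d (%s)\n", msg, nextq, len,
		       nextq ? "RSS_CFG_CONT" : "RSS_CFG");
		nextq += len;
		remaining -= len;
		msg++;
	}
	return 0;
}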
300 | |||
301 | void nicvf_set_rss_key(struct nicvf *nic) | ||
302 | { | ||
303 | struct nicvf_rss_info *rss = &nic->rss_info; | ||
304 | u64 key_addr = NIC_VNIC_RSS_KEY_0_4; | ||
305 | int idx; | ||
306 | |||
307 | for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) { | ||
308 | nicvf_reg_write(nic, key_addr, rss->key[idx]); | ||
309 | key_addr += sizeof(u64); | ||
310 | } | ||
311 | } | ||
312 | |||
313 | static int nicvf_rss_init(struct nicvf *nic) | ||
314 | { | ||
315 | struct nicvf_rss_info *rss = &nic->rss_info; | ||
316 | int idx; | ||
317 | |||
318 | nicvf_get_rss_size(nic); | ||
319 | |||
320 | if ((nic->qs->rq_cnt <= 1) || (cpi_alg != CPI_ALG_NONE)) { | ||
321 | rss->enable = false; | ||
322 | rss->hash_bits = 0; | ||
323 | return 0; | ||
324 | } | ||
325 | |||
326 | rss->enable = true; | ||
327 | |||
328 | /* Using the HW reset value for now */ | ||
329 | rss->key[0] = 0xFEED0BADFEED0BAD; | ||
330 | rss->key[1] = 0xFEED0BADFEED0BAD; | ||
331 | rss->key[2] = 0xFEED0BADFEED0BAD; | ||
332 | rss->key[3] = 0xFEED0BADFEED0BAD; | ||
333 | rss->key[4] = 0xFEED0BADFEED0BAD; | ||
334 | |||
335 | nicvf_set_rss_key(nic); | ||
336 | |||
337 | rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA; | ||
338 | nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg); | ||
339 | |||
340 | rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size)); | ||
341 | |||
342 | for (idx = 0; idx < rss->rss_size; idx++) | ||
343 | rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx, | ||
344 | nic->qs->rq_cnt); | ||
345 | nicvf_config_rss(nic); | ||
346 | return 1; | ||
347 | } | ||
348 | |||
349 | int nicvf_set_real_num_queues(struct net_device *netdev, | ||
350 | int tx_queues, int rx_queues) | ||
351 | { | ||
352 | int err = 0; | ||
353 | |||
354 | err = netif_set_real_num_tx_queues(netdev, tx_queues); | ||
355 | if (err) { | ||
356 | netdev_err(netdev, | ||
357 | "Failed to set no of Tx queues: %d\n", tx_queues); | ||
358 | return err; | ||
359 | } | ||
360 | |||
361 | err = netif_set_real_num_rx_queues(netdev, rx_queues); | ||
362 | if (err) | ||
363 | netdev_err(netdev, | ||
364 | "Failed to set no of Rx queues: %d\n", rx_queues); | ||
365 | return err; | ||
366 | } | ||
367 | |||
368 | static int nicvf_init_resources(struct nicvf *nic) | ||
369 | { | ||
370 | int err; | ||
371 | u64 mbx_addr = NIC_VF_PF_MAILBOX_0_1; | ||
372 | |||
373 | /* Enable Qset */ | ||
374 | nicvf_qset_config(nic, true); | ||
375 | |||
376 | /* Initialize queues and HW for data transfer */ | ||
377 | err = nicvf_config_data_transfer(nic, true); | ||
378 | if (err) { | ||
379 | netdev_err(nic->netdev, | ||
380 | "Failed to alloc/config VF's QSet resources\n"); | ||
381 | return err; | ||
382 | } | ||
383 | |||
384 | /* Send VF config done msg to PF */ | ||
385 | nicvf_reg_write(nic, mbx_addr, le64_to_cpu(NIC_MBOX_MSG_CFG_DONE)); | ||
386 | mbx_addr += (NIC_PF_VF_MAILBOX_SIZE - 1) * 8; | ||
387 | nicvf_reg_write(nic, mbx_addr, 1ULL); | ||
388 | |||
389 | return 0; | ||
390 | } | ||
391 | |||
392 | static void nicvf_snd_pkt_handler(struct net_device *netdev, | ||
393 | struct cmp_queue *cq, | ||
394 | struct cqe_send_t *cqe_tx, int cqe_type) | ||
395 | { | ||
396 | struct sk_buff *skb = NULL; | ||
397 | struct nicvf *nic = netdev_priv(netdev); | ||
398 | struct snd_queue *sq; | ||
399 | struct sq_hdr_subdesc *hdr; | ||
400 | |||
401 | sq = &nic->qs->sq[cqe_tx->sq_idx]; | ||
402 | |||
403 | hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr); | ||
404 | if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) | ||
405 | return; | ||
406 | |||
407 | netdev_dbg(nic->netdev, | ||
408 | "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n", | ||
409 | __func__, cqe_tx->sq_qs, cqe_tx->sq_idx, | ||
410 | cqe_tx->sqe_ptr, hdr->subdesc_cnt); | ||
411 | |||
412 | nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); | ||
413 | nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); | ||
414 | skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr]; | ||
415 | /* For TSO offloaded packets only one head SKB needs to be freed */ | ||
416 | if (skb) { | ||
417 | prefetch(skb); | ||
418 | dev_consume_skb_any(skb); | ||
419 | } | ||
420 | } | ||
421 | |||
422 | static void nicvf_rcv_pkt_handler(struct net_device *netdev, | ||
423 | struct napi_struct *napi, | ||
424 | struct cmp_queue *cq, | ||
425 | struct cqe_rx_t *cqe_rx, int cqe_type) | ||
426 | { | ||
427 | struct sk_buff *skb; | ||
428 | struct nicvf *nic = netdev_priv(netdev); | ||
429 | int err = 0; | ||
430 | |||
431 | /* Check for errors */ | ||
432 | err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx); | ||
433 | if (err && !cqe_rx->rb_cnt) | ||
434 | return; | ||
435 | |||
436 | skb = nicvf_get_rcv_skb(nic, cqe_rx); | ||
437 | if (!skb) { | ||
438 | netdev_dbg(nic->netdev, "Packet not received\n"); | ||
439 | return; | ||
440 | } | ||
441 | |||
442 | if (netif_msg_pktdata(nic)) { | ||
443 | netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name, | ||
444 | skb, skb->len); | ||
445 | print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, | ||
446 | skb->data, skb->len, true); | ||
447 | } | ||
448 | |||
449 | nicvf_set_rx_frame_cnt(nic, skb); | ||
450 | |||
451 | skb_record_rx_queue(skb, cqe_rx->rq_idx); | ||
452 | if (netdev->hw_features & NETIF_F_RXCSUM) { | ||
453 | /* HW by default verifies TCP/UDP/SCTP checksums */ | ||
454 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
455 | } else { | ||
456 | skb_checksum_none_assert(skb); | ||
457 | } | ||
458 | |||
459 | skb->protocol = eth_type_trans(skb, netdev); | ||
460 | |||
461 | if (napi && (netdev->features & NETIF_F_GRO)) | ||
462 | napi_gro_receive(napi, skb); | ||
463 | else | ||
464 | netif_receive_skb(skb); | ||
465 | } | ||
466 | |||
467 | static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx, | ||
468 | struct napi_struct *napi, int budget) | ||
469 | { | ||
470 | int processed_cqe, work_done = 0; | ||
471 | int cqe_count, cqe_head; | ||
472 | struct nicvf *nic = netdev_priv(netdev); | ||
473 | struct queue_set *qs = nic->qs; | ||
474 | struct cmp_queue *cq = &qs->cq[cq_idx]; | ||
475 | struct cqe_rx_t *cq_desc; | ||
476 | |||
477 | spin_lock_bh(&cq->lock); | ||
478 | loop: | ||
479 | processed_cqe = 0; | ||
480 | /* Get no of valid CQ entries to process */ | ||
481 | cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx); | ||
482 | cqe_count &= CQ_CQE_COUNT; | ||
483 | if (!cqe_count) | ||
484 | goto done; | ||
485 | |||
486 | /* Get head of the valid CQ entries */ | ||
487 | cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9; | ||
488 | cqe_head &= 0xFFFF; | ||
489 | |||
490 | netdev_dbg(nic->netdev, "%s cqe_count %d cqe_head %d\n", | ||
491 | __func__, cqe_count, cqe_head); | ||
492 | while (processed_cqe < cqe_count) { | ||
493 | /* Get the CQ descriptor */ | ||
494 | cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head); | ||
495 | cqe_head++; | ||
496 | cqe_head &= (cq->dmem.q_len - 1); | ||
497 | /* Initiate prefetch for next descriptor */ | ||
498 | prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head)); | ||
499 | |||
500 | if ((work_done >= budget) && napi && | ||
501 | (cq_desc->cqe_type != CQE_TYPE_SEND)) { | ||
502 | break; | ||
503 | } | ||
504 | |||
505 | netdev_dbg(nic->netdev, "cq_desc->cqe_type %d\n", | ||
506 | cq_desc->cqe_type); | ||
507 | switch (cq_desc->cqe_type) { | ||
508 | case CQE_TYPE_RX: | ||
509 | nicvf_rcv_pkt_handler(netdev, napi, cq, | ||
510 | cq_desc, CQE_TYPE_RX); | ||
511 | work_done++; | ||
512 | break; | ||
513 | case CQE_TYPE_SEND: | ||
514 | nicvf_snd_pkt_handler(netdev, cq, | ||
515 | (void *)cq_desc, CQE_TYPE_SEND); | ||
516 | break; | ||
517 | case CQE_TYPE_INVALID: | ||
518 | case CQE_TYPE_RX_SPLIT: | ||
519 | case CQE_TYPE_RX_TCP: | ||
520 | case CQE_TYPE_SEND_PTP: | ||
521 | /* Ignore for now */ | ||
522 | break; | ||
523 | } | ||
524 | processed_cqe++; | ||
525 | } | ||
526 | netdev_dbg(nic->netdev, "%s processed_cqe %d work_done %d budget %d\n", | ||
527 | __func__, processed_cqe, work_done, budget); | ||
528 | |||
529 | /* Ring doorbell to inform H/W to reuse processed CQEs */ | ||
530 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, | ||
531 | cq_idx, processed_cqe); | ||
532 | |||
533 | if ((work_done < budget) && napi) | ||
534 | goto loop; | ||
535 | |||
536 | done: | ||
537 | spin_unlock_bh(&cq->lock); | ||
538 | return work_done; | ||
539 | } | ||
540 | |||
541 | static int nicvf_poll(struct napi_struct *napi, int budget) | ||
542 | { | ||
543 | u64 cq_head; | ||
544 | int work_done = 0; | ||
545 | struct net_device *netdev = napi->dev; | ||
546 | struct nicvf *nic = netdev_priv(netdev); | ||
547 | struct nicvf_cq_poll *cq; | ||
548 | struct netdev_queue *txq; | ||
549 | |||
550 | cq = container_of(napi, struct nicvf_cq_poll, napi); | ||
551 | work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget); | ||
552 | |||
553 | txq = netdev_get_tx_queue(netdev, cq->cq_idx); | ||
554 | if (netif_tx_queue_stopped(txq)) | ||
555 | netif_tx_wake_queue(txq); | ||
556 | |||
557 | if (work_done < budget) { | ||
558 | /* Slow packet rate, exit polling */ | ||
559 | napi_complete(napi); | ||
560 | /* Re-enable interrupts */ | ||
561 | cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, | ||
562 | cq->cq_idx); | ||
563 | nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx); | ||
564 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD, | ||
565 | cq->cq_idx, cq_head); | ||
566 | nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx); | ||
567 | } | ||
568 | return work_done; | ||
569 | } | ||
570 | |||
571 | /* Qset error interrupt handler | ||
572 | * | ||
573 | * As of now only CQ errors are handled | ||
574 | */ | ||
575 | void nicvf_handle_qs_err(unsigned long data) | ||
576 | { | ||
577 | struct nicvf *nic = (struct nicvf *)data; | ||
578 | struct queue_set *qs = nic->qs; | ||
579 | int qidx; | ||
580 | u64 status; | ||
581 | |||
582 | netif_tx_disable(nic->netdev); | ||
583 | |||
584 | /* Check if it is CQ err */ | ||
585 | for (qidx = 0; qidx < qs->cq_cnt; qidx++) { | ||
586 | status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, | ||
587 | qidx); | ||
588 | if (!(status & CQ_ERR_MASK)) | ||
589 | continue; | ||
590 | /* Process already queued CQEs and reconfig CQ */ | ||
591 | nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx); | ||
592 | nicvf_sq_disable(nic, qidx); | ||
593 | nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0); | ||
594 | nicvf_cmp_queue_config(nic, qs, qidx, true); | ||
595 | nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx); | ||
596 | nicvf_sq_enable(nic, &qs->sq[qidx], qidx); | ||
597 | |||
598 | nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx); | ||
599 | } | ||
600 | |||
601 | netif_tx_start_all_queues(nic->netdev); | ||
602 | /* Re-enable Qset error interrupt */ | ||
603 | nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0); | ||
604 | } | ||
605 | |||
606 | static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq) | ||
607 | { | ||
608 | struct nicvf *nic = (struct nicvf *)nicvf_irq; | ||
609 | u64 intr; | ||
610 | |||
611 | intr = nicvf_reg_read(nic, NIC_VF_INT); | ||
612 | /* Check for spurious interrupt */ | ||
613 | if (!(intr & NICVF_INTR_MBOX_MASK)) | ||
614 | return IRQ_HANDLED; | ||
615 | |||
616 | nicvf_handle_mbx_intr(nic); | ||
617 | |||
618 | return IRQ_HANDLED; | ||
619 | } | ||
620 | |||
621 | static irqreturn_t nicvf_intr_handler(int irq, void *nicvf_irq) | ||
622 | { | ||
623 | u64 qidx, intr, clear_intr = 0; | ||
624 | u64 cq_intr, rbdr_intr, qs_err_intr; | ||
625 | struct nicvf *nic = (struct nicvf *)nicvf_irq; | ||
626 | struct queue_set *qs = nic->qs; | ||
627 | struct nicvf_cq_poll *cq_poll = NULL; | ||
628 | |||
629 | intr = nicvf_reg_read(nic, NIC_VF_INT); | ||
630 | if (netif_msg_intr(nic)) | ||
631 | netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n", | ||
632 | nic->netdev->name, intr); | ||
633 | |||
634 | qs_err_intr = intr & NICVF_INTR_QS_ERR_MASK; | ||
635 | if (qs_err_intr) { | ||
636 | /* Disable Qset err interrupt and schedule softirq */ | ||
637 | nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0); | ||
638 | tasklet_hi_schedule(&nic->qs_err_task); | ||
639 | clear_intr |= qs_err_intr; | ||
640 | } | ||
641 | |||
642 | /* Disable interrupts and start polling */ | ||
643 | cq_intr = (intr & NICVF_INTR_CQ_MASK) >> NICVF_INTR_CQ_SHIFT; | ||
644 | for (qidx = 0; qidx < qs->cq_cnt; qidx++) { | ||
645 | if (!(cq_intr & (1 << qidx))) | ||
646 | continue; | ||
647 | if (!nicvf_is_intr_enabled(nic, NICVF_INTR_CQ, qidx)) | ||
648 | continue; | ||
649 | |||
650 | nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx); | ||
651 | clear_intr |= ((1 << qidx) << NICVF_INTR_CQ_SHIFT); | ||
652 | |||
653 | cq_poll = nic->napi[qidx]; | ||
654 | /* Schedule NAPI */ | ||
655 | if (cq_poll) | ||
656 | napi_schedule(&cq_poll->napi); | ||
657 | } | ||
658 | |||
659 | /* Handle RBDR interrupts */ | ||
660 | rbdr_intr = (intr & NICVF_INTR_RBDR_MASK) >> NICVF_INTR_RBDR_SHIFT; | ||
661 | if (rbdr_intr) { | ||
662 | /* Disable RBDR interrupt and schedule softirq */ | ||
663 | for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { | ||
664 | if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx)) | ||
665 | continue; | ||
666 | nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx); | ||
667 | tasklet_hi_schedule(&nic->rbdr_task); | ||
668 | clear_intr |= ((1 << qidx) << NICVF_INTR_RBDR_SHIFT); | ||
669 | } | ||
670 | } | ||
671 | |||
672 | /* Clear interrupts */ | ||
673 | nicvf_reg_write(nic, NIC_VF_INT, clear_intr); | ||
674 | return IRQ_HANDLED; | ||
675 | } | ||
676 | |||
677 | static int nicvf_enable_msix(struct nicvf *nic) | ||
678 | { | ||
679 | int ret, vec; | ||
680 | |||
681 | nic->num_vec = NIC_VF_MSIX_VECTORS; | ||
682 | |||
683 | for (vec = 0; vec < nic->num_vec; vec++) | ||
684 | nic->msix_entries[vec].entry = vec; | ||
685 | |||
686 | ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec); | ||
687 | if (ret) { | ||
688 | netdev_err(nic->netdev, | ||
689 | "Req for #%d msix vectors failed\n", nic->num_vec); | ||
690 | return 0; | ||
691 | } | ||
692 | nic->msix_enabled = 1; | ||
693 | return 1; | ||
694 | } | ||
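/* Note (added for clarity): nicvf_enable_msix() uses 1 for success and 0 for
 * failure, unlike the usual 0/-errno kernel convention; the caller
 * nicvf_register_misc_interrupt() below relies on that when it checks the
 * return value with '!'.
 */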
695 | |||
696 | static void nicvf_disable_msix(struct nicvf *nic) | ||
697 | { | ||
698 | if (nic->msix_enabled) { | ||
699 | pci_disable_msix(nic->pdev); | ||
700 | nic->msix_enabled = 0; | ||
701 | nic->num_vec = 0; | ||
702 | } | ||
703 | } | ||
704 | |||
705 | static int nicvf_register_interrupts(struct nicvf *nic) | ||
706 | { | ||
707 | int irq, free, ret = 0; | ||
708 | int vector; | ||
709 | |||
710 | for_each_cq_irq(irq) | ||
711 | sprintf(nic->irq_name[irq], "NICVF%d CQ%d", | ||
712 | nic->vf_id, irq); | ||
713 | |||
714 | for_each_sq_irq(irq) | ||
715 | sprintf(nic->irq_name[irq], "NICVF%d SQ%d", | ||
716 | nic->vf_id, irq - NICVF_INTR_ID_SQ); | ||
717 | |||
718 | for_each_rbdr_irq(irq) | ||
719 | sprintf(nic->irq_name[irq], "NICVF%d RBDR%d", | ||
720 | nic->vf_id, irq - NICVF_INTR_ID_RBDR); | ||
721 | |||
722 | /* Register all interrupts except mailbox */ | ||
723 | for (irq = 0; irq < NICVF_INTR_ID_SQ; irq++) { | ||
724 | vector = nic->msix_entries[irq].vector; | ||
725 | ret = request_irq(vector, nicvf_intr_handler, | ||
726 | 0, nic->irq_name[irq], nic); | ||
727 | if (ret) | ||
728 | break; | ||
729 | nic->irq_allocated[irq] = true; | ||
730 | } | ||
731 | |||
732 | for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_MISC; irq++) { | ||
733 | vector = nic->msix_entries[irq].vector; | ||
734 | ret = request_irq(vector, nicvf_intr_handler, | ||
735 | 0, nic->irq_name[irq], nic); | ||
736 | if (ret) | ||
737 | break; | ||
738 | nic->irq_allocated[irq] = true; | ||
739 | } | ||
740 | |||
741 | sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], | ||
742 | "NICVF%d Qset error", nic->vf_id); | ||
743 | if (!ret) { | ||
744 | vector = nic->msix_entries[NICVF_INTR_ID_QS_ERR].vector; | ||
745 | irq = NICVF_INTR_ID_QS_ERR; | ||
746 | ret = request_irq(vector, nicvf_intr_handler, | ||
747 | 0, nic->irq_name[irq], nic); | ||
748 | if (!ret) | ||
749 | nic->irq_allocated[irq] = true; | ||
750 | } | ||
751 | |||
752 | if (ret) { | ||
753 | netdev_err(nic->netdev, "Request irq failed\n"); | ||
754 | for (free = 0; free < irq; free++) | ||
755 | free_irq(nic->msix_entries[free].vector, nic); | ||
756 | return ret; | ||
757 | } | ||
758 | |||
759 | return 0; | ||
760 | } | ||
761 | |||
762 | static void nicvf_unregister_interrupts(struct nicvf *nic) | ||
763 | { | ||
764 | int irq; | ||
765 | |||
766 | /* Free registered interrupts */ | ||
767 | for (irq = 0; irq < nic->num_vec; irq++) { | ||
768 | if (nic->irq_allocated[irq]) | ||
769 | free_irq(nic->msix_entries[irq].vector, nic); | ||
770 | nic->irq_allocated[irq] = false; | ||
771 | } | ||
772 | |||
773 | /* Disable MSI-X */ | ||
774 | nicvf_disable_msix(nic); | ||
775 | } | ||
776 | |||
777 | /* Initialize MSI-X vectors and register MISC interrupt. | ||
778 | * Send READY message to PF to check if it is alive | ||
779 | */ | ||
780 | static int nicvf_register_misc_interrupt(struct nicvf *nic) | ||
781 | { | ||
782 | int ret = 0; | ||
783 | int irq = NICVF_INTR_ID_MISC; | ||
784 | |||
785 | /* Return if mailbox interrupt is already registered */ | ||
786 | if (nic->msix_enabled) | ||
787 | return 0; | ||
788 | |||
789 | /* Enable MSI-X */ | ||
790 | if (!nicvf_enable_msix(nic)) | ||
791 | return 1; | ||
792 | |||
793 | sprintf(nic->irq_name[irq], "%s Mbox", "NICVF"); | ||
794 | /* Register Misc interrupt */ | ||
795 | ret = request_irq(nic->msix_entries[irq].vector, | ||
796 | nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic); | ||
797 | |||
798 | if (ret) | ||
799 | return ret; | ||
800 | nic->irq_allocated[irq] = true; | ||
801 | |||
802 | /* Enable mailbox interrupt */ | ||
803 | nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0); | ||
804 | |||
805 | /* Check if VF is able to communicate with PF */ | ||
806 | if (!nicvf_check_pf_ready(nic)) { | ||
807 | nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0); | ||
808 | nicvf_unregister_interrupts(nic); | ||
809 | return 1; | ||
810 | } | ||
811 | |||
812 | return 0; | ||
813 | } | ||
814 | |||
815 | static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev) | ||
816 | { | ||
817 | struct nicvf *nic = netdev_priv(netdev); | ||
818 | int qid = skb_get_queue_mapping(skb); | ||
819 | struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid); | ||
820 | |||
821 | /* Check for minimum packet length */ | ||
822 | if (skb->len <= ETH_HLEN) { | ||
823 | dev_kfree_skb(skb); | ||
824 | return NETDEV_TX_OK; | ||
825 | } | ||
826 | |||
827 | if (!nicvf_sq_append_skb(nic, skb) && !netif_tx_queue_stopped(txq)) { | ||
828 | netif_tx_stop_queue(txq); | ||
829 | nic->drv_stats.tx_busy++; | ||
830 | if (netif_msg_tx_err(nic)) | ||
831 | netdev_warn(netdev, | ||
832 | "%s: Transmit ring full, stopping SQ%d\n", | ||
833 | netdev->name, qid); | ||
834 | |||
835 | return NETDEV_TX_BUSY; | ||
836 | } | ||
837 | |||
838 | return NETDEV_TX_OK; | ||
839 | } | ||
840 | |||
841 | int nicvf_stop(struct net_device *netdev) | ||
842 | { | ||
843 | int irq, qidx; | ||
844 | struct nicvf *nic = netdev_priv(netdev); | ||
845 | struct queue_set *qs = nic->qs; | ||
846 | struct nicvf_cq_poll *cq_poll = NULL; | ||
847 | union nic_mbx mbx = {}; | ||
848 | |||
849 | mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; | ||
850 | nicvf_send_msg_to_pf(nic, &mbx); | ||
851 | |||
852 | netif_carrier_off(netdev); | ||
853 | netif_tx_disable(netdev); | ||
854 | |||
855 | /* Disable RBDR & QS error interrupts */ | ||
856 | for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { | ||
857 | nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx); | ||
858 | nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx); | ||
859 | } | ||
860 | nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0); | ||
861 | nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0); | ||
862 | |||
863 | /* Wait for pending IRQ handlers to finish */ | ||
864 | for (irq = 0; irq < nic->num_vec; irq++) | ||
865 | synchronize_irq(nic->msix_entries[irq].vector); | ||
866 | |||
867 | tasklet_kill(&nic->rbdr_task); | ||
868 | tasklet_kill(&nic->qs_err_task); | ||
869 | if (nic->rb_work_scheduled) | ||
870 | cancel_delayed_work_sync(&nic->rbdr_work); | ||
871 | |||
872 | for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) { | ||
873 | cq_poll = nic->napi[qidx]; | ||
874 | if (!cq_poll) | ||
875 | continue; | ||
876 | nic->napi[qidx] = NULL; | ||
877 | napi_synchronize(&cq_poll->napi); | ||
878 | /* CQ interrupt is re-enabled by napi_complete(), | ||
879 | * so disable it now | ||
880 | */ | ||
881 | nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx); | ||
882 | nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx); | ||
883 | napi_disable(&cq_poll->napi); | ||
884 | netif_napi_del(&cq_poll->napi); | ||
885 | kfree(cq_poll); | ||
886 | } | ||
887 | |||
888 | /* Free resources */ | ||
889 | nicvf_config_data_transfer(nic, false); | ||
890 | |||
891 | /* Disable HW Qset */ | ||
892 | nicvf_qset_config(nic, false); | ||
893 | |||
894 | /* disable mailbox interrupt */ | ||
895 | nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0); | ||
896 | |||
897 | nicvf_unregister_interrupts(nic); | ||
898 | |||
899 | return 0; | ||
900 | } | ||
901 | |||
902 | int nicvf_open(struct net_device *netdev) | ||
903 | { | ||
904 | int err, qidx; | ||
905 | struct nicvf *nic = netdev_priv(netdev); | ||
906 | struct queue_set *qs = nic->qs; | ||
907 | struct nicvf_cq_poll *cq_poll = NULL; | ||
908 | |||
909 | nic->mtu = netdev->mtu; | ||
910 | |||
911 | netif_carrier_off(netdev); | ||
912 | |||
913 | err = nicvf_register_misc_interrupt(nic); | ||
914 | if (err) | ||
915 | return err; | ||
916 | |||
917 | /* Register NAPI handler for processing CQEs */ | ||
918 | for (qidx = 0; qidx < qs->cq_cnt; qidx++) { | ||
919 | cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL); | ||
920 | if (!cq_poll) { | ||
921 | err = -ENOMEM; | ||
922 | goto napi_del; | ||
923 | } | ||
924 | cq_poll->cq_idx = qidx; | ||
925 | netif_napi_add(netdev, &cq_poll->napi, nicvf_poll, | ||
926 | NAPI_POLL_WEIGHT); | ||
927 | napi_enable(&cq_poll->napi); | ||
928 | nic->napi[qidx] = cq_poll; | ||
929 | } | ||
930 | |||
931 | /* Check if we got a MAC address from PF, else generate a random MAC */ | ||
932 | if (is_zero_ether_addr(netdev->dev_addr)) { | ||
933 | eth_hw_addr_random(netdev); | ||
934 | nicvf_hw_set_mac_addr(nic, netdev); | ||
935 | } | ||
936 | |||
937 | /* Init tasklet for handling Qset err interrupt */ | ||
938 | tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err, | ||
939 | (unsigned long)nic); | ||
940 | |||
941 | /* Init RBDR tasklet which will refill RBDR */ | ||
942 | tasklet_init(&nic->rbdr_task, nicvf_rbdr_task, | ||
943 | (unsigned long)nic); | ||
944 | INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work); | ||
945 | |||
946 | /* Configure CPI algorithm */ | ||
947 | nic->cpi_alg = cpi_alg; | ||
948 | nicvf_config_cpi(nic); | ||
949 | |||
950 | /* Configure receive side scaling */ | ||
951 | nicvf_rss_init(nic); | ||
952 | |||
953 | err = nicvf_register_interrupts(nic); | ||
954 | if (err) | ||
955 | goto cleanup; | ||
956 | |||
957 | /* Initialize the queues */ | ||
958 | err = nicvf_init_resources(nic); | ||
959 | if (err) | ||
960 | goto cleanup; | ||
961 | |||
962 | /* Make sure queue initialization is written */ | ||
963 | wmb(); | ||
964 | |||
965 | nicvf_reg_write(nic, NIC_VF_INT, -1); | ||
966 | /* Enable Qset err interrupt */ | ||
967 | nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0); | ||
968 | |||
969 | /* Enable completion queue interrupt */ | ||
970 | for (qidx = 0; qidx < qs->cq_cnt; qidx++) | ||
971 | nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx); | ||
972 | |||
973 | /* Enable RBDR threshold interrupt */ | ||
974 | for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) | ||
975 | nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); | ||
976 | |||
977 | netif_carrier_on(netdev); | ||
978 | netif_tx_start_all_queues(netdev); | ||
979 | |||
980 | return 0; | ||
981 | cleanup: | ||
982 | nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0); | ||
983 | nicvf_unregister_interrupts(nic); | ||
984 | napi_del: | ||
985 | for (qidx = 0; qidx < qs->cq_cnt; qidx++) { | ||
986 | cq_poll = nic->napi[qidx]; | ||
987 | if (!cq_poll) | ||
988 | continue; | ||
989 | napi_disable(&cq_poll->napi); | ||
990 | netif_napi_del(&cq_poll->napi); | ||
991 | kfree(cq_poll); | ||
992 | nic->napi[qidx] = NULL; | ||
993 | } | ||
994 | return err; | ||
995 | } | ||
996 | |||
997 | static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu) | ||
998 | { | ||
999 | union nic_mbx mbx = {}; | ||
1000 | |||
1001 | mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS; | ||
1002 | mbx.frs.max_frs = mtu; | ||
1003 | mbx.frs.vf_id = nic->vf_id; | ||
1004 | |||
1005 | return nicvf_send_msg_to_pf(nic, &mbx); | ||
1006 | } | ||
1007 | |||
1008 | static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) | ||
1009 | { | ||
1010 | struct nicvf *nic = netdev_priv(netdev); | ||
1011 | |||
1012 | if (new_mtu > NIC_HW_MAX_FRS) | ||
1013 | return -EINVAL; | ||
1014 | |||
1015 | if (new_mtu < NIC_HW_MIN_FRS) | ||
1016 | return -EINVAL; | ||
1017 | |||
1018 | if (nicvf_update_hw_max_frs(nic, new_mtu)) | ||
1019 | return -EINVAL; | ||
1020 | netdev->mtu = new_mtu; | ||
1021 | nic->mtu = new_mtu; | ||
1022 | |||
1023 | return 0; | ||
1024 | } | ||
1025 | |||
1026 | static int nicvf_set_mac_address(struct net_device *netdev, void *p) | ||
1027 | { | ||
1028 | struct sockaddr *addr = p; | ||
1029 | struct nicvf *nic = netdev_priv(netdev); | ||
1030 | |||
1031 | if (!is_valid_ether_addr(addr->sa_data)) | ||
1032 | return -EADDRNOTAVAIL; | ||
1033 | |||
1034 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | ||
1035 | |||
1036 | if (nic->msix_enabled) | ||
1037 | if (nicvf_hw_set_mac_addr(nic, netdev)) | ||
1038 | return -EBUSY; | ||
1039 | |||
1040 | return 0; | ||
1041 | } | ||
1042 | |||
1043 | static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx) | ||
1044 | { | ||
1045 | if (bgx->rx) | ||
1046 | nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats; | ||
1047 | else | ||
1048 | nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats; | ||
1049 | } | ||
1050 | |||
1051 | void nicvf_update_lmac_stats(struct nicvf *nic) | ||
1052 | { | ||
1053 | int stat = 0; | ||
1054 | union nic_mbx mbx = {}; | ||
1055 | int timeout; | ||
1056 | |||
1057 | if (!netif_running(nic->netdev)) | ||
1058 | return; | ||
1059 | |||
1060 | mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS; | ||
1061 | mbx.bgx_stats.vf_id = nic->vf_id; | ||
1062 | /* Rx stats */ | ||
1063 | mbx.bgx_stats.rx = 1; | ||
1064 | while (stat < BGX_RX_STATS_COUNT) { | ||
1065 | nic->bgx_stats_acked = 0; | ||
1066 | mbx.bgx_stats.idx = stat; | ||
1067 | nicvf_send_msg_to_pf(nic, &mbx); | ||
1068 | timeout = 0; | ||
1069 | while ((!nic->bgx_stats_acked) && (timeout < 10)) { | ||
1070 | msleep(2); | ||
1071 | timeout++; | ||
1072 | } | ||
1073 | stat++; | ||
1074 | } | ||
1075 | |||
1076 | stat = 0; | ||
1077 | |||
1078 | /* Tx stats */ | ||
1079 | mbx.bgx_stats.rx = 0; | ||
1080 | while (stat < BGX_TX_STATS_COUNT) { | ||
1081 | nic->bgx_stats_acked = 0; | ||
1082 | mbx.bgx_stats.idx = stat; | ||
1083 | nicvf_send_msg_to_pf(nic, &mbx); | ||
1084 | timeout = 0; | ||
1085 | while ((!nic->bgx_stats_acked) && (timeout < 10)) { | ||
1086 | msleep(2); | ||
1087 | timeout++; | ||
1088 | } | ||
1089 | stat++; | ||
1090 | } | ||
1091 | } | ||
1092 | |||
1093 | void nicvf_update_stats(struct nicvf *nic) | ||
1094 | { | ||
1095 | int qidx; | ||
1096 | struct nicvf_hw_stats *stats = &nic->stats; | ||
1097 | struct nicvf_drv_stats *drv_stats = &nic->drv_stats; | ||
1098 | struct queue_set *qs = nic->qs; | ||
1099 | |||
1100 | #define GET_RX_STATS(reg) \ | ||
1101 | nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3)) | ||
1102 | #define GET_TX_STATS(reg) \ | ||
1103 | nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3)) | ||
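/* Illustrative note: "reg << 3" turns the counter index into a byte offset,
 * i.e. each hardware statistic is assumed to occupy one 8-byte register in
 * the NIC_VNIC_RX_STAT_0_13 / NIC_VNIC_TX_STAT_0_4 blocks, and OR-ing works
 * here because the base offsets are suitably aligned.
 */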
1104 | |||
1105 | stats->rx_bytes_ok = GET_RX_STATS(RX_OCTS); | ||
1106 | stats->rx_ucast_frames_ok = GET_RX_STATS(RX_UCAST); | ||
1107 | stats->rx_bcast_frames_ok = GET_RX_STATS(RX_BCAST); | ||
1108 | stats->rx_mcast_frames_ok = GET_RX_STATS(RX_MCAST); | ||
1109 | stats->rx_fcs_errors = GET_RX_STATS(RX_FCS); | ||
1110 | stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR); | ||
1111 | stats->rx_drop_red = GET_RX_STATS(RX_RED); | ||
1112 | stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN); | ||
1113 | stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST); | ||
1114 | stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST); | ||
1115 | stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST); | ||
1116 | stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST); | ||
1117 | |||
1118 | stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS); | ||
1119 | stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST); | ||
1120 | stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST); | ||
1121 | stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST); | ||
1122 | stats->tx_drops = GET_TX_STATS(TX_DROP); | ||
1123 | |||
1124 | drv_stats->rx_frames_ok = stats->rx_ucast_frames_ok + | ||
1125 | stats->rx_bcast_frames_ok + | ||
1126 | stats->rx_mcast_frames_ok; | ||
1127 | drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok + | ||
1128 | stats->tx_bcast_frames_ok + | ||
1129 | stats->tx_mcast_frames_ok; | ||
1130 | drv_stats->rx_drops = stats->rx_drop_red + | ||
1131 | stats->rx_drop_overrun; | ||
1132 | drv_stats->tx_drops = stats->tx_drops; | ||
1133 | |||
1134 | /* Update RQ and SQ stats */ | ||
1135 | for (qidx = 0; qidx < qs->rq_cnt; qidx++) | ||
1136 | nicvf_update_rq_stats(nic, qidx); | ||
1137 | for (qidx = 0; qidx < qs->sq_cnt; qidx++) | ||
1138 | nicvf_update_sq_stats(nic, qidx); | ||
1139 | } | ||
1140 | |||
1141 | struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev, | ||
1142 | struct rtnl_link_stats64 *stats) | ||
1143 | { | ||
1144 | struct nicvf *nic = netdev_priv(netdev); | ||
1145 | struct nicvf_hw_stats *hw_stats = &nic->stats; | ||
1146 | struct nicvf_drv_stats *drv_stats = &nic->drv_stats; | ||
1147 | |||
1148 | nicvf_update_stats(nic); | ||
1149 | |||
1150 | stats->rx_bytes = hw_stats->rx_bytes_ok; | ||
1151 | stats->rx_packets = drv_stats->rx_frames_ok; | ||
1152 | stats->rx_dropped = drv_stats->rx_drops; | ||
1153 | |||
1154 | stats->tx_bytes = hw_stats->tx_bytes_ok; | ||
1155 | stats->tx_packets = drv_stats->tx_frames_ok; | ||
1156 | stats->tx_dropped = drv_stats->tx_drops; | ||
1157 | |||
1158 | return stats; | ||
1159 | } | ||
1160 | |||
1161 | static void nicvf_tx_timeout(struct net_device *dev) | ||
1162 | { | ||
1163 | struct nicvf *nic = netdev_priv(dev); | ||
1164 | |||
1165 | if (netif_msg_tx_err(nic)) | ||
1166 | netdev_warn(dev, "%s: Transmit timed out, resetting\n", | ||
1167 | dev->name); | ||
1168 | |||
1169 | schedule_work(&nic->reset_task); | ||
1170 | } | ||
1171 | |||
1172 | static void nicvf_reset_task(struct work_struct *work) | ||
1173 | { | ||
1174 | struct nicvf *nic; | ||
1175 | |||
1176 | nic = container_of(work, struct nicvf, reset_task); | ||
1177 | |||
1178 | if (!netif_running(nic->netdev)) | ||
1179 | return; | ||
1180 | |||
1181 | nicvf_stop(nic->netdev); | ||
1182 | nicvf_open(nic->netdev); | ||
1183 | nic->netdev->trans_start = jiffies; | ||
1184 | } | ||
1185 | |||
1186 | static const struct net_device_ops nicvf_netdev_ops = { | ||
1187 | .ndo_open = nicvf_open, | ||
1188 | .ndo_stop = nicvf_stop, | ||
1189 | .ndo_start_xmit = nicvf_xmit, | ||
1190 | .ndo_change_mtu = nicvf_change_mtu, | ||
1191 | .ndo_set_mac_address = nicvf_set_mac_address, | ||
1192 | .ndo_get_stats64 = nicvf_get_stats64, | ||
1193 | .ndo_tx_timeout = nicvf_tx_timeout, | ||
1194 | }; | ||
1195 | |||
1196 | static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
1197 | { | ||
1198 | struct device *dev = &pdev->dev; | ||
1199 | struct net_device *netdev; | ||
1200 | struct nicvf *nic; | ||
1201 | struct queue_set *qs; | ||
1202 | int err; | ||
1203 | |||
1204 | err = pci_enable_device(pdev); | ||
1205 | if (err) { | ||
1206 | dev_err(dev, "Failed to enable PCI device\n"); | ||
1207 | return err; | ||
1208 | } | ||
1209 | |||
1210 | err = pci_request_regions(pdev, DRV_NAME); | ||
1211 | if (err) { | ||
1212 | dev_err(dev, "PCI request regions failed 0x%x\n", err); | ||
1213 | goto err_disable_device; | ||
1214 | } | ||
1215 | |||
1216 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48)); | ||
1217 | if (err) { | ||
1218 | dev_err(dev, "Unable to get usable DMA configuration\n"); | ||
1219 | goto err_release_regions; | ||
1220 | } | ||
1221 | |||
1222 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48)); | ||
1223 | if (err) { | ||
1224 | dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n"); | ||
1225 | goto err_release_regions; | ||
1226 | } | ||
1227 | |||
1228 | netdev = alloc_etherdev_mqs(sizeof(struct nicvf), | ||
1229 | MAX_RCV_QUEUES_PER_QS, | ||
1230 | MAX_SND_QUEUES_PER_QS); | ||
1231 | if (!netdev) { | ||
1232 | err = -ENOMEM; | ||
1233 | goto err_release_regions; | ||
1234 | } | ||
1235 | |||
1236 | pci_set_drvdata(pdev, netdev); | ||
1237 | |||
1238 | SET_NETDEV_DEV(netdev, &pdev->dev); | ||
1239 | |||
1240 | nic = netdev_priv(netdev); | ||
1241 | nic->netdev = netdev; | ||
1242 | nic->pdev = pdev; | ||
1243 | |||
1244 | /* MAP VF's configuration registers */ | ||
1245 | nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); | ||
1246 | if (!nic->reg_base) { | ||
1247 | dev_err(dev, "Cannot map config register space, aborting\n"); | ||
1248 | err = -ENOMEM; | ||
1249 | goto err_free_netdev; | ||
1250 | } | ||
1251 | |||
1252 | err = nicvf_set_qset_resources(nic); | ||
1253 | if (err) | ||
1254 | goto err_free_netdev; | ||
1255 | |||
1256 | qs = nic->qs; | ||
1257 | |||
1258 | err = nicvf_set_real_num_queues(netdev, qs->sq_cnt, qs->rq_cnt); | ||
1259 | if (err) | ||
1260 | goto err_free_netdev; | ||
1261 | |||
1262 | /* Check if PF is alive and get MAC address for this VF */ | ||
1263 | err = nicvf_register_misc_interrupt(nic); | ||
1264 | if (err) | ||
1265 | goto err_free_netdev; | ||
1266 | |||
1267 | netdev->features |= (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG | | ||
1268 | NETIF_F_TSO | NETIF_F_GRO); | ||
1269 | netdev->hw_features = netdev->features; | ||
1270 | |||
1271 | netdev->netdev_ops = &nicvf_netdev_ops; | ||
1272 | |||
1273 | INIT_WORK(&nic->reset_task, nicvf_reset_task); | ||
1274 | |||
1275 | err = register_netdev(netdev); | ||
1276 | if (err) { | ||
1277 | dev_err(dev, "Failed to register netdevice\n"); | ||
1278 | goto err_unregister_interrupts; | ||
1279 | } | ||
1280 | |||
1281 | nic->msg_enable = debug; | ||
1282 | |||
1283 | nicvf_set_ethtool_ops(netdev); | ||
1284 | |||
1285 | return 0; | ||
1286 | |||
1287 | err_unregister_interrupts: | ||
1288 | nicvf_unregister_interrupts(nic); | ||
1289 | err_free_netdev: | ||
1290 | pci_set_drvdata(pdev, NULL); | ||
1291 | free_netdev(netdev); | ||
1292 | err_release_regions: | ||
1293 | pci_release_regions(pdev); | ||
1294 | err_disable_device: | ||
1295 | pci_disable_device(pdev); | ||
1296 | return err; | ||
1297 | } | ||
1298 | |||
1299 | static void nicvf_remove(struct pci_dev *pdev) | ||
1300 | { | ||
1301 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
1302 | struct nicvf *nic = netdev_priv(netdev); | ||
1303 | |||
1304 | unregister_netdev(netdev); | ||
1305 | nicvf_unregister_interrupts(nic); | ||
1306 | pci_set_drvdata(pdev, NULL); | ||
1307 | free_netdev(netdev); | ||
1308 | pci_release_regions(pdev); | ||
1309 | pci_disable_device(pdev); | ||
1310 | } | ||
1311 | |||
1312 | static struct pci_driver nicvf_driver = { | ||
1313 | .name = DRV_NAME, | ||
1314 | .id_table = nicvf_id_table, | ||
1315 | .probe = nicvf_probe, | ||
1316 | .remove = nicvf_remove, | ||
1317 | }; | ||
1318 | |||
1319 | static int __init nicvf_init_module(void) | ||
1320 | { | ||
1321 | pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); | ||
1322 | |||
1323 | return pci_register_driver(&nicvf_driver); | ||
1324 | } | ||
1325 | |||
1326 | static void __exit nicvf_cleanup_module(void) | ||
1327 | { | ||
1328 | pci_unregister_driver(&nicvf_driver); | ||
1329 | } | ||
1330 | |||
1331 | module_init(nicvf_init_module); | ||
1332 | module_exit(nicvf_cleanup_module); | ||
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c new file mode 100644 index 000000000000..196246665444 --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c | |||
@@ -0,0 +1,1544 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2015 Cavium, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of version 2 of the GNU General Public License | ||
6 | * as published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #include <linux/pci.h> | ||
10 | #include <linux/netdevice.h> | ||
11 | #include <linux/ip.h> | ||
12 | #include <linux/etherdevice.h> | ||
13 | #include <net/ip.h> | ||
14 | #include <net/tso.h> | ||
15 | |||
16 | #include "nic_reg.h" | ||
17 | #include "nic.h" | ||
18 | #include "q_struct.h" | ||
19 | #include "nicvf_queues.h" | ||
20 | |||
21 | struct rbuf_info { | ||
22 | struct page *page; | ||
23 | void *data; | ||
24 | u64 offset; | ||
25 | }; | ||
26 | |||
27 | #define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES)) | ||
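/* Illustrative sketch of the receive buffer layout assumed by GET_RBUF_INFO()
 * and nicvf_alloc_rcv_buffer() below (inferred from the code, not taken from
 * the hardware manual):
 *
 *   | pad to alignment | struct rbuf_info ... | <- NICVF_RCV_BUF_ALIGN_BYTES -> | packet data |
 *                      ^ aligned address                                        ^ address given to HW
 *
 * The address handed to hardware sits NICVF_RCV_BUF_ALIGN_BYTES past the spot
 * where rbuf_info is stored, so stepping back that many bytes recovers it.
 */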
28 | |||
29 | /* Poll a register for a specific value */ | ||
30 | static int nicvf_poll_reg(struct nicvf *nic, int qidx, | ||
31 | u64 reg, int bit_pos, int bits, int val) | ||
32 | { | ||
33 | u64 bit_mask; | ||
34 | u64 reg_val; | ||
35 | int timeout = 10; | ||
36 | |||
37 | bit_mask = (1ULL << bits) - 1; | ||
38 | bit_mask = (bit_mask << bit_pos); | ||
39 | |||
40 | while (timeout) { | ||
41 | reg_val = nicvf_queue_reg_read(nic, reg, qidx); | ||
42 | if (((reg_val & bit_mask) >> bit_pos) == val) | ||
43 | return 0; | ||
44 | usleep_range(1000, 2000); | ||
45 | timeout--; | ||
46 | } | ||
47 | netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg); | ||
48 | return 1; | ||
49 | } | ||
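/* Example use (see nicvf_reclaim_snd_queue() below): polling bit 21 of
 * NIC_QSET_SQ_0_7_STATUS for the value 1 waits up to roughly 10-20 ms for a
 * send queue to report itself stopped before it is reset.
 */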
50 | |||
51 | /* Allocate memory for a queue's descriptors */ | ||
52 | static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem, | ||
53 | int q_len, int desc_size, int align_bytes) | ||
54 | { | ||
55 | dmem->q_len = q_len; | ||
56 | dmem->size = (desc_size * q_len) + align_bytes; | ||
57 | /* Save address, need it while freeing */ | ||
58 | dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size, | ||
59 | &dmem->dma, GFP_KERNEL); | ||
60 | if (!dmem->unalign_base) | ||
61 | return -ENOMEM; | ||
62 | |||
63 | /* Align memory address for 'align_bytes' */ | ||
64 | dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes); | ||
65 | dmem->base = (void *)((u8 *)dmem->unalign_base + | ||
66 | (dmem->phys_base - dmem->dma)); | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | /* Free queue's descriptor memory */ | ||
71 | static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem) | ||
72 | { | ||
73 | if (!dmem) | ||
74 | return; | ||
75 | |||
76 | dma_free_coherent(&nic->pdev->dev, dmem->size, | ||
77 | dmem->unalign_base, dmem->dma); | ||
78 | dmem->unalign_base = NULL; | ||
79 | dmem->base = NULL; | ||
80 | } | ||
81 | |||
82 | /* Allocate a buffer for packet reception. | ||
83 | * HW returns the memory address where the packet got DMA'ed, not a pointer | ||
84 | * into the RBDR ring, so save the buffer address at the start of the | ||
85 | * fragment and align the start address to a cache-aligned boundary. | ||
86 | */ | ||
87 | static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, | ||
88 | u32 buf_len, u64 **rbuf) | ||
89 | { | ||
90 | u64 data; | ||
91 | struct rbuf_info *rinfo; | ||
92 | int order = get_order(buf_len); | ||
93 | |||
94 | /* Check if request can be accommodated in the previously allocated page */ | ||
95 | if (nic->rb_page) { | ||
96 | if ((nic->rb_page_offset + buf_len + buf_len) > | ||
97 | (PAGE_SIZE << order)) { | ||
98 | nic->rb_page = NULL; | ||
99 | } else { | ||
100 | nic->rb_page_offset += buf_len; | ||
101 | get_page(nic->rb_page); | ||
102 | } | ||
103 | } | ||
104 | |||
105 | /* Allocate a new page */ | ||
106 | if (!nic->rb_page) { | ||
107 | nic->rb_page = alloc_pages(gfp | __GFP_COMP, order); | ||
108 | if (!nic->rb_page) { | ||
109 | netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n"); | ||
110 | return -ENOMEM; | ||
111 | } | ||
112 | nic->rb_page_offset = 0; | ||
113 | } | ||
114 | |||
115 | data = (u64)page_address(nic->rb_page) + nic->rb_page_offset; | ||
116 | |||
117 | /* Align buffer addr to cache line, i.e. 128 bytes */ | ||
118 | rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data)); | ||
119 | /* Save page address for refcount updates */ | ||
120 | rinfo->page = nic->rb_page; | ||
121 | /* Store start address for later retrieval */ | ||
122 | rinfo->data = (void *)data; | ||
123 | /* Store alignment offset */ | ||
124 | rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data); | ||
125 | |||
126 | data += rinfo->offset; | ||
127 | |||
128 | /* Give next aligned address to hw for DMA */ | ||
129 | *rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES); | ||
130 | return 0; | ||
131 | } | ||
132 | |||
133 | /* Retrieve actual buffer start address and build skb for received packet */ | ||
134 | static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic, | ||
135 | u64 rb_ptr, int len) | ||
136 | { | ||
137 | struct sk_buff *skb; | ||
138 | struct rbuf_info *rinfo; | ||
139 | |||
140 | rb_ptr = (u64)phys_to_virt(rb_ptr); | ||
141 | /* Get buffer start address and alignment offset */ | ||
142 | rinfo = GET_RBUF_INFO(rb_ptr); | ||
143 | |||
144 | /* Now build an skb to give to stack */ | ||
145 | skb = build_skb(rinfo->data, RCV_FRAG_LEN); | ||
146 | if (!skb) { | ||
147 | put_page(rinfo->page); | ||
148 | return NULL; | ||
149 | } | ||
150 | |||
151 | /* Set correct skb->data */ | ||
152 | skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES); | ||
153 | |||
154 | prefetch((void *)rb_ptr); | ||
155 | return skb; | ||
156 | } | ||
157 | |||
158 | /* Allocate RBDR ring and populate receive buffers */ | ||
159 | static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, | ||
160 | int ring_len, int buf_size) | ||
161 | { | ||
162 | int idx; | ||
163 | u64 *rbuf; | ||
164 | struct rbdr_entry_t *desc; | ||
165 | int err; | ||
166 | |||
167 | err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len, | ||
168 | sizeof(struct rbdr_entry_t), | ||
169 | NICVF_RCV_BUF_ALIGN_BYTES); | ||
170 | if (err) | ||
171 | return err; | ||
172 | |||
173 | rbdr->desc = rbdr->dmem.base; | ||
174 | /* Buffer size has to be in multiples of 128 bytes */ | ||
175 | rbdr->dma_size = buf_size; | ||
176 | rbdr->enable = true; | ||
177 | rbdr->thresh = RBDR_THRESH; | ||
178 | |||
179 | nic->rb_page = NULL; | ||
180 | for (idx = 0; idx < ring_len; idx++) { | ||
181 | err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN, | ||
182 | &rbuf); | ||
183 | if (err) | ||
184 | return err; | ||
185 | |||
186 | desc = GET_RBDR_DESC(rbdr, idx); | ||
187 | desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN; | ||
188 | } | ||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | /* Free RBDR ring and its receive buffers */ | ||
193 | static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) | ||
194 | { | ||
195 | int head, tail; | ||
196 | u64 buf_addr; | ||
197 | struct rbdr_entry_t *desc; | ||
198 | struct rbuf_info *rinfo; | ||
199 | |||
200 | if (!rbdr) | ||
201 | return; | ||
202 | |||
203 | rbdr->enable = false; | ||
204 | if (!rbdr->dmem.base) | ||
205 | return; | ||
206 | |||
207 | head = rbdr->head; | ||
208 | tail = rbdr->tail; | ||
209 | |||
210 | /* Release receive buffer pages */ | ||
211 | while (head != tail) { | ||
212 | desc = GET_RBDR_DESC(rbdr, head); | ||
213 | buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; | ||
214 | rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr)); | ||
215 | put_page(rinfo->page); | ||
216 | head++; | ||
217 | head &= (rbdr->dmem.q_len - 1); | ||
218 | } | ||
219 | /* Release buffer of tail desc */ | ||
220 | desc = GET_RBDR_DESC(rbdr, tail); | ||
221 | buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; | ||
222 | rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr)); | ||
223 | put_page(rinfo->page); | ||
224 | |||
225 | /* Free RBDR ring */ | ||
226 | nicvf_free_q_desc_mem(nic, &rbdr->dmem); | ||
227 | } | ||
228 | |||
229 | /* Refill receive buffer descriptors with new buffers. | ||
230 | */ | ||
231 | void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp) | ||
232 | { | ||
233 | struct queue_set *qs = nic->qs; | ||
234 | int rbdr_idx = qs->rbdr_cnt; | ||
235 | int tail, qcount; | ||
236 | int refill_rb_cnt; | ||
237 | struct rbdr *rbdr; | ||
238 | struct rbdr_entry_t *desc; | ||
239 | u64 *rbuf; | ||
240 | int new_rb = 0; | ||
241 | |||
242 | refill: | ||
243 | if (!rbdr_idx) | ||
244 | return; | ||
245 | rbdr_idx--; | ||
246 | rbdr = &qs->rbdr[rbdr_idx]; | ||
247 | /* Check if it's enabled */ | ||
248 | if (!rbdr->enable) | ||
249 | goto next_rbdr; | ||
250 | |||
251 | /* Get number of descriptors to be refilled */ | ||
252 | qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx); | ||
253 | qcount &= 0x7FFFF; | ||
254 | /* Doorbell can be rung with a max of ring size minus 1 */ | ||
255 | if (qcount >= (qs->rbdr_len - 1)) | ||
256 | goto next_rbdr; | ||
257 | else | ||
258 | refill_rb_cnt = qs->rbdr_len - qcount - 1; | ||
259 | |||
260 | /* Start filling descs from tail */ | ||
261 | tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3; | ||
262 | while (refill_rb_cnt) { | ||
263 | tail++; | ||
264 | tail &= (rbdr->dmem.q_len - 1); | ||
265 | |||
266 | if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf)) | ||
267 | break; | ||
268 | |||
269 | desc = GET_RBDR_DESC(rbdr, tail); | ||
270 | desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN; | ||
271 | refill_rb_cnt--; | ||
272 | new_rb++; | ||
273 | } | ||
274 | |||
275 | /* make sure all memory stores are done before ringing doorbell */ | ||
276 | smp_wmb(); | ||
277 | |||
278 | /* Check if buffer allocation failed */ | ||
279 | if (refill_rb_cnt) | ||
280 | nic->rb_alloc_fail = true; | ||
281 | else | ||
282 | nic->rb_alloc_fail = false; | ||
283 | |||
284 | /* Notify HW */ | ||
285 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, | ||
286 | rbdr_idx, new_rb); | ||
287 | next_rbdr: | ||
288 | /* Re-enable RBDR interrupts only if buffer allocation succeeded */ | ||
289 | if (!nic->rb_alloc_fail && rbdr->enable) | ||
290 | nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx); | ||
291 | |||
292 | if (rbdr_idx) | ||
293 | goto refill; | ||
294 | } | ||
295 | |||
296 | /* Alloc rcv buffers in non-atomic mode for a better chance of success */ | ||
297 | void nicvf_rbdr_work(struct work_struct *work) | ||
298 | { | ||
299 | struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work); | ||
300 | |||
301 | nicvf_refill_rbdr(nic, GFP_KERNEL); | ||
302 | if (nic->rb_alloc_fail) | ||
303 | schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10)); | ||
304 | else | ||
305 | nic->rb_work_scheduled = false; | ||
306 | } | ||
307 | |||
308 | /* In Softirq context, alloc rcv buffers in atomic mode */ | ||
309 | void nicvf_rbdr_task(unsigned long data) | ||
310 | { | ||
311 | struct nicvf *nic = (struct nicvf *)data; | ||
312 | |||
313 | nicvf_refill_rbdr(nic, GFP_ATOMIC); | ||
314 | if (nic->rb_alloc_fail) { | ||
315 | nic->rb_work_scheduled = true; | ||
316 | schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10)); | ||
317 | } | ||
318 | } | ||
319 | |||
320 | /* Initialize completion queue */ | ||
321 | static int nicvf_init_cmp_queue(struct nicvf *nic, | ||
322 | struct cmp_queue *cq, int q_len) | ||
323 | { | ||
324 | int err; | ||
325 | |||
326 | err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE, | ||
327 | NICVF_CQ_BASE_ALIGN_BYTES); | ||
328 | if (err) | ||
329 | return err; | ||
330 | |||
331 | cq->desc = cq->dmem.base; | ||
332 | cq->thresh = CMP_QUEUE_CQE_THRESH; | ||
333 | nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1; | ||
334 | |||
335 | return 0; | ||
336 | } | ||
337 | |||
338 | static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq) | ||
339 | { | ||
340 | if (!cq) | ||
341 | return; | ||
342 | if (!cq->dmem.base) | ||
343 | return; | ||
344 | |||
345 | nicvf_free_q_desc_mem(nic, &cq->dmem); | ||
346 | } | ||
347 | |||
348 | /* Initialize transmit queue */ | ||
349 | static int nicvf_init_snd_queue(struct nicvf *nic, | ||
350 | struct snd_queue *sq, int q_len) | ||
351 | { | ||
352 | int err; | ||
353 | |||
354 | err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE, | ||
355 | NICVF_SQ_BASE_ALIGN_BYTES); | ||
356 | if (err) | ||
357 | return err; | ||
358 | |||
359 | sq->desc = sq->dmem.base; | ||
360 | sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_ATOMIC); | ||
361 | sq->head = 0; | ||
362 | sq->tail = 0; | ||
363 | atomic_set(&sq->free_cnt, q_len - 1); | ||
364 | sq->thresh = SND_QUEUE_THRESH; | ||
365 | |||
366 | /* Preallocate memory for TSO segments' headers */ | ||
367 | sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev, | ||
368 | q_len * TSO_HEADER_SIZE, | ||
369 | &sq->tso_hdrs_phys, GFP_KERNEL); | ||
370 | if (!sq->tso_hdrs) | ||
371 | return -ENOMEM; | ||
372 | |||
373 | return 0; | ||
374 | } | ||
375 | |||
376 | static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) | ||
377 | { | ||
378 | if (!sq) | ||
379 | return; | ||
380 | if (!sq->dmem.base) | ||
381 | return; | ||
382 | |||
383 | if (sq->tso_hdrs) | ||
384 | dma_free_coherent(&nic->pdev->dev, sq->dmem.q_len, | ||
385 | sq->tso_hdrs, sq->tso_hdrs_phys); | ||
386 | |||
387 | kfree(sq->skbuff); | ||
388 | nicvf_free_q_desc_mem(nic, &sq->dmem); | ||
389 | } | ||
390 | |||
391 | static void nicvf_reclaim_snd_queue(struct nicvf *nic, | ||
392 | struct queue_set *qs, int qidx) | ||
393 | { | ||
394 | /* Disable send queue */ | ||
395 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0); | ||
396 | /* Check if SQ is stopped */ | ||
397 | if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01)) | ||
398 | return; | ||
399 | /* Reset send queue */ | ||
400 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); | ||
401 | } | ||
402 | |||
403 | static void nicvf_reclaim_rcv_queue(struct nicvf *nic, | ||
404 | struct queue_set *qs, int qidx) | ||
405 | { | ||
406 | union nic_mbx mbx = {}; | ||
407 | |||
408 | /* Make sure all packets in the pipeline are written back into mem */ | ||
409 | mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC; | ||
410 | nicvf_send_msg_to_pf(nic, &mbx); | ||
411 | } | ||
412 | |||
413 | static void nicvf_reclaim_cmp_queue(struct nicvf *nic, | ||
414 | struct queue_set *qs, int qidx) | ||
415 | { | ||
416 | /* Disable timer threshold (doesn't get reset upon CQ reset) */ | ||
417 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0); | ||
418 | /* Disable completion queue */ | ||
419 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0); | ||
420 | /* Reset completion queue */ | ||
421 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); | ||
422 | } | ||
423 | |||
424 | static void nicvf_reclaim_rbdr(struct nicvf *nic, | ||
425 | struct rbdr *rbdr, int qidx) | ||
426 | { | ||
427 | u64 tmp, fifo_state; | ||
428 | int timeout = 10; | ||
429 | |||
430 | /* Save head and tail pointers for freeing up buffers */ | ||
431 | rbdr->head = nicvf_queue_reg_read(nic, | ||
432 | NIC_QSET_RBDR_0_1_HEAD, | ||
433 | qidx) >> 3; | ||
434 | rbdr->tail = nicvf_queue_reg_read(nic, | ||
435 | NIC_QSET_RBDR_0_1_TAIL, | ||
436 | qidx) >> 3; | ||
437 | |||
438 | /* If RBDR FIFO is in 'FAIL' state then do a reset first | ||
439 | * before reclaiming. | ||
440 | */ | ||
441 | fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx); | ||
442 | if (((fifo_state >> 62) & 0x03) == 0x3) | ||
443 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, | ||
444 | qidx, NICVF_RBDR_RESET); | ||
445 | |||
446 | /* Disable RBDR */ | ||
447 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0); | ||
448 | if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) | ||
449 | return; | ||
450 | while (1) { | ||
451 | tmp = nicvf_queue_reg_read(nic, | ||
452 | NIC_QSET_RBDR_0_1_PREFETCH_STATUS, | ||
453 | qidx); | ||
454 | if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF)) | ||
455 | break; | ||
456 | usleep_range(1000, 2000); | ||
457 | timeout--; | ||
458 | if (!timeout) { | ||
459 | netdev_err(nic->netdev, | ||
460 | "Failed polling on prefetch status\n"); | ||
461 | return; | ||
462 | } | ||
463 | } | ||
464 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, | ||
465 | qidx, NICVF_RBDR_RESET); | ||
466 | |||
467 | if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02)) | ||
468 | return; | ||
469 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00); | ||
470 | if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) | ||
471 | return; | ||
472 | } | ||
473 | |||
474 | /* Configures receive queue */ | ||
475 | static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, | ||
476 | int qidx, bool enable) | ||
477 | { | ||
478 | union nic_mbx mbx = {}; | ||
479 | struct rcv_queue *rq; | ||
480 | struct rq_cfg rq_cfg; | ||
481 | |||
482 | rq = &qs->rq[qidx]; | ||
483 | rq->enable = enable; | ||
484 | |||
485 | /* Disable receive queue */ | ||
486 | nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0); | ||
487 | |||
488 | if (!rq->enable) { | ||
489 | nicvf_reclaim_rcv_queue(nic, qs, qidx); | ||
490 | return; | ||
491 | } | ||
492 | |||
493 | rq->cq_qs = qs->vnic_id; | ||
494 | rq->cq_idx = qidx; | ||
495 | rq->start_rbdr_qs = qs->vnic_id; | ||
496 | rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1; | ||
497 | rq->cont_rbdr_qs = qs->vnic_id; | ||
498 | rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1; | ||
499 | /* All writes of RBDR data to be loaded into L2 cache as well */ | ||
500 | rq->caching = 1; | ||
501 | |||
502 | /* Send a mailbox msg to PF to config RQ */ | ||
503 | mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG; | ||
504 | mbx.rq.qs_num = qs->vnic_id; | ||
505 | mbx.rq.rq_num = qidx; | ||
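/* Field positions in mbx.rq.cfg as inferred from the shifts below
 * (illustrative; exact field widths are not spelled out here): caching at
 * bit 26, cq_qs from bit 19, cq_idx from bit 16, cont_rbdr_qs from bit 9,
 * cont_qs_rbdr_idx at bit 8, start_rbdr_qs from bit 1, start_qs_rbdr_idx
 * at bit 0.
 */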
506 | mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) | | ||
507 | (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) | | ||
508 | (rq->cont_qs_rbdr_idx << 8) | | ||
509 | (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx); | ||
510 | nicvf_send_msg_to_pf(nic, &mbx); | ||
511 | |||
512 | mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG; | ||
513 | mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0); | ||
514 | nicvf_send_msg_to_pf(nic, &mbx); | ||
515 | |||
516 | /* RQ drop config | ||
517 | * Enable CQ drop to reserve sufficient CQEs for all tx packets | ||
518 | */ | ||
519 | mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG; | ||
520 | mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8); | ||
521 | nicvf_send_msg_to_pf(nic, &mbx); | ||
522 | |||
523 | nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, qidx, 0x00); | ||
524 | |||
525 | /* Enable Receive queue */ | ||
526 | rq_cfg.ena = 1; | ||
527 | rq_cfg.tcp_ena = 0; | ||
528 | nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg); | ||
529 | } | ||
530 | |||
531 | /* Configures completion queue */ | ||
532 | void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, | ||
533 | int qidx, bool enable) | ||
534 | { | ||
535 | struct cmp_queue *cq; | ||
536 | struct cq_cfg cq_cfg; | ||
537 | |||
538 | cq = &qs->cq[qidx]; | ||
539 | cq->enable = enable; | ||
540 | |||
541 | if (!cq->enable) { | ||
542 | nicvf_reclaim_cmp_queue(nic, qs, qidx); | ||
543 | return; | ||
544 | } | ||
545 | |||
546 | /* Reset completion queue */ | ||
547 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); | ||
548 | |||
549 | if (!cq->enable) | ||
550 | return; | ||
551 | |||
552 | spin_lock_init(&cq->lock); | ||
553 | /* Set completion queue base address */ | ||
554 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, | ||
555 | qidx, (u64)(cq->dmem.phys_base)); | ||
556 | |||
557 | /* Enable Completion queue */ | ||
558 | cq_cfg.ena = 1; | ||
559 | cq_cfg.reset = 0; | ||
560 | cq_cfg.caching = 0; | ||
561 | cq_cfg.qsize = CMP_QSIZE; | ||
562 | cq_cfg.avg_con = 0; | ||
563 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg); | ||
564 | |||
565 | /* Set threshold value for interrupt generation */ | ||
566 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh); | ||
567 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, | ||
568 | qidx, nic->cq_coalesce_usecs); | ||
569 | } | ||
570 | |||
571 | /* Configures transmit queue */ | ||
572 | static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, | ||
573 | int qidx, bool enable) | ||
574 | { | ||
575 | union nic_mbx mbx = {}; | ||
576 | struct snd_queue *sq; | ||
577 | struct sq_cfg sq_cfg; | ||
578 | |||
579 | sq = &qs->sq[qidx]; | ||
580 | sq->enable = enable; | ||
581 | |||
582 | if (!sq->enable) { | ||
583 | nicvf_reclaim_snd_queue(nic, qs, qidx); | ||
584 | return; | ||
585 | } | ||
586 | |||
587 | /* Reset send queue */ | ||
588 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); | ||
589 | |||
590 | sq->cq_qs = qs->vnic_id; | ||
591 | sq->cq_idx = qidx; | ||
592 | |||
593 | /* Send a mailbox msg to PF to config SQ */ | ||
594 | mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG; | ||
595 | mbx.sq.qs_num = qs->vnic_id; | ||
596 | mbx.sq.sq_num = qidx; | ||
597 | mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx; | ||
598 | nicvf_send_msg_to_pf(nic, &mbx); | ||
599 | |||
600 | /* Set queue base address */ | ||
601 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, | ||
602 | qidx, (u64)(sq->dmem.phys_base)); | ||
603 | |||
604 | /* Enable send queue & set queue size */ | ||
605 | sq_cfg.ena = 1; | ||
606 | sq_cfg.reset = 0; | ||
607 | sq_cfg.ldwb = 0; | ||
608 | sq_cfg.qsize = SND_QSIZE; | ||
609 | sq_cfg.tstmp_bgx_intf = 0; | ||
610 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg); | ||
611 | |||
612 | /* Set threshold value for interrupt generation */ | ||
613 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh); | ||
614 | |||
615 | /* Set queue:cpu affinity for better load distribution */ | ||
616 | if (cpu_online(qidx)) { | ||
617 | cpumask_set_cpu(qidx, &sq->affinity_mask); | ||
618 | netif_set_xps_queue(nic->netdev, | ||
619 | &sq->affinity_mask, qidx); | ||
620 | } | ||
621 | } | ||
622 | |||
623 | /* Configures receive buffer descriptor ring */ | ||
624 | static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, | ||
625 | int qidx, bool enable) | ||
626 | { | ||
627 | struct rbdr *rbdr; | ||
628 | struct rbdr_cfg rbdr_cfg; | ||
629 | |||
630 | rbdr = &qs->rbdr[qidx]; | ||
631 | nicvf_reclaim_rbdr(nic, rbdr, qidx); | ||
632 | if (!enable) | ||
633 | return; | ||
634 | |||
635 | /* Set descriptor base address */ | ||
636 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, | ||
637 | qidx, (u64)(rbdr->dmem.phys_base)); | ||
638 | |||
639 | /* Enable RBDR & set queue size */ | ||
640 | /* Buffer size should be in multiples of 128 bytes */ | ||
641 | rbdr_cfg.ena = 1; | ||
642 | rbdr_cfg.reset = 0; | ||
643 | rbdr_cfg.ldwb = 0; | ||
644 | rbdr_cfg.qsize = RBDR_SIZE; | ||
645 | rbdr_cfg.avg_con = 0; | ||
646 | rbdr_cfg.lines = rbdr->dma_size / 128; | ||
647 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, | ||
648 | qidx, *(u64 *)&rbdr_cfg); | ||
649 | |||
650 | /* Notify HW */ | ||
651 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, | ||
652 | qidx, qs->rbdr_len - 1); | ||
653 | |||
654 | /* Set threshold value for interrupt generation */ | ||
655 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, | ||
656 | qidx, rbdr->thresh - 1); | ||
657 | } | ||
658 | |||
659 | /* Requests PF to assign and enable Qset */ | ||
660 | void nicvf_qset_config(struct nicvf *nic, bool enable) | ||
661 | { | ||
662 | union nic_mbx mbx = {}; | ||
663 | struct queue_set *qs = nic->qs; | ||
664 | struct qs_cfg *qs_cfg; | ||
665 | |||
666 | if (!qs) { | ||
667 | netdev_warn(nic->netdev, | ||
668 | "Qset is still not allocated, don't init queues\n"); | ||
669 | return; | ||
670 | } | ||
671 | |||
672 | qs->enable = enable; | ||
673 | qs->vnic_id = nic->vf_id; | ||
674 | |||
675 | /* Send a mailbox msg to PF to config Qset */ | ||
676 | mbx.qs.msg = NIC_MBOX_MSG_QS_CFG; | ||
677 | mbx.qs.num = qs->vnic_id; | ||
678 | |||
679 | mbx.qs.cfg = 0; | ||
680 | qs_cfg = (struct qs_cfg *)&mbx.qs.cfg; | ||
681 | if (qs->enable) { | ||
682 | qs_cfg->ena = 1; | ||
683 | #ifdef __BIG_ENDIAN | ||
684 | qs_cfg->be = 1; | ||
685 | #endif | ||
686 | qs_cfg->vnic = qs->vnic_id; | ||
687 | } | ||
688 | nicvf_send_msg_to_pf(nic, &mbx); | ||
689 | } | ||
690 | |||
691 | static void nicvf_free_resources(struct nicvf *nic) | ||
692 | { | ||
693 | int qidx; | ||
694 | struct queue_set *qs = nic->qs; | ||
695 | |||
696 | /* Free receive buffer descriptor ring */ | ||
697 | for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) | ||
698 | nicvf_free_rbdr(nic, &qs->rbdr[qidx]); | ||
699 | |||
700 | /* Free completion queue */ | ||
701 | for (qidx = 0; qidx < qs->cq_cnt; qidx++) | ||
702 | nicvf_free_cmp_queue(nic, &qs->cq[qidx]); | ||
703 | |||
704 | /* Free send queue */ | ||
705 | for (qidx = 0; qidx < qs->sq_cnt; qidx++) | ||
706 | nicvf_free_snd_queue(nic, &qs->sq[qidx]); | ||
707 | } | ||
708 | |||
709 | static int nicvf_alloc_resources(struct nicvf *nic) | ||
710 | { | ||
711 | int qidx; | ||
712 | struct queue_set *qs = nic->qs; | ||
713 | |||
714 | /* Alloc receive buffer descriptor ring */ | ||
715 | for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { | ||
716 | if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len, | ||
717 | DMA_BUFFER_LEN)) | ||
718 | goto alloc_fail; | ||
719 | } | ||
720 | |||
721 | /* Alloc send queue */ | ||
722 | for (qidx = 0; qidx < qs->sq_cnt; qidx++) { | ||
723 | if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len)) | ||
724 | goto alloc_fail; | ||
725 | } | ||
726 | |||
727 | /* Alloc completion queue */ | ||
728 | for (qidx = 0; qidx < qs->cq_cnt; qidx++) { | ||
729 | if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len)) | ||
730 | goto alloc_fail; | ||
731 | } | ||
732 | |||
733 | return 0; | ||
734 | alloc_fail: | ||
735 | nicvf_free_resources(nic); | ||
736 | return -ENOMEM; | ||
737 | } | ||
738 | |||
739 | int nicvf_set_qset_resources(struct nicvf *nic) | ||
740 | { | ||
741 | struct queue_set *qs; | ||
742 | |||
743 | qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL); | ||
744 | if (!qs) | ||
745 | return -ENOMEM; | ||
746 | nic->qs = qs; | ||
747 | |||
748 | /* Set count of each queue */ | ||
749 | qs->rbdr_cnt = RBDR_CNT; | ||
750 | qs->rq_cnt = RCV_QUEUE_CNT; | ||
751 | qs->sq_cnt = SND_QUEUE_CNT; | ||
752 | qs->cq_cnt = CMP_QUEUE_CNT; | ||
753 | |||
754 | /* Set queue lengths */ | ||
755 | qs->rbdr_len = RCV_BUF_COUNT; | ||
756 | qs->sq_len = SND_QUEUE_LEN; | ||
757 | qs->cq_len = CMP_QUEUE_LEN; | ||
758 | return 0; | ||
759 | } | ||
760 | |||
761 | int nicvf_config_data_transfer(struct nicvf *nic, bool enable) | ||
762 | { | ||
763 | bool disable = false; | ||
764 | struct queue_set *qs = nic->qs; | ||
765 | int qidx; | ||
766 | |||
767 | if (!qs) | ||
768 | return 0; | ||
769 | |||
770 | if (enable) { | ||
771 | if (nicvf_alloc_resources(nic)) | ||
772 | return -ENOMEM; | ||
773 | |||
774 | for (qidx = 0; qidx < qs->sq_cnt; qidx++) | ||
775 | nicvf_snd_queue_config(nic, qs, qidx, enable); | ||
776 | for (qidx = 0; qidx < qs->cq_cnt; qidx++) | ||
777 | nicvf_cmp_queue_config(nic, qs, qidx, enable); | ||
778 | for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) | ||
779 | nicvf_rbdr_config(nic, qs, qidx, enable); | ||
780 | for (qidx = 0; qidx < qs->rq_cnt; qidx++) | ||
781 | nicvf_rcv_queue_config(nic, qs, qidx, enable); | ||
782 | } else { | ||
783 | for (qidx = 0; qidx < qs->rq_cnt; qidx++) | ||
784 | nicvf_rcv_queue_config(nic, qs, qidx, disable); | ||
785 | for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) | ||
786 | nicvf_rbdr_config(nic, qs, qidx, disable); | ||
787 | for (qidx = 0; qidx < qs->sq_cnt; qidx++) | ||
788 | nicvf_snd_queue_config(nic, qs, qidx, disable); | ||
789 | for (qidx = 0; qidx < qs->cq_cnt; qidx++) | ||
790 | nicvf_cmp_queue_config(nic, qs, qidx, disable); | ||
791 | |||
792 | nicvf_free_resources(nic); | ||
793 | } | ||
794 | |||
795 | return 0; | ||
796 | } | ||
797 | |||
798 | /* Get a free descriptor from SQ | ||
799 | * returns the starting descriptor number | ||
800 | */ | ||
801 | static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt) | ||
802 | { | ||
803 | int qentry; | ||
804 | |||
805 | qentry = sq->tail; | ||
806 | atomic_sub(desc_cnt, &sq->free_cnt); | ||
807 | sq->tail += desc_cnt; | ||
808 | sq->tail &= (sq->dmem.q_len - 1); | ||
809 | |||
810 | return qentry; | ||
811 | } | ||
812 | |||
813 | /* Free descriptor back to SQ for future use */ | ||
814 | void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt) | ||
815 | { | ||
816 | atomic_add(desc_cnt, &sq->free_cnt); | ||
817 | sq->head += desc_cnt; | ||
818 | sq->head &= (sq->dmem.q_len - 1); | ||
819 | } | ||
820 | |||
821 | static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry) | ||
822 | { | ||
823 | qentry++; | ||
824 | qentry &= (sq->dmem.q_len - 1); | ||
825 | return qentry; | ||
826 | } | ||
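/* Note on the helpers above: masking with (q_len - 1) wraps head/tail
 * correctly only because the send queue length is a power of two; free_cnt
 * tracks how many descriptors remain available between the xmit path and
 * the completion/cleanup path.
 */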
827 | |||
828 | void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx) | ||
829 | { | ||
830 | u64 sq_cfg; | ||
831 | |||
832 | sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); | ||
833 | sq_cfg |= NICVF_SQ_EN; | ||
834 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); | ||
835 | /* Ring doorbell so that H/W restarts processing SQEs */ | ||
836 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0); | ||
837 | } | ||
838 | |||
839 | void nicvf_sq_disable(struct nicvf *nic, int qidx) | ||
840 | { | ||
841 | u64 sq_cfg; | ||
842 | |||
843 | sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); | ||
844 | sq_cfg &= ~NICVF_SQ_EN; | ||
845 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); | ||
846 | } | ||
847 | |||
848 | void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq, | ||
849 | int qidx) | ||
850 | { | ||
851 | u64 head, tail; | ||
852 | struct sk_buff *skb; | ||
853 | struct nicvf *nic = netdev_priv(netdev); | ||
854 | struct sq_hdr_subdesc *hdr; | ||
855 | |||
856 | head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4; | ||
857 | tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4; | ||
858 | while (sq->head != head) { | ||
859 | hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head); | ||
860 | if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) { | ||
861 | nicvf_put_sq_desc(sq, 1); | ||
862 | continue; | ||
863 | } | ||
864 | skb = (struct sk_buff *)sq->skbuff[sq->head]; | ||
865 | atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets); | ||
866 | atomic64_add(hdr->tot_len, | ||
867 | (atomic64_t *)&netdev->stats.tx_bytes); | ||
868 | dev_kfree_skb_any(skb); | ||
869 | nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); | ||
870 | } | ||
871 | } | ||
872 | |||
873 | /* Calculate the number of SQ subdescriptors needed to transmit all | ||
874 | * segments of this TSO packet. | ||
875 | * Taken from 'Tilera network driver' with a minor modification. | ||
876 | */ | ||
877 | static int nicvf_tso_count_subdescs(struct sk_buff *skb) | ||
878 | { | ||
879 | struct skb_shared_info *sh = skb_shinfo(skb); | ||
880 | unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | ||
881 | unsigned int data_len = skb->len - sh_len; | ||
882 | unsigned int p_len = sh->gso_size; | ||
883 | long f_id = -1; /* id of the current fragment */ | ||
884 | long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ | ||
885 | long f_used = 0; /* bytes used from the current fragment */ | ||
886 | long n; /* size of the current piece of payload */ | ||
887 | int num_edescs = 0; | ||
888 | int segment; | ||
889 | |||
890 | for (segment = 0; segment < sh->gso_segs; segment++) { | ||
891 | unsigned int p_used = 0; | ||
892 | |||
893 | /* One edesc for header and for each piece of the payload. */ | ||
894 | for (num_edescs++; p_used < p_len; num_edescs++) { | ||
895 | /* Advance as needed. */ | ||
896 | while (f_used >= f_size) { | ||
897 | f_id++; | ||
898 | f_size = skb_frag_size(&sh->frags[f_id]); | ||
899 | f_used = 0; | ||
900 | } | ||
901 | |||
902 | /* Use bytes from the current fragment. */ | ||
903 | n = p_len - p_used; | ||
904 | if (n > f_size - f_used) | ||
905 | n = f_size - f_used; | ||
906 | f_used += n; | ||
907 | p_used += n; | ||
908 | } | ||
909 | |||
910 | /* The last segment may be less than gso_size. */ | ||
911 | data_len -= p_len; | ||
912 | if (data_len < p_len) | ||
913 | p_len = data_len; | ||
914 | } | ||
915 | |||
916 | /* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */ | ||
917 | return num_edescs + sh->gso_segs; | ||
918 | } | ||
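/* Worked example with hypothetical numbers: for a TSO skb with gso_size 1400,
 * three segments, the header in the linear area and all payload in a single
 * page fragment, each segment needs one gather entry for its header copy and
 * one for its payload slice, so num_edescs = 6; adding gso_segs for the
 * per-segment SQ HEADER subdescriptors gives 9 SQ entries in total.
 */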
919 | |||
920 | /* Get the number of SQ descriptors needed to xmit this skb */ | ||
921 | static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb) | ||
922 | { | ||
923 | int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT; | ||
924 | |||
925 | if (skb_shinfo(skb)->gso_size) { | ||
926 | subdesc_cnt = nicvf_tso_count_subdescs(skb); | ||
927 | return subdesc_cnt; | ||
928 | } | ||
929 | |||
930 | if (skb_shinfo(skb)->nr_frags) | ||
931 | subdesc_cnt += skb_shinfo(skb)->nr_frags; | ||
932 | |||
933 | return subdesc_cnt; | ||
934 | } | ||
935 | |||
936 | /* Add SQ HEADER subdescriptor. | ||
937 | * First subdescriptor for every send descriptor. | ||
938 | */ | ||
939 | static inline void | ||
940 | nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry, | ||
941 | int subdesc_cnt, struct sk_buff *skb, int len) | ||
942 | { | ||
943 | int proto; | ||
944 | struct sq_hdr_subdesc *hdr; | ||
945 | |||
946 | hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); | ||
947 | sq->skbuff[qentry] = (u64)skb; | ||
948 | |||
949 | memset(hdr, 0, SND_QUEUE_DESC_SIZE); | ||
950 | hdr->subdesc_type = SQ_DESC_TYPE_HEADER; | ||
951 | /* Enable notification via CQE after processing SQE */ | ||
952 | hdr->post_cqe = 1; | ||
953 | /* Number of subdescriptors following this */ | ||
954 | hdr->subdesc_cnt = subdesc_cnt; | ||
955 | hdr->tot_len = len; | ||
956 | |||
957 | /* Offload checksum calculation to HW */ | ||
958 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
959 | if (skb->protocol != htons(ETH_P_IP)) | ||
960 | return; | ||
961 | |||
962 | hdr->csum_l3 = 1; /* Enable IP csum calculation */ | ||
963 | hdr->l3_offset = skb_network_offset(skb); | ||
964 | hdr->l4_offset = skb_transport_offset(skb); | ||
965 | |||
966 | proto = ip_hdr(skb)->protocol; | ||
967 | switch (proto) { | ||
968 | case IPPROTO_TCP: | ||
969 | hdr->csum_l4 = SEND_L4_CSUM_TCP; | ||
970 | break; | ||
971 | case IPPROTO_UDP: | ||
972 | hdr->csum_l4 = SEND_L4_CSUM_UDP; | ||
973 | break; | ||
974 | case IPPROTO_SCTP: | ||
975 | hdr->csum_l4 = SEND_L4_CSUM_SCTP; | ||
976 | break; | ||
977 | } | ||
978 | } | ||
979 | } | ||
980 | |||
981 | /* SQ GATHER subdescriptor | ||
982 | * Must follow HDR descriptor | ||
983 | */ | ||
984 | static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, | ||
985 | int size, u64 data) | ||
986 | { | ||
987 | struct sq_gather_subdesc *gather; | ||
988 | |||
989 | qentry &= (sq->dmem.q_len - 1); | ||
990 | gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry); | ||
991 | |||
992 | memset(gather, 0, SND_QUEUE_DESC_SIZE); | ||
993 | gather->subdesc_type = SQ_DESC_TYPE_GATHER; | ||
994 | gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB; | ||
995 | gather->size = size; | ||
996 | gather->addr = data; | ||
997 | } | ||
998 | |||
999 | /* Segment a TSO packet into 'gso_size' segments and append | ||
1000 | * them to SQ for transfer | ||
1001 | */ | ||
1002 | static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, | ||
1003 | int qentry, struct sk_buff *skb) | ||
1004 | { | ||
1005 | struct tso_t tso; | ||
1006 | int seg_subdescs = 0, desc_cnt = 0; | ||
1007 | int seg_len, total_len, data_left; | ||
1008 | int hdr_qentry = qentry; | ||
1009 | int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | ||
1010 | |||
1011 | tso_start(skb, &tso); | ||
1012 | total_len = skb->len - hdr_len; | ||
1013 | while (total_len > 0) { | ||
1014 | char *hdr; | ||
1015 | |||
1016 | /* Save Qentry for adding HDR_SUBDESC at the end */ | ||
1017 | hdr_qentry = qentry; | ||
1018 | |||
1019 | data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); | ||
1020 | total_len -= data_left; | ||
1021 | |||
1022 | /* Add segment's header */ | ||
1023 | qentry = nicvf_get_nxt_sqentry(sq, qentry); | ||
1024 | hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE; | ||
1025 | tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); | ||
1026 | nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len, | ||
1027 | sq->tso_hdrs_phys + | ||
1028 | qentry * TSO_HEADER_SIZE); | ||
1029 | /* HDR_SUBDESC + GATHER */ | ||
1030 | seg_subdescs = 2; | ||
1031 | seg_len = hdr_len; | ||
1032 | |||
1033 | /* Add segment's payload fragments */ | ||
1034 | while (data_left > 0) { | ||
1035 | int size; | ||
1036 | |||
1037 | size = min_t(int, tso.size, data_left); | ||
1038 | |||
1039 | qentry = nicvf_get_nxt_sqentry(sq, qentry); | ||
1040 | nicvf_sq_add_gather_subdesc(sq, qentry, size, | ||
1041 | virt_to_phys(tso.data)); | ||
1042 | seg_subdescs++; | ||
1043 | seg_len += size; | ||
1044 | |||
1045 | data_left -= size; | ||
1046 | tso_build_data(skb, &tso, size); | ||
1047 | } | ||
1048 | nicvf_sq_add_hdr_subdesc(sq, hdr_qentry, | ||
1049 | seg_subdescs - 1, skb, seg_len); | ||
1050 | sq->skbuff[hdr_qentry] = 0; | ||
1051 | qentry = nicvf_get_nxt_sqentry(sq, qentry); | ||
1052 | |||
1053 | desc_cnt += seg_subdescs; | ||
1054 | } | ||
1055 | /* Save SKB in the last segment for freeing */ | ||
1056 | sq->skbuff[hdr_qentry] = (u64)skb; | ||
1057 | |||
1058 | /* make sure all memory stores are done before ringing doorbell */ | ||
1059 | smp_wmb(); | ||
1060 | |||
1061 | /* Inform HW to xmit all TSO segments */ | ||
1062 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, | ||
1063 | skb_get_queue_mapping(skb), desc_cnt); | ||
1064 | return 1; | ||
1065 | } | ||
1066 | |||
1067 | /* Append an skb to a SQ for packet transfer. */ | ||
1068 | int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) | ||
1069 | { | ||
1070 | int i, size; | ||
1071 | int subdesc_cnt; | ||
1072 | int sq_num, qentry; | ||
1073 | struct queue_set *qs = nic->qs; | ||
1074 | struct snd_queue *sq; | ||
1075 | |||
1076 | sq_num = skb_get_queue_mapping(skb); | ||
1077 | sq = &qs->sq[sq_num]; | ||
1078 | |||
1079 | subdesc_cnt = nicvf_sq_subdesc_required(nic, skb); | ||
1080 | if (subdesc_cnt > atomic_read(&sq->free_cnt)) | ||
1081 | goto append_fail; | ||
1082 | |||
1083 | qentry = nicvf_get_sq_desc(sq, subdesc_cnt); | ||
1084 | |||
1085 | /* Check if it's a TSO packet */ | ||
1086 | if (skb_shinfo(skb)->gso_size) | ||
1087 | return nicvf_sq_append_tso(nic, sq, qentry, skb); | ||
1088 | |||
1089 | /* Add SQ header subdesc */ | ||
1090 | nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len); | ||
1091 | |||
1092 | /* Add SQ gather subdescs */ | ||
1093 | qentry = nicvf_get_nxt_sqentry(sq, qentry); | ||
1094 | size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; | ||
1095 | nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data)); | ||
1096 | |||
1097 | /* Check for scattered buffer */ | ||
1098 | if (!skb_is_nonlinear(skb)) | ||
1099 | goto doorbell; | ||
1100 | |||
1101 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | ||
1102 | const struct skb_frag_struct *frag; | ||
1103 | |||
1104 | frag = &skb_shinfo(skb)->frags[i]; | ||
1105 | |||
1106 | qentry = nicvf_get_nxt_sqentry(sq, qentry); | ||
1107 | size = skb_frag_size(frag); | ||
1108 | nicvf_sq_add_gather_subdesc(sq, qentry, size, | ||
1109 | virt_to_phys( | ||
1110 | skb_frag_address(frag))); | ||
1111 | } | ||
1112 | |||
1113 | doorbell: | ||
1114 | /* make sure all memory stores are done before ringing doorbell */ | ||
1115 | smp_wmb(); | ||
1116 | |||
1117 | /* Inform HW to xmit new packet */ | ||
1118 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, | ||
1119 | sq_num, subdesc_cnt); | ||
1120 | return 1; | ||
1121 | |||
1122 | append_fail: | ||
1123 | netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n"); | ||
1124 | return 0; | ||
1125 | } | ||
1126 | |||
1127 | static inline unsigned frag_num(unsigned i) | ||
1128 | { | ||
1129 | #ifdef __BIG_ENDIAN | ||
1130 | return (i & ~3) + 3 - (i & 3); | ||
1131 | #else | ||
1132 | return i; | ||
1133 | #endif | ||
1134 | } | ||
1135 | |||
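frag_num() only rewrites the index on big-endian kernels, where the 16-bit rb*_sz fields sit in the opposite order inside each 64-bit word of the CQE; the expression reverses the position within every group of four. A quick standalone check (illustrative only, not driver code):

#include <stdio.h>

static unsigned frag_num_be(unsigned i)
{
	/* reverse the index inside each group of four: 0,1,2,3 -> 3,2,1,0 */
	return (i & ~3) + 3 - (i & 3);
}

int main(void)
{
	unsigned i;

	for (i = 0; i < 8; i++)
		printf("%u -> %u\n", i, frag_num_be(i));	/* 3 2 1 0 7 6 5 4 */
	return 0;
}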
1136 | /* Returns SKB for a received packet */ | ||
1137 | struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx) | ||
1138 | { | ||
1139 | int frag; | ||
1140 | int payload_len = 0; | ||
1141 | struct sk_buff *skb = NULL; | ||
1142 | struct sk_buff *skb_frag = NULL; | ||
1143 | struct sk_buff *prev_frag = NULL; | ||
1144 | u16 *rb_lens = NULL; | ||
1145 | u64 *rb_ptrs = NULL; | ||
1146 | |||
1147 | rb_lens = (void *)cqe_rx + (3 * sizeof(u64)); | ||
1148 | rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64)); | ||
1149 | |||
1150 | netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n", | ||
1151 | __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz); | ||
1152 | |||
1153 | for (frag = 0; frag < cqe_rx->rb_cnt; frag++) { | ||
1154 | payload_len = rb_lens[frag_num(frag)]; | ||
1155 | if (!frag) { | ||
1156 | /* First fragment */ | ||
1157 | skb = nicvf_rb_ptr_to_skb(nic, | ||
1158 | *rb_ptrs - cqe_rx->align_pad, | ||
1159 | payload_len); | ||
1160 | if (!skb) | ||
1161 | return NULL; | ||
1162 | skb_reserve(skb, cqe_rx->align_pad); | ||
1163 | skb_put(skb, payload_len); | ||
1164 | } else { | ||
1165 | /* Add fragments */ | ||
1166 | skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs, | ||
1167 | payload_len); | ||
1168 | if (!skb_frag) { | ||
1169 | dev_kfree_skb(skb); | ||
1170 | return NULL; | ||
1171 | } | ||
1172 | |||
1173 | if (!skb_shinfo(skb)->frag_list) | ||
1174 | skb_shinfo(skb)->frag_list = skb_frag; | ||
1175 | else | ||
1176 | prev_frag->next = skb_frag; | ||
1177 | |||
1178 | prev_frag = skb_frag; | ||
1179 | skb->len += payload_len; | ||
1180 | skb->data_len += payload_len; | ||
1181 | skb_frag->len = payload_len; | ||
1182 | } | ||
1183 | /* Next buffer pointer */ | ||
1184 | rb_ptrs++; | ||
1185 | } | ||
1186 | return skb; | ||
1187 | } | ||
1188 | |||
1189 | /* Enable interrupt */ | ||
1190 | void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx) | ||
1191 | { | ||
1192 | u64 reg_val; | ||
1193 | |||
1194 | reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); | ||
1195 | |||
1196 | switch (int_type) { | ||
1197 | case NICVF_INTR_CQ: | ||
1198 | reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); | ||
1199 | break; | ||
1200 | case NICVF_INTR_SQ: | ||
1201 | reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); | ||
1202 | break; | ||
1203 | case NICVF_INTR_RBDR: | ||
1204 | reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); | ||
1205 | break; | ||
1206 | case NICVF_INTR_PKT_DROP: | ||
1207 | reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT); | ||
1208 | break; | ||
1209 | case NICVF_INTR_TCP_TIMER: | ||
1210 | reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); | ||
1211 | break; | ||
1212 | case NICVF_INTR_MBOX: | ||
1213 | reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT); | ||
1214 | break; | ||
1215 | case NICVF_INTR_QS_ERR: | ||
1216 | reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); | ||
1217 | break; | ||
1218 | default: | ||
1219 | netdev_err(nic->netdev, | ||
1220 | "Failed to enable interrupt: unknown type\n"); | ||
1221 | break; | ||
1222 | } | ||
1223 | |||
1224 | nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val); | ||
1225 | } | ||
1226 | |||
1227 | /* Disable interrupt */ | ||
1228 | void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx) | ||
1229 | { | ||
1230 | u64 reg_val = 0; | ||
1231 | |||
1232 | switch (int_type) { | ||
1233 | case NICVF_INTR_CQ: | ||
1234 | reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); | ||
1235 | break; | ||
1236 | case NICVF_INTR_SQ: | ||
1237 | reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); | ||
1238 | break; | ||
1239 | case NICVF_INTR_RBDR: | ||
1240 | reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); | ||
1241 | break; | ||
1242 | case NICVF_INTR_PKT_DROP: | ||
1243 | reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT); | ||
1244 | break; | ||
1245 | case NICVF_INTR_TCP_TIMER: | ||
1246 | reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); | ||
1247 | break; | ||
1248 | case NICVF_INTR_MBOX: | ||
1249 | reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT); | ||
1250 | break; | ||
1251 | case NICVF_INTR_QS_ERR: | ||
1252 | reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); | ||
1253 | break; | ||
1254 | default: | ||
1255 | netdev_err(nic->netdev, | ||
1256 | "Failed to disable interrupt: unknown type\n"); | ||
1257 | break; | ||
1258 | } | ||
1259 | |||
1260 | nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val); | ||
1261 | } | ||
1262 | |||
1263 | /* Clear interrupt */ | ||
1264 | void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx) | ||
1265 | { | ||
1266 | u64 reg_val = 0; | ||
1267 | |||
1268 | switch (int_type) { | ||
1269 | case NICVF_INTR_CQ: | ||
1270 | reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); | ||
1271 | break; | ||
1272 | case NICVF_INTR_SQ: | ||
1273 | reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); | ||
1274 | break; | ||
1275 | case NICVF_INTR_RBDR: | ||
1276 | reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); | ||
1277 | break; | ||
1278 | case NICVF_INTR_PKT_DROP: | ||
1279 | reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT); | ||
1280 | break; | ||
1281 | case NICVF_INTR_TCP_TIMER: | ||
1282 | reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); | ||
1283 | break; | ||
1284 | case NICVF_INTR_MBOX: | ||
1285 | reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT); | ||
1286 | break; | ||
1287 | case NICVF_INTR_QS_ERR: | ||
1288 | reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); | ||
1289 | break; | ||
1290 | default: | ||
1291 | netdev_err(nic->netdev, | ||
1292 | "Failed to clear interrupt: unknown type\n"); | ||
1293 | break; | ||
1294 | } | ||
1295 | |||
1296 | nicvf_reg_write(nic, NIC_VF_INT, reg_val); | ||
1297 | } | ||
1298 | |||
1299 | /* Check if interrupt is enabled */ | ||
1300 | int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx) | ||
1301 | { | ||
1302 | u64 reg_val; | ||
1303 | u64 mask = 0xff; | ||
1304 | |||
1305 | reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); | ||
1306 | |||
1307 | switch (int_type) { | ||
1308 | case NICVF_INTR_CQ: | ||
1309 | mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); | ||
1310 | break; | ||
1311 | case NICVF_INTR_SQ: | ||
1312 | mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); | ||
1313 | break; | ||
1314 | case NICVF_INTR_RBDR: | ||
1315 | mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); | ||
1316 | break; | ||
1317 | case NICVF_INTR_PKT_DROP: | ||
1318 | mask = NICVF_INTR_PKT_DROP_MASK; | ||
1319 | break; | ||
1320 | case NICVF_INTR_TCP_TIMER: | ||
1321 | mask = NICVF_INTR_TCP_TIMER_MASK; | ||
1322 | break; | ||
1323 | case NICVF_INTR_MBOX: | ||
1324 | mask = NICVF_INTR_MBOX_MASK; | ||
1325 | break; | ||
1326 | case NICVF_INTR_QS_ERR: | ||
1327 | mask = NICVF_INTR_QS_ERR_MASK; | ||
1328 | break; | ||
1329 | default: | ||
1330 | netdev_err(nic->netdev, | ||
1331 | "Failed to check interrupt enable: unknown type\n"); | ||
1332 | break; | ||
1333 | } | ||
1334 | |||
1335 | return (reg_val & mask); | ||
1336 | } | ||
1337 | |||
1338 | void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx) | ||
1339 | { | ||
1340 | struct rcv_queue *rq; | ||
1341 | |||
1342 | #define GET_RQ_STATS(reg) \ | ||
1343 | nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\ | ||
1344 | (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) | ||
1345 | |||
1346 | rq = &nic->qs->rq[rq_idx]; | ||
1347 | rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS); | ||
1348 | rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS); | ||
1349 | } | ||
1350 | |||
1351 | void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx) | ||
1352 | { | ||
1353 | struct snd_queue *sq; | ||
1354 | |||
1355 | #define GET_SQ_STATS(reg) \ | ||
1356 | nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\ | ||
1357 | (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) | ||
1358 | |||
1359 | sq = &nic->qs->sq[sq_idx]; | ||
1360 | sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS); | ||
1361 | sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS); | ||
1362 | } | ||
1363 | |||
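The GET_RQ_STATS/GET_SQ_STATS macros above build a per-queue register offset by folding the queue index and the statistic index (0 for octets, 1 for packets, each register 8 bytes wide) into the base offset. A hedged sketch of that composition, using made-up base and shift values rather than the real ones from nic_reg.h:

#include <stdio.h>
#include <stdint.h>

/* Illustrative values only; the driver takes these from nic_reg.h */
#define STAT_BASE	0x010400ULL
#define Q_NUM_SHIFT	18

static uint64_t stat_reg(unsigned int qidx, unsigned int stat)
{
	/* queue index selects the queue's register block,
	 * stat index selects the 8-byte register inside it
	 */
	return STAT_BASE | ((uint64_t)qidx << Q_NUM_SHIFT) | (stat << 3);
}

int main(void)
{
	printf("RQ2 octet counter at offset 0x%llx\n",
	       (unsigned long long)stat_reg(2, 0));
	printf("RQ2 packet counter at offset 0x%llx\n",
	       (unsigned long long)stat_reg(2, 1));
	return 0;
}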
1364 | /* Check for errors in the receive completion queue entry */ | ||
1365 | int nicvf_check_cqe_rx_errs(struct nicvf *nic, | ||
1366 | struct cmp_queue *cq, struct cqe_rx_t *cqe_rx) | ||
1367 | { | ||
1368 | struct cmp_queue_stats *stats = &cq->stats; | ||
1369 | |||
1370 | if (!cqe_rx->err_level && !cqe_rx->err_opcode) { | ||
1371 | stats->rx.errop.good++; | ||
1372 | return 0; | ||
1373 | } | ||
1374 | |||
1375 | if (netif_msg_rx_err(nic)) | ||
1376 | netdev_err(nic->netdev, | ||
1377 | "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n", | ||
1378 | nic->netdev->name, | ||
1379 | cqe_rx->err_level, cqe_rx->err_opcode); | ||
1380 | |||
1381 | switch (cqe_rx->err_level) { | ||
1382 | case CQ_ERRLVL_MAC: | ||
1383 | stats->rx.errlvl.mac_errs++; | ||
1384 | break; | ||
1385 | case CQ_ERRLVL_L2: | ||
1386 | stats->rx.errlvl.l2_errs++; | ||
1387 | break; | ||
1388 | case CQ_ERRLVL_L3: | ||
1389 | stats->rx.errlvl.l3_errs++; | ||
1390 | break; | ||
1391 | case CQ_ERRLVL_L4: | ||
1392 | stats->rx.errlvl.l4_errs++; | ||
1393 | break; | ||
1394 | } | ||
1395 | |||
1396 | switch (cqe_rx->err_opcode) { | ||
1397 | case CQ_RX_ERROP_RE_PARTIAL: | ||
1398 | stats->rx.errop.partial_pkts++; | ||
1399 | break; | ||
1400 | case CQ_RX_ERROP_RE_JABBER: | ||
1401 | stats->rx.errop.jabber_errs++; | ||
1402 | break; | ||
1403 | case CQ_RX_ERROP_RE_FCS: | ||
1404 | stats->rx.errop.fcs_errs++; | ||
1405 | break; | ||
1406 | case CQ_RX_ERROP_RE_TERMINATE: | ||
1407 | stats->rx.errop.terminate_errs++; | ||
1408 | break; | ||
1409 | case CQ_RX_ERROP_RE_RX_CTL: | ||
1410 | stats->rx.errop.bgx_rx_errs++; | ||
1411 | break; | ||
1412 | case CQ_RX_ERROP_PREL2_ERR: | ||
1413 | stats->rx.errop.prel2_errs++; | ||
1414 | break; | ||
1415 | case CQ_RX_ERROP_L2_FRAGMENT: | ||
1416 | stats->rx.errop.l2_frags++; | ||
1417 | break; | ||
1418 | case CQ_RX_ERROP_L2_OVERRUN: | ||
1419 | stats->rx.errop.l2_overruns++; | ||
1420 | break; | ||
1421 | case CQ_RX_ERROP_L2_PFCS: | ||
1422 | stats->rx.errop.l2_pfcs++; | ||
1423 | break; | ||
1424 | case CQ_RX_ERROP_L2_PUNY: | ||
1425 | stats->rx.errop.l2_puny++; | ||
1426 | break; | ||
1427 | case CQ_RX_ERROP_L2_MAL: | ||
1428 | stats->rx.errop.l2_hdr_malformed++; | ||
1429 | break; | ||
1430 | case CQ_RX_ERROP_L2_OVERSIZE: | ||
1431 | stats->rx.errop.l2_oversize++; | ||
1432 | break; | ||
1433 | case CQ_RX_ERROP_L2_UNDERSIZE: | ||
1434 | stats->rx.errop.l2_undersize++; | ||
1435 | break; | ||
1436 | case CQ_RX_ERROP_L2_LENMISM: | ||
1437 | stats->rx.errop.l2_len_mismatch++; | ||
1438 | break; | ||
1439 | case CQ_RX_ERROP_L2_PCLP: | ||
1440 | stats->rx.errop.l2_pclp++; | ||
1441 | break; | ||
1442 | case CQ_RX_ERROP_IP_NOT: | ||
1443 | stats->rx.errop.non_ip++; | ||
1444 | break; | ||
1445 | case CQ_RX_ERROP_IP_CSUM_ERR: | ||
1446 | stats->rx.errop.ip_csum_err++; | ||
1447 | break; | ||
1448 | case CQ_RX_ERROP_IP_MAL: | ||
1449 | stats->rx.errop.ip_hdr_malformed++; | ||
1450 | break; | ||
1451 | case CQ_RX_ERROP_IP_MALD: | ||
1452 | stats->rx.errop.ip_payload_malformed++; | ||
1453 | break; | ||
1454 | case CQ_RX_ERROP_IP_HOP: | ||
1455 | stats->rx.errop.ip_hop_errs++; | ||
1456 | break; | ||
1457 | case CQ_RX_ERROP_L3_ICRC: | ||
1458 | stats->rx.errop.l3_icrc_errs++; | ||
1459 | break; | ||
1460 | case CQ_RX_ERROP_L3_PCLP: | ||
1461 | stats->rx.errop.l3_pclp++; | ||
1462 | break; | ||
1463 | case CQ_RX_ERROP_L4_MAL: | ||
1464 | stats->rx.errop.l4_malformed++; | ||
1465 | break; | ||
1466 | case CQ_RX_ERROP_L4_CHK: | ||
1467 | stats->rx.errop.l4_csum_errs++; | ||
1468 | break; | ||
1469 | case CQ_RX_ERROP_UDP_LEN: | ||
1470 | stats->rx.errop.udp_len_err++; | ||
1471 | break; | ||
1472 | case CQ_RX_ERROP_L4_PORT: | ||
1473 | stats->rx.errop.bad_l4_port++; | ||
1474 | break; | ||
1475 | case CQ_RX_ERROP_TCP_FLAG: | ||
1476 | stats->rx.errop.bad_tcp_flag++; | ||
1477 | break; | ||
1478 | case CQ_RX_ERROP_TCP_OFFSET: | ||
1479 | stats->rx.errop.tcp_offset_errs++; | ||
1480 | break; | ||
1481 | case CQ_RX_ERROP_L4_PCLP: | ||
1482 | stats->rx.errop.l4_pclp++; | ||
1483 | break; | ||
1484 | case CQ_RX_ERROP_RBDR_TRUNC: | ||
1485 | stats->rx.errop.pkt_truncated++; | ||
1486 | break; | ||
1487 | } | ||
1488 | |||
1489 | return 1; | ||
1490 | } | ||
1491 | |||
1492 | /* Check for errors in the send completion queue entry */ | ||
1493 | int nicvf_check_cqe_tx_errs(struct nicvf *nic, | ||
1494 | struct cmp_queue *cq, struct cqe_send_t *cqe_tx) | ||
1495 | { | ||
1496 | struct cmp_queue_stats *stats = &cq->stats; | ||
1497 | |||
1498 | switch (cqe_tx->send_status) { | ||
1499 | case CQ_TX_ERROP_GOOD: | ||
1500 | stats->tx.good++; | ||
1501 | return 0; | ||
1502 | case CQ_TX_ERROP_DESC_FAULT: | ||
1503 | stats->tx.desc_fault++; | ||
1504 | break; | ||
1505 | case CQ_TX_ERROP_HDR_CONS_ERR: | ||
1506 | stats->tx.hdr_cons_err++; | ||
1507 | break; | ||
1508 | case CQ_TX_ERROP_SUBDC_ERR: | ||
1509 | stats->tx.subdesc_err++; | ||
1510 | break; | ||
1511 | case CQ_TX_ERROP_IMM_SIZE_OFLOW: | ||
1512 | stats->tx.imm_size_oflow++; | ||
1513 | break; | ||
1514 | case CQ_TX_ERROP_DATA_SEQUENCE_ERR: | ||
1515 | stats->tx.data_seq_err++; | ||
1516 | break; | ||
1517 | case CQ_TX_ERROP_MEM_SEQUENCE_ERR: | ||
1518 | stats->tx.mem_seq_err++; | ||
1519 | break; | ||
1520 | case CQ_TX_ERROP_LOCK_VIOL: | ||
1521 | stats->tx.lock_viol++; | ||
1522 | break; | ||
1523 | case CQ_TX_ERROP_DATA_FAULT: | ||
1524 | stats->tx.data_fault++; | ||
1525 | break; | ||
1526 | case CQ_TX_ERROP_TSTMP_CONFLICT: | ||
1527 | stats->tx.tstmp_conflict++; | ||
1528 | break; | ||
1529 | case CQ_TX_ERROP_TSTMP_TIMEOUT: | ||
1530 | stats->tx.tstmp_timeout++; | ||
1531 | break; | ||
1532 | case CQ_TX_ERROP_MEM_FAULT: | ||
1533 | stats->tx.mem_fault++; | ||
1534 | break; | ||
1535 | case CQ_TX_ERROP_CK_OVERLAP: | ||
1536 | stats->tx.csum_overlap++; | ||
1537 | break; | ||
1538 | case CQ_TX_ERROP_CK_OFLOW: | ||
1539 | stats->tx.csum_overflow++; | ||
1540 | break; | ||
1541 | } | ||
1542 | |||
1543 | return 1; | ||
1544 | } | ||
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h new file mode 100644 index 000000000000..8341bdf755d1 --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h | |||
@@ -0,0 +1,381 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2015 Cavium, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of version 2 of the GNU General Public License | ||
6 | * as published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #ifndef NICVF_QUEUES_H | ||
10 | #define NICVF_QUEUES_H | ||
11 | |||
12 | #include <linux/netdevice.h> | ||
13 | #include "q_struct.h" | ||
14 | |||
15 | #define MAX_QUEUE_SET 128 | ||
16 | #define MAX_RCV_QUEUES_PER_QS 8 | ||
17 | #define MAX_RCV_BUF_DESC_RINGS_PER_QS 2 | ||
18 | #define MAX_SND_QUEUES_PER_QS 8 | ||
19 | #define MAX_CMP_QUEUES_PER_QS 8 | ||
20 | |||
21 | /* VF's queue interrupt ranges */ | ||
22 | #define NICVF_INTR_ID_CQ 0 | ||
23 | #define NICVF_INTR_ID_SQ 8 | ||
24 | #define NICVF_INTR_ID_RBDR 16 | ||
25 | #define NICVF_INTR_ID_MISC 18 | ||
26 | #define NICVF_INTR_ID_QS_ERR 19 | ||
27 | |||
28 | #define for_each_cq_irq(irq) \ | ||
29 | for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++) | ||
30 | #define for_each_sq_irq(irq) \ | ||
31 | for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++) | ||
32 | #define for_each_rbdr_irq(irq) \ | ||
33 | for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++) | ||
34 | |||
35 | #define RBDR_SIZE0 0ULL /* 8K entries */ | ||
36 | #define RBDR_SIZE1 1ULL /* 16K entries */ | ||
37 | #define RBDR_SIZE2 2ULL /* 32K entries */ | ||
38 | #define RBDR_SIZE3 3ULL /* 64K entries */ | ||
39 | #define RBDR_SIZE4 4ULL /* 128K entries */ | ||
40 | #define RBDR_SIZE5 5ULL /* 256K entries */ | ||
41 | #define RBDR_SIZE6 6ULL /* 512K entries */ | ||
42 | |||
43 | #define SND_QUEUE_SIZE0 0ULL /* 1K entries */ | ||
44 | #define SND_QUEUE_SIZE1 1ULL /* 2K entries */ | ||
45 | #define SND_QUEUE_SIZE2 2ULL /* 4K entries */ | ||
46 | #define SND_QUEUE_SIZE3 3ULL /* 8K entries */ | ||
47 | #define SND_QUEUE_SIZE4 4ULL /* 16K entries */ | ||
48 | #define SND_QUEUE_SIZE5 5ULL /* 32K entries */ | ||
49 | #define SND_QUEUE_SIZE6 6ULL /* 64K entries */ | ||
50 | |||
51 | #define CMP_QUEUE_SIZE0 0ULL /* 1K entries */ | ||
52 | #define CMP_QUEUE_SIZE1 1ULL /* 2K entries */ | ||
53 | #define CMP_QUEUE_SIZE2 2ULL /* 4K entries */ | ||
54 | #define CMP_QUEUE_SIZE3 3ULL /* 8K entries */ | ||
55 | #define CMP_QUEUE_SIZE4 4ULL /* 16K entries */ | ||
56 | #define CMP_QUEUE_SIZE5 5ULL /* 32K entries */ | ||
57 | #define CMP_QUEUE_SIZE6 6ULL /* 64K entries */ | ||
58 | |||
59 | /* Default queue counts per QS, their lengths and threshold values */ | ||
60 | #define RBDR_CNT 1 | ||
61 | #define RCV_QUEUE_CNT 8 | ||
62 | #define SND_QUEUE_CNT 8 | ||
63 | #define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */ | ||
64 | |||
65 | #define SND_QSIZE SND_QUEUE_SIZE4 | ||
66 | #define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10)) | ||
67 | #define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10)) | ||
68 | #define SND_QUEUE_THRESH 2ULL | ||
69 | #define MIN_SQ_DESC_PER_PKT_XMIT 2 | ||
70 | /* Timestamp is not enabled; otherwise this would be 2 */ | ||
71 | #define MAX_CQE_PER_PKT_XMIT 1 | ||
72 | |||
73 | #define CMP_QSIZE CMP_QUEUE_SIZE4 | ||
74 | #define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) | ||
75 | #define CMP_QUEUE_CQE_THRESH 0 | ||
76 | #define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */ | ||
77 | |||
78 | #define RBDR_SIZE RBDR_SIZE0 | ||
79 | #define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13)) | ||
80 | #define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13)) | ||
81 | #define RBDR_THRESH (RCV_BUF_COUNT / 2) | ||
82 | #define DMA_BUFFER_LEN 2048 /* In multiples of 128 bytes */ | ||
83 | #define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \ | ||
84 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \ | ||
85 | (NICVF_RCV_BUF_ALIGN_BYTES * 2)) | ||
86 | #define RCV_DATA_OFFSET NICVF_RCV_BUF_ALIGN_BYTES | ||
87 | |||
88 | #define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \ | ||
89 | MAX_CQE_PER_PKT_XMIT) | ||
90 | #define RQ_CQ_DROP ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256) | ||
91 | |||
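With the defaults above (SND_QSIZE = CMP_QSIZE = size encoding 4, RBDR_SIZE = encoding 0), the size fields translate into entry counts of 1K << encoding for send/completion queues and 8K << encoding for RBDRs. A standalone check of the resulting lengths (values mirror the macros; this is not driver code):

#include <stdio.h>

int main(void)
{
	unsigned long long snd_qsize = 4, cmp_qsize = 4, rbdr_size = 0;

	printf("SND_QUEUE_LEN = %llu\n", 1ULL << (snd_qsize + 10)); /* 16384 */
	printf("CMP_QUEUE_LEN = %llu\n", 1ULL << (cmp_qsize + 10)); /* 16384 */
	printf("RCV_BUF_COUNT = %llu\n", 1ULL << (rbdr_size + 13)); /*  8192 */
	return 0;
}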
92 | /* Descriptor size in bytes */ | ||
93 | #define SND_QUEUE_DESC_SIZE 16 | ||
94 | #define CMP_QUEUE_DESC_SIZE 512 | ||
95 | |||
96 | /* Buffer / descriptor alignments */ | ||
97 | #define NICVF_RCV_BUF_ALIGN 7 | ||
98 | #define NICVF_RCV_BUF_ALIGN_BYTES (1ULL << NICVF_RCV_BUF_ALIGN) | ||
99 | #define NICVF_CQ_BASE_ALIGN_BYTES 512 /* 9 bits */ | ||
100 | #define NICVF_SQ_BASE_ALIGN_BYTES 128 /* 7 bits */ | ||
101 | |||
102 | #define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES) ALIGN(ADDR, ALIGN_BYTES) | ||
103 | #define NICVF_ADDR_ALIGN_LEN(ADDR, BYTES)\ | ||
104 | (NICVF_ALIGNED_ADDR(ADDR, BYTES) - BYTES) | ||
105 | #define NICVF_RCV_BUF_ALIGN_LEN(X)\ | ||
106 | (NICVF_ALIGNED_ADDR(X, NICVF_RCV_BUF_ALIGN_BYTES) - X) | ||
107 | |||
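NICVF_RCV_BUF_ALIGN_LEN(X) is the padding needed to bring an address up to the next 128-byte receive-buffer boundary (zero when already aligned). A quick check of the same arithmetic outside the kernel, with arbitrary example addresses:

#include <stdio.h>

#define ALIGN_BYTES	128ULL

static unsigned long long align_len(unsigned long long addr)
{
	/* round up to the next multiple of ALIGN_BYTES, then take the gap */
	unsigned long long aligned = (addr + ALIGN_BYTES - 1) & ~(ALIGN_BYTES - 1);

	return aligned - addr;
}

int main(void)
{
	printf("%llu\n", align_len(0x1005));	/* 123 bytes of padding */
	printf("%llu\n", align_len(0x1080));	/* 0, already aligned */
	return 0;
}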
108 | /* Queue enable/disable */ | ||
109 | #define NICVF_SQ_EN BIT_ULL(19) | ||
110 | |||
111 | /* Queue reset */ | ||
112 | #define NICVF_CQ_RESET BIT_ULL(41) | ||
113 | #define NICVF_SQ_RESET BIT_ULL(17) | ||
114 | #define NICVF_RBDR_RESET BIT_ULL(43) | ||
115 | |||
116 | enum CQ_RX_ERRLVL_E { | ||
117 | CQ_ERRLVL_MAC, | ||
118 | CQ_ERRLVL_L2, | ||
119 | CQ_ERRLVL_L3, | ||
120 | CQ_ERRLVL_L4, | ||
121 | }; | ||
122 | |||
123 | enum CQ_RX_ERROP_E { | ||
124 | CQ_RX_ERROP_RE_NONE = 0x0, | ||
125 | CQ_RX_ERROP_RE_PARTIAL = 0x1, | ||
126 | CQ_RX_ERROP_RE_JABBER = 0x2, | ||
127 | CQ_RX_ERROP_RE_FCS = 0x7, | ||
128 | CQ_RX_ERROP_RE_TERMINATE = 0x9, | ||
129 | CQ_RX_ERROP_RE_RX_CTL = 0xb, | ||
130 | CQ_RX_ERROP_PREL2_ERR = 0x1f, | ||
131 | CQ_RX_ERROP_L2_FRAGMENT = 0x20, | ||
132 | CQ_RX_ERROP_L2_OVERRUN = 0x21, | ||
133 | CQ_RX_ERROP_L2_PFCS = 0x22, | ||
134 | CQ_RX_ERROP_L2_PUNY = 0x23, | ||
135 | CQ_RX_ERROP_L2_MAL = 0x24, | ||
136 | CQ_RX_ERROP_L2_OVERSIZE = 0x25, | ||
137 | CQ_RX_ERROP_L2_UNDERSIZE = 0x26, | ||
138 | CQ_RX_ERROP_L2_LENMISM = 0x27, | ||
139 | CQ_RX_ERROP_L2_PCLP = 0x28, | ||
140 | CQ_RX_ERROP_IP_NOT = 0x41, | ||
141 | CQ_RX_ERROP_IP_CSUM_ERR = 0x42, | ||
142 | CQ_RX_ERROP_IP_MAL = 0x43, | ||
143 | CQ_RX_ERROP_IP_MALD = 0x44, | ||
144 | CQ_RX_ERROP_IP_HOP = 0x45, | ||
145 | CQ_RX_ERROP_L3_ICRC = 0x46, | ||
146 | CQ_RX_ERROP_L3_PCLP = 0x47, | ||
147 | CQ_RX_ERROP_L4_MAL = 0x61, | ||
148 | CQ_RX_ERROP_L4_CHK = 0x62, | ||
149 | CQ_RX_ERROP_UDP_LEN = 0x63, | ||
150 | CQ_RX_ERROP_L4_PORT = 0x64, | ||
151 | CQ_RX_ERROP_TCP_FLAG = 0x65, | ||
152 | CQ_RX_ERROP_TCP_OFFSET = 0x66, | ||
153 | CQ_RX_ERROP_L4_PCLP = 0x67, | ||
154 | CQ_RX_ERROP_RBDR_TRUNC = 0x70, | ||
155 | }; | ||
156 | |||
157 | enum CQ_TX_ERROP_E { | ||
158 | CQ_TX_ERROP_GOOD = 0x0, | ||
159 | CQ_TX_ERROP_DESC_FAULT = 0x10, | ||
160 | CQ_TX_ERROP_HDR_CONS_ERR = 0x11, | ||
161 | CQ_TX_ERROP_SUBDC_ERR = 0x12, | ||
162 | CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80, | ||
163 | CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81, | ||
164 | CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82, | ||
165 | CQ_TX_ERROP_LOCK_VIOL = 0x83, | ||
166 | CQ_TX_ERROP_DATA_FAULT = 0x84, | ||
167 | CQ_TX_ERROP_TSTMP_CONFLICT = 0x85, | ||
168 | CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86, | ||
169 | CQ_TX_ERROP_MEM_FAULT = 0x87, | ||
170 | CQ_TX_ERROP_CK_OVERLAP = 0x88, | ||
171 | CQ_TX_ERROP_CK_OFLOW = 0x89, | ||
172 | CQ_TX_ERROP_ENUM_LAST = 0x8a, | ||
173 | }; | ||
174 | |||
175 | struct cmp_queue_stats { | ||
176 | struct rx_stats { | ||
177 | struct { | ||
178 | u64 mac_errs; | ||
179 | u64 l2_errs; | ||
180 | u64 l3_errs; | ||
181 | u64 l4_errs; | ||
182 | } errlvl; | ||
183 | struct { | ||
184 | u64 good; | ||
185 | u64 partial_pkts; | ||
186 | u64 jabber_errs; | ||
187 | u64 fcs_errs; | ||
188 | u64 terminate_errs; | ||
189 | u64 bgx_rx_errs; | ||
190 | u64 prel2_errs; | ||
191 | u64 l2_frags; | ||
192 | u64 l2_overruns; | ||
193 | u64 l2_pfcs; | ||
194 | u64 l2_puny; | ||
195 | u64 l2_hdr_malformed; | ||
196 | u64 l2_oversize; | ||
197 | u64 l2_undersize; | ||
198 | u64 l2_len_mismatch; | ||
199 | u64 l2_pclp; | ||
200 | u64 non_ip; | ||
201 | u64 ip_csum_err; | ||
202 | u64 ip_hdr_malformed; | ||
203 | u64 ip_payload_malformed; | ||
204 | u64 ip_hop_errs; | ||
205 | u64 l3_icrc_errs; | ||
206 | u64 l3_pclp; | ||
207 | u64 l4_malformed; | ||
208 | u64 l4_csum_errs; | ||
209 | u64 udp_len_err; | ||
210 | u64 bad_l4_port; | ||
211 | u64 bad_tcp_flag; | ||
212 | u64 tcp_offset_errs; | ||
213 | u64 l4_pclp; | ||
214 | u64 pkt_truncated; | ||
215 | } errop; | ||
216 | } rx; | ||
217 | struct tx_stats { | ||
218 | u64 good; | ||
219 | u64 desc_fault; | ||
220 | u64 hdr_cons_err; | ||
221 | u64 subdesc_err; | ||
222 | u64 imm_size_oflow; | ||
223 | u64 data_seq_err; | ||
224 | u64 mem_seq_err; | ||
225 | u64 lock_viol; | ||
226 | u64 data_fault; | ||
227 | u64 tstmp_conflict; | ||
228 | u64 tstmp_timeout; | ||
229 | u64 mem_fault; | ||
230 | u64 csum_overlap; | ||
231 | u64 csum_overflow; | ||
232 | } tx; | ||
233 | } ____cacheline_aligned_in_smp; | ||
234 | |||
235 | enum RQ_SQ_STATS { | ||
236 | RQ_SQ_STATS_OCTS, | ||
237 | RQ_SQ_STATS_PKTS, | ||
238 | }; | ||
239 | |||
240 | struct rx_tx_queue_stats { | ||
241 | u64 bytes; | ||
242 | u64 pkts; | ||
243 | } ____cacheline_aligned_in_smp; | ||
244 | |||
245 | struct q_desc_mem { | ||
246 | dma_addr_t dma; | ||
247 | u64 size; | ||
248 | u16 q_len; | ||
249 | dma_addr_t phys_base; | ||
250 | void *base; | ||
251 | void *unalign_base; | ||
252 | }; | ||
253 | |||
254 | struct rbdr { | ||
255 | bool enable; | ||
256 | u32 dma_size; | ||
257 | u32 frag_len; | ||
258 | u32 thresh; /* Threshold level for interrupt */ | ||
259 | void *desc; | ||
260 | u32 head; | ||
261 | u32 tail; | ||
262 | struct q_desc_mem dmem; | ||
263 | } ____cacheline_aligned_in_smp; | ||
264 | |||
265 | struct rcv_queue { | ||
266 | bool enable; | ||
267 | struct rbdr *rbdr_start; | ||
268 | struct rbdr *rbdr_cont; | ||
269 | bool en_tcp_reassembly; | ||
270 | u8 cq_qs; /* CQ's QS to which this RQ is assigned */ | ||
271 | u8 cq_idx; /* CQ index (0 to 7) in the QS */ | ||
272 | u8 cont_rbdr_qs; /* Continue buffer ptrs - QS num */ | ||
273 | u8 cont_qs_rbdr_idx; /* RBDR idx in the cont QS */ | ||
274 | u8 start_rbdr_qs; /* First buffer ptrs - QS num */ | ||
275 | u8 start_qs_rbdr_idx; /* RBDR idx in the above QS */ | ||
276 | u8 caching; | ||
277 | struct rx_tx_queue_stats stats; | ||
278 | } ____cacheline_aligned_in_smp; | ||
279 | |||
280 | struct cmp_queue { | ||
281 | bool enable; | ||
282 | u16 thresh; | ||
283 | spinlock_t lock; /* lock to serialize processing CQEs */ | ||
284 | void *desc; | ||
285 | struct q_desc_mem dmem; | ||
286 | struct cmp_queue_stats stats; | ||
287 | } ____cacheline_aligned_in_smp; | ||
288 | |||
289 | struct snd_queue { | ||
290 | bool enable; | ||
291 | u8 cq_qs; /* CQ's QS to which this SQ is pointing */ | ||
292 | u8 cq_idx; /* CQ index (0 to 7) in the above QS */ | ||
293 | u16 thresh; | ||
294 | atomic_t free_cnt; | ||
295 | u32 head; | ||
296 | u32 tail; | ||
297 | u64 *skbuff; | ||
298 | void *desc; | ||
299 | |||
300 | #define TSO_HEADER_SIZE 128 | ||
301 | /* For TSO segment's header */ | ||
302 | char *tso_hdrs; | ||
303 | dma_addr_t tso_hdrs_phys; | ||
304 | |||
305 | cpumask_t affinity_mask; | ||
306 | struct q_desc_mem dmem; | ||
307 | struct rx_tx_queue_stats stats; | ||
308 | } ____cacheline_aligned_in_smp; | ||
309 | |||
310 | struct queue_set { | ||
311 | bool enable; | ||
312 | bool be_en; | ||
313 | u8 vnic_id; | ||
314 | u8 rq_cnt; | ||
315 | u8 cq_cnt; | ||
316 | u64 cq_len; | ||
317 | u8 sq_cnt; | ||
318 | u64 sq_len; | ||
319 | u8 rbdr_cnt; | ||
320 | u64 rbdr_len; | ||
321 | struct rcv_queue rq[MAX_RCV_QUEUES_PER_QS]; | ||
322 | struct cmp_queue cq[MAX_CMP_QUEUES_PER_QS]; | ||
323 | struct snd_queue sq[MAX_SND_QUEUES_PER_QS]; | ||
324 | struct rbdr rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS]; | ||
325 | } ____cacheline_aligned_in_smp; | ||
326 | |||
327 | #define GET_RBDR_DESC(RING, idx)\ | ||
328 | (&(((struct rbdr_entry_t *)((RING)->desc))[idx])) | ||
329 | #define GET_SQ_DESC(RING, idx)\ | ||
330 | (&(((struct sq_hdr_subdesc *)((RING)->desc))[idx])) | ||
331 | #define GET_CQ_DESC(RING, idx)\ | ||
332 | (&(((union cq_desc_t *)((RING)->desc))[idx])) | ||
333 | |||
334 | /* CQ status bits */ | ||
335 | #define CQ_WR_FULL BIT(26) | ||
336 | #define CQ_WR_DISABLE BIT(25) | ||
337 | #define CQ_WR_FAULT BIT(24) | ||
338 | #define CQ_CQE_COUNT (0xFFFF << 0) | ||
339 | |||
340 | #define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT) | ||
341 | |||
342 | int nicvf_set_qset_resources(struct nicvf *nic); | ||
343 | int nicvf_config_data_transfer(struct nicvf *nic, bool enable); | ||
344 | void nicvf_qset_config(struct nicvf *nic, bool enable); | ||
345 | void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, | ||
346 | int qidx, bool enable); | ||
347 | |||
348 | void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx); | ||
349 | void nicvf_sq_disable(struct nicvf *nic, int qidx); | ||
350 | void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt); | ||
351 | void nicvf_sq_free_used_descs(struct net_device *netdev, | ||
352 | struct snd_queue *sq, int qidx); | ||
353 | int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb); | ||
354 | |||
355 | struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx); | ||
356 | void nicvf_rbdr_task(unsigned long data); | ||
357 | void nicvf_rbdr_work(struct work_struct *work); | ||
358 | |||
359 | void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx); | ||
360 | void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx); | ||
361 | void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx); | ||
362 | int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx); | ||
363 | |||
364 | /* Register access APIs */ | ||
365 | void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val); | ||
366 | u64 nicvf_reg_read(struct nicvf *nic, u64 offset); | ||
367 | void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val); | ||
368 | u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset); | ||
369 | void nicvf_queue_reg_write(struct nicvf *nic, u64 offset, | ||
370 | u64 qidx, u64 val); | ||
371 | u64 nicvf_queue_reg_read(struct nicvf *nic, | ||
372 | u64 offset, u64 qidx); | ||
373 | |||
374 | /* Stats */ | ||
375 | void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx); | ||
376 | void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx); | ||
377 | int nicvf_check_cqe_rx_errs(struct nicvf *nic, | ||
378 | struct cmp_queue *cq, struct cqe_rx_t *cqe_rx); | ||
379 | int nicvf_check_cqe_tx_errs(struct nicvf *nic, | ||
380 | struct cmp_queue *cq, struct cqe_send_t *cqe_tx); | ||
381 | #endif /* NICVF_QUEUES_H */ | ||
diff --git a/drivers/net/ethernet/cavium/thunder/q_struct.h b/drivers/net/ethernet/cavium/thunder/q_struct.h new file mode 100644 index 000000000000..3c1de97b1add --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/q_struct.h | |||
@@ -0,0 +1,701 @@ | |||
1 | /* | ||
2 | * This file contains HW queue descriptor formats, config register | ||
3 | * structures, etc. | ||
4 | * | ||
5 | * Copyright (C) 2015 Cavium, Inc. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of version 2 of the GNU General Public License | ||
9 | * as published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #ifndef Q_STRUCT_H | ||
13 | #define Q_STRUCT_H | ||
14 | |||
15 | /* Load transaction types for reading segment bytes specified by | ||
16 | * NIC_SEND_GATHER_S[LD_TYPE]. | ||
17 | */ | ||
18 | enum nic_send_ld_type_e { | ||
19 | NIC_SEND_LD_TYPE_E_LDD = 0x0, | ||
20 | NIC_SEND_LD_TYPE_E_LDT = 0x1, | ||
21 | NIC_SEND_LD_TYPE_E_LDWB = 0x2, | ||
22 | NIC_SEND_LD_TYPE_E_ENUM_LAST = 0x3, | ||
23 | }; | ||
24 | |||
25 | enum ether_type_algorithm { | ||
26 | ETYPE_ALG_NONE = 0x0, | ||
27 | ETYPE_ALG_SKIP = 0x1, | ||
28 | ETYPE_ALG_ENDPARSE = 0x2, | ||
29 | ETYPE_ALG_VLAN = 0x3, | ||
30 | ETYPE_ALG_VLAN_STRIP = 0x4, | ||
31 | }; | ||
32 | |||
33 | enum layer3_type { | ||
34 | L3TYPE_NONE = 0x00, | ||
35 | L3TYPE_GRH = 0x01, | ||
36 | L3TYPE_IPV4 = 0x04, | ||
37 | L3TYPE_IPV4_OPTIONS = 0x05, | ||
38 | L3TYPE_IPV6 = 0x06, | ||
39 | L3TYPE_IPV6_OPTIONS = 0x07, | ||
40 | L3TYPE_ET_STOP = 0x0D, | ||
41 | L3TYPE_OTHER = 0x0E, | ||
42 | }; | ||
43 | |||
44 | enum layer4_type { | ||
45 | L4TYPE_NONE = 0x00, | ||
46 | L4TYPE_IPSEC_ESP = 0x01, | ||
47 | L4TYPE_IPFRAG = 0x02, | ||
48 | L4TYPE_IPCOMP = 0x03, | ||
49 | L4TYPE_TCP = 0x04, | ||
50 | L4TYPE_UDP = 0x05, | ||
51 | L4TYPE_SCTP = 0x06, | ||
52 | L4TYPE_GRE = 0x07, | ||
53 | L4TYPE_ROCE_BTH = 0x08, | ||
54 | L4TYPE_OTHER = 0x0E, | ||
55 | }; | ||
56 | |||
57 | /* CPI and RSSI configuration */ | ||
58 | enum cpi_algorithm_type { | ||
59 | CPI_ALG_NONE = 0x0, | ||
60 | CPI_ALG_VLAN = 0x1, | ||
61 | CPI_ALG_VLAN16 = 0x2, | ||
62 | CPI_ALG_DIFF = 0x3, | ||
63 | }; | ||
64 | |||
65 | enum rss_algorithm_type { | ||
66 | RSS_ALG_NONE = 0x00, | ||
67 | RSS_ALG_PORT = 0x01, | ||
68 | RSS_ALG_IP = 0x02, | ||
69 | RSS_ALG_TCP_IP = 0x03, | ||
70 | RSS_ALG_UDP_IP = 0x04, | ||
71 | RSS_ALG_SCTP_IP = 0x05, | ||
72 | RSS_ALG_GRE_IP = 0x06, | ||
73 | RSS_ALG_ROCE = 0x07, | ||
74 | }; | ||
75 | |||
76 | enum rss_hash_cfg { | ||
77 | RSS_HASH_L2ETC = 0x00, | ||
78 | RSS_HASH_IP = 0x01, | ||
79 | RSS_HASH_TCP = 0x02, | ||
80 | RSS_HASH_TCP_SYN_DIS = 0x03, | ||
81 | RSS_HASH_UDP = 0x04, | ||
82 | RSS_HASH_L4ETC = 0x05, | ||
83 | RSS_HASH_ROCE = 0x06, | ||
84 | RSS_L3_BIDI = 0x07, | ||
85 | RSS_L4_BIDI = 0x08, | ||
86 | }; | ||
87 | |||
88 | /* Completion queue entry types */ | ||
89 | enum cqe_type { | ||
90 | CQE_TYPE_INVALID = 0x0, | ||
91 | CQE_TYPE_RX = 0x2, | ||
92 | CQE_TYPE_RX_SPLIT = 0x3, | ||
93 | CQE_TYPE_RX_TCP = 0x4, | ||
94 | CQE_TYPE_SEND = 0x8, | ||
95 | CQE_TYPE_SEND_PTP = 0x9, | ||
96 | }; | ||
97 | |||
98 | enum cqe_rx_tcp_status { | ||
99 | CQE_RX_STATUS_VALID_TCP_CNXT = 0x00, | ||
100 | CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F, | ||
101 | }; | ||
102 | |||
103 | enum cqe_send_status { | ||
104 | CQE_SEND_STATUS_GOOD = 0x00, | ||
105 | CQE_SEND_STATUS_DESC_FAULT = 0x01, | ||
106 | CQE_SEND_STATUS_HDR_CONS_ERR = 0x11, | ||
107 | CQE_SEND_STATUS_SUBDESC_ERR = 0x12, | ||
108 | CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80, | ||
109 | CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81, | ||
110 | CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82, | ||
111 | CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83, | ||
112 | CQE_SEND_STATUS_LOCK_VIOL = 0x84, | ||
113 | CQE_SEND_STATUS_LOCK_UFLOW = 0x85, | ||
114 | CQE_SEND_STATUS_DATA_FAULT = 0x86, | ||
115 | CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87, | ||
116 | CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88, | ||
117 | CQE_SEND_STATUS_MEM_FAULT = 0x89, | ||
118 | CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A, | ||
119 | CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B, | ||
120 | }; | ||
121 | |||
122 | enum cqe_rx_tcp_end_reason { | ||
123 | CQE_RX_TCP_END_FIN_FLAG_DET = 0, | ||
124 | CQE_RX_TCP_END_INVALID_FLAG = 1, | ||
125 | CQE_RX_TCP_END_TIMEOUT = 2, | ||
126 | CQE_RX_TCP_END_OUT_OF_SEQ = 3, | ||
127 | CQE_RX_TCP_END_PKT_ERR = 4, | ||
128 | CQE_RX_TCP_END_QS_DISABLED = 0x0F, | ||
129 | }; | ||
130 | |||
131 | /* Packet protocol level error enumeration */ | ||
132 | enum cqe_rx_err_level { | ||
133 | CQE_RX_ERRLVL_RE = 0x0, | ||
134 | CQE_RX_ERRLVL_L2 = 0x1, | ||
135 | CQE_RX_ERRLVL_L3 = 0x2, | ||
136 | CQE_RX_ERRLVL_L4 = 0x3, | ||
137 | }; | ||
138 | |||
139 | /* Packet protocol level error type enumeration */ | ||
140 | enum cqe_rx_err_opcode { | ||
141 | CQE_RX_ERR_RE_NONE = 0x0, | ||
142 | CQE_RX_ERR_RE_PARTIAL = 0x1, | ||
143 | CQE_RX_ERR_RE_JABBER = 0x2, | ||
144 | CQE_RX_ERR_RE_FCS = 0x7, | ||
145 | CQE_RX_ERR_RE_TERMINATE = 0x9, | ||
146 | CQE_RX_ERR_RE_RX_CTL = 0xb, | ||
147 | CQE_RX_ERR_PREL2_ERR = 0x1f, | ||
148 | CQE_RX_ERR_L2_FRAGMENT = 0x20, | ||
149 | CQE_RX_ERR_L2_OVERRUN = 0x21, | ||
150 | CQE_RX_ERR_L2_PFCS = 0x22, | ||
151 | CQE_RX_ERR_L2_PUNY = 0x23, | ||
152 | CQE_RX_ERR_L2_MAL = 0x24, | ||
153 | CQE_RX_ERR_L2_OVERSIZE = 0x25, | ||
154 | CQE_RX_ERR_L2_UNDERSIZE = 0x26, | ||
155 | CQE_RX_ERR_L2_LENMISM = 0x27, | ||
156 | CQE_RX_ERR_L2_PCLP = 0x28, | ||
157 | CQE_RX_ERR_IP_NOT = 0x41, | ||
158 | CQE_RX_ERR_IP_CHK = 0x42, | ||
159 | CQE_RX_ERR_IP_MAL = 0x43, | ||
160 | CQE_RX_ERR_IP_MALD = 0x44, | ||
161 | CQE_RX_ERR_IP_HOP = 0x45, | ||
162 | CQE_RX_ERR_L3_ICRC = 0x46, | ||
163 | CQE_RX_ERR_L3_PCLP = 0x47, | ||
164 | CQE_RX_ERR_L4_MAL = 0x61, | ||
165 | CQE_RX_ERR_L4_CHK = 0x62, | ||
166 | CQE_RX_ERR_UDP_LEN = 0x63, | ||
167 | CQE_RX_ERR_L4_PORT = 0x64, | ||
168 | CQE_RX_ERR_TCP_FLAG = 0x65, | ||
169 | CQE_RX_ERR_TCP_OFFSET = 0x66, | ||
170 | CQE_RX_ERR_L4_PCLP = 0x67, | ||
171 | CQE_RX_ERR_RBDR_TRUNC = 0x70, | ||
172 | }; | ||
173 | |||
174 | struct cqe_rx_t { | ||
175 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
176 | u64 cqe_type:4; /* W0 */ | ||
177 | u64 stdn_fault:1; | ||
178 | u64 rsvd0:1; | ||
179 | u64 rq_qs:7; | ||
180 | u64 rq_idx:3; | ||
181 | u64 rsvd1:12; | ||
182 | u64 rss_alg:4; | ||
183 | u64 rsvd2:4; | ||
184 | u64 rb_cnt:4; | ||
185 | u64 vlan_found:1; | ||
186 | u64 vlan_stripped:1; | ||
187 | u64 vlan2_found:1; | ||
188 | u64 vlan2_stripped:1; | ||
189 | u64 l4_type:4; | ||
190 | u64 l3_type:4; | ||
191 | u64 l2_present:1; | ||
192 | u64 err_level:3; | ||
193 | u64 err_opcode:8; | ||
194 | |||
195 | u64 pkt_len:16; /* W1 */ | ||
196 | u64 l2_ptr:8; | ||
197 | u64 l3_ptr:8; | ||
198 | u64 l4_ptr:8; | ||
199 | u64 cq_pkt_len:8; | ||
200 | u64 align_pad:3; | ||
201 | u64 rsvd3:1; | ||
202 | u64 chan:12; | ||
203 | |||
204 | u64 rss_tag:32; /* W2 */ | ||
205 | u64 vlan_tci:16; | ||
206 | u64 vlan_ptr:8; | ||
207 | u64 vlan2_ptr:8; | ||
208 | |||
209 | u64 rb3_sz:16; /* W3 */ | ||
210 | u64 rb2_sz:16; | ||
211 | u64 rb1_sz:16; | ||
212 | u64 rb0_sz:16; | ||
213 | |||
214 | u64 rb7_sz:16; /* W4 */ | ||
215 | u64 rb6_sz:16; | ||
216 | u64 rb5_sz:16; | ||
217 | u64 rb4_sz:16; | ||
218 | |||
219 | u64 rb11_sz:16; /* W5 */ | ||
220 | u64 rb10_sz:16; | ||
221 | u64 rb9_sz:16; | ||
222 | u64 rb8_sz:16; | ||
223 | #elif defined(__LITTLE_ENDIAN_BITFIELD) | ||
224 | u64 err_opcode:8; | ||
225 | u64 err_level:3; | ||
226 | u64 l2_present:1; | ||
227 | u64 l3_type:4; | ||
228 | u64 l4_type:4; | ||
229 | u64 vlan2_stripped:1; | ||
230 | u64 vlan2_found:1; | ||
231 | u64 vlan_stripped:1; | ||
232 | u64 vlan_found:1; | ||
233 | u64 rb_cnt:4; | ||
234 | u64 rsvd2:4; | ||
235 | u64 rss_alg:4; | ||
236 | u64 rsvd1:12; | ||
237 | u64 rq_idx:3; | ||
238 | u64 rq_qs:7; | ||
239 | u64 rsvd0:1; | ||
240 | u64 stdn_fault:1; | ||
241 | u64 cqe_type:4; /* W0 */ | ||
242 | u64 chan:12; | ||
243 | u64 rsvd3:1; | ||
244 | u64 align_pad:3; | ||
245 | u64 cq_pkt_len:8; | ||
246 | u64 l4_ptr:8; | ||
247 | u64 l3_ptr:8; | ||
248 | u64 l2_ptr:8; | ||
249 | u64 pkt_len:16; /* W1 */ | ||
250 | u64 vlan2_ptr:8; | ||
251 | u64 vlan_ptr:8; | ||
252 | u64 vlan_tci:16; | ||
253 | u64 rss_tag:32; /* W2 */ | ||
254 | u64 rb0_sz:16; | ||
255 | u64 rb1_sz:16; | ||
256 | u64 rb2_sz:16; | ||
257 | u64 rb3_sz:16; /* W3 */ | ||
258 | u64 rb4_sz:16; | ||
259 | u64 rb5_sz:16; | ||
260 | u64 rb6_sz:16; | ||
261 | u64 rb7_sz:16; /* W4 */ | ||
262 | u64 rb8_sz:16; | ||
263 | u64 rb9_sz:16; | ||
264 | u64 rb10_sz:16; | ||
265 | u64 rb11_sz:16; /* W5 */ | ||
266 | #endif | ||
267 | u64 rb0_ptr:64; | ||
268 | u64 rb1_ptr:64; | ||
269 | u64 rb2_ptr:64; | ||
270 | u64 rb3_ptr:64; | ||
271 | u64 rb4_ptr:64; | ||
272 | u64 rb5_ptr:64; | ||
273 | u64 rb6_ptr:64; | ||
274 | u64 rb7_ptr:64; | ||
275 | u64 rb8_ptr:64; | ||
276 | u64 rb9_ptr:64; | ||
277 | u64 rb10_ptr:64; | ||
278 | u64 rb11_ptr:64; | ||
279 | }; | ||
280 | |||
281 | struct cqe_rx_tcp_err_t { | ||
282 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
283 | u64 cqe_type:4; /* W0 */ | ||
284 | u64 rsvd0:60; | ||
285 | |||
286 | u64 rsvd1:4; /* W1 */ | ||
287 | u64 partial_first:1; | ||
288 | u64 rsvd2:27; | ||
289 | u64 rbdr_bytes:8; | ||
290 | u64 rsvd3:24; | ||
291 | #elif defined(__LITTLE_ENDIAN_BITFIELD) | ||
292 | u64 rsvd0:60; | ||
293 | u64 cqe_type:4; | ||
294 | |||
295 | u64 rsvd3:24; | ||
296 | u64 rbdr_bytes:8; | ||
297 | u64 rsvd2:27; | ||
298 | u64 partial_first:1; | ||
299 | u64 rsvd1:4; | ||
300 | #endif | ||
301 | }; | ||
302 | |||
303 | struct cqe_rx_tcp_t { | ||
304 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
305 | u64 cqe_type:4; /* W0 */ | ||
306 | u64 rsvd0:52; | ||
307 | u64 cq_tcp_status:8; | ||
308 | |||
309 | u64 rsvd1:32; /* W1 */ | ||
310 | u64 tcp_cntx_bytes:8; | ||
311 | u64 rsvd2:8; | ||
312 | u64 tcp_err_bytes:16; | ||
313 | #elif defined(__LITTLE_ENDIAN_BITFIELD) | ||
314 | u64 cq_tcp_status:8; | ||
315 | u64 rsvd0:52; | ||
316 | u64 cqe_type:4; /* W0 */ | ||
317 | |||
318 | u64 tcp_err_bytes:16; | ||
319 | u64 rsvd2:8; | ||
320 | u64 tcp_cntx_bytes:8; | ||
321 | u64 rsvd1:32; /* W1 */ | ||
322 | #endif | ||
323 | }; | ||
324 | |||
325 | struct cqe_send_t { | ||
326 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
327 | u64 cqe_type:4; /* W0 */ | ||
328 | u64 rsvd0:4; | ||
329 | u64 sqe_ptr:16; | ||
330 | u64 rsvd1:4; | ||
331 | u64 rsvd2:10; | ||
332 | u64 sq_qs:7; | ||
333 | u64 sq_idx:3; | ||
334 | u64 rsvd3:8; | ||
335 | u64 send_status:8; | ||
336 | |||
337 | u64 ptp_timestamp:64; /* W1 */ | ||
338 | #elif defined(__LITTLE_ENDIAN_BITFIELD) | ||
339 | u64 send_status:8; | ||
340 | u64 rsvd3:8; | ||
341 | u64 sq_idx:3; | ||
342 | u64 sq_qs:7; | ||
343 | u64 rsvd2:10; | ||
344 | u64 rsvd1:4; | ||
345 | u64 sqe_ptr:16; | ||
346 | u64 rsvd0:4; | ||
347 | u64 cqe_type:4; /* W0 */ | ||
348 | |||
349 | u64 ptp_timestamp:64; /* W1 */ | ||
350 | #endif | ||
351 | }; | ||
352 | |||
353 | union cq_desc_t { | ||
354 | u64 u[64]; | ||
355 | struct cqe_send_t snd_hdr; | ||
356 | struct cqe_rx_t rx_hdr; | ||
357 | struct cqe_rx_tcp_t rx_tcp_hdr; | ||
358 | struct cqe_rx_tcp_err_t rx_tcp_err_hdr; | ||
359 | }; | ||
360 | |||
361 | struct rbdr_entry_t { | ||
362 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
363 | u64 rsvd0:15; | ||
364 | u64 buf_addr:42; | ||
365 | u64 cache_align:7; | ||
366 | #elif defined(__LITTLE_ENDIAN_BITFIELD) | ||
367 | u64 cache_align:7; | ||
368 | u64 buf_addr:42; | ||
369 | u64 rsvd0:15; | ||
370 | #endif | ||
371 | }; | ||
372 | |||
373 | /* TCP reassembly context */ | ||
374 | struct rbe_tcp_cnxt_t { | ||
375 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
376 | u64 tcp_pkt_cnt:12; | ||
377 | u64 rsvd1:4; | ||
378 | u64 align_hdr_bytes:4; | ||
379 | u64 align_ptr_bytes:4; | ||
380 | u64 ptr_bytes:16; | ||
381 | u64 rsvd2:24; | ||
382 | u64 cqe_type:4; | ||
383 | u64 rsvd0:54; | ||
384 | u64 tcp_end_reason:2; | ||
385 | u64 tcp_status:4; | ||
386 | #elif defined(__LITTLE_ENDIAN_BITFIELD) | ||
387 | u64 tcp_status:4; | ||
388 | u64 tcp_end_reason:2; | ||
389 | u64 rsvd0:54; | ||
390 | u64 cqe_type:4; | ||
391 | u64 rsvd2:24; | ||
392 | u64 ptr_bytes:16; | ||
393 | u64 align_ptr_bytes:4; | ||
394 | u64 align_hdr_bytes:4; | ||
395 | u64 rsvd1:4; | ||
396 | u64 tcp_pkt_cnt:12; | ||
397 | #endif | ||
398 | }; | ||
399 | |||
400 | /* Always Big endian */ | ||
401 | struct rx_hdr_t { | ||
402 | u64 opaque:32; | ||
403 | u64 rss_flow:8; | ||
404 | u64 skip_length:6; | ||
405 | u64 disable_rss:1; | ||
406 | u64 disable_tcp_reassembly:1; | ||
407 | u64 nodrop:1; | ||
408 | u64 dest_alg:2; | ||
409 | u64 rsvd0:2; | ||
410 | u64 dest_rq:11; | ||
411 | }; | ||
412 | |||
413 | enum send_l4_csum_type { | ||
414 | SEND_L4_CSUM_DISABLE = 0x00, | ||
415 | SEND_L4_CSUM_UDP = 0x01, | ||
416 | SEND_L4_CSUM_TCP = 0x02, | ||
417 | SEND_L4_CSUM_SCTP = 0x03, | ||
418 | }; | ||
419 | |||
420 | enum send_crc_alg { | ||
421 | SEND_CRCALG_CRC32 = 0x00, | ||
422 | SEND_CRCALG_CRC32C = 0x01, | ||
423 | SEND_CRCALG_ICRC = 0x02, | ||
424 | }; | ||
425 | |||
426 | enum send_load_type { | ||
427 | SEND_LD_TYPE_LDD = 0x00, | ||
428 | SEND_LD_TYPE_LDT = 0x01, | ||
429 | SEND_LD_TYPE_LDWB = 0x02, | ||
430 | }; | ||
431 | |||
432 | enum send_mem_alg_type { | ||
433 | SEND_MEMALG_SET = 0x00, | ||
434 | SEND_MEMALG_ADD = 0x08, | ||
435 | SEND_MEMALG_SUB = 0x09, | ||
436 | SEND_MEMALG_ADDLEN = 0x0A, | ||
437 | SEND_MEMALG_SUBLEN = 0x0B, | ||
438 | }; | ||
439 | |||
440 | enum send_mem_dsz_type { | ||
441 | SEND_MEMDSZ_B64 = 0x00, | ||
442 | SEND_MEMDSZ_B32 = 0x01, | ||
443 | SEND_MEMDSZ_B8 = 0x03, | ||
444 | }; | ||
445 | |||
446 | enum sq_subdesc_type { | ||
447 | SQ_DESC_TYPE_INVALID = 0x00, | ||
448 | SQ_DESC_TYPE_HEADER = 0x01, | ||
449 | SQ_DESC_TYPE_CRC = 0x02, | ||
450 | SQ_DESC_TYPE_IMMEDIATE = 0x03, | ||
451 | SQ_DESC_TYPE_GATHER = 0x04, | ||
452 | SQ_DESC_TYPE_MEMORY = 0x05, | ||
453 | }; | ||
454 | |||
455 | struct sq_crc_subdesc { | ||
456 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
457 | u64 rsvd1:32; | ||
458 | u64 crc_ival:32; | ||
459 | u64 subdesc_type:4; | ||
460 | u64 crc_alg:2; | ||
461 | u64 rsvd0:10; | ||
462 | u64 crc_insert_pos:16; | ||
463 | u64 hdr_start:16; | ||
464 | u64 crc_len:16; | ||
465 | #elif defined(__LITTLE_ENDIAN_BITFIELD) | ||
466 | u64 crc_len:16; | ||
467 | u64 hdr_start:16; | ||
468 | u64 crc_insert_pos:16; | ||
469 | u64 rsvd0:10; | ||
470 | u64 crc_alg:2; | ||
471 | u64 subdesc_type:4; | ||
472 | u64 crc_ival:32; | ||
473 | u64 rsvd1:32; | ||
474 | #endif | ||
475 | }; | ||
476 | |||
477 | struct sq_gather_subdesc { | ||
478 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
479 | u64 subdesc_type:4; /* W0 */ | ||
480 | u64 ld_type:2; | ||
481 | u64 rsvd0:42; | ||
482 | u64 size:16; | ||
483 | |||
484 | u64 rsvd1:15; /* W1 */ | ||
485 | u64 addr:49; | ||
486 | #elif defined(__LITTLE_ENDIAN_BITFIELD) | ||
487 | u64 size:16; | ||
488 | u64 rsvd0:42; | ||
489 | u64 ld_type:2; | ||
490 | u64 subdesc_type:4; /* W0 */ | ||
491 | |||
492 | u64 addr:49; | ||
493 | u64 rsvd1:15; /* W1 */ | ||
494 | #endif | ||
495 | }; | ||
496 | |||
497 | /* SQ immediate subdescriptor */ | ||
498 | struct sq_imm_subdesc { | ||
499 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
500 | u64 subdesc_type:4; /* W0 */ | ||
501 | u64 rsvd0:46; | ||
502 | u64 len:14; | ||
503 | |||
504 | u64 data:64; /* W1 */ | ||
505 | #elif defined(__LITTLE_ENDIAN_BITFIELD) | ||
506 | u64 len:14; | ||
507 | u64 rsvd0:46; | ||
508 | u64 subdesc_type:4; /* W0 */ | ||
509 | |||
510 | u64 data:64; /* W1 */ | ||
511 | #endif | ||
512 | }; | ||
513 | |||
514 | struct sq_mem_subdesc { | ||
515 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
516 | u64 subdesc_type:4; /* W0 */ | ||
517 | u64 mem_alg:4; | ||
518 | u64 mem_dsz:2; | ||
519 | u64 wmem:1; | ||
520 | u64 rsvd0:21; | ||
521 | u64 offset:32; | ||
522 | |||
523 | u64 rsvd1:15; /* W1 */ | ||
524 | u64 addr:49; | ||
525 | #elif defined(__LITTLE_ENDIAN_BITFIELD) | ||
526 | u64 offset:32; | ||
527 | u64 rsvd0:21; | ||
528 | u64 wmem:1; | ||
529 | u64 mem_dsz:2; | ||
530 | u64 mem_alg:4; | ||
531 | u64 subdesc_type:4; /* W0 */ | ||
532 | |||
533 | u64 addr:49; | ||
534 | u64 rsvd1:15; /* W1 */ | ||
535 | #endif | ||
536 | }; | ||
537 | |||
538 | struct sq_hdr_subdesc { | ||
539 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
540 | u64 subdesc_type:4; | ||
541 | u64 tso:1; | ||
542 | u64 post_cqe:1; /* Post CQE on no error also */ | ||
543 | u64 dont_send:1; | ||
544 | u64 tstmp:1; | ||
545 | u64 subdesc_cnt:8; | ||
546 | u64 csum_l4:2; | ||
547 | u64 csum_l3:1; | ||
548 | u64 rsvd0:5; | ||
549 | u64 l4_offset:8; | ||
550 | u64 l3_offset:8; | ||
551 | u64 rsvd1:4; | ||
552 | u64 tot_len:20; /* W0 */ | ||
553 | |||
554 | u64 tso_sdc_cont:8; | ||
555 | u64 tso_sdc_first:8; | ||
556 | u64 tso_l4_offset:8; | ||
557 | u64 tso_flags_last:12; | ||
558 | u64 tso_flags_first:12; | ||
559 | u64 rsvd2:2; | ||
560 | u64 tso_max_paysize:14; /* W1 */ | ||
561 | #elif defined(__LITTLE_ENDIAN_BITFIELD) | ||
562 | u64 tot_len:20; | ||
563 | u64 rsvd1:4; | ||
564 | u64 l3_offset:8; | ||
565 | u64 l4_offset:8; | ||
566 | u64 rsvd0:5; | ||
567 | u64 csum_l3:1; | ||
568 | u64 csum_l4:2; | ||
569 | u64 subdesc_cnt:8; | ||
570 | u64 tstmp:1; | ||
571 | u64 dont_send:1; | ||
572 | u64 post_cqe:1; /* Post CQE on no error also */ | ||
573 | u64 tso:1; | ||
574 | u64 subdesc_type:4; /* W0 */ | ||
575 | |||
576 | u64 tso_max_paysize:14; | ||
577 | u64 rsvd2:2; | ||
578 | u64 tso_flags_first:12; | ||
579 | u64 tso_flags_last:12; | ||
580 | u64 tso_l4_offset:8; | ||
581 | u64 tso_sdc_first:8; | ||
582 | u64 tso_sdc_cont:8; /* W1 */ | ||
583 | #endif | ||
584 | }; | ||
585 | |||
586 | /* Queue config register formats */ | ||
587 | struct rq_cfg { | ||
588 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
589 | u64 reserved_2_63:62; | ||
590 | u64 ena:1; | ||
591 | u64 tcp_ena:1; | ||
592 | #elif defined(__LITTLE_ENDIAN_BITFIELD) | ||
593 | u64 tcp_ena:1; | ||
594 | u64 ena:1; | ||
595 | u64 reserved_2_63:62; | ||
596 | #endif | ||
597 | }; | ||
598 | |||
599 | struct cq_cfg { | ||
600 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
601 | u64 reserved_43_63:21; | ||
602 | u64 ena:1; | ||
603 | u64 reset:1; | ||
604 | u64 caching:1; | ||
605 | u64 reserved_35_39:5; | ||
606 | u64 qsize:3; | ||
607 | u64 reserved_25_31:7; | ||
608 | u64 avg_con:9; | ||
609 | u64 reserved_0_15:16; | ||
610 | #elif defined(__LITTLE_ENDIAN_BITFIELD) | ||
611 | u64 reserved_0_15:16; | ||
612 | u64 avg_con:9; | ||
613 | u64 reserved_25_31:7; | ||
614 | u64 qsize:3; | ||
615 | u64 reserved_35_39:5; | ||
616 | u64 caching:1; | ||
617 | u64 reset:1; | ||
618 | u64 ena:1; | ||
619 | u64 reserved_43_63:21; | ||
620 | #endif | ||
621 | }; | ||
622 | |||
623 | struct sq_cfg { | ||
624 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
625 | u64 reserved_20_63:44; | ||
626 | u64 ena:1; | ||
627 | u64 reserved_18_18:1; | ||
628 | u64 reset:1; | ||
629 | u64 ldwb:1; | ||
630 | u64 reserved_11_15:5; | ||
631 | u64 qsize:3; | ||
632 | u64 reserved_3_7:5; | ||
633 | u64 tstmp_bgx_intf:3; | ||
634 | #elif defined(__LITTLE_ENDIAN_BITFIELD) | ||
635 | u64 tstmp_bgx_intf:3; | ||
636 | u64 reserved_3_7:5; | ||
637 | u64 qsize:3; | ||
638 | u64 reserved_11_15:5; | ||
639 | u64 ldwb:1; | ||
640 | u64 reset:1; | ||
641 | u64 reserved_18_18:1; | ||
642 | u64 ena:1; | ||
643 | u64 reserved_20_63:44; | ||
644 | #endif | ||
645 | }; | ||
646 | |||
647 | struct rbdr_cfg { | ||
648 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
649 | u64 reserved_45_63:19; | ||
650 | u64 ena:1; | ||
651 | u64 reset:1; | ||
652 | u64 ldwb:1; | ||
653 | u64 reserved_36_41:6; | ||
654 | u64 qsize:4; | ||
655 | u64 reserved_25_31:7; | ||
656 | u64 avg_con:9; | ||
657 | u64 reserved_12_15:4; | ||
658 | u64 lines:12; | ||
659 | #elif defined(__LITTLE_ENDIAN_BITFIELD) | ||
660 | u64 lines:12; | ||
661 | u64 reserved_12_15:4; | ||
662 | u64 avg_con:9; | ||
663 | u64 reserved_25_31:7; | ||
664 | u64 qsize:4; | ||
665 | u64 reserved_36_41:6; | ||
666 | u64 ldwb:1; | ||
667 | u64 reset:1; | ||
668 | u64 ena:1; | ||
669 | u64 reserved_45_63:19; | ||
670 | #endif | ||
671 | }; | ||
672 | |||
673 | struct qs_cfg { | ||
674 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
675 | u64 reserved_32_63:32; | ||
676 | u64 ena:1; | ||
677 | u64 reserved_27_30:4; | ||
678 | u64 sq_ins_ena:1; | ||
679 | u64 sq_ins_pos:6; | ||
680 | u64 lock_ena:1; | ||
681 | u64 lock_viol_cqe_ena:1; | ||
682 | u64 send_tstmp_ena:1; | ||
683 | u64 be:1; | ||
684 | u64 reserved_7_15:9; | ||
685 | u64 vnic:7; | ||
686 | #elif defined(__LITTLE_ENDIAN_BITFIELD) | ||
687 | u64 vnic:7; | ||
688 | u64 reserved_7_15:9; | ||
689 | u64 be:1; | ||
690 | u64 send_tstmp_ena:1; | ||
691 | u64 lock_viol_cqe_ena:1; | ||
692 | u64 lock_ena:1; | ||
693 | u64 sq_ins_pos:6; | ||
694 | u64 sq_ins_ena:1; | ||
695 | u64 reserved_27_30:4; | ||
696 | u64 ena:1; | ||
697 | u64 reserved_32_63:32; | ||
698 | #endif | ||
699 | }; | ||
700 | |||
701 | #endif /* Q_STRUCT_H */ | ||
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c new file mode 100644 index 000000000000..020e11cf3fdd --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c | |||
@@ -0,0 +1,966 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2015 Cavium, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of version 2 of the GNU General Public License | ||
6 | * as published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <linux/interrupt.h> | ||
11 | #include <linux/pci.h> | ||
12 | #include <linux/netdevice.h> | ||
13 | #include <linux/etherdevice.h> | ||
14 | #include <linux/phy.h> | ||
15 | #include <linux/of.h> | ||
16 | #include <linux/of_mdio.h> | ||
17 | #include <linux/of_net.h> | ||
18 | |||
19 | #include "nic_reg.h" | ||
20 | #include "nic.h" | ||
21 | #include "thunder_bgx.h" | ||
22 | |||
23 | #define DRV_NAME "thunder-BGX" | ||
24 | #define DRV_VERSION "1.0" | ||
25 | |||
26 | struct lmac { | ||
27 | struct bgx *bgx; | ||
28 | int dmac; | ||
29 | unsigned char mac[ETH_ALEN]; | ||
30 | bool link_up; | ||
31 | int lmacid; /* ID within BGX */ | ||
32 | int lmacid_bd; /* ID on board */ | ||
33 | struct net_device netdev; | ||
34 | struct phy_device *phydev; | ||
35 | unsigned int last_duplex; | ||
36 | unsigned int last_link; | ||
37 | unsigned int last_speed; | ||
38 | bool is_sgmii; | ||
39 | struct delayed_work dwork; | ||
40 | struct workqueue_struct *check_link; | ||
41 | } lmac; | ||
42 | |||
43 | struct bgx { | ||
44 | u8 bgx_id; | ||
45 | u8 qlm_mode; | ||
46 | struct lmac lmac[MAX_LMAC_PER_BGX]; | ||
47 | int lmac_count; | ||
48 | int lmac_type; | ||
49 | int lane_to_sds; | ||
50 | int use_training; | ||
51 | void __iomem *reg_base; | ||
52 | struct pci_dev *pdev; | ||
53 | } bgx; | ||
54 | |||
55 | struct bgx *bgx_vnic[MAX_BGX_THUNDER]; | ||
56 | static int lmac_count; /* Total number of LMACs in the system */ | ||
57 | |||
58 | static int bgx_xaui_check_link(struct lmac *lmac); | ||
59 | |||
60 | /* Supported devices */ | ||
61 | static const struct pci_device_id bgx_id_table[] = { | ||
62 | { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) }, | ||
63 | { 0, } /* end of table */ | ||
64 | }; | ||
65 | |||
66 | MODULE_AUTHOR("Cavium Inc"); | ||
67 | MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver"); | ||
68 | MODULE_LICENSE("GPL v2"); | ||
69 | MODULE_VERSION(DRV_VERSION); | ||
70 | MODULE_DEVICE_TABLE(pci, bgx_id_table); | ||
71 | |||
72 | /* The Cavium ThunderX network controller can *only* be found in SoCs | ||
73 | * containing the ThunderX ARM64 CPU implementation. All accesses to the device | ||
74 | * registers on this platform are implicitly strongly ordered with respect | ||
75 | * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use | ||
76 | * with no memory barriers in this driver. The readq()/writeq() functions add | ||
77 | explicit ordering operations which in this case are redundant and only | ||
78 | * add overhead. | ||
79 | */ | ||
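To make the ordering note above concrete, here is a minimal sketch contrasting the relaxed accessor this driver uses with the fully ordered readq(); it is illustrative only, not part of the patch, and the bgx_csr_read_example() name is hypothetical.

    /* Illustrative only: both accessors read the same CSR; readq() adds
     * ordering that the comment above explains is redundant on ThunderX.
     */
    static u64 bgx_csr_read_example(void __iomem *addr)
    {
    	u64 ordered = readq(addr);		/* fully ordered, extra overhead */
    	u64 relaxed = readq_relaxed(addr);	/* what this driver uses */

    	(void)ordered;
    	return relaxed;
    }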
80 | |||
81 | /* Register read/write APIs */ | ||
82 | static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset) | ||
83 | { | ||
84 | void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset; | ||
85 | |||
86 | return readq_relaxed(addr); | ||
87 | } | ||
88 | |||
89 | static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val) | ||
90 | { | ||
91 | void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset; | ||
92 | |||
93 | writeq_relaxed(val, addr); | ||
94 | } | ||
95 | |||
96 | static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val) | ||
97 | { | ||
98 | void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset; | ||
99 | |||
100 | writeq_relaxed(val | readq_relaxed(addr), addr); | ||
101 | } | ||
102 | |||
103 | static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero) | ||
104 | { | ||
105 | int timeout = 100; | ||
106 | u64 reg_val; | ||
107 | |||
108 | while (timeout) { | ||
109 | reg_val = bgx_reg_read(bgx, lmac, reg); | ||
110 | if (zero && !(reg_val & mask)) | ||
111 | return 0; | ||
112 | if (!zero && (reg_val & mask)) | ||
113 | return 0; | ||
114 | usleep_range(1000, 2000); | ||
115 | timeout--; | ||
116 | } | ||
117 | return 1; | ||
118 | } | ||
119 | |||
120 | /* Return bitmap of BGX devices present in HW */ | ||
121 | unsigned bgx_get_map(int node) | ||
122 | { | ||
123 | int i; | ||
124 | unsigned map = 0; | ||
125 | |||
126 | for (i = 0; i < MAX_BGX_PER_CN88XX; i++) { | ||
127 | if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i]) | ||
128 | map |= (1 << i); | ||
129 | } | ||
130 | |||
131 | return map; | ||
132 | } | ||
133 | EXPORT_SYMBOL(bgx_get_map); | ||
134 | |||
135 | /* Return number of LMAC configured for this BGX */ | ||
136 | int bgx_get_lmac_count(int node, int bgx_idx) | ||
137 | { | ||
138 | struct bgx *bgx; | ||
139 | |||
140 | bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; | ||
141 | if (bgx) | ||
142 | return bgx->lmac_count; | ||
143 | |||
144 | return 0; | ||
145 | } | ||
146 | EXPORT_SYMBOL(bgx_get_lmac_count); | ||
147 | |||
148 | /* Returns the current link status of LMAC */ | ||
149 | void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status) | ||
150 | { | ||
151 | struct bgx_link_status *link = (struct bgx_link_status *)status; | ||
152 | struct bgx *bgx; | ||
153 | struct lmac *lmac; | ||
154 | |||
155 | bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; | ||
156 | if (!bgx) | ||
157 | return; | ||
158 | |||
159 | lmac = &bgx->lmac[lmacid]; | ||
160 | link->link_up = lmac->link_up; | ||
161 | link->duplex = lmac->last_duplex; | ||
162 | link->speed = lmac->last_speed; | ||
163 | } | ||
164 | EXPORT_SYMBOL(bgx_get_lmac_link_state); | ||
165 | |||
166 | const char *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid) | ||
167 | { | ||
168 | struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; | ||
169 | |||
170 | if (bgx) | ||
171 | return bgx->lmac[lmacid].mac; | ||
172 | |||
173 | return NULL; | ||
174 | } | ||
175 | EXPORT_SYMBOL(bgx_get_lmac_mac); | ||
176 | |||
177 | void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const char *mac) | ||
178 | { | ||
179 | struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; | ||
180 | |||
181 | if (!bgx) | ||
182 | return; | ||
183 | |||
184 | ether_addr_copy(bgx->lmac[lmacid].mac, mac); | ||
185 | } | ||
186 | EXPORT_SYMBOL(bgx_set_lmac_mac); | ||
187 | |||
188 | static void bgx_sgmii_change_link_state(struct lmac *lmac) | ||
189 | { | ||
190 | struct bgx *bgx = lmac->bgx; | ||
191 | u64 cmr_cfg; | ||
192 | u64 port_cfg = 0; | ||
193 | u64 misc_ctl = 0; | ||
194 | |||
195 | cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG); | ||
196 | cmr_cfg &= ~CMR_EN; | ||
197 | bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); | ||
198 | |||
199 | port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); | ||
200 | misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL); | ||
201 | |||
202 | if (lmac->link_up) { | ||
203 | misc_ctl &= ~PCS_MISC_CTL_GMX_ENO; | ||
204 | port_cfg &= ~GMI_PORT_CFG_DUPLEX; | ||
205 | port_cfg |= (lmac->last_duplex << 2); | ||
206 | } else { | ||
207 | misc_ctl |= PCS_MISC_CTL_GMX_ENO; | ||
208 | } | ||
209 | |||
210 | switch (lmac->last_speed) { | ||
211 | case 10: | ||
212 | port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */ | ||
213 | port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */ | ||
214 | port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */ | ||
215 | misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK; | ||
216 | misc_ctl |= 50; /* samp_pt */ | ||
217 | bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64); | ||
218 | bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0); | ||
219 | break; | ||
220 | case 100: | ||
221 | port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */ | ||
222 | port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */ | ||
223 | port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */ | ||
224 | misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK; | ||
225 | misc_ctl |= 5; /* samp_pt */ | ||
226 | bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64); | ||
227 | bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0); | ||
228 | break; | ||
229 | case 1000: | ||
230 | port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */ | ||
231 | port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */ | ||
232 | port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */ | ||
233 | misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK; | ||
234 | misc_ctl |= 1; /* samp_pt */ | ||
235 | bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512); | ||
236 | if (lmac->last_duplex) | ||
237 | bgx_reg_write(bgx, lmac->lmacid, | ||
238 | BGX_GMP_GMI_TXX_BURST, 0); | ||
239 | else | ||
240 | bgx_reg_write(bgx, lmac->lmacid, | ||
241 | BGX_GMP_GMI_TXX_BURST, 8192); | ||
242 | break; | ||
243 | default: | ||
244 | break; | ||
245 | } | ||
246 | bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl); | ||
247 | bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg); | ||
248 | |||
249 | port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); | ||
250 | |||
251 | /* Re-enable lmac */ | ||
252 | cmr_cfg |= CMR_EN; | ||
253 | bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); | ||
254 | } | ||
255 | |||
256 | void bgx_lmac_handler(struct net_device *netdev) | ||
257 | { | ||
258 | struct lmac *lmac = container_of(netdev, struct lmac, netdev); | ||
259 | struct phy_device *phydev = lmac->phydev; | ||
260 | int link_changed = 0; | ||
261 | |||
262 | if (!lmac) | ||
263 | return; | ||
264 | |||
265 | if (!phydev->link && lmac->last_link) | ||
266 | link_changed = -1; | ||
267 | |||
268 | if (phydev->link && | ||
269 | (lmac->last_duplex != phydev->duplex || | ||
270 | lmac->last_link != phydev->link || | ||
271 | lmac->last_speed != phydev->speed)) { | ||
272 | link_changed = 1; | ||
273 | } | ||
274 | |||
275 | lmac->last_link = phydev->link; | ||
276 | lmac->last_speed = phydev->speed; | ||
277 | lmac->last_duplex = phydev->duplex; | ||
278 | |||
279 | if (!link_changed) | ||
280 | return; | ||
281 | |||
282 | if (link_changed > 0) | ||
283 | lmac->link_up = true; | ||
284 | else | ||
285 | lmac->link_up = false; | ||
286 | |||
287 | if (lmac->is_sgmii) | ||
288 | bgx_sgmii_change_link_state(lmac); | ||
289 | else | ||
290 | bgx_xaui_check_link(lmac); | ||
291 | } | ||
292 | |||
293 | u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx) | ||
294 | { | ||
295 | struct bgx *bgx; | ||
296 | |||
297 | bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; | ||
298 | if (!bgx) | ||
299 | return 0; | ||
300 | |||
301 | if (idx > 8) | ||
302 | lmac = 0; | ||
303 | return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8)); | ||
304 | } | ||
305 | EXPORT_SYMBOL(bgx_get_rx_stats); | ||
306 | |||
307 | u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx) | ||
308 | { | ||
309 | struct bgx *bgx; | ||
310 | |||
311 | bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; | ||
312 | if (!bgx) | ||
313 | return 0; | ||
314 | |||
315 | return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8)); | ||
316 | } | ||
317 | EXPORT_SYMBOL(bgx_get_tx_stats); | ||
318 | |||
319 | static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac) | ||
320 | { | ||
321 | u64 offset; | ||
322 | |||
323 | while (bgx->lmac[lmac].dmac > 0) { | ||
324 | offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) + | ||
325 | (lmac * MAX_DMAC_PER_LMAC * sizeof(u64)); | ||
326 | bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0); | ||
327 | bgx->lmac[lmac].dmac--; | ||
328 | } | ||
329 | } | ||
330 | |||
331 | static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid) | ||
332 | { | ||
333 | u64 cfg; | ||
334 | |||
335 | bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30); | ||
336 | /* max packet size */ | ||
337 | bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE); | ||
338 | |||
339 | /* Disable frame alignment if using preamble */ | ||
340 | cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND); | ||
341 | if (cfg & 1) | ||
342 | bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0); | ||
343 | |||
344 | /* Enable lmac */ | ||
345 | bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN); | ||
346 | |||
347 | /* PCS reset */ | ||
348 | bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET); | ||
349 | if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, | ||
350 | PCS_MRX_CTL_RESET, true)) { | ||
351 | dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n"); | ||
352 | return -1; | ||
353 | } | ||
354 | |||
355 | /* power down, reset autoneg, autoneg enable */ | ||
356 | cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL); | ||
357 | cfg &= ~PCS_MRX_CTL_PWR_DN; | ||
358 | cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN); | ||
359 | bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg); | ||
360 | |||
361 | if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS, | ||
362 | PCS_MRX_STATUS_AN_CPT, false)) { | ||
363 | dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n"); | ||
364 | return -1; | ||
365 | } | ||
366 | |||
367 | return 0; | ||
368 | } | ||
369 | |||
370 | static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type) | ||
371 | { | ||
372 | u64 cfg; | ||
373 | |||
374 | /* Reset SPU */ | ||
375 | bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET); | ||
376 | if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) { | ||
377 | dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n"); | ||
378 | return -1; | ||
379 | } | ||
380 | |||
381 | /* Disable LMAC */ | ||
382 | cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); | ||
383 | cfg &= ~CMR_EN; | ||
384 | bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); | ||
385 | |||
386 | bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER); | ||
387 | /* Set interleaved running disparity for RXAUI */ | ||
388 | if (bgx->lmac_type != BGX_MODE_RXAUI) | ||
389 | bgx_reg_modify(bgx, lmacid, | ||
390 | BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS); | ||
391 | else | ||
392 | bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, | ||
393 | SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP); | ||
394 | |||
395 | /* clear all interrupts */ | ||
396 | cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT); | ||
397 | bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg); | ||
398 | cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT); | ||
399 | bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg); | ||
400 | cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); | ||
401 | bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); | ||
402 | |||
403 | if (bgx->use_training) { | ||
404 | bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00); | ||
405 | bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00); | ||
406 | bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00); | ||
407 | /* training enable */ | ||
408 | bgx_reg_modify(bgx, lmacid, | ||
409 | BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN); | ||
410 | } | ||
411 | |||
412 | /* Append FCS to each packet */ | ||
413 | bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D); | ||
414 | |||
415 | /* Disable forward error correction */ | ||
416 | cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL); | ||
417 | cfg &= ~SPU_FEC_CTL_FEC_EN; | ||
418 | bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg); | ||
419 | |||
420 | /* Disable autoneg */ | ||
421 | cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL); | ||
422 | cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN); | ||
423 | bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg); | ||
424 | |||
425 | cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV); | ||
426 | if (bgx->lmac_type == BGX_MODE_10G_KR) | ||
427 | cfg |= (1 << 23); | ||
428 | else if (bgx->lmac_type == BGX_MODE_40G_KR) | ||
429 | cfg |= (1 << 24); | ||
430 | else | ||
431 | cfg &= ~((1 << 23) | (1 << 24)); | ||
432 | cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12))); | ||
433 | bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg); | ||
434 | |||
435 | cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL); | ||
436 | cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN; | ||
437 | bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg); | ||
438 | |||
439 | /* Enable lmac */ | ||
440 | bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN); | ||
441 | |||
442 | cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1); | ||
443 | cfg &= ~SPU_CTL_LOW_POWER; | ||
444 | bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg); | ||
445 | |||
446 | cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL); | ||
447 | cfg &= ~SMU_TX_CTL_UNI_EN; | ||
448 | cfg |= SMU_TX_CTL_DIC_EN; | ||
449 | bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg); | ||
450 | |||
451 | /* take lmac_count into account */ | ||
452 | bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1)); | ||
453 | /* max packet size */ | ||
454 | bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE); | ||
455 | |||
456 | return 0; | ||
457 | } | ||
458 | |||
459 | static int bgx_xaui_check_link(struct lmac *lmac) | ||
460 | { | ||
461 | struct bgx *bgx = lmac->bgx; | ||
462 | int lmacid = lmac->lmacid; | ||
463 | int lmac_type = bgx->lmac_type; | ||
464 | u64 cfg; | ||
465 | |||
466 | bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS); | ||
467 | if (bgx->use_training) { | ||
468 | cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); | ||
469 | if (!(cfg & (1ull << 13))) { | ||
470 | cfg = (1ull << 13) | (1ull << 14); | ||
471 | bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); | ||
472 | cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL); | ||
473 | cfg |= (1ull << 0); | ||
474 | bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg); | ||
475 | return -1; | ||
476 | } | ||
477 | } | ||
478 | |||
479 | /* wait for PCS to come out of reset */ | ||
480 | if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) { | ||
481 | dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n"); | ||
482 | return -1; | ||
483 | } | ||
484 | |||
485 | if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) || | ||
486 | (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) { | ||
487 | if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1, | ||
488 | SPU_BR_STATUS_BLK_LOCK, false)) { | ||
489 | dev_err(&bgx->pdev->dev, | ||
490 | "SPU_BR_STATUS_BLK_LOCK not completed\n"); | ||
491 | return -1; | ||
492 | } | ||
493 | } else { | ||
494 | if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS, | ||
495 | SPU_BX_STATUS_RX_ALIGN, false)) { | ||
496 | dev_err(&bgx->pdev->dev, | ||
497 | "SPU_BX_STATUS_RX_ALIGN not completed\n"); | ||
498 | return -1; | ||
499 | } | ||
500 | } | ||
501 | |||
502 | /* Clear rcvflt bit (latching high) and read it back */ | ||
503 | bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT); | ||
504 | if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) { | ||
505 | dev_err(&bgx->pdev->dev, "Receive fault, retry training\n"); | ||
506 | if (bgx->use_training) { | ||
507 | cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); | ||
508 | if (!(cfg & (1ull << 13))) { | ||
509 | cfg = (1ull << 13) | (1ull << 14); | ||
510 | bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); | ||
511 | cfg = bgx_reg_read(bgx, lmacid, | ||
512 | BGX_SPUX_BR_PMD_CRTL); | ||
513 | cfg |= (1ull << 0); | ||
514 | bgx_reg_write(bgx, lmacid, | ||
515 | BGX_SPUX_BR_PMD_CRTL, cfg); | ||
516 | return -1; | ||
517 | } | ||
518 | } | ||
519 | return -1; | ||
520 | } | ||
521 | |||
522 | /* Wait for MAC RX to be ready */ | ||
523 | if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL, | ||
524 | SMU_RX_CTL_STATUS, true)) { | ||
525 | dev_err(&bgx->pdev->dev, "SMU RX link not okay\n"); | ||
526 | return -1; | ||
527 | } | ||
528 | |||
529 | /* Wait for BGX RX to be idle */ | ||
530 | if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) { | ||
531 | dev_err(&bgx->pdev->dev, "SMU RX not idle\n"); | ||
532 | return -1; | ||
533 | } | ||
534 | |||
535 | /* Wait for BGX TX to be idle */ | ||
536 | if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) { | ||
537 | dev_err(&bgx->pdev->dev, "SMU TX not idle\n"); | ||
538 | return -1; | ||
539 | } | ||
540 | |||
541 | if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) { | ||
542 | dev_err(&bgx->pdev->dev, "Receive fault\n"); | ||
543 | return -1; | ||
544 | } | ||
545 | |||
546 | /* Receive link is latching low. Force it high and verify it */ | ||
547 | bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK); | ||
548 | if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1, | ||
549 | SPU_STATUS1_RCV_LNK, false)) { | ||
550 | dev_err(&bgx->pdev->dev, "SPU receive link down\n"); | ||
551 | return -1; | ||
552 | } | ||
553 | |||
554 | cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL); | ||
555 | cfg &= ~SPU_MISC_CTL_RX_DIS; | ||
556 | bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg); | ||
557 | return 0; | ||
558 | } | ||
559 | |||
560 | static void bgx_poll_for_link(struct work_struct *work) | ||
561 | { | ||
562 | struct lmac *lmac; | ||
563 | u64 link; | ||
564 | |||
565 | lmac = container_of(work, struct lmac, dwork.work); | ||
566 | |||
567 | /* Receive link is latching low. Force it high and verify it */ | ||
568 | bgx_reg_modify(lmac->bgx, lmac->lmacid, | ||
569 | BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK); | ||
570 | bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1, | ||
571 | SPU_STATUS1_RCV_LNK, false); | ||
572 | |||
573 | link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1); | ||
574 | if (link & SPU_STATUS1_RCV_LNK) { | ||
575 | lmac->link_up = 1; | ||
576 | if (lmac->bgx->lmac_type == BGX_MODE_XLAUI) | ||
577 | lmac->last_speed = 40000; | ||
578 | else | ||
579 | lmac->last_speed = 10000; | ||
580 | lmac->last_duplex = 1; | ||
581 | } else { | ||
582 | lmac->link_up = 0; | ||
583 | } | ||
584 | |||
585 | if (lmac->last_link != lmac->link_up) { | ||
586 | lmac->last_link = lmac->link_up; | ||
587 | if (lmac->link_up) | ||
588 | bgx_xaui_check_link(lmac); | ||
589 | } | ||
590 | |||
591 | queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2); | ||
592 | } | ||
593 | |||
594 | static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) | ||
595 | { | ||
596 | struct lmac *lmac; | ||
597 | u64 cfg; | ||
598 | |||
599 | lmac = &bgx->lmac[lmacid]; | ||
600 | lmac->bgx = bgx; | ||
601 | |||
602 | if (bgx->lmac_type == BGX_MODE_SGMII) { | ||
603 | lmac->is_sgmii = 1; | ||
604 | if (bgx_lmac_sgmii_init(bgx, lmacid)) | ||
605 | return -1; | ||
606 | } else { | ||
607 | lmac->is_sgmii = 0; | ||
608 | if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type)) | ||
609 | return -1; | ||
610 | } | ||
611 | |||
612 | if (lmac->is_sgmii) { | ||
613 | cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND); | ||
614 | cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */ | ||
615 | bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg); | ||
616 | bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1); | ||
617 | } else { | ||
618 | cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND); | ||
619 | cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */ | ||
620 | bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg); | ||
621 | bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4); | ||
622 | } | ||
623 | |||
624 | /* Enable lmac */ | ||
625 | bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, | ||
626 | CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN); | ||
627 | |||
628 | /* Restore default cfg, in case low level firmware changed it */ | ||
629 | bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03); | ||
630 | |||
631 | if ((bgx->lmac_type != BGX_MODE_XFI) && | ||
632 | (bgx->lmac_type != BGX_MODE_XLAUI) && | ||
633 | (bgx->lmac_type != BGX_MODE_40G_KR) && | ||
634 | (bgx->lmac_type != BGX_MODE_10G_KR)) { | ||
635 | if (!lmac->phydev) | ||
636 | return -ENODEV; | ||
637 | |||
638 | lmac->phydev->dev_flags = 0; | ||
639 | |||
640 | if (phy_connect_direct(&lmac->netdev, lmac->phydev, | ||
641 | bgx_lmac_handler, | ||
642 | PHY_INTERFACE_MODE_SGMII)) | ||
643 | return -ENODEV; | ||
644 | |||
645 | phy_start_aneg(lmac->phydev); | ||
646 | } else { | ||
647 | lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND | | ||
648 | WQ_MEM_RECLAIM, 1); | ||
649 | if (!lmac->check_link) | ||
650 | return -ENOMEM; | ||
651 | INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link); | ||
652 | queue_delayed_work(lmac->check_link, &lmac->dwork, 0); | ||
653 | } | ||
654 | |||
655 | return 0; | ||
656 | } | ||
657 | |||
658 | void bgx_lmac_disable(struct bgx *bgx, u8 lmacid) | ||
659 | { | ||
660 | struct lmac *lmac; | ||
661 | u64 cmrx_cfg; | ||
662 | |||
663 | lmac = &bgx->lmac[lmacid]; | ||
664 | if (lmac->check_link) { | ||
665 | /* Destroy work queue */ | ||
666 | cancel_delayed_work(&lmac->dwork); | ||
667 | flush_workqueue(lmac->check_link); | ||
668 | destroy_workqueue(lmac->check_link); | ||
669 | } | ||
670 | |||
671 | cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); | ||
672 | cmrx_cfg &= ~(1 << 15); | ||
673 | bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg); | ||
674 | bgx_flush_dmac_addrs(bgx, lmacid); | ||
675 | |||
676 | if (lmac->phydev) | ||
677 | phy_disconnect(lmac->phydev); | ||
678 | |||
679 | lmac->phydev = NULL; | ||
680 | } | ||
681 | |||
682 | static void bgx_set_num_ports(struct bgx *bgx) | ||
683 | { | ||
684 | u64 lmac_count; | ||
685 | |||
686 | switch (bgx->qlm_mode) { | ||
687 | case QLM_MODE_SGMII: | ||
688 | bgx->lmac_count = 4; | ||
689 | bgx->lmac_type = BGX_MODE_SGMII; | ||
690 | bgx->lane_to_sds = 0; | ||
691 | break; | ||
692 | case QLM_MODE_XAUI_1X4: | ||
693 | bgx->lmac_count = 1; | ||
694 | bgx->lmac_type = BGX_MODE_XAUI; | ||
695 | bgx->lane_to_sds = 0xE4; | ||
696 | break; | ||
697 | case QLM_MODE_RXAUI_2X2: | ||
698 | bgx->lmac_count = 2; | ||
699 | bgx->lmac_type = BGX_MODE_RXAUI; | ||
700 | bgx->lane_to_sds = 0xE4; | ||
701 | break; | ||
702 | case QLM_MODE_XFI_4X1: | ||
703 | bgx->lmac_count = 4; | ||
704 | bgx->lmac_type = BGX_MODE_XFI; | ||
705 | bgx->lane_to_sds = 0; | ||
706 | break; | ||
707 | case QLM_MODE_XLAUI_1X4: | ||
708 | bgx->lmac_count = 1; | ||
709 | bgx->lmac_type = BGX_MODE_XLAUI; | ||
710 | bgx->lane_to_sds = 0xE4; | ||
711 | break; | ||
712 | case QLM_MODE_10G_KR_4X1: | ||
713 | bgx->lmac_count = 4; | ||
714 | bgx->lmac_type = BGX_MODE_10G_KR; | ||
715 | bgx->lane_to_sds = 0; | ||
716 | bgx->use_training = 1; | ||
717 | break; | ||
718 | case QLM_MODE_40G_KR4_1X4: | ||
719 | bgx->lmac_count = 1; | ||
720 | bgx->lmac_type = BGX_MODE_40G_KR; | ||
721 | bgx->lane_to_sds = 0xE4; | ||
722 | bgx->use_training = 1; | ||
723 | break; | ||
724 | default: | ||
725 | bgx->lmac_count = 0; | ||
726 | break; | ||
727 | } | ||
728 | |||
729 | /* Check if low level firmware has programmed LMAC count | ||
730 | * based on board type; if so, use that value, otherwise | ||
731 | * keep the default static values | ||
732 | */ | ||
733 | lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7; | ||
734 | if (lmac_count != 4) | ||
735 | bgx->lmac_count = lmac_count; | ||
736 | } | ||
737 | |||
738 | static void bgx_init_hw(struct bgx *bgx) | ||
739 | { | ||
740 | int i; | ||
741 | |||
742 | bgx_set_num_ports(bgx); | ||
743 | |||
744 | bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP); | ||
745 | if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS)) | ||
746 | dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id); | ||
747 | |||
748 | /* Set lmac type and lane2serdes mapping */ | ||
749 | for (i = 0; i < bgx->lmac_count; i++) { | ||
750 | if (bgx->lmac_type == BGX_MODE_RXAUI) { | ||
751 | if (i) | ||
752 | bgx->lane_to_sds = 0x0e; | ||
753 | else | ||
754 | bgx->lane_to_sds = 0x04; | ||
755 | bgx_reg_write(bgx, i, BGX_CMRX_CFG, | ||
756 | (bgx->lmac_type << 8) | bgx->lane_to_sds); | ||
757 | continue; | ||
758 | } | ||
759 | bgx_reg_write(bgx, i, BGX_CMRX_CFG, | ||
760 | (bgx->lmac_type << 8) | (bgx->lane_to_sds + i)); | ||
761 | bgx->lmac[i].lmacid_bd = lmac_count; | ||
762 | lmac_count++; | ||
763 | } | ||
764 | |||
765 | bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count); | ||
766 | bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count); | ||
767 | |||
768 | /* Set the backpressure AND mask */ | ||
769 | for (i = 0; i < bgx->lmac_count; i++) | ||
770 | bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND, | ||
771 | ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) << | ||
772 | (i * MAX_BGX_CHANS_PER_LMAC)); | ||
773 | |||
774 | /* Disable all MAC filtering */ | ||
775 | for (i = 0; i < RX_DMAC_COUNT; i++) | ||
776 | bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00); | ||
777 | |||
778 | /* Disable MAC steering (NCSI traffic) */ | ||
779 | for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++) | ||
780 | bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00); | ||
781 | } | ||
782 | |||
783 | static void bgx_get_qlm_mode(struct bgx *bgx) | ||
784 | { | ||
785 | struct device *dev = &bgx->pdev->dev; | ||
786 | int lmac_type; | ||
787 | int train_en; | ||
788 | |||
789 | /* Read LMAC0 type to figure out QLM mode | ||
790 | * This is configured by low level firmware | ||
791 | */ | ||
792 | lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG); | ||
793 | lmac_type = (lmac_type >> 8) & 0x07; | ||
794 | |||
795 | train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) & | ||
796 | SPU_PMD_CRTL_TRAIN_EN; | ||
797 | |||
798 | switch (lmac_type) { | ||
799 | case BGX_MODE_SGMII: | ||
800 | bgx->qlm_mode = QLM_MODE_SGMII; | ||
801 | dev_info(dev, "BGX%d QLM mode: SGMII\n", bgx->bgx_id); | ||
802 | break; | ||
803 | case BGX_MODE_XAUI: | ||
804 | bgx->qlm_mode = QLM_MODE_XAUI_1X4; | ||
805 | dev_info(dev, "BGX%d QLM mode: XAUI\n", bgx->bgx_id); | ||
806 | break; | ||
807 | case BGX_MODE_RXAUI: | ||
808 | bgx->qlm_mode = QLM_MODE_RXAUI_2X2; | ||
809 | dev_info(dev, "BGX%d QLM mode: RXAUI\n", bgx->bgx_id); | ||
810 | break; | ||
811 | case BGX_MODE_XFI: | ||
812 | if (!train_en) { | ||
813 | bgx->qlm_mode = QLM_MODE_XFI_4X1; | ||
814 | dev_info(dev, "BGX%d QLM mode: XFI\n", bgx->bgx_id); | ||
815 | } else { | ||
816 | bgx->qlm_mode = QLM_MODE_10G_KR_4X1; | ||
817 | dev_info(dev, "BGX%d QLM mode: 10G_KR\n", bgx->bgx_id); | ||
818 | } | ||
819 | break; | ||
820 | case BGX_MODE_XLAUI: | ||
821 | if (!train_en) { | ||
822 | bgx->qlm_mode = QLM_MODE_XLAUI_1X4; | ||
823 | dev_info(dev, "BGX%d QLM mode: XLAUI\n", bgx->bgx_id); | ||
824 | } else { | ||
825 | bgx->qlm_mode = QLM_MODE_40G_KR4_1X4; | ||
826 | dev_info(dev, "BGX%d QLM mode: 40G_KR4\n", bgx->bgx_id); | ||
827 | } | ||
828 | break; | ||
829 | default: | ||
830 | bgx->qlm_mode = QLM_MODE_SGMII; | ||
831 | dev_info(dev, "BGX%d QLM default mode: SGMII\n", bgx->bgx_id); | ||
832 | } | ||
833 | } | ||
834 | |||
835 | static void bgx_init_of(struct bgx *bgx, struct device_node *np) | ||
836 | { | ||
837 | struct device_node *np_child; | ||
838 | u8 lmac = 0; | ||
839 | |||
840 | for_each_child_of_node(np, np_child) { | ||
841 | struct device_node *phy_np; | ||
842 | const char *mac; | ||
843 | |||
844 | phy_np = of_parse_phandle(np_child, "phy-handle", 0); | ||
845 | if (phy_np) | ||
846 | bgx->lmac[lmac].phydev = of_phy_find_device(phy_np); | ||
847 | |||
848 | mac = of_get_mac_address(np_child); | ||
849 | if (mac) | ||
850 | ether_addr_copy(bgx->lmac[lmac].mac, mac); | ||
851 | |||
852 | SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev); | ||
853 | bgx->lmac[lmac].lmacid = lmac; | ||
854 | lmac++; | ||
855 | if (lmac == MAX_LMAC_PER_BGX) | ||
856 | break; | ||
857 | } | ||
858 | } | ||
859 | |||
860 | static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
861 | { | ||
862 | int err; | ||
863 | struct device *dev = &pdev->dev; | ||
864 | struct bgx *bgx = NULL; | ||
865 | struct device_node *np; | ||
866 | char bgx_sel[5]; | ||
867 | u8 lmac; | ||
868 | |||
869 | bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL); | ||
870 | if (!bgx) | ||
871 | return -ENOMEM; | ||
872 | bgx->pdev = pdev; | ||
873 | |||
874 | pci_set_drvdata(pdev, bgx); | ||
875 | |||
876 | err = pci_enable_device(pdev); | ||
877 | if (err) { | ||
878 | dev_err(dev, "Failed to enable PCI device\n"); | ||
879 | pci_set_drvdata(pdev, NULL); | ||
880 | return err; | ||
881 | } | ||
882 | |||
883 | err = pci_request_regions(pdev, DRV_NAME); | ||
884 | if (err) { | ||
885 | dev_err(dev, "PCI request regions failed 0x%x\n", err); | ||
886 | goto err_disable_device; | ||
887 | } | ||
888 | |||
889 | /* MAP configuration registers */ | ||
890 | bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); | ||
891 | if (!bgx->reg_base) { | ||
892 | dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n"); | ||
893 | err = -ENOMEM; | ||
894 | goto err_release_regions; | ||
895 | } | ||
896 | bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1; | ||
897 | bgx->bgx_id += NODE_ID(pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM)) | ||
898 | * MAX_BGX_PER_CN88XX; | ||
899 | bgx_vnic[bgx->bgx_id] = bgx; | ||
900 | bgx_get_qlm_mode(bgx); | ||
901 | |||
902 | snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id); | ||
903 | np = of_find_node_by_name(NULL, bgx_sel); | ||
904 | if (np) | ||
905 | bgx_init_of(bgx, np); | ||
906 | |||
907 | bgx_init_hw(bgx); | ||
908 | |||
909 | /* Enable all LMACs */ | ||
910 | for (lmac = 0; lmac < bgx->lmac_count; lmac++) { | ||
911 | err = bgx_lmac_enable(bgx, lmac); | ||
912 | if (err) { | ||
913 | dev_err(dev, "BGX%d failed to enable lmac%d\n", | ||
914 | bgx->bgx_id, lmac); | ||
915 | goto err_enable; | ||
916 | } | ||
917 | } | ||
918 | |||
919 | return 0; | ||
920 | |||
921 | err_enable: | ||
922 | bgx_vnic[bgx->bgx_id] = NULL; | ||
923 | err_release_regions: | ||
924 | pci_release_regions(pdev); | ||
925 | err_disable_device: | ||
926 | pci_disable_device(pdev); | ||
927 | pci_set_drvdata(pdev, NULL); | ||
928 | return err; | ||
929 | } | ||
930 | |||
931 | static void bgx_remove(struct pci_dev *pdev) | ||
932 | { | ||
933 | struct bgx *bgx = pci_get_drvdata(pdev); | ||
934 | u8 lmac; | ||
935 | |||
936 | /* Disable all LMACs */ | ||
937 | for (lmac = 0; lmac < bgx->lmac_count; lmac++) | ||
938 | bgx_lmac_disable(bgx, lmac); | ||
939 | |||
940 | bgx_vnic[bgx->bgx_id] = NULL; | ||
941 | pci_release_regions(pdev); | ||
942 | pci_disable_device(pdev); | ||
943 | pci_set_drvdata(pdev, NULL); | ||
944 | } | ||
945 | |||
946 | static struct pci_driver bgx_driver = { | ||
947 | .name = DRV_NAME, | ||
948 | .id_table = bgx_id_table, | ||
949 | .probe = bgx_probe, | ||
950 | .remove = bgx_remove, | ||
951 | }; | ||
952 | |||
953 | static int __init bgx_init_module(void) | ||
954 | { | ||
955 | pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); | ||
956 | |||
957 | return pci_register_driver(&bgx_driver); | ||
958 | } | ||
959 | |||
960 | static void __exit bgx_cleanup_module(void) | ||
961 | { | ||
962 | pci_unregister_driver(&bgx_driver); | ||
963 | } | ||
964 | |||
965 | module_init(bgx_init_module); | ||
966 | module_exit(bgx_cleanup_module); | ||
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h new file mode 100644 index 000000000000..9d91ce44f8d7 --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h | |||
@@ -0,0 +1,223 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2015 Cavium, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of version 2 of the GNU General Public License | ||
6 | * as published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #ifndef THUNDER_BGX_H | ||
10 | #define THUNDER_BGX_H | ||
11 | |||
12 | #define MAX_BGX_THUNDER 8 /* Max 4 nodes, 2 per node */ | ||
13 | #define MAX_BGX_PER_CN88XX 2 | ||
14 | #define MAX_LMAC_PER_BGX 4 | ||
15 | #define MAX_BGX_CHANS_PER_LMAC 16 | ||
16 | #define MAX_DMAC_PER_LMAC 8 | ||
17 | #define MAX_FRAME_SIZE 9216 | ||
18 | |||
19 | #define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2 | ||
20 | |||
21 | #define MAX_LMAC (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX) | ||
22 | |||
23 | #define NODE_ID_MASK 0x300000000000 | ||
24 | #define NODE_ID(x) ((x & NODE_ID_MASK) >> 44) | ||
25 | |||
26 | /* Registers */ | ||
27 | #define BGX_CMRX_CFG 0x00 | ||
28 | #define CMR_PKT_TX_EN BIT_ULL(13) | ||
29 | #define CMR_PKT_RX_EN BIT_ULL(14) | ||
30 | #define CMR_EN BIT_ULL(15) | ||
31 | #define BGX_CMR_GLOBAL_CFG 0x08 | ||
32 | #define CMR_GLOBAL_CFG_FCS_STRIP BIT_ULL(6) | ||
33 | #define BGX_CMRX_RX_ID_MAP 0x60 | ||
34 | #define BGX_CMRX_RX_STAT0 0x70 | ||
35 | #define BGX_CMRX_RX_STAT1 0x78 | ||
36 | #define BGX_CMRX_RX_STAT2 0x80 | ||
37 | #define BGX_CMRX_RX_STAT3 0x88 | ||
38 | #define BGX_CMRX_RX_STAT4 0x90 | ||
39 | #define BGX_CMRX_RX_STAT5 0x98 | ||
40 | #define BGX_CMRX_RX_STAT6 0xA0 | ||
41 | #define BGX_CMRX_RX_STAT7 0xA8 | ||
42 | #define BGX_CMRX_RX_STAT8 0xB0 | ||
43 | #define BGX_CMRX_RX_STAT9 0xB8 | ||
44 | #define BGX_CMRX_RX_STAT10 0xC0 | ||
45 | #define BGX_CMRX_RX_BP_DROP 0xC8 | ||
46 | #define BGX_CMRX_RX_DMAC_CTL 0x0E8 | ||
47 | #define BGX_CMR_RX_DMACX_CAM 0x200 | ||
48 | #define RX_DMACX_CAM_EN BIT_ULL(48) | ||
49 | #define RX_DMACX_CAM_LMACID(x) (x << 49) | ||
50 | #define RX_DMAC_COUNT 32 | ||
51 | #define BGX_CMR_RX_STREERING 0x300 | ||
52 | #define RX_TRAFFIC_STEER_RULE_COUNT 8 | ||
53 | #define BGX_CMR_CHAN_MSK_AND 0x450 | ||
54 | #define BGX_CMR_BIST_STATUS 0x460 | ||
55 | #define BGX_CMR_RX_LMACS 0x468 | ||
56 | #define BGX_CMRX_TX_STAT0 0x600 | ||
57 | #define BGX_CMRX_TX_STAT1 0x608 | ||
58 | #define BGX_CMRX_TX_STAT2 0x610 | ||
59 | #define BGX_CMRX_TX_STAT3 0x618 | ||
60 | #define BGX_CMRX_TX_STAT4 0x620 | ||
61 | #define BGX_CMRX_TX_STAT5 0x628 | ||
62 | #define BGX_CMRX_TX_STAT6 0x630 | ||
63 | #define BGX_CMRX_TX_STAT7 0x638 | ||
64 | #define BGX_CMRX_TX_STAT8 0x640 | ||
65 | #define BGX_CMRX_TX_STAT9 0x648 | ||
66 | #define BGX_CMRX_TX_STAT10 0x650 | ||
67 | #define BGX_CMRX_TX_STAT11 0x658 | ||
68 | #define BGX_CMRX_TX_STAT12 0x660 | ||
69 | #define BGX_CMRX_TX_STAT13 0x668 | ||
70 | #define BGX_CMRX_TX_STAT14 0x670 | ||
71 | #define BGX_CMRX_TX_STAT15 0x678 | ||
72 | #define BGX_CMRX_TX_STAT16 0x680 | ||
73 | #define BGX_CMRX_TX_STAT17 0x688 | ||
74 | #define BGX_CMR_TX_LMACS 0x1000 | ||
75 | |||
76 | #define BGX_SPUX_CONTROL1 0x10000 | ||
77 | #define SPU_CTL_LOW_POWER BIT_ULL(11) | ||
78 | #define SPU_CTL_RESET BIT_ULL(15) | ||
79 | #define BGX_SPUX_STATUS1 0x10008 | ||
80 | #define SPU_STATUS1_RCV_LNK BIT_ULL(2) | ||
81 | #define BGX_SPUX_STATUS2 0x10020 | ||
82 | #define SPU_STATUS2_RCVFLT BIT_ULL(10) | ||
83 | #define BGX_SPUX_BX_STATUS 0x10028 | ||
84 | #define SPU_BX_STATUS_RX_ALIGN BIT_ULL(12) | ||
85 | #define BGX_SPUX_BR_STATUS1 0x10030 | ||
86 | #define SPU_BR_STATUS_BLK_LOCK BIT_ULL(0) | ||
87 | #define SPU_BR_STATUS_RCV_LNK BIT_ULL(12) | ||
88 | #define BGX_SPUX_BR_PMD_CRTL 0x10068 | ||
89 | #define SPU_PMD_CRTL_TRAIN_EN BIT_ULL(1) | ||
90 | #define BGX_SPUX_BR_PMD_LP_CUP 0x10078 | ||
91 | #define BGX_SPUX_BR_PMD_LD_CUP 0x10088 | ||
92 | #define BGX_SPUX_BR_PMD_LD_REP 0x10090 | ||
93 | #define BGX_SPUX_FEC_CONTROL 0x100A0 | ||
94 | #define SPU_FEC_CTL_FEC_EN BIT_ULL(0) | ||
95 | #define SPU_FEC_CTL_ERR_EN BIT_ULL(1) | ||
96 | #define BGX_SPUX_AN_CONTROL 0x100C8 | ||
97 | #define SPU_AN_CTL_AN_EN BIT_ULL(12) | ||
98 | #define SPU_AN_CTL_XNP_EN BIT_ULL(13) | ||
99 | #define BGX_SPUX_AN_ADV 0x100D8 | ||
100 | #define BGX_SPUX_MISC_CONTROL 0x10218 | ||
101 | #define SPU_MISC_CTL_INTLV_RDISP BIT_ULL(10) | ||
102 | #define SPU_MISC_CTL_RX_DIS BIT_ULL(12) | ||
103 | #define BGX_SPUX_INT 0x10220 /* +(0..3) << 20 */ | ||
104 | #define BGX_SPUX_INT_W1S 0x10228 | ||
105 | #define BGX_SPUX_INT_ENA_W1C 0x10230 | ||
106 | #define BGX_SPUX_INT_ENA_W1S 0x10238 | ||
107 | #define BGX_SPU_DBG_CONTROL 0x10300 | ||
108 | #define SPU_DBG_CTL_AN_ARB_LINK_CHK_EN BIT_ULL(18) | ||
109 | #define SPU_DBG_CTL_AN_NONCE_MCT_DIS BIT_ULL(29) | ||
110 | |||
111 | #define BGX_SMUX_RX_INT 0x20000 | ||
112 | #define BGX_SMUX_RX_JABBER 0x20030 | ||
113 | #define BGX_SMUX_RX_CTL 0x20048 | ||
114 | #define SMU_RX_CTL_STATUS (3ull << 0) | ||
115 | #define BGX_SMUX_TX_APPEND 0x20100 | ||
116 | #define SMU_TX_APPEND_FCS_D BIT_ULL(2) | ||
117 | #define BGX_SMUX_TX_MIN_PKT 0x20118 | ||
118 | #define BGX_SMUX_TX_INT 0x20140 | ||
119 | #define BGX_SMUX_TX_CTL 0x20178 | ||
120 | #define SMU_TX_CTL_DIC_EN BIT_ULL(0) | ||
121 | #define SMU_TX_CTL_UNI_EN BIT_ULL(1) | ||
122 | #define SMU_TX_CTL_LNK_STATUS (3ull << 4) | ||
123 | #define BGX_SMUX_TX_THRESH 0x20180 | ||
124 | #define BGX_SMUX_CTL 0x20200 | ||
125 | #define SMU_CTL_RX_IDLE BIT_ULL(0) | ||
126 | #define SMU_CTL_TX_IDLE BIT_ULL(1) | ||
127 | |||
128 | #define BGX_GMP_PCS_MRX_CTL 0x30000 | ||
129 | #define PCS_MRX_CTL_RST_AN BIT_ULL(9) | ||
130 | #define PCS_MRX_CTL_PWR_DN BIT_ULL(11) | ||
131 | #define PCS_MRX_CTL_AN_EN BIT_ULL(12) | ||
132 | #define PCS_MRX_CTL_RESET BIT_ULL(15) | ||
133 | #define BGX_GMP_PCS_MRX_STATUS 0x30008 | ||
134 | #define PCS_MRX_STATUS_AN_CPT BIT_ULL(5) | ||
135 | #define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020 | ||
136 | #define BGX_GMP_PCS_SGM_AN_ADV 0x30068 | ||
137 | #define BGX_GMP_PCS_MISCX_CTL 0x30078 | ||
138 | #define PCS_MISC_CTL_GMX_ENO BIT_ULL(11) | ||
139 | #define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full | ||
140 | #define BGX_GMP_GMI_PRTX_CFG 0x38020 | ||
141 | #define GMI_PORT_CFG_SPEED BIT_ULL(1) | ||
142 | #define GMI_PORT_CFG_DUPLEX BIT_ULL(2) | ||
143 | #define GMI_PORT_CFG_SLOT_TIME BIT_ULL(3) | ||
144 | #define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8) | ||
145 | #define BGX_GMP_GMI_RXX_JABBER 0x38038 | ||
146 | #define BGX_GMP_GMI_TXX_THRESH 0x38210 | ||
147 | #define BGX_GMP_GMI_TXX_APPEND 0x38218 | ||
148 | #define BGX_GMP_GMI_TXX_SLOT 0x38220 | ||
149 | #define BGX_GMP_GMI_TXX_BURST 0x38228 | ||
150 | #define BGX_GMP_GMI_TXX_MIN_PKT 0x38240 | ||
151 | #define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300 | ||
152 | |||
153 | #define BGX_MSIX_VEC_0_29_ADDR 0x400000 /* +(0..29) << 4 */ | ||
154 | #define BGX_MSIX_VEC_0_29_CTL 0x400008 | ||
155 | #define BGX_MSIX_PBA_0 0x4F0000 | ||
156 | |||
157 | /* MSI-X interrupts */ | ||
158 | #define BGX_MSIX_VECTORS 30 | ||
159 | #define BGX_LMAC_VEC_OFFSET 7 | ||
160 | #define BGX_MSIX_VEC_SHIFT 4 | ||
161 | |||
162 | #define CMRX_INT 0 | ||
163 | #define SPUX_INT 1 | ||
164 | #define SMUX_RX_INT 2 | ||
165 | #define SMUX_TX_INT 3 | ||
166 | #define GMPX_PCS_INT 4 | ||
167 | #define GMPX_GMI_RX_INT 5 | ||
168 | #define GMPX_GMI_TX_INT 6 | ||
169 | #define CMR_MEM_INT 28 | ||
170 | #define SPU_MEM_INT 29 | ||
171 | |||
172 | #define LMAC_INTR_LINK_UP BIT(0) | ||
173 | #define LMAC_INTR_LINK_DOWN BIT(1) | ||
174 | |||
175 | /* RX_DMAC_CTL configuration */ | ||
176 | enum MCAST_MODE { | ||
177 | MCAST_MODE_REJECT, | ||
178 | MCAST_MODE_ACCEPT, | ||
179 | MCAST_MODE_CAM_FILTER, | ||
180 | RSVD | ||
181 | }; | ||
182 | |||
183 | #define BCAST_ACCEPT 1 | ||
184 | #define CAM_ACCEPT 1 | ||
185 | |||
186 | void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac); | ||
187 | unsigned bgx_get_map(int node); | ||
188 | int bgx_get_lmac_count(int node, int bgx); | ||
189 | const char *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid); | ||
190 | void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const char *mac); | ||
191 | void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status); | ||
192 | u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx); | ||
193 | u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx); | ||
194 | #define BGX_RX_STATS_COUNT 11 | ||
195 | #define BGX_TX_STATS_COUNT 18 | ||
196 | |||
197 | struct bgx_stats { | ||
198 | u64 rx_stats[BGX_RX_STATS_COUNT]; | ||
199 | u64 tx_stats[BGX_TX_STATS_COUNT]; | ||
200 | }; | ||
201 | |||
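As a usage note, struct bgx_stats above pairs naturally with the per-LMAC counter accessors declared earlier in this header. A minimal sketch follows; it is illustrative only, not part of the patch, and the bgx_fill_stats() helper name is hypothetical.

    /* Hypothetical helper: snapshot all RX/TX hardware counters of one LMAC */
    static void bgx_fill_stats(int node, int bgx_idx, int lmac,
    			   struct bgx_stats *stats)
    {
    	int i;

    	for (i = 0; i < BGX_RX_STATS_COUNT; i++)
    		stats->rx_stats[i] = bgx_get_rx_stats(node, bgx_idx, lmac, i);

    	for (i = 0; i < BGX_TX_STATS_COUNT; i++)
    		stats->tx_stats[i] = bgx_get_tx_stats(node, bgx_idx, lmac, i);
    }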
202 | enum LMAC_TYPE { | ||
203 | BGX_MODE_SGMII = 0, /* 1 lane, 1.250 Gbaud */ | ||
204 | BGX_MODE_XAUI = 1, /* 4 lanes, 3.125 Gbaud */ | ||
205 | BGX_MODE_DXAUI = 1, /* 4 lanes, 6.250 Gbaud */ | ||
206 | BGX_MODE_RXAUI = 2, /* 2 lanes, 6.250 Gbaud */ | ||
207 | BGX_MODE_XFI = 3, /* 1 lane, 10.3125 Gbaud */ | ||
208 | BGX_MODE_XLAUI = 4, /* 4 lanes, 10.3125 Gbaud */ | ||
209 | BGX_MODE_10G_KR = 3,/* 1 lane, 10.3125 Gbaud */ | ||
210 | BGX_MODE_40G_KR = 4,/* 4 lanes, 10.3125 Gbaud */ | ||
211 | }; | ||
212 | |||
213 | enum qlm_mode { | ||
214 | QLM_MODE_SGMII, /* SGMII, each lane independent */ | ||
215 | QLM_MODE_XAUI_1X4, /* 1 XAUI or DXAUI, 4 lanes */ | ||
216 | QLM_MODE_RXAUI_2X2, /* 2 RXAUI, 2 lanes each */ | ||
217 | QLM_MODE_XFI_4X1, /* 4 XFI, 1 lane each */ | ||
218 | QLM_MODE_XLAUI_1X4, /* 1 XLAUI, 4 lanes each */ | ||
219 | QLM_MODE_10G_KR_4X1, /* 4 10GBASE-KR, 1 lane each */ | ||
220 | QLM_MODE_40G_KR4_1X4, /* 1 40GBASE-KR4, 4 lanes each */ | ||
221 | }; | ||
222 | |||
223 | #endif /* THUNDER_BGX_H */ | ||
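For orientation, the sketch below shows how a consumer such as the nicvf PF driver might walk the BGX/LMAC topology through this interface. It is illustrative only, not part of the patch: the probe_node_lmacs() helper name is hypothetical, and struct bgx_link_status is assumed to be provided by nic.h, as used by bgx_get_lmac_link_state() in thunder_bgx.c above.

    #include <linux/kernel.h>
    #include <linux/bitops.h>
    #include "nic.h"		/* assumed to define struct bgx_link_status */
    #include "thunder_bgx.h"

    /* Hypothetical helper: report the link state of every LMAC on one node */
    static void probe_node_lmacs(int node)
    {
    	unsigned int bgx_map = bgx_get_map(node);
    	int bgx_idx, lmac;

    	for (bgx_idx = 0; bgx_idx < MAX_BGX_PER_CN88XX; bgx_idx++) {
    		if (!(bgx_map & BIT(bgx_idx)))
    			continue;

    		for (lmac = 0; lmac < bgx_get_lmac_count(node, bgx_idx); lmac++) {
    			struct bgx_link_status link;

    			bgx_get_lmac_link_state(node, bgx_idx, lmac, &link);
    			pr_info("bgx%d lmac%d: link %s, %u Mbps\n",
    				bgx_idx, lmac,
    				link.link_up ? "up" : "down", link.speed);
    		}
    	}
    }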
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 2f7b9a40f627..2972c7f3aa1d 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -2329,6 +2329,8 @@ | |||
2329 | #define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea | 2329 | #define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea |
2330 | #define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb | 2330 | #define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb |
2331 | 2331 | ||
2332 | #define PCI_VENDOR_ID_CAVIUM 0x177d | ||
2333 | |||
2332 | #define PCI_VENDOR_ID_BELKIN 0x1799 | 2334 | #define PCI_VENDOR_ID_BELKIN 0x1799 |
2333 | #define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f | 2335 | #define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f |
2334 | 2336 | ||