Diffstat (limited to 'drivers/net/cxgb4')
-rw-r--r--  drivers/net/cxgb4/Makefile      |    7
-rw-r--r--  drivers/net/cxgb4/cxgb4.h       |  741
-rw-r--r--  drivers/net/cxgb4/cxgb4_main.c  | 3388
-rw-r--r--  drivers/net/cxgb4/cxgb4_uld.h   |  239
-rw-r--r--  drivers/net/cxgb4/l2t.c         |  624
-rw-r--r--  drivers/net/cxgb4/l2t.h         |  110
-rw-r--r--  drivers/net/cxgb4/sge.c         | 2431
-rw-r--r--  drivers/net/cxgb4/t4_hw.c       | 3131
-rw-r--r--  drivers/net/cxgb4/t4_hw.h       |  100
-rw-r--r--  drivers/net/cxgb4/t4_msg.h      |  664
-rw-r--r--  drivers/net/cxgb4/t4_regs.h     |  878
-rw-r--r--  drivers/net/cxgb4/t4fw_api.h    | 1580
12 files changed, 13893 insertions(+), 0 deletions(-)
diff --git a/drivers/net/cxgb4/Makefile b/drivers/net/cxgb4/Makefile
new file mode 100644
index 000000000000..498667487f52
--- /dev/null
+++ b/drivers/net/cxgb4/Makefile
@@ -0,0 +1,7 @@
#
# Chelsio T4 driver
#

obj-$(CONFIG_CHELSIO_T4) += cxgb4.o

cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
new file mode 100644
index 000000000000..3d8ff4889b56
--- /dev/null
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -0,0 +1,741 @@
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __CXGB4_H__
#define __CXGB4_H__

#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <asm/io.h>
#include "cxgb4_uld.h"
#include "t4_hw.h"

#define FW_VERSION_MAJOR 1
#define FW_VERSION_MINOR 1
#define FW_VERSION_MICRO 0

enum {
        MAX_NPORTS = 4,     /* max # of ports */
        SERNUM_LEN = 16,    /* Serial # length */
        EC_LEN     = 16,    /* E/C length */
        ID_LEN     = 16,    /* ID length */
};

enum {
        MEM_EDC0,
        MEM_EDC1,
        MEM_MC
};

enum dev_master {
        MASTER_CANT,
        MASTER_MAY,
        MASTER_MUST
};

enum dev_state {
        DEV_STATE_UNINIT,
        DEV_STATE_INIT,
        DEV_STATE_ERR
};

enum {
        PAUSE_RX      = 1 << 0,
        PAUSE_TX      = 1 << 1,
        PAUSE_AUTONEG = 1 << 2
};

struct port_stats {
        u64 tx_octets;            /* total # of octets in good frames */
        u64 tx_frames;            /* all good frames */
        u64 tx_bcast_frames;      /* all broadcast frames */
        u64 tx_mcast_frames;      /* all multicast frames */
        u64 tx_ucast_frames;      /* all unicast frames */
        u64 tx_error_frames;      /* all error frames */

        u64 tx_frames_64;         /* # of Tx frames in a particular range */
        u64 tx_frames_65_127;
        u64 tx_frames_128_255;
        u64 tx_frames_256_511;
        u64 tx_frames_512_1023;
        u64 tx_frames_1024_1518;
        u64 tx_frames_1519_max;

        u64 tx_drop;              /* # of dropped Tx frames */
        u64 tx_pause;             /* # of transmitted pause frames */
        u64 tx_ppp0;              /* # of transmitted PPP prio 0 frames */
        u64 tx_ppp1;              /* # of transmitted PPP prio 1 frames */
        u64 tx_ppp2;              /* # of transmitted PPP prio 2 frames */
        u64 tx_ppp3;              /* # of transmitted PPP prio 3 frames */
        u64 tx_ppp4;              /* # of transmitted PPP prio 4 frames */
        u64 tx_ppp5;              /* # of transmitted PPP prio 5 frames */
        u64 tx_ppp6;              /* # of transmitted PPP prio 6 frames */
        u64 tx_ppp7;              /* # of transmitted PPP prio 7 frames */

        u64 rx_octets;            /* total # of octets in good frames */
        u64 rx_frames;            /* all good frames */
        u64 rx_bcast_frames;      /* all broadcast frames */
        u64 rx_mcast_frames;      /* all multicast frames */
        u64 rx_ucast_frames;      /* all unicast frames */
        u64 rx_too_long;          /* # of frames exceeding MTU */
        u64 rx_jabber;            /* # of jabber frames */
        u64 rx_fcs_err;           /* # of received frames with bad FCS */
        u64 rx_len_err;           /* # of received frames with length error */
        u64 rx_symbol_err;        /* symbol errors */
        u64 rx_runt;              /* # of short frames */

        u64 rx_frames_64;         /* # of Rx frames in a particular range */
        u64 rx_frames_65_127;
        u64 rx_frames_128_255;
        u64 rx_frames_256_511;
        u64 rx_frames_512_1023;
        u64 rx_frames_1024_1518;
        u64 rx_frames_1519_max;

        u64 rx_pause;             /* # of received pause frames */
        u64 rx_ppp0;              /* # of received PPP prio 0 frames */
        u64 rx_ppp1;              /* # of received PPP prio 1 frames */
        u64 rx_ppp2;              /* # of received PPP prio 2 frames */
        u64 rx_ppp3;              /* # of received PPP prio 3 frames */
        u64 rx_ppp4;              /* # of received PPP prio 4 frames */
        u64 rx_ppp5;              /* # of received PPP prio 5 frames */
        u64 rx_ppp6;              /* # of received PPP prio 6 frames */
        u64 rx_ppp7;              /* # of received PPP prio 7 frames */

        u64 rx_ovflow0;           /* drops due to buffer-group 0 overflows */
        u64 rx_ovflow1;           /* drops due to buffer-group 1 overflows */
        u64 rx_ovflow2;           /* drops due to buffer-group 2 overflows */
        u64 rx_ovflow3;           /* drops due to buffer-group 3 overflows */
        u64 rx_trunc0;            /* buffer-group 0 truncated packets */
        u64 rx_trunc1;            /* buffer-group 1 truncated packets */
        u64 rx_trunc2;            /* buffer-group 2 truncated packets */
        u64 rx_trunc3;            /* buffer-group 3 truncated packets */
};

struct lb_port_stats {
        u64 octets;
        u64 frames;
        u64 bcast_frames;
        u64 mcast_frames;
        u64 ucast_frames;
        u64 error_frames;

        u64 frames_64;
        u64 frames_65_127;
        u64 frames_128_255;
        u64 frames_256_511;
        u64 frames_512_1023;
        u64 frames_1024_1518;
        u64 frames_1519_max;

        u64 drop;

        u64 ovflow0;
        u64 ovflow1;
        u64 ovflow2;
        u64 ovflow3;
        u64 trunc0;
        u64 trunc1;
        u64 trunc2;
        u64 trunc3;
};

struct tp_tcp_stats {
        u32 tcpOutRsts;
        u64 tcpInSegs;
        u64 tcpOutSegs;
        u64 tcpRetransSegs;
};

struct tp_err_stats {
        u32 macInErrs[4];
        u32 hdrInErrs[4];
        u32 tcpInErrs[4];
        u32 tnlCongDrops[4];
        u32 ofldChanDrops[4];
        u32 tnlTxDrops[4];
        u32 ofldVlanDrops[4];
        u32 tcp6InErrs[4];
        u32 ofldNoNeigh;
        u32 ofldCongDefer;
};

struct tp_params {
        unsigned int ntxchan;        /* # of Tx channels */
        unsigned int tre;            /* log2 of core clocks per TP tick */
};

struct vpd_params {
        unsigned int cclk;
        u8 ec[EC_LEN + 1];
        u8 sn[SERNUM_LEN + 1];
        u8 id[ID_LEN + 1];
};

struct pci_params {
        unsigned char speed;
        unsigned char width;
};

struct adapter_params {
        struct tp_params tp;
        struct vpd_params vpd;
        struct pci_params pci;

        unsigned int fw_vers;
        unsigned int tp_vers;
        u8 api_vers[7];

        unsigned short mtus[NMTUS];
        unsigned short a_wnd[NCCTRL_WIN];
        unsigned short b_wnd[NCCTRL_WIN];

        unsigned char nports;        /* # of ethernet ports */
        unsigned char portvec;
        unsigned char rev;           /* chip revision */
        unsigned char offload;

        unsigned int ofldq_wr_cred;
};

struct trace_params {
        u32 data[TRACE_LEN / 4];
        u32 mask[TRACE_LEN / 4];
        unsigned short snap_len;
        unsigned short min_len;
        unsigned char skip_ofst;
        unsigned char skip_len;
        unsigned char invert;
        unsigned char port;
};

struct link_config {
        unsigned short supported;        /* link capabilities */
        unsigned short advertising;      /* advertised capabilities */
        unsigned short requested_speed;  /* speed user has requested */
        unsigned short speed;            /* actual link speed */
        unsigned char requested_fc;      /* flow control user has requested */
        unsigned char fc;                /* actual link flow control */
        unsigned char autoneg;           /* autonegotiating? */
        unsigned char link_ok;           /* link up? */
};

#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)

enum {
        MAX_ETH_QSETS = 32,           /* # of Ethernet Tx/Rx queue sets */
        MAX_OFLD_QSETS = 16,          /* # of offload Tx/Rx queue sets */
        MAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */
        MAX_RDMA_QUEUES = NCHAN,      /* # of streaming RDMA Rx queues */
};

enum {
        MAX_EGRQ = 128,         /* max # of egress queues, including FLs */
        MAX_INGQ = 64           /* max # of interrupt-capable ingress queues */
};

struct adapter;
struct vlan_group;
struct sge_rspq;

struct port_info {
        struct adapter *adapter;
        struct vlan_group *vlan_grp;
        u16 viid;
        s16 xact_addr_filt;           /* index of exact MAC address filter */
        u16 rss_size;                 /* size of VI's RSS table slice */
        s8 mdio_addr;
        u8 port_type;
        u8 mod_type;
        u8 port_id;
        u8 tx_chan;
        u8 lport;                     /* associated offload logical port */
        u8 rx_offload;                /* CSO, etc */
        u8 nqsets;                    /* # of qsets */
        u8 first_qset;                /* index of first qset */
        struct link_config link_cfg;
};

/* port_info.rx_offload flags */
enum {
        RX_CSO = 1 << 0,
};

struct dentry;
struct work_struct;

enum {                                 /* adapter flags */
        FULL_INIT_DONE = (1 << 0),
        USING_MSI      = (1 << 1),
        USING_MSIX     = (1 << 2),
        QUEUES_BOUND   = (1 << 3),
        FW_OK          = (1 << 4),
};

struct rx_sw_desc;

struct sge_fl {                     /* SGE free-buffer queue state */
        unsigned int avail;         /* # of available Rx buffers */
        unsigned int pend_cred;     /* new buffers since last FL DB ring */
        unsigned int cidx;          /* consumer index */
        unsigned int pidx;          /* producer index */
        unsigned long alloc_failed; /* # of times buffer allocation failed */
        unsigned long large_alloc_failed;
        unsigned long starving;
        /* RO fields */
        unsigned int cntxt_id;      /* SGE context id for the free list */
        unsigned int size;          /* capacity of free list */
        struct rx_sw_desc *sdesc;   /* address of SW Rx descriptor ring */
        __be64 *desc;               /* address of HW Rx descriptor ring */
        dma_addr_t addr;            /* bus address of HW ring start */
};

/* A packet gather list */
struct pkt_gl {
        skb_frag_t frags[MAX_SKB_FRAGS];
        void *va;                   /* virtual address of first byte */
        unsigned int nfrags;        /* # of fragments */
        unsigned int tot_len;       /* total length of fragments */
};

typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
                              const struct pkt_gl *gl);

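As an illustrative aside, a minimal sketch of the contract a rspq_handler_t implementation follows (hypothetical handler, not part of the patch): rsp points at the CPL message, and gl is NULL for immediate data, the CXGB4_MSG_AN marker from cxgb4_uld.h for asynchronous notifications, or a real gather list otherwise.

/* Hypothetical sketch, not part of the patch: the response-queue
 * handler contract. */
static int example_rspq_handler(struct sge_rspq *q, const __be64 *rsp,
                                const struct pkt_gl *gl)
{
        if (gl == NULL)
                return 0;       /* message is immediate data in *rsp */
        if (gl == CXGB4_MSG_AN)
                return 0;       /* async notification, no packet payload */
        /* a real packet: gl->va is the first byte, gl->tot_len bytes in
         * all, spread across gl->nfrags page fragments */
        return 0;
}
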
struct sge_rspq {                   /* state for an SGE response queue */
        struct napi_struct napi;
        const __be64 *cur_desc;     /* current descriptor in queue */
        unsigned int cidx;          /* consumer index */
        u8 gen;                     /* current generation bit */
        u8 intr_params;             /* interrupt holdoff parameters */
        u8 next_intr_params;        /* holdoff params for next interrupt */
        u8 pktcnt_idx;              /* interrupt packet threshold */
        u8 uld;                     /* ULD handling this queue */
        u8 idx;                     /* queue index within its group */
        int offset;                 /* offset into current Rx buffer */
        u16 cntxt_id;               /* SGE context id for the response q */
        u16 abs_id;                 /* absolute SGE id for the response q */
        __be64 *desc;               /* address of HW response ring */
        dma_addr_t phys_addr;       /* physical address of the ring */
        unsigned int iqe_len;       /* entry size */
        unsigned int size;          /* capacity of response queue */
        struct adapter *adap;
        struct net_device *netdev;  /* associated net device */
        rspq_handler_t handler;
};

struct sge_eth_stats {              /* Ethernet queue statistics */
        unsigned long pkts;         /* # of ethernet packets */
        unsigned long lro_pkts;     /* # of LRO super packets */
        unsigned long lro_merged;   /* # of wire packets merged by LRO */
        unsigned long rx_cso;       /* # of Rx checksum offloads */
        unsigned long vlan_ex;      /* # of Rx VLAN extractions */
        unsigned long rx_drops;     /* # of packets dropped due to no mem */
};

struct sge_eth_rxq {                /* SW Ethernet Rx queue */
        struct sge_rspq rspq;
        struct sge_fl fl;
        struct sge_eth_stats stats;
} ____cacheline_aligned_in_smp;

struct sge_ofld_stats {             /* offload queue statistics */
        unsigned long pkts;         /* # of packets */
        unsigned long imm;          /* # of immediate-data packets */
        unsigned long an;           /* # of asynchronous notifications */
        unsigned long nomem;        /* # of responses deferred due to no mem */
};

struct sge_ofld_rxq {               /* SW offload Rx queue */
        struct sge_rspq rspq;
        struct sge_fl fl;
        struct sge_ofld_stats stats;
} ____cacheline_aligned_in_smp;

struct tx_desc {
        __be64 flit[8];
};

struct tx_sw_desc;

struct sge_txq {
        unsigned int in_use;        /* # of in-use Tx descriptors */
        unsigned int size;          /* # of descriptors */
        unsigned int cidx;          /* SW consumer index */
        unsigned int pidx;          /* producer index */
        unsigned long stops;        /* # of times q has been stopped */
        unsigned long restarts;     /* # of queue restarts */
        unsigned int cntxt_id;      /* SGE context id for the Tx q */
        struct tx_desc *desc;       /* address of HW Tx descriptor ring */
        struct tx_sw_desc *sdesc;   /* address of SW Tx descriptor ring */
        struct sge_qstat *stat;     /* queue status entry */
        dma_addr_t phys_addr;       /* physical address of the ring */
};

struct sge_eth_txq {                /* state for an SGE Ethernet Tx queue */
        struct sge_txq q;
        struct netdev_queue *txq;   /* associated netdev TX queue */
        unsigned long tso;          /* # of TSO requests */
        unsigned long tx_cso;       /* # of Tx checksum offloads */
        unsigned long vlan_ins;     /* # of Tx VLAN insertions */
        unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;

struct sge_ofld_txq {               /* state for an SGE offload Tx queue */
        struct sge_txq q;
        struct adapter *adap;
        struct sk_buff_head sendq;  /* list of backpressured packets */
        struct tasklet_struct qresume_tsk; /* restarts the queue */
        u8 full;                    /* the Tx ring is full */
        unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;

struct sge_ctrl_txq {               /* state for an SGE control Tx queue */
        struct sge_txq q;
        struct adapter *adap;
        struct sk_buff_head sendq;  /* list of backpressured packets */
        struct tasklet_struct qresume_tsk; /* restarts the queue */
        u8 full;                    /* the Tx ring is full */
} ____cacheline_aligned_in_smp;

struct sge {
        struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
        struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
        struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];

        struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
        struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
        struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
        struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;

        struct sge_rspq intrq ____cacheline_aligned_in_smp;
        spinlock_t intrq_lock;

        u16 max_ethqsets;           /* # of available Ethernet queue sets */
        u16 ethqsets;               /* # of active Ethernet queue sets */
        u16 ethtxq_rover;           /* Tx queue to clean up next */
        u16 ofldqsets;              /* # of active offload queue sets */
        u16 rdmaqs;                 /* # of available RDMA Rx queues */
        u16 ofld_rxq[MAX_OFLD_QSETS];
        u16 rdma_rxq[NCHAN];
        u16 timer_val[SGE_NTIMERS];
        u8 counter_val[SGE_NCOUNTERS];
        unsigned int starve_thres;
        u8 idma_state[2];
        void *egr_map[MAX_EGRQ];    /* qid->queue egress queue map */
        struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
        DECLARE_BITMAP(starving_fl, MAX_EGRQ);
        DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
        struct timer_list rx_timer; /* refills starving FLs */
        struct timer_list tx_timer; /* checks Tx queues */
};

#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)

struct l2t_data;

struct adapter {
        void __iomem *regs;
        struct pci_dev *pdev;
        struct device *pdev_dev;
        unsigned long registered_device_map;
        unsigned long open_device_map;
        unsigned long flags;

        const char *name;
        int msg_enable;

        struct adapter_params params;
        struct cxgb4_virt_res vres;
        unsigned int swintr;

        unsigned int wol;

        struct {
                unsigned short vec;
                char desc[14];
        } msix_info[MAX_INGQ + 1];

        struct sge sge;

        struct net_device *port[MAX_NPORTS];
        u8 chan_map[NCHAN];         /* channel -> port map */

        struct l2t_data *l2t;
        void *uld_handle[CXGB4_ULD_MAX];
        struct list_head list_node;

        struct tid_info tids;
        void **tid_release_head;
        spinlock_t tid_release_lock;
        struct work_struct tid_release_task;
        bool tid_release_task_busy;

        struct dentry *debugfs_root;

        spinlock_t stats_lock;
};

static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
        return readl(adap->regs + reg_addr);
}

static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
{
        writel(val, adap->regs + reg_addr);
}

#ifndef readq
static inline u64 readq(const volatile void __iomem *addr)
{
        return readl(addr) + ((u64)readl(addr + 4) << 32);
}

static inline void writeq(u64 val, volatile void __iomem *addr)
{
        writel(val, addr);
        writel(val >> 32, addr + 4);
}
#endif

static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
{
        return readq(adap->regs + reg_addr);
}

static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
{
        writeq(val, adap->regs + reg_addr);
}

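A small usage sketch for the accessors above; the offsets are invented stand-ins for names from t4_regs.h. Note that the readq()/writeq() fallback issues two 32-bit accesses (low word first), so 64-bit reads are not atomic on 32-bit platforms.

/* Hypothetical sketch, not part of the patch; 0x1000/0x2000 are invented
 * offsets standing in for names from t4_regs.h. */
static inline void example_reg_access(struct adapter *adap)
{
        u32 v = t4_read_reg(adap, 0x1000);     /* 32-bit read */

        t4_write_reg(adap, 0x1000, v | 1);     /* read-modify-write */
        (void)t4_read_reg64(adap, 0x2000);     /* two readl()s on 32-bit */
}
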
/**
 * netdev2pinfo - return the port_info structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct port_info associated with a net_device
 */
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
        return netdev_priv(dev);
}

/**
 * adap2pinfo - return the port_info of a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Return the port_info structure for the port of the given index.
 */
static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
{
        return netdev_priv(adap->port[idx]);
}

/**
 * netdev2adap - return the adapter structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct adapter associated with a net_device
 */
static inline struct adapter *netdev2adap(const struct net_device *dev)
{
        return netdev2pinfo(dev)->adapter;
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);

void *t4_alloc_mem(size_t size);
void t4_free_mem(void *addr);

void t4_free_sge_resources(struct adapter *adap);
irq_handler_t t4_intr_handler(struct adapter *adap);
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
                     const struct pkt_gl *gl);
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                     struct net_device *dev, int intr_idx,
                     struct sge_fl *fl, rspq_handler_t hnd);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
                         struct net_device *dev, struct netdev_queue *netdevq,
                         unsigned int iqid);
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
                          struct net_device *dev, unsigned int iqid,
                          unsigned int cmplqid);
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
                          struct net_device *dev, unsigned int iqid);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
void t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);

#define for_each_port(adapter, iter) \
        for (iter = 0; iter < (adapter)->params.nports; ++iter)

static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
        return adap->params.vpd.cclk / 1000;
}

static inline unsigned int us_to_core_ticks(const struct adapter *adap,
                                            unsigned int us)
{
        return (us * adap->params.vpd.cclk) / 1000;
}

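A worked example of the conversions above, with an assumed (hypothetical) core clock; vpd.cclk is stored in kHz:

/* Worked example with hypothetical vpd.cclk == 200000 (kHz, 200 MHz core):
 *   core_ticks_per_usec(adap)   -> 200000 / 1000        = 200 ticks/us
 *   us_to_core_ticks(adap, 50)  -> (50 * 200000) / 1000 = 10000 ticks
 */
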
void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
                      u32 val);

int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
                    void *rpl, bool sleep_ok);

static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
                             int size, void *rpl)
{
        return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
}

static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
                                int size, void *rpl)
{
        return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
}

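To illustrate how these wrappers combine with FW_LEN16 (defined earlier), a hedged sketch of issuing a firmware command; struct fw_bye_cmd and the FW_CMD_* field macros are assumed from t4fw_api.h, which this patch also adds:

/* Hypothetical sketch, not part of the patch: send a FW "bye" command.
 * FW_LEN16 fills the command's length-in-16-byte-units field. */
static int example_fw_bye(struct adapter *adap, unsigned int mbox)
{
        struct fw_bye_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_write = htonl(FW_CMD_OP(FW_BYE_CMD) | FW_CMD_REQUEST |
                              FW_CMD_WRITE | FW_CMD_EXEC);
        c.retval_len16 = htonl(FW_LEN16(c));
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); /* may sleep */
}
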
void t4_intr_enable(struct adapter *adapter);
void t4_intr_disable(struct adapter *adapter);
void t4_intr_clear(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);

int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
                  struct link_config *lc);
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
int t4_seeprom_wp(struct adapter *adapter, bool enable);
int t4_read_flash(struct adapter *adapter, unsigned int addr,
                  unsigned int nwords, u32 *data, int byte_oriented);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
int t4_check_fw_version(struct adapter *adapter);
int t4_prep_adapter(struct adapter *adapter);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
void t4_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
                        int filter_index, int enable);
void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
                         int filter_index, int *enabled);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
                        int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
                       unsigned int flags);
int t4_read_rss(struct adapter *adapter, u16 *entries);
int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity);
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
                u64 *parity);

void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);

void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
                         struct tp_tcp_stats *v6);
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
                  const unsigned short *alpha, const unsigned short *beta);

void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
                         const u8 *addr);
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
                      u64 mask0, u64 mask1, unsigned int crc, bool enable);

int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
                enum dev_master master, enum dev_state *state);
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_early_init(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int nparams, const u32 *params,
                    u32 *val);
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
                  unsigned int vf, unsigned int nparams, const u32 *params,
                  const u32 *val);
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
                unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
                unsigned int rxqi, unsigned int rxq, unsigned int tc,
                unsigned int vi, unsigned int cmask, unsigned int pmask,
                unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
                unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
                unsigned int *rss_size);
int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
               unsigned int vf, unsigned int viid);
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
                  int mtu, int promisc, int all_multi, int bcast, bool sleep_ok);
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
                      unsigned int viid, bool free, unsigned int naddr,
                      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
                  int idx, const u8 *addr, bool persist, bool add_smt);
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
                     bool ucast, u64 vec, bool sleep_ok);
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
                 bool rx_en, bool tx_en);
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
                     unsigned int nblinks);
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
               unsigned int mmd, unsigned int reg, u16 *valp);
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
               unsigned int mmd, unsigned int reg, u16 val);
int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
                     unsigned int pf, unsigned int vf, unsigned int iqid,
                     unsigned int fl0id, unsigned int fl1id);
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
               unsigned int vf, unsigned int iqtype, unsigned int iqid,
               unsigned int fl0id, unsigned int fl1id);
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                   unsigned int vf, unsigned int eqid);
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int eqid);
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int eqid);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
new file mode 100644
index 000000000000..a7e30a23d322
--- /dev/null
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -0,0 +1,3388 @@
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "l2t.h"

#define DRV_VERSION "1.0.0-ko"
#define DRV_DESC "Chelsio T4 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * recover.
 */
#define MAX_SGE_TIMERVAL 200U

enum {
        MEMWIN0_APERTURE = 65536,
        MEMWIN0_BASE     = 0x30000,
        MEMWIN1_APERTURE = 32768,
        MEMWIN1_BASE     = 0x28000,
        MEMWIN2_APERTURE = 2048,
        MEMWIN2_BASE     = 0x1b800,
};

enum {
        MAX_TXQ_ENTRIES      = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES     = 16384,
        MAX_RX_BUFFERS       = 16384,
        MIN_TXQ_ENTRIES      = 32,
        MIN_CTRL_TXQ_ENTRIES = 32,
        MIN_RSPQ_ENTRIES     = 128,
        MIN_FL_ENTRIES       = 16
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid) { PCI_VDEVICE(CHELSIO, devid), 0 }

static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
        CH_DEVICE(0xa000),  /* PE10K */
        { 0, }
};

#define FW_FNAME "cxgb4/t4fw.bin"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
                 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
                 "thresholds 1..3 for queue interrupt packet counters");

static int vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

static unsigned int num_vf[4];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_cfg.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
                            fc[p->link_cfg.fc]);
        }
}

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else
                        netif_carrier_off(dev);

                link_report(dev);
        }
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                netdev_info(dev, "port module unplugged\n");
        else
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = true;
        u16 filt_idx[7];
        const u8 *addr[7];
        int ret, naddr = 0;
        const struct dev_addr_list *d;
        const struct netdev_hw_addr *ha;
        int uc_cnt = netdev_uc_count(dev);
        const struct port_info *pi = netdev_priv(dev);

        /* first do the secondary unicast addresses */
        netdev_for_each_uc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
                                        naddr, addr, filt_idx, &uhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        /* next set up the multicast addresses */
        netdev_for_each_mc_addr(d, dev) {
                addr[naddr++] = d->dmi_addr;
                if (naddr >= ARRAY_SIZE(addr) || d->next == NULL) {
                        ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
                                        naddr, addr, filt_idx, &mhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        return t4_set_addr_hash(pi->adapter, 0, pi->viid, uhash != 0,
                                uhash | mhash, sleep);
}

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        ret = set_addr_filters(dev, sleep_ok);
        if (ret == 0)
                ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu,
                                    (dev->flags & IFF_PROMISC) ? 1 : 0,
                                    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
                                    sleep_ok);
        return ret;
}

/**
 * link_start - enable a port
 * @dev: the port to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        /*
         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1,
                            true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, 0, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
                                    false);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0)
                ret = t4_link_start(pi->adapter, 0, pi->tx_chan, &pi->link_cfg);
        if (ret == 0)
                ret = t4_enable_vi(pi->adapter, 0, pi->viid, true, true);
        return ret;
}

/*
 * Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;    /* skip RSS header */
        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
                unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
                struct sge_txq *txq = q->adap->sge.egr_map[qid];

                txq->restarts++;
                if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) {
                        struct sge_eth_txq *eq;

                        eq = container_of(txq, struct sge_eth_txq, q);
                        netif_tx_wake_queue(eq->txq);
                } else {
                        struct sge_ofld_txq *oq;

                        oq = container_of(txq, struct sge_ofld_txq, q);
                        tasklet_schedule(&oq->qresume_tsk);
                }
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;

                if (p->type == 0)
                        t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;

                do_l2t_write_rpl(q->adap, p);
        } else
                dev_err(q->adap->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
        return 0;
}

/**
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Deliver an ingress offload packet to a ULD.  All processing is done by
 * the ULD; we just maintain statistics.
 */
381 | static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp, | ||
382 | const struct pkt_gl *gl) | ||
383 | { | ||
384 | struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq); | ||
385 | |||
386 | if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) { | ||
387 | rxq->stats.nomem++; | ||
388 | return -1; | ||
389 | } | ||
390 | if (gl == NULL) | ||
391 | rxq->stats.imm++; | ||
392 | else if (gl == CXGB4_MSG_AN) | ||
393 | rxq->stats.an++; | ||
394 | else | ||
395 | rxq->stats.pkts++; | ||
396 | return 0; | ||
397 | } | ||
398 | |||
399 | static void disable_msi(struct adapter *adapter) | ||
400 | { | ||
401 | if (adapter->flags & USING_MSIX) { | ||
402 | pci_disable_msix(adapter->pdev); | ||
403 | adapter->flags &= ~USING_MSIX; | ||
404 | } else if (adapter->flags & USING_MSI) { | ||
405 | pci_disable_msi(adapter->pdev); | ||
406 | adapter->flags &= ~USING_MSI; | ||
407 | } | ||
408 | } | ||
409 | |||
410 | /* | ||
411 | * Interrupt handler for non-data events used with MSI-X. | ||
412 | */ | ||
413 | static irqreturn_t t4_nondata_intr(int irq, void *cookie) | ||
414 | { | ||
415 | struct adapter *adap = cookie; | ||
416 | |||
417 | u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE)); | ||
418 | if (v & PFSW) { | ||
419 | adap->swintr = 1; | ||
420 | t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v); | ||
421 | } | ||
422 | t4_slow_intr_handler(adap); | ||
423 | return IRQ_HANDLED; | ||
424 | } | ||
425 | |||
426 | /* | ||
427 | * Name the MSI-X interrupts. | ||
428 | */ | ||
429 | static void name_msix_vecs(struct adapter *adap) | ||
430 | { | ||
431 | int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc) - 1; | ||
432 | |||
433 | /* non-data interrupts */ | ||
434 | snprintf(adap->msix_info[0].desc, n, "%s", adap->name); | ||
435 | adap->msix_info[0].desc[n] = 0; | ||
436 | |||
437 | /* FW events */ | ||
438 | snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", adap->name); | ||
439 | adap->msix_info[1].desc[n] = 0; | ||
440 | |||
441 | /* Ethernet queues */ | ||
442 | for_each_port(adap, j) { | ||
443 | struct net_device *d = adap->port[j]; | ||
444 | const struct port_info *pi = netdev_priv(d); | ||
445 | |||
446 | for (i = 0; i < pi->nqsets; i++, msi_idx++) { | ||
447 | snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d", | ||
448 | d->name, i); | ||
449 | adap->msix_info[msi_idx].desc[n] = 0; | ||
450 | } | ||
451 | } | ||
452 | |||
453 | /* offload queues */ | ||
454 | for_each_ofldrxq(&adap->sge, i) { | ||
455 | snprintf(adap->msix_info[msi_idx].desc, n, "%s-ofld%d", | ||
456 | adap->name, i); | ||
457 | adap->msix_info[msi_idx++].desc[n] = 0; | ||
458 | } | ||
459 | for_each_rdmarxq(&adap->sge, i) { | ||
460 | snprintf(adap->msix_info[msi_idx].desc, n, "%s-rdma%d", | ||
461 | adap->name, i); | ||
462 | adap->msix_info[msi_idx++].desc[n] = 0; | ||
463 | } | ||
464 | } | ||
465 | |||
466 | static int request_msix_queue_irqs(struct adapter *adap) | ||
467 | { | ||
468 | struct sge *s = &adap->sge; | ||
469 | int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2; | ||
470 | |||
471 | err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, | ||
472 | adap->msix_info[1].desc, &s->fw_evtq); | ||
473 | if (err) | ||
474 | return err; | ||
475 | |||
476 | for_each_ethrxq(s, ethqidx) { | ||
477 | err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, | ||
478 | adap->msix_info[msi].desc, | ||
479 | &s->ethrxq[ethqidx].rspq); | ||
480 | if (err) | ||
481 | goto unwind; | ||
482 | msi++; | ||
483 | } | ||
484 | for_each_ofldrxq(s, ofldqidx) { | ||
485 | err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, | ||
486 | adap->msix_info[msi].desc, | ||
487 | &s->ofldrxq[ofldqidx].rspq); | ||
488 | if (err) | ||
489 | goto unwind; | ||
490 | msi++; | ||
491 | } | ||
492 | for_each_rdmarxq(s, rdmaqidx) { | ||
493 | err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, | ||
494 | adap->msix_info[msi].desc, | ||
495 | &s->rdmarxq[rdmaqidx].rspq); | ||
496 | if (err) | ||
497 | goto unwind; | ||
498 | msi++; | ||
499 | } | ||
500 | return 0; | ||
501 | |||
502 | unwind: | ||
503 | while (--rdmaqidx >= 0) | ||
504 | free_irq(adap->msix_info[--msi].vec, | ||
505 | &s->rdmarxq[rdmaqidx].rspq); | ||
506 | while (--ofldqidx >= 0) | ||
507 | free_irq(adap->msix_info[--msi].vec, | ||
508 | &s->ofldrxq[ofldqidx].rspq); | ||
509 | while (--ethqidx >= 0) | ||
510 | free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq); | ||
511 | free_irq(adap->msix_info[1].vec, &s->fw_evtq); | ||
512 | return err; | ||
513 | } | ||
514 | |||
515 | static void free_msix_queue_irqs(struct adapter *adap) | ||
516 | { | ||
517 | int i, msi = 2; | ||
518 | struct sge *s = &adap->sge; | ||
519 | |||
520 | free_irq(adap->msix_info[1].vec, &s->fw_evtq); | ||
521 | for_each_ethrxq(s, i) | ||
522 | free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq); | ||
523 | for_each_ofldrxq(s, i) | ||
524 | free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq); | ||
525 | for_each_rdmarxq(s, i) | ||
526 | free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq); | ||
527 | } | ||
528 | |||
529 | /** | ||
530 | * setup_rss - configure RSS | ||
531 | * @adap: the adapter | ||
532 | * | ||
533 | * Sets up RSS to distribute packets to multiple receive queues. We | ||
534 | * configure the RSS CPU lookup table to distribute to the number of HW | ||
535 | * receive queues, and the response queue lookup table to narrow that | ||
536 | * down to the response queues actually configured for each port. | ||
537 | * We always configure the RSS mapping for all ports since the mapping | ||
538 | * table has plenty of entries. | ||
539 | */ | ||
540 | static int setup_rss(struct adapter *adap) | ||
541 | { | ||
542 | int i, j, err; | ||
543 | u16 rss[MAX_ETH_QSETS]; | ||
544 | |||
545 | for_each_port(adap, i) { | ||
546 | const struct port_info *pi = adap2pinfo(adap, i); | ||
547 | const struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; | ||
548 | |||
549 | for (j = 0; j < pi->nqsets; j++) | ||
550 | rss[j] = q[j].rspq.abs_id; | ||
551 | |||
552 | err = t4_config_rss_range(adap, 0, pi->viid, 0, pi->rss_size, | ||
553 | rss, pi->nqsets); | ||
554 | if (err) | ||
555 | return err; | ||
556 | } | ||
557 | return 0; | ||
558 | } | ||
559 | |||
560 | /* | ||
561 | * Wait until all NAPI handlers are descheduled. | ||
562 | */ | ||
563 | static void quiesce_rx(struct adapter *adap) | ||
564 | { | ||
565 | int i; | ||
566 | |||
567 | for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { | ||
568 | struct sge_rspq *q = adap->sge.ingr_map[i]; | ||
569 | |||
570 | if (q && q->handler) | ||
571 | napi_disable(&q->napi); | ||
572 | } | ||
573 | } | ||
574 | |||
575 | /* | ||
576 | * Enable NAPI scheduling and interrupt generation for all Rx queues. | ||
577 | */ | ||
578 | static void enable_rx(struct adapter *adap) | ||
579 | { | ||
580 | int i; | ||
581 | |||
582 | for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { | ||
583 | struct sge_rspq *q = adap->sge.ingr_map[i]; | ||
584 | |||
585 | if (!q) | ||
586 | continue; | ||
587 | if (q->handler) | ||
588 | napi_enable(&q->napi); | ||
589 | /* 0-increment GTS to start the timer and enable interrupts */ | ||
590 | t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), | ||
591 | SEINTARM(q->intr_params) | | ||
592 | INGRESSQID(q->cntxt_id)); | ||
593 | } | ||
594 | } | ||
595 | |||
596 | /** | ||
597 | * setup_sge_queues - configure SGE Tx/Rx/response queues | ||
598 | * @adap: the adapter | ||
599 | * | ||
600 | * Determines how many sets of SGE queues to use and initializes them. | ||
601 | * We support multiple queue sets per port if we have MSI-X, otherwise | ||
602 | * just one queue set per port. | ||
603 | */ | ||
604 | static int setup_sge_queues(struct adapter *adap) | ||
605 | { | ||
606 | int err, msi_idx, i, j; | ||
607 | struct sge *s = &adap->sge; | ||
608 | |||
609 | bitmap_zero(s->starving_fl, MAX_EGRQ); | ||
610 | bitmap_zero(s->txq_maperr, MAX_EGRQ); | ||
611 | |||
612 | if (adap->flags & USING_MSIX) | ||
613 | msi_idx = 1; /* vector 0 is for non-queue interrupts */ | ||
614 | else { | ||
615 | err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, | ||
616 | NULL, NULL); | ||
617 | if (err) | ||
618 | return err; | ||
619 | msi_idx = -((int)s->intrq.abs_id + 1); | ||
620 | } | ||
621 | |||
622 | err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], | ||
623 | msi_idx, NULL, fwevtq_handler); | ||
624 | if (err) { | ||
625 | freeout: t4_free_sge_resources(adap); | ||
626 | return err; | ||
627 | } | ||
628 | |||
629 | for_each_port(adap, i) { | ||
630 | struct net_device *dev = adap->port[i]; | ||
631 | struct port_info *pi = netdev_priv(dev); | ||
632 | struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset]; | ||
633 | struct sge_eth_txq *t = &s->ethtxq[pi->first_qset]; | ||
634 | |||
635 | for (j = 0; j < pi->nqsets; j++, q++) { | ||
636 | if (msi_idx > 0) | ||
637 | msi_idx++; | ||
638 | err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, | ||
639 | msi_idx, &q->fl, | ||
640 | t4_ethrx_handler); | ||
641 | if (err) | ||
642 | goto freeout; | ||
643 | q->rspq.idx = j; | ||
644 | memset(&q->stats, 0, sizeof(q->stats)); | ||
645 | } | ||
646 | for (j = 0; j < pi->nqsets; j++, t++) { | ||
647 | err = t4_sge_alloc_eth_txq(adap, t, dev, | ||
648 | netdev_get_tx_queue(dev, j), | ||
649 | s->fw_evtq.cntxt_id); | ||
650 | if (err) | ||
651 | goto freeout; | ||
652 | } | ||
653 | } | ||
654 | |||
655 | j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */ | ||
656 | for_each_ofldrxq(s, i) { | ||
657 | struct sge_ofld_rxq *q = &s->ofldrxq[i]; | ||
658 | struct net_device *dev = adap->port[i / j]; | ||
659 | |||
660 | if (msi_idx > 0) | ||
661 | msi_idx++; | ||
662 | err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx, | ||
663 | &q->fl, uldrx_handler); | ||
664 | if (err) | ||
665 | goto freeout; | ||
666 | memset(&q->stats, 0, sizeof(q->stats)); | ||
667 | s->ofld_rxq[i] = q->rspq.abs_id; | ||
668 | err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev, | ||
669 | s->fw_evtq.cntxt_id); | ||
670 | if (err) | ||
671 | goto freeout; | ||
672 | } | ||
673 | |||
674 | for_each_rdmarxq(s, i) { | ||
675 | struct sge_ofld_rxq *q = &s->rdmarxq[i]; | ||
676 | |||
677 | if (msi_idx > 0) | ||
678 | msi_idx++; | ||
679 | err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i], | ||
680 | msi_idx, &q->fl, uldrx_handler); | ||
681 | if (err) | ||
682 | goto freeout; | ||
683 | memset(&q->stats, 0, sizeof(q->stats)); | ||
684 | s->rdma_rxq[i] = q->rspq.abs_id; | ||
685 | } | ||
686 | |||
687 | for_each_port(adap, i) { | ||
688 | /* | ||
689 | * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't | ||
690 | * have RDMA queues, and that's the right value. | ||
691 | */ | ||
692 | err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i], | ||
693 | s->fw_evtq.cntxt_id, | ||
694 | s->rdmarxq[i].rspq.cntxt_id); | ||
695 | if (err) | ||
696 | goto freeout; | ||
697 | } | ||
698 | |||
699 | t4_write_reg(adap, MPS_TRC_RSS_CONTROL, | ||
700 | RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) | | ||
701 | QUEUENUMBER(s->ethrxq[0].rspq.abs_id)); | ||
702 | return 0; | ||
703 | } | ||
704 | |||
705 | /* | ||
706 | * Returns 0 if the flash FW is current or new FW was loaded, a positive | ||
707 | * errno if a load started but failed, and a negative errno if it couldn't start. | ||
708 | */ | ||
709 | static int upgrade_fw(struct adapter *adap) | ||
710 | { | ||
711 | int ret; | ||
712 | u32 vers; | ||
713 | const struct fw_hdr *hdr; | ||
714 | const struct firmware *fw; | ||
715 | struct device *dev = adap->pdev_dev; | ||
716 | |||
717 | ret = request_firmware(&fw, FW_FNAME, dev); | ||
718 | if (ret < 0) { | ||
719 | dev_err(dev, "unable to load firmware image " FW_FNAME | ||
720 | ", error %d\n", ret); | ||
721 | return ret; | ||
722 | } | ||
723 | |||
724 | hdr = (const struct fw_hdr *)fw->data; | ||
725 | vers = ntohl(hdr->fw_ver); | ||
726 | if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) { | ||
727 | ret = -EINVAL; /* wrong major version, won't do */ | ||
728 | goto out; | ||
729 | } | ||
730 | |||
731 | /* | ||
732 | * If the flash FW is unusable or we found something newer, load it. | ||
733 | */ | ||
734 | if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR || | ||
735 | vers > adap->params.fw_vers) { | ||
736 | ret = -t4_load_fw(adap, fw->data, fw->size); | ||
737 | if (!ret) | ||
738 | dev_info(dev, "firmware upgraded to version %pI4 from " | ||
739 | FW_FNAME "\n", &hdr->fw_ver); | ||
740 | } | ||
741 | out: release_firmware(fw); | ||
742 | return ret; | ||
743 | } | ||
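The version word packs four 8-bit fields (major.minor.micro.build, most significant byte first), which is also why the dev_info() above can borrow the %pI4 format specifier: a big-endian 32-bit version renders as the familiar dotted quad. A standalone sketch of the unpacking, assuming that layout (the real FW_HDR_FW_VER_*_GET accessors live in t4fw_api.h):

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: 8 bits per field, major in the top byte, matching
 * the dotted %pI4 rendering used by upgrade_fw() above. */
static unsigned int fw_field(uint32_t vers, unsigned int shift)
{
	return (vers >> shift) & 0xff;
}

int main(void)
{
	uint32_t vers = 0x01010203;	/* hypothetical version 1.1.2.3 */

	printf("%u.%u.%u.%u\n", fw_field(vers, 24), fw_field(vers, 16),
	       fw_field(vers, 8), fw_field(vers, 0));
	return 0;
}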
744 | |||
745 | /* | ||
746 | * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. | ||
747 | * The allocated memory is cleared. | ||
748 | */ | ||
749 | void *t4_alloc_mem(size_t size) | ||
750 | { | ||
751 | void *p = kmalloc(size, GFP_KERNEL); | ||
752 | |||
753 | if (!p) | ||
754 | p = vmalloc(size); | ||
755 | if (p) | ||
756 | memset(p, 0, size); | ||
757 | return p; | ||
758 | } | ||
759 | |||
760 | /* | ||
761 | * Free memory allocated through t4_alloc_mem(). | ||
762 | */ | ||
763 | void t4_free_mem(void *addr) | ||
764 | { | ||
765 | if (is_vmalloc_addr(addr)) | ||
766 | vfree(addr); | ||
767 | else | ||
768 | kfree(addr); | ||
769 | } | ||
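A hedged usage sketch of the pair: the vmalloc() fallback makes these suited to large tables that need zeroing but not physical contiguity (tid_init() below is exactly such a caller); the names here are hypothetical.

	/* Sketch only: hyp_entry and nentries are stand-ins. The memory
	 * arrives zeroed and need not be physically contiguous. */
	struct hyp_entry *tab = t4_alloc_mem(nentries * sizeof(*tab));

	if (!tab)
		return -ENOMEM;
	/* ... populate and use tab ... */
	t4_free_mem(tab);	/* dispatches to vfree() or kfree() */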
770 | |||
771 | static inline int is_offload(const struct adapter *adap) | ||
772 | { | ||
773 | return adap->params.offload; | ||
774 | } | ||
775 | |||
776 | /* | ||
777 | * Implementation of ethtool operations. | ||
778 | */ | ||
779 | |||
780 | static u32 get_msglevel(struct net_device *dev) | ||
781 | { | ||
782 | return netdev2adap(dev)->msg_enable; | ||
783 | } | ||
784 | |||
785 | static void set_msglevel(struct net_device *dev, u32 val) | ||
786 | { | ||
787 | netdev2adap(dev)->msg_enable = val; | ||
788 | } | ||
789 | |||
790 | static char stats_strings[][ETH_GSTRING_LEN] = { | ||
791 | "TxOctetsOK ", | ||
792 | "TxFramesOK ", | ||
793 | "TxBroadcastFrames ", | ||
794 | "TxMulticastFrames ", | ||
795 | "TxUnicastFrames ", | ||
796 | "TxErrorFrames ", | ||
797 | |||
798 | "TxFrames64 ", | ||
799 | "TxFrames65To127 ", | ||
800 | "TxFrames128To255 ", | ||
801 | "TxFrames256To511 ", | ||
802 | "TxFrames512To1023 ", | ||
803 | "TxFrames1024To1518 ", | ||
804 | "TxFrames1519ToMax ", | ||
805 | |||
806 | "TxFramesDropped ", | ||
807 | "TxPauseFrames ", | ||
808 | "TxPPP0Frames ", | ||
809 | "TxPPP1Frames ", | ||
810 | "TxPPP2Frames ", | ||
811 | "TxPPP3Frames ", | ||
812 | "TxPPP4Frames ", | ||
813 | "TxPPP5Frames ", | ||
814 | "TxPPP6Frames ", | ||
815 | "TxPPP7Frames ", | ||
816 | |||
817 | "RxOctetsOK ", | ||
818 | "RxFramesOK ", | ||
819 | "RxBroadcastFrames ", | ||
820 | "RxMulticastFrames ", | ||
821 | "RxUnicastFrames ", | ||
822 | |||
823 | "RxFramesTooLong ", | ||
824 | "RxJabberErrors ", | ||
825 | "RxFCSErrors ", | ||
826 | "RxLengthErrors ", | ||
827 | "RxSymbolErrors ", | ||
828 | "RxRuntFrames ", | ||
829 | |||
830 | "RxFrames64 ", | ||
831 | "RxFrames65To127 ", | ||
832 | "RxFrames128To255 ", | ||
833 | "RxFrames256To511 ", | ||
834 | "RxFrames512To1023 ", | ||
835 | "RxFrames1024To1518 ", | ||
836 | "RxFrames1519ToMax ", | ||
837 | |||
838 | "RxPauseFrames ", | ||
839 | "RxPPP0Frames ", | ||
840 | "RxPPP1Frames ", | ||
841 | "RxPPP2Frames ", | ||
842 | "RxPPP3Frames ", | ||
843 | "RxPPP4Frames ", | ||
844 | "RxPPP5Frames ", | ||
845 | "RxPPP6Frames ", | ||
846 | "RxPPP7Frames ", | ||
847 | |||
848 | "RxBG0FramesDropped ", | ||
849 | "RxBG1FramesDropped ", | ||
850 | "RxBG2FramesDropped ", | ||
851 | "RxBG3FramesDropped ", | ||
852 | "RxBG0FramesTrunc ", | ||
853 | "RxBG1FramesTrunc ", | ||
854 | "RxBG2FramesTrunc ", | ||
855 | "RxBG3FramesTrunc ", | ||
856 | |||
857 | "TSO ", | ||
858 | "TxCsumOffload ", | ||
859 | "RxCsumGood ", | ||
860 | "VLANextractions ", | ||
861 | "VLANinsertions ", | ||
862 | }; | ||
863 | |||
864 | static int get_sset_count(struct net_device *dev, int sset) | ||
865 | { | ||
866 | switch (sset) { | ||
867 | case ETH_SS_STATS: | ||
868 | return ARRAY_SIZE(stats_strings); | ||
869 | default: | ||
870 | return -EOPNOTSUPP; | ||
871 | } | ||
872 | } | ||
873 | |||
874 | #define T4_REGMAP_SIZE (160 * 1024) | ||
875 | |||
876 | static int get_regs_len(struct net_device *dev) | ||
877 | { | ||
878 | return T4_REGMAP_SIZE; | ||
879 | } | ||
880 | |||
881 | static int get_eeprom_len(struct net_device *dev) | ||
882 | { | ||
883 | return EEPROMSIZE; | ||
884 | } | ||
885 | |||
886 | static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | ||
887 | { | ||
888 | struct adapter *adapter = netdev2adap(dev); | ||
889 | |||
890 | strcpy(info->driver, KBUILD_MODNAME); | ||
891 | strcpy(info->version, DRV_VERSION); | ||
892 | strcpy(info->bus_info, pci_name(adapter->pdev)); | ||
893 | |||
894 | if (!adapter->params.fw_vers) | ||
895 | strcpy(info->fw_version, "N/A"); | ||
896 | else | ||
897 | snprintf(info->fw_version, sizeof(info->fw_version), | ||
898 | "%u.%u.%u.%u, TP %u.%u.%u.%u", | ||
899 | FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers), | ||
900 | FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers), | ||
901 | FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers), | ||
902 | FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers), | ||
903 | FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers), | ||
904 | FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers), | ||
905 | FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers), | ||
906 | FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers)); | ||
907 | } | ||
908 | |||
909 | static void get_strings(struct net_device *dev, u32 stringset, u8 *data) | ||
910 | { | ||
911 | if (stringset == ETH_SS_STATS) | ||
912 | memcpy(data, stats_strings, sizeof(stats_strings)); | ||
913 | } | ||
914 | |||
915 | /* | ||
916 | * Port stats maintained per queue of the port. They must appear in the same | ||
917 | * order as the corresponding entries in stats_strings above. | ||
918 | */ | ||
919 | struct queue_port_stats { | ||
920 | u64 tso; | ||
921 | u64 tx_csum; | ||
922 | u64 rx_csum; | ||
923 | u64 vlan_ex; | ||
924 | u64 vlan_ins; | ||
925 | }; | ||
926 | |||
927 | static void collect_sge_port_stats(const struct adapter *adap, | ||
928 | const struct port_info *p, struct queue_port_stats *s) | ||
929 | { | ||
930 | int i; | ||
931 | const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset]; | ||
932 | const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset]; | ||
933 | |||
934 | memset(s, 0, sizeof(*s)); | ||
935 | for (i = 0; i < p->nqsets; i++, rx++, tx++) { | ||
936 | s->tso += tx->tso; | ||
937 | s->tx_csum += tx->tx_cso; | ||
938 | s->rx_csum += rx->stats.rx_cso; | ||
939 | s->vlan_ex += rx->stats.vlan_ex; | ||
940 | s->vlan_ins += tx->vlan_ins; | ||
941 | } | ||
942 | } | ||
943 | |||
944 | static void get_stats(struct net_device *dev, struct ethtool_stats *stats, | ||
945 | u64 *data) | ||
946 | { | ||
947 | struct port_info *pi = netdev_priv(dev); | ||
948 | struct adapter *adapter = pi->adapter; | ||
949 | |||
950 | t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data); | ||
951 | |||
952 | data += sizeof(struct port_stats) / sizeof(u64); | ||
953 | collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); | ||
954 | } | ||
955 | |||
956 | /* | ||
957 | * Return a version number to identify the type of adapter. The scheme is: | ||
958 | * - bits 0..9: chip version | ||
959 | * - bits 10..15: chip revision | ||
960 | */ | ||
961 | static inline unsigned int mk_adap_vers(const struct adapter *ap) | ||
962 | { | ||
963 | return 4 | (ap->params.rev << 10); | ||
964 | } | ||
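Decoding is the mirror image of the packing; a standalone check of what a consumer of regs->version would compute:

#include <assert.h>

int main(void)
{
	unsigned int vers = 4 | (1 << 10);	/* T4 chip, revision 1 */

	assert((vers & 0x3ff) == 4);		/* bits 0..9: chip version */
	assert(((vers >> 10) & 0x3f) == 1);	/* bits 10..15: revision */
	return 0;
}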
965 | |||
966 | static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, | ||
967 | unsigned int end) | ||
968 | { | ||
969 | u32 *p = buf + start; | ||
970 | |||
971 | for ( ; start <= end; start += sizeof(u32)) | ||
972 | *p++ = t4_read_reg(ap, start); | ||
973 | } | ||
974 | |||
975 | static void get_regs(struct net_device *dev, struct ethtool_regs *regs, | ||
976 | void *buf) | ||
977 | { | ||
978 | static const unsigned int reg_ranges[] = { | ||
979 | 0x1008, 0x1108, | ||
980 | 0x1180, 0x11b4, | ||
981 | 0x11fc, 0x123c, | ||
982 | 0x1300, 0x173c, | ||
983 | 0x1800, 0x18fc, | ||
984 | 0x3000, 0x30d8, | ||
985 | 0x30e0, 0x5924, | ||
986 | 0x5960, 0x59d4, | ||
987 | 0x5a00, 0x5af8, | ||
988 | 0x6000, 0x6098, | ||
989 | 0x6100, 0x6150, | ||
990 | 0x6200, 0x6208, | ||
991 | 0x6240, 0x6248, | ||
992 | 0x6280, 0x6338, | ||
993 | 0x6370, 0x638c, | ||
994 | 0x6400, 0x643c, | ||
995 | 0x6500, 0x6524, | ||
996 | 0x6a00, 0x6a38, | ||
997 | 0x6a60, 0x6a78, | ||
998 | 0x6b00, 0x6b84, | ||
999 | 0x6bf0, 0x6c84, | ||
1000 | 0x6cf0, 0x6d84, | ||
1001 | 0x6df0, 0x6e84, | ||
1002 | 0x6ef0, 0x6f84, | ||
1003 | 0x6ff0, 0x7084, | ||
1004 | 0x70f0, 0x7184, | ||
1005 | 0x71f0, 0x7284, | ||
1006 | 0x72f0, 0x7384, | ||
1007 | 0x73f0, 0x7450, | ||
1008 | 0x7500, 0x7530, | ||
1009 | 0x7600, 0x761c, | ||
1010 | 0x7680, 0x76cc, | ||
1011 | 0x7700, 0x7798, | ||
1012 | 0x77c0, 0x77fc, | ||
1013 | 0x7900, 0x79fc, | ||
1014 | 0x7b00, 0x7c38, | ||
1015 | 0x7d00, 0x7efc, | ||
1016 | 0x8dc0, 0x8e1c, | ||
1017 | 0x8e30, 0x8e78, | ||
1018 | 0x8ea0, 0x8f6c, | ||
1019 | 0x8fc0, 0x9074, | ||
1020 | 0x90fc, 0x90fc, | ||
1021 | 0x9400, 0x9458, | ||
1022 | 0x9600, 0x96bc, | ||
1023 | 0x9800, 0x9808, | ||
1024 | 0x9820, 0x983c, | ||
1025 | 0x9850, 0x9864, | ||
1026 | 0x9c00, 0x9c6c, | ||
1027 | 0x9c80, 0x9cec, | ||
1028 | 0x9d00, 0x9d6c, | ||
1029 | 0x9d80, 0x9dec, | ||
1030 | 0x9e00, 0x9e6c, | ||
1031 | 0x9e80, 0x9eec, | ||
1032 | 0x9f00, 0x9f6c, | ||
1033 | 0x9f80, 0x9fec, | ||
1034 | 0xd004, 0xd03c, | ||
1035 | 0xdfc0, 0xdfe0, | ||
1036 | 0xe000, 0xea7c, | ||
1037 | 0xf000, 0x11190, | ||
1038 | 0x19040, 0x19124, | ||
1039 | 0x19150, 0x191b0, | ||
1040 | 0x191d0, 0x191e8, | ||
1041 | 0x19238, 0x1924c, | ||
1042 | 0x193f8, 0x19474, | ||
1043 | 0x19490, 0x194f8, | ||
1044 | 0x19800, 0x19f30, | ||
1045 | 0x1a000, 0x1a06c, | ||
1046 | 0x1a0b0, 0x1a120, | ||
1047 | 0x1a128, 0x1a138, | ||
1048 | 0x1a190, 0x1a1c4, | ||
1049 | 0x1a1fc, 0x1a1fc, | ||
1050 | 0x1e040, 0x1e04c, | ||
1051 | 0x1e240, 0x1e28c, | ||
1052 | 0x1e2c0, 0x1e2c0, | ||
1053 | 0x1e2e0, 0x1e2e0, | ||
1054 | 0x1e300, 0x1e384, | ||
1055 | 0x1e3c0, 0x1e3c8, | ||
1056 | 0x1e440, 0x1e44c, | ||
1057 | 0x1e640, 0x1e68c, | ||
1058 | 0x1e6c0, 0x1e6c0, | ||
1059 | 0x1e6e0, 0x1e6e0, | ||
1060 | 0x1e700, 0x1e784, | ||
1061 | 0x1e7c0, 0x1e7c8, | ||
1062 | 0x1e840, 0x1e84c, | ||
1063 | 0x1ea40, 0x1ea8c, | ||
1064 | 0x1eac0, 0x1eac0, | ||
1065 | 0x1eae0, 0x1eae0, | ||
1066 | 0x1eb00, 0x1eb84, | ||
1067 | 0x1ebc0, 0x1ebc8, | ||
1068 | 0x1ec40, 0x1ec4c, | ||
1069 | 0x1ee40, 0x1ee8c, | ||
1070 | 0x1eec0, 0x1eec0, | ||
1071 | 0x1eee0, 0x1eee0, | ||
1072 | 0x1ef00, 0x1ef84, | ||
1073 | 0x1efc0, 0x1efc8, | ||
1074 | 0x1f040, 0x1f04c, | ||
1075 | 0x1f240, 0x1f28c, | ||
1076 | 0x1f2c0, 0x1f2c0, | ||
1077 | 0x1f2e0, 0x1f2e0, | ||
1078 | 0x1f300, 0x1f384, | ||
1079 | 0x1f3c0, 0x1f3c8, | ||
1080 | 0x1f440, 0x1f44c, | ||
1081 | 0x1f640, 0x1f68c, | ||
1082 | 0x1f6c0, 0x1f6c0, | ||
1083 | 0x1f6e0, 0x1f6e0, | ||
1084 | 0x1f700, 0x1f784, | ||
1085 | 0x1f7c0, 0x1f7c8, | ||
1086 | 0x1f840, 0x1f84c, | ||
1087 | 0x1fa40, 0x1fa8c, | ||
1088 | 0x1fac0, 0x1fac0, | ||
1089 | 0x1fae0, 0x1fae0, | ||
1090 | 0x1fb00, 0x1fb84, | ||
1091 | 0x1fbc0, 0x1fbc8, | ||
1092 | 0x1fc40, 0x1fc4c, | ||
1093 | 0x1fe40, 0x1fe8c, | ||
1094 | 0x1fec0, 0x1fec0, | ||
1095 | 0x1fee0, 0x1fee0, | ||
1096 | 0x1ff00, 0x1ff84, | ||
1097 | 0x1ffc0, 0x1ffc8, | ||
1098 | 0x20000, 0x2002c, | ||
1099 | 0x20100, 0x2013c, | ||
1100 | 0x20190, 0x201c8, | ||
1101 | 0x20200, 0x20318, | ||
1102 | 0x20400, 0x20528, | ||
1103 | 0x20540, 0x20614, | ||
1104 | 0x21000, 0x21040, | ||
1105 | 0x2104c, 0x21060, | ||
1106 | 0x210c0, 0x210ec, | ||
1107 | 0x21200, 0x21268, | ||
1108 | 0x21270, 0x21284, | ||
1109 | 0x212fc, 0x21388, | ||
1110 | 0x21400, 0x21404, | ||
1111 | 0x21500, 0x21518, | ||
1112 | 0x2152c, 0x2153c, | ||
1113 | 0x21550, 0x21554, | ||
1114 | 0x21600, 0x21600, | ||
1115 | 0x21608, 0x21628, | ||
1116 | 0x21630, 0x2163c, | ||
1117 | 0x21700, 0x2171c, | ||
1118 | 0x21780, 0x2178c, | ||
1119 | 0x21800, 0x21c38, | ||
1120 | 0x21c80, 0x21d7c, | ||
1121 | 0x21e00, 0x21e04, | ||
1122 | 0x22000, 0x2202c, | ||
1123 | 0x22100, 0x2213c, | ||
1124 | 0x22190, 0x221c8, | ||
1125 | 0x22200, 0x22318, | ||
1126 | 0x22400, 0x22528, | ||
1127 | 0x22540, 0x22614, | ||
1128 | 0x23000, 0x23040, | ||
1129 | 0x2304c, 0x23060, | ||
1130 | 0x230c0, 0x230ec, | ||
1131 | 0x23200, 0x23268, | ||
1132 | 0x23270, 0x23284, | ||
1133 | 0x232fc, 0x23388, | ||
1134 | 0x23400, 0x23404, | ||
1135 | 0x23500, 0x23518, | ||
1136 | 0x2352c, 0x2353c, | ||
1137 | 0x23550, 0x23554, | ||
1138 | 0x23600, 0x23600, | ||
1139 | 0x23608, 0x23628, | ||
1140 | 0x23630, 0x2363c, | ||
1141 | 0x23700, 0x2371c, | ||
1142 | 0x23780, 0x2378c, | ||
1143 | 0x23800, 0x23c38, | ||
1144 | 0x23c80, 0x23d7c, | ||
1145 | 0x23e00, 0x23e04, | ||
1146 | 0x24000, 0x2402c, | ||
1147 | 0x24100, 0x2413c, | ||
1148 | 0x24190, 0x241c8, | ||
1149 | 0x24200, 0x24318, | ||
1150 | 0x24400, 0x24528, | ||
1151 | 0x24540, 0x24614, | ||
1152 | 0x25000, 0x25040, | ||
1153 | 0x2504c, 0x25060, | ||
1154 | 0x250c0, 0x250ec, | ||
1155 | 0x25200, 0x25268, | ||
1156 | 0x25270, 0x25284, | ||
1157 | 0x252fc, 0x25388, | ||
1158 | 0x25400, 0x25404, | ||
1159 | 0x25500, 0x25518, | ||
1160 | 0x2552c, 0x2553c, | ||
1161 | 0x25550, 0x25554, | ||
1162 | 0x25600, 0x25600, | ||
1163 | 0x25608, 0x25628, | ||
1164 | 0x25630, 0x2563c, | ||
1165 | 0x25700, 0x2571c, | ||
1166 | 0x25780, 0x2578c, | ||
1167 | 0x25800, 0x25c38, | ||
1168 | 0x25c80, 0x25d7c, | ||
1169 | 0x25e00, 0x25e04, | ||
1170 | 0x26000, 0x2602c, | ||
1171 | 0x26100, 0x2613c, | ||
1172 | 0x26190, 0x261c8, | ||
1173 | 0x26200, 0x26318, | ||
1174 | 0x26400, 0x26528, | ||
1175 | 0x26540, 0x26614, | ||
1176 | 0x27000, 0x27040, | ||
1177 | 0x2704c, 0x27060, | ||
1178 | 0x270c0, 0x270ec, | ||
1179 | 0x27200, 0x27268, | ||
1180 | 0x27270, 0x27284, | ||
1181 | 0x272fc, 0x27388, | ||
1182 | 0x27400, 0x27404, | ||
1183 | 0x27500, 0x27518, | ||
1184 | 0x2752c, 0x2753c, | ||
1185 | 0x27550, 0x27554, | ||
1186 | 0x27600, 0x27600, | ||
1187 | 0x27608, 0x27628, | ||
1188 | 0x27630, 0x2763c, | ||
1189 | 0x27700, 0x2771c, | ||
1190 | 0x27780, 0x2778c, | ||
1191 | 0x27800, 0x27c38, | ||
1192 | 0x27c80, 0x27d7c, | ||
1193 | 0x27e00, 0x27e04 | ||
1194 | }; | ||
1195 | |||
1196 | int i; | ||
1197 | struct adapter *ap = netdev2adap(dev); | ||
1198 | |||
1199 | regs->version = mk_adap_vers(ap); | ||
1200 | |||
1201 | memset(buf, 0, T4_REGMAP_SIZE); | ||
1202 | for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2) | ||
1203 | reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]); | ||
1204 | } | ||
1205 | |||
1206 | static int restart_autoneg(struct net_device *dev) | ||
1207 | { | ||
1208 | struct port_info *p = netdev_priv(dev); | ||
1209 | |||
1210 | if (!netif_running(dev)) | ||
1211 | return -EAGAIN; | ||
1212 | if (p->link_cfg.autoneg != AUTONEG_ENABLE) | ||
1213 | return -EINVAL; | ||
1214 | t4_restart_aneg(p->adapter, 0, p->tx_chan); | ||
1215 | return 0; | ||
1216 | } | ||
1217 | |||
1218 | static int identify_port(struct net_device *dev, u32 data) | ||
1219 | { | ||
1220 | if (data == 0) | ||
1221 | data = 2; /* default to 2 seconds */ | ||
1222 | |||
1223 | return t4_identify_port(netdev2adap(dev), 0, netdev2pinfo(dev)->viid, | ||
1224 | data * 5); | ||
1225 | } | ||
1226 | |||
1227 | static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps) | ||
1228 | { | ||
1229 | unsigned int v = 0; | ||
1230 | |||
1231 | if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XAUI) { | ||
1232 | v |= SUPPORTED_TP; | ||
1233 | if (caps & FW_PORT_CAP_SPEED_100M) | ||
1234 | v |= SUPPORTED_100baseT_Full; | ||
1235 | if (caps & FW_PORT_CAP_SPEED_1G) | ||
1236 | v |= SUPPORTED_1000baseT_Full; | ||
1237 | if (caps & FW_PORT_CAP_SPEED_10G) | ||
1238 | v |= SUPPORTED_10000baseT_Full; | ||
1239 | } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) { | ||
1240 | v |= SUPPORTED_Backplane; | ||
1241 | if (caps & FW_PORT_CAP_SPEED_1G) | ||
1242 | v |= SUPPORTED_1000baseKX_Full; | ||
1243 | if (caps & FW_PORT_CAP_SPEED_10G) | ||
1244 | v |= SUPPORTED_10000baseKX4_Full; | ||
1245 | } else if (type == FW_PORT_TYPE_KR) | ||
1246 | v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; | ||
1247 | else if (type == FW_PORT_TYPE_FIBER) | ||
1248 | v |= SUPPORTED_FIBRE; | ||
1249 | |||
1250 | if (caps & FW_PORT_CAP_ANEG) | ||
1251 | v |= SUPPORTED_Autoneg; | ||
1252 | return v; | ||
1253 | } | ||
1254 | |||
1255 | static unsigned int to_fw_linkcaps(unsigned int caps) | ||
1256 | { | ||
1257 | unsigned int v = 0; | ||
1258 | |||
1259 | if (caps & ADVERTISED_100baseT_Full) | ||
1260 | v |= FW_PORT_CAP_SPEED_100M; | ||
1261 | if (caps & ADVERTISED_1000baseT_Full) | ||
1262 | v |= FW_PORT_CAP_SPEED_1G; | ||
1263 | if (caps & ADVERTISED_10000baseT_Full) | ||
1264 | v |= FW_PORT_CAP_SPEED_10G; | ||
1265 | return v; | ||
1266 | } | ||
1267 | |||
1268 | static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1269 | { | ||
1270 | const struct port_info *p = netdev_priv(dev); | ||
1271 | |||
1272 | if (p->port_type == FW_PORT_TYPE_BT_SGMII || | ||
1273 | p->port_type == FW_PORT_TYPE_BT_XAUI) | ||
1274 | cmd->port = PORT_TP; | ||
1275 | else if (p->port_type == FW_PORT_TYPE_FIBER) | ||
1276 | cmd->port = PORT_FIBRE; | ||
1277 | else if (p->port_type == FW_PORT_TYPE_TWINAX) | ||
1278 | cmd->port = PORT_DA; | ||
1279 | else | ||
1280 | cmd->port = PORT_OTHER; | ||
1281 | |||
1282 | if (p->mdio_addr >= 0) { | ||
1283 | cmd->phy_address = p->mdio_addr; | ||
1284 | cmd->transceiver = XCVR_EXTERNAL; | ||
1285 | cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ? | ||
1286 | MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45; | ||
1287 | } else { | ||
1288 | cmd->phy_address = 0; /* not really, but no better option */ | ||
1289 | cmd->transceiver = XCVR_INTERNAL; | ||
1290 | cmd->mdio_support = 0; | ||
1291 | } | ||
1292 | |||
1293 | cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported); | ||
1294 | cmd->advertising = from_fw_linkcaps(p->port_type, | ||
1295 | p->link_cfg.advertising); | ||
1296 | cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0; | ||
1297 | cmd->duplex = DUPLEX_FULL; | ||
1298 | cmd->autoneg = p->link_cfg.autoneg; | ||
1299 | cmd->maxtxpkt = 0; | ||
1300 | cmd->maxrxpkt = 0; | ||
1301 | return 0; | ||
1302 | } | ||
1303 | |||
1304 | static unsigned int speed_to_caps(int speed) | ||
1305 | { | ||
1306 | if (speed == SPEED_100) | ||
1307 | return FW_PORT_CAP_SPEED_100M; | ||
1308 | if (speed == SPEED_1000) | ||
1309 | return FW_PORT_CAP_SPEED_1G; | ||
1310 | if (speed == SPEED_10000) | ||
1311 | return FW_PORT_CAP_SPEED_10G; | ||
1312 | return 0; | ||
1313 | } | ||
1314 | |||
1315 | static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1316 | { | ||
1317 | unsigned int cap; | ||
1318 | struct port_info *p = netdev_priv(dev); | ||
1319 | struct link_config *lc = &p->link_cfg; | ||
1320 | |||
1321 | if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */ | ||
1322 | return -EINVAL; | ||
1323 | |||
1324 | if (!(lc->supported & FW_PORT_CAP_ANEG)) { | ||
1325 | /* | ||
1326 | * PHY offers a single speed. See if that's what's | ||
1327 | * being requested. | ||
1328 | */ | ||
1329 | if (cmd->autoneg == AUTONEG_DISABLE && | ||
1330 | (lc->supported & speed_to_caps(cmd->speed))) | ||
1331 | return 0; | ||
1332 | return -EINVAL; | ||
1333 | } | ||
1334 | |||
1335 | if (cmd->autoneg == AUTONEG_DISABLE) { | ||
1336 | cap = speed_to_caps(cmd->speed); | ||
1337 | |||
1338 | if (!(lc->supported & cap) || cmd->speed == SPEED_1000 || | ||
1339 | cmd->speed == SPEED_10000) | ||
1340 | return -EINVAL; | ||
1341 | lc->requested_speed = cap; | ||
1342 | lc->advertising = 0; | ||
1343 | } else { | ||
1344 | cap = to_fw_linkcaps(cmd->advertising); | ||
1345 | if (!(lc->supported & cap)) | ||
1346 | return -EINVAL; | ||
1347 | lc->requested_speed = 0; | ||
1348 | lc->advertising = cap | FW_PORT_CAP_ANEG; | ||
1349 | } | ||
1350 | lc->autoneg = cmd->autoneg; | ||
1351 | |||
1352 | if (netif_running(dev)) | ||
1353 | return t4_link_start(p->adapter, 0, p->tx_chan, lc); | ||
1354 | return 0; | ||
1355 | } | ||
1356 | |||
1357 | static void get_pauseparam(struct net_device *dev, | ||
1358 | struct ethtool_pauseparam *epause) | ||
1359 | { | ||
1360 | struct port_info *p = netdev_priv(dev); | ||
1361 | |||
1362 | epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; | ||
1363 | epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0; | ||
1364 | epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0; | ||
1365 | } | ||
1366 | |||
1367 | static int set_pauseparam(struct net_device *dev, | ||
1368 | struct ethtool_pauseparam *epause) | ||
1369 | { | ||
1370 | struct port_info *p = netdev_priv(dev); | ||
1371 | struct link_config *lc = &p->link_cfg; | ||
1372 | |||
1373 | if (epause->autoneg == AUTONEG_DISABLE) | ||
1374 | lc->requested_fc = 0; | ||
1375 | else if (lc->supported & FW_PORT_CAP_ANEG) | ||
1376 | lc->requested_fc = PAUSE_AUTONEG; | ||
1377 | else | ||
1378 | return -EINVAL; | ||
1379 | |||
1380 | if (epause->rx_pause) | ||
1381 | lc->requested_fc |= PAUSE_RX; | ||
1382 | if (epause->tx_pause) | ||
1383 | lc->requested_fc |= PAUSE_TX; | ||
1384 | if (netif_running(dev)) | ||
1385 | return t4_link_start(p->adapter, 0, p->tx_chan, lc); | ||
1386 | return 0; | ||
1387 | } | ||
1388 | |||
1389 | static u32 get_rx_csum(struct net_device *dev) | ||
1390 | { | ||
1391 | struct port_info *p = netdev_priv(dev); | ||
1392 | |||
1393 | return p->rx_offload & RX_CSO; | ||
1394 | } | ||
1395 | |||
1396 | static int set_rx_csum(struct net_device *dev, u32 data) | ||
1397 | { | ||
1398 | struct port_info *p = netdev_priv(dev); | ||
1399 | |||
1400 | if (data) | ||
1401 | p->rx_offload |= RX_CSO; | ||
1402 | else | ||
1403 | p->rx_offload &= ~RX_CSO; | ||
1404 | return 0; | ||
1405 | } | ||
1406 | |||
1407 | static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) | ||
1408 | { | ||
1409 | const struct port_info *pi = netdev_priv(dev); | ||
1410 | const struct sge *s = &pi->adapter->sge; | ||
1411 | |||
1412 | e->rx_max_pending = MAX_RX_BUFFERS; | ||
1413 | e->rx_mini_max_pending = MAX_RSPQ_ENTRIES; | ||
1414 | e->rx_jumbo_max_pending = 0; | ||
1415 | e->tx_max_pending = MAX_TXQ_ENTRIES; | ||
1416 | |||
1417 | e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8; | ||
1418 | e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size; | ||
1419 | e->rx_jumbo_pending = 0; | ||
1420 | e->tx_pending = s->ethtxq[pi->first_qset].q.size; | ||
1421 | } | ||
1422 | |||
1423 | static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) | ||
1424 | { | ||
1425 | int i; | ||
1426 | const struct port_info *pi = netdev_priv(dev); | ||
1427 | struct adapter *adapter = pi->adapter; | ||
1428 | struct sge *s = &adapter->sge; | ||
1429 | |||
1430 | if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending || | ||
1431 | e->tx_pending > MAX_TXQ_ENTRIES || | ||
1432 | e->rx_mini_pending > MAX_RSPQ_ENTRIES || | ||
1433 | e->rx_mini_pending < MIN_RSPQ_ENTRIES || | ||
1434 | e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES) | ||
1435 | return -EINVAL; | ||
1436 | |||
1437 | if (adapter->flags & FULL_INIT_DONE) | ||
1438 | return -EBUSY; | ||
1439 | |||
1440 | for (i = 0; i < pi->nqsets; ++i) { | ||
1441 | s->ethtxq[pi->first_qset + i].q.size = e->tx_pending; | ||
1442 | s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8; | ||
1443 | s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending; | ||
1444 | } | ||
1445 | return 0; | ||
1446 | } | ||
1447 | |||
1448 | static int closest_timer(const struct sge *s, int time) | ||
1449 | { | ||
1450 | int i, delta, match = 0, min_delta = INT_MAX; | ||
1451 | |||
1452 | for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { | ||
1453 | delta = time - s->timer_val[i]; | ||
1454 | if (delta < 0) | ||
1455 | delta = -delta; | ||
1456 | if (delta < min_delta) { | ||
1457 | min_delta = delta; | ||
1458 | match = i; | ||
1459 | } | ||
1460 | } | ||
1461 | return match; | ||
1462 | } | ||
1463 | |||
1464 | static int closest_thres(const struct sge *s, int thres) | ||
1465 | { | ||
1466 | int i, delta, match = 0, min_delta = INT_MAX; | ||
1467 | |||
1468 | for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { | ||
1469 | delta = thres - s->counter_val[i]; | ||
1470 | if (delta < 0) | ||
1471 | delta = -delta; | ||
1472 | if (delta < min_delta) { | ||
1473 | min_delta = delta; | ||
1474 | match = i; | ||
1475 | } | ||
1476 | } | ||
1477 | return match; | ||
1478 | } | ||
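closest_timer() and closest_thres() are the same nearest-entry scan over tables of different element widths, which is presumably why two copies exist. A hypothetical shared form (not part of the driver; it could only replace both if the tables shared an element type):

	static int closest_idx(const int *arr, int n, int val)
	{
		int i, delta, match = 0, min_delta = INT_MAX;

		for (i = 0; i < n; i++) {
			delta = val - arr[i];
			if (delta < 0)
				delta = -delta;
			if (delta < min_delta) {
				min_delta = delta;
				match = i;
			}
		}
		return match;
	}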
1479 | |||
1480 | /* | ||
1481 | * Return a queue's interrupt hold-off time in us. 0 means no timer. | ||
1482 | */ | ||
1483 | static unsigned int qtimer_val(const struct adapter *adap, | ||
1484 | const struct sge_rspq *q) | ||
1485 | { | ||
1486 | unsigned int idx = q->intr_params >> 1; | ||
1487 | |||
1488 | return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0; | ||
1489 | } | ||
1490 | |||
1491 | /** | ||
1492 | * set_rxq_intr_params - set a queue's interrupt holdoff parameters | ||
1493 | * @adap: the adapter | ||
1494 | * @q: the Rx queue | ||
1495 | * @us: the hold-off time in us, or 0 to disable timer | ||
1496 | * @cnt: the hold-off packet count, or 0 to disable counter | ||
1497 | * | ||
1498 | * Sets an Rx queue's interrupt hold-off time and packet count. At least | ||
1499 | * one of the two needs to be enabled for the queue to generate interrupts. | ||
1500 | */ | ||
1501 | static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q, | ||
1502 | unsigned int us, unsigned int cnt) | ||
1503 | { | ||
1504 | if ((us | cnt) == 0) | ||
1505 | cnt = 1; | ||
1506 | |||
1507 | if (cnt) { | ||
1508 | int err; | ||
1509 | u32 v, new_idx; | ||
1510 | |||
1511 | new_idx = closest_thres(&adap->sge, cnt); | ||
1512 | if (q->desc && q->pktcnt_idx != new_idx) { | ||
1513 | /* the queue has already been created, update it */ | ||
1514 | v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | | ||
1515 | FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | | ||
1516 | FW_PARAMS_PARAM_YZ(q->cntxt_id); | ||
1517 | err = t4_set_params(adap, 0, 0, 0, 1, &v, &new_idx); | ||
1518 | if (err) | ||
1519 | return err; | ||
1520 | } | ||
1521 | q->pktcnt_idx = new_idx; | ||
1522 | } | ||
1523 | |||
1524 | us = us == 0 ? 6 : closest_timer(&adap->sge, us); | ||
1525 | q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0); | ||
1526 | return 0; | ||
1527 | } | ||
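Note the us == 0 && cnt == 0 normalization above: a queue with neither the timer nor the counter armed would never interrupt, so the function quietly falls back to cnt = 1, an interrupt per packet. A hedged sketch of a typical moderated setting, mirroring what set_coalesce() below does for "ethtool -C ethX rx-usecs 50 rx-frames 16":

	/* Sketch: at most one interrupt per 50us or per 16 packets,
	 * whichever trips first, on a port's first ethtool queue set. */
	err = set_rxq_intr_params(adap,
				  &adap->sge.ethrxq[pi->first_qset].rspq,
				  50, 16);
	if (err)
		return err;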
1528 | |||
1529 | static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) | ||
1530 | { | ||
1531 | const struct port_info *pi = netdev_priv(dev); | ||
1532 | struct adapter *adap = pi->adapter; | ||
1533 | |||
1534 | return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq, | ||
1535 | c->rx_coalesce_usecs, c->rx_max_coalesced_frames); | ||
1536 | } | ||
1537 | |||
1538 | static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) | ||
1539 | { | ||
1540 | const struct port_info *pi = netdev_priv(dev); | ||
1541 | const struct adapter *adap = pi->adapter; | ||
1542 | const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq; | ||
1543 | |||
1544 | c->rx_coalesce_usecs = qtimer_val(adap, rq); | ||
1545 | c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ? | ||
1546 | adap->sge.counter_val[rq->pktcnt_idx] : 0; | ||
1547 | return 0; | ||
1548 | } | ||
1549 | |||
1550 | /* | ||
1551 | * Translate a physical EEPROM address to virtual. The first 1K is accessed | ||
1552 | * through virtual addresses starting at 31K, the rest is accessed through | ||
1553 | * virtual addresses starting at 0. This mapping is correct only for PF0. | ||
1554 | */ | ||
1555 | static int eeprom_ptov(unsigned int phys_addr) | ||
1556 | { | ||
1557 | if (phys_addr < 1024) | ||
1558 | return phys_addr + (31 << 10); | ||
1559 | if (phys_addr < EEPROMSIZE) | ||
1560 | return phys_addr - 1024; | ||
1561 | return -EINVAL; | ||
1562 | } | ||
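A standalone check of the mapping (EEPROMSIZE comes from t4_hw.h, so it is passed in as a parameter here rather than hard-coded):

#include <assert.h>

/* Mirror of eeprom_ptov() for illustration only. */
static int ptov(unsigned int phys, unsigned int eepromsize)
{
	if (phys < 1024)
		return phys + (31 << 10);
	if (phys < eepromsize)
		return phys - 1024;
	return -1;
}

int main(void)
{
	/* 32768 below is a placeholder for EEPROMSIZE */
	assert(ptov(0, 32768) == 31744);	/* first 1K lands at 31K */
	assert(ptov(1023, 32768) == 32767);	/* ...up to just under 32K */
	assert(ptov(1024, 32768) == 0);		/* the rest shifts down 1K */
	return 0;
}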
1563 | |||
1564 | /* | ||
1565 | * The next two routines implement eeprom read/write from physical addresses. | ||
1566 | * The physical->virtual translation is correct only for PF0. | ||
1567 | */ | ||
1568 | static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) | ||
1569 | { | ||
1570 | int vaddr = eeprom_ptov(phys_addr); | ||
1571 | |||
1572 | if (vaddr >= 0) | ||
1573 | vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v); | ||
1574 | return vaddr < 0 ? vaddr : 0; | ||
1575 | } | ||
1576 | |||
1577 | static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v) | ||
1578 | { | ||
1579 | int vaddr = eeprom_ptov(phys_addr); | ||
1580 | |||
1581 | if (vaddr >= 0) | ||
1582 | vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v); | ||
1583 | return vaddr < 0 ? vaddr : 0; | ||
1584 | } | ||
1585 | |||
1586 | #define EEPROM_MAGIC 0x38E2F10C | ||
1587 | |||
1588 | static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, | ||
1589 | u8 *data) | ||
1590 | { | ||
1591 | int i, err = 0; | ||
1592 | struct adapter *adapter = netdev2adap(dev); | ||
1593 | |||
1594 | u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL); | ||
1595 | if (!buf) | ||
1596 | return -ENOMEM; | ||
1597 | |||
1598 | e->magic = EEPROM_MAGIC; | ||
1599 | for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4) | ||
1600 | err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]); | ||
1601 | |||
1602 | if (!err) | ||
1603 | memcpy(data, buf + e->offset, e->len); | ||
1604 | kfree(buf); | ||
1605 | return err; | ||
1606 | } | ||
1607 | |||
1608 | static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, | ||
1609 | u8 *data) | ||
1610 | { | ||
1611 | u8 *buf; | ||
1612 | int err = 0; | ||
1613 | u32 aligned_offset, aligned_len, *p; | ||
1614 | struct adapter *adapter = netdev2adap(dev); | ||
1615 | |||
1616 | if (eeprom->magic != EEPROM_MAGIC) | ||
1617 | return -EINVAL; | ||
1618 | |||
1619 | aligned_offset = eeprom->offset & ~3; | ||
1620 | aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3; | ||
1621 | |||
1622 | if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { | ||
1623 | /* | ||
1624 | * RMW possibly needed for first or last words. | ||
1625 | */ | ||
1626 | buf = kmalloc(aligned_len, GFP_KERNEL); | ||
1627 | if (!buf) | ||
1628 | return -ENOMEM; | ||
1629 | err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf); | ||
1630 | if (!err && aligned_len > 4) | ||
1631 | err = eeprom_rd_phys(adapter, | ||
1632 | aligned_offset + aligned_len - 4, | ||
1633 | (u32 *)&buf[aligned_len - 4]); | ||
1634 | if (err) | ||
1635 | goto out; | ||
1636 | memcpy(buf + (eeprom->offset & 3), data, eeprom->len); | ||
1637 | } else | ||
1638 | buf = data; | ||
1639 | |||
1640 | err = t4_seeprom_wp(adapter, false); | ||
1641 | if (err) | ||
1642 | goto out; | ||
1643 | |||
1644 | for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) { | ||
1645 | err = eeprom_wr_phys(adapter, aligned_offset, *p); | ||
1646 | aligned_offset += 4; | ||
1647 | } | ||
1648 | |||
1649 | if (!err) | ||
1650 | err = t4_seeprom_wp(adapter, true); | ||
1651 | out: | ||
1652 | if (buf != data) | ||
1653 | kfree(buf); | ||
1654 | return err; | ||
1655 | } | ||
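The alignment arithmetic deserves a worked example: a 3-byte write at offset 2 straddles two 4-byte VPD words, so both are read back, patched, and rewritten.

	/* Values only, no I/O: offset = 2, len = 3 touches bytes 2..4,
	 * i.e. the aligned words at 0..3 and 4..7. */
	unsigned int offset = 2, len = 3;
	unsigned int aligned_offset = offset & ~3;		   /* 0 */
	unsigned int aligned_len = (len + (offset & 3) + 3) & ~3; /* 8 */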
1656 | |||
1657 | static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) | ||
1658 | { | ||
1659 | int ret; | ||
1660 | const struct firmware *fw; | ||
1661 | struct adapter *adap = netdev2adap(netdev); | ||
1662 | |||
1663 | ef->data[sizeof(ef->data) - 1] = '\0'; | ||
1664 | ret = request_firmware(&fw, ef->data, adap->pdev_dev); | ||
1665 | if (ret < 0) | ||
1666 | return ret; | ||
1667 | |||
1668 | ret = t4_load_fw(adap, fw->data, fw->size); | ||
1669 | release_firmware(fw); | ||
1670 | if (!ret) | ||
1671 | dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data); | ||
1672 | return ret; | ||
1673 | } | ||
1674 | |||
1675 | #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC) | ||
1676 | #define BCAST_CRC 0xa0ccc1a6 | ||
1677 | |||
1678 | static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
1679 | { | ||
1680 | wol->supported = WOL_SUPPORTED; | ||
1681 | wol->wolopts = netdev2adap(dev)->wol; | ||
1682 | memset(&wol->sopass, 0, sizeof(wol->sopass)); | ||
1683 | } | ||
1684 | |||
1685 | static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
1686 | { | ||
1687 | int err = 0; | ||
1688 | struct port_info *pi = netdev_priv(dev); | ||
1689 | |||
1690 | if (wol->wolopts & ~WOL_SUPPORTED) | ||
1691 | return -EINVAL; | ||
1692 | t4_wol_magic_enable(pi->adapter, pi->tx_chan, | ||
1693 | (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL); | ||
1694 | if (wol->wolopts & WAKE_BCAST) { | ||
1695 | err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL, | ||
1696 | ~0ULL, 0, false); | ||
1697 | if (!err) | ||
1698 | err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1, | ||
1699 | ~6ULL, ~0ULL, BCAST_CRC, true); | ||
1700 | } else | ||
1701 | t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false); | ||
1702 | return err; | ||
1703 | } | ||
1704 | |||
1705 | static int set_tso(struct net_device *dev, u32 value) | ||
1706 | { | ||
1707 | if (value) | ||
1708 | dev->features |= NETIF_F_TSO | NETIF_F_TSO6; | ||
1709 | else | ||
1710 | dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); | ||
1711 | return 0; | ||
1712 | } | ||
1713 | |||
1714 | static struct ethtool_ops cxgb_ethtool_ops = { | ||
1715 | .get_settings = get_settings, | ||
1716 | .set_settings = set_settings, | ||
1717 | .get_drvinfo = get_drvinfo, | ||
1718 | .get_msglevel = get_msglevel, | ||
1719 | .set_msglevel = set_msglevel, | ||
1720 | .get_ringparam = get_sge_param, | ||
1721 | .set_ringparam = set_sge_param, | ||
1722 | .get_coalesce = get_coalesce, | ||
1723 | .set_coalesce = set_coalesce, | ||
1724 | .get_eeprom_len = get_eeprom_len, | ||
1725 | .get_eeprom = get_eeprom, | ||
1726 | .set_eeprom = set_eeprom, | ||
1727 | .get_pauseparam = get_pauseparam, | ||
1728 | .set_pauseparam = set_pauseparam, | ||
1729 | .get_rx_csum = get_rx_csum, | ||
1730 | .set_rx_csum = set_rx_csum, | ||
1731 | .set_tx_csum = ethtool_op_set_tx_ipv6_csum, | ||
1732 | .set_sg = ethtool_op_set_sg, | ||
1733 | .get_link = ethtool_op_get_link, | ||
1734 | .get_strings = get_strings, | ||
1735 | .phys_id = identify_port, | ||
1736 | .nway_reset = restart_autoneg, | ||
1737 | .get_sset_count = get_sset_count, | ||
1738 | .get_ethtool_stats = get_stats, | ||
1739 | .get_regs_len = get_regs_len, | ||
1740 | .get_regs = get_regs, | ||
1741 | .get_wol = get_wol, | ||
1742 | .set_wol = set_wol, | ||
1743 | .set_tso = set_tso, | ||
1744 | .flash_device = set_flash, | ||
1745 | }; | ||
1746 | |||
1747 | /* | ||
1748 | * debugfs support | ||
1749 | */ | ||
1750 | |||
1751 | static int mem_open(struct inode *inode, struct file *file) | ||
1752 | { | ||
1753 | file->private_data = inode->i_private; | ||
1754 | return 0; | ||
1755 | } | ||
1756 | |||
1757 | static ssize_t mem_read(struct file *file, char __user *buf, size_t count, | ||
1758 | loff_t *ppos) | ||
1759 | { | ||
1760 | loff_t pos = *ppos; | ||
1761 | loff_t avail = file->f_path.dentry->d_inode->i_size; | ||
1762 | unsigned int mem = (uintptr_t)file->private_data & 3; | ||
1763 | struct adapter *adap = file->private_data - mem; | ||
1764 | |||
1765 | if (pos < 0) | ||
1766 | return -EINVAL; | ||
1767 | if (pos >= avail) | ||
1768 | return 0; | ||
1769 | if (count > avail - pos) | ||
1770 | count = avail - pos; | ||
1771 | |||
1772 | while (count) { | ||
1773 | size_t len; | ||
1774 | int ret, ofst; | ||
1775 | __be32 data[16]; | ||
1776 | |||
1777 | if (mem == MEM_MC) | ||
1778 | ret = t4_mc_read(adap, pos, data, NULL); | ||
1779 | else | ||
1780 | ret = t4_edc_read(adap, mem, pos, data, NULL); | ||
1781 | if (ret) | ||
1782 | return ret; | ||
1783 | |||
1784 | ofst = pos % sizeof(data); | ||
1785 | len = min(count, sizeof(data) - ofst); | ||
1786 | if (copy_to_user(buf, (u8 *)data + ofst, len)) | ||
1787 | return -EFAULT; | ||
1788 | |||
1789 | buf += len; | ||
1790 | pos += len; | ||
1791 | count -= len; | ||
1792 | } | ||
1793 | count = pos - *ppos; | ||
1794 | *ppos = pos; | ||
1795 | return count; | ||
1796 | } | ||
1797 | |||
1798 | static const struct file_operations mem_debugfs_fops = { | ||
1799 | .owner = THIS_MODULE, | ||
1800 | .open = mem_open, | ||
1801 | .read = mem_read, | ||
1802 | }; | ||
1803 | |||
1804 | static void __devinit add_debugfs_mem(struct adapter *adap, const char *name, | ||
1805 | unsigned int idx, unsigned int size_mb) | ||
1806 | { | ||
1807 | struct dentry *de; | ||
1808 | |||
1809 | de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root, | ||
1810 | (void *)adap + idx, &mem_debugfs_fops); | ||
1811 | if (de && de->d_inode) | ||
1812 | de->d_inode->i_size = size_mb << 20; | ||
1813 | } | ||
1814 | |||
1815 | static int __devinit setup_debugfs(struct adapter *adap) | ||
1816 | { | ||
1817 | int i; | ||
1818 | |||
1819 | if (IS_ERR_OR_NULL(adap->debugfs_root)) | ||
1820 | return -1; | ||
1821 | |||
1822 | i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE); | ||
1823 | if (i & EDRAM0_ENABLE) | ||
1824 | add_debugfs_mem(adap, "edc0", MEM_EDC0, 5); | ||
1825 | if (i & EDRAM1_ENABLE) | ||
1826 | add_debugfs_mem(adap, "edc1", MEM_EDC1, 5); | ||
1827 | if (i & EXT_MEM_ENABLE) | ||
1828 | add_debugfs_mem(adap, "mc", MEM_MC, | ||
1829 | EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR))); | ||
1830 | if (adap->l2t) | ||
1831 | debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap, | ||
1832 | &t4_l2t_fops); | ||
1833 | return 0; | ||
1834 | } | ||
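From userspace the nodes read as flat files sized by add_debugfs_mem() above. A hedged sketch (the debugfs mount point and the per-device directory name are assumptions; the directory itself is created elsewhere in the probe path):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[64];
	/* hypothetical path: one adapter at PCI address 0000:01:00.0 */
	int fd = open("/sys/kernel/debug/cxgb4/0000:01:00.0/edc0",
		      O_RDONLY);
	ssize_t i, n = fd < 0 ? -1 : read(fd, buf, sizeof(buf));

	for (i = 0; i < n; i++)
		printf("%02x%c", buf[i], i % 16 == 15 ? '\n' : ' ');
	if (fd >= 0)
		close(fd);
	return n < 0;
}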
1835 | |||
1836 | /* | ||
1837 | * upper-layer driver support | ||
1838 | */ | ||
1839 | |||
1840 | /* | ||
1841 | * Allocate an active-open TID and bind it to the supplied caller data. | ||
1842 | */ | ||
1843 | int cxgb4_alloc_atid(struct tid_info *t, void *data) | ||
1844 | { | ||
1845 | int atid = -1; | ||
1846 | |||
1847 | spin_lock_bh(&t->atid_lock); | ||
1848 | if (t->afree) { | ||
1849 | union aopen_entry *p = t->afree; | ||
1850 | |||
1851 | atid = p - t->atid_tab; | ||
1852 | t->afree = p->next; | ||
1853 | p->data = data; | ||
1854 | t->atids_in_use++; | ||
1855 | } | ||
1856 | spin_unlock_bh(&t->atid_lock); | ||
1857 | return atid; | ||
1858 | } | ||
1859 | EXPORT_SYMBOL(cxgb4_alloc_atid); | ||
1860 | |||
1861 | /* | ||
1862 | * Release an active-open TID. | ||
1863 | */ | ||
1864 | void cxgb4_free_atid(struct tid_info *t, unsigned int atid) | ||
1865 | { | ||
1866 | union aopen_entry *p = &t->atid_tab[atid]; | ||
1867 | |||
1868 | spin_lock_bh(&t->atid_lock); | ||
1869 | p->next = t->afree; | ||
1870 | t->afree = p; | ||
1871 | t->atids_in_use--; | ||
1872 | spin_unlock_bh(&t->atid_lock); | ||
1873 | } | ||
1874 | EXPORT_SYMBOL(cxgb4_free_atid); | ||
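A hedged sketch of the atid life cycle as a ULD would drive it; "ep" stands in for the ULD's per-connection state, which the reply path can recover through the lookup helper in cxgb4_uld.h:

	/* Sketch only; ep is hypothetical ULD connection state. */
	atid = cxgb4_alloc_atid(t, ep);
	if (atid < 0)
		return -ENOMEM;		/* atid table exhausted */
	/* ... atid rides in the active-open CPL; the reply handler
	 * maps it back to ep, then retires the entry ... */
	cxgb4_free_atid(t, atid);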
1875 | |||
1876 | /* | ||
1877 | * Allocate a server TID and bind it to the supplied caller data. | ||
1878 | */ | ||
1879 | int cxgb4_alloc_stid(struct tid_info *t, int family, void *data) | ||
1880 | { | ||
1881 | int stid; | ||
1882 | |||
1883 | spin_lock_bh(&t->stid_lock); | ||
1884 | if (family == PF_INET) { | ||
1885 | stid = find_first_zero_bit(t->stid_bmap, t->nstids); | ||
1886 | if (stid < t->nstids) | ||
1887 | __set_bit(stid, t->stid_bmap); | ||
1888 | else | ||
1889 | stid = -1; | ||
1890 | } else { | ||
1891 | stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2); | ||
1892 | if (stid < 0) | ||
1893 | stid = -1; | ||
1894 | } | ||
1895 | if (stid >= 0) { | ||
1896 | t->stid_tab[stid].data = data; | ||
1897 | stid += t->stid_base; | ||
1898 | t->stids_in_use++; | ||
1899 | } | ||
1900 | spin_unlock_bh(&t->stid_lock); | ||
1901 | return stid; | ||
1902 | } | ||
1903 | EXPORT_SYMBOL(cxgb4_alloc_stid); | ||
1904 | |||
1905 | /* | ||
1906 | * Release a server TID. | ||
1907 | */ | ||
1908 | void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) | ||
1909 | { | ||
1910 | stid -= t->stid_base; | ||
1911 | spin_lock_bh(&t->stid_lock); | ||
1912 | if (family == PF_INET) | ||
1913 | __clear_bit(stid, t->stid_bmap); | ||
1914 | else | ||
1915 | bitmap_release_region(t->stid_bmap, stid, 2); | ||
1916 | t->stid_tab[stid].data = NULL; | ||
1917 | t->stids_in_use--; | ||
1918 | spin_unlock_bh(&t->stid_lock); | ||
1919 | } | ||
1920 | EXPORT_SYMBOL(cxgb4_free_stid); | ||
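Note the asymmetry in the two paths above: an IPv4 server consumes a single stid bit, while an IPv6 server takes an order-2 region, i.e. four consecutive 4-aligned stids (presumably because an IPv6 address occupies four times the lookup width). A sketch:

	/* Sketch only: stid6 reserves a 4-aligned block of four
	 * entries; each allocation is freed with its own family. */
	stid4 = cxgb4_alloc_stid(t, PF_INET, data);
	stid6 = cxgb4_alloc_stid(t, PF_INET6, data);
	/* ... servers in service ... */
	cxgb4_free_stid(t, stid4, PF_INET);
	cxgb4_free_stid(t, stid6, PF_INET6);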
1921 | |||
1922 | /* | ||
1923 | * Populate a TID_RELEASE WR. Caller must properly size the skb. | ||
1924 | */ | ||
1925 | static void mk_tid_release(struct sk_buff *skb, unsigned int chan, | ||
1926 | unsigned int tid) | ||
1927 | { | ||
1928 | struct cpl_tid_release *req; | ||
1929 | |||
1930 | set_wr_txq(skb, CPL_PRIORITY_SETUP, chan); | ||
1931 | req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req)); | ||
1932 | INIT_TP_WR(req, tid); | ||
1933 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid)); | ||
1934 | } | ||
1935 | |||
1936 | /* | ||
1937 | * Queue a TID release request and, if necessary, schedule the work item | ||
1938 | * that processes such requests. | ||
1939 | */ | ||
1940 | void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, | ||
1941 | unsigned int tid) | ||
1942 | { | ||
1943 | void **p = &t->tid_tab[tid]; | ||
1944 | struct adapter *adap = container_of(t, struct adapter, tids); | ||
1945 | |||
1946 | spin_lock_bh(&adap->tid_release_lock); | ||
1947 | *p = adap->tid_release_head; | ||
1948 | /* Low 2 bits encode the Tx channel number */ | ||
1949 | adap->tid_release_head = (void **)((uintptr_t)p | chan); | ||
1950 | if (!adap->tid_release_task_busy) { | ||
1951 | adap->tid_release_task_busy = true; | ||
1952 | schedule_work(&adap->tid_release_task); | ||
1953 | } | ||
1954 | spin_unlock_bh(&adap->tid_release_lock); | ||
1955 | } | ||
1956 | EXPORT_SYMBOL(cxgb4_queue_tid_release); | ||
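The release list relies on pointer tagging: tid_tab slots are pointer-aligned, so the two low bits of every list link are free to carry the Tx channel, exactly as process_tid_release_list() below unpicks. A standalone illustration of the tag/untag math:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	void *slot[4];			/* stand-in for tid_tab entries */
	void **p = &slot[2];
	uintptr_t chan = 3;		/* a channel number fits in 2 bits */
	void **tagged = (void **)((uintptr_t)p | chan);

	assert(((uintptr_t)tagged & 3) == chan);	  /* recover chan */
	assert((void **)((uintptr_t)tagged - chan) == p); /* recover link */
	return 0;
}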
1957 | |||
1958 | /* | ||
1959 | * Process the list of pending TID release requests. | ||
1960 | */ | ||
1961 | static void process_tid_release_list(struct work_struct *work) | ||
1962 | { | ||
1963 | struct sk_buff *skb; | ||
1964 | struct adapter *adap; | ||
1965 | |||
1966 | adap = container_of(work, struct adapter, tid_release_task); | ||
1967 | |||
1968 | spin_lock_bh(&adap->tid_release_lock); | ||
1969 | while (adap->tid_release_head) { | ||
1970 | void **p = adap->tid_release_head; | ||
1971 | unsigned int chan = (uintptr_t)p & 3; | ||
1972 | p = (void *)p - chan; | ||
1973 | |||
1974 | adap->tid_release_head = *p; | ||
1975 | *p = NULL; | ||
1976 | spin_unlock_bh(&adap->tid_release_lock); | ||
1977 | |||
1978 | while (!(skb = alloc_skb(sizeof(struct cpl_tid_release), | ||
1979 | GFP_KERNEL))) | ||
1980 | schedule_timeout_uninterruptible(1); | ||
1981 | |||
1982 | mk_tid_release(skb, chan, p - adap->tids.tid_tab); | ||
1983 | t4_ofld_send(adap, skb); | ||
1984 | spin_lock_bh(&adap->tid_release_lock); | ||
1985 | } | ||
1986 | adap->tid_release_task_busy = false; | ||
1987 | spin_unlock_bh(&adap->tid_release_lock); | ||
1988 | } | ||
1989 | |||
1990 | /* | ||
1991 | * Release a TID and inform the HW. If we are unable to allocate the | ||
1992 | * release message, we defer it to a work queue. | ||
1993 | */ | ||
1994 | void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid) | ||
1995 | { | ||
1996 | void *old; | ||
1997 | struct sk_buff *skb; | ||
1998 | struct adapter *adap = container_of(t, struct adapter, tids); | ||
1999 | |||
2000 | old = t->tid_tab[tid]; | ||
2001 | skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC); | ||
2002 | if (likely(skb)) { | ||
2003 | t->tid_tab[tid] = NULL; | ||
2004 | mk_tid_release(skb, chan, tid); | ||
2005 | t4_ofld_send(adap, skb); | ||
2006 | } else | ||
2007 | cxgb4_queue_tid_release(t, chan, tid); | ||
2008 | if (old) | ||
2009 | atomic_dec(&t->tids_in_use); | ||
2010 | } | ||
2011 | EXPORT_SYMBOL(cxgb4_remove_tid); | ||
2012 | |||
2013 | /* | ||
2014 | * Allocate and initialize the TID tables. Returns 0 on success. | ||
2015 | */ | ||
2016 | static int tid_init(struct tid_info *t) | ||
2017 | { | ||
2018 | size_t size; | ||
2019 | unsigned int natids = t->natids; | ||
2020 | |||
2021 | size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) + | ||
2022 | t->nstids * sizeof(*t->stid_tab) + | ||
2023 | BITS_TO_LONGS(t->nstids) * sizeof(long); | ||
2024 | t->tid_tab = t4_alloc_mem(size); | ||
2025 | if (!t->tid_tab) | ||
2026 | return -ENOMEM; | ||
2027 | |||
2028 | t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids]; | ||
2029 | t->stid_tab = (struct serv_entry *)&t->atid_tab[natids]; | ||
2030 | t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids]; | ||
2031 | spin_lock_init(&t->stid_lock); | ||
2032 | spin_lock_init(&t->atid_lock); | ||
2033 | |||
2034 | t->stids_in_use = 0; | ||
2035 | t->afree = NULL; | ||
2036 | t->atids_in_use = 0; | ||
2037 | atomic_set(&t->tids_in_use, 0); | ||
2038 | |||
2039 | /* Set up the free list for atid_tab and clear the stid bitmap. */ | ||
2040 | if (natids) { | ||
2041 | while (--natids) | ||
2042 | t->atid_tab[natids - 1].next = &t->atid_tab[natids]; | ||
2043 | t->afree = t->atid_tab; | ||
2044 | } | ||
2045 | bitmap_zero(t->stid_bmap, t->nstids); | ||
2046 | return 0; | ||
2047 | } | ||
2048 | |||
2049 | /** | ||
2050 | * cxgb4_create_server - create an IPv4 server | ||
2051 | * @dev: the device | ||
2052 | * @stid: the server TID | ||
2053 | * @sip: local IP address to bind server to | ||
2054 | * @sport: the server's TCP port | ||
2055 | * @queue: queue to direct messages from this server to | ||
2056 | * | ||
2057 | * Create an IPv4 listening server for the given port and address. | ||
2058 | * Returns <0 on error and one of the %NET_XMIT_* values on success. | ||
2059 | */ | ||
2060 | int cxgb4_create_server(const struct net_device *dev, unsigned int stid, | ||
2061 | __be32 sip, __be16 sport, unsigned int queue) | ||
2062 | { | ||
2063 | unsigned int chan; | ||
2064 | struct sk_buff *skb; | ||
2065 | struct adapter *adap; | ||
2066 | struct cpl_pass_open_req *req; | ||
2067 | |||
2068 | skb = alloc_skb(sizeof(*req), GFP_KERNEL); | ||
2069 | if (!skb) | ||
2070 | return -ENOMEM; | ||
2071 | |||
2072 | adap = netdev2adap(dev); | ||
2073 | req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req)); | ||
2074 | INIT_TP_WR(req, 0); | ||
2075 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid)); | ||
2076 | req->local_port = sport; | ||
2077 | req->peer_port = htons(0); | ||
2078 | req->local_ip = sip; | ||
2079 | req->peer_ip = htonl(0); | ||
2080 | chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan; | ||
2081 | req->opt0 = cpu_to_be64(TX_CHAN(chan)); | ||
2082 | req->opt1 = cpu_to_be64(CONN_POLICY_ASK | | ||
2083 | SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); | ||
2084 | return t4_mgmt_tx(adap, skb); | ||
2085 | } | ||
2086 | EXPORT_SYMBOL(cxgb4_create_server); | ||
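A hedged sketch of how a ULD would bring up a listener: reserve the server TID first, then issue the request; the wildcard address, port 80, and the listen_ctx/queue names are illustrative only.

	/* Sketch only; listen_ctx and queue are hypothetical. */
	stid = cxgb4_alloc_stid(&adap->tids, PF_INET, listen_ctx);
	if (stid < 0)
		return -ENOMEM;
	ret = cxgb4_create_server(dev, stid, htonl(INADDR_ANY),
				  htons(80), queue);
	if (ret < 0)
		cxgb4_free_stid(&adap->tids, stid, PF_INET);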
2087 | |||
2088 | /** | ||
2089 | * cxgb4_create_server6 - create an IPv6 server | ||
2090 | * @dev: the device | ||
2091 | * @stid: the server TID | ||
2092 | * @sip: local IPv6 address to bind server to | ||
2093 | * @sport: the server's TCP port | ||
2094 | * @queue: queue to direct messages from this server to | ||
2095 | * | ||
2096 | * Create an IPv6 server for the given port and address. | ||
2097 | * Returns <0 on error and one of the %NET_XMIT_* values on success. | ||
2098 | */ | ||
2099 | int cxgb4_create_server6(const struct net_device *dev, unsigned int stid, | ||
2100 | const struct in6_addr *sip, __be16 sport, | ||
2101 | unsigned int queue) | ||
2102 | { | ||
2103 | unsigned int chan; | ||
2104 | struct sk_buff *skb; | ||
2105 | struct adapter *adap; | ||
2106 | struct cpl_pass_open_req6 *req; | ||
2107 | |||
2108 | skb = alloc_skb(sizeof(*req), GFP_KERNEL); | ||
2109 | if (!skb) | ||
2110 | return -ENOMEM; | ||
2111 | |||
2112 | adap = netdev2adap(dev); | ||
2113 | req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req)); | ||
2114 | INIT_TP_WR(req, 0); | ||
2115 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid)); | ||
2116 | req->local_port = sport; | ||
2117 | req->peer_port = htons(0); | ||
2118 | req->local_ip_hi = *(__be64 *)(sip->s6_addr); | ||
2119 | req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8); | ||
2120 | req->peer_ip_hi = cpu_to_be64(0); | ||
2121 | req->peer_ip_lo = cpu_to_be64(0); | ||
2122 | chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan; | ||
2123 | req->opt0 = cpu_to_be64(TX_CHAN(chan)); | ||
2124 | req->opt1 = cpu_to_be64(CONN_POLICY_ASK | | ||
2125 | SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); | ||
2126 | return t4_mgmt_tx(adap, skb); | ||
2127 | } | ||
2128 | EXPORT_SYMBOL(cxgb4_create_server6); | ||
2129 | |||
2130 | /** | ||
2131 | * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU | ||
2132 | * @mtus: the HW MTU table | ||
2133 | * @mtu: the target MTU | ||
2134 | * @idx: index of selected entry in the MTU table | ||
2135 | * | ||
2136 | * Returns the index and the value in the HW MTU table that is closest to | ||
2137 | * but does not exceed @mtu, unless @mtu is smaller than any value in the | ||
2138 | * table, in which case that smallest available value is selected. | ||
2139 | */ | ||
2140 | unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, | ||
2141 | unsigned int *idx) | ||
2142 | { | ||
2143 | unsigned int i = 0; | ||
2144 | |||
2145 | while (i < NMTUS - 1 && mtus[i + 1] <= mtu) | ||
2146 | ++i; | ||
2147 | if (idx) | ||
2148 | *idx = i; | ||
2149 | return mtus[i]; | ||
2150 | } | ||
2151 | EXPORT_SYMBOL(cxgb4_best_mtu); | ||
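A standalone check of the selection rule; the table here is hypothetical (the real one is programmed into adap->params.mtus during configuration):

#include <assert.h>

/* Mirror of cxgb4_best_mtu() over an n-entry table, illustration only. */
static unsigned int best_mtu(const unsigned short *mtus, unsigned int n,
			     unsigned short mtu, unsigned int *idx)
{
	unsigned int i = 0;

	while (i < n - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}

int main(void)
{
	static const unsigned short mtus[16] = {
		88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002,
		4096, 4352, 8192, 9000, 9600, 9900
	};
	unsigned int idx;

	assert(best_mtu(mtus, 16, 1400, &idx) == 1280 && idx == 6);
	assert(best_mtu(mtus, 16, 64, &idx) == 88 && idx == 0);
	return 0;
}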
2152 | |||
2153 | /** | ||
2154 | * cxgb4_port_chan - get the HW channel of a port | ||
2155 | * @dev: the net device for the port | ||
2156 | * | ||
2157 | * Return the HW Tx channel of the given port. | ||
2158 | */ | ||
2159 | unsigned int cxgb4_port_chan(const struct net_device *dev) | ||
2160 | { | ||
2161 | return netdev2pinfo(dev)->tx_chan; | ||
2162 | } | ||
2163 | EXPORT_SYMBOL(cxgb4_port_chan); | ||
2164 | |||
2165 | /** | ||
2166 | * cxgb4_port_viid - get the VI id of a port | ||
2167 | * @dev: the net device for the port | ||
2168 | * | ||
2169 | * Return the VI id of the given port. | ||
2170 | */ | ||
2171 | unsigned int cxgb4_port_viid(const struct net_device *dev) | ||
2172 | { | ||
2173 | return netdev2pinfo(dev)->viid; | ||
2174 | } | ||
2175 | EXPORT_SYMBOL(cxgb4_port_viid); | ||
2176 | |||
2177 | /** | ||
2178 | * cxgb4_port_idx - get the index of a port | ||
2179 | * @dev: the net device for the port | ||
2180 | * | ||
2181 | * Return the index of the given port. | ||
2182 | */ | ||
2183 | unsigned int cxgb4_port_idx(const struct net_device *dev) | ||
2184 | { | ||
2185 | return netdev2pinfo(dev)->port_id; | ||
2186 | } | ||
2187 | EXPORT_SYMBOL(cxgb4_port_idx); | ||
2188 | |||
2189 | /** | ||
2190 | * cxgb4_netdev_by_hwid - return the net device of a HW port | ||
2191 | * @pdev: identifies the adapter | ||
2192 | * @id: the HW port id | ||
2193 | * | ||
2194 | * Return the net device associated with the port attached to the given | ||
2195 | * HW channel id, or NULL if the id is not in use. | ||
2196 | */ | ||
2197 | struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id) | ||
2198 | { | ||
2199 | const struct adapter *adap = pci_get_drvdata(pdev); | ||
2200 | |||
2201 | if (!adap || id >= NCHAN) | ||
2202 | return NULL; | ||
2203 | id = adap->chan_map[id]; | ||
2204 | return id < MAX_NPORTS ? adap->port[id] : NULL; | ||
2205 | } | ||
2206 | EXPORT_SYMBOL(cxgb4_netdev_by_hwid); | ||
2207 | |||
2208 | void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, | ||
2209 | struct tp_tcp_stats *v6) | ||
2210 | { | ||
2211 | struct adapter *adap = pci_get_drvdata(pdev); | ||
2212 | |||
2213 | spin_lock(&adap->stats_lock); | ||
2214 | t4_tp_get_tcp_stats(adap, v4, v6); | ||
2215 | spin_unlock(&adap->stats_lock); | ||
2216 | } | ||
2217 | EXPORT_SYMBOL(cxgb4_get_tcp_stats); | ||
2218 | |||
2219 | void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, | ||
2220 | const unsigned int *pgsz_order) | ||
2221 | { | ||
2222 | struct adapter *adap = netdev2adap(dev); | ||
2223 | |||
2224 | t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask); | ||
2225 | t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) | | ||
2226 | HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) | | ||
2227 | HPZ3(pgsz_order[3])); | ||
2228 | } | ||
2229 | EXPORT_SYMBOL(cxgb4_iscsi_init); | ||
2230 | |||
2231 | static struct pci_driver cxgb4_driver; | ||
2232 | |||
2233 | static void check_neigh_update(struct neighbour *neigh) | ||
2234 | { | ||
2235 | const struct device *parent; | ||
2236 | const struct net_device *netdev = neigh->dev; | ||
2237 | |||
2238 | if (netdev->priv_flags & IFF_802_1Q_VLAN) | ||
2239 | netdev = vlan_dev_real_dev(netdev); | ||
2240 | parent = netdev->dev.parent; | ||
2241 | if (parent && parent->driver == &cxgb4_driver.driver) | ||
2242 | t4_l2t_update(dev_get_drvdata(parent), neigh); | ||
2243 | } | ||
2244 | |||
2245 | static int netevent_cb(struct notifier_block *nb, unsigned long event, | ||
2246 | void *data) | ||
2247 | { | ||
2248 | switch (event) { | ||
2249 | case NETEVENT_NEIGH_UPDATE: | ||
2250 | check_neigh_update(data); | ||
2251 | break; | ||
2252 | case NETEVENT_PMTU_UPDATE: | ||
2253 | case NETEVENT_REDIRECT: | ||
2254 | default: | ||
2255 | break; | ||
2256 | } | ||
2257 | return 0; | ||
2258 | } | ||
2259 | |||
2260 | static bool netevent_registered; | ||
2261 | static struct notifier_block cxgb4_netevent_nb = { | ||
2262 | .notifier_call = netevent_cb | ||
2263 | }; | ||
2264 | |||
2265 | static void uld_attach(struct adapter *adap, unsigned int uld) | ||
2266 | { | ||
2267 | void *handle; | ||
2268 | struct cxgb4_lld_info lli; | ||
2269 | |||
2270 | lli.pdev = adap->pdev; | ||
2271 | lli.l2t = adap->l2t; | ||
2272 | lli.tids = &adap->tids; | ||
2273 | lli.ports = adap->port; | ||
2274 | lli.vr = &adap->vres; | ||
2275 | lli.mtus = adap->params.mtus; | ||
2276 | if (uld == CXGB4_ULD_RDMA) { | ||
2277 | lli.rxq_ids = adap->sge.rdma_rxq; | ||
2278 | lli.nrxq = adap->sge.rdmaqs; | ||
2279 | } else if (uld == CXGB4_ULD_ISCSI) { | ||
2280 | lli.rxq_ids = adap->sge.ofld_rxq; | ||
2281 | lli.nrxq = adap->sge.ofldqsets; | ||
2282 | } | ||
2283 | lli.ntxq = adap->sge.ofldqsets; | ||
2284 | lli.nchan = adap->params.nports; | ||
2285 | lli.nports = adap->params.nports; | ||
2286 | lli.wr_cred = adap->params.ofldq_wr_cred; | ||
2287 | lli.adapter_type = adap->params.rev; | ||
2288 | lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); | ||
2289 | lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( | ||
2290 | t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF)); | ||
2291 | lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( | ||
2292 | t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF)); | ||
2293 | lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS); | ||
2294 | lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL); | ||
2295 | lli.fw_vers = adap->params.fw_vers; | ||
2296 | |||
2297 | handle = ulds[uld].add(&lli); | ||
2298 | if (IS_ERR(handle)) { | ||
2299 | dev_warn(adap->pdev_dev, | ||
2300 | "could not attach to the %s driver, error %ld\n", | ||
2301 | uld_str[uld], PTR_ERR(handle)); | ||
2302 | return; | ||
2303 | } | ||
2304 | |||
2305 | adap->uld_handle[uld] = handle; | ||
2306 | |||
2307 | if (!netevent_registered) { | ||
2308 | register_netevent_notifier(&cxgb4_netevent_nb); | ||
2309 | netevent_registered = true; | ||
2310 | } | ||
2311 | } | ||
2312 | |||
2313 | static void attach_ulds(struct adapter *adap) | ||
2314 | { | ||
2315 | unsigned int i; | ||
2316 | |||
2317 | mutex_lock(&uld_mutex); | ||
2318 | list_add_tail(&adap->list_node, &adapter_list); | ||
2319 | for (i = 0; i < CXGB4_ULD_MAX; i++) | ||
2320 | if (ulds[i].add) | ||
2321 | uld_attach(adap, i); | ||
2322 | mutex_unlock(&uld_mutex); | ||
2323 | } | ||
2324 | |||
2325 | static void detach_ulds(struct adapter *adap) | ||
2326 | { | ||
2327 | unsigned int i; | ||
2328 | |||
2329 | mutex_lock(&uld_mutex); | ||
2330 | list_del(&adap->list_node); | ||
2331 | for (i = 0; i < CXGB4_ULD_MAX; i++) | ||
2332 | if (adap->uld_handle[i]) { | ||
2333 | ulds[i].state_change(adap->uld_handle[i], | ||
2334 | CXGB4_STATE_DETACH); | ||
2335 | adap->uld_handle[i] = NULL; | ||
2336 | } | ||
2337 | if (netevent_registered && list_empty(&adapter_list)) { | ||
2338 | unregister_netevent_notifier(&cxgb4_netevent_nb); | ||
2339 | netevent_registered = false; | ||
2340 | } | ||
2341 | mutex_unlock(&uld_mutex); | ||
2342 | } | ||
2343 | |||
2344 | static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) | ||
2345 | { | ||
2346 | unsigned int i; | ||
2347 | |||
2348 | mutex_lock(&uld_mutex); | ||
2349 | for (i = 0; i < CXGB4_ULD_MAX; i++) | ||
2350 | if (adap->uld_handle[i]) | ||
2351 | ulds[i].state_change(adap->uld_handle[i], new_state); | ||
2352 | mutex_unlock(&uld_mutex); | ||
2353 | } | ||
2354 | |||
2355 | /** | ||
2356 | * cxgb4_register_uld - register an upper-layer driver | ||
2357 | * @type: the ULD type | ||
2358 | * @p: the ULD methods | ||
2359 | * | ||
2360 | * Registers an upper-layer driver with this driver and notifies the ULD | ||
2361 | * about any presently available devices that support its type. Returns | ||
2362 | * %-EBUSY if a ULD of the same type is already registered. | ||
2363 | */ | ||
2364 | int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p) | ||
2365 | { | ||
2366 | int ret = 0; | ||
2367 | struct adapter *adap; | ||
2368 | |||
2369 | if (type >= CXGB4_ULD_MAX) | ||
2370 | return -EINVAL; | ||
2371 | mutex_lock(&uld_mutex); | ||
2372 | if (ulds[type].add) { | ||
2373 | ret = -EBUSY; | ||
2374 | goto out; | ||
2375 | } | ||
2376 | ulds[type] = *p; | ||
2377 | list_for_each_entry(adap, &adapter_list, list_node) | ||
2378 | uld_attach(adap, type); | ||
2379 | out: mutex_unlock(&uld_mutex); | ||
2380 | return ret; | ||
2381 | } | ||
2382 | EXPORT_SYMBOL(cxgb4_register_uld); | ||
2383 | |||
2384 | /** | ||
2385 | * cxgb4_unregister_uld - unregister an upper-layer driver | ||
2386 | * @type: the ULD type | ||
2387 | * | ||
2388 | * Unregisters an existing upper-layer driver. | ||
2389 | */ | ||
2390 | int cxgb4_unregister_uld(enum cxgb4_uld type) | ||
2391 | { | ||
2392 | struct adapter *adap; | ||
2393 | |||
2394 | if (type >= CXGB4_ULD_MAX) | ||
2395 | return -EINVAL; | ||
2396 | mutex_lock(&uld_mutex); | ||
2397 | list_for_each_entry(adap, &adapter_list, list_node) | ||
2398 | adap->uld_handle[type] = NULL; | ||
2399 | ulds[type].add = NULL; | ||
2400 | mutex_unlock(&uld_mutex); | ||
2401 | return 0; | ||
2402 | } | ||
2403 | EXPORT_SYMBOL(cxgb4_unregister_uld); | ||
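For orientation, a minimal sketch of this registration API from a ULD's point of view; everything named demo_* is hypothetical, the ULD slot chosen is arbitrary, and the usual kernel headers (linux/err.h, linux/slab.h) plus cxgb4_uld.h are assumed. The callback signatures come from struct cxgb4_uld_info in cxgb4_uld.h below.

	/* Hypothetical per-ULD state (illustrative only, not part of this patch) */
	struct demo_dev {
		struct tid_info *tids;
		struct net_device **ports;
	};

	static void *demo_add(const struct cxgb4_lld_info *lli)
	{
		struct demo_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);

		if (!d)
			return ERR_PTR(-ENOMEM);  /* uld_attach() checks IS_ERR() */
		d->tids = lli->tids;              /* keep the LLD handles we need */
		d->ports = lli->ports;
		return d;                         /* the 'handle' in later callbacks */
	}

	static int demo_rx_handler(void *handle, const __be64 *rsp,
				   const struct pkt_gl *gl)
	{
		return 0;                         /* consume the message */
	}

	static int demo_state_change(void *handle, enum cxgb4_state new_state)
	{
		if (new_state == CXGB4_STATE_DETACH)
			kfree(handle);            /* the LLD is going away */
		return 0;
	}

	static const struct cxgb4_uld_info demo_uld_info = {
		.name         = "demo",
		.add          = demo_add,
		.rx_handler   = demo_rx_handler,
		.state_change = demo_state_change,
	};

The ULD's module init would then call cxgb4_register_uld(CXGB4_ULD_ISCSI, &demo_uld_info), which invokes demo_add() once per already-probed adapter, and its module exit would call cxgb4_unregister_uld(CXGB4_ULD_ISCSI).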
2404 | |||
2405 | /** | ||
2406 | * cxgb_up - enable the adapter | ||
2407 | * @adap: adapter being enabled | ||
2408 | * | ||
2409 | * Called when the first port is enabled, this function performs the | ||
2410 | * actions necessary to make an adapter operational, such as completing | ||
2411 | * the initialization of HW modules and enabling interrupts. | ||
2412 | * | ||
2413 | * Must be called with the rtnl lock held. | ||
2414 | */ | ||
2415 | static int cxgb_up(struct adapter *adap) | ||
2416 | { | ||
2417 | int err = 0; | ||
2418 | |||
2419 | if (!(adap->flags & FULL_INIT_DONE)) { | ||
2420 | err = setup_sge_queues(adap); | ||
2421 | if (err) | ||
2422 | goto out; | ||
2423 | err = setup_rss(adap); | ||
2424 | if (err) { | ||
2425 | t4_free_sge_resources(adap); | ||
2426 | goto out; | ||
2427 | } | ||
2428 | if (adap->flags & USING_MSIX) | ||
2429 | name_msix_vecs(adap); | ||
2430 | adap->flags |= FULL_INIT_DONE; | ||
2431 | } | ||
2432 | |||
2433 | if (adap->flags & USING_MSIX) { | ||
2434 | err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0, | ||
2435 | adap->msix_info[0].desc, adap); | ||
2436 | if (err) | ||
2437 | goto irq_err; | ||
2438 | |||
2439 | err = request_msix_queue_irqs(adap); | ||
2440 | if (err) { | ||
2441 | free_irq(adap->msix_info[0].vec, adap); | ||
2442 | goto irq_err; | ||
2443 | } | ||
2444 | } else { | ||
2445 | err = request_irq(adap->pdev->irq, t4_intr_handler(adap), | ||
2446 | (adap->flags & USING_MSI) ? 0 : IRQF_SHARED, | ||
2447 | adap->name, adap); | ||
2448 | if (err) | ||
2449 | goto irq_err; | ||
2450 | } | ||
2451 | enable_rx(adap); | ||
2452 | t4_sge_start(adap); | ||
2453 | t4_intr_enable(adap); | ||
2454 | notify_ulds(adap, CXGB4_STATE_UP); | ||
2455 | out: | ||
2456 | return err; | ||
2457 | irq_err: | ||
2458 | dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); | ||
2459 | goto out; | ||
2460 | } | ||
2461 | |||
2462 | static void cxgb_down(struct adapter *adapter) | ||
2463 | { | ||
2464 | t4_intr_disable(adapter); | ||
2465 | cancel_work_sync(&adapter->tid_release_task); | ||
2466 | adapter->tid_release_task_busy = false; | ||
2467 | |||
2468 | if (adapter->flags & USING_MSIX) { | ||
2469 | free_msix_queue_irqs(adapter); | ||
2470 | free_irq(adapter->msix_info[0].vec, adapter); | ||
2471 | } else | ||
2472 | free_irq(adapter->pdev->irq, adapter); | ||
2473 | quiesce_rx(adapter); | ||
2474 | } | ||
2475 | |||
2476 | /* | ||
2477 | * net_device operations | ||
2478 | */ | ||
2479 | static int cxgb_open(struct net_device *dev) | ||
2480 | { | ||
2481 | int err; | ||
2482 | struct port_info *pi = netdev_priv(dev); | ||
2483 | struct adapter *adapter = pi->adapter; | ||
2484 | |||
2485 | if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) | ||
2486 | return err; | ||
2487 | |||
2488 | dev->real_num_tx_queues = pi->nqsets; | ||
2489 | set_bit(pi->tx_chan, &adapter->open_device_map); | ||
2490 | link_start(dev); | ||
2491 | netif_tx_start_all_queues(dev); | ||
2492 | return 0; | ||
2493 | } | ||
2494 | |||
2495 | static int cxgb_close(struct net_device *dev) | ||
2496 | { | ||
2497 | int ret; | ||
2498 | struct port_info *pi = netdev_priv(dev); | ||
2499 | struct adapter *adapter = pi->adapter; | ||
2500 | |||
2501 | netif_tx_stop_all_queues(dev); | ||
2502 | netif_carrier_off(dev); | ||
2503 | ret = t4_enable_vi(adapter, 0, pi->viid, false, false); | ||
2504 | |||
2505 | clear_bit(pi->tx_chan, &adapter->open_device_map); | ||
2506 | |||
2507 | if (!adapter->open_device_map) | ||
2508 | cxgb_down(adapter); | ||
2509 | return 0; | ||
2510 | } | ||
2511 | |||
2512 | static struct net_device_stats *cxgb_get_stats(struct net_device *dev) | ||
2513 | { | ||
2514 | struct port_stats stats; | ||
2515 | struct port_info *p = netdev_priv(dev); | ||
2516 | struct adapter *adapter = p->adapter; | ||
2517 | struct net_device_stats *ns = &dev->stats; | ||
2518 | |||
2519 | spin_lock(&adapter->stats_lock); | ||
2520 | t4_get_port_stats(adapter, p->tx_chan, &stats); | ||
2521 | spin_unlock(&adapter->stats_lock); | ||
2522 | |||
2523 | ns->tx_bytes = stats.tx_octets; | ||
2524 | ns->tx_packets = stats.tx_frames; | ||
2525 | ns->rx_bytes = stats.rx_octets; | ||
2526 | ns->rx_packets = stats.rx_frames; | ||
2527 | ns->multicast = stats.rx_mcast_frames; | ||
2528 | |||
2529 | /* detailed rx_errors */ | ||
2530 | ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long + | ||
2531 | stats.rx_runt; | ||
2532 | ns->rx_over_errors = 0; | ||
2533 | ns->rx_crc_errors = stats.rx_fcs_err; | ||
2534 | ns->rx_frame_errors = stats.rx_symbol_err; | ||
2535 | ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 + | ||
2536 | stats.rx_ovflow2 + stats.rx_ovflow3 + | ||
2537 | stats.rx_trunc0 + stats.rx_trunc1 + | ||
2538 | stats.rx_trunc2 + stats.rx_trunc3; | ||
2539 | ns->rx_missed_errors = 0; | ||
2540 | |||
2541 | /* detailed tx_errors */ | ||
2542 | ns->tx_aborted_errors = 0; | ||
2543 | ns->tx_carrier_errors = 0; | ||
2544 | ns->tx_fifo_errors = 0; | ||
2545 | ns->tx_heartbeat_errors = 0; | ||
2546 | ns->tx_window_errors = 0; | ||
2547 | |||
2548 | ns->tx_errors = stats.tx_error_frames; | ||
2549 | ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err + | ||
2550 | ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors; | ||
2551 | return ns; | ||
2552 | } | ||
2553 | |||
2554 | static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) | ||
2555 | { | ||
2556 | int ret = 0, prtad, devad; | ||
2557 | struct port_info *pi = netdev_priv(dev); | ||
2558 | struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data; | ||
2559 | |||
2560 | switch (cmd) { | ||
2561 | case SIOCGMIIPHY: | ||
2562 | if (pi->mdio_addr < 0) | ||
2563 | return -EOPNOTSUPP; | ||
2564 | data->phy_id = pi->mdio_addr; | ||
2565 | break; | ||
2566 | case SIOCGMIIREG: | ||
2567 | case SIOCSMIIREG: | ||
2568 | if (mdio_phy_id_is_c45(data->phy_id)) { | ||
2569 | prtad = mdio_phy_id_prtad(data->phy_id); | ||
2570 | devad = mdio_phy_id_devad(data->phy_id); | ||
2571 | } else if (data->phy_id < 32) { | ||
2572 | prtad = data->phy_id; | ||
2573 | devad = 0; | ||
2574 | data->reg_num &= 0x1f; | ||
2575 | } else | ||
2576 | return -EINVAL; | ||
2577 | |||
2578 | if (cmd == SIOCGMIIREG) | ||
2579 | ret = t4_mdio_rd(pi->adapter, 0, prtad, devad, | ||
2580 | data->reg_num, &data->val_out); | ||
2581 | else | ||
2582 | ret = t4_mdio_wr(pi->adapter, 0, prtad, devad, | ||
2583 | data->reg_num, data->val_in); | ||
2584 | break; | ||
2585 | default: | ||
2586 | return -EOPNOTSUPP; | ||
2587 | } | ||
2588 | return ret; | ||
2589 | } | ||
2590 | |||
2591 | static void cxgb_set_rxmode(struct net_device *dev) | ||
2592 | { | ||
2593 | /* unfortunately we can't return errors to the stack */ | ||
2594 | set_rxmode(dev, -1, false); | ||
2595 | } | ||
2596 | |||
2597 | static int cxgb_change_mtu(struct net_device *dev, int new_mtu) | ||
2598 | { | ||
2599 | int ret; | ||
2600 | struct port_info *pi = netdev_priv(dev); | ||
2601 | |||
2602 | if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */ | ||
2603 | return -EINVAL; | ||
2604 | ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1, | ||
2605 | true); | ||
2606 | if (!ret) | ||
2607 | dev->mtu = new_mtu; | ||
2608 | return ret; | ||
2609 | } | ||
2610 | |||
2611 | static int cxgb_set_mac_addr(struct net_device *dev, void *p) | ||
2612 | { | ||
2613 | int ret; | ||
2614 | struct sockaddr *addr = p; | ||
2615 | struct port_info *pi = netdev_priv(dev); | ||
2616 | |||
2617 | if (!is_valid_ether_addr(addr->sa_data)) | ||
2618 | return -EINVAL; | ||
2619 | |||
2620 | ret = t4_change_mac(pi->adapter, 0, pi->viid, pi->xact_addr_filt, | ||
2621 | addr->sa_data, true, true); | ||
2622 | if (ret < 0) | ||
2623 | return ret; | ||
2624 | |||
2625 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | ||
2626 | pi->xact_addr_filt = ret; | ||
2627 | return 0; | ||
2628 | } | ||
2629 | |||
2630 | static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | ||
2631 | { | ||
2632 | struct port_info *pi = netdev_priv(dev); | ||
2633 | |||
2634 | pi->vlan_grp = grp; | ||
2635 | t4_set_vlan_accel(pi->adapter, 1 << pi->tx_chan, grp != NULL); | ||
2636 | } | ||
2637 | |||
2638 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
2639 | static void cxgb_netpoll(struct net_device *dev) | ||
2640 | { | ||
2641 | struct port_info *pi = netdev_priv(dev); | ||
2642 | struct adapter *adap = pi->adapter; | ||
2643 | |||
2644 | if (adap->flags & USING_MSIX) { | ||
2645 | int i; | ||
2646 | struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; | ||
2647 | |||
2648 | for (i = pi->nqsets; i; i--, rx++) | ||
2649 | t4_sge_intr_msix(0, &rx->rspq); | ||
2650 | } else | ||
2651 | t4_intr_handler(adap)(0, adap); | ||
2652 | } | ||
2653 | #endif | ||
2654 | |||
2655 | static const struct net_device_ops cxgb4_netdev_ops = { | ||
2656 | .ndo_open = cxgb_open, | ||
2657 | .ndo_stop = cxgb_close, | ||
2658 | .ndo_start_xmit = t4_eth_xmit, | ||
2659 | .ndo_get_stats = cxgb_get_stats, | ||
2660 | .ndo_set_rx_mode = cxgb_set_rxmode, | ||
2661 | .ndo_set_mac_address = cxgb_set_mac_addr, | ||
2662 | .ndo_validate_addr = eth_validate_addr, | ||
2663 | .ndo_do_ioctl = cxgb_ioctl, | ||
2664 | .ndo_change_mtu = cxgb_change_mtu, | ||
2665 | .ndo_vlan_rx_register = vlan_rx_register, | ||
2666 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
2667 | .ndo_poll_controller = cxgb_netpoll, | ||
2668 | #endif | ||
2669 | }; | ||
2670 | |||
2671 | void t4_fatal_err(struct adapter *adap) | ||
2672 | { | ||
2673 | t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0); | ||
2674 | t4_intr_disable(adap); | ||
2675 | dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); | ||
2676 | } | ||
2677 | |||
2678 | static void setup_memwin(struct adapter *adap) | ||
2679 | { | ||
2680 | u32 bar0; | ||
2681 | |||
2682 | bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */ | ||
2683 | t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0), | ||
2684 | (bar0 + MEMWIN0_BASE) | BIR(0) | | ||
2685 | WINDOW(ilog2(MEMWIN0_APERTURE) - 10)); | ||
2686 | t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1), | ||
2687 | (bar0 + MEMWIN1_BASE) | BIR(0) | | ||
2688 | WINDOW(ilog2(MEMWIN1_APERTURE) - 10)); | ||
2689 | t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2), | ||
2690 | (bar0 + MEMWIN2_BASE) | BIR(0) | | ||
2691 | WINDOW(ilog2(MEMWIN2_APERTURE) - 10)); | ||
2692 | } | ||
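A note on the encoding above: WINDOW() takes log2(aperture size) - 10, i.e. apertures are expressed in 1KB units. For illustration only (the MEMWIN*_APERTURE and MEMWIN*_BASE constants are defined elsewhere in the driver), a 64KB aperture would be programmed as ilog2(65536) - 10 = 6.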
2693 | |||
2694 | /* | ||
2695 | * Max # of ATIDs. The absolute HW max is 16K but we keep it lower. | ||
2696 | */ | ||
2697 | #define MAX_ATIDS 8192U | ||
2698 | |||
2699 | /* | ||
2700 | * Phase 0 of initialization: contact FW, obtain config, perform basic init. | ||
2701 | */ | ||
2702 | static int adap_init0(struct adapter *adap) | ||
2703 | { | ||
2704 | int ret; | ||
2705 | u32 v, port_vec; | ||
2706 | enum dev_state state; | ||
2707 | u32 params[7], val[7]; | ||
2708 | struct fw_caps_config_cmd c; | ||
2709 | |||
2710 | ret = t4_check_fw_version(adap); | ||
2711 | if (ret == -EINVAL || ret > 0) { | ||
2712 | if (upgrade_fw(adap) >= 0) /* recache FW version */ | ||
2713 | ret = t4_check_fw_version(adap); | ||
2714 | } | ||
2715 | if (ret < 0) | ||
2716 | return ret; | ||
2717 | |||
2718 | /* contact FW, request master */ | ||
2719 | ret = t4_fw_hello(adap, 0, 0, MASTER_MUST, &state); | ||
2720 | if (ret < 0) { | ||
2721 | dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", | ||
2722 | ret); | ||
2723 | return ret; | ||
2724 | } | ||
2725 | |||
2726 | /* reset device */ | ||
2727 | ret = t4_fw_reset(adap, 0, PIORSTMODE | PIORST); | ||
2728 | if (ret < 0) | ||
2729 | goto bye; | ||
2730 | |||
2731 | /* get device capabilities */ | ||
2732 | memset(&c, 0, sizeof(c)); | ||
2733 | c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | | ||
2734 | FW_CMD_REQUEST | FW_CMD_READ); | ||
2735 | c.retval_len16 = htonl(FW_LEN16(c)); | ||
2736 | ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); | ||
2737 | if (ret < 0) | ||
2738 | goto bye; | ||
2739 | |||
2740 | /* select capabilities we'll be using */ | ||
2741 | if (c.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) { | ||
2742 | if (!vf_acls) | ||
2743 | c.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); | ||
2744 | else | ||
2745 | c.niccaps = htons(FW_CAPS_CONFIG_NIC_VM); | ||
2746 | } else if (vf_acls) { | ||
2747 | dev_err(adap->pdev_dev, "virtualization ACLs not supported\n"); | ||
| ret = -EINVAL; /* otherwise we would return 0 (success) from the bye path */ | ||
2748 | goto bye; | ||
2749 | } | ||
2750 | c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | | ||
2751 | FW_CMD_REQUEST | FW_CMD_WRITE); | ||
2752 | ret = t4_wr_mbox(adap, 0, &c, sizeof(c), NULL); | ||
2753 | if (ret < 0) | ||
2754 | goto bye; | ||
2755 | |||
2756 | ret = t4_config_glbl_rss(adap, 0, | ||
2757 | FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, | ||
2758 | FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | | ||
2759 | FW_RSS_GLB_CONFIG_CMD_TNLALLLKP); | ||
2760 | if (ret < 0) | ||
2761 | goto bye; | ||
2762 | |||
2763 | ret = t4_cfg_pfvf(adap, 0, 0, 0, 64, 64, 64, 0, 0, 4, 0xf, 0xf, 16, | ||
2764 | FW_CMD_CAP_PF, FW_CMD_CAP_PF); | ||
2765 | if (ret < 0) | ||
2766 | goto bye; | ||
2767 | |||
2768 | for (v = 0; v < SGE_NTIMERS - 1; v++) | ||
2769 | adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL); | ||
2770 | adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL; | ||
2771 | adap->sge.counter_val[0] = 1; | ||
2772 | for (v = 1; v < SGE_NCOUNTERS; v++) | ||
2773 | adap->sge.counter_val[v] = min(intr_cnt[v - 1], | ||
2774 | THRESHOLD_3_MASK); | ||
2775 | t4_sge_init(adap); | ||
2776 | |||
2777 | /* get basic stuff going */ | ||
2778 | ret = t4_early_init(adap, 0); | ||
2779 | if (ret < 0) | ||
2780 | goto bye; | ||
2781 | |||
2782 | #define FW_PARAM_DEV(param) \ | ||
2783 | (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ | ||
2784 | FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) | ||
2785 | |||
2786 | #define FW_PARAM_PFVF(param) \ | ||
2787 | (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ | ||
2788 | FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) | ||
2789 | |||
2790 | params[0] = FW_PARAM_DEV(PORTVEC); | ||
2791 | params[1] = FW_PARAM_PFVF(L2T_START); | ||
2792 | params[2] = FW_PARAM_PFVF(L2T_END); | ||
2793 | params[3] = FW_PARAM_PFVF(FILTER_START); | ||
2794 | params[4] = FW_PARAM_PFVF(FILTER_END); | ||
2795 | ret = t4_query_params(adap, 0, 0, 0, 5, params, val); | ||
2796 | if (ret < 0) | ||
2797 | goto bye; | ||
2798 | port_vec = val[0]; | ||
2799 | adap->tids.ftid_base = val[3]; | ||
2800 | adap->tids.nftids = val[4] - val[3] + 1; | ||
2801 | |||
2802 | if (c.ofldcaps) { | ||
2803 | /* query offload-related parameters */ | ||
2804 | params[0] = FW_PARAM_DEV(NTID); | ||
2805 | params[1] = FW_PARAM_PFVF(SERVER_START); | ||
2806 | params[2] = FW_PARAM_PFVF(SERVER_END); | ||
2807 | params[3] = FW_PARAM_PFVF(TDDP_START); | ||
2808 | params[4] = FW_PARAM_PFVF(TDDP_END); | ||
2809 | params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); | ||
2810 | ret = t4_query_params(adap, 0, 0, 0, 6, params, val); | ||
2811 | if (ret < 0) | ||
2812 | goto bye; | ||
2813 | adap->tids.ntids = val[0]; | ||
2814 | adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); | ||
2815 | adap->tids.stid_base = val[1]; | ||
2816 | adap->tids.nstids = val[2] - val[1] + 1; | ||
2817 | adap->vres.ddp.start = val[3]; | ||
2818 | adap->vres.ddp.size = val[4] - val[3] + 1; | ||
2819 | adap->params.ofldq_wr_cred = val[5]; | ||
2820 | adap->params.offload = 1; | ||
2821 | } | ||
2822 | if (c.rdmacaps) { | ||
2823 | params[0] = FW_PARAM_PFVF(STAG_START); | ||
2824 | params[1] = FW_PARAM_PFVF(STAG_END); | ||
2825 | params[2] = FW_PARAM_PFVF(RQ_START); | ||
2826 | params[3] = FW_PARAM_PFVF(RQ_END); | ||
2827 | params[4] = FW_PARAM_PFVF(PBL_START); | ||
2828 | params[5] = FW_PARAM_PFVF(PBL_END); | ||
2829 | ret = t4_query_params(adap, 0, 0, 0, 6, params, val); | ||
2830 | if (ret < 0) | ||
2831 | goto bye; | ||
2832 | adap->vres.stag.start = val[0]; | ||
2833 | adap->vres.stag.size = val[1] - val[0] + 1; | ||
2834 | adap->vres.rq.start = val[2]; | ||
2835 | adap->vres.rq.size = val[3] - val[2] + 1; | ||
2836 | adap->vres.pbl.start = val[4]; | ||
2837 | adap->vres.pbl.size = val[5] - val[4] + 1; | ||
2838 | } | ||
2839 | if (c.iscsicaps) { | ||
2840 | params[0] = FW_PARAM_PFVF(ISCSI_START); | ||
2841 | params[1] = FW_PARAM_PFVF(ISCSI_END); | ||
2842 | ret = t4_query_params(adap, 0, 0, 0, 2, params, val); | ||
2843 | if (ret < 0) | ||
2844 | goto bye; | ||
2845 | adap->vres.iscsi.start = val[0]; | ||
2846 | adap->vres.iscsi.size = val[1] - val[0] + 1; | ||
2847 | } | ||
2848 | #undef FW_PARAM_PFVF | ||
2849 | #undef FW_PARAM_DEV | ||
2850 | |||
2851 | adap->params.nports = hweight32(port_vec); | ||
2852 | adap->params.portvec = port_vec; | ||
2853 | adap->flags |= FW_OK; | ||
2854 | |||
2855 | /* These are finalized by FW initialization, load their values now */ | ||
2856 | v = t4_read_reg(adap, TP_TIMER_RESOLUTION); | ||
2857 | adap->params.tp.tre = TIMERRESOLUTION_GET(v); | ||
2858 | t4_read_mtu_tbl(adap, adap->params.mtus, NULL); | ||
2859 | t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, | ||
2860 | adap->params.b_wnd); | ||
2861 | |||
2862 | /* tweak some settings */ | ||
2863 | t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849); | ||
2864 | t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12)); | ||
2865 | t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG); | ||
2866 | v = t4_read_reg(adap, TP_PIO_DATA); | ||
2867 | t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR); | ||
2868 | setup_memwin(adap); | ||
2869 | return 0; | ||
2870 | |||
2871 | /* | ||
2872 | * If a command timed out or failed with EIO, the FW is not operating | ||
2873 | * within its spec or something catastrophic happened to the HW/FW. | ||
2874 | * Either way, stop issuing further commands. | ||
2875 | */ | ||
2876 | bye: if (ret != -ETIMEDOUT && ret != -EIO) | ||
2877 | t4_fw_bye(adap, 0); | ||
2878 | return ret; | ||
2879 | } | ||
2880 | |||
2881 | static inline bool is_10g_port(const struct link_config *lc) | ||
2882 | { | ||
2883 | return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; | ||
2884 | } | ||
2885 | |||
2886 | static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx, | ||
2887 | unsigned int size, unsigned int iqe_size) | ||
2888 | { | ||
2889 | q->intr_params = QINTR_TIMER_IDX(timer_idx) | | ||
2890 | (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0); | ||
2891 | q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0; | ||
2892 | q->iqe_len = iqe_size; | ||
2893 | q->size = size; | ||
2894 | } | ||
2895 | |||
2896 | /* | ||
2897 | * Perform default configuration of DMA queues depending on the number and type | ||
2898 | * of ports we found and the number of available CPUs. Most settings can be | ||
2899 | * modified by the admin prior to actual use. | ||
2900 | */ | ||
2901 | static void __devinit cfg_queues(struct adapter *adap) | ||
2902 | { | ||
2903 | struct sge *s = &adap->sge; | ||
2904 | int i, q10g = 0, n10g = 0, qidx = 0; | ||
2905 | |||
2906 | for_each_port(adap, i) | ||
2907 | n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg); | ||
2908 | |||
2909 | /* | ||
2910 | * We default to one queue set per non-10G port and up to one queue set | ||
2911 | * per online CPU for each 10G port. | ||
2912 | */ | ||
2913 | if (n10g) | ||
2914 | q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g; | ||
2915 | if (q10g > num_online_cpus()) | ||
2916 | q10g = num_online_cpus(); | ||
2917 | |||
2918 | for_each_port(adap, i) { | ||
2919 | struct port_info *pi = adap2pinfo(adap, i); | ||
2920 | |||
2921 | pi->first_qset = qidx; | ||
2922 | pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1; | ||
2923 | qidx += pi->nqsets; | ||
2924 | } | ||
2925 | |||
2926 | s->ethqsets = qidx; | ||
2927 | s->max_ethqsets = qidx; /* MSI-X may lower it later */ | ||
2928 | |||
2929 | if (is_offload(adap)) { | ||
2930 | /* | ||
2931 | * For offload we use one queue per channel if all ports are at most 1G; | ||
2932 | * otherwise we divide all available queues amongst the channels, | ||
2933 | * capped by the number of online CPUs. | ||
2934 | */ | ||
2935 | if (n10g) { | ||
2936 | i = min_t(int, ARRAY_SIZE(s->ofldrxq), | ||
2937 | num_online_cpus()); | ||
2938 | s->ofldqsets = roundup(i, adap->params.nports); | ||
2939 | } else | ||
2940 | s->ofldqsets = adap->params.nports; | ||
2941 | /* For RDMA one Rx queue per channel suffices */ | ||
2942 | s->rdmaqs = adap->params.nports; | ||
2943 | } | ||
2944 | |||
2945 | for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { | ||
2946 | struct sge_eth_rxq *r = &s->ethrxq[i]; | ||
2947 | |||
2948 | init_rspq(&r->rspq, 0, 0, 1024, 64); | ||
2949 | r->fl.size = 72; | ||
2950 | } | ||
2951 | |||
2952 | for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++) | ||
2953 | s->ethtxq[i].q.size = 1024; | ||
2954 | |||
2955 | for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) | ||
2956 | s->ctrlq[i].q.size = 512; | ||
2957 | |||
2958 | for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) | ||
2959 | s->ofldtxq[i].q.size = 1024; | ||
2960 | |||
2961 | for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) { | ||
2962 | struct sge_ofld_rxq *r = &s->ofldrxq[i]; | ||
2963 | |||
2964 | init_rspq(&r->rspq, 0, 0, 1024, 64); | ||
2965 | r->rspq.uld = CXGB4_ULD_ISCSI; | ||
2966 | r->fl.size = 72; | ||
2967 | } | ||
2968 | |||
2969 | for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) { | ||
2970 | struct sge_ofld_rxq *r = &s->rdmarxq[i]; | ||
2971 | |||
2972 | init_rspq(&r->rspq, 0, 0, 511, 64); | ||
2973 | r->rspq.uld = CXGB4_ULD_RDMA; | ||
2974 | r->fl.size = 72; | ||
2975 | } | ||
2976 | |||
2977 | init_rspq(&s->fw_evtq, 6, 0, 512, 64); | ||
2978 | init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64); | ||
2979 | } | ||
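A worked example of the distribution above, under assumed values since MAX_ETH_QSETS is defined outside this hunk: take MAX_ETH_QSETS = 32 on a 4-port adapter with two 10G and two 1G ports and 8 online CPUs. Then q10g = (32 - 2) / 2 = 15, capped to num_online_cpus() = 8, so each 10G port gets 8 queue sets and each 1G port gets 1, giving s->ethqsets = s->max_ethqsets = 18 (MSI-X availability may lower this later, as noted above).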
2980 | |||
2981 | /* | ||
2982 | * Reduce the number of Ethernet queues across all ports to at most n. | ||
2983 | * n provides at least one queue per port. | ||
2984 | */ | ||
2985 | static void __devinit reduce_ethqs(struct adapter *adap, int n) | ||
2986 | { | ||
2987 | int i; | ||
2988 | struct port_info *pi; | ||
2989 | |||
2990 | while (n < adap->sge.ethqsets) | ||
2991 | for_each_port(adap, i) { | ||
2992 | pi = adap2pinfo(adap, i); | ||
2993 | if (pi->nqsets > 1) { | ||
2994 | pi->nqsets--; | ||
2995 | adap->sge.ethqsets--; | ||
2996 | if (adap->sge.ethqsets <= n) | ||
2997 | break; | ||
2998 | } | ||
2999 | } | ||
3000 | |||
3001 | n = 0; | ||
3002 | for_each_port(adap, i) { | ||
3003 | pi = adap2pinfo(adap, i); | ||
3004 | pi->first_qset = n; | ||
3005 | n += pi->nqsets; | ||
3006 | } | ||
3007 | } | ||
3008 | |||
3009 | /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */ | ||
3010 | #define EXTRA_VECS 2 | ||
3011 | |||
3012 | static int __devinit enable_msix(struct adapter *adap) | ||
3013 | { | ||
3014 | int ofld_need = 0; | ||
3015 | int i, err, want, need; | ||
3016 | struct sge *s = &adap->sge; | ||
3017 | unsigned int nchan = adap->params.nports; | ||
3018 | struct msix_entry entries[MAX_INGQ + 1]; | ||
3019 | |||
3020 | for (i = 0; i < ARRAY_SIZE(entries); ++i) | ||
3021 | entries[i].entry = i; | ||
3022 | |||
3023 | want = s->max_ethqsets + EXTRA_VECS; | ||
3024 | if (is_offload(adap)) { | ||
3025 | want += s->rdmaqs + s->ofldqsets; | ||
3026 | /* need nchan for each possible ULD */ | ||
3027 | ofld_need = 2 * nchan; | ||
3028 | } | ||
3029 | need = adap->params.nports + EXTRA_VECS + ofld_need; | ||
3030 | |||
3031 | while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need) | ||
3032 | want = err; | ||
3033 | |||
3034 | if (!err) { | ||
3035 | /* | ||
3036 | * Distribute the available vectors to the various queue groups. | ||
3037 | * Every group gets its minimum requirement and the NIC gets top | ||
3038 | * priority for any leftovers. | ||
3039 | */ | ||
3040 | i = want - EXTRA_VECS - ofld_need; | ||
3041 | if (i < s->max_ethqsets) { | ||
3042 | s->max_ethqsets = i; | ||
3043 | if (i < s->ethqsets) | ||
3044 | reduce_ethqs(adap, i); | ||
3045 | } | ||
3046 | if (is_offload(adap)) { | ||
3047 | i = want - EXTRA_VECS - s->max_ethqsets; | ||
3048 | i -= ofld_need - nchan; | ||
3049 | s->ofldqsets = (i / nchan) * nchan; /* round down */ | ||
3050 | } | ||
3051 | for (i = 0; i < want; ++i) | ||
3052 | adap->msix_info[i].vec = entries[i].vector; | ||
3053 | } else if (err > 0) | ||
3054 | dev_info(adap->pdev_dev, | ||
3055 | "only %d MSI-X vectors left, not using MSI-X\n", err); | ||
3056 | return err; | ||
3057 | } | ||
3058 | |||
3059 | #undef EXTRA_VECS | ||
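To make the vector accounting concrete, continuing the example above: with four ports (nchan = 4) and offload enabled, need = 4 + EXTRA_VECS + 2 * 4 = 14, while want = 18 + EXTRA_VECS + rdmaqs + ofldqsets. The retry loop relies on the old pci_enable_msix() contract, where a positive return value is the number of vectors actually available: the driver keeps shrinking its request to that count until allocation succeeds (returns 0) or the available count drops below the minimum.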
3060 | |||
3061 | static void __devinit print_port_info(struct adapter *adap) | ||
3062 | { | ||
3063 | static const char *base[] = { | ||
3064 | "R", "KX4", "T", "KX", "T", "KR", "CX4" | ||
3065 | }; | ||
3066 | |||
3067 | int i; | ||
3068 | char buf[80]; | ||
3069 | |||
3070 | for_each_port(adap, i) { | ||
3071 | struct net_device *dev = adap->port[i]; | ||
3072 | const struct port_info *pi = netdev_priv(dev); | ||
3073 | char *bufp = buf; | ||
3074 | |||
3075 | if (!test_bit(i, &adap->registered_device_map)) | ||
3076 | continue; | ||
3077 | |||
3078 | if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) | ||
3079 | bufp += sprintf(bufp, "100/"); | ||
3080 | if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G) | ||
3081 | bufp += sprintf(bufp, "1000/"); | ||
3082 | if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) | ||
3083 | bufp += sprintf(bufp, "10G/"); | ||
3084 | if (bufp != buf) | ||
3085 | --bufp; | ||
3086 | sprintf(bufp, "BASE-%s", base[pi->port_type]); | ||
3087 | |||
3088 | netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s\n", | ||
3089 | adap->params.vpd.id, adap->params.rev, | ||
3090 | buf, is_offload(adap) ? "R" : "", | ||
3091 | adap->params.pci.width, | ||
3092 | (adap->flags & USING_MSIX) ? " MSI-X" : | ||
3093 | (adap->flags & USING_MSI) ? " MSI" : ""); | ||
3094 | if (adap->name == dev->name) | ||
3095 | netdev_info(dev, "S/N: %s, E/C: %s\n", | ||
3096 | adap->params.vpd.sn, adap->params.vpd.ec); | ||
3097 | } | ||
3098 | } | ||
3099 | |||
3100 | #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |\ | ||
3101 | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) | ||
3102 | |||
3103 | static int __devinit init_one(struct pci_dev *pdev, | ||
3104 | const struct pci_device_id *ent) | ||
3105 | { | ||
3106 | int func, i, err; | ||
3107 | struct port_info *pi; | ||
3108 | unsigned int highdma = 0; | ||
3109 | struct adapter *adapter = NULL; | ||
3110 | |||
3111 | printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); | ||
3112 | |||
3113 | err = pci_request_regions(pdev, KBUILD_MODNAME); | ||
3114 | if (err) { | ||
3115 | /* Just info, some other driver may have claimed the device. */ | ||
3116 | dev_info(&pdev->dev, "cannot obtain PCI resources\n"); | ||
3117 | return err; | ||
3118 | } | ||
3119 | |||
3120 | /* We control everything through PF 0 */ | ||
3121 | func = PCI_FUNC(pdev->devfn); | ||
3122 | if (func > 0) | ||
3123 | goto sriov; | ||
3124 | |||
3125 | err = pci_enable_device(pdev); | ||
3126 | if (err) { | ||
3127 | dev_err(&pdev->dev, "cannot enable PCI device\n"); | ||
3128 | goto out_release_regions; | ||
3129 | } | ||
3130 | |||
3131 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | ||
3132 | highdma = NETIF_F_HIGHDMA; | ||
3133 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
3134 | if (err) { | ||
3135 | dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " | ||
3136 | "coherent allocations\n"); | ||
3137 | goto out_disable_device; | ||
3138 | } | ||
3139 | } else { | ||
3140 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
3141 | if (err) { | ||
3142 | dev_err(&pdev->dev, "no usable DMA configuration\n"); | ||
3143 | goto out_disable_device; | ||
3144 | } | ||
3145 | } | ||
3146 | |||
3147 | pci_enable_pcie_error_reporting(pdev); | ||
3148 | pci_set_master(pdev); | ||
3149 | pci_save_state(pdev); | ||
3150 | |||
3151 | adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); | ||
3152 | if (!adapter) { | ||
3153 | err = -ENOMEM; | ||
3154 | goto out_disable_device; | ||
3155 | } | ||
3156 | |||
3157 | adapter->regs = pci_ioremap_bar(pdev, 0); | ||
3158 | if (!adapter->regs) { | ||
3159 | dev_err(&pdev->dev, "cannot map device registers\n"); | ||
3160 | err = -ENOMEM; | ||
3161 | goto out_free_adapter; | ||
3162 | } | ||
3163 | |||
3164 | adapter->pdev = pdev; | ||
3165 | adapter->pdev_dev = &pdev->dev; | ||
3166 | adapter->name = pci_name(pdev); | ||
3167 | adapter->msg_enable = dflt_msg_enable; | ||
3168 | memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); | ||
3169 | |||
3170 | spin_lock_init(&adapter->stats_lock); | ||
3171 | spin_lock_init(&adapter->tid_release_lock); | ||
3172 | |||
3173 | INIT_WORK(&adapter->tid_release_task, process_tid_release_list); | ||
3174 | |||
3175 | err = t4_prep_adapter(adapter); | ||
3176 | if (err) | ||
3177 | goto out_unmap_bar; | ||
3178 | err = adap_init0(adapter); | ||
3179 | if (err) | ||
3180 | goto out_unmap_bar; | ||
3181 | |||
3182 | for_each_port(adapter, i) { | ||
3183 | struct net_device *netdev; | ||
3184 | |||
3185 | netdev = alloc_etherdev_mq(sizeof(struct port_info), | ||
3186 | MAX_ETH_QSETS); | ||
3187 | if (!netdev) { | ||
3188 | err = -ENOMEM; | ||
3189 | goto out_free_dev; | ||
3190 | } | ||
3191 | |||
3192 | SET_NETDEV_DEV(netdev, &pdev->dev); | ||
3193 | |||
3194 | adapter->port[i] = netdev; | ||
3195 | pi = netdev_priv(netdev); | ||
3196 | pi->adapter = adapter; | ||
3197 | pi->xact_addr_filt = -1; | ||
3198 | pi->rx_offload = RX_CSO; | ||
3199 | pi->port_id = i; | ||
3200 | netif_carrier_off(netdev); | ||
3201 | netif_tx_stop_all_queues(netdev); | ||
3202 | netdev->irq = pdev->irq; | ||
3203 | |||
3204 | netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; | ||
3205 | netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; | ||
3206 | netdev->features |= NETIF_F_GRO | highdma; | ||
3207 | netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | ||
3208 | netdev->vlan_features = netdev->features & VLAN_FEAT; | ||
3209 | |||
3210 | netdev->netdev_ops = &cxgb4_netdev_ops; | ||
3211 | SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); | ||
3212 | } | ||
3213 | |||
3214 | pci_set_drvdata(pdev, adapter); | ||
3215 | |||
3216 | if (adapter->flags & FW_OK) { | ||
3217 | err = t4_port_init(adapter, 0, 0, 0); | ||
3218 | if (err) | ||
3219 | goto out_free_dev; | ||
3220 | } | ||
3221 | |||
3222 | /* | ||
3223 | * Configure queues and allocate tables now; they can be needed as | ||
3224 | * soon as the first register_netdev completes. | ||
3225 | */ | ||
3226 | cfg_queues(adapter); | ||
3227 | |||
3228 | adapter->l2t = t4_init_l2t(); | ||
3229 | if (!adapter->l2t) { | ||
3230 | /* We tolerate a lack of L2T, giving up some functionality */ | ||
3231 | dev_warn(&pdev->dev, "could not allocate L2T, continuing\n"); | ||
3232 | adapter->params.offload = 0; | ||
3233 | } | ||
3234 | |||
3235 | if (is_offload(adapter) && tid_init(&adapter->tids) < 0) { | ||
3236 | dev_warn(&pdev->dev, "could not allocate TID table, " | ||
3237 | "continuing\n"); | ||
3238 | adapter->params.offload = 0; | ||
3239 | } | ||
3240 | |||
3241 | /* | ||
3242 | * The card is now ready to go. If any errors occur during device | ||
3243 | * registration we do not fail the whole card but rather proceed only | ||
3244 | * with the ports we manage to register successfully. However, we must | ||
3245 | * register at least one net device. | ||
3246 | */ | ||
3247 | for_each_port(adapter, i) { | ||
3248 | err = register_netdev(adapter->port[i]); | ||
3249 | if (err) | ||
3250 | dev_warn(&pdev->dev, | ||
3251 | "cannot register net device %s, skipping\n", | ||
3252 | adapter->port[i]->name); | ||
3253 | else { | ||
3254 | /* | ||
3255 | * Change the name we use for messages to the name of | ||
3256 | * the first successfully registered interface. | ||
3257 | */ | ||
3258 | if (!adapter->registered_device_map) | ||
3259 | adapter->name = adapter->port[i]->name; | ||
3260 | |||
3261 | __set_bit(i, &adapter->registered_device_map); | ||
3262 | adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i; | ||
3263 | } | ||
3264 | } | ||
3265 | if (!adapter->registered_device_map) { | ||
3266 | dev_err(&pdev->dev, "could not register any net devices\n"); | ||
3267 | goto out_free_dev; | ||
3268 | } | ||
3269 | |||
3270 | if (cxgb4_debugfs_root) { | ||
3271 | adapter->debugfs_root = debugfs_create_dir(pci_name(pdev), | ||
3272 | cxgb4_debugfs_root); | ||
3273 | setup_debugfs(adapter); | ||
3274 | } | ||
3275 | |||
3276 | /* See what interrupts we'll be using */ | ||
3277 | if (msi > 1 && enable_msix(adapter) == 0) | ||
3278 | adapter->flags |= USING_MSIX; | ||
3279 | else if (msi > 0 && pci_enable_msi(pdev) == 0) | ||
3280 | adapter->flags |= USING_MSI; | ||
3281 | |||
3282 | if (is_offload(adapter)) | ||
3283 | attach_ulds(adapter); | ||
3284 | |||
3285 | print_port_info(adapter); | ||
3286 | |||
3287 | sriov: | ||
3288 | #ifdef CONFIG_PCI_IOV | ||
3289 | if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) | ||
3290 | if (pci_enable_sriov(pdev, num_vf[func]) == 0) | ||
3291 | dev_info(&pdev->dev, | ||
3292 | "instantiated %u virtual functions\n", | ||
3293 | num_vf[func]); | ||
3294 | #endif | ||
3295 | return 0; | ||
3296 | |||
3297 | out_free_dev: | ||
3298 | t4_free_mem(adapter->tids.tid_tab); | ||
3299 | t4_free_mem(adapter->l2t); | ||
3300 | for_each_port(adapter, i) | ||
3301 | if (adapter->port[i]) | ||
3302 | free_netdev(adapter->port[i]); | ||
3303 | if (adapter->flags & FW_OK) | ||
3304 | t4_fw_bye(adapter, 0); | ||
3305 | out_unmap_bar: | ||
3306 | iounmap(adapter->regs); | ||
3307 | out_free_adapter: | ||
3308 | kfree(adapter); | ||
3309 | out_disable_device: | ||
3310 | pci_disable_pcie_error_reporting(pdev); | ||
3311 | pci_disable_device(pdev); | ||
3312 | out_release_regions: | ||
3313 | pci_release_regions(pdev); | ||
3314 | pci_set_drvdata(pdev, NULL); | ||
3315 | return err; | ||
3316 | } | ||
3317 | |||
3318 | static void __devexit remove_one(struct pci_dev *pdev) | ||
3319 | { | ||
3320 | struct adapter *adapter = pci_get_drvdata(pdev); | ||
3321 | |||
3322 | pci_disable_sriov(pdev); | ||
3323 | |||
3324 | if (adapter) { | ||
3325 | int i; | ||
3326 | |||
3327 | if (is_offload(adapter)) | ||
3328 | detach_ulds(adapter); | ||
3329 | |||
3330 | for_each_port(adapter, i) | ||
3331 | if (test_bit(i, &adapter->registered_device_map)) | ||
3332 | unregister_netdev(adapter->port[i]); | ||
3333 | |||
3334 | if (adapter->debugfs_root) | ||
3335 | debugfs_remove_recursive(adapter->debugfs_root); | ||
3336 | |||
3337 | t4_sge_stop(adapter); | ||
3338 | t4_free_sge_resources(adapter); | ||
3339 | t4_free_mem(adapter->l2t); | ||
3340 | t4_free_mem(adapter->tids.tid_tab); | ||
3341 | disable_msi(adapter); | ||
3342 | |||
3343 | for_each_port(adapter, i) | ||
3344 | if (adapter->port[i]) | ||
3345 | free_netdev(adapter->port[i]); | ||
3346 | |||
3347 | if (adapter->flags & FW_OK) | ||
3348 | t4_fw_bye(adapter, 0); | ||
3349 | iounmap(adapter->regs); | ||
3350 | kfree(adapter); | ||
3351 | pci_disable_pcie_error_reporting(pdev); | ||
3352 | pci_disable_device(pdev); | ||
3353 | pci_release_regions(pdev); | ||
3354 | pci_set_drvdata(pdev, NULL); | ||
3355 | } else if (PCI_FUNC(pdev->devfn) > 0) | ||
3356 | pci_release_regions(pdev); | ||
3357 | } | ||
3358 | |||
3359 | static struct pci_driver cxgb4_driver = { | ||
3360 | .name = KBUILD_MODNAME, | ||
3361 | .id_table = cxgb4_pci_tbl, | ||
3362 | .probe = init_one, | ||
3363 | .remove = __devexit_p(remove_one), | ||
3364 | }; | ||
3365 | |||
3366 | static int __init cxgb4_init_module(void) | ||
3367 | { | ||
3368 | int ret; | ||
3369 | |||
3370 | /* Debugfs support is optional, just warn if this fails */ | ||
3371 | cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); | ||
3372 | if (!cxgb4_debugfs_root) | ||
3373 | pr_warning("could not create debugfs entry, continuing\n"); | ||
3374 | |||
3375 | ret = pci_register_driver(&cxgb4_driver); | ||
3376 | if (ret < 0) | ||
3377 | debugfs_remove(cxgb4_debugfs_root); | ||
3378 | return ret; | ||
3379 | } | ||
3380 | |||
3381 | static void __exit cxgb4_cleanup_module(void) | ||
3382 | { | ||
3383 | pci_unregister_driver(&cxgb4_driver); | ||
3384 | debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ | ||
3385 | } | ||
3386 | |||
3387 | module_init(cxgb4_init_module); | ||
3388 | module_exit(cxgb4_cleanup_module); | ||
diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h new file mode 100644 index 000000000000..5b98546ac92d --- /dev/null +++ b/drivers/net/cxgb4/cxgb4_uld.h | |||
@@ -0,0 +1,239 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio T4 Ethernet driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #ifndef __CXGB4_OFLD_H | ||
36 | #define __CXGB4_OFLD_H | ||
37 | |||
38 | #include <linux/cache.h> | ||
39 | #include <linux/spinlock.h> | ||
40 | #include <linux/skbuff.h> | ||
41 | #include <asm/atomic.h> | ||
42 | |||
43 | /* CPL message priority levels */ | ||
44 | enum { | ||
45 | CPL_PRIORITY_DATA = 0, /* data messages */ | ||
46 | CPL_PRIORITY_SETUP = 1, /* connection setup messages */ | ||
47 | CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */ | ||
48 | CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */ | ||
49 | CPL_PRIORITY_ACK = 1, /* RX ACK messages */ | ||
50 | CPL_PRIORITY_CONTROL = 1 /* control messages */ | ||
51 | }; | ||
52 | |||
53 | #define INIT_TP_WR(w, tid) do { \ | ||
54 | (w)->wr.wr_hi = htonl(FW_WR_OP(FW_TP_WR) | \ | ||
55 | FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \ | ||
56 | (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \ | ||
57 | FW_WR_FLOWID(tid)); \ | ||
58 | (w)->wr.wr_lo = cpu_to_be64(0); \ | ||
59 | } while (0) | ||
60 | |||
61 | #define INIT_TP_WR_CPL(w, cpl, tid) do { \ | ||
62 | INIT_TP_WR(w, tid); \ | ||
63 | OPCODE_TID(w) = htonl(MK_OPCODE_TID(cpl, tid)); \ | ||
64 | } while (0) | ||
65 | |||
66 | #define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \ | ||
67 | (w)->wr.wr_hi = htonl(FW_WR_OP(FW_ULPTX_WR) | FW_WR_ATOMIC(atomic)); \ | ||
68 | (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \ | ||
69 | FW_WR_FLOWID(tid)); \ | ||
70 | (w)->wr.wr_lo = cpu_to_be64(0); \ | ||
71 | } while (0) | ||
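These work-request macros are exercised verbatim later in this patch: write_l2e() in l2t.c below builds a CPL_L2T_WRITE_REQ with INIT_TP_WR(), so that function doubles as a usage example.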
72 | |||
73 | /* Special asynchronous notification message */ | ||
74 | #define CXGB4_MSG_AN ((void *)1) | ||
75 | |||
76 | struct serv_entry { | ||
77 | void *data; | ||
78 | }; | ||
79 | |||
80 | union aopen_entry { | ||
81 | void *data; | ||
82 | union aopen_entry *next; | ||
83 | }; | ||
84 | |||
85 | /* | ||
86 | * Holds the size, base address, free list start, etc. of the TID, server TID, | ||
87 | * and active-open TID tables. The tables themselves are allocated dynamically. | ||
88 | */ | ||
89 | struct tid_info { | ||
90 | void **tid_tab; | ||
91 | unsigned int ntids; | ||
92 | |||
93 | struct serv_entry *stid_tab; | ||
94 | unsigned long *stid_bmap; | ||
95 | unsigned int nstids; | ||
96 | unsigned int stid_base; | ||
97 | |||
98 | union aopen_entry *atid_tab; | ||
99 | unsigned int natids; | ||
100 | |||
101 | unsigned int nftids; | ||
102 | unsigned int ftid_base; | ||
103 | |||
104 | spinlock_t atid_lock ____cacheline_aligned_in_smp; | ||
105 | union aopen_entry *afree; | ||
106 | unsigned int atids_in_use; | ||
107 | |||
108 | spinlock_t stid_lock; | ||
109 | unsigned int stids_in_use; | ||
110 | |||
111 | atomic_t tids_in_use; | ||
112 | }; | ||
113 | |||
114 | static inline void *lookup_tid(const struct tid_info *t, unsigned int tid) | ||
115 | { | ||
116 | return tid < t->ntids ? t->tid_tab[tid] : NULL; | ||
117 | } | ||
118 | |||
119 | static inline void *lookup_atid(const struct tid_info *t, unsigned int atid) | ||
120 | { | ||
121 | return atid < t->natids ? t->atid_tab[atid].data : NULL; | ||
122 | } | ||
123 | |||
124 | static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) | ||
125 | { | ||
126 | stid -= t->stid_base; | ||
127 | return stid < t->nstids ? t->stid_tab[stid].data : NULL; | ||
128 | } | ||
129 | |||
130 | static inline void cxgb4_insert_tid(struct tid_info *t, void *data, | ||
131 | unsigned int tid) | ||
132 | { | ||
133 | t->tid_tab[tid] = data; | ||
134 | atomic_inc(&t->tids_in_use); | ||
135 | } | ||
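A small sketch of how insertion and lookup are meant to pair up; the demo_* names are hypothetical stand-ins for whatever per-connection state a ULD keeps:

	/* Hypothetical per-connection state kept by an offload driver */
	struct demo_conn {
		int flags;
	};

	static void demo_conn_established(struct tid_info *t, struct demo_conn *c,
					  unsigned int tid)
	{
		cxgb4_insert_tid(t, c, tid);      /* record tid -> c */
	}

	static struct demo_conn *demo_conn_find(const struct tid_info *t,
						unsigned int tid)
	{
		return lookup_tid(t, tid);        /* NULL if tid is out of range */
	}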
136 | |||
137 | int cxgb4_alloc_atid(struct tid_info *t, void *data); | ||
138 | int cxgb4_alloc_stid(struct tid_info *t, int family, void *data); | ||
139 | void cxgb4_free_atid(struct tid_info *t, unsigned int atid); | ||
140 | void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family); | ||
141 | void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid); | ||
142 | void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, | ||
143 | unsigned int tid); | ||
144 | |||
145 | struct in6_addr; | ||
146 | |||
147 | int cxgb4_create_server(const struct net_device *dev, unsigned int stid, | ||
148 | __be32 sip, __be16 sport, unsigned int queue); | ||
149 | int cxgb4_create_server6(const struct net_device *dev, unsigned int stid, | ||
150 | const struct in6_addr *sip, __be16 sport, | ||
151 | unsigned int queue); | ||
152 | |||
153 | static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue) | ||
154 | { | ||
155 | skb_set_queue_mapping(skb, (queue << 1) | prio); | ||
156 | } | ||
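That is, bit 0 of the skb's queue mapping carries the CPL priority and the higher bits carry the queue index; e.g. set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0), as used by write_l2e() in l2t.c below, yields a mapping of (0 << 1) | 1 = 1.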
157 | |||
158 | enum cxgb4_uld { | ||
159 | CXGB4_ULD_RDMA, | ||
160 | CXGB4_ULD_ISCSI, | ||
161 | CXGB4_ULD_MAX | ||
162 | }; | ||
163 | |||
164 | enum cxgb4_state { | ||
165 | CXGB4_STATE_UP, | ||
166 | CXGB4_STATE_START_RECOVERY, | ||
167 | CXGB4_STATE_DOWN, | ||
168 | CXGB4_STATE_DETACH | ||
169 | }; | ||
170 | |||
171 | struct pci_dev; | ||
172 | struct l2t_data; | ||
173 | struct net_device; | ||
174 | struct pkt_gl; | ||
175 | struct tp_tcp_stats; | ||
176 | |||
177 | struct cxgb4_range { | ||
178 | unsigned int start; | ||
179 | unsigned int size; | ||
180 | }; | ||
181 | |||
182 | struct cxgb4_virt_res { /* virtualized HW resources */ | ||
183 | struct cxgb4_range ddp; | ||
184 | struct cxgb4_range iscsi; | ||
185 | struct cxgb4_range stag; | ||
186 | struct cxgb4_range rq; | ||
187 | struct cxgb4_range pbl; | ||
188 | }; | ||
189 | |||
190 | /* | ||
191 | * Block of information the LLD provides to ULDs attaching to a device. | ||
192 | */ | ||
193 | struct cxgb4_lld_info { | ||
194 | struct pci_dev *pdev; /* associated PCI device */ | ||
195 | struct l2t_data *l2t; /* L2 table */ | ||
196 | struct tid_info *tids; /* TID table */ | ||
197 | struct net_device **ports; /* device ports */ | ||
198 | const struct cxgb4_virt_res *vr; /* assorted HW resources */ | ||
199 | const unsigned short *mtus; /* MTU table */ | ||
200 | const unsigned short *rxq_ids; /* the ULD's Rx queue ids */ | ||
201 | unsigned short nrxq; /* # of Rx queues */ | ||
202 | unsigned short ntxq; /* # of Tx queues */ | ||
203 | unsigned char nchan:4; /* # of channels */ | ||
204 | unsigned char nports:4; /* # of ports */ | ||
205 | unsigned char wr_cred; /* WR 16-byte credits */ | ||
206 | unsigned char adapter_type; /* type of adapter */ | ||
207 | unsigned char fw_api_ver; /* FW API version */ | ||
208 | unsigned int fw_vers; /* FW version */ | ||
209 | unsigned int iscsi_iolen; /* iSCSI max I/O length */ | ||
210 | unsigned short udb_density; /* # of user DB/page */ | ||
211 | unsigned short ucq_density; /* # of user CQs/page */ | ||
212 | void __iomem *gts_reg; /* address of GTS register */ | ||
213 | void __iomem *db_reg; /* address of kernel doorbell */ | ||
214 | }; | ||
215 | |||
216 | struct cxgb4_uld_info { | ||
217 | const char *name; | ||
218 | void *(*add)(const struct cxgb4_lld_info *p); | ||
219 | int (*rx_handler)(void *handle, const __be64 *rsp, | ||
220 | const struct pkt_gl *gl); | ||
221 | int (*state_change)(void *handle, enum cxgb4_state new_state); | ||
222 | }; | ||
223 | |||
224 | int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); | ||
225 | int cxgb4_unregister_uld(enum cxgb4_uld type); | ||
226 | int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb); | ||
227 | unsigned int cxgb4_port_chan(const struct net_device *dev); | ||
228 | unsigned int cxgb4_port_viid(const struct net_device *dev); | ||
229 | unsigned int cxgb4_port_idx(const struct net_device *dev); | ||
230 | struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id); | ||
231 | unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, | ||
232 | unsigned int *idx); | ||
233 | void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, | ||
234 | struct tp_tcp_stats *v6); | ||
235 | void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, | ||
236 | const unsigned int *pgsz_order); | ||
237 | struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, | ||
238 | unsigned int skb_len, unsigned int pull_len); | ||
239 | #endif /* !__CXGB4_OFLD_H */ | ||
diff --git a/drivers/net/cxgb4/l2t.c b/drivers/net/cxgb4/l2t.c new file mode 100644 index 000000000000..9f96724a133a --- /dev/null +++ b/drivers/net/cxgb4/l2t.c | |||
@@ -0,0 +1,624 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio T4 Ethernet driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/skbuff.h> | ||
36 | #include <linux/netdevice.h> | ||
37 | #include <linux/if.h> | ||
38 | #include <linux/if_vlan.h> | ||
39 | #include <linux/jhash.h> | ||
40 | #include <net/neighbour.h> | ||
41 | #include "cxgb4.h" | ||
42 | #include "l2t.h" | ||
43 | #include "t4_msg.h" | ||
44 | #include "t4fw_api.h" | ||
45 | |||
46 | #define VLAN_NONE 0xfff | ||
47 | |||
48 | /* identifies sync vs async L2T_WRITE_REQs */ | ||
49 | #define F_SYNC_WR (1 << 12) | ||
50 | |||
51 | enum { | ||
52 | L2T_STATE_VALID, /* entry is up to date */ | ||
53 | L2T_STATE_STALE, /* entry may be used but needs revalidation */ | ||
54 | L2T_STATE_RESOLVING, /* entry needs address resolution */ | ||
55 | L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */ | ||
56 | |||
57 | /* when state is one of the below the entry is not hashed */ | ||
58 | L2T_STATE_SWITCHING, /* entry is being used by a switching filter */ | ||
59 | L2T_STATE_UNUSED /* entry not in use */ | ||
60 | }; | ||
61 | |||
62 | struct l2t_data { | ||
63 | rwlock_t lock; | ||
64 | atomic_t nfree; /* number of free entries */ | ||
65 | struct l2t_entry *rover; /* starting point for next allocation */ | ||
66 | struct l2t_entry l2tab[L2T_SIZE]; | ||
67 | }; | ||
68 | |||
69 | static inline unsigned int vlan_prio(const struct l2t_entry *e) | ||
70 | { | ||
71 | return e->vlan >> 13; | ||
72 | } | ||
73 | |||
74 | static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) | ||
75 | { | ||
76 | if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ | ||
77 | atomic_dec(&d->nfree); | ||
78 | } | ||
79 | |||
80 | /* | ||
81 | * To avoid having to check address families we do not allow v4 and v6 | ||
82 | * neighbors to be on the same hash chain. We keep v4 entries in the first | ||
83 | * half of available hash buckets and v6 in the second. | ||
84 | */ | ||
85 | enum { | ||
86 | L2T_SZ_HALF = L2T_SIZE / 2, | ||
87 | L2T_HASH_MASK = L2T_SZ_HALF - 1 | ||
88 | }; | ||
89 | |||
90 | static inline unsigned int arp_hash(const u32 *key, int ifindex) | ||
91 | { | ||
92 | return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK; | ||
93 | } | ||
94 | |||
95 | static inline unsigned int ipv6_hash(const u32 *key, int ifindex) | ||
96 | { | ||
97 | u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3]; | ||
98 | |||
99 | return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK); | ||
100 | } | ||
101 | |||
102 | static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex) | ||
103 | { | ||
104 | return addr_len == 4 ? arp_hash(addr, ifindex) : | ||
105 | ipv6_hash(addr, ifindex); | ||
106 | } | ||
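As an illustration, assuming L2T_SIZE = 1024 (the constant is defined outside this hunk), IPv4 keys always land in buckets 0..511 and IPv6 keys in buckets 512..1023, which is what lets addreq() below skip any address-family check.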
107 | |||
108 | /* | ||
109 | * Checks if an L2T entry is for the given IP/IPv6 address. It does not check | ||
110 | * whether the L2T entry and the address are of the same address family. | ||
111 | * Callers ensure an address is only checked against L2T entries of the same | ||
112 | * family, something made trivial by the separation of IP and IPv6 hash chains | ||
113 | * mentioned above. Returns 0 iff there's a match (the OR of per-word XORs). | ||
114 | */ | ||
115 | static int addreq(const struct l2t_entry *e, const u32 *addr) | ||
116 | { | ||
117 | if (e->v6) | ||
118 | return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) | | ||
119 | (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]); | ||
120 | return e->addr[0] ^ addr[0]; | ||
121 | } | ||
122 | |||
123 | static void neigh_replace(struct l2t_entry *e, struct neighbour *n) | ||
124 | { | ||
125 | neigh_hold(n); | ||
126 | if (e->neigh) | ||
127 | neigh_release(e->neigh); | ||
128 | e->neigh = n; | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * Write an L2T entry. Must be called with the entry locked. | ||
133 | * The write may be synchronous or asynchronous. | ||
134 | */ | ||
135 | static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync) | ||
136 | { | ||
137 | struct sk_buff *skb; | ||
138 | struct cpl_l2t_write_req *req; | ||
139 | |||
140 | skb = alloc_skb(sizeof(*req), GFP_ATOMIC); | ||
141 | if (!skb) | ||
142 | return -ENOMEM; | ||
143 | |||
144 | req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req)); | ||
145 | INIT_TP_WR(req, 0); | ||
146 | |||
147 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, | ||
148 | e->idx | (sync ? F_SYNC_WR : 0) | | ||
149 | TID_QID(adap->sge.fw_evtq.abs_id))); | ||
150 | req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync)); | ||
151 | req->l2t_idx = htons(e->idx); | ||
152 | req->vlan = htons(e->vlan); | ||
153 | if (e->neigh) | ||
154 | memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac)); | ||
155 | memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac)); | ||
156 | |||
157 | set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); | ||
158 | t4_ofld_send(adap, skb); | ||
159 | |||
160 | if (sync && e->state != L2T_STATE_SWITCHING) | ||
161 | e->state = L2T_STATE_SYNC_WRITE; | ||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | /* | ||
166 | * Send packets waiting in an L2T entry's ARP queue. Must be called with the | ||
167 | * entry locked. | ||
168 | */ | ||
169 | static void send_pending(struct adapter *adap, struct l2t_entry *e) | ||
170 | { | ||
171 | while (e->arpq_head) { | ||
172 | struct sk_buff *skb = e->arpq_head; | ||
173 | |||
174 | e->arpq_head = skb->next; | ||
175 | skb->next = NULL; | ||
176 | t4_ofld_send(adap, skb); | ||
177 | } | ||
178 | e->arpq_tail = NULL; | ||
179 | } | ||
180 | |||
181 | /* | ||
182 | * Process a CPL_L2T_WRITE_RPL. Wake up the ARP queue if it completes a | ||
183 | * synchronous L2T_WRITE. Note that the TID in the reply is really the L2T | ||
184 | * index it refers to. | ||
185 | */ | ||
186 | void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl) | ||
187 | { | ||
188 | unsigned int tid = GET_TID(rpl); | ||
189 | unsigned int idx = tid & (L2T_SIZE - 1); | ||
190 | |||
191 | if (unlikely(rpl->status != CPL_ERR_NONE)) { | ||
192 | dev_err(adap->pdev_dev, | ||
193 | "Unexpected L2T_WRITE_RPL status %u for entry %u\n", | ||
194 | rpl->status, idx); | ||
195 | return; | ||
196 | } | ||
197 | |||
198 | if (tid & F_SYNC_WR) { | ||
199 | struct l2t_entry *e = &adap->l2t->l2tab[idx]; | ||
200 | |||
201 | spin_lock(&e->lock); | ||
202 | if (e->state != L2T_STATE_SWITCHING) { | ||
203 | send_pending(adap, e); | ||
204 | e->state = (e->neigh->nud_state & NUD_STALE) ? | ||
205 | L2T_STATE_STALE : L2T_STATE_VALID; | ||
206 | } | ||
207 | spin_unlock(&e->lock); | ||
208 | } | ||
209 | } | ||
210 | |||
211 | /* | ||
212 | * Add a packet to an L2T entry's queue of packets awaiting resolution. | ||
213 | * Must be called with the entry's lock held. | ||
214 | */ | ||
215 | static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb) | ||
216 | { | ||
217 | skb->next = NULL; | ||
218 | if (e->arpq_head) | ||
219 | e->arpq_tail->next = skb; | ||
220 | else | ||
221 | e->arpq_head = skb; | ||
222 | e->arpq_tail = skb; | ||
223 | } | ||
224 | |||
225 | int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, | ||
226 | struct l2t_entry *e) | ||
227 | { | ||
228 | struct adapter *adap = netdev2adap(dev); | ||
229 | |||
230 | again: | ||
231 | switch (e->state) { | ||
232 | case L2T_STATE_STALE: /* entry is stale, kick off revalidation */ | ||
233 | neigh_event_send(e->neigh, NULL); | ||
234 | spin_lock_bh(&e->lock); | ||
235 | if (e->state == L2T_STATE_STALE) | ||
236 | e->state = L2T_STATE_VALID; | ||
237 | spin_unlock_bh(&e->lock); /* fall through */ | ||
238 | case L2T_STATE_VALID: /* fast-path, send the packet on */ | ||
239 | return t4_ofld_send(adap, skb); | ||
240 | case L2T_STATE_RESOLVING: | ||
241 | case L2T_STATE_SYNC_WRITE: | ||
242 | spin_lock_bh(&e->lock); | ||
243 | if (e->state != L2T_STATE_SYNC_WRITE && | ||
244 | e->state != L2T_STATE_RESOLVING) { | ||
245 | spin_unlock_bh(&e->lock); | ||
246 | goto again; | ||
247 | } | ||
248 | arpq_enqueue(e, skb); | ||
249 | spin_unlock_bh(&e->lock); | ||
250 | |||
251 | if (e->state == L2T_STATE_RESOLVING && | ||
252 | !neigh_event_send(e->neigh, NULL)) { | ||
253 | spin_lock_bh(&e->lock); | ||
254 | if (e->state == L2T_STATE_RESOLVING && e->arpq_head) | ||
255 | write_l2e(adap, e, 1); | ||
256 | spin_unlock_bh(&e->lock); | ||
257 | } | ||
258 | } | ||
259 | return 0; | ||
260 | } | ||
261 | EXPORT_SYMBOL(cxgb4_l2t_send); | ||
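cxgb4_l2t_send() is the queue-or-send half of the state machine: VALID entries transmit immediately, STALE entries transmit while revalidation is kicked off, and RESOLVING/SYNC_WRITE entries park the packet on the arpq until the L2T write completes or resolution fails. A sketch of how an offload driver might call it, with a hypothetical my_arp_failure() callback wired up through t4_set_arp_err_handler() from l2t.h:

	/* Hypothetical failure callback: reclaim the skb if resolution fails. */
	static void my_arp_failure(void *handle, struct sk_buff *skb)
	{
		kfree_skb(skb);
	}

	static int uld_send(struct net_device *dev, struct sk_buff *skb,
			    struct l2t_entry *e, void *ctx)
	{
		/* If resolution fails, my_arp_failure(ctx, skb) runs instead of
		 * the packet being sent (see handle_failed_resolution below). */
		t4_set_arp_err_handler(skb, ctx, my_arp_failure);
		return cxgb4_l2t_send(dev, skb, e);
	}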
262 | |||
263 | /* | ||
264 | * Allocate a free L2T entry. Must be called with l2t_data.lock held. | ||
265 | */ | ||
266 | static struct l2t_entry *alloc_l2e(struct l2t_data *d) | ||
267 | { | ||
268 | struct l2t_entry *end, *e, **p; | ||
269 | |||
270 | if (!atomic_read(&d->nfree)) | ||
271 | return NULL; | ||
272 | |||
273 | /* there's definitely a free entry */ | ||
274 | for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e) | ||
275 | if (atomic_read(&e->refcnt) == 0) | ||
276 | goto found; | ||
277 | |||
278 | for (e = d->l2tab; atomic_read(&e->refcnt); ++e) | ||
279 | ; | ||
280 | found: | ||
281 | d->rover = e + 1; | ||
282 | atomic_dec(&d->nfree); | ||
283 | |||
284 | /* | ||
285 | * The entry we found may be an inactive entry that is | ||
286 | * presently in the hash table. We need to remove it. | ||
287 | */ | ||
288 | if (e->state < L2T_STATE_SWITCHING) | ||
289 | for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) | ||
290 | if (*p == e) { | ||
291 | *p = e->next; | ||
292 | e->next = NULL; | ||
293 | break; | ||
294 | } | ||
295 | |||
296 | e->state = L2T_STATE_UNUSED; | ||
297 | return e; | ||
298 | } | ||
299 | |||
300 | /* | ||
301 | * Called when an L2T entry has no more users. | ||
302 | */ | ||
303 | static void t4_l2e_free(struct l2t_entry *e) | ||
304 | { | ||
305 | struct l2t_data *d; | ||
306 | |||
307 | spin_lock_bh(&e->lock); | ||
308 | if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */ | ||
309 | if (e->neigh) { | ||
310 | neigh_release(e->neigh); | ||
311 | e->neigh = NULL; | ||
312 | } | ||
313 | } | ||
314 | spin_unlock_bh(&e->lock); | ||
315 | |||
316 | d = container_of(e, struct l2t_data, l2tab[e->idx]); | ||
317 | atomic_inc(&d->nfree); | ||
318 | } | ||
319 | |||
320 | void cxgb4_l2t_release(struct l2t_entry *e) | ||
321 | { | ||
322 | if (atomic_dec_and_test(&e->refcnt)) | ||
323 | t4_l2e_free(e); | ||
324 | } | ||
325 | EXPORT_SYMBOL(cxgb4_l2t_release); | ||
326 | |||
327 | /* | ||
328 | * Update an L2T entry that was previously used for the same next hop as neigh. | ||
329 | * Must be called with softirqs disabled. | ||
330 | */ | ||
331 | static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh) | ||
332 | { | ||
333 | unsigned int nud_state; | ||
334 | |||
335 | spin_lock(&e->lock); /* avoid race with t4_l2e_free */ | ||
336 | if (neigh != e->neigh) | ||
337 | neigh_replace(e, neigh); | ||
338 | nud_state = neigh->nud_state; | ||
339 | if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) || | ||
340 | !(nud_state & NUD_VALID)) | ||
341 | e->state = L2T_STATE_RESOLVING; | ||
342 | else if (nud_state & NUD_CONNECTED) | ||
343 | e->state = L2T_STATE_VALID; | ||
344 | else | ||
345 | e->state = L2T_STATE_STALE; | ||
346 | spin_unlock(&e->lock); | ||
347 | } | ||
348 | |||
349 | struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, | ||
350 | const struct net_device *physdev, | ||
351 | unsigned int priority) | ||
352 | { | ||
353 | u8 lport; | ||
354 | u16 vlan; | ||
355 | struct l2t_entry *e; | ||
356 | int addr_len = neigh->tbl->key_len; | ||
357 | u32 *addr = (u32 *)neigh->primary_key; | ||
358 | int ifidx = neigh->dev->ifindex; | ||
359 | int hash = addr_hash(addr, addr_len, ifidx); | ||
360 | |||
361 | if (neigh->dev->flags & IFF_LOOPBACK) | ||
362 | lport = netdev2pinfo(physdev)->tx_chan + 4; | ||
363 | else | ||
364 | lport = netdev2pinfo(physdev)->lport; | ||
365 | |||
366 | if (neigh->dev->priv_flags & IFF_802_1Q_VLAN) | ||
367 | vlan = vlan_dev_vlan_id(neigh->dev); | ||
368 | else | ||
369 | vlan = VLAN_NONE; | ||
370 | |||
371 | write_lock_bh(&d->lock); | ||
372 | for (e = d->l2tab[hash].first; e; e = e->next) | ||
373 | if (!addreq(e, addr) && e->ifindex == ifidx && | ||
374 | e->vlan == vlan && e->lport == lport) { | ||
375 | l2t_hold(d, e); | ||
376 | if (atomic_read(&e->refcnt) == 1) | ||
377 | reuse_entry(e, neigh); | ||
378 | goto done; | ||
379 | } | ||
380 | |||
381 | /* Need to allocate a new entry */ | ||
382 | e = alloc_l2e(d); | ||
383 | if (e) { | ||
384 | spin_lock(&e->lock); /* avoid race with t4_l2e_free */ | ||
385 | e->state = L2T_STATE_RESOLVING; | ||
386 | memcpy(e->addr, addr, addr_len); | ||
387 | e->ifindex = ifidx; | ||
388 | e->hash = hash; | ||
389 | e->lport = lport; | ||
390 | e->v6 = addr_len == 16; | ||
391 | atomic_set(&e->refcnt, 1); | ||
392 | neigh_replace(e, neigh); | ||
393 | e->vlan = vlan; | ||
394 | e->next = d->l2tab[hash].first; | ||
395 | d->l2tab[hash].first = e; | ||
396 | spin_unlock(&e->lock); | ||
397 | } | ||
398 | done: | ||
399 | write_unlock_bh(&d->lock); | ||
400 | return e; | ||
401 | } | ||
402 | EXPORT_SYMBOL(cxgb4_l2t_get); | ||
403 | |||
404 | /* | ||
405 | * Called when address resolution fails for an L2T entry to handle packets | ||
406 | * on the arpq head. If a packet specifies a failure handler it is invoked, | ||
407 | * otherwise the packet is sent to the device. | ||
408 | */ | ||
409 | static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq) | ||
410 | { | ||
411 | while (arpq) { | ||
412 | struct sk_buff *skb = arpq; | ||
413 | const struct l2t_skb_cb *cb = L2T_SKB_CB(skb); | ||
414 | |||
415 | arpq = skb->next; | ||
416 | skb->next = NULL; | ||
417 | if (cb->arp_err_handler) | ||
418 | cb->arp_err_handler(cb->handle, skb); | ||
419 | else | ||
420 | t4_ofld_send(adap, skb); | ||
421 | } | ||
422 | } | ||
423 | |||
424 | /* | ||
425 | * Called when the host's neighbor layer makes a change to some entry that is | ||
426 | * loaded into the HW L2 table. | ||
427 | */ | ||
428 | void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) | ||
429 | { | ||
430 | struct l2t_entry *e; | ||
431 | struct sk_buff *arpq = NULL; | ||
432 | struct l2t_data *d = adap->l2t; | ||
433 | int addr_len = neigh->tbl->key_len; | ||
434 | u32 *addr = (u32 *) neigh->primary_key; | ||
435 | int ifidx = neigh->dev->ifindex; | ||
436 | int hash = addr_hash(addr, addr_len, ifidx); | ||
437 | |||
438 | read_lock_bh(&d->lock); | ||
439 | for (e = d->l2tab[hash].first; e; e = e->next) | ||
440 | if (!addreq(e, addr) && e->ifindex == ifidx) { | ||
441 | spin_lock(&e->lock); | ||
442 | if (atomic_read(&e->refcnt)) | ||
443 | goto found; | ||
444 | spin_unlock(&e->lock); | ||
445 | break; | ||
446 | } | ||
447 | read_unlock_bh(&d->lock); | ||
448 | return; | ||
449 | |||
450 | found: | ||
451 | read_unlock(&d->lock); | ||
452 | |||
453 | if (neigh != e->neigh) | ||
454 | neigh_replace(e, neigh); | ||
455 | |||
456 | if (e->state == L2T_STATE_RESOLVING) { | ||
457 | if (neigh->nud_state & NUD_FAILED) { | ||
458 | arpq = e->arpq_head; | ||
459 | e->arpq_head = e->arpq_tail = NULL; | ||
460 | } else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) && | ||
461 | e->arpq_head) { | ||
462 | write_l2e(adap, e, 1); | ||
463 | } | ||
464 | } else { | ||
465 | e->state = neigh->nud_state & NUD_CONNECTED ? | ||
466 | L2T_STATE_VALID : L2T_STATE_STALE; | ||
467 | if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac))) | ||
468 | write_l2e(adap, e, 0); | ||
469 | } | ||
470 | |||
471 | spin_unlock_bh(&e->lock); | ||
472 | |||
473 | if (arpq) | ||
474 | handle_failed_resolution(adap, arpq); | ||
475 | } | ||
476 | |||
477 | /* | ||
478 | * Allocate an L2T entry for use by a switching rule. Such entries need to be | ||
479 | * explicitly freed and while busy they are not on any hash chain, so normal | ||
480 | * address resolution updates do not see them. | ||
481 | */ | ||
482 | struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d) | ||
483 | { | ||
484 | struct l2t_entry *e; | ||
485 | |||
486 | write_lock_bh(&d->lock); | ||
487 | e = alloc_l2e(d); | ||
488 | if (e) { | ||
489 | spin_lock(&e->lock); /* avoid race with t4_l2e_free */ | ||
490 | e->state = L2T_STATE_SWITCHING; | ||
491 | atomic_set(&e->refcnt, 1); | ||
492 | spin_unlock(&e->lock); | ||
493 | } | ||
494 | write_unlock_bh(&d->lock); | ||
495 | return e; | ||
496 | } | ||
497 | |||
498 | /* | ||
499 | * Sets/updates the contents of a switching L2T entry that has been allocated | ||
500 | * with an earlier call to @t4_l2t_alloc_switching. | ||
501 | */ | ||
502 | int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, | ||
503 | u8 port, u8 *eth_addr) | ||
504 | { | ||
505 | e->vlan = vlan; | ||
506 | e->lport = port; | ||
507 | memcpy(e->dmac, eth_addr, ETH_ALEN); | ||
508 | return write_l2e(adap, e, 0); | ||
509 | } | ||
510 | |||
511 | struct l2t_data *t4_init_l2t(void) | ||
512 | { | ||
513 | int i; | ||
514 | struct l2t_data *d; | ||
515 | |||
516 | d = t4_alloc_mem(sizeof(*d)); | ||
517 | if (!d) | ||
518 | return NULL; | ||
519 | |||
520 | d->rover = d->l2tab; | ||
521 | atomic_set(&d->nfree, L2T_SIZE); | ||
522 | rwlock_init(&d->lock); | ||
523 | |||
524 | for (i = 0; i < L2T_SIZE; ++i) { | ||
525 | d->l2tab[i].idx = i; | ||
526 | d->l2tab[i].state = L2T_STATE_UNUSED; | ||
527 | spin_lock_init(&d->l2tab[i].lock); | ||
528 | atomic_set(&d->l2tab[i].refcnt, 0); | ||
529 | } | ||
530 | return d; | ||
531 | } | ||
532 | |||
533 | #include <linux/module.h> | ||
534 | #include <linux/debugfs.h> | ||
535 | #include <linux/seq_file.h> | ||
536 | |||
537 | static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos) | ||
538 | { | ||
539 | struct l2t_entry *l2tab = seq->private; | ||
540 | |||
541 | return pos >= L2T_SIZE ? NULL : &l2tab[pos]; | ||
542 | } | ||
543 | |||
544 | static void *l2t_seq_start(struct seq_file *seq, loff_t *pos) | ||
545 | { | ||
546 | return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; | ||
547 | } | ||
548 | |||
549 | static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
550 | { | ||
551 | v = l2t_get_idx(seq, *pos); | ||
552 | if (v) | ||
553 | ++*pos; | ||
554 | return v; | ||
555 | } | ||
556 | |||
557 | static void l2t_seq_stop(struct seq_file *seq, void *v) | ||
558 | { | ||
559 | } | ||
560 | |||
561 | static char l2e_state(const struct l2t_entry *e) | ||
562 | { | ||
563 | switch (e->state) { | ||
564 | case L2T_STATE_VALID: return 'V'; | ||
565 | case L2T_STATE_STALE: return 'S'; | ||
566 | case L2T_STATE_SYNC_WRITE: return 'W'; | ||
567 | case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R'; | ||
568 | case L2T_STATE_SWITCHING: return 'X'; | ||
569 | default: | ||
570 | return 'U'; | ||
571 | } | ||
572 | } | ||
573 | |||
574 | static int l2t_seq_show(struct seq_file *seq, void *v) | ||
575 | { | ||
576 | if (v == SEQ_START_TOKEN) | ||
577 | seq_puts(seq, " Idx IP address " | ||
578 | "Ethernet address VLAN/P LP State Users Port\n"); | ||
579 | else { | ||
580 | char ip[60]; | ||
581 | struct l2t_entry *e = v; | ||
582 | |||
583 | spin_lock_bh(&e->lock); | ||
584 | if (e->state == L2T_STATE_SWITCHING) | ||
585 | ip[0] = '\0'; | ||
586 | else | ||
587 | sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr); | ||
588 | seq_printf(seq, "%4u %-25s %17pM %4d %u %2u %c %5u %s\n", | ||
589 | e->idx, ip, e->dmac, | ||
590 | e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport, | ||
591 | l2e_state(e), atomic_read(&e->refcnt), | ||
592 | e->neigh ? e->neigh->dev->name : ""); | ||
593 | spin_unlock_bh(&e->lock); | ||
594 | } | ||
595 | return 0; | ||
596 | } | ||
597 | |||
598 | static const struct seq_operations l2t_seq_ops = { | ||
599 | .start = l2t_seq_start, | ||
600 | .next = l2t_seq_next, | ||
601 | .stop = l2t_seq_stop, | ||
602 | .show = l2t_seq_show | ||
603 | }; | ||
604 | |||
605 | static int l2t_seq_open(struct inode *inode, struct file *file) | ||
606 | { | ||
607 | int rc = seq_open(file, &l2t_seq_ops); | ||
608 | |||
609 | if (!rc) { | ||
610 | struct adapter *adap = inode->i_private; | ||
611 | struct seq_file *seq = file->private_data; | ||
612 | |||
613 | seq->private = adap->l2t->l2tab; | ||
614 | } | ||
615 | return rc; | ||
616 | } | ||
617 | |||
618 | const struct file_operations t4_l2t_fops = { | ||
619 | .owner = THIS_MODULE, | ||
620 | .open = l2t_seq_open, | ||
621 | .read = seq_read, | ||
622 | .llseek = seq_lseek, | ||
623 | .release = seq_release, | ||
624 | }; | ||
diff --git a/drivers/net/cxgb4/l2t.h b/drivers/net/cxgb4/l2t.h new file mode 100644 index 000000000000..643f27ed3cf4 --- /dev/null +++ b/drivers/net/cxgb4/l2t.h | |||
@@ -0,0 +1,110 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio T4 Ethernet driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #ifndef __CXGB4_L2T_H | ||
36 | #define __CXGB4_L2T_H | ||
37 | |||
38 | #include <linux/spinlock.h> | ||
39 | #include <linux/if_ether.h> | ||
40 | #include <asm/atomic.h> | ||
41 | |||
42 | struct adapter; | ||
43 | struct l2t_data; | ||
44 | struct neighbour; | ||
45 | struct net_device; | ||
46 | struct file_operations; | ||
47 | struct cpl_l2t_write_rpl; | ||
48 | |||
49 | /* | ||
50 | * Each L2T entry plays multiple roles. First of all, it keeps state for the | ||
51 | * corresponding entry of the HW L2 table and maintains a queue of offload | ||
52 | * packets awaiting address resolution. Second, it is a node of a hash table | ||
53 | * chain, where the nodes of the chain are linked together through their next | ||
54 | * pointer. Finally, each node is a bucket of a hash table, pointing to the | ||
55 | * first element in its chain through its first pointer. | ||
56 | */ | ||
57 | struct l2t_entry { | ||
58 | u16 state; /* entry state */ | ||
59 | u16 idx; /* entry index */ | ||
60 | u32 addr[4]; /* next hop IP or IPv6 address */ | ||
61 | int ifindex; /* neighbor's net_device's ifindex */ | ||
62 | struct neighbour *neigh; /* associated neighbour */ | ||
63 | struct l2t_entry *first; /* start of hash chain */ | ||
64 | struct l2t_entry *next; /* next l2t_entry on chain */ | ||
65 | struct sk_buff *arpq_head; /* queue of packets awaiting resolution */ | ||
66 | struct sk_buff *arpq_tail; | ||
67 | spinlock_t lock; | ||
68 | atomic_t refcnt; /* entry reference count */ | ||
69 | u16 hash; /* hash bucket the entry is on */ | ||
70 | u16 vlan; /* VLAN TCI (id: bits 0-11, prio: bits 13-15) */ | ||
71 | u8 v6; /* whether entry is for IPv6 */ | ||
72 | u8 lport; /* associated offload logical interface */ | ||
73 | u8 dmac[ETH_ALEN]; /* neighbour's MAC address */ | ||
74 | }; | ||
75 | |||
76 | typedef void (*arp_err_handler_t)(void *handle, struct sk_buff *skb); | ||
77 | |||
78 | /* | ||
79 | * Callback stored in an skb to handle address resolution failure. | ||
80 | */ | ||
81 | struct l2t_skb_cb { | ||
82 | void *handle; | ||
83 | arp_err_handler_t arp_err_handler; | ||
84 | }; | ||
85 | |||
86 | #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb) | ||
87 | |||
88 | static inline void t4_set_arp_err_handler(struct sk_buff *skb, void *handle, | ||
89 | arp_err_handler_t handler) | ||
90 | { | ||
91 | L2T_SKB_CB(skb)->handle = handle; | ||
92 | L2T_SKB_CB(skb)->arp_err_handler = handler; | ||
93 | } | ||
94 | |||
95 | void cxgb4_l2t_release(struct l2t_entry *e); | ||
96 | int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, | ||
97 | struct l2t_entry *e); | ||
98 | struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, | ||
99 | const struct net_device *physdev, | ||
100 | unsigned int priority); | ||
101 | |||
102 | void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); | ||
103 | struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); | ||
104 | int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, | ||
105 | u8 port, u8 *eth_addr); | ||
106 | struct l2t_data *t4_init_l2t(void); | ||
107 | void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl); | ||
108 | |||
109 | extern const struct file_operations t4_l2t_fops; | ||
110 | #endif /* __CXGB4_L2T_H */ | ||
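Putting the exported API together, the intended entry lifecycle is: look up or create an entry with cxgb4_l2t_get() (which takes a reference), transmit through it with cxgb4_l2t_send(), and drop the reference with cxgb4_l2t_release(). A minimal sketch, assuming the caller already holds a struct neighbour for the next hop; real users keep the reference for a connection's lifetime rather than per packet:

	static int xmit_via_l2t(struct l2t_data *d, struct neighbour *n,
				struct net_device *physdev, struct sk_buff *skb)
	{
		struct l2t_entry *e = cxgb4_l2t_get(d, n, physdev, 0);
		int ret;

		if (!e)
			return -ENOMEM;		/* L2 table is full */
		ret = cxgb4_l2t_send(physdev, skb, e);	/* may queue on the arpq */
		cxgb4_l2t_release(e);		/* drop our reference */
		return ret;
	}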
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c new file mode 100644 index 000000000000..14adc58e71c3 --- /dev/null +++ b/drivers/net/cxgb4/sge.c | |||
@@ -0,0 +1,2431 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio T4 Ethernet driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/skbuff.h> | ||
36 | #include <linux/netdevice.h> | ||
37 | #include <linux/etherdevice.h> | ||
38 | #include <linux/if_vlan.h> | ||
39 | #include <linux/ip.h> | ||
40 | #include <linux/dma-mapping.h> | ||
41 | #include <linux/jiffies.h> | ||
42 | #include <net/ipv6.h> | ||
43 | #include <net/tcp.h> | ||
44 | #include "cxgb4.h" | ||
45 | #include "t4_regs.h" | ||
46 | #include "t4_msg.h" | ||
47 | #include "t4fw_api.h" | ||
48 | |||
49 | /* | ||
50 | * Rx buffer size. We use largish buffers if possible but settle for single | ||
51 | * pages under memory shortage. | ||
52 | */ | ||
53 | #if PAGE_SHIFT >= 16 | ||
54 | # define FL_PG_ORDER 0 | ||
55 | #else | ||
56 | # define FL_PG_ORDER (16 - PAGE_SHIFT) | ||
57 | #endif | ||
58 | |||
59 | /* RX_PULL_LEN should be <= RX_COPY_THRES */ | ||
60 | #define RX_COPY_THRES 256 | ||
61 | #define RX_PULL_LEN 128 | ||
62 | |||
63 | /* | ||
64 | * Main body length for sk_buffs used for Rx Ethernet packets with fragments. | ||
65 | * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room. | ||
66 | */ | ||
67 | #define RX_PKT_SKB_LEN 512 | ||
68 | |||
69 | /* Ethernet header padding prepended to RX_PKTs */ | ||
70 | #define RX_PKT_PAD 2 | ||
71 | |||
72 | /* | ||
73 | * Max number of Tx descriptors we clean up at a time. Should be modest as | ||
74 | * freeing skbs isn't cheap and it happens while holding locks. As long as | ||
75 | * we free packets faster than they arrive, we eventually catch up and keep | ||
76 | * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. | ||
77 | */ | ||
78 | #define MAX_TX_RECLAIM 16 | ||
79 | |||
80 | /* | ||
81 | * Max number of Rx buffers we replenish at a time. Again keep this modest, | ||
82 | * allocating buffers isn't cheap either. | ||
83 | */ | ||
84 | #define MAX_RX_REFILL 16U | ||
85 | |||
86 | /* | ||
87 | * Period of the Rx queue check timer. This timer is infrequent as it has | ||
88 | * something to do only when the system experiences severe memory shortage. | ||
89 | */ | ||
90 | #define RX_QCHECK_PERIOD (HZ / 2) | ||
91 | |||
92 | /* | ||
93 | * Period of the Tx queue check timer. | ||
94 | */ | ||
95 | #define TX_QCHECK_PERIOD (HZ / 2) | ||
96 | |||
97 | /* | ||
98 | * Max number of Tx descriptors to be reclaimed by the Tx timer. | ||
99 | */ | ||
100 | #define MAX_TIMER_TX_RECLAIM 100 | ||
101 | |||
102 | /* | ||
103 | * Timer index used when backing off due to memory shortage. | ||
104 | */ | ||
105 | #define NOMEM_TMR_IDX (SGE_NTIMERS - 1) | ||
106 | |||
107 | /* | ||
108 | * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will | ||
109 | * attempt to refill it. | ||
110 | */ | ||
111 | #define FL_STARVE_THRES 4 | ||
112 | |||
113 | /* | ||
114 | * Suspend an Ethernet Tx queue with fewer available descriptors than this. | ||
115 | * This is the same as calc_tx_descs() for a TSO packet with | ||
116 | * nr_frags == MAX_SKB_FRAGS. | ||
117 | */ | ||
118 | #define ETHTXQ_STOP_THRES \ | ||
119 | (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8)) | ||
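Plugging in the usual value of MAX_SKB_FRAGS for 4KB pages (18 at the time of writing) shows what the threshold evaluates to, and that it agrees with calc_tx_descs() below for a max-fragment TSO packet:

	/* With MAX_SKB_FRAGS == 18:
	 *   (3 * 18) / 2 + (18 & 1) = 27 SGL flits for the fragments,
	 *   DIV_ROUND_UP(27, 8)     = 4 descriptors,
	 *   + 1                     = 5 descriptors total,
	 * matching the TSO case of calc_tx_flits():
	 *   sgl_len(19) + 4 + 2 = 29 + 6 = 35 flits -> DIV_ROUND_UP(35, 8) = 5.
	 */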
120 | |||
121 | /* | ||
122 | * Suspension threshold for non-Ethernet Tx queues. We require enough room | ||
123 | * for a full-sized WR. | ||
124 | */ | ||
125 | #define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc)) | ||
126 | |||
127 | /* | ||
128 | * Max Tx descriptor space we allow for an Ethernet packet to be inlined | ||
129 | * into a WR. | ||
130 | */ | ||
131 | #define MAX_IMM_TX_PKT_LEN 128 | ||
132 | |||
133 | /* | ||
134 | * Max size of a WR sent through a control Tx queue. | ||
135 | */ | ||
136 | #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN | ||
137 | |||
138 | enum { | ||
139 | /* packet alignment in FL buffers */ | ||
140 | FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES, | ||
141 | /* egress status entry size */ | ||
142 | STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64 | ||
143 | }; | ||
144 | |||
145 | struct tx_sw_desc { /* SW state per Tx descriptor */ | ||
146 | struct sk_buff *skb; | ||
147 | struct ulptx_sgl *sgl; | ||
148 | }; | ||
149 | |||
150 | struct rx_sw_desc { /* SW state per Rx descriptor */ | ||
151 | struct page *page; | ||
152 | dma_addr_t dma_addr; | ||
153 | }; | ||
154 | |||
155 | /* | ||
156 | * The low bits of rx_sw_desc.dma_addr have special meaning. | ||
157 | */ | ||
158 | enum { | ||
159 | RX_LARGE_BUF = 1 << 0, /* buffer is larger than PAGE_SIZE */ | ||
160 | RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */ | ||
161 | }; | ||
162 | |||
163 | static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d) | ||
164 | { | ||
165 | return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF); | ||
166 | } | ||
167 | |||
168 | static inline bool is_buf_mapped(const struct rx_sw_desc *d) | ||
169 | { | ||
170 | return !(d->dma_addr & RX_UNMAPPED_BUF); | ||
171 | } | ||
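Both flags ride in the low bits of the bus address itself: FL buffers are whole pages, so their DMA addresses are page aligned and the bottom two bits are always zero. A small illustration (hypothetical address, not driver code):

	/* A large, mapped page at bus address 0x1f4000 is stored as: */
	dma_addr_t stored = 0x1f4000 | RX_LARGE_BUF;	/* == 0x1f4001 */
	/* get_buf_addr() masks the two flag bits back out:
	 *   0x1f4001 & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF) == 0x1f4000
	 * while the RX_LARGE_BUF bit stays in sd->dma_addr for get_buf_size(). */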
172 | |||
173 | /** | ||
174 | * txq_avail - return the number of available slots in a Tx queue | ||
175 | * @q: the Tx queue | ||
176 | * | ||
177 | * Returns the number of descriptors in a Tx queue available to write new | ||
178 | * packets. | ||
179 | */ | ||
180 | static inline unsigned int txq_avail(const struct sge_txq *q) | ||
181 | { | ||
182 | return q->size - 1 - q->in_use; | ||
183 | } | ||
184 | |||
185 | /** | ||
186 | * fl_cap - return the capacity of a free-buffer list | ||
187 | * @fl: the FL | ||
188 | * | ||
189 | * Returns the capacity of a free-buffer list. The capacity is less than | ||
190 | * the size because one descriptor needs to be left unpopulated; otherwise | ||
191 | * HW will think the FL is empty. | ||
192 | */ | ||
193 | static inline unsigned int fl_cap(const struct sge_fl *fl) | ||
194 | { | ||
195 | return fl->size - 8; /* 1 descriptor = 8 buffers */ | ||
196 | } | ||
197 | |||
198 | static inline bool fl_starving(const struct sge_fl *fl) | ||
199 | { | ||
200 | return fl->avail - fl->pend_cred <= FL_STARVE_THRES; | ||
201 | } | ||
202 | |||
203 | static int map_skb(struct device *dev, const struct sk_buff *skb, | ||
204 | dma_addr_t *addr) | ||
205 | { | ||
206 | const skb_frag_t *fp, *end; | ||
207 | const struct skb_shared_info *si; | ||
208 | |||
209 | *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); | ||
210 | if (dma_mapping_error(dev, *addr)) | ||
211 | goto out_err; | ||
212 | |||
213 | si = skb_shinfo(skb); | ||
214 | end = &si->frags[si->nr_frags]; | ||
215 | |||
216 | for (fp = si->frags; fp < end; fp++) { | ||
217 | *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size, | ||
218 | DMA_TO_DEVICE); | ||
219 | if (dma_mapping_error(dev, *addr)) | ||
220 | goto unwind; | ||
221 | } | ||
222 | return 0; | ||
223 | |||
224 | unwind: | ||
225 | while (fp-- > si->frags) | ||
226 | dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE); | ||
227 | |||
228 | dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); | ||
229 | out_err: | ||
230 | return -ENOMEM; | ||
231 | } | ||
232 | |||
233 | #ifdef CONFIG_NEED_DMA_MAP_STATE | ||
234 | static void unmap_skb(struct device *dev, const struct sk_buff *skb, | ||
235 | const dma_addr_t *addr) | ||
236 | { | ||
237 | const skb_frag_t *fp, *end; | ||
238 | const struct skb_shared_info *si; | ||
239 | |||
240 | dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE); | ||
241 | |||
242 | si = skb_shinfo(skb); | ||
243 | end = &si->frags[si->nr_frags]; | ||
244 | for (fp = si->frags; fp < end; fp++) | ||
245 | dma_unmap_page(dev, *addr++, fp->size, DMA_TO_DEVICE); | ||
246 | } | ||
247 | |||
248 | /** | ||
249 | * deferred_unmap_destructor - unmap a packet when it is freed | ||
250 | * @skb: the packet | ||
251 | * | ||
252 | * This is the packet destructor used for Tx packets that need to remain | ||
253 | * mapped until they are freed rather than until their Tx descriptors are | ||
254 | * freed. | ||
255 | */ | ||
256 | static void deferred_unmap_destructor(struct sk_buff *skb) | ||
257 | { | ||
258 | unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head); | ||
259 | } | ||
260 | #endif | ||
261 | |||
262 | static void unmap_sgl(struct device *dev, const struct sk_buff *skb, | ||
263 | const struct ulptx_sgl *sgl, const struct sge_txq *q) | ||
264 | { | ||
265 | const struct ulptx_sge_pair *p; | ||
266 | unsigned int nfrags = skb_shinfo(skb)->nr_frags; | ||
267 | |||
268 | if (likely(skb_headlen(skb))) | ||
269 | dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0), | ||
270 | DMA_TO_DEVICE); | ||
271 | else { | ||
272 | dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0), | ||
273 | DMA_TO_DEVICE); | ||
274 | nfrags--; | ||
275 | } | ||
276 | |||
277 | /* | ||
278 | * the complexity below is because of the possibility of a wrap-around | ||
279 | * in the middle of an SGL | ||
280 | */ | ||
281 | for (p = sgl->sge; nfrags >= 2; nfrags -= 2) { | ||
282 | if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) { | ||
283 | unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]), | ||
284 | ntohl(p->len[0]), DMA_TO_DEVICE); | ||
285 | dma_unmap_page(dev, be64_to_cpu(p->addr[1]), | ||
286 | ntohl(p->len[1]), DMA_TO_DEVICE); | ||
287 | p++; | ||
288 | } else if ((u8 *)p == (u8 *)q->stat) { | ||
289 | p = (const struct ulptx_sge_pair *)q->desc; | ||
290 | goto unmap; | ||
291 | } else if ((u8 *)p + 8 == (u8 *)q->stat) { | ||
292 | const __be64 *addr = (const __be64 *)q->desc; | ||
293 | |||
294 | dma_unmap_page(dev, be64_to_cpu(addr[0]), | ||
295 | ntohl(p->len[0]), DMA_TO_DEVICE); | ||
296 | dma_unmap_page(dev, be64_to_cpu(addr[1]), | ||
297 | ntohl(p->len[1]), DMA_TO_DEVICE); | ||
298 | p = (const struct ulptx_sge_pair *)&addr[2]; | ||
299 | } else { | ||
300 | const __be64 *addr = (const __be64 *)q->desc; | ||
301 | |||
302 | dma_unmap_page(dev, be64_to_cpu(p->addr[0]), | ||
303 | ntohl(p->len[0]), DMA_TO_DEVICE); | ||
304 | dma_unmap_page(dev, be64_to_cpu(addr[0]), | ||
305 | ntohl(p->len[1]), DMA_TO_DEVICE); | ||
306 | p = (const struct ulptx_sge_pair *)&addr[1]; | ||
307 | } | ||
308 | } | ||
309 | if (nfrags) { | ||
310 | __be64 addr; | ||
311 | |||
312 | if ((u8 *)p == (u8 *)q->stat) | ||
313 | p = (const struct ulptx_sge_pair *)q->desc; | ||
314 | addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] : | ||
315 | *(const __be64 *)q->desc; | ||
316 | dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]), | ||
317 | DMA_TO_DEVICE); | ||
318 | } | ||
319 | } | ||
320 | |||
321 | /** | ||
322 | * free_tx_desc - reclaims Tx descriptors and their buffers | ||
323 | * @adap: the adapter | ||
324 | * @q: the Tx queue to reclaim descriptors from | ||
325 | * @n: the number of descriptors to reclaim | ||
326 | * @unmap: whether the buffers should be unmapped for DMA | ||
327 | * | ||
328 | * Reclaims Tx descriptors from an SGE Tx queue and frees the associated | ||
329 | * Tx buffers. Called with the Tx queue lock held. | ||
330 | */ | ||
331 | static void free_tx_desc(struct adapter *adap, struct sge_txq *q, | ||
332 | unsigned int n, bool unmap) | ||
333 | { | ||
334 | struct tx_sw_desc *d; | ||
335 | unsigned int cidx = q->cidx; | ||
336 | struct device *dev = adap->pdev_dev; | ||
337 | |||
338 | d = &q->sdesc[cidx]; | ||
339 | while (n--) { | ||
340 | if (d->skb) { /* an SGL is present */ | ||
341 | if (unmap) | ||
342 | unmap_sgl(dev, d->skb, d->sgl, q); | ||
343 | kfree_skb(d->skb); | ||
344 | d->skb = NULL; | ||
345 | } | ||
346 | ++d; | ||
347 | if (++cidx == q->size) { | ||
348 | cidx = 0; | ||
349 | d = q->sdesc; | ||
350 | } | ||
351 | } | ||
352 | q->cidx = cidx; | ||
353 | } | ||
354 | |||
355 | /* | ||
356 | * Return the number of reclaimable descriptors in a Tx queue. | ||
357 | */ | ||
358 | static inline int reclaimable(const struct sge_txq *q) | ||
359 | { | ||
360 | int hw_cidx = ntohs(q->stat->cidx); | ||
361 | hw_cidx -= q->cidx; | ||
362 | return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; | ||
363 | } | ||
364 | |||
365 | /** | ||
366 | * reclaim_completed_tx - reclaims completed Tx descriptors | ||
367 | * @adap: the adapter | ||
368 | * @q: the Tx queue to reclaim completed descriptors from | ||
369 | * @unmap: whether the buffers should be unmapped for DMA | ||
370 | * | ||
371 | * Reclaims Tx descriptors that the SGE has indicated it has processed, | ||
372 | * and frees the associated buffers if possible. Called with the Tx | ||
373 | * queue locked. | ||
374 | */ | ||
375 | static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, | ||
376 | bool unmap) | ||
377 | { | ||
378 | int avail = reclaimable(q); | ||
379 | |||
380 | if (avail) { | ||
381 | /* | ||
382 | * Limit the amount of clean up work we do at a time to keep | ||
383 | * the Tx lock hold time O(1). | ||
384 | */ | ||
385 | if (avail > MAX_TX_RECLAIM) | ||
386 | avail = MAX_TX_RECLAIM; | ||
387 | |||
388 | free_tx_desc(adap, q, avail, unmap); | ||
389 | q->in_use -= avail; | ||
390 | } | ||
391 | } | ||
392 | |||
393 | static inline int get_buf_size(const struct rx_sw_desc *d) | ||
394 | { | ||
395 | #if FL_PG_ORDER > 0 | ||
396 | return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) : | ||
397 | PAGE_SIZE; | ||
398 | #else | ||
399 | return PAGE_SIZE; | ||
400 | #endif | ||
401 | } | ||
402 | |||
403 | /** | ||
404 | * free_rx_bufs - free the Rx buffers on an SGE free list | ||
405 | * @adap: the adapter | ||
406 | * @q: the SGE free list to free buffers from | ||
407 | * @n: how many buffers to free | ||
408 | * | ||
409 | * Release the next @n buffers on an SGE free-buffer Rx queue. The | ||
410 | * buffers must be made inaccessible to HW before calling this function. | ||
411 | */ | ||
412 | static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n) | ||
413 | { | ||
414 | while (n--) { | ||
415 | struct rx_sw_desc *d = &q->sdesc[q->cidx]; | ||
416 | |||
417 | if (is_buf_mapped(d)) | ||
418 | dma_unmap_page(adap->pdev_dev, get_buf_addr(d), | ||
419 | get_buf_size(d), PCI_DMA_FROMDEVICE); | ||
420 | put_page(d->page); | ||
421 | d->page = NULL; | ||
422 | if (++q->cidx == q->size) | ||
423 | q->cidx = 0; | ||
424 | q->avail--; | ||
425 | } | ||
426 | } | ||
427 | |||
428 | /** | ||
429 | * unmap_rx_buf - unmap the current Rx buffer on an SGE free list | ||
430 | * @adap: the adapter | ||
431 | * @q: the SGE free list | ||
432 | * | ||
433 | * Unmap the current buffer on an SGE free-buffer Rx queue. The | ||
434 | * buffer must be made inaccessible to HW before calling this function. | ||
435 | * | ||
436 | * This is similar to @free_rx_bufs above but does not free the buffer. | ||
437 | * Note that the FL still loses any further access to the buffer. | ||
438 | */ | ||
439 | static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) | ||
440 | { | ||
441 | struct rx_sw_desc *d = &q->sdesc[q->cidx]; | ||
442 | |||
443 | if (is_buf_mapped(d)) | ||
444 | dma_unmap_page(adap->pdev_dev, get_buf_addr(d), | ||
445 | get_buf_size(d), PCI_DMA_FROMDEVICE); | ||
446 | d->page = NULL; | ||
447 | if (++q->cidx == q->size) | ||
448 | q->cidx = 0; | ||
449 | q->avail--; | ||
450 | } | ||
451 | |||
452 | static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) | ||
453 | { | ||
454 | if (q->pend_cred >= 8) { | ||
455 | wmb(); | ||
456 | t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO | | ||
457 | QID(q->cntxt_id) | PIDX(q->pend_cred / 8)); | ||
458 | q->pend_cred &= 7; | ||
459 | } | ||
460 | } | ||
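Free-list credits are handed to HW in units of 8 because one FL descriptor covers 8 buffers (the same unit fl_cap() reserves); leftover credits simply wait for the next call. A worked example of the bookkeeping above:

	/* Suppose q->pend_cred == 13 when ring_fl_db() runs:
	 *   PIDX(13 / 8) rings the doorbell for 1 unit, i.e. 8 new buffers;
	 *   q->pend_cred &= 7 leaves 5 credits pending, pushed out once at
	 *   least 3 more buffers have been replenished.
	 */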
461 | |||
462 | static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg, | ||
463 | dma_addr_t mapping) | ||
464 | { | ||
465 | sd->page = pg; | ||
466 | sd->dma_addr = mapping; /* includes size low bits */ | ||
467 | } | ||
468 | |||
469 | /** | ||
470 | * refill_fl - refill an SGE Rx buffer ring | ||
471 | * @adap: the adapter | ||
472 | * @q: the ring to refill | ||
473 | * @n: the number of new buffers to allocate | ||
474 | * @gfp: the gfp flags for the allocations | ||
475 | * | ||
476 | * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, | ||
477 | * allocated with the supplied gfp flags. The caller must ensure that | ||
478 | * @n does not exceed the queue's capacity. If afterwards the queue is | ||
479 | * found critically low mark it as starving in the bitmap of starving FLs. | ||
480 | * | ||
481 | * Returns the number of buffers allocated. | ||
482 | */ | ||
483 | static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, | ||
484 | gfp_t gfp) | ||
485 | { | ||
486 | struct page *pg; | ||
487 | dma_addr_t mapping; | ||
488 | unsigned int cred = q->avail; | ||
489 | __be64 *d = &q->desc[q->pidx]; | ||
490 | struct rx_sw_desc *sd = &q->sdesc[q->pidx]; | ||
491 | |||
492 | gfp |= __GFP_NOWARN; /* failures are expected */ | ||
493 | |||
494 | #if FL_PG_ORDER > 0 | ||
495 | /* | ||
496 | * Prefer large buffers | ||
497 | */ | ||
498 | while (n) { | ||
499 | pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER); | ||
500 | if (unlikely(!pg)) { | ||
501 | q->large_alloc_failed++; | ||
502 | break; /* fall back to single pages */ | ||
503 | } | ||
504 | |||
505 | mapping = dma_map_page(adap->pdev_dev, pg, 0, | ||
506 | PAGE_SIZE << FL_PG_ORDER, | ||
507 | PCI_DMA_FROMDEVICE); | ||
508 | if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { | ||
509 | __free_pages(pg, FL_PG_ORDER); | ||
510 | goto out; /* do not try small pages for this error */ | ||
511 | } | ||
512 | mapping |= RX_LARGE_BUF; | ||
513 | *d++ = cpu_to_be64(mapping); | ||
514 | |||
515 | set_rx_sw_desc(sd, pg, mapping); | ||
516 | sd++; | ||
517 | |||
518 | q->avail++; | ||
519 | if (++q->pidx == q->size) { | ||
520 | q->pidx = 0; | ||
521 | sd = q->sdesc; | ||
522 | d = q->desc; | ||
523 | } | ||
524 | n--; | ||
525 | } | ||
526 | #endif | ||
527 | |||
528 | while (n--) { | ||
529 | pg = __netdev_alloc_page(adap->port[0], gfp); | ||
530 | if (unlikely(!pg)) { | ||
531 | q->alloc_failed++; | ||
532 | break; | ||
533 | } | ||
534 | |||
535 | mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, | ||
536 | PCI_DMA_FROMDEVICE); | ||
537 | if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { | ||
538 | netdev_free_page(adap->port[0], pg); | ||
539 | goto out; | ||
540 | } | ||
541 | *d++ = cpu_to_be64(mapping); | ||
542 | |||
543 | set_rx_sw_desc(sd, pg, mapping); | ||
544 | sd++; | ||
545 | |||
546 | q->avail++; | ||
547 | if (++q->pidx == q->size) { | ||
548 | q->pidx = 0; | ||
549 | sd = q->sdesc; | ||
550 | d = q->desc; | ||
551 | } | ||
552 | } | ||
553 | |||
554 | out: cred = q->avail - cred; | ||
555 | q->pend_cred += cred; | ||
556 | ring_fl_db(adap, q); | ||
557 | |||
558 | if (unlikely(fl_starving(q))) { | ||
559 | smp_wmb(); | ||
560 | set_bit(q->cntxt_id, adap->sge.starving_fl); | ||
561 | } | ||
562 | |||
563 | return cred; | ||
564 | } | ||
565 | |||
566 | static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) | ||
567 | { | ||
568 | refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail), | ||
569 | GFP_ATOMIC); | ||
570 | } | ||
571 | |||
572 | /** | ||
573 | * alloc_ring - allocate resources for an SGE descriptor ring | ||
574 | * @dev: the PCI device's core device | ||
575 | * @nelem: the number of descriptors | ||
576 | * @elem_size: the size of each descriptor | ||
577 | * @sw_size: the size of the SW state associated with each ring element | ||
578 | * @phys: the physical address of the allocated ring | ||
579 | * @metadata: address of the array holding the SW state for the ring | ||
580 | * @stat_size: extra space in HW ring for status information | ||
581 | * | ||
582 | * Allocates resources for an SGE descriptor ring, such as Tx queues, | ||
583 | * free buffer lists, or response queues. Each SGE ring requires | ||
584 | * space for its HW descriptors plus, optionally, space for the SW state | ||
585 | * associated with each HW entry (the metadata). The function returns | ||
586 | * three values: the virtual address for the HW ring (the return value | ||
587 | * of the function), the bus address of the HW ring, and the address | ||
588 | * of the SW ring. | ||
589 | */ | ||
590 | static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, | ||
591 | size_t sw_size, dma_addr_t *phys, void *metadata, | ||
592 | size_t stat_size) | ||
593 | { | ||
594 | size_t len = nelem * elem_size + stat_size; | ||
595 | void *s = NULL; | ||
596 | void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL); | ||
597 | |||
598 | if (!p) | ||
599 | return NULL; | ||
600 | if (sw_size) { | ||
601 | s = kcalloc(nelem, sw_size, GFP_KERNEL); | ||
602 | |||
603 | if (!s) { | ||
604 | dma_free_coherent(dev, len, p, *phys); | ||
605 | return NULL; | ||
606 | } | ||
607 | } | ||
608 | if (metadata) | ||
609 | *(void **)metadata = s; | ||
610 | memset(p, 0, len); | ||
611 | return p; | ||
612 | } | ||
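A free list, for instance, would be carved out of this helper with 8-byte HW descriptors, one struct rx_sw_desc of metadata per entry, and STAT_LEN of trailing status space. A sketch of such a call (fl field names follow struct sge_fl; this is not the driver's literal setup code):

	fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
			      sizeof(struct rx_sw_desc), &fl->addr,
			      &fl->sdesc, STAT_LEN);
	if (!fl->desc)
		return -ENOMEM;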
613 | |||
614 | /** | ||
615 | * sgl_len - calculates the size of an SGL of the given capacity | ||
616 | * @n: the number of SGL entries | ||
617 | * | ||
618 | * Calculates the number of flits needed for a scatter/gather list that | ||
619 | * can hold the given number of entries. | ||
620 | */ | ||
621 | static inline unsigned int sgl_len(unsigned int n) | ||
622 | { | ||
623 | n--; | ||
624 | return (3 * n) / 2 + (n & 1) + 2; | ||
625 | } | ||
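The arithmetic mirrors the ULP_TX DSGL layout: the first SGE folds into the 2-flit header (cmd_nsge, len0, addr0), and the remaining n - 1 SGEs pack into ulptx_sge_pair structures at 3 flits per pair, with the n & 1 term rounding up a final half pair. A worked case:

	/* 4 SGEs total (e.g. linear data plus 3 page fragments):
	 *   after n--, 3 SGEs remain beyond the header;
	 *   (3 * 3) / 2 + (3 & 1) + 2 = 4 + 1 + 2 = 7 flits, i.e. 56 bytes.
	 */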
626 | |||
627 | /** | ||
628 | * flits_to_desc - returns the num of Tx descriptors for the given flits | ||
629 | * @n: the number of flits | ||
630 | * | ||
631 | * Returns the number of Tx descriptors needed for the supplied number | ||
632 | * of flits. | ||
633 | */ | ||
634 | static inline unsigned int flits_to_desc(unsigned int n) | ||
635 | { | ||
636 | BUG_ON(n > SGE_MAX_WR_LEN / 8); | ||
637 | return DIV_ROUND_UP(n, 8); | ||
638 | } | ||
639 | |||
640 | /** | ||
641 | * is_eth_imm - can an Ethernet packet be sent as immediate data? | ||
642 | * @skb: the packet | ||
643 | * | ||
644 | * Returns whether an Ethernet packet is small enough to fit as | ||
645 | * immediate data. | ||
646 | */ | ||
647 | static inline int is_eth_imm(const struct sk_buff *skb) | ||
648 | { | ||
649 | return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt); | ||
650 | } | ||
651 | |||
652 | /** | ||
653 | * calc_tx_flits - calculate the number of flits for a packet Tx WR | ||
654 | * @skb: the packet | ||
655 | * | ||
656 | * Returns the number of flits needed for a Tx WR for the given Ethernet | ||
657 | * packet, including the needed WR and CPL headers. | ||
658 | */ | ||
659 | static inline unsigned int calc_tx_flits(const struct sk_buff *skb) | ||
660 | { | ||
661 | unsigned int flits; | ||
662 | |||
663 | if (is_eth_imm(skb)) | ||
664 | return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8); | ||
665 | |||
666 | flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4; | ||
667 | if (skb_shinfo(skb)->gso_size) | ||
668 | flits += 2; | ||
669 | return flits; | ||
670 | } | ||
671 | |||
672 | /** | ||
673 | * calc_tx_descs - calculate the number of Tx descriptors for a packet | ||
674 | * @skb: the packet | ||
675 | * | ||
676 | * Returns the number of Tx descriptors needed for the given Ethernet | ||
677 | * packet, including the needed WR and CPL headers. | ||
678 | */ | ||
679 | static inline unsigned int calc_tx_descs(const struct sk_buff *skb) | ||
680 | { | ||
681 | return flits_to_desc(calc_tx_flits(skb)); | ||
682 | } | ||
683 | |||
684 | /** | ||
685 | * write_sgl - populate a scatter/gather list for a packet | ||
686 | * @skb: the packet | ||
687 | * @q: the Tx queue we are writing into | ||
688 | * @sgl: starting location for writing the SGL | ||
689 | * @end: points right after the end of the SGL | ||
690 | * @start: start offset into skb main-body data to include in the SGL | ||
691 | * @addr: the list of bus addresses for the SGL elements | ||
692 | * | ||
693 | * Generates a gather list for the buffers that make up a packet. | ||
694 | * The caller must provide adequate space for the SGL that will be written. | ||
695 | * The SGL includes all of the packet's page fragments and the data in its | ||
696 | * main body except for the first @start bytes. @sgl must be 16-byte | ||
697 | * aligned and within a Tx descriptor with available space. @end points | ||
698 | * right after the end of the SGL but does not account for any potential | ||
699 | * wrap around, i.e., @end > @sgl. | ||
700 | */ | ||
701 | static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, | ||
702 | struct ulptx_sgl *sgl, u64 *end, unsigned int start, | ||
703 | const dma_addr_t *addr) | ||
704 | { | ||
705 | unsigned int i, len; | ||
706 | struct ulptx_sge_pair *to; | ||
707 | const struct skb_shared_info *si = skb_shinfo(skb); | ||
708 | unsigned int nfrags = si->nr_frags; | ||
709 | struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1]; | ||
710 | |||
711 | len = skb_headlen(skb) - start; | ||
712 | if (likely(len)) { | ||
713 | sgl->len0 = htonl(len); | ||
714 | sgl->addr0 = cpu_to_be64(addr[0] + start); | ||
715 | nfrags++; | ||
716 | } else { | ||
717 | sgl->len0 = htonl(si->frags[0].size); | ||
718 | sgl->addr0 = cpu_to_be64(addr[1]); | ||
719 | } | ||
720 | |||
721 | sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags)); | ||
722 | if (likely(--nfrags == 0)) | ||
723 | return; | ||
724 | /* | ||
725 | * Most of the complexity below deals with the possibility we hit the | ||
726 | * end of the queue in the middle of writing the SGL. For this case | ||
727 | * only we create the SGL in a temporary buffer and then copy it. | ||
728 | */ | ||
729 | to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; | ||
730 | |||
731 | for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { | ||
732 | to->len[0] = cpu_to_be32(si->frags[i].size); | ||
733 | to->len[1] = cpu_to_be32(si->frags[++i].size); | ||
734 | to->addr[0] = cpu_to_be64(addr[i]); | ||
735 | to->addr[1] = cpu_to_be64(addr[++i]); | ||
736 | } | ||
737 | if (nfrags) { | ||
738 | to->len[0] = cpu_to_be32(si->frags[i].size); | ||
739 | to->len[1] = cpu_to_be32(0); | ||
740 | to->addr[0] = cpu_to_be64(addr[i + 1]); | ||
741 | } | ||
742 | if (unlikely((u8 *)end > (u8 *)q->stat)) { | ||
743 | unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; | ||
744 | |||
745 | if (likely(part0)) | ||
746 | memcpy(sgl->sge, buf, part0); | ||
747 | part1 = (u8 *)end - (u8 *)q->stat; | ||
748 | memcpy(q->desc, (u8 *)buf + part0, part1); | ||
749 | end = (void *)q->desc + part1; | ||
750 | } | ||
751 | if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ | ||
752 | *(u64 *)end = 0; | ||
753 | } | ||
754 | |||
755 | /** | ||
756 | * ring_tx_db - check and potentially ring a Tx queue's doorbell | ||
757 | * @adap: the adapter | ||
758 | * @q: the Tx queue | ||
759 | * @n: number of new descriptors to give to HW | ||
760 | * | ||
761 | * Ring the doorbell for a Tx queue. | ||
762 | */ | ||
763 | static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) | ||
764 | { | ||
765 | wmb(); /* write descriptors before telling HW */ | ||
766 | t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), | ||
767 | QID(q->cntxt_id) | PIDX(n)); | ||
768 | } | ||
769 | |||
770 | /** | ||
771 | * inline_tx_skb - inline a packet's data into Tx descriptors | ||
772 | * @skb: the packet | ||
773 | * @q: the Tx queue where the packet will be inlined | ||
774 | * @pos: starting position in the Tx queue where to inline the packet | ||
775 | * | ||
776 | * Inline a packet's contents directly into Tx descriptors, starting at | ||
777 | * the given position within the Tx DMA ring. | ||
778 | * Most of the complexity of this operation is dealing with wrap arounds | ||
779 | * in the middle of the packet we want to inline. | ||
780 | */ | ||
781 | static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q, | ||
782 | void *pos) | ||
783 | { | ||
784 | u64 *p; | ||
785 | int left = (void *)q->stat - pos; | ||
786 | |||
787 | if (likely(skb->len <= left)) { | ||
788 | if (likely(!skb->data_len)) | ||
789 | skb_copy_from_linear_data(skb, pos, skb->len); | ||
790 | else | ||
791 | skb_copy_bits(skb, 0, pos, skb->len); | ||
792 | pos += skb->len; | ||
793 | } else { | ||
794 | skb_copy_bits(skb, 0, pos, left); | ||
795 | skb_copy_bits(skb, left, q->desc, skb->len - left); | ||
796 | pos = (void *)q->desc + (skb->len - left); | ||
797 | } | ||
798 | |||
799 | /* 0-pad to multiple of 16 */ | ||
800 | p = PTR_ALIGN(pos, 8); | ||
801 | if ((uintptr_t)p & 8) | ||
802 | *p = 0; | ||
803 | } | ||
804 | |||
805 | /* | ||
806 | * Figure out what HW csum a packet wants and return the appropriate control | ||
807 | * bits. | ||
808 | */ | ||
809 | static u64 hwcsum(const struct sk_buff *skb) | ||
810 | { | ||
811 | int csum_type; | ||
812 | const struct iphdr *iph = ip_hdr(skb); | ||
813 | |||
814 | if (iph->version == 4) { | ||
815 | if (iph->protocol == IPPROTO_TCP) | ||
816 | csum_type = TX_CSUM_TCPIP; | ||
817 | else if (iph->protocol == IPPROTO_UDP) | ||
818 | csum_type = TX_CSUM_UDPIP; | ||
819 | else { | ||
820 | nocsum: /* | ||
821 | * unknown protocol, disable HW csum | ||
822 | * and hope a bad packet is detected | ||
823 | */ | ||
824 | return TXPKT_L4CSUM_DIS; | ||
825 | } | ||
826 | } else { | ||
827 | /* | ||
828 | * this doesn't work with extension headers | ||
829 | */ | ||
830 | const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph; | ||
831 | |||
832 | if (ip6h->nexthdr == IPPROTO_TCP) | ||
833 | csum_type = TX_CSUM_TCPIP6; | ||
834 | else if (ip6h->nexthdr == IPPROTO_UDP) | ||
835 | csum_type = TX_CSUM_UDPIP6; | ||
836 | else | ||
837 | goto nocsum; | ||
838 | } | ||
839 | |||
840 | if (likely(csum_type >= TX_CSUM_TCPIP)) | ||
841 | return TXPKT_CSUM_TYPE(csum_type) | | ||
842 | TXPKT_IPHDR_LEN(skb_network_header_len(skb)) | | ||
843 | TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN); | ||
844 | else { | ||
845 | int start = skb_transport_offset(skb); | ||
846 | |||
847 | return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) | | ||
848 | TXPKT_CSUM_LOC(start + skb->csum_offset); | ||
849 | } | ||
850 | } | ||
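For the common case of TCP over IPv4 with no VLAN and no IP options, the helper above reduces to a fixed pattern (semantics of the TXPKT macros as used in this file):

	/* TCP/IPv4, standard 20-byte IP header, plain Ethernet L2 header:
	 *   TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) | TXPKT_IPHDR_LEN(20) |
	 *   TXPKT_ETHHDR_LEN(0)
	 * t4_eth_xmit() below additionally ORs in TXPKT_IPCSUM_DIS because
	 * the stack has already written the IP header checksum.
	 */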
851 | |||
852 | static void eth_txq_stop(struct sge_eth_txq *q) | ||
853 | { | ||
854 | netif_tx_stop_queue(q->txq); | ||
855 | q->q.stops++; | ||
856 | } | ||
857 | |||
858 | static inline void txq_advance(struct sge_txq *q, unsigned int n) | ||
859 | { | ||
860 | q->in_use += n; | ||
861 | q->pidx += n; | ||
862 | if (q->pidx >= q->size) | ||
863 | q->pidx -= q->size; | ||
864 | } | ||
865 | |||
866 | /** | ||
867 | * t4_eth_xmit - add a packet to an Ethernet Tx queue | ||
868 | * @skb: the packet | ||
869 | * @dev: the egress net device | ||
870 | * | ||
871 | * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled. | ||
872 | */ | ||
873 | netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) | ||
874 | { | ||
875 | u32 wr_mid; | ||
876 | u64 cntrl, *end; | ||
877 | int qidx, credits; | ||
878 | unsigned int flits, ndesc; | ||
879 | struct adapter *adap; | ||
880 | struct sge_eth_txq *q; | ||
881 | const struct port_info *pi; | ||
882 | struct fw_eth_tx_pkt_wr *wr; | ||
883 | struct cpl_tx_pkt_core *cpl; | ||
884 | const struct skb_shared_info *ssi; | ||
885 | dma_addr_t addr[MAX_SKB_FRAGS + 1]; | ||
886 | |||
887 | /* | ||
888 | * The chip's minimum packet length is 10 octets, but we play it safe and | ||
889 | * reject anything shorter than an Ethernet header. | ||
890 | */ | ||
891 | if (unlikely(skb->len < ETH_HLEN)) { | ||
892 | out_free: dev_kfree_skb(skb); | ||
893 | return NETDEV_TX_OK; | ||
894 | } | ||
895 | |||
896 | pi = netdev_priv(dev); | ||
897 | adap = pi->adapter; | ||
898 | qidx = skb_get_queue_mapping(skb); | ||
899 | q = &adap->sge.ethtxq[qidx + pi->first_qset]; | ||
900 | |||
901 | reclaim_completed_tx(adap, &q->q, true); | ||
902 | |||
903 | flits = calc_tx_flits(skb); | ||
904 | ndesc = flits_to_desc(flits); | ||
905 | credits = txq_avail(&q->q) - ndesc; | ||
906 | |||
907 | if (unlikely(credits < 0)) { | ||
908 | eth_txq_stop(q); | ||
909 | dev_err(adap->pdev_dev, | ||
910 | "%s: Tx ring %u full while queue awake!\n", | ||
911 | dev->name, qidx); | ||
912 | return NETDEV_TX_BUSY; | ||
913 | } | ||
914 | |||
915 | if (!is_eth_imm(skb) && | ||
916 | unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) { | ||
917 | q->mapping_err++; | ||
918 | goto out_free; | ||
919 | } | ||
920 | |||
921 | wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2)); | ||
922 | if (unlikely(credits < ETHTXQ_STOP_THRES)) { | ||
923 | eth_txq_stop(q); | ||
924 | wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ; | ||
925 | } | ||
926 | |||
927 | wr = (void *)&q->q.desc[q->q.pidx]; | ||
928 | wr->equiq_to_len16 = htonl(wr_mid); | ||
929 | wr->r3 = cpu_to_be64(0); | ||
930 | end = (u64 *)wr + flits; | ||
931 | |||
932 | ssi = skb_shinfo(skb); | ||
933 | if (ssi->gso_size) { | ||
934 | struct cpl_tx_pkt_lso *lso = (void *)wr; | ||
935 | bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; | ||
936 | int l3hdr_len = skb_network_header_len(skb); | ||
937 | int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; | ||
938 | |||
939 | wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | | ||
940 | FW_WR_IMMDLEN(sizeof(*lso))); | ||
941 | lso->lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) | | ||
942 | LSO_FIRST_SLICE | LSO_LAST_SLICE | | ||
943 | LSO_IPV6(v6) | | ||
944 | LSO_ETHHDR_LEN(eth_xtra_len / 4) | | ||
945 | LSO_IPHDR_LEN(l3hdr_len / 4) | | ||
946 | LSO_TCPHDR_LEN(tcp_hdr(skb)->doff)); | ||
947 | lso->ipid_ofst = htons(0); | ||
948 | lso->mss = htons(ssi->gso_size); | ||
949 | lso->seqno_offset = htonl(0); | ||
950 | lso->len = htonl(skb->len); | ||
951 | cpl = (void *)(lso + 1); | ||
952 | cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | | ||
953 | TXPKT_IPHDR_LEN(l3hdr_len) | | ||
954 | TXPKT_ETHHDR_LEN(eth_xtra_len); | ||
955 | q->tso++; | ||
956 | q->tx_cso += ssi->gso_segs; | ||
957 | } else { | ||
958 | int len; | ||
959 | |||
960 | len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl); | ||
961 | wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | | ||
962 | FW_WR_IMMDLEN(len)); | ||
963 | cpl = (void *)(wr + 1); | ||
964 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
965 | cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS; | ||
966 | q->tx_cso++; | ||
967 | } else | ||
968 | cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS; | ||
969 | } | ||
970 | |||
971 | if (vlan_tx_tag_present(skb)) { | ||
972 | q->vlan_ins++; | ||
973 | cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb)); | ||
974 | } | ||
975 | |||
976 | cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) | | ||
977 | TXPKT_INTF(pi->tx_chan) | TXPKT_PF(0)); | ||
978 | cpl->pack = htons(0); | ||
979 | cpl->len = htons(skb->len); | ||
980 | cpl->ctrl1 = cpu_to_be64(cntrl); | ||
981 | |||
982 | if (is_eth_imm(skb)) { | ||
983 | inline_tx_skb(skb, &q->q, cpl + 1); | ||
984 | dev_kfree_skb(skb); | ||
985 | } else { | ||
986 | int last_desc; | ||
987 | |||
988 | write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0, | ||
989 | addr); | ||
990 | skb_orphan(skb); | ||
991 | |||
992 | last_desc = q->q.pidx + ndesc - 1; | ||
993 | if (last_desc >= q->q.size) | ||
994 | last_desc -= q->q.size; | ||
995 | q->q.sdesc[last_desc].skb = skb; | ||
996 | q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1); | ||
997 | } | ||
998 | |||
999 | txq_advance(&q->q, ndesc); | ||
1000 | |||
1001 | ring_tx_db(adap, &q->q, ndesc); | ||
1002 | return NETDEV_TX_OK; | ||
1003 | } | ||
1004 | |||
1005 | /** | ||
1006 | * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs | ||
1007 | * @q: the SGE control Tx queue | ||
1008 | * | ||
1009 | * This is a variant of reclaim_completed_tx() that is used for Tx queues | ||
1010 | * that send only immediate data (presently just the control queues) and | ||
1011 | * thus do not have any sk_buffs to release. | ||
1012 | */ | ||
1013 | static inline void reclaim_completed_tx_imm(struct sge_txq *q) | ||
1014 | { | ||
1015 | int hw_cidx = ntohs(q->stat->cidx); | ||
1016 | int reclaim = hw_cidx - q->cidx; | ||
1017 | |||
1018 | if (reclaim < 0) | ||
1019 | reclaim += q->size; | ||
1020 | |||
1021 | q->in_use -= reclaim; | ||
1022 | q->cidx = hw_cidx; | ||
1023 | } | ||
1024 | |||
1025 | /** | ||
1026 | * is_imm - check whether a packet can be sent as immediate data | ||
1027 | * @skb: the packet | ||
1028 | * | ||
1029 | * Returns true if a packet can be sent as a WR with immediate data. | ||
1030 | */ | ||
1031 | static inline int is_imm(const struct sk_buff *skb) | ||
1032 | { | ||
1033 | return skb->len <= MAX_CTRL_WR_LEN; | ||
1034 | } | ||
1035 | |||
1036 | /** | ||
1037 | * ctrlq_check_stop - check if a control queue is full and should stop | ||
1038 | * @q: the queue | ||
1039 | * @wr: most recent WR written to the queue | ||
1040 | * | ||
1041 | * Check if a control queue has become full and should be stopped. | ||
1042 | * We clean up control queue descriptors very lazily, only when we are out. | ||
1043 | * If the queue is still full after reclaiming any completed descriptors | ||
1044 | * we suspend it and have the last WR wake it up. | ||
1045 | */ | ||
1046 | static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) | ||
1047 | { | ||
1048 | reclaim_completed_tx_imm(&q->q); | ||
1049 | if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { | ||
1050 | wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ); | ||
1051 | q->q.stops++; | ||
1052 | q->full = 1; | ||
1053 | } | ||
1054 | } | ||
1055 | |||
1056 | /** | ||
1057 | * ctrl_xmit - send a packet through an SGE control Tx queue | ||
1058 | * @q: the control queue | ||
1059 | * @skb: the packet | ||
1060 | * | ||
1061 | * Send a packet through an SGE control Tx queue. Packets sent through | ||
1062 | * a control queue must fit entirely as immediate data. | ||
1063 | */ | ||
1064 | static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb) | ||
1065 | { | ||
1066 | unsigned int ndesc; | ||
1067 | struct fw_wr_hdr *wr; | ||
1068 | |||
1069 | if (unlikely(!is_imm(skb))) { | ||
1070 | WARN_ON(1); | ||
1071 | dev_kfree_skb(skb); | ||
1072 | return NET_XMIT_DROP; | ||
1073 | } | ||
1074 | |||
1075 | ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc)); | ||
1076 | spin_lock(&q->sendq.lock); | ||
1077 | |||
1078 | if (unlikely(q->full)) { | ||
1079 | skb->priority = ndesc; /* save for restart */ | ||
1080 | __skb_queue_tail(&q->sendq, skb); | ||
1081 | spin_unlock(&q->sendq.lock); | ||
1082 | return NET_XMIT_CN; | ||
1083 | } | ||
1084 | |||
1085 | wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; | ||
1086 | inline_tx_skb(skb, &q->q, wr); | ||
1087 | |||
1088 | txq_advance(&q->q, ndesc); | ||
1089 | if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) | ||
1090 | ctrlq_check_stop(q, wr); | ||
1091 | |||
1092 | ring_tx_db(q->adap, &q->q, ndesc); | ||
1093 | spin_unlock(&q->sendq.lock); | ||
1094 | |||
1095 | kfree_skb(skb); | ||
1096 | return NET_XMIT_SUCCESS; | ||
1097 | } | ||
1098 | |||
1099 | /** | ||
1100 | * restart_ctrlq - restart a suspended control queue | ||
1101 | * @data: the control queue to restart | ||
1102 | * | ||
1103 | * Resumes transmission on a suspended Tx control queue. | ||
1104 | */ | ||
1105 | static void restart_ctrlq(unsigned long data) | ||
1106 | { | ||
1107 | struct sk_buff *skb; | ||
1108 | unsigned int written = 0; | ||
1109 | struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data; | ||
1110 | |||
1111 | spin_lock(&q->sendq.lock); | ||
1112 | reclaim_completed_tx_imm(&q->q); | ||
1113 | BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */ | ||
1114 | |||
1115 | while ((skb = __skb_dequeue(&q->sendq)) != NULL) { | ||
1116 | struct fw_wr_hdr *wr; | ||
1117 | unsigned int ndesc = skb->priority; /* previously saved */ | ||
1118 | |||
1119 | /* | ||
1120 | * Write descriptors and free skbs outside the lock to limit | ||
1121 | * wait times. q->full is still set so new skbs will be queued. | ||
1122 | */ | ||
1123 | spin_unlock(&q->sendq.lock); | ||
1124 | |||
1125 | wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; | ||
1126 | inline_tx_skb(skb, &q->q, wr); | ||
1127 | kfree_skb(skb); | ||
1128 | |||
1129 | written += ndesc; | ||
1130 | txq_advance(&q->q, ndesc); | ||
1131 | if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { | ||
1132 | unsigned long old = q->q.stops; | ||
1133 | |||
1134 | ctrlq_check_stop(q, wr); | ||
1135 | if (q->q.stops != old) { /* suspended anew */ | ||
1136 | spin_lock(&q->sendq.lock); | ||
1137 | goto ringdb; | ||
1138 | } | ||
1139 | } | ||
1140 | if (written > 16) { | ||
1141 | ring_tx_db(q->adap, &q->q, written); | ||
1142 | written = 0; | ||
1143 | } | ||
1144 | spin_lock(&q->sendq.lock); | ||
1145 | } | ||
1146 | q->full = 0; | ||
1147 | ringdb: if (written) | ||
1148 | ring_tx_db(q->adap, &q->q, written); | ||
1149 | spin_unlock(&q->sendq.lock); | ||
1150 | } | ||
1151 | |||
1152 | /** | ||
1153 | * t4_mgmt_tx - send a management message | ||
1154 | * @adap: the adapter | ||
1155 | * @skb: the packet containing the management message | ||
1156 | * | ||
1157 | * Send a management message through control queue 0. | ||
1158 | */ | ||
1159 | int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb) | ||
1160 | { | ||
1161 | int ret; | ||
1162 | |||
1163 | local_bh_disable(); | ||
1164 | ret = ctrl_xmit(&adap->sge.ctrlq[0], skb); | ||
1165 | local_bh_enable(); | ||
1166 | return ret; | ||
1167 | } | ||
1168 | |||
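/*
 * Usage sketch (illustrative only, not part of the driver): build a
 * minimal firmware work request in an skb and hand it to t4_mgmt_tx().
 * Control-queue packets must fit entirely as immediate data, i.e. be at
 * most MAX_CTRL_WR_LEN bytes; the opcode argument is a placeholder.
 */
static int example_mgmt_wr(struct adapter *adap, unsigned int op)
{
	const size_t len = 16;		/* one len16 unit */
	struct sk_buff *skb;
	struct fw_wr_hdr *wr;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	wr = (struct fw_wr_hdr *)__skb_put(skb, len);
	memset(wr, 0, len);
	wr->hi = htonl(FW_WR_OP(op));
	wr->lo = htonl(FW_WR_LEN16(len / 16));
	return t4_mgmt_tx(adap, skb);	/* returns a NET_XMIT_* value */
}
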
1169 | /** | ||
1170 | * is_ofld_imm - check whether a packet can be sent as immediate data | ||
1171 | * @skb: the packet | ||
1172 | * | ||
1173 | * Returns true if a packet can be sent as an offload WR with immediate | ||
1174 | * data. We currently use the same limit as for Ethernet packets. | ||
1175 | */ | ||
1176 | static inline int is_ofld_imm(const struct sk_buff *skb) | ||
1177 | { | ||
1178 | return skb->len <= MAX_IMM_TX_PKT_LEN; | ||
1179 | } | ||
1180 | |||
1181 | /** | ||
1182 | * calc_tx_flits_ofld - calculate # of flits for an offload packet | ||
1183 | * @skb: the packet | ||
1184 | * | ||
1185 | * Returns the number of flits needed for the given offload packet. | ||
1186 | * These packets are already fully constructed and no additional headers | ||
1187 | * will be added. | ||
1188 | */ | ||
1189 | static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) | ||
1190 | { | ||
1191 | unsigned int flits, cnt; | ||
1192 | |||
1193 | if (is_ofld_imm(skb)) | ||
1194 | return DIV_ROUND_UP(skb->len, 8); | ||
1195 | |||
1196 | flits = skb_transport_offset(skb) / 8U; /* headers */ | ||
1197 | cnt = skb_shinfo(skb)->nr_frags; | ||
1198 | if (skb->tail != skb->transport_header) | ||
1199 | cnt++; | ||
1200 | return flits + sgl_len(cnt); | ||
1201 | } | ||
1202 | |||
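/*
 * Worked example (illustrative figures; relies on the driver's sgl_len()
 * accounting of roughly 1.5 flits per DMA address): an offload packet
 * with a 40-byte pre-built header (5 flits), two page fragments, and a
 * linear tail beyond the transport header (3 SGL entries in total)
 * needs 5 + sgl_len(3) = 5 + 5 = 10 flits, which flits_to_desc() maps
 * onto two 64-byte Tx descriptors.
 */
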
1203 | /** | ||
1204 | * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion | ||
1205 | * @q: the offload Tx queue to stop | ||
1206 | * | ||
1207 | * Marks a Tx queue stopped due to I/O MMU exhaustion and the | ||
1208 | * resulting inability to map packets. A periodic timer | ||
1209 | * (sge_tx_timer_cb) attempts to restart queues that have been | ||
1210 | * marked this way. | ||
1211 | */ | ||
1212 | static void txq_stop_maperr(struct sge_ofld_txq *q) | ||
1213 | { | ||
1214 | q->mapping_err++; | ||
1215 | q->q.stops++; | ||
1216 | set_bit(q->q.cntxt_id, q->adap->sge.txq_maperr); | ||
1217 | } | ||
1218 | |||
1219 | /** | ||
1220 | * ofldtxq_stop - stop an offload Tx queue that has become full | ||
1221 | * @q: the queue to stop | ||
1222 | * @skb: the packet causing the queue to become full | ||
1223 | * | ||
1224 | * Stops an offload Tx queue that has become full and modifies the packet | ||
1225 | * being written to request a wakeup. | ||
1226 | */ | ||
1227 | static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb) | ||
1228 | { | ||
1229 | struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data; | ||
1230 | |||
1231 | wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ); | ||
1232 | q->q.stops++; | ||
1233 | q->full = 1; | ||
1234 | } | ||
1235 | |||
1236 | /** | ||
1237 | * service_ofldq - service an offload Tx queue | ||
1238 | * @q: the offload queue | ||
1239 | * | ||
1240 | * Services an offload Tx queue by moving packets from its packet queue | ||
1241 | * to the HW Tx ring. The function starts and ends with the queue locked. | ||
1242 | */ | ||
1243 | static void service_ofldq(struct sge_ofld_txq *q) | ||
1244 | { | ||
1245 | u64 *pos; | ||
1246 | int credits; | ||
1247 | struct sk_buff *skb; | ||
1248 | unsigned int written = 0; | ||
1249 | unsigned int flits, ndesc; | ||
1250 | |||
1251 | while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { | ||
1252 | /* | ||
1253 | * We drop the lock but leave skb on sendq, thus retaining | ||
1254 | * exclusive access to the state of the queue. | ||
1255 | */ | ||
1256 | spin_unlock(&q->sendq.lock); | ||
1257 | |||
1258 | reclaim_completed_tx(q->adap, &q->q, false); | ||
1259 | |||
1260 | flits = skb->priority; /* previously saved */ | ||
1261 | ndesc = flits_to_desc(flits); | ||
1262 | credits = txq_avail(&q->q) - ndesc; | ||
1263 | BUG_ON(credits < 0); | ||
1264 | if (unlikely(credits < TXQ_STOP_THRES)) | ||
1265 | ofldtxq_stop(q, skb); | ||
1266 | |||
1267 | pos = (u64 *)&q->q.desc[q->q.pidx]; | ||
1268 | if (is_ofld_imm(skb)) | ||
1269 | inline_tx_skb(skb, &q->q, pos); | ||
1270 | else if (map_skb(q->adap->pdev_dev, skb, | ||
1271 | (dma_addr_t *)skb->head)) { | ||
1272 | txq_stop_maperr(q); | ||
1273 | spin_lock(&q->sendq.lock); | ||
1274 | break; | ||
1275 | } else { | ||
1276 | int last_desc, hdr_len = skb_transport_offset(skb); | ||
1277 | |||
1278 | memcpy(pos, skb->data, hdr_len); | ||
1279 | write_sgl(skb, &q->q, (void *)pos + hdr_len, | ||
1280 | pos + flits, hdr_len, | ||
1281 | (dma_addr_t *)skb->head); | ||
1282 | #ifdef CONFIG_NEED_DMA_MAP_STATE | ||
1283 | skb->dev = q->adap->port[0]; | ||
1284 | skb->destructor = deferred_unmap_destructor; | ||
1285 | #endif | ||
1286 | last_desc = q->q.pidx + ndesc - 1; | ||
1287 | if (last_desc >= q->q.size) | ||
1288 | last_desc -= q->q.size; | ||
1289 | q->q.sdesc[last_desc].skb = skb; | ||
1290 | } | ||
1291 | |||
1292 | txq_advance(&q->q, ndesc); | ||
1293 | written += ndesc; | ||
1294 | if (unlikely(written > 32)) { | ||
1295 | ring_tx_db(q->adap, &q->q, written); | ||
1296 | written = 0; | ||
1297 | } | ||
1298 | |||
1299 | spin_lock(&q->sendq.lock); | ||
1300 | __skb_unlink(skb, &q->sendq); | ||
1301 | if (is_ofld_imm(skb)) | ||
1302 | kfree_skb(skb); | ||
1303 | } | ||
1304 | if (likely(written)) | ||
1305 | ring_tx_db(q->adap, &q->q, written); | ||
1306 | } | ||
1307 | |||
1308 | /** | ||
1309 | * ofld_xmit - send a packet through an offload queue | ||
1310 | * @q: the Tx offload queue | ||
1311 | * @skb: the packet | ||
1312 | * | ||
1313 | * Send an offload packet through an SGE offload queue. | ||
1314 | */ | ||
1315 | static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb) | ||
1316 | { | ||
1317 | skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ | ||
1318 | spin_lock(&q->sendq.lock); | ||
1319 | __skb_queue_tail(&q->sendq, skb); | ||
1320 | if (q->sendq.qlen == 1) | ||
1321 | service_ofldq(q); | ||
1322 | spin_unlock(&q->sendq.lock); | ||
1323 | return NET_XMIT_SUCCESS; | ||
1324 | } | ||
1325 | |||
1326 | /** | ||
1327 | * restart_ofldq - restart a suspended offload queue | ||
1328 | * @data: the offload queue to restart | ||
1329 | * | ||
1330 | * Resumes transmission on a suspended Tx offload queue. | ||
1331 | */ | ||
1332 | static void restart_ofldq(unsigned long data) | ||
1333 | { | ||
1334 | struct sge_ofld_txq *q = (struct sge_ofld_txq *)data; | ||
1335 | |||
1336 | spin_lock(&q->sendq.lock); | ||
1337 | q->full = 0; /* the queue actually is completely empty now */ | ||
1338 | service_ofldq(q); | ||
1339 | spin_unlock(&q->sendq.lock); | ||
1340 | } | ||
1341 | |||
1342 | /** | ||
1343 | * skb_txq - return the Tx queue an offload packet should use | ||
1344 | * @skb: the packet | ||
1345 | * | ||
1346 | * Returns the Tx queue an offload packet should use as indicated by bits | ||
1347 | * 1-15 in the packet's queue_mapping. | ||
1348 | */ | ||
1349 | static inline unsigned int skb_txq(const struct sk_buff *skb) | ||
1350 | { | ||
1351 | return skb->queue_mapping >> 1; | ||
1352 | } | ||
1353 | |||
1354 | /** | ||
1355 | * is_ctrl_pkt - return whether an offload packet is a control packet | ||
1356 | * @skb: the packet | ||
1357 | * | ||
1358 | * Returns whether an offload packet should use an OFLD or a CTRL | ||
1359 | * Tx queue as indicated by bit 0 in the packet's queue_mapping. | ||
1360 | */ | ||
1361 | static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb) | ||
1362 | { | ||
1363 | return skb->queue_mapping & 1; | ||
1364 | } | ||
1365 | |||
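/*
 * Illustrative counterpart to skb_txq() and is_ctrl_pkt() above (a
 * sketch of what a sender might do; the helper name is hypothetical):
 * the queue index goes in bits 1-15 of queue_mapping and the
 * control-vs-offload choice in bit 0.
 */
static inline void example_set_wr_txq(struct sk_buff *skb, int is_ctrl,
				      unsigned int queue)
{
	skb_set_queue_mapping(skb, (queue << 1) | (is_ctrl & 1));
}
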
1366 | static inline int ofld_send(struct adapter *adap, struct sk_buff *skb) | ||
1367 | { | ||
1368 | unsigned int idx = skb_txq(skb); | ||
1369 | |||
1370 | if (unlikely(is_ctrl_pkt(skb))) | ||
1371 | return ctrl_xmit(&adap->sge.ctrlq[idx], skb); | ||
1372 | return ofld_xmit(&adap->sge.ofldtxq[idx], skb); | ||
1373 | } | ||
1374 | |||
1375 | /** | ||
1376 | * t4_ofld_send - send an offload packet | ||
1377 | * @adap: the adapter | ||
1378 | * @skb: the packet | ||
1379 | * | ||
1380 | * Sends an offload packet. We use the packet queue_mapping to select the | ||
1381 | * appropriate Tx queue as follows: bit 0 indicates whether the packet | ||
1382 | * should be sent as regular or control, bits 1-15 select the queue. | ||
1383 | */ | ||
1384 | int t4_ofld_send(struct adapter *adap, struct sk_buff *skb) | ||
1385 | { | ||
1386 | int ret; | ||
1387 | |||
1388 | local_bh_disable(); | ||
1389 | ret = ofld_send(adap, skb); | ||
1390 | local_bh_enable(); | ||
1391 | return ret; | ||
1392 | } | ||
1393 | |||
1394 | /** | ||
1395 | * cxgb4_ofld_send - send an offload packet | ||
1396 | * @dev: the net device | ||
1397 | * @skb: the packet | ||
1398 | * | ||
1399 | * Sends an offload packet. This is an exported version of t4_ofld_send(), | ||
1400 | * intended for ULDs. | ||
1401 | */ | ||
1402 | int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb) | ||
1403 | { | ||
1404 | return t4_ofld_send(netdev2adap(dev), skb); | ||
1405 | } | ||
1406 | EXPORT_SYMBOL(cxgb4_ofld_send); | ||
1407 | |||
1408 | static inline void copy_frags(struct skb_shared_info *ssi, | ||
1409 | const struct pkt_gl *gl, unsigned int offset) | ||
1410 | { | ||
1411 | unsigned int n; | ||
1412 | |||
1413 | /* usually there's just one frag */ | ||
1414 | ssi->frags[0].page = gl->frags[0].page; | ||
1415 | ssi->frags[0].page_offset = gl->frags[0].page_offset + offset; | ||
1416 | ssi->frags[0].size = gl->frags[0].size - offset; | ||
1417 | ssi->nr_frags = gl->nfrags; | ||
1418 | n = gl->nfrags - 1; | ||
1419 | if (n) | ||
1420 | memcpy(&ssi->frags[1], &gl->frags[1], n * sizeof(skb_frag_t)); | ||
1421 | |||
1422 | /* get a reference to the last page, since we don't own it */ | ||
1423 | get_page(gl->frags[n].page); | ||
1424 | } | ||
1425 | |||
1426 | /** | ||
1427 | * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list | ||
1428 | * @gl: the gather list | ||
1429 | * @skb_len: size of sk_buff main body if it carries fragments | ||
1430 | * @pull_len: amount of data to move to the sk_buff's main body | ||
1431 | * | ||
1432 | * Builds an sk_buff from the given packet gather list. Returns the | ||
1433 | * sk_buff or %NULL if sk_buff allocation failed. | ||
1434 | */ | ||
1435 | struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, | ||
1436 | unsigned int skb_len, unsigned int pull_len) | ||
1437 | { | ||
1438 | struct sk_buff *skb; | ||
1439 | |||
1440 | /* | ||
1441 | * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer | ||
1442 | * size, which holds since buffers are at least one page in size. | ||
1443 | * In this case packets up to RX_COPY_THRES have only one fragment. | ||
1444 | */ | ||
1445 | if (gl->tot_len <= RX_COPY_THRES) { | ||
1446 | skb = dev_alloc_skb(gl->tot_len); | ||
1447 | if (unlikely(!skb)) | ||
1448 | goto out; | ||
1449 | __skb_put(skb, gl->tot_len); | ||
1450 | skb_copy_to_linear_data(skb, gl->va, gl->tot_len); | ||
1451 | } else { | ||
1452 | skb = dev_alloc_skb(skb_len); | ||
1453 | if (unlikely(!skb)) | ||
1454 | goto out; | ||
1455 | __skb_put(skb, pull_len); | ||
1456 | skb_copy_to_linear_data(skb, gl->va, pull_len); | ||
1457 | |||
1458 | copy_frags(skb_shinfo(skb), gl, pull_len); | ||
1459 | skb->len = gl->tot_len; | ||
1460 | skb->data_len = skb->len - pull_len; | ||
1461 | skb->truesize += skb->data_len; | ||
1462 | } | ||
1463 | out: return skb; | ||
1464 | } | ||
1465 | EXPORT_SYMBOL(cxgb4_pktgl_to_skb); | ||
1466 | |||
1467 | /** | ||
1468 | * t4_pktgl_free - free a packet gather list | ||
1469 | * @gl: the gather list | ||
1470 | * | ||
1471 | * Releases the pages of a packet gather list. We do not own the last | ||
1472 | * page on the list and do not free it. | ||
1473 | */ | ||
1474 | void t4_pktgl_free(const struct pkt_gl *gl) | ||
1475 | { | ||
1476 | int n; | ||
1477 | const skb_frag_t *p; | ||
1478 | |||
1479 | for (p = gl->frags, n = gl->nfrags - 1; n--; p++) | ||
1480 | put_page(p->page); | ||
1481 | } | ||
1482 | |||
1483 | /* | ||
1484 | * Process an MPS trace packet. Give it an unused protocol number so it won't | ||
1485 | * be delivered to anyone and send it to the stack for capture. | ||
1486 | */ | ||
1487 | static noinline int handle_trace_pkt(struct adapter *adap, | ||
1488 | const struct pkt_gl *gl) | ||
1489 | { | ||
1490 | struct sk_buff *skb; | ||
1491 | struct cpl_trace_pkt *p; | ||
1492 | |||
1493 | skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN); | ||
1494 | if (unlikely(!skb)) { | ||
1495 | t4_pktgl_free(gl); | ||
1496 | return 0; | ||
1497 | } | ||
1498 | |||
1499 | p = (struct cpl_trace_pkt *)skb->data; | ||
1500 | __skb_pull(skb, sizeof(*p)); | ||
1501 | skb_reset_mac_header(skb); | ||
1502 | skb->protocol = htons(0xffff); | ||
1503 | skb->dev = adap->port[0]; | ||
1504 | netif_receive_skb(skb); | ||
1505 | return 0; | ||
1506 | } | ||
1507 | |||
1508 | static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, | ||
1509 | const struct cpl_rx_pkt *pkt) | ||
1510 | { | ||
1511 | int ret; | ||
1512 | struct sk_buff *skb; | ||
1513 | |||
1514 | skb = napi_get_frags(&rxq->rspq.napi); | ||
1515 | if (unlikely(!skb)) { | ||
1516 | t4_pktgl_free(gl); | ||
1517 | rxq->stats.rx_drops++; | ||
1518 | return; | ||
1519 | } | ||
1520 | |||
1521 | copy_frags(skb_shinfo(skb), gl, RX_PKT_PAD); | ||
1522 | skb->len = gl->tot_len - RX_PKT_PAD; | ||
1523 | skb->data_len = skb->len; | ||
1524 | skb->truesize += skb->data_len; | ||
1525 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1526 | skb_record_rx_queue(skb, rxq->rspq.idx); | ||
1527 | |||
1528 | if (unlikely(pkt->vlan_ex)) { | ||
1529 | struct port_info *pi = netdev_priv(rxq->rspq.netdev); | ||
1530 | struct vlan_group *grp = pi->vlan_grp; | ||
1531 | |||
1532 | rxq->stats.vlan_ex++; | ||
1533 | if (likely(grp)) { | ||
1534 | ret = vlan_gro_frags(&rxq->rspq.napi, grp, | ||
1535 | ntohs(pkt->vlan)); | ||
1536 | goto stats; | ||
1537 | } | ||
1538 | } | ||
1539 | ret = napi_gro_frags(&rxq->rspq.napi); | ||
1540 | stats: if (ret == GRO_HELD) | ||
1541 | rxq->stats.lro_pkts++; | ||
1542 | else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE) | ||
1543 | rxq->stats.lro_merged++; | ||
1544 | rxq->stats.pkts++; | ||
1545 | rxq->stats.rx_cso++; | ||
1546 | } | ||
1547 | |||
1548 | /** | ||
1549 | * t4_ethrx_handler - process an ingress ethernet packet | ||
1550 | * @q: the response queue that received the packet | ||
1551 | * @rsp: the response queue descriptor holding the RX_PKT message | ||
1552 | * @si: the gather list of packet fragments | ||
1553 | * | ||
1554 | * Process an ingress ethernet packet and deliver it to the stack. | ||
1555 | */ | ||
1556 | int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, | ||
1557 | const struct pkt_gl *si) | ||
1558 | { | ||
1559 | bool csum_ok; | ||
1560 | struct sk_buff *skb; | ||
1561 | struct port_info *pi; | ||
1562 | const struct cpl_rx_pkt *pkt; | ||
1563 | struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); | ||
1564 | |||
1565 | if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT)) | ||
1566 | return handle_trace_pkt(q->adap, si); | ||
1567 | |||
1568 | pkt = (void *)&rsp[1]; | ||
1569 | csum_ok = pkt->csum_calc && !pkt->err_vec; | ||
1570 | if ((pkt->l2info & htonl(RXF_TCP)) && | ||
1571 | (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { | ||
1572 | do_gro(rxq, si, pkt); | ||
1573 | return 0; | ||
1574 | } | ||
1575 | |||
1576 | skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN); | ||
1577 | if (unlikely(!skb)) { | ||
1578 | t4_pktgl_free(si); | ||
1579 | rxq->stats.rx_drops++; | ||
1580 | return 0; | ||
1581 | } | ||
1582 | |||
1583 | __skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */ | ||
1584 | skb->protocol = eth_type_trans(skb, q->netdev); | ||
1585 | skb_record_rx_queue(skb, q->idx); | ||
1586 | pi = netdev_priv(skb->dev); | ||
1587 | rxq->stats.pkts++; | ||
1588 | |||
1589 | if (csum_ok && (pi->rx_offload & RX_CSO) && | ||
1590 | (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) { | ||
1591 | if (!pkt->ip_frag) | ||
1592 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1593 | else { | ||
1594 | __sum16 c = (__force __sum16)pkt->csum; | ||
1595 | skb->csum = csum_unfold(c); | ||
1596 | skb->ip_summed = CHECKSUM_COMPLETE; | ||
1597 | } | ||
1598 | rxq->stats.rx_cso++; | ||
1599 | } else | ||
1600 | skb->ip_summed = CHECKSUM_NONE; | ||
1601 | |||
1602 | if (unlikely(pkt->vlan_ex)) { | ||
1603 | struct vlan_group *grp = pi->vlan_grp; | ||
1604 | |||
1605 | rxq->stats.vlan_ex++; | ||
1606 | if (likely(grp)) | ||
1607 | vlan_hwaccel_receive_skb(skb, grp, ntohs(pkt->vlan)); | ||
1608 | else | ||
1609 | dev_kfree_skb_any(skb); | ||
1610 | } else | ||
1611 | netif_receive_skb(skb); | ||
1612 | |||
1613 | return 0; | ||
1614 | } | ||
1615 | |||
1616 | /** | ||
1617 | * restore_rx_bufs - put back a packet's Rx buffers | ||
1618 | * @si: the packet gather list | ||
1619 | * @q: the SGE free list | ||
1620 | * @frags: number of FL buffers to restore | ||
1621 | * | ||
1622 | * Puts back on an FL the Rx buffers associated with @si. The buffers | ||
1623 | * have already been unmapped and are left unmapped; we mark them as | ||
1624 | * such to prevent further unmapping attempts. | ||
1625 | * | ||
1626 | * This function undoes a series of unmap_rx_buf() calls when we find out | ||
1627 | * that the current packet can't be processed right away after all and we | ||
1628 | * need to come back to it later. This is a very rare event and there's | ||
1629 | * no effort to make this particularly efficient. | ||
1630 | */ | ||
1631 | static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q, | ||
1632 | int frags) | ||
1633 | { | ||
1634 | struct rx_sw_desc *d; | ||
1635 | |||
1636 | while (frags--) { | ||
1637 | if (q->cidx == 0) | ||
1638 | q->cidx = q->size - 1; | ||
1639 | else | ||
1640 | q->cidx--; | ||
1641 | d = &q->sdesc[q->cidx]; | ||
1642 | d->page = si->frags[frags].page; | ||
1643 | d->dma_addr |= RX_UNMAPPED_BUF; | ||
1644 | q->avail++; | ||
1645 | } | ||
1646 | } | ||
1647 | |||
1648 | /** | ||
1649 | * is_new_response - check if a response is newly written | ||
1650 | * @r: the response descriptor | ||
1651 | * @q: the response queue | ||
1652 | * | ||
1653 | * Returns true if a response descriptor contains a yet unprocessed | ||
1654 | * response. | ||
1655 | */ | ||
1656 | static inline bool is_new_response(const struct rsp_ctrl *r, | ||
1657 | const struct sge_rspq *q) | ||
1658 | { | ||
1659 | return RSPD_GEN(r->type_gen) == q->gen; | ||
1660 | } | ||
1661 | |||
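/*
 * Worked example (illustrative): the queue starts with gen = 1 and the
 * hardware writes each response carrying the current generation bit.
 * When rspq_next() below wraps cidx back to 0 it flips q->gen, so a
 * stale descriptor left over from the previous lap (still carrying the
 * old generation) is not mistaken for a new response.
 */
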
1662 | /** | ||
1663 | * rspq_next - advance to the next entry in a response queue | ||
1664 | * @q: the queue | ||
1665 | * | ||
1666 | * Updates the state of a response queue to advance it to the next entry. | ||
1667 | */ | ||
1668 | static inline void rspq_next(struct sge_rspq *q) | ||
1669 | { | ||
1670 | q->cur_desc = (void *)q->cur_desc + q->iqe_len; | ||
1671 | if (unlikely(++q->cidx == q->size)) { | ||
1672 | q->cidx = 0; | ||
1673 | q->gen ^= 1; | ||
1674 | q->cur_desc = q->desc; | ||
1675 | } | ||
1676 | } | ||
1677 | |||
1678 | /** | ||
1679 | * process_responses - process responses from an SGE response queue | ||
1680 | * @q: the ingress queue to process | ||
1681 | * @budget: how many responses can be processed in this round | ||
1682 | * | ||
1683 | * Process responses from an SGE response queue up to the supplied budget. | ||
1684 | * Responses include received packets as well as control messages from FW | ||
1685 | * or HW. | ||
1686 | * | ||
1687 | * Additionally choose the interrupt holdoff time for the next interrupt | ||
1688 | * on this queue. If the system is under memory pressure, use a fairly | ||
1689 | * long delay to help recovery. | ||
1690 | */ | ||
1691 | static int process_responses(struct sge_rspq *q, int budget) | ||
1692 | { | ||
1693 | int ret, rsp_type; | ||
1694 | int budget_left = budget; | ||
1695 | const struct rsp_ctrl *rc; | ||
1696 | struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); | ||
1697 | |||
1698 | while (likely(budget_left)) { | ||
1699 | rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); | ||
1700 | if (!is_new_response(rc, q)) | ||
1701 | break; | ||
1702 | |||
1703 | rmb(); | ||
1704 | rsp_type = RSPD_TYPE(rc->type_gen); | ||
1705 | if (likely(rsp_type == RSP_TYPE_FLBUF)) { | ||
1706 | skb_frag_t *fp; | ||
1707 | struct pkt_gl si; | ||
1708 | const struct rx_sw_desc *rsd; | ||
1709 | u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags; | ||
1710 | |||
1711 | if (len & RSPD_NEWBUF) { | ||
1712 | if (likely(q->offset > 0)) { | ||
1713 | free_rx_bufs(q->adap, &rxq->fl, 1); | ||
1714 | q->offset = 0; | ||
1715 | } | ||
1716 | len &= RSPD_LEN; | ||
1717 | } | ||
1718 | si.tot_len = len; | ||
1719 | |||
1720 | /* gather packet fragments */ | ||
1721 | for (frags = 0, fp = si.frags; ; frags++, fp++) { | ||
1722 | rsd = &rxq->fl.sdesc[rxq->fl.cidx]; | ||
1723 | bufsz = get_buf_size(rsd); | ||
1724 | fp->page = rsd->page; | ||
1725 | fp->page_offset = q->offset; | ||
1726 | fp->size = min(bufsz, len); | ||
1727 | len -= fp->size; | ||
1728 | if (!len) | ||
1729 | break; | ||
1730 | unmap_rx_buf(q->adap, &rxq->fl); | ||
1731 | } | ||
1732 | |||
1733 | /* | ||
1734 | * Last buffer remains mapped so explicitly make it | ||
1735 | * coherent for CPU access. | ||
1736 | */ | ||
1737 | dma_sync_single_for_cpu(q->adap->pdev_dev, | ||
1738 | get_buf_addr(rsd), | ||
1739 | fp->size, DMA_FROM_DEVICE); | ||
1740 | |||
1741 | si.va = page_address(si.frags[0].page) + | ||
1742 | si.frags[0].page_offset; | ||
1743 | prefetch(si.va); | ||
1744 | |||
1745 | si.nfrags = frags + 1; | ||
1746 | ret = q->handler(q, q->cur_desc, &si); | ||
1747 | if (likely(ret == 0)) | ||
1748 | q->offset += ALIGN(fp->size, FL_ALIGN); | ||
1749 | else | ||
1750 | restore_rx_bufs(&si, &rxq->fl, frags); | ||
1751 | } else if (likely(rsp_type == RSP_TYPE_CPL)) { | ||
1752 | ret = q->handler(q, q->cur_desc, NULL); | ||
1753 | } else { | ||
1754 | ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN); | ||
1755 | } | ||
1756 | |||
1757 | if (unlikely(ret)) { | ||
1758 | /* couldn't process descriptor, back off for recovery */ | ||
1759 | q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX); | ||
1760 | break; | ||
1761 | } | ||
1762 | |||
1763 | rspq_next(q); | ||
1764 | budget_left--; | ||
1765 | } | ||
1766 | |||
1767 | if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16) | ||
1768 | __refill_fl(q->adap, &rxq->fl); | ||
1769 | return budget - budget_left; | ||
1770 | } | ||
1771 | |||
1772 | /** | ||
1773 | * napi_rx_handler - the NAPI handler for Rx processing | ||
1774 | * @napi: the napi instance | ||
1775 | * @budget: how many packets we can process in this round | ||
1776 | * | ||
1777 | * Handler for new data events when using NAPI. This does not need any | ||
1778 | * locking or protection from interrupts as data interrupts are off at | ||
1779 | * this point and other adapter interrupts do not interfere (the latter | ||
1780 | * is not a concern at all with MSI-X, as non-data interrupts then have | ||
1781 | * a separate handler). | ||
1782 | */ | ||
1783 | static int napi_rx_handler(struct napi_struct *napi, int budget) | ||
1784 | { | ||
1785 | unsigned int params; | ||
1786 | struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); | ||
1787 | int work_done = process_responses(q, budget); | ||
1788 | |||
1789 | if (likely(work_done < budget)) { | ||
1790 | napi_complete(napi); | ||
1791 | params = q->next_intr_params; | ||
1792 | q->next_intr_params = q->intr_params; | ||
1793 | } else | ||
1794 | params = QINTR_TIMER_IDX(7); | ||
1795 | |||
1796 | t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) | | ||
1797 | INGRESSQID((u32)q->cntxt_id) | SEINTARM(params)); | ||
1798 | return work_done; | ||
1799 | } | ||
1800 | |||
1801 | /* | ||
1802 | * The MSI-X interrupt handler for an SGE response queue. | ||
1803 | */ | ||
1804 | irqreturn_t t4_sge_intr_msix(int irq, void *cookie) | ||
1805 | { | ||
1806 | struct sge_rspq *q = cookie; | ||
1807 | |||
1808 | napi_schedule(&q->napi); | ||
1809 | return IRQ_HANDLED; | ||
1810 | } | ||
1811 | |||
1812 | /* | ||
1813 | * Process the indirect interrupt entries in the interrupt queue and kick off | ||
1814 | * NAPI for each queue that has generated an entry. | ||
1815 | */ | ||
1816 | static unsigned int process_intrq(struct adapter *adap) | ||
1817 | { | ||
1818 | unsigned int credits; | ||
1819 | const struct rsp_ctrl *rc; | ||
1820 | struct sge_rspq *q = &adap->sge.intrq; | ||
1821 | |||
1822 | spin_lock(&adap->sge.intrq_lock); | ||
1823 | for (credits = 0; ; credits++) { | ||
1824 | rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); | ||
1825 | if (!is_new_response(rc, q)) | ||
1826 | break; | ||
1827 | |||
1828 | rmb(); | ||
1829 | if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) { | ||
1830 | unsigned int qid = ntohl(rc->pldbuflen_qid); | ||
1831 | |||
1832 | napi_schedule(&adap->sge.ingr_map[qid]->napi); | ||
1833 | } | ||
1834 | |||
1835 | rspq_next(q); | ||
1836 | } | ||
1837 | |||
1838 | t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) | | ||
1839 | INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params)); | ||
1840 | spin_unlock(&adap->sge.intrq_lock); | ||
1841 | return credits; | ||
1842 | } | ||
1843 | |||
1844 | /* | ||
1845 | * The MSI interrupt handler, which handles data events from SGE response queues | ||
1846 | * as well as error and other async events as they all use the same MSI vector. | ||
1847 | */ | ||
1848 | static irqreturn_t t4_intr_msi(int irq, void *cookie) | ||
1849 | { | ||
1850 | struct adapter *adap = cookie; | ||
1851 | |||
1852 | t4_slow_intr_handler(adap); | ||
1853 | process_intrq(adap); | ||
1854 | return IRQ_HANDLED; | ||
1855 | } | ||
1856 | |||
1857 | /* | ||
1858 | * Interrupt handler for legacy INTx interrupts. | ||
1859 | * Handles data events from SGE response queues as well as error and other | ||
1860 | * async events as they all use the same interrupt line. | ||
1861 | */ | ||
1862 | static irqreturn_t t4_intr_intx(int irq, void *cookie) | ||
1863 | { | ||
1864 | struct adapter *adap = cookie; | ||
1865 | |||
1866 | t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0); | ||
1867 | if (t4_slow_intr_handler(adap) | process_intrq(adap)) | ||
1868 | return IRQ_HANDLED; | ||
1869 | return IRQ_NONE; /* probably shared interrupt */ | ||
1870 | } | ||
1871 | |||
1872 | /** | ||
1873 | * t4_intr_handler - select the top-level interrupt handler | ||
1874 | * @adap: the adapter | ||
1875 | * | ||
1876 | * Selects the top-level interrupt handler based on the type of interrupts | ||
1877 | * (MSI-X, MSI, or INTx). | ||
1878 | */ | ||
1879 | irq_handler_t t4_intr_handler(struct adapter *adap) | ||
1880 | { | ||
1881 | if (adap->flags & USING_MSIX) | ||
1882 | return t4_sge_intr_msix; | ||
1883 | if (adap->flags & USING_MSI) | ||
1884 | return t4_intr_msi; | ||
1885 | return t4_intr_intx; | ||
1886 | } | ||
1887 | |||
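/*
 * Usage sketch (illustrative): the probe path would bind the handler
 * chosen above to the adapter's interrupt line for the MSI/INTx case;
 * the name string is arbitrary and the helper itself is hypothetical.
 */
static int example_request_irq(struct adapter *adap)
{
	return request_irq(adap->pdev->irq, t4_intr_handler(adap),
			   (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
			   "cxgb4", adap);
}
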
1888 | static void sge_rx_timer_cb(unsigned long data) | ||
1889 | { | ||
1890 | unsigned long m; | ||
1891 | unsigned int i, cnt[2]; | ||
1892 | struct adapter *adap = (struct adapter *)data; | ||
1893 | struct sge *s = &adap->sge; | ||
1894 | |||
1895 | for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) | ||
1896 | for (m = s->starving_fl[i]; m; m &= m - 1) { | ||
1897 | struct sge_eth_rxq *rxq; | ||
1898 | unsigned int id = __ffs(m) + i * BITS_PER_LONG; | ||
1899 | struct sge_fl *fl = s->egr_map[id]; | ||
1900 | |||
1901 | clear_bit(id, s->starving_fl); | ||
1902 | smp_mb__after_clear_bit(); | ||
1903 | |||
1904 | if (fl_starving(fl)) { | ||
1905 | rxq = container_of(fl, struct sge_eth_rxq, fl); | ||
1906 | if (napi_reschedule(&rxq->rspq.napi)) | ||
1907 | fl->starving++; | ||
1908 | else | ||
1909 | set_bit(id, s->starving_fl); | ||
1910 | } | ||
1911 | } | ||
1912 | |||
1913 | t4_write_reg(adap, SGE_DEBUG_INDEX, 13); | ||
1914 | cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH); | ||
1915 | cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW); | ||
1916 | |||
1917 | for (i = 0; i < 2; i++) | ||
1918 | if (cnt[i] >= s->starve_thres) { | ||
1919 | if (s->idma_state[i] || cnt[i] == 0xffffffff) | ||
1920 | continue; | ||
1921 | s->idma_state[i] = 1; | ||
1922 | t4_write_reg(adap, SGE_DEBUG_INDEX, 11); | ||
1923 | m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16); | ||
1924 | dev_warn(adap->pdev_dev, | ||
1925 | "SGE idma%u starvation detected for " | ||
1926 | "queue %lu\n", i, m & 0xffff); | ||
1927 | } else if (s->idma_state[i]) | ||
1928 | s->idma_state[i] = 0; | ||
1929 | |||
1930 | mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); | ||
1931 | } | ||
1932 | |||
1933 | static void sge_tx_timer_cb(unsigned long data) | ||
1934 | { | ||
1935 | unsigned long m; | ||
1936 | unsigned int i, budget; | ||
1937 | struct adapter *adap = (struct adapter *)data; | ||
1938 | struct sge *s = &adap->sge; | ||
1939 | |||
1940 | for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++) | ||
1941 | for (m = s->txq_maperr[i]; m; m &= m - 1) { | ||
1942 | unsigned long id = __ffs(m) + i * BITS_PER_LONG; | ||
1943 | struct sge_ofld_txq *txq = s->egr_map[id]; | ||
1944 | |||
1945 | clear_bit(id, s->txq_maperr); | ||
1946 | tasklet_schedule(&txq->qresume_tsk); | ||
1947 | } | ||
1948 | |||
1949 | budget = MAX_TIMER_TX_RECLAIM; | ||
1950 | i = s->ethtxq_rover; | ||
1951 | do { | ||
1952 | struct sge_eth_txq *q = &s->ethtxq[i]; | ||
1953 | |||
1954 | if (q->q.in_use && | ||
1955 | time_after_eq(jiffies, q->txq->trans_start + HZ / 100) && | ||
1956 | __netif_tx_trylock(q->txq)) { | ||
1957 | int avail = reclaimable(&q->q); | ||
1958 | |||
1959 | if (avail) { | ||
1960 | if (avail > budget) | ||
1961 | avail = budget; | ||
1962 | |||
1963 | free_tx_desc(adap, &q->q, avail, true); | ||
1964 | q->q.in_use -= avail; | ||
1965 | budget -= avail; | ||
1966 | } | ||
1967 | __netif_tx_unlock(q->txq); | ||
1968 | } | ||
1969 | |||
1970 | if (++i >= s->ethqsets) | ||
1971 | i = 0; | ||
1972 | } while (budget && i != s->ethtxq_rover); | ||
1973 | s->ethtxq_rover = i; | ||
1974 | mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2)); | ||
1975 | } | ||
1976 | |||
1977 | int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, | ||
1978 | struct net_device *dev, int intr_idx, | ||
1979 | struct sge_fl *fl, rspq_handler_t hnd) | ||
1980 | { | ||
1981 | int ret, flsz = 0; | ||
1982 | struct fw_iq_cmd c; | ||
1983 | struct port_info *pi = netdev_priv(dev); | ||
1984 | |||
1985 | /* Size needs to be a multiple of 16, including the status entry. */ | ||
1986 | iq->size = roundup(iq->size, 16); | ||
1987 | |||
1988 | iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, | ||
1989 | &iq->phys_addr, NULL, 0); | ||
1990 | if (!iq->desc) | ||
1991 | return -ENOMEM; | ||
1992 | |||
1993 | memset(&c, 0, sizeof(c)); | ||
1994 | c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | | ||
1995 | FW_CMD_WRITE | FW_CMD_EXEC | | ||
1996 | FW_IQ_CMD_PFN(0) | FW_IQ_CMD_VFN(0)); | ||
1997 | c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) | | ||
1998 | FW_LEN16(c)); | ||
1999 | c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | | ||
2000 | FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) | | ||
2001 | FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) | | ||
2002 | FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx : | ||
2003 | -intr_idx - 1)); | ||
2004 | c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) | | ||
2005 | FW_IQ_CMD_IQGTSMODE | | ||
2006 | FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) | | ||
2007 | FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4)); | ||
2008 | c.iqsize = htons(iq->size); | ||
2009 | c.iqaddr = cpu_to_be64(iq->phys_addr); | ||
2010 | |||
2011 | if (fl) { | ||
2012 | fl->size = roundup(fl->size, 8); | ||
2013 | fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), | ||
2014 | sizeof(struct rx_sw_desc), &fl->addr, | ||
2015 | &fl->sdesc, STAT_LEN); | ||
2016 | if (!fl->desc) | ||
2017 | goto fl_nomem; | ||
2018 | |||
2019 | flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc); | ||
2020 | c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN | | ||
2021 | FW_IQ_CMD_FL0PADEN); | ||
2022 | c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) | | ||
2023 | FW_IQ_CMD_FL0FBMAX(3)); | ||
2024 | c.fl0size = htons(flsz); | ||
2025 | c.fl0addr = cpu_to_be64(fl->addr); | ||
2026 | } | ||
2027 | |||
2028 | ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); | ||
2029 | if (ret) | ||
2030 | goto err; | ||
2031 | |||
2032 | netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); | ||
2033 | iq->cur_desc = iq->desc; | ||
2034 | iq->cidx = 0; | ||
2035 | iq->gen = 1; | ||
2036 | iq->next_intr_params = iq->intr_params; | ||
2037 | iq->cntxt_id = ntohs(c.iqid); | ||
2038 | iq->abs_id = ntohs(c.physiqid); | ||
2039 | iq->size--; /* subtract status entry */ | ||
2040 | iq->adap = adap; | ||
2041 | iq->netdev = dev; | ||
2042 | iq->handler = hnd; | ||
2043 | |||
2044 | /* set offset to -1 to distinguish ingress queues without FL */ | ||
2045 | iq->offset = fl ? 0 : -1; | ||
2046 | |||
2047 | adap->sge.ingr_map[iq->cntxt_id] = iq; | ||
2048 | |||
2049 | if (fl) { | ||
2050 | fl->cntxt_id = ntohs(c.fl0id); | ||
2051 | fl->avail = fl->pend_cred = 0; | ||
2052 | fl->pidx = fl->cidx = 0; | ||
2053 | fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; | ||
2054 | adap->sge.egr_map[fl->cntxt_id] = fl; | ||
2055 | refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); | ||
2056 | } | ||
2057 | return 0; | ||
2058 | |||
2059 | fl_nomem: | ||
2060 | ret = -ENOMEM; | ||
2061 | err: | ||
2062 | if (iq->desc) { | ||
2063 | dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len, | ||
2064 | iq->desc, iq->phys_addr); | ||
2065 | iq->desc = NULL; | ||
2066 | } | ||
2067 | if (fl && fl->desc) { | ||
2068 | kfree(fl->sdesc); | ||
2069 | fl->sdesc = NULL; | ||
2070 | dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc), | ||
2071 | fl->desc, fl->addr); | ||
2072 | fl->desc = NULL; | ||
2073 | } | ||
2074 | return ret; | ||
2075 | } | ||
2076 | |||
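/*
 * Usage sketch (illustrative; fwevtq_handler is a hypothetical response
 * handler and the sizes are arbitrary): allocate the firmware event
 * queue, which has no free list and is armed either with its own MSI-X
 * vector (msi_idx >= 0) or relative to the interrupt queue (negative
 * index).
 */
static int example_alloc_fwevtq(struct adapter *adap, int msi_idx)
{
	struct sge_rspq *q = &adap->sge.fw_evtq;

	q->size = 1024;		/* response entries; illustrative */
	q->iqe_len = 64;	/* bytes per entry */
	return t4_sge_alloc_rxq(adap, q, true, adap->port[0], msi_idx,
				NULL, fwevtq_handler);
}
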
2077 | static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) | ||
2078 | { | ||
2079 | q->in_use = 0; | ||
2080 | q->cidx = q->pidx = 0; | ||
2081 | q->stops = q->restarts = 0; | ||
2082 | q->stat = (void *)&q->desc[q->size]; | ||
2083 | q->cntxt_id = id; | ||
2084 | adap->sge.egr_map[id] = q; | ||
2085 | } | ||
2086 | |||
2087 | int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, | ||
2088 | struct net_device *dev, struct netdev_queue *netdevq, | ||
2089 | unsigned int iqid) | ||
2090 | { | ||
2091 | int ret, nentries; | ||
2092 | struct fw_eq_eth_cmd c; | ||
2093 | struct port_info *pi = netdev_priv(dev); | ||
2094 | |||
2095 | /* Add status entries */ | ||
2096 | nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); | ||
2097 | |||
2098 | txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, | ||
2099 | sizeof(struct tx_desc), sizeof(struct tx_sw_desc), | ||
2100 | &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN); | ||
2101 | if (!txq->q.desc) | ||
2102 | return -ENOMEM; | ||
2103 | |||
2104 | memset(&c, 0, sizeof(c)); | ||
2105 | c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST | | ||
2106 | FW_CMD_WRITE | FW_CMD_EXEC | | ||
2107 | FW_EQ_ETH_CMD_PFN(0) | FW_EQ_ETH_CMD_VFN(0)); | ||
2108 | c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC | | ||
2109 | FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); | ||
2110 | c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid)); | ||
2111 | c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) | | ||
2112 | FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | | ||
2113 | FW_EQ_ETH_CMD_IQID(iqid)); | ||
2114 | c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) | | ||
2115 | FW_EQ_ETH_CMD_FBMAX(3) | | ||
2116 | FW_EQ_ETH_CMD_CIDXFTHRESH(5) | | ||
2117 | FW_EQ_ETH_CMD_EQSIZE(nentries)); | ||
2118 | c.eqaddr = cpu_to_be64(txq->q.phys_addr); | ||
2119 | |||
2120 | ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); | ||
2121 | if (ret) { | ||
2122 | kfree(txq->q.sdesc); | ||
2123 | txq->q.sdesc = NULL; | ||
2124 | dma_free_coherent(adap->pdev_dev, | ||
2125 | nentries * sizeof(struct tx_desc), | ||
2126 | txq->q.desc, txq->q.phys_addr); | ||
2127 | txq->q.desc = NULL; | ||
2128 | return ret; | ||
2129 | } | ||
2130 | |||
2131 | init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd))); | ||
2132 | txq->txq = netdevq; | ||
2133 | txq->tso = txq->tx_cso = txq->vlan_ins = 0; | ||
2134 | txq->mapping_err = 0; | ||
2135 | return 0; | ||
2136 | } | ||
2137 | |||
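/*
 * Usage sketch (illustrative sizing; assumes the queue-set layout used
 * elsewhere in the driver): allocate one Ethernet Tx queue bound to a
 * netdev Tx queue, completing to the firmware event queue.
 */
static int example_alloc_eth_txq(struct adapter *adap,
				 struct net_device *dev, int qset)
{
	struct sge_eth_txq *txq = &adap->sge.ethtxq[qset];

	txq->q.size = 1024;	/* Tx descriptors; illustrative */
	return t4_sge_alloc_eth_txq(adap, txq, dev,
				    netdev_get_tx_queue(dev, qset),
				    adap->sge.fw_evtq.cntxt_id);
}
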
2138 | int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, | ||
2139 | struct net_device *dev, unsigned int iqid, | ||
2140 | unsigned int cmplqid) | ||
2141 | { | ||
2142 | int ret, nentries; | ||
2143 | struct fw_eq_ctrl_cmd c; | ||
2144 | struct port_info *pi = netdev_priv(dev); | ||
2145 | |||
2146 | /* Add status entries */ | ||
2147 | nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); | ||
2148 | |||
2149 | txq->q.desc = alloc_ring(adap->pdev_dev, nentries, | ||
2150 | sizeof(struct tx_desc), 0, &txq->q.phys_addr, | ||
2151 | NULL, 0); | ||
2152 | if (!txq->q.desc) | ||
2153 | return -ENOMEM; | ||
2154 | |||
2155 | c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST | | ||
2156 | FW_CMD_WRITE | FW_CMD_EXEC | | ||
2157 | FW_EQ_CTRL_CMD_PFN(0) | FW_EQ_CTRL_CMD_VFN(0)); | ||
2158 | c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC | | ||
2159 | FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); | ||
2160 | c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid)); | ||
2161 | c.physeqid_pkd = htonl(0); | ||
2162 | c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) | | ||
2163 | FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) | | ||
2164 | FW_EQ_CTRL_CMD_IQID(iqid)); | ||
2165 | c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) | | ||
2166 | FW_EQ_CTRL_CMD_FBMAX(3) | | ||
2167 | FW_EQ_CTRL_CMD_CIDXFTHRESH(5) | | ||
2168 | FW_EQ_CTRL_CMD_EQSIZE(nentries)); | ||
2169 | c.eqaddr = cpu_to_be64(txq->q.phys_addr); | ||
2170 | |||
2171 | ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); | ||
2172 | if (ret) { | ||
2173 | dma_free_coherent(adap->pdev_dev, | ||
2174 | nentries * sizeof(struct tx_desc), | ||
2175 | txq->q.desc, txq->q.phys_addr); | ||
2176 | txq->q.desc = NULL; | ||
2177 | return ret; | ||
2178 | } | ||
2179 | |||
2180 | init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid))); | ||
2181 | txq->adap = adap; | ||
2182 | skb_queue_head_init(&txq->sendq); | ||
2183 | tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq); | ||
2184 | txq->full = 0; | ||
2185 | return 0; | ||
2186 | } | ||
2187 | |||
2188 | int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, | ||
2189 | struct net_device *dev, unsigned int iqid) | ||
2190 | { | ||
2191 | int ret, nentries; | ||
2192 | struct fw_eq_ofld_cmd c; | ||
2193 | struct port_info *pi = netdev_priv(dev); | ||
2194 | |||
2195 | /* Add status entries */ | ||
2196 | nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); | ||
2197 | |||
2198 | txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, | ||
2199 | sizeof(struct tx_desc), sizeof(struct tx_sw_desc), | ||
2200 | &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN); | ||
2201 | if (!txq->q.desc) | ||
2202 | return -ENOMEM; | ||
2203 | |||
2204 | memset(&c, 0, sizeof(c)); | ||
2205 | c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST | | ||
2206 | FW_CMD_WRITE | FW_CMD_EXEC | | ||
2207 | FW_EQ_OFLD_CMD_PFN(0) | FW_EQ_OFLD_CMD_VFN(0)); | ||
2208 | c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC | | ||
2209 | FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); | ||
2210 | c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) | | ||
2211 | FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) | | ||
2212 | FW_EQ_OFLD_CMD_IQID(iqid)); | ||
2213 | c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) | | ||
2214 | FW_EQ_OFLD_CMD_FBMAX(3) | | ||
2215 | FW_EQ_OFLD_CMD_CIDXFTHRESH(5) | | ||
2216 | FW_EQ_OFLD_CMD_EQSIZE(nentries)); | ||
2217 | c.eqaddr = cpu_to_be64(txq->q.phys_addr); | ||
2218 | |||
2219 | ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); | ||
2220 | if (ret) { | ||
2221 | kfree(txq->q.sdesc); | ||
2222 | txq->q.sdesc = NULL; | ||
2223 | dma_free_coherent(adap->pdev_dev, | ||
2224 | nentries * sizeof(struct tx_desc), | ||
2225 | txq->q.desc, txq->q.phys_addr); | ||
2226 | txq->q.desc = NULL; | ||
2227 | return ret; | ||
2228 | } | ||
2229 | |||
2230 | init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd))); | ||
2231 | txq->adap = adap; | ||
2232 | skb_queue_head_init(&txq->sendq); | ||
2233 | tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq); | ||
2234 | txq->full = 0; | ||
2235 | txq->mapping_err = 0; | ||
2236 | return 0; | ||
2237 | } | ||
2238 | |||
2239 | static void free_txq(struct adapter *adap, struct sge_txq *q) | ||
2240 | { | ||
2241 | dma_free_coherent(adap->pdev_dev, | ||
2242 | q->size * sizeof(struct tx_desc) + STAT_LEN, | ||
2243 | q->desc, q->phys_addr); | ||
2244 | q->cntxt_id = 0; | ||
2245 | q->sdesc = NULL; | ||
2246 | q->desc = NULL; | ||
2247 | } | ||
2248 | |||
2249 | static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, | ||
2250 | struct sge_fl *fl) | ||
2251 | { | ||
2252 | unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; | ||
2253 | |||
2254 | adap->sge.ingr_map[rq->cntxt_id] = NULL; | ||
2255 | t4_iq_free(adap, 0, 0, 0, FW_IQ_TYPE_FL_INT_CAP, rq->cntxt_id, fl_id, | ||
2256 | 0xffff); | ||
2257 | dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, | ||
2258 | rq->desc, rq->phys_addr); | ||
2259 | netif_napi_del(&rq->napi); | ||
2260 | rq->netdev = NULL; | ||
2261 | rq->cntxt_id = rq->abs_id = 0; | ||
2262 | rq->desc = NULL; | ||
2263 | |||
2264 | if (fl) { | ||
2265 | free_rx_bufs(adap, fl, fl->avail); | ||
2266 | dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN, | ||
2267 | fl->desc, fl->addr); | ||
2268 | kfree(fl->sdesc); | ||
2269 | fl->sdesc = NULL; | ||
2270 | fl->cntxt_id = 0; | ||
2271 | fl->desc = NULL; | ||
2272 | } | ||
2273 | } | ||
2274 | |||
2275 | /** | ||
2276 | * t4_free_sge_resources - free SGE resources | ||
2277 | * @adap: the adapter | ||
2278 | * | ||
2279 | * Frees resources used by the SGE queue sets. | ||
2280 | */ | ||
2281 | void t4_free_sge_resources(struct adapter *adap) | ||
2282 | { | ||
2283 | int i; | ||
2284 | struct sge_eth_rxq *eq = adap->sge.ethrxq; | ||
2285 | struct sge_eth_txq *etq = adap->sge.ethtxq; | ||
2286 | struct sge_ofld_rxq *oq = adap->sge.ofldrxq; | ||
2287 | |||
2288 | /* clean up Ethernet Tx/Rx queues */ | ||
2289 | for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) { | ||
2290 | if (eq->rspq.desc) | ||
2291 | free_rspq_fl(adap, &eq->rspq, &eq->fl); | ||
2292 | if (etq->q.desc) { | ||
2293 | t4_eth_eq_free(adap, 0, 0, 0, etq->q.cntxt_id); | ||
2294 | free_tx_desc(adap, &etq->q, etq->q.in_use, true); | ||
2295 | kfree(etq->q.sdesc); | ||
2296 | free_txq(adap, &etq->q); | ||
2297 | } | ||
2298 | } | ||
2299 | |||
2300 | /* clean up RDMA and iSCSI Rx queues */ | ||
2301 | for (i = 0; i < adap->sge.ofldqsets; i++, oq++) { | ||
2302 | if (oq->rspq.desc) | ||
2303 | free_rspq_fl(adap, &oq->rspq, &oq->fl); | ||
2304 | } | ||
2305 | for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) { | ||
2306 | if (oq->rspq.desc) | ||
2307 | free_rspq_fl(adap, &oq->rspq, &oq->fl); | ||
2308 | } | ||
2309 | |||
2310 | /* clean up offload Tx queues */ | ||
2311 | for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) { | ||
2312 | struct sge_ofld_txq *q = &adap->sge.ofldtxq[i]; | ||
2313 | |||
2314 | if (q->q.desc) { | ||
2315 | tasklet_kill(&q->qresume_tsk); | ||
2316 | t4_ofld_eq_free(adap, 0, 0, 0, q->q.cntxt_id); | ||
2317 | free_tx_desc(adap, &q->q, q->q.in_use, false); | ||
2318 | kfree(q->q.sdesc); | ||
2319 | __skb_queue_purge(&q->sendq); | ||
2320 | free_txq(adap, &q->q); | ||
2321 | } | ||
2322 | } | ||
2323 | |||
2324 | /* clean up control Tx queues */ | ||
2325 | for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { | ||
2326 | struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; | ||
2327 | |||
2328 | if (cq->q.desc) { | ||
2329 | tasklet_kill(&cq->qresume_tsk); | ||
2330 | t4_ctrl_eq_free(adap, 0, 0, 0, cq->q.cntxt_id); | ||
2331 | __skb_queue_purge(&cq->sendq); | ||
2332 | free_txq(adap, &cq->q); | ||
2333 | } | ||
2334 | } | ||
2335 | |||
2336 | if (adap->sge.fw_evtq.desc) | ||
2337 | free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); | ||
2338 | |||
2339 | if (adap->sge.intrq.desc) | ||
2340 | free_rspq_fl(adap, &adap->sge.intrq, NULL); | ||
2341 | |||
2342 | /* clear the reverse egress queue map */ | ||
2343 | memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map)); | ||
2344 | } | ||
2345 | |||
2346 | void t4_sge_start(struct adapter *adap) | ||
2347 | { | ||
2348 | adap->sge.ethtxq_rover = 0; | ||
2349 | mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); | ||
2350 | mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); | ||
2351 | } | ||
2352 | |||
2353 | /** | ||
2354 | * t4_sge_stop - disable SGE operation | ||
2355 | * @adap: the adapter | ||
2356 | * | ||
2357 | * Stop tasklets and timers associated with the DMA engine. Note that | ||
2358 | * this is effective only if measures have been taken to disable any HW | ||
2359 | * events that may restart them. | ||
2360 | */ | ||
2361 | void t4_sge_stop(struct adapter *adap) | ||
2362 | { | ||
2363 | int i; | ||
2364 | struct sge *s = &adap->sge; | ||
2365 | |||
2366 | if (in_interrupt()) /* actions below require waiting */ | ||
2367 | return; | ||
2368 | |||
2369 | if (s->rx_timer.function) | ||
2370 | del_timer_sync(&s->rx_timer); | ||
2371 | if (s->tx_timer.function) | ||
2372 | del_timer_sync(&s->tx_timer); | ||
2373 | |||
2374 | for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) { | ||
2375 | struct sge_ofld_txq *q = &s->ofldtxq[i]; | ||
2376 | |||
2377 | if (q->q.desc) | ||
2378 | tasklet_kill(&q->qresume_tsk); | ||
2379 | } | ||
2380 | for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) { | ||
2381 | struct sge_ctrl_txq *cq = &s->ctrlq[i]; | ||
2382 | |||
2383 | if (cq->q.desc) | ||
2384 | tasklet_kill(&cq->qresume_tsk); | ||
2385 | } | ||
2386 | } | ||
2387 | |||
2388 | /** | ||
2389 | * t4_sge_init - initialize SGE | ||
2390 | * @adap: the adapter | ||
2391 | * | ||
2392 | * Performs SGE initialization needed every time after a chip reset. | ||
2393 | * We do not initialize any of the queues here; instead, the driver's | ||
2394 | * top level must request them individually. | ||
2395 | */ | ||
2396 | void t4_sge_init(struct adapter *adap) | ||
2397 | { | ||
2398 | struct sge *s = &adap->sge; | ||
2399 | unsigned int fl_align_log = ilog2(FL_ALIGN); | ||
2400 | |||
2401 | t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK | | ||
2402 | INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE, | ||
2403 | INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) | | ||
2404 | RXPKTCPLMODE | | ||
2405 | (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0)); | ||
2406 | t4_set_reg_field(adap, SGE_HOST_PAGE_SIZE, HOSTPAGESIZEPF0_MASK, | ||
2407 | HOSTPAGESIZEPF0(PAGE_SHIFT - 10)); | ||
2408 | t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE); | ||
2409 | #if FL_PG_ORDER > 0 | ||
2410 | t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER); | ||
2411 | #endif | ||
2412 | t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD, | ||
2413 | THRESHOLD_0(s->counter_val[0]) | | ||
2414 | THRESHOLD_1(s->counter_val[1]) | | ||
2415 | THRESHOLD_2(s->counter_val[2]) | | ||
2416 | THRESHOLD_3(s->counter_val[3])); | ||
2417 | t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1, | ||
2418 | TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) | | ||
2419 | TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1]))); | ||
2420 | t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3, | ||
2421 | TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) | | ||
2422 | TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3]))); | ||
2423 | t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5, | ||
2424 | TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) | | ||
2425 | TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5]))); | ||
2426 | setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap); | ||
2427 | setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap); | ||
2428 | s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */ | ||
2429 | s->idma_state[0] = s->idma_state[1] = 0; | ||
2430 | spin_lock_init(&s->intrq_lock); | ||
2431 | } | ||
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c new file mode 100644 index 000000000000..a814a3afe123 --- /dev/null +++ b/drivers/net/cxgb4/t4_hw.c | |||
@@ -0,0 +1,3131 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio T4 Ethernet driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/init.h> | ||
36 | #include <linux/delay.h> | ||
37 | #include "cxgb4.h" | ||
38 | #include "t4_regs.h" | ||
39 | #include "t4fw_api.h" | ||
40 | |||
41 | /** | ||
42 | * t4_wait_op_done_val - wait until an operation is completed | ||
43 | * @adapter: the adapter performing the operation | ||
44 | * @reg: the register to check for completion | ||
45 | * @mask: a single-bit field within @reg that indicates completion | ||
46 | * @polarity: the value of the field when the operation is completed | ||
47 | * @attempts: number of check iterations | ||
48 | * @delay: delay in usecs between iterations | ||
49 | * @valp: where to store the value of the register at completion time | ||
50 | * | ||
51 | * Wait until an operation is completed by checking a bit in a register | ||
52 | * up to @attempts times. If @valp is not NULL the value of the register | ||
53 | * at the time it indicated completion is stored there. Returns 0 if the | ||
54 | * operation completes and -EAGAIN otherwise. | ||
55 | */ | ||
56 | int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, | ||
57 | int polarity, int attempts, int delay, u32 *valp) | ||
58 | { | ||
59 | while (1) { | ||
60 | u32 val = t4_read_reg(adapter, reg); | ||
61 | |||
62 | if (!!(val & mask) == polarity) { | ||
63 | if (valp) | ||
64 | *valp = val; | ||
65 | return 0; | ||
66 | } | ||
67 | if (--attempts == 0) | ||
68 | return -EAGAIN; | ||
69 | if (delay) | ||
70 | udelay(delay); | ||
71 | } | ||
72 | } | ||
73 | |||
74 | static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, | ||
75 | int polarity, int attempts, int delay) | ||
76 | { | ||
77 | return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts, | ||
78 | delay, NULL); | ||
79 | } | ||
80 | |||
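/*
 * Usage sketch for the polling helpers (the EXAMPLE_* register, bit and
 * field names are hypothetical): spin up to 10 times at 5 us intervals
 * waiting for a DONE bit to rise, then pick a result field out of the
 * final register value.
 */
static int example_wait_for_done(struct adapter *adap)
{
	u32 val;
	int ret = t4_wait_op_done_val(adap, EXAMPLE_STATUS_REG, EXAMPLE_DONE,
				      1, 10, 5, &val);

	if (ret)
		return ret;	/* -EAGAIN: bit never reached the polarity */
	return EXAMPLE_RESULT_GET(val);
}
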
81 | /** | ||
82 | * t4_set_reg_field - set a register field to a value | ||
83 | * @adapter: the adapter to program | ||
84 | * @addr: the register address | ||
85 | * @mask: specifies the portion of the register to modify | ||
86 | * @val: the new value for the register field | ||
87 | * | ||
88 | * Sets a register field specified by the supplied mask to the | ||
89 | * given value. | ||
90 | */ | ||
91 | void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, | ||
92 | u32 val) | ||
93 | { | ||
94 | u32 v = t4_read_reg(adapter, addr) & ~mask; | ||
95 | |||
96 | t4_write_reg(adapter, addr, v | val); | ||
97 | (void) t4_read_reg(adapter, addr); /* flush */ | ||
98 | } | ||
99 | |||
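/*
 * Read-modify-write sketch (hypothetical register and field): program a
 * 3-bit field at bits 6:4 to 5. @mask selects the bits to change and @val
 * supplies their new, already-shifted contents; all other bits keep their
 * current values.
 */
static void example_set_field(struct adapter *adap)
{
	t4_set_reg_field(adap, EXAMPLE_CTRL_REG, 0x7U << 4, 0x5U << 4);
}
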
100 | /** | ||
101 | * t4_read_indirect - read indirectly addressed registers | ||
102 | * @adap: the adapter | ||
103 | * @addr_reg: register holding the indirect address | ||
104 | * @data_reg: register holding the value of the indirect register | ||
105 | * @vals: where the read register values are stored | ||
106 | * @nregs: how many indirect registers to read | ||
107 | * @start_idx: index of first indirect register to read | ||
108 | * | ||
109 | * Reads registers that are accessed indirectly through an address/data | ||
110 | * register pair. | ||
111 | */ | ||
112 | void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, | ||
113 | unsigned int data_reg, u32 *vals, unsigned int nregs, | ||
114 | unsigned int start_idx) | ||
115 | { | ||
116 | while (nregs--) { | ||
117 | t4_write_reg(adap, addr_reg, start_idx); | ||
118 | *vals++ = t4_read_reg(adap, data_reg); | ||
119 | start_idx++; | ||
120 | } | ||
121 | } | ||
122 | |||
123 | /** | ||
124 | * t4_write_indirect - write indirectly addressed registers | ||
125 | * @adap: the adapter | ||
126 | * @addr_reg: register holding the indirect addresses | ||
127 | * @data_reg: register holding the value for the indirect registers | ||
128 | * @vals: values to write | ||
129 | * @nregs: how many indirect registers to write | ||
130 | * @start_idx: index of first indirect register to write | ||
131 | * | ||
132 | * Writes a sequential block of registers that are accessed indirectly | ||
133 | * through an address/data register pair. | ||
134 | */ | ||
135 | void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, | ||
136 | unsigned int data_reg, const u32 *vals, | ||
137 | unsigned int nregs, unsigned int start_idx) | ||
138 | { | ||
139 | while (nregs--) { | ||
140 | t4_write_reg(adap, addr_reg, start_idx++); | ||
141 | t4_write_reg(adap, data_reg, *vals++); | ||
142 | } | ||
143 | } | ||
144 | |||
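/*
 * Sketch of the address/data access pattern (hypothetical register pair):
 * dump eight consecutive indirect registers starting at index 0. Each
 * iteration inside t4_read_indirect() programs the address register and
 * then pulls the value back out through the data register.
 */
static void example_dump_indirect(struct adapter *adap, u32 *buf)
{
	t4_read_indirect(adap, EXAMPLE_ADDR_REG, EXAMPLE_DATA_REG, buf, 8, 0);
}
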
145 | /* | ||
146 | * Get the reply to a mailbox command and store it in @rpl in big-endian order. | ||
147 | */ | ||
148 | static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, | ||
149 | u32 mbox_addr) | ||
150 | { | ||
151 | for ( ; nflit; nflit--, mbox_addr += 8) | ||
152 | *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr)); | ||
153 | } | ||
154 | |||
155 | /* | ||
156 | * Handle a FW assertion reported in a mailbox. | ||
157 | */ | ||
158 | static void fw_asrt(struct adapter *adap, u32 mbox_addr) | ||
159 | { | ||
160 | struct fw_debug_cmd asrt; | ||
161 | |||
162 | get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr); | ||
163 | dev_alert(adap->pdev_dev, | ||
164 | "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n", | ||
165 | asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line), | ||
166 | ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y)); | ||
167 | } | ||
168 | |||
169 | static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg) | ||
170 | { | ||
171 | dev_err(adap->pdev_dev, | ||
172 | "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox, | ||
173 | (unsigned long long)t4_read_reg64(adap, data_reg), | ||
174 | (unsigned long long)t4_read_reg64(adap, data_reg + 8), | ||
175 | (unsigned long long)t4_read_reg64(adap, data_reg + 16), | ||
176 | (unsigned long long)t4_read_reg64(adap, data_reg + 24), | ||
177 | (unsigned long long)t4_read_reg64(adap, data_reg + 32), | ||
178 | (unsigned long long)t4_read_reg64(adap, data_reg + 40), | ||
179 | (unsigned long long)t4_read_reg64(adap, data_reg + 48), | ||
180 | (unsigned long long)t4_read_reg64(adap, data_reg + 56)); | ||
181 | } | ||
182 | |||
183 | /** | ||
184 | * t4_wr_mbox_meat - send a command to FW through the given mailbox | ||
185 | * @adap: the adapter | ||
186 | * @mbox: index of the mailbox to use | ||
187 | * @cmd: the command to write | ||
188 | * @size: command length in bytes | ||
189 | * @rpl: where to optionally store the reply | ||
190 | * @sleep_ok: if true we may sleep while awaiting command completion | ||
191 | * | ||
192 | * Sends the given command to FW through the selected mailbox and waits | ||
193 | * for the FW to execute the command. If @rpl is not %NULL it is used to | ||
194 | * store the FW's reply to the command. The command and its optional | ||
195 | * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms | ||
196 | * to respond. @sleep_ok determines whether we may sleep while awaiting | ||
197 | * the response. If sleeping is allowed we use progressive backoff; | ||
198 | * otherwise we spin. | ||
199 | * | ||
200 | * The return value is 0 on success or a negative errno on failure. A | ||
201 | * failure can happen either because we are not able to execute the | ||
202 | * command or FW executes it but signals an error. In the latter case | ||
203 | * the return value is the error code indicated by FW (negated). | ||
204 | */ | ||
205 | int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, | ||
206 | void *rpl, bool sleep_ok) | ||
207 | { | ||
208 | static int delay[] = { | ||
209 | 1, 1, 3, 5, 10, 10, 20, 50, 100, 200 | ||
210 | }; | ||
211 | |||
212 | u32 v; | ||
213 | u64 res; | ||
214 | int i, ms, delay_idx; | ||
215 | const __be64 *p = cmd; | ||
216 | u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA); | ||
217 | u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL); | ||
218 | |||
219 | if ((size & 15) || size > MBOX_LEN) | ||
220 | return -EINVAL; | ||
221 | |||
222 | v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); | ||
223 | for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) | ||
224 | v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); | ||
225 | |||
226 | if (v != MBOX_OWNER_DRV) | ||
227 | return v ? -EBUSY : -ETIMEDOUT; | ||
228 | |||
229 | for (i = 0; i < size; i += 8) | ||
230 | t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++)); | ||
231 | |||
232 | t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); | ||
233 | t4_read_reg(adap, ctl_reg); /* flush write */ | ||
234 | |||
235 | delay_idx = 0; | ||
236 | ms = delay[0]; | ||
237 | |||
238 | for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) { | ||
239 | if (sleep_ok) { | ||
240 | ms = delay[delay_idx]; /* last element may repeat */ | ||
241 | if (delay_idx < ARRAY_SIZE(delay) - 1) | ||
242 | delay_idx++; | ||
243 | msleep(ms); | ||
244 | } else | ||
245 | mdelay(ms); | ||
246 | |||
247 | v = t4_read_reg(adap, ctl_reg); | ||
248 | if (MBOWNER_GET(v) == MBOX_OWNER_DRV) { | ||
249 | if (!(v & MBMSGVALID)) { | ||
250 | t4_write_reg(adap, ctl_reg, 0); | ||
251 | continue; | ||
252 | } | ||
253 | |||
254 | res = t4_read_reg64(adap, data_reg); | ||
255 | if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) { | ||
256 | fw_asrt(adap, data_reg); | ||
257 | res = FW_CMD_RETVAL(EIO); | ||
258 | } else if (rpl) | ||
259 | get_mbox_rpl(adap, rpl, size / 8, data_reg); | ||
260 | |||
261 | if (FW_CMD_RETVAL_GET((int)res)) | ||
262 | dump_mbox(adap, mbox, data_reg); | ||
263 | t4_write_reg(adap, ctl_reg, 0); | ||
264 | return -FW_CMD_RETVAL_GET((int)res); | ||
265 | } | ||
266 | } | ||
267 | |||
268 | dump_mbox(adap, mbox, data_reg); | ||
269 | dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", | ||
270 | *(const u8 *)cmd, mbox); | ||
271 | return -ETIMEDOUT; | ||
272 | } | ||
273 | |||
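/*
 * Mailbox round-trip sketch, modeled on callers later in this file such as
 * t4_restart_aneg(): build a command whose length is a multiple of 16 and
 * no more than MBOX_LEN, send it, and reuse the same structure for the
 * reply. t4_wr_mbox() is assumed to be the sleeping wrapper around
 * t4_wr_mbox_meat() from cxgb4.h.
 */
static int example_mbox_cmd(struct adapter *adap, unsigned int mbox,
			    unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
}
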
274 | /** | ||
275 | * t4_mc_read - read from MC through backdoor accesses | ||
276 | * @adap: the adapter | ||
277 | * @addr: address of first byte requested | ||
278 | * @data: buffer for the 64 bytes of data covering @addr | ||
279 | * @ecc: where to store the corresponding 64-bit ECC word | ||
280 | * | ||
281 | * Read 64 bytes of data from MC starting at a 64-byte-aligned address | ||
282 | * that covers the requested address @addr. If @ecc is not %NULL it | ||
283 | * is assigned the 64-bit ECC word for the read data. | ||
284 | */ | ||
285 | int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc) | ||
286 | { | ||
287 | int i; | ||
288 | |||
289 | if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST) | ||
290 | return -EBUSY; | ||
291 | t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU); | ||
292 | t4_write_reg(adap, MC_BIST_CMD_LEN, 64); | ||
293 | t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc); | ||
294 | t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST | | ||
295 | BIST_CMD_GAP(1)); | ||
296 | i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1); | ||
297 | if (i) | ||
298 | return i; | ||
299 | |||
300 | #define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i) | ||
301 | |||
302 | for (i = 15; i >= 0; i--) | ||
303 | *data++ = htonl(t4_read_reg(adap, MC_DATA(i))); | ||
304 | if (ecc) | ||
305 | *ecc = t4_read_reg64(adap, MC_DATA(16)); | ||
306 | #undef MC_DATA | ||
307 | return 0; | ||
308 | } | ||
309 | |||
310 | /** | ||
311 | * t4_edc_read - read from EDC through backdoor accesses | ||
312 | * @adap: the adapter | ||
313 | * @idx: which EDC to access | ||
314 | * @addr: address of first byte requested | ||
315 | * @data: buffer for the 64 bytes of data covering @addr | ||
316 | * @ecc: where to store the corresponding 64-bit ECC word | ||
317 | * | ||
318 | * Read 64 bytes of data from EDC starting at a 64-byte-aligned address | ||
319 | * that covers the requested address @addr. If @ecc is not %NULL it | ||
320 | * is assigned the 64-bit ECC word for the read data. | ||
321 | */ | ||
322 | int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) | ||
323 | { | ||
324 | int i; | ||
325 | |||
326 | idx *= EDC_STRIDE; | ||
327 | if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST) | ||
328 | return -EBUSY; | ||
329 | t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU); | ||
330 | t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64); | ||
331 | t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc); | ||
332 | t4_write_reg(adap, EDC_BIST_CMD + idx, | ||
333 | BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST); | ||
334 | i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1); | ||
335 | if (i) | ||
336 | return i; | ||
337 | |||
338 | #define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx) | ||
339 | |||
340 | for (i = 15; i >= 0; i--) | ||
341 | *data++ = htonl(t4_read_reg(adap, EDC_DATA(i))); | ||
342 | if (ecc) | ||
343 | *ecc = t4_read_reg64(adap, EDC_DATA(16)); | ||
344 | #undef EDC_DATA | ||
345 | return 0; | ||
346 | } | ||
347 | |||
348 | #define VPD_ENTRY(name, len) \ | ||
349 | u8 name##_kword[2]; u8 name##_len; u8 name##_data[len] | ||
350 | |||
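/*
 * For reference, VPD_ENTRY(sn, SERNUM_LEN) expands to the standard VPD-R
 * keyword layout of two keyword bytes, a length byte, then the data:
 *
 *	u8 sn_kword[2]; u8 sn_len; u8 sn_data[SERNUM_LEN];
 */
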
351 | /* | ||
352 | * Partial EEPROM Vital Product Data structure. Includes only the ID and | ||
353 | * VPD-R sections. | ||
354 | */ | ||
355 | struct t4_vpd { | ||
356 | u8 id_tag; | ||
357 | u8 id_len[2]; | ||
358 | u8 id_data[ID_LEN]; | ||
359 | u8 vpdr_tag; | ||
360 | u8 vpdr_len[2]; | ||
361 | VPD_ENTRY(pn, 16); /* part number */ | ||
362 | VPD_ENTRY(ec, EC_LEN); /* EC level */ | ||
363 | VPD_ENTRY(sn, SERNUM_LEN); /* serial number */ | ||
364 | VPD_ENTRY(na, 12); /* MAC address base */ | ||
365 | VPD_ENTRY(port_type, 8); /* port types */ | ||
366 | VPD_ENTRY(gpio, 14); /* GPIO usage */ | ||
367 | VPD_ENTRY(cclk, 6); /* core clock */ | ||
368 | VPD_ENTRY(port_addr, 8); /* port MDIO addresses */ | ||
369 | VPD_ENTRY(rv, 1); /* csum */ | ||
370 | u32 pad; /* for multiple-of-4 sizing and alignment */ | ||
371 | }; | ||
372 | |||
373 | #define EEPROM_STAT_ADDR 0x7bfc | ||
374 | #define VPD_BASE 0 | ||
375 | |||
376 | /** | ||
377 | * t4_seeprom_wp - enable/disable EEPROM write protection | ||
378 | * @adapter: the adapter | ||
379 | * @enable: whether to enable or disable write protection | ||
380 | * | ||
381 | * Enables or disables write protection on the serial EEPROM. | ||
382 | */ | ||
383 | int t4_seeprom_wp(struct adapter *adapter, bool enable) | ||
384 | { | ||
385 | unsigned int v = enable ? 0xc : 0; | ||
386 | int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v); | ||
387 | return ret < 0 ? ret : 0; | ||
388 | } | ||
389 | |||
390 | /** | ||
391 | * get_vpd_params - read VPD parameters from VPD EEPROM | ||
392 | * @adapter: adapter to read | ||
393 | * @p: where to store the parameters | ||
394 | * | ||
395 | * Reads card parameters stored in VPD EEPROM. | ||
396 | */ | ||
397 | static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) | ||
398 | { | ||
399 | int ret; | ||
400 | struct t4_vpd vpd; | ||
401 | u8 *q = (u8 *)&vpd, csum; | ||
402 | |||
403 | ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), &vpd); | ||
404 | if (ret < 0) | ||
405 | return ret; | ||
406 | |||
407 | for (csum = 0; q <= vpd.rv_data; q++) | ||
408 | csum += *q; | ||
409 | |||
410 | if (csum) { | ||
411 | dev_err(adapter->pdev_dev, | ||
412 | "corrupted VPD EEPROM, actual csum %u\n", csum); | ||
413 | return -EINVAL; | ||
414 | } | ||
415 | |||
416 | p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10); | ||
417 | memcpy(p->id, vpd.id_data, sizeof(vpd.id_data)); | ||
418 | strim(p->id); | ||
419 | memcpy(p->ec, vpd.ec_data, sizeof(vpd.ec_data)); | ||
420 | strim(p->ec); | ||
421 | memcpy(p->sn, vpd.sn_data, sizeof(vpd.sn_data)); | ||
422 | strim(p->sn); | ||
423 | return 0; | ||
424 | } | ||
425 | |||
426 | /* serial flash and firmware constants */ | ||
427 | enum { | ||
428 | SF_ATTEMPTS = 10, /* max retries for SF operations */ | ||
429 | |||
430 | /* flash command opcodes */ | ||
431 | SF_PROG_PAGE = 2, /* program page */ | ||
432 | SF_WR_DISABLE = 4, /* disable writes */ | ||
433 | SF_RD_STATUS = 5, /* read status register */ | ||
434 | SF_WR_ENABLE = 6, /* enable writes */ | ||
435 | SF_RD_DATA_FAST = 0xb, /* read flash */ | ||
436 | SF_ERASE_SECTOR = 0xd8, /* erase sector */ | ||
437 | |||
438 | FW_START_SEC = 8, /* first flash sector for FW */ | ||
439 | FW_END_SEC = 15, /* last flash sector for FW */ | ||
440 | FW_IMG_START = FW_START_SEC * SF_SEC_SIZE, | ||
441 | FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE, | ||
442 | }; | ||
443 | |||
444 | /** | ||
445 | * sf1_read - read data from the serial flash | ||
446 | * @adapter: the adapter | ||
447 | * @byte_cnt: number of bytes to read | ||
448 | * @cont: whether another operation will be chained | ||
449 | * @lock: whether to lock SF for PL access only | ||
450 | * @valp: where to store the read data | ||
451 | * | ||
452 | * Reads up to 4 bytes of data from the serial flash. The location of | ||
453 | * the read needs to be specified prior to calling this by issuing the | ||
454 | * appropriate commands to the serial flash. | ||
455 | */ | ||
456 | static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, | ||
457 | int lock, u32 *valp) | ||
458 | { | ||
459 | int ret; | ||
460 | |||
461 | if (!byte_cnt || byte_cnt > 4) | ||
462 | return -EINVAL; | ||
463 | if (t4_read_reg(adapter, SF_OP) & BUSY) | ||
464 | return -EBUSY; | ||
465 | cont = cont ? SF_CONT : 0; | ||
466 | lock = lock ? SF_LOCK : 0; | ||
467 | t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1)); | ||
468 | ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5); | ||
469 | if (!ret) | ||
470 | *valp = t4_read_reg(adapter, SF_DATA); | ||
471 | return ret; | ||
472 | } | ||
473 | |||
474 | /** | ||
475 | * sf1_write - write data to the serial flash | ||
476 | * @adapter: the adapter | ||
477 | * @byte_cnt: number of bytes to write | ||
478 | * @cont: whether another operation will be chained | ||
479 | * @lock: whether to lock SF for PL access only | ||
480 | * @val: value to write | ||
481 | * | ||
482 | * Writes up to 4 bytes of data to the serial flash. The location of | ||
483 | * the write needs to be specified prior to calling this by issuing the | ||
484 | * appropriate commands to the serial flash. | ||
485 | */ | ||
486 | static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, | ||
487 | int lock, u32 val) | ||
488 | { | ||
489 | if (!byte_cnt || byte_cnt > 4) | ||
490 | return -EINVAL; | ||
491 | if (t4_read_reg(adapter, SF_OP) & BUSY) | ||
492 | return -EBUSY; | ||
493 | cont = cont ? SF_CONT : 0; | ||
494 | lock = lock ? SF_LOCK : 0; | ||
495 | t4_write_reg(adapter, SF_DATA, val); | ||
496 | t4_write_reg(adapter, SF_OP, lock | | ||
497 | cont | BYTECNT(byte_cnt - 1) | OP_WR); | ||
498 | return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5); | ||
499 | } | ||
500 | |||
501 | /** | ||
502 | * flash_wait_op - wait for a flash operation to complete | ||
503 | * @adapter: the adapter | ||
504 | * @attempts: max number of polls of the status register | ||
505 | * @delay: delay between polls in ms | ||
506 | * | ||
507 | * Wait for a flash operation to complete by polling the status register. | ||
508 | */ | ||
509 | static int flash_wait_op(struct adapter *adapter, int attempts, int delay) | ||
510 | { | ||
511 | int ret; | ||
512 | u32 status; | ||
513 | |||
514 | while (1) { | ||
515 | if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 || | ||
516 | (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0) | ||
517 | return ret; | ||
518 | if (!(status & 1)) | ||
519 | return 0; | ||
520 | if (--attempts == 0) | ||
521 | return -EAGAIN; | ||
522 | if (delay) | ||
523 | msleep(delay); | ||
524 | } | ||
525 | } | ||
526 | |||
527 | /** | ||
528 | * t4_read_flash - read words from serial flash | ||
529 | * @adapter: the adapter | ||
530 | * @addr: the start address for the read | ||
531 | * @nwords: how many 32-bit words to read | ||
532 | * @data: where to store the read data | ||
533 | * @byte_oriented: whether to store data as bytes or as words | ||
534 | * | ||
535 | * Read the specified number of 32-bit words from the serial flash. | ||
536 | * If @byte_oriented is set the read data is stored as a byte array | ||
537 | * (i.e., big-endian), otherwise as 32-bit words in the platform's | ||
538 | * natural endianness. | ||
539 | */ | ||
540 | int t4_read_flash(struct adapter *adapter, unsigned int addr, | ||
541 | unsigned int nwords, u32 *data, int byte_oriented) | ||
542 | { | ||
543 | int ret; | ||
544 | |||
545 | if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3)) | ||
546 | return -EINVAL; | ||
547 | |||
548 | addr = swab32(addr) | SF_RD_DATA_FAST; | ||
549 | |||
550 | if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 || | ||
551 | (ret = sf1_read(adapter, 1, 1, 0, data)) != 0) | ||
552 | return ret; | ||
553 | |||
554 | for ( ; nwords; nwords--, data++) { | ||
555 | ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data); | ||
556 | if (nwords == 1) | ||
557 | t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ | ||
558 | if (ret) | ||
559 | return ret; | ||
560 | if (byte_oriented) | ||
561 | *data = htonl(*data); | ||
562 | } | ||
563 | return 0; | ||
564 | } | ||
565 | |||
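/*
 * Caller sketch for the two storage modes: with @byte_oriented set the
 * buffer holds the flash bytes in address order (big-endian words), which
 * suits memcmp() against a byte image; with it clear each word arrives in
 * host order, which suits direct field reads. This mirrors
 * get_fw_version() further down.
 */
static int example_read_fw_ver_word(struct adapter *adap, u32 *vers)
{
	return t4_read_flash(adap,
			     FW_IMG_START + offsetof(struct fw_hdr, fw_ver),
			     1, vers, 0);
}
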
566 | /** | ||
567 | * t4_write_flash - write up to a page of data to the serial flash | ||
568 | * @adapter: the adapter | ||
569 | * @addr: the start address to write | ||
570 | * @n: length of data to write in bytes | ||
571 | * @data: the data to write | ||
572 | * | ||
573 | * Writes up to a page of data (256 bytes) to the serial flash starting | ||
574 | * at the given address. All the data must be written to the same page. | ||
575 | */ | ||
576 | static int t4_write_flash(struct adapter *adapter, unsigned int addr, | ||
577 | unsigned int n, const u8 *data) | ||
578 | { | ||
579 | int ret; | ||
580 | u32 buf[64]; | ||
581 | unsigned int i, c, left, val, offset = addr & 0xff; | ||
582 | |||
583 | if (addr >= SF_SIZE || offset + n > SF_PAGE_SIZE) | ||
584 | return -EINVAL; | ||
585 | |||
586 | val = swab32(addr) | SF_PROG_PAGE; | ||
587 | |||
588 | if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || | ||
589 | (ret = sf1_write(adapter, 4, 1, 1, val)) != 0) | ||
590 | goto unlock; | ||
591 | |||
592 | for (left = n; left; left -= c) { | ||
593 | c = min(left, 4U); | ||
594 | for (val = 0, i = 0; i < c; ++i) | ||
595 | val = (val << 8) + *data++; | ||
596 | |||
597 | ret = sf1_write(adapter, c, c != left, 1, val); | ||
598 | if (ret) | ||
599 | goto unlock; | ||
600 | } | ||
601 | ret = flash_wait_op(adapter, 5, 1); | ||
602 | if (ret) | ||
603 | goto unlock; | ||
604 | |||
605 | t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ | ||
606 | |||
607 | /* Read the page to verify the write succeeded */ | ||
608 | ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); | ||
609 | if (ret) | ||
610 | return ret; | ||
611 | |||
612 | if (memcmp(data - n, (u8 *)buf + offset, n)) { | ||
613 | dev_err(adapter->pdev_dev, | ||
614 | "failed to correctly write the flash page at %#x\n", | ||
615 | addr); | ||
616 | return -EIO; | ||
617 | } | ||
618 | return 0; | ||
619 | |||
620 | unlock: | ||
621 | t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ | ||
622 | return ret; | ||
623 | } | ||
624 | |||
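/*
 * Page-split sketch: a single t4_write_flash() call must stay within one
 * 256-byte page, so a caller writing an arbitrary span chops it at page
 * boundaries first. t4_load_fw() below avoids the issue by writing whole
 * SF_PAGE_SIZE chunks at page-aligned addresses.
 */
static int example_write_span(struct adapter *adap, unsigned int addr,
			      unsigned int n, const u8 *buf)
{
	while (n) {
		/* bytes left in the current 256-byte page */
		unsigned int c = min(n, SF_PAGE_SIZE - (addr & 0xff));
		int ret = t4_write_flash(adap, addr, c, buf);

		if (ret)
			return ret;
		addr += c;
		buf += c;
		n -= c;
	}
	return 0;
}
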
625 | /** | ||
626 | * get_fw_version - read the firmware version | ||
627 | * @adapter: the adapter | ||
628 | * @vers: where to place the version | ||
629 | * | ||
630 | * Reads the FW version from flash. | ||
631 | */ | ||
632 | static int get_fw_version(struct adapter *adapter, u32 *vers) | ||
633 | { | ||
634 | return t4_read_flash(adapter, | ||
635 | FW_IMG_START + offsetof(struct fw_hdr, fw_ver), 1, | ||
636 | vers, 0); | ||
637 | } | ||
638 | |||
639 | /** | ||
640 | * get_tp_version - read the TP microcode version | ||
641 | * @adapter: the adapter | ||
642 | * @vers: where to place the version | ||
643 | * | ||
644 | * Reads the TP microcode version from flash. | ||
645 | */ | ||
646 | static int get_tp_version(struct adapter *adapter, u32 *vers) | ||
647 | { | ||
648 | return t4_read_flash(adapter, FW_IMG_START + offsetof(struct fw_hdr, | ||
649 | tp_microcode_ver), | ||
650 | 1, vers, 0); | ||
651 | } | ||
652 | |||
653 | /** | ||
654 | * t4_check_fw_version - check if the FW is compatible with this driver | ||
655 | * @adapter: the adapter | ||
656 | * | ||
657 | * Checks if an adapter's FW is compatible with the driver. Returns 0 | ||
658 | * if there's an exact match, a negative error if the version could not be | ||
659 | * read or there's a major version mismatch, and a positive value if the | ||
660 | * expected major version is found but there's a minor version mismatch. | ||
661 | */ | ||
662 | int t4_check_fw_version(struct adapter *adapter) | ||
663 | { | ||
664 | u32 api_vers[2]; | ||
665 | int ret, major, minor, micro; | ||
666 | |||
667 | ret = get_fw_version(adapter, &adapter->params.fw_vers); | ||
668 | if (!ret) | ||
669 | ret = get_tp_version(adapter, &adapter->params.tp_vers); | ||
670 | if (!ret) | ||
671 | ret = t4_read_flash(adapter, | ||
672 | FW_IMG_START + offsetof(struct fw_hdr, intfver_nic), | ||
673 | 2, api_vers, 1); | ||
674 | if (ret) | ||
675 | return ret; | ||
676 | |||
677 | major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers); | ||
678 | minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers); | ||
679 | micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers); | ||
680 | memcpy(adapter->params.api_vers, api_vers, | ||
681 | sizeof(adapter->params.api_vers)); | ||
682 | |||
683 | if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */ | ||
684 | dev_err(adapter->pdev_dev, | ||
685 | "card FW has major version %u, driver wants %u\n", | ||
686 | major, FW_VERSION_MAJOR); | ||
687 | return -EINVAL; | ||
688 | } | ||
689 | |||
690 | if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO) | ||
691 | return 0; /* perfect match */ | ||
692 | |||
693 | /* Minor/micro version mismatch. Report it but often it's OK. */ | ||
694 | return 1; | ||
695 | } | ||
696 | |||
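/*
 * Caller sketch for the tri-state return: a negative errno means the
 * version could not be read or the major numbers differ (abort), 0 is an
 * exact match, and a positive value is a minor/micro mismatch that a probe
 * routine would typically log and tolerate.
 */
static int example_handle_fw_check(struct adapter *adap)
{
	int ret = t4_check_fw_version(adap);

	if (ret < 0)
		return ret;
	if (ret > 0)
		dev_warn(adap->pdev_dev,
			 "FW minor/micro version mismatch, continuing\n");
	return 0;
}
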
697 | /** | ||
698 | * t4_flash_erase_sectors - erase a range of flash sectors | ||
699 | * @adapter: the adapter | ||
700 | * @start: the first sector to erase | ||
701 | * @end: the last sector to erase | ||
702 | * | ||
703 | * Erases the sectors in the given inclusive range. | ||
704 | */ | ||
705 | static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end) | ||
706 | { | ||
707 | int ret = 0; | ||
708 | |||
709 | while (start <= end) { | ||
710 | if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || | ||
711 | (ret = sf1_write(adapter, 4, 0, 1, | ||
712 | SF_ERASE_SECTOR | (start << 8))) != 0 || | ||
713 | (ret = flash_wait_op(adapter, 5, 500)) != 0) { | ||
714 | dev_err(adapter->pdev_dev, | ||
715 | "erase of flash sector %d failed, error %d\n", | ||
716 | start, ret); | ||
717 | break; | ||
718 | } | ||
719 | start++; | ||
720 | } | ||
721 | t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ | ||
722 | return ret; | ||
723 | } | ||
724 | |||
725 | /** | ||
726 | * t4_load_fw - download firmware | ||
727 | * @adap: the adapter | ||
728 | * @fw_data: the firmware image to write | ||
729 | * @size: image size | ||
730 | * | ||
731 | * Write the supplied firmware image to the card's serial flash. | ||
732 | */ | ||
733 | int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) | ||
734 | { | ||
735 | u32 csum; | ||
736 | int ret, addr; | ||
737 | unsigned int i; | ||
738 | u8 first_page[SF_PAGE_SIZE]; | ||
739 | const u32 *p = (const u32 *)fw_data; | ||
740 | const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; | ||
741 | |||
742 | if (!size) { | ||
743 | dev_err(adap->pdev_dev, "FW image has no data\n"); | ||
744 | return -EINVAL; | ||
745 | } | ||
746 | if (size & 511) { | ||
747 | dev_err(adap->pdev_dev, | ||
748 | "FW image size not multiple of 512 bytes\n"); | ||
749 | return -EINVAL; | ||
750 | } | ||
751 | if (ntohs(hdr->len512) * 512 != size) { | ||
752 | dev_err(adap->pdev_dev, | ||
753 | "FW image size differs from size in FW header\n"); | ||
754 | return -EINVAL; | ||
755 | } | ||
756 | if (size > FW_MAX_SIZE) { | ||
757 | dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n", | ||
758 | FW_MAX_SIZE); | ||
759 | return -EFBIG; | ||
760 | } | ||
761 | |||
762 | for (csum = 0, i = 0; i < size / sizeof(csum); i++) | ||
763 | csum += ntohl(p[i]); | ||
764 | |||
765 | if (csum != 0xffffffff) { | ||
766 | dev_err(adap->pdev_dev, | ||
767 | "corrupted firmware image, checksum %#x\n", csum); | ||
768 | return -EINVAL; | ||
769 | } | ||
770 | |||
771 | i = DIV_ROUND_UP(size, SF_SEC_SIZE); /* # of sectors spanned */ | ||
772 | ret = t4_flash_erase_sectors(adap, FW_START_SEC, FW_START_SEC + i - 1); | ||
773 | if (ret) | ||
774 | goto out; | ||
775 | |||
776 | /* | ||
777 | * We write the correct version at the end so the driver can see a bad | ||
778 | * version if the FW write fails. Start by writing a copy of the | ||
779 | * first page with a bad version. | ||
780 | */ | ||
781 | memcpy(first_page, fw_data, SF_PAGE_SIZE); | ||
782 | ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff); | ||
783 | ret = t4_write_flash(adap, FW_IMG_START, SF_PAGE_SIZE, first_page); | ||
784 | if (ret) | ||
785 | goto out; | ||
786 | |||
787 | addr = FW_IMG_START; | ||
788 | for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { | ||
789 | addr += SF_PAGE_SIZE; | ||
790 | fw_data += SF_PAGE_SIZE; | ||
791 | ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data); | ||
792 | if (ret) | ||
793 | goto out; | ||
794 | } | ||
795 | |||
796 | ret = t4_write_flash(adap, | ||
797 | FW_IMG_START + offsetof(struct fw_hdr, fw_ver), | ||
798 | sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver); | ||
799 | out: | ||
800 | if (ret) | ||
801 | dev_err(adap->pdev_dev, "firmware download failed, error %d\n", | ||
802 | ret); | ||
803 | return ret; | ||
804 | } | ||
805 | |||
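/*
 * Checksum sketch: a well-formed image sums, as big-endian 32-bit words,
 * to 0xffffffff. A host-side check mirroring the loop in t4_load_fw():
 */
static bool example_fw_csum_ok(const u8 *img, unsigned int size)
{
	const __be32 *p = (const __be32 *)img;
	u32 sum = 0;
	unsigned int i;

	for (i = 0; i < size / sizeof(u32); i++)
		sum += ntohl(p[i]);
	return sum == 0xffffffff;
}
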
806 | #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ | ||
807 | FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG) | ||
808 | |||
809 | /** | ||
810 | * t4_link_start - apply link configuration to MAC/PHY | ||
811 | * @adap: the adapter | ||
812 | * @mbox: mailbox to use for the FW command; @port: the port id | ||
813 | * @lc: the requested link configuration | ||
814 | * | ||
815 | * Set up a port's MAC and PHY according to a desired link configuration. | ||
816 | * - If the PHY can auto-negotiate first decide what to advertise, then | ||
817 | * enable/disable auto-negotiation as desired, and reset. | ||
818 | * - If the PHY does not auto-negotiate just reset it. | ||
819 | * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, | ||
820 | * otherwise do it later based on the outcome of auto-negotiation. | ||
821 | */ | ||
822 | int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, | ||
823 | struct link_config *lc) | ||
824 | { | ||
825 | struct fw_port_cmd c; | ||
826 | unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO); | ||
827 | |||
828 | lc->link_ok = 0; | ||
829 | if (lc->requested_fc & PAUSE_RX) | ||
830 | fc |= FW_PORT_CAP_FC_RX; | ||
831 | if (lc->requested_fc & PAUSE_TX) | ||
832 | fc |= FW_PORT_CAP_FC_TX; | ||
833 | |||
834 | memset(&c, 0, sizeof(c)); | ||
835 | c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | | ||
836 | FW_CMD_EXEC | FW_PORT_CMD_PORTID(port)); | ||
837 | c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | | ||
838 | FW_LEN16(c)); | ||
839 | |||
840 | if (!(lc->supported & FW_PORT_CAP_ANEG)) { | ||
841 | c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc); | ||
842 | lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); | ||
843 | } else if (lc->autoneg == AUTONEG_DISABLE) { | ||
844 | c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi); | ||
845 | lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); | ||
846 | } else | ||
847 | c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi); | ||
848 | |||
849 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
850 | } | ||
851 | |||
852 | /** | ||
853 | * t4_restart_aneg - restart autonegotiation | ||
854 | * @adap: the adapter | ||
855 | * @mbox: mbox to use for the FW command | ||
856 | * @port: the port id | ||
857 | * | ||
858 | * Restarts autonegotiation for the selected port. | ||
859 | */ | ||
860 | int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) | ||
861 | { | ||
862 | struct fw_port_cmd c; | ||
863 | |||
864 | memset(&c, 0, sizeof(c)); | ||
865 | c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | | ||
866 | FW_CMD_EXEC | FW_PORT_CMD_PORTID(port)); | ||
867 | c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | | ||
868 | FW_LEN16(c)); | ||
869 | c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG); | ||
870 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
871 | } | ||
872 | |||
873 | /** | ||
874 | * t4_set_vlan_accel - configure HW VLAN extraction | ||
875 | * @adap: the adapter | ||
876 | * @ports: bitmap of adapter ports to operate on | ||
877 | * @on: enable (1) or disable (0) HW VLAN extraction | ||
878 | * | ||
879 | * Enables or disables HW extraction of VLAN tags for the ports specified | ||
880 | * by @ports. @ports is a bitmap with the ith bit designating the port | ||
881 | * associated with the ith adapter channel. | ||
882 | */ | ||
883 | void t4_set_vlan_accel(struct adapter *adap, unsigned int ports, int on) | ||
884 | { | ||
885 | ports <<= VLANEXTENABLE_SHIFT; | ||
886 | t4_set_reg_field(adap, TP_OUT_CONFIG, ports, on ? ports : 0); | ||
887 | } | ||
888 | |||
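/*
 * Usage sketch: turn on HW VLAN tag extraction for channels 0 and 2 only;
 * bit i of the @ports map selects the port on adapter channel i.
 */
static void example_enable_vlan_extract(struct adapter *adap)
{
	t4_set_vlan_accel(adap, (1 << 0) | (1 << 2), 1);
}
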
889 | struct intr_info { | ||
890 | unsigned int mask; /* bits to check in interrupt status */ | ||
891 | const char *msg; /* message to print or NULL */ | ||
892 | short stat_idx; /* stat counter to increment or -1 */ | ||
893 | unsigned short fatal; /* whether the condition reported is fatal */ | ||
894 | }; | ||
895 | |||
896 | /** | ||
897 | * t4_handle_intr_status - table-driven interrupt handler | ||
898 | * @adapter: the adapter that generated the interrupt | ||
899 | * @reg: the interrupt status register to process | ||
900 | * @acts: table of interrupt actions | ||
901 | * | ||
902 | * A table-driven interrupt handler that applies a set of masks to an | ||
903 | * interrupt status word and performs the corresponding actions if the | ||
904 | * interrupts described by the mask have occurred. The actions include | ||
905 | * optionally emitting a warning or alert message. The table is terminated | ||
906 | * by an entry specifying mask 0. Returns the number of fatal interrupt | ||
907 | * conditions. | ||
908 | */ | ||
909 | static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg, | ||
910 | const struct intr_info *acts) | ||
911 | { | ||
912 | int fatal = 0; | ||
913 | unsigned int mask = 0; | ||
914 | unsigned int status = t4_read_reg(adapter, reg); | ||
915 | |||
916 | for ( ; acts->mask; ++acts) { | ||
917 | if (!(status & acts->mask)) | ||
918 | continue; | ||
919 | if (acts->fatal) { | ||
920 | fatal++; | ||
921 | dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg, | ||
922 | status & acts->mask); | ||
923 | } else if (acts->msg && printk_ratelimit()) | ||
924 | dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg, | ||
925 | status & acts->mask); | ||
926 | mask |= acts->mask; | ||
927 | } | ||
928 | status &= mask; | ||
929 | if (status) /* clear processed interrupts */ | ||
930 | t4_write_reg(adapter, reg, status); | ||
931 | return fatal; | ||
932 | } | ||
933 | |||
934 | /* | ||
935 | * Interrupt handler for the PCIE module. | ||
936 | */ | ||
937 | static void pcie_intr_handler(struct adapter *adapter) | ||
938 | { | ||
939 | static struct intr_info sysbus_intr_info[] = { | ||
940 | { RNPP, "RXNP array parity error", -1, 1 }, | ||
941 | { RPCP, "RXPC array parity error", -1, 1 }, | ||
942 | { RCIP, "RXCIF array parity error", -1, 1 }, | ||
943 | { RCCP, "Rx completions control array parity error", -1, 1 }, | ||
944 | { RFTP, "RXFT array parity error", -1, 1 }, | ||
945 | { 0 } | ||
946 | }; | ||
947 | static struct intr_info pcie_port_intr_info[] = { | ||
948 | { TPCP, "TXPC array parity error", -1, 1 }, | ||
949 | { TNPP, "TXNP array parity error", -1, 1 }, | ||
950 | { TFTP, "TXFT array parity error", -1, 1 }, | ||
951 | { TCAP, "TXCA array parity error", -1, 1 }, | ||
952 | { TCIP, "TXCIF array parity error", -1, 1 }, | ||
953 | { RCAP, "RXCA array parity error", -1, 1 }, | ||
954 | { OTDD, "outbound request TLP discarded", -1, 1 }, | ||
955 | { RDPE, "Rx data parity error", -1, 1 }, | ||
956 | { TDUE, "Tx uncorrectable data error", -1, 1 }, | ||
957 | { 0 } | ||
958 | }; | ||
959 | static struct intr_info pcie_intr_info[] = { | ||
960 | { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, | ||
961 | { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, | ||
962 | { MSIDATAPERR, "MSI data parity error", -1, 1 }, | ||
963 | { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, | ||
964 | { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, | ||
965 | { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, | ||
966 | { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, | ||
967 | { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 }, | ||
968 | { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 }, | ||
969 | { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, | ||
970 | { CCNTPERR, "PCI CMD channel count parity error", -1, 1 }, | ||
971 | { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, | ||
972 | { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, | ||
973 | { DCNTPERR, "PCI DMA channel count parity error", -1, 1 }, | ||
974 | { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, | ||
975 | { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, | ||
976 | { HCNTPERR, "PCI HMA channel count parity error", -1, 1 }, | ||
977 | { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, | ||
978 | { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, | ||
979 | { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, | ||
980 | { FIDPERR, "PCI FID parity error", -1, 1 }, | ||
981 | { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 }, | ||
982 | { MATAGPERR, "PCI MA tag parity error", -1, 1 }, | ||
983 | { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, | ||
984 | { RXCPLPERR, "PCI Rx completion parity error", -1, 1 }, | ||
985 | { RXWRPERR, "PCI Rx write parity error", -1, 1 }, | ||
986 | { RPLPERR, "PCI replay buffer parity error", -1, 1 }, | ||
987 | { PCIESINT, "PCI core secondary fault", -1, 1 }, | ||
988 | { PCIEPINT, "PCI core primary fault", -1, 1 }, | ||
989 | { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 }, | ||
990 | { 0 } | ||
991 | }; | ||
992 | |||
993 | int fat; | ||
994 | |||
995 | fat = t4_handle_intr_status(adapter, | ||
996 | PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, | ||
997 | sysbus_intr_info) + | ||
998 | t4_handle_intr_status(adapter, | ||
999 | PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, | ||
1000 | pcie_port_intr_info) + | ||
1001 | t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info); | ||
1002 | if (fat) | ||
1003 | t4_fatal_err(adapter); | ||
1004 | } | ||
1005 | |||
1006 | /* | ||
1007 | * TP interrupt handler. | ||
1008 | */ | ||
1009 | static void tp_intr_handler(struct adapter *adapter) | ||
1010 | { | ||
1011 | static struct intr_info tp_intr_info[] = { | ||
1012 | { 0x3fffffff, "TP parity error", -1, 1 }, | ||
1013 | { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, | ||
1014 | { 0 } | ||
1015 | }; | ||
1016 | |||
1017 | if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info)) | ||
1018 | t4_fatal_err(adapter); | ||
1019 | } | ||
1020 | |||
1021 | /* | ||
1022 | * SGE interrupt handler. | ||
1023 | */ | ||
1024 | static void sge_intr_handler(struct adapter *adapter) | ||
1025 | { | ||
1026 | u64 v; | ||
1027 | |||
1028 | static struct intr_info sge_intr_info[] = { | ||
1029 | { ERR_CPL_EXCEED_IQE_SIZE, | ||
1030 | "SGE received CPL exceeding IQE size", -1, 1 }, | ||
1031 | { ERR_INVALID_CIDX_INC, | ||
1032 | "SGE GTS CIDX increment too large", -1, 0 }, | ||
1033 | { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, | ||
1034 | { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 }, | ||
1035 | { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, | ||
1036 | "SGE IQID > 1023 received CPL for FL", -1, 0 }, | ||
1037 | { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, | ||
1038 | 0 }, | ||
1039 | { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, | ||
1040 | 0 }, | ||
1041 | { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, | ||
1042 | 0 }, | ||
1043 | { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, | ||
1044 | 0 }, | ||
1045 | { ERR_ING_CTXT_PRIO, | ||
1046 | "SGE too many priority ingress contexts", -1, 0 }, | ||
1047 | { ERR_EGR_CTXT_PRIO, | ||
1048 | "SGE too many priority egress contexts", -1, 0 }, | ||
1049 | { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, | ||
1050 | { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, | ||
1051 | { 0 } | ||
1052 | }; | ||
1053 | |||
1054 | v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) | | ||
1055 | ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32); | ||
1056 | if (v) { | ||
1057 | dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n", | ||
1058 | (unsigned long long)v); | ||
1059 | t4_write_reg(adapter, SGE_INT_CAUSE1, v); | ||
1060 | t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32); | ||
1061 | } | ||
1062 | |||
1063 | if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) || | ||
1064 | v != 0) | ||
1065 | t4_fatal_err(adapter); | ||
1066 | } | ||
1067 | |||
1068 | /* | ||
1069 | * CIM interrupt handler. | ||
1070 | */ | ||
1071 | static void cim_intr_handler(struct adapter *adapter) | ||
1072 | { | ||
1073 | static struct intr_info cim_intr_info[] = { | ||
1074 | { PREFDROPINT, "CIM control register prefetch drop", -1, 1 }, | ||
1075 | { OBQPARERR, "CIM OBQ parity error", -1, 1 }, | ||
1076 | { IBQPARERR, "CIM IBQ parity error", -1, 1 }, | ||
1077 | { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 }, | ||
1078 | { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 }, | ||
1079 | { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 }, | ||
1080 | { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 }, | ||
1081 | { 0 } | ||
1082 | }; | ||
1083 | static struct intr_info cim_upintr_info[] = { | ||
1084 | { RSVDSPACEINT, "CIM reserved space access", -1, 1 }, | ||
1085 | { ILLTRANSINT, "CIM illegal transaction", -1, 1 }, | ||
1086 | { ILLWRINT, "CIM illegal write", -1, 1 }, | ||
1087 | { ILLRDINT, "CIM illegal read", -1, 1 }, | ||
1088 | { ILLRDBEINT, "CIM illegal read BE", -1, 1 }, | ||
1089 | { ILLWRBEINT, "CIM illegal write BE", -1, 1 }, | ||
1090 | { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 }, | ||
1091 | { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 }, | ||
1092 | { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 }, | ||
1093 | { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 }, | ||
1094 | { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 }, | ||
1095 | { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 }, | ||
1096 | { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 }, | ||
1097 | { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 }, | ||
1098 | { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 }, | ||
1099 | { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 }, | ||
1100 | { SGLRDCTLINT, "CIM single read from CTL space", -1, 1 }, | ||
1101 | { SGLWRCTLINT, "CIM single write to CTL space", -1, 1 }, | ||
1102 | { BLKRDCTLINT, "CIM block read from CTL space", -1, 1 }, | ||
1103 | { BLKWRCTLINT, "CIM block write to CTL space", -1, 1 }, | ||
1104 | { SGLRDPLINT, "CIM single read from PL space", -1, 1 }, | ||
1105 | { SGLWRPLINT, "CIM single write to PL space", -1, 1 }, | ||
1106 | { BLKRDPLINT, "CIM block read from PL space", -1, 1 }, | ||
1107 | { BLKWRPLINT, "CIM block write to PL space", -1, 1 }, | ||
1108 | { REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 }, | ||
1109 | { RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 }, | ||
1110 | { TIMEOUTINT, "CIM PIF timeout", -1, 1 }, | ||
1111 | { TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 }, | ||
1112 | { 0 } | ||
1113 | }; | ||
1114 | |||
1115 | int fat; | ||
1116 | |||
1117 | fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE, | ||
1118 | cim_intr_info) + | ||
1119 | t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE, | ||
1120 | cim_upintr_info); | ||
1121 | if (fat) | ||
1122 | t4_fatal_err(adapter); | ||
1123 | } | ||
1124 | |||
1125 | /* | ||
1126 | * ULP RX interrupt handler. | ||
1127 | */ | ||
1128 | static void ulprx_intr_handler(struct adapter *adapter) | ||
1129 | { | ||
1130 | static struct intr_info ulprx_intr_info[] = { | ||
1131 | { 0x7fffff, "ULPRX parity error", -1, 1 }, | ||
1132 | { 0 } | ||
1133 | }; | ||
1134 | |||
1135 | if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info)) | ||
1136 | t4_fatal_err(adapter); | ||
1137 | } | ||
1138 | |||
1139 | /* | ||
1140 | * ULP TX interrupt handler. | ||
1141 | */ | ||
1142 | static void ulptx_intr_handler(struct adapter *adapter) | ||
1143 | { | ||
1144 | static struct intr_info ulptx_intr_info[] = { | ||
1145 | { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, | ||
1146 | 0 }, | ||
1147 | { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, | ||
1148 | 0 }, | ||
1149 | { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1, | ||
1150 | 0 }, | ||
1151 | { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1, | ||
1152 | 0 }, | ||
1153 | { 0xfffffff, "ULPTX parity error", -1, 1 }, | ||
1154 | { 0 } | ||
1155 | }; | ||
1156 | |||
1157 | if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info)) | ||
1158 | t4_fatal_err(adapter); | ||
1159 | } | ||
1160 | |||
1161 | /* | ||
1162 | * PM TX interrupt handler. | ||
1163 | */ | ||
1164 | static void pmtx_intr_handler(struct adapter *adapter) | ||
1165 | { | ||
1166 | static struct intr_info pmtx_intr_info[] = { | ||
1167 | { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, | ||
1168 | { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, | ||
1169 | { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, | ||
1170 | { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, | ||
1171 | { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 }, | ||
1172 | { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, | ||
1173 | { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 }, | ||
1174 | { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, | ||
1175 | { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1 }, | ||
1176 | { 0 } | ||
1177 | }; | ||
1178 | |||
1179 | if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info)) | ||
1180 | t4_fatal_err(adapter); | ||
1181 | } | ||
1182 | |||
1183 | /* | ||
1184 | * PM RX interrupt handler. | ||
1185 | */ | ||
1186 | static void pmrx_intr_handler(struct adapter *adapter) | ||
1187 | { | ||
1188 | static struct intr_info pmrx_intr_info[] = { | ||
1189 | { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, | ||
1190 | { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 }, | ||
1191 | { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, | ||
1192 | { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 }, | ||
1193 | { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, | ||
1194 | { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1 }, | ||
1195 | { 0 } | ||
1196 | }; | ||
1197 | |||
1198 | if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info)) | ||
1199 | t4_fatal_err(adapter); | ||
1200 | } | ||
1201 | |||
1202 | /* | ||
1203 | * CPL switch interrupt handler. | ||
1204 | */ | ||
1205 | static void cplsw_intr_handler(struct adapter *adapter) | ||
1206 | { | ||
1207 | static struct intr_info cplsw_intr_info[] = { | ||
1208 | { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, | ||
1209 | { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, | ||
1210 | { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, | ||
1211 | { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, | ||
1212 | { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, | ||
1213 | { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, | ||
1214 | { 0 } | ||
1215 | }; | ||
1216 | |||
1217 | if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info)) | ||
1218 | t4_fatal_err(adapter); | ||
1219 | } | ||
1220 | |||
1221 | /* | ||
1222 | * LE interrupt handler. | ||
1223 | */ | ||
1224 | static void le_intr_handler(struct adapter *adap) | ||
1225 | { | ||
1226 | static struct intr_info le_intr_info[] = { | ||
1227 | { LIPMISS, "LE LIP miss", -1, 0 }, | ||
1228 | { LIP0, "LE 0 LIP error", -1, 0 }, | ||
1229 | { PARITYERR, "LE parity error", -1, 1 }, | ||
1230 | { UNKNOWNCMD, "LE unknown command", -1, 1 }, | ||
1231 | { REQQPARERR, "LE request queue parity error", -1, 1 }, | ||
1232 | { 0 } | ||
1233 | }; | ||
1234 | |||
1235 | if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info)) | ||
1236 | t4_fatal_err(adap); | ||
1237 | } | ||
1238 | |||
1239 | /* | ||
1240 | * MPS interrupt handler. | ||
1241 | */ | ||
1242 | static void mps_intr_handler(struct adapter *adapter) | ||
1243 | { | ||
1244 | static struct intr_info mps_rx_intr_info[] = { | ||
1245 | { 0xffffff, "MPS Rx parity error", -1, 1 }, | ||
1246 | { 0 } | ||
1247 | }; | ||
1248 | static struct intr_info mps_tx_intr_info[] = { | ||
1249 | { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 }, | ||
1250 | { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, | ||
1251 | { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 }, | ||
1252 | { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 }, | ||
1253 | { BUBBLE, "MPS Tx underflow", -1, 1 }, | ||
1254 | { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 }, | ||
1255 | { FRMERR, "MPS Tx framing error", -1, 1 }, | ||
1256 | { 0 } | ||
1257 | }; | ||
1258 | static struct intr_info mps_trc_intr_info[] = { | ||
1259 | { FILTMEM, "MPS TRC filter parity error", -1, 1 }, | ||
1260 | { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 }, | ||
1261 | { MISCPERR, "MPS TRC misc parity error", -1, 1 }, | ||
1262 | { 0 } | ||
1263 | }; | ||
1264 | static struct intr_info mps_stat_sram_intr_info[] = { | ||
1265 | { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, | ||
1266 | { 0 } | ||
1267 | }; | ||
1268 | static struct intr_info mps_stat_tx_intr_info[] = { | ||
1269 | { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, | ||
1270 | { 0 } | ||
1271 | }; | ||
1272 | static struct intr_info mps_stat_rx_intr_info[] = { | ||
1273 | { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, | ||
1274 | { 0 } | ||
1275 | }; | ||
1276 | static struct intr_info mps_cls_intr_info[] = { | ||
1277 | { MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, | ||
1278 | { MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, | ||
1279 | { HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, | ||
1280 | { 0 } | ||
1281 | }; | ||
1282 | |||
1283 | int fat; | ||
1284 | |||
1285 | fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE, | ||
1286 | mps_rx_intr_info) + | ||
1287 | t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE, | ||
1288 | mps_tx_intr_info) + | ||
1289 | t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE, | ||
1290 | mps_trc_intr_info) + | ||
1291 | t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM, | ||
1292 | mps_stat_sram_intr_info) + | ||
1293 | t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO, | ||
1294 | mps_stat_tx_intr_info) + | ||
1295 | t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO, | ||
1296 | mps_stat_rx_intr_info) + | ||
1297 | t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE, | ||
1298 | mps_cls_intr_info); | ||
1299 | |||
1300 | t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT | | ||
1301 | RXINT | TXINT | STATINT); | ||
1302 | t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */ | ||
1303 | if (fat) | ||
1304 | t4_fatal_err(adapter); | ||
1305 | } | ||
1306 | |||
1307 | #define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE) | ||
1308 | |||
1309 | /* | ||
1310 | * EDC/MC interrupt handler. | ||
1311 | */ | ||
1312 | static void mem_intr_handler(struct adapter *adapter, int idx) | ||
1313 | { | ||
1314 | static const char name[3][5] = { "EDC0", "EDC1", "MC" }; | ||
1315 | |||
1316 | unsigned int addr, cnt_addr, v; | ||
1317 | |||
1318 | if (idx <= MEM_EDC1) { | ||
1319 | addr = EDC_REG(EDC_INT_CAUSE, idx); | ||
1320 | cnt_addr = EDC_REG(EDC_ECC_STATUS, idx); | ||
1321 | } else { | ||
1322 | addr = MC_INT_CAUSE; | ||
1323 | cnt_addr = MC_ECC_STATUS; | ||
1324 | } | ||
1325 | |||
1326 | v = t4_read_reg(adapter, addr) & MEM_INT_MASK; | ||
1327 | if (v & PERR_INT_CAUSE) | ||
1328 | dev_alert(adapter->pdev_dev, "%s FIFO parity error\n", | ||
1329 | name[idx]); | ||
1330 | if (v & ECC_CE_INT_CAUSE) { | ||
1331 | u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr)); | ||
1332 | |||
1333 | t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK); | ||
1334 | if (printk_ratelimit()) | ||
1335 | dev_warn(adapter->pdev_dev, | ||
1336 | "%u %s correctable ECC data error%s\n", | ||
1337 | cnt, name[idx], cnt > 1 ? "s" : ""); | ||
1338 | } | ||
1339 | if (v & ECC_UE_INT_CAUSE) | ||
1340 | dev_alert(adapter->pdev_dev, | ||
1341 | "%s uncorrectable ECC data error\n", name[idx]); | ||
1342 | |||
1343 | t4_write_reg(adapter, addr, v); | ||
1344 | if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE)) | ||
1345 | t4_fatal_err(adapter); | ||
1346 | } | ||
1347 | |||
1348 | /* | ||
1349 | * MA interrupt handler. | ||
1350 | */ | ||
1351 | static void ma_intr_handler(struct adapter *adap) | ||
1352 | { | ||
1353 | u32 v, status = t4_read_reg(adap, MA_INT_CAUSE); | ||
1354 | |||
1355 | if (status & MEM_PERR_INT_CAUSE) | ||
1356 | dev_alert(adap->pdev_dev, | ||
1357 | "MA parity error, parity status %#x\n", | ||
1358 | t4_read_reg(adap, MA_PARITY_ERROR_STATUS)); | ||
1359 | if (status & MEM_WRAP_INT_CAUSE) { | ||
1360 | v = t4_read_reg(adap, MA_INT_WRAP_STATUS); | ||
1361 | dev_alert(adap->pdev_dev, "MA address wrap-around error by " | ||
1362 | "client %u to address %#x\n", | ||
1363 | MEM_WRAP_CLIENT_NUM_GET(v), | ||
1364 | MEM_WRAP_ADDRESS_GET(v) << 4); | ||
1365 | } | ||
1366 | t4_write_reg(adap, MA_INT_CAUSE, status); | ||
1367 | t4_fatal_err(adap); | ||
1368 | } | ||
1369 | |||
1370 | /* | ||
1371 | * SMB interrupt handler. | ||
1372 | */ | ||
1373 | static void smb_intr_handler(struct adapter *adap) | ||
1374 | { | ||
1375 | static struct intr_info smb_intr_info[] = { | ||
1376 | { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, | ||
1377 | { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, | ||
1378 | { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, | ||
1379 | { 0 } | ||
1380 | }; | ||
1381 | |||
1382 | if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info)) | ||
1383 | t4_fatal_err(adap); | ||
1384 | } | ||
1385 | |||
1386 | /* | ||
1387 | * NC-SI interrupt handler. | ||
1388 | */ | ||
1389 | static void ncsi_intr_handler(struct adapter *adap) | ||
1390 | { | ||
1391 | static struct intr_info ncsi_intr_info[] = { | ||
1392 | { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, | ||
1393 | { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, | ||
1394 | { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, | ||
1395 | { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, | ||
1396 | { 0 } | ||
1397 | }; | ||
1398 | |||
1399 | if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info)) | ||
1400 | t4_fatal_err(adap); | ||
1401 | } | ||
1402 | |||
1403 | /* | ||
1404 | * XGMAC interrupt handler. | ||
1405 | */ | ||
1406 | static void xgmac_intr_handler(struct adapter *adap, int port) | ||
1407 | { | ||
1408 | u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); | ||
1409 | |||
1410 | v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; | ||
1411 | if (!v) | ||
1412 | return; | ||
1413 | |||
1414 | if (v & TXFIFO_PRTY_ERR) | ||
1415 | dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n", | ||
1416 | port); | ||
1417 | if (v & RXFIFO_PRTY_ERR) | ||
1418 | dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n", | ||
1419 | port); | ||
1420 | t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v); | ||
1421 | t4_fatal_err(adap); | ||
1422 | } | ||
1423 | |||
1424 | /* | ||
1425 | * PL interrupt handler. | ||
1426 | */ | ||
1427 | static void pl_intr_handler(struct adapter *adap) | ||
1428 | { | ||
1429 | static struct intr_info pl_intr_info[] = { | ||
1430 | { FATALPERR, "T4 fatal parity error", -1, 1 }, | ||
1431 | { PERRVFID, "PL VFID_MAP parity error", -1, 1 }, | ||
1432 | { 0 } | ||
1433 | }; | ||
1434 | |||
1435 | if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info)) | ||
1436 | t4_fatal_err(adap); | ||
1437 | } | ||
1438 | |||
1439 | #define PF_INTR_MASK (PFSW | PFCIM) | ||
1440 | #define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \ | ||
1441 | EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \ | ||
1442 | CPL_SWITCH | SGE | ULP_TX) | ||
1443 | |||
1444 | /** | ||
1445 | * t4_slow_intr_handler - control path interrupt handler | ||
1446 | * @adapter: the adapter | ||
1447 | * | ||
1448 | * T4 interrupt handler for non-data global interrupt events, e.g., errors. | ||
1449 | * It is called 'slow' because it involves register reads, while data | ||
1450 | * interrupts typically don't involve any MMIOs. | ||
1451 | */ | ||
1452 | int t4_slow_intr_handler(struct adapter *adapter) | ||
1453 | { | ||
1454 | u32 cause = t4_read_reg(adapter, PL_INT_CAUSE); | ||
1455 | |||
1456 | if (!(cause & GLBL_INTR_MASK)) | ||
1457 | return 0; | ||
1458 | if (cause & CIM) | ||
1459 | cim_intr_handler(adapter); | ||
1460 | if (cause & MPS) | ||
1461 | mps_intr_handler(adapter); | ||
1462 | if (cause & NCSI) | ||
1463 | ncsi_intr_handler(adapter); | ||
1464 | if (cause & PL) | ||
1465 | pl_intr_handler(adapter); | ||
1466 | if (cause & SMB) | ||
1467 | smb_intr_handler(adapter); | ||
1468 | if (cause & XGMAC0) | ||
1469 | xgmac_intr_handler(adapter, 0); | ||
1470 | if (cause & XGMAC1) | ||
1471 | xgmac_intr_handler(adapter, 1); | ||
1472 | if (cause & XGMAC_KR0) | ||
1473 | xgmac_intr_handler(adapter, 2); | ||
1474 | if (cause & XGMAC_KR1) | ||
1475 | xgmac_intr_handler(adapter, 3); | ||
1476 | if (cause & PCIE) | ||
1477 | pcie_intr_handler(adapter); | ||
1478 | if (cause & MC) | ||
1479 | mem_intr_handler(adapter, MEM_MC); | ||
1480 | if (cause & EDC0) | ||
1481 | mem_intr_handler(adapter, MEM_EDC0); | ||
1482 | if (cause & EDC1) | ||
1483 | mem_intr_handler(adapter, MEM_EDC1); | ||
1484 | if (cause & LE) | ||
1485 | le_intr_handler(adapter); | ||
1486 | if (cause & TP) | ||
1487 | tp_intr_handler(adapter); | ||
1488 | if (cause & MA) | ||
1489 | ma_intr_handler(adapter); | ||
1490 | if (cause & PM_TX) | ||
1491 | pmtx_intr_handler(adapter); | ||
1492 | if (cause & PM_RX) | ||
1493 | pmrx_intr_handler(adapter); | ||
1494 | if (cause & ULP_RX) | ||
1495 | ulprx_intr_handler(adapter); | ||
1496 | if (cause & CPL_SWITCH) | ||
1497 | cplsw_intr_handler(adapter); | ||
1498 | if (cause & SGE) | ||
1499 | sge_intr_handler(adapter); | ||
1500 | if (cause & ULP_TX) | ||
1501 | ulptx_intr_handler(adapter); | ||
1502 | |||
1503 | /* Clear the interrupts just processed for which we are the master. */ | ||
1504 | t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK); | ||
1505 | (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */ | ||
1506 | return 1; | ||
1507 | } | ||
1508 | |||
1509 | /** | ||
1510 | * t4_intr_enable - enable interrupts | ||
1511 | * @adapter: the adapter whose interrupts should be enabled | ||
1512 | * | ||
1513 | * Enable PF-specific interrupts for the calling function and the top-level | ||
1514 | * interrupt concentrator for global interrupts. Interrupts are already | ||
1515 | * enabled at each module; here we just enable the roots of the interrupt | ||
1516 | * hierarchies. | ||
1517 | * | ||
1518 | * Note: this function should be called only when the driver manages | ||
1519 | * non-PF-specific interrupts from the various HW modules. Only one PCI | ||
1520 | * function at a time should be doing this. | ||
1521 | */ | ||
1522 | void t4_intr_enable(struct adapter *adapter) | ||
1523 | { | ||
1524 | u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); | ||
1525 | |||
1526 | t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE | | ||
1527 | ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 | | ||
1528 | ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 | | ||
1529 | ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 | | ||
1530 | ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | | ||
1531 | ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | | ||
1532 | ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR | | ||
1533 | EGRESS_SIZE_ERR); | ||
1534 | t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK); | ||
1535 | t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf); | ||
1536 | } | ||
1537 | |||
1538 | /** | ||
1539 | * t4_intr_disable - disable interrupts | ||
1540 | * @adapter: the adapter whose interrupts should be disabled | ||
1541 | * | ||
1542 | * Disable interrupts. We only disable the top-level interrupt | ||
1543 | * concentrators. The caller must be a PCI function managing global | ||
1544 | * interrupts. | ||
1545 | */ | ||
1546 | void t4_intr_disable(struct adapter *adapter) | ||
1547 | { | ||
1548 | u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); | ||
1549 | |||
1550 | t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0); | ||
1551 | t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0); | ||
1552 | } | ||
1553 | |||
1554 | /** | ||
1555 | * t4_intr_clear - clear all interrupts | ||
1556 | * @adapter: the adapter whose interrupts should be cleared | ||
1557 | * | ||
1558 | * Clears all interrupts. The caller must be a PCI function managing | ||
1559 | * global interrupts. | ||
1560 | */ | ||
1561 | void t4_intr_clear(struct adapter *adapter) | ||
1562 | { | ||
1563 | static const unsigned int cause_reg[] = { | ||
1564 | SGE_INT_CAUSE1, SGE_INT_CAUSE2, SGE_INT_CAUSE3, | ||
1565 | PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, | ||
1566 | PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, | ||
1567 | PCIE_NONFAT_ERR, PCIE_INT_CAUSE, | ||
1568 | MC_INT_CAUSE, | ||
1569 | MA_INT_WRAP_STATUS, MA_PARITY_ERROR_STATUS, MA_INT_CAUSE, | ||
1570 | EDC_INT_CAUSE, EDC_REG(EDC_INT_CAUSE, 1), | ||
1571 | CIM_HOST_INT_CAUSE, CIM_HOST_UPACC_INT_CAUSE, | ||
1572 | MYPF_REG(CIM_PF_HOST_INT_CAUSE), | ||
1573 | TP_INT_CAUSE, | ||
1574 | ULP_RX_INT_CAUSE, ULP_TX_INT_CAUSE, | ||
1575 | PM_RX_INT_CAUSE, PM_TX_INT_CAUSE, | ||
1576 | MPS_RX_PERR_INT_CAUSE, | ||
1577 | CPL_INTR_CAUSE, | ||
1578 | MYPF_REG(PL_PF_INT_CAUSE), | ||
1579 | PL_PL_INT_CAUSE, | ||
1580 | LE_DB_INT_CAUSE, | ||
1581 | }; | ||
1582 | |||
1583 | unsigned int i; | ||
1584 | |||
1585 | for (i = 0; i < ARRAY_SIZE(cause_reg); ++i) | ||
1586 | t4_write_reg(adapter, cause_reg[i], 0xffffffff); | ||
1587 | |||
1588 | t4_write_reg(adapter, PL_INT_CAUSE, GLBL_INTR_MASK); | ||
1589 | (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */ | ||
1590 | } | ||
1591 | |||
1592 | /** | ||
1593 | * hash_mac_addr - return the hash value of a MAC address | ||
1594 | * @addr: the 48-bit Ethernet MAC address | ||
1595 | * | ||
1596 | * Hashes a MAC address according to the hash function used by HW inexact | ||
1597 | * (hash) address matching. | ||
1598 | */ | ||
1599 | static int hash_mac_addr(const u8 *addr) | ||
1600 | { | ||
1601 | u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; | ||
1602 | u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; | ||
1603 | a ^= b; | ||
1604 | a ^= (a >> 12); | ||
1605 | a ^= (a >> 6); | ||
1606 | return a & 0x3f; | ||
1607 | } | ||
1608 | |||
1609 | /** | ||
1610 | * t4_config_rss_range - configure a portion of the RSS mapping table | ||
1611 | * @adapter: the adapter | ||
1612 | * @mbox: mbox to use for the FW command | ||
1613 | * @viid: virtual interface whose RSS subtable is to be written | ||
1614 | * @start: start entry in the table to write | ||
1615 | * @n: how many table entries to write | ||
1616 | * @rspq: values for the response queue lookup table | ||
1617 | * @nrspq: number of values in @rspq | ||
1618 | * | ||
1619 | * Programs the selected part of the VI's RSS mapping table with the | ||
1620 | * provided values. If @nrspq < @n the supplied values are used repeatedly | ||
1621 | * until the full table range is populated. | ||
1622 | * | ||
1623 | * The caller must ensure the values in @rspq are in the range allowed for | ||
1624 | * @viid. | ||
1625 | */ | ||
1626 | int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, | ||
1627 | int start, int n, const u16 *rspq, unsigned int nrspq) | ||
1628 | { | ||
1629 | int ret; | ||
1630 | const u16 *rsp = rspq; | ||
1631 | const u16 *rsp_end = rspq + nrspq; | ||
1632 | struct fw_rss_ind_tbl_cmd cmd; | ||
1633 | |||
1634 | memset(&cmd, 0, sizeof(cmd)); | ||
1635 | cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) | | ||
1636 | FW_CMD_REQUEST | FW_CMD_WRITE | | ||
1637 | FW_RSS_IND_TBL_CMD_VIID(viid)); | ||
1638 | cmd.retval_len16 = htonl(FW_LEN16(cmd)); | ||
1639 | |||
1640 | /* each fw_rss_ind_tbl_cmd takes up to 32 entries */ | ||
1641 | while (n > 0) { | ||
1642 | int nq = min(n, 32); | ||
1643 | __be32 *qp = &cmd.iq0_to_iq2; | ||
1644 | |||
1645 | cmd.niqid = htons(nq); | ||
1646 | cmd.startidx = htons(start); | ||
1647 | |||
1648 | start += nq; | ||
1649 | n -= nq; | ||
1650 | |||
1651 | while (nq > 0) { | ||
1652 | unsigned int v; | ||
1653 | |||
1654 | v = FW_RSS_IND_TBL_CMD_IQ0(*rsp); | ||
1655 | if (++rsp >= rsp_end) | ||
1656 | rsp = rspq; | ||
1657 | v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp); | ||
1658 | if (++rsp >= rsp_end) | ||
1659 | rsp = rspq; | ||
1660 | v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp); | ||
1661 | if (++rsp >= rsp_end) | ||
1662 | rsp = rspq; | ||
1663 | |||
1664 | *qp++ = htonl(v); | ||
1665 | nq -= 3; | ||
1666 | } | ||
1667 | |||
1668 | ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); | ||
1669 | if (ret) | ||
1670 | return ret; | ||
1671 | } | ||
1672 | return 0; | ||
1673 | } | ||
1674 | |||
1675 | /** | ||
1676 | * t4_config_glbl_rss - configure the global RSS mode | ||
1677 | * @adapter: the adapter | ||
1678 | * @mbox: mbox to use for the FW command | ||
1679 | * @mode: global RSS mode | ||
1680 | * @flags: mode-specific flags | ||
1681 | * | ||
1682 | * Sets the global RSS mode. | ||
1683 | */ | ||
1684 | int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, | ||
1685 | unsigned int flags) | ||
1686 | { | ||
1687 | struct fw_rss_glb_config_cmd c; | ||
1688 | |||
1689 | memset(&c, 0, sizeof(c)); | ||
1690 | c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | | ||
1691 | FW_CMD_REQUEST | FW_CMD_WRITE); | ||
1692 | c.retval_len16 = htonl(FW_LEN16(c)); | ||
1693 | if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) { | ||
1694 | c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); | ||
1695 | } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { | ||
1696 | c.u.basicvirtual.mode_pkd = | ||
1697 | htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); | ||
1698 | c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags); | ||
1699 | } else | ||
1700 | return -EINVAL; | ||
1701 | return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); | ||
1702 | } | ||
1703 | |||
1704 | /* Read an RSS table row */ | ||
1705 | static int rd_rss_row(struct adapter *adap, int row, u32 *val) | ||
1706 | { | ||
1707 | t4_write_reg(adap, TP_RSS_LKP_TABLE, 0xfff00000 | row); | ||
1708 | return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE, LKPTBLROWVLD, 1, | ||
1709 | 5, 0, val); | ||
1710 | } | ||
1711 | |||
1712 | /** | ||
1713 | * t4_read_rss - read the contents of the RSS mapping table | ||
1714 | * @adapter: the adapter | ||
1715 | * @map: holds the contents of the RSS mapping table | ||
1716 | * | ||
1717 | * Reads the contents of the RSS hash->queue mapping table. | ||
1718 | */ | ||
1719 | int t4_read_rss(struct adapter *adapter, u16 *map) | ||
1720 | { | ||
1721 | u32 val; | ||
1722 | int i, ret; | ||
1723 | |||
1724 | for (i = 0; i < RSS_NENTRIES / 2; ++i) { | ||
1725 | ret = rd_rss_row(adapter, i, &val); | ||
1726 | if (ret) | ||
1727 | return ret; | ||
1728 | *map++ = LKPTBLQUEUE0_GET(val); | ||
1729 | *map++ = LKPTBLQUEUE1_GET(val); | ||
1730 | } | ||
1731 | return 0; | ||
1732 | } | ||
1733 | |||
1734 | /** | ||
1735 | * t4_tp_get_tcp_stats - read TP's TCP MIB counters | ||
1736 | * @adap: the adapter | ||
1737 | * @v4: holds the TCP/IP counter values | ||
1738 | * @v6: holds the TCP/IPv6 counter values | ||
1739 | * | ||
1740 | * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters. | ||
1741 | * Either @v4 or @v6 may be %NULL to skip the corresponding stats. | ||
1742 | */ | ||
1743 | void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, | ||
1744 | struct tp_tcp_stats *v6) | ||
1745 | { | ||
1746 | u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1]; | ||
1747 | |||
1748 | #define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST) | ||
1749 | #define STAT(x) val[STAT_IDX(x)] | ||
1750 | #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO)) | ||
1751 | |||
1752 | if (v4) { | ||
1753 | t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, | ||
1754 | ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST); | ||
1755 | v4->tcpOutRsts = STAT(OUT_RST); | ||
1756 | v4->tcpInSegs = STAT64(IN_SEG); | ||
1757 | v4->tcpOutSegs = STAT64(OUT_SEG); | ||
1758 | v4->tcpRetransSegs = STAT64(RXT_SEG); | ||
1759 | } | ||
1760 | if (v6) { | ||
1761 | t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, | ||
1762 | ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST); | ||
1763 | v6->tcpOutRsts = STAT(OUT_RST); | ||
1764 | v6->tcpInSegs = STAT64(IN_SEG); | ||
1765 | v6->tcpOutSegs = STAT64(OUT_SEG); | ||
1766 | v6->tcpRetransSegs = STAT64(RXT_SEG); | ||
1767 | } | ||
1768 | #undef STAT64 | ||
1769 | #undef STAT | ||
1770 | #undef STAT_IDX | ||
1771 | } | ||
1772 | |||
1773 | /** | ||
1774 | * t4_tp_get_err_stats - read TP's error MIB counters | ||
1775 | * @adap: the adapter | ||
1776 | * @st: holds the counter values | ||
1777 | * | ||
1778 | * Returns the values of TP's error counters. | ||
1779 | */ | ||
1780 | void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st) | ||
1781 | { | ||
1782 | t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->macInErrs, | ||
1783 | 12, TP_MIB_MAC_IN_ERR_0); | ||
1784 | t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlCongDrops, | ||
1785 | 8, TP_MIB_TNL_CNG_DROP_0); | ||
1786 | t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlTxDrops, | ||
1787 | 4, TP_MIB_TNL_DROP_0); | ||
1788 | t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->ofldVlanDrops, | ||
1789 | 4, TP_MIB_OFD_VLN_DROP_0); | ||
1790 | t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tcp6InErrs, | ||
1791 | 4, TP_MIB_TCP_V6IN_ERR_0); | ||
1792 | t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, &st->ofldNoNeigh, | ||
1793 | 2, TP_MIB_OFD_ARP_DROP); | ||
1794 | } | ||
1795 | |||
1796 | /** | ||
1797 | * t4_read_mtu_tbl - returns the values in the HW path MTU table | ||
1798 | * @adap: the adapter | ||
1799 | * @mtus: where to store the MTU values | ||
1800 | * @mtu_log: where to store the MTU base-2 log (may be %NULL) | ||
1801 | * | ||
1802 | * Reads the HW path MTU table. | ||
1803 | */ | ||
1804 | void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) | ||
1805 | { | ||
1806 | u32 v; | ||
1807 | int i; | ||
1808 | |||
1809 | for (i = 0; i < NMTUS; ++i) { | ||
1810 | t4_write_reg(adap, TP_MTU_TABLE, | ||
1811 | MTUINDEX(0xff) | MTUVALUE(i)); | ||
1812 | v = t4_read_reg(adap, TP_MTU_TABLE); | ||
1813 | mtus[i] = MTUVALUE_GET(v); | ||
1814 | if (mtu_log) | ||
1815 | mtu_log[i] = MTUWIDTH_GET(v); | ||
1816 | } | ||
1817 | } | ||
1818 | |||
1819 | /** | ||
1820 | * init_cong_ctrl - initialize congestion control parameters | ||
1821 | * @a: the alpha values for congestion control | ||
1822 | * @b: the beta values for congestion control | ||
1823 | * | ||
1824 | * Initialize the congestion control parameters. | ||
1825 | */ | ||
1826 | static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b) | ||
1827 | { | ||
1828 | a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; | ||
1829 | a[9] = 2; | ||
1830 | a[10] = 3; | ||
1831 | a[11] = 4; | ||
1832 | a[12] = 5; | ||
1833 | a[13] = 6; | ||
1834 | a[14] = 7; | ||
1835 | a[15] = 8; | ||
1836 | a[16] = 9; | ||
1837 | a[17] = 10; | ||
1838 | a[18] = 14; | ||
1839 | a[19] = 17; | ||
1840 | a[20] = 21; | ||
1841 | a[21] = 25; | ||
1842 | a[22] = 30; | ||
1843 | a[23] = 35; | ||
1844 | a[24] = 45; | ||
1845 | a[25] = 60; | ||
1846 | a[26] = 80; | ||
1847 | a[27] = 100; | ||
1848 | a[28] = 200; | ||
1849 | a[29] = 300; | ||
1850 | a[30] = 400; | ||
1851 | a[31] = 500; | ||
1852 | |||
1853 | b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; | ||
1854 | b[9] = b[10] = 1; | ||
1855 | b[11] = b[12] = 2; | ||
1856 | b[13] = b[14] = b[15] = b[16] = 3; | ||
1857 | b[17] = b[18] = b[19] = b[20] = b[21] = 4; | ||
1858 | b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; | ||
1859 | b[28] = b[29] = 6; | ||
1860 | b[30] = b[31] = 7; | ||
1861 | } | ||
1862 | |||
1863 | /* The minimum additive increment value for the congestion control table */ | ||
1864 | #define CC_MIN_INCR 2U | ||
1865 | |||
1866 | /** | ||
1867 | * t4_load_mtus - write the MTU and congestion control HW tables | ||
1868 | * @adap: the adapter | ||
1869 | * @mtus: the values for the MTU table | ||
1870 | * @alpha: the values for the congestion control alpha parameter | ||
1871 | * @beta: the values for the congestion control beta parameter | ||
1872 | * | ||
1873 | * Write the HW MTU table with the supplied MTUs and the high-speed | ||
1874 | * congestion control table with the supplied alpha, beta, and MTUs. | ||
1875 | * We write the two tables together because the additive increments | ||
1876 | * depend on the MTUs. | ||
1877 | */ | ||
1878 | void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, | ||
1879 | const unsigned short *alpha, const unsigned short *beta) | ||
1880 | { | ||
1881 | static const unsigned int avg_pkts[NCCTRL_WIN] = { | ||
1882 | 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, | ||
1883 | 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, | ||
1884 | 28672, 40960, 57344, 81920, 114688, 163840, 229376 | ||
1885 | }; | ||
1886 | |||
1887 | unsigned int i, w; | ||
1888 | |||
1889 | for (i = 0; i < NMTUS; ++i) { | ||
1890 | unsigned int mtu = mtus[i]; | ||
1891 | unsigned int log2 = fls(mtu); | ||
1892 | |||
1893 | if (!(mtu & ((1 << log2) >> 2))) /* round */ | ||
1894 | log2--; | ||
1895 | t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) | | ||
1896 | MTUWIDTH(log2) | MTUVALUE(mtu)); | ||
1897 | |||
1898 | for (w = 0; w < NCCTRL_WIN; ++w) { | ||
1899 | unsigned int inc; | ||
1900 | |||
1901 | inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], | ||
1902 | CC_MIN_INCR); | ||
1903 | |||
1904 | t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) | | ||
1905 | (w << 16) | (beta[w] << 13) | inc); | ||
1906 | } | ||
1907 | } | ||
1908 | } | ||
1909 | |||
1910 | /** | ||
1911 | * t4_set_trace_filter - configure one of the tracing filters | ||
1912 | * @adap: the adapter | ||
1913 | * @tp: the desired trace filter parameters | ||
1914 | * @idx: which filter to configure | ||
1915 | * @enable: whether to enable or disable the filter | ||
1916 | * | ||
1917 | * Configures one of the tracing filters available in HW. If @enable is | ||
1918 | * %0 @tp is not examined and may be %NULL. | ||
1919 | */ | ||
1920 | int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, | ||
1921 | int idx, int enable) | ||
1922 | { | ||
1923 | int i, ofst = idx * 4; | ||
1924 | u32 data_reg, mask_reg, cfg; | ||
1925 | u32 multitrc = TRCMULTIFILTER; | ||
1926 | |||
1927 | if (!enable) { | ||
1928 | t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0); | ||
1929 | goto out; | ||
1930 | } | ||
1931 | |||
1932 | if (tp->port > 11 || tp->invert > 1 || tp->skip_len > 0x1f || | ||
1933 | tp->skip_ofst > 0x1f || tp->min_len > 0x1ff || | ||
1934 | tp->snap_len > 9600 || (idx && tp->snap_len > 256)) | ||
1935 | return -EINVAL; | ||
1936 | |||
1937 | if (tp->snap_len > 256) { /* must be tracer 0 */ | ||
1938 | if ((t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 4) | | ||
1939 | t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 8) | | ||
1940 | t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 12)) & TFEN) | ||
1941 | return -EINVAL; /* other tracers are enabled */ | ||
1942 | multitrc = 0; | ||
1943 | } else if (idx) { | ||
1944 | i = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B); | ||
1945 | if (TFCAPTUREMAX_GET(i) > 256 && | ||
1946 | (t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A) & TFEN)) | ||
1947 | return -EINVAL; | ||
1948 | } | ||
1949 | |||
1950 | /* stop the tracer we'll be changing */ | ||
1951 | t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0); | ||
1952 | |||
1953 | /* disable tracing globally if running in the wrong single/multi mode */ | ||
1954 | cfg = t4_read_reg(adap, MPS_TRC_CFG); | ||
1955 | if ((cfg & TRCEN) && multitrc != (cfg & TRCMULTIFILTER)) { | ||
1956 | t4_write_reg(adap, MPS_TRC_CFG, cfg ^ TRCEN); | ||
1957 | t4_read_reg(adap, MPS_TRC_CFG); /* flush */ | ||
1958 | msleep(1); | ||
1959 | if (!(t4_read_reg(adap, MPS_TRC_CFG) & TRCFIFOEMPTY)) | ||
1960 | return -ETIMEDOUT; | ||
1961 | } | ||
1962 | /* | ||
1963 | * At this point tracing is either enabled and in the right mode, or | ||
1964 | * disabled entirely. | ||
1965 | */ | ||
1966 | |||
1967 | idx *= (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH); | ||
1968 | data_reg = MPS_TRC_FILTER0_MATCH + idx; | ||
1969 | mask_reg = MPS_TRC_FILTER0_DONT_CARE + idx; | ||
1970 | |||
1971 | for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { | ||
1972 | t4_write_reg(adap, data_reg, tp->data[i]); | ||
1973 | t4_write_reg(adap, mask_reg, ~tp->mask[i]); | ||
1974 | } | ||
1975 | t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst, | ||
1976 | TFCAPTUREMAX(tp->snap_len) | | ||
1977 | TFMINPKTSIZE(tp->min_len)); | ||
1978 | t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, | ||
1979 | TFOFFSET(tp->skip_ofst) | TFLENGTH(tp->skip_len) | | ||
1980 | TFPORT(tp->port) | TFEN | | ||
1981 | (tp->invert ? TFINVERTMATCH : 0)); | ||
1982 | |||
1983 | cfg &= ~TRCMULTIFILTER; | ||
1984 | t4_write_reg(adap, MPS_TRC_CFG, cfg | TRCEN | multitrc); | ||
1985 | out: t4_read_reg(adap, MPS_TRC_CFG); /* flush */ | ||
1986 | return 0; | ||
1987 | } | ||
1988 | |||
1989 | /** | ||
1990 | * t4_get_trace_filter - query one of the tracing filters | ||
1991 | * @adap: the adapter | ||
1992 | * @tp: the current trace filter parameters | ||
1993 | * @idx: which trace filter to query | ||
1994 | * @enabled: non-zero if the filter is enabled | ||
1995 | * | ||
1996 | * Returns the current settings of one of the HW tracing filters. | ||
1997 | */ | ||
1998 | void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx, | ||
1999 | int *enabled) | ||
2000 | { | ||
2001 | u32 ctla, ctlb; | ||
2002 | int i, ofst = idx * 4; | ||
2003 | u32 data_reg, mask_reg; | ||
2004 | |||
2005 | ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst); | ||
2006 | ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst); | ||
2007 | |||
2008 | *enabled = !!(ctla & TFEN); | ||
2009 | tp->snap_len = TFCAPTUREMAX_GET(ctlb); | ||
2010 | tp->min_len = TFMINPKTSIZE_GET(ctlb); | ||
2011 | tp->skip_ofst = TFOFFSET_GET(ctla); | ||
2012 | tp->skip_len = TFLENGTH_GET(ctla); | ||
2013 | tp->invert = !!(ctla & TFINVERTMATCH); | ||
2014 | tp->port = TFPORT_GET(ctla); | ||
2015 | |||
2016 | ofst = (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH) * idx; | ||
2017 | data_reg = MPS_TRC_FILTER0_MATCH + ofst; | ||
2018 | mask_reg = MPS_TRC_FILTER0_DONT_CARE + ofst; | ||
2019 | |||
2020 | for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { | ||
2021 | tp->mask[i] = ~t4_read_reg(adap, mask_reg); | ||
2022 | tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i]; | ||
2023 | } | ||
2024 | } | ||
2025 | |||
2026 | /** | ||
2027 | * get_mps_bg_map - return the buffer groups associated with a port | ||
2028 | * @adap: the adapter | ||
2029 | * @idx: the port index | ||
2030 | * | ||
2031 | * Returns a bitmap indicating which MPS buffer groups are associated | ||
2032 | * with the given port. Bit i is set if buffer group i is used by the | ||
2033 | * port. | ||
2034 | */ | ||
2035 | static unsigned int get_mps_bg_map(struct adapter *adap, int idx) | ||
2036 | { | ||
2037 | u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL)); | ||
2038 | |||
2039 | if (n == 0) | ||
2040 | return idx == 0 ? 0xf : 0; | ||
2041 | if (n == 1) | ||
2042 | return idx < 2 ? (3 << (2 * idx)) : 0; | ||
2043 | return 1 << idx; | ||
2044 | } | ||
2045 | |||
2046 | /** | ||
2047 | * t4_get_port_stats - collect port statistics | ||
2048 | * @adap: the adapter | ||
2049 | * @idx: the port index | ||
2050 | * @p: the stats structure to fill | ||
2051 | * | ||
2052 | * Collect statistics related to the given port from HW. | ||
2053 | */ | ||
2054 | void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) | ||
2055 | { | ||
2056 | u32 bgmap = get_mps_bg_map(adap, idx); | ||
2057 | |||
2058 | #define GET_STAT(name) \ | ||
2059 | t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L)) | ||
2060 | #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) | ||
2061 | |||
2062 | p->tx_octets = GET_STAT(TX_PORT_BYTES); | ||
2063 | p->tx_frames = GET_STAT(TX_PORT_FRAMES); | ||
2064 | p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST); | ||
2065 | p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST); | ||
2066 | p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST); | ||
2067 | p->tx_error_frames = GET_STAT(TX_PORT_ERROR); | ||
2068 | p->tx_frames_64 = GET_STAT(TX_PORT_64B); | ||
2069 | p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B); | ||
2070 | p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B); | ||
2071 | p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B); | ||
2072 | p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B); | ||
2073 | p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B); | ||
2074 | p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX); | ||
2075 | p->tx_drop = GET_STAT(TX_PORT_DROP); | ||
2076 | p->tx_pause = GET_STAT(TX_PORT_PAUSE); | ||
2077 | p->tx_ppp0 = GET_STAT(TX_PORT_PPP0); | ||
2078 | p->tx_ppp1 = GET_STAT(TX_PORT_PPP1); | ||
2079 | p->tx_ppp2 = GET_STAT(TX_PORT_PPP2); | ||
2080 | p->tx_ppp3 = GET_STAT(TX_PORT_PPP3); | ||
2081 | p->tx_ppp4 = GET_STAT(TX_PORT_PPP4); | ||
2082 | p->tx_ppp5 = GET_STAT(TX_PORT_PPP5); | ||
2083 | p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); | ||
2084 | p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); | ||
2085 | |||
2086 | p->rx_octets = GET_STAT(RX_PORT_BYTES); | ||
2087 | p->rx_frames = GET_STAT(RX_PORT_FRAMES); | ||
2088 | p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); | ||
2089 | p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST); | ||
2090 | p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST); | ||
2091 | p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR); | ||
2092 | p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR); | ||
2093 | p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR); | ||
2094 | p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR); | ||
2095 | p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR); | ||
2096 | p->rx_runt = GET_STAT(RX_PORT_LESS_64B); | ||
2097 | p->rx_frames_64 = GET_STAT(RX_PORT_64B); | ||
2098 | p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B); | ||
2099 | p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B); | ||
2100 | p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B); | ||
2101 | p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B); | ||
2102 | p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B); | ||
2103 | p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX); | ||
2104 | p->rx_pause = GET_STAT(RX_PORT_PAUSE); | ||
2105 | p->rx_ppp0 = GET_STAT(RX_PORT_PPP0); | ||
2106 | p->rx_ppp1 = GET_STAT(RX_PORT_PPP1); | ||
2107 | p->rx_ppp2 = GET_STAT(RX_PORT_PPP2); | ||
2108 | p->rx_ppp3 = GET_STAT(RX_PORT_PPP3); | ||
2109 | p->rx_ppp4 = GET_STAT(RX_PORT_PPP4); | ||
2110 | p->rx_ppp5 = GET_STAT(RX_PORT_PPP5); | ||
2111 | p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); | ||
2112 | p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); | ||
2113 | |||
2114 | p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; | ||
2115 | p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; | ||
2116 | p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; | ||
2117 | p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; | ||
2118 | p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; | ||
2119 | p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; | ||
2120 | p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; | ||
2121 | p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; | ||
2122 | |||
2123 | #undef GET_STAT | ||
2124 | #undef GET_STAT_COM | ||
2125 | } | ||
2126 | |||
2127 | /** | ||
2128 | * t4_get_lb_stats - collect loopback port statistics | ||
2129 | * @adap: the adapter | ||
2130 | * @idx: the loopback port index | ||
2131 | * @p: the stats structure to fill | ||
2132 | * | ||
2133 | * Return HW statistics for the given loopback port. | ||
2134 | */ | ||
2135 | void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p) | ||
2136 | { | ||
2137 | u32 bgmap = get_mps_bg_map(adap, idx); | ||
2138 | |||
2139 | #define GET_STAT(name) \ | ||
2140 | t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)) | ||
2141 | #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) | ||
2142 | |||
2143 | p->octets = GET_STAT(BYTES); | ||
2144 | p->frames = GET_STAT(FRAMES); | ||
2145 | p->bcast_frames = GET_STAT(BCAST); | ||
2146 | p->mcast_frames = GET_STAT(MCAST); | ||
2147 | p->ucast_frames = GET_STAT(UCAST); | ||
2148 | p->error_frames = GET_STAT(ERROR); | ||
2149 | |||
2150 | p->frames_64 = GET_STAT(64B); | ||
2151 | p->frames_65_127 = GET_STAT(65B_127B); | ||
2152 | p->frames_128_255 = GET_STAT(128B_255B); | ||
2153 | p->frames_256_511 = GET_STAT(256B_511B); | ||
2154 | p->frames_512_1023 = GET_STAT(512B_1023B); | ||
2155 | p->frames_1024_1518 = GET_STAT(1024B_1518B); | ||
2156 | p->frames_1519_max = GET_STAT(1519B_MAX); | ||
2157 | p->drop = t4_read_reg(adap, PORT_REG(idx, | ||
2158 | MPS_PORT_STAT_LB_PORT_DROP_FRAMES)); | ||
2159 | |||
2160 | p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0; | ||
2161 | p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0; | ||
2162 | p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0; | ||
2163 | p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0; | ||
2164 | p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0; | ||
2165 | p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0; | ||
2166 | p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0; | ||
2167 | p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0; | ||
2168 | |||
2169 | #undef GET_STAT | ||
2170 | #undef GET_STAT_COM | ||
2171 | } | ||
2172 | |||
2173 | /** | ||
2174 | * t4_wol_magic_enable - enable/disable magic packet WoL | ||
2175 | * @adap: the adapter | ||
2176 | * @port: the physical port index | ||
2177 | * @addr: MAC address expected in magic packets, or %NULL to disable | ||
2178 | * | ||
2179 | * Enables/disables magic packet wake-on-LAN for the selected port. | ||
2180 | */ | ||
2181 | void t4_wol_magic_enable(struct adapter *adap, unsigned int port, | ||
2182 | const u8 *addr) | ||
2183 | { | ||
2184 | if (addr) { | ||
2185 | t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO), | ||
2186 | (addr[2] << 24) | (addr[3] << 16) | | ||
2187 | (addr[4] << 8) | addr[5]); | ||
2188 | t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI), | ||
2189 | (addr[0] << 8) | addr[1]); | ||
2190 | } | ||
2191 | t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN, | ||
2192 | addr ? MAGICEN : 0); | ||
2193 | } | ||
2194 | |||
2195 | /** | ||
2196 | * t4_wol_pat_enable - enable/disable pattern-based WoL | ||
2197 | * @adap: the adapter | ||
2198 | * @port: the physical port index | ||
2199 | * @map: bitmap of which HW pattern filters to set | ||
2200 | * @mask0: byte mask for bytes 0-63 of a packet | ||
2201 | * @mask1: byte mask for bytes 64-127 of a packet | ||
2202 | * @crc: Ethernet CRC for selected bytes | ||
2203 | * @enable: enable/disable switch | ||
2204 | * | ||
2205 | * Sets the pattern filters indicated in @map to mask out the bytes | ||
2206 | * specified in @mask0/@mask1 in received packets and compare the CRC of | ||
2207 | * the resulting packet against @crc. If @enable is %true pattern-based | ||
2208 | * WoL is enabled, otherwise disabled. | ||
2209 | */ | ||
2210 | int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, | ||
2211 | u64 mask0, u64 mask1, unsigned int crc, bool enable) | ||
2212 | { | ||
2213 | int i; | ||
2214 | |||
2215 | if (!enable) { | ||
2216 | t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), | ||
2217 | PATEN, 0); | ||
2218 | return 0; | ||
2219 | } | ||
2220 | if (map > 0xff) | ||
2221 | return -EINVAL; | ||
2222 | |||
2223 | #define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name) | ||
2224 | |||
2225 | t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); | ||
2226 | t4_write_reg(adap, EPIO_REG(DATA2), mask1); | ||
2227 | t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32); | ||
2228 | |||
2229 | for (i = 0; i < NWOL_PAT; i++, map >>= 1) { | ||
2230 | if (!(map & 1)) | ||
2231 | continue; | ||
2232 | |||
2233 | /* write byte masks */ | ||
2234 | t4_write_reg(adap, EPIO_REG(DATA0), mask0); | ||
2235 | t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR); | ||
2236 | t4_read_reg(adap, EPIO_REG(OP)); /* flush */ | ||
2237 | if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY) | ||
2238 | return -ETIMEDOUT; | ||
2239 | |||
2240 | /* write CRC */ | ||
2241 | t4_write_reg(adap, EPIO_REG(DATA0), crc); | ||
2242 | t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR); | ||
2243 | t4_read_reg(adap, EPIO_REG(OP)); /* flush */ | ||
2244 | if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY) | ||
2245 | return -ETIMEDOUT; | ||
2246 | } | ||
2247 | #undef EPIO_REG | ||
2248 | |||
2249 | t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN); | ||
2250 | return 0; | ||
2251 | } | ||
2252 | |||
2253 | #define INIT_CMD(var, cmd, rd_wr) do { \ | ||
2254 | (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \ | ||
2255 | FW_CMD_REQUEST | FW_CMD_##rd_wr); \ | ||
2256 | (var).retval_len16 = htonl(FW_LEN16(var)); \ | ||
2257 | } while (0) | ||
2258 | |||
2259 | /** | ||
2260 | * t4_mdio_rd - read a PHY register through MDIO | ||
2261 | * @adap: the adapter | ||
2262 | * @mbox: mailbox to use for the FW command | ||
2263 | * @phy_addr: the PHY address | ||
2264 | * @mmd: the PHY MMD to access (0 for clause 22 PHYs) | ||
2265 | * @reg: the register to read | ||
2266 | * @valp: where to store the value | ||
2267 | * | ||
2268 | * Issues a FW command through the given mailbox to read a PHY register. | ||
2269 | */ | ||
2270 | int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, | ||
2271 | unsigned int mmd, unsigned int reg, u16 *valp) | ||
2272 | { | ||
2273 | int ret; | ||
2274 | struct fw_ldst_cmd c; | ||
2275 | |||
2276 | memset(&c, 0, sizeof(c)); | ||
2277 | c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | | ||
2278 | FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); | ||
2279 | c.cycles_to_len16 = htonl(FW_LEN16(c)); | ||
2280 | c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | | ||
2281 | FW_LDST_CMD_MMD(mmd)); | ||
2282 | c.u.mdio.raddr = htons(reg); | ||
2283 | |||
2284 | ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); | ||
2285 | if (ret == 0) | ||
2286 | *valp = ntohs(c.u.mdio.rval); | ||
2287 | return ret; | ||
2288 | } | ||
2289 | |||
2290 | /** | ||
2291 | * t4_mdio_wr - write a PHY register through MDIO | ||
2292 | * @adap: the adapter | ||
2293 | * @mbox: mailbox to use for the FW command | ||
2294 | * @phy_addr: the PHY address | ||
2295 | * @mmd: the PHY MMD to access (0 for clause 22 PHYs) | ||
2296 | * @reg: the register to write | ||
2297 | * @valp: value to write | ||
2298 | * | ||
2299 | * Issues a FW command through the given mailbox to write a PHY register. | ||
2300 | */ | ||
2301 | int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, | ||
2302 | unsigned int mmd, unsigned int reg, u16 val) | ||
2303 | { | ||
2304 | struct fw_ldst_cmd c; | ||
2305 | |||
2306 | memset(&c, 0, sizeof(c)); | ||
2307 | c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | | ||
2308 | FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); | ||
2309 | c.cycles_to_len16 = htonl(FW_LEN16(c)); | ||
2310 | c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | | ||
2311 | FW_LDST_CMD_MMD(mmd)); | ||
2312 | c.u.mdio.raddr = htons(reg); | ||
2313 | c.u.mdio.rval = htons(val); | ||
2314 | |||
2315 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
2316 | } | ||
2317 | |||
2318 | /** | ||
2319 | * t4_fw_hello - establish communication with FW | ||
2320 | * @adap: the adapter | ||
2321 | * @mbox: mailbox to use for the FW command | ||
2322 | * @evt_mbox: mailbox to receive async FW events | ||
2323 | * @master: specifies the caller's willingness to be the device master | ||
2324 | * @state: returns the current device state | ||
2325 | * | ||
2326 | * Issues a command to establish communication with FW. | ||
2327 | */ | ||
2328 | int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, | ||
2329 | enum dev_master master, enum dev_state *state) | ||
2330 | { | ||
2331 | int ret; | ||
2332 | struct fw_hello_cmd c; | ||
2333 | |||
2334 | INIT_CMD(c, HELLO, WRITE); | ||
2335 | c.err_to_mbasyncnot = htonl( | ||
2336 | FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | | ||
2337 | FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | | ||
2338 | FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) | | ||
2339 | FW_HELLO_CMD_MBASYNCNOT(evt_mbox)); | ||
2340 | |||
2341 | ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); | ||
2342 | if (ret == 0 && state) { | ||
2343 | u32 v = ntohl(c.err_to_mbasyncnot); | ||
2344 | if (v & FW_HELLO_CMD_INIT) | ||
2345 | *state = DEV_STATE_INIT; | ||
2346 | else if (v & FW_HELLO_CMD_ERR) | ||
2347 | *state = DEV_STATE_ERR; | ||
2348 | else | ||
2349 | *state = DEV_STATE_UNINIT; | ||
2350 | } | ||
2351 | return ret; | ||
2352 | } | ||
2353 | |||
2354 | /** | ||
2355 | * t4_fw_bye - end communication with FW | ||
2356 | * @adap: the adapter | ||
2357 | * @mbox: mailbox to use for the FW command | ||
2358 | * | ||
2359 | * Issues a command to terminate communication with FW. | ||
2360 | */ | ||
2361 | int t4_fw_bye(struct adapter *adap, unsigned int mbox) | ||
2362 | { | ||
2363 | struct fw_bye_cmd c; | ||
2364 | |||
2365 | INIT_CMD(c, BYE, WRITE); | ||
2366 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
2367 | } | ||
2368 | |||
2369 | /** | ||
2370 | * t4_early_init - ask FW to initialize the device | ||
2371 | * @adap: the adapter | ||
2372 | * @mbox: mailbox to use for the FW command | ||
2373 | * | ||
2374 | * Issues a command to FW to partially initialize the device. This | ||
2375 | * performs initialization that generally doesn't depend on user input. | ||
2376 | */ | ||
2377 | int t4_early_init(struct adapter *adap, unsigned int mbox) | ||
2378 | { | ||
2379 | struct fw_initialize_cmd c; | ||
2380 | |||
2381 | INIT_CMD(c, INITIALIZE, WRITE); | ||
2382 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
2383 | } | ||
2384 | |||
2385 | /** | ||
2386 | * t4_fw_reset - issue a reset to FW | ||
2387 | * @adap: the adapter | ||
2388 | * @mbox: mailbox to use for the FW command | ||
2389 | * @reset: specifies the type of reset to perform | ||
2390 | * | ||
2391 | * Issues a reset command of the specified type to FW. | ||
2392 | */ | ||
2393 | int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) | ||
2394 | { | ||
2395 | struct fw_reset_cmd c; | ||
2396 | |||
2397 | INIT_CMD(c, RESET, WRITE); | ||
2398 | c.val = htonl(reset); | ||
2399 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
2400 | } | ||
2401 | |||
2402 | /** | ||
2403 | * t4_query_params - query FW or device parameters | ||
2404 | * @adap: the adapter | ||
2405 | * @mbox: mailbox to use for the FW command | ||
2406 | * @pf: the PF | ||
2407 | * @vf: the VF | ||
2408 | * @nparams: the number of parameters | ||
2409 | * @params: the parameter names | ||
2410 | * @val: the parameter values | ||
2411 | * | ||
2412 | * Reads the value of FW or device parameters. Up to 7 parameters can be | ||
2413 | * queried at once. | ||
2414 | */ | ||
2415 | int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
2416 | unsigned int vf, unsigned int nparams, const u32 *params, | ||
2417 | u32 *val) | ||
2418 | { | ||
2419 | int i, ret; | ||
2420 | struct fw_params_cmd c; | ||
2421 | __be32 *p = &c.param[0].mnem; | ||
2422 | |||
2423 | if (nparams > 7) | ||
2424 | return -EINVAL; | ||
2425 | |||
2426 | memset(&c, 0, sizeof(c)); | ||
2427 | c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | | ||
2428 | FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) | | ||
2429 | FW_PARAMS_CMD_VFN(vf)); | ||
2430 | c.retval_len16 = htonl(FW_LEN16(c)); | ||
2431 | for (i = 0; i < nparams; i++, p += 2) | ||
2432 | *p = htonl(*params++); | ||
2433 | |||
2434 | ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); | ||
2435 | if (ret == 0) | ||
2436 | for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) | ||
2437 | *val++ = ntohl(*p); | ||
2438 | return ret; | ||
2439 | } | ||
2440 | |||
2441 | /** | ||
2442 | * t4_set_params - sets FW or device parameters | ||
2443 | * @adap: the adapter | ||
2444 | * @mbox: mailbox to use for the FW command | ||
2445 | * @pf: the PF | ||
2446 | * @vf: the VF | ||
2447 | * @nparams: the number of parameters | ||
2448 | * @params: the parameter names | ||
2449 | * @val: the parameter values | ||
2450 | * | ||
2451 | * Sets the value of FW or device parameters. Up to 7 parameters can be | ||
2452 | * specified at once. | ||
2453 | */ | ||
2454 | int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
2455 | unsigned int vf, unsigned int nparams, const u32 *params, | ||
2456 | const u32 *val) | ||
2457 | { | ||
2458 | struct fw_params_cmd c; | ||
2459 | __be32 *p = &c.param[0].mnem; | ||
2460 | |||
2461 | if (nparams > 7) | ||
2462 | return -EINVAL; | ||
2463 | |||
2464 | memset(&c, 0, sizeof(c)); | ||
2465 | c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | | ||
2466 | FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) | | ||
2467 | FW_PARAMS_CMD_VFN(vf)); | ||
2468 | c.retval_len16 = htonl(FW_LEN16(c)); | ||
2469 | while (nparams--) { | ||
2470 | *p++ = htonl(*params++); | ||
2471 | *p++ = htonl(*val++); | ||
2472 | } | ||
2473 | |||
2474 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
2475 | } | ||
2476 | |||
2477 | /** | ||
2478 | * t4_cfg_pfvf - configure PF/VF resource limits | ||
2479 | * @adap: the adapter | ||
2480 | * @mbox: mailbox to use for the FW command | ||
2481 | * @pf: the PF being configured | ||
2482 | * @vf: the VF being configured | ||
2483 | * @txq: the max number of egress queues | ||
2484 | * @txq_eth_ctrl: the max number of egress Ethernet or control queues | ||
2485 | * @rxqi: the max number of interrupt-capable ingress queues | ||
2486 | * @rxq: the max number of interruptless ingress queues | ||
2487 | * @tc: the PCI traffic class | ||
2488 | * @vi: the max number of virtual interfaces | ||
2489 | * @cmask: the channel access rights mask for the PF/VF | ||
2490 | * @pmask: the port access rights mask for the PF/VF | ||
2491 | * @nexact: the maximum number of exact MPS filters | ||
2492 | * @rcaps: read capabilities | ||
2493 | * @wxcaps: write/execute capabilities | ||
2494 | * | ||
2495 | * Configures resource limits and capabilities for a physical or virtual | ||
2496 | * function. | ||
2497 | */ | ||
2498 | int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
2499 | unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, | ||
2500 | unsigned int rxqi, unsigned int rxq, unsigned int tc, | ||
2501 | unsigned int vi, unsigned int cmask, unsigned int pmask, | ||
2502 | unsigned int nexact, unsigned int rcaps, unsigned int wxcaps) | ||
2503 | { | ||
2504 | struct fw_pfvf_cmd c; | ||
2505 | |||
2506 | memset(&c, 0, sizeof(c)); | ||
2507 | c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST | | ||
2508 | FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) | | ||
2509 | FW_PFVF_CMD_VFN(vf)); | ||
2510 | c.retval_len16 = htonl(FW_LEN16(c)); | ||
2511 | c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) | | ||
2512 | FW_PFVF_CMD_NIQ(rxq)); | ||
2513 | c.cmask_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) | | ||
2514 | FW_PFVF_CMD_PMASK(pmask) | | ||
2515 | FW_PFVF_CMD_NEQ(txq)); | ||
2516 | c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) | | ||
2517 | FW_PFVF_CMD_NEXACTF(nexact)); | ||
2518 | c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) | | ||
2519 | FW_PFVF_CMD_WX_CAPS(wxcaps) | | ||
2520 | FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl)); | ||
2521 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
2522 | } | ||
2523 | |||
2524 | /** | ||
2525 | * t4_alloc_vi - allocate a virtual interface | ||
2526 | * @adap: the adapter | ||
2527 | * @mbox: mailbox to use for the FW command | ||
2528 | * @port: physical port associated with the VI | ||
2529 | * @pf: the PF owning the VI | ||
2530 | * @vf: the VF owning the VI | ||
2531 | * @nmac: number of MAC addresses needed (1 to 5) | ||
2532 | * @mac: the MAC addresses of the VI | ||
2533 | * @rss_size: size of RSS table slice associated with this VI | ||
2534 | * | ||
2535 | * Allocates a virtual interface for the given physical port. If @mac is | ||
2536 | * not %NULL it contains the MAC addresses of the VI as assigned by FW. | ||
2537 | * @mac should be large enough to hold @nmac Ethernet addresses; they are | ||
2538 | * stored consecutively so the space needed is @nmac * 6 bytes. | ||
2539 | * Returns a negative error number or the non-negative VI id. | ||
2540 | */ | ||
2541 | int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, | ||
2542 | unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, | ||
2543 | unsigned int *rss_size) | ||
2544 | { | ||
2545 | int ret; | ||
2546 | struct fw_vi_cmd c; | ||
2547 | |||
2548 | memset(&c, 0, sizeof(c)); | ||
2549 | c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST | | ||
2550 | FW_CMD_WRITE | FW_CMD_EXEC | | ||
2551 | FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf)); | ||
2552 | c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c)); | ||
2553 | c.portid_pkd = FW_VI_CMD_PORTID(port); | ||
2554 | c.nmac = nmac - 1; | ||
2555 | |||
2556 | ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); | ||
2557 | if (ret) | ||
2558 | return ret; | ||
2559 | |||
2560 | if (mac) { | ||
2561 | memcpy(mac, c.mac, sizeof(c.mac)); | ||
2562 | switch (nmac) { | ||
2563 | case 5: | ||
2564 | memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); /* fall through */ | ||
2565 | case 4: | ||
2566 | memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); /* fall through */ | ||
2567 | case 3: | ||
2568 | memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); /* fall through */ | ||
2569 | case 2: | ||
2570 | memcpy(mac + 6, c.nmac0, sizeof(c.nmac0)); | ||
2571 | } | ||
2572 | } | ||
2573 | if (rss_size) | ||
2574 | *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd)); | ||
2575 | return ntohs(c.viid_pkd); | ||
2576 | } | ||
2577 | |||
2578 | /** | ||
2579 | * t4_free_vi - free a virtual interface | ||
2580 | * @adap: the adapter | ||
2581 | * @mbox: mailbox to use for the FW command | ||
2582 | * @pf: the PF owning the VI | ||
2583 | * @vf: the VF owning the VI | ||
2584 | * @viid: virtual interface identifier | ||
2585 | * | ||
2586 | * Free a previously allocated virtual interface. | ||
2587 | */ | ||
2588 | int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
2589 | unsigned int vf, unsigned int viid) | ||
2590 | { | ||
2591 | struct fw_vi_cmd c; | ||
2592 | |||
2593 | memset(&c, 0, sizeof(c)); | ||
2594 | c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST | | ||
2595 | FW_CMD_EXEC | FW_VI_CMD_PFN(pf) | | ||
2596 | FW_VI_CMD_VFN(vf)); | ||
2597 | c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c)); | ||
2598 | c.viid_pkd = htons(FW_VI_CMD_VIID(viid)); | ||
2599 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); | ||
2600 | } | ||
2601 | |||
2602 | /** | ||
2603 | * t4_set_rxmode - set Rx properties of a virtual interface | ||
2604 | * @adap: the adapter | ||
2605 | * @mbox: mailbox to use for the FW command | ||
2606 | * @viid: the VI id | ||
2607 | * @mtu: the new MTU or -1 | ||
2608 | * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change | ||
2609 | * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change | ||
2610 | * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change | ||
2611 | * @sleep_ok: if true we may sleep while awaiting command completion | ||
2612 | * | ||
2613 | * Sets Rx properties of a virtual interface. | ||
2614 | */ | ||
2615 | int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, | ||
2616 | int mtu, int promisc, int all_multi, int bcast, bool sleep_ok) | ||
2617 | { | ||
2618 | struct fw_vi_rxmode_cmd c; | ||
2619 | |||
2620 | /* convert to FW values */ | ||
2621 | if (mtu < 0) | ||
2622 | mtu = FW_RXMODE_MTU_NO_CHG; | ||
2623 | if (promisc < 0) | ||
2624 | promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK; | ||
2625 | if (all_multi < 0) | ||
2626 | all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK; | ||
2627 | if (bcast < 0) | ||
2628 | bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK; | ||
2629 | |||
2630 | memset(&c, 0, sizeof(c)); | ||
2631 | c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST | | ||
2632 | FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid)); | ||
2633 | c.retval_len16 = htonl(FW_LEN16(c)); | ||
2634 | c.mtu_to_broadcasten = htonl(FW_VI_RXMODE_CMD_MTU(mtu) | | ||
2635 | FW_VI_RXMODE_CMD_PROMISCEN(promisc) | | ||
2636 | FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | | ||
2637 | FW_VI_RXMODE_CMD_BROADCASTEN(bcast)); | ||
2638 | return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); | ||
2639 | } | ||
2640 | |||
2641 | /** | ||
2642 | * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses | ||
2643 | * @adap: the adapter | ||
2644 | * @mbox: mailbox to use for the FW command | ||
2645 | * @viid: the VI id | ||
2646 | * @free: if true any existing filters for this VI id are first removed | ||
2647 | * @naddr: the number of MAC addresses to allocate filters for (up to 7) | ||
2648 | * @addr: the MAC address(es) | ||
2649 | * @idx: where to store the index of each allocated filter | ||
2650 | * @hash: pointer to hash address filter bitmap | ||
2651 | * @sleep_ok: call is allowed to sleep | ||
2652 | * | ||
2653 | * Allocates an exact-match filter for each of the supplied addresses and | ||
2654 | * sets it to the corresponding address. If @idx is not %NULL it should | ||
2655 | * have at least @naddr entries, each of which will be set to the index of | ||
2656 | * the filter allocated for the corresponding MAC address. If a filter | ||
2657 | * could not be allocated for an address, its index is set to 0xffff. | ||
2658 | * If @hash is not %NULL, addresses that fail to allocate an exact filter | ||
2659 | * are hashed and used to update the hash filter bitmap pointed at by @hash. | ||
2660 | * | ||
2661 | * Returns a negative error number or the number of filters allocated. | ||
2662 | */ | ||
2663 | int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, | ||
2664 | unsigned int viid, bool free, unsigned int naddr, | ||
2665 | const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok) | ||
2666 | { | ||
2667 | int i, ret; | ||
2668 | struct fw_vi_mac_cmd c; | ||
2669 | struct fw_vi_mac_exact *p; | ||
2670 | |||
2671 | if (naddr > 7) | ||
2672 | return -EINVAL; | ||
2673 | |||
2674 | memset(&c, 0, sizeof(c)); | ||
2675 | c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | | ||
2676 | FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) | | ||
2677 | FW_VI_MAC_CMD_VIID(viid)); | ||
2678 | c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) | | ||
2679 | FW_CMD_LEN16((naddr + 2) / 2)); | ||
2680 | |||
2681 | for (i = 0, p = c.u.exact; i < naddr; i++, p++) { | ||
2682 | p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | | ||
2683 | FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); | ||
2684 | memcpy(p->macaddr, addr[i], sizeof(p->macaddr)); | ||
2685 | } | ||
2686 | |||
2687 | ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); | ||
2688 | if (ret) | ||
2689 | return ret; | ||
2690 | |||
2691 | for (i = 0, p = c.u.exact; i < naddr; i++, p++) { | ||
2692 | u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); | ||
2693 | |||
2694 | if (idx) | ||
2695 | idx[i] = index >= NEXACT_MAC ? 0xffff : index; | ||
2696 | if (index < NEXACT_MAC) | ||
2697 | ret++; | ||
2698 | else if (hash) | ||
2699 | *hash |= (1ULL << hash_mac_addr(addr[i])); | ||
2700 | } | ||
2701 | return ret; | ||
2702 | } | ||
2703 | |||
2704 | /** | ||
2705 | * t4_change_mac - modifies the exact-match filter for a MAC address | ||
2706 | * @adap: the adapter | ||
2707 | * @mbox: mailbox to use for the FW command | ||
2708 | * @viid: the VI id | ||
2709 | * @idx: index of existing filter for old value of MAC address, or -1 | ||
2710 | * @addr: the new MAC address value | ||
2711 | * @persist: whether a new MAC allocation should be persistent | ||
2712 | * @add_smt: if true also add the address to the HW SMT | ||
2713 | * | ||
2714 | * Modifies an exact-match filter and sets it to the new MAC address. | ||
2715 | * Note that in general it is not possible to modify the value of a given | ||
2716 | * filter, so the generic way to modify an address filter is to free the one | ||
2717 | * being used by the old address value and allocate a new filter for the | ||
2718 | * new address value. @idx can be -1 if the address is a new addition. | ||
2719 | * | ||
2720 | * Returns a negative error number or the index of the filter with the new | ||
2721 | * MAC value. | ||
2722 | */ | ||
2723 | int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, | ||
2724 | int idx, const u8 *addr, bool persist, bool add_smt) | ||
2725 | { | ||
2726 | int ret, mode; | ||
2727 | struct fw_vi_mac_cmd c; | ||
2728 | struct fw_vi_mac_exact *p = c.u.exact; | ||
2729 | |||
2730 | if (idx < 0) /* new allocation */ | ||
2731 | idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; | ||
2732 | mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; | ||
2733 | |||
2734 | memset(&c, 0, sizeof(c)); | ||
2735 | c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | | ||
2736 | FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid)); | ||
2737 | c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1)); | ||
2738 | p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | | ||
2739 | FW_VI_MAC_CMD_SMAC_RESULT(mode) | | ||
2740 | FW_VI_MAC_CMD_IDX(idx)); | ||
2741 | memcpy(p->macaddr, addr, sizeof(p->macaddr)); | ||
2742 | |||
2743 | ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); | ||
2744 | if (ret == 0) { | ||
2745 | ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); | ||
2746 | if (ret >= NEXACT_MAC) | ||
2747 | ret = -ENOMEM; | ||
2748 | } | ||
2749 | return ret; | ||
2750 | } | ||
2751 | |||
2752 | /** | ||
2753 | * t4_set_addr_hash - program the MAC inexact-match hash filter | ||
2754 | * @adap: the adapter | ||
2755 | * @mbox: mailbox to use for the FW command | ||
2756 | * @viid: the VI id | ||
2757 | * @ucast: whether the hash filter should also match unicast addresses | ||
2758 | * @vec: the value to be written to the hash filter | ||
2759 | * @sleep_ok: call is allowed to sleep | ||
2760 | * | ||
2761 | * Sets the 64-bit inexact-match hash filter for a virtual interface. | ||
2762 | */ | ||
2763 | int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, | ||
2764 | bool ucast, u64 vec, bool sleep_ok) | ||
2765 | { | ||
2766 | struct fw_vi_mac_cmd c; | ||
2767 | |||
2768 | memset(&c, 0, sizeof(c)); | ||
2769 | c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | | ||
2770 | FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid)); | ||
2771 | c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN | | ||
2772 | FW_VI_MAC_CMD_HASHUNIEN(ucast) | | ||
2773 | FW_CMD_LEN16(1)); | ||
2774 | c.u.hash.hashvec = cpu_to_be64(vec); | ||
2775 | return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); | ||
2776 | } | ||
2777 | |||
2778 | /** | ||
2779 | * t4_enable_vi - enable/disable a virtual interface | ||
2780 | * @adap: the adapter | ||
2781 | * @mbox: mailbox to use for the FW command | ||
2782 | * @viid: the VI id | ||
2783 | * @rx_en: 1=enable Rx, 0=disable Rx | ||
2784 | * @tx_en: 1=enable Tx, 0=disable Tx | ||
2785 | * | ||
2786 | * Enables/disables a virtual interface. | ||
2787 | */ | ||
2788 | int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, | ||
2789 | bool rx_en, bool tx_en) | ||
2790 | { | ||
2791 | struct fw_vi_enable_cmd c; | ||
2792 | |||
2793 | memset(&c, 0, sizeof(c)); | ||
2794 | c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | | ||
2795 | FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); | ||
2796 | c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) | | ||
2797 | FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c)); | ||
2798 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
2799 | } | ||
2800 | |||
2801 | /** | ||
2802 | * t4_identify_port - identify a VI's port by blinking its LED | ||
2803 | * @adap: the adapter | ||
2804 | * @mbox: mailbox to use for the FW command | ||
2805 | * @viid: the VI id | ||
2806 | * @nblinks: how many times to blink LED at 2.5 Hz | ||
2807 | * | ||
2808 | * Identifies a VI's port by blinking its LED. | ||
2809 | */ | ||
2810 | int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, | ||
2811 | unsigned int nblinks) | ||
2812 | { | ||
2813 | struct fw_vi_enable_cmd c; | ||
2814 | memset(&c, 0, sizeof(c)); | ||
2815 | c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | | ||
2816 | FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); | ||
2817 | c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); | ||
2818 | c.blinkdur = htons(nblinks); | ||
2819 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
2820 | } | ||
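Sketch of an ethtool-style "identify" wrapper on top of this (the wrapper itself is hypothetical): with the documented 2.5 Hz rate, a duration in seconds converts to nblinks as below.

	static int example_identify(struct adapter *adap, unsigned int mbox,
				    unsigned int viid, unsigned int secs)
	{
		/* 2.5 blinks per second, per the kernel-doc above */
		return t4_identify_port(adap, mbox, viid, secs * 5 / 2);
	}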
2821 | |||
2822 | /** | ||
2823 | * t4_iq_start_stop - enable/disable an ingress queue and its FLs | ||
2824 | * @adap: the adapter | ||
2825 | * @mbox: mailbox to use for the FW command | ||
2826 | * @start: %true to enable the queues, %false to disable them | ||
2827 | * @pf: the PF owning the queues | ||
2828 | * @vf: the VF owning the queues | ||
2829 | * @iqid: ingress queue id | ||
2830 | * @fl0id: FL0 queue id or 0xffff if no attached FL0 | ||
2831 | * @fl1id: FL1 queue id or 0xffff if no attached FL1 | ||
2832 | * | ||
2833 | * Starts or stops an ingress queue and its associated FLs, if any. | ||
2834 | */ | ||
2835 | int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start, | ||
2836 | unsigned int pf, unsigned int vf, unsigned int iqid, | ||
2837 | unsigned int fl0id, unsigned int fl1id) | ||
2838 | { | ||
2839 | struct fw_iq_cmd c; | ||
2840 | |||
2841 | memset(&c, 0, sizeof(c)); | ||
2842 | c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | | ||
2843 | FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) | | ||
2844 | FW_IQ_CMD_VFN(vf)); | ||
2845 | c.alloc_to_len16 = htonl(FW_IQ_CMD_IQSTART(start) | | ||
2846 | FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c)); | ||
2847 | c.iqid = htons(iqid); | ||
2848 | c.fl0id = htons(fl0id); | ||
2849 | c.fl1id = htons(fl1id); | ||
2850 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
2851 | } | ||
2852 | |||
2853 | /** | ||
2854 | * t4_iq_free - free an ingress queue and its FLs | ||
2855 | * @adap: the adapter | ||
2856 | * @mbox: mailbox to use for the FW command | ||
2857 | * @pf: the PF owning the queues | ||
2858 | * @vf: the VF owning the queues | ||
2859 | * @iqtype: the ingress queue type | ||
2860 | * @iqid: ingress queue id | ||
2861 | * @fl0id: FL0 queue id or 0xffff if no attached FL0 | ||
2862 | * @fl1id: FL1 queue id or 0xffff if no attached FL1 | ||
2863 | * | ||
2864 | * Frees an ingress queue and its associated FLs, if any. | ||
2865 | */ | ||
2866 | int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
2867 | unsigned int vf, unsigned int iqtype, unsigned int iqid, | ||
2868 | unsigned int fl0id, unsigned int fl1id) | ||
2869 | { | ||
2870 | struct fw_iq_cmd c; | ||
2871 | |||
2872 | memset(&c, 0, sizeof(c)); | ||
2873 | c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | | ||
2874 | FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) | | ||
2875 | FW_IQ_CMD_VFN(vf)); | ||
2876 | c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c)); | ||
2877 | c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype)); | ||
2878 | c.iqid = htons(iqid); | ||
2879 | c.fl0id = htons(fl0id); | ||
2880 | c.fl1id = htons(fl1id); | ||
2881 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
2882 | } | ||
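Sketch: freeing an interrupt-capable ingress queue that never had free lists attached, using the 0xffff "no FL" sentinels from the kernel-doc above (the FW_IQ_TYPE_FL_INT_CAP constant is assumed to come from the companion t4fw_api.h):

	static int example_free_intr_iq(struct adapter *adap, unsigned int mbox,
					unsigned int pf, unsigned int vf,
					unsigned int iqid)
	{
		/* 0xffff sentinels: this queue has no FL0/FL1 attached */
		return t4_iq_free(adap, mbox, pf, vf, FW_IQ_TYPE_FL_INT_CAP,
				  iqid, 0xffff, 0xffff);
	}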
2883 | |||
2884 | /** | ||
2885 | * t4_eth_eq_free - free an Ethernet egress queue | ||
2886 | * @adap: the adapter | ||
2887 | * @mbox: mailbox to use for the FW command | ||
2888 | * @pf: the PF owning the queue | ||
2889 | * @vf: the VF owning the queue | ||
2890 | * @eqid: egress queue id | ||
2891 | * | ||
2892 | * Frees an Ethernet egress queue. | ||
2893 | */ | ||
2894 | int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
2895 | unsigned int vf, unsigned int eqid) | ||
2896 | { | ||
2897 | struct fw_eq_eth_cmd c; | ||
2898 | |||
2899 | memset(&c, 0, sizeof(c)); | ||
2900 | c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST | | ||
2901 | FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) | | ||
2902 | FW_EQ_ETH_CMD_VFN(vf)); | ||
2903 | c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); | ||
2904 | c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid)); | ||
2905 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
2906 | } | ||
2907 | |||
2908 | /** | ||
2909 | * t4_ctrl_eq_free - free a control egress queue | ||
2910 | * @adap: the adapter | ||
2911 | * @mbox: mailbox to use for the FW command | ||
2912 | * @pf: the PF owning the queue | ||
2913 | * @vf: the VF owning the queue | ||
2914 | * @eqid: egress queue id | ||
2915 | * | ||
2916 | * Frees a control egress queue. | ||
2917 | */ | ||
2918 | int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
2919 | unsigned int vf, unsigned int eqid) | ||
2920 | { | ||
2921 | struct fw_eq_ctrl_cmd c; | ||
2922 | |||
2923 | memset(&c, 0, sizeof(c)); | ||
2924 | c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST | | ||
2925 | FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) | | ||
2926 | FW_EQ_CTRL_CMD_VFN(vf)); | ||
2927 | c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c)); | ||
2928 | c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid)); | ||
2929 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
2930 | } | ||
2931 | |||
2932 | /** | ||
2933 | * t4_ofld_eq_free - free an offload egress queue | ||
2934 | * @adap: the adapter | ||
2935 | * @mbox: mailbox to use for the FW command | ||
2936 | * @pf: the PF owning the queue | ||
2937 | * @vf: the VF owning the queue | ||
2938 | * @eqid: egress queue id | ||
2939 | * | ||
2940 | * Frees an offload egress queue. | ||
2941 | */ | ||
2942 | int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
2943 | unsigned int vf, unsigned int eqid) | ||
2944 | { | ||
2945 | struct fw_eq_ofld_cmd c; | ||
2946 | |||
2947 | memset(&c, 0, sizeof(c)); | ||
2948 | c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST | | ||
2949 | FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) | | ||
2950 | FW_EQ_OFLD_CMD_VFN(vf)); | ||
2951 | c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c)); | ||
2952 | c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid)); | ||
2953 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
2954 | } | ||
2955 | |||
2956 | /** | ||
2957 | * t4_handle_fw_rpl - process a FW reply message | ||
2958 | * @adap: the adapter | ||
2959 | * @rpl: start of the FW message | ||
2960 | * | ||
2961 | * Processes a FW message, such as a link state change message. | ||
2962 | */ | ||
2963 | int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) | ||
2964 | { | ||
2965 | u8 opcode = *(const u8 *)rpl; | ||
2966 | |||
2967 | if (opcode == FW_PORT_CMD) { /* link/module state change message */ | ||
2968 | int speed = 0, fc = 0; | ||
2969 | const struct fw_port_cmd *p = (void *)rpl; | ||
2970 | int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid)); | ||
2971 | int port = adap->chan_map[chan]; | ||
2972 | struct port_info *pi = adap2pinfo(adap, port); | ||
2973 | struct link_config *lc = &pi->link_cfg; | ||
2974 | u32 stat = ntohl(p->u.info.lstatus_to_modtype); | ||
2975 | int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0; | ||
2976 | u32 mod = FW_PORT_CMD_MODTYPE_GET(stat); | ||
2977 | |||
2978 | if (stat & FW_PORT_CMD_RXPAUSE) | ||
2979 | fc |= PAUSE_RX; | ||
2980 | if (stat & FW_PORT_CMD_TXPAUSE) | ||
2981 | fc |= PAUSE_TX; | ||
2982 | if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) | ||
2983 | speed = SPEED_100; | ||
2984 | else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) | ||
2985 | speed = SPEED_1000; | ||
2986 | else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) | ||
2987 | speed = SPEED_10000; | ||
2988 | |||
2989 | if (link_ok != lc->link_ok || speed != lc->speed || | ||
2990 | fc != lc->fc) { /* something changed */ | ||
2991 | lc->link_ok = link_ok; | ||
2992 | lc->speed = speed; | ||
2993 | lc->fc = fc; | ||
2994 | t4_os_link_changed(adap, port, link_ok); | ||
2995 | } | ||
2996 | if (mod != pi->mod_type) { | ||
2997 | pi->mod_type = mod; | ||
2998 | t4_os_portmod_changed(adap, port); | ||
2999 | } | ||
3000 | } | ||
3001 | return 0; | ||
3002 | } | ||
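Sketch of how the FW-event path might hand messages here: CPL_FW6_MSG (see t4_msg.h below) wraps the raw FW reply in its data[] payload; treating type 0 as a raw FW command reply is an assumption of this sketch.

	static void example_fwevt_handler(struct adapter *adap, const __be64 *rsp)
	{
		const struct cpl_fw6_msg *p = (const void *)rsp;

		/* type 0: payload is a raw FW command reply (assumed) */
		if (p->opcode == CPL_FW6_MSG && p->type == 0)
			t4_handle_fw_rpl(adap, p->data);
	}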
3003 | |||
3004 | static void __devinit get_pci_mode(struct adapter *adapter, | ||
3005 | struct pci_params *p) | ||
3006 | { | ||
3007 | u16 val; | ||
3008 | u32 pcie_cap = pci_pcie_cap(adapter->pdev); | ||
3009 | |||
3010 | if (pcie_cap) { | ||
3011 | pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA, | ||
3012 | &val); | ||
3013 | p->speed = val & PCI_EXP_LNKSTA_CLS; | ||
3014 | p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4; | ||
3015 | } | ||
3016 | } | ||
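Sketch: decoding the raw fields captured above (per the PCIe spec, link-speed code 1 is 2.5 GT/s and 2 is 5 GT/s; the reporting helper itself is hypothetical):

	static void example_report_pcie(struct pci_dev *pdev,
					const struct pci_params *p)
	{
		dev_info(&pdev->dev, "PCIe x%u at %s GT/s\n", p->width,
			 p->speed == 2 ? "5" : p->speed == 1 ? "2.5" : "?");
	}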
3017 | |||
3018 | /** | ||
3019 | * init_link_config - initialize a link's SW state | ||
3020 | * @lc: structure holding the link state | ||
3021 | * @caps: link capabilities | ||
3022 | * | ||
3023 | * Initializes the SW state maintained for each link, including the link's | ||
3024 | * capabilities and default speed/flow-control/autonegotiation settings. | ||
3025 | */ | ||
3026 | static void __devinit init_link_config(struct link_config *lc, | ||
3027 | unsigned int caps) | ||
3028 | { | ||
3029 | lc->supported = caps; | ||
3030 | lc->requested_speed = 0; | ||
3031 | lc->speed = 0; | ||
3032 | lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; | ||
3033 | if (lc->supported & FW_PORT_CAP_ANEG) { | ||
3034 | lc->advertising = lc->supported & ADVERT_MASK; | ||
3035 | lc->autoneg = AUTONEG_ENABLE; | ||
3036 | lc->requested_fc |= PAUSE_AUTONEG; | ||
3037 | } else { | ||
3038 | lc->advertising = 0; | ||
3039 | lc->autoneg = AUTONEG_DISABLE; | ||
3040 | } | ||
3041 | } | ||
3042 | |||
3043 | static int __devinit wait_dev_ready(struct adapter *adap) | ||
3044 | { | ||
3045 | if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff) | ||
3046 | return 0; | ||
3047 | msleep(500); | ||
3048 | return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO; | ||
3049 | } | ||
3050 | |||
3051 | /** | ||
3052 | * t4_prep_adapter - prepare SW and HW for operation | ||
3053 | * @adapter: the adapter | ||
3055 | * | ||
3056 | * Initialize adapter SW state for the various HW modules and set | ||
3057 | * initial values for some adapter tunables. | ||
3059 | */ | ||
3060 | int __devinit t4_prep_adapter(struct adapter *adapter) | ||
3061 | { | ||
3062 | int ret; | ||
3063 | |||
3064 | ret = wait_dev_ready(adapter); | ||
3065 | if (ret < 0) | ||
3066 | return ret; | ||
3067 | |||
3068 | get_pci_mode(adapter, &adapter->params.pci); | ||
3069 | adapter->params.rev = t4_read_reg(adapter, PL_REV); | ||
3070 | |||
3071 | ret = get_vpd_params(adapter, &adapter->params.vpd); | ||
3072 | if (ret < 0) | ||
3073 | return ret; | ||
3074 | |||
3075 | init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); | ||
3076 | |||
3077 | /* | ||
3078 | * Default port for debugging in case we can't reach FW. | ||
3079 | */ | ||
3080 | adapter->params.nports = 1; | ||
3081 | adapter->params.portvec = 1; | ||
3082 | return 0; | ||
3083 | } | ||
3084 | |||
3085 | int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf) | ||
3086 | { | ||
3087 | u8 addr[6]; | ||
3088 | int ret, i, j = 0; | ||
3089 | struct fw_port_cmd c; | ||
3090 | |||
3091 | memset(&c, 0, sizeof(c)); | ||
3092 | |||
3093 | for_each_port(adap, i) { | ||
3094 | unsigned int rss_size; | ||
3095 | struct port_info *p = adap2pinfo(adap, i); | ||
3096 | |||
3097 | while ((adap->params.portvec & (1 << j)) == 0) | ||
3098 | j++; | ||
3099 | |||
3100 | c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | | ||
3101 | FW_CMD_REQUEST | FW_CMD_READ | | ||
3102 | FW_PORT_CMD_PORTID(j)); | ||
3103 | c.action_to_len16 = htonl( | ||
3104 | FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) | | ||
3105 | FW_LEN16(c)); | ||
3106 | ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); | ||
3107 | if (ret) | ||
3108 | return ret; | ||
3109 | |||
3110 | ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size); | ||
3111 | if (ret < 0) | ||
3112 | return ret; | ||
3113 | |||
3114 | p->viid = ret; | ||
3115 | p->tx_chan = j; | ||
3116 | p->lport = j; | ||
3117 | p->rss_size = rss_size; | ||
3118 | memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN); | ||
3119 | memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN); | ||
3120 | |||
3121 | ret = ntohl(c.u.info.lstatus_to_modtype); | ||
3122 | p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ? | ||
3123 | FW_PORT_CMD_MDIOADDR_GET(ret) : -1; | ||
3124 | p->port_type = FW_PORT_CMD_PTYPE_GET(ret); | ||
3125 | p->mod_type = FW_PORT_CMD_MODTYPE_GET(ret); | ||
3126 | |||
3127 | init_link_config(&p->link_cfg, ntohs(c.u.info.pcap)); | ||
3128 | j++; | ||
3129 | } | ||
3130 | return 0; | ||
3131 | } | ||
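Sketch of the probe-time ordering these two routines imply (a real probe does more in between, e.g. the FW handshake and queue setup; this only shows the dependency):

	static int example_hw_bringup(struct adapter *adap, int mbox,
				      int pf, int vf)
	{
		int ret = t4_prep_adapter(adap);	/* SW state + VPD first */

		if (ret)
			return ret;
		return t4_port_init(adap, mbox, pf, vf); /* then ports/VIs */
	}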
diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h new file mode 100644 index 000000000000..025623285c93 --- /dev/null +++ b/drivers/net/cxgb4/t4_hw.h | |||
@@ -0,0 +1,100 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio T4 Ethernet driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #ifndef __T4_HW_H | ||
36 | #define __T4_HW_H | ||
37 | |||
38 | #include <linux/types.h> | ||
39 | |||
40 | enum { | ||
41 | NCHAN = 4, /* # of HW channels */ | ||
42 | MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */ | ||
43 | EEPROMSIZE = 17408, /* Serial EEPROM physical size */ | ||
44 | EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */ | ||
45 | RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */ | ||
46 | TCB_SIZE = 128, /* TCB size */ | ||
47 | NMTUS = 16, /* size of MTU table */ | ||
48 | NCCTRL_WIN = 32, /* # of congestion control windows */ | ||
49 | NEXACT_MAC = 336, /* # of exact MAC address filters */ | ||
50 | L2T_SIZE = 4096, /* # of L2T entries */ | ||
51 | MBOX_LEN = 64, /* mailbox size in bytes */ | ||
52 | TRACE_LEN = 112, /* length of trace data and mask */ | ||
53 | FILTER_OPT_LEN = 36, /* filter tuple width for optional components */ | ||
54 | NWOL_PAT = 8, /* # of WoL patterns */ | ||
55 | WOL_PAT_LEN = 128, /* length of WoL patterns */ | ||
56 | }; | ||
57 | |||
58 | enum { | ||
59 | SF_PAGE_SIZE = 256, /* serial flash page size */ | ||
60 | SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */ | ||
61 | SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */ | ||
62 | }; | ||
63 | |||
64 | enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */ | ||
65 | |||
66 | enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV }; /* mailbox owners */ | ||
67 | |||
68 | enum { | ||
69 | SGE_MAX_WR_LEN = 512, /* max WR size in bytes */ | ||
70 | SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */ | ||
71 | SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */ | ||
72 | }; | ||
73 | |||
74 | struct sge_qstat { /* data written to SGE queue status entries */ | ||
75 | __be32 qid; | ||
76 | __be16 cidx; | ||
77 | __be16 pidx; | ||
78 | }; | ||
79 | |||
80 | /* | ||
81 | * Structure for last 128 bits of response descriptors | ||
82 | */ | ||
83 | struct rsp_ctrl { | ||
84 | __be32 hdrbuflen_pidx; | ||
85 | __be32 pldbuflen_qid; | ||
86 | union { | ||
87 | u8 type_gen; | ||
88 | __be64 last_flit; | ||
89 | }; | ||
90 | }; | ||
91 | |||
92 | #define RSPD_NEWBUF 0x80000000U | ||
93 | #define RSPD_LEN 0x7fffffffU | ||
94 | |||
95 | #define RSPD_GEN(x) ((x) >> 7) | ||
96 | #define RSPD_TYPE(x) (((x) >> 4) & 3) | ||
97 | |||
98 | #define QINTR_CNT_EN 0x1 | ||
99 | #define QINTR_TIMER_IDX(x) ((x) << 1) | ||
100 | #endif /* __T4_HW_H */ | ||
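Sketch: consuming a response descriptor with the RSPD_* accessors above; testing the entry against the queue's expected generation is the usual validity check, though the helper itself is hypothetical.

	static inline int example_rspd_type(const struct rsp_ctrl *rc, int exp_gen)
	{
		u8 tg = rc->type_gen;	/* single byte, no byte-swap needed */

		/* entry is live only when its generation matches the queue's */
		return RSPD_GEN(tg) == exp_gen ? RSPD_TYPE(tg) : -1;
	}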
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h new file mode 100644 index 000000000000..fdb117443144 --- /dev/null +++ b/drivers/net/cxgb4/t4_msg.h | |||
@@ -0,0 +1,664 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio T4 Ethernet driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #ifndef __T4_MSG_H | ||
36 | #define __T4_MSG_H | ||
37 | |||
38 | #include <linux/types.h> | ||
39 | |||
40 | enum { | ||
41 | CPL_PASS_OPEN_REQ = 0x1, | ||
42 | CPL_PASS_ACCEPT_RPL = 0x2, | ||
43 | CPL_ACT_OPEN_REQ = 0x3, | ||
44 | CPL_SET_TCB_FIELD = 0x5, | ||
45 | CPL_GET_TCB = 0x6, | ||
46 | CPL_CLOSE_CON_REQ = 0x8, | ||
47 | CPL_CLOSE_LISTSRV_REQ = 0x9, | ||
48 | CPL_ABORT_REQ = 0xA, | ||
49 | CPL_ABORT_RPL = 0xB, | ||
50 | CPL_RX_DATA_ACK = 0xD, | ||
51 | CPL_TX_PKT = 0xE, | ||
52 | CPL_L2T_WRITE_REQ = 0x12, | ||
53 | CPL_TID_RELEASE = 0x1A, | ||
54 | |||
55 | CPL_CLOSE_LISTSRV_RPL = 0x20, | ||
56 | CPL_L2T_WRITE_RPL = 0x23, | ||
57 | CPL_PASS_OPEN_RPL = 0x24, | ||
58 | CPL_ACT_OPEN_RPL = 0x25, | ||
59 | CPL_PEER_CLOSE = 0x26, | ||
60 | CPL_ABORT_REQ_RSS = 0x2B, | ||
61 | CPL_ABORT_RPL_RSS = 0x2D, | ||
62 | |||
63 | CPL_CLOSE_CON_RPL = 0x32, | ||
64 | CPL_ISCSI_HDR = 0x33, | ||
65 | CPL_RDMA_CQE = 0x35, | ||
66 | CPL_RDMA_CQE_READ_RSP = 0x36, | ||
67 | CPL_RDMA_CQE_ERR = 0x37, | ||
68 | CPL_RX_DATA = 0x39, | ||
69 | CPL_SET_TCB_RPL = 0x3A, | ||
70 | CPL_RX_PKT = 0x3B, | ||
71 | CPL_RX_DDP_COMPLETE = 0x3F, | ||
72 | |||
73 | CPL_ACT_ESTABLISH = 0x40, | ||
74 | CPL_PASS_ESTABLISH = 0x41, | ||
75 | CPL_RX_DATA_DDP = 0x42, | ||
76 | CPL_PASS_ACCEPT_REQ = 0x44, | ||
77 | |||
78 | CPL_RDMA_READ_REQ = 0x60, | ||
79 | |||
80 | CPL_PASS_OPEN_REQ6 = 0x81, | ||
81 | CPL_ACT_OPEN_REQ6 = 0x83, | ||
82 | |||
83 | CPL_RDMA_TERMINATE = 0xA2, | ||
84 | CPL_RDMA_WRITE = 0xA4, | ||
85 | CPL_SGE_EGR_UPDATE = 0xA5, | ||
86 | |||
87 | CPL_TRACE_PKT = 0xB0, | ||
88 | |||
89 | CPL_FW4_MSG = 0xC0, | ||
90 | CPL_FW4_PLD = 0xC1, | ||
91 | CPL_FW4_ACK = 0xC3, | ||
92 | |||
93 | CPL_FW6_MSG = 0xE0, | ||
94 | CPL_FW6_PLD = 0xE1, | ||
95 | CPL_TX_PKT_LSO = 0xED, | ||
96 | CPL_TX_PKT_XT = 0xEE, | ||
97 | |||
98 | NUM_CPL_CMDS | ||
99 | }; | ||
100 | |||
101 | enum CPL_error { | ||
102 | CPL_ERR_NONE = 0, | ||
103 | CPL_ERR_TCAM_FULL = 3, | ||
104 | CPL_ERR_BAD_LENGTH = 15, | ||
105 | CPL_ERR_BAD_ROUTE = 18, | ||
106 | CPL_ERR_CONN_RESET = 20, | ||
107 | CPL_ERR_CONN_EXIST_SYNRECV = 21, | ||
108 | CPL_ERR_CONN_EXIST = 22, | ||
109 | CPL_ERR_ARP_MISS = 23, | ||
110 | CPL_ERR_BAD_SYN = 24, | ||
111 | CPL_ERR_CONN_TIMEDOUT = 30, | ||
112 | CPL_ERR_XMIT_TIMEDOUT = 31, | ||
113 | CPL_ERR_PERSIST_TIMEDOUT = 32, | ||
114 | CPL_ERR_FINWAIT2_TIMEDOUT = 33, | ||
115 | CPL_ERR_KEEPALIVE_TIMEDOUT = 34, | ||
116 | CPL_ERR_RTX_NEG_ADVICE = 35, | ||
117 | CPL_ERR_PERSIST_NEG_ADVICE = 36, | ||
118 | CPL_ERR_ABORT_FAILED = 42, | ||
119 | CPL_ERR_IWARP_FLM = 50, | ||
120 | }; | ||
121 | |||
122 | enum { | ||
123 | ULP_MODE_NONE = 0, | ||
124 | ULP_MODE_ISCSI = 2, | ||
125 | ULP_MODE_RDMA = 4, | ||
126 | ULP_MODE_FCOE = 6, | ||
127 | }; | ||
128 | |||
129 | enum { | ||
130 | ULP_CRC_HEADER = 1 << 0, | ||
131 | ULP_CRC_DATA = 1 << 1 | ||
132 | }; | ||
133 | |||
134 | enum { | ||
135 | CPL_ABORT_SEND_RST = 0, | ||
136 | CPL_ABORT_NO_RST, | ||
137 | }; | ||
138 | |||
139 | enum { /* TX_PKT_XT checksum types */ | ||
140 | TX_CSUM_TCP = 0, | ||
141 | TX_CSUM_UDP = 1, | ||
142 | TX_CSUM_CRC16 = 4, | ||
143 | TX_CSUM_CRC32 = 5, | ||
144 | TX_CSUM_CRC32C = 6, | ||
145 | TX_CSUM_FCOE = 7, | ||
146 | TX_CSUM_TCPIP = 8, | ||
147 | TX_CSUM_UDPIP = 9, | ||
148 | TX_CSUM_TCPIP6 = 10, | ||
149 | TX_CSUM_UDPIP6 = 11, | ||
150 | TX_CSUM_IP = 12, | ||
151 | }; | ||
152 | |||
153 | union opcode_tid { | ||
154 | __be32 opcode_tid; | ||
155 | u8 opcode; | ||
156 | }; | ||
157 | |||
158 | #define CPL_OPCODE(x) ((x) << 24) | ||
159 | #define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid)) | ||
160 | #define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid) | ||
161 | #define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF) | ||
162 | |||
163 | /* partitioning of TID fields that also carry a queue id */ | ||
164 | #define GET_TID_TID(x) ((x) & 0x3fff) | ||
165 | #define GET_TID_QID(x) (((x) >> 14) & 0x3ff) | ||
166 | #define TID_QID(x) ((x) << 14) | ||
167 | |||
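Sketch: composing and cracking an opcode/TID word with the macros above, using struct cpl_tid_release (defined further down) as the carrier; tid must fit the 14-bit field for the round trip to hold.

	static void example_opcode_tid(struct cpl_tid_release *req,
				       unsigned int qid, unsigned int tid)
	{
		/* opcode in the top byte, queue id and tid packed below it */
		req->ot.opcode_tid = htonl(MK_OPCODE_TID(CPL_TID_RELEASE,
							 TID_QID(qid) | tid));
		/* GET_TID() recovers the low 24 bits; the split macros
		 * then take the queue id and tid apart again */
		WARN_ON(GET_TID_TID(GET_TID(req)) != tid);
	}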
168 | struct rss_header { | ||
169 | u8 opcode; | ||
170 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
171 | u8 channel:2; | ||
172 | u8 filter_hit:1; | ||
173 | u8 filter_tid:1; | ||
174 | u8 hash_type:2; | ||
175 | u8 ipv6:1; | ||
176 | u8 send2fw:1; | ||
177 | #else | ||
178 | u8 send2fw:1; | ||
179 | u8 ipv6:1; | ||
180 | u8 hash_type:2; | ||
181 | u8 filter_tid:1; | ||
182 | u8 filter_hit:1; | ||
183 | u8 channel:2; | ||
184 | #endif | ||
185 | __be16 qid; | ||
186 | __be32 hash_val; | ||
187 | }; | ||
188 | |||
189 | struct work_request_hdr { | ||
190 | __be32 wr_hi; | ||
191 | __be32 wr_mid; | ||
192 | __be64 wr_lo; | ||
193 | }; | ||
194 | |||
195 | #define WR_HDR struct work_request_hdr wr | ||
196 | |||
197 | struct cpl_pass_open_req { | ||
198 | WR_HDR; | ||
199 | union opcode_tid ot; | ||
200 | __be16 local_port; | ||
201 | __be16 peer_port; | ||
202 | __be32 local_ip; | ||
203 | __be32 peer_ip; | ||
204 | __be64 opt0; | ||
205 | #define TX_CHAN(x) ((x) << 2) | ||
206 | #define DELACK(x) ((x) << 5) | ||
207 | #define ULP_MODE(x) ((x) << 8) | ||
208 | #define RCV_BUFSIZ(x) ((x) << 12) | ||
209 | #define DSCP(x) ((x) << 22) | ||
210 | #define SMAC_SEL(x) ((u64)(x) << 28) | ||
211 | #define L2T_IDX(x) ((u64)(x) << 36) | ||
212 | #define NAGLE(x) ((u64)(x) << 49) | ||
213 | #define WND_SCALE(x) ((u64)(x) << 50) | ||
214 | #define KEEP_ALIVE(x) ((u64)(x) << 54) | ||
215 | #define MSS_IDX(x) ((u64)(x) << 60) | ||
216 | __be64 opt1; | ||
217 | #define SYN_RSS_ENABLE (1 << 0) | ||
218 | #define SYN_RSS_QUEUE(x) ((x) << 2) | ||
219 | #define CONN_POLICY_ASK (1 << 22) | ||
220 | }; | ||
221 | |||
222 | struct cpl_pass_open_req6 { | ||
223 | WR_HDR; | ||
224 | union opcode_tid ot; | ||
225 | __be16 local_port; | ||
226 | __be16 peer_port; | ||
227 | __be64 local_ip_hi; | ||
228 | __be64 local_ip_lo; | ||
229 | __be64 peer_ip_hi; | ||
230 | __be64 peer_ip_lo; | ||
231 | __be64 opt0; | ||
232 | __be64 opt1; | ||
233 | }; | ||
234 | |||
235 | struct cpl_pass_open_rpl { | ||
236 | union opcode_tid ot; | ||
237 | u8 rsvd[3]; | ||
238 | u8 status; | ||
239 | }; | ||
240 | |||
241 | struct cpl_pass_accept_rpl { | ||
242 | WR_HDR; | ||
243 | union opcode_tid ot; | ||
244 | __be32 opt2; | ||
245 | #define RSS_QUEUE(x) ((x) << 0) | ||
246 | #define RSS_QUEUE_VALID (1 << 10) | ||
247 | #define RX_COALESCE_VALID(x) ((x) << 11) | ||
248 | #define RX_COALESCE(x) ((x) << 12) | ||
249 | #define TX_QUEUE(x) ((x) << 23) | ||
250 | #define RX_CHANNEL(x) ((x) << 26) | ||
251 | #define WND_SCALE_EN(x) ((x) << 28) | ||
252 | #define TSTAMPS_EN(x) ((x) << 29) | ||
253 | #define SACK_EN(x) ((x) << 30) | ||
254 | __be64 opt0; | ||
255 | }; | ||
256 | |||
257 | struct cpl_act_open_req { | ||
258 | WR_HDR; | ||
259 | union opcode_tid ot; | ||
260 | __be16 local_port; | ||
261 | __be16 peer_port; | ||
262 | __be32 local_ip; | ||
263 | __be32 peer_ip; | ||
264 | __be64 opt0; | ||
265 | __be32 params; | ||
266 | __be32 opt2; | ||
267 | }; | ||
268 | |||
269 | struct cpl_act_open_req6 { | ||
270 | WR_HDR; | ||
271 | union opcode_tid ot; | ||
272 | __be16 local_port; | ||
273 | __be16 peer_port; | ||
274 | __be64 local_ip_hi; | ||
275 | __be64 local_ip_lo; | ||
276 | __be64 peer_ip_hi; | ||
277 | __be64 peer_ip_lo; | ||
278 | __be64 opt0; | ||
279 | __be32 params; | ||
280 | __be32 opt2; | ||
281 | }; | ||
282 | |||
283 | struct cpl_act_open_rpl { | ||
284 | union opcode_tid ot; | ||
285 | __be32 atid_status; | ||
286 | #define GET_AOPEN_STATUS(x) ((x) & 0xff) | ||
287 | #define GET_AOPEN_ATID(x) (((x) >> 8) & 0xffffff) | ||
288 | }; | ||
289 | |||
290 | struct cpl_pass_establish { | ||
291 | union opcode_tid ot; | ||
292 | __be32 rsvd; | ||
293 | __be32 tos_stid; | ||
294 | #define GET_POPEN_TID(x) ((x) & 0xffffff) | ||
295 | #define GET_POPEN_TOS(x) (((x) >> 24) & 0xff) | ||
296 | __be16 mac_idx; | ||
297 | __be16 tcp_opt; | ||
298 | #define GET_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1) | ||
299 | #define GET_TCPOPT_SACK(x) (((x) >> 6) & 1) | ||
300 | #define GET_TCPOPT_TSTAMP(x) (((x) >> 7) & 1) | ||
301 | #define GET_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf) | ||
302 | #define GET_TCPOPT_MSS(x) (((x) >> 12) & 0xf) | ||
303 | __be32 snd_isn; | ||
304 | __be32 rcv_isn; | ||
305 | }; | ||
306 | |||
307 | struct cpl_act_establish { | ||
308 | union opcode_tid ot; | ||
309 | __be32 rsvd; | ||
310 | __be32 tos_atid; | ||
311 | __be16 mac_idx; | ||
312 | __be16 tcp_opt; | ||
313 | __be32 snd_isn; | ||
314 | __be32 rcv_isn; | ||
315 | }; | ||
316 | |||
317 | struct cpl_get_tcb { | ||
318 | WR_HDR; | ||
319 | union opcode_tid ot; | ||
320 | __be16 reply_ctrl; | ||
321 | #define QUEUENO(x) ((x) << 0) | ||
322 | #define REPLY_CHAN(x) ((x) << 14) | ||
323 | #define NO_REPLY(x) ((x) << 15) | ||
324 | __be16 cookie; | ||
325 | }; | ||
326 | |||
327 | struct cpl_set_tcb_field { | ||
328 | WR_HDR; | ||
329 | union opcode_tid ot; | ||
330 | __be16 reply_ctrl; | ||
331 | __be16 word_cookie; | ||
332 | #define TCB_WORD(x) ((x) << 0) | ||
333 | #define TCB_COOKIE(x) ((x) << 5) | ||
334 | __be64 mask; | ||
335 | __be64 val; | ||
336 | }; | ||
337 | |||
338 | struct cpl_set_tcb_rpl { | ||
339 | union opcode_tid ot; | ||
340 | __be16 rsvd; | ||
341 | u8 cookie; | ||
342 | u8 status; | ||
343 | __be64 oldval; | ||
344 | }; | ||
345 | |||
346 | struct cpl_close_con_req { | ||
347 | WR_HDR; | ||
348 | union opcode_tid ot; | ||
349 | __be32 rsvd; | ||
350 | }; | ||
351 | |||
352 | struct cpl_close_con_rpl { | ||
353 | union opcode_tid ot; | ||
354 | u8 rsvd[3]; | ||
355 | u8 status; | ||
356 | __be32 snd_nxt; | ||
357 | __be32 rcv_nxt; | ||
358 | }; | ||
359 | |||
360 | struct cpl_close_listsvr_req { | ||
361 | WR_HDR; | ||
362 | union opcode_tid ot; | ||
363 | __be16 reply_ctrl; | ||
364 | #define LISTSVR_IPV6 (1 << 14) | ||
365 | __be16 rsvd; | ||
366 | }; | ||
367 | |||
368 | struct cpl_close_listsvr_rpl { | ||
369 | union opcode_tid ot; | ||
370 | u8 rsvd[3]; | ||
371 | u8 status; | ||
372 | }; | ||
373 | |||
374 | struct cpl_abort_req_rss { | ||
375 | union opcode_tid ot; | ||
376 | u8 rsvd[3]; | ||
377 | u8 status; | ||
378 | }; | ||
379 | |||
380 | struct cpl_abort_req { | ||
381 | WR_HDR; | ||
382 | union opcode_tid ot; | ||
383 | __be32 rsvd0; | ||
384 | u8 rsvd1; | ||
385 | u8 cmd; | ||
386 | u8 rsvd2[6]; | ||
387 | }; | ||
388 | |||
389 | struct cpl_abort_rpl_rss { | ||
390 | union opcode_tid ot; | ||
391 | u8 rsvd[3]; | ||
392 | u8 status; | ||
393 | }; | ||
394 | |||
395 | struct cpl_abort_rpl { | ||
396 | WR_HDR; | ||
397 | union opcode_tid ot; | ||
398 | __be32 rsvd0; | ||
399 | u8 rsvd1; | ||
400 | u8 cmd; | ||
401 | u8 rsvd2[6]; | ||
402 | }; | ||
403 | |||
404 | struct cpl_peer_close { | ||
405 | union opcode_tid ot; | ||
406 | __be32 rcv_nxt; | ||
407 | }; | ||
408 | |||
409 | struct cpl_tid_release { | ||
410 | WR_HDR; | ||
411 | union opcode_tid ot; | ||
412 | __be32 rsvd; | ||
413 | }; | ||
414 | |||
415 | struct cpl_tx_pkt_core { | ||
416 | __be32 ctrl0; | ||
417 | #define TXPKT_VF(x) ((x) << 0) | ||
418 | #define TXPKT_PF(x) ((x) << 8) | ||
419 | #define TXPKT_VF_VLD (1 << 11) | ||
420 | #define TXPKT_OVLAN_IDX(x) ((x) << 12) | ||
421 | #define TXPKT_INTF(x) ((x) << 16) | ||
422 | #define TXPKT_INS_OVLAN (1 << 21) | ||
423 | #define TXPKT_OPCODE(x) ((x) << 24) | ||
424 | __be16 pack; | ||
425 | __be16 len; | ||
426 | __be64 ctrl1; | ||
427 | #define TXPKT_CSUM_END(x) ((x) << 12) | ||
428 | #define TXPKT_CSUM_START(x) ((x) << 20) | ||
429 | #define TXPKT_IPHDR_LEN(x) ((u64)(x) << 20) | ||
430 | #define TXPKT_CSUM_LOC(x) ((u64)(x) << 30) | ||
431 | #define TXPKT_ETHHDR_LEN(x) ((u64)(x) << 34) | ||
432 | #define TXPKT_CSUM_TYPE(x) ((u64)(x) << 40) | ||
433 | #define TXPKT_VLAN(x) ((u64)(x) << 44) | ||
434 | #define TXPKT_VLAN_VLD (1ULL << 60) | ||
435 | #define TXPKT_IPCSUM_DIS (1ULL << 62) | ||
436 | #define TXPKT_L4CSUM_DIS (1ULL << 63) | ||
437 | }; | ||
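Sketch: filling the two control words for an ordinary TX packet with TCP/IPv4 checksum offload (the interface/PF numbers and the 20-byte IP header length are illustrative; real code derives the offsets from the actual frame layout):

	static void example_fill_tx_cpl(struct cpl_tx_pkt_core *cpl,
					unsigned int port, unsigned int pktlen)
	{
		cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
				   TXPKT_INTF(port) | TXPKT_PF(0));
		cpl->pack = htons(0);
		cpl->len = htons(pktlen);
		/* let HW insert the IP and TCP checksums */
		cpl->ctrl1 = cpu_to_be64(TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) |
					 TXPKT_IPHDR_LEN(20));
	}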
438 | |||
439 | struct cpl_tx_pkt { | ||
440 | WR_HDR; | ||
441 | struct cpl_tx_pkt_core c; | ||
442 | }; | ||
443 | |||
444 | #define cpl_tx_pkt_xt cpl_tx_pkt | ||
445 | |||
446 | struct cpl_tx_pkt_lso { | ||
447 | WR_HDR; | ||
448 | __be32 lso_ctrl; | ||
449 | #define LSO_TCPHDR_LEN(x) ((x) << 0) | ||
450 | #define LSO_IPHDR_LEN(x) ((x) << 4) | ||
451 | #define LSO_ETHHDR_LEN(x) ((x) << 16) | ||
452 | #define LSO_IPV6(x) ((x) << 20) | ||
453 | #define LSO_LAST_SLICE (1 << 22) | ||
454 | #define LSO_FIRST_SLICE (1 << 23) | ||
455 | #define LSO_OPCODE(x) ((x) << 24) | ||
456 | __be16 ipid_ofst; | ||
457 | __be16 mss; | ||
458 | __be32 seqno_offset; | ||
459 | __be32 len; | ||
460 | /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */ | ||
461 | }; | ||
462 | |||
463 | struct cpl_iscsi_hdr { | ||
464 | union opcode_tid ot; | ||
465 | __be16 pdu_len_ddp; | ||
466 | #define ISCSI_PDU_LEN(x) ((x) & 0x7FFF) | ||
467 | #define ISCSI_DDP (1 << 15) | ||
468 | __be16 len; | ||
469 | __be32 seq; | ||
470 | __be16 urg; | ||
471 | u8 rsvd; | ||
472 | u8 status; | ||
473 | }; | ||
474 | |||
475 | struct cpl_rx_data { | ||
476 | union opcode_tid ot; | ||
477 | __be16 rsvd; | ||
478 | __be16 len; | ||
479 | __be32 seq; | ||
480 | __be16 urg; | ||
481 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
482 | u8 dack_mode:2; | ||
483 | u8 psh:1; | ||
484 | u8 heartbeat:1; | ||
485 | u8 ddp_off:1; | ||
486 | u8 :3; | ||
487 | #else | ||
488 | u8 :3; | ||
489 | u8 ddp_off:1; | ||
490 | u8 heartbeat:1; | ||
491 | u8 psh:1; | ||
492 | u8 dack_mode:2; | ||
493 | #endif | ||
494 | u8 status; | ||
495 | }; | ||
496 | |||
497 | struct cpl_rx_data_ack { | ||
498 | WR_HDR; | ||
499 | union opcode_tid ot; | ||
500 | __be32 credit_dack; | ||
501 | #define RX_CREDITS(x) ((x) << 0) | ||
502 | #define RX_FORCE_ACK(x) ((x) << 28) | ||
503 | }; | ||
504 | |||
505 | struct cpl_rx_pkt { | ||
506 | u8 opcode; | ||
507 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
508 | u8 iff:4; | ||
509 | u8 csum_calc:1; | ||
510 | u8 ipmi_pkt:1; | ||
511 | u8 vlan_ex:1; | ||
512 | u8 ip_frag:1; | ||
513 | #else | ||
514 | u8 ip_frag:1; | ||
515 | u8 vlan_ex:1; | ||
516 | u8 ipmi_pkt:1; | ||
517 | u8 csum_calc:1; | ||
518 | u8 iff:4; | ||
519 | #endif | ||
520 | __be16 csum; | ||
521 | __be16 vlan; | ||
522 | __be16 len; | ||
523 | __be32 l2info; | ||
524 | #define RXF_UDP (1 << 22) | ||
525 | #define RXF_TCP (1 << 23) | ||
526 | __be16 hdr_len; | ||
527 | __be16 err_vec; | ||
528 | }; | ||
529 | |||
530 | struct cpl_trace_pkt { | ||
531 | u8 opcode; | ||
532 | u8 intf; | ||
533 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
534 | u8 runt:4; | ||
535 | u8 filter_hit:4; | ||
536 | u8 :6; | ||
537 | u8 err:1; | ||
538 | u8 trunc:1; | ||
539 | #else | ||
540 | u8 filter_hit:4; | ||
541 | u8 runt:4; | ||
542 | u8 trunc:1; | ||
543 | u8 err:1; | ||
544 | u8 :6; | ||
545 | #endif | ||
546 | __be16 rsvd; | ||
547 | __be16 len; | ||
548 | __be64 tstamp; | ||
549 | }; | ||
550 | |||
551 | struct cpl_l2t_write_req { | ||
552 | WR_HDR; | ||
553 | union opcode_tid ot; | ||
554 | __be16 params; | ||
555 | #define L2T_W_INFO(x) ((x) << 2) | ||
556 | #define L2T_W_PORT(x) ((x) << 8) | ||
557 | #define L2T_W_NOREPLY(x) ((x) << 15) | ||
558 | __be16 l2t_idx; | ||
559 | __be16 vlan; | ||
560 | u8 dst_mac[6]; | ||
561 | }; | ||
562 | |||
563 | struct cpl_l2t_write_rpl { | ||
564 | union opcode_tid ot; | ||
565 | u8 status; | ||
566 | u8 rsvd[3]; | ||
567 | }; | ||
568 | |||
569 | struct cpl_rdma_terminate { | ||
570 | union opcode_tid ot; | ||
571 | __be16 rsvd; | ||
572 | __be16 len; | ||
573 | }; | ||
574 | |||
575 | struct cpl_sge_egr_update { | ||
576 | __be32 opcode_qid; | ||
577 | #define EGR_QID(x) ((x) & 0x1FFFF) | ||
578 | __be16 cidx; | ||
579 | __be16 pidx; | ||
580 | }; | ||
581 | |||
582 | struct cpl_fw4_pld { | ||
583 | u8 opcode; | ||
584 | u8 rsvd0[3]; | ||
585 | u8 type; | ||
586 | u8 rsvd1; | ||
587 | __be16 len; | ||
588 | __be64 data; | ||
589 | __be64 rsvd2; | ||
590 | }; | ||
591 | |||
592 | struct cpl_fw6_pld { | ||
593 | u8 opcode; | ||
594 | u8 rsvd[5]; | ||
595 | __be16 len; | ||
596 | __be64 data[4]; | ||
597 | }; | ||
598 | |||
599 | struct cpl_fw4_msg { | ||
600 | u8 opcode; | ||
601 | u8 type; | ||
602 | __be16 rsvd0; | ||
603 | __be32 rsvd1; | ||
604 | __be64 data[2]; | ||
605 | }; | ||
606 | |||
607 | struct cpl_fw4_ack { | ||
608 | union opcode_tid ot; | ||
609 | u8 credits; | ||
610 | u8 rsvd0[2]; | ||
611 | u8 seq_vld; | ||
612 | __be32 snd_nxt; | ||
613 | __be32 snd_una; | ||
614 | __be64 rsvd1; | ||
615 | }; | ||
616 | |||
617 | struct cpl_fw6_msg { | ||
618 | u8 opcode; | ||
619 | u8 type; | ||
620 | __be16 rsvd0; | ||
621 | __be32 rsvd1; | ||
622 | __be64 data[4]; | ||
623 | }; | ||
624 | |||
625 | enum { | ||
626 | ULP_TX_MEM_READ = 2, | ||
627 | ULP_TX_MEM_WRITE = 3, | ||
628 | ULP_TX_PKT = 4 | ||
629 | }; | ||
630 | |||
631 | enum { | ||
632 | ULP_TX_SC_NOOP = 0x80, | ||
633 | ULP_TX_SC_IMM = 0x81, | ||
634 | ULP_TX_SC_DSGL = 0x82, | ||
635 | ULP_TX_SC_ISGL = 0x83 | ||
636 | }; | ||
637 | |||
638 | struct ulptx_sge_pair { | ||
639 | __be32 len[2]; | ||
640 | __be64 addr[2]; | ||
641 | }; | ||
642 | |||
643 | struct ulptx_sgl { | ||
644 | __be32 cmd_nsge; | ||
645 | #define ULPTX_CMD(x) ((x) << 24) | ||
646 | #define ULPTX_NSGE(x) ((x) << 0) | ||
647 | __be32 len0; | ||
648 | __be64 addr0; | ||
649 | struct ulptx_sge_pair sge[0]; | ||
650 | }; | ||
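Sketch: heading a direct scatter/gather list with the ULPTX_* macros above; counting addr0 in nsge is an assumption of this sketch.

	static void example_init_sgl(struct ulptx_sgl *sgl, dma_addr_t addr,
				     unsigned int len, unsigned int nsge)
	{
		sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) |
				      ULPTX_NSGE(nsge));
		sgl->len0 = htonl(len);
		sgl->addr0 = cpu_to_be64(addr);
		/* further len/addr pairs go into sgl->sge[] */
	}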
651 | |||
652 | struct ulp_mem_io { | ||
653 | WR_HDR; | ||
654 | __be32 cmd; | ||
655 | #define ULP_MEMIO_ORDER(x) ((x) << 23) | ||
656 | __be32 len16; /* command length */ | ||
657 | __be32 dlen; /* data length in 32-byte units */ | ||
658 | #define ULP_MEMIO_DATA_LEN(x) ((x) << 0) | ||
659 | __be32 lock_addr; | ||
660 | #define ULP_MEMIO_ADDR(x) ((x) << 0) | ||
661 | #define ULP_MEMIO_LOCK(x) ((x) << 31) | ||
662 | }; | ||
663 | |||
664 | #endif /* __T4_MSG_H */ | ||
diff --git a/drivers/net/cxgb4/t4_regs.h b/drivers/net/cxgb4/t4_regs.h new file mode 100644 index 000000000000..5ed56483cbc2 --- /dev/null +++ b/drivers/net/cxgb4/t4_regs.h | |||
@@ -0,0 +1,878 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio T4 Ethernet driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2010 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #ifndef __T4_REGS_H | ||
36 | #define __T4_REGS_H | ||
37 | |||
38 | #define MYPF_BASE 0x1b000 | ||
39 | #define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr)) | ||
40 | |||
41 | #define PF0_BASE 0x1e000 | ||
42 | #define PF0_REG(reg_addr) (PF0_BASE + (reg_addr)) | ||
43 | |||
44 | #define PF_STRIDE 0x400 | ||
45 | #define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE) | ||
46 | #define PF_REG(idx, reg) (PF_BASE(idx) + (reg)) | ||
47 | |||
48 | #define MYPORT_BASE 0x1c000 | ||
49 | #define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr)) | ||
50 | |||
51 | #define PORT0_BASE 0x20000 | ||
52 | #define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr)) | ||
53 | |||
54 | #define PORT_STRIDE 0x2000 | ||
55 | #define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE) | ||
56 | #define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg)) | ||
57 | |||
58 | #define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR) | ||
59 | #define EDC_REG(reg, idx) ((reg) + EDC_STRIDE * (idx)) | ||
60 | |||
61 | #define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8) | ||
62 | #define PCIE_MAILBOX_REG(reg_addr, idx) ((reg_addr) + (idx) * 8) | ||
63 | #define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) | ||
64 | #define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) | ||
65 | |||
66 | #define SGE_PF_KDOORBELL 0x0 | ||
67 | #define QID_MASK 0xffff8000U | ||
68 | #define QID_SHIFT 15 | ||
69 | #define QID(x) ((x) << QID_SHIFT) | ||
70 | #define DBPRIO 0x00004000U | ||
71 | #define PIDX_MASK 0x00003fffU | ||
72 | #define PIDX_SHIFT 0 | ||
73 | #define PIDX(x) ((x) << PIDX_SHIFT) | ||
74 | |||
75 | #define SGE_PF_GTS 0x4 | ||
76 | #define INGRESSQID_MASK 0xffff0000U | ||
77 | #define INGRESSQID_SHIFT 16 | ||
78 | #define INGRESSQID(x) ((x) << INGRESSQID_SHIFT) | ||
79 | #define TIMERREG_MASK 0x0000e000U | ||
80 | #define TIMERREG_SHIFT 13 | ||
81 | #define TIMERREG(x) ((x) << TIMERREG_SHIFT) | ||
82 | #define SEINTARM_MASK 0x00001000U | ||
83 | #define SEINTARM_SHIFT 12 | ||
84 | #define SEINTARM(x) ((x) << SEINTARM_SHIFT) | ||
85 | #define CIDXINC_MASK 0x00000fffU | ||
86 | #define CIDXINC_SHIFT 0 | ||
87 | #define CIDXINC(x) ((x) << CIDXINC_SHIFT) | ||
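Sketch: the two per-PF SGE doorbells these layouts describe (t4_write_reg() is the driver's register accessor from cxgb4.h; QINTR_TIMER_IDX() comes from t4_hw.h above). A TX path bumps the producer index; an interrupt handler returns credits and re-arms with a holdoff timer.

	static void example_tx_db(struct adapter *adap, unsigned int qid,
				  unsigned int ninc)
	{
		/* @ninc new descriptors are ready on egress queue @qid */
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
			     QID(qid) | PIDX(ninc));
	}

	static void example_iq_rearm(struct adapter *adap, unsigned int iqid,
				     unsigned int credits, unsigned int tidx)
	{
		/* return @credits entries and re-arm with holdoff timer @tidx */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), INGRESSQID(iqid) |
			     CIDXINC(credits) | SEINTARM(QINTR_TIMER_IDX(tidx)));
	}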
88 | |||
89 | #define SGE_CONTROL 0x1008 | ||
90 | #define DCASYSTYPE 0x00080000U | ||
91 | #define RXPKTCPLMODE 0x00040000U | ||
92 | #define EGRSTATUSPAGESIZE 0x00020000U | ||
93 | #define PKTSHIFT_MASK 0x00001c00U | ||
94 | #define PKTSHIFT_SHIFT 10 | ||
95 | #define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT) | ||
96 | #define INGPCIEBOUNDARY_MASK 0x00000380U | ||
97 | #define INGPCIEBOUNDARY_SHIFT 7 | ||
98 | #define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT) | ||
99 | #define INGPADBOUNDARY_MASK 0x00000070U | ||
100 | #define INGPADBOUNDARY_SHIFT 4 | ||
101 | #define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT) | ||
102 | #define EGRPCIEBOUNDARY_MASK 0x0000000eU | ||
103 | #define EGRPCIEBOUNDARY_SHIFT 1 | ||
104 | #define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT) | ||
105 | #define GLOBALENABLE 0x00000001U | ||
106 | |||
107 | #define SGE_HOST_PAGE_SIZE 0x100c | ||
108 | #define HOSTPAGESIZEPF0_MASK 0x0000000fU | ||
109 | #define HOSTPAGESIZEPF0_SHIFT 0 | ||
110 | #define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT) | ||
111 | |||
112 | #define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010 | ||
113 | #define QUEUESPERPAGEPF0_MASK 0x0000000fU | ||
114 | #define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK) | ||
115 | |||
116 | #define SGE_INT_CAUSE1 0x1024 | ||
117 | #define SGE_INT_CAUSE2 0x1030 | ||
118 | #define SGE_INT_CAUSE3 0x103c | ||
119 | #define ERR_FLM_DBP 0x80000000U | ||
120 | #define ERR_FLM_IDMA1 0x40000000U | ||
121 | #define ERR_FLM_IDMA0 0x20000000U | ||
122 | #define ERR_FLM_HINT 0x10000000U | ||
123 | #define ERR_PCIE_ERROR3 0x08000000U | ||
124 | #define ERR_PCIE_ERROR2 0x04000000U | ||
125 | #define ERR_PCIE_ERROR1 0x02000000U | ||
126 | #define ERR_PCIE_ERROR0 0x01000000U | ||
127 | #define ERR_TIMER_ABOVE_MAX_QID 0x00800000U | ||
128 | #define ERR_CPL_EXCEED_IQE_SIZE 0x00400000U | ||
129 | #define ERR_INVALID_CIDX_INC 0x00200000U | ||
130 | #define ERR_ITP_TIME_PAUSED 0x00100000U | ||
131 | #define ERR_CPL_OPCODE_0 0x00080000U | ||
132 | #define ERR_DROPPED_DB 0x00040000U | ||
133 | #define ERR_DATA_CPL_ON_HIGH_QID1 0x00020000U | ||
134 | #define ERR_DATA_CPL_ON_HIGH_QID0 0x00010000U | ||
135 | #define ERR_BAD_DB_PIDX3 0x00008000U | ||
136 | #define ERR_BAD_DB_PIDX2 0x00004000U | ||
137 | #define ERR_BAD_DB_PIDX1 0x00002000U | ||
138 | #define ERR_BAD_DB_PIDX0 0x00001000U | ||
139 | #define ERR_ING_PCIE_CHAN 0x00000800U | ||
140 | #define ERR_ING_CTXT_PRIO 0x00000400U | ||
141 | #define ERR_EGR_CTXT_PRIO 0x00000200U | ||
142 | #define DBFIFO_HP_INT 0x00000100U | ||
143 | #define DBFIFO_LP_INT 0x00000080U | ||
144 | #define REG_ADDRESS_ERR 0x00000040U | ||
145 | #define INGRESS_SIZE_ERR 0x00000020U | ||
146 | #define EGRESS_SIZE_ERR 0x00000010U | ||
147 | #define ERR_INV_CTXT3 0x00000008U | ||
148 | #define ERR_INV_CTXT2 0x00000004U | ||
149 | #define ERR_INV_CTXT1 0x00000002U | ||
150 | #define ERR_INV_CTXT0 0x00000001U | ||
151 | |||
152 | #define SGE_INT_ENABLE3 0x1040 | ||
153 | #define SGE_FL_BUFFER_SIZE0 0x1044 | ||
154 | #define SGE_FL_BUFFER_SIZE1 0x1048 | ||
155 | #define SGE_INGRESS_RX_THRESHOLD 0x10a0 | ||
156 | #define THRESHOLD_0_MASK 0x3f000000U | ||
157 | #define THRESHOLD_0_SHIFT 24 | ||
158 | #define THRESHOLD_0(x) ((x) << THRESHOLD_0_SHIFT) | ||
159 | #define THRESHOLD_0_GET(x) (((x) & THRESHOLD_0_MASK) >> THRESHOLD_0_SHIFT) | ||
160 | #define THRESHOLD_1_MASK 0x003f0000U | ||
161 | #define THRESHOLD_1_SHIFT 16 | ||
162 | #define THRESHOLD_1(x) ((x) << THRESHOLD_1_SHIFT) | ||
163 | #define THRESHOLD_1_GET(x) (((x) & THRESHOLD_1_MASK) >> THRESHOLD_1_SHIFT) | ||
164 | #define THRESHOLD_2_MASK 0x00003f00U | ||
165 | #define THRESHOLD_2_SHIFT 8 | ||
166 | #define THRESHOLD_2(x) ((x) << THRESHOLD_2_SHIFT) | ||
167 | #define THRESHOLD_2_GET(x) (((x) & THRESHOLD_2_MASK) >> THRESHOLD_2_SHIFT) | ||
168 | #define THRESHOLD_3_MASK 0x0000003fU | ||
169 | #define THRESHOLD_3_SHIFT 0 | ||
170 | #define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT) | ||
171 | #define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT) | ||
172 | |||
173 | #define SGE_TIMER_VALUE_0_AND_1 0x10b8 | ||
174 | #define TIMERVALUE0_MASK 0xffff0000U | ||
175 | #define TIMERVALUE0_SHIFT 16 | ||
176 | #define TIMERVALUE0(x) ((x) << TIMERVALUE0_SHIFT) | ||
177 | #define TIMERVALUE0_GET(x) (((x) & TIMERVALUE0_MASK) >> TIMERVALUE0_SHIFT) | ||
178 | #define TIMERVALUE1_MASK 0x0000ffffU | ||
179 | #define TIMERVALUE1_SHIFT 0 | ||
180 | #define TIMERVALUE1(x) ((x) << TIMERVALUE1_SHIFT) | ||
181 | #define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT) | ||
182 | |||
183 | #define SGE_TIMER_VALUE_2_AND_3 0x10bc | ||
184 | #define SGE_TIMER_VALUE_4_AND_5 0x10c0 | ||
185 | #define SGE_DEBUG_INDEX 0x10cc | ||
186 | #define SGE_DEBUG_DATA_HIGH 0x10d0 | ||
187 | #define SGE_DEBUG_DATA_LOW 0x10d4 | ||
188 | #define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 | ||
189 | |||
190 | #define PCIE_PF_CLI 0x44 | ||
191 | #define PCIE_INT_CAUSE 0x3004 | ||
192 | #define UNXSPLCPLERR 0x20000000U | ||
193 | #define PCIEPINT 0x10000000U | ||
194 | #define PCIESINT 0x08000000U | ||
195 | #define RPLPERR 0x04000000U | ||
196 | #define RXWRPERR 0x02000000U | ||
197 | #define RXCPLPERR 0x01000000U | ||
198 | #define PIOTAGPERR 0x00800000U | ||
199 | #define MATAGPERR 0x00400000U | ||
200 | #define INTXCLRPERR 0x00200000U | ||
201 | #define FIDPERR 0x00100000U | ||
202 | #define CFGSNPPERR 0x00080000U | ||
203 | #define HRSPPERR 0x00040000U | ||
204 | #define HREQPERR 0x00020000U | ||
205 | #define HCNTPERR 0x00010000U | ||
206 | #define DRSPPERR 0x00008000U | ||
207 | #define DREQPERR 0x00004000U | ||
208 | #define DCNTPERR 0x00002000U | ||
209 | #define CRSPPERR 0x00001000U | ||
210 | #define CREQPERR 0x00000800U | ||
211 | #define CCNTPERR 0x00000400U | ||
212 | #define TARTAGPERR 0x00000200U | ||
213 | #define PIOREQPERR 0x00000100U | ||
214 | #define PIOCPLPERR 0x00000080U | ||
215 | #define MSIXDIPERR 0x00000040U | ||
216 | #define MSIXDATAPERR 0x00000020U | ||
217 | #define MSIXADDRHPERR 0x00000010U | ||
218 | #define MSIXADDRLPERR 0x00000008U | ||
219 | #define MSIDATAPERR 0x00000004U | ||
220 | #define MSIADDRHPERR 0x00000002U | ||
221 | #define MSIADDRLPERR 0x00000001U | ||
222 | |||
223 | #define PCIE_NONFAT_ERR 0x3010 | ||
224 | #define PCIE_MEM_ACCESS_BASE_WIN 0x3068 | ||
225 | #define PCIEOFST_MASK 0xfffffc00U | ||
226 | #define BIR_MASK 0x00000300U | ||
227 | #define BIR_SHIFT 8 | ||
228 | #define BIR(x) ((x) << BIR_SHIFT) | ||
229 | #define WINDOW_MASK 0x000000ffU | ||
230 | #define WINDOW_SHIFT 0 | ||
231 | #define WINDOW(x) ((x) << WINDOW_SHIFT) | ||
232 | |||
233 | #define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908 | ||
234 | #define RNPP 0x80000000U | ||
235 | #define RPCP 0x20000000U | ||
236 | #define RCIP 0x08000000U | ||
237 | #define RCCP 0x04000000U | ||
238 | #define RFTP 0x00800000U | ||
239 | #define PTRP 0x00100000U | ||
240 | |||
241 | #define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS 0x59a4 | ||
242 | #define TPCP 0x40000000U | ||
243 | #define TNPP 0x20000000U | ||
244 | #define TFTP 0x10000000U | ||
245 | #define TCAP 0x08000000U | ||
246 | #define TCIP 0x04000000U | ||
247 | #define RCAP 0x02000000U | ||
248 | #define PLUP 0x00800000U | ||
249 | #define PLDN 0x00400000U | ||
250 | #define OTDD 0x00200000U | ||
251 | #define GTRP 0x00100000U | ||
252 | #define RDPE 0x00040000U | ||
253 | #define TDCE 0x00020000U | ||
254 | #define TDUE 0x00010000U | ||
255 | |||
256 | #define MC_INT_CAUSE 0x7518 | ||
257 | #define ECC_UE_INT_CAUSE 0x00000004U | ||
258 | #define ECC_CE_INT_CAUSE 0x00000002U | ||
259 | #define PERR_INT_CAUSE 0x00000001U | ||
260 | |||
261 | #define MC_ECC_STATUS 0x751c | ||
262 | #define ECC_CECNT_MASK 0xffff0000U | ||
263 | #define ECC_CECNT_SHIFT 16 | ||
264 | #define ECC_CECNT(x) ((x) << ECC_CECNT_SHIFT) | ||
265 | #define ECC_CECNT_GET(x) (((x) & ECC_CECNT_MASK) >> ECC_CECNT_SHIFT) | ||
266 | #define ECC_UECNT_MASK 0x0000ffffU | ||
267 | #define ECC_UECNT_SHIFT 0 | ||
268 | #define ECC_UECNT(x) ((x) << ECC_UECNT_SHIFT) | ||
269 | #define ECC_UECNT_GET(x) (((x) & ECC_UECNT_MASK) >> ECC_UECNT_SHIFT) | ||
270 | |||
271 | #define MC_BIST_CMD 0x7600 | ||
272 | #define START_BIST 0x80000000U | ||
273 | #define BIST_CMD_GAP_MASK 0x0000ff00U | ||
274 | #define BIST_CMD_GAP_SHIFT 8 | ||
275 | #define BIST_CMD_GAP(x) ((x) << BIST_CMD_GAP_SHIFT) | ||
276 | #define BIST_OPCODE_MASK 0x00000003U | ||
277 | #define BIST_OPCODE_SHIFT 0 | ||
278 | #define BIST_OPCODE(x) ((x) << BIST_OPCODE_SHIFT) | ||
279 | |||
280 | #define MC_BIST_CMD_ADDR 0x7604 | ||
281 | #define MC_BIST_CMD_LEN 0x7608 | ||
282 | #define MC_BIST_DATA_PATTERN 0x760c | ||
283 | #define BIST_DATA_TYPE_MASK 0x0000000fU | ||
284 | #define BIST_DATA_TYPE_SHIFT 0 | ||
285 | #define BIST_DATA_TYPE(x) ((x) << BIST_DATA_TYPE_SHIFT) | ||
286 | |||
287 | #define MC_BIST_STATUS_RDATA 0x7688 | ||
288 | |||
289 | #define MA_EXT_MEMORY_BAR 0x77c8 | ||
290 | #define EXT_MEM_SIZE_MASK 0x00000fffU | ||
291 | #define EXT_MEM_SIZE_SHIFT 0 | ||
292 | #define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT) | ||
293 | |||
294 | #define MA_TARGET_MEM_ENABLE 0x77d8 | ||
295 | #define EXT_MEM_ENABLE 0x00000004U | ||
296 | #define EDRAM1_ENABLE 0x00000002U | ||
297 | #define EDRAM0_ENABLE 0x00000001U | ||
298 | |||
299 | #define MA_INT_CAUSE 0x77e0 | ||
300 | #define MEM_PERR_INT_CAUSE 0x00000002U | ||
301 | #define MEM_WRAP_INT_CAUSE 0x00000001U | ||
302 | |||
303 | #define MA_INT_WRAP_STATUS 0x77e4 | ||
304 | #define MEM_WRAP_ADDRESS_MASK 0xfffffff0U | ||
305 | #define MEM_WRAP_ADDRESS_SHIFT 4 | ||
306 | #define MEM_WRAP_ADDRESS_GET(x) (((x) & MEM_WRAP_ADDRESS_MASK) >> MEM_WRAP_ADDRESS_SHIFT) | ||
307 | #define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU | ||
308 | #define MEM_WRAP_CLIENT_NUM_SHIFT 0 | ||
309 | #define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT) | ||
310 | |||
311 | #define MA_PARITY_ERROR_STATUS 0x77f4 | ||
312 | |||
313 | #define EDC_0_BASE_ADDR 0x7900 | ||
314 | |||
315 | #define EDC_BIST_CMD 0x7904 | ||
316 | #define EDC_BIST_CMD_ADDR 0x7908 | ||
317 | #define EDC_BIST_CMD_LEN 0x790c | ||
318 | #define EDC_BIST_DATA_PATTERN 0x7910 | ||
319 | #define EDC_BIST_STATUS_RDATA 0x7928 | ||
320 | #define EDC_INT_CAUSE 0x7978 | ||
321 | #define ECC_UE_PAR 0x00000020U | ||
322 | #define ECC_CE_PAR 0x00000010U | ||
323 | #define PERR_PAR_CAUSE 0x00000008U | ||
324 | |||
325 | #define EDC_ECC_STATUS 0x797c | ||
326 | |||
327 | #define EDC_1_BASE_ADDR 0x7980 | ||
328 | |||
329 | #define CIM_PF_MAILBOX_DATA 0x240 | ||
330 | #define CIM_PF_MAILBOX_CTRL 0x280 | ||
331 | #define MBMSGVALID 0x00000008U | ||
332 | #define MBINTREQ 0x00000004U | ||
333 | #define MBOWNER_MASK 0x00000003U | ||
334 | #define MBOWNER_SHIFT 0 | ||
335 | #define MBOWNER(x) ((x) << MBOWNER_SHIFT) | ||
336 | #define MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT) | ||
337 | |||
338 | #define CIM_PF_HOST_INT_CAUSE 0x28c | ||
339 | #define MBMSGRDYINT 0x00080000U | ||
340 | |||
341 | #define CIM_HOST_INT_CAUSE 0x7b2c | ||
342 | #define TIEQOUTPARERRINT 0x00100000U | ||
343 | #define TIEQINPARERRINT 0x00080000U | ||
344 | #define MBHOSTPARERR 0x00040000U | ||
345 | #define MBUPPARERR 0x00020000U | ||
346 | #define IBQPARERR 0x0001f800U | ||
347 | #define IBQTP0PARERR 0x00010000U | ||
348 | #define IBQTP1PARERR 0x00008000U | ||
349 | #define IBQULPPARERR 0x00004000U | ||
350 | #define IBQSGELOPARERR 0x00002000U | ||
351 | #define IBQSGEHIPARERR 0x00001000U | ||
352 | #define IBQNCSIPARERR 0x00000800U | ||
353 | #define OBQPARERR 0x000007e0U | ||
354 | #define OBQULP0PARERR 0x00000400U | ||
355 | #define OBQULP1PARERR 0x00000200U | ||
356 | #define OBQULP2PARERR 0x00000100U | ||
357 | #define OBQULP3PARERR 0x00000080U | ||
358 | #define OBQSGEPARERR 0x00000040U | ||
359 | #define OBQNCSIPARERR 0x00000020U | ||
360 | #define PREFDROPINT 0x00000002U | ||
361 | #define UPACCNONZERO 0x00000001U | ||
362 | |||
363 | #define CIM_HOST_UPACC_INT_CAUSE 0x7b34 | ||
364 | #define EEPROMWRINT 0x40000000U | ||
365 | #define TIMEOUTMAINT 0x20000000U | ||
366 | #define TIMEOUTINT 0x10000000U | ||
367 | #define RSPOVRLOOKUPINT 0x08000000U | ||
368 | #define REQOVRLOOKUPINT 0x04000000U | ||
369 | #define BLKWRPLINT 0x02000000U | ||
370 | #define BLKRDPLINT 0x01000000U | ||
371 | #define SGLWRPLINT 0x00800000U | ||
372 | #define SGLRDPLINT 0x00400000U | ||
373 | #define BLKWRCTLINT 0x00200000U | ||
374 | #define BLKRDCTLINT 0x00100000U | ||
375 | #define SGLWRCTLINT 0x00080000U | ||
376 | #define SGLRDCTLINT 0x00040000U | ||
377 | #define BLKWREEPROMINT 0x00020000U | ||
378 | #define BLKRDEEPROMINT 0x00010000U | ||
379 | #define SGLWREEPROMINT 0x00008000U | ||
380 | #define SGLRDEEPROMINT 0x00004000U | ||
381 | #define BLKWRFLASHINT 0x00002000U | ||
382 | #define BLKRDFLASHINT 0x00001000U | ||
383 | #define SGLWRFLASHINT 0x00000800U | ||
384 | #define SGLRDFLASHINT 0x00000400U | ||
385 | #define BLKWRBOOTINT 0x00000200U | ||
386 | #define BLKRDBOOTINT 0x00000100U | ||
387 | #define SGLWRBOOTINT 0x00000080U | ||
388 | #define SGLRDBOOTINT 0x00000040U | ||
389 | #define ILLWRBEINT 0x00000020U | ||
390 | #define ILLRDBEINT 0x00000010U | ||
391 | #define ILLRDINT 0x00000008U | ||
392 | #define ILLWRINT 0x00000004U | ||
393 | #define ILLTRANSINT 0x00000002U | ||
394 | #define RSVDSPACEINT 0x00000001U | ||
395 | |||
396 | #define TP_OUT_CONFIG 0x7d04 | ||
397 | #define VLANEXTENABLE_MASK 0x0000f000U | ||
398 | #define VLANEXTENABLE_SHIFT 12 | ||
399 | |||
400 | #define TP_PARA_REG2 0x7d68 | ||
401 | #define MAXRXDATA_MASK 0xffff0000U | ||
402 | #define MAXRXDATA_SHIFT 16 | ||
403 | #define MAXRXDATA_GET(x) (((x) & MAXRXDATA_MASK) >> MAXRXDATA_SHIFT) | ||
404 | |||
405 | #define TP_TIMER_RESOLUTION 0x7d90 | ||
406 | #define TIMERRESOLUTION_MASK 0x00ff0000U | ||
407 | #define TIMERRESOLUTION_SHIFT 16 | ||
408 | #define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT) | ||
409 | |||
410 | #define TP_SHIFT_CNT 0x7dc0 | ||
411 | |||
412 | #define TP_CCTRL_TABLE 0x7ddc | ||
413 | #define TP_MTU_TABLE 0x7de4 | ||
414 | #define MTUINDEX_MASK 0xff000000U | ||
415 | #define MTUINDEX_SHIFT 24 | ||
416 | #define MTUINDEX(x) ((x) << MTUINDEX_SHIFT) | ||
417 | #define MTUWIDTH_MASK 0x000f0000U | ||
418 | #define MTUWIDTH_SHIFT 16 | ||
419 | #define MTUWIDTH(x) ((x) << MTUWIDTH_SHIFT) | ||
420 | #define MTUWIDTH_GET(x) (((x) & MTUWIDTH_MASK) >> MTUWIDTH_SHIFT) | ||
421 | #define MTUVALUE_MASK 0x00003fffU | ||
422 | #define MTUVALUE_SHIFT 0 | ||
423 | #define MTUVALUE(x) ((x) << MTUVALUE_SHIFT) | ||
424 | #define MTUVALUE_GET(x) (((x) & MTUVALUE_MASK) >> MTUVALUE_SHIFT) | ||
425 | |||
426 | #define TP_RSS_LKP_TABLE 0x7dec | ||
427 | #define LKPTBLROWVLD 0x80000000U | ||
428 | #define LKPTBLQUEUE1_MASK 0x000ffc00U | ||
429 | #define LKPTBLQUEUE1_SHIFT 10 | ||
430 | #define LKPTBLQUEUE1(x) ((x) << LKPTBLQUEUE1_SHIFT) | ||
431 | #define LKPTBLQUEUE1_GET(x) (((x) & LKPTBLQUEUE1_MASK) >> LKPTBLQUEUE1_SHIFT) | ||
432 | #define LKPTBLQUEUE0_MASK 0x000003ffU | ||
433 | #define LKPTBLQUEUE0_SHIFT 0 | ||
434 | #define LKPTBLQUEUE0(x) ((x) << LKPTBLQUEUE0_SHIFT) | ||
435 | #define LKPTBLQUEUE0_GET(x) (((x) & LKPTBLQUEUE0_MASK) >> LKPTBLQUEUE0_SHIFT) | ||
436 | |||
437 | #define TP_PIO_ADDR 0x7e40 | ||
438 | #define TP_PIO_DATA 0x7e44 | ||
439 | #define TP_MIB_INDEX 0x7e50 | ||
440 | #define TP_MIB_DATA 0x7e54 | ||
441 | #define TP_INT_CAUSE 0x7e74 | ||
442 | #define FLMTXFLSTEMPTY 0x40000000U | ||
443 | |||
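TP_PIO_ADDR/TP_PIO_DATA and TP_MIB_INDEX/TP_MIB_DATA are indirect-access pairs: software writes the target index into the address register, and the payload is then read from (or written to) the data register. TP_INGRESS_CONFIG just below is such an index, which is why it is a small 0x141 rather than a 0x7xxx direct offset. A hedged sketch of the read side:

    /* Indirect TP read: latch the index, then fetch the payload. */
    static u32 tp_pio_read(void __iomem *regs, u32 index)
    {
            writel(index, regs + TP_PIO_ADDR);
            return readl(regs + TP_PIO_DATA);
    }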
444 | #define TP_INGRESS_CONFIG 0x141 | ||
445 | #define VNIC 0x00000800U | ||
446 | #define CSUM_HAS_PSEUDO_HDR 0x00000400U | ||
447 | #define RM_OVLAN 0x00000200U | ||
448 | #define LOOKUPEVERYPKT 0x00000100U | ||
449 | |||
450 | #define TP_MIB_MAC_IN_ERR_0 0x0 | ||
451 | #define TP_MIB_TCP_OUT_RST 0xc | ||
452 | #define TP_MIB_TCP_IN_SEG_HI 0x10 | ||
453 | #define TP_MIB_TCP_IN_SEG_LO 0x11 | ||
454 | #define TP_MIB_TCP_OUT_SEG_HI 0x12 | ||
455 | #define TP_MIB_TCP_OUT_SEG_LO 0x13 | ||
456 | #define TP_MIB_TCP_RXT_SEG_HI 0x14 | ||
457 | #define TP_MIB_TCP_RXT_SEG_LO 0x15 | ||
458 | #define TP_MIB_TNL_CNG_DROP_0 0x18 | ||
459 | #define TP_MIB_TCP_V6IN_ERR_0 0x28 | ||
460 | #define TP_MIB_TCP_V6OUT_RST 0x2c | ||
461 | #define TP_MIB_OFD_ARP_DROP 0x36 | ||
462 | #define TP_MIB_TNL_DROP_0 0x44 | ||
463 | #define TP_MIB_OFD_VLN_DROP_0 0x58 | ||
464 | |||
465 | #define ULP_TX_INT_CAUSE 0x8dcc | ||
466 | #define PBL_BOUND_ERR_CH3 0x80000000U | ||
467 | #define PBL_BOUND_ERR_CH2 0x40000000U | ||
468 | #define PBL_BOUND_ERR_CH1 0x20000000U | ||
469 | #define PBL_BOUND_ERR_CH0 0x10000000U | ||
470 | |||
471 | #define PM_RX_INT_CAUSE 0x8fdc | ||
472 | #define ZERO_E_CMD_ERROR 0x00400000U | ||
473 | #define PMRX_FRAMING_ERROR 0x003ffff0U | ||
474 | #define OCSPI_PAR_ERROR 0x00000008U | ||
475 | #define DB_OPTIONS_PAR_ERROR 0x00000004U | ||
476 | #define IESPI_PAR_ERROR 0x00000002U | ||
477 | #define E_PCMD_PAR_ERROR 0x00000001U | ||
478 | |||
479 | #define PM_TX_INT_CAUSE 0x8ffc | ||
480 | #define PCMD_LEN_OVFL0 0x80000000U | ||
481 | #define PCMD_LEN_OVFL1 0x40000000U | ||
482 | #define PCMD_LEN_OVFL2 0x20000000U | ||
483 | #define ZERO_C_CMD_ERROR 0x10000000U | ||
484 | #define PMTX_FRAMING_ERROR 0x0ffffff0U | ||
485 | #define OESPI_PAR_ERROR 0x00000008U | ||
486 | #define ICSPI_PAR_ERROR 0x00000002U | ||
487 | #define C_PCMD_PAR_ERROR 0x00000001U | ||
488 | |||
489 | #define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400 | ||
490 | #define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404 | ||
491 | #define MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408 | ||
492 | #define MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c | ||
493 | #define MPS_PORT_STAT_TX_PORT_BCAST_L 0x410 | ||
494 | #define MPS_PORT_STAT_TX_PORT_BCAST_H 0x414 | ||
495 | #define MPS_PORT_STAT_TX_PORT_MCAST_L 0x418 | ||
496 | #define MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c | ||
497 | #define MPS_PORT_STAT_TX_PORT_UCAST_L 0x420 | ||
498 | #define MPS_PORT_STAT_TX_PORT_UCAST_H 0x424 | ||
499 | #define MPS_PORT_STAT_TX_PORT_ERROR_L 0x428 | ||
500 | #define MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c | ||
501 | #define MPS_PORT_STAT_TX_PORT_64B_L 0x430 | ||
502 | #define MPS_PORT_STAT_TX_PORT_64B_H 0x434 | ||
503 | #define MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438 | ||
504 | #define MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c | ||
505 | #define MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440 | ||
506 | #define MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444 | ||
507 | #define MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448 | ||
508 | #define MPS_PORT_STAT_TX_PORT_256B_511B_H 0x44c | ||
509 | #define MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450 | ||
510 | #define MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454 | ||
511 | #define MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458 | ||
512 | #define MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c | ||
513 | #define MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460 | ||
514 | #define MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464 | ||
515 | #define MPS_PORT_STAT_TX_PORT_DROP_L 0x468 | ||
516 | #define MPS_PORT_STAT_TX_PORT_DROP_H 0x46c | ||
517 | #define MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470 | ||
518 | #define MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474 | ||
519 | #define MPS_PORT_STAT_TX_PORT_PPP0_L 0x478 | ||
520 | #define MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c | ||
521 | #define MPS_PORT_STAT_TX_PORT_PPP1_L 0x480 | ||
522 | #define MPS_PORT_STAT_TX_PORT_PPP1_H 0x484 | ||
523 | #define MPS_PORT_STAT_TX_PORT_PPP2_L 0x488 | ||
524 | #define MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c | ||
525 | #define MPS_PORT_STAT_TX_PORT_PPP3_L 0x490 | ||
526 | #define MPS_PORT_STAT_TX_PORT_PPP3_H 0x494 | ||
527 | #define MPS_PORT_STAT_TX_PORT_PPP4_L 0x498 | ||
528 | #define MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c | ||
529 | #define MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0 | ||
530 | #define MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4 | ||
531 | #define MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8 | ||
532 | #define MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac | ||
533 | #define MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0 | ||
534 | #define MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4 | ||
535 | #define MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0 | ||
536 | #define MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4 | ||
537 | #define MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8 | ||
538 | #define MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc | ||
539 | #define MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0 | ||
540 | #define MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4 | ||
541 | #define MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8 | ||
542 | #define MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc | ||
543 | #define MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0 | ||
544 | #define MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4 | ||
545 | #define MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8 | ||
546 | #define MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec | ||
547 | #define MPS_PORT_STAT_LB_PORT_64B_L 0x4f0 | ||
548 | #define MPS_PORT_STAT_LB_PORT_64B_H 0x4f4 | ||
549 | #define MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8 | ||
550 | #define MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc | ||
551 | #define MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500 | ||
552 | #define MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504 | ||
553 | #define MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508 | ||
554 | #define MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c | ||
555 | #define MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510 | ||
556 | #define MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514 | ||
557 | #define MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518 | ||
558 | #define MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c | ||
559 | #define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520 | ||
560 | #define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524 | ||
561 | #define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528 | ||
562 | #define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540 | ||
563 | #define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544 | ||
564 | #define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548 | ||
565 | #define MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c | ||
566 | #define MPS_PORT_STAT_RX_PORT_BCAST_L 0x550 | ||
567 | #define MPS_PORT_STAT_RX_PORT_BCAST_H 0x554 | ||
568 | #define MPS_PORT_STAT_RX_PORT_MCAST_L 0x558 | ||
569 | #define MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c | ||
570 | #define MPS_PORT_STAT_RX_PORT_UCAST_L 0x560 | ||
571 | #define MPS_PORT_STAT_RX_PORT_UCAST_H 0x564 | ||
572 | #define MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568 | ||
573 | #define MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c | ||
574 | #define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570 | ||
575 | #define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574 | ||
576 | #define MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578 | ||
577 | #define MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c | ||
578 | #define MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580 | ||
579 | #define MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584 | ||
580 | #define MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588 | ||
581 | #define MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c | ||
582 | #define MPS_PORT_STAT_RX_PORT_64B_L 0x590 | ||
583 | #define MPS_PORT_STAT_RX_PORT_64B_H 0x594 | ||
584 | #define MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598 | ||
585 | #define MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c | ||
586 | #define MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0 | ||
587 | #define MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4 | ||
588 | #define MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8 | ||
589 | #define MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac | ||
590 | #define MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0 | ||
591 | #define MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4 | ||
592 | #define MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8 | ||
593 | #define MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc | ||
594 | #define MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0 | ||
595 | #define MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4 | ||
596 | #define MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8 | ||
597 | #define MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc | ||
598 | #define MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0 | ||
599 | #define MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4 | ||
600 | #define MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8 | ||
601 | #define MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc | ||
602 | #define MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0 | ||
603 | #define MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4 | ||
604 | #define MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8 | ||
605 | #define MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec | ||
606 | #define MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0 | ||
607 | #define MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4 | ||
608 | #define MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8 | ||
609 | #define MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc | ||
610 | #define MPS_PORT_STAT_RX_PORT_PPP6_L 0x600 | ||
611 | #define MPS_PORT_STAT_RX_PORT_PPP6_H 0x604 | ||
612 | #define MPS_PORT_STAT_RX_PORT_PPP7_L 0x608 | ||
613 | #define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c | ||
614 | #define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610 | ||
615 | #define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614 | ||
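Each MPS statistic above is a 64-bit counter exposed as a low/high register pair, with the _H word at the _L offset plus 4. A minimal sketch of reading one counter (the helper name is illustrative; a production reader may also need to guard against a carry between the two 32-bit reads):

    /* Assemble a 64-bit MPS counter from its _L/_H pair. */
    static u64 read_stat64(void __iomem *regs, u32 lo_off)
    {
            u64 lo = readl(regs + lo_off);
            u64 hi = readl(regs + lo_off + 4);

            return (hi << 32) | lo;
    }

    /* e.g. read_stat64(regs, MPS_PORT_STAT_TX_PORT_BYTES_L); */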
616 | #define MPS_CMN_CTL 0x9000 | ||
617 | #define NUMPORTS_MASK 0x00000003U | ||
618 | #define NUMPORTS_SHIFT 0 | ||
619 | #define NUMPORTS_GET(x) (((x) & NUMPORTS_MASK) >> NUMPORTS_SHIFT) | ||
620 | |||
621 | #define MPS_INT_CAUSE 0x9008 | ||
622 | #define STATINT 0x00000020U | ||
623 | #define TXINT 0x00000010U | ||
624 | #define RXINT 0x00000008U | ||
625 | #define TRCINT 0x00000004U | ||
626 | #define CLSINT 0x00000002U | ||
627 | #define PLINT 0x00000001U | ||
628 | |||
629 | #define MPS_TX_INT_CAUSE 0x9408 | ||
630 | #define PORTERR 0x00010000U | ||
631 | #define FRMERR 0x00008000U | ||
632 | #define SECNTERR 0x00004000U | ||
633 | #define BUBBLE 0x00002000U | ||
634 | #define TXDESCFIFO 0x00001e00U | ||
635 | #define TXDATAFIFO 0x000001e0U | ||
636 | #define NCSIFIFO 0x00000010U | ||
637 | #define TPFIFO 0x0000000fU | ||
638 | |||
639 | #define MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614 | ||
640 | #define MPS_STAT_PERR_INT_CAUSE_TX_FIFO 0x9620 | ||
641 | #define MPS_STAT_PERR_INT_CAUSE_RX_FIFO 0x962c | ||
642 | |||
643 | #define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640 | ||
644 | #define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644 | ||
645 | #define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648 | ||
646 | #define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c | ||
647 | #define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650 | ||
648 | #define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654 | ||
649 | #define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658 | ||
650 | #define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c | ||
651 | #define MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660 | ||
652 | #define MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664 | ||
653 | #define MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668 | ||
654 | #define MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c | ||
655 | #define MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670 | ||
656 | #define MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674 | ||
657 | #define MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678 | ||
658 | #define MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c | ||
659 | #define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680 | ||
660 | #define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684 | ||
661 | #define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688 | ||
662 | #define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c | ||
663 | #define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690 | ||
664 | #define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694 | ||
665 | #define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698 | ||
666 | #define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c | ||
667 | #define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0 | ||
668 | #define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4 | ||
669 | #define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8 | ||
670 | #define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac | ||
671 | #define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0 | ||
672 | #define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4 | ||
673 | #define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8 | ||
674 | #define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc | ||
675 | #define MPS_TRC_CFG 0x9800 | ||
676 | #define TRCFIFOEMPTY 0x00000010U | ||
677 | #define TRCIGNOREDROPINPUT 0x00000008U | ||
678 | #define TRCKEEPDUPLICATES 0x00000004U | ||
679 | #define TRCEN 0x00000002U | ||
680 | #define TRCMULTIFILTER 0x00000001U | ||
681 | |||
682 | #define MPS_TRC_RSS_CONTROL 0x9808 | ||
683 | #define RSSCONTROL_MASK 0x00ff0000U | ||
684 | #define RSSCONTROL_SHIFT 16 | ||
685 | #define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT) | ||
686 | #define QUEUENUMBER_MASK 0x0000ffffU | ||
687 | #define QUEUENUMBER_SHIFT 0 | ||
688 | #define QUEUENUMBER(x) ((x) << QUEUENUMBER_SHIFT) | ||
689 | |||
690 | #define MPS_TRC_FILTER_MATCH_CTL_A 0x9810 | ||
691 | #define TFINVERTMATCH 0x01000000U | ||
692 | #define TFPKTTOOLARGE 0x00800000U | ||
693 | #define TFEN 0x00400000U | ||
694 | #define TFPORT_MASK 0x003c0000U | ||
695 | #define TFPORT_SHIFT 18 | ||
696 | #define TFPORT(x) ((x) << TFPORT_SHIFT) | ||
697 | #define TFPORT_GET(x) (((x) & TFPORT_MASK) >> TFPORT_SHIFT) | ||
698 | #define TFDROP 0x00020000U | ||
699 | #define TFSOPEOPERR 0x00010000U | ||
700 | #define TFLENGTH_MASK 0x00001f00U | ||
701 | #define TFLENGTH_SHIFT 8 | ||
702 | #define TFLENGTH(x) ((x) << TFLENGTH_SHIFT) | ||
703 | #define TFLENGTH_GET(x) (((x) & TFLENGTH_MASK) >> TFLENGTH_SHIFT) | ||
704 | #define TFOFFSET_MASK 0x0000001fU | ||
705 | #define TFOFFSET_SHIFT 0 | ||
706 | #define TFOFFSET(x) ((x) << TFOFFSET_SHIFT) | ||
707 | #define TFOFFSET_GET(x) (((x) & TFOFFSET_MASK) >> TFOFFSET_SHIFT) | ||
708 | |||
709 | #define MPS_TRC_FILTER_MATCH_CTL_B 0x9820 | ||
710 | #define TFMINPKTSIZE_MASK 0x01ff0000U | ||
711 | #define TFMINPKTSIZE_SHIFT 16 | ||
712 | #define TFMINPKTSIZE(x) ((x) << TFMINPKTSIZE_SHIFT) | ||
713 | #define TFMINPKTSIZE_GET(x) (((x) & TFMINPKTSIZE_MASK) >> TFMINPKTSIZE_SHIFT) | ||
714 | #define TFCAPTUREMAX_MASK 0x00003fffU | ||
715 | #define TFCAPTUREMAX_SHIFT 0 | ||
716 | #define TFCAPTUREMAX(x) ((x) << TFCAPTUREMAX_SHIFT) | ||
717 | #define TFCAPTUREMAX_GET(x) (((x) & TFCAPTUREMAX_MASK) >> TFCAPTUREMAX_SHIFT) | ||
718 | |||
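All of the TF* fields live in the two MATCH_CTL words, so arming a trace filter is a matter of OR-ing the packed fields together. An illustrative composition ('port', 'len' and 'off' are placeholders, not recommended settings):

    /* Trace 'len' 8-byte words starting at word 'off' on port 'port'. */
    u32 ctl = TFEN | TFPORT(port) | TFLENGTH(len) | TFOFFSET(off);

    writel(ctl, regs + MPS_TRC_FILTER_MATCH_CTL_A);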
719 | #define MPS_TRC_INT_CAUSE 0x985c | ||
720 | #define MISCPERR 0x00000100U | ||
721 | #define PKTFIFO 0x000000f0U | ||
722 | #define FILTMEM 0x0000000fU | ||
723 | |||
724 | #define MPS_TRC_FILTER0_MATCH 0x9c00 | ||
725 | #define MPS_TRC_FILTER0_DONT_CARE 0x9c80 | ||
726 | #define MPS_TRC_FILTER1_MATCH 0x9d00 | ||
727 | #define MPS_CLS_INT_CAUSE 0xd028 | ||
728 | #define PLERRENB 0x00000008U | ||
729 | #define HASHSRAM 0x00000004U | ||
730 | #define MATCHTCAM 0x00000002U | ||
731 | #define MATCHSRAM 0x00000001U | ||
732 | |||
733 | #define MPS_RX_PERR_INT_CAUSE 0x11074 | ||
734 | |||
735 | #define CPL_INTR_CAUSE 0x19054 | ||
736 | #define CIM_OP_MAP_PERR 0x00000020U | ||
737 | #define CIM_OVFL_ERROR 0x00000010U | ||
738 | #define TP_FRAMING_ERROR 0x00000008U | ||
739 | #define SGE_FRAMING_ERROR 0x00000004U | ||
740 | #define CIM_FRAMING_ERROR 0x00000002U | ||
741 | #define ZERO_SWITCH_ERROR 0x00000001U | ||
742 | |||
743 | #define SMB_INT_CAUSE 0x19090 | ||
744 | #define MSTTXFIFOPARINT 0x00200000U | ||
745 | #define MSTRXFIFOPARINT 0x00100000U | ||
746 | #define SLVFIFOPARINT 0x00080000U | ||
747 | |||
748 | #define ULP_RX_INT_CAUSE 0x19158 | ||
749 | #define ULP_RX_ISCSI_TAGMASK 0x19164 | ||
750 | #define ULP_RX_ISCSI_PSZ 0x19168 | ||
751 | #define HPZ3_MASK 0x0f000000U | ||
752 | #define HPZ3_SHIFT 24 | ||
753 | #define HPZ3(x) ((x) << HPZ3_SHIFT) | ||
754 | #define HPZ2_MASK 0x000f0000U | ||
755 | #define HPZ2_SHIFT 16 | ||
756 | #define HPZ2(x) ((x) << HPZ2_SHIFT) | ||
757 | #define HPZ1_MASK 0x00000f00U | ||
758 | #define HPZ1_SHIFT 8 | ||
759 | #define HPZ1(x) ((x) << HPZ1_SHIFT) | ||
760 | #define HPZ0_MASK 0x0000000fU | ||
761 | #define HPZ0_SHIFT 0 | ||
762 | #define HPZ0(x) ((x) << HPZ0_SHIFT) | ||
763 | |||
764 | #define ULP_RX_TDDP_PSZ 0x19178 | ||
765 | |||
766 | #define SF_DATA 0x193f8 | ||
767 | #define SF_OP 0x193fc | ||
768 | #define BUSY 0x80000000U | ||
769 | #define SF_LOCK 0x00000010U | ||
770 | #define SF_CONT 0x00000008U | ||
771 | #define BYTECNT_MASK 0x00000006U | ||
772 | #define BYTECNT_SHIFT 1 | ||
773 | #define BYTECNT(x) ((x) << BYTECNT_SHIFT) | ||
774 | #define OP_WR 0x00000001U | ||
775 | |||
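SF_OP drives the serial-flash interface: BYTECNT encodes the number of bytes (minus one) moved through SF_DATA, OP_WR selects write over read, SF_CONT keeps the chip select asserted across chained operations, and BUSY reads back as set while an operation is in flight. A hedged sketch of one write plus a completion poll (the timeout policy is an assumption, not the driver's):

    /* Push 1-4 bytes from 'val' to the flash; needs <linux/delay.h>. */
    static int sf_write(void __iomem *regs, unsigned int nbytes,
                        u32 val, int cont)
    {
            int attempts = 1000;

            writel(val, regs + SF_DATA);
            writel((cont ? SF_CONT : 0) | BYTECNT(nbytes - 1) | OP_WR,
                   regs + SF_OP);
            while ((readl(regs + SF_OP) & BUSY) && --attempts)
                    udelay(1);
            return attempts ? 0 : -ETIMEDOUT;
    }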
776 | #define PL_PF_INT_CAUSE 0x3c0 | ||
777 | #define PFSW 0x00000008U | ||
778 | #define PFSGE 0x00000004U | ||
779 | #define PFCIM 0x00000002U | ||
780 | #define PFMPS 0x00000001U | ||
781 | |||
782 | #define PL_PF_INT_ENABLE 0x3c4 | ||
783 | #define PL_PF_CTL 0x3c8 | ||
784 | #define SWINT 0x00000001U | ||
785 | |||
786 | #define PL_WHOAMI 0x19400 | ||
787 | #define SOURCEPF_MASK 0x00000700U | ||
788 | #define SOURCEPF_SHIFT 8 | ||
789 | #define SOURCEPF(x) ((x) << SOURCEPF_SHIFT) | ||
790 | #define SOURCEPF_GET(x) (((x) & SOURCEPF_MASK) >> SOURCEPF_SHIFT) | ||
791 | #define ISVF 0x00000080U | ||
792 | #define VFID_MASK 0x0000007fU | ||
793 | #define VFID_SHIFT 0 | ||
794 | #define VFID(x) ((x) << VFID_SHIFT) | ||
795 | #define VFID_GET(x) (((x) & VFID_MASK) >> VFID_SHIFT) | ||
796 | |||
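PL_WHOAMI lets code discover which function it is running as: SOURCEPF carries the physical-function number, and ISVF/VFID apply when the reader is a virtual function. The common one-liner:

    /* Which PF owns this BAR/mailbox? */
    unsigned int pf = SOURCEPF_GET(readl(regs + PL_WHOAMI));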
797 | #define PL_INT_CAUSE 0x1940c | ||
798 | #define ULP_TX 0x08000000U | ||
799 | #define SGE 0x04000000U | ||
800 | #define HMA 0x02000000U | ||
801 | #define CPL_SWITCH 0x01000000U | ||
802 | #define ULP_RX 0x00800000U | ||
803 | #define PM_RX 0x00400000U | ||
804 | #define PM_TX 0x00200000U | ||
805 | #define MA 0x00100000U | ||
806 | #define TP 0x00080000U | ||
807 | #define LE 0x00040000U | ||
808 | #define EDC1 0x00020000U | ||
809 | #define EDC0 0x00010000U | ||
810 | #define MC 0x00008000U | ||
811 | #define PCIE 0x00004000U | ||
812 | #define PMU 0x00002000U | ||
813 | #define XGMAC_KR1 0x00001000U | ||
814 | #define XGMAC_KR0 0x00000800U | ||
815 | #define XGMAC1 0x00000400U | ||
816 | #define XGMAC0 0x00000200U | ||
817 | #define SMB 0x00000100U | ||
818 | #define SF 0x00000080U | ||
819 | #define PL 0x00000040U | ||
820 | #define NCSI 0x00000020U | ||
821 | #define MPS 0x00000010U | ||
822 | #define MI 0x00000008U | ||
823 | #define DBG 0x00000004U | ||
824 | #define I2CM 0x00000002U | ||
825 | #define CIM 0x00000001U | ||
826 | |||
827 | #define PL_INT_MAP0 0x19414 | ||
828 | #define PL_RST 0x19428 | ||
829 | #define PIORST 0x00000002U | ||
830 | #define PIORSTMODE 0x00000001U | ||
831 | |||
832 | #define PL_PL_INT_CAUSE 0x19430 | ||
833 | #define FATALPERR 0x00000010U | ||
834 | #define PERRVFID 0x00000001U | ||
835 | |||
836 | #define PL_REV 0x1943c | ||
837 | |||
838 | #define LE_DB_CONFIG 0x19c04 | ||
839 | #define HASHEN 0x00100000U | ||
840 | |||
841 | #define LE_DB_SERVER_INDEX 0x19c18 | ||
842 | #define LE_DB_ACT_CNT_IPV4 0x19c20 | ||
843 | #define LE_DB_ACT_CNT_IPV6 0x19c24 | ||
844 | |||
845 | #define LE_DB_INT_CAUSE 0x19c3c | ||
846 | #define REQQPARERR 0x00010000U | ||
847 | #define UNKNOWNCMD 0x00008000U | ||
848 | #define PARITYERR 0x00000040U | ||
849 | #define LIPMISS 0x00000020U | ||
850 | #define LIP0 0x00000010U | ||
851 | |||
852 | #define LE_DB_TID_HASHBASE 0x19df8 | ||
853 | |||
854 | #define NCSI_INT_CAUSE 0x1a0d8 | ||
855 | #define CIM_DM_PRTY_ERR 0x00000100U | ||
856 | #define MPS_DM_PRTY_ERR 0x00000080U | ||
857 | #define TXFIFO_PRTY_ERR 0x00000002U | ||
858 | #define RXFIFO_PRTY_ERR 0x00000001U | ||
859 | |||
860 | #define XGMAC_PORT_CFG2 0x1018 | ||
861 | #define PATEN 0x00040000U | ||
862 | #define MAGICEN 0x00020000U | ||
863 | |||
864 | #define XGMAC_PORT_MAGIC_MACID_LO 0x1024 | ||
865 | #define XGMAC_PORT_MAGIC_MACID_HI 0x1028 | ||
866 | |||
867 | #define XGMAC_PORT_EPIO_DATA0 0x10c0 | ||
868 | #define XGMAC_PORT_EPIO_DATA1 0x10c4 | ||
869 | #define XGMAC_PORT_EPIO_DATA2 0x10c8 | ||
870 | #define XGMAC_PORT_EPIO_DATA3 0x10cc | ||
871 | #define XGMAC_PORT_EPIO_OP 0x10d0 | ||
872 | #define EPIOWR 0x00000100U | ||
873 | #define ADDRESS_MASK 0x000000ffU | ||
874 | #define ADDRESS_SHIFT 0 | ||
875 | #define ADDRESS(x) ((x) << ADDRESS_SHIFT) | ||
876 | |||
877 | #define XGMAC_PORT_INT_CAUSE 0x10dc | ||
878 | #endif /* __T4_REGS_H */ | ||
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
new file mode 100644
index 000000000000..3393d05a388a
--- /dev/null
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -0,0 +1,1580 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio T4 Ethernet driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #ifndef _T4FW_INTERFACE_H_ | ||
36 | #define _T4FW_INTERFACE_H_ | ||
37 | |||
38 | #define FW_T4VF_SGE_BASE_ADDR 0x0000 | ||
39 | #define FW_T4VF_MPS_BASE_ADDR 0x0100 | ||
40 | #define FW_T4VF_PL_BASE_ADDR 0x0200 | ||
41 | #define FW_T4VF_MBDATA_BASE_ADDR 0x0240 | ||
42 | #define FW_T4VF_CIM_BASE_ADDR 0x0300 | ||
43 | |||
44 | enum fw_wr_opcodes { | ||
45 | FW_FILTER_WR = 0x02, | ||
46 | FW_ULPTX_WR = 0x04, | ||
47 | FW_TP_WR = 0x05, | ||
48 | FW_ETH_TX_PKT_WR = 0x08, | ||
49 | FW_FLOWC_WR = 0x0a, | ||
50 | FW_OFLD_TX_DATA_WR = 0x0b, | ||
51 | FW_CMD_WR = 0x10, | ||
52 | FW_ETH_TX_PKT_VM_WR = 0x11, | ||
53 | FW_RI_RES_WR = 0x0c, | ||
54 | FW_RI_INIT_WR = 0x0d, | ||
55 | FW_RI_RDMA_WRITE_WR = 0x14, | ||
56 | FW_RI_SEND_WR = 0x15, | ||
57 | FW_RI_RDMA_READ_WR = 0x16, | ||
58 | FW_RI_RECV_WR = 0x17, | ||
59 | FW_RI_BIND_MW_WR = 0x18, | ||
60 | FW_RI_FR_NSMR_WR = 0x19, | ||
61 | FW_RI_INV_LSTAG_WR = 0x1a, | ||
62 | FW_LASTC2E_WR = 0x40 | ||
63 | }; | ||
64 | |||
65 | struct fw_wr_hdr { | ||
66 | __be32 hi; | ||
67 | __be32 lo; | ||
68 | }; | ||
69 | |||
70 | #define FW_WR_OP(x) ((x) << 24) | ||
71 | #define FW_WR_ATOMIC(x) ((x) << 23) | ||
72 | #define FW_WR_FLUSH(x) ((x) << 22) | ||
73 | #define FW_WR_COMPL(x) ((x) << 21) | ||
74 | #define FW_WR_IMMDLEN(x) ((x) << 0) | ||
75 | |||
76 | #define FW_WR_EQUIQ (1U << 31) | ||
77 | #define FW_WR_EQUEQ (1U << 30) | ||
78 | #define FW_WR_FLOWID(x) ((x) << 8) | ||
79 | #define FW_WR_LEN16(x) ((x) << 0) | ||
80 | |||
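Every work request begins with this two-word header: the first group of macros packs the hi word (opcode, atomic/flush/completion flags, immediate length) and the second packs the lo word (egress-queue notification flags, flow id, and the WR length in 16-byte units). A hedged sketch for an Ethernet TX packet WR; 'desc', 'immlen' and 'len' are placeholders, and len is assumed to be a multiple of 16:

    struct fw_eth_tx_pkt_wr *wr = (void *)desc;

    wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
                           FW_WR_IMMDLEN(immlen));
    wr->equiq_to_len16 = htonl(FW_WR_LEN16(len / 16));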
81 | struct fw_ulptx_wr { | ||
82 | __be32 op_to_compl; | ||
83 | __be32 flowid_len16; | ||
84 | u64 cookie; | ||
85 | }; | ||
86 | |||
87 | struct fw_tp_wr { | ||
88 | __be32 op_to_immdlen; | ||
89 | __be32 flowid_len16; | ||
90 | u64 cookie; | ||
91 | }; | ||
92 | |||
93 | struct fw_eth_tx_pkt_wr { | ||
94 | __be32 op_immdlen; | ||
95 | __be32 equiq_to_len16; | ||
96 | __be64 r3; | ||
97 | }; | ||
98 | |||
99 | enum fw_flowc_mnem { | ||
100 | FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */ | ||
101 | FW_FLOWC_MNEM_CH, | ||
102 | FW_FLOWC_MNEM_PORT, | ||
103 | FW_FLOWC_MNEM_IQID, | ||
104 | FW_FLOWC_MNEM_SNDNXT, | ||
105 | FW_FLOWC_MNEM_RCVNXT, | ||
106 | FW_FLOWC_MNEM_SNDBUF, | ||
107 | FW_FLOWC_MNEM_MSS, | ||
108 | }; | ||
109 | |||
110 | struct fw_flowc_mnemval { | ||
111 | u8 mnemonic; | ||
112 | u8 r4[3]; | ||
113 | __be32 val; | ||
114 | }; | ||
115 | |||
116 | struct fw_flowc_wr { | ||
117 | __be32 op_to_nparams; | ||
118 | #define FW_FLOWC_WR_NPARAMS(x) ((x) << 0) | ||
119 | __be32 flowid_len16; | ||
120 | struct fw_flowc_mnemval mnemval[0]; | ||
121 | }; | ||
122 | |||
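Because fw_flowc_wr ends in a flexible array, the WR's size depends on the parameter count, and FW_FLOWC_WR_NPARAMS must match the number of mnemval entries actually sent. A hedged two-parameter sketch ('fid', 'chan' and 'port' are placeholders; DIV_ROUND_UP handles the 16-byte rounding):

    u8 buf[32] = {0};    /* 8B header + 2*8B values, padded to 16B */
    struct fw_flowc_wr *flowc = (struct fw_flowc_wr *)buf;
    unsigned int len = sizeof(*flowc) + 2 * sizeof(struct fw_flowc_mnemval);

    flowc->op_to_nparams = htonl(FW_WR_OP(FW_FLOWC_WR) |
                                 FW_FLOWC_WR_NPARAMS(2));
    flowc->flowid_len16 = htonl(FW_WR_FLOWID(fid) |
                                FW_WR_LEN16(DIV_ROUND_UP(len, 16)));
    flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_CH;
    flowc->mnemval[0].val = htonl(chan);
    flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_PORT;
    flowc->mnemval[1].val = htonl(port);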
123 | struct fw_ofld_tx_data_wr { | ||
124 | __be32 op_to_immdlen; | ||
125 | __be32 flowid_len16; | ||
126 | __be32 plen; | ||
127 | __be32 tunnel_to_proxy; | ||
128 | #define FW_OFLD_TX_DATA_WR_TUNNEL(x) ((x) << 19) | ||
129 | #define FW_OFLD_TX_DATA_WR_SAVE(x) ((x) << 18) | ||
130 | #define FW_OFLD_TX_DATA_WR_FLUSH(x) ((x) << 17) | ||
131 | #define FW_OFLD_TX_DATA_WR_URGENT(x) ((x) << 16) | ||
132 | #define FW_OFLD_TX_DATA_WR_MORE(x) ((x) << 15) | ||
133 | #define FW_OFLD_TX_DATA_WR_SHOVE(x) ((x) << 14) | ||
134 | #define FW_OFLD_TX_DATA_WR_ULPMODE(x) ((x) << 10) | ||
135 | #define FW_OFLD_TX_DATA_WR_ULPSUBMODE(x) ((x) << 6) | ||
136 | }; | ||
137 | |||
138 | struct fw_cmd_wr { | ||
139 | __be32 op_dma; | ||
140 | #define FW_CMD_WR_DMA (1U << 17) | ||
141 | __be32 len16_pkd; | ||
142 | __be64 cookie_daddr; | ||
143 | }; | ||
144 | |||
145 | struct fw_eth_tx_pkt_vm_wr { | ||
146 | __be32 op_immdlen; | ||
147 | __be32 equiq_to_len16; | ||
148 | __be32 r3[2]; | ||
149 | u8 ethmacdst[6]; | ||
150 | u8 ethmacsrc[6]; | ||
151 | __be16 ethtype; | ||
152 | __be16 vlantci; | ||
153 | }; | ||
154 | |||
155 | #define FW_CMD_MAX_TIMEOUT 3000 | ||
156 | |||
157 | enum fw_cmd_opcodes { | ||
158 | FW_LDST_CMD = 0x01, | ||
159 | FW_RESET_CMD = 0x03, | ||
160 | FW_HELLO_CMD = 0x04, | ||
161 | FW_BYE_CMD = 0x05, | ||
162 | FW_INITIALIZE_CMD = 0x06, | ||
163 | FW_CAPS_CONFIG_CMD = 0x07, | ||
164 | FW_PARAMS_CMD = 0x08, | ||
165 | FW_PFVF_CMD = 0x09, | ||
166 | FW_IQ_CMD = 0x10, | ||
167 | FW_EQ_MNGT_CMD = 0x11, | ||
168 | FW_EQ_ETH_CMD = 0x12, | ||
169 | FW_EQ_CTRL_CMD = 0x13, | ||
170 | FW_EQ_OFLD_CMD = 0x21, | ||
171 | FW_VI_CMD = 0x14, | ||
172 | FW_VI_MAC_CMD = 0x15, | ||
173 | FW_VI_RXMODE_CMD = 0x16, | ||
174 | FW_VI_ENABLE_CMD = 0x17, | ||
175 | FW_ACL_MAC_CMD = 0x18, | ||
176 | FW_ACL_VLAN_CMD = 0x19, | ||
177 | FW_VI_STATS_CMD = 0x1a, | ||
178 | FW_PORT_CMD = 0x1b, | ||
179 | FW_PORT_STATS_CMD = 0x1c, | ||
180 | FW_PORT_LB_STATS_CMD = 0x1d, | ||
181 | FW_PORT_TRACE_CMD = 0x1e, | ||
182 | FW_PORT_TRACE_MMAP_CMD = 0x1f, | ||
183 | FW_RSS_IND_TBL_CMD = 0x20, | ||
184 | FW_RSS_GLB_CONFIG_CMD = 0x22, | ||
185 | FW_RSS_VI_CONFIG_CMD = 0x23, | ||
186 | FW_LASTC2E_CMD = 0x40, | ||
187 | FW_ERROR_CMD = 0x80, | ||
188 | FW_DEBUG_CMD = 0x81, | ||
189 | }; | ||
190 | |||
191 | enum fw_cmd_cap { | ||
192 | FW_CMD_CAP_PF = 0x01, | ||
193 | FW_CMD_CAP_DMAQ = 0x02, | ||
194 | FW_CMD_CAP_PORT = 0x04, | ||
195 | FW_CMD_CAP_PORTPROMISC = 0x08, | ||
196 | FW_CMD_CAP_PORTSTATS = 0x10, | ||
197 | FW_CMD_CAP_VF = 0x80, | ||
198 | }; | ||
199 | |||
200 | /* | ||
201 | * Generic command header flit0 | ||
202 | */ | ||
203 | struct fw_cmd_hdr { | ||
204 | __be32 hi; | ||
205 | __be32 lo; | ||
206 | }; | ||
207 | |||
208 | #define FW_CMD_OP(x) ((x) << 24) | ||
209 | #define FW_CMD_OP_GET(x) (((x) >> 24) & 0xff) | ||
210 | #define FW_CMD_REQUEST (1U << 23) | ||
211 | #define FW_CMD_READ (1U << 22) | ||
212 | #define FW_CMD_WRITE (1U << 21) | ||
213 | #define FW_CMD_EXEC (1U << 20) | ||
214 | #define FW_CMD_RAMASK(x) ((x) << 20) | ||
215 | #define FW_CMD_RETVAL(x) ((x) << 8) | ||
216 | #define FW_CMD_RETVAL_GET(x) (((x) >> 8) & 0xff) | ||
217 | #define FW_CMD_LEN16(x) ((x) << 0) | ||
218 | |||
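These flags compose the leading word of every mailbox command: FW_CMD_OP selects the command, FW_CMD_REQUEST marks the host-to-firmware direction, READ/WRITE/EXEC say what is being asked, and the firmware's reply reports status through the RETVAL field of the second word. A hedged sketch of a RESET command (the mailbox-send step itself is out of scope here):

    struct fw_reset_cmd c = {0};

    c.op_to_write = htonl(FW_CMD_OP(FW_RESET_CMD) | FW_CMD_REQUEST |
                          FW_CMD_WRITE);
    c.retval_len16 = htonl(FW_CMD_LEN16(sizeof(c) / 16));
    c.val = htonl(PIORST | PIORSTMODE);   /* PL_RST bits defined above */

    /* after the mailbox round trip, the status is
     * FW_CMD_RETVAL_GET(ntohl(c.retval_len16)) */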
219 | enum fw_ldst_addrspc { | ||
220 | FW_LDST_ADDRSPC_FIRMWARE = 0x0001, | ||
221 | FW_LDST_ADDRSPC_SGE_EGRC = 0x0008, | ||
222 | FW_LDST_ADDRSPC_SGE_INGC = 0x0009, | ||
223 | FW_LDST_ADDRSPC_SGE_FLMC = 0x000a, | ||
224 | FW_LDST_ADDRSPC_SGE_CONMC = 0x000b, | ||
225 | FW_LDST_ADDRSPC_TP_PIO = 0x0010, | ||
226 | FW_LDST_ADDRSPC_TP_TM_PIO = 0x0011, | ||
227 | FW_LDST_ADDRSPC_TP_MIB = 0x0012, | ||
228 | FW_LDST_ADDRSPC_MDIO = 0x0018, | ||
229 | FW_LDST_ADDRSPC_MPS = 0x0020, | ||
230 | FW_LDST_ADDRSPC_FUNC = 0x0028 | ||
231 | }; | ||
232 | |||
233 | enum fw_ldst_mps_fid { | ||
234 | FW_LDST_MPS_ATRB, | ||
235 | FW_LDST_MPS_RPLC | ||
236 | }; | ||
237 | |||
238 | enum fw_ldst_func_access_ctl { | ||
239 | FW_LDST_FUNC_ACC_CTL_VIID, | ||
240 | FW_LDST_FUNC_ACC_CTL_FID | ||
241 | }; | ||
242 | |||
243 | enum fw_ldst_func_mod_index { | ||
244 | FW_LDST_FUNC_MPS | ||
245 | }; | ||
246 | |||
247 | struct fw_ldst_cmd { | ||
248 | __be32 op_to_addrspace; | ||
249 | #define FW_LDST_CMD_ADDRSPACE(x) ((x) << 0) | ||
250 | __be32 cycles_to_len16; | ||
251 | union fw_ldst { | ||
252 | struct fw_ldst_addrval { | ||
253 | __be32 addr; | ||
254 | __be32 val; | ||
255 | } addrval; | ||
256 | struct fw_ldst_idctxt { | ||
257 | __be32 physid; | ||
258 | __be32 msg_pkd; | ||
259 | __be32 ctxt_data7; | ||
260 | __be32 ctxt_data6; | ||
261 | __be32 ctxt_data5; | ||
262 | __be32 ctxt_data4; | ||
263 | __be32 ctxt_data3; | ||
264 | __be32 ctxt_data2; | ||
265 | __be32 ctxt_data1; | ||
266 | __be32 ctxt_data0; | ||
267 | } idctxt; | ||
268 | struct fw_ldst_mdio { | ||
269 | __be16 paddr_mmd; | ||
270 | __be16 raddr; | ||
271 | __be16 vctl; | ||
272 | __be16 rval; | ||
273 | } mdio; | ||
274 | struct fw_ldst_mps { | ||
275 | __be16 fid_ctl; | ||
276 | __be16 rplcpf_pkd; | ||
277 | __be32 rplc127_96; | ||
278 | __be32 rplc95_64; | ||
279 | __be32 rplc63_32; | ||
280 | __be32 rplc31_0; | ||
281 | __be32 atrb; | ||
282 | __be16 vlan[16]; | ||
283 | } mps; | ||
284 | struct fw_ldst_func { | ||
285 | u8 access_ctl; | ||
286 | u8 mod_index; | ||
287 | __be16 ctl_id; | ||
288 | __be32 offset; | ||
289 | __be64 data0; | ||
290 | __be64 data1; | ||
291 | } func; | ||
292 | } u; | ||
293 | }; | ||
294 | |||
295 | #define FW_LDST_CMD_MSG(x) ((x) << 31) | ||
296 | #define FW_LDST_CMD_PADDR(x) ((x) << 8) | ||
297 | #define FW_LDST_CMD_MMD(x) ((x) << 0) | ||
298 | #define FW_LDST_CMD_FID(x) ((x) << 15) | ||
299 | #define FW_LDST_CMD_CTL(x) ((x) << 0) | ||
300 | #define FW_LDST_CMD_RPLCPF(x) ((x) << 0) | ||
301 | |||
302 | struct fw_reset_cmd { | ||
303 | __be32 op_to_write; | ||
304 | __be32 retval_len16; | ||
305 | __be32 val; | ||
306 | __be32 r3; | ||
307 | }; | ||
308 | |||
309 | struct fw_hello_cmd { | ||
310 | __be32 op_to_write; | ||
311 | __be32 retval_len16; | ||
312 | __be32 err_to_mbasyncnot; | ||
313 | #define FW_HELLO_CMD_ERR (1U << 31) | ||
314 | #define FW_HELLO_CMD_INIT (1U << 30) | ||
315 | #define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29) | ||
316 | #define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28) | ||
317 | #define FW_HELLO_CMD_MBMASTER(x) ((x) << 24) | ||
318 | #define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20) | ||
319 | __be32 fwrev; | ||
320 | }; | ||
321 | |||
322 | struct fw_bye_cmd { | ||
323 | __be32 op_to_write; | ||
324 | __be32 retval_len16; | ||
325 | __be64 r3; | ||
326 | }; | ||
327 | |||
328 | struct fw_initialize_cmd { | ||
329 | __be32 op_to_write; | ||
330 | __be32 retval_len16; | ||
331 | __be64 r3; | ||
332 | }; | ||
333 | |||
334 | enum fw_caps_config_hm { | ||
335 | FW_CAPS_CONFIG_HM_PCIE = 0x00000001, | ||
336 | FW_CAPS_CONFIG_HM_PL = 0x00000002, | ||
337 | FW_CAPS_CONFIG_HM_SGE = 0x00000004, | ||
338 | FW_CAPS_CONFIG_HM_CIM = 0x00000008, | ||
339 | FW_CAPS_CONFIG_HM_ULPTX = 0x00000010, | ||
340 | FW_CAPS_CONFIG_HM_TP = 0x00000020, | ||
341 | FW_CAPS_CONFIG_HM_ULPRX = 0x00000040, | ||
342 | FW_CAPS_CONFIG_HM_PMRX = 0x00000080, | ||
343 | FW_CAPS_CONFIG_HM_PMTX = 0x00000100, | ||
344 | FW_CAPS_CONFIG_HM_MC = 0x00000200, | ||
345 | FW_CAPS_CONFIG_HM_LE = 0x00000400, | ||
346 | FW_CAPS_CONFIG_HM_MPS = 0x00000800, | ||
347 | FW_CAPS_CONFIG_HM_XGMAC = 0x00001000, | ||
348 | FW_CAPS_CONFIG_HM_CPLSWITCH = 0x00002000, | ||
349 | FW_CAPS_CONFIG_HM_T4DBG = 0x00004000, | ||
350 | FW_CAPS_CONFIG_HM_MI = 0x00008000, | ||
351 | FW_CAPS_CONFIG_HM_I2CM = 0x00010000, | ||
352 | FW_CAPS_CONFIG_HM_NCSI = 0x00020000, | ||
353 | FW_CAPS_CONFIG_HM_SMB = 0x00040000, | ||
354 | FW_CAPS_CONFIG_HM_MA = 0x00080000, | ||
355 | FW_CAPS_CONFIG_HM_EDRAM = 0x00100000, | ||
356 | FW_CAPS_CONFIG_HM_PMU = 0x00200000, | ||
357 | FW_CAPS_CONFIG_HM_UART = 0x00400000, | ||
358 | FW_CAPS_CONFIG_HM_SF = 0x00800000, | ||
359 | }; | ||
360 | |||
361 | enum fw_caps_config_nbm { | ||
362 | FW_CAPS_CONFIG_NBM_IPMI = 0x00000001, | ||
363 | FW_CAPS_CONFIG_NBM_NCSI = 0x00000002, | ||
364 | }; | ||
365 | |||
366 | enum fw_caps_config_link { | ||
367 | FW_CAPS_CONFIG_LINK_PPP = 0x00000001, | ||
368 | FW_CAPS_CONFIG_LINK_QFC = 0x00000002, | ||
369 | FW_CAPS_CONFIG_LINK_DCBX = 0x00000004, | ||
370 | }; | ||
371 | |||
372 | enum fw_caps_config_switch { | ||
373 | FW_CAPS_CONFIG_SWITCH_INGRESS = 0x00000001, | ||
374 | FW_CAPS_CONFIG_SWITCH_EGRESS = 0x00000002, | ||
375 | }; | ||
376 | |||
377 | enum fw_caps_config_nic { | ||
378 | FW_CAPS_CONFIG_NIC = 0x00000001, | ||
379 | FW_CAPS_CONFIG_NIC_VM = 0x00000002, | ||
380 | }; | ||
381 | |||
382 | enum fw_caps_config_ofld { | ||
383 | FW_CAPS_CONFIG_OFLD = 0x00000001, | ||
384 | }; | ||
385 | |||
386 | enum fw_caps_config_rdma { | ||
387 | FW_CAPS_CONFIG_RDMA_RDDP = 0x00000001, | ||
388 | FW_CAPS_CONFIG_RDMA_RDMAC = 0x00000002, | ||
389 | }; | ||
390 | |||
391 | enum fw_caps_config_iscsi { | ||
392 | FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU = 0x00000001, | ||
393 | FW_CAPS_CONFIG_ISCSI_TARGET_PDU = 0x00000002, | ||
394 | FW_CAPS_CONFIG_ISCSI_INITIATOR_CNXOFLD = 0x00000004, | ||
395 | FW_CAPS_CONFIG_ISCSI_TARGET_CNXOFLD = 0x00000008, | ||
396 | }; | ||
397 | |||
398 | enum fw_caps_config_fcoe { | ||
399 | FW_CAPS_CONFIG_FCOE_INITIATOR = 0x00000001, | ||
400 | FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002, | ||
401 | }; | ||
402 | |||
403 | struct fw_caps_config_cmd { | ||
404 | __be32 op_to_write; | ||
405 | __be32 retval_len16; | ||
406 | __be32 r2; | ||
407 | __be32 hwmbitmap; | ||
408 | __be16 nbmcaps; | ||
409 | __be16 linkcaps; | ||
410 | __be16 switchcaps; | ||
411 | __be16 r3; | ||
412 | __be16 niccaps; | ||
413 | __be16 ofldcaps; | ||
414 | __be16 rdmacaps; | ||
415 | __be16 r4; | ||
416 | __be16 iscsicaps; | ||
417 | __be16 fcoecaps; | ||
418 | __be32 r5; | ||
419 | __be64 r6; | ||
420 | }; | ||
421 | |||
422 | /* | ||
423 | * params command mnemonics | ||
424 | */ | ||
425 | enum fw_params_mnem { | ||
426 | FW_PARAMS_MNEM_DEV = 1, /* device params */ | ||
427 | FW_PARAMS_MNEM_PFVF = 2, /* function params */ | ||
428 | FW_PARAMS_MNEM_REG = 3, /* limited register access */ | ||
429 | FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ | ||
430 | FW_PARAMS_MNEM_LAST | ||
431 | }; | ||
432 | |||
433 | /* | ||
434 | * device parameters | ||
435 | */ | ||
436 | enum fw_params_param_dev { | ||
437 | FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in kHz */ | ||

438 | FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */ | ||
439 | FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs | ||
440 | * allocated by the device's | ||
441 | * Lookup Engine | ||
442 | */ | ||
443 | FW_PARAMS_PARAM_DEV_FLOWC_BUFFIFO_SZ = 0x03, | ||
444 | FW_PARAMS_PARAM_DEV_INTVER_NIC = 0x04, | ||
445 | FW_PARAMS_PARAM_DEV_INTVER_VNIC = 0x05, | ||
446 | FW_PARAMS_PARAM_DEV_INTVER_OFLD = 0x06, | ||
447 | FW_PARAMS_PARAM_DEV_INTVER_RI = 0x07, | ||
448 | FW_PARAMS_PARAM_DEV_INTVER_ISCSIPDU = 0x08, | ||
449 | FW_PARAMS_PARAM_DEV_INTVER_ISCSI = 0x09, | ||
450 | FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A | ||
451 | }; | ||
452 | |||
453 | /* | ||
454 | * physical and virtual function parameters | ||
455 | */ | ||
456 | enum fw_params_param_pfvf { | ||
457 | FW_PARAMS_PARAM_PFVF_RWXCAPS = 0x00, | ||
458 | FW_PARAMS_PARAM_PFVF_ROUTE_START = 0x01, | ||
459 | FW_PARAMS_PARAM_PFVF_ROUTE_END = 0x02, | ||
460 | FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03, | ||
461 | FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04, | ||
462 | FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05, | ||
463 | FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06, | ||
464 | FW_PARAMS_PARAM_PFVF_SERVER_START = 0x07, | ||
465 | FW_PARAMS_PARAM_PFVF_SERVER_END = 0x08, | ||
466 | FW_PARAMS_PARAM_PFVF_TDDP_START = 0x09, | ||
467 | FW_PARAMS_PARAM_PFVF_TDDP_END = 0x0A, | ||
468 | FW_PARAMS_PARAM_PFVF_ISCSI_START = 0x0B, | ||
469 | FW_PARAMS_PARAM_PFVF_ISCSI_END = 0x0C, | ||
470 | FW_PARAMS_PARAM_PFVF_STAG_START = 0x0D, | ||
471 | FW_PARAMS_PARAM_PFVF_STAG_END = 0x0E, | ||
472 | FW_PARAMS_PARAM_PFVF_RQ_START = 0x1F, | ||
473 | FW_PARAMS_PARAM_PFVF_RQ_END = 0x10, | ||
474 | FW_PARAMS_PARAM_PFVF_PBL_START = 0x11, | ||
475 | FW_PARAMS_PARAM_PFVF_PBL_END = 0x12, | ||
476 | FW_PARAMS_PARAM_PFVF_L2T_START = 0x13, | ||
477 | FW_PARAMS_PARAM_PFVF_L2T_END = 0x14, | ||
478 | FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20, | ||
479 | }; | ||
480 | |||
481 | /* | ||
482 | * dma queue parameters | ||
483 | */ | ||
484 | enum fw_params_param_dmaq { | ||
485 | FW_PARAMS_PARAM_DMAQ_IQ_DCAEN_DCACPU = 0x00, | ||
486 | FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01, | ||
487 | FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_MNGT = 0x10, | ||
488 | FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11, | ||
489 | FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12, | ||
490 | }; | ||
491 | |||
492 | #define FW_PARAMS_MNEM(x) ((x) << 24) | ||
493 | #define FW_PARAMS_PARAM_X(x) ((x) << 16) | ||
494 | #define FW_PARAMS_PARAM_Y(x) ((x) << 8) | ||
495 | #define FW_PARAMS_PARAM_Z(x) ((x) << 0) | ||
496 | #define FW_PARAMS_PARAM_XYZ(x) ((x) << 0) | ||
497 | #define FW_PARAMS_PARAM_YZ(x) ((x) << 0) | ||
498 | |||
499 | struct fw_params_cmd { | ||
500 | __be32 op_to_vfn; | ||
501 | __be32 retval_len16; | ||
502 | struct fw_params_param { | ||
503 | __be32 mnem; | ||
504 | __be32 val; | ||
505 | } param[7]; | ||
506 | }; | ||
507 | |||
508 | #define FW_PARAMS_CMD_PFN(x) ((x) << 8) | ||
509 | #define FW_PARAMS_CMD_VFN(x) ((x) << 0) | ||
510 | |||
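A parameter's identity concatenates the mnemonic (which table), the X selector (which parameter) and optional Y/Z sub-indices; one fw_params_cmd can carry up to seven such mnem/val pairs. An illustrative read of the core-clock device parameter ('pf' and 'vf' are placeholders):

    struct fw_params_cmd c = {0};

    c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
                        FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
                        FW_PARAMS_CMD_VFN(vf));
    c.retval_len16 = htonl(FW_CMD_LEN16(sizeof(c) / 16));
    c.param[0].mnem = htonl(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
                            FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
    /* on reply, ntohl(c.param[0].val) is the core clock in kHz */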
511 | struct fw_pfvf_cmd { | ||
512 | __be32 op_to_vfn; | ||
513 | __be32 retval_len16; | ||
514 | __be32 niqflint_niq; | ||
515 | __be32 cmask_to_neq; | ||
516 | __be32 tc_to_nexactf; | ||
517 | __be32 r_caps_to_nethctrl; | ||
518 | __be16 nricq; | ||
519 | __be16 nriqp; | ||
520 | __be32 r4; | ||
521 | }; | ||
522 | |||
523 | #define FW_PFVF_CMD_PFN(x) ((x) << 8) | ||
524 | #define FW_PFVF_CMD_VFN(x) ((x) << 0) | ||
525 | |||
526 | #define FW_PFVF_CMD_NIQFLINT(x) ((x) << 20) | ||
527 | #define FW_PFVF_CMD_NIQFLINT_GET(x) (((x) >> 20) & 0xfff) | ||
528 | |||
529 | #define FW_PFVF_CMD_NIQ(x) ((x) << 0) | ||
530 | #define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff) | ||
531 | |||
532 | #define FW_PFVF_CMD_CMASK(x) ((x) << 24) | ||
533 | #define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & 0xf) | ||
534 | |||
535 | #define FW_PFVF_CMD_PMASK(x) ((x) << 20) | ||
536 | #define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & 0xf) | ||
537 | |||
538 | #define FW_PFVF_CMD_NEQ(x) ((x) << 0) | ||
539 | #define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff) | ||
540 | |||
541 | #define FW_PFVF_CMD_TC(x) ((x) << 24) | ||
542 | #define FW_PFVF_CMD_TC_GET(x) (((x) >> 24) & 0xff) | ||
543 | |||
544 | #define FW_PFVF_CMD_NVI(x) ((x) << 16) | ||
545 | #define FW_PFVF_CMD_NVI_GET(x) (((x) >> 16) & 0xff) | ||
546 | |||
547 | #define FW_PFVF_CMD_NEXACTF(x) ((x) << 0) | ||
548 | #define FW_PFVF_CMD_NEXACTF_GET(x) (((x) >> 0) & 0xffff) | ||
549 | |||
550 | #define FW_PFVF_CMD_R_CAPS(x) ((x) << 24) | ||
551 | #define FW_PFVF_CMD_R_CAPS_GET(x) (((x) >> 24) & 0xff) | ||
552 | |||
553 | #define FW_PFVF_CMD_WX_CAPS(x) ((x) << 16) | ||
554 | #define FW_PFVF_CMD_WX_CAPS_GET(x) (((x) >> 16) & 0xff) | ||
555 | |||
556 | #define FW_PFVF_CMD_NETHCTRL(x) ((x) << 0) | ||
557 | #define FW_PFVF_CMD_NETHCTRL_GET(x) (((x) >> 0) & 0xffff) | ||
558 | |||
559 | enum fw_iq_type { | ||
560 | FW_IQ_TYPE_FL_INT_CAP, | ||
561 | FW_IQ_TYPE_NO_FL_INT_CAP | ||
562 | }; | ||
563 | |||
564 | struct fw_iq_cmd { | ||
565 | __be32 op_to_vfn; | ||
566 | __be32 alloc_to_len16; | ||
567 | __be16 physiqid; | ||
568 | __be16 iqid; | ||
569 | __be16 fl0id; | ||
570 | __be16 fl1id; | ||
571 | __be32 type_to_iqandstindex; | ||
572 | __be16 iqdroprss_to_iqesize; | ||
573 | __be16 iqsize; | ||
574 | __be64 iqaddr; | ||
575 | __be32 iqns_to_fl0congen; | ||
576 | __be16 fl0dcaen_to_fl0cidxfthresh; | ||
577 | __be16 fl0size; | ||
578 | __be64 fl0addr; | ||
579 | __be32 fl1cngchmap_to_fl1congen; | ||
580 | __be16 fl1dcaen_to_fl1cidxfthresh; | ||
581 | __be16 fl1size; | ||
582 | __be64 fl1addr; | ||
583 | }; | ||
584 | |||
585 | #define FW_IQ_CMD_PFN(x) ((x) << 8) | ||
586 | #define FW_IQ_CMD_VFN(x) ((x) << 0) | ||
587 | |||
588 | #define FW_IQ_CMD_ALLOC (1U << 31) | ||
589 | #define FW_IQ_CMD_FREE (1U << 30) | ||
590 | #define FW_IQ_CMD_MODIFY (1U << 29) | ||
591 | #define FW_IQ_CMD_IQSTART(x) ((x) << 28) | ||
592 | #define FW_IQ_CMD_IQSTOP(x) ((x) << 27) | ||
593 | |||
594 | #define FW_IQ_CMD_TYPE(x) ((x) << 29) | ||
595 | #define FW_IQ_CMD_IQASYNCH(x) ((x) << 28) | ||
596 | #define FW_IQ_CMD_VIID(x) ((x) << 16) | ||
597 | #define FW_IQ_CMD_IQANDST(x) ((x) << 15) | ||
598 | #define FW_IQ_CMD_IQANUS(x) ((x) << 14) | ||
599 | #define FW_IQ_CMD_IQANUD(x) ((x) << 12) | ||
600 | #define FW_IQ_CMD_IQANDSTINDEX(x) ((x) << 0) | ||
601 | |||
602 | #define FW_IQ_CMD_IQDROPRSS (1U << 15) | ||
603 | #define FW_IQ_CMD_IQGTSMODE (1U << 14) | ||
604 | #define FW_IQ_CMD_IQPCIECH(x) ((x) << 12) | ||
605 | #define FW_IQ_CMD_IQDCAEN(x) ((x) << 11) | ||
606 | #define FW_IQ_CMD_IQDCACPU(x) ((x) << 6) | ||
607 | #define FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << 4) | ||
608 | #define FW_IQ_CMD_IQO (1U << 3) | ||
609 | #define FW_IQ_CMD_IQCPRIO(x) ((x) << 2) | ||
610 | #define FW_IQ_CMD_IQESIZE(x) ((x) << 0) | ||
611 | |||
612 | #define FW_IQ_CMD_IQNS(x) ((x) << 31) | ||
613 | #define FW_IQ_CMD_IQRO(x) ((x) << 30) | ||
614 | #define FW_IQ_CMD_IQFLINTIQHSEN(x) ((x) << 28) | ||
615 | #define FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << 27) | ||
616 | #define FW_IQ_CMD_IQFLINTISCSIC(x) ((x) << 26) | ||
617 | #define FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << 20) | ||
618 | #define FW_IQ_CMD_FL0CACHELOCK(x) ((x) << 15) | ||
619 | #define FW_IQ_CMD_FL0DBP(x) ((x) << 14) | ||
620 | #define FW_IQ_CMD_FL0DATANS(x) ((x) << 13) | ||
621 | #define FW_IQ_CMD_FL0DATARO(x) ((x) << 12) | ||
622 | #define FW_IQ_CMD_FL0CONGCIF(x) ((x) << 11) | ||
623 | #define FW_IQ_CMD_FL0ONCHIP(x) ((x) << 10) | ||
624 | #define FW_IQ_CMD_FL0STATUSPGNS(x) ((x) << 9) | ||
625 | #define FW_IQ_CMD_FL0STATUSPGRO(x) ((x) << 8) | ||
626 | #define FW_IQ_CMD_FL0FETCHNS(x) ((x) << 7) | ||
627 | #define FW_IQ_CMD_FL0FETCHRO(x) ((x) << 6) | ||
628 | #define FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << 4) | ||
629 | #define FW_IQ_CMD_FL0CPRIO(x) ((x) << 3) | ||
630 | #define FW_IQ_CMD_FL0PADEN (1U << 2) | ||
631 | #define FW_IQ_CMD_FL0PACKEN (1U << 1) | ||
632 | #define FW_IQ_CMD_FL0CONGEN (1U << 0) | ||
633 | |||
634 | #define FW_IQ_CMD_FL0DCAEN(x) ((x) << 15) | ||
635 | #define FW_IQ_CMD_FL0DCACPU(x) ((x) << 10) | ||
636 | #define FW_IQ_CMD_FL0FBMIN(x) ((x) << 7) | ||
637 | #define FW_IQ_CMD_FL0FBMAX(x) ((x) << 4) | ||
638 | #define FW_IQ_CMD_FL0CIDXFTHRESHO (1U << 3) | ||
639 | #define FW_IQ_CMD_FL0CIDXFTHRESH(x) ((x) << 0) | ||
640 | |||
641 | #define FW_IQ_CMD_FL1CNGCHMAP(x) ((x) << 20) | ||
642 | #define FW_IQ_CMD_FL1CACHELOCK(x) ((x) << 15) | ||
643 | #define FW_IQ_CMD_FL1DBP(x) ((x) << 14) | ||
644 | #define FW_IQ_CMD_FL1DATANS(x) ((x) << 13) | ||
645 | #define FW_IQ_CMD_FL1DATARO(x) ((x) << 12) | ||
646 | #define FW_IQ_CMD_FL1CONGCIF(x) ((x) << 11) | ||
647 | #define FW_IQ_CMD_FL1ONCHIP(x) ((x) << 10) | ||
648 | #define FW_IQ_CMD_FL1STATUSPGNS(x) ((x) << 9) | ||
649 | #define FW_IQ_CMD_FL1STATUSPGRO(x) ((x) << 8) | ||
650 | #define FW_IQ_CMD_FL1FETCHNS(x) ((x) << 7) | ||
651 | #define FW_IQ_CMD_FL1FETCHRO(x) ((x) << 6) | ||
652 | #define FW_IQ_CMD_FL1HOSTFCMODE(x) ((x) << 4) | ||
653 | #define FW_IQ_CMD_FL1CPRIO(x) ((x) << 3) | ||
654 | #define FW_IQ_CMD_FL1PADEN (1U << 2) | ||
655 | #define FW_IQ_CMD_FL1PACKEN (1U << 1) | ||
656 | #define FW_IQ_CMD_FL1CONGEN (1U << 0) | ||
657 | |||
658 | #define FW_IQ_CMD_FL1DCAEN(x) ((x) << 15) | ||
659 | #define FW_IQ_CMD_FL1DCACPU(x) ((x) << 10) | ||
660 | #define FW_IQ_CMD_FL1FBMIN(x) ((x) << 7) | ||
661 | #define FW_IQ_CMD_FL1FBMAX(x) ((x) << 4) | ||
662 | #define FW_IQ_CMD_FL1CIDXFTHRESHO (1U << 3) | ||
663 | #define FW_IQ_CMD_FL1CIDXFTHRESH(x) ((x) << 0) | ||
664 | |||
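Allocating an ingress queue combines the generic command header with FW_IQ_CMD's ALLOC/IQSTART flags and the queue geometry; on success the firmware fills in the iqid it assigned. A much-abridged, hedged sketch ('pf', 'vf', 'viid', 'nentries' and 'dma_addr' are placeholders, and the free-list words are left zero):

    struct fw_iq_cmd c = {0};

    c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
                        FW_CMD_WRITE | FW_CMD_EXEC |
                        FW_IQ_CMD_PFN(pf) | FW_IQ_CMD_VFN(vf));
    c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
                             FW_CMD_LEN16(sizeof(c) / 16));
    c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
                                   FW_IQ_CMD_VIID(viid));
    c.iqsize = htons(nentries);
    c.iqaddr = cpu_to_be64(dma_addr);   /* bus address of the IQ ring */
    /* the reply's ntohs(c.iqid) is the queue id to use thereafter */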
665 | struct fw_eq_eth_cmd { | ||
666 | __be32 op_to_vfn; | ||
667 | __be32 alloc_to_len16; | ||
668 | __be32 eqid_pkd; | ||
669 | __be32 physeqid_pkd; | ||
670 | __be32 fetchszm_to_iqid; | ||
671 | __be32 dcaen_to_eqsize; | ||
672 | __be64 eqaddr; | ||
673 | __be32 viid_pkd; | ||
674 | __be32 r8_lo; | ||
675 | __be64 r9; | ||
676 | }; | ||
677 | |||
678 | #define FW_EQ_ETH_CMD_PFN(x) ((x) << 8) | ||
679 | #define FW_EQ_ETH_CMD_VFN(x) ((x) << 0) | ||
680 | #define FW_EQ_ETH_CMD_ALLOC (1U << 31) | ||
681 | #define FW_EQ_ETH_CMD_FREE (1U << 30) | ||
682 | #define FW_EQ_ETH_CMD_MODIFY (1U << 29) | ||
683 | #define FW_EQ_ETH_CMD_EQSTART (1U << 28) | ||
684 | #define FW_EQ_ETH_CMD_EQSTOP (1U << 27) | ||
685 | |||
686 | #define FW_EQ_ETH_CMD_EQID(x) ((x) << 0) | ||
687 | #define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) | ||
688 | #define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0) | ||
689 | |||
690 | #define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26) | ||
691 | #define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25) | ||
692 | #define FW_EQ_ETH_CMD_STATUSPGRO(x) ((x) << 24) | ||
693 | #define FW_EQ_ETH_CMD_FETCHNS(x) ((x) << 23) | ||
694 | #define FW_EQ_ETH_CMD_FETCHRO(x) ((x) << 22) | ||
695 | #define FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << 20) | ||
696 | #define FW_EQ_ETH_CMD_CPRIO(x) ((x) << 19) | ||
697 | #define FW_EQ_ETH_CMD_ONCHIP(x) ((x) << 18) | ||
698 | #define FW_EQ_ETH_CMD_PCIECHN(x) ((x) << 16) | ||
699 | #define FW_EQ_ETH_CMD_IQID(x) ((x) << 0) | ||
700 | |||
701 | #define FW_EQ_ETH_CMD_DCAEN(x) ((x) << 31) | ||
702 | #define FW_EQ_ETH_CMD_DCACPU(x) ((x) << 26) | ||
703 | #define FW_EQ_ETH_CMD_FBMIN(x) ((x) << 23) | ||
704 | #define FW_EQ_ETH_CMD_FBMAX(x) ((x) << 20) | ||
705 | #define FW_EQ_ETH_CMD_CIDXFTHRESHO(x) ((x) << 19) | ||
706 | #define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16) | ||
707 | #define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0) | ||
708 | |||
709 | #define FW_EQ_ETH_CMD_VIID(x) ((x) << 16) | ||
710 | |||
711 | struct fw_eq_ctrl_cmd { | ||
712 | __be32 op_to_vfn; | ||
713 | __be32 alloc_to_len16; | ||
714 | __be32 cmpliqid_eqid; | ||
715 | __be32 physeqid_pkd; | ||
716 | __be32 fetchszm_to_iqid; | ||
717 | __be32 dcaen_to_eqsize; | ||
718 | __be64 eqaddr; | ||
719 | }; | ||
720 | |||
721 | #define FW_EQ_CTRL_CMD_PFN(x) ((x) << 8) | ||
722 | #define FW_EQ_CTRL_CMD_VFN(x) ((x) << 0) | ||
723 | |||
724 | #define FW_EQ_CTRL_CMD_ALLOC (1U << 31) | ||
725 | #define FW_EQ_CTRL_CMD_FREE (1U << 30) | ||
726 | #define FW_EQ_CTRL_CMD_MODIFY (1U << 29) | ||
727 | #define FW_EQ_CTRL_CMD_EQSTART (1U << 28) | ||
728 | #define FW_EQ_CTRL_CMD_EQSTOP (1U << 27) | ||
729 | |||
730 | #define FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << 20) | ||
731 | #define FW_EQ_CTRL_CMD_EQID(x) ((x) << 0) | ||
732 | #define FW_EQ_CTRL_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) | ||
733 | #define FW_EQ_CTRL_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) | ||
734 | |||
735 | #define FW_EQ_CTRL_CMD_FETCHSZM (1U << 26) | ||
736 | #define FW_EQ_CTRL_CMD_STATUSPGNS (1U << 25) | ||
737 | #define FW_EQ_CTRL_CMD_STATUSPGRO (1U << 24) | ||
738 | #define FW_EQ_CTRL_CMD_FETCHNS (1U << 23) | ||
739 | #define FW_EQ_CTRL_CMD_FETCHRO (1U << 22) | ||
740 | #define FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << 20) | ||
741 | #define FW_EQ_CTRL_CMD_CPRIO(x) ((x) << 19) | ||
742 | #define FW_EQ_CTRL_CMD_ONCHIP(x) ((x) << 18) | ||
743 | #define FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << 16) | ||
744 | #define FW_EQ_CTRL_CMD_IQID(x) ((x) << 0) | ||
745 | |||
746 | #define FW_EQ_CTRL_CMD_DCAEN(x) ((x) << 31) | ||
747 | #define FW_EQ_CTRL_CMD_DCACPU(x) ((x) << 26) | ||
748 | #define FW_EQ_CTRL_CMD_FBMIN(x) ((x) << 23) | ||
749 | #define FW_EQ_CTRL_CMD_FBMAX(x) ((x) << 20) | ||
750 | #define FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) ((x) << 19) | ||
751 | #define FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << 16) | ||
752 | #define FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << 0) | ||
753 | |||
754 | struct fw_eq_ofld_cmd { | ||
755 | __be32 op_to_vfn; | ||
756 | __be32 alloc_to_len16; | ||
757 | __be32 eqid_pkd; | ||
758 | __be32 physeqid_pkd; | ||
759 | __be32 fetchszm_to_iqid; | ||
760 | __be32 dcaen_to_eqsize; | ||
761 | __be64 eqaddr; | ||
762 | }; | ||
763 | |||
764 | #define FW_EQ_OFLD_CMD_PFN(x) ((x) << 8) | ||
765 | #define FW_EQ_OFLD_CMD_VFN(x) ((x) << 0) | ||
766 | |||
767 | #define FW_EQ_OFLD_CMD_ALLOC (1U << 31) | ||
768 | #define FW_EQ_OFLD_CMD_FREE (1U << 30) | ||
769 | #define FW_EQ_OFLD_CMD_MODIFY (1U << 29) | ||
770 | #define FW_EQ_OFLD_CMD_EQSTART (1U << 28) | ||
771 | #define FW_EQ_OFLD_CMD_EQSTOP (1U << 27) | ||
772 | |||
773 | #define FW_EQ_OFLD_CMD_EQID(x) ((x) << 0) | ||
774 | #define FW_EQ_OFLD_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) | ||
775 | #define FW_EQ_OFLD_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) | ||
776 | |||
777 | #define FW_EQ_OFLD_CMD_FETCHSZM(x) ((x) << 26) | ||
778 | #define FW_EQ_OFLD_CMD_STATUSPGNS(x) ((x) << 25) | ||
779 | #define FW_EQ_OFLD_CMD_STATUSPGRO(x) ((x) << 24) | ||
780 | #define FW_EQ_OFLD_CMD_FETCHNS(x) ((x) << 23) | ||
781 | #define FW_EQ_OFLD_CMD_FETCHRO(x) ((x) << 22) | ||
782 | #define FW_EQ_OFLD_CMD_HOSTFCMODE(x) ((x) << 20) | ||
783 | #define FW_EQ_OFLD_CMD_CPRIO(x) ((x) << 19) | ||
784 | #define FW_EQ_OFLD_CMD_ONCHIP(x) ((x) << 18) | ||
785 | #define FW_EQ_OFLD_CMD_PCIECHN(x) ((x) << 16) | ||
786 | #define FW_EQ_OFLD_CMD_IQID(x) ((x) << 0) | ||
787 | |||
788 | #define FW_EQ_OFLD_CMD_DCAEN(x) ((x) << 31) | ||
789 | #define FW_EQ_OFLD_CMD_DCACPU(x) ((x) << 26) | ||
790 | #define FW_EQ_OFLD_CMD_FBMIN(x) ((x) << 23) | ||
791 | #define FW_EQ_OFLD_CMD_FBMAX(x) ((x) << 20) | ||
792 | #define FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) ((x) << 19) | ||
793 | #define FW_EQ_OFLD_CMD_CIDXFTHRESH(x) ((x) << 16) | ||
794 | #define FW_EQ_OFLD_CMD_EQSIZE(x) ((x) << 0) | ||
795 | |||
796 | /* | ||
797 | * Macros for VIID parsing: | ||
798 | * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number | ||
799 | */ | ||
800 | #define FW_VIID_PFN_GET(x) (((x) >> 8) & 0x7) | ||
801 | #define FW_VIID_VIVLD_GET(x) (((x) >> 7) & 0x1) | ||
802 | #define FW_VIID_VIN_GET(x) (((x) >> 0) & 0x7F) | ||
803 | |||
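For a worked example of the layout in the comment above: a VIID of 0x285 is binary 010 1000 0101, so the macros decode it as PF 2, valid bit set, VI number 5:

    unsigned int viid = 0x285;                   /* example value only */
    unsigned int pfn = FW_VIID_PFN_GET(viid);    /* == 2 */
    unsigned int vld = FW_VIID_VIVLD_GET(viid);  /* == 1 */
    unsigned int vin = FW_VIID_VIN_GET(viid);    /* == 5 */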
804 | struct fw_vi_cmd { | ||
805 | __be32 op_to_vfn; | ||
806 | __be32 alloc_to_len16; | ||
807 | __be16 viid_pkd; | ||
808 | u8 mac[6]; | ||
809 | u8 portid_pkd; | ||
810 | u8 nmac; | ||
811 | u8 nmac0[6]; | ||
812 | __be16 rsssize_pkd; | ||
813 | u8 nmac1[6]; | ||
814 | __be16 r7; | ||
815 | u8 nmac2[6]; | ||
816 | __be16 r8; | ||
817 | u8 nmac3[6]; | ||
818 | __be64 r9; | ||
819 | __be64 r10; | ||
820 | }; | ||
821 | |||
822 | #define FW_VI_CMD_PFN(x) ((x) << 8) | ||
823 | #define FW_VI_CMD_VFN(x) ((x) << 0) | ||
824 | #define FW_VI_CMD_ALLOC (1U << 31) | ||
825 | #define FW_VI_CMD_FREE (1U << 30) | ||
826 | #define FW_VI_CMD_VIID(x) ((x) << 0) | ||
827 | #define FW_VI_CMD_PORTID(x) ((x) << 4) | ||
828 | #define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff) | ||
829 | |||
830 | /* Special VI_MAC command index ids */ | ||
831 | #define FW_VI_MAC_ADD_MAC 0x3FF | ||
832 | #define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE | ||
833 | #define FW_VI_MAC_MAC_BASED_FREE 0x3FD | ||
834 | |||
835 | enum fw_vi_mac_smac { | ||
836 | FW_VI_MAC_MPS_TCAM_ENTRY, | ||
837 | FW_VI_MAC_MPS_TCAM_ONLY, | ||
838 | FW_VI_MAC_SMT_ONLY, | ||
839 | FW_VI_MAC_SMT_AND_MPSTCAM | ||
840 | }; | ||
841 | |||
842 | enum fw_vi_mac_result { | ||
843 | FW_VI_MAC_R_SUCCESS, | ||
844 | FW_VI_MAC_R_F_NONEXISTENT_NOMEM, | ||
845 | FW_VI_MAC_R_SMAC_FAIL, | ||
846 | FW_VI_MAC_R_F_ACL_CHECK | ||
847 | }; | ||
848 | |||
849 | struct fw_vi_mac_cmd { | ||
850 | __be32 op_to_viid; | ||
851 | __be32 freemacs_to_len16; | ||
852 | union fw_vi_mac { | ||
853 | struct fw_vi_mac_exact { | ||
854 | __be16 valid_to_idx; | ||
855 | u8 macaddr[6]; | ||
856 | } exact[7]; | ||
857 | struct fw_vi_mac_hash { | ||
858 | __be64 hashvec; | ||
859 | } hash; | ||
860 | } u; | ||
861 | }; | ||
862 | |||
863 | #define FW_VI_MAC_CMD_VIID(x) ((x) << 0) | ||
864 | #define FW_VI_MAC_CMD_FREEMACS(x) ((x) << 31) | ||
865 | #define FW_VI_MAC_CMD_HASHVECEN (1U << 23) | ||
866 | #define FW_VI_MAC_CMD_HASHUNIEN(x) ((x) << 22) | ||
867 | #define FW_VI_MAC_CMD_VALID (1U << 15) | ||
868 | #define FW_VI_MAC_CMD_PRIO(x) ((x) << 12) | ||
869 | #define FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << 10) | ||
870 | #define FW_VI_MAC_CMD_SMAC_RESULT_GET(x) (((x) >> 10) & 0x3) | ||
871 | #define FW_VI_MAC_CMD_IDX(x) ((x) << 0) | ||
872 | #define FW_VI_MAC_CMD_IDX_GET(x) (((x) >> 0) & 0x3ff) | ||
873 | |||
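Adding a unicast address uses one exact[] slot with the VALID bit and one of the special indices above; FW_VI_MAC_ADD_MAC asks the firmware to pick a free exact-match slot instead of naming one. A hedged sketch ('viid' and 'addr' are placeholders):

    struct fw_vi_mac_cmd c = {0};
    struct fw_vi_mac_exact *p = &c.u.exact[0];

    c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
                         FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
    c.freemacs_to_len16 = htonl(FW_CMD_LEN16(sizeof(c) / 16));
    p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
                            FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
    memcpy(p->macaddr, addr, sizeof(p->macaddr));
    /* the reply's FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)) is the
     * slot the firmware actually chose */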
874 | #define FW_RXMODE_MTU_NO_CHG 65535 | ||
875 | |||
876 | struct fw_vi_rxmode_cmd { | ||
877 | __be32 op_to_viid; | ||
878 | __be32 retval_len16; | ||
879 | __be32 mtu_to_broadcasten; | ||
880 | __be32 r4_lo; | ||
881 | }; | ||
882 | |||
883 | #define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0) | ||
884 | #define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16) | ||
885 | #define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3 | ||
886 | #define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14) | ||
887 | #define FW_VI_RXMODE_CMD_ALLMULTIEN_MASK 0x3 | ||
888 | #define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12) | ||
889 | #define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3 | ||
890 | #define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10) | ||
891 | |||
892 | struct fw_vi_enable_cmd { | ||
893 | __be32 op_to_viid; | ||
894 | __be32 ien_to_len16; | ||
895 | __be16 blinkdur; | ||
896 | __be16 r3; | ||
897 | __be32 r4; | ||
898 | }; | ||
899 | |||
900 | #define FW_VI_ENABLE_CMD_VIID(x) ((x) << 0) | ||
901 | #define FW_VI_ENABLE_CMD_IEN(x) ((x) << 31) | ||
902 | #define FW_VI_ENABLE_CMD_EEN(x) ((x) << 30) | ||
903 | #define FW_VI_ENABLE_CMD_LED (1U << 29) | ||
904 | |||
905 | /* VI VF stats offset definitions */ | ||
906 | #define VI_VF_NUM_STATS 16 | ||
907 | enum fw_vi_stats_vf_index { | ||
908 | FW_VI_VF_STAT_TX_BCAST_BYTES_IX, | ||
909 | FW_VI_VF_STAT_TX_BCAST_FRAMES_IX, | ||
910 | FW_VI_VF_STAT_TX_MCAST_BYTES_IX, | ||
911 | FW_VI_VF_STAT_TX_MCAST_FRAMES_IX, | ||
912 | FW_VI_VF_STAT_TX_UCAST_BYTES_IX, | ||
913 | FW_VI_VF_STAT_TX_UCAST_FRAMES_IX, | ||
914 | FW_VI_VF_STAT_TX_DROP_FRAMES_IX, | ||
915 | FW_VI_VF_STAT_TX_OFLD_BYTES_IX, | ||
916 | FW_VI_VF_STAT_TX_OFLD_FRAMES_IX, | ||
917 | FW_VI_VF_STAT_RX_BCAST_BYTES_IX, | ||
918 | FW_VI_VF_STAT_RX_BCAST_FRAMES_IX, | ||
919 | FW_VI_VF_STAT_RX_MCAST_BYTES_IX, | ||
920 | FW_VI_VF_STAT_RX_MCAST_FRAMES_IX, | ||
921 | FW_VI_VF_STAT_RX_UCAST_BYTES_IX, | ||
922 | FW_VI_VF_STAT_RX_UCAST_FRAMES_IX, | ||
923 | FW_VI_VF_STAT_RX_ERR_FRAMES_IX | ||
924 | }; | ||
925 | |||
926 | /* VI PF stats offset definitions */ | ||
927 | #define VI_PF_NUM_STATS 17 | ||
928 | enum fw_vi_stats_pf_index { | ||
929 | FW_VI_PF_STAT_TX_BCAST_BYTES_IX, | ||
930 | FW_VI_PF_STAT_TX_BCAST_FRAMES_IX, | ||
931 | FW_VI_PF_STAT_TX_MCAST_BYTES_IX, | ||
932 | FW_VI_PF_STAT_TX_MCAST_FRAMES_IX, | ||
933 | FW_VI_PF_STAT_TX_UCAST_BYTES_IX, | ||
934 | FW_VI_PF_STAT_TX_UCAST_FRAMES_IX, | ||
935 | FW_VI_PF_STAT_TX_OFLD_BYTES_IX, | ||
936 | FW_VI_PF_STAT_TX_OFLD_FRAMES_IX, | ||
937 | FW_VI_PF_STAT_RX_BYTES_IX, | ||
938 | FW_VI_PF_STAT_RX_FRAMES_IX, | ||
939 | FW_VI_PF_STAT_RX_BCAST_BYTES_IX, | ||
940 | FW_VI_PF_STAT_RX_BCAST_FRAMES_IX, | ||
941 | FW_VI_PF_STAT_RX_MCAST_BYTES_IX, | ||
942 | FW_VI_PF_STAT_RX_MCAST_FRAMES_IX, | ||
943 | FW_VI_PF_STAT_RX_UCAST_BYTES_IX, | ||
944 | FW_VI_PF_STAT_RX_UCAST_FRAMES_IX, | ||
945 | FW_VI_PF_STAT_RX_ERR_FRAMES_IX | ||
946 | }; | ||
947 | |||
948 | struct fw_vi_stats_cmd { | ||
949 | __be32 op_to_viid; | ||
950 | __be32 retval_len16; | ||
951 | union fw_vi_stats { | ||
952 | struct fw_vi_stats_ctl { | ||
953 | __be16 nstats_ix; | ||
954 | __be16 r6; | ||
955 | __be32 r7; | ||
956 | __be64 stat0; | ||
957 | __be64 stat1; | ||
958 | __be64 stat2; | ||
959 | __be64 stat3; | ||
960 | __be64 stat4; | ||
961 | __be64 stat5; | ||
962 | } ctl; | ||
963 | struct fw_vi_stats_pf { | ||
964 | __be64 tx_bcast_bytes; | ||
965 | __be64 tx_bcast_frames; | ||
966 | __be64 tx_mcast_bytes; | ||
967 | __be64 tx_mcast_frames; | ||
968 | __be64 tx_ucast_bytes; | ||
969 | __be64 tx_ucast_frames; | ||
970 | __be64 tx_offload_bytes; | ||
971 | __be64 tx_offload_frames; | ||
972 | __be64 rx_pf_bytes; | ||
973 | __be64 rx_pf_frames; | ||
974 | __be64 rx_bcast_bytes; | ||
975 | __be64 rx_bcast_frames; | ||
976 | __be64 rx_mcast_bytes; | ||
977 | __be64 rx_mcast_frames; | ||
978 | __be64 rx_ucast_bytes; | ||
979 | __be64 rx_ucast_frames; | ||
980 | __be64 rx_err_frames; | ||
981 | } pf; | ||
982 | struct fw_vi_stats_vf { | ||
983 | __be64 tx_bcast_bytes; | ||
984 | __be64 tx_bcast_frames; | ||
985 | __be64 tx_mcast_bytes; | ||
986 | __be64 tx_mcast_frames; | ||
987 | __be64 tx_ucast_bytes; | ||
988 | __be64 tx_ucast_frames; | ||
989 | __be64 tx_drop_frames; | ||
990 | __be64 tx_offload_bytes; | ||
991 | __be64 tx_offload_frames; | ||
992 | __be64 rx_bcast_bytes; | ||
993 | __be64 rx_bcast_frames; | ||
994 | __be64 rx_mcast_bytes; | ||
995 | __be64 rx_mcast_frames; | ||
996 | __be64 rx_ucast_bytes; | ||
997 | __be64 rx_ucast_frames; | ||
998 | __be64 rx_err_frames; | ||
999 | } vf; | ||
1000 | } u; | ||
1001 | }; | ||
1002 | |||
1003 | #define FW_VI_STATS_CMD_VIID(x) ((x) << 0) | ||
1004 | #define FW_VI_STATS_CMD_NSTATS(x) ((x) << 12) | ||
1005 | #define FW_VI_STATS_CMD_IX(x) ((x) << 0) | ||
1006 | |||
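In the ctl form, nstats_ix packs the count of requested counters (NSTATS, at bit 12) together with the starting index (IX) taken from the fw_vi_stats_vf/pf layouts above, and the firmware returns at most six 64-bit counters per exchange in stat0..stat5. A hedged sketch of a batched VF read, again assuming FW_CMD_* and t4_wr_mbox() from earlier in the patch:

/* Sketch: read a VF's counters six at a time through the ctl form. */
static int vi_read_vf_stats(struct adapter *adap, unsigned int mbox,
			    unsigned int viid, __be64 *stats)
{
	unsigned int ix;
	struct fw_vi_stats_cmd c;

	for (ix = 0; ix < VI_VF_NUM_STATS; ix += 6) {
		unsigned int n = VI_VF_NUM_STATS - ix;
		int ret;

		if (n > 6)
			n = 6;	/* at most six counters per command */
		memset(&c, 0, sizeof(c));
		c.op_to_viid = htonl(FW_CMD_OP(FW_VI_STATS_CMD) |
				     FW_CMD_REQUEST | FW_CMD_READ |
				     FW_VI_STATS_CMD_VIID(viid));
		c.retval_len16 = htonl(FW_LEN16(c));
		c.u.ctl.nstats_ix = htons(FW_VI_STATS_CMD_IX(ix) |
					  FW_VI_STATS_CMD_NSTATS(n));
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
		if (ret)
			return ret;
		memcpy(&stats[ix], &c.u.ctl.stat0, n * sizeof(__be64));
	}
	return 0;
}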
1007 | struct fw_acl_mac_cmd { | ||
1008 | __be32 op_to_vfn; | ||
1009 | __be32 en_to_len16; | ||
1010 | u8 nmac; | ||
1011 | u8 r3[7]; | ||
1012 | __be16 r4; | ||
1013 | u8 macaddr0[6]; | ||
1014 | __be16 r5; | ||
1015 | u8 macaddr1[6]; | ||
1016 | __be16 r6; | ||
1017 | u8 macaddr2[6]; | ||
1018 | __be16 r7; | ||
1019 | u8 macaddr3[6]; | ||
1020 | }; | ||
1021 | |||
1022 | #define FW_ACL_MAC_CMD_PFN(x) ((x) << 8) | ||
1023 | #define FW_ACL_MAC_CMD_VFN(x) ((x) << 0) | ||
1024 | #define FW_ACL_MAC_CMD_EN(x) ((x) << 31) | ||
1025 | |||
1026 | struct fw_acl_vlan_cmd { | ||
1027 | __be32 op_to_vfn; | ||
1028 | __be32 en_to_len16; | ||
1029 | u8 nvlan; | ||
1030 | u8 dropnovlan_fm; | ||
1031 | u8 r3_lo[6]; | ||
1032 | __be16 vlanid[16]; | ||
1033 | }; | ||
1034 | |||
1035 | #define FW_ACL_VLAN_CMD_PFN(x) ((x) << 8) | ||
1036 | #define FW_ACL_VLAN_CMD_VFN(x) ((x) << 0) | ||
1037 | #define FW_ACL_VLAN_CMD_EN(x) ((x) << 31) | ||
1038 | #define FW_ACL_VLAN_CMD_DROPNOVLAN(x) ((x) << 7) | ||
1039 | #define FW_ACL_VLAN_CMD_FM(x) ((x) << 6) | ||
1040 | |||
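EN at bit 31 of en_to_len16 switches the ACL on, DROPNOVLAN and FM sit in dropnovlan_fm, and up to 16 VLAN IDs ride in vlanid[]. A sketch of pinning a VF to a single VLAN (the MAC ACL command above composes the same way, with the addresses in macaddr0..macaddr3); FW_CMD_* and t4_wr_mbox() are assumed from earlier in the patch:

/* Sketch: allow a VF exactly one VLAN id and drop untagged frames. */
static int acl_set_one_vlan(struct adapter *adap, unsigned int mbox,
			    unsigned int pf, unsigned int vf, u16 vlan)
{
	struct fw_acl_vlan_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_ACL_VLAN_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_ACL_VLAN_CMD_PFN(pf) | FW_ACL_VLAN_CMD_VFN(vf));
	c.en_to_len16 = htonl(FW_ACL_VLAN_CMD_EN(1) | FW_LEN16(c));
	c.nvlan = 1;
	c.dropnovlan_fm = FW_ACL_VLAN_CMD_DROPNOVLAN(1);
	c.vlanid[0] = htons(vlan);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}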
1041 | enum fw_port_cap { | ||
1042 | FW_PORT_CAP_SPEED_100M = 0x0001, | ||
1043 | FW_PORT_CAP_SPEED_1G = 0x0002, | ||
1044 | FW_PORT_CAP_SPEED_2_5G = 0x0004, | ||
1045 | FW_PORT_CAP_SPEED_10G = 0x0008, | ||
1046 | FW_PORT_CAP_SPEED_40G = 0x0010, | ||
1047 | FW_PORT_CAP_SPEED_100G = 0x0020, | ||
1048 | FW_PORT_CAP_FC_RX = 0x0040, | ||
1049 | FW_PORT_CAP_FC_TX = 0x0080, | ||
1050 | FW_PORT_CAP_ANEG = 0x0100, | ||
1051 | FW_PORT_CAP_MDI_0 = 0x0200, | ||
1052 | FW_PORT_CAP_MDI_1 = 0x0400, | ||
1053 | FW_PORT_CAP_BEAN = 0x0800, | ||
1054 | FW_PORT_CAP_PMA_LPBK = 0x1000, | ||
1055 | FW_PORT_CAP_PCS_LPBK = 0x2000, | ||
1056 | FW_PORT_CAP_PHYXS_LPBK = 0x4000, | ||
1057 | FW_PORT_CAP_FAR_END_LPBK = 0x8000, | ||
1058 | }; | ||
1059 | |||
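fw_port_cap is a bitmask: the low six bits advertise speeds, FC_RX/FC_TX advertise pause, ANEG advertises autonegotiation, and the remainder cover MDI and the loopback points. A small helper, hypothetical and purely for illustration, that reports the top speed in a capability word:

/* Hypothetical helper: highest speed, in Mb/s, present in a
 * fw_port_cap bitmask; 0 if no speed bit is set. */
static unsigned int fwcap_top_speed(unsigned int caps)
{
	if (caps & FW_PORT_CAP_SPEED_100G)
		return 100000;
	if (caps & FW_PORT_CAP_SPEED_40G)
		return 40000;
	if (caps & FW_PORT_CAP_SPEED_10G)
		return 10000;
	if (caps & FW_PORT_CAP_SPEED_2_5G)
		return 2500;
	if (caps & FW_PORT_CAP_SPEED_1G)
		return 1000;
	if (caps & FW_PORT_CAP_SPEED_100M)
		return 100;
	return 0;
}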
1060 | enum fw_port_mdi { | ||
1061 | FW_PORT_MDI_UNCHANGED, | ||
1062 | FW_PORT_MDI_AUTO, | ||
1063 | FW_PORT_MDI_F_STRAIGHT, | ||
1064 | FW_PORT_MDI_F_CROSSOVER | ||
1065 | }; | ||
1066 | |||
1067 | #define FW_PORT_MDI(x) ((x) << 9) | ||
1068 | |||
1069 | enum fw_port_action { | ||
1070 | FW_PORT_ACTION_L1_CFG = 0x0001, | ||
1071 | FW_PORT_ACTION_L2_CFG = 0x0002, | ||
1072 | FW_PORT_ACTION_GET_PORT_INFO = 0x0003, | ||
1073 | FW_PORT_ACTION_L2_PPP_CFG = 0x0004, | ||
1074 | FW_PORT_ACTION_L2_DCB_CFG = 0x0005, | ||
1075 | FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010, | ||
1076 | FW_PORT_ACTION_L1_LOW_PWR_EN = 0x0011, | ||
1077 | FW_PORT_ACTION_L2_WOL_MODE_EN = 0x0012, | ||
1078 | FW_PORT_ACTION_LPBK_TO_NORMAL = 0x0020, | ||
1079 | FW_PORT_ACTION_L1_LPBK = 0x0021, | ||
1080 | FW_PORT_ACTION_L1_PMA_LPBK = 0x0022, | ||
1081 | FW_PORT_ACTION_L1_PCS_LPBK = 0x0023, | ||
1082 | FW_PORT_ACTION_L1_PHYXS_CSIDE_LPBK = 0x0024, | ||
1083 | FW_PORT_ACTION_L1_PHYXS_ESIDE_LPBK = 0x0025, | ||
1084 | FW_PORT_ACTION_PHY_RESET = 0x0040, | ||
1085 | FW_PORT_ACTION_PMA_RESET = 0x0041, | ||
1086 | FW_PORT_ACTION_PCS_RESET = 0x0042, | ||
1087 | FW_PORT_ACTION_PHYXS_RESET = 0x0043, | ||
1088 | FW_PORT_ACTION_DTEXS_REEST = 0x0044, | ||
1089 | FW_PORT_ACTION_AN_RESET = 0x0045 | ||
1090 | }; | ||
1091 | |||
1092 | enum fw_port_l2cfg_ctlbf { | ||
1093 | FW_PORT_L2_CTLBF_OVLAN0 = 0x01, | ||
1094 | FW_PORT_L2_CTLBF_OVLAN1 = 0x02, | ||
1095 | FW_PORT_L2_CTLBF_OVLAN2 = 0x04, | ||
1096 | FW_PORT_L2_CTLBF_OVLAN3 = 0x08, | ||
1097 | FW_PORT_L2_CTLBF_IVLAN = 0x10, | ||
1098 | FW_PORT_L2_CTLBF_TXIPG = 0x20 | ||
1099 | }; | ||
1100 | |||
1101 | enum fw_port_dcb_cfg { | ||
1102 | FW_PORT_DCB_CFG_PG = 0x01, | ||
1103 | FW_PORT_DCB_CFG_PFC = 0x02, | ||
1104 | FW_PORT_DCB_CFG_APPL = 0x04 | ||
1105 | }; | ||
1106 | |||
1107 | enum fw_port_dcb_cfg_rc { | ||
1108 | FW_PORT_DCB_CFG_SUCCESS = 0x0, | ||
1109 | FW_PORT_DCB_CFG_ERROR = 0x1 | ||
1110 | }; | ||
1111 | |||
1112 | struct fw_port_cmd { | ||
1113 | __be32 op_to_portid; | ||
1114 | __be32 action_to_len16; | ||
1115 | union fw_port { | ||
1116 | struct fw_port_l1cfg { | ||
1117 | __be32 rcap; | ||
1118 | __be32 r; | ||
1119 | } l1cfg; | ||
1120 | struct fw_port_l2cfg { | ||
1121 | __be16 ctlbf_to_ivlan0; | ||
1122 | __be16 ivlantype; | ||
1123 | __be32 txipg_pkd; | ||
1124 | __be16 ovlan0mask; | ||
1125 | __be16 ovlan0type; | ||
1126 | __be16 ovlan1mask; | ||
1127 | __be16 ovlan1type; | ||
1128 | __be16 ovlan2mask; | ||
1129 | __be16 ovlan2type; | ||
1130 | __be16 ovlan3mask; | ||
1131 | __be16 ovlan3type; | ||
1132 | } l2cfg; | ||
1133 | struct fw_port_info { | ||
1134 | __be32 lstatus_to_modtype; | ||
1135 | __be16 pcap; | ||
1136 | __be16 acap; | ||
1137 | } info; | ||
1138 | struct fw_port_ppp { | ||
1139 | __be32 pppen_to_ncsich; | ||
1140 | __be32 r11; | ||
1141 | } ppp; | ||
1142 | struct fw_port_dcb { | ||
1143 | __be16 cfg; | ||
1144 | u8 up_map; | ||
1145 | u8 sf_cfgrc; | ||
1146 | __be16 prot_ix; | ||
1147 | u8 pe7_to_pe0; | ||
1148 | u8 numTCPFCs; | ||
1149 | __be32 pgid0_to_pgid7; | ||
1150 | __be32 numTCs_oui; | ||
1151 | u8 pgpc[8]; | ||
1152 | } dcb; | ||
1153 | } u; | ||
1154 | }; | ||
1155 | |||
1156 | #define FW_PORT_CMD_READ (1U << 22) | ||
1157 | |||
1158 | #define FW_PORT_CMD_PORTID(x) ((x) << 0) | ||
1159 | #define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf) | ||
1160 | |||
1161 | #define FW_PORT_CMD_ACTION(x) ((x) << 16) | ||
1162 | |||
1163 | #define FW_PORT_CMD_CTLBF(x) ((x) << 10) | ||
1164 | #define FW_PORT_CMD_OVLAN3(x) ((x) << 7) | ||
1165 | #define FW_PORT_CMD_OVLAN2(x) ((x) << 6) | ||
1166 | #define FW_PORT_CMD_OVLAN1(x) ((x) << 5) | ||
1167 | #define FW_PORT_CMD_OVLAN0(x) ((x) << 4) | ||
1168 | #define FW_PORT_CMD_IVLAN0(x) ((x) << 3) | ||
1169 | |||
1170 | #define FW_PORT_CMD_TXIPG(x) ((x) << 19) | ||
1171 | |||
1172 | #define FW_PORT_CMD_LSTATUS (1U << 31) | ||
1173 | #define FW_PORT_CMD_LSPEED(x) ((x) << 24) | ||
1174 | #define FW_PORT_CMD_LSPEED_GET(x) (((x) >> 24) & 0x3f) | ||
1175 | #define FW_PORT_CMD_TXPAUSE (1U << 23) | ||
1176 | #define FW_PORT_CMD_RXPAUSE (1U << 22) | ||
1177 | #define FW_PORT_CMD_MDIOCAP (1U << 21) | ||
1178 | #define FW_PORT_CMD_MDIOADDR_GET(x) (((x) >> 16) & 0x1f) | ||
1179 | #define FW_PORT_CMD_LPTXPAUSE (1U << 15) | ||
1180 | #define FW_PORT_CMD_LPRXPAUSE (1U << 14) | ||
1181 | #define FW_PORT_CMD_PTYPE_MASK 0x1f | ||
1182 | #define FW_PORT_CMD_PTYPE_GET(x) (((x) >> 8) & FW_PORT_CMD_PTYPE_MASK) | ||
1183 | #define FW_PORT_CMD_MODTYPE_MASK 0x1f | ||
1184 | #define FW_PORT_CMD_MODTYPE_GET(x) (((x) >> 0) & FW_PORT_CMD_MODTYPE_MASK) | ||
1185 | |||
1186 | #define FW_PORT_CMD_PPPEN(x) ((x) << 31) | ||
1187 | #define FW_PORT_CMD_TPSRC(x) ((x) << 28) | ||
1188 | #define FW_PORT_CMD_NCSISRC(x) ((x) << 24) | ||
1189 | |||
1190 | #define FW_PORT_CMD_CH0(x) ((x) << 20) | ||
1191 | #define FW_PORT_CMD_CH1(x) ((x) << 16) | ||
1192 | #define FW_PORT_CMD_CH2(x) ((x) << 12) | ||
1193 | #define FW_PORT_CMD_CH3(x) ((x) << 8) | ||
1194 | #define FW_PORT_CMD_NCSICH(x) ((x) << 4) | ||
1195 | |||
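Word 1 of fw_port_cmd selects the action (FW_PORT_CMD_ACTION, bits 16 and up), and for L1_CFG the u.l1cfg.rcap word carries requested fw_port_cap bits together with an fw_port_mdi selector shifted in by FW_PORT_MDI(). A sketch of a link bring-up in the style of the driver's link-start path, with FW_CMD_* and t4_wr_mbox() assumed from earlier in the patch:

/* Sketch: request autonegotiated link with pause in both directions.
 * acaps holds the fw_port_cap speed bits to advertise. */
static int port_l1_cfg(struct adapter *adap, unsigned int mbox,
		       unsigned int port, unsigned int acaps)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(acaps | FW_PORT_CAP_ANEG |
			       FW_PORT_CAP_FC_RX | FW_PORT_CAP_FC_TX |
			       FW_PORT_MDI(FW_PORT_MDI_AUTO));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}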
1196 | enum fw_port_type { | ||
1197 | FW_PORT_TYPE_FIBER, | ||
1198 | FW_PORT_TYPE_KX4, | ||
1199 | FW_PORT_TYPE_BT_SGMII, | ||
1200 | FW_PORT_TYPE_KX, | ||
1201 | FW_PORT_TYPE_BT_XAUI, | ||
1202 | FW_PORT_TYPE_KR, | ||
1203 | FW_PORT_TYPE_CX4, | ||
1204 | FW_PORT_TYPE_TWINAX, | ||
1205 | |||
1206 | FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK | ||
1207 | }; | ||
1208 | |||
1209 | enum fw_port_module_type { | ||
1210 | FW_PORT_MOD_TYPE_NA, | ||
1211 | FW_PORT_MOD_TYPE_LR, | ||
1212 | FW_PORT_MOD_TYPE_SR, | ||
1213 | FW_PORT_MOD_TYPE_ER, | ||
1214 | |||
1215 | FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK | ||
1216 | }; | ||
1217 | |||
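A GET_PORT_INFO reply packs everything into u.info.lstatus_to_modtype: LSTATUS is link-up, the LSPEED field holds FW_PORT_CAP_SPEED_* bits shifted up by 24, pause state sits in TXPAUSE/RXPAUSE, and PTYPE/MODTYPE map onto the two enums above. A decoding sketch:

/* Sketch: pull link state out of a GET_PORT_INFO reply. */
static void decode_port_info(const struct fw_port_cmd *p)
{
	u32 stat = ntohl(p->u.info.lstatus_to_modtype);
	int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
	unsigned int speed = 0;

	if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
		speed = 100;
	else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
		speed = 1000;
	else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
		speed = 10000;

	pr_info("port %u: link %s, %u Mb/s, ptype %u, module %u\n",
		FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid)),
		link_ok ? "up" : "down", speed,
		FW_PORT_CMD_PTYPE_GET(stat), FW_PORT_CMD_MODTYPE_GET(stat));
}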
1218 | /* port stats */ | ||
1219 | #define FW_NUM_PORT_STATS 50 | ||
1220 | #define FW_NUM_PORT_TX_STATS 23 | ||
1221 | #define FW_NUM_PORT_RX_STATS 27 | ||
1222 | |||
1223 | enum fw_port_stats_tx_index { | ||
1224 | FW_STAT_TX_PORT_BYTES_IX, | ||
1225 | FW_STAT_TX_PORT_FRAMES_IX, | ||
1226 | FW_STAT_TX_PORT_BCAST_IX, | ||
1227 | FW_STAT_TX_PORT_MCAST_IX, | ||
1228 | FW_STAT_TX_PORT_UCAST_IX, | ||
1229 | FW_STAT_TX_PORT_ERROR_IX, | ||
1230 | FW_STAT_TX_PORT_64B_IX, | ||
1231 | FW_STAT_TX_PORT_65B_127B_IX, | ||
1232 | FW_STAT_TX_PORT_128B_255B_IX, | ||
1233 | FW_STAT_TX_PORT_256B_511B_IX, | ||
1234 | FW_STAT_TX_PORT_512B_1023B_IX, | ||
1235 | FW_STAT_TX_PORT_1024B_1518B_IX, | ||
1236 | FW_STAT_TX_PORT_1519B_MAX_IX, | ||
1237 | FW_STAT_TX_PORT_DROP_IX, | ||
1238 | FW_STAT_TX_PORT_PAUSE_IX, | ||
1239 | FW_STAT_TX_PORT_PPP0_IX, | ||
1240 | FW_STAT_TX_PORT_PPP1_IX, | ||
1241 | FW_STAT_TX_PORT_PPP2_IX, | ||
1242 | FW_STAT_TX_PORT_PPP3_IX, | ||
1243 | FW_STAT_TX_PORT_PPP4_IX, | ||
1244 | FW_STAT_TX_PORT_PPP5_IX, | ||
1245 | FW_STAT_TX_PORT_PPP6_IX, | ||
1246 | FW_STAT_TX_PORT_PPP7_IX | ||
1247 | }; | ||
1248 | |||
1249 | enum fw_port_stats_rx_index { | ||
1250 | FW_STAT_RX_PORT_BYTES_IX, | ||
1251 | FW_STAT_RX_PORT_FRAMES_IX, | ||
1252 | FW_STAT_RX_PORT_BCAST_IX, | ||
1253 | FW_STAT_RX_PORT_MCAST_IX, | ||
1254 | FW_STAT_RX_PORT_UCAST_IX, | ||
1255 | FW_STAT_RX_PORT_MTU_ERROR_IX, | ||
1256 | FW_STAT_RX_PORT_MTU_CRC_ERROR_IX, | ||
1257 | FW_STAT_RX_PORT_CRC_ERROR_IX, | ||
1258 | FW_STAT_RX_PORT_LEN_ERROR_IX, | ||
1259 | FW_STAT_RX_PORT_SYM_ERROR_IX, | ||
1260 | FW_STAT_RX_PORT_64B_IX, | ||
1261 | FW_STAT_RX_PORT_65B_127B_IX, | ||
1262 | FW_STAT_RX_PORT_128B_255B_IX, | ||
1263 | FW_STAT_RX_PORT_256B_511B_IX, | ||
1264 | FW_STAT_RX_PORT_512B_1023B_IX, | ||
1265 | FW_STAT_RX_PORT_1024B_1518B_IX, | ||
1266 | FW_STAT_RX_PORT_1519B_MAX_IX, | ||
1267 | FW_STAT_RX_PORT_PAUSE_IX, | ||
1268 | FW_STAT_RX_PORT_PPP0_IX, | ||
1269 | FW_STAT_RX_PORT_PPP1_IX, | ||
1270 | FW_STAT_RX_PORT_PPP2_IX, | ||
1271 | FW_STAT_RX_PORT_PPP3_IX, | ||
1272 | FW_STAT_RX_PORT_PPP4_IX, | ||
1273 | FW_STAT_RX_PORT_PPP5_IX, | ||
1274 | FW_STAT_RX_PORT_PPP6_IX, | ||
1275 | FW_STAT_RX_PORT_PPP7_IX, | ||
1276 | FW_STAT_RX_PORT_LESS_64B_IX | ||
1277 | }; | ||
1278 | |||
1279 | struct fw_port_stats_cmd { | ||
1280 | __be32 op_to_portid; | ||
1281 | __be32 retval_len16; | ||
1282 | union fw_port_stats { | ||
1283 | struct fw_port_stats_ctl { | ||
1284 | u8 nstats_bg_bm; | ||
1285 | u8 tx_ix; | ||
1286 | __be16 r6; | ||
1287 | __be32 r7; | ||
1288 | __be64 stat0; | ||
1289 | __be64 stat1; | ||
1290 | __be64 stat2; | ||
1291 | __be64 stat3; | ||
1292 | __be64 stat4; | ||
1293 | __be64 stat5; | ||
1294 | } ctl; | ||
1295 | struct fw_port_stats_all { | ||
1296 | __be64 tx_bytes; | ||
1297 | __be64 tx_frames; | ||
1298 | __be64 tx_bcast; | ||
1299 | __be64 tx_mcast; | ||
1300 | __be64 tx_ucast; | ||
1301 | __be64 tx_error; | ||
1302 | __be64 tx_64b; | ||
1303 | __be64 tx_65b_127b; | ||
1304 | __be64 tx_128b_255b; | ||
1305 | __be64 tx_256b_511b; | ||
1306 | __be64 tx_512b_1023b; | ||
1307 | __be64 tx_1024b_1518b; | ||
1308 | __be64 tx_1519b_max; | ||
1309 | __be64 tx_drop; | ||
1310 | __be64 tx_pause; | ||
1311 | __be64 tx_ppp0; | ||
1312 | __be64 tx_ppp1; | ||
1313 | __be64 tx_ppp2; | ||
1314 | __be64 tx_ppp3; | ||
1315 | __be64 tx_ppp4; | ||
1316 | __be64 tx_ppp5; | ||
1317 | __be64 tx_ppp6; | ||
1318 | __be64 tx_ppp7; | ||
1319 | __be64 rx_bytes; | ||
1320 | __be64 rx_frames; | ||
1321 | __be64 rx_bcast; | ||
1322 | __be64 rx_mcast; | ||
1323 | __be64 rx_ucast; | ||
1324 | __be64 rx_mtu_error; | ||
1325 | __be64 rx_mtu_crc_error; | ||
1326 | __be64 rx_crc_error; | ||
1327 | __be64 rx_len_error; | ||
1328 | __be64 rx_sym_error; | ||
1329 | __be64 rx_64b; | ||
1330 | __be64 rx_65b_127b; | ||
1331 | __be64 rx_128b_255b; | ||
1332 | __be64 rx_256b_511b; | ||
1333 | __be64 rx_512b_1023b; | ||
1334 | __be64 rx_1024b_1518b; | ||
1335 | __be64 rx_1519b_max; | ||
1336 | __be64 rx_pause; | ||
1337 | __be64 rx_ppp0; | ||
1338 | __be64 rx_ppp1; | ||
1339 | __be64 rx_ppp2; | ||
1340 | __be64 rx_ppp3; | ||
1341 | __be64 rx_ppp4; | ||
1342 | __be64 rx_ppp5; | ||
1343 | __be64 rx_ppp6; | ||
1344 | __be64 rx_ppp7; | ||
1345 | __be64 rx_less_64b; | ||
1346 | __be64 rx_bg_drop; | ||
1347 | __be64 rx_bg_trunc; | ||
1348 | } all; | ||
1349 | } u; | ||
1350 | }; | ||
1351 | |||
1352 | #define FW_PORT_STATS_CMD_NSTATS(x) ((x) << 4) | ||
1353 | #define FW_PORT_STATS_CMD_BG_BM(x) ((x) << 0) | ||
1354 | #define FW_PORT_STATS_CMD_TX(x) ((x) << 7) | ||
1355 | #define FW_PORT_STATS_CMD_IX(x) ((x) << 0) | ||
1356 | |||
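The counts add up: FW_NUM_PORT_STATS (50) is FW_NUM_PORT_TX_STATS (23) plus FW_NUM_PORT_RX_STATS (27). In the ctl form, nstats_bg_bm packs NSTATS (bit 4) with the buffer-group selector, tx_ix packs the direction flag (TX, bit 7) with the starting index, and each exchange returns at most six counters in stat0..stat5. A sketch of reading all TX counters; the loopback-stats command below follows the same pattern. FW_CMD_* and t4_wr_mbox() are assumed from earlier in the patch:

/* Sketch: fetch a port's TX counters six at a time via the ctl form. */
static int port_read_tx_stats(struct adapter *adap, unsigned int mbox,
			      unsigned int port, __be64 *stats)
{
	unsigned int ix;
	struct fw_port_stats_cmd c;

	for (ix = 0; ix < FW_NUM_PORT_TX_STATS; ix += 6) {
		unsigned int n = FW_NUM_PORT_TX_STATS - ix;
		int ret;

		if (n > 6)
			n = 6;	/* at most six counters per command */
		memset(&c, 0, sizeof(c));
		c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_STATS_CMD) |
				       FW_CMD_REQUEST | FW_CMD_READ |
				       FW_PORT_CMD_PORTID(port));
		c.retval_len16 = htonl(FW_LEN16(c));
		c.u.ctl.nstats_bg_bm = FW_PORT_STATS_CMD_NSTATS(n);
		c.u.ctl.tx_ix = FW_PORT_STATS_CMD_TX(1) |
				FW_PORT_STATS_CMD_IX(ix);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
		if (ret)
			return ret;
		memcpy(&stats[ix], &c.u.ctl.stat0, n * sizeof(__be64));
	}
	return 0;
}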
1357 | /* port loopback stats */ | ||
1358 | #define FW_NUM_LB_STATS 16 | ||
1359 | enum fw_port_lb_stats_index { | ||
1360 | FW_STAT_LB_PORT_BYTES_IX, | ||
1361 | FW_STAT_LB_PORT_FRAMES_IX, | ||
1362 | FW_STAT_LB_PORT_BCAST_IX, | ||
1363 | FW_STAT_LB_PORT_MCAST_IX, | ||
1364 | FW_STAT_LB_PORT_UCAST_IX, | ||
1365 | FW_STAT_LB_PORT_ERROR_IX, | ||
1366 | FW_STAT_LB_PORT_64B_IX, | ||
1367 | FW_STAT_LB_PORT_65B_127B_IX, | ||
1368 | FW_STAT_LB_PORT_128B_255B_IX, | ||
1369 | FW_STAT_LB_PORT_256B_511B_IX, | ||
1370 | FW_STAT_LB_PORT_512B_1023B_IX, | ||
1371 | FW_STAT_LB_PORT_1024B_1518B_IX, | ||
1372 | FW_STAT_LB_PORT_1519B_MAX_IX, | ||
1373 | FW_STAT_LB_PORT_DROP_FRAMES_IX | ||
1374 | }; | ||
1375 | |||
1376 | struct fw_port_lb_stats_cmd { | ||
1377 | __be32 op_to_lbport; | ||
1378 | __be32 retval_len16; | ||
1379 | union fw_port_lb_stats { | ||
1380 | struct fw_port_lb_stats_ctl { | ||
1381 | u8 nstats_bg_bm; | ||
1382 | u8 ix_pkd; | ||
1383 | __be16 r6; | ||
1384 | __be32 r7; | ||
1385 | __be64 stat0; | ||
1386 | __be64 stat1; | ||
1387 | __be64 stat2; | ||
1388 | __be64 stat3; | ||
1389 | __be64 stat4; | ||
1390 | __be64 stat5; | ||
1391 | } ctl; | ||
1392 | struct fw_port_lb_stats_all { | ||
1393 | __be64 tx_bytes; | ||
1394 | __be64 tx_frames; | ||
1395 | __be64 tx_bcast; | ||
1396 | __be64 tx_mcast; | ||
1397 | __be64 tx_ucast; | ||
1398 | __be64 tx_error; | ||
1399 | __be64 tx_64b; | ||
1400 | __be64 tx_65b_127b; | ||
1401 | __be64 tx_128b_255b; | ||
1402 | __be64 tx_256b_511b; | ||
1403 | __be64 tx_512b_1023b; | ||
1404 | __be64 tx_1024b_1518b; | ||
1405 | __be64 tx_1519b_max; | ||
1406 | __be64 rx_lb_drop; | ||
1407 | __be64 rx_lb_trunc; | ||
1408 | } all; | ||
1409 | } u; | ||
1410 | }; | ||
1411 | |||
1412 | #define FW_PORT_LB_STATS_CMD_LBPORT(x) ((x) << 0) | ||
1413 | #define FW_PORT_LB_STATS_CMD_NSTATS(x) ((x) << 4) | ||
1414 | #define FW_PORT_LB_STATS_CMD_BG_BM(x) ((x) << 0) | ||
1415 | #define FW_PORT_LB_STATS_CMD_IX(x) ((x) << 0) | ||
1416 | |||
1417 | struct fw_rss_ind_tbl_cmd { | ||
1418 | __be32 op_to_viid; | ||
1419 | #define FW_RSS_IND_TBL_CMD_VIID(x) ((x) << 0) | ||
1420 | __be32 retval_len16; | ||
1421 | __be16 niqid; | ||
1422 | __be16 startidx; | ||
1423 | __be32 r3; | ||
1424 | __be32 iq0_to_iq2; | ||
1425 | #define FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << 20) | ||
1426 | #define FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << 10) | ||
1427 | #define FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << 0) | ||
1428 | __be32 iq3_to_iq5; | ||
1429 | __be32 iq6_to_iq8; | ||
1430 | __be32 iq9_to_iq11; | ||
1431 | __be32 iq12_to_iq14; | ||
1432 | __be32 iq15_to_iq17; | ||
1433 | __be32 iq18_to_iq20; | ||
1434 | __be32 iq21_to_iq23; | ||
1435 | __be32 iq24_to_iq26; | ||
1436 | __be32 iq27_to_iq29; | ||
1437 | __be32 iq30_iq31; | ||
1438 | __be32 r15_lo; | ||
1439 | }; | ||
1440 | |||
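Each of these commands programs up to 32 consecutive slots of a VI's RSS indirection table: startidx and niqid say where and how many, and the ingress-queue ids are packed three per 32-bit word (10 bits apiece) by the IQ0/IQ1/IQ2 shifts, continuing through iq30_iq31. A packing sketch for the first word, with FW_CMD_* and t4_wr_mbox() assumed from earlier in the patch:

/* Sketch: program the first three indirection-table slots of a VI. */
static int rss_write_first_three(struct adapter *adap, unsigned int mbox,
				 unsigned int viid, u16 iqa, u16 iqb, u16 iqc)
{
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
			       FW_CMD_REQUEST | FW_CMD_WRITE |
			       FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = htonl(FW_LEN16(cmd));
	cmd.niqid = htons(3);		/* three entries ... */
	cmd.startidx = htons(0);	/* ... starting at slot 0 */
	cmd.iq0_to_iq2 = htonl(FW_RSS_IND_TBL_CMD_IQ0(iqa) |
			       FW_RSS_IND_TBL_CMD_IQ1(iqb) |
			       FW_RSS_IND_TBL_CMD_IQ2(iqc));
	return t4_wr_mbox(adap, mbox, &cmd, sizeof(cmd), NULL);
}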
1441 | struct fw_rss_glb_config_cmd { | ||
1442 | __be32 op_to_write; | ||
1443 | __be32 retval_len16; | ||
1444 | union fw_rss_glb_config { | ||
1445 | struct fw_rss_glb_config_manual { | ||
1446 | __be32 mode_pkd; | ||
1447 | __be32 r3; | ||
1448 | __be64 r4; | ||
1449 | __be64 r5; | ||
1450 | } manual; | ||
1451 | struct fw_rss_glb_config_basicvirtual { | ||
1452 | __be32 mode_pkd; | ||
1453 | __be32 synmapen_to_hashtoeplitz; | ||
1454 | #define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN (1U << 8) | ||
1455 | #define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 (1U << 7) | ||
1456 | #define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 (1U << 6) | ||
1457 | #define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 (1U << 5) | ||
1458 | #define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 (1U << 4) | ||
1459 | #define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN (1U << 3) | ||
1460 | #define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN (1U << 2) | ||
1461 | #define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP (1U << 1) | ||
1462 | #define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ (1U << 0) | ||
1463 | __be64 r8; | ||
1464 | __be64 r9; | ||
1465 | } basicvirtual; | ||
1466 | } u; | ||
1467 | }; | ||
1468 | |||
1469 | #define FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << 28) | ||
1470 | |||
1471 | #define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0 | ||
1472 | #define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1 | ||
1473 | |||
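MODE (bits 28 and up) selects manual versus basic-virtual global RSS; in basic-virtual mode the SYN*/OFDMAPEN/TNL* flags in synmapen_to_hashtoeplitz choose which lookups are enabled and HASHTOEPLITZ selects the hash function. A sketch, assuming FW_CMD_* and t4_wr_mbox() from earlier in the patch:

/* Sketch: global RSS in basic-virtual mode, tunnel-map lookups on all
 * packets, Toeplitz hashing. */
static int rss_glb_basicvirtual(struct adapter *adap, unsigned int mbox)
{
	struct fw_rss_glb_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
			      FW_CMD_REQUEST | FW_CMD_WRITE);
	c.retval_len16 = htonl(FW_LEN16(c));
	c.u.basicvirtual.mode_pkd =
		htonl(FW_RSS_GLB_CONFIG_CMD_MODE(FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL));
	c.u.basicvirtual.synmapen_to_hashtoeplitz =
		htonl(FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
		      FW_RSS_GLB_CONFIG_CMD_TNLALLLKP |
		      FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}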
1474 | struct fw_rss_vi_config_cmd { | ||
1475 | __be32 op_to_viid; | ||
1476 | #define FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << 0) | ||
1477 | __be32 retval_len16; | ||
1478 | union fw_rss_vi_config { | ||
1479 | struct fw_rss_vi_config_manual { | ||
1480 | __be64 r3; | ||
1481 | __be64 r4; | ||
1482 | __be64 r5; | ||
1483 | } manual; | ||
1484 | struct fw_rss_vi_config_basicvirtual { | ||
1485 | __be32 r6; | ||
1486 | __be32 defaultq_to_ip4udpen; | ||
1487 | #define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) ((x) << 16) | ||
1488 | #define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4) | ||
1489 | #define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN (1U << 3) | ||
1490 | #define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2) | ||
1491 | #define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN (1U << 1) | ||
1492 | #define FW_RSS_VI_CONFIG_CMD_IP4UDPEN (1U << 0) | ||
1493 | __be64 r9; | ||
1494 | __be64 r10; | ||
1495 | } basicvirtual; | ||
1496 | } u; | ||
1497 | }; | ||
1498 | |||
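Per-VI RSS configuration: DEFAULTQ (bits 16 and up) names the ingress queue for traffic that is not hashed, and the *TUPEN bits choose which IP 2-tuples/4-tuples (and IPv4 UDP) get hashed. A sketch, again assuming FW_CMD_* and t4_wr_mbox() from earlier in the patch:

/* Sketch: hash IPv4/IPv6 4-tuples on a VI, sending misses to defq. */
static int rss_vi_fourtuple(struct adapter *adap, unsigned int mbox,
			    unsigned int viid, unsigned int defq)
{
	struct fw_rss_vi_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
			     FW_CMD_REQUEST | FW_CMD_WRITE |
			     FW_RSS_VI_CONFIG_CMD_VIID(viid));
	c.retval_len16 = htonl(FW_LEN16(c));
	c.u.basicvirtual.defaultq_to_ip4udpen =
		htonl(FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq) |
		      FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
		      FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}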
1499 | enum fw_error_type { | ||
1500 | FW_ERROR_TYPE_EXCEPTION = 0x0, | ||
1501 | FW_ERROR_TYPE_HWMODULE = 0x1, | ||
1502 | FW_ERROR_TYPE_WR = 0x2, | ||
1503 | FW_ERROR_TYPE_ACL = 0x3, | ||
1504 | }; | ||
1505 | |||
1506 | struct fw_error_cmd { | ||
1507 | __be32 op_to_type; | ||
1508 | __be32 len16_pkd; | ||
1509 | union fw_error { | ||
1510 | struct fw_error_exception { | ||
1511 | __be32 info[6]; | ||
1512 | } exception; | ||
1513 | struct fw_error_hwmodule { | ||
1514 | __be32 regaddr; | ||
1515 | __be32 regval; | ||
1516 | } hwmodule; | ||
1517 | struct fw_error_wr { | ||
1518 | __be16 cidx; | ||
1519 | __be16 pfn_vfn; | ||
1520 | __be32 eqid; | ||
1521 | u8 wrhdr[16]; | ||
1522 | } wr; | ||
1523 | struct fw_error_acl { | ||
1524 | __be16 cidx; | ||
1525 | __be16 pfn_vfn; | ||
1526 | __be32 eqid; | ||
1527 | __be16 mv_pkd; | ||
1528 | u8 val[6]; | ||
1529 | __be64 r4; | ||
1530 | } acl; | ||
1531 | } u; | ||
1532 | }; | ||
1533 | |||
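fw_error_cmd is a firmware-initiated notification: op_to_type carries an fw_error_type that selects which union member is valid. The bit position of the type field is not shown in this hunk, so the accessor below is a hypothetical stand-in for the real extractor. A decoding sketch:

/* Sketch: log a firmware error notification.  fw_error_cmd_type() is a
 * hypothetical accessor for the type field, whose bit position is
 * defined elsewhere in this header. */
static void log_fw_error(const struct fw_error_cmd *p)
{
	switch (fw_error_cmd_type(ntohl(p->op_to_type))) {
	case FW_ERROR_TYPE_EXCEPTION:
		pr_err("fw exception, info %08x %08x ...\n",
		       ntohl(p->u.exception.info[0]),
		       ntohl(p->u.exception.info[1]));
		break;
	case FW_ERROR_TYPE_HWMODULE:
		pr_err("fw hw-module error: reg %#x = %#x\n",
		       ntohl(p->u.hwmodule.regaddr),
		       ntohl(p->u.hwmodule.regval));
		break;
	case FW_ERROR_TYPE_WR:
		pr_err("fw bad work request, eq %u cidx %u\n",
		       ntohl(p->u.wr.eqid), ntohs(p->u.wr.cidx));
		break;
	case FW_ERROR_TYPE_ACL:
		pr_err("fw ACL violation, eq %u\n", ntohl(p->u.acl.eqid));
		break;
	}
}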
1534 | struct fw_debug_cmd { | ||
1535 | __be32 op_type; | ||
1536 | #define FW_DEBUG_CMD_TYPE_GET(x) ((x) & 0xff) | ||
1537 | __be32 len16_pkd; | ||
1538 | union fw_debug { | ||
1539 | struct fw_debug_assert { | ||
1540 | __be32 fcid; | ||
1541 | __be32 line; | ||
1542 | __be32 x; | ||
1543 | __be32 y; | ||
1544 | u8 filename_0_7[8]; | ||
1545 | u8 filename_8_15[8]; | ||
1546 | __be64 r3; | ||
1547 | } assert; | ||
1548 | struct fw_debug_prt { | ||
1549 | __be16 dprtstridx; | ||
1550 | __be16 r3[3]; | ||
1551 | __be32 dprtstrparam0; | ||
1552 | __be32 dprtstrparam1; | ||
1553 | __be32 dprtstrparam2; | ||
1554 | __be32 dprtstrparam3; | ||
1555 | } prt; | ||
1556 | } u; | ||
1557 | }; | ||
1558 | |||
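Firmware debug messages come in two shapes: an assert record (file, line, and two parameters) and a printf-style record that indexes a string table. Only the type-field accessor, FW_DEBUG_CMD_TYPE_GET, appears here; the type values themselves are defined elsewhere. A sketch that formats the assert variant, relying on filename_0_7 and filename_8_15 being contiguous:

/* Sketch: report a firmware assertion.  Assumes the caller already used
 * FW_DEBUG_CMD_TYPE_GET(ntohl(p->op_type)) to establish that the
 * 'assert' union member is the valid one. */
static void log_fw_assert(const struct fw_debug_cmd *p)
{
	pr_alert("fw assert at %.16s:%u, val0 %#x, val1 %#x\n",
		 p->u.assert.filename_0_7, ntohl(p->u.assert.line),
		 ntohl(p->u.assert.x), ntohl(p->u.assert.y));
}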
1559 | struct fw_hdr { | ||
1560 | u8 ver; | ||
1561 | u8 reserved1; | ||
1562 | __be16 len512; /* bin length in units of 512 bytes */ | ||
1563 | __be32 fw_ver; /* firmware version */ | ||
1564 | __be32 tp_microcode_ver; | ||
1565 | u8 intfver_nic; | ||
1566 | u8 intfver_vnic; | ||
1567 | u8 intfver_ofld; | ||
1568 | u8 intfver_ri; | ||
1569 | u8 intfver_iscsipdu; | ||
1570 | u8 intfver_iscsi; | ||
1571 | u8 intfver_fcoe; | ||
1572 | u8 reserved2; | ||
1573 | __be32 reserved3[27]; | ||
1574 | }; | ||
1575 | |||
1576 | #define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff) | ||
1577 | #define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) | ||
1578 | #define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) | ||
1579 | #define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff) | ||
1580 | #endif /* _T4FW_INTERFACE_H_ */ | ||
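fw_hdr sits at the front of the flash image: len512 gives the image length in 512-byte units, and fw_ver packs major.minor.micro.build one byte each, which the FW_HDR_FW_VER_*_GET macros unpack. A validation sketch along the lines of the driver's firmware-load checks:

/* Sketch: sanity-check a firmware image and report its version. */
static int check_fw_image(const u8 *fw_data, unsigned int size)
{
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	u32 ver;

	if (size < sizeof(*hdr) || size != (u32)ntohs(hdr->len512) * 512)
		return -EINVAL;	/* truncated or padded image */

	ver = ntohl(hdr->fw_ver);
	pr_info("firmware %u.%u.%u.%u, %u bytes\n",
		FW_HDR_FW_VER_MAJOR_GET(ver), FW_HDR_FW_VER_MINOR_GET(ver),
		FW_HDR_FW_VER_MICRO_GET(ver), FW_HDR_FW_VER_BUILD_GET(ver),
		size);
	return 0;
}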