path: root/drivers/net/cxgb4
author    Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-22 10:38:37 -0500
committer Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-22 10:38:37 -0500
commit    fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
tree      a57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/net/cxgb4
parent    8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)
Added missing tegra files. (HEAD, master)
Diffstat (limited to 'drivers/net/cxgb4')
-rw-r--r--  drivers/net/cxgb4/Makefile          7
-rw-r--r--  drivers/net/cxgb4/cxgb4.h         722
-rw-r--r--  drivers/net/cxgb4/cxgb4_main.c   3809
-rw-r--r--  drivers/net/cxgb4/cxgb4_uld.h     239
-rw-r--r--  drivers/net/cxgb4/l2t.c           597
-rw-r--r--  drivers/net/cxgb4/l2t.h           107
-rw-r--r--  drivers/net/cxgb4/sge.c          2442
-rw-r--r--  drivers/net/cxgb4/t4_hw.c        2856
-rw-r--r--  drivers/net/cxgb4/t4_hw.h         140
-rw-r--r--  drivers/net/cxgb4/t4_msg.h        678
-rw-r--r--  drivers/net/cxgb4/t4_regs.h       885
-rw-r--r--  drivers/net/cxgb4/t4fw_api.h     1623
12 files changed, 14105 insertions, 0 deletions
diff --git a/drivers/net/cxgb4/Makefile b/drivers/net/cxgb4/Makefile
new file mode 100644
index 00000000000..498667487f5
--- /dev/null
+++ b/drivers/net/cxgb4/Makefile
@@ -0,0 +1,7 @@
1#
2# Chelsio T4 driver
3#
4
5obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
6
7cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
new file mode 100644
index 00000000000..223a7f72343
--- /dev/null
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -0,0 +1,722 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef __CXGB4_H__
36#define __CXGB4_H__
37
38#include <linux/bitops.h>
39#include <linux/cache.h>
40#include <linux/interrupt.h>
41#include <linux/list.h>
42#include <linux/netdevice.h>
43#include <linux/pci.h>
44#include <linux/spinlock.h>
45#include <linux/timer.h>
46#include <asm/io.h>
47#include "cxgb4_uld.h"
48#include "t4_hw.h"
49
50#define FW_VERSION_MAJOR 1
51#define FW_VERSION_MINOR 1
52#define FW_VERSION_MICRO 0
53
54enum {
55 MAX_NPORTS = 4, /* max # of ports */
56 SERNUM_LEN = 24, /* Serial # length */
57 EC_LEN = 16, /* E/C length */
58 ID_LEN = 16, /* ID length */
59};
60
61enum {
62 MEM_EDC0,
63 MEM_EDC1,
64 MEM_MC
65};
66
67enum dev_master {
68 MASTER_CANT,
69 MASTER_MAY,
70 MASTER_MUST
71};
72
73enum dev_state {
74 DEV_STATE_UNINIT,
75 DEV_STATE_INIT,
76 DEV_STATE_ERR
77};
78
79enum {
80 PAUSE_RX = 1 << 0,
81 PAUSE_TX = 1 << 1,
82 PAUSE_AUTONEG = 1 << 2
83};
84
85struct port_stats {
86 u64 tx_octets; /* total # of octets in good frames */
87 u64 tx_frames; /* all good frames */
88 u64 tx_bcast_frames; /* all broadcast frames */
89 u64 tx_mcast_frames; /* all multicast frames */
90 u64 tx_ucast_frames; /* all unicast frames */
91 u64 tx_error_frames; /* all error frames */
92
93 u64 tx_frames_64; /* # of Tx frames in a particular range */
94 u64 tx_frames_65_127;
95 u64 tx_frames_128_255;
96 u64 tx_frames_256_511;
97 u64 tx_frames_512_1023;
98 u64 tx_frames_1024_1518;
99 u64 tx_frames_1519_max;
100
101 u64 tx_drop; /* # of dropped Tx frames */
102 u64 tx_pause; /* # of transmitted pause frames */
103 u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */
104 u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */
105 u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */
106 u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */
107 u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */
108 u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */
109 u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */
110 u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */
111
112 u64 rx_octets; /* total # of octets in good frames */
113 u64 rx_frames; /* all good frames */
114 u64 rx_bcast_frames; /* all broadcast frames */
115 u64 rx_mcast_frames; /* all multicast frames */
116 u64 rx_ucast_frames; /* all unicast frames */
117 u64 rx_too_long; /* # of frames exceeding MTU */
118 u64 rx_jabber; /* # of jabber frames */
119 u64 rx_fcs_err; /* # of received frames with bad FCS */
120 u64 rx_len_err; /* # of received frames with length error */
121 u64 rx_symbol_err; /* symbol errors */
122 u64 rx_runt; /* # of short frames */
123
124 u64 rx_frames_64; /* # of Rx frames in a particular range */
125 u64 rx_frames_65_127;
126 u64 rx_frames_128_255;
127 u64 rx_frames_256_511;
128 u64 rx_frames_512_1023;
129 u64 rx_frames_1024_1518;
130 u64 rx_frames_1519_max;
131
132 u64 rx_pause; /* # of received pause frames */
133 u64 rx_ppp0; /* # of received PPP prio 0 frames */
134 u64 rx_ppp1; /* # of received PPP prio 1 frames */
135 u64 rx_ppp2; /* # of received PPP prio 2 frames */
136 u64 rx_ppp3; /* # of received PPP prio 3 frames */
137 u64 rx_ppp4; /* # of received PPP prio 4 frames */
138 u64 rx_ppp5; /* # of received PPP prio 5 frames */
139 u64 rx_ppp6; /* # of received PPP prio 6 frames */
140 u64 rx_ppp7; /* # of received PPP prio 7 frames */
141
142 u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */
143 u64 rx_ovflow1; /* drops due to buffer-group 1 overflows */
144 u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */
145 u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */
146 u64 rx_trunc0; /* buffer-group 0 truncated packets */
147 u64 rx_trunc1; /* buffer-group 1 truncated packets */
148 u64 rx_trunc2; /* buffer-group 2 truncated packets */
149 u64 rx_trunc3; /* buffer-group 3 truncated packets */
150};
151
152struct lb_port_stats {
153 u64 octets;
154 u64 frames;
155 u64 bcast_frames;
156 u64 mcast_frames;
157 u64 ucast_frames;
158 u64 error_frames;
159
160 u64 frames_64;
161 u64 frames_65_127;
162 u64 frames_128_255;
163 u64 frames_256_511;
164 u64 frames_512_1023;
165 u64 frames_1024_1518;
166 u64 frames_1519_max;
167
168 u64 drop;
169
170 u64 ovflow0;
171 u64 ovflow1;
172 u64 ovflow2;
173 u64 ovflow3;
174 u64 trunc0;
175 u64 trunc1;
176 u64 trunc2;
177 u64 trunc3;
178};
179
180struct tp_tcp_stats {
181 u32 tcpOutRsts;
182 u64 tcpInSegs;
183 u64 tcpOutSegs;
184 u64 tcpRetransSegs;
185};
186
187struct tp_err_stats {
188 u32 macInErrs[4];
189 u32 hdrInErrs[4];
190 u32 tcpInErrs[4];
191 u32 tnlCongDrops[4];
192 u32 ofldChanDrops[4];
193 u32 tnlTxDrops[4];
194 u32 ofldVlanDrops[4];
195 u32 tcp6InErrs[4];
196 u32 ofldNoNeigh;
197 u32 ofldCongDefer;
198};
199
200struct tp_params {
201 unsigned int ntxchan; /* # of Tx channels */
202 unsigned int tre; /* log2 of core clocks per TP tick */
203};
204
205struct vpd_params {
206 unsigned int cclk;
207 u8 ec[EC_LEN + 1];
208 u8 sn[SERNUM_LEN + 1];
209 u8 id[ID_LEN + 1];
210};
211
212struct pci_params {
213 unsigned char speed;
214 unsigned char width;
215};
216
217struct adapter_params {
218 struct tp_params tp;
219 struct vpd_params vpd;
220 struct pci_params pci;
221
222 unsigned int sf_size; /* serial flash size in bytes */
223 unsigned int sf_nsec; /* # of flash sectors */
224 unsigned int sf_fw_start; /* start of FW image in flash */
225
226 unsigned int fw_vers;
227 unsigned int tp_vers;
228 u8 api_vers[7];
229
230 unsigned short mtus[NMTUS];
231 unsigned short a_wnd[NCCTRL_WIN];
232 unsigned short b_wnd[NCCTRL_WIN];
233
234 unsigned char nports; /* # of ethernet ports */
235 unsigned char portvec;
236 unsigned char rev; /* chip revision */
237 unsigned char offload;
238
239 unsigned int ofldq_wr_cred;
240};
241
242struct trace_params {
243 u32 data[TRACE_LEN / 4];
244 u32 mask[TRACE_LEN / 4];
245 unsigned short snap_len;
246 unsigned short min_len;
247 unsigned char skip_ofst;
248 unsigned char skip_len;
249 unsigned char invert;
250 unsigned char port;
251};
252
253struct link_config {
254 unsigned short supported; /* link capabilities */
255 unsigned short advertising; /* advertised capabilities */
256 unsigned short requested_speed; /* speed user has requested */
257 unsigned short speed; /* actual link speed */
258 unsigned char requested_fc; /* flow control user has requested */
259 unsigned char fc; /* actual link flow control */
260 unsigned char autoneg; /* autonegotiating? */
261 unsigned char link_ok; /* link up? */
262};
263
264#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
265
266enum {
267 MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */
268 MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */
269 MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
270 MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
271};
272
273enum {
274 MAX_EGRQ = 128, /* max # of egress queues, including FLs */
275 MAX_INGQ = 64 /* max # of interrupt-capable ingress queues */
276};
277
278struct adapter;
279struct sge_rspq;
280
281struct port_info {
282 struct adapter *adapter;
283 u16 viid;
284 s16 xact_addr_filt; /* index of exact MAC address filter */
285 u16 rss_size; /* size of VI's RSS table slice */
286 s8 mdio_addr;
287 u8 port_type;
288 u8 mod_type;
289 u8 port_id;
290 u8 tx_chan;
291 u8 lport; /* associated offload logical port */
292 u8 nqsets; /* # of qsets */
293 u8 first_qset; /* index of first qset */
294 u8 rss_mode;
295 struct link_config link_cfg;
296 u16 *rss;
297};
298
299struct dentry;
300struct work_struct;
301
302enum { /* adapter flags */
303 FULL_INIT_DONE = (1 << 0),
304 USING_MSI = (1 << 1),
305 USING_MSIX = (1 << 2),
306 FW_OK = (1 << 4),
307};
308
309struct rx_sw_desc;
310
311struct sge_fl { /* SGE free-buffer queue state */
312 unsigned int avail; /* # of available Rx buffers */
313 unsigned int pend_cred; /* new buffers since last FL DB ring */
314 unsigned int cidx; /* consumer index */
315 unsigned int pidx; /* producer index */
316 unsigned long alloc_failed; /* # of times buffer allocation failed */
317 unsigned long large_alloc_failed;
318 unsigned long starving;
319 /* RO fields */
320 unsigned int cntxt_id; /* SGE context id for the free list */
321 unsigned int size; /* capacity of free list */
322 struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
323 __be64 *desc; /* address of HW Rx descriptor ring */
324 dma_addr_t addr; /* bus address of HW ring start */
325};
326
327/* A packet gather list */
328struct pkt_gl {
329 skb_frag_t frags[MAX_SKB_FRAGS];
330 void *va; /* virtual address of first byte */
331 unsigned int nfrags; /* # of fragments */
332 unsigned int tot_len; /* total length of fragments */
333};
334
335typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
336 const struct pkt_gl *gl);
337
338struct sge_rspq { /* state for an SGE response queue */
339 struct napi_struct napi;
340 const __be64 *cur_desc; /* current descriptor in queue */
341 unsigned int cidx; /* consumer index */
342 u8 gen; /* current generation bit */
343 u8 intr_params; /* interrupt holdoff parameters */
344 u8 next_intr_params; /* holdoff params for next interrupt */
345 u8 pktcnt_idx; /* interrupt packet threshold */
346 u8 uld; /* ULD handling this queue */
347 u8 idx; /* queue index within its group */
348 int offset; /* offset into current Rx buffer */
349 u16 cntxt_id; /* SGE context id for the response q */
350 u16 abs_id; /* absolute SGE id for the response q */
351 __be64 *desc; /* address of HW response ring */
352 dma_addr_t phys_addr; /* physical address of the ring */
353 unsigned int iqe_len; /* entry size */
354 unsigned int size; /* capacity of response queue */
355 struct adapter *adap;
356 struct net_device *netdev; /* associated net device */
357 rspq_handler_t handler;
358};
359
360struct sge_eth_stats { /* Ethernet queue statistics */
361 unsigned long pkts; /* # of ethernet packets */
362 unsigned long lro_pkts; /* # of LRO super packets */
363 unsigned long lro_merged; /* # of wire packets merged by LRO */
364 unsigned long rx_cso; /* # of Rx checksum offloads */
365 unsigned long vlan_ex; /* # of Rx VLAN extractions */
366 unsigned long rx_drops; /* # of packets dropped due to no mem */
367};
368
369struct sge_eth_rxq { /* SW Ethernet Rx queue */
370 struct sge_rspq rspq;
371 struct sge_fl fl;
372 struct sge_eth_stats stats;
373} ____cacheline_aligned_in_smp;
374
375struct sge_ofld_stats { /* offload queue statistics */
376 unsigned long pkts; /* # of packets */
377 unsigned long imm; /* # of immediate-data packets */
378 unsigned long an; /* # of asynchronous notifications */
379 unsigned long nomem; /* # of responses deferred due to no mem */
380};
381
382struct sge_ofld_rxq { /* SW offload Rx queue */
383 struct sge_rspq rspq;
384 struct sge_fl fl;
385 struct sge_ofld_stats stats;
386} ____cacheline_aligned_in_smp;
387
388struct tx_desc {
389 __be64 flit[8];
390};
391
392struct tx_sw_desc;
393
394struct sge_txq {
395 unsigned int in_use; /* # of in-use Tx descriptors */
396 unsigned int size; /* # of descriptors */
397 unsigned int cidx; /* SW consumer index */
398 unsigned int pidx; /* producer index */
399 unsigned long stops; /* # of times q has been stopped */
400 unsigned long restarts; /* # of queue restarts */
401 unsigned int cntxt_id; /* SGE context id for the Tx q */
402 struct tx_desc *desc; /* address of HW Tx descriptor ring */
403 struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
404 struct sge_qstat *stat; /* queue status entry */
405 dma_addr_t phys_addr; /* physical address of the ring */
406};
407
408struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
409 struct sge_txq q;
410 struct netdev_queue *txq; /* associated netdev TX queue */
411 unsigned long tso; /* # of TSO requests */
412 unsigned long tx_cso; /* # of Tx checksum offloads */
413 unsigned long vlan_ins; /* # of Tx VLAN insertions */
414 unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
415} ____cacheline_aligned_in_smp;
416
417struct sge_ofld_txq { /* state for an SGE offload Tx queue */
418 struct sge_txq q;
419 struct adapter *adap;
420 struct sk_buff_head sendq; /* list of backpressured packets */
421 struct tasklet_struct qresume_tsk; /* restarts the queue */
422 u8 full; /* the Tx ring is full */
423 unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
424} ____cacheline_aligned_in_smp;
425
426struct sge_ctrl_txq { /* state for an SGE control Tx queue */
427 struct sge_txq q;
428 struct adapter *adap;
429 struct sk_buff_head sendq; /* list of backpressured packets */
430 struct tasklet_struct qresume_tsk; /* restarts the queue */
431 u8 full; /* the Tx ring is full */
432} ____cacheline_aligned_in_smp;
433
434struct sge {
435 struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
436 struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
437 struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
438
439 struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
440 struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
441 struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
442 struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
443
444 struct sge_rspq intrq ____cacheline_aligned_in_smp;
445 spinlock_t intrq_lock;
446
447 u16 max_ethqsets; /* # of available Ethernet queue sets */
448 u16 ethqsets; /* # of active Ethernet queue sets */
449 u16 ethtxq_rover; /* Tx queue to clean up next */
450 u16 ofldqsets; /* # of active offload queue sets */
451 u16 rdmaqs; /* # of available RDMA Rx queues */
452 u16 ofld_rxq[MAX_OFLD_QSETS];
453 u16 rdma_rxq[NCHAN];
454 u16 timer_val[SGE_NTIMERS];
455 u8 counter_val[SGE_NCOUNTERS];
456 unsigned int starve_thres;
457 u8 idma_state[2];
458 unsigned int egr_start;
459 unsigned int ingr_start;
460 void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */
461 struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
462 DECLARE_BITMAP(starving_fl, MAX_EGRQ);
463 DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
464 struct timer_list rx_timer; /* refills starving FLs */
465 struct timer_list tx_timer; /* checks Tx queues */
466};
467
468#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
469#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
470#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
471
472struct l2t_data;
473
474struct adapter {
475 void __iomem *regs;
476 struct pci_dev *pdev;
477 struct device *pdev_dev;
478 unsigned int fn;
479 unsigned int flags;
480
481 int msg_enable;
482
483 struct adapter_params params;
484 struct cxgb4_virt_res vres;
485 unsigned int swintr;
486
487 unsigned int wol;
488
489 struct {
490 unsigned short vec;
491 char desc[IFNAMSIZ + 10];
492 } msix_info[MAX_INGQ + 1];
493
494 struct sge sge;
495
496 struct net_device *port[MAX_NPORTS];
497 u8 chan_map[NCHAN]; /* channel -> port map */
498
499 struct l2t_data *l2t;
500 void *uld_handle[CXGB4_ULD_MAX];
501 struct list_head list_node;
502
503 struct tid_info tids;
504 void **tid_release_head;
505 spinlock_t tid_release_lock;
506 struct work_struct tid_release_task;
507 bool tid_release_task_busy;
508
509 struct dentry *debugfs_root;
510
511 spinlock_t stats_lock;
512};
513
514static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
515{
516 return readl(adap->regs + reg_addr);
517}
518
519static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
520{
521 writel(val, adap->regs + reg_addr);
522}
523
524#ifndef readq
525static inline u64 readq(const volatile void __iomem *addr)
526{
527 return readl(addr) + ((u64)readl(addr + 4) << 32);
528}
529
530static inline void writeq(u64 val, volatile void __iomem *addr)
531{
532 writel(val, addr);
533 writel(val >> 32, addr + 4);
534}
535#endif
536
537static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
538{
539 return readq(adap->regs + reg_addr);
540}
541
542static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
543{
544 writeq(val, adap->regs + reg_addr);
545}
546
547/**
548 * netdev2pinfo - return the port_info structure associated with a net_device
549 * @dev: the netdev
550 *
551 * Return the struct port_info associated with a net_device
552 */
553static inline struct port_info *netdev2pinfo(const struct net_device *dev)
554{
555 return netdev_priv(dev);
556}
557
558/**
559 * adap2pinfo - return the port_info of a port
560 * @adap: the adapter
561 * @idx: the port index
562 *
563 * Return the port_info structure for the port of the given index.
564 */
565static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
566{
567 return netdev_priv(adap->port[idx]);
568}
569
570/**
571 * netdev2adap - return the adapter structure associated with a net_device
572 * @dev: the netdev
573 *
574 * Return the struct adapter associated with a net_device
575 */
576static inline struct adapter *netdev2adap(const struct net_device *dev)
577{
578 return netdev2pinfo(dev)->adapter;
579}
580
581void t4_os_portmod_changed(const struct adapter *adap, int port_id);
582void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
583
584void *t4_alloc_mem(size_t size);
585
586void t4_free_sge_resources(struct adapter *adap);
587irq_handler_t t4_intr_handler(struct adapter *adap);
588netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
589int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
590 const struct pkt_gl *gl);
591int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
592int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
593int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
594 struct net_device *dev, int intr_idx,
595 struct sge_fl *fl, rspq_handler_t hnd);
596int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
597 struct net_device *dev, struct netdev_queue *netdevq,
598 unsigned int iqid);
599int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
600 struct net_device *dev, unsigned int iqid,
601 unsigned int cmplqid);
602int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
603 struct net_device *dev, unsigned int iqid);
604irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
605void t4_sge_init(struct adapter *adap);
606void t4_sge_start(struct adapter *adap);
607void t4_sge_stop(struct adapter *adap);
608
609#define for_each_port(adapter, iter) \
610 for (iter = 0; iter < (adapter)->params.nports; ++iter)
611
612static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
613{
614 return adap->params.vpd.cclk / 1000;
615}
616
617static inline unsigned int us_to_core_ticks(const struct adapter *adap,
618 unsigned int us)
619{
620 return (us * adap->params.vpd.cclk) / 1000;
621}
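/* Example: assuming params.vpd.cclk holds the core clock in kHz (e.g. 250000
 * for a 250 MHz core clock), core_ticks_per_usec() returns 250 and
 * us_to_core_ticks(adap, 4) returns 1000 ticks.
 */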
622
623void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
624 u32 val);
625
626int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
627 void *rpl, bool sleep_ok);
628
629static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
630 int size, void *rpl)
631{
632 return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
633}
634
635static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
636 int size, void *rpl)
637{
638 return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
639}
640
641void t4_intr_enable(struct adapter *adapter);
642void t4_intr_disable(struct adapter *adapter);
643int t4_slow_intr_handler(struct adapter *adapter);
644
645int t4_wait_dev_ready(struct adapter *adap);
646int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
647 struct link_config *lc);
648int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
649int t4_seeprom_wp(struct adapter *adapter, bool enable);
650int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
651int t4_check_fw_version(struct adapter *adapter);
652int t4_prep_adapter(struct adapter *adapter);
653int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
654void t4_fatal_err(struct adapter *adapter);
655int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
656 int start, int n, const u16 *rspq, unsigned int nrspq);
657int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
658 unsigned int flags);
659int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity);
660int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
661 u64 *parity);
662
663void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
664void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
665void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
666 struct tp_tcp_stats *v6);
667void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
668 const unsigned short *alpha, const unsigned short *beta);
669
670void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
671 const u8 *addr);
672int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
673 u64 mask0, u64 mask1, unsigned int crc, bool enable);
674
675int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
676 enum dev_master master, enum dev_state *state);
677int t4_fw_bye(struct adapter *adap, unsigned int mbox);
678int t4_early_init(struct adapter *adap, unsigned int mbox);
679int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
680int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
681 unsigned int vf, unsigned int nparams, const u32 *params,
682 u32 *val);
683int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
684 unsigned int vf, unsigned int nparams, const u32 *params,
685 const u32 *val);
686int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
687 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
688 unsigned int rxqi, unsigned int rxq, unsigned int tc,
689 unsigned int vi, unsigned int cmask, unsigned int pmask,
690 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
691int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
692 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
693 unsigned int *rss_size);
694int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
695 int mtu, int promisc, int all_multi, int bcast, int vlanex,
696 bool sleep_ok);
697int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
698 unsigned int viid, bool free, unsigned int naddr,
699 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
700int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
701 int idx, const u8 *addr, bool persist, bool add_smt);
702int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
703 bool ucast, u64 vec, bool sleep_ok);
704int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
705 bool rx_en, bool tx_en);
706int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
707 unsigned int nblinks);
708int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
709 unsigned int mmd, unsigned int reg, u16 *valp);
710int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
711 unsigned int mmd, unsigned int reg, u16 val);
712int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
713 unsigned int vf, unsigned int iqtype, unsigned int iqid,
714 unsigned int fl0id, unsigned int fl1id);
715int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
716 unsigned int vf, unsigned int eqid);
717int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
718 unsigned int vf, unsigned int eqid);
719int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
720 unsigned int vf, unsigned int eqid);
721int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
722#endif /* __CXGB4_H__ */
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
new file mode 100644
index 00000000000..b4efa292fd6
--- /dev/null
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -0,0 +1,3809 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37#include <linux/bitmap.h>
38#include <linux/crc32.h>
39#include <linux/ctype.h>
40#include <linux/debugfs.h>
41#include <linux/err.h>
42#include <linux/etherdevice.h>
43#include <linux/firmware.h>
44#include <linux/if_vlan.h>
45#include <linux/init.h>
46#include <linux/log2.h>
47#include <linux/mdio.h>
48#include <linux/module.h>
49#include <linux/moduleparam.h>
50#include <linux/mutex.h>
51#include <linux/netdevice.h>
52#include <linux/pci.h>
53#include <linux/aer.h>
54#include <linux/rtnetlink.h>
55#include <linux/sched.h>
56#include <linux/seq_file.h>
57#include <linux/sockios.h>
58#include <linux/vmalloc.h>
59#include <linux/workqueue.h>
60#include <net/neighbour.h>
61#include <net/netevent.h>
62#include <asm/uaccess.h>
63
64#include "cxgb4.h"
65#include "t4_regs.h"
66#include "t4_msg.h"
67#include "t4fw_api.h"
68#include "l2t.h"
69
70#define DRV_VERSION "1.3.0-ko"
71#define DRV_DESC "Chelsio T4 Network Driver"
72
73/*
74 * Max interrupt hold-off timer value in us. Queues fall back to this value
75 * under extreme memory pressure so it's largish to give the system time to
76 * recover.
77 */
78#define MAX_SGE_TIMERVAL 200U
79
80#ifdef CONFIG_PCI_IOV
81/*
82 * Virtual Function provisioning constants. We need two extra Ingress Queues
83 * with Interrupt capability to serve as the VF's Firmware Event Queue and
84 * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free
 85 * Lists associated with them. For each Ethernet/Control Egress Queue and
86 * for each Free List, we need an Egress Context.
87 */
88enum {
89 VFRES_NPORTS = 1, /* # of "ports" per VF */
90 VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
91
92 VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
93 VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
94 VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
95 VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
96 VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
97 VFRES_TC = 0, /* PCI-E traffic class */
98 VFRES_NEXACTF = 16, /* # of exact MPS filters */
99
100 VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
101 VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
102};
103
104/*
105 * Provide a Port Access Rights Mask for the specified PF/VF. This is very
106 * static and likely not to be useful in the long run. We really need to
107 * implement some form of persistent configuration which the firmware
108 * controls.
109 */
110static unsigned int pfvfres_pmask(struct adapter *adapter,
111 unsigned int pf, unsigned int vf)
112{
113 unsigned int portn, portvec;
114
115 /*
116 * Give PF's access to all of the ports.
117 */
118 if (vf == 0)
119 return FW_PFVF_CMD_PMASK_MASK;
120
121 /*
122 * For VFs, we'll assign them access to the ports based purely on the
123 * PF. We assign active ports in order, wrapping around if there are
124 * fewer active ports than PFs: e.g. active port[pf % nports].
125 * Unfortunately the adapter's port_info structs haven't been
126 * initialized yet so we have to compute this.
127 */
128 if (adapter->params.nports == 0)
129 return 0;
130
131 portn = pf % adapter->params.nports;
132 portvec = adapter->params.portvec;
133 for (;;) {
134 /*
135 * Isolate the lowest set bit in the port vector. If we're at
136 * the port number that we want, return that as the pmask.
 137 * Otherwise mask that bit out of the port vector and
138 * decrement our port number ...
139 */
140 unsigned int pmask = portvec ^ (portvec & (portvec-1));
141 if (portn == 0)
142 return pmask;
143 portn--;
144 portvec &= ~pmask;
145 }
146 /*NOTREACHED*/
147}
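/* Worked example: with portvec = 0x5 (ports 0 and 2 active) and nports = 2,
 * PF 1 gives portn = 1; the first pass isolates bit 0x1 (port 0), decrements
 * portn and clears that bit, and the second pass returns pmask = 0x4 (port 2).
 */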
148#endif
149
150enum {
151 MEMWIN0_APERTURE = 65536,
152 MEMWIN0_BASE = 0x30000,
153 MEMWIN1_APERTURE = 32768,
154 MEMWIN1_BASE = 0x28000,
155 MEMWIN2_APERTURE = 2048,
156 MEMWIN2_BASE = 0x1b800,
157};
158
159enum {
160 MAX_TXQ_ENTRIES = 16384,
161 MAX_CTRL_TXQ_ENTRIES = 1024,
162 MAX_RSPQ_ENTRIES = 16384,
163 MAX_RX_BUFFERS = 16384,
164 MIN_TXQ_ENTRIES = 32,
165 MIN_CTRL_TXQ_ENTRIES = 32,
166 MIN_RSPQ_ENTRIES = 128,
167 MIN_FL_ENTRIES = 16
168};
169
170#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
171 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
172 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
173
174#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
175
176static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
177 CH_DEVICE(0xa000, 0), /* PE10K */
178 CH_DEVICE(0x4001, -1),
179 CH_DEVICE(0x4002, -1),
180 CH_DEVICE(0x4003, -1),
181 CH_DEVICE(0x4004, -1),
182 CH_DEVICE(0x4005, -1),
183 CH_DEVICE(0x4006, -1),
184 CH_DEVICE(0x4007, -1),
185 CH_DEVICE(0x4008, -1),
186 CH_DEVICE(0x4009, -1),
187 CH_DEVICE(0x400a, -1),
188 CH_DEVICE(0x4401, 4),
189 CH_DEVICE(0x4402, 4),
190 CH_DEVICE(0x4403, 4),
191 CH_DEVICE(0x4404, 4),
192 CH_DEVICE(0x4405, 4),
193 CH_DEVICE(0x4406, 4),
194 CH_DEVICE(0x4407, 4),
195 CH_DEVICE(0x4408, 4),
196 CH_DEVICE(0x4409, 4),
197 CH_DEVICE(0x440a, 4),
198 { 0, }
199};
200
201#define FW_FNAME "cxgb4/t4fw.bin"
202
203MODULE_DESCRIPTION(DRV_DESC);
204MODULE_AUTHOR("Chelsio Communications");
205MODULE_LICENSE("Dual BSD/GPL");
206MODULE_VERSION(DRV_VERSION);
207MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
208MODULE_FIRMWARE(FW_FNAME);
209
210static int dflt_msg_enable = DFLT_MSG_ENABLE;
211
212module_param(dflt_msg_enable, int, 0644);
213MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
214
215/*
216 * The driver uses the best interrupt scheme available on a platform in the
217 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
218 * of these schemes the driver may consider as follows:
219 *
220 * msi = 2: choose from among all three options
221 * msi = 1: only consider MSI and INTx interrupts
222 * msi = 0: force INTx interrupts
223 */
224static int msi = 2;
225
226module_param(msi, int, 0644);
227MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
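/* For example, loading the module with "modprobe cxgb4 msi=1" limits the
 * driver to MSI/INTx, while msi=0 forces legacy INTx interrupts.
 */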
228
229/*
230 * Queue interrupt hold-off timer values. Queues default to the first of these
231 * upon creation.
232 */
233static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
234
235module_param_array(intr_holdoff, uint, NULL, 0644);
236MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
237 "0..4 in microseconds");
238
239static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
240
241module_param_array(intr_cnt, uint, NULL, 0644);
242MODULE_PARM_DESC(intr_cnt,
243 "thresholds 1..3 for queue interrupt packet counters");
244
245static int vf_acls;
246
247#ifdef CONFIG_PCI_IOV
248module_param(vf_acls, bool, 0644);
249MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
250
251static unsigned int num_vf[4];
252
253module_param_array(num_vf, uint, NULL, 0644);
254MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
255#endif
256
257static struct dentry *cxgb4_debugfs_root;
258
259static LIST_HEAD(adapter_list);
260static DEFINE_MUTEX(uld_mutex);
261static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
262static const char *uld_str[] = { "RDMA", "iSCSI" };
263
264static void link_report(struct net_device *dev)
265{
266 if (!netif_carrier_ok(dev))
267 netdev_info(dev, "link down\n");
268 else {
269 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
270
271 const char *s = "10Mbps";
272 const struct port_info *p = netdev_priv(dev);
273
274 switch (p->link_cfg.speed) {
275 case SPEED_10000:
276 s = "10Gbps";
277 break;
278 case SPEED_1000:
279 s = "1000Mbps";
280 break;
281 case SPEED_100:
282 s = "100Mbps";
283 break;
284 }
285
286 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
287 fc[p->link_cfg.fc]);
288 }
289}
290
291void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
292{
293 struct net_device *dev = adapter->port[port_id];
294
295 /* Skip changes from disabled ports. */
296 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
297 if (link_stat)
298 netif_carrier_on(dev);
299 else
300 netif_carrier_off(dev);
301
302 link_report(dev);
303 }
304}
305
306void t4_os_portmod_changed(const struct adapter *adap, int port_id)
307{
308 static const char *mod_str[] = {
309 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
310 };
311
312 const struct net_device *dev = adap->port[port_id];
313 const struct port_info *pi = netdev_priv(dev);
314
315 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
316 netdev_info(dev, "port module unplugged\n");
317 else if (pi->mod_type < ARRAY_SIZE(mod_str))
318 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
319}
320
321/*
322 * Configure the exact and hash address filters to handle a port's multicast
323 * and secondary unicast MAC addresses.
324 */
325static int set_addr_filters(const struct net_device *dev, bool sleep)
326{
327 u64 mhash = 0;
328 u64 uhash = 0;
329 bool free = true;
330 u16 filt_idx[7];
331 const u8 *addr[7];
332 int ret, naddr = 0;
333 const struct netdev_hw_addr *ha;
334 int uc_cnt = netdev_uc_count(dev);
335 int mc_cnt = netdev_mc_count(dev);
336 const struct port_info *pi = netdev_priv(dev);
337 unsigned int mb = pi->adapter->fn;
338
339 /* first do the secondary unicast addresses */
340 netdev_for_each_uc_addr(ha, dev) {
341 addr[naddr++] = ha->addr;
342 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
343 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
344 naddr, addr, filt_idx, &uhash, sleep);
345 if (ret < 0)
346 return ret;
347
348 free = false;
349 naddr = 0;
350 }
351 }
352
353 /* next set up the multicast addresses */
354 netdev_for_each_mc_addr(ha, dev) {
355 addr[naddr++] = ha->addr;
356 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
357 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
358 naddr, addr, filt_idx, &mhash, sleep);
359 if (ret < 0)
360 return ret;
361
362 free = false;
363 naddr = 0;
364 }
365 }
366
367 return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
368 uhash | mhash, sleep);
369}
370
371/*
 372 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
373 * If @mtu is -1 it is left unchanged.
374 */
375static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
376{
377 int ret;
378 struct port_info *pi = netdev_priv(dev);
379
380 ret = set_addr_filters(dev, sleep_ok);
381 if (ret == 0)
382 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
383 (dev->flags & IFF_PROMISC) ? 1 : 0,
384 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
385 sleep_ok);
386 return ret;
387}
388
389/**
390 * link_start - enable a port
391 * @dev: the port to enable
392 *
393 * Performs the MAC and PHY actions needed to enable a port.
394 */
395static int link_start(struct net_device *dev)
396{
397 int ret;
398 struct port_info *pi = netdev_priv(dev);
399 unsigned int mb = pi->adapter->fn;
400
401 /*
402 * We do not set address filters and promiscuity here, the stack does
403 * that step explicitly.
404 */
405 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
406 !!(dev->features & NETIF_F_HW_VLAN_RX), true);
407 if (ret == 0) {
408 ret = t4_change_mac(pi->adapter, mb, pi->viid,
409 pi->xact_addr_filt, dev->dev_addr, true,
410 true);
411 if (ret >= 0) {
412 pi->xact_addr_filt = ret;
413 ret = 0;
414 }
415 }
416 if (ret == 0)
417 ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
418 &pi->link_cfg);
419 if (ret == 0)
420 ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
421 return ret;
422}
423
424/*
425 * Response queue handler for the FW event queue.
426 */
427static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
428 const struct pkt_gl *gl)
429{
430 u8 opcode = ((const struct rss_header *)rsp)->opcode;
431
432 rsp++; /* skip RSS header */
433 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
434 const struct cpl_sge_egr_update *p = (void *)rsp;
435 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
436 struct sge_txq *txq;
437
438 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
439 txq->restarts++;
440 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
441 struct sge_eth_txq *eq;
442
443 eq = container_of(txq, struct sge_eth_txq, q);
444 netif_tx_wake_queue(eq->txq);
445 } else {
446 struct sge_ofld_txq *oq;
447
448 oq = container_of(txq, struct sge_ofld_txq, q);
449 tasklet_schedule(&oq->qresume_tsk);
450 }
451 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
452 const struct cpl_fw6_msg *p = (void *)rsp;
453
454 if (p->type == 0)
455 t4_handle_fw_rpl(q->adap, p->data);
456 } else if (opcode == CPL_L2T_WRITE_RPL) {
457 const struct cpl_l2t_write_rpl *p = (void *)rsp;
458
459 do_l2t_write_rpl(q->adap, p);
460 } else
461 dev_err(q->adap->pdev_dev,
462 "unexpected CPL %#x on FW event queue\n", opcode);
463 return 0;
464}
465
466/**
467 * uldrx_handler - response queue handler for ULD queues
468 * @q: the response queue that received the packet
469 * @rsp: the response queue descriptor holding the offload message
470 * @gl: the gather list of packet fragments
471 *
472 * Deliver an ingress offload packet to a ULD. All processing is done by
473 * the ULD, we just maintain statistics.
474 */
475static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
476 const struct pkt_gl *gl)
477{
478 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
479
480 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
481 rxq->stats.nomem++;
482 return -1;
483 }
484 if (gl == NULL)
485 rxq->stats.imm++;
486 else if (gl == CXGB4_MSG_AN)
487 rxq->stats.an++;
488 else
489 rxq->stats.pkts++;
490 return 0;
491}
492
493static void disable_msi(struct adapter *adapter)
494{
495 if (adapter->flags & USING_MSIX) {
496 pci_disable_msix(adapter->pdev);
497 adapter->flags &= ~USING_MSIX;
498 } else if (adapter->flags & USING_MSI) {
499 pci_disable_msi(adapter->pdev);
500 adapter->flags &= ~USING_MSI;
501 }
502}
503
504/*
505 * Interrupt handler for non-data events used with MSI-X.
506 */
507static irqreturn_t t4_nondata_intr(int irq, void *cookie)
508{
509 struct adapter *adap = cookie;
510
511 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
512 if (v & PFSW) {
513 adap->swintr = 1;
514 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
515 }
516 t4_slow_intr_handler(adap);
517 return IRQ_HANDLED;
518}
519
520/*
521 * Name the MSI-X interrupts.
522 */
523static void name_msix_vecs(struct adapter *adap)
524{
525 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
526
527 /* non-data interrupts */
528 snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
529
530 /* FW events */
531 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
532 adap->port[0]->name);
533
534 /* Ethernet queues */
535 for_each_port(adap, j) {
536 struct net_device *d = adap->port[j];
537 const struct port_info *pi = netdev_priv(d);
538
539 for (i = 0; i < pi->nqsets; i++, msi_idx++)
540 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
541 d->name, i);
542 }
543
544 /* offload queues */
545 for_each_ofldrxq(&adap->sge, i)
546 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
547 adap->port[0]->name, i);
548
549 for_each_rdmarxq(&adap->sge, i)
550 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
551 adap->port[0]->name, i);
552}
553
554static int request_msix_queue_irqs(struct adapter *adap)
555{
556 struct sge *s = &adap->sge;
557 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;
558
559 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
560 adap->msix_info[1].desc, &s->fw_evtq);
561 if (err)
562 return err;
563
564 for_each_ethrxq(s, ethqidx) {
565 err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
566 adap->msix_info[msi].desc,
567 &s->ethrxq[ethqidx].rspq);
568 if (err)
569 goto unwind;
570 msi++;
571 }
572 for_each_ofldrxq(s, ofldqidx) {
573 err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
574 adap->msix_info[msi].desc,
575 &s->ofldrxq[ofldqidx].rspq);
576 if (err)
577 goto unwind;
578 msi++;
579 }
580 for_each_rdmarxq(s, rdmaqidx) {
581 err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
582 adap->msix_info[msi].desc,
583 &s->rdmarxq[rdmaqidx].rspq);
584 if (err)
585 goto unwind;
586 msi++;
587 }
588 return 0;
589
590unwind:
591 while (--rdmaqidx >= 0)
592 free_irq(adap->msix_info[--msi].vec,
593 &s->rdmarxq[rdmaqidx].rspq);
594 while (--ofldqidx >= 0)
595 free_irq(adap->msix_info[--msi].vec,
596 &s->ofldrxq[ofldqidx].rspq);
597 while (--ethqidx >= 0)
598 free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
599 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
600 return err;
601}
602
603static void free_msix_queue_irqs(struct adapter *adap)
604{
605 int i, msi = 2;
606 struct sge *s = &adap->sge;
607
608 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
609 for_each_ethrxq(s, i)
610 free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
611 for_each_ofldrxq(s, i)
612 free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
613 for_each_rdmarxq(s, i)
614 free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
615}
616
617/**
618 * write_rss - write the RSS table for a given port
619 * @pi: the port
620 * @queues: array of queue indices for RSS
621 *
622 * Sets up the portion of the HW RSS table for the port's VI to distribute
623 * packets to the Rx queues in @queues.
624 */
625static int write_rss(const struct port_info *pi, const u16 *queues)
626{
627 u16 *rss;
628 int i, err;
629 const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
630
631 rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
632 if (!rss)
633 return -ENOMEM;
634
635 /* map the queue indices to queue ids */
636 for (i = 0; i < pi->rss_size; i++, queues++)
637 rss[i] = q[*queues].rspq.abs_id;
638
639 err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
640 pi->rss_size, rss, pi->rss_size);
641 kfree(rss);
642 return err;
643}
644
645/**
646 * setup_rss - configure RSS
647 * @adap: the adapter
648 *
649 * Sets up RSS for each port.
650 */
651static int setup_rss(struct adapter *adap)
652{
653 int i, err;
654
655 for_each_port(adap, i) {
656 const struct port_info *pi = adap2pinfo(adap, i);
657
658 err = write_rss(pi, pi->rss);
659 if (err)
660 return err;
661 }
662 return 0;
663}
664
665/*
666 * Return the channel of the ingress queue with the given qid.
667 */
668static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
669{
670 qid -= p->ingr_start;
671 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
672}
673
674/*
675 * Wait until all NAPI handlers are descheduled.
676 */
677static void quiesce_rx(struct adapter *adap)
678{
679 int i;
680
681 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
682 struct sge_rspq *q = adap->sge.ingr_map[i];
683
684 if (q && q->handler)
685 napi_disable(&q->napi);
686 }
687}
688
689/*
690 * Enable NAPI scheduling and interrupt generation for all Rx queues.
691 */
692static void enable_rx(struct adapter *adap)
693{
694 int i;
695
696 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
697 struct sge_rspq *q = adap->sge.ingr_map[i];
698
699 if (!q)
700 continue;
701 if (q->handler)
702 napi_enable(&q->napi);
703 /* 0-increment GTS to start the timer and enable interrupts */
704 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
705 SEINTARM(q->intr_params) |
706 INGRESSQID(q->cntxt_id));
707 }
708}
709
710/**
711 * setup_sge_queues - configure SGE Tx/Rx/response queues
712 * @adap: the adapter
713 *
714 * Determines how many sets of SGE queues to use and initializes them.
715 * We support multiple queue sets per port if we have MSI-X, otherwise
716 * just one queue set per port.
717 */
718static int setup_sge_queues(struct adapter *adap)
719{
720 int err, msi_idx, i, j;
721 struct sge *s = &adap->sge;
722
723 bitmap_zero(s->starving_fl, MAX_EGRQ);
724 bitmap_zero(s->txq_maperr, MAX_EGRQ);
725
726 if (adap->flags & USING_MSIX)
727 msi_idx = 1; /* vector 0 is for non-queue interrupts */
728 else {
729 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
730 NULL, NULL);
731 if (err)
732 return err;
733 msi_idx = -((int)s->intrq.abs_id + 1);
734 }
735
736 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
737 msi_idx, NULL, fwevtq_handler);
738 if (err) {
739freeout: t4_free_sge_resources(adap);
740 return err;
741 }
742
743 for_each_port(adap, i) {
744 struct net_device *dev = adap->port[i];
745 struct port_info *pi = netdev_priv(dev);
746 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
747 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
748
749 for (j = 0; j < pi->nqsets; j++, q++) {
750 if (msi_idx > 0)
751 msi_idx++;
752 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
753 msi_idx, &q->fl,
754 t4_ethrx_handler);
755 if (err)
756 goto freeout;
757 q->rspq.idx = j;
758 memset(&q->stats, 0, sizeof(q->stats));
759 }
760 for (j = 0; j < pi->nqsets; j++, t++) {
761 err = t4_sge_alloc_eth_txq(adap, t, dev,
762 netdev_get_tx_queue(dev, j),
763 s->fw_evtq.cntxt_id);
764 if (err)
765 goto freeout;
766 }
767 }
768
769 j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
770 for_each_ofldrxq(s, i) {
771 struct sge_ofld_rxq *q = &s->ofldrxq[i];
772 struct net_device *dev = adap->port[i / j];
773
774 if (msi_idx > 0)
775 msi_idx++;
776 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
777 &q->fl, uldrx_handler);
778 if (err)
779 goto freeout;
780 memset(&q->stats, 0, sizeof(q->stats));
781 s->ofld_rxq[i] = q->rspq.abs_id;
782 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
783 s->fw_evtq.cntxt_id);
784 if (err)
785 goto freeout;
786 }
787
788 for_each_rdmarxq(s, i) {
789 struct sge_ofld_rxq *q = &s->rdmarxq[i];
790
791 if (msi_idx > 0)
792 msi_idx++;
793 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
794 msi_idx, &q->fl, uldrx_handler);
795 if (err)
796 goto freeout;
797 memset(&q->stats, 0, sizeof(q->stats));
798 s->rdma_rxq[i] = q->rspq.abs_id;
799 }
800
801 for_each_port(adap, i) {
802 /*
803 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
804 * have RDMA queues, and that's the right value.
805 */
806 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
807 s->fw_evtq.cntxt_id,
808 s->rdmarxq[i].rspq.cntxt_id);
809 if (err)
810 goto freeout;
811 }
812
813 t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
814 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
815 QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
816 return 0;
817}
818
819/*
820 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
821 * started but failed, and a negative errno if flash load couldn't start.
822 */
823static int upgrade_fw(struct adapter *adap)
824{
825 int ret;
826 u32 vers;
827 const struct fw_hdr *hdr;
828 const struct firmware *fw;
829 struct device *dev = adap->pdev_dev;
830
831 ret = request_firmware(&fw, FW_FNAME, dev);
832 if (ret < 0) {
833 dev_err(dev, "unable to load firmware image " FW_FNAME
834 ", error %d\n", ret);
835 return ret;
836 }
837
838 hdr = (const struct fw_hdr *)fw->data;
839 vers = ntohl(hdr->fw_ver);
840 if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
841 ret = -EINVAL; /* wrong major version, won't do */
842 goto out;
843 }
844
845 /*
846 * If the flash FW is unusable or we found something newer, load it.
847 */
848 if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
849 vers > adap->params.fw_vers) {
850 ret = -t4_load_fw(adap, fw->data, fw->size);
851 if (!ret)
852 dev_info(dev, "firmware upgraded to version %pI4 from "
853 FW_FNAME "\n", &hdr->fw_ver);
854 }
855out: release_firmware(fw);
856 return ret;
857}
858
859/*
860 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
861 * The allocated memory is cleared.
862 */
863void *t4_alloc_mem(size_t size)
864{
865 void *p = kzalloc(size, GFP_KERNEL);
866
867 if (!p)
868 p = vzalloc(size);
869 return p;
870}
871
872/*
873 * Free memory allocated through alloc_mem().
874 */
875static void t4_free_mem(void *addr)
876{
877 if (is_vmalloc_addr(addr))
878 vfree(addr);
879 else
880 kfree(addr);
881}
882
883static inline int is_offload(const struct adapter *adap)
884{
885 return adap->params.offload;
886}
887
888/*
889 * Implementation of ethtool operations.
890 */
891
892static u32 get_msglevel(struct net_device *dev)
893{
894 return netdev2adap(dev)->msg_enable;
895}
896
897static void set_msglevel(struct net_device *dev, u32 val)
898{
899 netdev2adap(dev)->msg_enable = val;
900}
901
902static char stats_strings[][ETH_GSTRING_LEN] = {
903 "TxOctetsOK ",
904 "TxFramesOK ",
905 "TxBroadcastFrames ",
906 "TxMulticastFrames ",
907 "TxUnicastFrames ",
908 "TxErrorFrames ",
909
910 "TxFrames64 ",
911 "TxFrames65To127 ",
912 "TxFrames128To255 ",
913 "TxFrames256To511 ",
914 "TxFrames512To1023 ",
915 "TxFrames1024To1518 ",
916 "TxFrames1519ToMax ",
917
918 "TxFramesDropped ",
919 "TxPauseFrames ",
920 "TxPPP0Frames ",
921 "TxPPP1Frames ",
922 "TxPPP2Frames ",
923 "TxPPP3Frames ",
924 "TxPPP4Frames ",
925 "TxPPP5Frames ",
926 "TxPPP6Frames ",
927 "TxPPP7Frames ",
928
929 "RxOctetsOK ",
930 "RxFramesOK ",
931 "RxBroadcastFrames ",
932 "RxMulticastFrames ",
933 "RxUnicastFrames ",
934
935 "RxFramesTooLong ",
936 "RxJabberErrors ",
937 "RxFCSErrors ",
938 "RxLengthErrors ",
939 "RxSymbolErrors ",
940 "RxRuntFrames ",
941
942 "RxFrames64 ",
943 "RxFrames65To127 ",
944 "RxFrames128To255 ",
945 "RxFrames256To511 ",
946 "RxFrames512To1023 ",
947 "RxFrames1024To1518 ",
948 "RxFrames1519ToMax ",
949
950 "RxPauseFrames ",
951 "RxPPP0Frames ",
952 "RxPPP1Frames ",
953 "RxPPP2Frames ",
954 "RxPPP3Frames ",
955 "RxPPP4Frames ",
956 "RxPPP5Frames ",
957 "RxPPP6Frames ",
958 "RxPPP7Frames ",
959
960 "RxBG0FramesDropped ",
961 "RxBG1FramesDropped ",
962 "RxBG2FramesDropped ",
963 "RxBG3FramesDropped ",
964 "RxBG0FramesTrunc ",
965 "RxBG1FramesTrunc ",
966 "RxBG2FramesTrunc ",
967 "RxBG3FramesTrunc ",
968
969 "TSO ",
970 "TxCsumOffload ",
971 "RxCsumGood ",
972 "VLANextractions ",
973 "VLANinsertions ",
974 "GROpackets ",
975 "GROmerged ",
976};
977
978static int get_sset_count(struct net_device *dev, int sset)
979{
980 switch (sset) {
981 case ETH_SS_STATS:
982 return ARRAY_SIZE(stats_strings);
983 default:
984 return -EOPNOTSUPP;
985 }
986}
987
988#define T4_REGMAP_SIZE (160 * 1024)
989
990static int get_regs_len(struct net_device *dev)
991{
992 return T4_REGMAP_SIZE;
993}
994
995static int get_eeprom_len(struct net_device *dev)
996{
997 return EEPROMSIZE;
998}
999
1000static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1001{
1002 struct adapter *adapter = netdev2adap(dev);
1003
1004 strcpy(info->driver, KBUILD_MODNAME);
1005 strcpy(info->version, DRV_VERSION);
1006 strcpy(info->bus_info, pci_name(adapter->pdev));
1007
1008 if (!adapter->params.fw_vers)
1009 strcpy(info->fw_version, "N/A");
1010 else
1011 snprintf(info->fw_version, sizeof(info->fw_version),
1012 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1013 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
1014 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
1015 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
1016 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
1017 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
1018 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
1019 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
1020 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
1021}
1022
1023static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1024{
1025 if (stringset == ETH_SS_STATS)
1026 memcpy(data, stats_strings, sizeof(stats_strings));
1027}
1028
1029/*
 1030 * Port stats maintained per queue of the port. They should be in the same
1031 * order as in stats_strings above.
1032 */
1033struct queue_port_stats {
1034 u64 tso;
1035 u64 tx_csum;
1036 u64 rx_csum;
1037 u64 vlan_ex;
1038 u64 vlan_ins;
1039 u64 gro_pkts;
1040 u64 gro_merged;
1041};
1042
1043static void collect_sge_port_stats(const struct adapter *adap,
1044 const struct port_info *p, struct queue_port_stats *s)
1045{
1046 int i;
1047 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1048 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1049
1050 memset(s, 0, sizeof(*s));
1051 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1052 s->tso += tx->tso;
1053 s->tx_csum += tx->tx_cso;
1054 s->rx_csum += rx->stats.rx_cso;
1055 s->vlan_ex += rx->stats.vlan_ex;
1056 s->vlan_ins += tx->vlan_ins;
1057 s->gro_pkts += rx->stats.lro_pkts;
1058 s->gro_merged += rx->stats.lro_merged;
1059 }
1060}
1061
1062static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1063 u64 *data)
1064{
1065 struct port_info *pi = netdev_priv(dev);
1066 struct adapter *adapter = pi->adapter;
1067
1068 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1069
1070 data += sizeof(struct port_stats) / sizeof(u64);
1071 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1072}
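/*
 * Editor's note (illustrative, not part of the original source): the u64
 * array filled in by get_stats() is a struct port_stats followed by a
 * struct queue_port_stats, and that layout must match stats_strings[]
 * entry for entry.  A hedged compile-time sketch of that invariant:
 *
 *	BUILD_BUG_ON(ARRAY_SIZE(stats_strings) !=
 *		     (sizeof(struct port_stats) +
 *		      sizeof(struct queue_port_stats)) / sizeof(u64));
 */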
1073
1074/*
1075 * Return a version number to identify the type of adapter. The scheme is:
1076 * - bits 0..9: chip version
1077 * - bits 10..15: chip revision
1078 * - bits 16..23: register dump version
1079 */
1080static inline unsigned int mk_adap_vers(const struct adapter *ap)
1081{
1082 return 4 | (ap->params.rev << 10) | (1 << 16);
1083}
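/*
 * Worked example (editorial annotation, not original source): for a T4
 * chip (chip version 4) at silicon revision 1, mk_adap_vers() returns
 * 4 | (1 << 10) | (1 << 16) = 0x10404, i.e. register dump version 1,
 * chip revision 1, chip version 4.
 */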
1084
1085static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1086 unsigned int end)
1087{
1088 u32 *p = buf + start;
1089
1090 for ( ; start <= end; start += sizeof(u32))
1091 *p++ = t4_read_reg(ap, start);
1092}
1093
1094static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1095 void *buf)
1096{
1097 static const unsigned int reg_ranges[] = {
1098 0x1008, 0x1108,
1099 0x1180, 0x11b4,
1100 0x11fc, 0x123c,
1101 0x1300, 0x173c,
1102 0x1800, 0x18fc,
1103 0x3000, 0x30d8,
1104 0x30e0, 0x5924,
1105 0x5960, 0x59d4,
1106 0x5a00, 0x5af8,
1107 0x6000, 0x6098,
1108 0x6100, 0x6150,
1109 0x6200, 0x6208,
1110 0x6240, 0x6248,
1111 0x6280, 0x6338,
1112 0x6370, 0x638c,
1113 0x6400, 0x643c,
1114 0x6500, 0x6524,
1115 0x6a00, 0x6a38,
1116 0x6a60, 0x6a78,
1117 0x6b00, 0x6b84,
1118 0x6bf0, 0x6c84,
1119 0x6cf0, 0x6d84,
1120 0x6df0, 0x6e84,
1121 0x6ef0, 0x6f84,
1122 0x6ff0, 0x7084,
1123 0x70f0, 0x7184,
1124 0x71f0, 0x7284,
1125 0x72f0, 0x7384,
1126 0x73f0, 0x7450,
1127 0x7500, 0x7530,
1128 0x7600, 0x761c,
1129 0x7680, 0x76cc,
1130 0x7700, 0x7798,
1131 0x77c0, 0x77fc,
1132 0x7900, 0x79fc,
1133 0x7b00, 0x7c38,
1134 0x7d00, 0x7efc,
1135 0x8dc0, 0x8e1c,
1136 0x8e30, 0x8e78,
1137 0x8ea0, 0x8f6c,
1138 0x8fc0, 0x9074,
1139 0x90fc, 0x90fc,
1140 0x9400, 0x9458,
1141 0x9600, 0x96bc,
1142 0x9800, 0x9808,
1143 0x9820, 0x983c,
1144 0x9850, 0x9864,
1145 0x9c00, 0x9c6c,
1146 0x9c80, 0x9cec,
1147 0x9d00, 0x9d6c,
1148 0x9d80, 0x9dec,
1149 0x9e00, 0x9e6c,
1150 0x9e80, 0x9eec,
1151 0x9f00, 0x9f6c,
1152 0x9f80, 0x9fec,
1153 0xd004, 0xd03c,
1154 0xdfc0, 0xdfe0,
1155 0xe000, 0xea7c,
1156 0xf000, 0x11190,
1157 0x19040, 0x1906c,
1158 0x19078, 0x19080,
1159 0x1908c, 0x19124,
1160 0x19150, 0x191b0,
1161 0x191d0, 0x191e8,
1162 0x19238, 0x1924c,
1163 0x193f8, 0x19474,
1164 0x19490, 0x194f8,
1165 0x19800, 0x19f30,
1166 0x1a000, 0x1a06c,
1167 0x1a0b0, 0x1a120,
1168 0x1a128, 0x1a138,
1169 0x1a190, 0x1a1c4,
1170 0x1a1fc, 0x1a1fc,
1171 0x1e040, 0x1e04c,
1172 0x1e284, 0x1e28c,
1173 0x1e2c0, 0x1e2c0,
1174 0x1e2e0, 0x1e2e0,
1175 0x1e300, 0x1e384,
1176 0x1e3c0, 0x1e3c8,
1177 0x1e440, 0x1e44c,
1178 0x1e684, 0x1e68c,
1179 0x1e6c0, 0x1e6c0,
1180 0x1e6e0, 0x1e6e0,
1181 0x1e700, 0x1e784,
1182 0x1e7c0, 0x1e7c8,
1183 0x1e840, 0x1e84c,
1184 0x1ea84, 0x1ea8c,
1185 0x1eac0, 0x1eac0,
1186 0x1eae0, 0x1eae0,
1187 0x1eb00, 0x1eb84,
1188 0x1ebc0, 0x1ebc8,
1189 0x1ec40, 0x1ec4c,
1190 0x1ee84, 0x1ee8c,
1191 0x1eec0, 0x1eec0,
1192 0x1eee0, 0x1eee0,
1193 0x1ef00, 0x1ef84,
1194 0x1efc0, 0x1efc8,
1195 0x1f040, 0x1f04c,
1196 0x1f284, 0x1f28c,
1197 0x1f2c0, 0x1f2c0,
1198 0x1f2e0, 0x1f2e0,
1199 0x1f300, 0x1f384,
1200 0x1f3c0, 0x1f3c8,
1201 0x1f440, 0x1f44c,
1202 0x1f684, 0x1f68c,
1203 0x1f6c0, 0x1f6c0,
1204 0x1f6e0, 0x1f6e0,
1205 0x1f700, 0x1f784,
1206 0x1f7c0, 0x1f7c8,
1207 0x1f840, 0x1f84c,
1208 0x1fa84, 0x1fa8c,
1209 0x1fac0, 0x1fac0,
1210 0x1fae0, 0x1fae0,
1211 0x1fb00, 0x1fb84,
1212 0x1fbc0, 0x1fbc8,
1213 0x1fc40, 0x1fc4c,
1214 0x1fe84, 0x1fe8c,
1215 0x1fec0, 0x1fec0,
1216 0x1fee0, 0x1fee0,
1217 0x1ff00, 0x1ff84,
1218 0x1ffc0, 0x1ffc8,
1219 0x20000, 0x2002c,
1220 0x20100, 0x2013c,
1221 0x20190, 0x201c8,
1222 0x20200, 0x20318,
1223 0x20400, 0x20528,
1224 0x20540, 0x20614,
1225 0x21000, 0x21040,
1226 0x2104c, 0x21060,
1227 0x210c0, 0x210ec,
1228 0x21200, 0x21268,
1229 0x21270, 0x21284,
1230 0x212fc, 0x21388,
1231 0x21400, 0x21404,
1232 0x21500, 0x21518,
1233 0x2152c, 0x2153c,
1234 0x21550, 0x21554,
1235 0x21600, 0x21600,
1236 0x21608, 0x21628,
1237 0x21630, 0x2163c,
1238 0x21700, 0x2171c,
1239 0x21780, 0x2178c,
1240 0x21800, 0x21c38,
1241 0x21c80, 0x21d7c,
1242 0x21e00, 0x21e04,
1243 0x22000, 0x2202c,
1244 0x22100, 0x2213c,
1245 0x22190, 0x221c8,
1246 0x22200, 0x22318,
1247 0x22400, 0x22528,
1248 0x22540, 0x22614,
1249 0x23000, 0x23040,
1250 0x2304c, 0x23060,
1251 0x230c0, 0x230ec,
1252 0x23200, 0x23268,
1253 0x23270, 0x23284,
1254 0x232fc, 0x23388,
1255 0x23400, 0x23404,
1256 0x23500, 0x23518,
1257 0x2352c, 0x2353c,
1258 0x23550, 0x23554,
1259 0x23600, 0x23600,
1260 0x23608, 0x23628,
1261 0x23630, 0x2363c,
1262 0x23700, 0x2371c,
1263 0x23780, 0x2378c,
1264 0x23800, 0x23c38,
1265 0x23c80, 0x23d7c,
1266 0x23e00, 0x23e04,
1267 0x24000, 0x2402c,
1268 0x24100, 0x2413c,
1269 0x24190, 0x241c8,
1270 0x24200, 0x24318,
1271 0x24400, 0x24528,
1272 0x24540, 0x24614,
1273 0x25000, 0x25040,
1274 0x2504c, 0x25060,
1275 0x250c0, 0x250ec,
1276 0x25200, 0x25268,
1277 0x25270, 0x25284,
1278 0x252fc, 0x25388,
1279 0x25400, 0x25404,
1280 0x25500, 0x25518,
1281 0x2552c, 0x2553c,
1282 0x25550, 0x25554,
1283 0x25600, 0x25600,
1284 0x25608, 0x25628,
1285 0x25630, 0x2563c,
1286 0x25700, 0x2571c,
1287 0x25780, 0x2578c,
1288 0x25800, 0x25c38,
1289 0x25c80, 0x25d7c,
1290 0x25e00, 0x25e04,
1291 0x26000, 0x2602c,
1292 0x26100, 0x2613c,
1293 0x26190, 0x261c8,
1294 0x26200, 0x26318,
1295 0x26400, 0x26528,
1296 0x26540, 0x26614,
1297 0x27000, 0x27040,
1298 0x2704c, 0x27060,
1299 0x270c0, 0x270ec,
1300 0x27200, 0x27268,
1301 0x27270, 0x27284,
1302 0x272fc, 0x27388,
1303 0x27400, 0x27404,
1304 0x27500, 0x27518,
1305 0x2752c, 0x2753c,
1306 0x27550, 0x27554,
1307 0x27600, 0x27600,
1308 0x27608, 0x27628,
1309 0x27630, 0x2763c,
1310 0x27700, 0x2771c,
1311 0x27780, 0x2778c,
1312 0x27800, 0x27c38,
1313 0x27c80, 0x27d7c,
1314 0x27e00, 0x27e04
1315 };
1316
1317 int i;
1318 struct adapter *ap = netdev2adap(dev);
1319
1320 regs->version = mk_adap_vers(ap);
1321
1322 memset(buf, 0, T4_REGMAP_SIZE);
1323 for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
1324 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
1325}
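/*
 * Editorial note on the table above (not original source): reg_ranges[]
 * is a list of inclusive [start, end] pairs, so each pair dumps
 * (end - start) / 4 + 1 32-bit registers into the buffer at offset
 * 'start'.  For example, the first pair 0x1008-0x1108 covers
 * (0x1108 - 0x1008) / 4 + 1 = 65 registers; gaps between ranges are left
 * zeroed by the memset() of the full T4_REGMAP_SIZE buffer.
 */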
1326
1327static int restart_autoneg(struct net_device *dev)
1328{
1329 struct port_info *p = netdev_priv(dev);
1330
1331 if (!netif_running(dev))
1332 return -EAGAIN;
1333 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
1334 return -EINVAL;
1335 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
1336 return 0;
1337}
1338
1339static int identify_port(struct net_device *dev,
1340 enum ethtool_phys_id_state state)
1341{
1342 unsigned int val;
1343 struct adapter *adap = netdev2adap(dev);
1344
1345 if (state == ETHTOOL_ID_ACTIVE)
1346 val = 0xffff;
1347 else if (state == ETHTOOL_ID_INACTIVE)
1348 val = 0;
1349 else
1350 return -EINVAL;
1351
1352 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
1353}
1354
1355static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
1356{
1357 unsigned int v = 0;
1358
1359 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
1360 type == FW_PORT_TYPE_BT_XAUI) {
1361 v |= SUPPORTED_TP;
1362 if (caps & FW_PORT_CAP_SPEED_100M)
1363 v |= SUPPORTED_100baseT_Full;
1364 if (caps & FW_PORT_CAP_SPEED_1G)
1365 v |= SUPPORTED_1000baseT_Full;
1366 if (caps & FW_PORT_CAP_SPEED_10G)
1367 v |= SUPPORTED_10000baseT_Full;
1368 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
1369 v |= SUPPORTED_Backplane;
1370 if (caps & FW_PORT_CAP_SPEED_1G)
1371 v |= SUPPORTED_1000baseKX_Full;
1372 if (caps & FW_PORT_CAP_SPEED_10G)
1373 v |= SUPPORTED_10000baseKX4_Full;
1374 } else if (type == FW_PORT_TYPE_KR)
1375 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
1376 else if (type == FW_PORT_TYPE_BP_AP)
1377 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1378 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
1379 else if (type == FW_PORT_TYPE_BP4_AP)
1380 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1381 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
1382 SUPPORTED_10000baseKX4_Full;
1383 else if (type == FW_PORT_TYPE_FIBER_XFI ||
1384 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
1385 v |= SUPPORTED_FIBRE;
1386
1387 if (caps & FW_PORT_CAP_ANEG)
1388 v |= SUPPORTED_Autoneg;
1389 return v;
1390}
1391
1392static unsigned int to_fw_linkcaps(unsigned int caps)
1393{
1394 unsigned int v = 0;
1395
1396 if (caps & ADVERTISED_100baseT_Full)
1397 v |= FW_PORT_CAP_SPEED_100M;
1398 if (caps & ADVERTISED_1000baseT_Full)
1399 v |= FW_PORT_CAP_SPEED_1G;
1400 if (caps & ADVERTISED_10000baseT_Full)
1401 v |= FW_PORT_CAP_SPEED_10G;
1402 return v;
1403}
1404
1405static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1406{
1407 const struct port_info *p = netdev_priv(dev);
1408
1409 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
1410 p->port_type == FW_PORT_TYPE_BT_XFI ||
1411 p->port_type == FW_PORT_TYPE_BT_XAUI)
1412 cmd->port = PORT_TP;
1413 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
1414 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
1415 cmd->port = PORT_FIBRE;
1416 else if (p->port_type == FW_PORT_TYPE_SFP) {
1417 if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
1418 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
1419 cmd->port = PORT_DA;
1420 else
1421 cmd->port = PORT_FIBRE;
1422 } else
1423 cmd->port = PORT_OTHER;
1424
1425 if (p->mdio_addr >= 0) {
1426 cmd->phy_address = p->mdio_addr;
1427 cmd->transceiver = XCVR_EXTERNAL;
1428 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
1429 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
1430 } else {
1431 cmd->phy_address = 0; /* not really, but no better option */
1432 cmd->transceiver = XCVR_INTERNAL;
1433 cmd->mdio_support = 0;
1434 }
1435
1436 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
1437 cmd->advertising = from_fw_linkcaps(p->port_type,
1438 p->link_cfg.advertising);
1439 ethtool_cmd_speed_set(cmd,
1440 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
1441 cmd->duplex = DUPLEX_FULL;
1442 cmd->autoneg = p->link_cfg.autoneg;
1443 cmd->maxtxpkt = 0;
1444 cmd->maxrxpkt = 0;
1445 return 0;
1446}
1447
1448static unsigned int speed_to_caps(int speed)
1449{
1450 if (speed == SPEED_100)
1451 return FW_PORT_CAP_SPEED_100M;
1452 if (speed == SPEED_1000)
1453 return FW_PORT_CAP_SPEED_1G;
1454 if (speed == SPEED_10000)
1455 return FW_PORT_CAP_SPEED_10G;
1456 return 0;
1457}
1458
1459static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1460{
1461 unsigned int cap;
1462 struct port_info *p = netdev_priv(dev);
1463 struct link_config *lc = &p->link_cfg;
1464 u32 speed = ethtool_cmd_speed(cmd);
1465
1466 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
1467 return -EINVAL;
1468
1469 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1470 /*
1471 * PHY offers a single speed. See if that's what's
1472 * being requested.
1473 */
1474 if (cmd->autoneg == AUTONEG_DISABLE &&
1475 (lc->supported & speed_to_caps(speed)))
1476 return 0;
1477 return -EINVAL;
1478 }
1479
1480 if (cmd->autoneg == AUTONEG_DISABLE) {
1481 cap = speed_to_caps(speed);
1482
1483 if (!(lc->supported & cap) || (speed == SPEED_1000) ||
1484 (speed == SPEED_10000))
1485 return -EINVAL;
1486 lc->requested_speed = cap;
1487 lc->advertising = 0;
1488 } else {
1489 cap = to_fw_linkcaps(cmd->advertising);
1490 if (!(lc->supported & cap))
1491 return -EINVAL;
1492 lc->requested_speed = 0;
1493 lc->advertising = cap | FW_PORT_CAP_ANEG;
1494 }
1495 lc->autoneg = cmd->autoneg;
1496
1497 if (netif_running(dev))
1498 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1499 lc);
1500 return 0;
1501}
1502
1503static void get_pauseparam(struct net_device *dev,
1504 struct ethtool_pauseparam *epause)
1505{
1506 struct port_info *p = netdev_priv(dev);
1507
1508 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1509 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
1510 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
1511}
1512
1513static int set_pauseparam(struct net_device *dev,
1514 struct ethtool_pauseparam *epause)
1515{
1516 struct port_info *p = netdev_priv(dev);
1517 struct link_config *lc = &p->link_cfg;
1518
1519 if (epause->autoneg == AUTONEG_DISABLE)
1520 lc->requested_fc = 0;
1521 else if (lc->supported & FW_PORT_CAP_ANEG)
1522 lc->requested_fc = PAUSE_AUTONEG;
1523 else
1524 return -EINVAL;
1525
1526 if (epause->rx_pause)
1527 lc->requested_fc |= PAUSE_RX;
1528 if (epause->tx_pause)
1529 lc->requested_fc |= PAUSE_TX;
1530 if (netif_running(dev))
1531 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1532 lc);
1533 return 0;
1534}
1535
1536static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1537{
1538 const struct port_info *pi = netdev_priv(dev);
1539 const struct sge *s = &pi->adapter->sge;
1540
1541 e->rx_max_pending = MAX_RX_BUFFERS;
1542 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1543 e->rx_jumbo_max_pending = 0;
1544 e->tx_max_pending = MAX_TXQ_ENTRIES;
1545
1546 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
1547 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1548 e->rx_jumbo_pending = 0;
1549 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
1550}
1551
1552static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1553{
1554 int i;
1555 const struct port_info *pi = netdev_priv(dev);
1556 struct adapter *adapter = pi->adapter;
1557 struct sge *s = &adapter->sge;
1558
1559 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
1560 e->tx_pending > MAX_TXQ_ENTRIES ||
1561 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1562 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1563 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
1564 return -EINVAL;
1565
1566 if (adapter->flags & FULL_INIT_DONE)
1567 return -EBUSY;
1568
1569 for (i = 0; i < pi->nqsets; ++i) {
1570 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
1571 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
1572 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
1573 }
1574 return 0;
1575}
1576
1577static int closest_timer(const struct sge *s, int time)
1578{
1579 int i, delta, match = 0, min_delta = INT_MAX;
1580
1581 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1582 delta = time - s->timer_val[i];
1583 if (delta < 0)
1584 delta = -delta;
1585 if (delta < min_delta) {
1586 min_delta = delta;
1587 match = i;
1588 }
1589 }
1590 return match;
1591}
1592
1593static int closest_thres(const struct sge *s, int thres)
1594{
1595 int i, delta, match = 0, min_delta = INT_MAX;
1596
1597 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1598 delta = thres - s->counter_val[i];
1599 if (delta < 0)
1600 delta = -delta;
1601 if (delta < min_delta) {
1602 min_delta = delta;
1603 match = i;
1604 }
1605 }
1606 return match;
1607}
1608
1609/*
1610 * Return a queue's interrupt hold-off time in us. 0 means no timer.
1611 */
1612static unsigned int qtimer_val(const struct adapter *adap,
1613 const struct sge_rspq *q)
1614{
1615 unsigned int idx = q->intr_params >> 1;
1616
1617 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
1618}
1619
1620/**
1621 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
1622 * @adap: the adapter
1623 * @q: the Rx queue
1624 * @us: the hold-off time in us, or 0 to disable timer
1625 * @cnt: the hold-off packet count, or 0 to disable counter
1626 *
1627 * Sets an Rx queue's interrupt hold-off time and packet count. At least
1628 * one of the two needs to be enabled for the queue to generate interrupts.
1629 */
1630static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
1631 unsigned int us, unsigned int cnt)
1632{
1633 if ((us | cnt) == 0)
1634 cnt = 1;
1635
1636 if (cnt) {
1637 int err;
1638 u32 v, new_idx;
1639
1640 new_idx = closest_thres(&adap->sge, cnt);
1641 if (q->desc && q->pktcnt_idx != new_idx) {
1642 /* the queue has already been created, update it */
1643 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1644 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1645 FW_PARAMS_PARAM_YZ(q->cntxt_id);
1646 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
1647 &new_idx);
1648 if (err)
1649 return err;
1650 }
1651 q->pktcnt_idx = new_idx;
1652 }
1653
1654 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1655 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
1656 return 0;
1657}
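/*
 * Illustrative reading of the encoding above (editorial annotation;
 * assumes SGE_NTIMERS == 6, as suggested by qtimer_val()): intr_params
 * keeps QINTR_CNT_EN in bit 0 and the holdoff timer index in the bits
 * above it.  Passing us == 0 therefore selects timer index 6, which
 * qtimer_val() reports as "no timer", leaving a pure packet-count
 * hold-off.
 */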
1658
1659static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1660{
1661 const struct port_info *pi = netdev_priv(dev);
1662 struct adapter *adap = pi->adapter;
1663
1664 return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
1665 c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
1666}
1667
1668static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1669{
1670 const struct port_info *pi = netdev_priv(dev);
1671 const struct adapter *adap = pi->adapter;
1672 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
1673
1674 c->rx_coalesce_usecs = qtimer_val(adap, rq);
1675 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
1676 adap->sge.counter_val[rq->pktcnt_idx] : 0;
1677 return 0;
1678}
1679
1680/**
1681 * eeprom_ptov - translate a physical EEPROM address to virtual
1682 * @phys_addr: the physical EEPROM address
1683 * @fn: the PCI function number
1684 * @sz: size of function-specific area
1685 *
1686 * Translate a physical EEPROM address to virtual. The first 1K is
1687 * accessed through virtual addresses starting at 31K; the rest is
1688 * accessed through virtual addresses starting at 0.
1689 *
1690 * The mapping is as follows:
1691 * [0..1K) -> [31K..32K)
1692 * [1K..1K+A) -> [31K-A..31K)
1693 * [1K+A..ES) -> [0..ES-A-1K)
1694 *
1695 * where A = @fn * @sz, and ES = EEPROM size.
1696 */
1697static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
1698{
1699 fn *= sz;
1700 if (phys_addr < 1024)
1701 return phys_addr + (31 << 10);
1702 if (phys_addr < 1024 + fn)
1703 return 31744 - fn + phys_addr - 1024;
1704 if (phys_addr < EEPROMSIZE)
1705 return phys_addr - 1024 - fn;
1706 return -EINVAL;
1707}
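/*
 * Worked example (editorial annotation; assumes a 1 KB per-function
 * area, i.e. sz == 1024, and fn == 1, so A == 1024):
 *
 *	phys 0x000 -> 0x000 + (31 << 10)           = 31744 (0x7c00)
 *	phys 0x500 -> 31744 - 1024 + 0x500 - 1024  = 30976 (0x7900)
 *	phys 0xbb8 -> 0xbb8 - 1024 - 1024          = 952   (0x3b8)
 */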
1708
1709/*
1710 * The next two routines implement eeprom read/write from physical addresses.
1711 */
1712static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1713{
1714 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
1715
1716 if (vaddr >= 0)
1717 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
1718 return vaddr < 0 ? vaddr : 0;
1719}
1720
1721static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1722{
1723 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
1724
1725 if (vaddr >= 0)
1726 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
1727 return vaddr < 0 ? vaddr : 0;
1728}
1729
1730#define EEPROM_MAGIC 0x38E2F10C
1731
1732static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1733 u8 *data)
1734{
1735 int i, err = 0;
1736 struct adapter *adapter = netdev2adap(dev);
1737
1738 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1739 if (!buf)
1740 return -ENOMEM;
1741
1742 e->magic = EEPROM_MAGIC;
1743 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1744 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1745
1746 if (!err)
1747 memcpy(data, buf + e->offset, e->len);
1748 kfree(buf);
1749 return err;
1750}
1751
1752static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1753 u8 *data)
1754{
1755 u8 *buf;
1756 int err = 0;
1757 u32 aligned_offset, aligned_len, *p;
1758 struct adapter *adapter = netdev2adap(dev);
1759
1760 if (eeprom->magic != EEPROM_MAGIC)
1761 return -EINVAL;
1762
1763 aligned_offset = eeprom->offset & ~3;
1764 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1765
1766 if (adapter->fn > 0) {
1767 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
1768
1769 if (aligned_offset < start ||
1770 aligned_offset + aligned_len > start + EEPROMPFSIZE)
1771 return -EPERM;
1772 }
1773
1774 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1775 /*
1776 * RMW possibly needed for first or last words.
1777 */
1778 buf = kmalloc(aligned_len, GFP_KERNEL);
1779 if (!buf)
1780 return -ENOMEM;
1781 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
1782 if (!err && aligned_len > 4)
1783 err = eeprom_rd_phys(adapter,
1784 aligned_offset + aligned_len - 4,
1785 (u32 *)&buf[aligned_len - 4]);
1786 if (err)
1787 goto out;
1788 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1789 } else
1790 buf = data;
1791
1792 err = t4_seeprom_wp(adapter, false);
1793 if (err)
1794 goto out;
1795
1796 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1797 err = eeprom_wr_phys(adapter, aligned_offset, *p);
1798 aligned_offset += 4;
1799 }
1800
1801 if (!err)
1802 err = t4_seeprom_wp(adapter, true);
1803out:
1804 if (buf != data)
1805 kfree(buf);
1806 return err;
1807}
1808
1809static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
1810{
1811 int ret;
1812 const struct firmware *fw;
1813 struct adapter *adap = netdev2adap(netdev);
1814
1815 ef->data[sizeof(ef->data) - 1] = '\0';
1816 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
1817 if (ret < 0)
1818 return ret;
1819
1820 ret = t4_load_fw(adap, fw->data, fw->size);
1821 release_firmware(fw);
1822 if (!ret)
1823 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
1824 return ret;
1825}
1826
1827#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
1828#define BCAST_CRC 0xa0ccc1a6
1829
1830static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1831{
1832 wol->supported = WOL_SUPPORTED;
1833 wol->wolopts = netdev2adap(dev)->wol;
1834 memset(&wol->sopass, 0, sizeof(wol->sopass));
1835}
1836
1837static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1838{
1839 int err = 0;
1840 struct port_info *pi = netdev_priv(dev);
1841
1842 if (wol->wolopts & ~WOL_SUPPORTED)
1843 return -EINVAL;
1844 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
1845 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
1846 if (wol->wolopts & WAKE_BCAST) {
1847 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
1848 ~0ULL, 0, false);
1849 if (!err)
1850 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
1851 ~6ULL, ~0ULL, BCAST_CRC, true);
1852 } else
1853 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
1854 return err;
1855}
1856
1857static int cxgb_set_features(struct net_device *dev, u32 features)
1858{
1859 const struct port_info *pi = netdev_priv(dev);
1860 u32 changed = dev->features ^ features;
1861 int err;
1862
1863 if (!(changed & NETIF_F_HW_VLAN_RX))
1864 return 0;
1865
1866 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
1867 -1, -1, -1,
1868 !!(features & NETIF_F_HW_VLAN_RX), true);
1869 if (unlikely(err))
1870 dev->features = features ^ NETIF_F_HW_VLAN_RX;
1871 return err;
1872}
1873
1874static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
1875{
1876 const struct port_info *pi = netdev_priv(dev);
1877 unsigned int n = min_t(unsigned int, p->size, pi->rss_size);
1878
1879 p->size = pi->rss_size;
1880 while (n--)
1881 p->ring_index[n] = pi->rss[n];
1882 return 0;
1883}
1884
1885static int set_rss_table(struct net_device *dev,
1886 const struct ethtool_rxfh_indir *p)
1887{
1888 unsigned int i;
1889 struct port_info *pi = netdev_priv(dev);
1890
1891 if (p->size != pi->rss_size)
1892 return -EINVAL;
1893 for (i = 0; i < p->size; i++)
1894 if (p->ring_index[i] >= pi->nqsets)
1895 return -EINVAL;
1896 for (i = 0; i < p->size; i++)
1897 pi->rss[i] = p->ring_index[i];
1898 if (pi->adapter->flags & FULL_INIT_DONE)
1899 return write_rss(pi, pi->rss);
1900 return 0;
1901}
1902
1903static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1904 void *rules)
1905{
1906 const struct port_info *pi = netdev_priv(dev);
1907
1908 switch (info->cmd) {
1909 case ETHTOOL_GRXFH: {
1910 unsigned int v = pi->rss_mode;
1911
1912 info->data = 0;
1913 switch (info->flow_type) {
1914 case TCP_V4_FLOW:
1915 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
1916 info->data = RXH_IP_SRC | RXH_IP_DST |
1917 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1918 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1919 info->data = RXH_IP_SRC | RXH_IP_DST;
1920 break;
1921 case UDP_V4_FLOW:
1922 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
1923 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
1924 info->data = RXH_IP_SRC | RXH_IP_DST |
1925 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1926 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1927 info->data = RXH_IP_SRC | RXH_IP_DST;
1928 break;
1929 case SCTP_V4_FLOW:
1930 case AH_ESP_V4_FLOW:
1931 case IPV4_FLOW:
1932 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1933 info->data = RXH_IP_SRC | RXH_IP_DST;
1934 break;
1935 case TCP_V6_FLOW:
1936 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
1937 info->data = RXH_IP_SRC | RXH_IP_DST |
1938 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1939 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1940 info->data = RXH_IP_SRC | RXH_IP_DST;
1941 break;
1942 case UDP_V6_FLOW:
1943 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
1944 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
1945 info->data = RXH_IP_SRC | RXH_IP_DST |
1946 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1947 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1948 info->data = RXH_IP_SRC | RXH_IP_DST;
1949 break;
1950 case SCTP_V6_FLOW:
1951 case AH_ESP_V6_FLOW:
1952 case IPV6_FLOW:
1953 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1954 info->data = RXH_IP_SRC | RXH_IP_DST;
1955 break;
1956 }
1957 return 0;
1958 }
1959 case ETHTOOL_GRXRINGS:
1960 info->data = pi->nqsets;
1961 return 0;
1962 }
1963 return -EOPNOTSUPP;
1964}
1965
1966static struct ethtool_ops cxgb_ethtool_ops = {
1967 .get_settings = get_settings,
1968 .set_settings = set_settings,
1969 .get_drvinfo = get_drvinfo,
1970 .get_msglevel = get_msglevel,
1971 .set_msglevel = set_msglevel,
1972 .get_ringparam = get_sge_param,
1973 .set_ringparam = set_sge_param,
1974 .get_coalesce = get_coalesce,
1975 .set_coalesce = set_coalesce,
1976 .get_eeprom_len = get_eeprom_len,
1977 .get_eeprom = get_eeprom,
1978 .set_eeprom = set_eeprom,
1979 .get_pauseparam = get_pauseparam,
1980 .set_pauseparam = set_pauseparam,
1981 .get_link = ethtool_op_get_link,
1982 .get_strings = get_strings,
1983 .set_phys_id = identify_port,
1984 .nway_reset = restart_autoneg,
1985 .get_sset_count = get_sset_count,
1986 .get_ethtool_stats = get_stats,
1987 .get_regs_len = get_regs_len,
1988 .get_regs = get_regs,
1989 .get_wol = get_wol,
1990 .set_wol = set_wol,
1991 .get_rxnfc = get_rxnfc,
1992 .get_rxfh_indir = get_rss_table,
1993 .set_rxfh_indir = set_rss_table,
1994 .flash_device = set_flash,
1995};
1996
1997/*
1998 * debugfs support
1999 */
2000
2001static int mem_open(struct inode *inode, struct file *file)
2002{
2003 file->private_data = inode->i_private;
2004 return 0;
2005}
2006
2007static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2008 loff_t *ppos)
2009{
2010 loff_t pos = *ppos;
2011 loff_t avail = file->f_path.dentry->d_inode->i_size;
2012 unsigned int mem = (uintptr_t)file->private_data & 3;
2013 struct adapter *adap = file->private_data - mem;
2014
2015 if (pos < 0)
2016 return -EINVAL;
2017 if (pos >= avail)
2018 return 0;
2019 if (count > avail - pos)
2020 count = avail - pos;
2021
2022 while (count) {
2023 size_t len;
2024 int ret, ofst;
2025 __be32 data[16];
2026
2027 if (mem == MEM_MC)
2028 ret = t4_mc_read(adap, pos, data, NULL);
2029 else
2030 ret = t4_edc_read(adap, mem, pos, data, NULL);
2031 if (ret)
2032 return ret;
2033
2034 ofst = pos % sizeof(data);
2035 len = min(count, sizeof(data) - ofst);
2036 if (copy_to_user(buf, (u8 *)data + ofst, len))
2037 return -EFAULT;
2038
2039 buf += len;
2040 pos += len;
2041 count -= len;
2042 }
2043 count = pos - *ppos;
2044 *ppos = pos;
2045 return count;
2046}
2047
2048static const struct file_operations mem_debugfs_fops = {
2049 .owner = THIS_MODULE,
2050 .open = mem_open,
2051 .read = mem_read,
2052 .llseek = default_llseek,
2053};
2054
2055static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
2056 unsigned int idx, unsigned int size_mb)
2057{
2058 struct dentry *de;
2059
2060 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2061 (void *)adap + idx, &mem_debugfs_fops);
2062 if (de && de->d_inode)
2063 de->d_inode->i_size = size_mb << 20;
2064}
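/*
 * Editorial note (not original source): add_debugfs_mem() tags the
 * adapter pointer with the memory index ("(void *)adap + idx"), relying
 * on the adapter structure being at least 4-byte aligned.  mem_read()
 * recovers the index with "& 3" and the adapter pointer by subtracting
 * it back out, so a single file_operations serves EDC0, EDC1 and MC.
 */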
2065
2066static int __devinit setup_debugfs(struct adapter *adap)
2067{
2068 int i;
2069
2070 if (IS_ERR_OR_NULL(adap->debugfs_root))
2071 return -1;
2072
2073 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2074 if (i & EDRAM0_ENABLE)
2075 add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
2076 if (i & EDRAM1_ENABLE)
2077 add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
2078 if (i & EXT_MEM_ENABLE)
2079 add_debugfs_mem(adap, "mc", MEM_MC,
2080 EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
2081 if (adap->l2t)
2082 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2083 &t4_l2t_fops);
2084 return 0;
2085}
2086
2087/*
2088 * upper-layer driver support
2089 */
2090
2091/*
2092 * Allocate an active-open TID and set it to the supplied value.
2093 */
2094int cxgb4_alloc_atid(struct tid_info *t, void *data)
2095{
2096 int atid = -1;
2097
2098 spin_lock_bh(&t->atid_lock);
2099 if (t->afree) {
2100 union aopen_entry *p = t->afree;
2101
2102 atid = p - t->atid_tab;
2103 t->afree = p->next;
2104 p->data = data;
2105 t->atids_in_use++;
2106 }
2107 spin_unlock_bh(&t->atid_lock);
2108 return atid;
2109}
2110EXPORT_SYMBOL(cxgb4_alloc_atid);
2111
2112/*
2113 * Release an active-open TID.
2114 */
2115void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2116{
2117 union aopen_entry *p = &t->atid_tab[atid];
2118
2119 spin_lock_bh(&t->atid_lock);
2120 p->next = t->afree;
2121 t->afree = p;
2122 t->atids_in_use--;
2123 spin_unlock_bh(&t->atid_lock);
2124}
2125EXPORT_SYMBOL(cxgb4_free_atid);
2126
2127/*
2128 * Allocate a server TID and set it to the supplied value.
2129 */
2130int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2131{
2132 int stid;
2133
2134 spin_lock_bh(&t->stid_lock);
2135 if (family == PF_INET) {
2136 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
2137 if (stid < t->nstids)
2138 __set_bit(stid, t->stid_bmap);
2139 else
2140 stid = -1;
2141 } else {
2142 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
2143 if (stid < 0)
2144 stid = -1;
2145 }
2146 if (stid >= 0) {
2147 t->stid_tab[stid].data = data;
2148 stid += t->stid_base;
2149 t->stids_in_use++;
2150 }
2151 spin_unlock_bh(&t->stid_lock);
2152 return stid;
2153}
2154EXPORT_SYMBOL(cxgb4_alloc_stid);
2155
2156/*
2157 * Release a server TID.
2158 */
2159void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
2160{
2161 stid -= t->stid_base;
2162 spin_lock_bh(&t->stid_lock);
2163 if (family == PF_INET)
2164 __clear_bit(stid, t->stid_bmap);
2165 else
2166 bitmap_release_region(t->stid_bmap, stid, 2);
2167 t->stid_tab[stid].data = NULL;
2168 t->stids_in_use--;
2169 spin_unlock_bh(&t->stid_lock);
2170}
2171EXPORT_SYMBOL(cxgb4_free_stid);
2172
2173/*
2174 * Populate a TID_RELEASE WR. Caller must properly size the skb.
2175 */
2176static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
2177 unsigned int tid)
2178{
2179 struct cpl_tid_release *req;
2180
2181 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
2182 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
2183 INIT_TP_WR(req, tid);
2184 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
2185}
2186
2187/*
2188 * Queue a TID release request and, if necessary, schedule a work queue
2189 * to process it.
2190 */
2191static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
2192 unsigned int tid)
2193{
2194 void **p = &t->tid_tab[tid];
2195 struct adapter *adap = container_of(t, struct adapter, tids);
2196
2197 spin_lock_bh(&adap->tid_release_lock);
2198 *p = adap->tid_release_head;
2199 /* Low 2 bits encode the Tx channel number */
2200 adap->tid_release_head = (void **)((uintptr_t)p | chan);
2201 if (!adap->tid_release_task_busy) {
2202 adap->tid_release_task_busy = true;
2203 schedule_work(&adap->tid_release_task);
2204 }
2205 spin_unlock_bh(&adap->tid_release_lock);
2206}
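/*
 * Editorial note (not original source): tid_release_head is a singly
 * linked list threaded through the tid_tab[] slots themselves.  Because
 * those slots are pointer sized (at least 4-byte aligned), the low two
 * bits of each list pointer are free and are used to carry the Tx
 * channel; process_tid_release_list() recovers chan with "& 3" and the
 * slot address with "p - chan" before building the CPL_TID_RELEASE.
 */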
2207
2208/*
2209 * Process the list of pending TID release requests.
2210 */
2211static void process_tid_release_list(struct work_struct *work)
2212{
2213 struct sk_buff *skb;
2214 struct adapter *adap;
2215
2216 adap = container_of(work, struct adapter, tid_release_task);
2217
2218 spin_lock_bh(&adap->tid_release_lock);
2219 while (adap->tid_release_head) {
2220 void **p = adap->tid_release_head;
2221 unsigned int chan = (uintptr_t)p & 3;
2222 p = (void *)p - chan;
2223
2224 adap->tid_release_head = *p;
2225 *p = NULL;
2226 spin_unlock_bh(&adap->tid_release_lock);
2227
2228 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
2229 GFP_KERNEL)))
2230 schedule_timeout_uninterruptible(1);
2231
2232 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
2233 t4_ofld_send(adap, skb);
2234 spin_lock_bh(&adap->tid_release_lock);
2235 }
2236 adap->tid_release_task_busy = false;
2237 spin_unlock_bh(&adap->tid_release_lock);
2238}
2239
2240/*
2241 * Release a TID and inform HW. If we are unable to allocate the release
2242 * message we defer to a work queue.
2243 */
2244void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
2245{
2246 void *old;
2247 struct sk_buff *skb;
2248 struct adapter *adap = container_of(t, struct adapter, tids);
2249
2250 old = t->tid_tab[tid];
2251 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
2252 if (likely(skb)) {
2253 t->tid_tab[tid] = NULL;
2254 mk_tid_release(skb, chan, tid);
2255 t4_ofld_send(adap, skb);
2256 } else
2257 cxgb4_queue_tid_release(t, chan, tid);
2258 if (old)
2259 atomic_dec(&t->tids_in_use);
2260}
2261EXPORT_SYMBOL(cxgb4_remove_tid);
2262
2263/*
2264 * Allocate and initialize the TID tables. Returns 0 on success.
2265 */
2266static int tid_init(struct tid_info *t)
2267{
2268 size_t size;
2269 unsigned int natids = t->natids;
2270
2271 size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
2272 t->nstids * sizeof(*t->stid_tab) +
2273 BITS_TO_LONGS(t->nstids) * sizeof(long);
2274 t->tid_tab = t4_alloc_mem(size);
2275 if (!t->tid_tab)
2276 return -ENOMEM;
2277
2278 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
2279 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
2280 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
2281 spin_lock_init(&t->stid_lock);
2282 spin_lock_init(&t->atid_lock);
2283
2284 t->stids_in_use = 0;
2285 t->afree = NULL;
2286 t->atids_in_use = 0;
2287 atomic_set(&t->tids_in_use, 0);
2288
2289 /* Setup the free list for atid_tab and clear the stid bitmap. */
2290 if (natids) {
2291 while (--natids)
2292 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
2293 t->afree = t->atid_tab;
2294 }
2295 bitmap_zero(t->stid_bmap, t->nstids);
2296 return 0;
2297}
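/*
 * Illustrative layout of the single allocation made by tid_init()
 * (editorial annotation, not original source):
 *
 *	tid_tab[0 .. ntids-1]     void *              connection TIDs
 *	atid_tab[0 .. natids-1]   union aopen_entry   active-open TIDs
 *	stid_tab[0 .. nstids-1]   struct serv_entry   server TIDs
 *	stid_bmap                 BITS_TO_LONGS(nstids) longs
 */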
2298
2299/**
2300 * cxgb4_create_server - create an IP server
2301 * @dev: the device
2302 * @stid: the server TID
2303 * @sip: local IP address to bind server to
2304 * @sport: the server's TCP port
2305 * @queue: queue to direct messages from this server to
2306 *
2307 * Create an IP server for the given port and address.
2308 * Returns <0 on error and one of the %NET_XMIT_* values on success.
2309 */
2310int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
2311 __be32 sip, __be16 sport, unsigned int queue)
2312{
2313 unsigned int chan;
2314 struct sk_buff *skb;
2315 struct adapter *adap;
2316 struct cpl_pass_open_req *req;
2317
2318 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2319 if (!skb)
2320 return -ENOMEM;
2321
2322 adap = netdev2adap(dev);
2323 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
2324 INIT_TP_WR(req, 0);
2325 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
2326 req->local_port = sport;
2327 req->peer_port = htons(0);
2328 req->local_ip = sip;
2329 req->peer_ip = htonl(0);
2330 chan = rxq_to_chan(&adap->sge, queue);
2331 req->opt0 = cpu_to_be64(TX_CHAN(chan));
2332 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2333 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2334 return t4_mgmt_tx(adap, skb);
2335}
2336EXPORT_SYMBOL(cxgb4_create_server);
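/*
 * Hedged usage sketch (editorial, not original source; "lli",
 * "my_server_ctx" and the port/queue/port-number values are assumptions):
 * an upper-layer driver handed a struct cxgb4_lld_info at attach time
 * might create a listener roughly as follows:
 *
 *	int stid = cxgb4_alloc_stid(lli->tids, PF_INET, my_server_ctx);
 *
 *	if (stid >= 0)
 *		err = cxgb4_create_server(lli->ports[0], stid,
 *					  htonl(INADDR_ANY), htons(5001),
 *					  lli->rxq_ids[0]);
 */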
2337
2338/**
2339 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2340 * @mtus: the HW MTU table
2341 * @mtu: the target MTU
2342 * @idx: index of selected entry in the MTU table
2343 *
2344 * Returns the index and the value in the HW MTU table that is closest to
2345 * but does not exceed @mtu, unless @mtu is smaller than any value in the
2346 * table, in which case that smallest available value is selected.
2347 */
2348unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2349 unsigned int *idx)
2350{
2351 unsigned int i = 0;
2352
2353 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2354 ++i;
2355 if (idx)
2356 *idx = i;
2357 return mtus[i];
2358}
2359EXPORT_SYMBOL(cxgb4_best_mtu);
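/*
 * Worked example (editorial annotation; the table contents are an
 * assumption): if the HW MTU table contained ..., 1400, 1500, ..., then
 * cxgb4_best_mtu(mtus, 1460, &idx) would return 1400 with idx pointing
 * at its slot, while any target smaller than mtus[0] returns mtus[0]
 * with idx == 0.
 */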
2360
2361/**
2362 * cxgb4_port_chan - get the HW channel of a port
2363 * @dev: the net device for the port
2364 *
2365 * Return the HW Tx channel of the given port.
2366 */
2367unsigned int cxgb4_port_chan(const struct net_device *dev)
2368{
2369 return netdev2pinfo(dev)->tx_chan;
2370}
2371EXPORT_SYMBOL(cxgb4_port_chan);
2372
2373/**
2374 * cxgb4_port_viid - get the VI id of a port
2375 * @dev: the net device for the port
2376 *
2377 * Return the VI id of the given port.
2378 */
2379unsigned int cxgb4_port_viid(const struct net_device *dev)
2380{
2381 return netdev2pinfo(dev)->viid;
2382}
2383EXPORT_SYMBOL(cxgb4_port_viid);
2384
2385/**
2386 * cxgb4_port_idx - get the index of a port
2387 * @dev: the net device for the port
2388 *
2389 * Return the index of the given port.
2390 */
2391unsigned int cxgb4_port_idx(const struct net_device *dev)
2392{
2393 return netdev2pinfo(dev)->port_id;
2394}
2395EXPORT_SYMBOL(cxgb4_port_idx);
2396
2397void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2398 struct tp_tcp_stats *v6)
2399{
2400 struct adapter *adap = pci_get_drvdata(pdev);
2401
2402 spin_lock(&adap->stats_lock);
2403 t4_tp_get_tcp_stats(adap, v4, v6);
2404 spin_unlock(&adap->stats_lock);
2405}
2406EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2407
2408void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2409 const unsigned int *pgsz_order)
2410{
2411 struct adapter *adap = netdev2adap(dev);
2412
2413 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
2414 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
2415 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
2416 HPZ3(pgsz_order[3]));
2417}
2418EXPORT_SYMBOL(cxgb4_iscsi_init);
2419
2420static struct pci_driver cxgb4_driver;
2421
2422static void check_neigh_update(struct neighbour *neigh)
2423{
2424 const struct device *parent;
2425 const struct net_device *netdev = neigh->dev;
2426
2427 if (netdev->priv_flags & IFF_802_1Q_VLAN)
2428 netdev = vlan_dev_real_dev(netdev);
2429 parent = netdev->dev.parent;
2430 if (parent && parent->driver == &cxgb4_driver.driver)
2431 t4_l2t_update(dev_get_drvdata(parent), neigh);
2432}
2433
2434static int netevent_cb(struct notifier_block *nb, unsigned long event,
2435 void *data)
2436{
2437 switch (event) {
2438 case NETEVENT_NEIGH_UPDATE:
2439 check_neigh_update(data);
2440 break;
2441 case NETEVENT_REDIRECT:
2442 default:
2443 break;
2444 }
2445 return 0;
2446}
2447
2448static bool netevent_registered;
2449static struct notifier_block cxgb4_netevent_nb = {
2450 .notifier_call = netevent_cb
2451};
2452
2453static void uld_attach(struct adapter *adap, unsigned int uld)
2454{
2455 void *handle;
2456 struct cxgb4_lld_info lli;
2457
2458 lli.pdev = adap->pdev;
2459 lli.l2t = adap->l2t;
2460 lli.tids = &adap->tids;
2461 lli.ports = adap->port;
2462 lli.vr = &adap->vres;
2463 lli.mtus = adap->params.mtus;
2464 if (uld == CXGB4_ULD_RDMA) {
2465 lli.rxq_ids = adap->sge.rdma_rxq;
2466 lli.nrxq = adap->sge.rdmaqs;
2467 } else if (uld == CXGB4_ULD_ISCSI) {
2468 lli.rxq_ids = adap->sge.ofld_rxq;
2469 lli.nrxq = adap->sge.ofldqsets;
2470 }
2471 lli.ntxq = adap->sge.ofldqsets;
2472 lli.nchan = adap->params.nports;
2473 lli.nports = adap->params.nports;
2474 lli.wr_cred = adap->params.ofldq_wr_cred;
2475 lli.adapter_type = adap->params.rev;
2476 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
2477 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
2478 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
2479 (adap->fn * 4));
2480 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
2481 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
2482 (adap->fn * 4));
2483 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
2484 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
2485 lli.fw_vers = adap->params.fw_vers;
2486
2487 handle = ulds[uld].add(&lli);
2488 if (IS_ERR(handle)) {
2489 dev_warn(adap->pdev_dev,
2490 "could not attach to the %s driver, error %ld\n",
2491 uld_str[uld], PTR_ERR(handle));
2492 return;
2493 }
2494
2495 adap->uld_handle[uld] = handle;
2496
2497 if (!netevent_registered) {
2498 register_netevent_notifier(&cxgb4_netevent_nb);
2499 netevent_registered = true;
2500 }
2501
2502 if (adap->flags & FULL_INIT_DONE)
2503 ulds[uld].state_change(handle, CXGB4_STATE_UP);
2504}
2505
2506static void attach_ulds(struct adapter *adap)
2507{
2508 unsigned int i;
2509
2510 mutex_lock(&uld_mutex);
2511 list_add_tail(&adap->list_node, &adapter_list);
2512 for (i = 0; i < CXGB4_ULD_MAX; i++)
2513 if (ulds[i].add)
2514 uld_attach(adap, i);
2515 mutex_unlock(&uld_mutex);
2516}
2517
2518static void detach_ulds(struct adapter *adap)
2519{
2520 unsigned int i;
2521
2522 mutex_lock(&uld_mutex);
2523 list_del(&adap->list_node);
2524 for (i = 0; i < CXGB4_ULD_MAX; i++)
2525 if (adap->uld_handle[i]) {
2526 ulds[i].state_change(adap->uld_handle[i],
2527 CXGB4_STATE_DETACH);
2528 adap->uld_handle[i] = NULL;
2529 }
2530 if (netevent_registered && list_empty(&adapter_list)) {
2531 unregister_netevent_notifier(&cxgb4_netevent_nb);
2532 netevent_registered = false;
2533 }
2534 mutex_unlock(&uld_mutex);
2535}
2536
2537static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2538{
2539 unsigned int i;
2540
2541 mutex_lock(&uld_mutex);
2542 for (i = 0; i < CXGB4_ULD_MAX; i++)
2543 if (adap->uld_handle[i])
2544 ulds[i].state_change(adap->uld_handle[i], new_state);
2545 mutex_unlock(&uld_mutex);
2546}
2547
2548/**
2549 * cxgb4_register_uld - register an upper-layer driver
2550 * @type: the ULD type
2551 * @p: the ULD methods
2552 *
2553 * Registers an upper-layer driver with this driver and notifies the ULD
2554 * about any presently available devices that support its type. Returns
2555 * %-EBUSY if a ULD of the same type is already registered.
2556 */
2557int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
2558{
2559 int ret = 0;
2560 struct adapter *adap;
2561
2562 if (type >= CXGB4_ULD_MAX)
2563 return -EINVAL;
2564 mutex_lock(&uld_mutex);
2565 if (ulds[type].add) {
2566 ret = -EBUSY;
2567 goto out;
2568 }
2569 ulds[type] = *p;
2570 list_for_each_entry(adap, &adapter_list, list_node)
2571 uld_attach(adap, type);
2572out: mutex_unlock(&uld_mutex);
2573 return ret;
2574}
2575EXPORT_SYMBOL(cxgb4_register_uld);
2576
2577/**
2578 * cxgb4_unregister_uld - unregister an upper-layer driver
2579 * @type: the ULD type
2580 *
2581 * Unregisters an existing upper-layer driver.
2582 */
2583int cxgb4_unregister_uld(enum cxgb4_uld type)
2584{
2585 struct adapter *adap;
2586
2587 if (type >= CXGB4_ULD_MAX)
2588 return -EINVAL;
2589 mutex_lock(&uld_mutex);
2590 list_for_each_entry(adap, &adapter_list, list_node)
2591 adap->uld_handle[type] = NULL;
2592 ulds[type].add = NULL;
2593 mutex_unlock(&uld_mutex);
2594 return 0;
2595}
2596EXPORT_SYMBOL(cxgb4_unregister_uld);
2597
2598/**
2599 * cxgb_up - enable the adapter
2600 * @adap: adapter being enabled
2601 *
2602 * Called when the first port is enabled, this function performs the
2603 * actions necessary to make an adapter operational, such as completing
2604 * the initialization of HW modules, and enabling interrupts.
2605 *
2606 * Must be called with the rtnl lock held.
2607 */
2608static int cxgb_up(struct adapter *adap)
2609{
2610 int err;
2611
2612 err = setup_sge_queues(adap);
2613 if (err)
2614 goto out;
2615 err = setup_rss(adap);
2616 if (err)
2617 goto freeq;
2618
2619 if (adap->flags & USING_MSIX) {
2620 name_msix_vecs(adap);
2621 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
2622 adap->msix_info[0].desc, adap);
2623 if (err)
2624 goto irq_err;
2625
2626 err = request_msix_queue_irqs(adap);
2627 if (err) {
2628 free_irq(adap->msix_info[0].vec, adap);
2629 goto irq_err;
2630 }
2631 } else {
2632 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2633 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
2634 adap->port[0]->name, adap);
2635 if (err)
2636 goto irq_err;
2637 }
2638 enable_rx(adap);
2639 t4_sge_start(adap);
2640 t4_intr_enable(adap);
2641 adap->flags |= FULL_INIT_DONE;
2642 notify_ulds(adap, CXGB4_STATE_UP);
2643 out:
2644 return err;
2645 irq_err:
2646 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2647 freeq:
2648 t4_free_sge_resources(adap);
2649 goto out;
2650}
2651
2652static void cxgb_down(struct adapter *adapter)
2653{
2654 t4_intr_disable(adapter);
2655 cancel_work_sync(&adapter->tid_release_task);
2656 adapter->tid_release_task_busy = false;
2657 adapter->tid_release_head = NULL;
2658
2659 if (adapter->flags & USING_MSIX) {
2660 free_msix_queue_irqs(adapter);
2661 free_irq(adapter->msix_info[0].vec, adapter);
2662 } else
2663 free_irq(adapter->pdev->irq, adapter);
2664 quiesce_rx(adapter);
2665 t4_sge_stop(adapter);
2666 t4_free_sge_resources(adapter);
2667 adapter->flags &= ~FULL_INIT_DONE;
2668}
2669
2670/*
2671 * net_device operations
2672 */
2673static int cxgb_open(struct net_device *dev)
2674{
2675 int err;
2676 struct port_info *pi = netdev_priv(dev);
2677 struct adapter *adapter = pi->adapter;
2678
2679 netif_carrier_off(dev);
2680
2681 if (!(adapter->flags & FULL_INIT_DONE)) {
2682 err = cxgb_up(adapter);
2683 if (err < 0)
2684 return err;
2685 }
2686
2687 err = link_start(dev);
2688 if (!err)
2689 netif_tx_start_all_queues(dev);
2690 return err;
2691}
2692
2693static int cxgb_close(struct net_device *dev)
2694{
2695 struct port_info *pi = netdev_priv(dev);
2696 struct adapter *adapter = pi->adapter;
2697
2698 netif_tx_stop_all_queues(dev);
2699 netif_carrier_off(dev);
2700 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
2701}
2702
2703static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
2704 struct rtnl_link_stats64 *ns)
2705{
2706 struct port_stats stats;
2707 struct port_info *p = netdev_priv(dev);
2708 struct adapter *adapter = p->adapter;
2709
2710 spin_lock(&adapter->stats_lock);
2711 t4_get_port_stats(adapter, p->tx_chan, &stats);
2712 spin_unlock(&adapter->stats_lock);
2713
2714 ns->tx_bytes = stats.tx_octets;
2715 ns->tx_packets = stats.tx_frames;
2716 ns->rx_bytes = stats.rx_octets;
2717 ns->rx_packets = stats.rx_frames;
2718 ns->multicast = stats.rx_mcast_frames;
2719
2720 /* detailed rx_errors */
2721 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
2722 stats.rx_runt;
2723 ns->rx_over_errors = 0;
2724 ns->rx_crc_errors = stats.rx_fcs_err;
2725 ns->rx_frame_errors = stats.rx_symbol_err;
2726 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
2727 stats.rx_ovflow2 + stats.rx_ovflow3 +
2728 stats.rx_trunc0 + stats.rx_trunc1 +
2729 stats.rx_trunc2 + stats.rx_trunc3;
2730 ns->rx_missed_errors = 0;
2731
2732 /* detailed tx_errors */
2733 ns->tx_aborted_errors = 0;
2734 ns->tx_carrier_errors = 0;
2735 ns->tx_fifo_errors = 0;
2736 ns->tx_heartbeat_errors = 0;
2737 ns->tx_window_errors = 0;
2738
2739 ns->tx_errors = stats.tx_error_frames;
2740 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
2741 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
2742 return ns;
2743}
2744
2745static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2746{
2747 unsigned int mbox;
2748 int ret = 0, prtad, devad;
2749 struct port_info *pi = netdev_priv(dev);
2750 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
2751
2752 switch (cmd) {
2753 case SIOCGMIIPHY:
2754 if (pi->mdio_addr < 0)
2755 return -EOPNOTSUPP;
2756 data->phy_id = pi->mdio_addr;
2757 break;
2758 case SIOCGMIIREG:
2759 case SIOCSMIIREG:
2760 if (mdio_phy_id_is_c45(data->phy_id)) {
2761 prtad = mdio_phy_id_prtad(data->phy_id);
2762 devad = mdio_phy_id_devad(data->phy_id);
2763 } else if (data->phy_id < 32) {
2764 prtad = data->phy_id;
2765 devad = 0;
2766 data->reg_num &= 0x1f;
2767 } else
2768 return -EINVAL;
2769
2770 mbox = pi->adapter->fn;
2771 if (cmd == SIOCGMIIREG)
2772 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
2773 data->reg_num, &data->val_out);
2774 else
2775 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
2776 data->reg_num, data->val_in);
2777 break;
2778 default:
2779 return -EOPNOTSUPP;
2780 }
2781 return ret;
2782}
2783
2784static void cxgb_set_rxmode(struct net_device *dev)
2785{
2786 /* unfortunately we can't return errors to the stack */
2787 set_rxmode(dev, -1, false);
2788}
2789
2790static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2791{
2792 int ret;
2793 struct port_info *pi = netdev_priv(dev);
2794
2795 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
2796 return -EINVAL;
2797 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
2798 -1, -1, -1, true);
2799 if (!ret)
2800 dev->mtu = new_mtu;
2801 return ret;
2802}
2803
2804static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2805{
2806 int ret;
2807 struct sockaddr *addr = p;
2808 struct port_info *pi = netdev_priv(dev);
2809
2810 if (!is_valid_ether_addr(addr->sa_data))
2811 return -EINVAL;
2812
2813 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
2814 pi->xact_addr_filt, addr->sa_data, true, true);
2815 if (ret < 0)
2816 return ret;
2817
2818 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2819 pi->xact_addr_filt = ret;
2820 return 0;
2821}
2822
2823#ifdef CONFIG_NET_POLL_CONTROLLER
2824static void cxgb_netpoll(struct net_device *dev)
2825{
2826 struct port_info *pi = netdev_priv(dev);
2827 struct adapter *adap = pi->adapter;
2828
2829 if (adap->flags & USING_MSIX) {
2830 int i;
2831 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
2832
2833 for (i = pi->nqsets; i; i--, rx++)
2834 t4_sge_intr_msix(0, &rx->rspq);
2835 } else
2836 t4_intr_handler(adap)(0, adap);
2837}
2838#endif
2839
2840static const struct net_device_ops cxgb4_netdev_ops = {
2841 .ndo_open = cxgb_open,
2842 .ndo_stop = cxgb_close,
2843 .ndo_start_xmit = t4_eth_xmit,
2844 .ndo_get_stats64 = cxgb_get_stats,
2845 .ndo_set_rx_mode = cxgb_set_rxmode,
2846 .ndo_set_mac_address = cxgb_set_mac_addr,
2847 .ndo_set_features = cxgb_set_features,
2848 .ndo_validate_addr = eth_validate_addr,
2849 .ndo_do_ioctl = cxgb_ioctl,
2850 .ndo_change_mtu = cxgb_change_mtu,
2851#ifdef CONFIG_NET_POLL_CONTROLLER
2852 .ndo_poll_controller = cxgb_netpoll,
2853#endif
2854};
2855
2856void t4_fatal_err(struct adapter *adap)
2857{
2858 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
2859 t4_intr_disable(adap);
2860 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
2861}
2862
2863static void setup_memwin(struct adapter *adap)
2864{
2865 u32 bar0;
2866
2867 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
2868 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
2869 (bar0 + MEMWIN0_BASE) | BIR(0) |
2870 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
2871 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
2872 (bar0 + MEMWIN1_BASE) | BIR(0) |
2873 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
2874 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
2875 (bar0 + MEMWIN2_BASE) | BIR(0) |
2876 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
2877 if (adap->vres.ocq.size) {
2878 unsigned int start, sz_kb;
2879
2880 start = pci_resource_start(adap->pdev, 2) +
2881 OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
2882 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
2883 t4_write_reg(adap,
2884 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
2885 start | BIR(1) | WINDOW(ilog2(sz_kb)));
2886 t4_write_reg(adap,
2887 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
2888 adap->vres.ocq.start);
2889 t4_read_reg(adap,
2890 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
2891 }
2892}
2893
2894static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
2895{
2896 u32 v;
2897 int ret;
2898
2899 /* get device capabilities */
2900 memset(c, 0, sizeof(*c));
2901 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2902 FW_CMD_REQUEST | FW_CMD_READ);
2903 c->retval_len16 = htonl(FW_LEN16(*c));
2904 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
2905 if (ret < 0)
2906 return ret;
2907
2908 /* select capabilities we'll be using */
2909 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
2910 if (!vf_acls)
2911 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
2912 else
2913 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
2914 } else if (vf_acls) {
2915 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
2916 return ret;
2917 }
2918 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2919 FW_CMD_REQUEST | FW_CMD_WRITE);
2920 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
2921 if (ret < 0)
2922 return ret;
2923
2924 ret = t4_config_glbl_rss(adap, adap->fn,
2925 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
2926 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
2927 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
2928 if (ret < 0)
2929 return ret;
2930
2931 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
2932 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
2933 if (ret < 0)
2934 return ret;
2935
2936 t4_sge_init(adap);
2937
2938 /* tweak some settings */
2939 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
2940 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
2941 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
2942 v = t4_read_reg(adap, TP_PIO_DATA);
2943 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
2944
2945 /* get basic stuff going */
2946 return t4_early_init(adap, adap->fn);
2947}
2948
2949/*
2950 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
2951 */
2952#define MAX_ATIDS 8192U
2953
2954/*
2955 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
2956 */
2957static int adap_init0(struct adapter *adap)
2958{
2959 int ret;
2960 u32 v, port_vec;
2961 enum dev_state state;
2962 u32 params[7], val[7];
2963 struct fw_caps_config_cmd c;
2964
2965 ret = t4_check_fw_version(adap);
2966 if (ret == -EINVAL || ret > 0) {
2967 if (upgrade_fw(adap) >= 0) /* recache FW version */
2968 ret = t4_check_fw_version(adap);
2969 }
2970 if (ret < 0)
2971 return ret;
2972
2973 /* contact FW, request master */
2974 ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state);
2975 if (ret < 0) {
2976 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
2977 ret);
2978 return ret;
2979 }
2980
2981 /* reset device */
2982 ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST);
2983 if (ret < 0)
2984 goto bye;
2985
2986 for (v = 0; v < SGE_NTIMERS - 1; v++)
2987 adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
2988 adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
2989 adap->sge.counter_val[0] = 1;
2990 for (v = 1; v < SGE_NCOUNTERS; v++)
2991 adap->sge.counter_val[v] = min(intr_cnt[v - 1],
2992 THRESHOLD_3_MASK);
2993#define FW_PARAM_DEV(param) \
2994 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2995 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2996
2997 params[0] = FW_PARAM_DEV(CCLK);
2998 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val);
2999 if (ret < 0)
3000 goto bye;
3001 adap->params.vpd.cclk = val[0];
3002
3003 ret = adap_init1(adap, &c);
3004 if (ret < 0)
3005 goto bye;
3006
3007#define FW_PARAM_PFVF(param) \
3008 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3009 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
3010 FW_PARAMS_PARAM_Y(adap->fn))
3011
3012 params[0] = FW_PARAM_DEV(PORTVEC);
3013 params[1] = FW_PARAM_PFVF(L2T_START);
3014 params[2] = FW_PARAM_PFVF(L2T_END);
3015 params[3] = FW_PARAM_PFVF(FILTER_START);
3016 params[4] = FW_PARAM_PFVF(FILTER_END);
3017 params[5] = FW_PARAM_PFVF(IQFLINT_START);
3018 params[6] = FW_PARAM_PFVF(EQ_START);
3019 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val);
3020 if (ret < 0)
3021 goto bye;
3022 port_vec = val[0];
3023 adap->tids.ftid_base = val[3];
3024 adap->tids.nftids = val[4] - val[3] + 1;
3025 adap->sge.ingr_start = val[5];
3026 adap->sge.egr_start = val[6];
3027
3028 if (c.ofldcaps) {
3029 /* query offload-related parameters */
3030 params[0] = FW_PARAM_DEV(NTID);
3031 params[1] = FW_PARAM_PFVF(SERVER_START);
3032 params[2] = FW_PARAM_PFVF(SERVER_END);
3033 params[3] = FW_PARAM_PFVF(TDDP_START);
3034 params[4] = FW_PARAM_PFVF(TDDP_END);
3035 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3036 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3037 val);
3038 if (ret < 0)
3039 goto bye;
3040 adap->tids.ntids = val[0];
3041 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
3042 adap->tids.stid_base = val[1];
3043 adap->tids.nstids = val[2] - val[1] + 1;
3044 adap->vres.ddp.start = val[3];
3045 adap->vres.ddp.size = val[4] - val[3] + 1;
3046 adap->params.ofldq_wr_cred = val[5];
3047 adap->params.offload = 1;
3048 }
3049 if (c.rdmacaps) {
3050 params[0] = FW_PARAM_PFVF(STAG_START);
3051 params[1] = FW_PARAM_PFVF(STAG_END);
3052 params[2] = FW_PARAM_PFVF(RQ_START);
3053 params[3] = FW_PARAM_PFVF(RQ_END);
3054 params[4] = FW_PARAM_PFVF(PBL_START);
3055 params[5] = FW_PARAM_PFVF(PBL_END);
3056 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3057 val);
3058 if (ret < 0)
3059 goto bye;
3060 adap->vres.stag.start = val[0];
3061 adap->vres.stag.size = val[1] - val[0] + 1;
3062 adap->vres.rq.start = val[2];
3063 adap->vres.rq.size = val[3] - val[2] + 1;
3064 adap->vres.pbl.start = val[4];
3065 adap->vres.pbl.size = val[5] - val[4] + 1;
3066
3067 params[0] = FW_PARAM_PFVF(SQRQ_START);
3068 params[1] = FW_PARAM_PFVF(SQRQ_END);
3069 params[2] = FW_PARAM_PFVF(CQ_START);
3070 params[3] = FW_PARAM_PFVF(CQ_END);
3071 params[4] = FW_PARAM_PFVF(OCQ_START);
3072 params[5] = FW_PARAM_PFVF(OCQ_END);
3073 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3074 val);
3075 if (ret < 0)
3076 goto bye;
3077 adap->vres.qp.start = val[0];
3078 adap->vres.qp.size = val[1] - val[0] + 1;
3079 adap->vres.cq.start = val[2];
3080 adap->vres.cq.size = val[3] - val[2] + 1;
3081 adap->vres.ocq.start = val[4];
3082 adap->vres.ocq.size = val[5] - val[4] + 1;
3083 }
3084 if (c.iscsicaps) {
3085 params[0] = FW_PARAM_PFVF(ISCSI_START);
3086 params[1] = FW_PARAM_PFVF(ISCSI_END);
3087 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params,
3088 val);
3089 if (ret < 0)
3090 goto bye;
3091 adap->vres.iscsi.start = val[0];
3092 adap->vres.iscsi.size = val[1] - val[0] + 1;
3093 }
3094#undef FW_PARAM_PFVF
3095#undef FW_PARAM_DEV
3096
3097 adap->params.nports = hweight32(port_vec);
3098 adap->params.portvec = port_vec;
3099 adap->flags |= FW_OK;
3100
3101	/* These are finalized by FW initialization; load their values now */
3102 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
3103 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
3104 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
3105 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3106 adap->params.b_wnd);
3107
3108#ifdef CONFIG_PCI_IOV
3109 /*
3110 * Provision resource limits for Virtual Functions. We currently
3111 * grant them all the same static resource limits except for the Port
3112 * Access Rights Mask which we're assigning based on the PF. All of
3113 * the static provisioning stuff for both the PF and VF really needs
3114 * to be managed in a persistent manner for each device which the
3115 * firmware controls.
3116 */
3117 {
3118 int pf, vf;
3119
3120 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
3121 if (num_vf[pf] <= 0)
3122 continue;
3123
3124 /* VF numbering starts at 1! */
3125 for (vf = 1; vf <= num_vf[pf]; vf++) {
3126 ret = t4_cfg_pfvf(adap, adap->fn, pf, vf,
3127 VFRES_NEQ, VFRES_NETHCTRL,
3128 VFRES_NIQFLINT, VFRES_NIQ,
3129 VFRES_TC, VFRES_NVI,
3130 FW_PFVF_CMD_CMASK_MASK,
3131 pfvfres_pmask(adap, pf, vf),
3132 VFRES_NEXACTF,
3133 VFRES_R_CAPS, VFRES_WX_CAPS);
3134 if (ret < 0)
3135 dev_warn(adap->pdev_dev, "failed to "
3136 "provision pf/vf=%d/%d; "
3137 "err=%d\n", pf, vf, ret);
3138 }
3139 }
3140 }
3141#endif
3142
3143 setup_memwin(adap);
3144 return 0;
3145
3146	/*
3147	 * If a command timed out or failed with EIO, the FW is not operating
3148	 * within its spec or something catastrophic happened to the HW/FW;
3149	 * stop issuing commands.
3150	 */
3151bye: if (ret != -ETIMEDOUT && ret != -EIO)
3152 t4_fw_bye(adap, adap->fn);
3153 return ret;
3154}
3155
3156/* EEH callbacks */
3157
3158static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
3159 pci_channel_state_t state)
3160{
3161 int i;
3162 struct adapter *adap = pci_get_drvdata(pdev);
3163
3164 if (!adap)
3165 goto out;
3166
3167 rtnl_lock();
3168 adap->flags &= ~FW_OK;
3169 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
3170 for_each_port(adap, i) {
3171 struct net_device *dev = adap->port[i];
3172
3173 netif_device_detach(dev);
3174 netif_carrier_off(dev);
3175 }
3176 if (adap->flags & FULL_INIT_DONE)
3177 cxgb_down(adap);
3178 rtnl_unlock();
3179 pci_disable_device(pdev);
3180out: return state == pci_channel_io_perm_failure ?
3181 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
3182}
3183
3184static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
3185{
3186 int i, ret;
3187 struct fw_caps_config_cmd c;
3188 struct adapter *adap = pci_get_drvdata(pdev);
3189
3190 if (!adap) {
3191 pci_restore_state(pdev);
3192 pci_save_state(pdev);
3193 return PCI_ERS_RESULT_RECOVERED;
3194 }
3195
3196 if (pci_enable_device(pdev)) {
3197 dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
3198 return PCI_ERS_RESULT_DISCONNECT;
3199 }
3200
3201 pci_set_master(pdev);
3202 pci_restore_state(pdev);
3203 pci_save_state(pdev);
3204 pci_cleanup_aer_uncorrect_error_status(pdev);
3205
3206 if (t4_wait_dev_ready(adap) < 0)
3207 return PCI_ERS_RESULT_DISCONNECT;
3208 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
3209 return PCI_ERS_RESULT_DISCONNECT;
3210 adap->flags |= FW_OK;
3211 if (adap_init1(adap, &c))
3212 return PCI_ERS_RESULT_DISCONNECT;
3213
3214 for_each_port(adap, i) {
3215 struct port_info *p = adap2pinfo(adap, i);
3216
3217 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
3218 NULL, NULL);
3219 if (ret < 0)
3220 return PCI_ERS_RESULT_DISCONNECT;
3221 p->viid = ret;
3222 p->xact_addr_filt = -1;
3223 }
3224
3225 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3226 adap->params.b_wnd);
3227 setup_memwin(adap);
3228 if (cxgb_up(adap))
3229 return PCI_ERS_RESULT_DISCONNECT;
3230 return PCI_ERS_RESULT_RECOVERED;
3231}
3232
3233static void eeh_resume(struct pci_dev *pdev)
3234{
3235 int i;
3236 struct adapter *adap = pci_get_drvdata(pdev);
3237
3238 if (!adap)
3239 return;
3240
3241 rtnl_lock();
3242 for_each_port(adap, i) {
3243 struct net_device *dev = adap->port[i];
3244
3245 if (netif_running(dev)) {
3246 link_start(dev);
3247 cxgb_set_rxmode(dev);
3248 }
3249 netif_device_attach(dev);
3250 }
3251 rtnl_unlock();
3252}
3253
3254static struct pci_error_handlers cxgb4_eeh = {
3255 .error_detected = eeh_err_detected,
3256 .slot_reset = eeh_slot_reset,
3257 .resume = eeh_resume,
3258};
3259
3260static inline bool is_10g_port(const struct link_config *lc)
3261{
3262 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
3263}
3264
3265static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
3266 unsigned int size, unsigned int iqe_size)
3267{
3268 q->intr_params = QINTR_TIMER_IDX(timer_idx) |
3269 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
3270 q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
3271 q->iqe_len = iqe_size;
3272 q->size = size;
3273}
3274
3275/*
3276 * Perform default configuration of DMA queues depending on the number and type
3277 * of ports we found and the number of available CPUs. Most settings can be
3278 * modified by the admin prior to actual use.
3279 */
3280static void __devinit cfg_queues(struct adapter *adap)
3281{
3282 struct sge *s = &adap->sge;
3283 int i, q10g = 0, n10g = 0, qidx = 0;
3284
3285 for_each_port(adap, i)
3286 n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
3287
3288 /*
3289	 * We default to 1 queue per non-10G port and, for each 10G port, up to
3290	 * as many queues as there are online CPU cores.
3291 */
3292 if (n10g)
3293 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
3294 if (q10g > num_online_cpus())
3295 q10g = num_online_cpus();
3296
3297 for_each_port(adap, i) {
3298 struct port_info *pi = adap2pinfo(adap, i);
3299
3300 pi->first_qset = qidx;
3301 pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
3302 qidx += pi->nqsets;
3303 }
3304
3305 s->ethqsets = qidx;
3306 s->max_ethqsets = qidx; /* MSI-X may lower it later */
3307
3308 if (is_offload(adap)) {
3309 /*
3310 * For offload we use 1 queue/channel if all ports are up to 1G,
3311 * otherwise we divide all available queues amongst the channels
3312 * capped by the number of available cores.
3313 */
3314 if (n10g) {
3315 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
3316 num_online_cpus());
3317 s->ofldqsets = roundup(i, adap->params.nports);
3318 } else
3319 s->ofldqsets = adap->params.nports;
3320 /* For RDMA one Rx queue per channel suffices */
3321 s->rdmaqs = adap->params.nports;
3322 }
3323
3324 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
3325 struct sge_eth_rxq *r = &s->ethrxq[i];
3326
3327 init_rspq(&r->rspq, 0, 0, 1024, 64);
3328 r->fl.size = 72;
3329 }
3330
3331 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
3332 s->ethtxq[i].q.size = 1024;
3333
3334 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
3335 s->ctrlq[i].q.size = 512;
3336
3337 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
3338 s->ofldtxq[i].q.size = 1024;
3339
3340 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
3341 struct sge_ofld_rxq *r = &s->ofldrxq[i];
3342
3343 init_rspq(&r->rspq, 0, 0, 1024, 64);
3344 r->rspq.uld = CXGB4_ULD_ISCSI;
3345 r->fl.size = 72;
3346 }
3347
3348 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
3349 struct sge_ofld_rxq *r = &s->rdmarxq[i];
3350
3351 init_rspq(&r->rspq, 0, 0, 511, 64);
3352 r->rspq.uld = CXGB4_ULD_RDMA;
3353 r->fl.size = 72;
3354 }
3355
3356 init_rspq(&s->fw_evtq, 6, 0, 512, 64);
3357 init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
3358}
3359
3360/*
3361 * Reduce the number of Ethernet queues across all ports to at most n.
3362 * Callers pass n >= the number of ports, so each port keeps at least one queue.
3363 */
3364static void __devinit reduce_ethqs(struct adapter *adap, int n)
3365{
3366 int i;
3367 struct port_info *pi;
3368
3369 while (n < adap->sge.ethqsets)
3370 for_each_port(adap, i) {
3371 pi = adap2pinfo(adap, i);
3372 if (pi->nqsets > 1) {
3373 pi->nqsets--;
3374 adap->sge.ethqsets--;
3375 if (adap->sge.ethqsets <= n)
3376 break;
3377 }
3378 }
3379
3380 n = 0;
3381 for_each_port(adap, i) {
3382 pi = adap2pinfo(adap, i);
3383 pi->first_qset = n;
3384 n += pi->nqsets;
3385 }
3386}
3387
3388/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
3389#define EXTRA_VECS 2
3390
3391static int __devinit enable_msix(struct adapter *adap)
3392{
3393 int ofld_need = 0;
3394 int i, err, want, need;
3395 struct sge *s = &adap->sge;
3396 unsigned int nchan = adap->params.nports;
3397 struct msix_entry entries[MAX_INGQ + 1];
3398
3399 for (i = 0; i < ARRAY_SIZE(entries); ++i)
3400 entries[i].entry = i;
3401
3402 want = s->max_ethqsets + EXTRA_VECS;
3403 if (is_offload(adap)) {
3404 want += s->rdmaqs + s->ofldqsets;
3405 /* need nchan for each possible ULD */
3406 ofld_need = 2 * nchan;
3407 }
3408 need = adap->params.nports + EXTRA_VECS + ofld_need;
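	/*
	 * Worked example (illustrative only): on a 4-port offload-capable
	 * adapter, ofld_need = 2 * 4 = 8, so the bare minimum here is
	 * need = 4 + EXTRA_VECS(2) + 8 = 14 vectors.
	 */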
3409
3410 while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
3411 want = err;
3412
3413 if (!err) {
3414 /*
3415 * Distribute available vectors to the various queue groups.
3416 * Every group gets its minimum requirement and NIC gets top
3417 * priority for leftovers.
3418 */
3419 i = want - EXTRA_VECS - ofld_need;
3420 if (i < s->max_ethqsets) {
3421 s->max_ethqsets = i;
3422 if (i < s->ethqsets)
3423 reduce_ethqs(adap, i);
3424 }
3425 if (is_offload(adap)) {
3426 i = want - EXTRA_VECS - s->max_ethqsets;
3427 i -= ofld_need - nchan;
3428 s->ofldqsets = (i / nchan) * nchan; /* round down */
3429 }
3430 for (i = 0; i < want; ++i)
3431 adap->msix_info[i].vec = entries[i].vector;
3432 } else if (err > 0)
3433 dev_info(adap->pdev_dev,
3434 "only %d MSI-X vectors left, not using MSI-X\n", err);
3435 return err;
3436}
3437
3438#undef EXTRA_VECS
3439
3440static int __devinit init_rss(struct adapter *adap)
3441{
3442 unsigned int i, j;
3443
3444 for_each_port(adap, i) {
3445 struct port_info *pi = adap2pinfo(adap, i);
3446
3447 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
3448 if (!pi->rss)
3449 return -ENOMEM;
3450 for (j = 0; j < pi->rss_size; j++)
3451 pi->rss[j] = j % pi->nqsets;
3452 }
3453 return 0;
3454}
3455
3456static void __devinit print_port_info(const struct net_device *dev)
3457{
3458 static const char *base[] = {
3459 "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
3460 "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
3461 };
3462
3463 char buf[80];
3464 char *bufp = buf;
3465 const char *spd = "";
3466 const struct port_info *pi = netdev_priv(dev);
3467 const struct adapter *adap = pi->adapter;
3468
3469 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
3470 spd = " 2.5 GT/s";
3471 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
3472 spd = " 5 GT/s";
3473
3474 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
3475 bufp += sprintf(bufp, "100/");
3476 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
3477 bufp += sprintf(bufp, "1000/");
3478 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
3479 bufp += sprintf(bufp, "10G/");
3480 if (bufp != buf)
3481 --bufp;
3482 sprintf(bufp, "BASE-%s", base[pi->port_type]);
3483
3484 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
3485 adap->params.vpd.id, adap->params.rev, buf,
3486 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
3487 (adap->flags & USING_MSIX) ? " MSI-X" :
3488 (adap->flags & USING_MSI) ? " MSI" : "");
3489 netdev_info(dev, "S/N: %s, E/C: %s\n",
3490 adap->params.vpd.sn, adap->params.vpd.ec);
3491}
3492
3493static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev)
3494{
3495 u16 v;
3496 int pos;
3497
3498 pos = pci_pcie_cap(dev);
3499 if (pos > 0) {
3500 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v);
3501 v |= PCI_EXP_DEVCTL_RELAX_EN;
3502 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v);
3503 }
3504}
3505
3506/*
3507 * Free the following resources:
3508 * - memory used for tables
3509 * - MSI/MSI-X
3510 * - net devices
3511 * - resources FW is holding for us
3512 */
3513static void free_some_resources(struct adapter *adapter)
3514{
3515 unsigned int i;
3516
3517 t4_free_mem(adapter->l2t);
3518 t4_free_mem(adapter->tids.tid_tab);
3519 disable_msi(adapter);
3520
3521 for_each_port(adapter, i)
3522 if (adapter->port[i]) {
3523 kfree(adap2pinfo(adapter, i)->rss);
3524 free_netdev(adapter->port[i]);
3525 }
3526 if (adapter->flags & FW_OK)
3527 t4_fw_bye(adapter, adapter->fn);
3528}
3529
3530#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
3531#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
3532 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3533
3534static int __devinit init_one(struct pci_dev *pdev,
3535 const struct pci_device_id *ent)
3536{
3537 int func, i, err;
3538 struct port_info *pi;
3539 unsigned int highdma = 0;
3540 struct adapter *adapter = NULL;
3541
3542 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3543
3544 err = pci_request_regions(pdev, KBUILD_MODNAME);
3545 if (err) {
3546 /* Just info, some other driver may have claimed the device. */
3547 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3548 return err;
3549 }
3550
3551 /* We control everything through one PF */
3552 func = PCI_FUNC(pdev->devfn);
3553 if (func != ent->driver_data) {
3554 pci_save_state(pdev); /* to restore SR-IOV later */
3555 goto sriov;
3556 }
3557
3558 err = pci_enable_device(pdev);
3559 if (err) {
3560 dev_err(&pdev->dev, "cannot enable PCI device\n");
3561 goto out_release_regions;
3562 }
3563
3564 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3565 highdma = NETIF_F_HIGHDMA;
3566 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3567 if (err) {
3568 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3569 "coherent allocations\n");
3570 goto out_disable_device;
3571 }
3572 } else {
3573 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3574 if (err) {
3575 dev_err(&pdev->dev, "no usable DMA configuration\n");
3576 goto out_disable_device;
3577 }
3578 }
3579
3580 pci_enable_pcie_error_reporting(pdev);
3581 enable_pcie_relaxed_ordering(pdev);
3582 pci_set_master(pdev);
3583 pci_save_state(pdev);
3584
3585 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3586 if (!adapter) {
3587 err = -ENOMEM;
3588 goto out_disable_device;
3589 }
3590
3591 adapter->regs = pci_ioremap_bar(pdev, 0);
3592 if (!adapter->regs) {
3593 dev_err(&pdev->dev, "cannot map device registers\n");
3594 err = -ENOMEM;
3595 goto out_free_adapter;
3596 }
3597
3598 adapter->pdev = pdev;
3599 adapter->pdev_dev = &pdev->dev;
3600 adapter->fn = func;
3601 adapter->msg_enable = dflt_msg_enable;
3602 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
3603
3604 spin_lock_init(&adapter->stats_lock);
3605 spin_lock_init(&adapter->tid_release_lock);
3606
3607 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
3608
3609 err = t4_prep_adapter(adapter);
3610 if (err)
3611 goto out_unmap_bar;
3612 err = adap_init0(adapter);
3613 if (err)
3614 goto out_unmap_bar;
3615
3616 for_each_port(adapter, i) {
3617 struct net_device *netdev;
3618
3619 netdev = alloc_etherdev_mq(sizeof(struct port_info),
3620 MAX_ETH_QSETS);
3621 if (!netdev) {
3622 err = -ENOMEM;
3623 goto out_free_dev;
3624 }
3625
3626 SET_NETDEV_DEV(netdev, &pdev->dev);
3627
3628 adapter->port[i] = netdev;
3629 pi = netdev_priv(netdev);
3630 pi->adapter = adapter;
3631 pi->xact_addr_filt = -1;
3632 pi->port_id = i;
3633 netdev->irq = pdev->irq;
3634
3635 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
3636 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3637 NETIF_F_RXCSUM | NETIF_F_RXHASH |
3638 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3639 netdev->features |= netdev->hw_features | highdma;
3640 netdev->vlan_features = netdev->features & VLAN_FEAT;
3641
3642 netdev->netdev_ops = &cxgb4_netdev_ops;
3643 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3644 }
3645
3646 pci_set_drvdata(pdev, adapter);
3647
3648 if (adapter->flags & FW_OK) {
3649 err = t4_port_init(adapter, func, func, 0);
3650 if (err)
3651 goto out_free_dev;
3652 }
3653
3654 /*
3655	 * Configure queues and allocate tables now; they can be needed as
3656 * soon as the first register_netdev completes.
3657 */
3658 cfg_queues(adapter);
3659
3660 adapter->l2t = t4_init_l2t();
3661 if (!adapter->l2t) {
3662 /* We tolerate a lack of L2T, giving up some functionality */
3663 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
3664 adapter->params.offload = 0;
3665 }
3666
3667 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
3668 dev_warn(&pdev->dev, "could not allocate TID table, "
3669 "continuing\n");
3670 adapter->params.offload = 0;
3671 }
3672
3673 /* See what interrupts we'll be using */
3674 if (msi > 1 && enable_msix(adapter) == 0)
3675 adapter->flags |= USING_MSIX;
3676 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3677 adapter->flags |= USING_MSI;
3678
3679 err = init_rss(adapter);
3680 if (err)
3681 goto out_free_dev;
3682
3683 /*
3684 * The card is now ready to go. If any errors occur during device
3685 * registration we do not fail the whole card but rather proceed only
3686 * with the ports we manage to register successfully. However we must
3687 * register at least one net device.
3688 */
3689 for_each_port(adapter, i) {
3690 pi = adap2pinfo(adapter, i);
3691 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
3692 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
3693
3694 err = register_netdev(adapter->port[i]);
3695 if (err)
3696 break;
3697 adapter->chan_map[pi->tx_chan] = i;
3698 print_port_info(adapter->port[i]);
3699 }
3700 if (i == 0) {
3701 dev_err(&pdev->dev, "could not register any net devices\n");
3702 goto out_free_dev;
3703 }
3704 if (err) {
3705 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
3706 err = 0;
3707 }
3708
3709 if (cxgb4_debugfs_root) {
3710 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
3711 cxgb4_debugfs_root);
3712 setup_debugfs(adapter);
3713 }
3714
3715 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
3716 pdev->needs_freset = 1;
3717
3718 if (is_offload(adapter))
3719 attach_ulds(adapter);
3720
3721sriov:
3722#ifdef CONFIG_PCI_IOV
3723 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
3724 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
3725 dev_info(&pdev->dev,
3726 "instantiated %u virtual functions\n",
3727 num_vf[func]);
3728#endif
3729 return 0;
3730
3731 out_free_dev:
3732 free_some_resources(adapter);
3733 out_unmap_bar:
3734 iounmap(adapter->regs);
3735 out_free_adapter:
3736 kfree(adapter);
3737 out_disable_device:
3738 pci_disable_pcie_error_reporting(pdev);
3739 pci_disable_device(pdev);
3740 out_release_regions:
3741 pci_release_regions(pdev);
3742 pci_set_drvdata(pdev, NULL);
3743 return err;
3744}
3745
3746static void __devexit remove_one(struct pci_dev *pdev)
3747{
3748 struct adapter *adapter = pci_get_drvdata(pdev);
3749
3750 pci_disable_sriov(pdev);
3751
3752 if (adapter) {
3753 int i;
3754
3755 if (is_offload(adapter))
3756 detach_ulds(adapter);
3757
3758 for_each_port(adapter, i)
3759 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
3760 unregister_netdev(adapter->port[i]);
3761
3762 if (adapter->debugfs_root)
3763 debugfs_remove_recursive(adapter->debugfs_root);
3764
3765 if (adapter->flags & FULL_INIT_DONE)
3766 cxgb_down(adapter);
3767
3768 free_some_resources(adapter);
3769 iounmap(adapter->regs);
3770 kfree(adapter);
3771 pci_disable_pcie_error_reporting(pdev);
3772 pci_disable_device(pdev);
3773 pci_release_regions(pdev);
3774 pci_set_drvdata(pdev, NULL);
3775 } else
3776 pci_release_regions(pdev);
3777}
3778
3779static struct pci_driver cxgb4_driver = {
3780 .name = KBUILD_MODNAME,
3781 .id_table = cxgb4_pci_tbl,
3782 .probe = init_one,
3783 .remove = __devexit_p(remove_one),
3784 .err_handler = &cxgb4_eeh,
3785};
3786
3787static int __init cxgb4_init_module(void)
3788{
3789 int ret;
3790
3791 /* Debugfs support is optional, just warn if this fails */
3792 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
3793 if (!cxgb4_debugfs_root)
3794 pr_warning("could not create debugfs entry, continuing\n");
3795
3796 ret = pci_register_driver(&cxgb4_driver);
3797 if (ret < 0)
3798 debugfs_remove(cxgb4_debugfs_root);
3799 return ret;
3800}
3801
3802static void __exit cxgb4_cleanup_module(void)
3803{
3804 pci_unregister_driver(&cxgb4_driver);
3805 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
3806}
3807
3808module_init(cxgb4_init_module);
3809module_exit(cxgb4_cleanup_module);
diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h
new file mode 100644
index 00000000000..b1d39b8d141
--- /dev/null
+++ b/drivers/net/cxgb4/cxgb4_uld.h
@@ -0,0 +1,239 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef __CXGB4_OFLD_H
36#define __CXGB4_OFLD_H
37
38#include <linux/cache.h>
39#include <linux/spinlock.h>
40#include <linux/skbuff.h>
41#include <linux/atomic.h>
42
43/* CPL message priority levels */
44enum {
45 CPL_PRIORITY_DATA = 0, /* data messages */
46 CPL_PRIORITY_SETUP = 1, /* connection setup messages */
47 CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */
48 CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */
49 CPL_PRIORITY_ACK = 1, /* RX ACK messages */
50 CPL_PRIORITY_CONTROL = 1 /* control messages */
51};
52
53#define INIT_TP_WR(w, tid) do { \
54 (w)->wr.wr_hi = htonl(FW_WR_OP(FW_TP_WR) | \
55 FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \
56 (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \
57 FW_WR_FLOWID(tid)); \
58 (w)->wr.wr_lo = cpu_to_be64(0); \
59} while (0)
60
61#define INIT_TP_WR_CPL(w, cpl, tid) do { \
62 INIT_TP_WR(w, tid); \
63 OPCODE_TID(w) = htonl(MK_OPCODE_TID(cpl, tid)); \
64} while (0)
65
66#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \
67 (w)->wr.wr_hi = htonl(FW_WR_OP(FW_ULPTX_WR) | FW_WR_ATOMIC(atomic)); \
68 (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \
69 FW_WR_FLOWID(tid)); \
70 (w)->wr.wr_lo = cpu_to_be64(0); \
71} while (0)
72
73/* Special asynchronous notification message */
74#define CXGB4_MSG_AN ((void *)1)
75
76struct serv_entry {
77 void *data;
78};
79
80union aopen_entry {
81 void *data;
82 union aopen_entry *next;
83};
84
85/*
86 * Holds the size, base address, free list start, etc. of the TID, server TID,
87 * and active-open TID tables. The tables themselves are allocated dynamically.
88 */
89struct tid_info {
90 void **tid_tab;
91 unsigned int ntids;
92
93 struct serv_entry *stid_tab;
94 unsigned long *stid_bmap;
95 unsigned int nstids;
96 unsigned int stid_base;
97
98 union aopen_entry *atid_tab;
99 unsigned int natids;
100
101 unsigned int nftids;
102 unsigned int ftid_base;
103
104 spinlock_t atid_lock ____cacheline_aligned_in_smp;
105 union aopen_entry *afree;
106 unsigned int atids_in_use;
107
108 spinlock_t stid_lock;
109 unsigned int stids_in_use;
110
111 atomic_t tids_in_use;
112};
113
114static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
115{
116 return tid < t->ntids ? t->tid_tab[tid] : NULL;
117}
118
119static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
120{
121 return atid < t->natids ? t->atid_tab[atid].data : NULL;
122}
123
124static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
125{
126 stid -= t->stid_base;
127 return stid < t->nstids ? t->stid_tab[stid].data : NULL;
128}
129
130static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
131 unsigned int tid)
132{
133 t->tid_tab[tid] = data;
134 atomic_inc(&t->tids_in_use);
135}
136
137int cxgb4_alloc_atid(struct tid_info *t, void *data);
138int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
139void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
140void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
141void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
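/*
 * A minimal, illustrative sketch (not part of the driver) of how an offload
 * driver might use the TID table: install per-connection state under a
 * hardware TID and recover it later when a CPL reply arrives.  "struct
 * my_conn" is a hypothetical type; the block is compiled out on purpose.
 */
#if 0
static void my_conn_bind(struct tid_info *t, struct my_conn *c,
			 unsigned int hwtid)
{
	cxgb4_insert_tid(t, c, hwtid);	/* TID -> state lookups now work */
}

static struct my_conn *my_conn_from_tid(struct tid_info *t, unsigned int hwtid)
{
	return lookup_tid(t, hwtid);	/* NULL if the TID is unknown */
}
#endif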
142
143struct in6_addr;
144
145int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
146 __be32 sip, __be16 sport, unsigned int queue);
147
148static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
149{
150 skb_set_queue_mapping(skb, (queue << 1) | prio);
151}
152
153enum cxgb4_uld {
154 CXGB4_ULD_RDMA,
155 CXGB4_ULD_ISCSI,
156 CXGB4_ULD_MAX
157};
158
159enum cxgb4_state {
160 CXGB4_STATE_UP,
161 CXGB4_STATE_START_RECOVERY,
162 CXGB4_STATE_DOWN,
163 CXGB4_STATE_DETACH
164};
165
166struct pci_dev;
167struct l2t_data;
168struct net_device;
169struct pkt_gl;
170struct tp_tcp_stats;
171
172struct cxgb4_range {
173 unsigned int start;
174 unsigned int size;
175};
176
177struct cxgb4_virt_res { /* virtualized HW resources */
178 struct cxgb4_range ddp;
179 struct cxgb4_range iscsi;
180 struct cxgb4_range stag;
181 struct cxgb4_range rq;
182 struct cxgb4_range pbl;
183 struct cxgb4_range qp;
184 struct cxgb4_range cq;
185 struct cxgb4_range ocq;
186};
187
188#define OCQ_WIN_OFFSET(pdev, vres) \
189 (pci_resource_len((pdev), 2) - roundup_pow_of_two((vres)->ocq.size))
190
191/*
192 * Block of information the LLD provides to ULDs attaching to a device.
193 */
194struct cxgb4_lld_info {
195 struct pci_dev *pdev; /* associated PCI device */
196 struct l2t_data *l2t; /* L2 table */
197 struct tid_info *tids; /* TID table */
198 struct net_device **ports; /* device ports */
199 const struct cxgb4_virt_res *vr; /* assorted HW resources */
200 const unsigned short *mtus; /* MTU table */
201 const unsigned short *rxq_ids; /* the ULD's Rx queue ids */
202 unsigned short nrxq; /* # of Rx queues */
203 unsigned short ntxq; /* # of Tx queues */
204 unsigned char nchan:4; /* # of channels */
205 unsigned char nports:4; /* # of ports */
206 unsigned char wr_cred; /* WR 16-byte credits */
207 unsigned char adapter_type; /* type of adapter */
208 unsigned char fw_api_ver; /* FW API version */
209 unsigned int fw_vers; /* FW version */
210 unsigned int iscsi_iolen; /* iSCSI max I/O length */
211 unsigned short udb_density; /* # of user DB/page */
212 unsigned short ucq_density; /* # of user CQs/page */
213 void __iomem *gts_reg; /* address of GTS register */
214 void __iomem *db_reg; /* address of kernel doorbell */
215};
216
217struct cxgb4_uld_info {
218 const char *name;
219 void *(*add)(const struct cxgb4_lld_info *p);
220 int (*rx_handler)(void *handle, const __be64 *rsp,
221 const struct pkt_gl *gl);
222 int (*state_change)(void *handle, enum cxgb4_state new_state);
223};
224
225int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
226int cxgb4_unregister_uld(enum cxgb4_uld type);
227int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
228unsigned int cxgb4_port_chan(const struct net_device *dev);
229unsigned int cxgb4_port_viid(const struct net_device *dev);
230unsigned int cxgb4_port_idx(const struct net_device *dev);
231unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
232 unsigned int *idx);
233void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
234 struct tp_tcp_stats *v6);
235void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
236 const unsigned int *pgsz_order);
237struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
238 unsigned int skb_len, unsigned int pull_len);
239#endif /* !__CXGB4_OFLD_H */
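/*
 * A minimal, illustrative sketch of how an upper-layer driver (ULD) might
 * hook into cxgb4 through the interface declared above.  The "my_iscsi_*"
 * and "my_alloc_adapter_state" names and the callback bodies are
 * hypothetical; only struct cxgb4_uld_info, cxgb4_register_uld() and
 * cxgb4_unregister_uld() come from this header.  Compiled out on purpose.
 */
#if 0
static void *my_iscsi_add(const struct cxgb4_lld_info *lldi)
{
	/* Allocate per-adapter ULD state; cxgb4 hands it back as "handle". */
	return my_alloc_adapter_state(lldi);
}

static int my_iscsi_rx(void *handle, const __be64 *rsp,
		       const struct pkt_gl *gl)
{
	return 0;	/* CPL message consumed */
}

static int my_iscsi_state_change(void *handle, enum cxgb4_state new_state)
{
	return 0;	/* e.g. quiesce on CXGB4_STATE_START_RECOVERY */
}

static const struct cxgb4_uld_info my_iscsi_uld_info = {
	.name		= "my_iscsi",
	.add		= my_iscsi_add,
	.rx_handler	= my_iscsi_rx,
	.state_change	= my_iscsi_state_change,
};

static int __init my_iscsi_init(void)
{
	return cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_iscsi_uld_info);
}

static void __exit my_iscsi_exit(void)
{
	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
}
#endif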
diff --git a/drivers/net/cxgb4/l2t.c b/drivers/net/cxgb4/l2t.c
new file mode 100644
index 00000000000..a2d323c473f
--- /dev/null
+++ b/drivers/net/cxgb4/l2t.c
@@ -0,0 +1,597 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/skbuff.h>
36#include <linux/netdevice.h>
37#include <linux/if.h>
38#include <linux/if_vlan.h>
39#include <linux/jhash.h>
40#include <net/neighbour.h>
41#include "cxgb4.h"
42#include "l2t.h"
43#include "t4_msg.h"
44#include "t4fw_api.h"
45
46#define VLAN_NONE 0xfff
47
48/* identifies sync vs async L2T_WRITE_REQs */
49#define F_SYNC_WR (1 << 12)
50
51enum {
52 L2T_STATE_VALID, /* entry is up to date */
53 L2T_STATE_STALE, /* entry may be used but needs revalidation */
54 L2T_STATE_RESOLVING, /* entry needs address resolution */
55 L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
56
57 /* when state is one of the below the entry is not hashed */
58 L2T_STATE_SWITCHING, /* entry is being used by a switching filter */
59 L2T_STATE_UNUSED /* entry not in use */
60};
61
62struct l2t_data {
63 rwlock_t lock;
64 atomic_t nfree; /* number of free entries */
65 struct l2t_entry *rover; /* starting point for next allocation */
66 struct l2t_entry l2tab[L2T_SIZE];
67};
68
69static inline unsigned int vlan_prio(const struct l2t_entry *e)
70{
71 return e->vlan >> 13;
72}
73
74static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
75{
76 if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
77 atomic_dec(&d->nfree);
78}
79
80/*
81 * To avoid having to check address families we do not allow v4 and v6
82 * neighbors to be on the same hash chain. We keep v4 entries in the first
83 * half of available hash buckets and v6 in the second.
84 */
85enum {
86 L2T_SZ_HALF = L2T_SIZE / 2,
87 L2T_HASH_MASK = L2T_SZ_HALF - 1
88};
89
90static inline unsigned int arp_hash(const u32 *key, int ifindex)
91{
92 return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
93}
94
95static inline unsigned int ipv6_hash(const u32 *key, int ifindex)
96{
97 u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
98
99 return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
100}
101
102static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex)
103{
104 return addr_len == 4 ? arp_hash(addr, ifindex) :
105 ipv6_hash(addr, ifindex);
106}
107
108/*
109 * Checks if an L2T entry is for the given IP/IPv6 address. It does not check
110 * whether the L2T entry and the address are of the same address family.
111 * Callers ensure an address is only checked against L2T entries of the same
112 * family, something made trivial by the separation of IP and IPv6 hash chains
113 * mentioned above. Returns 0 if there's a match,
114 * mentioned above. Returns 0 if there's a match, non-zero otherwise.
115static int addreq(const struct l2t_entry *e, const u32 *addr)
116{
117 if (e->v6)
118 return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
119 (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
120 return e->addr[0] ^ addr[0];
121}
122
123static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
124{
125 neigh_hold(n);
126 if (e->neigh)
127 neigh_release(e->neigh);
128 e->neigh = n;
129}
130
131/*
132 * Write an L2T entry. Must be called with the entry locked.
133 * The write may be synchronous or asynchronous.
134 */
135static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
136{
137 struct sk_buff *skb;
138 struct cpl_l2t_write_req *req;
139
140 skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
141 if (!skb)
142 return -ENOMEM;
143
144 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
145 INIT_TP_WR(req, 0);
146
147 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
148 e->idx | (sync ? F_SYNC_WR : 0) |
149 TID_QID(adap->sge.fw_evtq.abs_id)));
150 req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
151 req->l2t_idx = htons(e->idx);
152 req->vlan = htons(e->vlan);
153 if (e->neigh)
154 memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
155 memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
156
157 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
158 t4_ofld_send(adap, skb);
159
160 if (sync && e->state != L2T_STATE_SWITCHING)
161 e->state = L2T_STATE_SYNC_WRITE;
162 return 0;
163}
164
165/*
166 * Send packets waiting in an L2T entry's ARP queue. Must be called with the
167 * entry locked.
168 */
169static void send_pending(struct adapter *adap, struct l2t_entry *e)
170{
171 while (e->arpq_head) {
172 struct sk_buff *skb = e->arpq_head;
173
174 e->arpq_head = skb->next;
175 skb->next = NULL;
176 t4_ofld_send(adap, skb);
177 }
178 e->arpq_tail = NULL;
179}
180
181/*
182 * Process a CPL_L2T_WRITE_RPL. Wake up the ARP queue if it completes a
183 * synchronous L2T_WRITE. Note that the TID in the reply is really the L2T
184 * index it refers to.
185 */
186void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
187{
188 unsigned int tid = GET_TID(rpl);
189 unsigned int idx = tid & (L2T_SIZE - 1);
190
191 if (unlikely(rpl->status != CPL_ERR_NONE)) {
192 dev_err(adap->pdev_dev,
193 "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
194 rpl->status, idx);
195 return;
196 }
197
198 if (tid & F_SYNC_WR) {
199 struct l2t_entry *e = &adap->l2t->l2tab[idx];
200
201 spin_lock(&e->lock);
202 if (e->state != L2T_STATE_SWITCHING) {
203 send_pending(adap, e);
204 e->state = (e->neigh->nud_state & NUD_STALE) ?
205 L2T_STATE_STALE : L2T_STATE_VALID;
206 }
207 spin_unlock(&e->lock);
208 }
209}
210
211/*
212 * Add a packet to an L2T entry's queue of packets awaiting resolution.
213 * Must be called with the entry's lock held.
214 */
215static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
216{
217 skb->next = NULL;
218 if (e->arpq_head)
219 e->arpq_tail->next = skb;
220 else
221 e->arpq_head = skb;
222 e->arpq_tail = skb;
223}
224
225int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
226 struct l2t_entry *e)
227{
228 struct adapter *adap = netdev2adap(dev);
229
230again:
231 switch (e->state) {
232 case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
233 neigh_event_send(e->neigh, NULL);
234 spin_lock_bh(&e->lock);
235 if (e->state == L2T_STATE_STALE)
236 e->state = L2T_STATE_VALID;
237 spin_unlock_bh(&e->lock);
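		/* fall through - the entry can be used while it revalidates */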
238 case L2T_STATE_VALID: /* fast-path, send the packet on */
239 return t4_ofld_send(adap, skb);
240 case L2T_STATE_RESOLVING:
241 case L2T_STATE_SYNC_WRITE:
242 spin_lock_bh(&e->lock);
243 if (e->state != L2T_STATE_SYNC_WRITE &&
244 e->state != L2T_STATE_RESOLVING) {
245 spin_unlock_bh(&e->lock);
246 goto again;
247 }
248 arpq_enqueue(e, skb);
249 spin_unlock_bh(&e->lock);
250
251 if (e->state == L2T_STATE_RESOLVING &&
252 !neigh_event_send(e->neigh, NULL)) {
253 spin_lock_bh(&e->lock);
254 if (e->state == L2T_STATE_RESOLVING && e->arpq_head)
255 write_l2e(adap, e, 1);
256 spin_unlock_bh(&e->lock);
257 }
258 }
259 return 0;
260}
261EXPORT_SYMBOL(cxgb4_l2t_send);
262
263/*
264 * Allocate a free L2T entry. Must be called with l2t_data.lock held.
265 */
266static struct l2t_entry *alloc_l2e(struct l2t_data *d)
267{
268 struct l2t_entry *end, *e, **p;
269
270 if (!atomic_read(&d->nfree))
271 return NULL;
272
273 /* there's definitely a free entry */
274 for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
275 if (atomic_read(&e->refcnt) == 0)
276 goto found;
277
278 for (e = d->l2tab; atomic_read(&e->refcnt); ++e)
279 ;
280found:
281 d->rover = e + 1;
282 atomic_dec(&d->nfree);
283
284 /*
285 * The entry we found may be an inactive entry that is
286 * presently in the hash table. We need to remove it.
287 */
288 if (e->state < L2T_STATE_SWITCHING)
289 for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
290 if (*p == e) {
291 *p = e->next;
292 e->next = NULL;
293 break;
294 }
295
296 e->state = L2T_STATE_UNUSED;
297 return e;
298}
299
300/*
301 * Called when an L2T entry has no more users.
302 */
303static void t4_l2e_free(struct l2t_entry *e)
304{
305 struct l2t_data *d;
306
307 spin_lock_bh(&e->lock);
308 if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
309 if (e->neigh) {
310 neigh_release(e->neigh);
311 e->neigh = NULL;
312 }
313 while (e->arpq_head) {
314 struct sk_buff *skb = e->arpq_head;
315
316 e->arpq_head = skb->next;
317 kfree_skb(skb);
318 }
319 e->arpq_tail = NULL;
320 }
321 spin_unlock_bh(&e->lock);
322
323 d = container_of(e, struct l2t_data, l2tab[e->idx]);
324 atomic_inc(&d->nfree);
325}
326
327void cxgb4_l2t_release(struct l2t_entry *e)
328{
329 if (atomic_dec_and_test(&e->refcnt))
330 t4_l2e_free(e);
331}
332EXPORT_SYMBOL(cxgb4_l2t_release);
333
334/*
335 * Update an L2T entry that was previously used for the same next hop as neigh.
336 * Must be called with softirqs disabled.
337 */
338static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
339{
340 unsigned int nud_state;
341
342	spin_lock(&e->lock); /* avoid race with t4_l2e_free */
343 if (neigh != e->neigh)
344 neigh_replace(e, neigh);
345 nud_state = neigh->nud_state;
346 if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
347 !(nud_state & NUD_VALID))
348 e->state = L2T_STATE_RESOLVING;
349 else if (nud_state & NUD_CONNECTED)
350 e->state = L2T_STATE_VALID;
351 else
352 e->state = L2T_STATE_STALE;
353 spin_unlock(&e->lock);
354}
355
356struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
357 const struct net_device *physdev,
358 unsigned int priority)
359{
360 u8 lport;
361 u16 vlan;
362 struct l2t_entry *e;
363 int addr_len = neigh->tbl->key_len;
364 u32 *addr = (u32 *)neigh->primary_key;
365 int ifidx = neigh->dev->ifindex;
366 int hash = addr_hash(addr, addr_len, ifidx);
367
368 if (neigh->dev->flags & IFF_LOOPBACK)
369 lport = netdev2pinfo(physdev)->tx_chan + 4;
370 else
371 lport = netdev2pinfo(physdev)->lport;
372
373 if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
374 vlan = vlan_dev_vlan_id(neigh->dev);
375 else
376 vlan = VLAN_NONE;
377
378 write_lock_bh(&d->lock);
379 for (e = d->l2tab[hash].first; e; e = e->next)
380 if (!addreq(e, addr) && e->ifindex == ifidx &&
381 e->vlan == vlan && e->lport == lport) {
382 l2t_hold(d, e);
383 if (atomic_read(&e->refcnt) == 1)
384 reuse_entry(e, neigh);
385 goto done;
386 }
387
388 /* Need to allocate a new entry */
389 e = alloc_l2e(d);
390 if (e) {
391		spin_lock(&e->lock); /* avoid race with t4_l2e_free */
392 e->state = L2T_STATE_RESOLVING;
393 memcpy(e->addr, addr, addr_len);
394 e->ifindex = ifidx;
395 e->hash = hash;
396 e->lport = lport;
397 e->v6 = addr_len == 16;
398 atomic_set(&e->refcnt, 1);
399 neigh_replace(e, neigh);
400 e->vlan = vlan;
401 e->next = d->l2tab[hash].first;
402 d->l2tab[hash].first = e;
403 spin_unlock(&e->lock);
404 }
405done:
406 write_unlock_bh(&d->lock);
407 return e;
408}
409EXPORT_SYMBOL(cxgb4_l2t_get);
410
411/*
412 * Called when address resolution fails for an L2T entry to handle packets
413 * on the arpq head. If a packet specifies a failure handler it is invoked,
414 * otherwise the packet is sent to the device.
415 */
416static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq)
417{
418 while (arpq) {
419 struct sk_buff *skb = arpq;
420 const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
421
422 arpq = skb->next;
423 skb->next = NULL;
424 if (cb->arp_err_handler)
425 cb->arp_err_handler(cb->handle, skb);
426 else
427 t4_ofld_send(adap, skb);
428 }
429}
430
431/*
432 * Called when the host's neighbor layer makes a change to some entry that is
433 * loaded into the HW L2 table.
434 */
435void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
436{
437 struct l2t_entry *e;
438 struct sk_buff *arpq = NULL;
439 struct l2t_data *d = adap->l2t;
440 int addr_len = neigh->tbl->key_len;
441 u32 *addr = (u32 *) neigh->primary_key;
442 int ifidx = neigh->dev->ifindex;
443 int hash = addr_hash(addr, addr_len, ifidx);
444
445 read_lock_bh(&d->lock);
446 for (e = d->l2tab[hash].first; e; e = e->next)
447 if (!addreq(e, addr) && e->ifindex == ifidx) {
448 spin_lock(&e->lock);
449 if (atomic_read(&e->refcnt))
450 goto found;
451 spin_unlock(&e->lock);
452 break;
453 }
454 read_unlock_bh(&d->lock);
455 return;
456
457 found:
458 read_unlock(&d->lock);
459
460 if (neigh != e->neigh)
461 neigh_replace(e, neigh);
462
463 if (e->state == L2T_STATE_RESOLVING) {
464 if (neigh->nud_state & NUD_FAILED) {
465 arpq = e->arpq_head;
466 e->arpq_head = e->arpq_tail = NULL;
467 } else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) &&
468 e->arpq_head) {
469 write_l2e(adap, e, 1);
470 }
471 } else {
472 e->state = neigh->nud_state & NUD_CONNECTED ?
473 L2T_STATE_VALID : L2T_STATE_STALE;
474 if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
475 write_l2e(adap, e, 0);
476 }
477
478 spin_unlock_bh(&e->lock);
479
480 if (arpq)
481 handle_failed_resolution(adap, arpq);
482}
483
484struct l2t_data *t4_init_l2t(void)
485{
486 int i;
487 struct l2t_data *d;
488
489 d = t4_alloc_mem(sizeof(*d));
490 if (!d)
491 return NULL;
492
493 d->rover = d->l2tab;
494 atomic_set(&d->nfree, L2T_SIZE);
495 rwlock_init(&d->lock);
496
497 for (i = 0; i < L2T_SIZE; ++i) {
498 d->l2tab[i].idx = i;
499 d->l2tab[i].state = L2T_STATE_UNUSED;
500 spin_lock_init(&d->l2tab[i].lock);
501 atomic_set(&d->l2tab[i].refcnt, 0);
502 }
503 return d;
504}
505
506#include <linux/module.h>
507#include <linux/debugfs.h>
508#include <linux/seq_file.h>
509
510static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
511{
512 struct l2t_entry *l2tab = seq->private;
513
514 return pos >= L2T_SIZE ? NULL : &l2tab[pos];
515}
516
517static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
518{
519 return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
520}
521
522static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
523{
524 v = l2t_get_idx(seq, *pos);
525 if (v)
526 ++*pos;
527 return v;
528}
529
530static void l2t_seq_stop(struct seq_file *seq, void *v)
531{
532}
533
534static char l2e_state(const struct l2t_entry *e)
535{
536 switch (e->state) {
537 case L2T_STATE_VALID: return 'V';
538 case L2T_STATE_STALE: return 'S';
539 case L2T_STATE_SYNC_WRITE: return 'W';
540 case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R';
541 case L2T_STATE_SWITCHING: return 'X';
542 default:
543 return 'U';
544 }
545}
546
547static int l2t_seq_show(struct seq_file *seq, void *v)
548{
549 if (v == SEQ_START_TOKEN)
550 seq_puts(seq, " Idx IP address "
551 "Ethernet address VLAN/P LP State Users Port\n");
552 else {
553 char ip[60];
554 struct l2t_entry *e = v;
555
556 spin_lock_bh(&e->lock);
557 if (e->state == L2T_STATE_SWITCHING)
558 ip[0] = '\0';
559 else
560 sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
561 seq_printf(seq, "%4u %-25s %17pM %4d %u %2u %c %5u %s\n",
562 e->idx, ip, e->dmac,
563 e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
564 l2e_state(e), atomic_read(&e->refcnt),
565 e->neigh ? e->neigh->dev->name : "");
566 spin_unlock_bh(&e->lock);
567 }
568 return 0;
569}
570
571static const struct seq_operations l2t_seq_ops = {
572 .start = l2t_seq_start,
573 .next = l2t_seq_next,
574 .stop = l2t_seq_stop,
575 .show = l2t_seq_show
576};
577
578static int l2t_seq_open(struct inode *inode, struct file *file)
579{
580 int rc = seq_open(file, &l2t_seq_ops);
581
582 if (!rc) {
583 struct adapter *adap = inode->i_private;
584 struct seq_file *seq = file->private_data;
585
586 seq->private = adap->l2t->l2tab;
587 }
588 return rc;
589}
590
591const struct file_operations t4_l2t_fops = {
592 .owner = THIS_MODULE,
593 .open = l2t_seq_open,
594 .read = seq_read,
595 .llseek = seq_lseek,
596 .release = seq_release,
597};
diff --git a/drivers/net/cxgb4/l2t.h b/drivers/net/cxgb4/l2t.h
new file mode 100644
index 00000000000..02b31d0c641
--- /dev/null
+++ b/drivers/net/cxgb4/l2t.h
@@ -0,0 +1,107 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef __CXGB4_L2T_H
36#define __CXGB4_L2T_H
37
38#include <linux/spinlock.h>
39#include <linux/if_ether.h>
40#include <linux/atomic.h>
41
42struct adapter;
43struct l2t_data;
44struct neighbour;
45struct net_device;
46struct file_operations;
47struct cpl_l2t_write_rpl;
48
49/*
50 * Each L2T entry plays multiple roles. First of all, it keeps state for the
51 * corresponding entry of the HW L2 table and maintains a queue of offload
52 * packets awaiting address resolution. Second, it is a node of a hash table
53 * chain, where the nodes of the chain are linked together through their next
54 * pointer. Finally, each node is a bucket of a hash table, pointing to the
55 * first element in its chain through its first pointer.
56 */
57struct l2t_entry {
58 u16 state; /* entry state */
59 u16 idx; /* entry index */
60 u32 addr[4]; /* next hop IP or IPv6 address */
61 int ifindex; /* neighbor's net_device's ifindex */
62 struct neighbour *neigh; /* associated neighbour */
63 struct l2t_entry *first; /* start of hash chain */
64 struct l2t_entry *next; /* next l2t_entry on chain */
65 struct sk_buff *arpq_head; /* queue of packets awaiting resolution */
66 struct sk_buff *arpq_tail;
67 spinlock_t lock;
68 atomic_t refcnt; /* entry reference count */
69 u16 hash; /* hash bucket the entry is on */
70	u16 vlan; /* VLAN TCI (id: bits 0-11, prio: 13-15) */
71 u8 v6; /* whether entry is for IPv6 */
72 u8 lport; /* associated offload logical interface */
73 u8 dmac[ETH_ALEN]; /* neighbour's MAC address */
74};
75
76typedef void (*arp_err_handler_t)(void *handle, struct sk_buff *skb);
77
78/*
79 * Callback stored in an skb to handle address resolution failure.
80 */
81struct l2t_skb_cb {
82 void *handle;
83 arp_err_handler_t arp_err_handler;
84};
85
86#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
87
88static inline void t4_set_arp_err_handler(struct sk_buff *skb, void *handle,
89 arp_err_handler_t handler)
90{
91 L2T_SKB_CB(skb)->handle = handle;
92 L2T_SKB_CB(skb)->arp_err_handler = handler;
93}
94
95void cxgb4_l2t_release(struct l2t_entry *e);
96int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
97 struct l2t_entry *e);
98struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
99 const struct net_device *physdev,
100 unsigned int priority);
101
102void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
103struct l2t_data *t4_init_l2t(void);
104void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
105
106extern const struct file_operations t4_l2t_fops;
107#endif /* __CXGB4_L2T_H */
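/*
 * A minimal, illustrative sketch (not part of the driver) of the intended
 * L2T life cycle for a caller that already holds a struct neighbour for the
 * next hop.  "my_arp_failure" and the other "my_*" helpers are hypothetical;
 * the functions used are the ones declared above.  Compiled out on purpose.
 */
#if 0
static void my_arp_failure(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);			/* give up on this packet */
}

/* Takes a reference on the entry; hold it for the connection's lifetime. */
static struct l2t_entry *my_l2t_setup(struct l2t_data *d,
				      struct neighbour *neigh,
				      struct net_device *physdev)
{
	return cxgb4_l2t_get(d, neigh, physdev, 0);
}

static int my_l2t_xmit(struct net_device *dev, struct l2t_entry *e,
		       struct sk_buff *skb)
{
	/* called back if address resolution for this entry ultimately fails */
	t4_set_arp_err_handler(skb, NULL, my_arp_failure);
	return cxgb4_l2t_send(dev, skb, e);	/* queues while resolving */
}

/* ...and on connection teardown: cxgb4_l2t_release(e); */
#endif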
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
new file mode 100644
index 00000000000..56adf448b9f
--- /dev/null
+++ b/drivers/net/cxgb4/sge.c
@@ -0,0 +1,2442 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/skbuff.h>
36#include <linux/netdevice.h>
37#include <linux/etherdevice.h>
38#include <linux/if_vlan.h>
39#include <linux/ip.h>
40#include <linux/dma-mapping.h>
41#include <linux/jiffies.h>
42#include <linux/prefetch.h>
43#include <net/ipv6.h>
44#include <net/tcp.h>
45#include "cxgb4.h"
46#include "t4_regs.h"
47#include "t4_msg.h"
48#include "t4fw_api.h"
49
50/*
51 * Rx buffer size. We use largish buffers if possible but settle for single
52 * pages under memory shortage.
53 */
54#if PAGE_SHIFT >= 16
55# define FL_PG_ORDER 0
56#else
57# define FL_PG_ORDER (16 - PAGE_SHIFT)
58#endif
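/*
 * Worked example: with 4 KiB pages (PAGE_SHIFT == 12), FL_PG_ORDER == 4, so
 * free-list buffers are 2^4 pages == 64 KiB when the allocation succeeds;
 * with 64 KiB or larger pages an order-0 page already gives the full size.
 */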
59
60/* RX_PULL_LEN should be <= RX_COPY_THRES */
61#define RX_COPY_THRES 256
62#define RX_PULL_LEN 128
63
64/*
65 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
66 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
67 */
68#define RX_PKT_SKB_LEN 512
69
70/* Ethernet header padding prepended to RX_PKTs */
71#define RX_PKT_PAD 2
72
73/*
74 * Max number of Tx descriptors we clean up at a time. Should be modest as
75 * freeing skbs isn't cheap and it happens while holding locks. We just need
76 * to free packets faster than they arrive; we then eventually catch up and keep
77 * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES.
78 */
79#define MAX_TX_RECLAIM 16
80
81/*
82 * Max number of Rx buffers we replenish at a time. Again keep this modest,
83 * allocating buffers isn't cheap either.
84 */
85#define MAX_RX_REFILL 16U
86
87/*
88 * Period of the Rx queue check timer. This timer is infrequent as it has
89 * something to do only when the system experiences severe memory shortage.
90 */
91#define RX_QCHECK_PERIOD (HZ / 2)
92
93/*
94 * Period of the Tx queue check timer.
95 */
96#define TX_QCHECK_PERIOD (HZ / 2)
97
98/*
99 * Max number of Tx descriptors to be reclaimed by the Tx timer.
100 */
101#define MAX_TIMER_TX_RECLAIM 100
102
103/*
104 * Timer index used when backing off due to memory shortage.
105 */
106#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
107
108/*
109 * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
110 * attempt to refill it.
111 */
112#define FL_STARVE_THRES 4
113
114/*
115 * Suspend an Ethernet Tx queue with fewer available descriptors than this.
116 * This is the same as calc_tx_descs() for a TSO packet with
117 * nr_frags == MAX_SKB_FRAGS.
118 */
119#define ETHTXQ_STOP_THRES \
120 (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
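/*
 * Worked example (the value depends on MAX_SKB_FRAGS): with
 * MAX_SKB_FRAGS == 18, typical for 4 KiB pages in this kernel generation,
 * this is 1 + DIV_ROUND_UP(3 * 18 / 2 + 0, 8) = 1 + DIV_ROUND_UP(27, 8) = 5
 * descriptors.
 */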
121
122/*
123 * Suspension threshold for non-Ethernet Tx queues. We require enough room
124 * for a full sized WR.
125 */
126#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
127
128/*
129 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
130 * into a WR.
131 */
132#define MAX_IMM_TX_PKT_LEN 128
133
134/*
135 * Max size of a WR sent through a control Tx queue.
136 */
137#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
138
139enum {
140 /* packet alignment in FL buffers */
141 FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES,
142 /* egress status entry size */
143 STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64
144};
145
146struct tx_sw_desc { /* SW state per Tx descriptor */
147 struct sk_buff *skb;
148 struct ulptx_sgl *sgl;
149};
150
151struct rx_sw_desc { /* SW state per Rx descriptor */
152 struct page *page;
153 dma_addr_t dma_addr;
154};
155
156/*
157 * The low bits of rx_sw_desc.dma_addr have special meaning.
158 */
159enum {
160 RX_LARGE_BUF = 1 << 0, /* buffer is larger than PAGE_SIZE */
161 RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
162};
163
164static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
165{
166 return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
167}
168
169static inline bool is_buf_mapped(const struct rx_sw_desc *d)
170{
171 return !(d->dma_addr & RX_UNMAPPED_BUF);
172}
173
174/**
175 * txq_avail - return the number of available slots in a Tx queue
176 * @q: the Tx queue
177 *
178 * Returns the number of descriptors in a Tx queue available to write new
179 * packets.
180 */
181static inline unsigned int txq_avail(const struct sge_txq *q)
182{
183 return q->size - 1 - q->in_use;
184}
185
186/**
187 * fl_cap - return the capacity of a free-buffer list
188 * @fl: the FL
189 *
190 * Returns the capacity of a free-buffer list. The capacity is less than
191 * the size because one descriptor needs to be left unpopulated, otherwise
192 * HW will think the FL is empty.
193 */
194static inline unsigned int fl_cap(const struct sge_fl *fl)
195{
196 return fl->size - 8; /* 1 descriptor = 8 buffers */
197}
198
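/*
 * Return true if a free list is down to its starvation threshold, counting
 * only buffers whose availability has already been published to HW, i.e.
 * excluding pending credits not yet handed over through the doorbell.
 */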
199static inline bool fl_starving(const struct sge_fl *fl)
200{
201 return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
202}
203
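/*
 * Map an skb's linear data and page fragments for DMA and store the bus
 * addresses in @addr, head first and then one entry per fragment.  On a
 * mapping failure any mappings already established are undone.
 */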
204static int map_skb(struct device *dev, const struct sk_buff *skb,
205 dma_addr_t *addr)
206{
207 const skb_frag_t *fp, *end;
208 const struct skb_shared_info *si;
209
210 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
211 if (dma_mapping_error(dev, *addr))
212 goto out_err;
213
214 si = skb_shinfo(skb);
215 end = &si->frags[si->nr_frags];
216
217 for (fp = si->frags; fp < end; fp++) {
218 *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size,
219 DMA_TO_DEVICE);
220 if (dma_mapping_error(dev, *addr))
221 goto unwind;
222 }
223 return 0;
224
225unwind:
226 while (fp-- > si->frags)
227 dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE);
228
229 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
230out_err:
231 return -ENOMEM;
232}
233
234#ifdef CONFIG_NEED_DMA_MAP_STATE
235static void unmap_skb(struct device *dev, const struct sk_buff *skb,
236 const dma_addr_t *addr)
237{
238 const skb_frag_t *fp, *end;
239 const struct skb_shared_info *si;
240
241 dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
242
243 si = skb_shinfo(skb);
244 end = &si->frags[si->nr_frags];
245 for (fp = si->frags; fp < end; fp++)
246 dma_unmap_page(dev, *addr++, fp->size, DMA_TO_DEVICE);
247}
248
249/**
250 * deferred_unmap_destructor - unmap a packet when it is freed
251 * @skb: the packet
252 *
253 * This is the packet destructor used for Tx packets that need to remain
254 * mapped until they are freed rather than until their Tx descriptors are
255 * freed.
256 */
257static void deferred_unmap_destructor(struct sk_buff *skb)
258{
259 unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
260}
261#endif
262
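/*
 * Unmap the DMA mappings of a Tx packet using the ULPTX SGL that was written
 * into the descriptor ring.  The awkward cases below handle an SGL that
 * wraps past the end of the ring (q->stat) back to q->desc.
 */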
263static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
264 const struct ulptx_sgl *sgl, const struct sge_txq *q)
265{
266 const struct ulptx_sge_pair *p;
267 unsigned int nfrags = skb_shinfo(skb)->nr_frags;
268
269 if (likely(skb_headlen(skb)))
270 dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
271 DMA_TO_DEVICE);
272 else {
273 dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
274 DMA_TO_DEVICE);
275 nfrags--;
276 }
277
278 /*
279 * the complexity below is because of the possibility of a wrap-around
280 * in the middle of an SGL
281 */
282 for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
283 if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
284unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
285 ntohl(p->len[0]), DMA_TO_DEVICE);
286 dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
287 ntohl(p->len[1]), DMA_TO_DEVICE);
288 p++;
289 } else if ((u8 *)p == (u8 *)q->stat) {
290 p = (const struct ulptx_sge_pair *)q->desc;
291 goto unmap;
292 } else if ((u8 *)p + 8 == (u8 *)q->stat) {
293 const __be64 *addr = (const __be64 *)q->desc;
294
295 dma_unmap_page(dev, be64_to_cpu(addr[0]),
296 ntohl(p->len[0]), DMA_TO_DEVICE);
297 dma_unmap_page(dev, be64_to_cpu(addr[1]),
298 ntohl(p->len[1]), DMA_TO_DEVICE);
299 p = (const struct ulptx_sge_pair *)&addr[2];
300 } else {
301 const __be64 *addr = (const __be64 *)q->desc;
302
303 dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
304 ntohl(p->len[0]), DMA_TO_DEVICE);
305 dma_unmap_page(dev, be64_to_cpu(addr[0]),
306 ntohl(p->len[1]), DMA_TO_DEVICE);
307 p = (const struct ulptx_sge_pair *)&addr[1];
308 }
309 }
310 if (nfrags) {
311 __be64 addr;
312
313 if ((u8 *)p == (u8 *)q->stat)
314 p = (const struct ulptx_sge_pair *)q->desc;
315 addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
316 *(const __be64 *)q->desc;
317 dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
318 DMA_TO_DEVICE);
319 }
320}
321
322/**
323 * free_tx_desc - reclaims Tx descriptors and their buffers
324 * @adapter: the adapter
325 * @q: the Tx queue to reclaim descriptors from
326 * @n: the number of descriptors to reclaim
327 * @unmap: whether the buffers should be unmapped for DMA
328 *
329 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
330 * Tx buffers. Called with the Tx queue lock held.
331 */
332static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
333 unsigned int n, bool unmap)
334{
335 struct tx_sw_desc *d;
336 unsigned int cidx = q->cidx;
337 struct device *dev = adap->pdev_dev;
338
339 d = &q->sdesc[cidx];
340 while (n--) {
341 if (d->skb) { /* an SGL is present */
342 if (unmap)
343 unmap_sgl(dev, d->skb, d->sgl, q);
344 kfree_skb(d->skb);
345 d->skb = NULL;
346 }
347 ++d;
348 if (++cidx == q->size) {
349 cidx = 0;
350 d = q->sdesc;
351 }
352 }
353 q->cidx = cidx;
354}
355
356/*
357 * Return the number of reclaimable descriptors in a Tx queue.
358 */
359static inline int reclaimable(const struct sge_txq *q)
360{
361 int hw_cidx = ntohs(q->stat->cidx);
362 hw_cidx -= q->cidx;
363 return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
364}
365
366/**
367 * reclaim_completed_tx - reclaims completed Tx descriptors
368 * @adap: the adapter
369 * @q: the Tx queue to reclaim completed descriptors from
370 * @unmap: whether the buffers should be unmapped for DMA
371 *
372 * Reclaims Tx descriptors that the SGE has indicated it has processed,
373 * and frees the associated buffers if possible. Called with the Tx
374 * queue locked.
375 */
376static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
377 bool unmap)
378{
379 int avail = reclaimable(q);
380
381 if (avail) {
382 /*
383 * Limit the amount of clean up work we do at a time to keep
384 * the Tx lock hold time O(1).
385 */
386 if (avail > MAX_TX_RECLAIM)
387 avail = MAX_TX_RECLAIM;
388
389 free_tx_desc(adap, q, avail, unmap);
390 q->in_use -= avail;
391 }
392}
393
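/*
 * Return the HW buffer size of an Rx buffer, as encoded in the low bits of
 * its DMA address: either a large FL_PG_ORDER buffer or a single page.
 */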
394static inline int get_buf_size(const struct rx_sw_desc *d)
395{
396#if FL_PG_ORDER > 0
397 return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) :
398 PAGE_SIZE;
399#else
400 return PAGE_SIZE;
401#endif
402}
403
404/**
405 * free_rx_bufs - free the Rx buffers on an SGE free list
406 * @adap: the adapter
407 * @q: the SGE free list to free buffers from
408 * @n: how many buffers to free
409 *
410 * Release the next @n buffers on an SGE free-buffer Rx queue. The
411 * buffers must be made inaccessible to HW before calling this function.
412 */
413static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
414{
415 while (n--) {
416 struct rx_sw_desc *d = &q->sdesc[q->cidx];
417
418 if (is_buf_mapped(d))
419 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
420 get_buf_size(d), PCI_DMA_FROMDEVICE);
421 put_page(d->page);
422 d->page = NULL;
423 if (++q->cidx == q->size)
424 q->cidx = 0;
425 q->avail--;
426 }
427}
428
429/**
430 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
431 * @adap: the adapter
432 * @q: the SGE free list
433 *
434 * Unmap the current buffer on an SGE free-buffer Rx queue. The
435 * buffer must be made inaccessible to HW before calling this function.
436 *
437 * This is similar to @free_rx_bufs above but does not free the buffer.
438 * Do note that the FL still loses any further access to the buffer.
439 */
440static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
441{
442 struct rx_sw_desc *d = &q->sdesc[q->cidx];
443
444 if (is_buf_mapped(d))
445 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
446 get_buf_size(d), PCI_DMA_FROMDEVICE);
447 d->page = NULL;
448 if (++q->cidx == q->size)
449 q->cidx = 0;
450 q->avail--;
451}
452
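/*
 * Hand any pending free-list credits to HW through the PF doorbell.  Credits
 * are passed to the chip in units of 8 buffers, so any remainder stays in
 * pend_cred until more buffers accumulate.
 */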
453static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
454{
455 if (q->pend_cred >= 8) {
456 wmb();
457 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO |
458 QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
459 q->pend_cred &= 7;
460 }
461}
462
463static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
464 dma_addr_t mapping)
465{
466 sd->page = pg;
467 sd->dma_addr = mapping; /* includes size low bits */
468}
469
470/**
471 * refill_fl - refill an SGE Rx buffer ring
472 * @adap: the adapter
473 * @q: the ring to refill
474 * @n: the number of new buffers to allocate
475 * @gfp: the gfp flags for the allocations
476 *
477 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
478 * allocated with the supplied gfp flags. The caller must assure that
479 * @n does not exceed the queue's capacity. If afterwards the queue is
480 * found critically low mark it as starving in the bitmap of starving FLs.
481 *
482 * Returns the number of buffers allocated.
483 */
484static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
485 gfp_t gfp)
486{
487 struct page *pg;
488 dma_addr_t mapping;
489 unsigned int cred = q->avail;
490 __be64 *d = &q->desc[q->pidx];
491 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
492
493 gfp |= __GFP_NOWARN; /* failures are expected */
494
495#if FL_PG_ORDER > 0
496 /*
497 * Prefer large buffers
498 */
499 while (n) {
500 pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER);
501 if (unlikely(!pg)) {
502 q->large_alloc_failed++;
503 break; /* fall back to single pages */
504 }
505
506 mapping = dma_map_page(adap->pdev_dev, pg, 0,
507 PAGE_SIZE << FL_PG_ORDER,
508 PCI_DMA_FROMDEVICE);
509 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
510 __free_pages(pg, FL_PG_ORDER);
511 goto out; /* do not try small pages for this error */
512 }
513 mapping |= RX_LARGE_BUF;
514 *d++ = cpu_to_be64(mapping);
515
516 set_rx_sw_desc(sd, pg, mapping);
517 sd++;
518
519 q->avail++;
520 if (++q->pidx == q->size) {
521 q->pidx = 0;
522 sd = q->sdesc;
523 d = q->desc;
524 }
525 n--;
526 }
527#endif
528
529 while (n--) {
530 pg = __netdev_alloc_page(adap->port[0], gfp);
531 if (unlikely(!pg)) {
532 q->alloc_failed++;
533 break;
534 }
535
536 mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
537 PCI_DMA_FROMDEVICE);
538 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
539 netdev_free_page(adap->port[0], pg);
540 goto out;
541 }
542 *d++ = cpu_to_be64(mapping);
543
544 set_rx_sw_desc(sd, pg, mapping);
545 sd++;
546
547 q->avail++;
548 if (++q->pidx == q->size) {
549 q->pidx = 0;
550 sd = q->sdesc;
551 d = q->desc;
552 }
553 }
554
555out: cred = q->avail - cred;
556 q->pend_cred += cred;
557 ring_fl_db(adap, q);
558
559 if (unlikely(fl_starving(q))) {
560 smp_wmb();
561 set_bit(q->cntxt_id - adap->sge.egr_start,
562 adap->sge.starving_fl);
563 }
564
565 return cred;
566}
567
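/*
 * Top up a free list from atomic context, limited to MAX_RX_REFILL buffers
 * per call.
 */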
568static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
569{
570 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
571 GFP_ATOMIC);
572}
573
574/**
575 * alloc_ring - allocate resources for an SGE descriptor ring
576 * @dev: the PCI device's core device
577 * @nelem: the number of descriptors
578 * @elem_size: the size of each descriptor
579 * @sw_size: the size of the SW state associated with each ring element
580 * @phys: the physical address of the allocated ring
581 * @metadata: address of the array holding the SW state for the ring
582 * @stat_size: extra space in HW ring for status information
583 * @node: preferred node for memory allocations
584 *
585 * Allocates resources for an SGE descriptor ring, such as Tx queues,
586 * free buffer lists, or response queues. Each SGE ring requires
587 * space for its HW descriptors plus, optionally, space for the SW state
588 * associated with each HW entry (the metadata). The function returns
589 * three values: the virtual address for the HW ring (the return value
590 * of the function), the bus address of the HW ring, and the address
591 * of the SW ring.
592 */
593static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
594 size_t sw_size, dma_addr_t *phys, void *metadata,
595 size_t stat_size, int node)
596{
597 size_t len = nelem * elem_size + stat_size;
598 void *s = NULL;
599 void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
600
601 if (!p)
602 return NULL;
603 if (sw_size) {
604 s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
605
606 if (!s) {
607 dma_free_coherent(dev, len, p, *phys);
608 return NULL;
609 }
610 }
611 if (metadata)
612 *(void **)metadata = s;
613 memset(p, 0, len);
614 return p;
615}
616
617/**
618 * sgl_len - calculates the size of an SGL of the given capacity
619 * @n: the number of SGL entries
620 *
621 * Calculates the number of flits needed for a scatter/gather list that
622 * can hold the given number of entries.
623 */
624static inline unsigned int sgl_len(unsigned int n)
625{
626 n--;
627 return (3 * n) / 2 + (n & 1) + 2;
628}
629
630/**
631 * flits_to_desc - returns the num of Tx descriptors for the given flits
632 * @n: the number of flits
633 *
634 * Returns the number of Tx descriptors needed for the supplied number
635 * of flits.
636 */
637static inline unsigned int flits_to_desc(unsigned int n)
638{
639 BUG_ON(n > SGE_MAX_WR_LEN / 8);
640 return DIV_ROUND_UP(n, 8);
641}
642
643/**
644 * is_eth_imm - can an Ethernet packet be sent as immediate data?
645 * @skb: the packet
646 *
647 * Returns whether an Ethernet packet is small enough to fit as
648 * immediate data.
649 */
650static inline int is_eth_imm(const struct sk_buff *skb)
651{
652 return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
653}
654
655/**
656 * calc_tx_flits - calculate the number of flits for a packet Tx WR
657 * @skb: the packet
658 *
659 * Returns the number of flits needed for a Tx WR for the given Ethernet
660 * packet, including the needed WR and CPL headers.
661 */
662static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
663{
664 unsigned int flits;
665
666 if (is_eth_imm(skb))
667 return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);
668
669 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
670 if (skb_shinfo(skb)->gso_size)
671 flits += 2;
672 return flits;
673}
674
675/**
676 * calc_tx_descs - calculate the number of Tx descriptors for a packet
677 * @skb: the packet
678 *
679 * Returns the number of Tx descriptors needed for the given Ethernet
680 * packet, including the needed WR and CPL headers.
681 */
682static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
683{
684 return flits_to_desc(calc_tx_flits(skb));
685}
686
687/**
688 * write_sgl - populate a scatter/gather list for a packet
689 * @skb: the packet
690 * @q: the Tx queue we are writing into
691 * @sgl: starting location for writing the SGL
692 * @end: points right after the end of the SGL
693 * @start: start offset into skb main-body data to include in the SGL
694 * @addr: the list of bus addresses for the SGL elements
695 *
696 * Generates a gather list for the buffers that make up a packet.
697 * The caller must provide adequate space for the SGL that will be written.
698 * The SGL includes all of the packet's page fragments and the data in its
699 * main body except for the first @start bytes. @sgl must be 16-byte
700 * aligned and within a Tx descriptor with available space. @end points
701 * right after the end of the SGL but does not account for any potential
702 * wrap around, i.e., @end > @sgl.
703 */
704static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
705 struct ulptx_sgl *sgl, u64 *end, unsigned int start,
706 const dma_addr_t *addr)
707{
708 unsigned int i, len;
709 struct ulptx_sge_pair *to;
710 const struct skb_shared_info *si = skb_shinfo(skb);
711 unsigned int nfrags = si->nr_frags;
712 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
713
714 len = skb_headlen(skb) - start;
715 if (likely(len)) {
716 sgl->len0 = htonl(len);
717 sgl->addr0 = cpu_to_be64(addr[0] + start);
718 nfrags++;
719 } else {
720 sgl->len0 = htonl(si->frags[0].size);
721 sgl->addr0 = cpu_to_be64(addr[1]);
722 }
723
724 sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
725 if (likely(--nfrags == 0))
726 return;
727 /*
728 * Most of the complexity below deals with the possibility we hit the
729 * end of the queue in the middle of writing the SGL. For this case
730 * only we create the SGL in a temporary buffer and then copy it.
731 */
732 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
733
734 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
735 to->len[0] = cpu_to_be32(si->frags[i].size);
736 to->len[1] = cpu_to_be32(si->frags[++i].size);
737 to->addr[0] = cpu_to_be64(addr[i]);
738 to->addr[1] = cpu_to_be64(addr[++i]);
739 }
740 if (nfrags) {
741 to->len[0] = cpu_to_be32(si->frags[i].size);
742 to->len[1] = cpu_to_be32(0);
743 to->addr[0] = cpu_to_be64(addr[i + 1]);
744 }
745 if (unlikely((u8 *)end > (u8 *)q->stat)) {
746 unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
747
748 if (likely(part0))
749 memcpy(sgl->sge, buf, part0);
750 part1 = (u8 *)end - (u8 *)q->stat;
751 memcpy(q->desc, (u8 *)buf + part0, part1);
752 end = (void *)q->desc + part1;
753 }
754 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
755 *(u64 *)end = 0;
756}
757
758/**
759 * ring_tx_db - check and potentially ring a Tx queue's doorbell
760 * @adap: the adapter
761 * @q: the Tx queue
762 * @n: number of new descriptors to give to HW
763 *
764 * Ring the doorbell for a Tx queue.
765 */
766static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
767{
768 wmb(); /* write descriptors before telling HW */
769 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
770 QID(q->cntxt_id) | PIDX(n));
771}
772
773/**
774 * inline_tx_skb - inline a packet's data into Tx descriptors
775 * @skb: the packet
776 * @q: the Tx queue where the packet will be inlined
777 * @pos: starting position in the Tx queue where to inline the packet
778 *
779 * Inline a packet's contents directly into Tx descriptors, starting at
780 * the given position within the Tx DMA ring.
781 * Most of the complexity of this operation is dealing with wrap arounds
782 * in the middle of the packet we want to inline.
783 */
784static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
785 void *pos)
786{
787 u64 *p;
788 int left = (void *)q->stat - pos;
789
790 if (likely(skb->len <= left)) {
791 if (likely(!skb->data_len))
792 skb_copy_from_linear_data(skb, pos, skb->len);
793 else
794 skb_copy_bits(skb, 0, pos, skb->len);
795 pos += skb->len;
796 } else {
797 skb_copy_bits(skb, 0, pos, left);
798 skb_copy_bits(skb, left, q->desc, skb->len - left);
799 pos = (void *)q->desc + (skb->len - left);
800 }
801
802 /* 0-pad to multiple of 16 */
803 p = PTR_ALIGN(pos, 8);
804 if ((uintptr_t)p & 8)
805 *p = 0;
806}
807
808/*
809 * Figure out what HW csum a packet wants and return the appropriate control
810 * bits.
811 */
812static u64 hwcsum(const struct sk_buff *skb)
813{
814 int csum_type;
815 const struct iphdr *iph = ip_hdr(skb);
816
817 if (iph->version == 4) {
818 if (iph->protocol == IPPROTO_TCP)
819 csum_type = TX_CSUM_TCPIP;
820 else if (iph->protocol == IPPROTO_UDP)
821 csum_type = TX_CSUM_UDPIP;
822 else {
823nocsum: /*
824 * unknown protocol, disable HW csum
825 * and hope a bad packet is detected
826 */
827 return TXPKT_L4CSUM_DIS;
828 }
829 } else {
830 /*
831 * this doesn't work with extension headers
832 */
833 const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
834
835 if (ip6h->nexthdr == IPPROTO_TCP)
836 csum_type = TX_CSUM_TCPIP6;
837 else if (ip6h->nexthdr == IPPROTO_UDP)
838 csum_type = TX_CSUM_UDPIP6;
839 else
840 goto nocsum;
841 }
842
843 if (likely(csum_type >= TX_CSUM_TCPIP))
844 return TXPKT_CSUM_TYPE(csum_type) |
845 TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
846 TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
847 else {
848 int start = skb_transport_offset(skb);
849
850 return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
851 TXPKT_CSUM_LOC(start + skb->csum_offset);
852 }
853}
854
855static void eth_txq_stop(struct sge_eth_txq *q)
856{
857 netif_tx_stop_queue(q->txq);
858 q->q.stops++;
859}
860
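/*
 * Advance a Tx queue's producer index by @n descriptors, wrapping around the
 * end of the ring as needed, and account for the descriptors as in use.
 */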
861static inline void txq_advance(struct sge_txq *q, unsigned int n)
862{
863 q->in_use += n;
864 q->pidx += n;
865 if (q->pidx >= q->size)
866 q->pidx -= q->size;
867}
868
869/**
870 * t4_eth_xmit - add a packet to an Ethernet Tx queue
871 * @skb: the packet
872 * @dev: the egress net device
873 *
874 * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
875 */
876netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
877{
878 u32 wr_mid;
879 u64 cntrl, *end;
880 int qidx, credits;
881 unsigned int flits, ndesc;
882 struct adapter *adap;
883 struct sge_eth_txq *q;
884 const struct port_info *pi;
885 struct fw_eth_tx_pkt_wr *wr;
886 struct cpl_tx_pkt_core *cpl;
887 const struct skb_shared_info *ssi;
888 dma_addr_t addr[MAX_SKB_FRAGS + 1];
889
890 /*
891 * The chip min packet length is 10 octets but play safe and reject
892 * anything shorter than an Ethernet header.
893 */
894 if (unlikely(skb->len < ETH_HLEN)) {
895out_free: dev_kfree_skb(skb);
896 return NETDEV_TX_OK;
897 }
898
899 pi = netdev_priv(dev);
900 adap = pi->adapter;
901 qidx = skb_get_queue_mapping(skb);
902 q = &adap->sge.ethtxq[qidx + pi->first_qset];
903
904 reclaim_completed_tx(adap, &q->q, true);
905
906 flits = calc_tx_flits(skb);
907 ndesc = flits_to_desc(flits);
908 credits = txq_avail(&q->q) - ndesc;
909
910 if (unlikely(credits < 0)) {
911 eth_txq_stop(q);
912 dev_err(adap->pdev_dev,
913 "%s: Tx ring %u full while queue awake!\n",
914 dev->name, qidx);
915 return NETDEV_TX_BUSY;
916 }
917
918 if (!is_eth_imm(skb) &&
919 unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
920 q->mapping_err++;
921 goto out_free;
922 }
923
924 wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
925 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
926 eth_txq_stop(q);
927 wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
928 }
929
930 wr = (void *)&q->q.desc[q->q.pidx];
931 wr->equiq_to_len16 = htonl(wr_mid);
932 wr->r3 = cpu_to_be64(0);
933 end = (u64 *)wr + flits;
934
935 ssi = skb_shinfo(skb);
936 if (ssi->gso_size) {
937 struct cpl_tx_pkt_lso *lso = (void *)wr;
938 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
939 int l3hdr_len = skb_network_header_len(skb);
940 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
941
942 wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
943 FW_WR_IMMDLEN(sizeof(*lso)));
944 lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
945 LSO_FIRST_SLICE | LSO_LAST_SLICE |
946 LSO_IPV6(v6) |
947 LSO_ETHHDR_LEN(eth_xtra_len / 4) |
948 LSO_IPHDR_LEN(l3hdr_len / 4) |
949 LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
950 lso->c.ipid_ofst = htons(0);
951 lso->c.mss = htons(ssi->gso_size);
952 lso->c.seqno_offset = htonl(0);
953 lso->c.len = htonl(skb->len);
954 cpl = (void *)(lso + 1);
955 cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
956 TXPKT_IPHDR_LEN(l3hdr_len) |
957 TXPKT_ETHHDR_LEN(eth_xtra_len);
958 q->tso++;
959 q->tx_cso += ssi->gso_segs;
960 } else {
961 int len;
962
963 len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
964 wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
965 FW_WR_IMMDLEN(len));
966 cpl = (void *)(wr + 1);
967 if (skb->ip_summed == CHECKSUM_PARTIAL) {
968 cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
969 q->tx_cso++;
970 } else
971 cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
972 }
973
974 if (vlan_tx_tag_present(skb)) {
975 q->vlan_ins++;
976 cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
977 }
978
979 cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
980 TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
981 cpl->pack = htons(0);
982 cpl->len = htons(skb->len);
983 cpl->ctrl1 = cpu_to_be64(cntrl);
984
985 if (is_eth_imm(skb)) {
986 inline_tx_skb(skb, &q->q, cpl + 1);
987 dev_kfree_skb(skb);
988 } else {
989 int last_desc;
990
991 write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
992 addr);
993 skb_orphan(skb);
994
995 last_desc = q->q.pidx + ndesc - 1;
996 if (last_desc >= q->q.size)
997 last_desc -= q->q.size;
998 q->q.sdesc[last_desc].skb = skb;
999 q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
1000 }
1001
1002 txq_advance(&q->q, ndesc);
1003
1004 ring_tx_db(adap, &q->q, ndesc);
1005 return NETDEV_TX_OK;
1006}
1007
1008/**
1009 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1010 * @q: the SGE control Tx queue
1011 *
1012 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1013 * that send only immediate data (presently just the control queues) and
1014 * thus do not have any sk_buffs to release.
1015 */
1016static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1017{
1018 int hw_cidx = ntohs(q->stat->cidx);
1019 int reclaim = hw_cidx - q->cidx;
1020
1021 if (reclaim < 0)
1022 reclaim += q->size;
1023
1024 q->in_use -= reclaim;
1025 q->cidx = hw_cidx;
1026}
1027
1028/**
1029 * is_imm - check whether a packet can be sent as immediate data
1030 * @skb: the packet
1031 *
1032 * Returns true if a packet can be sent as a WR with immediate data.
1033 */
1034static inline int is_imm(const struct sk_buff *skb)
1035{
1036 return skb->len <= MAX_CTRL_WR_LEN;
1037}
1038
1039/**
1040 * ctrlq_check_stop - check if a control queue is full and should stop
1041 * @q: the queue
1042 * @wr: most recent WR written to the queue
1043 *
1044 * Check if a control queue has become full and should be stopped.
1045 * We clean up control queue descriptors very lazily, only when we are out.
1046 * If the queue is still full after reclaiming any completed descriptors
1047 * we suspend it and have the last WR wake it up.
1048 */
1049static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
1050{
1051 reclaim_completed_tx_imm(&q->q);
1052 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1053 wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
1054 q->q.stops++;
1055 q->full = 1;
1056 }
1057}
1058
1059/**
1060 * ctrl_xmit - send a packet through an SGE control Tx queue
1061 * @q: the control queue
1062 * @skb: the packet
1063 *
1064 * Send a packet through an SGE control Tx queue. Packets sent through
1065 * a control queue must fit entirely as immediate data.
1066 */
1067static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
1068{
1069 unsigned int ndesc;
1070 struct fw_wr_hdr *wr;
1071
1072 if (unlikely(!is_imm(skb))) {
1073 WARN_ON(1);
1074 dev_kfree_skb(skb);
1075 return NET_XMIT_DROP;
1076 }
1077
1078 ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
1079 spin_lock(&q->sendq.lock);
1080
1081 if (unlikely(q->full)) {
1082 skb->priority = ndesc; /* save for restart */
1083 __skb_queue_tail(&q->sendq, skb);
1084 spin_unlock(&q->sendq.lock);
1085 return NET_XMIT_CN;
1086 }
1087
1088 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1089 inline_tx_skb(skb, &q->q, wr);
1090
1091 txq_advance(&q->q, ndesc);
1092 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
1093 ctrlq_check_stop(q, wr);
1094
1095 ring_tx_db(q->adap, &q->q, ndesc);
1096 spin_unlock(&q->sendq.lock);
1097
1098 kfree_skb(skb);
1099 return NET_XMIT_SUCCESS;
1100}
1101
1102/**
1103 * restart_ctrlq - restart a suspended control queue
1104 * @data: the control queue to restart
1105 *
1106 * Resumes transmission on a suspended Tx control queue.
1107 */
1108static void restart_ctrlq(unsigned long data)
1109{
1110 struct sk_buff *skb;
1111 unsigned int written = 0;
1112 struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
1113
1114 spin_lock(&q->sendq.lock);
1115 reclaim_completed_tx_imm(&q->q);
1116 BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */
1117
1118 while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
1119 struct fw_wr_hdr *wr;
1120 unsigned int ndesc = skb->priority; /* previously saved */
1121
1122 /*
1123 * Write descriptors and free skbs outside the lock to limit
1124 * wait times. q->full is still set so new skbs will be queued.
1125 */
1126 spin_unlock(&q->sendq.lock);
1127
1128 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1129 inline_tx_skb(skb, &q->q, wr);
1130 kfree_skb(skb);
1131
1132 written += ndesc;
1133 txq_advance(&q->q, ndesc);
1134 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1135 unsigned long old = q->q.stops;
1136
1137 ctrlq_check_stop(q, wr);
1138 if (q->q.stops != old) { /* suspended anew */
1139 spin_lock(&q->sendq.lock);
1140 goto ringdb;
1141 }
1142 }
1143 if (written > 16) {
1144 ring_tx_db(q->adap, &q->q, written);
1145 written = 0;
1146 }
1147 spin_lock(&q->sendq.lock);
1148 }
1149 q->full = 0;
1150ringdb: if (written)
1151 ring_tx_db(q->adap, &q->q, written);
1152 spin_unlock(&q->sendq.lock);
1153}
1154
1155/**
1156 * t4_mgmt_tx - send a management message
1157 * @adap: the adapter
1158 * @skb: the packet containing the management message
1159 *
1160 * Send a management message through control queue 0.
1161 */
1162int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1163{
1164 int ret;
1165
1166 local_bh_disable();
1167 ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
1168 local_bh_enable();
1169 return ret;
1170}
1171
1172/**
1173 * is_ofld_imm - check whether a packet can be sent as immediate data
1174 * @skb: the packet
1175 *
1176 * Returns true if a packet can be sent as an offload WR with immediate
1177 * data. We currently use the same limit as for Ethernet packets.
1178 */
1179static inline int is_ofld_imm(const struct sk_buff *skb)
1180{
1181 return skb->len <= MAX_IMM_TX_PKT_LEN;
1182}
1183
1184/**
1185 * calc_tx_flits_ofld - calculate # of flits for an offload packet
1186 * @skb: the packet
1187 *
1188 * Returns the number of flits needed for the given offload packet.
1189 * These packets are already fully constructed and no additional headers
1190 * will be added.
1191 */
1192static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
1193{
1194 unsigned int flits, cnt;
1195
1196 if (is_ofld_imm(skb))
1197 return DIV_ROUND_UP(skb->len, 8);
1198
1199 flits = skb_transport_offset(skb) / 8U; /* headers */
1200 cnt = skb_shinfo(skb)->nr_frags;
1201 if (skb->tail != skb->transport_header)
1202 cnt++;
1203 return flits + sgl_len(cnt);
1204}
1205
1206/**
1207 * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
1208 * @q: the queue to stop
1210 *
1211 * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
1212 * inability to map packets. A periodic timer attempts to restart
1213 * queues so marked.
1214 */
1215static void txq_stop_maperr(struct sge_ofld_txq *q)
1216{
1217 q->mapping_err++;
1218 q->q.stops++;
1219 set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
1220 q->adap->sge.txq_maperr);
1221}
1222
1223/**
1224 * ofldtxq_stop - stop an offload Tx queue that has become full
1225 * @q: the queue to stop
1226 * @skb: the packet causing the queue to become full
1227 *
1228 * Stops an offload Tx queue that has become full and modifies the packet
1229 * being written to request a wakeup.
1230 */
1231static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
1232{
1233 struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
1234
1235 wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
1236 q->q.stops++;
1237 q->full = 1;
1238}
1239
1240/**
1241 * service_ofldq - restart a suspended offload queue
1242 * @q: the offload queue
1243 *
1244 * Services an offload Tx queue by moving packets from its packet queue
1245 * to the HW Tx ring. The function starts and ends with the queue locked.
1246 */
1247static void service_ofldq(struct sge_ofld_txq *q)
1248{
1249 u64 *pos;
1250 int credits;
1251 struct sk_buff *skb;
1252 unsigned int written = 0;
1253 unsigned int flits, ndesc;
1254
1255 while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
1256 /*
1257 * We drop the lock but leave skb on sendq, thus retaining
1258 * exclusive access to the state of the queue.
1259 */
1260 spin_unlock(&q->sendq.lock);
1261
1262 reclaim_completed_tx(q->adap, &q->q, false);
1263
1264 flits = skb->priority; /* previously saved */
1265 ndesc = flits_to_desc(flits);
1266 credits = txq_avail(&q->q) - ndesc;
1267 BUG_ON(credits < 0);
1268 if (unlikely(credits < TXQ_STOP_THRES))
1269 ofldtxq_stop(q, skb);
1270
1271 pos = (u64 *)&q->q.desc[q->q.pidx];
1272 if (is_ofld_imm(skb))
1273 inline_tx_skb(skb, &q->q, pos);
1274 else if (map_skb(q->adap->pdev_dev, skb,
1275 (dma_addr_t *)skb->head)) {
1276 txq_stop_maperr(q);
1277 spin_lock(&q->sendq.lock);
1278 break;
1279 } else {
1280 int last_desc, hdr_len = skb_transport_offset(skb);
1281
1282 memcpy(pos, skb->data, hdr_len);
1283 write_sgl(skb, &q->q, (void *)pos + hdr_len,
1284 pos + flits, hdr_len,
1285 (dma_addr_t *)skb->head);
1286#ifdef CONFIG_NEED_DMA_MAP_STATE
1287 skb->dev = q->adap->port[0];
1288 skb->destructor = deferred_unmap_destructor;
1289#endif
1290 last_desc = q->q.pidx + ndesc - 1;
1291 if (last_desc >= q->q.size)
1292 last_desc -= q->q.size;
1293 q->q.sdesc[last_desc].skb = skb;
1294 }
1295
1296 txq_advance(&q->q, ndesc);
1297 written += ndesc;
1298 if (unlikely(written > 32)) {
1299 ring_tx_db(q->adap, &q->q, written);
1300 written = 0;
1301 }
1302
1303 spin_lock(&q->sendq.lock);
1304 __skb_unlink(skb, &q->sendq);
1305 if (is_ofld_imm(skb))
1306 kfree_skb(skb);
1307 }
1308 if (likely(written))
1309 ring_tx_db(q->adap, &q->q, written);
1310}
1311
1312/**
1313 * ofld_xmit - send a packet through an offload queue
1314 * @q: the Tx offload queue
1315 * @skb: the packet
1316 *
1317 * Send an offload packet through an SGE offload queue.
1318 */
1319static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
1320{
1321 skb->priority = calc_tx_flits_ofld(skb); /* save for restart */
1322 spin_lock(&q->sendq.lock);
1323 __skb_queue_tail(&q->sendq, skb);
1324 if (q->sendq.qlen == 1)
1325 service_ofldq(q);
1326 spin_unlock(&q->sendq.lock);
1327 return NET_XMIT_SUCCESS;
1328}
1329
1330/**
1331 * restart_ofldq - restart a suspended offload queue
1332 * @data: the offload queue to restart
1333 *
1334 * Resumes transmission on a suspended Tx offload queue.
1335 */
1336static void restart_ofldq(unsigned long data)
1337{
1338 struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
1339
1340 spin_lock(&q->sendq.lock);
1341 q->full = 0; /* the queue actually is completely empty now */
1342 service_ofldq(q);
1343 spin_unlock(&q->sendq.lock);
1344}
1345
1346/**
1347 * skb_txq - return the Tx queue an offload packet should use
1348 * @skb: the packet
1349 *
1350 * Returns the Tx queue an offload packet should use as indicated by bits
1351 * 1-15 in the packet's queue_mapping.
1352 */
1353static inline unsigned int skb_txq(const struct sk_buff *skb)
1354{
1355 return skb->queue_mapping >> 1;
1356}
1357
1358/**
1359 * is_ctrl_pkt - return whether an offload packet is a control packet
1360 * @skb: the packet
1361 *
1362 * Returns whether an offload packet should use an OFLD or a CTRL
1363 * Tx queue as indicated by bit 0 in the packet's queue_mapping.
1364 */
1365static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
1366{
1367 return skb->queue_mapping & 1;
1368}
1369
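/*
 * Dispatch an offload packet to the control or offload Tx queue selected by
 * its queue_mapping.
 */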
1370static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
1371{
1372 unsigned int idx = skb_txq(skb);
1373
1374 if (unlikely(is_ctrl_pkt(skb)))
1375 return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
1376 return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
1377}
1378
1379/**
1380 * t4_ofld_send - send an offload packet
1381 * @adap: the adapter
1382 * @skb: the packet
1383 *
1384 * Sends an offload packet. We use the packet queue_mapping to select the
1385 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1386 * should be sent as regular or control, bits 1-15 select the queue.
1387 */
1388int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
1389{
1390 int ret;
1391
1392 local_bh_disable();
1393 ret = ofld_send(adap, skb);
1394 local_bh_enable();
1395 return ret;
1396}
1397
1398/**
1399 * cxgb4_ofld_send - send an offload packet
1400 * @dev: the net device
1401 * @skb: the packet
1402 *
1403 * Sends an offload packet. This is an exported version of @t4_ofld_send,
1404 * intended for ULDs.
1405 */
1406int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
1407{
1408 return t4_ofld_send(netdev2adap(dev), skb);
1409}
1410EXPORT_SYMBOL(cxgb4_ofld_send);
1411
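/*
 * Attach the pages of a gather list to an skb as page fragments, skipping
 * the first @offset bytes.  Only the last page's reference count is raised
 * because ownership of the other pages passes to the skb.
 */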
1412static inline void copy_frags(struct skb_shared_info *ssi,
1413 const struct pkt_gl *gl, unsigned int offset)
1414{
1415 unsigned int n;
1416
1417 /* usually there's just one frag */
1418 ssi->frags[0].page = gl->frags[0].page;
1419 ssi->frags[0].page_offset = gl->frags[0].page_offset + offset;
1420 ssi->frags[0].size = gl->frags[0].size - offset;
1421 ssi->nr_frags = gl->nfrags;
1422 n = gl->nfrags - 1;
1423 if (n)
1424 memcpy(&ssi->frags[1], &gl->frags[1], n * sizeof(skb_frag_t));
1425
1426 /* get a reference to the last page, we don't own it */
1427 get_page(gl->frags[n].page);
1428}
1429
1430/**
1431 * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
1432 * @gl: the gather list
1433 * @skb_len: size of sk_buff main body if it carries fragments
1434 * @pull_len: amount of data to move to the sk_buff's main body
1435 *
1436 * Builds an sk_buff from the given packet gather list. Returns the
1437 * sk_buff or %NULL if sk_buff allocation failed.
1438 */
1439struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
1440 unsigned int skb_len, unsigned int pull_len)
1441{
1442 struct sk_buff *skb;
1443
1444 /*
1445 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
1446 * size, which is expected since buffers are at least PAGE_SIZEd.
1447 * In this case packets up to RX_COPY_THRES have only one fragment.
1448 */
1449 if (gl->tot_len <= RX_COPY_THRES) {
1450 skb = dev_alloc_skb(gl->tot_len);
1451 if (unlikely(!skb))
1452 goto out;
1453 __skb_put(skb, gl->tot_len);
1454 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1455 } else {
1456 skb = dev_alloc_skb(skb_len);
1457 if (unlikely(!skb))
1458 goto out;
1459 __skb_put(skb, pull_len);
1460 skb_copy_to_linear_data(skb, gl->va, pull_len);
1461
1462 copy_frags(skb_shinfo(skb), gl, pull_len);
1463 skb->len = gl->tot_len;
1464 skb->data_len = skb->len - pull_len;
1465 skb->truesize += skb->data_len;
1466 }
1467out: return skb;
1468}
1469EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
1470
1471/**
1472 * t4_pktgl_free - free a packet gather list
1473 * @gl: the gather list
1474 *
1475 * Releases the pages of a packet gather list. We do not own the last
1476 * page on the list and do not free it.
1477 */
1478static void t4_pktgl_free(const struct pkt_gl *gl)
1479{
1480 int n;
1481 const skb_frag_t *p;
1482
1483 for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
1484 put_page(p->page);
1485}
1486
1487/*
1488 * Process an MPS trace packet. Give it an unused protocol number so it won't
1489 * be delivered to anyone and send it to the stack for capture.
1490 */
1491static noinline int handle_trace_pkt(struct adapter *adap,
1492 const struct pkt_gl *gl)
1493{
1494 struct sk_buff *skb;
1495 struct cpl_trace_pkt *p;
1496
1497 skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
1498 if (unlikely(!skb)) {
1499 t4_pktgl_free(gl);
1500 return 0;
1501 }
1502
1503 p = (struct cpl_trace_pkt *)skb->data;
1504 __skb_pull(skb, sizeof(*p));
1505 skb_reset_mac_header(skb);
1506 skb->protocol = htons(0xffff);
1507 skb->dev = adap->port[0];
1508 netif_receive_skb(skb);
1509 return 0;
1510}
1511
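/*
 * Feed a received packet to GRO.  The gather-list pages are attached as
 * fragments to the skb obtained from napi_get_frags(); the caller has
 * already verified that HW validated the checksum.
 */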
1512static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1513 const struct cpl_rx_pkt *pkt)
1514{
1515 int ret;
1516 struct sk_buff *skb;
1517
1518 skb = napi_get_frags(&rxq->rspq.napi);
1519 if (unlikely(!skb)) {
1520 t4_pktgl_free(gl);
1521 rxq->stats.rx_drops++;
1522 return;
1523 }
1524
1525 copy_frags(skb_shinfo(skb), gl, RX_PKT_PAD);
1526 skb->len = gl->tot_len - RX_PKT_PAD;
1527 skb->data_len = skb->len;
1528 skb->truesize += skb->data_len;
1529 skb->ip_summed = CHECKSUM_UNNECESSARY;
1530 skb_record_rx_queue(skb, rxq->rspq.idx);
1531 if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
1532 skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
1533
1534 if (unlikely(pkt->vlan_ex)) {
1535 __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
1536 rxq->stats.vlan_ex++;
1537 }
1538 ret = napi_gro_frags(&rxq->rspq.napi);
1539 if (ret == GRO_HELD)
1540 rxq->stats.lro_pkts++;
1541 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1542 rxq->stats.lro_merged++;
1543 rxq->stats.pkts++;
1544 rxq->stats.rx_cso++;
1545}
1546
1547/**
1548 * t4_ethrx_handler - process an ingress ethernet packet
1549 * @q: the response queue that received the packet
1550 * @rsp: the response queue descriptor holding the RX_PKT message
1551 * @si: the gather list of packet fragments
1552 *
1553 * Process an ingress ethernet packet and deliver it to the stack.
1554 */
1555int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1556 const struct pkt_gl *si)
1557{
1558 bool csum_ok;
1559 struct sk_buff *skb;
1560 const struct cpl_rx_pkt *pkt;
1561 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1562
1563 if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
1564 return handle_trace_pkt(q->adap, si);
1565
1566 pkt = (const struct cpl_rx_pkt *)rsp;
1567 csum_ok = pkt->csum_calc && !pkt->err_vec;
1568 if ((pkt->l2info & htonl(RXF_TCP)) &&
1569 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
1570 do_gro(rxq, si, pkt);
1571 return 0;
1572 }
1573
1574 skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
1575 if (unlikely(!skb)) {
1576 t4_pktgl_free(si);
1577 rxq->stats.rx_drops++;
1578 return 0;
1579 }
1580
1581 __skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */
1582 skb->protocol = eth_type_trans(skb, q->netdev);
1583 skb_record_rx_queue(skb, q->idx);
1584 if (skb->dev->features & NETIF_F_RXHASH)
1585 skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
1586
1587 rxq->stats.pkts++;
1588
1589 if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
1590 (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
1591 if (!pkt->ip_frag) {
1592 skb->ip_summed = CHECKSUM_UNNECESSARY;
1593 rxq->stats.rx_cso++;
1594 } else if (pkt->l2info & htonl(RXF_IP)) {
1595 __sum16 c = (__force __sum16)pkt->csum;
1596 skb->csum = csum_unfold(c);
1597 skb->ip_summed = CHECKSUM_COMPLETE;
1598 rxq->stats.rx_cso++;
1599 }
1600 } else
1601 skb_checksum_none_assert(skb);
1602
1603 if (unlikely(pkt->vlan_ex)) {
1604 __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
1605 rxq->stats.vlan_ex++;
1606 }
1607 netif_receive_skb(skb);
1608 return 0;
1609}
1610
1611/**
1612 * restore_rx_bufs - put back a packet's Rx buffers
1613 * @si: the packet gather list
1614 * @q: the SGE free list
1615 * @frags: number of FL buffers to restore
1616 *
1617 * Puts back on an FL the Rx buffers associated with @si. The buffers
1618 * have already been unmapped and are left unmapped; we mark them so to
1619 * prevent further unmapping attempts.
1620 *
1621 * This function undoes a series of @unmap_rx_buf calls when we find out
1622 * that the current packet can't be processed right away after all and we
1623 * need to come back to it later. This is a very rare event and there's
1624 * no effort to make this particularly efficient.
1625 */
1626static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
1627 int frags)
1628{
1629 struct rx_sw_desc *d;
1630
1631 while (frags--) {
1632 if (q->cidx == 0)
1633 q->cidx = q->size - 1;
1634 else
1635 q->cidx--;
1636 d = &q->sdesc[q->cidx];
1637 d->page = si->frags[frags].page;
1638 d->dma_addr |= RX_UNMAPPED_BUF;
1639 q->avail++;
1640 }
1641}
1642
1643/**
1644 * is_new_response - check if a response is newly written
1645 * @r: the response descriptor
1646 * @q: the response queue
1647 *
1648 * Returns true if a response descriptor contains a yet unprocessed
1649 * response.
1650 */
1651static inline bool is_new_response(const struct rsp_ctrl *r,
1652 const struct sge_rspq *q)
1653{
1654 return RSPD_GEN(r->type_gen) == q->gen;
1655}
1656
1657/**
1658 * rspq_next - advance to the next entry in a response queue
1659 * @q: the queue
1660 *
1661 * Updates the state of a response queue to advance it to the next entry.
1662 */
1663static inline void rspq_next(struct sge_rspq *q)
1664{
1665 q->cur_desc = (void *)q->cur_desc + q->iqe_len;
1666 if (unlikely(++q->cidx == q->size)) {
1667 q->cidx = 0;
1668 q->gen ^= 1;
1669 q->cur_desc = q->desc;
1670 }
1671}
1672
1673/**
1674 * process_responses - process responses from an SGE response queue
1675 * @q: the ingress queue to process
1676 * @budget: how many responses can be processed in this round
1677 *
1678 * Process responses from an SGE response queue up to the supplied budget.
1679 * Responses include received packets as well as control messages from FW
1680 * or HW.
1681 *
1682 * Additionally choose the interrupt holdoff time for the next interrupt
1683 * on this queue. If the system is under memory shortage use a fairly
1684 * long delay to help recovery.
1685 */
1686static int process_responses(struct sge_rspq *q, int budget)
1687{
1688 int ret, rsp_type;
1689 int budget_left = budget;
1690 const struct rsp_ctrl *rc;
1691 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1692
1693 while (likely(budget_left)) {
1694 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
1695 if (!is_new_response(rc, q))
1696 break;
1697
1698 rmb();
1699 rsp_type = RSPD_TYPE(rc->type_gen);
1700 if (likely(rsp_type == RSP_TYPE_FLBUF)) {
1701 skb_frag_t *fp;
1702 struct pkt_gl si;
1703 const struct rx_sw_desc *rsd;
1704 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
1705
1706 if (len & RSPD_NEWBUF) {
1707 if (likely(q->offset > 0)) {
1708 free_rx_bufs(q->adap, &rxq->fl, 1);
1709 q->offset = 0;
1710 }
1711 len = RSPD_LEN(len);
1712 }
1713 si.tot_len = len;
1714
1715 /* gather packet fragments */
1716 for (frags = 0, fp = si.frags; ; frags++, fp++) {
1717 rsd = &rxq->fl.sdesc[rxq->fl.cidx];
1718 bufsz = get_buf_size(rsd);
1719 fp->page = rsd->page;
1720 fp->page_offset = q->offset;
1721 fp->size = min(bufsz, len);
1722 len -= fp->size;
1723 if (!len)
1724 break;
1725 unmap_rx_buf(q->adap, &rxq->fl);
1726 }
1727
1728 /*
1729 * Last buffer remains mapped so explicitly make it
1730 * coherent for CPU access.
1731 */
1732 dma_sync_single_for_cpu(q->adap->pdev_dev,
1733 get_buf_addr(rsd),
1734 fp->size, DMA_FROM_DEVICE);
1735
1736 si.va = page_address(si.frags[0].page) +
1737 si.frags[0].page_offset;
1738 prefetch(si.va);
1739
1740 si.nfrags = frags + 1;
1741 ret = q->handler(q, q->cur_desc, &si);
1742 if (likely(ret == 0))
1743 q->offset += ALIGN(fp->size, FL_ALIGN);
1744 else
1745 restore_rx_bufs(&si, &rxq->fl, frags);
1746 } else if (likely(rsp_type == RSP_TYPE_CPL)) {
1747 ret = q->handler(q, q->cur_desc, NULL);
1748 } else {
1749 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
1750 }
1751
1752 if (unlikely(ret)) {
1753 /* couldn't process descriptor, back off for recovery */
1754 q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
1755 break;
1756 }
1757
1758 rspq_next(q);
1759 budget_left--;
1760 }
1761
1762 if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
1763 __refill_fl(q->adap, &rxq->fl);
1764 return budget - budget_left;
1765}
1766
1767/**
1768 * napi_rx_handler - the NAPI handler for Rx processing
1769 * @napi: the napi instance
1770 * @budget: how many packets we can process in this round
1771 *
1772 * Handler for new data events when using NAPI. This does not need any
1773 * locking or protection from interrupts as data interrupts are off at
1774 * this point and other adapter interrupts do not interfere (the latter
1775 * is not a concern at all with MSI-X as non-data interrupts then have
1776 * a separate handler).
1777 */
1778static int napi_rx_handler(struct napi_struct *napi, int budget)
1779{
1780 unsigned int params;
1781 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
1782 int work_done = process_responses(q, budget);
1783
1784 if (likely(work_done < budget)) {
1785 napi_complete(napi);
1786 params = q->next_intr_params;
1787 q->next_intr_params = q->intr_params;
1788 } else
1789 params = QINTR_TIMER_IDX(7);
1790
1791 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) |
1792 INGRESSQID((u32)q->cntxt_id) | SEINTARM(params));
1793 return work_done;
1794}
1795
1796/*
1797 * The MSI-X interrupt handler for an SGE response queue.
1798 */
1799irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
1800{
1801 struct sge_rspq *q = cookie;
1802
1803 napi_schedule(&q->napi);
1804 return IRQ_HANDLED;
1805}
1806
1807/*
1808 * Process the indirect interrupt entries in the interrupt queue and kick off
1809 * NAPI for each queue that has generated an entry.
1810 */
1811static unsigned int process_intrq(struct adapter *adap)
1812{
1813 unsigned int credits;
1814 const struct rsp_ctrl *rc;
1815 struct sge_rspq *q = &adap->sge.intrq;
1816
1817 spin_lock(&adap->sge.intrq_lock);
1818 for (credits = 0; ; credits++) {
1819 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
1820 if (!is_new_response(rc, q))
1821 break;
1822
1823 rmb();
1824 if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
1825 unsigned int qid = ntohl(rc->pldbuflen_qid);
1826
1827 qid -= adap->sge.ingr_start;
1828 napi_schedule(&adap->sge.ingr_map[qid]->napi);
1829 }
1830
1831 rspq_next(q);
1832 }
1833
1834 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) |
1835 INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params));
1836 spin_unlock(&adap->sge.intrq_lock);
1837 return credits;
1838}
1839
1840/*
1841 * The MSI interrupt handler, which handles data events from SGE response queues
1842 * as well as error and other async events as they all use the same MSI vector.
1843 */
1844static irqreturn_t t4_intr_msi(int irq, void *cookie)
1845{
1846 struct adapter *adap = cookie;
1847
1848 t4_slow_intr_handler(adap);
1849 process_intrq(adap);
1850 return IRQ_HANDLED;
1851}
1852
1853/*
1854 * Interrupt handler for legacy INTx interrupts.
1855 * Handles data events from SGE response queues as well as error and other
1856 * async events as they all use the same interrupt line.
1857 */
1858static irqreturn_t t4_intr_intx(int irq, void *cookie)
1859{
1860 struct adapter *adap = cookie;
1861
1862 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
1863 if (t4_slow_intr_handler(adap) | process_intrq(adap))
1864 return IRQ_HANDLED;
1865 return IRQ_NONE; /* probably shared interrupt */
1866}
1867
1868/**
1869 * t4_intr_handler - select the top-level interrupt handler
1870 * @adap: the adapter
1871 *
1872 * Selects the top-level interrupt handler based on the type of interrupts
1873 * (MSI-X, MSI, or INTx).
1874 */
1875irq_handler_t t4_intr_handler(struct adapter *adap)
1876{
1877 if (adap->flags & USING_MSIX)
1878 return t4_sge_intr_msix;
1879 if (adap->flags & USING_MSI)
1880 return t4_intr_msi;
1881 return t4_intr_intx;
1882}
1883
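/*
 * Periodic Rx timer.  Reschedules NAPI on queues whose free lists are
 * starving and checks the SGE ingress DMA engines for apparent starvation.
 */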
1884static void sge_rx_timer_cb(unsigned long data)
1885{
1886 unsigned long m;
1887 unsigned int i, cnt[2];
1888 struct adapter *adap = (struct adapter *)data;
1889 struct sge *s = &adap->sge;
1890
1891 for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
1892 for (m = s->starving_fl[i]; m; m &= m - 1) {
1893 struct sge_eth_rxq *rxq;
1894 unsigned int id = __ffs(m) + i * BITS_PER_LONG;
1895 struct sge_fl *fl = s->egr_map[id];
1896
1897 clear_bit(id, s->starving_fl);
1898 smp_mb__after_clear_bit();
1899
1900 if (fl_starving(fl)) {
1901 rxq = container_of(fl, struct sge_eth_rxq, fl);
1902 if (napi_reschedule(&rxq->rspq.napi))
1903 fl->starving++;
1904 else
1905 set_bit(id, s->starving_fl);
1906 }
1907 }
1908
1909 t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
1910 cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
1911 cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
1912
1913 for (i = 0; i < 2; i++)
1914 if (cnt[i] >= s->starve_thres) {
1915 if (s->idma_state[i] || cnt[i] == 0xffffffff)
1916 continue;
1917 s->idma_state[i] = 1;
1918 t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
1919 m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
1920 dev_warn(adap->pdev_dev,
1921 "SGE idma%u starvation detected for "
1922 "queue %lu\n", i, m & 0xffff);
1923 } else if (s->idma_state[i])
1924 s->idma_state[i] = 0;
1925
1926 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
1927}
1928
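/*
 * Periodic Tx timer.  Resumes offload queues that were stopped by mapping
 * errors and reclaims up to MAX_TIMER_TX_RECLAIM completed Ethernet Tx
 * descriptors, rotating through the Ethernet Tx queues.
 */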
1929static void sge_tx_timer_cb(unsigned long data)
1930{
1931 unsigned long m;
1932 unsigned int i, budget;
1933 struct adapter *adap = (struct adapter *)data;
1934 struct sge *s = &adap->sge;
1935
1936 for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
1937 for (m = s->txq_maperr[i]; m; m &= m - 1) {
1938 unsigned long id = __ffs(m) + i * BITS_PER_LONG;
1939 struct sge_ofld_txq *txq = s->egr_map[id];
1940
1941 clear_bit(id, s->txq_maperr);
1942 tasklet_schedule(&txq->qresume_tsk);
1943 }
1944
1945 budget = MAX_TIMER_TX_RECLAIM;
1946 i = s->ethtxq_rover;
1947 do {
1948 struct sge_eth_txq *q = &s->ethtxq[i];
1949
1950 if (q->q.in_use &&
1951 time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
1952 __netif_tx_trylock(q->txq)) {
1953 int avail = reclaimable(&q->q);
1954
1955 if (avail) {
1956 if (avail > budget)
1957 avail = budget;
1958
1959 free_tx_desc(adap, &q->q, avail, true);
1960 q->q.in_use -= avail;
1961 budget -= avail;
1962 }
1963 __netif_tx_unlock(q->txq);
1964 }
1965
1966 if (++i >= s->ethqsets)
1967 i = 0;
1968 } while (budget && i != s->ethtxq_rover);
1969 s->ethtxq_rover = i;
1970 mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
1971}
1972
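/*
 * Allocate and initialize an SGE ingress queue and, optionally, its
 * associated free list, issuing a FW_IQ_CMD to create both HW contexts.
 * A negative @intr_idx requests that completions be forwarded to another
 * ingress queue instead of generating an interrupt directly.
 */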
1973int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
1974 struct net_device *dev, int intr_idx,
1975 struct sge_fl *fl, rspq_handler_t hnd)
1976{
1977 int ret, flsz = 0;
1978 struct fw_iq_cmd c;
1979 struct port_info *pi = netdev_priv(dev);
1980
1981 /* Size needs to be multiple of 16, including status entry. */
1982 iq->size = roundup(iq->size, 16);
1983
1984 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
1985 &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
1986 if (!iq->desc)
1987 return -ENOMEM;
1988
1989 memset(&c, 0, sizeof(c));
1990 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
1991 FW_CMD_WRITE | FW_CMD_EXEC |
1992 FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0));
1993 c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
1994 FW_LEN16(c));
1995 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
1996 FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) |
1997 FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) |
1998 FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
1999 -intr_idx - 1));
2000 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
2001 FW_IQ_CMD_IQGTSMODE |
2002 FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
2003 FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
2004 c.iqsize = htons(iq->size);
2005 c.iqaddr = cpu_to_be64(iq->phys_addr);
2006
2007 if (fl) {
2008 fl->size = roundup(fl->size, 8);
2009 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
2010 sizeof(struct rx_sw_desc), &fl->addr,
2011 &fl->sdesc, STAT_LEN, NUMA_NO_NODE);
2012 if (!fl->desc)
2013 goto fl_nomem;
2014
2015 flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc);
2016 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
2017 FW_IQ_CMD_FL0FETCHRO(1) |
2018 FW_IQ_CMD_FL0DATARO(1) |
2019 FW_IQ_CMD_FL0PADEN);
2020 c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
2021 FW_IQ_CMD_FL0FBMAX(3));
2022 c.fl0size = htons(flsz);
2023 c.fl0addr = cpu_to_be64(fl->addr);
2024 }
2025
2026 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2027 if (ret)
2028 goto err;
2029
2030 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
2031 iq->cur_desc = iq->desc;
2032 iq->cidx = 0;
2033 iq->gen = 1;
2034 iq->next_intr_params = iq->intr_params;
2035 iq->cntxt_id = ntohs(c.iqid);
2036 iq->abs_id = ntohs(c.physiqid);
2037 iq->size--; /* subtract status entry */
2038 iq->adap = adap;
2039 iq->netdev = dev;
2040 iq->handler = hnd;
2041
2042 /* set offset to -1 to distinguish ingress queues without FL */
2043 iq->offset = fl ? 0 : -1;
2044
2045 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
2046
2047 if (fl) {
2048 fl->cntxt_id = ntohs(c.fl0id);
2049 fl->avail = fl->pend_cred = 0;
2050 fl->pidx = fl->cidx = 0;
2051 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
2052 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
2053 refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
2054 }
2055 return 0;
2056
2057fl_nomem:
2058 ret = -ENOMEM;
2059err:
2060 if (iq->desc) {
2061 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
2062 iq->desc, iq->phys_addr);
2063 iq->desc = NULL;
2064 }
2065 if (fl && fl->desc) {
2066 kfree(fl->sdesc);
2067 fl->sdesc = NULL;
2068 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
2069 fl->desc, fl->addr);
2070 fl->desc = NULL;
2071 }
2072 return ret;
2073}
2074
2075static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
2076{
2077 q->in_use = 0;
2078 q->cidx = q->pidx = 0;
2079 q->stops = q->restarts = 0;
2080 q->stat = (void *)&q->desc[q->size];
2081 q->cntxt_id = id;
2082 adap->sge.egr_map[id - adap->sge.egr_start] = q;
2083}
2084
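/*
 * t4_sge_alloc_eth_txq() sets up one Ethernet Tx queue: it allocates the
 * descriptor ring and software descriptor state, asks firmware to create the
 * egress queue with FW_EQ_ETH_CMD, and records the returned EQ id via
 * init_txq().  The DMA ring is torn down again if the mailbox command fails.
 */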
2085int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2086 struct net_device *dev, struct netdev_queue *netdevq,
2087 unsigned int iqid)
2088{
2089 int ret, nentries;
2090 struct fw_eq_eth_cmd c;
2091 struct port_info *pi = netdev_priv(dev);
2092
2093 /* Add status entries */
2094 nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
2095
2096 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2097 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2098 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
2099 netdev_queue_numa_node_read(netdevq));
2100 if (!txq->q.desc)
2101 return -ENOMEM;
2102
2103 memset(&c, 0, sizeof(c));
2104 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2105 FW_CMD_WRITE | FW_CMD_EXEC |
2106 FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
2107 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
2108 FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
2109 c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
2110 c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
2111 FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
2112 FW_EQ_ETH_CMD_FETCHRO(1) |
2113 FW_EQ_ETH_CMD_IQID(iqid));
2114 c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
2115 FW_EQ_ETH_CMD_FBMAX(3) |
2116 FW_EQ_ETH_CMD_CIDXFTHRESH(5) |
2117 FW_EQ_ETH_CMD_EQSIZE(nentries));
2118 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2119
2120 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2121 if (ret) {
2122 kfree(txq->q.sdesc);
2123 txq->q.sdesc = NULL;
2124 dma_free_coherent(adap->pdev_dev,
2125 nentries * sizeof(struct tx_desc),
2126 txq->q.desc, txq->q.phys_addr);
2127 txq->q.desc = NULL;
2128 return ret;
2129 }
2130
2131 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd)));
2132 txq->txq = netdevq;
2133 txq->tso = txq->tx_cso = txq->vlan_ins = 0;
2134 txq->mapping_err = 0;
2135 return 0;
2136}
2137
2138int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2139 struct net_device *dev, unsigned int iqid,
2140 unsigned int cmplqid)
2141{
2142 int ret, nentries;
2143 struct fw_eq_ctrl_cmd c;
2144 struct port_info *pi = netdev_priv(dev);
2145
2146 /* Add status entries */
2147 nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
2148
2149 txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
2150 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
2151 NULL, 0, NUMA_NO_NODE);
2152 if (!txq->q.desc)
2153 return -ENOMEM;
2154
2155 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2156 FW_CMD_WRITE | FW_CMD_EXEC |
2157 FW_EQ_CTRL_CMD_PFN(adap->fn) |
2158 FW_EQ_CTRL_CMD_VFN(0));
2159 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
2160 FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
2161 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
2162 c.physeqid_pkd = htonl(0);
2163 c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
2164 FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
2165 FW_EQ_CTRL_CMD_FETCHRO |
2166 FW_EQ_CTRL_CMD_IQID(iqid));
2167 c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
2168 FW_EQ_CTRL_CMD_FBMAX(3) |
2169 FW_EQ_CTRL_CMD_CIDXFTHRESH(5) |
2170 FW_EQ_CTRL_CMD_EQSIZE(nentries));
2171 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2172
2173 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2174 if (ret) {
2175 dma_free_coherent(adap->pdev_dev,
2176 nentries * sizeof(struct tx_desc),
2177 txq->q.desc, txq->q.phys_addr);
2178 txq->q.desc = NULL;
2179 return ret;
2180 }
2181
2182 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid)));
2183 txq->adap = adap;
2184 skb_queue_head_init(&txq->sendq);
2185 tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
2186 txq->full = 0;
2187 return 0;
2188}
2189
2190int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
2191 struct net_device *dev, unsigned int iqid)
2192{
2193 int ret, nentries;
2194 struct fw_eq_ofld_cmd c;
2195 struct port_info *pi = netdev_priv(dev);
2196
2197 /* Add status entries */
2198 nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
2199
2200 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2201 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2202 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
2203 NUMA_NO_NODE);
2204 if (!txq->q.desc)
2205 return -ENOMEM;
2206
2207 memset(&c, 0, sizeof(c));
2208 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
2209 FW_CMD_WRITE | FW_CMD_EXEC |
2210 FW_EQ_OFLD_CMD_PFN(adap->fn) |
2211 FW_EQ_OFLD_CMD_VFN(0));
2212 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
2213 FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
2214 c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
2215 FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
2216 FW_EQ_OFLD_CMD_FETCHRO(1) |
2217 FW_EQ_OFLD_CMD_IQID(iqid));
2218 c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
2219 FW_EQ_OFLD_CMD_FBMAX(3) |
2220 FW_EQ_OFLD_CMD_CIDXFTHRESH(5) |
2221 FW_EQ_OFLD_CMD_EQSIZE(nentries));
2222 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2223
2224 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2225 if (ret) {
2226 kfree(txq->q.sdesc);
2227 txq->q.sdesc = NULL;
2228 dma_free_coherent(adap->pdev_dev,
2229 nentries * sizeof(struct tx_desc),
2230 txq->q.desc, txq->q.phys_addr);
2231 txq->q.desc = NULL;
2232 return ret;
2233 }
2234
2235 init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd)));
2236 txq->adap = adap;
2237 skb_queue_head_init(&txq->sendq);
2238 tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
2239 txq->full = 0;
2240 txq->mapping_err = 0;
2241 return 0;
2242}
2243
2244static void free_txq(struct adapter *adap, struct sge_txq *q)
2245{
2246 dma_free_coherent(adap->pdev_dev,
2247 q->size * sizeof(struct tx_desc) + STAT_LEN,
2248 q->desc, q->phys_addr);
2249 q->cntxt_id = 0;
2250 q->sdesc = NULL;
2251 q->desc = NULL;
2252}
2253
2254static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2255 struct sge_fl *fl)
2256{
2257 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
2258
2259 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
2260 t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
2261 rq->cntxt_id, fl_id, 0xffff);
2262 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
2263 rq->desc, rq->phys_addr);
2264 netif_napi_del(&rq->napi);
2265 rq->netdev = NULL;
2266 rq->cntxt_id = rq->abs_id = 0;
2267 rq->desc = NULL;
2268
2269 if (fl) {
2270 free_rx_bufs(adap, fl, fl->avail);
2271 dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN,
2272 fl->desc, fl->addr);
2273 kfree(fl->sdesc);
2274 fl->sdesc = NULL;
2275 fl->cntxt_id = 0;
2276 fl->desc = NULL;
2277 }
2278}
2279
2280/**
2281 * t4_free_sge_resources - free SGE resources
2282 * @adap: the adapter
2283 *
2284 * Frees resources used by the SGE queue sets.
2285 */
2286void t4_free_sge_resources(struct adapter *adap)
2287{
2288 int i;
2289 struct sge_eth_rxq *eq = adap->sge.ethrxq;
2290 struct sge_eth_txq *etq = adap->sge.ethtxq;
2291 struct sge_ofld_rxq *oq = adap->sge.ofldrxq;
2292
2293 /* clean up Ethernet Tx/Rx queues */
2294 for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
2295 if (eq->rspq.desc)
2296 free_rspq_fl(adap, &eq->rspq, &eq->fl);
2297 if (etq->q.desc) {
2298 t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
2299 etq->q.cntxt_id);
2300 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
2301 kfree(etq->q.sdesc);
2302 free_txq(adap, &etq->q);
2303 }
2304 }
2305
2306 /* clean up RDMA and iSCSI Rx queues */
2307 for (i = 0; i < adap->sge.ofldqsets; i++, oq++) {
2308 if (oq->rspq.desc)
2309 free_rspq_fl(adap, &oq->rspq, &oq->fl);
2310 }
2311 for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) {
2312 if (oq->rspq.desc)
2313 free_rspq_fl(adap, &oq->rspq, &oq->fl);
2314 }
2315
2316 /* clean up offload Tx queues */
2317 for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
2318 struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
2319
2320 if (q->q.desc) {
2321 tasklet_kill(&q->qresume_tsk);
2322 t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
2323 q->q.cntxt_id);
2324 free_tx_desc(adap, &q->q, q->q.in_use, false);
2325 kfree(q->q.sdesc);
2326 __skb_queue_purge(&q->sendq);
2327 free_txq(adap, &q->q);
2328 }
2329 }
2330
2331 /* clean up control Tx queues */
2332 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
2333 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
2334
2335 if (cq->q.desc) {
2336 tasklet_kill(&cq->qresume_tsk);
2337 t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
2338 cq->q.cntxt_id);
2339 __skb_queue_purge(&cq->sendq);
2340 free_txq(adap, &cq->q);
2341 }
2342 }
2343
2344 if (adap->sge.fw_evtq.desc)
2345 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
2346
2347 if (adap->sge.intrq.desc)
2348 free_rspq_fl(adap, &adap->sge.intrq, NULL);
2349
2350 /* clear the reverse egress queue map */
2351 memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
2352}
2353
2354void t4_sge_start(struct adapter *adap)
2355{
2356 adap->sge.ethtxq_rover = 0;
2357 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2358 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2359}
2360
2361/**
2362 * t4_sge_stop - disable SGE operation
2363 * @adap: the adapter
2364 *
2365 * Stop tasklets and timers associated with the DMA engine. Note that
2366 * this is effective only if measures have been taken to disable any HW
2367 * events that may restart them.
2368 */
2369void t4_sge_stop(struct adapter *adap)
2370{
2371 int i;
2372 struct sge *s = &adap->sge;
2373
2374 if (in_interrupt()) /* actions below require waiting */
2375 return;
2376
2377 if (s->rx_timer.function)
2378 del_timer_sync(&s->rx_timer);
2379 if (s->tx_timer.function)
2380 del_timer_sync(&s->tx_timer);
2381
2382 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
2383 struct sge_ofld_txq *q = &s->ofldtxq[i];
2384
2385 if (q->q.desc)
2386 tasklet_kill(&q->qresume_tsk);
2387 }
2388 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
2389 struct sge_ctrl_txq *cq = &s->ctrlq[i];
2390
2391 if (cq->q.desc)
2392 tasklet_kill(&cq->qresume_tsk);
2393 }
2394}
2395
2396/**
2397 * t4_sge_init - initialize SGE
2398 * @adap: the adapter
2399 *
2400 * Performs SGE initialization needed every time after a chip reset.
2401 * We do not initialize any of the queues here; instead the driver's
2402 * top level must request them individually.
2403 */
2404void t4_sge_init(struct adapter *adap)
2405{
2406 unsigned int i, v;
2407 struct sge *s = &adap->sge;
2408 unsigned int fl_align_log = ilog2(FL_ALIGN);
2409
2410 t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK |
2411 INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE,
2412 INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) |
2413 RXPKTCPLMODE |
2414 (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));
2415
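 /*
 * Program the host page size for all eight PFs: each 4-bit field of
 * SGE_HOST_PAGE_SIZE appears to hold log2(page size) - 10, so the loop
 * below replicates (PAGE_SHIFT - 10) into every nibble.
 */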
2416 for (i = v = 0; i < 32; i += 4)
2417 v |= (PAGE_SHIFT - 10) << i;
2418 t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v);
2419 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE);
2420#if FL_PG_ORDER > 0
2421 t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER);
2422#endif
2423 t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
2424 THRESHOLD_0(s->counter_val[0]) |
2425 THRESHOLD_1(s->counter_val[1]) |
2426 THRESHOLD_2(s->counter_val[2]) |
2427 THRESHOLD_3(s->counter_val[3]));
2428 t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
2429 TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
2430 TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
2431 t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
2432 TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) |
2433 TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3])));
2434 t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
2435 TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) |
2436 TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5])));
2437 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
2438 setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
2439 s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */
2440 s->idma_state[0] = s->idma_state[1] = 0;
2441 spin_lock_init(&s->intrq_lock);
2442}
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
new file mode 100644
index 00000000000..d1ec111aebd
--- /dev/null
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -0,0 +1,2856 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/init.h>
36#include <linux/delay.h>
37#include "cxgb4.h"
38#include "t4_regs.h"
39#include "t4fw_api.h"
40
41/**
42 * t4_wait_op_done_val - wait until an operation is completed
43 * @adapter: the adapter performing the operation
44 * @reg: the register to check for completion
45 * @mask: a single-bit field within @reg that indicates completion
46 * @polarity: the value of the field when the operation is completed
47 * @attempts: number of check iterations
48 * @delay: delay in usecs between iterations
49 * @valp: where to store the value of the register at completion time
50 *
51 * Wait until an operation is completed by checking a bit in a register
52 * up to @attempts times. If @valp is not NULL the value of the register
53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise.
55 */
56static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 int polarity, int attempts, int delay, u32 *valp)
58{
59 while (1) {
60 u32 val = t4_read_reg(adapter, reg);
61
62 if (!!(val & mask) == polarity) {
63 if (valp)
64 *valp = val;
65 return 0;
66 }
67 if (--attempts == 0)
68 return -EAGAIN;
69 if (delay)
70 udelay(delay);
71 }
72}
73
74static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
75 int polarity, int attempts, int delay)
76{
77 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
78 delay, NULL);
79}
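/*
 * Illustrative use (a sketch, nothing new): the serial-flash helpers further
 * down poll the SF_OP BUSY bit with
 *
 *	ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
 *
 * i.e. wait for BUSY to read back as 0, retrying up to SF_ATTEMPTS times with
 * a 5 us delay between polls.
 */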
80
81/**
82 * t4_set_reg_field - set a register field to a value
83 * @adapter: the adapter to program
84 * @addr: the register address
85 * @mask: specifies the portion of the register to modify
86 * @val: the new value for the register field
87 *
88 * Sets a register field specified by the supplied mask to the
89 * given value.
90 */
91void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
92 u32 val)
93{
94 u32 v = t4_read_reg(adapter, addr) & ~mask;
95
96 t4_write_reg(adapter, addr, v | val);
97 (void) t4_read_reg(adapter, addr); /* flush */
98}
99
100/**
101 * t4_read_indirect - read indirectly addressed registers
102 * @adap: the adapter
103 * @addr_reg: register holding the indirect address
104 * @data_reg: register holding the value of the indirect register
105 * @vals: where the read register values are stored
106 * @nregs: how many indirect registers to read
107 * @start_idx: index of first indirect register to read
108 *
109 * Reads registers that are accessed indirectly through an address/data
110 * register pair.
111 */
112static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113 unsigned int data_reg, u32 *vals,
114 unsigned int nregs, unsigned int start_idx)
115{
116 while (nregs--) {
117 t4_write_reg(adap, addr_reg, start_idx);
118 *vals++ = t4_read_reg(adap, data_reg);
119 start_idx++;
120 }
121}
122
123/*
124 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
125 */
126static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
127 u32 mbox_addr)
128{
129 for ( ; nflit; nflit--, mbox_addr += 8)
130 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
131}
132
133/*
134 * Handle a FW assertion reported in a mailbox.
135 */
136static void fw_asrt(struct adapter *adap, u32 mbox_addr)
137{
138 struct fw_debug_cmd asrt;
139
140 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
141 dev_alert(adap->pdev_dev,
142 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
143 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
144 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
145}
146
147static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
148{
149 dev_err(adap->pdev_dev,
150 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
151 (unsigned long long)t4_read_reg64(adap, data_reg),
152 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
153 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
154 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
155 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
156 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
157 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
158 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
159}
160
161/**
162 * t4_wr_mbox_meat - send a command to FW through the given mailbox
163 * @adap: the adapter
164 * @mbox: index of the mailbox to use
165 * @cmd: the command to write
166 * @size: command length in bytes
167 * @rpl: where to optionally store the reply
168 * @sleep_ok: if true we may sleep while awaiting command completion
169 *
170 * Sends the given command to FW through the selected mailbox and waits
171 * for the FW to execute the command. If @rpl is not %NULL it is used to
172 * store the FW's reply to the command. The command and its optional
173 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
174 * to respond. @sleep_ok determines whether we may sleep while awaiting
175 * the response. If sleeping is allowed we use progressive backoff,
176 * otherwise we spin.
177 *
178 * The return value is 0 on success or a negative errno on failure. A
179 * failure can happen either because we are not able to execute the
180 * command or FW executes it but signals an error. In the latter case
181 * the return value is the error code indicated by FW (negated).
182 */
183int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
184 void *rpl, bool sleep_ok)
185{
186 static const int delay[] = {
187 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
188 };
189
190 u32 v;
191 u64 res;
192 int i, ms, delay_idx;
193 const __be64 *p = cmd;
194 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
195 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
196
197 if ((size & 15) || size > MBOX_LEN)
198 return -EINVAL;
199
200 /*
201 * If the device is off-line, as in EEH, commands will time out.
202 * Fail them early so we don't waste time waiting.
203 */
204 if (adap->pdev->error_state != pci_channel_io_normal)
205 return -EIO;
206
207 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
208 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
209 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
210
211 if (v != MBOX_OWNER_DRV)
212 return v ? -EBUSY : -ETIMEDOUT;
213
214 for (i = 0; i < size; i += 8)
215 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
216
217 t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
218 t4_read_reg(adap, ctl_reg); /* flush write */
219
220 delay_idx = 0;
221 ms = delay[0];
222
223 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
224 if (sleep_ok) {
225 ms = delay[delay_idx]; /* last element may repeat */
226 if (delay_idx < ARRAY_SIZE(delay) - 1)
227 delay_idx++;
228 msleep(ms);
229 } else
230 mdelay(ms);
231
232 v = t4_read_reg(adap, ctl_reg);
233 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
234 if (!(v & MBMSGVALID)) {
235 t4_write_reg(adap, ctl_reg, 0);
236 continue;
237 }
238
239 res = t4_read_reg64(adap, data_reg);
240 if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
241 fw_asrt(adap, data_reg);
242 res = FW_CMD_RETVAL(EIO);
243 } else if (rpl)
244 get_mbox_rpl(adap, rpl, size / 8, data_reg);
245
246 if (FW_CMD_RETVAL_GET((int)res))
247 dump_mbox(adap, mbox, data_reg);
248 t4_write_reg(adap, ctl_reg, 0);
249 return -FW_CMD_RETVAL_GET((int)res);
250 }
251 }
252
253 dump_mbox(adap, mbox, data_reg);
254 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
255 *(const u8 *)cmd, mbox);
256 return -ETIMEDOUT;
257}
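/*
 * Typical caller pattern (an illustrative sketch only; reset_flags is a
 * placeholder, not a name from this driver): build a firmware command
 * structure from t4fw_api.h, set the REQUEST flag plus the desired
 * READ/WRITE/EXEC flags and the length in 16-byte units, then issue it
 * through the t4_wr_mbox() wrapper, roughly:
 *
 *	struct fw_reset_cmd c;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_write = htonl(FW_CMD_OP(FW_RESET_CMD) | FW_CMD_REQUEST |
 *			      FW_CMD_WRITE);
 *	c.retval_len16 = htonl(FW_LEN16(c));
 *	c.val = htonl(reset_flags);
 *	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 */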
258
259/**
260 * t4_mc_read - read from MC through backdoor accesses
261 * @adap: the adapter
262 * @addr: address of first byte requested
263 * @data: 64 bytes of data containing the requested address
264 * @ecc: where to store the corresponding 64-bit ECC word
265 *
266 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
267 * that covers the requested address @addr. If @ecc is not %NULL it
268 * is assigned the 64-bit ECC word for the read data.
269 */
270int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
271{
272 int i;
273
274 if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
275 return -EBUSY;
276 t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
277 t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
278 t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
279 t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
280 BIST_CMD_GAP(1));
281 i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
282 if (i)
283 return i;
284
285#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
286
287 for (i = 15; i >= 0; i--)
288 *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
289 if (ecc)
290 *ecc = t4_read_reg64(adap, MC_DATA(16));
291#undef MC_DATA
292 return 0;
293}
294
295/**
296 * t4_edc_read - read from EDC through backdoor accesses
297 * @adap: the adapter
298 * @idx: which EDC to access
299 * @addr: address of first byte requested
300 * @data: 64 bytes of data containing the requested address
301 * @ecc: where to store the corresponding 64-bit ECC word
302 *
303 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
304 * that covers the requested address @addr. If @ecc is not %NULL it
305 * is assigned the 64-bit ECC word for the read data.
306 */
307int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
308{
309 int i;
310
311 idx *= EDC_STRIDE;
312 if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
313 return -EBUSY;
314 t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
315 t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
316 t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
317 t4_write_reg(adap, EDC_BIST_CMD + idx,
318 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
319 i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
320 if (i)
321 return i;
322
323#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
324
325 for (i = 15; i >= 0; i--)
326 *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
327 if (ecc)
328 *ecc = t4_read_reg64(adap, EDC_DATA(16));
329#undef EDC_DATA
330 return 0;
331}
332
333#define EEPROM_STAT_ADDR 0x7bfc
334#define VPD_BASE 0
335#define VPD_LEN 512
336
337/**
338 * t4_seeprom_wp - enable/disable EEPROM write protection
339 * @adapter: the adapter
340 * @enable: whether to enable or disable write protection
341 *
342 * Enables or disables write protection on the serial EEPROM.
343 */
344int t4_seeprom_wp(struct adapter *adapter, bool enable)
345{
346 unsigned int v = enable ? 0xc : 0;
347 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
348 return ret < 0 ? ret : 0;
349}
350
351/**
352 * get_vpd_params - read VPD parameters from VPD EEPROM
353 * @adapter: adapter to read
354 * @p: where to store the parameters
355 *
356 * Reads card parameters stored in VPD EEPROM.
357 */
358static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
359{
360 int i, ret;
361 int ec, sn;
362 u8 vpd[VPD_LEN], csum;
363 unsigned int vpdr_len, kw_offset, id_len;
364
365 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
366 if (ret < 0)
367 return ret;
368
369 if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
370 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
371 return -EINVAL;
372 }
373
374 id_len = pci_vpd_lrdt_size(vpd);
375 if (id_len > ID_LEN)
376 id_len = ID_LEN;
377
378 i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
379 if (i < 0) {
380 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
381 return -EINVAL;
382 }
383
384 vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
385 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
386 if (vpdr_len + kw_offset > VPD_LEN) {
387 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
388 return -EINVAL;
389 }
390
391#define FIND_VPD_KW(var, name) do { \
392 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
393 if (var < 0) { \
394 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
395 return -EINVAL; \
396 } \
397 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
398} while (0)
399
400 FIND_VPD_KW(i, "RV");
401 for (csum = 0; i >= 0; i--)
402 csum += vpd[i];
403
404 if (csum) {
405 dev_err(adapter->pdev_dev,
406 "corrupted VPD EEPROM, actual csum %u\n", csum);
407 return -EINVAL;
408 }
409
410 FIND_VPD_KW(ec, "EC");
411 FIND_VPD_KW(sn, "SN");
412#undef FIND_VPD_KW
413
414 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
415 strim(p->id);
416 memcpy(p->ec, vpd + ec, EC_LEN);
417 strim(p->ec);
418 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
419 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
420 strim(p->sn);
421 return 0;
422}
423
424/* serial flash and firmware constants */
425enum {
426 SF_ATTEMPTS = 10, /* max retries for SF operations */
427
428 /* flash command opcodes */
429 SF_PROG_PAGE = 2, /* program page */
430 SF_WR_DISABLE = 4, /* disable writes */
431 SF_RD_STATUS = 5, /* read status register */
432 SF_WR_ENABLE = 6, /* enable writes */
433 SF_RD_DATA_FAST = 0xb, /* read flash */
434 SF_RD_ID = 0x9f, /* read ID */
435 SF_ERASE_SECTOR = 0xd8, /* erase sector */
436
437 FW_MAX_SIZE = 512 * 1024,
438};
439
440/**
441 * sf1_read - read data from the serial flash
442 * @adapter: the adapter
443 * @byte_cnt: number of bytes to read
444 * @cont: whether another operation will be chained
445 * @lock: whether to lock SF for PL access only
446 * @valp: where to store the read data
447 *
448 * Reads up to 4 bytes of data from the serial flash. The location of
449 * the read needs to be specified prior to calling this by issuing the
450 * appropriate commands to the serial flash.
451 */
452static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
453 int lock, u32 *valp)
454{
455 int ret;
456
457 if (!byte_cnt || byte_cnt > 4)
458 return -EINVAL;
459 if (t4_read_reg(adapter, SF_OP) & BUSY)
460 return -EBUSY;
461 cont = cont ? SF_CONT : 0;
462 lock = lock ? SF_LOCK : 0;
463 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
464 ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
465 if (!ret)
466 *valp = t4_read_reg(adapter, SF_DATA);
467 return ret;
468}
469
470/**
471 * sf1_write - write data to the serial flash
472 * @adapter: the adapter
473 * @byte_cnt: number of bytes to write
474 * @cont: whether another operation will be chained
475 * @lock: whether to lock SF for PL access only
476 * @val: value to write
477 *
478 * Writes up to 4 bytes of data to the serial flash. The location of
479 * the write needs to be specified prior to calling this by issuing the
480 * appropriate commands to the serial flash.
481 */
482static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
483 int lock, u32 val)
484{
485 if (!byte_cnt || byte_cnt > 4)
486 return -EINVAL;
487 if (t4_read_reg(adapter, SF_OP) & BUSY)
488 return -EBUSY;
489 cont = cont ? SF_CONT : 0;
490 lock = lock ? SF_LOCK : 0;
491 t4_write_reg(adapter, SF_DATA, val);
492 t4_write_reg(adapter, SF_OP, lock |
493 cont | BYTECNT(byte_cnt - 1) | OP_WR);
494 return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
495}
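/*
 * The two helpers above drive the serial-flash interface: a flash command is
 * issued as a chain of sf1_write()/sf1_read() transfers of up to 4 bytes
 * each, with @cont set on every transfer except the last and @lock held
 * across the whole command so that only PL accesses touch the flash in the
 * meantime.
 */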
496
497/**
498 * flash_wait_op - wait for a flash operation to complete
499 * @adapter: the adapter
500 * @attempts: max number of polls of the status register
501 * @delay: delay between polls in ms
502 *
503 * Wait for a flash operation to complete by polling the status register.
504 */
505static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
506{
507 int ret;
508 u32 status;
509
510 while (1) {
511 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
512 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
513 return ret;
514 if (!(status & 1))
515 return 0;
516 if (--attempts == 0)
517 return -EAGAIN;
518 if (delay)
519 msleep(delay);
520 }
521}
522
523/**
524 * t4_read_flash - read words from serial flash
525 * @adapter: the adapter
526 * @addr: the start address for the read
527 * @nwords: how many 32-bit words to read
528 * @data: where to store the read data
529 * @byte_oriented: whether to store data as bytes or as words
530 *
531 * Read the specified number of 32-bit words from the serial flash.
532 * If @byte_oriented is set the read data is stored as a byte array
533 * (i.e., big-endian), otherwise as 32-bit words in the platform's
534 * natural endianness.
535 */
536static int t4_read_flash(struct adapter *adapter, unsigned int addr,
537 unsigned int nwords, u32 *data, int byte_oriented)
538{
539 int ret;
540
541 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
542 return -EINVAL;
543
544 addr = swab32(addr) | SF_RD_DATA_FAST;
545
546 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
547 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
548 return ret;
549
550 for ( ; nwords; nwords--, data++) {
551 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
552 if (nwords == 1)
553 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
554 if (ret)
555 return ret;
556 if (byte_oriented)
557 *data = htonl(*data);
558 }
559 return 0;
560}
561
562/**
563 * t4_write_flash - write up to a page of data to the serial flash
564 * @adapter: the adapter
565 * @addr: the start address to write
566 * @n: length of data to write in bytes
567 * @data: the data to write
568 *
569 * Writes up to a page of data (256 bytes) to the serial flash starting
570 * at the given address. All the data must be written to the same page.
571 */
572static int t4_write_flash(struct adapter *adapter, unsigned int addr,
573 unsigned int n, const u8 *data)
574{
575 int ret;
576 u32 buf[64];
577 unsigned int i, c, left, val, offset = addr & 0xff;
578
579 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
580 return -EINVAL;
581
582 val = swab32(addr) | SF_PROG_PAGE;
583
584 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
585 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
586 goto unlock;
587
588 for (left = n; left; left -= c) {
589 c = min(left, 4U);
590 for (val = 0, i = 0; i < c; ++i)
591 val = (val << 8) + *data++;
592
593 ret = sf1_write(adapter, c, c != left, 1, val);
594 if (ret)
595 goto unlock;
596 }
597 ret = flash_wait_op(adapter, 8, 1);
598 if (ret)
599 goto unlock;
600
601 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
602
603 /* Read the page to verify the write succeeded */
604 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
605 if (ret)
606 return ret;
607
608 if (memcmp(data - n, (u8 *)buf + offset, n)) {
609 dev_err(adapter->pdev_dev,
610 "failed to correctly write the flash page at %#x\n",
611 addr);
612 return -EIO;
613 }
614 return 0;
615
616unlock:
617 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
618 return ret;
619}
620
621/**
622 * get_fw_version - read the firmware version
623 * @adapter: the adapter
624 * @vers: where to place the version
625 *
626 * Reads the FW version from flash.
627 */
628static int get_fw_version(struct adapter *adapter, u32 *vers)
629{
630 return t4_read_flash(adapter, adapter->params.sf_fw_start +
631 offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
632}
633
634/**
635 * get_tp_version - read the TP microcode version
636 * @adapter: the adapter
637 * @vers: where to place the version
638 *
639 * Reads the TP microcode version from flash.
640 */
641static int get_tp_version(struct adapter *adapter, u32 *vers)
642{
643 return t4_read_flash(adapter, adapter->params.sf_fw_start +
644 offsetof(struct fw_hdr, tp_microcode_ver),
645 1, vers, 0);
646}
647
648/**
649 * t4_check_fw_version - check if the FW is compatible with this driver
650 * @adapter: the adapter
651 *
652 * Checks if an adapter's FW is compatible with the driver. Returns 0
653 * if there's exact match, a negative error if the version could not be
654 * read or there's a major version mismatch, and a positive value if the
655 * expected major version is found but there's a minor version mismatch.
656 */
657int t4_check_fw_version(struct adapter *adapter)
658{
659 u32 api_vers[2];
660 int ret, major, minor, micro;
661
662 ret = get_fw_version(adapter, &adapter->params.fw_vers);
663 if (!ret)
664 ret = get_tp_version(adapter, &adapter->params.tp_vers);
665 if (!ret)
666 ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
667 offsetof(struct fw_hdr, intfver_nic),
668 2, api_vers, 1);
669 if (ret)
670 return ret;
671
672 major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
673 minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
674 micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
675 memcpy(adapter->params.api_vers, api_vers,
676 sizeof(adapter->params.api_vers));
677
678 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
679 dev_err(adapter->pdev_dev,
680 "card FW has major version %u, driver wants %u\n",
681 major, FW_VERSION_MAJOR);
682 return -EINVAL;
683 }
684
685 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
686 return 0; /* perfect match */
687
688 /* Minor/micro version mismatch. Report it but often it's OK. */
689 return 1;
690}
691
692/**
693 * t4_flash_erase_sectors - erase a range of flash sectors
694 * @adapter: the adapter
695 * @start: the first sector to erase
696 * @end: the last sector to erase
697 *
698 * Erases the sectors in the given inclusive range.
699 */
700static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
701{
702 int ret = 0;
703
704 while (start <= end) {
705 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
706 (ret = sf1_write(adapter, 4, 0, 1,
707 SF_ERASE_SECTOR | (start << 8))) != 0 ||
708 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
709 dev_err(adapter->pdev_dev,
710 "erase of flash sector %d failed, error %d\n",
711 start, ret);
712 break;
713 }
714 start++;
715 }
716 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
717 return ret;
718}
719
720/**
721 * t4_load_fw - download firmware
722 * @adap: the adapter
723 * @fw_data: the firmware image to write
724 * @size: image size
725 *
726 * Write the supplied firmware image to the card's serial flash.
727 */
728int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
729{
730 u32 csum;
731 int ret, addr;
732 unsigned int i;
733 u8 first_page[SF_PAGE_SIZE];
734 const u32 *p = (const u32 *)fw_data;
735 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
736 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
737 unsigned int fw_img_start = adap->params.sf_fw_start;
738 unsigned int fw_start_sec = fw_img_start / sf_sec_size;
739
740 if (!size) {
741 dev_err(adap->pdev_dev, "FW image has no data\n");
742 return -EINVAL;
743 }
744 if (size & 511) {
745 dev_err(adap->pdev_dev,
746 "FW image size not multiple of 512 bytes\n");
747 return -EINVAL;
748 }
749 if (ntohs(hdr->len512) * 512 != size) {
750 dev_err(adap->pdev_dev,
751 "FW image size differs from size in FW header\n");
752 return -EINVAL;
753 }
754 if (size > FW_MAX_SIZE) {
755 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
756 FW_MAX_SIZE);
757 return -EFBIG;
758 }
759
760 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
761 csum += ntohl(p[i]);
762
763 if (csum != 0xffffffff) {
764 dev_err(adap->pdev_dev,
765 "corrupted firmware image, checksum %#x\n", csum);
766 return -EINVAL;
767 }
768
769 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
770 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
771 if (ret)
772 goto out;
773
774 /*
775 * We write the correct version at the end so the driver can see a bad
776 * version if the FW write fails. Start by writing a copy of the
777 * first page with a bad version.
778 */
779 memcpy(first_page, fw_data, SF_PAGE_SIZE);
780 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
781 ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
782 if (ret)
783 goto out;
784
785 addr = fw_img_start;
786 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
787 addr += SF_PAGE_SIZE;
788 fw_data += SF_PAGE_SIZE;
789 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
790 if (ret)
791 goto out;
792 }
793
794 ret = t4_write_flash(adap,
795 fw_img_start + offsetof(struct fw_hdr, fw_ver),
796 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
797out:
798 if (ret)
799 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
800 ret);
801 return ret;
802}
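/*
 * A caller would normally obtain the image with request_firmware() and hand
 * its payload straight to t4_load_fw(); hedged sketch (the firmware name and
 * error handling are illustrative, not taken from this file):
 *
 *	const struct firmware *fw;
 *
 *	ret = request_firmware(&fw, fw_name, adap->pdev_dev);
 *	if (ret == 0) {
 *		ret = t4_load_fw(adap, fw->data, fw->size);
 *		release_firmware(fw);
 *	}
 */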
803
804#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
805 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
806
807/**
808 * t4_link_start - apply link configuration to MAC/PHY
809 * @adap: the adapter
810 * @mbox: mailbox to use for the FW command
811 * @port: the port id
812 * @lc: the requested link configuration
813 * Set up a port's MAC and PHY according to a desired link configuration.
814 * - If the PHY can auto-negotiate first decide what to advertise, then
815 * enable/disable auto-negotiation as desired, and reset.
816 * - If the PHY does not auto-negotiate just reset it.
817 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
818 * otherwise do it later based on the outcome of auto-negotiation.
819 */
820int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
821 struct link_config *lc)
822{
823 struct fw_port_cmd c;
824 unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
825
826 lc->link_ok = 0;
827 if (lc->requested_fc & PAUSE_RX)
828 fc |= FW_PORT_CAP_FC_RX;
829 if (lc->requested_fc & PAUSE_TX)
830 fc |= FW_PORT_CAP_FC_TX;
831
832 memset(&c, 0, sizeof(c));
833 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
834 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
835 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
836 FW_LEN16(c));
837
838 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
839 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
840 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
841 } else if (lc->autoneg == AUTONEG_DISABLE) {
842 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
843 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
844 } else
845 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
846
847 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
848}
849
850/**
851 * t4_restart_aneg - restart autonegotiation
852 * @adap: the adapter
853 * @mbox: mbox to use for the FW command
854 * @port: the port id
855 *
856 * Restarts autonegotiation for the selected port.
857 */
858int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
859{
860 struct fw_port_cmd c;
861
862 memset(&c, 0, sizeof(c));
863 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
864 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
865 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
866 FW_LEN16(c));
867 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
868 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
869}
870
871struct intr_info {
872 unsigned int mask; /* bits to check in interrupt status */
873 const char *msg; /* message to print or NULL */
874 short stat_idx; /* stat counter to increment or -1 */
875 unsigned short fatal; /* whether the condition reported is fatal */
876};
877
878/**
879 * t4_handle_intr_status - table driven interrupt handler
880 * @adapter: the adapter that generated the interrupt
881 * @reg: the interrupt status register to process
882 * @acts: table of interrupt actions
883 *
884 * A table driven interrupt handler that applies a set of masks to an
885 * interrupt status word and performs the corresponding actions if the
886 * interrupts described by the mask have occurred. The actions include
887 * optionally emitting a warning or alert message. The table is terminated
888 * by an entry specifying mask 0. Returns the number of fatal interrupt
889 * conditions.
890 */
891static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
892 const struct intr_info *acts)
893{
894 int fatal = 0;
895 unsigned int mask = 0;
896 unsigned int status = t4_read_reg(adapter, reg);
897
898 for ( ; acts->mask; ++acts) {
899 if (!(status & acts->mask))
900 continue;
901 if (acts->fatal) {
902 fatal++;
903 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
904 status & acts->mask);
905 } else if (acts->msg && printk_ratelimit())
906 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
907 status & acts->mask);
908 mask |= acts->mask;
909 }
910 status &= mask;
911 if (status) /* clear processed interrupts */
912 t4_write_reg(adapter, reg, status);
913 return fatal;
914}
915
916/*
917 * Interrupt handler for the PCIE module.
918 */
919static void pcie_intr_handler(struct adapter *adapter)
920{
921 static const struct intr_info sysbus_intr_info[] = {
922 { RNPP, "RXNP array parity error", -1, 1 },
923 { RPCP, "RXPC array parity error", -1, 1 },
924 { RCIP, "RXCIF array parity error", -1, 1 },
925 { RCCP, "Rx completions control array parity error", -1, 1 },
926 { RFTP, "RXFT array parity error", -1, 1 },
927 { 0 }
928 };
929 static const struct intr_info pcie_port_intr_info[] = {
930 { TPCP, "TXPC array parity error", -1, 1 },
931 { TNPP, "TXNP array parity error", -1, 1 },
932 { TFTP, "TXFT array parity error", -1, 1 },
933 { TCAP, "TXCA array parity error", -1, 1 },
934 { TCIP, "TXCIF array parity error", -1, 1 },
935 { RCAP, "RXCA array parity error", -1, 1 },
936 { OTDD, "outbound request TLP discarded", -1, 1 },
937 { RDPE, "Rx data parity error", -1, 1 },
938 { TDUE, "Tx uncorrectable data error", -1, 1 },
939 { 0 }
940 };
941 static const struct intr_info pcie_intr_info[] = {
942 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
943 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
944 { MSIDATAPERR, "MSI data parity error", -1, 1 },
945 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
946 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
947 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
948 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
949 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
950 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
951 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
952 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
953 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
954 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
955 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
956 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
957 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
958 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
959 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
960 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
961 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
962 { FIDPERR, "PCI FID parity error", -1, 1 },
963 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
964 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
965 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
966 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
967 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
968 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
969 { PCIESINT, "PCI core secondary fault", -1, 1 },
970 { PCIEPINT, "PCI core primary fault", -1, 1 },
971 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
972 { 0 }
973 };
974
975 int fat;
976
977 fat = t4_handle_intr_status(adapter,
978 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
979 sysbus_intr_info) +
980 t4_handle_intr_status(adapter,
981 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
982 pcie_port_intr_info) +
983 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
984 if (fat)
985 t4_fatal_err(adapter);
986}
987
988/*
989 * TP interrupt handler.
990 */
991static void tp_intr_handler(struct adapter *adapter)
992{
993 static const struct intr_info tp_intr_info[] = {
994 { 0x3fffffff, "TP parity error", -1, 1 },
995 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
996 { 0 }
997 };
998
999 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1000 t4_fatal_err(adapter);
1001}
1002
1003/*
1004 * SGE interrupt handler.
1005 */
1006static void sge_intr_handler(struct adapter *adapter)
1007{
1008 u64 v;
1009
1010 static const struct intr_info sge_intr_info[] = {
1011 { ERR_CPL_EXCEED_IQE_SIZE,
1012 "SGE received CPL exceeding IQE size", -1, 1 },
1013 { ERR_INVALID_CIDX_INC,
1014 "SGE GTS CIDX increment too large", -1, 0 },
1015 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1016 { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
1017 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1018 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1019 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1020 0 },
1021 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1022 0 },
1023 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1024 0 },
1025 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1026 0 },
1027 { ERR_ING_CTXT_PRIO,
1028 "SGE too many priority ingress contexts", -1, 0 },
1029 { ERR_EGR_CTXT_PRIO,
1030 "SGE too many priority egress contexts", -1, 0 },
1031 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1032 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1033 { 0 }
1034 };
1035
1036 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
1037 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
1038 if (v) {
1039 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
1040 (unsigned long long)v);
1041 t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1042 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1043 }
1044
1045 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1046 v != 0)
1047 t4_fatal_err(adapter);
1048}
1049
1050/*
1051 * CIM interrupt handler.
1052 */
1053static void cim_intr_handler(struct adapter *adapter)
1054{
1055 static const struct intr_info cim_intr_info[] = {
1056 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1057 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1058 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1059 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1060 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1061 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1062 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1063 { 0 }
1064 };
1065 static const struct intr_info cim_upintr_info[] = {
1066 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1067 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1068 { ILLWRINT, "CIM illegal write", -1, 1 },
1069 { ILLRDINT, "CIM illegal read", -1, 1 },
1070 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1071 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1072 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1073 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1074 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1075 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1076 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1077 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1078 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1079 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1080 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1081 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1082 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1083 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1084 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1085 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1086 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1087 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1088 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1089 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1090 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1091 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1092 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1093 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1094 { 0 }
1095 };
1096
1097 int fat;
1098
1099 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1100 cim_intr_info) +
1101 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1102 cim_upintr_info);
1103 if (fat)
1104 t4_fatal_err(adapter);
1105}
1106
1107/*
1108 * ULP RX interrupt handler.
1109 */
1110static void ulprx_intr_handler(struct adapter *adapter)
1111{
1112 static const struct intr_info ulprx_intr_info[] = {
1113 { 0x1800000, "ULPRX context error", -1, 1 },
1114 { 0x7fffff, "ULPRX parity error", -1, 1 },
1115 { 0 }
1116 };
1117
1118 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1119 t4_fatal_err(adapter);
1120}
1121
1122/*
1123 * ULP TX interrupt handler.
1124 */
1125static void ulptx_intr_handler(struct adapter *adapter)
1126{
1127 static const struct intr_info ulptx_intr_info[] = {
1128 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1129 0 },
1130 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1131 0 },
1132 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1133 0 },
1134 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1135 0 },
1136 { 0xfffffff, "ULPTX parity error", -1, 1 },
1137 { 0 }
1138 };
1139
1140 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1141 t4_fatal_err(adapter);
1142}
1143
1144/*
1145 * PM TX interrupt handler.
1146 */
1147static void pmtx_intr_handler(struct adapter *adapter)
1148{
1149 static const struct intr_info pmtx_intr_info[] = {
1150 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1151 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1152 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1153 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1154 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1155 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1156 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1157 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1158 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1159 { 0 }
1160 };
1161
1162 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1163 t4_fatal_err(adapter);
1164}
1165
1166/*
1167 * PM RX interrupt handler.
1168 */
1169static void pmrx_intr_handler(struct adapter *adapter)
1170{
1171 static const struct intr_info pmrx_intr_info[] = {
1172 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1173 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1174 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1175 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1176 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1177 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1178 { 0 }
1179 };
1180
1181 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1182 t4_fatal_err(adapter);
1183}
1184
1185/*
1186 * CPL switch interrupt handler.
1187 */
1188static void cplsw_intr_handler(struct adapter *adapter)
1189{
1190 static const struct intr_info cplsw_intr_info[] = {
1191 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1192 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1193 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1194 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1195 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1196 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1197 { 0 }
1198 };
1199
1200 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1201 t4_fatal_err(adapter);
1202}
1203
1204/*
1205 * LE interrupt handler.
1206 */
1207static void le_intr_handler(struct adapter *adap)
1208{
1209 static const struct intr_info le_intr_info[] = {
1210 { LIPMISS, "LE LIP miss", -1, 0 },
1211 { LIP0, "LE 0 LIP error", -1, 0 },
1212 { PARITYERR, "LE parity error", -1, 1 },
1213 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1214 { REQQPARERR, "LE request queue parity error", -1, 1 },
1215 { 0 }
1216 };
1217
1218 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1219 t4_fatal_err(adap);
1220}
1221
1222/*
1223 * MPS interrupt handler.
1224 */
1225static void mps_intr_handler(struct adapter *adapter)
1226{
1227 static const struct intr_info mps_rx_intr_info[] = {
1228 { 0xffffff, "MPS Rx parity error", -1, 1 },
1229 { 0 }
1230 };
1231 static const struct intr_info mps_tx_intr_info[] = {
1232 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1233 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1234 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1235 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1236 { BUBBLE, "MPS Tx underflow", -1, 1 },
1237 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1238 { FRMERR, "MPS Tx framing error", -1, 1 },
1239 { 0 }
1240 };
1241 static const struct intr_info mps_trc_intr_info[] = {
1242 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1243 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1244 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1245 { 0 }
1246 };
1247 static const struct intr_info mps_stat_sram_intr_info[] = {
1248 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1249 { 0 }
1250 };
1251 static const struct intr_info mps_stat_tx_intr_info[] = {
1252 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1253 { 0 }
1254 };
1255 static const struct intr_info mps_stat_rx_intr_info[] = {
1256 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1257 { 0 }
1258 };
1259 static const struct intr_info mps_cls_intr_info[] = {
1260 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1261 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1262 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1263 { 0 }
1264 };
1265
1266 int fat;
1267
1268 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1269 mps_rx_intr_info) +
1270 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1271 mps_tx_intr_info) +
1272 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1273 mps_trc_intr_info) +
1274 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1275 mps_stat_sram_intr_info) +
1276 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1277 mps_stat_tx_intr_info) +
1278 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1279 mps_stat_rx_intr_info) +
1280 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1281 mps_cls_intr_info);
1282
1283 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1284 RXINT | TXINT | STATINT);
1285 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1286 if (fat)
1287 t4_fatal_err(adapter);
1288}
1289
1290#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1291
1292/*
1293 * EDC/MC interrupt handler.
1294 */
1295static void mem_intr_handler(struct adapter *adapter, int idx)
1296{
1297 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
1298
1299 unsigned int addr, cnt_addr, v;
1300
1301 if (idx <= MEM_EDC1) {
1302 addr = EDC_REG(EDC_INT_CAUSE, idx);
1303 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
1304 } else {
1305 addr = MC_INT_CAUSE;
1306 cnt_addr = MC_ECC_STATUS;
1307 }
1308
1309 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1310 if (v & PERR_INT_CAUSE)
1311 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1312 name[idx]);
1313 if (v & ECC_CE_INT_CAUSE) {
1314 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1315
1316 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1317 if (printk_ratelimit())
1318 dev_warn(adapter->pdev_dev,
1319 "%u %s correctable ECC data error%s\n",
1320 cnt, name[idx], cnt > 1 ? "s" : "");
1321 }
1322 if (v & ECC_UE_INT_CAUSE)
1323 dev_alert(adapter->pdev_dev,
1324 "%s uncorrectable ECC data error\n", name[idx]);
1325
1326 t4_write_reg(adapter, addr, v);
1327 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1328 t4_fatal_err(adapter);
1329}
1330
1331/*
1332 * MA interrupt handler.
1333 */
1334static void ma_intr_handler(struct adapter *adap)
1335{
1336 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1337
1338 if (status & MEM_PERR_INT_CAUSE)
1339 dev_alert(adap->pdev_dev,
1340 "MA parity error, parity status %#x\n",
1341 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1342 if (status & MEM_WRAP_INT_CAUSE) {
1343 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1344 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1345 "client %u to address %#x\n",
1346 MEM_WRAP_CLIENT_NUM_GET(v),
1347 MEM_WRAP_ADDRESS_GET(v) << 4);
1348 }
1349 t4_write_reg(adap, MA_INT_CAUSE, status);
1350 t4_fatal_err(adap);
1351}
1352
1353/*
1354 * SMB interrupt handler.
1355 */
1356static void smb_intr_handler(struct adapter *adap)
1357{
1358 static const struct intr_info smb_intr_info[] = {
1359 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1360 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1361 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1362 { 0 }
1363 };
1364
1365 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1366 t4_fatal_err(adap);
1367}
1368
1369/*
1370 * NC-SI interrupt handler.
1371 */
1372static void ncsi_intr_handler(struct adapter *adap)
1373{
1374 static const struct intr_info ncsi_intr_info[] = {
1375 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1376 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1377 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1378 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1379 { 0 }
1380 };
1381
1382 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1383 t4_fatal_err(adap);
1384}
1385
1386/*
1387 * XGMAC interrupt handler.
1388 */
1389static void xgmac_intr_handler(struct adapter *adap, int port)
1390{
1391 u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
1392
1393 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1394 if (!v)
1395 return;
1396
1397 if (v & TXFIFO_PRTY_ERR)
1398 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1399 port);
1400 if (v & RXFIFO_PRTY_ERR)
1401 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1402 port);
1403 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
1404 t4_fatal_err(adap);
1405}
1406
1407/*
1408 * PL interrupt handler.
1409 */
1410static void pl_intr_handler(struct adapter *adap)
1411{
1412 static const struct intr_info pl_intr_info[] = {
1413 { FATALPERR, "T4 fatal parity error", -1, 1 },
1414 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1415 { 0 }
1416 };
1417
1418 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1419 t4_fatal_err(adap);
1420}
1421
1422#define PF_INTR_MASK (PFSW)
1423#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1424 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1425 CPL_SWITCH | SGE | ULP_TX)
1426
1427/**
1428 * t4_slow_intr_handler - control path interrupt handler
1429 * @adapter: the adapter
1430 *
1431 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1432 * The designation 'slow' is because it involves register reads, while
1433 * data interrupts typically don't involve any MMIOs.
1434 */
1435int t4_slow_intr_handler(struct adapter *adapter)
1436{
1437 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1438
1439 if (!(cause & GLBL_INTR_MASK))
1440 return 0;
1441 if (cause & CIM)
1442 cim_intr_handler(adapter);
1443 if (cause & MPS)
1444 mps_intr_handler(adapter);
1445 if (cause & NCSI)
1446 ncsi_intr_handler(adapter);
1447 if (cause & PL)
1448 pl_intr_handler(adapter);
1449 if (cause & SMB)
1450 smb_intr_handler(adapter);
1451 if (cause & XGMAC0)
1452 xgmac_intr_handler(adapter, 0);
1453 if (cause & XGMAC1)
1454 xgmac_intr_handler(adapter, 1);
1455 if (cause & XGMAC_KR0)
1456 xgmac_intr_handler(adapter, 2);
1457 if (cause & XGMAC_KR1)
1458 xgmac_intr_handler(adapter, 3);
1459 if (cause & PCIE)
1460 pcie_intr_handler(adapter);
1461 if (cause & MC)
1462 mem_intr_handler(adapter, MEM_MC);
1463 if (cause & EDC0)
1464 mem_intr_handler(adapter, MEM_EDC0);
1465 if (cause & EDC1)
1466 mem_intr_handler(adapter, MEM_EDC1);
1467 if (cause & LE)
1468 le_intr_handler(adapter);
1469 if (cause & TP)
1470 tp_intr_handler(adapter);
1471 if (cause & MA)
1472 ma_intr_handler(adapter);
1473 if (cause & PM_TX)
1474 pmtx_intr_handler(adapter);
1475 if (cause & PM_RX)
1476 pmrx_intr_handler(adapter);
1477 if (cause & ULP_RX)
1478 ulprx_intr_handler(adapter);
1479 if (cause & CPL_SWITCH)
1480 cplsw_intr_handler(adapter);
1481 if (cause & SGE)
1482 sge_intr_handler(adapter);
1483 if (cause & ULP_TX)
1484 ulptx_intr_handler(adapter);
1485
1486 /* Clear the interrupts just processed for which we are the master. */
1487 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1488 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1489 return 1;
1490}
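
Editor's note: as a usage sketch only (not part of the driver), the snippet below shows how the 0/1 return value of t4_slow_intr_handler() maps onto a Linux interrupt handler's return code; the handler name is made up and <linux/interrupt.h> is assumed to be in scope.

static irqreturn_t example_slow_isr(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	/* returns 1 if any global interrupt cause bits were handled */
	return t4_slow_intr_handler(adap) ? IRQ_HANDLED : IRQ_NONE;
}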
1491
1492/**
1493 * t4_intr_enable - enable interrupts
1494 * @adapter: the adapter whose interrupts should be enabled
1495 *
1496 * Enable PF-specific interrupts for the calling function and the top-level
1497 * interrupt concentrator for global interrupts. Interrupts are already
1498 * enabled at each module; here we just enable the roots of the interrupt
1499 * hierarchies.
1500 *
1501 * Note: this function should be called only when the driver manages
1502 * non PF-specific interrupts from the various HW modules. Only one PCI
1503 * function at a time should be doing this.
1504 */
1505void t4_intr_enable(struct adapter *adapter)
1506{
1507 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1508
1509 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
1510 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
1511 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
1512 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
1513 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1514 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1515 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
1516 EGRESS_SIZE_ERR);
1517 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1518 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
1519}
1520
1521/**
1522 * t4_intr_disable - disable interrupts
1523 * @adapter: the adapter whose interrupts should be disabled
1524 *
1525 * Disable interrupts. We only disable the top-level interrupt
1526 * concentrators. The caller must be a PCI function managing global
1527 * interrupts.
1528 */
1529void t4_intr_disable(struct adapter *adapter)
1530{
1531 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1532
1533 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
1534 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
1535}
1536
1537/**
1538 * hash_mac_addr - return the hash value of a MAC address
1539 * @addr: the 48-bit Ethernet MAC address
1540 *
1541 * Hashes a MAC address according to the hash function used by HW inexact
1542 * (hash) address matching.
1543 */
1544static int hash_mac_addr(const u8 *addr)
1545{
1546 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1547 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1548 a ^= b;
1549 a ^= (a >> 12);
1550 a ^= (a >> 6);
1551 return a & 0x3f;
1552}
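
As an illustrative sketch (the helper name is invented), this is how the 6-bit result of hash_mac_addr() is meant to be used: each address sets one bit of the 64-bit inexact-match vector that t4_alloc_mac_filt() and t4_set_addr_hash() further down operate on.

static u64 example_build_hash_vec(const u8 (*addrs)[6], unsigned int n)
{
	u64 vec = 0;
	unsigned int i;

	for (i = 0; i < n; i++)
		vec |= 1ULL << hash_mac_addr(addrs[i]);	/* one bit in 0..63 per address */
	return vec;
}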
1553
1554/**
1555 * t4_config_rss_range - configure a portion of the RSS mapping table
1556 * @adapter: the adapter
1557 * @mbox: mbox to use for the FW command
1558 * @viid: virtual interface whose RSS subtable is to be written
1559 * @start: start entry in the table to write
1560 * @n: how many table entries to write
1561 * @rspq: values for the response queue lookup table
1562 * @nrspq: number of values in @rspq
1563 *
1564 * Programs the selected part of the VI's RSS mapping table with the
1565 * provided values. If @nrspq < @n the supplied values are used repeatedly
1566 * until the full table range is populated.
1567 *
1568 * The caller must ensure the values in @rspq are in the range allowed for
1569 * @viid.
1570 */
1571int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1572 int start, int n, const u16 *rspq, unsigned int nrspq)
1573{
1574 int ret;
1575 const u16 *rsp = rspq;
1576 const u16 *rsp_end = rspq + nrspq;
1577 struct fw_rss_ind_tbl_cmd cmd;
1578
1579 memset(&cmd, 0, sizeof(cmd));
1580 cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
1581 FW_CMD_REQUEST | FW_CMD_WRITE |
1582 FW_RSS_IND_TBL_CMD_VIID(viid));
1583 cmd.retval_len16 = htonl(FW_LEN16(cmd));
1584
1585 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
1586 while (n > 0) {
1587 int nq = min(n, 32);
1588 __be32 *qp = &cmd.iq0_to_iq2;
1589
1590 cmd.niqid = htons(nq);
1591 cmd.startidx = htons(start);
1592
1593 start += nq;
1594 n -= nq;
1595
1596 while (nq > 0) {
1597 unsigned int v;
1598
1599 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
1600 if (++rsp >= rsp_end)
1601 rsp = rspq;
1602 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
1603 if (++rsp >= rsp_end)
1604 rsp = rspq;
1605 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
1606 if (++rsp >= rsp_end)
1607 rsp = rspq;
1608
1609 *qp++ = htonl(v);
1610 nq -= 3;
1611 }
1612
1613 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
1614 if (ret)
1615 return ret;
1616 }
1617 return 0;
1618}
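
A minimal usage sketch, assuming the caller already knows the VI's RSS slice (start index and size, e.g. as returned by t4_alloc_vi() below) and has an array of ingress queue ids; the wrapper name and mailbox choice are placeholders.

static int example_setup_vi_rss(struct adapter *adap, int mbox,
				unsigned int viid, int rss_start, int rss_size,
				const u16 *iq_ids, unsigned int n_iq)
{
	/* the n_iq queue ids are repeated until the whole slice is filled */
	return t4_config_rss_range(adap, mbox, viid, rss_start, rss_size,
				   iq_ids, n_iq);
}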
1619
1620/**
1621 * t4_config_glbl_rss - configure the global RSS mode
1622 * @adapter: the adapter
1623 * @mbox: mbox to use for the FW command
1624 * @mode: global RSS mode
1625 * @flags: mode-specific flags
1626 *
1627 * Sets the global RSS mode.
1628 */
1629int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1630 unsigned int flags)
1631{
1632 struct fw_rss_glb_config_cmd c;
1633
1634 memset(&c, 0, sizeof(c));
1635 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
1636 FW_CMD_REQUEST | FW_CMD_WRITE);
1637 c.retval_len16 = htonl(FW_LEN16(c));
1638 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
1639 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1640 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
1641 c.u.basicvirtual.mode_pkd =
1642 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1643 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
1644 } else
1645 return -EINVAL;
1646 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
1647}
1648
1649/**
1650 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
1651 * @adap: the adapter
1652 * @v4: holds the TCP/IP counter values
1653 * @v6: holds the TCP/IPv6 counter values
1654 *
1655 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
1656 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
1657 */
1658void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1659 struct tp_tcp_stats *v6)
1660{
1661 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
1662
1663#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
1664#define STAT(x) val[STAT_IDX(x)]
1665#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
1666
1667 if (v4) {
1668 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1669 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
1670 v4->tcpOutRsts = STAT(OUT_RST);
1671 v4->tcpInSegs = STAT64(IN_SEG);
1672 v4->tcpOutSegs = STAT64(OUT_SEG);
1673 v4->tcpRetransSegs = STAT64(RXT_SEG);
1674 }
1675 if (v6) {
1676 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1677 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
1678 v6->tcpOutRsts = STAT(OUT_RST);
1679 v6->tcpInSegs = STAT64(IN_SEG);
1680 v6->tcpOutSegs = STAT64(OUT_SEG);
1681 v6->tcpRetransSegs = STAT64(RXT_SEG);
1682 }
1683#undef STAT64
1684#undef STAT
1685#undef STAT_IDX
1686}
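
A short sketch of reading the IPv4 MIB and logging the segment counters; the function name is made up and struct tp_tcp_stats comes from cxgb4.h.

static void example_dump_tcp_stats(struct adapter *adap)
{
	struct tp_tcp_stats v4, v6;

	t4_tp_get_tcp_stats(adap, &v4, &v6);
	dev_info(adap->pdev_dev, "TCP: %llu segs out, %llu retransmitted\n",
		 (unsigned long long)v4.tcpOutSegs,
		 (unsigned long long)v4.tcpRetransSegs);
}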
1687
1688/**
1689 * t4_read_mtu_tbl - returns the values in the HW path MTU table
1690 * @adap: the adapter
1691 * @mtus: where to store the MTU values
1692 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
1693 *
1694 * Reads the HW path MTU table.
1695 */
1696void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
1697{
1698 u32 v;
1699 int i;
1700
1701 for (i = 0; i < NMTUS; ++i) {
1702 t4_write_reg(adap, TP_MTU_TABLE,
1703 MTUINDEX(0xff) | MTUVALUE(i));
1704 v = t4_read_reg(adap, TP_MTU_TABLE);
1705 mtus[i] = MTUVALUE_GET(v);
1706 if (mtu_log)
1707 mtu_log[i] = MTUWIDTH_GET(v);
1708 }
1709}
1710
1711/**
1712 * init_cong_ctrl - initialize congestion control parameters
1713 * @a: the alpha values for congestion control
1714 * @b: the beta values for congestion control
1715 *
1716 * Initialize the congestion control parameters.
1717 */
1718static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
1719{
1720 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
1721 a[9] = 2;
1722 a[10] = 3;
1723 a[11] = 4;
1724 a[12] = 5;
1725 a[13] = 6;
1726 a[14] = 7;
1727 a[15] = 8;
1728 a[16] = 9;
1729 a[17] = 10;
1730 a[18] = 14;
1731 a[19] = 17;
1732 a[20] = 21;
1733 a[21] = 25;
1734 a[22] = 30;
1735 a[23] = 35;
1736 a[24] = 45;
1737 a[25] = 60;
1738 a[26] = 80;
1739 a[27] = 100;
1740 a[28] = 200;
1741 a[29] = 300;
1742 a[30] = 400;
1743 a[31] = 500;
1744
1745 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
1746 b[9] = b[10] = 1;
1747 b[11] = b[12] = 2;
1748 b[13] = b[14] = b[15] = b[16] = 3;
1749 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
1750 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
1751 b[28] = b[29] = 6;
1752 b[30] = b[31] = 7;
1753}
1754
1755/* The minimum additive increment value for the congestion control table */
1756#define CC_MIN_INCR 2U
1757
1758/**
1759 * t4_load_mtus - write the MTU and congestion control HW tables
1760 * @adap: the adapter
1761 * @mtus: the values for the MTU table
1762 * @alpha: the values for the congestion control alpha parameter
1763 * @beta: the values for the congestion control beta parameter
1764 *
1765 * Write the HW MTU table with the supplied MTUs and the high-speed
1766 * congestion control table with the supplied alpha, beta, and MTUs.
1767 * We write the two tables together because the additive increments
1768 * depend on the MTUs.
1769 */
1770void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
1771 const unsigned short *alpha, const unsigned short *beta)
1772{
1773 static const unsigned int avg_pkts[NCCTRL_WIN] = {
1774 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
1775 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
1776 28672, 40960, 57344, 81920, 114688, 163840, 229376
1777 };
1778
1779 unsigned int i, w;
1780
1781 for (i = 0; i < NMTUS; ++i) {
1782 unsigned int mtu = mtus[i];
1783 unsigned int log2 = fls(mtu);
1784
1785 if (!(mtu & ((1 << log2) >> 2))) /* round */
1786 log2--;
1787 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
1788 MTUWIDTH(log2) | MTUVALUE(mtu));
1789
1790 for (w = 0; w < NCCTRL_WIN; ++w) {
1791 unsigned int inc;
1792
1793 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
1794 CC_MIN_INCR);
1795
1796 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
1797 (w << 16) | (beta[w] << 13) | inc);
1798 }
1799 }
1800}
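
A probe-time sketch tying init_cong_ctrl() and t4_load_mtus() together; the MTU values below are illustrative only, not the driver's default table.

static void __devinit example_load_mtus(struct adapter *adap)
{
	static const unsigned short mtus[NMTUS] = {
		88, 256, 512, 576, 808, 1024, 1280, 1488,
		1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
	};
	unsigned short a[NCCTRL_WIN], b[NCCTRL_WIN];

	init_cong_ctrl(a, b);		/* fill the alpha/beta windows */
	t4_load_mtus(adap, mtus, a, b);	/* program both HW tables together */
}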
1801
1802/**
1803 * get_mps_bg_map - return the buffer groups associated with a port
1804 * @adap: the adapter
1805 * @idx: the port index
1806 *
1807 * Returns a bitmap indicating which MPS buffer groups are associated
1808 * with the given port. Bit i is set if buffer group i is used by the
1809 * port.
1810 */
1811static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
1812{
1813 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
1814
1815 if (n == 0)
1816 return idx == 0 ? 0xf : 0;
1817 if (n == 1)
1818 return idx < 2 ? (3 << (2 * idx)) : 0;
1819 return 1 << idx;
1820}
1821
1822/**
1823 * t4_get_port_stats - collect port statistics
1824 * @adap: the adapter
1825 * @idx: the port index
1826 * @p: the stats structure to fill
1827 *
1828 * Collect statistics related to the given port from HW.
1829 */
1830void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
1831{
1832 u32 bgmap = get_mps_bg_map(adap, idx);
1833
1834#define GET_STAT(name) \
1835 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
1836#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
1837
1838 p->tx_octets = GET_STAT(TX_PORT_BYTES);
1839 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
1840 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
1841 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
1842 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
1843 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
1844 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
1845 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
1846 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
1847 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
1848 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
1849 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
1850 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
1851 p->tx_drop = GET_STAT(TX_PORT_DROP);
1852 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
1853 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
1854 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
1855 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
1856 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
1857 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
1858 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
1859 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
1860 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
1861
1862 p->rx_octets = GET_STAT(RX_PORT_BYTES);
1863 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
1864 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
1865 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
1866 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
1867 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
1868 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
1869 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
1870 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
1871 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
1872 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
1873 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
1874 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
1875 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
1876 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
1877 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
1878 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
1879 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
1880 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
1881 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
1882 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
1883 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
1884 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
1885 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
1886 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
1887 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
1888 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
1889
1890 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
1891 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
1892 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
1893 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
1894 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
1895 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
1896 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
1897 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
1898
1899#undef GET_STAT
1900#undef GET_STAT_COM
1901}
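
A usage sketch that snapshots one port's byte counters; the helper name is invented and struct port_stats is from cxgb4.h.

static void example_log_port_bytes(struct adapter *adap, int port_idx)
{
	struct port_stats s;

	t4_get_port_stats(adap, port_idx, &s);
	dev_info(adap->pdev_dev, "port %d: tx %llu bytes, rx %llu bytes\n",
		 port_idx, (unsigned long long)s.tx_octets,
		 (unsigned long long)s.rx_octets);
}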
1902
1903/**
1904 * t4_wol_magic_enable - enable/disable magic packet WoL
1905 * @adap: the adapter
1906 * @port: the physical port index
1907 * @addr: MAC address expected in magic packets, %NULL to disable
1908 *
1909 * Enables/disables magic packet wake-on-LAN for the selected port.
1910 */
1911void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
1912 const u8 *addr)
1913{
1914 if (addr) {
1915 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
1916 (addr[2] << 24) | (addr[3] << 16) |
1917 (addr[4] << 8) | addr[5]);
1918 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
1919 (addr[0] << 8) | addr[1]);
1920 }
1921 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
1922 addr ? MAGICEN : 0);
1923}
1924
1925/**
1926 * t4_wol_pat_enable - enable/disable pattern-based WoL
1927 * @adap: the adapter
1928 * @port: the physical port index
1929 * @map: bitmap of which HW pattern filters to set
1930 * @mask0: byte mask for bytes 0-63 of a packet
1931 * @mask1: byte mask for bytes 64-127 of a packet
1932 * @crc: Ethernet CRC for selected bytes
1933 * @enable: enable/disable switch
1934 *
1935 * Sets the pattern filters indicated in @map to mask out the bytes
1936 * specified in @mask0/@mask1 in received packets and compare the CRC of
1937 * the resulting packet against @crc. If @enable is %true pattern-based
1938 * WoL is enabled, otherwise disabled.
1939 */
1940int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
1941 u64 mask0, u64 mask1, unsigned int crc, bool enable)
1942{
1943 int i;
1944
1945 if (!enable) {
1946 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
1947 PATEN, 0);
1948 return 0;
1949 }
1950 if (map > 0xff)
1951 return -EINVAL;
1952
1953#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
1954
1955 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
1956 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
1957 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
1958
1959 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
1960 if (!(map & 1))
1961 continue;
1962
1963 /* write byte masks */
1964 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
1965 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
1966 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
1967 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
1968 return -ETIMEDOUT;
1969
1970 /* write CRC */
1971 t4_write_reg(adap, EPIO_REG(DATA0), crc);
1972 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
1973 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
1974 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
1975 return -ETIMEDOUT;
1976 }
1977#undef EPIO_REG
1978
1979 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
1980 return 0;
1981}
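
A small sketch of a typical WoL configuration: arm magic-packet wake-up with the port's station address and leave pattern matching off. The helper name and the policy are assumptions, not driver behaviour.

static int example_config_wol(struct adapter *adap, unsigned int port,
			      const u8 *dev_addr)
{
	t4_wol_magic_enable(adap, port, dev_addr);	/* NULL would disarm it */
	return t4_wol_pat_enable(adap, port, 0, 0, 0, 0, false);
}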
1982
1983#define INIT_CMD(var, cmd, rd_wr) do { \
1984 (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
1985 FW_CMD_REQUEST | FW_CMD_##rd_wr); \
1986 (var).retval_len16 = htonl(FW_LEN16(var)); \
1987} while (0)
1988
1989/**
1990 * t4_mdio_rd - read a PHY register through MDIO
1991 * @adap: the adapter
1992 * @mbox: mailbox to use for the FW command
1993 * @phy_addr: the PHY address
1994 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
1995 * @reg: the register to read
1996 * @valp: where to store the value
1997 *
1998 * Issues a FW command through the given mailbox to read a PHY register.
1999 */
2000int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2001 unsigned int mmd, unsigned int reg, u16 *valp)
2002{
2003 int ret;
2004 struct fw_ldst_cmd c;
2005
2006 memset(&c, 0, sizeof(c));
2007 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2008 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2009 c.cycles_to_len16 = htonl(FW_LEN16(c));
2010 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2011 FW_LDST_CMD_MMD(mmd));
2012 c.u.mdio.raddr = htons(reg);
2013
2014 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2015 if (ret == 0)
2016 *valp = ntohs(c.u.mdio.rval);
2017 return ret;
2018}
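
A sketch of a clause-45 read through the FW mailbox; the MMD and register numbers are arbitrary examples and the helper name is made up.

static int example_read_phy_reg(struct adapter *adap, unsigned int mbox,
				unsigned int phy_addr)
{
	u16 val;
	int ret = t4_mdio_rd(adap, mbox, phy_addr, 1, 0, &val);	/* MMD 1, reg 0 */

	if (ret == 0)
		dev_info(adap->pdev_dev, "PHY %u MMD1 reg0 = %#x\n",
			 phy_addr, val);
	return ret;
}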
2019
2020/**
2021 * t4_mdio_wr - write a PHY register through MDIO
2022 * @adap: the adapter
2023 * @mbox: mailbox to use for the FW command
2024 * @phy_addr: the PHY address
2025 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2026 * @reg: the register to write
2027 * @val: value to write
2028 *
2029 * Issues a FW command through the given mailbox to write a PHY register.
2030 */
2031int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2032 unsigned int mmd, unsigned int reg, u16 val)
2033{
2034 struct fw_ldst_cmd c;
2035
2036 memset(&c, 0, sizeof(c));
2037 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2038 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2039 c.cycles_to_len16 = htonl(FW_LEN16(c));
2040 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2041 FW_LDST_CMD_MMD(mmd));
2042 c.u.mdio.raddr = htons(reg);
2043 c.u.mdio.rval = htons(val);
2044
2045 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2046}
2047
2048/**
2049 * t4_fw_hello - establish communication with FW
2050 * @adap: the adapter
2051 * @mbox: mailbox to use for the FW command
2052 * @evt_mbox: mailbox to receive async FW events
2053 * @master: specifies the caller's willingness to be the device master
2054 * @state: returns the current device state
2055 *
2056 * Issues a command to establish communication with FW.
2057 */
2058int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2059 enum dev_master master, enum dev_state *state)
2060{
2061 int ret;
2062 struct fw_hello_cmd c;
2063
2064 INIT_CMD(c, HELLO, WRITE);
2065 c.err_to_mbasyncnot = htonl(
2066 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2067 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2068 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
2069 FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
2070
2071 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2072 if (ret == 0 && state) {
2073 u32 v = ntohl(c.err_to_mbasyncnot);
2074 if (v & FW_HELLO_CMD_INIT)
2075 *state = DEV_STATE_INIT;
2076 else if (v & FW_HELLO_CMD_ERR)
2077 *state = DEV_STATE_ERR;
2078 else
2079 *state = DEV_STATE_UNINIT;
2080 }
2081 return ret;
2082}
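
A sketch of the handshake: say hello, let FW pick the master (assuming the MASTER_MAY member of enum dev_master from cxgb4.h), and report whether another function already initialized the device.

static int example_hello(struct adapter *adap, unsigned int mbox)
{
	enum dev_state state;
	int ret = t4_fw_hello(adap, mbox, mbox, MASTER_MAY, &state);

	if (ret < 0)
		return ret;
	return state == DEV_STATE_INIT;	/* 1 if the device is already up */
}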
2083
2084/**
2085 * t4_fw_bye - end communication with FW
2086 * @adap: the adapter
2087 * @mbox: mailbox to use for the FW command
2088 *
2089 * Issues a command to terminate communication with FW.
2090 */
2091int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2092{
2093 struct fw_bye_cmd c;
2094
2095 INIT_CMD(c, BYE, WRITE);
2096 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2097}
2098
2099/**
2100 * t4_early_init - ask FW to initialize the device
2101 * @adap: the adapter
2102 * @mbox: mailbox to use for the FW command
2103 *
2104 * Issues a command to FW to partially initialize the device. This
2105 * performs initialization that generally doesn't depend on user input.
2106 */
2107int t4_early_init(struct adapter *adap, unsigned int mbox)
2108{
2109 struct fw_initialize_cmd c;
2110
2111 INIT_CMD(c, INITIALIZE, WRITE);
2112 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2113}
2114
2115/**
2116 * t4_fw_reset - issue a reset to FW
2117 * @adap: the adapter
2118 * @mbox: mailbox to use for the FW command
2119 * @reset: specifies the type of reset to perform
2120 *
2121 * Issues a reset command of the specified type to FW.
2122 */
2123int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2124{
2125 struct fw_reset_cmd c;
2126
2127 INIT_CMD(c, RESET, WRITE);
2128 c.val = htonl(reset);
2129 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2130}
2131
2132/**
2133 * t4_query_params - query FW or device parameters
2134 * @adap: the adapter
2135 * @mbox: mailbox to use for the FW command
2136 * @pf: the PF
2137 * @vf: the VF
2138 * @nparams: the number of parameters
2139 * @params: the parameter names
2140 * @val: the parameter values
2141 *
2142 * Reads the value of FW or device parameters. Up to 7 parameters can be
2143 * queried at once.
2144 */
2145int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2146 unsigned int vf, unsigned int nparams, const u32 *params,
2147 u32 *val)
2148{
2149 int i, ret;
2150 struct fw_params_cmd c;
2151 __be32 *p = &c.param[0].mnem;
2152
2153 if (nparams > 7)
2154 return -EINVAL;
2155
2156 memset(&c, 0, sizeof(c));
2157 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2158 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
2159 FW_PARAMS_CMD_VFN(vf));
2160 c.retval_len16 = htonl(FW_LEN16(c));
2161 for (i = 0; i < nparams; i++, p += 2)
2162 *p = htonl(*params++);
2163
2164 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2165 if (ret == 0)
2166 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2167 *val++ = ntohl(*p);
2168 return ret;
2169}
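
A trivial wrapper sketch for the common single-parameter case; the 32-bit parameter word is assumed to be pre-encoded with the FW_PARAMS_* macros from t4fw_api.h.

static int example_query_one_param(struct adapter *adap, unsigned int mbox,
				   unsigned int pf, unsigned int vf,
				   u32 param, u32 *val)
{
	return t4_query_params(adap, mbox, pf, vf, 1, &param, val);
}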
2170
2171/**
2172 * t4_set_params - sets FW or device parameters
2173 * @adap: the adapter
2174 * @mbox: mailbox to use for the FW command
2175 * @pf: the PF
2176 * @vf: the VF
2177 * @nparams: the number of parameters
2178 * @params: the parameter names
2179 * @val: the parameter values
2180 *
2181 * Sets the value of FW or device parameters. Up to 7 parameters can be
2182 * specified at once.
2183 */
2184int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2185 unsigned int vf, unsigned int nparams, const u32 *params,
2186 const u32 *val)
2187{
2188 struct fw_params_cmd c;
2189 __be32 *p = &c.param[0].mnem;
2190
2191 if (nparams > 7)
2192 return -EINVAL;
2193
2194 memset(&c, 0, sizeof(c));
2195 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2196 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
2197 FW_PARAMS_CMD_VFN(vf));
2198 c.retval_len16 = htonl(FW_LEN16(c));
2199 while (nparams--) {
2200 *p++ = htonl(*params++);
2201 *p++ = htonl(*val++);
2202 }
2203
2204 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2205}
2206
2207/**
2208 * t4_cfg_pfvf - configure PF/VF resource limits
2209 * @adap: the adapter
2210 * @mbox: mailbox to use for the FW command
2211 * @pf: the PF being configured
2212 * @vf: the VF being configured
2213 * @txq: the max number of egress queues
2214 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
2215 * @rxqi: the max number of interrupt-capable ingress queues
2216 * @rxq: the max number of interruptless ingress queues
2217 * @tc: the PCI traffic class
2218 * @vi: the max number of virtual interfaces
2219 * @cmask: the channel access rights mask for the PF/VF
2220 * @pmask: the port access rights mask for the PF/VF
2221 * @nexact: the maximum number of exact MPS filters
2222 * @rcaps: read capabilities
2223 * @wxcaps: write/execute capabilities
2224 *
2225 * Configures resource limits and capabilities for a physical or virtual
2226 * function.
2227 */
2228int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
2229 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
2230 unsigned int rxqi, unsigned int rxq, unsigned int tc,
2231 unsigned int vi, unsigned int cmask, unsigned int pmask,
2232 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
2233{
2234 struct fw_pfvf_cmd c;
2235
2236 memset(&c, 0, sizeof(c));
2237 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
2238 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
2239 FW_PFVF_CMD_VFN(vf));
2240 c.retval_len16 = htonl(FW_LEN16(c));
2241 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
2242 FW_PFVF_CMD_NIQ(rxq));
2243 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
2244 FW_PFVF_CMD_PMASK(pmask) |
2245 FW_PFVF_CMD_NEQ(txq));
2246 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
2247 FW_PFVF_CMD_NEXACTF(nexact));
2248 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
2249 FW_PFVF_CMD_WX_CAPS(wxcaps) |
2250 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
2251 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2252}
2253
2254/**
2255 * t4_alloc_vi - allocate a virtual interface
2256 * @adap: the adapter
2257 * @mbox: mailbox to use for the FW command
2258 * @port: physical port associated with the VI
2259 * @pf: the PF owning the VI
2260 * @vf: the VF owning the VI
2261 * @nmac: number of MAC addresses needed (1 to 5)
2262 * @mac: the MAC addresses of the VI
2263 * @rss_size: size of RSS table slice associated with this VI
2264 *
2265 * Allocates a virtual interface for the given physical port. If @mac is
2266 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
2267 * @mac should be large enough to hold @nmac Ethernet addresses, they are
2268 * stored consecutively so the space needed is @nmac * 6 bytes.
2269 * Returns a negative error number or the non-negative VI id.
2270 */
2271int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
2272 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
2273 unsigned int *rss_size)
2274{
2275 int ret;
2276 struct fw_vi_cmd c;
2277
2278 memset(&c, 0, sizeof(c));
2279 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2280 FW_CMD_WRITE | FW_CMD_EXEC |
2281 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
2282 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
2283 c.portid_pkd = FW_VI_CMD_PORTID(port);
2284 c.nmac = nmac - 1;
2285
2286 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2287 if (ret)
2288 return ret;
2289
2290 if (mac) {
2291 memcpy(mac, c.mac, sizeof(c.mac));
2292 switch (nmac) {
2293 case 5:
2294 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
2295 case 4:
2296 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
2297 case 3:
2298 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
2299 case 2:
2300 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
2301 }
2302 }
2303 if (rss_size)
2304 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
2305 return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
2306}
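
A usage sketch allocating one VI with a single MAC address; the helper name is made up and the error handling is minimal.

static int example_alloc_vi(struct adapter *adap, unsigned int mbox,
			    unsigned int port, unsigned int pf, unsigned int vf)
{
	u8 mac[6];
	unsigned int rss_size;
	int viid = t4_alloc_vi(adap, mbox, port, pf, vf, 1, mac, &rss_size);

	if (viid < 0)
		return viid;
	dev_info(adap->pdev_dev, "VI %d on port %u, RSS slice of %u entries\n",
		 viid, port, rss_size);
	return viid;
}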
2307
2308/**
2309 * t4_set_rxmode - set Rx properties of a virtual interface
2310 * @adap: the adapter
2311 * @mbox: mailbox to use for the FW command
2312 * @viid: the VI id
2313 * @mtu: the new MTU or -1
2314 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
2315 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
2316 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
2317 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
2318 * @sleep_ok: if true we may sleep while awaiting command completion
2319 *
2320 * Sets Rx properties of a virtual interface.
2321 */
2322int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
2323 int mtu, int promisc, int all_multi, int bcast, int vlanex,
2324 bool sleep_ok)
2325{
2326 struct fw_vi_rxmode_cmd c;
2327
2328 /* convert to FW values */
2329 if (mtu < 0)
2330 mtu = FW_RXMODE_MTU_NO_CHG;
2331 if (promisc < 0)
2332 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
2333 if (all_multi < 0)
2334 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
2335 if (bcast < 0)
2336 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
2337 if (vlanex < 0)
2338 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
2339
2340 memset(&c, 0, sizeof(c));
2341 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
2342 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
2343 c.retval_len16 = htonl(FW_LEN16(c));
2344 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
2345 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
2346 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
2347 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
2348 FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
2349 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2350}
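
A sketch showing the -1 "no change" convention: toggle only promiscuous mode and leave MTU, all-multi, broadcast and VLAN extraction untouched. The wrapper name is an assumption.

static int example_set_promisc(struct adapter *adap, unsigned int mbox,
			       unsigned int viid, bool on)
{
	return t4_set_rxmode(adap, mbox, viid, -1, on ? 1 : 0, -1, -1, -1, true);
}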
2351
2352/**
2353 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
2354 * @adap: the adapter
2355 * @mbox: mailbox to use for the FW command
2356 * @viid: the VI id
2357 * @free: if true any existing filters for this VI id are first removed
2358 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
2359 * @addr: the MAC address(es)
2360 * @idx: where to store the index of each allocated filter
2361 * @hash: pointer to hash address filter bitmap
2362 * @sleep_ok: call is allowed to sleep
2363 *
2364 * Allocates an exact-match filter for each of the supplied addresses and
2365 * sets it to the corresponding address. If @idx is not %NULL it should
2366 * have at least @naddr entries, each of which will be set to the index of
2367 * the filter allocated for the corresponding MAC address. If a filter
2368 * could not be allocated for an address, its index is set to 0xffff.
2369 * If @hash is not %NULL, addresses that fail to allocate an exact filter
2370 * are hashed and update the hash filter bitmap pointed at by @hash.
2371 *
2372 * Returns a negative error number or the number of filters allocated.
2373 */
2374int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
2375 unsigned int viid, bool free, unsigned int naddr,
2376 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
2377{
2378 int i, ret;
2379 struct fw_vi_mac_cmd c;
2380 struct fw_vi_mac_exact *p;
2381
2382 if (naddr > 7)
2383 return -EINVAL;
2384
2385 memset(&c, 0, sizeof(c));
2386 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2387 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
2388 FW_VI_MAC_CMD_VIID(viid));
2389 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
2390 FW_CMD_LEN16((naddr + 2) / 2));
2391
2392 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2393 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2394 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
2395 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
2396 }
2397
2398 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
2399 if (ret)
2400 return ret;
2401
2402 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2403 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2404
2405 if (idx)
2406 idx[i] = index >= NEXACT_MAC ? 0xffff : index;
2407 if (index < NEXACT_MAC)
2408 ret++;
2409 else if (hash)
2410 *hash |= (1ULL << hash_mac_addr(addr[i]));
2411 }
2412 return ret;
2413}
2414
2415/**
2416 * t4_change_mac - modifies the exact-match filter for a MAC address
2417 * @adap: the adapter
2418 * @mbox: mailbox to use for the FW command
2419 * @viid: the VI id
2420 * @idx: index of existing filter for old value of MAC address, or -1
2421 * @addr: the new MAC address value
2422 * @persist: whether a new MAC allocation should be persistent
2423 * @add_smt: if true also add the address to the HW SMT
2424 *
2425 * Modifies an exact-match filter and sets it to the new MAC address.
2426 * Note that in general it is not possible to modify the value of a given
2427 * filter so the generic way to modify an address filter is to free the one
2428 * being used by the old address value and allocate a new filter for the
2429 * new address value. @idx can be -1 if the address is a new addition.
2430 *
2431 * Returns a negative error number or the index of the filter with the new
2432 * MAC value.
2433 */
2434int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
2435 int idx, const u8 *addr, bool persist, bool add_smt)
2436{
2437 int ret, mode;
2438 struct fw_vi_mac_cmd c;
2439 struct fw_vi_mac_exact *p = c.u.exact;
2440
2441 if (idx < 0) /* new allocation */
2442 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
2443 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
2444
2445 memset(&c, 0, sizeof(c));
2446 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2447 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
2448 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
2449 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2450 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
2451 FW_VI_MAC_CMD_IDX(idx));
2452 memcpy(p->macaddr, addr, sizeof(p->macaddr));
2453
2454 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2455 if (ret == 0) {
2456 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2457 if (ret >= NEXACT_MAC)
2458 ret = -ENOMEM;
2459 }
2460 return ret;
2461}
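
A sketch of the usual station-address update: pass the previously returned index (or -1 the first time), keep the allocation persistent, and skip the SMT. The helper name is invented.

static int example_set_station_mac(struct adapter *adap, unsigned int mbox,
				   unsigned int viid, int old_idx,
				   const u8 *new_addr)
{
	return t4_change_mac(adap, mbox, viid, old_idx, new_addr, true, false);
}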
2462
2463/**
2464 * t4_set_addr_hash - program the MAC inexact-match hash filter
2465 * @adap: the adapter
2466 * @mbox: mailbox to use for the FW command
2467 * @viid: the VI id
2468 * @ucast: whether the hash filter should also match unicast addresses
2469 * @vec: the value to be written to the hash filter
2470 * @sleep_ok: call is allowed to sleep
2471 *
2472 * Sets the 64-bit inexact-match hash filter for a virtual interface.
2473 */
2474int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
2475 bool ucast, u64 vec, bool sleep_ok)
2476{
2477 struct fw_vi_mac_cmd c;
2478
2479 memset(&c, 0, sizeof(c));
2480 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2481 FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
2482 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
2483 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
2484 FW_CMD_LEN16(1));
2485 c.u.hash.hashvec = cpu_to_be64(vec);
2486 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2487}
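
A sketch combining the two filter types for a multicast list: up to 7 addresses per call get exact-match TCAM entries, any that fail to get a slot are hashed into the 64-bit vector by t4_alloc_mac_filt(), and the vector is then programmed with t4_set_addr_hash(). The helper name is invented.

static int example_set_mcast_list(struct adapter *adap, unsigned int mbox,
				  unsigned int viid, const u8 **addrs,
				  unsigned int naddr)
{
	u64 hash = 0;
	int ret = t4_alloc_mac_filt(adap, mbox, viid, true, naddr, addrs,
				    NULL, &hash, true);

	if (ret < 0)
		return ret;
	return t4_set_addr_hash(adap, mbox, viid, false, hash, true);
}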
2488
2489/**
2490 * t4_enable_vi - enable/disable a virtual interface
2491 * @adap: the adapter
2492 * @mbox: mailbox to use for the FW command
2493 * @viid: the VI id
2494 * @rx_en: 1=enable Rx, 0=disable Rx
2495 * @tx_en: 1=enable Tx, 0=disable Tx
2496 *
2497 * Enables/disables a virtual interface.
2498 */
2499int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
2500 bool rx_en, bool tx_en)
2501{
2502 struct fw_vi_enable_cmd c;
2503
2504 memset(&c, 0, sizeof(c));
2505 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2506 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2507 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
2508 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
2509 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2510}
2511
2512/**
2513 * t4_identify_port - identify a VI's port by blinking its LED
2514 * @adap: the adapter
2515 * @mbox: mailbox to use for the FW command
2516 * @viid: the VI id
2517 * @nblinks: how many times to blink LED at 2.5 Hz
2518 *
2519 * Identifies a VI's port by blinking its LED.
2520 */
2521int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
2522 unsigned int nblinks)
2523{
2524 struct fw_vi_enable_cmd c;
2525
	memset(&c, 0, sizeof(c));	/* clear reserved fields before issuing the FW command */
2526 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2527 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2528 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
2529 c.blinkdur = htons(nblinks);
2530 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2531}
2532
2533/**
2534 * t4_iq_free - free an ingress queue and its FLs
2535 * @adap: the adapter
2536 * @mbox: mailbox to use for the FW command
2537 * @pf: the PF owning the queues
2538 * @vf: the VF owning the queues
2539 * @iqtype: the ingress queue type
2540 * @iqid: ingress queue id
2541 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2542 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2543 *
2544 * Frees an ingress queue and its associated FLs, if any.
2545 */
2546int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2547 unsigned int vf, unsigned int iqtype, unsigned int iqid,
2548 unsigned int fl0id, unsigned int fl1id)
2549{
2550 struct fw_iq_cmd c;
2551
2552 memset(&c, 0, sizeof(c));
2553 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2554 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2555 FW_IQ_CMD_VFN(vf));
2556 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
2557 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
2558 c.iqid = htons(iqid);
2559 c.fl0id = htons(fl0id);
2560 c.fl1id = htons(fl1id);
2561 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2562}
2563
2564/**
2565 * t4_eth_eq_free - free an Ethernet egress queue
2566 * @adap: the adapter
2567 * @mbox: mailbox to use for the FW command
2568 * @pf: the PF owning the queue
2569 * @vf: the VF owning the queue
2570 * @eqid: egress queue id
2571 *
2572 * Frees an Ethernet egress queue.
2573 */
2574int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2575 unsigned int vf, unsigned int eqid)
2576{
2577 struct fw_eq_eth_cmd c;
2578
2579 memset(&c, 0, sizeof(c));
2580 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2581 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
2582 FW_EQ_ETH_CMD_VFN(vf));
2583 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
2584 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
2585 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2586}
2587
2588/**
2589 * t4_ctrl_eq_free - free a control egress queue
2590 * @adap: the adapter
2591 * @mbox: mailbox to use for the FW command
2592 * @pf: the PF owning the queue
2593 * @vf: the VF owning the queue
2594 * @eqid: egress queue id
2595 *
2596 * Frees a control egress queue.
2597 */
2598int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2599 unsigned int vf, unsigned int eqid)
2600{
2601 struct fw_eq_ctrl_cmd c;
2602
2603 memset(&c, 0, sizeof(c));
2604 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2605 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
2606 FW_EQ_CTRL_CMD_VFN(vf));
2607 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
2608 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
2609 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2610}
2611
2612/**
2613 * t4_ofld_eq_free - free an offload egress queue
2614 * @adap: the adapter
2615 * @mbox: mailbox to use for the FW command
2616 * @pf: the PF owning the queue
2617 * @vf: the VF owning the queue
2618 * @eqid: egress queue id
2619 *
2620 * Frees an offload egress queue.
2621 */
2622int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2623 unsigned int vf, unsigned int eqid)
2624{
2625 struct fw_eq_ofld_cmd c;
2626
2627 memset(&c, 0, sizeof(c));
2628 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
2629 FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
2630 FW_EQ_OFLD_CMD_VFN(vf));
2631 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
2632 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
2633 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2634}
2635
2636/**
2637 * t4_handle_fw_rpl - process a FW reply message
2638 * @adap: the adapter
2639 * @rpl: start of the FW message
2640 *
2641 * Processes a FW message, such as link state change messages.
2642 */
2643int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
2644{
2645 u8 opcode = *(const u8 *)rpl;
2646
2647 if (opcode == FW_PORT_CMD) { /* link/module state change message */
2648 int speed = 0, fc = 0;
2649 const struct fw_port_cmd *p = (void *)rpl;
2650 int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
2651 int port = adap->chan_map[chan];
2652 struct port_info *pi = adap2pinfo(adap, port);
2653 struct link_config *lc = &pi->link_cfg;
2654 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
2655 int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
2656 u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
2657
2658 if (stat & FW_PORT_CMD_RXPAUSE)
2659 fc |= PAUSE_RX;
2660 if (stat & FW_PORT_CMD_TXPAUSE)
2661 fc |= PAUSE_TX;
2662 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
2663 speed = SPEED_100;
2664 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
2665 speed = SPEED_1000;
2666 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
2667 speed = SPEED_10000;
2668
2669 if (link_ok != lc->link_ok || speed != lc->speed ||
2670 fc != lc->fc) { /* something changed */
2671 lc->link_ok = link_ok;
2672 lc->speed = speed;
2673 lc->fc = fc;
2674 t4_os_link_changed(adap, port, link_ok);
2675 }
2676 if (mod != pi->mod_type) {
2677 pi->mod_type = mod;
2678 t4_os_portmod_changed(adap, port);
2679 }
2680 }
2681 return 0;
2682}
2683
2684static void __devinit get_pci_mode(struct adapter *adapter,
2685 struct pci_params *p)
2686{
2687 u16 val;
2688 u32 pcie_cap = pci_pcie_cap(adapter->pdev);
2689
2690 if (pcie_cap) {
2691 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
2692 &val);
2693 p->speed = val & PCI_EXP_LNKSTA_CLS;
2694 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
2695 }
2696}
2697
2698/**
2699 * init_link_config - initialize a link's SW state
2700 * @lc: structure holding the link state
2701 * @caps: link capabilities
2702 *
2703 * Initializes the SW state maintained for each link, including the link's
2704 * capabilities and default speed/flow-control/autonegotiation settings.
2705 */
2706static void __devinit init_link_config(struct link_config *lc,
2707 unsigned int caps)
2708{
2709 lc->supported = caps;
2710 lc->requested_speed = 0;
2711 lc->speed = 0;
2712 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
2713 if (lc->supported & FW_PORT_CAP_ANEG) {
2714 lc->advertising = lc->supported & ADVERT_MASK;
2715 lc->autoneg = AUTONEG_ENABLE;
2716 lc->requested_fc |= PAUSE_AUTONEG;
2717 } else {
2718 lc->advertising = 0;
2719 lc->autoneg = AUTONEG_DISABLE;
2720 }
2721}
2722
2723int t4_wait_dev_ready(struct adapter *adap)
2724{
2725 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
2726 return 0;
2727 msleep(500);
2728 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
2729}
2730
2731static int __devinit get_flash_params(struct adapter *adap)
2732{
2733 int ret;
2734 u32 info;
2735
2736 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
2737 if (!ret)
2738 ret = sf1_read(adap, 3, 0, 1, &info);
2739 t4_write_reg(adap, SF_OP, 0); /* unlock SF */
2740 if (ret)
2741 return ret;
2742
2743 if ((info & 0xff) != 0x20) /* not a Numonix flash */
2744 return -EINVAL;
2745 info >>= 16; /* log2 of size */
2746 if (info >= 0x14 && info < 0x18)
2747 adap->params.sf_nsec = 1 << (info - 16);
2748 else if (info == 0x18)
2749 adap->params.sf_nsec = 64;
2750 else
2751 return -EINVAL;
2752 adap->params.sf_size = 1 << info;
2753 adap->params.sf_fw_start =
2754 t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
2755 return 0;
2756}
2757
2758/**
2759 * t4_prep_adapter - prepare SW and HW for operation
2760 * @adapter: the adapter
2761 *
2762 * Initializes the adapter SW state for the various HW modules, sets
2763 * initial values for some adapter tunables, and reads the PCI link,
2764 * chip revision, serial flash and VPD parameters needed before the
2765 * firmware is contacted.
2766 */
2767int __devinit t4_prep_adapter(struct adapter *adapter)
2768{
2769 int ret;
2770
2771 ret = t4_wait_dev_ready(adapter);
2772 if (ret < 0)
2773 return ret;
2774
2775 get_pci_mode(adapter, &adapter->params.pci);
2776 adapter->params.rev = t4_read_reg(adapter, PL_REV);
2777
2778 ret = get_flash_params(adapter);
2779 if (ret < 0) {
2780 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
2781 return ret;
2782 }
2783
2784 ret = get_vpd_params(adapter, &adapter->params.vpd);
2785 if (ret < 0)
2786 return ret;
2787
2788 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
2789
2790 /*
2791 * Default port for debugging in case we can't reach FW.
2792 */
2793 adapter->params.nports = 1;
2794 adapter->params.portvec = 1;
2795 return 0;
2796}
2797
2798int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
2799{
2800 u8 addr[6];
2801 int ret, i, j = 0;
2802 struct fw_port_cmd c;
2803 struct fw_rss_vi_config_cmd rvc;
2804
2805 memset(&c, 0, sizeof(c));
2806 memset(&rvc, 0, sizeof(rvc));
2807
2808 for_each_port(adap, i) {
2809 unsigned int rss_size;
2810 struct port_info *p = adap2pinfo(adap, i);
2811
2812 while ((adap->params.portvec & (1 << j)) == 0)
2813 j++;
2814
2815 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
2816 FW_CMD_REQUEST | FW_CMD_READ |
2817 FW_PORT_CMD_PORTID(j));
2818 c.action_to_len16 = htonl(
2819 FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
2820 FW_LEN16(c));
2821 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2822 if (ret)
2823 return ret;
2824
2825 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
2826 if (ret < 0)
2827 return ret;
2828
2829 p->viid = ret;
2830 p->tx_chan = j;
2831 p->lport = j;
2832 p->rss_size = rss_size;
2833 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
2834 memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
2835 adap->port[i]->dev_id = j;
2836
2837 ret = ntohl(c.u.info.lstatus_to_modtype);
2838 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
2839 FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
2840 p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
2841 p->mod_type = FW_PORT_MOD_TYPE_NA;
2842
2843 rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2844 FW_CMD_REQUEST | FW_CMD_READ |
2845 FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
2846 rvc.retval_len16 = htonl(FW_LEN16(rvc));
2847 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
2848 if (ret)
2849 return ret;
2850 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
2851
2852 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
2853 j++;
2854 }
2855 return 0;
2856}
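
A sketch of a minimal probe-time bring-up order using the helpers above; the mailbox/PF/VF numbers are placeholders and MASTER_MAY is the dev_master value that lets FW choose the master.

static int __devinit example_bringup(struct adapter *adap, int mbox,
				     int pf, int vf)
{
	enum dev_state state;
	int ret;

	ret = t4_prep_adapter(adap);
	if (ret)
		return ret;
	ret = t4_fw_hello(adap, mbox, mbox, MASTER_MAY, &state);
	if (ret < 0)
		return ret;
	if (state != DEV_STATE_INIT) {
		ret = t4_early_init(adap, mbox);	/* device-independent init */
		if (ret)
			return ret;
	}
	return t4_port_init(adap, mbox, pf, vf);	/* discover ports, alloc VIs */
}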
diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h
new file mode 100644
index 00000000000..c26b455f37d
--- /dev/null
+++ b/drivers/net/cxgb4/t4_hw.h
@@ -0,0 +1,140 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef __T4_HW_H
36#define __T4_HW_H
37
38#include <linux/types.h>
39
40enum {
41 NCHAN = 4, /* # of HW channels */
42 MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */
43 EEPROMSIZE = 17408, /* Serial EEPROM physical size */
44 EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
45 EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
46 RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
47 TCB_SIZE = 128, /* TCB size */
48 NMTUS = 16, /* size of MTU table */
49 NCCTRL_WIN = 32, /* # of congestion control windows */
50 NEXACT_MAC = 336, /* # of exact MAC address filters */
51 L2T_SIZE = 4096, /* # of L2T entries */
52 MBOX_LEN = 64, /* mailbox size in bytes */
53 TRACE_LEN = 112, /* length of trace data and mask */
54 FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
55 NWOL_PAT = 8, /* # of WoL patterns */
56 WOL_PAT_LEN = 128, /* length of WoL patterns */
57};
58
59enum {
60 SF_PAGE_SIZE = 256, /* serial flash page size */
61};
62
63enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */
64
65enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV }; /* mailbox owners */
66
67enum {
68 SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
69 SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
70 SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
71
72 SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */
73 SGE_TIMER_UPD_CIDX = 7, /* update cidx only */
74
75 SGE_EQ_IDXSIZE = 64, /* egress queue pidx/cidx unit size */
76
77 SGE_INTRDST_PCI = 0, /* interrupt destination is PCI-E */
78 SGE_INTRDST_IQ = 1, /* destination is an ingress queue */
79
80 SGE_UPDATEDEL_NONE = 0, /* ingress queue pidx update delivery */
81 SGE_UPDATEDEL_INTR = 1, /* interrupt */
82 SGE_UPDATEDEL_STPG = 2, /* status page */
83 SGE_UPDATEDEL_BOTH = 3, /* interrupt and status page */
84
85 SGE_HOSTFCMODE_NONE = 0, /* egress queue cidx updates */
86 SGE_HOSTFCMODE_IQ = 1, /* sent to ingress queue */
87 SGE_HOSTFCMODE_STPG = 2, /* sent to status page */
88 SGE_HOSTFCMODE_BOTH = 3, /* ingress queue and status page */
89
90 SGE_FETCHBURSTMIN_16B = 0,/* egress queue descriptor fetch minimum */
91 SGE_FETCHBURSTMIN_32B = 1,
92 SGE_FETCHBURSTMIN_64B = 2,
93 SGE_FETCHBURSTMIN_128B = 3,
94
95 SGE_FETCHBURSTMAX_64B = 0,/* egress queue descriptor fetch maximum */
96 SGE_FETCHBURSTMAX_128B = 1,
97 SGE_FETCHBURSTMAX_256B = 2,
98 SGE_FETCHBURSTMAX_512B = 3,
99
100 SGE_CIDXFLUSHTHRESH_1 = 0,/* egress queue cidx flush threshold */
101 SGE_CIDXFLUSHTHRESH_2 = 1,
102 SGE_CIDXFLUSHTHRESH_4 = 2,
103 SGE_CIDXFLUSHTHRESH_8 = 3,
104 SGE_CIDXFLUSHTHRESH_16 = 4,
105 SGE_CIDXFLUSHTHRESH_32 = 5,
106 SGE_CIDXFLUSHTHRESH_64 = 6,
107 SGE_CIDXFLUSHTHRESH_128 = 7,
108
109 SGE_INGPADBOUNDARY_SHIFT = 5,/* ingress queue pad boundary */
110};
111
112struct sge_qstat { /* data written to SGE queue status entries */
113 __be32 qid;
114 __be16 cidx;
115 __be16 pidx;
116};
117
118/*
119 * Structure for last 128 bits of response descriptors
120 */
121struct rsp_ctrl {
122 __be32 hdrbuflen_pidx;
123 __be32 pldbuflen_qid;
124 union {
125 u8 type_gen;
126 __be64 last_flit;
127 };
128};
129
130#define RSPD_NEWBUF 0x80000000U
131#define RSPD_LEN(x) (((x) >> 0) & 0x7fffffffU)
132#define RSPD_QID(x) RSPD_LEN(x)
133
134#define RSPD_GEN(x) ((x) >> 7)
135#define RSPD_TYPE(x) (((x) >> 4) & 3)
136
137#define QINTR_CNT_EN 0x1
138#define QINTR_TIMER_IDX(x) ((x) << 1)
139#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7)
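/*
 * Illustrative sketch, not part of the original header: one way the
 * RSPD_* accessors above could be used to classify a response entry.
 * "rc" is assumed to point at the rsp_ctrl trailer of an ingress queue
 * entry, and the generation-bit comparison against "gen" is a
 * hypothetical way of detecting a freshly written descriptor.
 */
static inline int example_rsp_is_new_cpl(const struct rsp_ctrl *rc, u8 gen)
{
	u8 tg = rc->type_gen;

	return RSPD_GEN(tg) == gen && RSPD_TYPE(tg) == RSP_TYPE_CPL;
}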
140#endif /* __T4_HW_H */
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h
new file mode 100644
index 00000000000..eb71b8250b9
--- /dev/null
+++ b/drivers/net/cxgb4/t4_msg.h
@@ -0,0 +1,678 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef __T4_MSG_H
36#define __T4_MSG_H
37
38#include <linux/types.h>
39
40enum {
41 CPL_PASS_OPEN_REQ = 0x1,
42 CPL_PASS_ACCEPT_RPL = 0x2,
43 CPL_ACT_OPEN_REQ = 0x3,
44 CPL_SET_TCB_FIELD = 0x5,
45 CPL_GET_TCB = 0x6,
46 CPL_CLOSE_CON_REQ = 0x8,
47 CPL_CLOSE_LISTSRV_REQ = 0x9,
48 CPL_ABORT_REQ = 0xA,
49 CPL_ABORT_RPL = 0xB,
50 CPL_RX_DATA_ACK = 0xD,
51 CPL_TX_PKT = 0xE,
52 CPL_L2T_WRITE_REQ = 0x12,
53 CPL_TID_RELEASE = 0x1A,
54
55 CPL_CLOSE_LISTSRV_RPL = 0x20,
56 CPL_L2T_WRITE_RPL = 0x23,
57 CPL_PASS_OPEN_RPL = 0x24,
58 CPL_ACT_OPEN_RPL = 0x25,
59 CPL_PEER_CLOSE = 0x26,
60 CPL_ABORT_REQ_RSS = 0x2B,
61 CPL_ABORT_RPL_RSS = 0x2D,
62
63 CPL_CLOSE_CON_RPL = 0x32,
64 CPL_ISCSI_HDR = 0x33,
65 CPL_RDMA_CQE = 0x35,
66 CPL_RDMA_CQE_READ_RSP = 0x36,
67 CPL_RDMA_CQE_ERR = 0x37,
68 CPL_RX_DATA = 0x39,
69 CPL_SET_TCB_RPL = 0x3A,
70 CPL_RX_PKT = 0x3B,
71 CPL_RX_DDP_COMPLETE = 0x3F,
72
73 CPL_ACT_ESTABLISH = 0x40,
74 CPL_PASS_ESTABLISH = 0x41,
75 CPL_RX_DATA_DDP = 0x42,
76 CPL_PASS_ACCEPT_REQ = 0x44,
77
78 CPL_RDMA_READ_REQ = 0x60,
79
80 CPL_PASS_OPEN_REQ6 = 0x81,
81 CPL_ACT_OPEN_REQ6 = 0x83,
82
83 CPL_RDMA_TERMINATE = 0xA2,
84 CPL_RDMA_WRITE = 0xA4,
85 CPL_SGE_EGR_UPDATE = 0xA5,
86
87 CPL_TRACE_PKT = 0xB0,
88
89 CPL_FW4_MSG = 0xC0,
90 CPL_FW4_PLD = 0xC1,
91 CPL_FW4_ACK = 0xC3,
92
93 CPL_FW6_MSG = 0xE0,
94 CPL_FW6_PLD = 0xE1,
95 CPL_TX_PKT_LSO = 0xED,
96 CPL_TX_PKT_XT = 0xEE,
97
98 NUM_CPL_CMDS
99};
100
101enum CPL_error {
102 CPL_ERR_NONE = 0,
103 CPL_ERR_TCAM_FULL = 3,
104 CPL_ERR_BAD_LENGTH = 15,
105 CPL_ERR_BAD_ROUTE = 18,
106 CPL_ERR_CONN_RESET = 20,
107 CPL_ERR_CONN_EXIST_SYNRECV = 21,
108 CPL_ERR_CONN_EXIST = 22,
109 CPL_ERR_ARP_MISS = 23,
110 CPL_ERR_BAD_SYN = 24,
111 CPL_ERR_CONN_TIMEDOUT = 30,
112 CPL_ERR_XMIT_TIMEDOUT = 31,
113 CPL_ERR_PERSIST_TIMEDOUT = 32,
114 CPL_ERR_FINWAIT2_TIMEDOUT = 33,
115 CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
116 CPL_ERR_RTX_NEG_ADVICE = 35,
117 CPL_ERR_PERSIST_NEG_ADVICE = 36,
118 CPL_ERR_ABORT_FAILED = 42,
119 CPL_ERR_IWARP_FLM = 50,
120};
121
122enum {
123 ULP_MODE_NONE = 0,
124 ULP_MODE_ISCSI = 2,
125 ULP_MODE_RDMA = 4,
126 ULP_MODE_TCPDDP = 5,
127 ULP_MODE_FCOE = 6,
128};
129
130enum {
131 ULP_CRC_HEADER = 1 << 0,
132 ULP_CRC_DATA = 1 << 1
133};
134
135enum {
136 CPL_ABORT_SEND_RST = 0,
137 CPL_ABORT_NO_RST,
138};
139
140enum { /* TX_PKT_XT checksum types */
141 TX_CSUM_TCP = 0,
142 TX_CSUM_UDP = 1,
143 TX_CSUM_CRC16 = 4,
144 TX_CSUM_CRC32 = 5,
145 TX_CSUM_CRC32C = 6,
146 TX_CSUM_FCOE = 7,
147 TX_CSUM_TCPIP = 8,
148 TX_CSUM_UDPIP = 9,
149 TX_CSUM_TCPIP6 = 10,
150 TX_CSUM_UDPIP6 = 11,
151 TX_CSUM_IP = 12,
152};
153
154union opcode_tid {
155 __be32 opcode_tid;
156 u8 opcode;
157};
158
159#define CPL_OPCODE(x) ((x) << 24)
160#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid))
161#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
162#define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF)
163
164/* partitioning of TID fields that also carry a queue id */
165#define GET_TID_TID(x) ((x) & 0x3fff)
166#define GET_TID_QID(x) (((x) >> 14) & 0x3ff)
167#define TID_QID(x) ((x) << 14)
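/*
 * Illustrative sketch (hypothetical helpers, not from the driver):
 * packing and unpacking the opcode/TID word with the macros above.
 * The byte-order helpers (htonl and friends) are assumed to be
 * available from the usual kernel headers.
 */
static inline void example_set_opcode_tid(union opcode_tid *ot, u8 opcode,
					  unsigned int tid)
{
	ot->opcode_tid = htonl(MK_OPCODE_TID(opcode, tid));
}

static inline unsigned int example_tid_part(unsigned int tid_qid)
{
	return GET_TID_TID(tid_qid);	/* low 14 bits: the TID itself */
}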
168
169struct rss_header {
170 u8 opcode;
171#if defined(__LITTLE_ENDIAN_BITFIELD)
172 u8 channel:2;
173 u8 filter_hit:1;
174 u8 filter_tid:1;
175 u8 hash_type:2;
176 u8 ipv6:1;
177 u8 send2fw:1;
178#else
179 u8 send2fw:1;
180 u8 ipv6:1;
181 u8 hash_type:2;
182 u8 filter_tid:1;
183 u8 filter_hit:1;
184 u8 channel:2;
185#endif
186 __be16 qid;
187 __be32 hash_val;
188};
189
190struct work_request_hdr {
191 __be32 wr_hi;
192 __be32 wr_mid;
193 __be64 wr_lo;
194};
195
196#define WR_HDR struct work_request_hdr wr
197
198struct cpl_pass_open_req {
199 WR_HDR;
200 union opcode_tid ot;
201 __be16 local_port;
202 __be16 peer_port;
203 __be32 local_ip;
204 __be32 peer_ip;
205 __be64 opt0;
206#define TX_CHAN(x) ((x) << 2)
207#define DELACK(x) ((x) << 5)
208#define ULP_MODE(x) ((x) << 8)
209#define RCV_BUFSIZ(x) ((x) << 12)
210#define DSCP(x) ((x) << 22)
211#define SMAC_SEL(x) ((u64)(x) << 28)
212#define L2T_IDX(x) ((u64)(x) << 36)
213#define NAGLE(x) ((u64)(x) << 49)
214#define WND_SCALE(x) ((u64)(x) << 50)
215#define KEEP_ALIVE(x) ((u64)(x) << 54)
216#define MSS_IDX(x) ((u64)(x) << 60)
217 __be64 opt1;
218#define SYN_RSS_ENABLE (1 << 0)
219#define SYN_RSS_QUEUE(x) ((x) << 2)
220#define CONN_POLICY_ASK (1 << 22)
221};
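/*
 * Illustrative sketch with hypothetical field values, not taken from
 * the driver: composing the opt0/opt1 words of a passive-open request
 * from the field macros above.  cpu_to_be64() is assumed available.
 */
static inline void example_fill_pass_open(struct cpl_pass_open_req *req,
					  unsigned int chan, unsigned int qid)
{
	req->opt0 = cpu_to_be64(TX_CHAN(chan) | ULP_MODE(ULP_MODE_NONE) |
				SMAC_SEL(0));
	req->opt1 = cpu_to_be64(SYN_RSS_ENABLE | SYN_RSS_QUEUE(qid) |
				CONN_POLICY_ASK);
}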
222
223struct cpl_pass_open_req6 {
224 WR_HDR;
225 union opcode_tid ot;
226 __be16 local_port;
227 __be16 peer_port;
228 __be64 local_ip_hi;
229 __be64 local_ip_lo;
230 __be64 peer_ip_hi;
231 __be64 peer_ip_lo;
232 __be64 opt0;
233 __be64 opt1;
234};
235
236struct cpl_pass_open_rpl {
237 union opcode_tid ot;
238 u8 rsvd[3];
239 u8 status;
240};
241
242struct cpl_pass_accept_rpl {
243 WR_HDR;
244 union opcode_tid ot;
245 __be32 opt2;
246#define RSS_QUEUE(x) ((x) << 0)
247#define RSS_QUEUE_VALID (1 << 10)
248#define RX_COALESCE_VALID(x) ((x) << 11)
249#define RX_COALESCE(x) ((x) << 12)
250#define TX_QUEUE(x) ((x) << 23)
251#define RX_CHANNEL(x) ((x) << 26)
252#define WND_SCALE_EN(x) ((x) << 28)
253#define TSTAMPS_EN(x) ((x) << 29)
254#define SACK_EN(x) ((x) << 30)
255 __be64 opt0;
256};
257
258struct cpl_act_open_req {
259 WR_HDR;
260 union opcode_tid ot;
261 __be16 local_port;
262 __be16 peer_port;
263 __be32 local_ip;
264 __be32 peer_ip;
265 __be64 opt0;
266 __be32 params;
267 __be32 opt2;
268};
269
270struct cpl_act_open_req6 {
271 WR_HDR;
272 union opcode_tid ot;
273 __be16 local_port;
274 __be16 peer_port;
275 __be64 local_ip_hi;
276 __be64 local_ip_lo;
277 __be64 peer_ip_hi;
278 __be64 peer_ip_lo;
279 __be64 opt0;
280 __be32 params;
281 __be32 opt2;
282};
283
284struct cpl_act_open_rpl {
285 union opcode_tid ot;
286 __be32 atid_status;
287#define GET_AOPEN_STATUS(x) ((x) & 0xff)
288#define GET_AOPEN_ATID(x) (((x) >> 8) & 0xffffff)
289};
290
291struct cpl_pass_establish {
292 union opcode_tid ot;
293 __be32 rsvd;
294 __be32 tos_stid;
295#define GET_POPEN_TID(x) ((x) & 0xffffff)
296#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff)
297 __be16 mac_idx;
298 __be16 tcp_opt;
299#define GET_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1)
300#define GET_TCPOPT_SACK(x) (((x) >> 6) & 1)
301#define GET_TCPOPT_TSTAMP(x) (((x) >> 7) & 1)
302#define GET_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
303#define GET_TCPOPT_MSS(x) (((x) >> 12) & 0xf)
304 __be32 snd_isn;
305 __be32 rcv_isn;
306};
307
308struct cpl_act_establish {
309 union opcode_tid ot;
310 __be32 rsvd;
311 __be32 tos_atid;
312 __be16 mac_idx;
313 __be16 tcp_opt;
314 __be32 snd_isn;
315 __be32 rcv_isn;
316};
317
318struct cpl_get_tcb {
319 WR_HDR;
320 union opcode_tid ot;
321 __be16 reply_ctrl;
322#define QUEUENO(x) ((x) << 0)
323#define REPLY_CHAN(x) ((x) << 14)
324#define NO_REPLY(x) ((x) << 15)
325 __be16 cookie;
326};
327
328struct cpl_set_tcb_field {
329 WR_HDR;
330 union opcode_tid ot;
331 __be16 reply_ctrl;
332 __be16 word_cookie;
333#define TCB_WORD(x) ((x) << 0)
334#define TCB_COOKIE(x) ((x) << 5)
335 __be64 mask;
336 __be64 val;
337};
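/*
 * Illustrative sketch (hypothetical word/cookie choice, not from the
 * driver): setting a single TCB field by masking "val" into TCB word
 * "word".  htons()/cpu_to_be64() are assumed available.
 */
static inline void example_fill_set_tcb(struct cpl_set_tcb_field *req,
					unsigned int word, u64 mask, u64 val)
{
	req->word_cookie = htons(TCB_WORD(word) | TCB_COOKIE(0));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);
}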
338
339struct cpl_set_tcb_rpl {
340 union opcode_tid ot;
341 __be16 rsvd;
342 u8 cookie;
343 u8 status;
344 __be64 oldval;
345};
346
347struct cpl_close_con_req {
348 WR_HDR;
349 union opcode_tid ot;
350 __be32 rsvd;
351};
352
353struct cpl_close_con_rpl {
354 union opcode_tid ot;
355 u8 rsvd[3];
356 u8 status;
357 __be32 snd_nxt;
358 __be32 rcv_nxt;
359};
360
361struct cpl_close_listsvr_req {
362 WR_HDR;
363 union opcode_tid ot;
364 __be16 reply_ctrl;
365#define LISTSVR_IPV6 (1 << 14)
366 __be16 rsvd;
367};
368
369struct cpl_close_listsvr_rpl {
370 union opcode_tid ot;
371 u8 rsvd[3];
372 u8 status;
373};
374
375struct cpl_abort_req_rss {
376 union opcode_tid ot;
377 u8 rsvd[3];
378 u8 status;
379};
380
381struct cpl_abort_req {
382 WR_HDR;
383 union opcode_tid ot;
384 __be32 rsvd0;
385 u8 rsvd1;
386 u8 cmd;
387 u8 rsvd2[6];
388};
389
390struct cpl_abort_rpl_rss {
391 union opcode_tid ot;
392 u8 rsvd[3];
393 u8 status;
394};
395
396struct cpl_abort_rpl {
397 WR_HDR;
398 union opcode_tid ot;
399 __be32 rsvd0;
400 u8 rsvd1;
401 u8 cmd;
402 u8 rsvd2[6];
403};
404
405struct cpl_peer_close {
406 union opcode_tid ot;
407 __be32 rcv_nxt;
408};
409
410struct cpl_tid_release {
411 WR_HDR;
412 union opcode_tid ot;
413 __be32 rsvd;
414};
415
416struct cpl_tx_pkt_core {
417 __be32 ctrl0;
418#define TXPKT_VF(x) ((x) << 0)
419#define TXPKT_PF(x) ((x) << 8)
420#define TXPKT_VF_VLD (1 << 11)
421#define TXPKT_OVLAN_IDX(x) ((x) << 12)
422#define TXPKT_INTF(x) ((x) << 16)
423#define TXPKT_INS_OVLAN (1 << 21)
424#define TXPKT_OPCODE(x) ((x) << 24)
425 __be16 pack;
426 __be16 len;
427 __be64 ctrl1;
428#define TXPKT_CSUM_END(x) ((x) << 12)
429#define TXPKT_CSUM_START(x) ((x) << 20)
430#define TXPKT_IPHDR_LEN(x) ((u64)(x) << 20)
431#define TXPKT_CSUM_LOC(x) ((u64)(x) << 30)
432#define TXPKT_ETHHDR_LEN(x) ((u64)(x) << 34)
433#define TXPKT_CSUM_TYPE(x) ((u64)(x) << 40)
434#define TXPKT_VLAN(x) ((u64)(x) << 44)
435#define TXPKT_VLAN_VLD (1ULL << 60)
436#define TXPKT_IPCSUM_DIS (1ULL << 62)
437#define TXPKT_L4CSUM_DIS (1ULL << 63)
438};
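/*
 * Illustrative sketch, not taken verbatim from the driver: filling the
 * two control words of a TX packet CPL.  "port" and the decision to
 * disable hardware checksum insertion are hypothetical.
 */
static inline void example_fill_tx_pkt(struct cpl_tx_pkt_core *cpl,
				       unsigned int port, unsigned int len)
{
	cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) | TXPKT_INTF(port) |
			   TXPKT_PF(0));
	cpl->len = htons(len);
	cpl->ctrl1 = cpu_to_be64(TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS);
}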
439
440struct cpl_tx_pkt {
441 WR_HDR;
442 struct cpl_tx_pkt_core c;
443};
444
445#define cpl_tx_pkt_xt cpl_tx_pkt
446
447struct cpl_tx_pkt_lso_core {
448 __be32 lso_ctrl;
449#define LSO_TCPHDR_LEN(x) ((x) << 0)
450#define LSO_IPHDR_LEN(x) ((x) << 4)
451#define LSO_ETHHDR_LEN(x) ((x) << 16)
452#define LSO_IPV6(x) ((x) << 20)
453#define LSO_LAST_SLICE (1 << 22)
454#define LSO_FIRST_SLICE (1 << 23)
455#define LSO_OPCODE(x) ((x) << 24)
456 __be16 ipid_ofst;
457 __be16 mss;
458 __be32 seqno_offset;
459 __be32 len;
460 /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
461};
462
463struct cpl_tx_pkt_lso {
464 WR_HDR;
465 struct cpl_tx_pkt_lso_core c;
466 /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
467};
468
469struct cpl_iscsi_hdr {
470 union opcode_tid ot;
471 __be16 pdu_len_ddp;
472#define ISCSI_PDU_LEN(x) ((x) & 0x7FFF)
473#define ISCSI_DDP (1 << 15)
474 __be16 len;
475 __be32 seq;
476 __be16 urg;
477 u8 rsvd;
478 u8 status;
479};
480
481struct cpl_rx_data {
482 union opcode_tid ot;
483 __be16 rsvd;
484 __be16 len;
485 __be32 seq;
486 __be16 urg;
487#if defined(__LITTLE_ENDIAN_BITFIELD)
488 u8 dack_mode:2;
489 u8 psh:1;
490 u8 heartbeat:1;
491 u8 ddp_off:1;
492 u8 :3;
493#else
494 u8 :3;
495 u8 ddp_off:1;
496 u8 heartbeat:1;
497 u8 psh:1;
498 u8 dack_mode:2;
499#endif
500 u8 status;
501};
502
503struct cpl_rx_data_ack {
504 WR_HDR;
505 union opcode_tid ot;
506 __be32 credit_dack;
507#define RX_CREDITS(x) ((x) << 0)
508#define RX_FORCE_ACK(x) ((x) << 28)
509};
510
511struct cpl_rx_pkt {
512 struct rss_header rsshdr;
513 u8 opcode;
514#if defined(__LITTLE_ENDIAN_BITFIELD)
515 u8 iff:4;
516 u8 csum_calc:1;
517 u8 ipmi_pkt:1;
518 u8 vlan_ex:1;
519 u8 ip_frag:1;
520#else
521 u8 ip_frag:1;
522 u8 vlan_ex:1;
523 u8 ipmi_pkt:1;
524 u8 csum_calc:1;
525 u8 iff:4;
526#endif
527 __be16 csum;
528 __be16 vlan;
529 __be16 len;
530 __be32 l2info;
531#define RXF_UDP (1 << 22)
532#define RXF_TCP (1 << 23)
533#define RXF_IP (1 << 24)
534#define RXF_IP6 (1 << 25)
535 __be16 hdr_len;
536 __be16 err_vec;
537};
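/*
 * Illustrative sketch (hypothetical helper, not from the driver):
 * testing the parsed-protocol flags carried in l2info of a received
 * packet.  bool/u32 and ntohl() are assumed available.
 */
static inline bool example_rx_is_tcp_ipv4(const struct cpl_rx_pkt *pkt)
{
	u32 l2info = ntohl(pkt->l2info);

	return (l2info & RXF_IP) && (l2info & RXF_TCP);
}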
538
539struct cpl_trace_pkt {
540 u8 opcode;
541 u8 intf;
542#if defined(__LITTLE_ENDIAN_BITFIELD)
543 u8 runt:4;
544 u8 filter_hit:4;
545 u8 :6;
546 u8 err:1;
547 u8 trunc:1;
548#else
549 u8 filter_hit:4;
550 u8 runt:4;
551 u8 trunc:1;
552 u8 err:1;
553 u8 :6;
554#endif
555 __be16 rsvd;
556 __be16 len;
557 __be64 tstamp;
558};
559
560struct cpl_l2t_write_req {
561 WR_HDR;
562 union opcode_tid ot;
563 __be16 params;
564#define L2T_W_INFO(x) ((x) << 2)
565#define L2T_W_PORT(x) ((x) << 8)
566#define L2T_W_NOREPLY(x) ((x) << 15)
567 __be16 l2t_idx;
568 __be16 vlan;
569 u8 dst_mac[6];
570};
571
572struct cpl_l2t_write_rpl {
573 union opcode_tid ot;
574 u8 status;
575 u8 rsvd[3];
576};
577
578struct cpl_rdma_terminate {
579 union opcode_tid ot;
580 __be16 rsvd;
581 __be16 len;
582};
583
584struct cpl_sge_egr_update {
585 __be32 opcode_qid;
586#define EGR_QID(x) ((x) & 0x1FFFF)
587 __be16 cidx;
588 __be16 pidx;
589};
590
591struct cpl_fw4_pld {
592 u8 opcode;
593 u8 rsvd0[3];
594 u8 type;
595 u8 rsvd1;
596 __be16 len;
597 __be64 data;
598 __be64 rsvd2;
599};
600
601struct cpl_fw6_pld {
602 u8 opcode;
603 u8 rsvd[5];
604 __be16 len;
605 __be64 data[4];
606};
607
608struct cpl_fw4_msg {
609 u8 opcode;
610 u8 type;
611 __be16 rsvd0;
612 __be32 rsvd1;
613 __be64 data[2];
614};
615
616struct cpl_fw4_ack {
617 union opcode_tid ot;
618 u8 credits;
619 u8 rsvd0[2];
620 u8 seq_vld;
621 __be32 snd_nxt;
622 __be32 snd_una;
623 __be64 rsvd1;
624};
625
626struct cpl_fw6_msg {
627 u8 opcode;
628 u8 type;
629 __be16 rsvd0;
630 __be32 rsvd1;
631 __be64 data[4];
632};
633
634/* cpl_fw6_msg.type values */
635enum {
636 FW6_TYPE_CMD_RPL = 0,
637};
638
639enum {
640 ULP_TX_MEM_READ = 2,
641 ULP_TX_MEM_WRITE = 3,
642 ULP_TX_PKT = 4
643};
644
645enum {
646 ULP_TX_SC_NOOP = 0x80,
647 ULP_TX_SC_IMM = 0x81,
648 ULP_TX_SC_DSGL = 0x82,
649 ULP_TX_SC_ISGL = 0x83
650};
651
652struct ulptx_sge_pair {
653 __be32 len[2];
654 __be64 addr[2];
655};
656
657struct ulptx_sgl {
658 __be32 cmd_nsge;
659#define ULPTX_CMD(x) ((x) << 24)
660#define ULPTX_NSGE(x) ((x) << 0)
661 __be32 len0;
662 __be64 addr0;
663 struct ulptx_sge_pair sge[0];
664};
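/*
 * Illustrative sketch (hypothetical, not from the driver): starting a
 * ULP_TX direct scatter/gather list.  "nfrags" is assumed to count the
 * gather entries including the one held in len0/addr0; dma_addr_t and
 * the byte-order helpers come from the usual kernel headers.
 */
static inline void example_start_dsgl(struct ulptx_sgl *sgl,
				      unsigned int nfrags,
				      dma_addr_t addr, unsigned int len)
{
	sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
	sgl->len0 = htonl(len);
	sgl->addr0 = cpu_to_be64(addr);
}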
665
666struct ulp_mem_io {
667 WR_HDR;
668 __be32 cmd;
669#define ULP_MEMIO_ORDER(x) ((x) << 23)
670 __be32 len16; /* command length */
671 __be32 dlen; /* data length in 32-byte units */
672#define ULP_MEMIO_DATA_LEN(x) ((x) << 0)
673 __be32 lock_addr;
674#define ULP_MEMIO_ADDR(x) ((x) << 0)
675#define ULP_MEMIO_LOCK(x) ((x) << 31)
676};
677
678#endif /* __T4_MSG_H */
diff --git a/drivers/net/cxgb4/t4_regs.h b/drivers/net/cxgb4/t4_regs.h
new file mode 100644
index 00000000000..0adc5bcec7c
--- /dev/null
+++ b/drivers/net/cxgb4/t4_regs.h
@@ -0,0 +1,885 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef __T4_REGS_H
36#define __T4_REGS_H
37
38#define MYPF_BASE 0x1b000
39#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr))
40
41#define PF0_BASE 0x1e000
42#define PF0_REG(reg_addr) (PF0_BASE + (reg_addr))
43
44#define PF_STRIDE 0x400
45#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE)
46#define PF_REG(idx, reg) (PF_BASE(idx) + (reg))
47
48#define MYPORT_BASE 0x1c000
49#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr))
50
51#define PORT0_BASE 0x20000
52#define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr))
53
54#define PORT_STRIDE 0x2000
55#define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE)
56#define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg))
57
58#define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR)
59#define EDC_REG(reg, idx) (reg + EDC_STRIDE * idx)
60
61#define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
62#define PCIE_MAILBOX_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
63#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
64#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
65
66#define SGE_PF_KDOORBELL 0x0
67#define QID_MASK 0xffff8000U
68#define QID_SHIFT 15
69#define QID(x) ((x) << QID_SHIFT)
70#define DBPRIO 0x00004000U
71#define PIDX_MASK 0x00003fffU
72#define PIDX_SHIFT 0
73#define PIDX(x) ((x) << PIDX_SHIFT)
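/*
 * Illustrative sketch (hypothetical helper, not from the driver):
 * composing the value written to a PF's kernel doorbell, i.e.
 * MYPF_REG(SGE_PF_KDOORBELL), to tell the SGE that "n" new descriptors
 * were posted to egress queue "qid".  The MMIO write itself and the
 * u32 type are assumed to come from the including code.
 */
static inline u32 example_kdoorbell_val(unsigned int qid, unsigned int n)
{
	return QID(qid) | PIDX(n);
}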
74
75#define SGE_PF_GTS 0x4
76#define INGRESSQID_MASK 0xffff0000U
77#define INGRESSQID_SHIFT 16
78#define INGRESSQID(x) ((x) << INGRESSQID_SHIFT)
79#define TIMERREG_MASK 0x0000e000U
80#define TIMERREG_SHIFT 13
81#define TIMERREG(x) ((x) << TIMERREG_SHIFT)
82#define SEINTARM_MASK 0x00001000U
83#define SEINTARM_SHIFT 12
84#define SEINTARM(x) ((x) << SEINTARM_SHIFT)
85#define CIDXINC_MASK 0x00000fffU
86#define CIDXINC_SHIFT 0
87#define CIDXINC(x) ((x) << CIDXINC_SHIFT)
88
89#define SGE_CONTROL 0x1008
90#define DCASYSTYPE 0x00080000U
91#define RXPKTCPLMODE 0x00040000U
92#define EGRSTATUSPAGESIZE 0x00020000U
93#define PKTSHIFT_MASK 0x00001c00U
94#define PKTSHIFT_SHIFT 10
95#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT)
96#define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT)
97#define INGPCIEBOUNDARY_MASK 0x00000380U
98#define INGPCIEBOUNDARY_SHIFT 7
99#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT)
100#define INGPADBOUNDARY_MASK 0x00000070U
101#define INGPADBOUNDARY_SHIFT 4
102#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT)
103#define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \
104 >> INGPADBOUNDARY_SHIFT)
105#define EGRPCIEBOUNDARY_MASK 0x0000000eU
106#define EGRPCIEBOUNDARY_SHIFT 1
107#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT)
108#define GLOBALENABLE 0x00000001U
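/*
 * Illustrative sketch, not from the driver: the MASK/SHIFT/GET triplets
 * in this file all follow the same pattern, so updating a field is a
 * read-modify-write on the register image.  Reading and writing the
 * register itself is left to the driver's MMIO helpers and not shown.
 */
static inline u32 example_set_pktshift(u32 sge_control, unsigned int shift)
{
	return (sge_control & ~PKTSHIFT_MASK) | PKTSHIFT(shift);
}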
109
110#define SGE_HOST_PAGE_SIZE 0x100c
111#define HOSTPAGESIZEPF0_MASK 0x0000000fU
112#define HOSTPAGESIZEPF0_SHIFT 0
113#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT)
114
115#define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010
116#define QUEUESPERPAGEPF0_MASK 0x0000000fU
117#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK)
118
119#define SGE_INT_CAUSE1 0x1024
120#define SGE_INT_CAUSE2 0x1030
121#define SGE_INT_CAUSE3 0x103c
122#define ERR_FLM_DBP 0x80000000U
123#define ERR_FLM_IDMA1 0x40000000U
124#define ERR_FLM_IDMA0 0x20000000U
125#define ERR_FLM_HINT 0x10000000U
126#define ERR_PCIE_ERROR3 0x08000000U
127#define ERR_PCIE_ERROR2 0x04000000U
128#define ERR_PCIE_ERROR1 0x02000000U
129#define ERR_PCIE_ERROR0 0x01000000U
130#define ERR_TIMER_ABOVE_MAX_QID 0x00800000U
131#define ERR_CPL_EXCEED_IQE_SIZE 0x00400000U
132#define ERR_INVALID_CIDX_INC 0x00200000U
133#define ERR_ITP_TIME_PAUSED 0x00100000U
134#define ERR_CPL_OPCODE_0 0x00080000U
135#define ERR_DROPPED_DB 0x00040000U
136#define ERR_DATA_CPL_ON_HIGH_QID1 0x00020000U
137#define ERR_DATA_CPL_ON_HIGH_QID0 0x00010000U
138#define ERR_BAD_DB_PIDX3 0x00008000U
139#define ERR_BAD_DB_PIDX2 0x00004000U
140#define ERR_BAD_DB_PIDX1 0x00002000U
141#define ERR_BAD_DB_PIDX0 0x00001000U
142#define ERR_ING_PCIE_CHAN 0x00000800U
143#define ERR_ING_CTXT_PRIO 0x00000400U
144#define ERR_EGR_CTXT_PRIO 0x00000200U
145#define DBFIFO_HP_INT 0x00000100U
146#define DBFIFO_LP_INT 0x00000080U
147#define REG_ADDRESS_ERR 0x00000040U
148#define INGRESS_SIZE_ERR 0x00000020U
149#define EGRESS_SIZE_ERR 0x00000010U
150#define ERR_INV_CTXT3 0x00000008U
151#define ERR_INV_CTXT2 0x00000004U
152#define ERR_INV_CTXT1 0x00000002U
153#define ERR_INV_CTXT0 0x00000001U
154
155#define SGE_INT_ENABLE3 0x1040
156#define SGE_FL_BUFFER_SIZE0 0x1044
157#define SGE_FL_BUFFER_SIZE1 0x1048
158#define SGE_INGRESS_RX_THRESHOLD 0x10a0
159#define THRESHOLD_0_MASK 0x3f000000U
160#define THRESHOLD_0_SHIFT 24
161#define THRESHOLD_0(x) ((x) << THRESHOLD_0_SHIFT)
162#define THRESHOLD_0_GET(x) (((x) & THRESHOLD_0_MASK) >> THRESHOLD_0_SHIFT)
163#define THRESHOLD_1_MASK 0x003f0000U
164#define THRESHOLD_1_SHIFT 16
165#define THRESHOLD_1(x) ((x) << THRESHOLD_1_SHIFT)
166#define THRESHOLD_1_GET(x) (((x) & THRESHOLD_1_MASK) >> THRESHOLD_1_SHIFT)
167#define THRESHOLD_2_MASK 0x00003f00U
168#define THRESHOLD_2_SHIFT 8
169#define THRESHOLD_2(x) ((x) << THRESHOLD_2_SHIFT)
170#define THRESHOLD_2_GET(x) (((x) & THRESHOLD_2_MASK) >> THRESHOLD_2_SHIFT)
171#define THRESHOLD_3_MASK 0x0000003fU
172#define THRESHOLD_3_SHIFT 0
173#define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT)
174#define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT)
175
176#define SGE_TIMER_VALUE_0_AND_1 0x10b8
177#define TIMERVALUE0_MASK 0xffff0000U
178#define TIMERVALUE0_SHIFT 16
179#define TIMERVALUE0(x) ((x) << TIMERVALUE0_SHIFT)
180#define TIMERVALUE0_GET(x) (((x) & TIMERVALUE0_MASK) >> TIMERVALUE0_SHIFT)
181#define TIMERVALUE1_MASK 0x0000ffffU
182#define TIMERVALUE1_SHIFT 0
183#define TIMERVALUE1(x) ((x) << TIMERVALUE1_SHIFT)
184#define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT)
185
186#define SGE_TIMER_VALUE_2_AND_3 0x10bc
187#define SGE_TIMER_VALUE_4_AND_5 0x10c0
188#define SGE_DEBUG_INDEX 0x10cc
189#define SGE_DEBUG_DATA_HIGH 0x10d0
190#define SGE_DEBUG_DATA_LOW 0x10d4
191#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
192
193#define PCIE_PF_CLI 0x44
194#define PCIE_INT_CAUSE 0x3004
195#define UNXSPLCPLERR 0x20000000U
196#define PCIEPINT 0x10000000U
197#define PCIESINT 0x08000000U
198#define RPLPERR 0x04000000U
199#define RXWRPERR 0x02000000U
200#define RXCPLPERR 0x01000000U
201#define PIOTAGPERR 0x00800000U
202#define MATAGPERR 0x00400000U
203#define INTXCLRPERR 0x00200000U
204#define FIDPERR 0x00100000U
205#define CFGSNPPERR 0x00080000U
206#define HRSPPERR 0x00040000U
207#define HREQPERR 0x00020000U
208#define HCNTPERR 0x00010000U
209#define DRSPPERR 0x00008000U
210#define DREQPERR 0x00004000U
211#define DCNTPERR 0x00002000U
212#define CRSPPERR 0x00001000U
213#define CREQPERR 0x00000800U
214#define CCNTPERR 0x00000400U
215#define TARTAGPERR 0x00000200U
216#define PIOREQPERR 0x00000100U
217#define PIOCPLPERR 0x00000080U
218#define MSIXDIPERR 0x00000040U
219#define MSIXDATAPERR 0x00000020U
220#define MSIXADDRHPERR 0x00000010U
221#define MSIXADDRLPERR 0x00000008U
222#define MSIDATAPERR 0x00000004U
223#define MSIADDRHPERR 0x00000002U
224#define MSIADDRLPERR 0x00000001U
225
226#define PCIE_NONFAT_ERR 0x3010
227#define PCIE_MEM_ACCESS_BASE_WIN 0x3068
228#define PCIEOFST_MASK 0xfffffc00U
229#define BIR_MASK 0x00000300U
230#define BIR_SHIFT 8
231#define BIR(x) ((x) << BIR_SHIFT)
232#define WINDOW_MASK 0x000000ffU
233#define WINDOW_SHIFT 0
234#define WINDOW(x) ((x) << WINDOW_SHIFT)
235#define PCIE_MEM_ACCESS_OFFSET 0x306c
236
237#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
238#define RNPP 0x80000000U
239#define RPCP 0x20000000U
240#define RCIP 0x08000000U
241#define RCCP 0x04000000U
242#define RFTP 0x00800000U
243#define PTRP 0x00100000U
244
245#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS 0x59a4
246#define TPCP 0x40000000U
247#define TNPP 0x20000000U
248#define TFTP 0x10000000U
249#define TCAP 0x08000000U
250#define TCIP 0x04000000U
251#define RCAP 0x02000000U
252#define PLUP 0x00800000U
253#define PLDN 0x00400000U
254#define OTDD 0x00200000U
255#define GTRP 0x00100000U
256#define RDPE 0x00040000U
257#define TDCE 0x00020000U
258#define TDUE 0x00010000U
259
260#define MC_INT_CAUSE 0x7518
261#define ECC_UE_INT_CAUSE 0x00000004U
262#define ECC_CE_INT_CAUSE 0x00000002U
263#define PERR_INT_CAUSE 0x00000001U
264
265#define MC_ECC_STATUS 0x751c
266#define ECC_CECNT_MASK 0xffff0000U
267#define ECC_CECNT_SHIFT 16
268#define ECC_CECNT(x) ((x) << ECC_CECNT_SHIFT)
269#define ECC_CECNT_GET(x) (((x) & ECC_CECNT_MASK) >> ECC_CECNT_SHIFT)
270#define ECC_UECNT_MASK 0x0000ffffU
271#define ECC_UECNT_SHIFT 0
272#define ECC_UECNT(x) ((x) << ECC_UECNT_SHIFT)
273#define ECC_UECNT_GET(x) (((x) & ECC_UECNT_MASK) >> ECC_UECNT_SHIFT)
274
275#define MC_BIST_CMD 0x7600
276#define START_BIST 0x80000000U
277#define BIST_CMD_GAP_MASK 0x0000ff00U
278#define BIST_CMD_GAP_SHIFT 8
279#define BIST_CMD_GAP(x) ((x) << BIST_CMD_GAP_SHIFT)
280#define BIST_OPCODE_MASK 0x00000003U
281#define BIST_OPCODE_SHIFT 0
282#define BIST_OPCODE(x) ((x) << BIST_OPCODE_SHIFT)
283
284#define MC_BIST_CMD_ADDR 0x7604
285#define MC_BIST_CMD_LEN 0x7608
286#define MC_BIST_DATA_PATTERN 0x760c
287#define BIST_DATA_TYPE_MASK 0x0000000fU
288#define BIST_DATA_TYPE_SHIFT 0
289#define BIST_DATA_TYPE(x) ((x) << BIST_DATA_TYPE_SHIFT)
290
291#define MC_BIST_STATUS_RDATA 0x7688
292
293#define MA_EXT_MEMORY_BAR 0x77c8
294#define EXT_MEM_SIZE_MASK 0x00000fffU
295#define EXT_MEM_SIZE_SHIFT 0
296#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT)
297
298#define MA_TARGET_MEM_ENABLE 0x77d8
299#define EXT_MEM_ENABLE 0x00000004U
300#define EDRAM1_ENABLE 0x00000002U
301#define EDRAM0_ENABLE 0x00000001U
302
303#define MA_INT_CAUSE 0x77e0
304#define MEM_PERR_INT_CAUSE 0x00000002U
305#define MEM_WRAP_INT_CAUSE 0x00000001U
306
307#define MA_INT_WRAP_STATUS 0x77e4
308#define MEM_WRAP_ADDRESS_MASK 0xfffffff0U
309#define MEM_WRAP_ADDRESS_SHIFT 4
310#define MEM_WRAP_ADDRESS_GET(x) (((x) & MEM_WRAP_ADDRESS_MASK) >> MEM_WRAP_ADDRESS_SHIFT)
311#define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU
312#define MEM_WRAP_CLIENT_NUM_SHIFT 0
313#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
314
315#define MA_PARITY_ERROR_STATUS 0x77f4
316
317#define EDC_0_BASE_ADDR 0x7900
318
319#define EDC_BIST_CMD 0x7904
320#define EDC_BIST_CMD_ADDR 0x7908
321#define EDC_BIST_CMD_LEN 0x790c
322#define EDC_BIST_DATA_PATTERN 0x7910
323#define EDC_BIST_STATUS_RDATA 0x7928
324#define EDC_INT_CAUSE 0x7978
325#define ECC_UE_PAR 0x00000020U
326#define ECC_CE_PAR 0x00000010U
327#define PERR_PAR_CAUSE 0x00000008U
328
329#define EDC_ECC_STATUS 0x797c
330
331#define EDC_1_BASE_ADDR 0x7980
332
333#define CIM_BOOT_CFG 0x7b00
334#define BOOTADDR_MASK 0xffffff00U
335
336#define CIM_PF_MAILBOX_DATA 0x240
337#define CIM_PF_MAILBOX_CTRL 0x280
338#define MBMSGVALID 0x00000008U
339#define MBINTREQ 0x00000004U
340#define MBOWNER_MASK 0x00000003U
341#define MBOWNER_SHIFT 0
342#define MBOWNER(x) ((x) << MBOWNER_SHIFT)
343#define MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT)
344
345#define CIM_PF_HOST_INT_CAUSE 0x28c
346#define MBMSGRDYINT 0x00080000U
347
348#define CIM_HOST_INT_CAUSE 0x7b2c
349#define TIEQOUTPARERRINT 0x00100000U
350#define TIEQINPARERRINT 0x00080000U
351#define MBHOSTPARERR 0x00040000U
352#define MBUPPARERR 0x00020000U
353#define IBQPARERR 0x0001f800U
354#define IBQTP0PARERR 0x00010000U
355#define IBQTP1PARERR 0x00008000U
356#define IBQULPPARERR 0x00004000U
357#define IBQSGELOPARERR 0x00002000U
358#define IBQSGEHIPARERR 0x00001000U
359#define IBQNCSIPARERR 0x00000800U
360#define OBQPARERR 0x000007e0U
361#define OBQULP0PARERR 0x00000400U
362#define OBQULP1PARERR 0x00000200U
363#define OBQULP2PARERR 0x00000100U
364#define OBQULP3PARERR 0x00000080U
365#define OBQSGEPARERR 0x00000040U
366#define OBQNCSIPARERR 0x00000020U
367#define PREFDROPINT 0x00000002U
368#define UPACCNONZERO 0x00000001U
369
370#define CIM_HOST_UPACC_INT_CAUSE 0x7b34
371#define EEPROMWRINT 0x40000000U
372#define TIMEOUTMAINT 0x20000000U
373#define TIMEOUTINT 0x10000000U
374#define RSPOVRLOOKUPINT 0x08000000U
375#define REQOVRLOOKUPINT 0x04000000U
376#define BLKWRPLINT 0x02000000U
377#define BLKRDPLINT 0x01000000U
378#define SGLWRPLINT 0x00800000U
379#define SGLRDPLINT 0x00400000U
380#define BLKWRCTLINT 0x00200000U
381#define BLKRDCTLINT 0x00100000U
382#define SGLWRCTLINT 0x00080000U
383#define SGLRDCTLINT 0x00040000U
384#define BLKWREEPROMINT 0x00020000U
385#define BLKRDEEPROMINT 0x00010000U
386#define SGLWREEPROMINT 0x00008000U
387#define SGLRDEEPROMINT 0x00004000U
388#define BLKWRFLASHINT 0x00002000U
389#define BLKRDFLASHINT 0x00001000U
390#define SGLWRFLASHINT 0x00000800U
391#define SGLRDFLASHINT 0x00000400U
392#define BLKWRBOOTINT 0x00000200U
393#define BLKRDBOOTINT 0x00000100U
394#define SGLWRBOOTINT 0x00000080U
395#define SGLRDBOOTINT 0x00000040U
396#define ILLWRBEINT 0x00000020U
397#define ILLRDBEINT 0x00000010U
398#define ILLRDINT 0x00000008U
399#define ILLWRINT 0x00000004U
400#define ILLTRANSINT 0x00000002U
401#define RSVDSPACEINT 0x00000001U
402
403#define TP_OUT_CONFIG 0x7d04
404#define VLANEXTENABLE_MASK 0x0000f000U
405#define VLANEXTENABLE_SHIFT 12
406
407#define TP_PARA_REG2 0x7d68
408#define MAXRXDATA_MASK 0xffff0000U
409#define MAXRXDATA_SHIFT 16
410#define MAXRXDATA_GET(x) (((x) & MAXRXDATA_MASK) >> MAXRXDATA_SHIFT)
411
412#define TP_TIMER_RESOLUTION 0x7d90
413#define TIMERRESOLUTION_MASK 0x00ff0000U
414#define TIMERRESOLUTION_SHIFT 16
415#define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT)
416
417#define TP_SHIFT_CNT 0x7dc0
418
419#define TP_CCTRL_TABLE 0x7ddc
420#define TP_MTU_TABLE 0x7de4
421#define MTUINDEX_MASK 0xff000000U
422#define MTUINDEX_SHIFT 24
423#define MTUINDEX(x) ((x) << MTUINDEX_SHIFT)
424#define MTUWIDTH_MASK 0x000f0000U
425#define MTUWIDTH_SHIFT 16
426#define MTUWIDTH(x) ((x) << MTUWIDTH_SHIFT)
427#define MTUWIDTH_GET(x) (((x) & MTUWIDTH_MASK) >> MTUWIDTH_SHIFT)
428#define MTUVALUE_MASK 0x00003fffU
429#define MTUVALUE_SHIFT 0
430#define MTUVALUE(x) ((x) << MTUVALUE_SHIFT)
431#define MTUVALUE_GET(x) (((x) & MTUVALUE_MASK) >> MTUVALUE_SHIFT)
432
433#define TP_RSS_LKP_TABLE 0x7dec
434#define LKPTBLROWVLD 0x80000000U
435#define LKPTBLQUEUE1_MASK 0x000ffc00U
436#define LKPTBLQUEUE1_SHIFT 10
437#define LKPTBLQUEUE1(x) ((x) << LKPTBLQUEUE1_SHIFT)
438#define LKPTBLQUEUE1_GET(x) (((x) & LKPTBLQUEUE1_MASK) >> LKPTBLQUEUE1_SHIFT)
439#define LKPTBLQUEUE0_MASK 0x000003ffU
440#define LKPTBLQUEUE0_SHIFT 0
441#define LKPTBLQUEUE0(x) ((x) << LKPTBLQUEUE0_SHIFT)
442#define LKPTBLQUEUE0_GET(x) (((x) & LKPTBLQUEUE0_MASK) >> LKPTBLQUEUE0_SHIFT)
443
444#define TP_PIO_ADDR 0x7e40
445#define TP_PIO_DATA 0x7e44
446#define TP_MIB_INDEX 0x7e50
447#define TP_MIB_DATA 0x7e54
448#define TP_INT_CAUSE 0x7e74
449#define FLMTXFLSTEMPTY 0x40000000U
450
451#define TP_INGRESS_CONFIG 0x141
452#define VNIC 0x00000800U
453#define CSUM_HAS_PSEUDO_HDR 0x00000400U
454#define RM_OVLAN 0x00000200U
455#define LOOKUPEVERYPKT 0x00000100U
456
457#define TP_MIB_MAC_IN_ERR_0 0x0
458#define TP_MIB_TCP_OUT_RST 0xc
459#define TP_MIB_TCP_IN_SEG_HI 0x10
460#define TP_MIB_TCP_IN_SEG_LO 0x11
461#define TP_MIB_TCP_OUT_SEG_HI 0x12
462#define TP_MIB_TCP_OUT_SEG_LO 0x13
463#define TP_MIB_TCP_RXT_SEG_HI 0x14
464#define TP_MIB_TCP_RXT_SEG_LO 0x15
465#define TP_MIB_TNL_CNG_DROP_0 0x18
466#define TP_MIB_TCP_V6IN_ERR_0 0x28
467#define TP_MIB_TCP_V6OUT_RST 0x2c
468#define TP_MIB_OFD_ARP_DROP 0x36
469#define TP_MIB_TNL_DROP_0 0x44
470#define TP_MIB_OFD_VLN_DROP_0 0x58
471
472#define ULP_TX_INT_CAUSE 0x8dcc
473#define PBL_BOUND_ERR_CH3 0x80000000U
474#define PBL_BOUND_ERR_CH2 0x40000000U
475#define PBL_BOUND_ERR_CH1 0x20000000U
476#define PBL_BOUND_ERR_CH0 0x10000000U
477
478#define PM_RX_INT_CAUSE 0x8fdc
479#define ZERO_E_CMD_ERROR 0x00400000U
480#define PMRX_FRAMING_ERROR 0x003ffff0U
481#define OCSPI_PAR_ERROR 0x00000008U
482#define DB_OPTIONS_PAR_ERROR 0x00000004U
483#define IESPI_PAR_ERROR 0x00000002U
484#define E_PCMD_PAR_ERROR 0x00000001U
485
486#define PM_TX_INT_CAUSE 0x8ffc
487#define PCMD_LEN_OVFL0 0x80000000U
488#define PCMD_LEN_OVFL1 0x40000000U
489#define PCMD_LEN_OVFL2 0x20000000U
490#define ZERO_C_CMD_ERROR 0x10000000U
491#define PMTX_FRAMING_ERROR 0x0ffffff0U
492#define OESPI_PAR_ERROR 0x00000008U
493#define ICSPI_PAR_ERROR 0x00000002U
494#define C_PCMD_PAR_ERROR 0x00000001U
495
496#define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
497#define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
498#define MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408
499#define MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c
500#define MPS_PORT_STAT_TX_PORT_BCAST_L 0x410
501#define MPS_PORT_STAT_TX_PORT_BCAST_H 0x414
502#define MPS_PORT_STAT_TX_PORT_MCAST_L 0x418
503#define MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c
504#define MPS_PORT_STAT_TX_PORT_UCAST_L 0x420
505#define MPS_PORT_STAT_TX_PORT_UCAST_H 0x424
506#define MPS_PORT_STAT_TX_PORT_ERROR_L 0x428
507#define MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c
508#define MPS_PORT_STAT_TX_PORT_64B_L 0x430
509#define MPS_PORT_STAT_TX_PORT_64B_H 0x434
510#define MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438
511#define MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c
512#define MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440
513#define MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444
514#define MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448
515#define MPS_PORT_STAT_TX_PORT_256B_511B_H 0x44c
516#define MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450
517#define MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454
518#define MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458
519#define MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c
520#define MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460
521#define MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464
522#define MPS_PORT_STAT_TX_PORT_DROP_L 0x468
523#define MPS_PORT_STAT_TX_PORT_DROP_H 0x46c
524#define MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470
525#define MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474
526#define MPS_PORT_STAT_TX_PORT_PPP0_L 0x478
527#define MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c
528#define MPS_PORT_STAT_TX_PORT_PPP1_L 0x480
529#define MPS_PORT_STAT_TX_PORT_PPP1_H 0x484
530#define MPS_PORT_STAT_TX_PORT_PPP2_L 0x488
531#define MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c
532#define MPS_PORT_STAT_TX_PORT_PPP3_L 0x490
533#define MPS_PORT_STAT_TX_PORT_PPP3_H 0x494
534#define MPS_PORT_STAT_TX_PORT_PPP4_L 0x498
535#define MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c
536#define MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0
537#define MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4
538#define MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8
539#define MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac
540#define MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0
541#define MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4
542#define MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0
543#define MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4
544#define MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8
545#define MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc
546#define MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0
547#define MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4
548#define MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8
549#define MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc
550#define MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0
551#define MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4
552#define MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8
553#define MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec
554#define MPS_PORT_STAT_LB_PORT_64B_L 0x4f0
555#define MPS_PORT_STAT_LB_PORT_64B_H 0x4f4
556#define MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8
557#define MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc
558#define MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500
559#define MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504
560#define MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508
561#define MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c
562#define MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510
563#define MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514
564#define MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518
565#define MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c
566#define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520
567#define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524
568#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528
569#define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540
570#define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544
571#define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548
572#define MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c
573#define MPS_PORT_STAT_RX_PORT_BCAST_L 0x550
574#define MPS_PORT_STAT_RX_PORT_BCAST_H 0x554
575#define MPS_PORT_STAT_RX_PORT_MCAST_L 0x558
576#define MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c
577#define MPS_PORT_STAT_RX_PORT_UCAST_L 0x560
578#define MPS_PORT_STAT_RX_PORT_UCAST_H 0x564
579#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568
580#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c
581#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570
582#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574
583#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578
584#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c
585#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580
586#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584
587#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588
588#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c
589#define MPS_PORT_STAT_RX_PORT_64B_L 0x590
590#define MPS_PORT_STAT_RX_PORT_64B_H 0x594
591#define MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598
592#define MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c
593#define MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0
594#define MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4
595#define MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8
596#define MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac
597#define MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0
598#define MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4
599#define MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8
600#define MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc
601#define MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0
602#define MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4
603#define MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8
604#define MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc
605#define MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0
606#define MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4
607#define MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8
608#define MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc
609#define MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0
610#define MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4
611#define MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8
612#define MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec
613#define MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0
614#define MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4
615#define MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8
616#define MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc
617#define MPS_PORT_STAT_RX_PORT_PPP6_L 0x600
618#define MPS_PORT_STAT_RX_PORT_PPP6_H 0x604
619#define MPS_PORT_STAT_RX_PORT_PPP7_L 0x608
620#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
621#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
622#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
623#define MPS_CMN_CTL 0x9000
624#define NUMPORTS_MASK 0x00000003U
625#define NUMPORTS_SHIFT 0
626#define NUMPORTS_GET(x) (((x) & NUMPORTS_MASK) >> NUMPORTS_SHIFT)
627
628#define MPS_INT_CAUSE 0x9008
629#define STATINT 0x00000020U
630#define TXINT 0x00000010U
631#define RXINT 0x00000008U
632#define TRCINT 0x00000004U
633#define CLSINT 0x00000002U
634#define PLINT 0x00000001U
635
636#define MPS_TX_INT_CAUSE 0x9408
637#define PORTERR 0x00010000U
638#define FRMERR 0x00008000U
639#define SECNTERR 0x00004000U
640#define BUBBLE 0x00002000U
641#define TXDESCFIFO 0x00001e00U
642#define TXDATAFIFO 0x000001e0U
643#define NCSIFIFO 0x00000010U
644#define TPFIFO 0x0000000fU
645
646#define MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614
647#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO 0x9620
648#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO 0x962c
649
650#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640
651#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644
652#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648
653#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c
654#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650
655#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654
656#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658
657#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c
658#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660
659#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664
660#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668
661#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c
662#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670
663#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674
664#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678
665#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c
666#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680
667#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684
668#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688
669#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c
670#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690
671#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694
672#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698
673#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c
674#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0
675#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4
676#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8
677#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac
678#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0
679#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4
680#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
681#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
682#define MPS_TRC_CFG 0x9800
683#define TRCFIFOEMPTY 0x00000010U
684#define TRCIGNOREDROPINPUT 0x00000008U
685#define TRCKEEPDUPLICATES 0x00000004U
686#define TRCEN 0x00000002U
687#define TRCMULTIFILTER 0x00000001U
688
689#define MPS_TRC_RSS_CONTROL 0x9808
690#define RSSCONTROL_MASK 0x00ff0000U
691#define RSSCONTROL_SHIFT 16
692#define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT)
693#define QUEUENUMBER_MASK 0x0000ffffU
694#define QUEUENUMBER_SHIFT 0
695#define QUEUENUMBER(x) ((x) << QUEUENUMBER_SHIFT)
696
697#define MPS_TRC_FILTER_MATCH_CTL_A 0x9810
698#define TFINVERTMATCH 0x01000000U
699#define TFPKTTOOLARGE 0x00800000U
700#define TFEN 0x00400000U
701#define TFPORT_MASK 0x003c0000U
702#define TFPORT_SHIFT 18
703#define TFPORT(x) ((x) << TFPORT_SHIFT)
704#define TFPORT_GET(x) (((x) & TFPORT_MASK) >> TFPORT_SHIFT)
705#define TFDROP 0x00020000U
706#define TFSOPEOPERR 0x00010000U
707#define TFLENGTH_MASK 0x00001f00U
708#define TFLENGTH_SHIFT 8
709#define TFLENGTH(x) ((x) << TFLENGTH_SHIFT)
710#define TFLENGTH_GET(x) (((x) & TFLENGTH_MASK) >> TFLENGTH_SHIFT)
711#define TFOFFSET_MASK 0x0000001fU
712#define TFOFFSET_SHIFT 0
713#define TFOFFSET(x) ((x) << TFOFFSET_SHIFT)
714#define TFOFFSET_GET(x) (((x) & TFOFFSET_MASK) >> TFOFFSET_SHIFT)
715
716#define MPS_TRC_FILTER_MATCH_CTL_B 0x9820
717#define TFMINPKTSIZE_MASK 0x01ff0000U
718#define TFMINPKTSIZE_SHIFT 16
719#define TFMINPKTSIZE(x) ((x) << TFMINPKTSIZE_SHIFT)
720#define TFMINPKTSIZE_GET(x) (((x) & TFMINPKTSIZE_MASK) >> TFMINPKTSIZE_SHIFT)
721#define TFCAPTUREMAX_MASK 0x00003fffU
722#define TFCAPTUREMAX_SHIFT 0
723#define TFCAPTUREMAX(x) ((x) << TFCAPTUREMAX_SHIFT)
724#define TFCAPTUREMAX_GET(x) (((x) & TFCAPTUREMAX_MASK) >> TFCAPTUREMAX_SHIFT)
725
726#define MPS_TRC_INT_CAUSE 0x985c
727#define MISCPERR 0x00000100U
728#define PKTFIFO 0x000000f0U
729#define FILTMEM 0x0000000fU
730
731#define MPS_TRC_FILTER0_MATCH 0x9c00
732#define MPS_TRC_FILTER0_DONT_CARE 0x9c80
733#define MPS_TRC_FILTER1_MATCH 0x9d00
734#define MPS_CLS_INT_CAUSE 0xd028
735#define PLERRENB 0x00000008U
736#define HASHSRAM 0x00000004U
737#define MATCHTCAM 0x00000002U
738#define MATCHSRAM 0x00000001U
739
740#define MPS_RX_PERR_INT_CAUSE 0x11074
741
742#define CPL_INTR_CAUSE 0x19054
743#define CIM_OP_MAP_PERR 0x00000020U
744#define CIM_OVFL_ERROR 0x00000010U
745#define TP_FRAMING_ERROR 0x00000008U
746#define SGE_FRAMING_ERROR 0x00000004U
747#define CIM_FRAMING_ERROR 0x00000002U
748#define ZERO_SWITCH_ERROR 0x00000001U
749
750#define SMB_INT_CAUSE 0x19090
751#define MSTTXFIFOPARINT 0x00200000U
752#define MSTRXFIFOPARINT 0x00100000U
753#define SLVFIFOPARINT 0x00080000U
754
755#define ULP_RX_INT_CAUSE 0x19158
756#define ULP_RX_ISCSI_TAGMASK 0x19164
757#define ULP_RX_ISCSI_PSZ 0x19168
758#define HPZ3_MASK 0x0f000000U
759#define HPZ3_SHIFT 24
760#define HPZ3(x) ((x) << HPZ3_SHIFT)
761#define HPZ2_MASK 0x000f0000U
762#define HPZ2_SHIFT 16
763#define HPZ2(x) ((x) << HPZ2_SHIFT)
764#define HPZ1_MASK 0x00000f00U
765#define HPZ1_SHIFT 8
766#define HPZ1(x) ((x) << HPZ1_SHIFT)
767#define HPZ0_MASK 0x0000000fU
768#define HPZ0_SHIFT 0
769#define HPZ0(x) ((x) << HPZ0_SHIFT)
770
771#define ULP_RX_TDDP_PSZ 0x19178
772
773#define SF_DATA 0x193f8
774#define SF_OP 0x193fc
775#define BUSY 0x80000000U
776#define SF_LOCK 0x00000010U
777#define SF_CONT 0x00000008U
778#define BYTECNT_MASK 0x00000006U
779#define BYTECNT_SHIFT 1
780#define BYTECNT(x) ((x) << BYTECNT_SHIFT)
781#define OP_WR 0x00000001U
782
783#define PL_PF_INT_CAUSE 0x3c0
784#define PFSW 0x00000008U
785#define PFSGE 0x00000004U
786#define PFCIM 0x00000002U
787#define PFMPS 0x00000001U
788
789#define PL_PF_INT_ENABLE 0x3c4
790#define PL_PF_CTL 0x3c8
791#define SWINT 0x00000001U
792
793#define PL_WHOAMI 0x19400
794#define SOURCEPF_MASK 0x00000700U
795#define SOURCEPF_SHIFT 8
796#define SOURCEPF(x) ((x) << SOURCEPF_SHIFT)
797#define SOURCEPF_GET(x) (((x) & SOURCEPF_MASK) >> SOURCEPF_SHIFT)
798#define ISVF 0x00000080U
799#define VFID_MASK 0x0000007fU
800#define VFID_SHIFT 0
801#define VFID(x) ((x) << VFID_SHIFT)
802#define VFID_GET(x) (((x) & VFID_MASK) >> VFID_SHIFT)
803
804#define PL_INT_CAUSE 0x1940c
805#define ULP_TX 0x08000000U
806#define SGE 0x04000000U
807#define HMA 0x02000000U
808#define CPL_SWITCH 0x01000000U
809#define ULP_RX 0x00800000U
810#define PM_RX 0x00400000U
811#define PM_TX 0x00200000U
812#define MA 0x00100000U
813#define TP 0x00080000U
814#define LE 0x00040000U
815#define EDC1 0x00020000U
816#define EDC0 0x00010000U
817#define MC 0x00008000U
818#define PCIE 0x00004000U
819#define PMU 0x00002000U
820#define XGMAC_KR1 0x00001000U
821#define XGMAC_KR0 0x00000800U
822#define XGMAC1 0x00000400U
823#define XGMAC0 0x00000200U
824#define SMB 0x00000100U
825#define SF 0x00000080U
826#define PL 0x00000040U
827#define NCSI 0x00000020U
828#define MPS 0x00000010U
829#define MI 0x00000008U
830#define DBG 0x00000004U
831#define I2CM 0x00000002U
832#define CIM 0x00000001U
833
834#define PL_INT_MAP0 0x19414
835#define PL_RST 0x19428
836#define PIORST 0x00000002U
837#define PIORSTMODE 0x00000001U
838
839#define PL_PL_INT_CAUSE 0x19430
840#define FATALPERR 0x00000010U
841#define PERRVFID 0x00000001U
842
843#define PL_REV 0x1943c
844
845#define LE_DB_CONFIG 0x19c04
846#define HASHEN 0x00100000U
847
848#define LE_DB_SERVER_INDEX 0x19c18
849#define LE_DB_ACT_CNT_IPV4 0x19c20
850#define LE_DB_ACT_CNT_IPV6 0x19c24
851
852#define LE_DB_INT_CAUSE 0x19c3c
853#define REQQPARERR 0x00010000U
854#define UNKNOWNCMD 0x00008000U
855#define PARITYERR 0x00000040U
856#define LIPMISS 0x00000020U
857#define LIP0 0x00000010U
858
859#define LE_DB_TID_HASHBASE 0x19df8
860
861#define NCSI_INT_CAUSE 0x1a0d8
862#define CIM_DM_PRTY_ERR 0x00000100U
863#define MPS_DM_PRTY_ERR 0x00000080U
864#define TXFIFO_PRTY_ERR 0x00000002U
865#define RXFIFO_PRTY_ERR 0x00000001U
866
867#define XGMAC_PORT_CFG2 0x1018
868#define PATEN 0x00040000U
869#define MAGICEN 0x00020000U
870
871#define XGMAC_PORT_MAGIC_MACID_LO 0x1024
872#define XGMAC_PORT_MAGIC_MACID_HI 0x1028
873
874#define XGMAC_PORT_EPIO_DATA0 0x10c0
875#define XGMAC_PORT_EPIO_DATA1 0x10c4
876#define XGMAC_PORT_EPIO_DATA2 0x10c8
877#define XGMAC_PORT_EPIO_DATA3 0x10cc
878#define XGMAC_PORT_EPIO_OP 0x10d0
879#define EPIOWR 0x00000100U
880#define ADDRESS_MASK 0x000000ffU
881#define ADDRESS_SHIFT 0
882#define ADDRESS(x) ((x) << ADDRESS_SHIFT)
883
884#define XGMAC_PORT_INT_CAUSE 0x10dc
885#endif /* __T4_REGS_H */
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
new file mode 100644
index 00000000000..edcfd7ec780
--- /dev/null
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -0,0 +1,1623 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef _T4FW_INTERFACE_H_
36#define _T4FW_INTERFACE_H_
37
38#define FW_T4VF_SGE_BASE_ADDR 0x0000
39#define FW_T4VF_MPS_BASE_ADDR 0x0100
40#define FW_T4VF_PL_BASE_ADDR 0x0200
41#define FW_T4VF_MBDATA_BASE_ADDR 0x0240
42#define FW_T4VF_CIM_BASE_ADDR 0x0300
43
44enum fw_wr_opcodes {
45 FW_FILTER_WR = 0x02,
46 FW_ULPTX_WR = 0x04,
47 FW_TP_WR = 0x05,
48 FW_ETH_TX_PKT_WR = 0x08,
49 FW_FLOWC_WR = 0x0a,
50 FW_OFLD_TX_DATA_WR = 0x0b,
51 FW_CMD_WR = 0x10,
52 FW_ETH_TX_PKT_VM_WR = 0x11,
53 FW_RI_RES_WR = 0x0c,
54 FW_RI_INIT_WR = 0x0d,
55 FW_RI_RDMA_WRITE_WR = 0x14,
56 FW_RI_SEND_WR = 0x15,
57 FW_RI_RDMA_READ_WR = 0x16,
58 FW_RI_RECV_WR = 0x17,
59 FW_RI_BIND_MW_WR = 0x18,
60 FW_RI_FR_NSMR_WR = 0x19,
61 FW_RI_INV_LSTAG_WR = 0x1a,
62 FW_LASTC2E_WR = 0x40
63};
64
65struct fw_wr_hdr {
66 __be32 hi;
67 __be32 lo;
68};
69
70#define FW_WR_OP(x) ((x) << 24)
71#define FW_WR_ATOMIC(x) ((x) << 23)
72#define FW_WR_FLUSH(x) ((x) << 22)
73#define FW_WR_COMPL(x) ((x) << 21)
74#define FW_WR_IMMDLEN_MASK 0xff
75#define FW_WR_IMMDLEN(x) ((x) << 0)
76
77#define FW_WR_EQUIQ (1U << 31)
78#define FW_WR_EQUEQ (1U << 30)
79#define FW_WR_FLOWID(x) ((x) << 8)
80#define FW_WR_LEN16(x) ((x) << 0)
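/*
 * Illustrative sketch (hypothetical sizes, not from the driver):
 * composing the two header words shared by all work requests.  "len"
 * is the immediate-data length in bytes and "len16" the total WR
 * length in 16-byte units; htonl() is assumed available.
 */
static inline void example_fill_wr_hdr(struct fw_wr_hdr *wr, u8 op,
				       unsigned int len, unsigned int len16)
{
	wr->hi = htonl(FW_WR_OP(op) | FW_WR_COMPL(1) | FW_WR_IMMDLEN(len));
	wr->lo = htonl(FW_WR_LEN16(len16));
}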
81
82struct fw_ulptx_wr {
83 __be32 op_to_compl;
84 __be32 flowid_len16;
85 u64 cookie;
86};
87
88struct fw_tp_wr {
89 __be32 op_to_immdlen;
90 __be32 flowid_len16;
91 u64 cookie;
92};
93
94struct fw_eth_tx_pkt_wr {
95 __be32 op_immdlen;
96 __be32 equiq_to_len16;
97 __be64 r3;
98};
99
100enum fw_flowc_mnem {
101 FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */
102 FW_FLOWC_MNEM_CH,
103 FW_FLOWC_MNEM_PORT,
104 FW_FLOWC_MNEM_IQID,
105 FW_FLOWC_MNEM_SNDNXT,
106 FW_FLOWC_MNEM_RCVNXT,
107 FW_FLOWC_MNEM_SNDBUF,
108 FW_FLOWC_MNEM_MSS,
109};
110
111struct fw_flowc_mnemval {
112 u8 mnemonic;
113 u8 r4[3];
114 __be32 val;
115};
116
117struct fw_flowc_wr {
118 __be32 op_to_nparams;
119#define FW_FLOWC_WR_NPARAMS(x) ((x) << 0)
120 __be32 flowid_len16;
121 struct fw_flowc_mnemval mnemval[0];
122};
123
124struct fw_ofld_tx_data_wr {
125 __be32 op_to_immdlen;
126 __be32 flowid_len16;
127 __be32 plen;
128 __be32 tunnel_to_proxy;
129#define FW_OFLD_TX_DATA_WR_TUNNEL(x) ((x) << 19)
130#define FW_OFLD_TX_DATA_WR_SAVE(x) ((x) << 18)
131#define FW_OFLD_TX_DATA_WR_FLUSH(x) ((x) << 17)
132#define FW_OFLD_TX_DATA_WR_URGENT(x) ((x) << 16)
133#define FW_OFLD_TX_DATA_WR_MORE(x) ((x) << 15)
134#define FW_OFLD_TX_DATA_WR_SHOVE(x) ((x) << 14)
135#define FW_OFLD_TX_DATA_WR_ULPMODE(x) ((x) << 10)
136#define FW_OFLD_TX_DATA_WR_ULPSUBMODE(x) ((x) << 6)
137};
138
139struct fw_cmd_wr {
140 __be32 op_dma;
141#define FW_CMD_WR_DMA (1U << 17)
142 __be32 len16_pkd;
143 __be64 cookie_daddr;
144};
145
146struct fw_eth_tx_pkt_vm_wr {
147 __be32 op_immdlen;
148 __be32 equiq_to_len16;
149 __be32 r3[2];
150 u8 ethmacdst[6];
151 u8 ethmacsrc[6];
152 __be16 ethtype;
153 __be16 vlantci;
154};
155
156#define FW_CMD_MAX_TIMEOUT 3000
157
158enum fw_cmd_opcodes {
159 FW_LDST_CMD = 0x01,
160 FW_RESET_CMD = 0x03,
161 FW_HELLO_CMD = 0x04,
162 FW_BYE_CMD = 0x05,
163 FW_INITIALIZE_CMD = 0x06,
164 FW_CAPS_CONFIG_CMD = 0x07,
165 FW_PARAMS_CMD = 0x08,
166 FW_PFVF_CMD = 0x09,
167 FW_IQ_CMD = 0x10,
168 FW_EQ_MNGT_CMD = 0x11,
169 FW_EQ_ETH_CMD = 0x12,
170 FW_EQ_CTRL_CMD = 0x13,
171 FW_EQ_OFLD_CMD = 0x21,
172 FW_VI_CMD = 0x14,
173 FW_VI_MAC_CMD = 0x15,
174 FW_VI_RXMODE_CMD = 0x16,
175 FW_VI_ENABLE_CMD = 0x17,
176 FW_ACL_MAC_CMD = 0x18,
177 FW_ACL_VLAN_CMD = 0x19,
178 FW_VI_STATS_CMD = 0x1a,
179 FW_PORT_CMD = 0x1b,
180 FW_PORT_STATS_CMD = 0x1c,
181 FW_PORT_LB_STATS_CMD = 0x1d,
182 FW_PORT_TRACE_CMD = 0x1e,
183 FW_PORT_TRACE_MMAP_CMD = 0x1f,
184 FW_RSS_IND_TBL_CMD = 0x20,
185 FW_RSS_GLB_CONFIG_CMD = 0x22,
186 FW_RSS_VI_CONFIG_CMD = 0x23,
187 FW_LASTC2E_CMD = 0x40,
188 FW_ERROR_CMD = 0x80,
189 FW_DEBUG_CMD = 0x81,
190};
191
192enum fw_cmd_cap {
193 FW_CMD_CAP_PF = 0x01,
194 FW_CMD_CAP_DMAQ = 0x02,
195 FW_CMD_CAP_PORT = 0x04,
196 FW_CMD_CAP_PORTPROMISC = 0x08,
197 FW_CMD_CAP_PORTSTATS = 0x10,
198 FW_CMD_CAP_VF = 0x80,
199};
200
201/*
202 * Generic command header flit0
203 */
204struct fw_cmd_hdr {
205 __be32 hi;
206 __be32 lo;
207};
208
209#define FW_CMD_OP(x) ((x) << 24)
210#define FW_CMD_OP_GET(x) (((x) >> 24) & 0xff)
211#define FW_CMD_REQUEST (1U << 23)
212#define FW_CMD_READ (1U << 22)
213#define FW_CMD_WRITE (1U << 21)
214#define FW_CMD_EXEC (1U << 20)
215#define FW_CMD_RAMASK(x) ((x) << 20)
216#define FW_CMD_RETVAL(x) ((x) << 8)
217#define FW_CMD_RETVAL_GET(x) (((x) >> 8) & 0xff)
218#define FW_CMD_LEN16(x) ((x) << 0)
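/*
 * Illustrative sketch, not taken from the driver: composing the generic
 * command header for a firmware RESET command.  Only the two header
 * words are shown; the rest of the payload would follow the
 * fw_reset_cmd layout defined later in this file.
 */
static inline void example_fill_cmd_hdr(struct fw_cmd_hdr *hdr,
					unsigned int len16)
{
	hdr->hi = htonl(FW_CMD_OP(FW_RESET_CMD) | FW_CMD_REQUEST |
			FW_CMD_WRITE);
	hdr->lo = htonl(FW_CMD_LEN16(len16));
}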
219
220enum fw_ldst_addrspc {
221 FW_LDST_ADDRSPC_FIRMWARE = 0x0001,
222 FW_LDST_ADDRSPC_SGE_EGRC = 0x0008,
223 FW_LDST_ADDRSPC_SGE_INGC = 0x0009,
224 FW_LDST_ADDRSPC_SGE_FLMC = 0x000a,
225 FW_LDST_ADDRSPC_SGE_CONMC = 0x000b,
226 FW_LDST_ADDRSPC_TP_PIO = 0x0010,
227 FW_LDST_ADDRSPC_TP_TM_PIO = 0x0011,
228 FW_LDST_ADDRSPC_TP_MIB = 0x0012,
229 FW_LDST_ADDRSPC_MDIO = 0x0018,
230 FW_LDST_ADDRSPC_MPS = 0x0020,
231 FW_LDST_ADDRSPC_FUNC = 0x0028
232};
233
234enum fw_ldst_mps_fid {
235 FW_LDST_MPS_ATRB,
236 FW_LDST_MPS_RPLC
237};
238
239enum fw_ldst_func_access_ctl {
240 FW_LDST_FUNC_ACC_CTL_VIID,
241 FW_LDST_FUNC_ACC_CTL_FID
242};
243
244enum fw_ldst_func_mod_index {
245 FW_LDST_FUNC_MPS
246};
247
248struct fw_ldst_cmd {
249 __be32 op_to_addrspace;
250#define FW_LDST_CMD_ADDRSPACE(x) ((x) << 0)
251 __be32 cycles_to_len16;
252 union fw_ldst {
253 struct fw_ldst_addrval {
254 __be32 addr;
255 __be32 val;
256 } addrval;
257 struct fw_ldst_idctxt {
258 __be32 physid;
259 __be32 msg_pkd;
260 __be32 ctxt_data7;
261 __be32 ctxt_data6;
262 __be32 ctxt_data5;
263 __be32 ctxt_data4;
264 __be32 ctxt_data3;
265 __be32 ctxt_data2;
266 __be32 ctxt_data1;
267 __be32 ctxt_data0;
268 } idctxt;
269 struct fw_ldst_mdio {
270 __be16 paddr_mmd;
271 __be16 raddr;
272 __be16 vctl;
273 __be16 rval;
274 } mdio;
275 struct fw_ldst_mps {
276 __be16 fid_ctl;
277 __be16 rplcpf_pkd;
278 __be32 rplc127_96;
279 __be32 rplc95_64;
280 __be32 rplc63_32;
281 __be32 rplc31_0;
282 __be32 atrb;
283 __be16 vlan[16];
284 } mps;
285 struct fw_ldst_func {
286 u8 access_ctl;
287 u8 mod_index;
288 __be16 ctl_id;
289 __be32 offset;
290 __be64 data0;
291 __be64 data1;
292 } func;
293 } u;
294};
295
296#define FW_LDST_CMD_MSG(x) ((x) << 31)
297#define FW_LDST_CMD_PADDR(x) ((x) << 8)
298#define FW_LDST_CMD_MMD(x) ((x) << 0)
299#define FW_LDST_CMD_FID(x) ((x) << 15)
300#define FW_LDST_CMD_CTL(x) ((x) << 0)
301#define FW_LDST_CMD_RPLCPF(x) ((x) << 0)
302
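/*
 * Illustrative sketch, not part of the original header: addressing a PHY
 * register through the mdio variant of fw_ldst_cmd, packing the port
 * address and MMD with the macros above.  Field meanings are inferred
 * from the macro names.
 */
static inline void fw_ldst_mdio_addr_example(struct fw_ldst_cmd *c,
        unsigned int phy_addr, unsigned int mmd, unsigned int reg)
{
 c->u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR(phy_addr) |
       FW_LDST_CMD_MMD(mmd));
 c->u.mdio.raddr = cpu_to_be16(reg);
}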
303struct fw_reset_cmd {
304 __be32 op_to_write;
305 __be32 retval_len16;
306 __be32 val;
307 __be32 r3;
308};
309
310struct fw_hello_cmd {
311 __be32 op_to_write;
312 __be32 retval_len16;
313 __be32 err_to_mbasyncnot;
314#define FW_HELLO_CMD_ERR (1U << 31)
315#define FW_HELLO_CMD_INIT (1U << 30)
316#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29)
317#define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28)
318#define FW_HELLO_CMD_MBMASTER(x) ((x) << 24)
319#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20)
320 __be32 fwrev;
321};
322
323struct fw_bye_cmd {
324 __be32 op_to_write;
325 __be32 retval_len16;
326 __be64 r3;
327};
328
329struct fw_initialize_cmd {
330 __be32 op_to_write;
331 __be32 retval_len16;
332 __be64 r3;
333};
334
335enum fw_caps_config_hm {
336 FW_CAPS_CONFIG_HM_PCIE = 0x00000001,
337 FW_CAPS_CONFIG_HM_PL = 0x00000002,
338 FW_CAPS_CONFIG_HM_SGE = 0x00000004,
339 FW_CAPS_CONFIG_HM_CIM = 0x00000008,
340 FW_CAPS_CONFIG_HM_ULPTX = 0x00000010,
341 FW_CAPS_CONFIG_HM_TP = 0x00000020,
342 FW_CAPS_CONFIG_HM_ULPRX = 0x00000040,
343 FW_CAPS_CONFIG_HM_PMRX = 0x00000080,
344 FW_CAPS_CONFIG_HM_PMTX = 0x00000100,
345 FW_CAPS_CONFIG_HM_MC = 0x00000200,
346 FW_CAPS_CONFIG_HM_LE = 0x00000400,
347 FW_CAPS_CONFIG_HM_MPS = 0x00000800,
348 FW_CAPS_CONFIG_HM_XGMAC = 0x00001000,
349 FW_CAPS_CONFIG_HM_CPLSWITCH = 0x00002000,
350 FW_CAPS_CONFIG_HM_T4DBG = 0x00004000,
351 FW_CAPS_CONFIG_HM_MI = 0x00008000,
352 FW_CAPS_CONFIG_HM_I2CM = 0x00010000,
353 FW_CAPS_CONFIG_HM_NCSI = 0x00020000,
354 FW_CAPS_CONFIG_HM_SMB = 0x00040000,
355 FW_CAPS_CONFIG_HM_MA = 0x00080000,
356 FW_CAPS_CONFIG_HM_EDRAM = 0x00100000,
357 FW_CAPS_CONFIG_HM_PMU = 0x00200000,
358 FW_CAPS_CONFIG_HM_UART = 0x00400000,
359 FW_CAPS_CONFIG_HM_SF = 0x00800000,
360};
361
362enum fw_caps_config_nbm {
363 FW_CAPS_CONFIG_NBM_IPMI = 0x00000001,
364 FW_CAPS_CONFIG_NBM_NCSI = 0x00000002,
365};
366
367enum fw_caps_config_link {
368 FW_CAPS_CONFIG_LINK_PPP = 0x00000001,
369 FW_CAPS_CONFIG_LINK_QFC = 0x00000002,
370 FW_CAPS_CONFIG_LINK_DCBX = 0x00000004,
371};
372
373enum fw_caps_config_switch {
374 FW_CAPS_CONFIG_SWITCH_INGRESS = 0x00000001,
375 FW_CAPS_CONFIG_SWITCH_EGRESS = 0x00000002,
376};
377
378enum fw_caps_config_nic {
379 FW_CAPS_CONFIG_NIC = 0x00000001,
380 FW_CAPS_CONFIG_NIC_VM = 0x00000002,
381};
382
383enum fw_caps_config_ofld {
384 FW_CAPS_CONFIG_OFLD = 0x00000001,
385};
386
387enum fw_caps_config_rdma {
388 FW_CAPS_CONFIG_RDMA_RDDP = 0x00000001,
389 FW_CAPS_CONFIG_RDMA_RDMAC = 0x00000002,
390};
391
392enum fw_caps_config_iscsi {
393 FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU = 0x00000001,
394 FW_CAPS_CONFIG_ISCSI_TARGET_PDU = 0x00000002,
395 FW_CAPS_CONFIG_ISCSI_INITIATOR_CNXOFLD = 0x00000004,
396 FW_CAPS_CONFIG_ISCSI_TARGET_CNXOFLD = 0x00000008,
397};
398
399enum fw_caps_config_fcoe {
400 FW_CAPS_CONFIG_FCOE_INITIATOR = 0x00000001,
401 FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002,
402};
403
404struct fw_caps_config_cmd {
405 __be32 op_to_write;
406 __be32 retval_len16;
407 __be32 r2;
408 __be32 hwmbitmap;
409 __be16 nbmcaps;
410 __be16 linkcaps;
411 __be16 switchcaps;
412 __be16 r3;
413 __be16 niccaps;
414 __be16 ofldcaps;
415 __be16 rdmacaps;
416 __be16 r4;
417 __be16 iscsicaps;
418 __be16 fcoecaps;
419 __be32 r5;
420 __be64 r6;
421};
422
423/*
424 * params command mnemonics
425 */
426enum fw_params_mnem {
427 FW_PARAMS_MNEM_DEV = 1, /* device params */
428 FW_PARAMS_MNEM_PFVF = 2, /* function params */
429 FW_PARAMS_MNEM_REG = 3, /* limited register access */
430 FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */
431 FW_PARAMS_MNEM_LAST
432};
433
434/*
435 * device parameters
436 */
437enum fw_params_param_dev {
438 FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */
439 FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */
440 FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs
441 * allocated by the device's
442 * Lookup Engine
443 */
444 FW_PARAMS_PARAM_DEV_FLOWC_BUFFIFO_SZ = 0x03,
445 FW_PARAMS_PARAM_DEV_INTVER_NIC = 0x04,
446 FW_PARAMS_PARAM_DEV_INTVER_VNIC = 0x05,
447 FW_PARAMS_PARAM_DEV_INTVER_OFLD = 0x06,
448 FW_PARAMS_PARAM_DEV_INTVER_RI = 0x07,
449 FW_PARAMS_PARAM_DEV_INTVER_ISCSIPDU = 0x08,
450 FW_PARAMS_PARAM_DEV_INTVER_ISCSI = 0x09,
451 FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A,
452 FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
453 FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
454};
455
456/*
457 * physical and virtual function parameters
458 */
459enum fw_params_param_pfvf {
460 FW_PARAMS_PARAM_PFVF_RWXCAPS = 0x00,
461 FW_PARAMS_PARAM_PFVF_ROUTE_START = 0x01,
462 FW_PARAMS_PARAM_PFVF_ROUTE_END = 0x02,
463 FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03,
464 FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04,
465 FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05,
466 FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06,
467 FW_PARAMS_PARAM_PFVF_SERVER_START = 0x07,
468 FW_PARAMS_PARAM_PFVF_SERVER_END = 0x08,
469 FW_PARAMS_PARAM_PFVF_TDDP_START = 0x09,
470 FW_PARAMS_PARAM_PFVF_TDDP_END = 0x0A,
471 FW_PARAMS_PARAM_PFVF_ISCSI_START = 0x0B,
472 FW_PARAMS_PARAM_PFVF_ISCSI_END = 0x0C,
473 FW_PARAMS_PARAM_PFVF_STAG_START = 0x0D,
474 FW_PARAMS_PARAM_PFVF_STAG_END = 0x0E,
475 FW_PARAMS_PARAM_PFVF_RQ_START = 0x1F,
476 FW_PARAMS_PARAM_PFVF_RQ_END = 0x10,
477 FW_PARAMS_PARAM_PFVF_PBL_START = 0x11,
478 FW_PARAMS_PARAM_PFVF_PBL_END = 0x12,
479 FW_PARAMS_PARAM_PFVF_L2T_START = 0x13,
480 FW_PARAMS_PARAM_PFVF_L2T_END = 0x14,
481 FW_PARAMS_PARAM_PFVF_SQRQ_START = 0x15,
482 FW_PARAMS_PARAM_PFVF_SQRQ_END = 0x16,
483 FW_PARAMS_PARAM_PFVF_CQ_START = 0x17,
484 FW_PARAMS_PARAM_PFVF_CQ_END = 0x18,
485 FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20,
486 FW_PARAMS_PARAM_PFVF_VIID = 0x24,
487 FW_PARAMS_PARAM_PFVF_CPMASK = 0x25,
488 FW_PARAMS_PARAM_PFVF_OCQ_START = 0x26,
489 FW_PARAMS_PARAM_PFVF_OCQ_END = 0x27,
490 FW_PARAMS_PARAM_PFVF_CONM_MAP = 0x28,
491 FW_PARAMS_PARAM_PFVF_IQFLINT_START = 0x29,
492 FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A,
493 FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B,
494 FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C,
495};
496
497/*
498 * dma queue parameters
499 */
500enum fw_params_param_dmaq {
501 FW_PARAMS_PARAM_DMAQ_IQ_DCAEN_DCACPU = 0x00,
502 FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01,
503 FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_MNGT = 0x10,
504 FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11,
505 FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12,
506};
507
508#define FW_PARAMS_MNEM(x) ((x) << 24)
509#define FW_PARAMS_PARAM_X(x) ((x) << 16)
510#define FW_PARAMS_PARAM_Y(x) ((x) << 8)
511#define FW_PARAMS_PARAM_Z(x) ((x) << 0)
512#define FW_PARAMS_PARAM_XYZ(x) ((x) << 0)
513#define FW_PARAMS_PARAM_YZ(x) ((x) << 0)
514
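/*
 * Illustrative sketch, not part of the original header: a parameter is
 * addressed by packing a mnemonic and X/Y/Z indices into one 32-bit word
 * with the macros above.  The core-clock device parameter is used here
 * purely as an example.
 */
static inline u32 fw_param_id_example(void)
{
 return FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
        FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK);
}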
515struct fw_params_cmd {
516 __be32 op_to_vfn;
517 __be32 retval_len16;
518 struct fw_params_param {
519 __be32 mnem;
520 __be32 val;
521 } param[7];
522};
523
524#define FW_PARAMS_CMD_PFN(x) ((x) << 8)
525#define FW_PARAMS_CMD_VFN(x) ((x) << 0)
526
527struct fw_pfvf_cmd {
528 __be32 op_to_vfn;
529 __be32 retval_len16;
530 __be32 niqflint_niq;
531 __be32 type_to_neq;
532 __be32 tc_to_nexactf;
533 __be32 r_caps_to_nethctrl;
534 __be16 nricq;
535 __be16 nriqp;
536 __be32 r4;
537};
538
539#define FW_PFVF_CMD_PFN(x) ((x) << 8)
540#define FW_PFVF_CMD_VFN(x) ((x) << 0)
541
542#define FW_PFVF_CMD_NIQFLINT(x) ((x) << 20)
543#define FW_PFVF_CMD_NIQFLINT_GET(x) (((x) >> 20) & 0xfff)
544
545#define FW_PFVF_CMD_NIQ(x) ((x) << 0)
546#define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff)
547
548#define FW_PFVF_CMD_TYPE (1U << 31)
549#define FW_PFVF_CMD_TYPE_GET(x) (((x) >> 31) & 0x1)
550
551#define FW_PFVF_CMD_CMASK(x) ((x) << 24)
552#define FW_PFVF_CMD_CMASK_MASK 0xf
553#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & FW_PFVF_CMD_CMASK_MASK)
554
555#define FW_PFVF_CMD_PMASK(x) ((x) << 20)
556#define FW_PFVF_CMD_PMASK_MASK 0xf
557#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & FW_PFVF_CMD_PMASK_MASK)
558
559#define FW_PFVF_CMD_NEQ(x) ((x) << 0)
560#define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff)
561
562#define FW_PFVF_CMD_TC(x) ((x) << 24)
563#define FW_PFVF_CMD_TC_GET(x) (((x) >> 24) & 0xff)
564
565#define FW_PFVF_CMD_NVI(x) ((x) << 16)
566#define FW_PFVF_CMD_NVI_GET(x) (((x) >> 16) & 0xff)
567
568#define FW_PFVF_CMD_NEXACTF(x) ((x) << 0)
569#define FW_PFVF_CMD_NEXACTF_GET(x) (((x) >> 0) & 0xffff)
570
571#define FW_PFVF_CMD_R_CAPS(x) ((x) << 24)
572#define FW_PFVF_CMD_R_CAPS_GET(x) (((x) >> 24) & 0xff)
573
574#define FW_PFVF_CMD_WX_CAPS(x) ((x) << 16)
575#define FW_PFVF_CMD_WX_CAPS_GET(x) (((x) >> 16) & 0xff)
576
577#define FW_PFVF_CMD_NETHCTRL(x) ((x) << 0)
578#define FW_PFVF_CMD_NETHCTRL_GET(x) (((x) >> 0) & 0xffff)
579
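/*
 * Illustrative sketch, not part of the original header: reading the packed
 * ingress-queue counts out of a fw_pfvf_cmd reply with the _GET accessors
 * above.  Field meanings are inferred from the macro names.
 */
static inline void fw_pfvf_niq_example(const struct fw_pfvf_cmd *rpl,
           unsigned int *niqflint, unsigned int *niq)
{
 u32 word = be32_to_cpu(rpl->niqflint_niq);

 *niqflint = FW_PFVF_CMD_NIQFLINT_GET(word);
 *niq = FW_PFVF_CMD_NIQ_GET(word);
}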
580enum fw_iq_type {
581 FW_IQ_TYPE_FL_INT_CAP,
582 FW_IQ_TYPE_NO_FL_INT_CAP
583};
584
585struct fw_iq_cmd {
586 __be32 op_to_vfn;
587 __be32 alloc_to_len16;
588 __be16 physiqid;
589 __be16 iqid;
590 __be16 fl0id;
591 __be16 fl1id;
592 __be32 type_to_iqandstindex;
593 __be16 iqdroprss_to_iqesize;
594 __be16 iqsize;
595 __be64 iqaddr;
596 __be32 iqns_to_fl0congen;
597 __be16 fl0dcaen_to_fl0cidxfthresh;
598 __be16 fl0size;
599 __be64 fl0addr;
600 __be32 fl1cngchmap_to_fl1congen;
601 __be16 fl1dcaen_to_fl1cidxfthresh;
602 __be16 fl1size;
603 __be64 fl1addr;
604};
605
606#define FW_IQ_CMD_PFN(x) ((x) << 8)
607#define FW_IQ_CMD_VFN(x) ((x) << 0)
608
609#define FW_IQ_CMD_ALLOC (1U << 31)
610#define FW_IQ_CMD_FREE (1U << 30)
611#define FW_IQ_CMD_MODIFY (1U << 29)
612#define FW_IQ_CMD_IQSTART(x) ((x) << 28)
613#define FW_IQ_CMD_IQSTOP(x) ((x) << 27)
614
615#define FW_IQ_CMD_TYPE(x) ((x) << 29)
616#define FW_IQ_CMD_IQASYNCH(x) ((x) << 28)
617#define FW_IQ_CMD_VIID(x) ((x) << 16)
618#define FW_IQ_CMD_IQANDST(x) ((x) << 15)
619#define FW_IQ_CMD_IQANUS(x) ((x) << 14)
620#define FW_IQ_CMD_IQANUD(x) ((x) << 12)
621#define FW_IQ_CMD_IQANDSTINDEX(x) ((x) << 0)
622
623#define FW_IQ_CMD_IQDROPRSS (1U << 15)
624#define FW_IQ_CMD_IQGTSMODE (1U << 14)
625#define FW_IQ_CMD_IQPCIECH(x) ((x) << 12)
626#define FW_IQ_CMD_IQDCAEN(x) ((x) << 11)
627#define FW_IQ_CMD_IQDCACPU(x) ((x) << 6)
628#define FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << 4)
629#define FW_IQ_CMD_IQO (1U << 3)
630#define FW_IQ_CMD_IQCPRIO(x) ((x) << 2)
631#define FW_IQ_CMD_IQESIZE(x) ((x) << 0)
632
633#define FW_IQ_CMD_IQNS(x) ((x) << 31)
634#define FW_IQ_CMD_IQRO(x) ((x) << 30)
635#define FW_IQ_CMD_IQFLINTIQHSEN(x) ((x) << 28)
636#define FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << 27)
637#define FW_IQ_CMD_IQFLINTISCSIC(x) ((x) << 26)
638#define FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << 20)
639#define FW_IQ_CMD_FL0CACHELOCK(x) ((x) << 15)
640#define FW_IQ_CMD_FL0DBP(x) ((x) << 14)
641#define FW_IQ_CMD_FL0DATANS(x) ((x) << 13)
642#define FW_IQ_CMD_FL0DATARO(x) ((x) << 12)
643#define FW_IQ_CMD_FL0CONGCIF(x) ((x) << 11)
644#define FW_IQ_CMD_FL0ONCHIP(x) ((x) << 10)
645#define FW_IQ_CMD_FL0STATUSPGNS(x) ((x) << 9)
646#define FW_IQ_CMD_FL0STATUSPGRO(x) ((x) << 8)
647#define FW_IQ_CMD_FL0FETCHNS(x) ((x) << 7)
648#define FW_IQ_CMD_FL0FETCHRO(x) ((x) << 6)
649#define FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << 4)
650#define FW_IQ_CMD_FL0CPRIO(x) ((x) << 3)
651#define FW_IQ_CMD_FL0PADEN (1U << 2)
652#define FW_IQ_CMD_FL0PACKEN (1U << 1)
653#define FW_IQ_CMD_FL0CONGEN (1U << 0)
654
655#define FW_IQ_CMD_FL0DCAEN(x) ((x) << 15)
656#define FW_IQ_CMD_FL0DCACPU(x) ((x) << 10)
657#define FW_IQ_CMD_FL0FBMIN(x) ((x) << 7)
658#define FW_IQ_CMD_FL0FBMAX(x) ((x) << 4)
659#define FW_IQ_CMD_FL0CIDXFTHRESHO (1U << 3)
660#define FW_IQ_CMD_FL0CIDXFTHRESH(x) ((x) << 0)
661
662#define FW_IQ_CMD_FL1CNGCHMAP(x) ((x) << 20)
663#define FW_IQ_CMD_FL1CACHELOCK(x) ((x) << 15)
664#define FW_IQ_CMD_FL1DBP(x) ((x) << 14)
665#define FW_IQ_CMD_FL1DATANS(x) ((x) << 13)
666#define FW_IQ_CMD_FL1DATARO(x) ((x) << 12)
667#define FW_IQ_CMD_FL1CONGCIF(x) ((x) << 11)
668#define FW_IQ_CMD_FL1ONCHIP(x) ((x) << 10)
669#define FW_IQ_CMD_FL1STATUSPGNS(x) ((x) << 9)
670#define FW_IQ_CMD_FL1STATUSPGRO(x) ((x) << 8)
671#define FW_IQ_CMD_FL1FETCHNS(x) ((x) << 7)
672#define FW_IQ_CMD_FL1FETCHRO(x) ((x) << 6)
673#define FW_IQ_CMD_FL1HOSTFCMODE(x) ((x) << 4)
674#define FW_IQ_CMD_FL1CPRIO(x) ((x) << 3)
675#define FW_IQ_CMD_FL1PADEN (1U << 2)
676#define FW_IQ_CMD_FL1PACKEN (1U << 1)
677#define FW_IQ_CMD_FL1CONGEN (1U << 0)
678
679#define FW_IQ_CMD_FL1DCAEN(x) ((x) << 15)
680#define FW_IQ_CMD_FL1DCACPU(x) ((x) << 10)
681#define FW_IQ_CMD_FL1FBMIN(x) ((x) << 7)
682#define FW_IQ_CMD_FL1FBMAX(x) ((x) << 4)
683#define FW_IQ_CMD_FL1CIDXFTHRESHO (1U << 3)
684#define FW_IQ_CMD_FL1CIDXFTHRESH(x) ((x) << 0)
685
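/*
 * Illustrative sketch, not part of the original header: one plausible way
 * to fill the first two words of an ingress-queue allocation request.
 * Sending the full structure (len16 = sizeof/16) and starting the queue
 * immediately are assumptions of this example.
 */
static inline void fw_iq_cmd_alloc_example(struct fw_iq_cmd *c,
        unsigned int pf, unsigned int vf)
{
 memset(c, 0, sizeof(*c));	/* assumes linux/string.h is available */
 c->op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
       FW_CMD_WRITE | FW_CMD_EXEC |
       FW_IQ_CMD_PFN(pf) | FW_IQ_CMD_VFN(vf));
 c->alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
     FW_CMD_LEN16(sizeof(*c) / 16));
}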
686struct fw_eq_eth_cmd {
687 __be32 op_to_vfn;
688 __be32 alloc_to_len16;
689 __be32 eqid_pkd;
690 __be32 physeqid_pkd;
691 __be32 fetchszm_to_iqid;
692 __be32 dcaen_to_eqsize;
693 __be64 eqaddr;
694 __be32 viid_pkd;
695 __be32 r8_lo;
696 __be64 r9;
697};
698
699#define FW_EQ_ETH_CMD_PFN(x) ((x) << 8)
700#define FW_EQ_ETH_CMD_VFN(x) ((x) << 0)
701#define FW_EQ_ETH_CMD_ALLOC (1U << 31)
702#define FW_EQ_ETH_CMD_FREE (1U << 30)
703#define FW_EQ_ETH_CMD_MODIFY (1U << 29)
704#define FW_EQ_ETH_CMD_EQSTART (1U << 28)
705#define FW_EQ_ETH_CMD_EQSTOP (1U << 27)
706
707#define FW_EQ_ETH_CMD_EQID(x) ((x) << 0)
708#define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
709#define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0)
710#define FW_EQ_ETH_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
711
712#define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26)
713#define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25)
714#define FW_EQ_ETH_CMD_STATUSPGRO(x) ((x) << 24)
715#define FW_EQ_ETH_CMD_FETCHNS(x) ((x) << 23)
716#define FW_EQ_ETH_CMD_FETCHRO(x) ((x) << 22)
717#define FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << 20)
718#define FW_EQ_ETH_CMD_CPRIO(x) ((x) << 19)
719#define FW_EQ_ETH_CMD_ONCHIP(x) ((x) << 18)
720#define FW_EQ_ETH_CMD_PCIECHN(x) ((x) << 16)
721#define FW_EQ_ETH_CMD_IQID(x) ((x) << 0)
722
723#define FW_EQ_ETH_CMD_DCAEN(x) ((x) << 31)
724#define FW_EQ_ETH_CMD_DCACPU(x) ((x) << 26)
725#define FW_EQ_ETH_CMD_FBMIN(x) ((x) << 23)
726#define FW_EQ_ETH_CMD_FBMAX(x) ((x) << 20)
727#define FW_EQ_ETH_CMD_CIDXFTHRESHO(x) ((x) << 19)
728#define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16)
729#define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0)
730
731#define FW_EQ_ETH_CMD_VIID(x) ((x) << 16)
732
733struct fw_eq_ctrl_cmd {
734 __be32 op_to_vfn;
735 __be32 alloc_to_len16;
736 __be32 cmpliqid_eqid;
737 __be32 physeqid_pkd;
738 __be32 fetchszm_to_iqid;
739 __be32 dcaen_to_eqsize;
740 __be64 eqaddr;
741};
742
743#define FW_EQ_CTRL_CMD_PFN(x) ((x) << 8)
744#define FW_EQ_CTRL_CMD_VFN(x) ((x) << 0)
745
746#define FW_EQ_CTRL_CMD_ALLOC (1U << 31)
747#define FW_EQ_CTRL_CMD_FREE (1U << 30)
748#define FW_EQ_CTRL_CMD_MODIFY (1U << 29)
749#define FW_EQ_CTRL_CMD_EQSTART (1U << 28)
750#define FW_EQ_CTRL_CMD_EQSTOP (1U << 27)
751
752#define FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << 20)
753#define FW_EQ_CTRL_CMD_EQID(x) ((x) << 0)
754#define FW_EQ_CTRL_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
755#define FW_EQ_CTRL_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
756
757#define FW_EQ_CTRL_CMD_FETCHSZM (1U << 26)
758#define FW_EQ_CTRL_CMD_STATUSPGNS (1U << 25)
759#define FW_EQ_CTRL_CMD_STATUSPGRO (1U << 24)
760#define FW_EQ_CTRL_CMD_FETCHNS (1U << 23)
761#define FW_EQ_CTRL_CMD_FETCHRO (1U << 22)
762#define FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << 20)
763#define FW_EQ_CTRL_CMD_CPRIO(x) ((x) << 19)
764#define FW_EQ_CTRL_CMD_ONCHIP(x) ((x) << 18)
765#define FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << 16)
766#define FW_EQ_CTRL_CMD_IQID(x) ((x) << 0)
767
768#define FW_EQ_CTRL_CMD_DCAEN(x) ((x) << 31)
769#define FW_EQ_CTRL_CMD_DCACPU(x) ((x) << 26)
770#define FW_EQ_CTRL_CMD_FBMIN(x) ((x) << 23)
771#define FW_EQ_CTRL_CMD_FBMAX(x) ((x) << 20)
772#define FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) ((x) << 19)
773#define FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << 16)
774#define FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << 0)
775
776struct fw_eq_ofld_cmd {
777 __be32 op_to_vfn;
778 __be32 alloc_to_len16;
779 __be32 eqid_pkd;
780 __be32 physeqid_pkd;
781 __be32 fetchszm_to_iqid;
782 __be32 dcaen_to_eqsize;
783 __be64 eqaddr;
784};
785
786#define FW_EQ_OFLD_CMD_PFN(x) ((x) << 8)
787#define FW_EQ_OFLD_CMD_VFN(x) ((x) << 0)
788
789#define FW_EQ_OFLD_CMD_ALLOC (1U << 31)
790#define FW_EQ_OFLD_CMD_FREE (1U << 30)
791#define FW_EQ_OFLD_CMD_MODIFY (1U << 29)
792#define FW_EQ_OFLD_CMD_EQSTART (1U << 28)
793#define FW_EQ_OFLD_CMD_EQSTOP (1U << 27)
794
795#define FW_EQ_OFLD_CMD_EQID(x) ((x) << 0)
796#define FW_EQ_OFLD_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
797#define FW_EQ_OFLD_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
798
799#define FW_EQ_OFLD_CMD_FETCHSZM(x) ((x) << 26)
800#define FW_EQ_OFLD_CMD_STATUSPGNS(x) ((x) << 25)
801#define FW_EQ_OFLD_CMD_STATUSPGRO(x) ((x) << 24)
802#define FW_EQ_OFLD_CMD_FETCHNS(x) ((x) << 23)
803#define FW_EQ_OFLD_CMD_FETCHRO(x) ((x) << 22)
804#define FW_EQ_OFLD_CMD_HOSTFCMODE(x) ((x) << 20)
805#define FW_EQ_OFLD_CMD_CPRIO(x) ((x) << 19)
806#define FW_EQ_OFLD_CMD_ONCHIP(x) ((x) << 18)
807#define FW_EQ_OFLD_CMD_PCIECHN(x) ((x) << 16)
808#define FW_EQ_OFLD_CMD_IQID(x) ((x) << 0)
809
810#define FW_EQ_OFLD_CMD_DCAEN(x) ((x) << 31)
811#define FW_EQ_OFLD_CMD_DCACPU(x) ((x) << 26)
812#define FW_EQ_OFLD_CMD_FBMIN(x) ((x) << 23)
813#define FW_EQ_OFLD_CMD_FBMAX(x) ((x) << 20)
814#define FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) ((x) << 19)
815#define FW_EQ_OFLD_CMD_CIDXFTHRESH(x) ((x) << 16)
816#define FW_EQ_OFLD_CMD_EQSIZE(x) ((x) << 0)
817
818/*
819 * Macros for VIID parsing:
820 * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number
821 */
822#define FW_VIID_PFN_GET(x) (((x) >> 8) & 0x7)
823#define FW_VIID_VIVLD_GET(x) (((x) >> 7) & 0x1)
824#define FW_VIID_VIN_GET(x) (((x) >> 0) & 0x7F)
825
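/*
 * Illustrative sketch, not part of the original header: splitting a VIID
 * into the fields documented in the comment above.
 */
static inline void fw_viid_decode_example(unsigned int viid,
       unsigned int *pfn, unsigned int *vivld, unsigned int *vin)
{
 *pfn = FW_VIID_PFN_GET(viid);		/* owning PF, bits 10:8 */
 *vivld = FW_VIID_VIVLD_GET(viid);	/* VI valid flag, bit 7 */
 *vin = FW_VIID_VIN_GET(viid);		/* VI number, bits 6:0 */
}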
826struct fw_vi_cmd {
827 __be32 op_to_vfn;
828 __be32 alloc_to_len16;
829 __be16 type_viid;
830 u8 mac[6];
831 u8 portid_pkd;
832 u8 nmac;
833 u8 nmac0[6];
834 __be16 rsssize_pkd;
835 u8 nmac1[6];
836 __be16 idsiiq_pkd;
837 u8 nmac2[6];
838 __be16 idseiq_pkd;
839 u8 nmac3[6];
840 __be64 r9;
841 __be64 r10;
842};
843
844#define FW_VI_CMD_PFN(x) ((x) << 8)
845#define FW_VI_CMD_VFN(x) ((x) << 0)
846#define FW_VI_CMD_ALLOC (1U << 31)
847#define FW_VI_CMD_FREE (1U << 30)
848#define FW_VI_CMD_VIID(x) ((x) << 0)
849#define FW_VI_CMD_VIID_GET(x) ((x) & 0xfff)
850#define FW_VI_CMD_PORTID(x) ((x) << 4)
851#define FW_VI_CMD_PORTID_GET(x) (((x) >> 4) & 0xf)
852#define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff)
853
854/* Special VI_MAC command index ids */
855#define FW_VI_MAC_ADD_MAC 0x3FF
856#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE
857#define FW_VI_MAC_MAC_BASED_FREE 0x3FD
858#define FW_CLS_TCAM_NUM_ENTRIES 336
859
860enum fw_vi_mac_smac {
861 FW_VI_MAC_MPS_TCAM_ENTRY,
862 FW_VI_MAC_MPS_TCAM_ONLY,
863 FW_VI_MAC_SMT_ONLY,
864 FW_VI_MAC_SMT_AND_MPSTCAM
865};
866
867enum fw_vi_mac_result {
868 FW_VI_MAC_R_SUCCESS,
869 FW_VI_MAC_R_F_NONEXISTENT_NOMEM,
870 FW_VI_MAC_R_SMAC_FAIL,
871 FW_VI_MAC_R_F_ACL_CHECK
872};
873
874struct fw_vi_mac_cmd {
875 __be32 op_to_viid;
876 __be32 freemacs_to_len16;
877 union fw_vi_mac {
878 struct fw_vi_mac_exact {
879 __be16 valid_to_idx;
880 u8 macaddr[6];
881 } exact[7];
882 struct fw_vi_mac_hash {
883 __be64 hashvec;
884 } hash;
885 } u;
886};
887
888#define FW_VI_MAC_CMD_VIID(x) ((x) << 0)
889#define FW_VI_MAC_CMD_FREEMACS(x) ((x) << 31)
890#define FW_VI_MAC_CMD_HASHVECEN (1U << 23)
891#define FW_VI_MAC_CMD_HASHUNIEN(x) ((x) << 22)
892#define FW_VI_MAC_CMD_VALID (1U << 15)
893#define FW_VI_MAC_CMD_PRIO(x) ((x) << 12)
894#define FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << 10)
895#define FW_VI_MAC_CMD_SMAC_RESULT_GET(x) (((x) >> 10) & 0x3)
896#define FW_VI_MAC_CMD_IDX(x) ((x) << 0)
897#define FW_VI_MAC_CMD_IDX_GET(x) (((x) >> 0) & 0x3ff)
898
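/*
 * Illustrative sketch, not part of the original header: filling one
 * exact-match slot of a fw_vi_mac_cmd.  Using FW_VI_MAC_ADD_MAC as the
 * index so firmware picks a free entry is an assumption drawn from the
 * "special index" defines above.
 */
static inline void fw_vi_mac_exact_example(struct fw_vi_mac_cmd *cmd,
        const u8 *addr)
{
 struct fw_vi_mac_exact *p = &cmd->u.exact[0];

 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID |
          FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
 memcpy(p->macaddr, addr, sizeof(p->macaddr));	/* assumes linux/string.h */
}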
899#define FW_RXMODE_MTU_NO_CHG 65535
900
901struct fw_vi_rxmode_cmd {
902 __be32 op_to_viid;
903 __be32 retval_len16;
904 __be32 mtu_to_vlanexen;
905 __be32 r4_lo;
906};
907
908#define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0)
909#define FW_VI_RXMODE_CMD_MTU_MASK 0xffff
910#define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16)
911#define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3
912#define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14)
913#define FW_VI_RXMODE_CMD_ALLMULTIEN_MASK 0x3
914#define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12)
915#define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3
916#define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10)
917#define FW_VI_RXMODE_CMD_VLANEXEN_MASK 0x3
918#define FW_VI_RXMODE_CMD_VLANEXEN(x) ((x) << 8)
919
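/*
 * Illustrative sketch, not part of the original header: packing the
 * mtu_to_vlanexen word.  Treating FW_RXMODE_MTU_NO_CHG as "keep the
 * current MTU" follows its name; the enable values are example choices.
 */
static inline __be32 fw_rxmode_word_example(unsigned int promisc,
         unsigned int all_multi)
{
 return cpu_to_be32(FW_VI_RXMODE_CMD_MTU(FW_RXMODE_MTU_NO_CHG) |
      FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
      FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
      FW_VI_RXMODE_CMD_BROADCASTEN(1) |
      FW_VI_RXMODE_CMD_VLANEXEN(0));
}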
920struct fw_vi_enable_cmd {
921 __be32 op_to_viid;
922 __be32 ien_to_len16;
923 __be16 blinkdur;
924 __be16 r3;
925 __be32 r4;
926};
927
928#define FW_VI_ENABLE_CMD_VIID(x) ((x) << 0)
929#define FW_VI_ENABLE_CMD_IEN(x) ((x) << 31)
930#define FW_VI_ENABLE_CMD_EEN(x) ((x) << 30)
931#define FW_VI_ENABLE_CMD_LED (1U << 29)
932
933/* VI VF stats offset definitions */
934#define VI_VF_NUM_STATS 16
935enum fw_vi_stats_vf_index {
936 FW_VI_VF_STAT_TX_BCAST_BYTES_IX,
937 FW_VI_VF_STAT_TX_BCAST_FRAMES_IX,
938 FW_VI_VF_STAT_TX_MCAST_BYTES_IX,
939 FW_VI_VF_STAT_TX_MCAST_FRAMES_IX,
940 FW_VI_VF_STAT_TX_UCAST_BYTES_IX,
941 FW_VI_VF_STAT_TX_UCAST_FRAMES_IX,
942 FW_VI_VF_STAT_TX_DROP_FRAMES_IX,
943 FW_VI_VF_STAT_TX_OFLD_BYTES_IX,
944 FW_VI_VF_STAT_TX_OFLD_FRAMES_IX,
945 FW_VI_VF_STAT_RX_BCAST_BYTES_IX,
946 FW_VI_VF_STAT_RX_BCAST_FRAMES_IX,
947 FW_VI_VF_STAT_RX_MCAST_BYTES_IX,
948 FW_VI_VF_STAT_RX_MCAST_FRAMES_IX,
949 FW_VI_VF_STAT_RX_UCAST_BYTES_IX,
950 FW_VI_VF_STAT_RX_UCAST_FRAMES_IX,
951 FW_VI_VF_STAT_RX_ERR_FRAMES_IX
952};
953
954/* VI PF stats offset definitions */
955#define VI_PF_NUM_STATS 17
956enum fw_vi_stats_pf_index {
957 FW_VI_PF_STAT_TX_BCAST_BYTES_IX,
958 FW_VI_PF_STAT_TX_BCAST_FRAMES_IX,
959 FW_VI_PF_STAT_TX_MCAST_BYTES_IX,
960 FW_VI_PF_STAT_TX_MCAST_FRAMES_IX,
961 FW_VI_PF_STAT_TX_UCAST_BYTES_IX,
962 FW_VI_PF_STAT_TX_UCAST_FRAMES_IX,
963 FW_VI_PF_STAT_TX_OFLD_BYTES_IX,
964 FW_VI_PF_STAT_TX_OFLD_FRAMES_IX,
965 FW_VI_PF_STAT_RX_BYTES_IX,
966 FW_VI_PF_STAT_RX_FRAMES_IX,
967 FW_VI_PF_STAT_RX_BCAST_BYTES_IX,
968 FW_VI_PF_STAT_RX_BCAST_FRAMES_IX,
969 FW_VI_PF_STAT_RX_MCAST_BYTES_IX,
970 FW_VI_PF_STAT_RX_MCAST_FRAMES_IX,
971 FW_VI_PF_STAT_RX_UCAST_BYTES_IX,
972 FW_VI_PF_STAT_RX_UCAST_FRAMES_IX,
973 FW_VI_PF_STAT_RX_ERR_FRAMES_IX
974};
975
976struct fw_vi_stats_cmd {
977 __be32 op_to_viid;
978 __be32 retval_len16;
979 union fw_vi_stats {
980 struct fw_vi_stats_ctl {
981 __be16 nstats_ix;
982 __be16 r6;
983 __be32 r7;
984 __be64 stat0;
985 __be64 stat1;
986 __be64 stat2;
987 __be64 stat3;
988 __be64 stat4;
989 __be64 stat5;
990 } ctl;
991 struct fw_vi_stats_pf {
992 __be64 tx_bcast_bytes;
993 __be64 tx_bcast_frames;
994 __be64 tx_mcast_bytes;
995 __be64 tx_mcast_frames;
996 __be64 tx_ucast_bytes;
997 __be64 tx_ucast_frames;
998 __be64 tx_offload_bytes;
999 __be64 tx_offload_frames;
1000 __be64 rx_pf_bytes;
1001 __be64 rx_pf_frames;
1002 __be64 rx_bcast_bytes;
1003 __be64 rx_bcast_frames;
1004 __be64 rx_mcast_bytes;
1005 __be64 rx_mcast_frames;
1006 __be64 rx_ucast_bytes;
1007 __be64 rx_ucast_frames;
1008 __be64 rx_err_frames;
1009 } pf;
1010 struct fw_vi_stats_vf {
1011 __be64 tx_bcast_bytes;
1012 __be64 tx_bcast_frames;
1013 __be64 tx_mcast_bytes;
1014 __be64 tx_mcast_frames;
1015 __be64 tx_ucast_bytes;
1016 __be64 tx_ucast_frames;
1017 __be64 tx_drop_frames;
1018 __be64 tx_offload_bytes;
1019 __be64 tx_offload_frames;
1020 __be64 rx_bcast_bytes;
1021 __be64 rx_bcast_frames;
1022 __be64 rx_mcast_bytes;
1023 __be64 rx_mcast_frames;
1024 __be64 rx_ucast_bytes;
1025 __be64 rx_ucast_frames;
1026 __be64 rx_err_frames;
1027 } vf;
1028 } u;
1029};
1030
1031#define FW_VI_STATS_CMD_VIID(x) ((x) << 0)
1032#define FW_VI_STATS_CMD_NSTATS(x) ((x) << 12)
1033#define FW_VI_STATS_CMD_IX(x) ((x) << 0)
1034
1035struct fw_acl_mac_cmd {
1036 __be32 op_to_vfn;
1037 __be32 en_to_len16;
1038 u8 nmac;
1039 u8 r3[7];
1040 __be16 r4;
1041 u8 macaddr0[6];
1042 __be16 r5;
1043 u8 macaddr1[6];
1044 __be16 r6;
1045 u8 macaddr2[6];
1046 __be16 r7;
1047 u8 macaddr3[6];
1048};
1049
1050#define FW_ACL_MAC_CMD_PFN(x) ((x) << 8)
1051#define FW_ACL_MAC_CMD_VFN(x) ((x) << 0)
1052#define FW_ACL_MAC_CMD_EN(x) ((x) << 31)
1053
1054struct fw_acl_vlan_cmd {
1055 __be32 op_to_vfn;
1056 __be32 en_to_len16;
1057 u8 nvlan;
1058 u8 dropnovlan_fm;
1059 u8 r3_lo[6];
1060 __be16 vlanid[16];
1061};
1062
1063#define FW_ACL_VLAN_CMD_PFN(x) ((x) << 8)
1064#define FW_ACL_VLAN_CMD_VFN(x) ((x) << 0)
1065#define FW_ACL_VLAN_CMD_EN(x) ((x) << 31)
1066#define FW_ACL_VLAN_CMD_DROPNOVLAN(x) ((x) << 7)
1067#define FW_ACL_VLAN_CMD_FM(x) ((x) << 6)
1068
1069enum fw_port_cap {
1070 FW_PORT_CAP_SPEED_100M = 0x0001,
1071 FW_PORT_CAP_SPEED_1G = 0x0002,
1072 FW_PORT_CAP_SPEED_2_5G = 0x0004,
1073 FW_PORT_CAP_SPEED_10G = 0x0008,
1074 FW_PORT_CAP_SPEED_40G = 0x0010,
1075 FW_PORT_CAP_SPEED_100G = 0x0020,
1076 FW_PORT_CAP_FC_RX = 0x0040,
1077 FW_PORT_CAP_FC_TX = 0x0080,
1078 FW_PORT_CAP_ANEG = 0x0100,
1079 FW_PORT_CAP_MDI_0 = 0x0200,
1080 FW_PORT_CAP_MDI_1 = 0x0400,
1081 FW_PORT_CAP_BEAN = 0x0800,
1082 FW_PORT_CAP_PMA_LPBK = 0x1000,
1083 FW_PORT_CAP_PCS_LPBK = 0x2000,
1084 FW_PORT_CAP_PHYXS_LPBK = 0x4000,
1085 FW_PORT_CAP_FAR_END_LPBK = 0x8000,
1086};
1087
1088enum fw_port_mdi {
1089 FW_PORT_MDI_UNCHANGED,
1090 FW_PORT_MDI_AUTO,
1091 FW_PORT_MDI_F_STRAIGHT,
1092 FW_PORT_MDI_F_CROSSOVER
1093};
1094
1095#define FW_PORT_MDI(x) ((x) << 9)
1096
1097enum fw_port_action {
1098 FW_PORT_ACTION_L1_CFG = 0x0001,
1099 FW_PORT_ACTION_L2_CFG = 0x0002,
1100 FW_PORT_ACTION_GET_PORT_INFO = 0x0003,
1101 FW_PORT_ACTION_L2_PPP_CFG = 0x0004,
1102 FW_PORT_ACTION_L2_DCB_CFG = 0x0005,
1103 FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010,
1104 FW_PORT_ACTION_L1_LOW_PWR_EN = 0x0011,
1105 FW_PORT_ACTION_L2_WOL_MODE_EN = 0x0012,
1106 FW_PORT_ACTION_LPBK_TO_NORMAL = 0x0020,
1107 FW_PORT_ACTION_L1_LPBK = 0x0021,
1108 FW_PORT_ACTION_L1_PMA_LPBK = 0x0022,
1109 FW_PORT_ACTION_L1_PCS_LPBK = 0x0023,
1110 FW_PORT_ACTION_L1_PHYXS_CSIDE_LPBK = 0x0024,
1111 FW_PORT_ACTION_L1_PHYXS_ESIDE_LPBK = 0x0025,
1112 FW_PORT_ACTION_PHY_RESET = 0x0040,
1113 FW_PORT_ACTION_PMA_RESET = 0x0041,
1114 FW_PORT_ACTION_PCS_RESET = 0x0042,
1115 FW_PORT_ACTION_PHYXS_RESET = 0x0043,
1116 FW_PORT_ACTION_DTEXS_REEST = 0x0044,
1117 FW_PORT_ACTION_AN_RESET = 0x0045
1118};
1119
1120enum fw_port_l2cfg_ctlbf {
1121 FW_PORT_L2_CTLBF_OVLAN0 = 0x01,
1122 FW_PORT_L2_CTLBF_OVLAN1 = 0x02,
1123 FW_PORT_L2_CTLBF_OVLAN2 = 0x04,
1124 FW_PORT_L2_CTLBF_OVLAN3 = 0x08,
1125 FW_PORT_L2_CTLBF_IVLAN = 0x10,
1126 FW_PORT_L2_CTLBF_TXIPG = 0x20
1127};
1128
1129enum fw_port_dcb_cfg {
1130 FW_PORT_DCB_CFG_PG = 0x01,
1131 FW_PORT_DCB_CFG_PFC = 0x02,
1132 FW_PORT_DCB_CFG_APPL = 0x04
1133};
1134
1135enum fw_port_dcb_cfg_rc {
1136 FW_PORT_DCB_CFG_SUCCESS = 0x0,
1137 FW_PORT_DCB_CFG_ERROR = 0x1
1138};
1139
1140struct fw_port_cmd {
1141 __be32 op_to_portid;
1142 __be32 action_to_len16;
1143 union fw_port {
1144 struct fw_port_l1cfg {
1145 __be32 rcap;
1146 __be32 r;
1147 } l1cfg;
1148 struct fw_port_l2cfg {
1149 __be16 ctlbf_to_ivlan0;
1150 __be16 ivlantype;
1151 __be32 txipg_pkd;
1152 __be16 ovlan0mask;
1153 __be16 ovlan0type;
1154 __be16 ovlan1mask;
1155 __be16 ovlan1type;
1156 __be16 ovlan2mask;
1157 __be16 ovlan2type;
1158 __be16 ovlan3mask;
1159 __be16 ovlan3type;
1160 } l2cfg;
1161 struct fw_port_info {
1162 __be32 lstatus_to_modtype;
1163 __be16 pcap;
1164 __be16 acap;
1165 __be16 mtu;
1166 __u8 cbllen;
1167 __u8 r9;
1168 __be32 r10;
1169 __be64 r11;
1170 } info;
1171 struct fw_port_ppp {
1172 __be32 pppen_to_ncsich;
1173 __be32 r11;
1174 } ppp;
1175 struct fw_port_dcb {
1176 __be16 cfg;
1177 u8 up_map;
1178 u8 sf_cfgrc;
1179 __be16 prot_ix;
1180 u8 pe7_to_pe0;
1181 u8 numTCPFCs;
1182 __be32 pgid0_to_pgid7;
1183 __be32 numTCs_oui;
1184 u8 pgpc[8];
1185 } dcb;
1186 } u;
1187};
1188
1189#define FW_PORT_CMD_READ (1U << 22)
1190
1191#define FW_PORT_CMD_PORTID(x) ((x) << 0)
1192#define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
1193
1194#define FW_PORT_CMD_ACTION(x) ((x) << 16)
1195#define FW_PORT_CMD_ACTION_GET(x) (((x) >> 16) & 0xffff)
1196
1197#define FW_PORT_CMD_CTLBF(x) ((x) << 10)
1198#define FW_PORT_CMD_OVLAN3(x) ((x) << 7)
1199#define FW_PORT_CMD_OVLAN2(x) ((x) << 6)
1200#define FW_PORT_CMD_OVLAN1(x) ((x) << 5)
1201#define FW_PORT_CMD_OVLAN0(x) ((x) << 4)
1202#define FW_PORT_CMD_IVLAN0(x) ((x) << 3)
1203
1204#define FW_PORT_CMD_TXIPG(x) ((x) << 19)
1205
1206#define FW_PORT_CMD_LSTATUS (1U << 31)
1207#define FW_PORT_CMD_LSPEED(x) ((x) << 24)
1208#define FW_PORT_CMD_LSPEED_GET(x) (((x) >> 24) & 0x3f)
1209#define FW_PORT_CMD_TXPAUSE (1U << 23)
1210#define FW_PORT_CMD_RXPAUSE (1U << 22)
1211#define FW_PORT_CMD_MDIOCAP (1U << 21)
1212#define FW_PORT_CMD_MDIOADDR_GET(x) (((x) >> 16) & 0x1f)
1213#define FW_PORT_CMD_LPTXPAUSE (1U << 15)
1214#define FW_PORT_CMD_LPRXPAUSE (1U << 14)
1215#define FW_PORT_CMD_PTYPE_MASK 0x1f
1216#define FW_PORT_CMD_PTYPE_GET(x) (((x) >> 8) & FW_PORT_CMD_PTYPE_MASK)
1217#define FW_PORT_CMD_MODTYPE_MASK 0x1f
1218#define FW_PORT_CMD_MODTYPE_GET(x) (((x) >> 0) & FW_PORT_CMD_MODTYPE_MASK)
1219
1220#define FW_PORT_CMD_PPPEN(x) ((x) << 31)
1221#define FW_PORT_CMD_TPSRC(x) ((x) << 28)
1222#define FW_PORT_CMD_NCSISRC(x) ((x) << 24)
1223
1224#define FW_PORT_CMD_CH0(x) ((x) << 20)
1225#define FW_PORT_CMD_CH1(x) ((x) << 16)
1226#define FW_PORT_CMD_CH2(x) ((x) << 12)
1227#define FW_PORT_CMD_CH3(x) ((x) << 8)
1228#define FW_PORT_CMD_NCSICH(x) ((x) << 4)
1229
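/*
 * Illustrative sketch, not part of the original header: pulling the link
 * fields out of the info variant of a fw_port_cmd reply (e.g. after a
 * FW_PORT_ACTION_GET_PORT_INFO request).
 */
static inline void fw_port_info_example(const struct fw_port_cmd *cmd,
     int *link_up, unsigned int *ptype, unsigned int *modtype)
{
 u32 word = be32_to_cpu(cmd->u.info.lstatus_to_modtype);

 *link_up = !!(word & FW_PORT_CMD_LSTATUS);
 *ptype = FW_PORT_CMD_PTYPE_GET(word);
 *modtype = FW_PORT_CMD_MODTYPE_GET(word);
}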
1230enum fw_port_type {
1231 FW_PORT_TYPE_FIBER_XFI,
1232 FW_PORT_TYPE_FIBER_XAUI,
1233 FW_PORT_TYPE_BT_SGMII,
1234 FW_PORT_TYPE_BT_XFI,
1235 FW_PORT_TYPE_BT_XAUI,
1236 FW_PORT_TYPE_KX4,
1237 FW_PORT_TYPE_CX4,
1238 FW_PORT_TYPE_KX,
1239 FW_PORT_TYPE_KR,
1240 FW_PORT_TYPE_SFP,
1241 FW_PORT_TYPE_BP_AP,
1242 FW_PORT_TYPE_BP4_AP,
1243
1244 FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
1245};
1246
1247enum fw_port_module_type {
1248 FW_PORT_MOD_TYPE_NA,
1249 FW_PORT_MOD_TYPE_LR,
1250 FW_PORT_MOD_TYPE_SR,
1251 FW_PORT_MOD_TYPE_ER,
1252 FW_PORT_MOD_TYPE_TWINAX_PASSIVE,
1253 FW_PORT_MOD_TYPE_TWINAX_ACTIVE,
1254 FW_PORT_MOD_TYPE_LRM,
1255
1256 FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK
1257};
1258
1259/* port stats */
1260#define FW_NUM_PORT_STATS 50
1261#define FW_NUM_PORT_TX_STATS 23
1262#define FW_NUM_PORT_RX_STATS 27
1263
1264enum fw_port_stats_tx_index {
1265 FW_STAT_TX_PORT_BYTES_IX,
1266 FW_STAT_TX_PORT_FRAMES_IX,
1267 FW_STAT_TX_PORT_BCAST_IX,
1268 FW_STAT_TX_PORT_MCAST_IX,
1269 FW_STAT_TX_PORT_UCAST_IX,
1270 FW_STAT_TX_PORT_ERROR_IX,
1271 FW_STAT_TX_PORT_64B_IX,
1272 FW_STAT_TX_PORT_65B_127B_IX,
1273 FW_STAT_TX_PORT_128B_255B_IX,
1274 FW_STAT_TX_PORT_256B_511B_IX,
1275 FW_STAT_TX_PORT_512B_1023B_IX,
1276 FW_STAT_TX_PORT_1024B_1518B_IX,
1277 FW_STAT_TX_PORT_1519B_MAX_IX,
1278 FW_STAT_TX_PORT_DROP_IX,
1279 FW_STAT_TX_PORT_PAUSE_IX,
1280 FW_STAT_TX_PORT_PPP0_IX,
1281 FW_STAT_TX_PORT_PPP1_IX,
1282 FW_STAT_TX_PORT_PPP2_IX,
1283 FW_STAT_TX_PORT_PPP3_IX,
1284 FW_STAT_TX_PORT_PPP4_IX,
1285 FW_STAT_TX_PORT_PPP5_IX,
1286 FW_STAT_TX_PORT_PPP6_IX,
1287 FW_STAT_TX_PORT_PPP7_IX
1288};
1289
1290enum fw_port_stat_rx_index {
1291 FW_STAT_RX_PORT_BYTES_IX,
1292 FW_STAT_RX_PORT_FRAMES_IX,
1293 FW_STAT_RX_PORT_BCAST_IX,
1294 FW_STAT_RX_PORT_MCAST_IX,
1295 FW_STAT_RX_PORT_UCAST_IX,
1296 FW_STAT_RX_PORT_MTU_ERROR_IX,
1297 FW_STAT_RX_PORT_MTU_CRC_ERROR_IX,
1298 FW_STAT_RX_PORT_CRC_ERROR_IX,
1299 FW_STAT_RX_PORT_LEN_ERROR_IX,
1300 FW_STAT_RX_PORT_SYM_ERROR_IX,
1301 FW_STAT_RX_PORT_64B_IX,
1302 FW_STAT_RX_PORT_65B_127B_IX,
1303 FW_STAT_RX_PORT_128B_255B_IX,
1304 FW_STAT_RX_PORT_256B_511B_IX,
1305 FW_STAT_RX_PORT_512B_1023B_IX,
1306 FW_STAT_RX_PORT_1024B_1518B_IX,
1307 FW_STAT_RX_PORT_1519B_MAX_IX,
1308 FW_STAT_RX_PORT_PAUSE_IX,
1309 FW_STAT_RX_PORT_PPP0_IX,
1310 FW_STAT_RX_PORT_PPP1_IX,
1311 FW_STAT_RX_PORT_PPP2_IX,
1312 FW_STAT_RX_PORT_PPP3_IX,
1313 FW_STAT_RX_PORT_PPP4_IX,
1314 FW_STAT_RX_PORT_PPP5_IX,
1315 FW_STAT_RX_PORT_PPP6_IX,
1316 FW_STAT_RX_PORT_PPP7_IX,
1317 FW_STAT_RX_PORT_LESS_64B_IX
1318};
1319
1320struct fw_port_stats_cmd {
1321 __be32 op_to_portid;
1322 __be32 retval_len16;
1323 union fw_port_stats {
1324 struct fw_port_stats_ctl {
1325 u8 nstats_bg_bm;
1326 u8 tx_ix;
1327 __be16 r6;
1328 __be32 r7;
1329 __be64 stat0;
1330 __be64 stat1;
1331 __be64 stat2;
1332 __be64 stat3;
1333 __be64 stat4;
1334 __be64 stat5;
1335 } ctl;
1336 struct fw_port_stats_all {
1337 __be64 tx_bytes;
1338 __be64 tx_frames;
1339 __be64 tx_bcast;
1340 __be64 tx_mcast;
1341 __be64 tx_ucast;
1342 __be64 tx_error;
1343 __be64 tx_64b;
1344 __be64 tx_65b_127b;
1345 __be64 tx_128b_255b;
1346 __be64 tx_256b_511b;
1347 __be64 tx_512b_1023b;
1348 __be64 tx_1024b_1518b;
1349 __be64 tx_1519b_max;
1350 __be64 tx_drop;
1351 __be64 tx_pause;
1352 __be64 tx_ppp0;
1353 __be64 tx_ppp1;
1354 __be64 tx_ppp2;
1355 __be64 tx_ppp3;
1356 __be64 tx_ppp4;
1357 __be64 tx_ppp5;
1358 __be64 tx_ppp6;
1359 __be64 tx_ppp7;
1360 __be64 rx_bytes;
1361 __be64 rx_frames;
1362 __be64 rx_bcast;
1363 __be64 rx_mcast;
1364 __be64 rx_ucast;
1365 __be64 rx_mtu_error;
1366 __be64 rx_mtu_crc_error;
1367 __be64 rx_crc_error;
1368 __be64 rx_len_error;
1369 __be64 rx_sym_error;
1370 __be64 rx_64b;
1371 __be64 rx_65b_127b;
1372 __be64 rx_128b_255b;
1373 __be64 rx_256b_511b;
1374 __be64 rx_512b_1023b;
1375 __be64 rx_1024b_1518b;
1376 __be64 rx_1519b_max;
1377 __be64 rx_pause;
1378 __be64 rx_ppp0;
1379 __be64 rx_ppp1;
1380 __be64 rx_ppp2;
1381 __be64 rx_ppp3;
1382 __be64 rx_ppp4;
1383 __be64 rx_ppp5;
1384 __be64 rx_ppp6;
1385 __be64 rx_ppp7;
1386 __be64 rx_less_64b;
1387 __be64 rx_bg_drop;
1388 __be64 rx_bg_trunc;
1389 } all;
1390 } u;
1391};
1392
1393#define FW_PORT_STATS_CMD_NSTATS(x) ((x) << 4)
1394#define FW_PORT_STATS_CMD_BG_BM(x) ((x) << 0)
1395#define FW_PORT_STATS_CMD_TX(x) ((x) << 7)
1396#define FW_PORT_STATS_CMD_IX(x) ((x) << 0)
1397
1398/* port loopback stats */
1399#define FW_NUM_LB_STATS 16
1400enum fw_port_lb_stats_index {
1401 FW_STAT_LB_PORT_BYTES_IX,
1402 FW_STAT_LB_PORT_FRAMES_IX,
1403 FW_STAT_LB_PORT_BCAST_IX,
1404 FW_STAT_LB_PORT_MCAST_IX,
1405 FW_STAT_LB_PORT_UCAST_IX,
1406 FW_STAT_LB_PORT_ERROR_IX,
1407 FW_STAT_LB_PORT_64B_IX,
1408 FW_STAT_LB_PORT_65B_127B_IX,
1409 FW_STAT_LB_PORT_128B_255B_IX,
1410 FW_STAT_LB_PORT_256B_511B_IX,
1411 FW_STAT_LB_PORT_512B_1023B_IX,
1412 FW_STAT_LB_PORT_1024B_1518B_IX,
1413 FW_STAT_LB_PORT_1519B_MAX_IX,
1414 FW_STAT_LB_PORT_DROP_FRAMES_IX
1415};
1416
1417struct fw_port_lb_stats_cmd {
1418 __be32 op_to_lbport;
1419 __be32 retval_len16;
1420 union fw_port_lb_stats {
1421 struct fw_port_lb_stats_ctl {
1422 u8 nstats_bg_bm;
1423 u8 ix_pkd;
1424 __be16 r6;
1425 __be32 r7;
1426 __be64 stat0;
1427 __be64 stat1;
1428 __be64 stat2;
1429 __be64 stat3;
1430 __be64 stat4;
1431 __be64 stat5;
1432 } ctl;
1433 struct fw_port_lb_stats_all {
1434 __be64 tx_bytes;
1435 __be64 tx_frames;
1436 __be64 tx_bcast;
1437 __be64 tx_mcast;
1438 __be64 tx_ucast;
1439 __be64 tx_error;
1440 __be64 tx_64b;
1441 __be64 tx_65b_127b;
1442 __be64 tx_128b_255b;
1443 __be64 tx_256b_511b;
1444 __be64 tx_512b_1023b;
1445 __be64 tx_1024b_1518b;
1446 __be64 tx_1519b_max;
1447 __be64 rx_lb_drop;
1448 __be64 rx_lb_trunc;
1449 } all;
1450 } u;
1451};
1452
1453#define FW_PORT_LB_STATS_CMD_LBPORT(x) ((x) << 0)
1454#define FW_PORT_LB_STATS_CMD_NSTATS(x) ((x) << 4)
1455#define FW_PORT_LB_STATS_CMD_BG_BM(x) ((x) << 0)
1456#define FW_PORT_LB_STATS_CMD_IX(x) ((x) << 0)
1457
1458struct fw_rss_ind_tbl_cmd {
1459 __be32 op_to_viid;
1460#define FW_RSS_IND_TBL_CMD_VIID(x) ((x) << 0)
1461 __be32 retval_len16;
1462 __be16 niqid;
1463 __be16 startidx;
1464 __be32 r3;
1465 __be32 iq0_to_iq2;
1466#define FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << 20)
1467#define FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << 10)
1468#define FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << 0)
1469 __be32 iq3_to_iq5;
1470 __be32 iq6_to_iq8;
1471 __be32 iq9_to_iq11;
1472 __be32 iq12_to_iq14;
1473 __be32 iq15_to_iq17;
1474 __be32 iq18_to_iq20;
1475 __be32 iq21_to_iq23;
1476 __be32 iq24_to_iq26;
1477 __be32 iq27_to_iq29;
1478 __be32 iq30_iq31;
1479 __be32 r15_lo;
1480};
1481
1482struct fw_rss_glb_config_cmd {
1483 __be32 op_to_write;
1484 __be32 retval_len16;
1485 union fw_rss_glb_config {
1486 struct fw_rss_glb_config_manual {
1487 __be32 mode_pkd;
1488 __be32 r3;
1489 __be64 r4;
1490 __be64 r5;
1491 } manual;
1492 struct fw_rss_glb_config_basicvirtual {
1493 __be32 mode_pkd;
1494 __be32 synmapen_to_hashtoeplitz;
1495#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN (1U << 8)
1496#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 (1U << 7)
1497#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 (1U << 6)
1498#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 (1U << 5)
1499#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 (1U << 4)
1500#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN (1U << 3)
1501#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN (1U << 2)
1502#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP (1U << 1)
1503#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ (1U << 0)
1504 __be64 r8;
1505 __be64 r9;
1506 } basicvirtual;
1507 } u;
1508};
1509
1510#define FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << 28)
1511#define FW_RSS_GLB_CONFIG_CMD_MODE_GET(x) (((x) >> 28) & 0xf)
1512
1513#define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0
1514#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1
1515
1516struct fw_rss_vi_config_cmd {
1517 __be32 op_to_viid;
1518#define FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << 0)
1519 __be32 retval_len16;
1520 union fw_rss_vi_config {
1521 struct fw_rss_vi_config_manual {
1522 __be64 r3;
1523 __be64 r4;
1524 __be64 r5;
1525 } manual;
1526 struct fw_rss_vi_config_basicvirtual {
1527 __be32 r6;
1528 __be32 defaultq_to_udpen;
1529#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) ((x) << 16)
1530#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(x) (((x) >> 16) & 0x3ff)
1531#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4)
1532#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN (1U << 3)
1533#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2)
1534#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN (1U << 1)
1535#define FW_RSS_VI_CONFIG_CMD_UDPEN (1U << 0)
1536 __be64 r9;
1537 __be64 r10;
1538 } basicvirtual;
1539 } u;
1540};
1541
1542enum fw_error_type {
1543 FW_ERROR_TYPE_EXCEPTION = 0x0,
1544 FW_ERROR_TYPE_HWMODULE = 0x1,
1545 FW_ERROR_TYPE_WR = 0x2,
1546 FW_ERROR_TYPE_ACL = 0x3,
1547};
1548
1549struct fw_error_cmd {
1550 __be32 op_to_type;
1551 __be32 len16_pkd;
1552 union fw_error {
1553 struct fw_error_exception {
1554 __be32 info[6];
1555 } exception;
1556 struct fw_error_hwmodule {
1557 __be32 regaddr;
1558 __be32 regval;
1559 } hwmodule;
1560 struct fw_error_wr {
1561 __be16 cidx;
1562 __be16 pfn_vfn;
1563 __be32 eqid;
1564 u8 wrhdr[16];
1565 } wr;
1566 struct fw_error_acl {
1567 __be16 cidx;
1568 __be16 pfn_vfn;
1569 __be32 eqid;
1570 __be16 mv_pkd;
1571 u8 val[6];
1572 __be64 r4;
1573 } acl;
1574 } u;
1575};
1576
1577struct fw_debug_cmd {
1578 __be32 op_type;
1579#define FW_DEBUG_CMD_TYPE_GET(x) ((x) & 0xff)
1580 __be32 len16_pkd;
1581 union fw_debug {
1582 struct fw_debug_assert {
1583 __be32 fcid;
1584 __be32 line;
1585 __be32 x;
1586 __be32 y;
1587 u8 filename_0_7[8];
1588 u8 filename_8_15[8];
1589 __be64 r3;
1590 } assert;
1591 struct fw_debug_prt {
1592 __be16 dprtstridx;
1593 __be16 r3[3];
1594 __be32 dprtstrparam0;
1595 __be32 dprtstrparam1;
1596 __be32 dprtstrparam2;
1597 __be32 dprtstrparam3;
1598 } prt;
1599 } u;
1600};
1601
1602struct fw_hdr {
1603 u8 ver;
1604 u8 reserved1;
1605 __be16 len512; /* bin length in units of 512-bytes */
1606 __be32 fw_ver; /* firmware version */
1607 __be32 tp_microcode_ver;
1608 u8 intfver_nic;
1609 u8 intfver_vnic;
1610 u8 intfver_ofld;
1611 u8 intfver_ri;
1612 u8 intfver_iscsipdu;
1613 u8 intfver_iscsi;
1614 u8 intfver_fcoe;
1615 u8 reserved2;
1616 __be32 reserved3[27];
1617};
1618
1619#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff)
1620#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff)
1621#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
1622#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff)
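/*
 * Illustrative sketch, not part of the original header: unpacking the
 * packed firmware version from a fw_hdr image header with the _GET macros
 * above.
 */
static inline void fw_hdr_version_example(const struct fw_hdr *hdr,
       unsigned int *major, unsigned int *minor, unsigned int *micro)
{
 u32 ver = be32_to_cpu(hdr->fw_ver);

 *major = FW_HDR_FW_VER_MAJOR_GET(ver);
 *minor = FW_HDR_FW_VER_MINOR_GET(ver);
 *micro = FW_HDR_FW_VER_MICRO_GET(ver);
}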
1623#endif /* _T4FW_INTERFACE_H_ */