Diffstat (limited to 'drivers/net/cxgb3')
-rw-r--r--   drivers/net/cxgb3/Makefile               8
-rw-r--r--   drivers/net/cxgb3/adapter.h            334
-rw-r--r--   drivers/net/cxgb3/ael1002.c            941
-rw-r--r--   drivers/net/cxgb3/aq100x.c             354
-rw-r--r--   drivers/net/cxgb3/common.h             775
-rw-r--r--   drivers/net/cxgb3/cxgb3_ctl_defs.h     189
-rw-r--r--   drivers/net/cxgb3/cxgb3_defs.h         114
-rw-r--r--   drivers/net/cxgb3/cxgb3_ioctl.h        177
-rw-r--r--   drivers/net/cxgb3/cxgb3_main.c        3448
-rw-r--r--   drivers/net/cxgb3/cxgb3_offload.c     1431
-rw-r--r--   drivers/net/cxgb3/cxgb3_offload.h      209
-rw-r--r--   drivers/net/cxgb3/firmware_exports.h   177
-rw-r--r--   drivers/net/cxgb3/l2t.c                454
-rw-r--r--   drivers/net/cxgb3/l2t.h                149
-rw-r--r--   drivers/net/cxgb3/mc5.c                438
-rw-r--r--   drivers/net/cxgb3/regs.h              2598
-rw-r--r--   drivers/net/cxgb3/sge.c               3303
-rw-r--r--   drivers/net/cxgb3/sge_defs.h           255
-rw-r--r--   drivers/net/cxgb3/t3_cpl.h            1495
-rw-r--r--   drivers/net/cxgb3/t3_hw.c             3785
-rw-r--r--   drivers/net/cxgb3/t3cdev.h              70
-rw-r--r--   drivers/net/cxgb3/version.h             44
-rw-r--r--   drivers/net/cxgb3/vsc8211.c            416
-rw-r--r--   drivers/net/cxgb3/xgmac.c              657
24 files changed, 21821 insertions(+), 0 deletions(-)
diff --git a/drivers/net/cxgb3/Makefile b/drivers/net/cxgb3/Makefile
new file mode 100644
index 00000000000..29aff78c782
--- /dev/null
+++ b/drivers/net/cxgb3/Makefile
@@ -0,0 +1,8 @@
1#
2# Chelsio T3 driver
3#
4
5obj-$(CONFIG_CHELSIO_T3) += cxgb3.o
6
7cxgb3-objs := cxgb3_main.o ael1002.o vsc8211.o t3_hw.o mc5.o \
8 xgmac.o sge.o l2t.o cxgb3_offload.o aq100x.o
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
new file mode 100644
index 00000000000..8b395b53733
--- /dev/null
+++ b/drivers/net/cxgb3/adapter.h
@@ -0,0 +1,334 @@
1/*
2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/* This file should not be included directly. Include common.h instead. */
34
35#ifndef __T3_ADAPTER_H__
36#define __T3_ADAPTER_H__
37
38#include <linux/pci.h>
39#include <linux/spinlock.h>
40#include <linux/interrupt.h>
41#include <linux/timer.h>
42#include <linux/cache.h>
43#include <linux/mutex.h>
44#include <linux/bitops.h>
45#include "t3cdev.h"
46#include <asm/io.h>
47
48struct adapter;
49struct sge_qset;
50struct port_info;
51
52enum mac_idx_types {
53 LAN_MAC_IDX = 0,
54 SAN_MAC_IDX,
55
56 MAX_MAC_IDX
57};
58
59struct iscsi_config {
60 __u8 mac_addr[ETH_ALEN];
61 __u32 flags;
62 int (*send)(struct port_info *pi, struct sk_buff **skb);
63 int (*recv)(struct port_info *pi, struct sk_buff *skb);
64};
65
66struct port_info {
67 struct adapter *adapter;
68 struct sge_qset *qs;
69 u8 port_id;
70 u8 nqsets;
71 u8 first_qset;
72 struct cphy phy;
73 struct cmac mac;
74 struct link_config link_config;
75 struct net_device_stats netstats;
76 int activity;
77 __be32 iscsi_ipv4addr;
78 struct iscsi_config iscsic;
79
80 int link_fault; /* link fault was detected */
81};
82
83enum { /* adapter flags */
84 FULL_INIT_DONE = (1 << 0),
85 USING_MSI = (1 << 1),
86 USING_MSIX = (1 << 2),
87 QUEUES_BOUND = (1 << 3),
88 TP_PARITY_INIT = (1 << 4),
89 NAPI_INIT = (1 << 5),
90};
91
92struct fl_pg_chunk {
93 struct page *page;
94 void *va;
95 unsigned int offset;
96 unsigned long *p_cnt;
97 dma_addr_t mapping;
98};
99
100struct rx_desc;
101struct rx_sw_desc;
102
103struct sge_fl { /* SGE per free-buffer list state */
104 unsigned int buf_size; /* size of each Rx buffer */
105 unsigned int credits; /* # of available Rx buffers */
106 unsigned int pend_cred; /* new buffers since last FL DB ring */
107 unsigned int size; /* capacity of free list */
108 unsigned int cidx; /* consumer index */
109 unsigned int pidx; /* producer index */
110 unsigned int gen; /* free list generation */
111 struct fl_pg_chunk pg_chunk;/* page chunk cache */
112 unsigned int use_pages; /* whether FL uses pages or sk_buffs */
113 unsigned int order; /* order of page allocations */
114 unsigned int alloc_size; /* size of allocated buffer */
115 struct rx_desc *desc; /* address of HW Rx descriptor ring */
116 struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
117 dma_addr_t phys_addr; /* physical address of HW ring start */
118 unsigned int cntxt_id; /* SGE context id for the free list */
119 unsigned long empty; /* # of times queue ran out of buffers */
120 unsigned long alloc_failed; /* # of times buffer allocation failed */
121};
122
123/*
124 * Bundle size for grouping offload RX packets for delivery to the stack.
125 * Don't make this too big as we do prefetch on each packet in a bundle.
126 */
127# define RX_BUNDLE_SIZE 8
128
129struct rsp_desc;
130
131struct sge_rspq { /* state for an SGE response queue */
132 unsigned int credits; /* # of pending response credits */
133 unsigned int size; /* capacity of response queue */
134 unsigned int cidx; /* consumer index */
135 unsigned int gen; /* current generation bit */
136 unsigned int polling; /* is the queue serviced through NAPI? */
137 unsigned int holdoff_tmr; /* interrupt holdoff timer in 100ns */
138 unsigned int next_holdoff; /* holdoff time for next interrupt */
139 unsigned int rx_recycle_buf; /* whether recycling occurred
140 within current sop-eop */
141 struct rsp_desc *desc; /* address of HW response ring */
142 dma_addr_t phys_addr; /* physical address of the ring */
143 unsigned int cntxt_id; /* SGE context id for the response q */
144 spinlock_t lock; /* guards response processing */
145 struct sk_buff_head rx_queue; /* offload packet receive queue */
146 struct sk_buff *pg_skb; /* used to build frag list in napi handler */
147
148 unsigned long offload_pkts;
149 unsigned long offload_bundles;
150 unsigned long eth_pkts; /* # of ethernet packets */
151 unsigned long pure_rsps; /* # of pure (non-data) responses */
152 unsigned long imm_data; /* responses with immediate data */
153 unsigned long rx_drops; /* # of packets dropped due to no mem */
154 unsigned long async_notif; /* # of asynchronous notification events */
155 unsigned long empty; /* # of times queue ran out of credits */
156 unsigned long nomem; /* # of responses deferred due to no mem */
157 unsigned long unhandled_irqs; /* # of spurious intrs */
158 unsigned long starved;
159 unsigned long restarted;
160};
161
162struct tx_desc;
163struct tx_sw_desc;
164
165struct sge_txq { /* state for an SGE Tx queue */
166 unsigned long flags; /* HW DMA fetch status */
167 unsigned int in_use; /* # of in-use Tx descriptors */
168 unsigned int size; /* # of descriptors */
169 unsigned int processed; /* total # of descs HW has processed */
170 unsigned int cleaned; /* total # of descs SW has reclaimed */
171 unsigned int stop_thres; /* SW TX queue suspend threshold */
172 unsigned int cidx; /* consumer index */
173 unsigned int pidx; /* producer index */
174 unsigned int gen; /* current value of generation bit */
175 unsigned int unacked; /* Tx descriptors used since last COMPL */
176 struct tx_desc *desc; /* address of HW Tx descriptor ring */
177 struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
178 spinlock_t lock; /* guards enqueueing of new packets */
179 unsigned int token; /* WR token */
180 dma_addr_t phys_addr; /* physical address of the ring */
181 struct sk_buff_head sendq; /* List of backpressured offload packets */
182 struct tasklet_struct qresume_tsk; /* restarts the queue */
183 unsigned int cntxt_id; /* SGE context id for the Tx q */
184 unsigned long stops; /* # of times q has been stopped */
185 unsigned long restarts; /* # of queue restarts */
186};
187
188enum { /* per port SGE statistics */
189 SGE_PSTAT_TSO, /* # of TSO requests */
190 SGE_PSTAT_RX_CSUM_GOOD, /* # of successful RX csum offloads */
191 SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
192 SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
193 SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
194
195 SGE_PSTAT_MAX /* must be last */
196};
197
198struct napi_gro_fraginfo;
199
200struct sge_qset { /* an SGE queue set */
201 struct adapter *adap;
202 struct napi_struct napi;
203 struct sge_rspq rspq;
204 struct sge_fl fl[SGE_RXQ_PER_SET];
205 struct sge_txq txq[SGE_TXQ_PER_SET];
206 int nomem;
207 void *lro_va;
208 struct net_device *netdev;
209 struct netdev_queue *tx_q; /* associated netdev TX queue */
210 unsigned long txq_stopped; /* which Tx queues are stopped */
211 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
212 struct timer_list rx_reclaim_timer; /* reclaims RX buffers */
213 unsigned long port_stats[SGE_PSTAT_MAX];
214} ____cacheline_aligned;
215
216struct sge {
217 struct sge_qset qs[SGE_QSETS];
218 spinlock_t reg_lock; /* guards non-atomic SGE registers (eg context) */
219};
220
221struct adapter {
222 struct t3cdev tdev;
223 struct list_head adapter_list;
224 void __iomem *regs;
225 struct pci_dev *pdev;
226 unsigned long registered_device_map;
227 unsigned long open_device_map;
228 unsigned long flags;
229
230 const char *name;
231 int msg_enable;
232 unsigned int mmio_len;
233
234 struct adapter_params params;
235 unsigned int slow_intr_mask;
236 unsigned long irq_stats[IRQ_NUM_STATS];
237
238 int msix_nvectors;
239 struct {
240 unsigned short vec;
241 char desc[22];
242 } msix_info[SGE_QSETS + 1];
243
244 /* T3 modules */
245 struct sge sge;
246 struct mc7 pmrx;
247 struct mc7 pmtx;
248 struct mc7 cm;
249 struct mc5 mc5;
250
251 struct net_device *port[MAX_NPORTS];
252 unsigned int check_task_cnt;
253 struct delayed_work adap_check_task;
254 struct work_struct ext_intr_handler_task;
255 struct work_struct fatal_error_handler_task;
256 struct work_struct link_fault_handler_task;
257
258 struct work_struct db_full_task;
259 struct work_struct db_empty_task;
260 struct work_struct db_drop_task;
261
262 struct dentry *debugfs_root;
263
264 struct mutex mdio_lock;
265 spinlock_t stats_lock;
266 spinlock_t work_lock;
267
268 struct sk_buff *nofail_skb;
269};
270
271static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr)
272{
273 u32 val = readl(adapter->regs + reg_addr);
274
275 CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr, val);
276 return val;
277}
278
279static inline void t3_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
280{
281 CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr, val);
282 writel(val, adapter->regs + reg_addr);
283}
284
285static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
286{
287 return netdev_priv(adap->port[idx]);
288}
289
290static inline int phy2portid(struct cphy *phy)
291{
292 struct adapter *adap = phy->adapter;
293 struct port_info *port0 = adap2pinfo(adap, 0);
294
295 return &port0->phy == phy ? 0 : 1;
296}
297
298#define OFFLOAD_DEVMAP_BIT 15
299
300#define tdev2adap(d) container_of(d, struct adapter, tdev)
301
302static inline int offload_running(struct adapter *adapter)
303{
304 return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
305}
306
307int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb);
308
309void t3_os_ext_intr_handler(struct adapter *adapter);
310void t3_os_link_changed(struct adapter *adapter, int port_id, int link_status,
311 int speed, int duplex, int fc);
312void t3_os_phymod_changed(struct adapter *adap, int port_id);
313void t3_os_link_fault(struct adapter *adapter, int port_id, int state);
314void t3_os_link_fault_handler(struct adapter *adapter, int port_id);
315
316void t3_sge_start(struct adapter *adap);
317void t3_sge_stop(struct adapter *adap);
318void t3_start_sge_timers(struct adapter *adap);
319void t3_stop_sge_timers(struct adapter *adap);
320void t3_free_sge_resources(struct adapter *adap);
321void t3_sge_err_intr_handler(struct adapter *adapter);
322irq_handler_t t3_intr_handler(struct adapter *adap, int polling);
323netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev);
324int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
325void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
326int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
327 int irq_vec_idx, const struct qset_params *p,
328 int ntxq, struct net_device *dev,
329 struct netdev_queue *netdevq);
330extern struct workqueue_struct *cxgb3_wq;
331
332int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size);
333
334#endif /* __T3_ADAPTER_H__ */
diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c
new file mode 100644
index 00000000000..2028da95afa
--- /dev/null
+++ b/drivers/net/cxgb3/ael1002.c
@@ -0,0 +1,941 @@
1/*
2 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33#include "regs.h"
34
35enum {
36 AEL100X_TX_CONFIG1 = 0xc002,
37 AEL1002_PWR_DOWN_HI = 0xc011,
38 AEL1002_PWR_DOWN_LO = 0xc012,
39 AEL1002_XFI_EQL = 0xc015,
40 AEL1002_LB_EN = 0xc017,
41 AEL_OPT_SETTINGS = 0xc017,
42 AEL_I2C_CTRL = 0xc30a,
43 AEL_I2C_DATA = 0xc30b,
44 AEL_I2C_STAT = 0xc30c,
45 AEL2005_GPIO_CTRL = 0xc214,
46 AEL2005_GPIO_STAT = 0xc215,
47
48 AEL2020_GPIO_INTR = 0xc103, /* Latch High (LH) */
49 AEL2020_GPIO_CTRL = 0xc108, /* Store Clear (SC) */
50 AEL2020_GPIO_STAT = 0xc10c, /* Read Only (RO) */
51 AEL2020_GPIO_CFG = 0xc110, /* Read Write (RW) */
52
53 AEL2020_GPIO_SDA = 0, /* IN: i2c serial data */
54 AEL2020_GPIO_MODDET = 1, /* IN: Module Detect */
55 AEL2020_GPIO_0 = 3, /* IN: unassigned */
56 AEL2020_GPIO_1 = 2, /* OUT: unassigned */
57 AEL2020_GPIO_LSTAT = AEL2020_GPIO_1, /* wired to link status LED */
58};
59
60enum { edc_none, edc_sr, edc_twinax };
61
62/* PHY module I2C device address */
63enum {
64 MODULE_DEV_ADDR = 0xa0,
65 SFF_DEV_ADDR = 0xa2,
66};
67
68/* PHY transceiver type */
69enum {
70 phy_transtype_unknown = 0,
71 phy_transtype_sfp = 3,
72 phy_transtype_xfp = 6,
73};
74
75#define AEL2005_MODDET_IRQ 4
76
77struct reg_val {
78 unsigned short mmd_addr;
79 unsigned short reg_addr;
80 unsigned short clear_bits;
81 unsigned short set_bits;
82};
83
84static int set_phy_regs(struct cphy *phy, const struct reg_val *rv)
85{
86 int err;
87
88 for (err = 0; rv->mmd_addr && !err; rv++) {
89 if (rv->clear_bits == 0xffff)
90 err = t3_mdio_write(phy, rv->mmd_addr, rv->reg_addr,
91 rv->set_bits);
92 else
93 err = t3_mdio_change_bits(phy, rv->mmd_addr,
94 rv->reg_addr, rv->clear_bits,
95 rv->set_bits);
96 }
97 return err;
98}
99
100static void ael100x_txon(struct cphy *phy)
101{
102 int tx_on_gpio =
103 phy->mdio.prtad == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL;
104
105 msleep(100);
106 t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, 0, tx_on_gpio);
107 msleep(30);
108}
109
110/*
111 * Read an 8-bit word from a device attached to the PHY's i2c bus.
112 */
113static int ael_i2c_rd(struct cphy *phy, int dev_addr, int word_addr)
114{
115 int i, err;
116 unsigned int stat, data;
117
118 err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL_I2C_CTRL,
119 (dev_addr << 8) | (1 << 8) | word_addr);
120 if (err)
121 return err;
122
123 for (i = 0; i < 200; i++) {
124 msleep(1);
125 err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_STAT, &stat);
126 if (err)
127 return err;
128 if ((stat & 3) == 1) {
129 err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_DATA,
130 &data);
131 if (err)
132 return err;
133 return data >> 8;
134 }
135 }
136 CH_WARN(phy->adapter, "PHY %u i2c read of dev.addr %#x.%#x timed out\n",
137 phy->mdio.prtad, dev_addr, word_addr);
138 return -ETIMEDOUT;
139}
140
141static int ael1002_power_down(struct cphy *phy, int enable)
142{
143 int err;
144
145 err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_TXDIS, !!enable);
146 if (!err)
147 err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
148 MDIO_MMD_PMAPMD, MDIO_CTRL1,
149 MDIO_CTRL1_LPOWER, enable);
150 return err;
151}
152
153static int ael1002_reset(struct cphy *phy, int wait)
154{
155 int err;
156
157 if ((err = ael1002_power_down(phy, 0)) ||
158 (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL100X_TX_CONFIG1, 1)) ||
159 (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_PWR_DOWN_HI, 0)) ||
160 (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_PWR_DOWN_LO, 0)) ||
161 (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_XFI_EQL, 0x18)) ||
162 (err = t3_mdio_change_bits(phy, MDIO_MMD_PMAPMD, AEL1002_LB_EN,
163 0, 1 << 5)))
164 return err;
165 return 0;
166}
167
168static int ael1002_intr_noop(struct cphy *phy)
169{
170 return 0;
171}
172
173/*
174 * Get link status for a 10GBASE-R device.
175 */
176static int get_link_status_r(struct cphy *phy, int *link_ok, int *speed,
177 int *duplex, int *fc)
178{
179 if (link_ok) {
180 unsigned int stat0, stat1, stat2;
181 int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD,
182 MDIO_PMA_RXDET, &stat0);
183
184 if (!err)
185 err = t3_mdio_read(phy, MDIO_MMD_PCS,
186 MDIO_PCS_10GBRT_STAT1, &stat1);
187 if (!err)
188 err = t3_mdio_read(phy, MDIO_MMD_PHYXS,
189 MDIO_PHYXS_LNSTAT, &stat2);
190 if (err)
191 return err;
192 *link_ok = (stat0 & stat1 & (stat2 >> 12)) & 1;
193 }
194 if (speed)
195 *speed = SPEED_10000;
196 if (duplex)
197 *duplex = DUPLEX_FULL;
198 return 0;
199}
200
201static struct cphy_ops ael1002_ops = {
202 .reset = ael1002_reset,
203 .intr_enable = ael1002_intr_noop,
204 .intr_disable = ael1002_intr_noop,
205 .intr_clear = ael1002_intr_noop,
206 .intr_handler = ael1002_intr_noop,
207 .get_link_status = get_link_status_r,
208 .power_down = ael1002_power_down,
209 .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
210};
211
212int t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
213 int phy_addr, const struct mdio_ops *mdio_ops)
214{
215 cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops,
216 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE,
217 "10GBASE-R");
218 ael100x_txon(phy);
219 return 0;
220}
221
222static int ael1006_reset(struct cphy *phy, int wait)
223{
224 return t3_phy_reset(phy, MDIO_MMD_PMAPMD, wait);
225}
226
227static struct cphy_ops ael1006_ops = {
228 .reset = ael1006_reset,
229 .intr_enable = t3_phy_lasi_intr_enable,
230 .intr_disable = t3_phy_lasi_intr_disable,
231 .intr_clear = t3_phy_lasi_intr_clear,
232 .intr_handler = t3_phy_lasi_intr_handler,
233 .get_link_status = get_link_status_r,
234 .power_down = ael1002_power_down,
235 .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
236};
237
238int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
239 int phy_addr, const struct mdio_ops *mdio_ops)
240{
241 cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops,
242 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE,
243 "10GBASE-SR");
244 ael100x_txon(phy);
245 return 0;
246}
247
248/*
249 * Decode our module type.
250 */
251static int ael2xxx_get_module_type(struct cphy *phy, int delay_ms)
252{
253 int v;
254
255 if (delay_ms)
256 msleep(delay_ms);
257
258 /* see SFF-8472 for below */
259 v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 3);
260 if (v < 0)
261 return v;
262
263 if (v == 0x10)
264 return phy_modtype_sr;
265 if (v == 0x20)
266 return phy_modtype_lr;
267 if (v == 0x40)
268 return phy_modtype_lrm;
269
270 v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 6);
271 if (v < 0)
272 return v;
273 if (v != 4)
274 goto unknown;
275
276 v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 10);
277 if (v < 0)
278 return v;
279
280 if (v & 0x80) {
281 v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 0x12);
282 if (v < 0)
283 return v;
284 return v > 10 ? phy_modtype_twinax_long : phy_modtype_twinax;
285 }
286unknown:
287 return phy_modtype_unknown;
288}
289
290/*
291 * Code to support the Aeluros/NetLogic 2005 10Gb PHY.
292 */
293static int ael2005_setup_sr_edc(struct cphy *phy)
294{
295 static const struct reg_val regs[] = {
296 { MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x181 },
297 { MDIO_MMD_PMAPMD, 0xc010, 0xffff, 0x448a },
298 { MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5200 },
299 { 0, 0, 0, 0 }
300 };
301
302 int i, err;
303
304 err = set_phy_regs(phy, regs);
305 if (err)
306 return err;
307
308 msleep(50);
309
310 if (phy->priv != edc_sr)
311 err = t3_get_edc_fw(phy, EDC_OPT_AEL2005,
312 EDC_OPT_AEL2005_SIZE);
313 if (err)
314 return err;
315
316 for (i = 0; i < EDC_OPT_AEL2005_SIZE / sizeof(u16) && !err; i += 2)
317 err = t3_mdio_write(phy, MDIO_MMD_PMAPMD,
318 phy->phy_cache[i],
319 phy->phy_cache[i + 1]);
320 if (!err)
321 phy->priv = edc_sr;
322 return err;
323}
324
325static int ael2005_setup_twinax_edc(struct cphy *phy, int modtype)
326{
327 static const struct reg_val regs[] = {
328 { MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5a00 },
329 { 0, 0, 0, 0 }
330 };
331 static const struct reg_val preemphasis[] = {
332 { MDIO_MMD_PMAPMD, 0xc014, 0xffff, 0xfe16 },
333 { MDIO_MMD_PMAPMD, 0xc015, 0xffff, 0xa000 },
334 { 0, 0, 0, 0 }
335 };
336 int i, err;
337
338 err = set_phy_regs(phy, regs);
339 if (!err && modtype == phy_modtype_twinax_long)
340 err = set_phy_regs(phy, preemphasis);
341 if (err)
342 return err;
343
344 msleep(50);
345
346 if (phy->priv != edc_twinax)
347 err = t3_get_edc_fw(phy, EDC_TWX_AEL2005,
348 EDC_TWX_AEL2005_SIZE);
349 if (err)
350 return err;
351
352 for (i = 0; i < EDC_TWX_AEL2005_SIZE / sizeof(u16) && !err; i += 2)
353 err = t3_mdio_write(phy, MDIO_MMD_PMAPMD,
354 phy->phy_cache[i],
355 phy->phy_cache[i + 1]);
356 if (!err)
357 phy->priv = edc_twinax;
358 return err;
359}
360
361static int ael2005_get_module_type(struct cphy *phy, int delay_ms)
362{
363 int v;
364 unsigned int stat;
365
366 v = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, &stat);
367 if (v)
368 return v;
369
370 if (stat & (1 << 8)) /* module absent */
371 return phy_modtype_none;
372
373 return ael2xxx_get_module_type(phy, delay_ms);
374}
375
376static int ael2005_intr_enable(struct cphy *phy)
377{
378 int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0x200);
379 return err ? err : t3_phy_lasi_intr_enable(phy);
380}
381
382static int ael2005_intr_disable(struct cphy *phy)
383{
384 int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0x100);
385 return err ? err : t3_phy_lasi_intr_disable(phy);
386}
387
388static int ael2005_intr_clear(struct cphy *phy)
389{
390 int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0xd00);
391 return err ? err : t3_phy_lasi_intr_clear(phy);
392}
393
394static int ael2005_reset(struct cphy *phy, int wait)
395{
396 static const struct reg_val regs0[] = {
397 { MDIO_MMD_PMAPMD, 0xc001, 0, 1 << 5 },
398 { MDIO_MMD_PMAPMD, 0xc017, 0, 1 << 5 },
399 { MDIO_MMD_PMAPMD, 0xc013, 0xffff, 0xf341 },
400 { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8000 },
401 { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8100 },
402 { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8000 },
403 { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0 },
404 { 0, 0, 0, 0 }
405 };
406 static const struct reg_val regs1[] = {
407 { MDIO_MMD_PMAPMD, 0xca00, 0xffff, 0x0080 },
408 { MDIO_MMD_PMAPMD, 0xca12, 0xffff, 0 },
409 { 0, 0, 0, 0 }
410 };
411
412 int err;
413 unsigned int lasi_ctrl;
414
415 err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
416 &lasi_ctrl);
417 if (err)
418 return err;
419
420 err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 0);
421 if (err)
422 return err;
423
424 msleep(125);
425 phy->priv = edc_none;
426 err = set_phy_regs(phy, regs0);
427 if (err)
428 return err;
429
430 msleep(50);
431
432 err = ael2005_get_module_type(phy, 0);
433 if (err < 0)
434 return err;
435 phy->modtype = err;
436
437 if (err == phy_modtype_twinax || err == phy_modtype_twinax_long)
438 err = ael2005_setup_twinax_edc(phy, err);
439 else
440 err = ael2005_setup_sr_edc(phy);
441 if (err)
442 return err;
443
444 err = set_phy_regs(phy, regs1);
445 if (err)
446 return err;
447
448 /* reset wipes out interrupts, reenable them if they were on */
449 if (lasi_ctrl & 1)
450 err = ael2005_intr_enable(phy);
451 return err;
452}
453
454static int ael2005_intr_handler(struct cphy *phy)
455{
456 unsigned int stat;
457 int ret, edc_needed, cause = 0;
458
459 ret = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_STAT, &stat);
460 if (ret)
461 return ret;
462
463 if (stat & AEL2005_MODDET_IRQ) {
464 ret = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL,
465 0xd00);
466 if (ret)
467 return ret;
468
469 /* modules have max 300 ms init time after hot plug */
470 ret = ael2005_get_module_type(phy, 300);
471 if (ret < 0)
472 return ret;
473
474 phy->modtype = ret;
475 if (ret == phy_modtype_none)
476 edc_needed = phy->priv; /* on unplug retain EDC */
477 else if (ret == phy_modtype_twinax ||
478 ret == phy_modtype_twinax_long)
479 edc_needed = edc_twinax;
480 else
481 edc_needed = edc_sr;
482
483 if (edc_needed != phy->priv) {
484 ret = ael2005_reset(phy, 0);
485 return ret ? ret : cphy_cause_module_change;
486 }
487 cause = cphy_cause_module_change;
488 }
489
490 ret = t3_phy_lasi_intr_handler(phy);
491 if (ret < 0)
492 return ret;
493
494 ret |= cause;
495 return ret ? ret : cphy_cause_link_change;
496}
497
498static struct cphy_ops ael2005_ops = {
499 .reset = ael2005_reset,
500 .intr_enable = ael2005_intr_enable,
501 .intr_disable = ael2005_intr_disable,
502 .intr_clear = ael2005_intr_clear,
503 .intr_handler = ael2005_intr_handler,
504 .get_link_status = get_link_status_r,
505 .power_down = ael1002_power_down,
506 .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
507};
508
509int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
510 int phy_addr, const struct mdio_ops *mdio_ops)
511{
512 cphy_init(phy, adapter, phy_addr, &ael2005_ops, mdio_ops,
513 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE |
514 SUPPORTED_IRQ, "10GBASE-R");
515 msleep(125);
516 return t3_mdio_change_bits(phy, MDIO_MMD_PMAPMD, AEL_OPT_SETTINGS, 0,
517 1 << 5);
518}
519
520/*
521 * Setup EDC and other parameters for operation with an optical module.
522 */
523static int ael2020_setup_sr_edc(struct cphy *phy)
524{
525 static const struct reg_val regs[] = {
526 /* set CDR offset to 10 */
527 { MDIO_MMD_PMAPMD, 0xcc01, 0xffff, 0x488a },
528
529 /* adjust 10G RX bias current */
530 { MDIO_MMD_PMAPMD, 0xcb1b, 0xffff, 0x0200 },
531 { MDIO_MMD_PMAPMD, 0xcb1c, 0xffff, 0x00f0 },
532 { MDIO_MMD_PMAPMD, 0xcc06, 0xffff, 0x00e0 },
533
534 /* end */
535 { 0, 0, 0, 0 }
536 };
537 int err;
538
539 err = set_phy_regs(phy, regs);
540 msleep(50);
541 if (err)
542 return err;
543
544 phy->priv = edc_sr;
545 return 0;
546}
547
548/*
549 * Setup EDC and other parameters for operation with a TWINAX module.
550 */
551static int ael2020_setup_twinax_edc(struct cphy *phy, int modtype)
552{
553 /* set uC to 40MHz */
554 static const struct reg_val uCclock40MHz[] = {
555 { MDIO_MMD_PMAPMD, 0xff28, 0xffff, 0x4001 },
556 { MDIO_MMD_PMAPMD, 0xff2a, 0xffff, 0x0002 },
557 { 0, 0, 0, 0 }
558 };
559
560 /* activate uC clock */
561 static const struct reg_val uCclockActivate[] = {
562 { MDIO_MMD_PMAPMD, 0xd000, 0xffff, 0x5200 },
563 { 0, 0, 0, 0 }
564 };
565
566 /* set PC to start of SRAM and activate uC */
567 static const struct reg_val uCactivate[] = {
568 { MDIO_MMD_PMAPMD, 0xd080, 0xffff, 0x0100 },
569 { MDIO_MMD_PMAPMD, 0xd092, 0xffff, 0x0000 },
570 { 0, 0, 0, 0 }
571 };
572 int i, err;
573
574 /* set uC clock and activate it */
575 err = set_phy_regs(phy, uCclock40MHz);
576 msleep(500);
577 if (err)
578 return err;
579 err = set_phy_regs(phy, uCclockActivate);
580 msleep(500);
581 if (err)
582 return err;
583
584 if (phy->priv != edc_twinax)
585 err = t3_get_edc_fw(phy, EDC_TWX_AEL2020,
586 EDC_TWX_AEL2020_SIZE);
587 if (err)
588 return err;
589
590 for (i = 0; i < EDC_TWX_AEL2020_SIZE / sizeof(u16) && !err; i += 2)
591 err = t3_mdio_write(phy, MDIO_MMD_PMAPMD,
592 phy->phy_cache[i],
593 phy->phy_cache[i + 1]);
594 /* activate uC */
595 err = set_phy_regs(phy, uCactivate);
596 if (!err)
597 phy->priv = edc_twinax;
598 return err;
599}
600
601/*
602 * Return Module Type.
603 */
604static int ael2020_get_module_type(struct cphy *phy, int delay_ms)
605{
606 int v;
607 unsigned int stat;
608
609 v = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_STAT, &stat);
610 if (v)
611 return v;
612
613 if (stat & (0x1 << (AEL2020_GPIO_MODDET*4))) {
614 /* module absent */
615 return phy_modtype_none;
616 }
617
618 return ael2xxx_get_module_type(phy, delay_ms);
619}
620
621/*
622 * Enable PHY interrupts. We enable "Module Detection" interrupts (on any
623 * state transition) and then generic Link Alarm Status Interrupt (LASI).
624 */
625static int ael2020_intr_enable(struct cphy *phy)
626{
627 static const struct reg_val regs[] = {
628 /* output Module's Loss Of Signal (LOS) to LED */
629 { MDIO_MMD_PMAPMD, AEL2020_GPIO_CFG+AEL2020_GPIO_LSTAT,
630 0xffff, 0x4 },
631 { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
632 0xffff, 0x8 << (AEL2020_GPIO_LSTAT*4) },
633
634 /* enable module detect status change interrupts */
635 { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
636 0xffff, 0x2 << (AEL2020_GPIO_MODDET*4) },
637
638 /* end */
639 { 0, 0, 0, 0 }
640 };
641 int err, link_ok = 0;
642
643 /* set up "link status" LED and enable module change interrupts */
644 err = set_phy_regs(phy, regs);
645 if (err)
646 return err;
647
648 err = get_link_status_r(phy, &link_ok, NULL, NULL, NULL);
649 if (err)
650 return err;
651 if (link_ok)
652 t3_link_changed(phy->adapter,
653 phy2portid(phy));
654
655 err = t3_phy_lasi_intr_enable(phy);
656 if (err)
657 return err;
658
659 return 0;
660}
661
662/*
663 * Disable PHY interrupts. The mirror of the above ...
664 */
665static int ael2020_intr_disable(struct cphy *phy)
666{
667 static const struct reg_val regs[] = {
668 /* reset "link status" LED to "off" */
669 { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
670 0xffff, 0xb << (AEL2020_GPIO_LSTAT*4) },
671
672 /* disable module detect status change interrupts */
673 { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
674 0xffff, 0x1 << (AEL2020_GPIO_MODDET*4) },
675
676 /* end */
677 { 0, 0, 0, 0 }
678 };
679 int err;
680
681 /* turn off "link status" LED and disable module change interrupts */
682 err = set_phy_regs(phy, regs);
683 if (err)
684 return err;
685
686 return t3_phy_lasi_intr_disable(phy);
687}
688
689/*
690 * Clear PHY interrupt state.
691 */
692static int ael2020_intr_clear(struct cphy *phy)
693{
694 /*
695 * The GPIO Interrupt register on the AEL2020 is a "Latching High"
696 * (LH) register which is cleared to the current state when it's read.
697 * Thus, we simply read the register and discard the result.
698 */
699 unsigned int stat;
700 int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat);
701 return err ? err : t3_phy_lasi_intr_clear(phy);
702}
703
704static const struct reg_val ael2020_reset_regs[] = {
705 /* Erratum #2: CDRLOL asserted, causing PMA link down status */
706 { MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x3101 },
707
708 /* force XAUI to send LF when RX_LOS is asserted */
709 { MDIO_MMD_PMAPMD, 0xcd40, 0xffff, 0x0001 },
710
711 /* allow writes to transceiver module EEPROM on i2c bus */
712 { MDIO_MMD_PMAPMD, 0xff02, 0xffff, 0x0023 },
713 { MDIO_MMD_PMAPMD, 0xff03, 0xffff, 0x0000 },
714 { MDIO_MMD_PMAPMD, 0xff04, 0xffff, 0x0000 },
715
716 /* end */
717 { 0, 0, 0, 0 }
718};
719/*
720 * Reset the PHY and put it into a canonical operating state.
721 */
722static int ael2020_reset(struct cphy *phy, int wait)
723{
724 int err;
725 unsigned int lasi_ctrl;
726
727 /* grab current interrupt state */
728 err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
729 &lasi_ctrl);
730 if (err)
731 return err;
732
733 err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 125);
734 if (err)
735 return err;
736 msleep(100);
737
738 /* basic initialization for all module types */
739 phy->priv = edc_none;
740 err = set_phy_regs(phy, ael2020_reset_regs);
741 if (err)
742 return err;
743
744 /* determine module type and perform appropriate initialization */
745 err = ael2020_get_module_type(phy, 0);
746 if (err < 0)
747 return err;
748 phy->modtype = (u8)err;
749 if (err == phy_modtype_twinax || err == phy_modtype_twinax_long)
750 err = ael2020_setup_twinax_edc(phy, err);
751 else
752 err = ael2020_setup_sr_edc(phy);
753 if (err)
754 return err;
755
756 /* reset wipes out interrupts, reenable them if they were on */
757 if (lasi_ctrl & 1)
758 err = ael2005_intr_enable(phy);
759 return err;
760}
761
762/*
763 * Handle a PHY interrupt.
764 */
765static int ael2020_intr_handler(struct cphy *phy)
766{
767 unsigned int stat;
768 int ret, edc_needed, cause = 0;
769
770 ret = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat);
771 if (ret)
772 return ret;
773
774 if (stat & (0x1 << AEL2020_GPIO_MODDET)) {
775 /* modules have max 300 ms init time after hot plug */
776 ret = ael2020_get_module_type(phy, 300);
777 if (ret < 0)
778 return ret;
779
780 phy->modtype = (u8)ret;
781 if (ret == phy_modtype_none)
782 edc_needed = phy->priv; /* on unplug retain EDC */
783 else if (ret == phy_modtype_twinax ||
784 ret == phy_modtype_twinax_long)
785 edc_needed = edc_twinax;
786 else
787 edc_needed = edc_sr;
788
789 if (edc_needed != phy->priv) {
790 ret = ael2020_reset(phy, 0);
791 return ret ? ret : cphy_cause_module_change;
792 }
793 cause = cphy_cause_module_change;
794 }
795
796 ret = t3_phy_lasi_intr_handler(phy);
797 if (ret < 0)
798 return ret;
799
800 ret |= cause;
801 return ret ? ret : cphy_cause_link_change;
802}
803
804static struct cphy_ops ael2020_ops = {
805 .reset = ael2020_reset,
806 .intr_enable = ael2020_intr_enable,
807 .intr_disable = ael2020_intr_disable,
808 .intr_clear = ael2020_intr_clear,
809 .intr_handler = ael2020_intr_handler,
810 .get_link_status = get_link_status_r,
811 .power_down = ael1002_power_down,
812 .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
813};
814
815int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
816 const struct mdio_ops *mdio_ops)
817{
818 int err;
819
820 cphy_init(phy, adapter, phy_addr, &ael2020_ops, mdio_ops,
821 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE |
822 SUPPORTED_IRQ, "10GBASE-R");
823 msleep(125);
824
825 err = set_phy_regs(phy, ael2020_reset_regs);
826 if (err)
827 return err;
828 return 0;
829}
830
831/*
832 * Get link status for a 10GBASE-X device.
833 */
834static int get_link_status_x(struct cphy *phy, int *link_ok, int *speed,
835 int *duplex, int *fc)
836{
837 if (link_ok) {
838 unsigned int stat0, stat1, stat2;
839 int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD,
840 MDIO_PMA_RXDET, &stat0);
841
842 if (!err)
843 err = t3_mdio_read(phy, MDIO_MMD_PCS,
844 MDIO_PCS_10GBX_STAT1, &stat1);
845 if (!err)
846 err = t3_mdio_read(phy, MDIO_MMD_PHYXS,
847 MDIO_PHYXS_LNSTAT, &stat2);
848 if (err)
849 return err;
850 *link_ok = (stat0 & (stat1 >> 12) & (stat2 >> 12)) & 1;
851 }
852 if (speed)
853 *speed = SPEED_10000;
854 if (duplex)
855 *duplex = DUPLEX_FULL;
856 return 0;
857}
858
859static struct cphy_ops qt2045_ops = {
860 .reset = ael1006_reset,
861 .intr_enable = t3_phy_lasi_intr_enable,
862 .intr_disable = t3_phy_lasi_intr_disable,
863 .intr_clear = t3_phy_lasi_intr_clear,
864 .intr_handler = t3_phy_lasi_intr_handler,
865 .get_link_status = get_link_status_x,
866 .power_down = ael1002_power_down,
867 .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
868};
869
870int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter,
871 int phy_addr, const struct mdio_ops *mdio_ops)
872{
873 unsigned int stat;
874
875 cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops,
876 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
877 "10GBASE-CX4");
878
879 /*
880 * Some cards where the PHY is supposed to be at address 0 actually
881 * have it at 1.
882 */
883 if (!phy_addr &&
884 !t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &stat) &&
885 stat == 0xffff)
886 phy->mdio.prtad = 1;
887 return 0;
888}
889
890static int xaui_direct_reset(struct cphy *phy, int wait)
891{
892 return 0;
893}
894
895static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok,
896 int *speed, int *duplex, int *fc)
897{
898 if (link_ok) {
899 unsigned int status;
900 int prtad = phy->mdio.prtad;
901
902 status = t3_read_reg(phy->adapter,
903 XGM_REG(A_XGM_SERDES_STAT0, prtad)) |
904 t3_read_reg(phy->adapter,
905 XGM_REG(A_XGM_SERDES_STAT1, prtad)) |
906 t3_read_reg(phy->adapter,
907 XGM_REG(A_XGM_SERDES_STAT2, prtad)) |
908 t3_read_reg(phy->adapter,
909 XGM_REG(A_XGM_SERDES_STAT3, prtad));
910 *link_ok = !(status & F_LOWSIG0);
911 }
912 if (speed)
913 *speed = SPEED_10000;
914 if (duplex)
915 *duplex = DUPLEX_FULL;
916 return 0;
917}
918
919static int xaui_direct_power_down(struct cphy *phy, int enable)
920{
921 return 0;
922}
923
924static struct cphy_ops xaui_direct_ops = {
925 .reset = xaui_direct_reset,
926 .intr_enable = ael1002_intr_noop,
927 .intr_disable = ael1002_intr_noop,
928 .intr_clear = ael1002_intr_noop,
929 .intr_handler = ael1002_intr_noop,
930 .get_link_status = xaui_direct_get_link_status,
931 .power_down = xaui_direct_power_down,
932};
933
934int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
935 int phy_addr, const struct mdio_ops *mdio_ops)
936{
937 cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops,
938 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
939 "10GBASE-CX4");
940 return 0;
941}
diff --git a/drivers/net/cxgb3/aq100x.c b/drivers/net/cxgb3/aq100x.c
new file mode 100644
index 00000000000..341b7ef1508
--- /dev/null
+++ b/drivers/net/cxgb3/aq100x.c
@@ -0,0 +1,354 @@
1/*
2 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include "common.h"
34#include "regs.h"
35
36enum {
37 /* MDIO_DEV_PMA_PMD registers */
38 AQ_LINK_STAT = 0xe800,
39 AQ_IMASK_PMA = 0xf000,
40
41 /* MDIO_DEV_XGXS registers */
42 AQ_XAUI_RX_CFG = 0xc400,
43 AQ_XAUI_TX_CFG = 0xe400,
44
45 /* MDIO_DEV_ANEG registers */
46 AQ_1G_CTRL = 0xc400,
47 AQ_ANEG_STAT = 0xc800,
48
49 /* MDIO_DEV_VEND1 registers */
50 AQ_FW_VERSION = 0x0020,
51 AQ_IFLAG_GLOBAL = 0xfc00,
52 AQ_IMASK_GLOBAL = 0xff00,
53};
54
55enum {
56 IMASK_PMA = 1 << 2,
57 IMASK_GLOBAL = 1 << 15,
58 ADV_1G_FULL = 1 << 15,
59 ADV_1G_HALF = 1 << 14,
60 ADV_10G_FULL = 1 << 12,
61 AQ_RESET = (1 << 14) | (1 << 15),
62 AQ_LOWPOWER = 1 << 12,
63};
64
65static int aq100x_reset(struct cphy *phy, int wait)
66{
67 /*
68 * Ignore the caller specified wait time; always wait for the reset to
69 * complete. Can take up to 3s.
70 */
71 int err = t3_phy_reset(phy, MDIO_MMD_VEND1, 3000);
72
73 if (err)
74 CH_WARN(phy->adapter, "PHY%d: reset failed (0x%x).\n",
75 phy->mdio.prtad, err);
76
77 return err;
78}
79
80static int aq100x_intr_enable(struct cphy *phy)
81{
82 int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AQ_IMASK_PMA, IMASK_PMA);
83 if (err)
84 return err;
85
86 err = t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, IMASK_GLOBAL);
87 return err;
88}
89
90static int aq100x_intr_disable(struct cphy *phy)
91{
92 return t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, 0);
93}
94
95static int aq100x_intr_clear(struct cphy *phy)
96{
97 unsigned int v;
98
99 t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &v);
100 t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v);
101
102 return 0;
103}
104
105static int aq100x_intr_handler(struct cphy *phy)
106{
107 int err;
108 unsigned int cause, v;
109
110 err = t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &cause);
111 if (err)
112 return err;
113
114 /* Read (and reset) the latching version of the status */
115 t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v);
116
117 return cphy_cause_link_change;
118}
119
120static int aq100x_power_down(struct cphy *phy, int off)
121{
122 return mdio_set_flag(&phy->mdio, phy->mdio.prtad,
123 MDIO_MMD_PMAPMD, MDIO_CTRL1,
124 MDIO_CTRL1_LPOWER, off);
125}
126
127static int aq100x_autoneg_enable(struct cphy *phy)
128{
129 int err;
130
131 err = aq100x_power_down(phy, 0);
132 if (!err)
133 err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
134 MDIO_MMD_AN, MDIO_CTRL1,
135 BMCR_ANENABLE | BMCR_ANRESTART, 1);
136
137 return err;
138}
139
140static int aq100x_autoneg_restart(struct cphy *phy)
141{
142 int err;
143
144 err = aq100x_power_down(phy, 0);
145 if (!err)
146 err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
147 MDIO_MMD_AN, MDIO_CTRL1,
148 BMCR_ANENABLE | BMCR_ANRESTART, 1);
149
150 return err;
151}
152
153static int aq100x_advertise(struct cphy *phy, unsigned int advertise_map)
154{
155 unsigned int adv;
156 int err;
157
158 /* 10G advertisement */
159 adv = 0;
160 if (advertise_map & ADVERTISED_10000baseT_Full)
161 adv |= ADV_10G_FULL;
162 err = t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
163 ADV_10G_FULL, adv);
164 if (err)
165 return err;
166
167 /* 1G advertisement */
168 adv = 0;
169 if (advertise_map & ADVERTISED_1000baseT_Full)
170 adv |= ADV_1G_FULL;
171 if (advertise_map & ADVERTISED_1000baseT_Half)
172 adv |= ADV_1G_HALF;
173 err = t3_mdio_change_bits(phy, MDIO_MMD_AN, AQ_1G_CTRL,
174 ADV_1G_FULL | ADV_1G_HALF, adv);
175 if (err)
176 return err;
177
178 /* 100M, pause advertisement */
179 adv = 0;
180 if (advertise_map & ADVERTISED_100baseT_Half)
181 adv |= ADVERTISE_100HALF;
182 if (advertise_map & ADVERTISED_100baseT_Full)
183 adv |= ADVERTISE_100FULL;
184 if (advertise_map & ADVERTISED_Pause)
185 adv |= ADVERTISE_PAUSE_CAP;
186 if (advertise_map & ADVERTISED_Asym_Pause)
187 adv |= ADVERTISE_PAUSE_ASYM;
188 err = t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_ADVERTISE,
189 0xfe0, adv);
190
191 return err;
192}
193
194static int aq100x_set_loopback(struct cphy *phy, int mmd, int dir, int enable)
195{
196 return mdio_set_flag(&phy->mdio, phy->mdio.prtad,
197 MDIO_MMD_PMAPMD, MDIO_CTRL1,
198 BMCR_LOOPBACK, enable);
199}
200
201static int aq100x_set_speed_duplex(struct cphy *phy, int speed, int duplex)
202{
203 /* no can do */
204 return -1;
205}
206
207static int aq100x_get_link_status(struct cphy *phy, int *link_ok,
208 int *speed, int *duplex, int *fc)
209{
210 int err;
211 unsigned int v;
212
213 if (link_ok) {
214 err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AQ_LINK_STAT, &v);
215 if (err)
216 return err;
217
218 *link_ok = v & 1;
219 if (!*link_ok)
220 return 0;
221 }
222
223 err = t3_mdio_read(phy, MDIO_MMD_AN, AQ_ANEG_STAT, &v);
224 if (err)
225 return err;
226
227 if (speed) {
228 switch (v & 0x6) {
229 case 0x6:
230 *speed = SPEED_10000;
231 break;
232 case 0x4:
233 *speed = SPEED_1000;
234 break;
235 case 0x2:
236 *speed = SPEED_100;
237 break;
238 case 0x0:
239 *speed = SPEED_10;
240 break;
241 }
242 }
243
244 if (duplex)
245 *duplex = v & 1 ? DUPLEX_FULL : DUPLEX_HALF;
246
247 return 0;
248}
249
250static struct cphy_ops aq100x_ops = {
251 .reset = aq100x_reset,
252 .intr_enable = aq100x_intr_enable,
253 .intr_disable = aq100x_intr_disable,
254 .intr_clear = aq100x_intr_clear,
255 .intr_handler = aq100x_intr_handler,
256 .autoneg_enable = aq100x_autoneg_enable,
257 .autoneg_restart = aq100x_autoneg_restart,
258 .advertise = aq100x_advertise,
259 .set_loopback = aq100x_set_loopback,
260 .set_speed_duplex = aq100x_set_speed_duplex,
261 .get_link_status = aq100x_get_link_status,
262 .power_down = aq100x_power_down,
263 .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
264};
265
266int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
267 const struct mdio_ops *mdio_ops)
268{
269 unsigned int v, v2, gpio, wait;
270 int err;
271
272 cphy_init(phy, adapter, phy_addr, &aq100x_ops, mdio_ops,
273 SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full |
274 SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_AUI,
275 "1000/10GBASE-T");
276
277 /*
278 * The PHY has been out of reset ever since the system powered up. So
279 * we do a hard reset over here.
280 */
281 gpio = phy_addr ? F_GPIO10_OUT_VAL : F_GPIO6_OUT_VAL;
282 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, 0);
283 msleep(1);
284 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, gpio);
285
286 /*
287 * Give it enough time to load the firmware and get ready for mdio.
288 */
289 msleep(1000);
290 wait = 500; /* in 10ms increments */
291 do {
292 err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v);
293 if (err || v == 0xffff) {
294
295 /* Allow prep_adapter to succeed when ffff is read */
296
297 CH_WARN(adapter, "PHY%d: reset failed (0x%x, 0x%x).\n",
298 phy_addr, err, v);
299 goto done;
300 }
301
302 v &= AQ_RESET;
303 if (v)
304 msleep(10);
305 } while (v && --wait);
306 if (v) {
307 CH_WARN(adapter, "PHY%d: reset timed out (0x%x).\n",
308 phy_addr, v);
309
310 goto done; /* let prep_adapter succeed */
311 }
312
313 /* Datasheet says 3s max but longer times have been observed */
314 wait = (500 - wait) * 10 + 1000;
315 if (wait > 3000)
316 CH_WARN(adapter, "PHY%d: reset took %ums\n", phy_addr, wait);
317
318 /* Firmware version check. */
319 t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_FW_VERSION, &v);
320 if (v != 101)
321 CH_WARN(adapter, "PHY%d: unsupported firmware %d\n",
322 phy_addr, v);
323
324 /*
325 * The PHY should start in really-low-power mode. Prepare it for normal
326 * operations.
327 */
328 err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v);
329 if (err)
330 return err;
331 if (v & AQ_LOWPOWER) {
332 err = t3_mdio_change_bits(phy, MDIO_MMD_VEND1, MDIO_CTRL1,
333 AQ_LOWPOWER, 0);
334 if (err)
335 return err;
336 msleep(10);
337 } else
338 CH_WARN(adapter, "PHY%d does not start in low power mode.\n",
339 phy_addr);
340
341 /*
342 * Verify XAUI settings, but let prep succeed no matter what.
343 */
344 v = v2 = 0;
345 t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_RX_CFG, &v);
346 t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_TX_CFG, &v2);
347 if (v != 0x1b || v2 != 0x1b)
348 CH_WARN(adapter,
349 "PHY%d: incorrect XAUI settings (0x%x, 0x%x).\n",
350 phy_addr, v, v2);
351
352done:
353 return err;
354}
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
new file mode 100644
index 00000000000..df01b634324
--- /dev/null
+++ b/drivers/net/cxgb3/common.h
@@ -0,0 +1,775 @@
1/*
2 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef __CHELSIO_COMMON_H
33#define __CHELSIO_COMMON_H
34
35#include <linux/kernel.h>
36#include <linux/types.h>
37#include <linux/ctype.h>
38#include <linux/delay.h>
39#include <linux/init.h>
40#include <linux/netdevice.h>
41#include <linux/ethtool.h>
42#include <linux/mdio.h>
43#include "version.h"
44
45#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__)
46#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__)
47#define CH_ALERT(adap, fmt, ...) \
48 dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__)
49
50/*
51 * More powerful macro that selectively prints messages based on msg_enable.
52 * For info and debugging messages.
53 */
54#define CH_MSG(adapter, level, category, fmt, ...) do { \
55 if ((adapter)->msg_enable & NETIF_MSG_##category) \
56 dev_printk(KERN_##level, &adapter->pdev->dev, fmt, \
57 ## __VA_ARGS__); \
58} while (0)
59
60#ifdef DEBUG
61# define CH_DBG(adapter, category, fmt, ...) \
62 CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
63#else
64# define CH_DBG(adapter, category, fmt, ...)
65#endif
66
67/* Additional NETIF_MSG_* categories */
68#define NETIF_MSG_MMIO 0x8000000
69
70enum {
71 MAX_NPORTS = 2, /* max # of ports */
72 MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */
73 EEPROMSIZE = 8192, /* Serial EEPROM size */
74 SERNUM_LEN = 16, /* Serial # length */
75 RSS_TABLE_SIZE = 64, /* size of RSS lookup and mapping tables */
76 TCB_SIZE = 128, /* TCB size */
77 NMTUS = 16, /* size of MTU table */
78 NCCTRL_WIN = 32, /* # of congestion control windows */
79 PROTO_SRAM_LINES = 128, /* size of TP sram */
80};
81
82#define MAX_RX_COALESCING_LEN 12288U
83
84enum {
85 PAUSE_RX = 1 << 0,
86 PAUSE_TX = 1 << 1,
87 PAUSE_AUTONEG = 1 << 2
88};
89
90enum {
91 SUPPORTED_IRQ = 1 << 24
92};
93
94enum { /* adapter interrupt-maintained statistics */
95 STAT_ULP_CH0_PBL_OOB,
96 STAT_ULP_CH1_PBL_OOB,
97 STAT_PCI_CORR_ECC,
98
99 IRQ_NUM_STATS /* keep last */
100};
101
102#define TP_VERSION_MAJOR 1
103#define TP_VERSION_MINOR 1
104#define TP_VERSION_MICRO 0
105
106#define S_TP_VERSION_MAJOR 16
107#define M_TP_VERSION_MAJOR 0xFF
108#define V_TP_VERSION_MAJOR(x) ((x) << S_TP_VERSION_MAJOR)
109#define G_TP_VERSION_MAJOR(x) \
110 (((x) >> S_TP_VERSION_MAJOR) & M_TP_VERSION_MAJOR)
111
112#define S_TP_VERSION_MINOR 8
113#define M_TP_VERSION_MINOR 0xFF
114#define V_TP_VERSION_MINOR(x) ((x) << S_TP_VERSION_MINOR)
115#define G_TP_VERSION_MINOR(x) \
116 (((x) >> S_TP_VERSION_MINOR) & M_TP_VERSION_MINOR)
117
118#define S_TP_VERSION_MICRO 0
119#define M_TP_VERSION_MICRO 0xFF
120#define V_TP_VERSION_MICRO(x) ((x) << S_TP_VERSION_MICRO)
121#define G_TP_VERSION_MICRO(x) \
122 (((x) >> S_TP_VERSION_MICRO) & M_TP_VERSION_MICRO)
123
124enum {
125 SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */
126 SGE_RXQ_PER_SET = 2, /* # of Rx queues per set */
127 SGE_TXQ_PER_SET = 3 /* # of Tx queues per set */
128};
129
130enum sge_context_type { /* SGE egress context types */
131 SGE_CNTXT_RDMA = 0,
132 SGE_CNTXT_ETH = 2,
133 SGE_CNTXT_OFLD = 4,
134 SGE_CNTXT_CTRL = 5
135};
136
137enum {
138 AN_PKT_SIZE = 32, /* async notification packet size */
139 IMMED_PKT_SIZE = 48 /* packet size for immediate data */
140};
141
142struct sg_ent { /* SGE scatter/gather entry */
143 __be32 len[2];
144 __be64 addr[2];
145};
146
147#ifndef SGE_NUM_GENBITS
148/* Must be 1 or 2 */
149# define SGE_NUM_GENBITS 2
150#endif
151
152#define TX_DESC_FLITS 16U
153#define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS)
154
155struct cphy;
156struct adapter;
157
158struct mdio_ops {
159 int (*read)(struct net_device *dev, int phy_addr, int mmd_addr,
160 u16 reg_addr);
161 int (*write)(struct net_device *dev, int phy_addr, int mmd_addr,
162 u16 reg_addr, u16 val);
163 unsigned mode_support;
164};
165
166struct adapter_info {
167 unsigned char nports0; /* # of ports on channel 0 */
168 unsigned char nports1; /* # of ports on channel 1 */
169 unsigned char phy_base_addr; /* MDIO PHY base address */
170 unsigned int gpio_out; /* GPIO output settings */
171 unsigned char gpio_intr[MAX_NPORTS]; /* GPIO PHY IRQ pins */
172 unsigned long caps; /* adapter capabilities */
173 const struct mdio_ops *mdio_ops; /* MDIO operations */
174 const char *desc; /* product description */
175};
176
177struct mc5_stats {
178 unsigned long parity_err;
179 unsigned long active_rgn_full;
180 unsigned long nfa_srch_err;
181 unsigned long unknown_cmd;
182 unsigned long reqq_parity_err;
183 unsigned long dispq_parity_err;
184 unsigned long del_act_empty;
185};
186
187struct mc7_stats {
188 unsigned long corr_err;
189 unsigned long uncorr_err;
190 unsigned long parity_err;
191 unsigned long addr_err;
192};
193
194struct mac_stats {
195 u64 tx_octets; /* total # of octets in good frames */
196 u64 tx_octets_bad; /* total # of octets in error frames */
197 u64 tx_frames; /* all good frames */
198 u64 tx_mcast_frames; /* good multicast frames */
199 u64 tx_bcast_frames; /* good broadcast frames */
200 u64 tx_pause; /* # of transmitted pause frames */
201 u64 tx_deferred; /* frames with deferred transmissions */
202 u64 tx_late_collisions; /* # of late collisions */
203 u64 tx_total_collisions; /* # of total collisions */
204 u64 tx_excess_collisions; /* frame errors from excessive collisions */
205 u64 tx_underrun; /* # of Tx FIFO underruns */
206 u64 tx_len_errs; /* # of Tx length errors */
207 u64 tx_mac_internal_errs; /* # of internal MAC errors on Tx */
208 u64 tx_excess_deferral; /* # of frames with excessive deferral */
209 u64 tx_fcs_errs; /* # of frames with bad FCS */
210
211 u64 tx_frames_64; /* # of Tx frames in a particular range */
212 u64 tx_frames_65_127;
213 u64 tx_frames_128_255;
214 u64 tx_frames_256_511;
215 u64 tx_frames_512_1023;
216 u64 tx_frames_1024_1518;
217 u64 tx_frames_1519_max;
218
219 u64 rx_octets; /* total # of octets in good frames */
220 u64 rx_octets_bad; /* total # of octets in error frames */
221 u64 rx_frames; /* all good frames */
222 u64 rx_mcast_frames; /* good multicast frames */
223 u64 rx_bcast_frames; /* good broadcast frames */
224 u64 rx_pause; /* # of received pause frames */
225 u64 rx_fcs_errs; /* # of received frames with bad FCS */
226 u64 rx_align_errs; /* alignment errors */
227 u64 rx_symbol_errs; /* symbol errors */
228 u64 rx_data_errs; /* data errors */
229 u64 rx_sequence_errs; /* sequence errors */
230 u64 rx_runt; /* # of runt frames */
231 u64 rx_jabber; /* # of jabber frames */
232 u64 rx_short; /* # of short frames */
233 u64 rx_too_long; /* # of oversized frames */
234 u64 rx_mac_internal_errs; /* # of internal MAC errors on Rx */
235
236 u64 rx_frames_64; /* # of Rx frames in a particular range */
237 u64 rx_frames_65_127;
238 u64 rx_frames_128_255;
239 u64 rx_frames_256_511;
240 u64 rx_frames_512_1023;
241 u64 rx_frames_1024_1518;
242 u64 rx_frames_1519_max;
243
244 u64 rx_cong_drops; /* # of Rx drops due to SGE congestion */
245
246 unsigned long tx_fifo_parity_err;
247 unsigned long rx_fifo_parity_err;
248 unsigned long tx_fifo_urun;
249 unsigned long rx_fifo_ovfl;
250 unsigned long serdes_signal_loss;
251 unsigned long xaui_pcs_ctc_err;
252 unsigned long xaui_pcs_align_change;
253
254 unsigned long num_toggled; /* # times toggled TxEn due to stuck TX */
255 unsigned long num_resets; /* # times reset due to stuck TX */
256
257 unsigned long link_faults; /* # detected link faults */
258};
259
260struct tp_mib_stats {
261 u32 ipInReceive_hi;
262 u32 ipInReceive_lo;
263 u32 ipInHdrErrors_hi;
264 u32 ipInHdrErrors_lo;
265 u32 ipInAddrErrors_hi;
266 u32 ipInAddrErrors_lo;
267 u32 ipInUnknownProtos_hi;
268 u32 ipInUnknownProtos_lo;
269 u32 ipInDiscards_hi;
270 u32 ipInDiscards_lo;
271 u32 ipInDelivers_hi;
272 u32 ipInDelivers_lo;
273 u32 ipOutRequests_hi;
274 u32 ipOutRequests_lo;
275 u32 ipOutDiscards_hi;
276 u32 ipOutDiscards_lo;
277 u32 ipOutNoRoutes_hi;
278 u32 ipOutNoRoutes_lo;
279 u32 ipReasmTimeout;
280 u32 ipReasmReqds;
281 u32 ipReasmOKs;
282 u32 ipReasmFails;
283
284 u32 reserved[8];
285
286 u32 tcpActiveOpens;
287 u32 tcpPassiveOpens;
288 u32 tcpAttemptFails;
289 u32 tcpEstabResets;
290 u32 tcpOutRsts;
291 u32 tcpCurrEstab;
292 u32 tcpInSegs_hi;
293 u32 tcpInSegs_lo;
294 u32 tcpOutSegs_hi;
295 u32 tcpOutSegs_lo;
296 u32 tcpRetransSeg_hi;
297 u32 tcpRetransSeg_lo;
298 u32 tcpInErrs_hi;
299 u32 tcpInErrs_lo;
300 u32 tcpRtoMin;
301 u32 tcpRtoMax;
302};
303
304struct tp_params {
305 unsigned int nchan; /* # of channels */
306 unsigned int pmrx_size; /* total PMRX capacity */
307 unsigned int pmtx_size; /* total PMTX capacity */
308 unsigned int cm_size; /* total CM capacity */
309 unsigned int chan_rx_size; /* per channel Rx size */
310 unsigned int chan_tx_size; /* per channel Tx size */
311 unsigned int rx_pg_size; /* Rx page size */
312 unsigned int tx_pg_size; /* Tx page size */
313 unsigned int rx_num_pgs; /* # of Rx pages */
314 unsigned int tx_num_pgs; /* # of Tx pages */
315 unsigned int ntimer_qs; /* # of timer queues */
316};
317
318struct qset_params { /* SGE queue set parameters */
319 unsigned int polling; /* polling/interrupt service for rspq */
320 unsigned int coalesce_usecs; /* irq coalescing timer */
321 unsigned int rspq_size; /* # of entries in response queue */
322 unsigned int fl_size; /* # of entries in regular free list */
323 unsigned int jumbo_size; /* # of entries in jumbo free list */
324 unsigned int txq_size[SGE_TXQ_PER_SET]; /* Tx queue sizes */
325 unsigned int cong_thres; /* FL congestion threshold */
326 unsigned int vector; /* Interrupt (line or vector) number */
327};
328
329struct sge_params {
330 unsigned int max_pkt_size; /* max offload pkt size */
331 struct qset_params qset[SGE_QSETS];
332};
333
334struct mc5_params {
335 unsigned int mode; /* selects MC5 width */
336 unsigned int nservers; /* size of server region */
337 unsigned int nfilters; /* size of filter region */
338 unsigned int nroutes; /* size of routing region */
339};
340
341/* Default MC5 region sizes */
342enum {
343 DEFAULT_NSERVERS = 512,
344 DEFAULT_NFILTERS = 128
345};
346
347/* MC5 modes, these must be non-0 */
348enum {
349 MC5_MODE_144_BIT = 1,
350 MC5_MODE_72_BIT = 2
351};
352
353/* MC5 min active region size */
354enum { MC5_MIN_TIDS = 16 };
355
356struct vpd_params {
357 unsigned int cclk;
358 unsigned int mclk;
359 unsigned int uclk;
360 unsigned int mdc;
361 unsigned int mem_timing;
362 u8 sn[SERNUM_LEN + 1];
363 u8 eth_base[6];
364 u8 port_type[MAX_NPORTS];
365 unsigned short xauicfg[2];
366};
367
368struct pci_params {
369 unsigned int vpd_cap_addr;
370 unsigned short speed;
371 unsigned char width;
372 unsigned char variant;
373};
374
375enum {
376 PCI_VARIANT_PCI,
377 PCI_VARIANT_PCIX_MODE1_PARITY,
378 PCI_VARIANT_PCIX_MODE1_ECC,
379 PCI_VARIANT_PCIX_266_MODE2,
380 PCI_VARIANT_PCIE
381};
382
383struct adapter_params {
384 struct sge_params sge;
385 struct mc5_params mc5;
386 struct tp_params tp;
387 struct vpd_params vpd;
388 struct pci_params pci;
389
390 const struct adapter_info *info;
391
392 unsigned short mtus[NMTUS];
393 unsigned short a_wnd[NCCTRL_WIN];
394 unsigned short b_wnd[NCCTRL_WIN];
395
396 unsigned int nports; /* # of ethernet ports */
397 unsigned int chan_map; /* bitmap of in-use Tx channels */
398 unsigned int stats_update_period; /* MAC stats accumulation period */
399 unsigned int linkpoll_period; /* link poll period in 0.1s */
400 unsigned int rev; /* chip revision */
401 unsigned int offload;
402};
403
404enum { /* chip revisions */
405 T3_REV_A = 0,
406 T3_REV_B = 2,
407 T3_REV_B2 = 3,
408 T3_REV_C = 4,
409};
410
411struct trace_params {
412 u32 sip;
413 u32 sip_mask;
414 u32 dip;
415 u32 dip_mask;
416 u16 sport;
417 u16 sport_mask;
418 u16 dport;
419 u16 dport_mask;
420 u32 vlan:12;
421 u32 vlan_mask:12;
422 u32 intf:4;
423 u32 intf_mask:4;
424 u8 proto;
425 u8 proto_mask;
426};
427
428struct link_config {
429 unsigned int supported; /* link capabilities */
430 unsigned int advertising; /* advertised capabilities */
431 unsigned short requested_speed; /* speed user has requested */
432 unsigned short speed; /* actual link speed */
433 unsigned char requested_duplex; /* duplex user has requested */
434 unsigned char duplex; /* actual link duplex */
435 unsigned char requested_fc; /* flow control user has requested */
436 unsigned char fc; /* actual link flow control */
437 unsigned char autoneg; /* autonegotiating? */
438 unsigned int link_ok; /* link up? */
439};
440
441#define SPEED_INVALID 0xffff
442#define DUPLEX_INVALID 0xff
443
444struct mc5 {
445 struct adapter *adapter;
446 unsigned int tcam_size;
447 unsigned char part_type;
448 unsigned char parity_enabled;
449 unsigned char mode;
450 struct mc5_stats stats;
451};
452
453static inline unsigned int t3_mc5_size(const struct mc5 *p)
454{
455 return p->tcam_size;
456}
457
458struct mc7 {
459 struct adapter *adapter; /* backpointer to adapter */
460 unsigned int size; /* memory size in bytes */
461 unsigned int width; /* MC7 interface width */
462 unsigned int offset; /* register address offset for MC7 instance */
463 const char *name; /* name of MC7 instance */
464 struct mc7_stats stats; /* MC7 statistics */
465};
466
467static inline unsigned int t3_mc7_size(const struct mc7 *p)
468{
469 return p->size;
470}
471
472struct cmac {
473 struct adapter *adapter;
474 unsigned int offset;
475 unsigned int nucast; /* # of address filters for unicast MACs */
476 unsigned int tx_tcnt;
477 unsigned int tx_xcnt;
478 u64 tx_mcnt;
479 unsigned int rx_xcnt;
480 unsigned int rx_ocnt;
481 u64 rx_mcnt;
482 unsigned int toggle_cnt;
483 unsigned int txen;
484 u64 rx_pause;
485 struct mac_stats stats;
486};
487
488enum {
489 MAC_DIRECTION_RX = 1,
490 MAC_DIRECTION_TX = 2,
491 MAC_RXFIFO_SIZE = 32768
492};
493
494/* PHY loopback direction */
495enum {
496 PHY_LOOPBACK_TX = 1,
497 PHY_LOOPBACK_RX = 2
498};
499
500/* PHY interrupt types */
501enum {
502 cphy_cause_link_change = 1,
503 cphy_cause_fifo_error = 2,
504 cphy_cause_module_change = 4,
505};
506
507/* PHY module types */
508enum {
509 phy_modtype_none,
510 phy_modtype_sr,
511 phy_modtype_lr,
512 phy_modtype_lrm,
513 phy_modtype_twinax,
514 phy_modtype_twinax_long,
515 phy_modtype_unknown
516};
517
518/* PHY operations */
519struct cphy_ops {
520 int (*reset)(struct cphy *phy, int wait);
521
522 int (*intr_enable)(struct cphy *phy);
523 int (*intr_disable)(struct cphy *phy);
524 int (*intr_clear)(struct cphy *phy);
525 int (*intr_handler)(struct cphy *phy);
526
527 int (*autoneg_enable)(struct cphy *phy);
528 int (*autoneg_restart)(struct cphy *phy);
529
530 int (*advertise)(struct cphy *phy, unsigned int advertise_map);
531 int (*set_loopback)(struct cphy *phy, int mmd, int dir, int enable);
532 int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
533 int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
534 int *duplex, int *fc);
535 int (*power_down)(struct cphy *phy, int enable);
536
537 u32 mmds;
538};
539enum {
540 EDC_OPT_AEL2005 = 0,
541 EDC_OPT_AEL2005_SIZE = 1084,
542 EDC_TWX_AEL2005 = 1,
543 EDC_TWX_AEL2005_SIZE = 1464,
544 EDC_TWX_AEL2020 = 2,
545 EDC_TWX_AEL2020_SIZE = 1628,
546 EDC_MAX_SIZE = EDC_TWX_AEL2020_SIZE, /* Max cache size */
547};
548
549/* A PHY instance */
550struct cphy {
551 u8 modtype; /* PHY module type */
552 short priv; /* scratch pad */
553 unsigned int caps; /* PHY capabilities */
554 struct adapter *adapter; /* associated adapter */
555 const char *desc; /* PHY description */
556 unsigned long fifo_errors; /* FIFO over/under-flows */
557 const struct cphy_ops *ops; /* PHY operations */
558 struct mdio_if_info mdio;
559 u16 phy_cache[EDC_MAX_SIZE]; /* EDC cache */
560};
561
562/* Convenience MDIO read/write wrappers */
563static inline int t3_mdio_read(struct cphy *phy, int mmd, int reg,
564 unsigned int *valp)
565{
566 int rc = phy->mdio.mdio_read(phy->mdio.dev, phy->mdio.prtad, mmd, reg);
567 *valp = (rc >= 0) ? rc : -1;
568 return (rc >= 0) ? 0 : rc;
569}
570
571static inline int t3_mdio_write(struct cphy *phy, int mmd, int reg,
572 unsigned int val)
573{
574 return phy->mdio.mdio_write(phy->mdio.dev, phy->mdio.prtad, mmd,
575 reg, val);
576}
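As a usage sketch of the wrappers above (a hypothetical helper; MDIO_MMD_PMAPMD and MDIO_DEVID1 come from linux/mdio.h, and the value in *valp is only meaningful when the call returns 0):

/* Read the first PHY identifier word from the PMA/PMD MMD. */
static inline int example_read_phy_id(struct cphy *phy, unsigned int *valp)
{
	return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_DEVID1, valp);
}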
577
578/* Convenience initializer */
579static inline void cphy_init(struct cphy *phy, struct adapter *adapter,
580 int phy_addr, struct cphy_ops *phy_ops,
581 const struct mdio_ops *mdio_ops,
582 unsigned int caps, const char *desc)
583{
584 phy->caps = caps;
585 phy->adapter = adapter;
586 phy->desc = desc;
587 phy->ops = phy_ops;
588 if (mdio_ops) {
589 phy->mdio.prtad = phy_addr;
590 phy->mdio.mmds = phy_ops->mmds;
591 phy->mdio.mode_support = mdio_ops->mode_support;
592 phy->mdio.mdio_read = mdio_ops->read;
593 phy->mdio.mdio_write = mdio_ops->write;
594 }
595}
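A hedged sketch of how one of the t3_*_phy_prep() routines declared near the end of this header might use this initializer; the ops table, capability mask, and description string here are illustrative placeholders rather than the driver's actual values:

static struct cphy_ops example_phy_ops;	/* placeholder ops table */

static int example_phy_prep(struct cphy *phy, struct adapter *adapter,
			    int phy_addr, const struct mdio_ops *mdio_ops)
{
	cphy_init(phy, adapter, phy_addr, &example_phy_ops, mdio_ops,
		  SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
		  "example 10G PHY");
	return 0;
}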
596
597/* Accumulate MAC statistics every 180 seconds; for 1G ports the period is ten times longer. */
598#define MAC_STATS_ACCUM_SECS 180
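A minimal sketch of how that rule might feed the stats_update_period field of struct adapter_params; the helper name is hypothetical, and is_10G() is only defined further down in this header, so this is shown out of place purely for illustration:

static inline unsigned int mac_stats_period_secs(const struct adapter *adap)
{
	/* 180s on 10G ports, ten times that on 1G ports */
	return is_10G(adap) ? MAC_STATS_ACCUM_SECS : MAC_STATS_ACCUM_SECS * 10;
}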
599
600#define XGM_REG(reg_addr, idx) \
601 ((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR))
602
603struct addr_val_pair {
604 unsigned int reg_addr;
605 unsigned int val;
606};
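As an illustration (not the driver's actual initialization tables), a register/value list of this form could be replayed into either XGMAC instance with t3_write_regs(), declared below, using XGM_REG() to derive the per-MAC offset; the registers and field values are borrowed from elsewhere in this patch:

static const struct addr_val_pair xgm_example_regs[] = {
	{ A_XGM_TX_CTRL, F_TXEN },	/* enable the transmitter */
	{ A_XGM_RX_CTRL, F_RXEN },	/* enable the receiver */
};

/* replay the table into MAC instance 'idx':
 *	t3_write_regs(adap, xgm_example_regs, ARRAY_SIZE(xgm_example_regs),
 *		      XGM_REG(0, idx));
 */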
607
608#include "adapter.h"
609
610#ifndef PCI_VENDOR_ID_CHELSIO
611# define PCI_VENDOR_ID_CHELSIO 0x1425
612#endif
613
614#define for_each_port(adapter, iter) \
615 for (iter = 0; iter < (adapter)->params.nports; ++iter)
616
617#define adapter_info(adap) ((adap)->params.info)
618
619static inline int uses_xaui(const struct adapter *adap)
620{
621 return adapter_info(adap)->caps & SUPPORTED_AUI;
622}
623
624static inline int is_10G(const struct adapter *adap)
625{
626 return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full;
627}
628
629static inline int is_offload(const struct adapter *adap)
630{
631 return adap->params.offload;
632}
633
634static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
635{
636 return adap->params.vpd.cclk / 1000;
637}
638
639static inline unsigned int is_pcie(const struct adapter *adap)
640{
641 return adap->params.pci.variant == PCI_VARIANT_PCIE;
642}
643
644void t3_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
645 u32 val);
646void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
647 int n, unsigned int offset);
648int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
649 int polarity, int attempts, int delay, u32 *valp);
650static inline int t3_wait_op_done(struct adapter *adapter, int reg, u32 mask,
651 int polarity, int attempts, int delay)
652{
653 return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
654 delay, NULL);
655}
656int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
657 unsigned int set);
658int t3_phy_reset(struct cphy *phy, int mmd, int wait);
659int t3_phy_advertise(struct cphy *phy, unsigned int advert);
660int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert);
661int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex);
662int t3_phy_lasi_intr_enable(struct cphy *phy);
663int t3_phy_lasi_intr_disable(struct cphy *phy);
664int t3_phy_lasi_intr_clear(struct cphy *phy);
665int t3_phy_lasi_intr_handler(struct cphy *phy);
666
667void t3_intr_enable(struct adapter *adapter);
668void t3_intr_disable(struct adapter *adapter);
669void t3_intr_clear(struct adapter *adapter);
670void t3_xgm_intr_enable(struct adapter *adapter, int idx);
671void t3_xgm_intr_disable(struct adapter *adapter, int idx);
672void t3_port_intr_enable(struct adapter *adapter, int idx);
673void t3_port_intr_disable(struct adapter *adapter, int idx);
674int t3_slow_intr_handler(struct adapter *adapter);
675int t3_phy_intr_handler(struct adapter *adapter);
676
677void t3_link_changed(struct adapter *adapter, int port_id);
678void t3_link_fault(struct adapter *adapter, int port_id);
679int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
680const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
681int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data);
682int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data);
683int t3_seeprom_wp(struct adapter *adapter, int enable);
684int t3_get_tp_version(struct adapter *adapter, u32 *vers);
685int t3_check_tpsram_version(struct adapter *adapter);
686int t3_check_tpsram(struct adapter *adapter, const u8 *tp_ram,
687 unsigned int size);
688int t3_set_proto_sram(struct adapter *adap, const u8 *data);
689int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size);
690int t3_get_fw_version(struct adapter *adapter, u32 *vers);
691int t3_check_fw_version(struct adapter *adapter);
692int t3_init_hw(struct adapter *adapter, u32 fw_params);
693int t3_reset_adapter(struct adapter *adapter);
694int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
695 int reset);
696int t3_replay_prep_adapter(struct adapter *adapter);
697void t3_led_ready(struct adapter *adapter);
698void t3_fatal_err(struct adapter *adapter);
699void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
700void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
701 const u8 * cpus, const u16 *rspq);
702int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
703 unsigned int n, unsigned int *valp);
704int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
705 u64 *buf);
706
707int t3_mac_reset(struct cmac *mac);
708void t3b_pcs_reset(struct cmac *mac);
709void t3_mac_disable_exact_filters(struct cmac *mac);
710void t3_mac_enable_exact_filters(struct cmac *mac);
711int t3_mac_enable(struct cmac *mac, int which);
712int t3_mac_disable(struct cmac *mac, int which);
713int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
714int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev);
715int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
716int t3_mac_set_num_ucast(struct cmac *mac, int n);
717const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
718int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
719int t3b2_mac_watchdog_task(struct cmac *mac);
720
721void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode);
722int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
723 unsigned int nroutes);
724void t3_mc5_intr_handler(struct mc5 *mc5);
725
726void t3_tp_set_offload_mode(struct adapter *adap, int enable);
727void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps);
728void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
729 unsigned short alpha[NCCTRL_WIN],
730 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap);
731void t3_config_trace_filter(struct adapter *adapter,
732 const struct trace_params *tp, int filter_index,
733 int invert, int enable);
734int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched);
735
736void t3_sge_prep(struct adapter *adap, struct sge_params *p);
737void t3_sge_init(struct adapter *adap, struct sge_params *p);
738int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
739 enum sge_context_type type, int respq, u64 base_addr,
740 unsigned int size, unsigned int token, int gen,
741 unsigned int cidx);
742int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
743 int gts_enable, u64 base_addr, unsigned int size,
744 unsigned int esize, unsigned int cong_thres, int gen,
745 unsigned int cidx);
746int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
747 int irq_vec_idx, u64 base_addr, unsigned int size,
748 unsigned int fl_thres, int gen, unsigned int cidx);
749int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
750 unsigned int size, int rspq, int ovfl_mode,
751 unsigned int credits, unsigned int credit_thres);
752int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable);
753int t3_sge_disable_fl(struct adapter *adapter, unsigned int id);
754int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id);
755int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id);
756int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
757 unsigned int credits);
758
759int t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
760 int phy_addr, const struct mdio_ops *mdio_ops);
761int t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
762 int phy_addr, const struct mdio_ops *mdio_ops);
763int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
764 int phy_addr, const struct mdio_ops *mdio_ops);
765int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
766 int phy_addr, const struct mdio_ops *mdio_ops);
767int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter,
768 int phy_addr, const struct mdio_ops *mdio_ops);
769int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
770 const struct mdio_ops *mdio_ops);
771int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
772 int phy_addr, const struct mdio_ops *mdio_ops);
773int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter,
774 int phy_addr, const struct mdio_ops *mdio_ops);
775#endif /* __CHELSIO_COMMON_H */
diff --git a/drivers/net/cxgb3/cxgb3_ctl_defs.h b/drivers/net/cxgb3/cxgb3_ctl_defs.h
new file mode 100644
index 00000000000..369fe711fd7
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_ctl_defs.h
@@ -0,0 +1,189 @@
1/*
2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef _CXGB3_OFFLOAD_CTL_DEFS_H
33#define _CXGB3_OFFLOAD_CTL_DEFS_H
34
35enum {
36 GET_MAX_OUTSTANDING_WR = 0,
37 GET_TX_MAX_CHUNK = 1,
38 GET_TID_RANGE = 2,
39 GET_STID_RANGE = 3,
40 GET_RTBL_RANGE = 4,
41 GET_L2T_CAPACITY = 5,
42 GET_MTUS = 6,
43 GET_WR_LEN = 7,
44 GET_IFF_FROM_MAC = 8,
45 GET_DDP_PARAMS = 9,
46 GET_PORTS = 10,
47
48 ULP_ISCSI_GET_PARAMS = 11,
49 ULP_ISCSI_SET_PARAMS = 12,
50
51 RDMA_GET_PARAMS = 13,
52 RDMA_CQ_OP = 14,
53 RDMA_CQ_SETUP = 15,
54 RDMA_CQ_DISABLE = 16,
55 RDMA_CTRL_QP_SETUP = 17,
56 RDMA_GET_MEM = 18,
57 RDMA_GET_MIB = 19,
58
59 GET_RX_PAGE_INFO = 50,
60 GET_ISCSI_IPV4ADDR = 51,
61
62 GET_EMBEDDED_INFO = 70,
63};
64
65/*
66 * Structure used to describe a TID range. Valid TIDs are [base, base+num).
67 */
68struct tid_range {
69 unsigned int base; /* first TID */
70 unsigned int num; /* number of TIDs in range */
71};
72
73/*
74 * Structure used to request the size and contents of the MTU table.
75 */
76struct mtutab {
77 unsigned int size; /* # of entries in the MTU table */
78 const unsigned short *mtus; /* the MTU table values */
79};
80
81struct net_device;
82
83/*
84 * Structure used to request the adapter net_device owning a given MAC address.
85 */
86struct iff_mac {
87 struct net_device *dev; /* the net_device */
88 const unsigned char *mac_addr; /* MAC address to lookup */
89 u16 vlan_tag;
90};
91
92/* Structure used to request a port's iSCSI IPv4 address */
93struct iscsi_ipv4addr {
94 struct net_device *dev; /* the net_device */
95 __be32 ipv4addr; /* the return iSCSI IPv4 address */
96};
97
98struct pci_dev;
99
100/*
101 * Structure used to request the TCP DDP parameters.
102 */
103struct ddp_params {
104 unsigned int llimit; /* TDDP region start address */
105 unsigned int ulimit; /* TDDP region end address */
106 unsigned int tag_mask; /* TDDP tag mask */
107 struct pci_dev *pdev;
108};
109
110struct adap_ports {
111 unsigned int nports; /* number of ports on this adapter */
112 struct net_device *lldevs[2];
113};
114
115/*
116 * Structure used to return information to the iscsi layer.
117 */
118struct ulp_iscsi_info {
119 unsigned int offset;
120 unsigned int llimit;
121 unsigned int ulimit;
122 unsigned int tagmask;
123 u8 pgsz_factor[4];
124 unsigned int max_rxsz;
125 unsigned int max_txsz;
126 struct pci_dev *pdev;
127};
128
129/*
130 * Structure used to return information to the RDMA layer.
131 */
132struct rdma_info {
133 unsigned int tpt_base; /* TPT base address */
134 unsigned int tpt_top; /* TPT last entry address */
135 unsigned int pbl_base; /* PBL base address */
136 unsigned int pbl_top; /* PBL last entry address */
137 unsigned int rqt_base; /* RQT base address */
138 unsigned int rqt_top; /* RQT last entry address */
139 unsigned int udbell_len; /* user doorbell region length */
140 unsigned long udbell_physbase; /* user doorbell physical start addr */
141 void __iomem *kdb_addr; /* kernel doorbell register address */
142 struct pci_dev *pdev; /* associated PCI device */
143};
144
145/*
146 * Structure used to request an operation on an RDMA completion queue.
147 */
148struct rdma_cq_op {
149 unsigned int id;
150 unsigned int op;
151 unsigned int credits;
152};
153
154/*
155 * Structure used to setup RDMA completion queues.
156 */
157struct rdma_cq_setup {
158 unsigned int id;
159 unsigned long long base_addr;
160 unsigned int size;
161 unsigned int credits;
162 unsigned int credit_thres;
163 unsigned int ovfl_mode;
164};
165
166/*
167 * Structure used to setup the RDMA control egress context.
168 */
169struct rdma_ctrlqp_setup {
170 unsigned long long base_addr;
171 unsigned int size;
172};
173
174/*
175 * Offload TX/RX page information.
176 */
177struct ofld_page_info {
178 unsigned int page_size; /* Page size, should be a power of 2 */
179 unsigned int num; /* Number of pages */
180};
181
182/*
183 * Structure used to get firmware and protocol engine versions.
184 */
185struct ch_embedded_info {
186 u32 fw_vers;
187 u32 tp_vers;
188};
189#endif /* _CXGB3_OFFLOAD_CTL_DEFS_H */
diff --git a/drivers/net/cxgb3/cxgb3_defs.h b/drivers/net/cxgb3/cxgb3_defs.h
new file mode 100644
index 00000000000..920d918ed19
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_defs.h
@@ -0,0 +1,114 @@
1/*
2 * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef _CHELSIO_DEFS_H
33#define _CHELSIO_DEFS_H
34
35#include <linux/skbuff.h>
36#include <net/tcp.h>
37
38#include "t3cdev.h"
39
40#include "cxgb3_offload.h"
41
42#define VALIDATE_TID 1
43
44void *cxgb_alloc_mem(unsigned long size);
45void cxgb_free_mem(void *addr);
46
47/*
48 * Map an ATID or STID to their entries in the corresponding TID tables.
49 */
50static inline union active_open_entry *atid2entry(const struct tid_info *t,
51 unsigned int atid)
52{
53 return &t->atid_tab[atid - t->atid_base];
54}
55
56static inline union listen_entry *stid2entry(const struct tid_info *t,
57 unsigned int stid)
58{
59 return &t->stid_tab[stid - t->stid_base];
60}
61
62/*
63 * Find the connection corresponding to a TID.
64 */
65static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
66 unsigned int tid)
67{
68 struct t3c_tid_entry *t3c_tid = tid < t->ntids ?
69 &(t->tid_tab[tid]) : NULL;
70
71 return (t3c_tid && t3c_tid->client) ? t3c_tid : NULL;
72}
73
74/*
75 * Find the connection corresponding to a server TID.
76 */
77static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t,
78 unsigned int tid)
79{
80 union listen_entry *e;
81
82 if (tid < t->stid_base || tid >= t->stid_base + t->nstids)
83 return NULL;
84
85 e = stid2entry(t, tid);
86 if ((void *)e->next >= (void *)t->tid_tab &&
87 (void *)e->next < (void *)&t->atid_tab[t->natids])
88 return NULL;
89
90 return &e->t3c_tid;
91}
92
93/*
94 * Find the connection corresponding to an active-open TID.
95 */
96static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t,
97 unsigned int tid)
98{
99 union active_open_entry *e;
100
101 if (tid < t->atid_base || tid >= t->atid_base + t->natids)
102 return NULL;
103
104 e = atid2entry(t, tid);
105 if ((void *)e->next >= (void *)t->tid_tab &&
106 (void *)e->next < (void *)&t->atid_tab[t->natids])
107 return NULL;
108
109 return &e->t3c_tid;
110}
111
112int attach_t3cdev(struct t3cdev *dev);
113void detach_t3cdev(struct t3cdev *dev);
114#endif
diff --git a/drivers/net/cxgb3/cxgb3_ioctl.h b/drivers/net/cxgb3/cxgb3_ioctl.h
new file mode 100644
index 00000000000..b19e4376ba7
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_ioctl.h
@@ -0,0 +1,177 @@
1/*
2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef __CHIOCTL_H__
33#define __CHIOCTL_H__
34
35/*
36 * Ioctl commands specific to this driver.
37 */
38enum {
39 CHELSIO_GETMTUTAB = 1029,
40 CHELSIO_SETMTUTAB = 1030,
41 CHELSIO_SET_PM = 1032,
42 CHELSIO_GET_PM = 1033,
43 CHELSIO_GET_MEM = 1038,
44 CHELSIO_LOAD_FW = 1041,
45 CHELSIO_SET_TRACE_FILTER = 1044,
46 CHELSIO_SET_QSET_PARAMS = 1045,
47 CHELSIO_GET_QSET_PARAMS = 1046,
48 CHELSIO_SET_QSET_NUM = 1047,
49 CHELSIO_GET_QSET_NUM = 1048,
50};
51
52struct ch_reg {
53 uint32_t cmd;
54 uint32_t addr;
55 uint32_t val;
56};
57
58struct ch_cntxt {
59 uint32_t cmd;
60 uint32_t cntxt_type;
61 uint32_t cntxt_id;
62 uint32_t data[4];
63};
64
65/* context types */
66enum { CNTXT_TYPE_EGRESS, CNTXT_TYPE_FL, CNTXT_TYPE_RSP, CNTXT_TYPE_CQ };
67
68struct ch_desc {
69 uint32_t cmd;
70 uint32_t queue_num;
71 uint32_t idx;
72 uint32_t size;
73 uint8_t data[128];
74};
75
76struct ch_mem_range {
77 uint32_t cmd;
78 uint32_t mem_id;
79 uint32_t addr;
80 uint32_t len;
81 uint32_t version;
82 uint8_t buf[0];
83};
84
85struct ch_qset_params {
86 uint32_t cmd;
87 uint32_t qset_idx;
88 int32_t txq_size[3];
89 int32_t rspq_size;
90 int32_t fl_size[2];
91 int32_t intr_lat;
92 int32_t polling;
93 int32_t lro;
94 int32_t cong_thres;
95 int32_t vector;
96 int32_t qnum;
97};
98
99struct ch_pktsched_params {
100 uint32_t cmd;
101 uint8_t sched;
102 uint8_t idx;
103 uint8_t min;
104 uint8_t max;
105 uint8_t binding;
106};
107
108#ifndef TCB_SIZE
109# define TCB_SIZE 128
110#endif
111
112/* TCB size in 32-bit words */
113#define TCB_WORDS (TCB_SIZE / 4)
114
115enum { MEM_CM, MEM_PMRX, MEM_PMTX }; /* ch_mem_range.mem_id values */
116
117struct ch_mtus {
118 uint32_t cmd;
119 uint32_t nmtus;
120 uint16_t mtus[NMTUS];
121};
122
123struct ch_pm {
124 uint32_t cmd;
125 uint32_t tx_pg_sz;
126 uint32_t tx_num_pg;
127 uint32_t rx_pg_sz;
128 uint32_t rx_num_pg;
129 uint32_t pm_total;
130};
131
132struct ch_tcam {
133 uint32_t cmd;
134 uint32_t tcam_size;
135 uint32_t nservers;
136 uint32_t nroutes;
137 uint32_t nfilters;
138};
139
140struct ch_tcb {
141 uint32_t cmd;
142 uint32_t tcb_index;
143 uint32_t tcb_data[TCB_WORDS];
144};
145
146struct ch_tcam_word {
147 uint32_t cmd;
148 uint32_t addr;
149 uint32_t buf[3];
150};
151
152struct ch_trace {
153 uint32_t cmd;
154 uint32_t sip;
155 uint32_t sip_mask;
156 uint32_t dip;
157 uint32_t dip_mask;
158 uint16_t sport;
159 uint16_t sport_mask;
160 uint16_t dport;
161 uint16_t dport_mask;
162 uint32_t vlan:12;
163 uint32_t vlan_mask:12;
164 uint32_t intf:4;
165 uint32_t intf_mask:4;
166 uint8_t proto;
167 uint8_t proto_mask;
168 uint8_t invert_match:1;
169 uint8_t config_tx:1;
170 uint8_t config_rx:1;
171 uint8_t trace_tx:1;
172 uint8_t trace_rx:1;
173};
174
175#define SIOCCHIOCTL SIOCDEVPRIVATE
176
177#endif
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
new file mode 100644
index 00000000000..93b41a7ac17
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -0,0 +1,3448 @@
1/*
2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/dma-mapping.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/if_vlan.h>
40#include <linux/mdio.h>
41#include <linux/sockios.h>
42#include <linux/workqueue.h>
43#include <linux/proc_fs.h>
44#include <linux/rtnetlink.h>
45#include <linux/firmware.h>
46#include <linux/log2.h>
47#include <linux/stringify.h>
48#include <linux/sched.h>
49#include <linux/slab.h>
50#include <asm/uaccess.h>
51
52#include "common.h"
53#include "cxgb3_ioctl.h"
54#include "regs.h"
55#include "cxgb3_offload.h"
56#include "version.h"
57
58#include "cxgb3_ctl_defs.h"
59#include "t3_cpl.h"
60#include "firmware_exports.h"
61
62enum {
63 MAX_TXQ_ENTRIES = 16384,
64 MAX_CTRL_TXQ_ENTRIES = 1024,
65 MAX_RSPQ_ENTRIES = 16384,
66 MAX_RX_BUFFERS = 16384,
67 MAX_RX_JUMBO_BUFFERS = 16384,
68 MIN_TXQ_ENTRIES = 4,
69 MIN_CTRL_TXQ_ENTRIES = 4,
70 MIN_RSPQ_ENTRIES = 32,
71 MIN_FL_ENTRIES = 32
72};
73
74#define PORT_MASK ((1 << MAX_NPORTS) - 1)
75
76#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
77 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
78 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
79
80#define EEPROM_MAGIC 0x38E2F10C
81
82#define CH_DEVICE(devid, idx) \
83 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
84
85static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
86 CH_DEVICE(0x20, 0), /* PE9000 */
87 CH_DEVICE(0x21, 1), /* T302E */
88 CH_DEVICE(0x22, 2), /* T310E */
89 CH_DEVICE(0x23, 3), /* T320X */
90 CH_DEVICE(0x24, 1), /* T302X */
91 CH_DEVICE(0x25, 3), /* T320E */
92 CH_DEVICE(0x26, 2), /* T310X */
93 CH_DEVICE(0x30, 2), /* T3B10 */
94 CH_DEVICE(0x31, 3), /* T3B20 */
95 CH_DEVICE(0x32, 1), /* T3B02 */
96 CH_DEVICE(0x35, 6), /* T3C20-derived T3C10 */
97 CH_DEVICE(0x36, 3), /* S320E-CR */
98 CH_DEVICE(0x37, 7), /* N320E-G2 */
99 {0,}
100};
101
102MODULE_DESCRIPTION(DRV_DESC);
103MODULE_AUTHOR("Chelsio Communications");
104MODULE_LICENSE("Dual BSD/GPL");
105MODULE_VERSION(DRV_VERSION);
106MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
107
108static int dflt_msg_enable = DFLT_MSG_ENABLE;
109
110module_param(dflt_msg_enable, int, 0644);
111MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
112
113/*
114 * The driver uses the best interrupt scheme available on a platform in the
115 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
116 * of these schemes the driver may consider as follows:
117 *
118 * msi = 2: choose from among all three options
119 * msi = 1: only consider MSI and pin interrupts
120 * msi = 0: force pin interrupts
121 */
122static int msi = 2;
123
124module_param(msi, int, 0644);
125MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
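A hedged sketch of the fallback order described in the comment above; cxgb_enable_msix() is assumed from a later part of the driver and is not shown in this excerpt:

	/* Prefer MSI-X if allowed, then MSI, otherwise fall back to INTx. */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;
	/* with neither flag set, request_irq() on pdev->irq uses the legacy pin */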
126
127/*
128 * The driver enables offload by default.
129 * To disable it, set ofld_disable = 1.
130 */
131
132static int ofld_disable = 0;
133
134module_param(ofld_disable, int, 0644);
135MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
136
137/*
138 * We have work elements that we need to cancel when an interface is taken
139 * down. Normally the work elements would be executed by keventd but that
140 * can deadlock because of linkwatch. If our close method takes the rtnl
141 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
142 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
143 * for our work to complete. Get our own work queue to solve this.
144 */
145struct workqueue_struct *cxgb3_wq;
146
147/**
148 * link_report - show link status and link speed/duplex
149 * @dev: the port (net device) whose settings are to be reported
150 *
151 * Shows the link status, speed, and duplex of a port.
152 */
153static void link_report(struct net_device *dev)
154{
155 if (!netif_carrier_ok(dev))
156 printk(KERN_INFO "%s: link down\n", dev->name);
157 else {
158 const char *s = "10Mbps";
159 const struct port_info *p = netdev_priv(dev);
160
161 switch (p->link_config.speed) {
162 case SPEED_10000:
163 s = "10Gbps";
164 break;
165 case SPEED_1000:
166 s = "1000Mbps";
167 break;
168 case SPEED_100:
169 s = "100Mbps";
170 break;
171 }
172
173 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
174 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
175 }
176}
177
178static void enable_tx_fifo_drain(struct adapter *adapter,
179 struct port_info *pi)
180{
181 t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
182 F_ENDROPPKT);
183 t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
184 t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
185 t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
186}
187
188static void disable_tx_fifo_drain(struct adapter *adapter,
189 struct port_info *pi)
190{
191 t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
192 F_ENDROPPKT, 0);
193}
194
195void t3_os_link_fault(struct adapter *adap, int port_id, int state)
196{
197 struct net_device *dev = adap->port[port_id];
198 struct port_info *pi = netdev_priv(dev);
199
200 if (state == netif_carrier_ok(dev))
201 return;
202
203 if (state) {
204 struct cmac *mac = &pi->mac;
205
206 netif_carrier_on(dev);
207
208 disable_tx_fifo_drain(adap, pi);
209
210 /* Clear local faults */
211 t3_xgm_intr_disable(adap, pi->port_id);
212 t3_read_reg(adap, A_XGM_INT_STATUS +
213 pi->mac.offset);
214 t3_write_reg(adap,
215 A_XGM_INT_CAUSE + pi->mac.offset,
216 F_XGM_INT);
217
218 t3_set_reg_field(adap,
219 A_XGM_INT_ENABLE +
220 pi->mac.offset,
221 F_XGM_INT, F_XGM_INT);
222 t3_xgm_intr_enable(adap, pi->port_id);
223
224 t3_mac_enable(mac, MAC_DIRECTION_TX);
225 } else {
226 netif_carrier_off(dev);
227
228 /* Flush TX FIFO */
229 enable_tx_fifo_drain(adap, pi);
230 }
231 link_report(dev);
232}
233
234/**
235 * t3_os_link_changed - handle link status changes
236 * @adapter: the adapter associated with the link change
237 * @port_id: the port index whose link status has changed
238 * @link_stat: the new status of the link
239 * @speed: the new speed setting
240 * @duplex: the new duplex setting
241 * @pause: the new flow-control setting
242 *
243 * This is the OS-dependent handler for link status changes. The OS
244 * neutral handler takes care of most of the processing for these events,
245 * then calls this handler for any OS-specific processing.
246 */
247void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
248 int speed, int duplex, int pause)
249{
250 struct net_device *dev = adapter->port[port_id];
251 struct port_info *pi = netdev_priv(dev);
252 struct cmac *mac = &pi->mac;
253
254 /* Skip changes from disabled ports. */
255 if (!netif_running(dev))
256 return;
257
258 if (link_stat != netif_carrier_ok(dev)) {
259 if (link_stat) {
260 disable_tx_fifo_drain(adapter, pi);
261
262 t3_mac_enable(mac, MAC_DIRECTION_RX);
263
264 /* Clear local faults */
265 t3_xgm_intr_disable(adapter, pi->port_id);
266 t3_read_reg(adapter, A_XGM_INT_STATUS +
267 pi->mac.offset);
268 t3_write_reg(adapter,
269 A_XGM_INT_CAUSE + pi->mac.offset,
270 F_XGM_INT);
271
272 t3_set_reg_field(adapter,
273 A_XGM_INT_ENABLE + pi->mac.offset,
274 F_XGM_INT, F_XGM_INT);
275 t3_xgm_intr_enable(adapter, pi->port_id);
276
277 netif_carrier_on(dev);
278 } else {
279 netif_carrier_off(dev);
280
281 t3_xgm_intr_disable(adapter, pi->port_id);
282 t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
283 t3_set_reg_field(adapter,
284 A_XGM_INT_ENABLE + pi->mac.offset,
285 F_XGM_INT, 0);
286
287 if (is_10G(adapter))
288 pi->phy.ops->power_down(&pi->phy, 1);
289
290 t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
291 t3_mac_disable(mac, MAC_DIRECTION_RX);
292 t3_link_start(&pi->phy, mac, &pi->link_config);
293
294 /* Flush TX FIFO */
295 enable_tx_fifo_drain(adapter, pi);
296 }
297
298 link_report(dev);
299 }
300}
301
302/**
303 * t3_os_phymod_changed - handle PHY module changes
304 * @adap: the adapter associated with the module change
305 * @port_id: the port index of the PHY that changed
306 *
307 * This is the OS-dependent handler for PHY module changes. It is
308 * invoked when a PHY module is removed or inserted for any OS-specific
309 * processing.
310 */
311void t3_os_phymod_changed(struct adapter *adap, int port_id)
312{
313 static const char *mod_str[] = {
314 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
315 };
316
317 const struct net_device *dev = adap->port[port_id];
318 const struct port_info *pi = netdev_priv(dev);
319
320 if (pi->phy.modtype == phy_modtype_none)
321 printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
322 else
323 printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
324 mod_str[pi->phy.modtype]);
325}
326
327static void cxgb_set_rxmode(struct net_device *dev)
328{
329 struct port_info *pi = netdev_priv(dev);
330
331 t3_mac_set_rx_mode(&pi->mac, dev);
332}
333
334/**
335 * link_start - enable a port
336 * @dev: the device to enable
337 *
338 * Performs the MAC and PHY actions needed to enable a port.
339 */
340static void link_start(struct net_device *dev)
341{
342 struct port_info *pi = netdev_priv(dev);
343 struct cmac *mac = &pi->mac;
344
345 t3_mac_reset(mac);
346 t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
347 t3_mac_set_mtu(mac, dev->mtu);
348 t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
349 t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
350 t3_mac_set_rx_mode(mac, dev);
351 t3_link_start(&pi->phy, mac, &pi->link_config);
352 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
353}
354
355static inline void cxgb_disable_msi(struct adapter *adapter)
356{
357 if (adapter->flags & USING_MSIX) {
358 pci_disable_msix(adapter->pdev);
359 adapter->flags &= ~USING_MSIX;
360 } else if (adapter->flags & USING_MSI) {
361 pci_disable_msi(adapter->pdev);
362 adapter->flags &= ~USING_MSI;
363 }
364}
365
366/*
367 * Interrupt handler for asynchronous events used with MSI-X.
368 */
369static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
370{
371 t3_slow_intr_handler(cookie);
372 return IRQ_HANDLED;
373}
374
375/*
376 * Name the MSI-X interrupts.
377 */
378static void name_msix_vecs(struct adapter *adap)
379{
380 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
381
382 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
383 adap->msix_info[0].desc[n] = 0;
384
385 for_each_port(adap, j) {
386 struct net_device *d = adap->port[j];
387 const struct port_info *pi = netdev_priv(d);
388
389 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
390 snprintf(adap->msix_info[msi_idx].desc, n,
391 "%s-%d", d->name, pi->first_qset + i);
392 adap->msix_info[msi_idx].desc[n] = 0;
393 }
394 }
395}
396
397static int request_msix_data_irqs(struct adapter *adap)
398{
399 int i, j, err, qidx = 0;
400
401 for_each_port(adap, i) {
402 int nqsets = adap2pinfo(adap, i)->nqsets;
403
404 for (j = 0; j < nqsets; ++j) {
405 err = request_irq(adap->msix_info[qidx + 1].vec,
406 t3_intr_handler(adap,
407 adap->sge.qs[qidx].
408 rspq.polling), 0,
409 adap->msix_info[qidx + 1].desc,
410 &adap->sge.qs[qidx]);
411 if (err) {
412 while (--qidx >= 0)
413 free_irq(adap->msix_info[qidx + 1].vec,
414 &adap->sge.qs[qidx]);
415 return err;
416 }
417 qidx++;
418 }
419 }
420 return 0;
421}
422
423static void free_irq_resources(struct adapter *adapter)
424{
425 if (adapter->flags & USING_MSIX) {
426 int i, n = 0;
427
428 free_irq(adapter->msix_info[0].vec, adapter);
429 for_each_port(adapter, i)
430 n += adap2pinfo(adapter, i)->nqsets;
431
432 for (i = 0; i < n; ++i)
433 free_irq(adapter->msix_info[i + 1].vec,
434 &adapter->sge.qs[i]);
435 } else
436 free_irq(adapter->pdev->irq, adapter);
437}
438
439static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
440 unsigned long n)
441{
442 int attempts = 10;
443
444 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
445 if (!--attempts)
446 return -ETIMEDOUT;
447 msleep(10);
448 }
449 return 0;
450}
451
452static int init_tp_parity(struct adapter *adap)
453{
454 int i;
455 struct sk_buff *skb;
456 struct cpl_set_tcb_field *greq;
457 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
458
459 t3_tp_set_offload_mode(adap, 1);
460
461 for (i = 0; i < 16; i++) {
462 struct cpl_smt_write_req *req;
463
464 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
465 if (!skb)
466 skb = adap->nofail_skb;
467 if (!skb)
468 goto alloc_skb_fail;
469
470 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
471 memset(req, 0, sizeof(*req));
472 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
473 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
474 req->mtu_idx = NMTUS - 1;
475 req->iff = i;
476 t3_mgmt_tx(adap, skb);
477 if (skb == adap->nofail_skb) {
478 await_mgmt_replies(adap, cnt, i + 1);
479 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
480 if (!adap->nofail_skb)
481 goto alloc_skb_fail;
482 }
483 }
484
485 for (i = 0; i < 2048; i++) {
486 struct cpl_l2t_write_req *req;
487
488 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
489 if (!skb)
490 skb = adap->nofail_skb;
491 if (!skb)
492 goto alloc_skb_fail;
493
494 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
495 memset(req, 0, sizeof(*req));
496 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
497 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
498 req->params = htonl(V_L2T_W_IDX(i));
499 t3_mgmt_tx(adap, skb);
500 if (skb == adap->nofail_skb) {
501 await_mgmt_replies(adap, cnt, 16 + i + 1);
502 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
503 if (!adap->nofail_skb)
504 goto alloc_skb_fail;
505 }
506 }
507
508 for (i = 0; i < 2048; i++) {
509 struct cpl_rte_write_req *req;
510
511 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
512 if (!skb)
513 skb = adap->nofail_skb;
514 if (!skb)
515 goto alloc_skb_fail;
516
517 req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
518 memset(req, 0, sizeof(*req));
519 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
520 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
521 req->l2t_idx = htonl(V_L2T_W_IDX(i));
522 t3_mgmt_tx(adap, skb);
523 if (skb == adap->nofail_skb) {
524 await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
525 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
526 if (!adap->nofail_skb)
527 goto alloc_skb_fail;
528 }
529 }
530
531 skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
532 if (!skb)
533 skb = adap->nofail_skb;
534 if (!skb)
535 goto alloc_skb_fail;
536
537 greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
538 memset(greq, 0, sizeof(*greq));
539 greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
540 OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
541 greq->mask = cpu_to_be64(1);
542 t3_mgmt_tx(adap, skb);
543
544 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
545 if (skb == adap->nofail_skb) {
546 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
547 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
548 }
549
550 t3_tp_set_offload_mode(adap, 0);
551 return i;
552
553alloc_skb_fail:
554 t3_tp_set_offload_mode(adap, 0);
555 return -ENOMEM;
556}
557
558/**
559 * setup_rss - configure RSS
560 * @adap: the adapter
561 *
562 * Sets up RSS to distribute packets to multiple receive queues. We
563 * configure the RSS CPU lookup table to distribute to the number of HW
564 * receive queues, and the response queue lookup table to narrow that
565 * down to the response queues actually configured for each port.
566 * We always configure the RSS mapping for two ports since the mapping
567 * table has plenty of entries.
568 */
569static void setup_rss(struct adapter *adap)
570{
571 int i;
572 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
573 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
574 u8 cpus[SGE_QSETS + 1];
575 u16 rspq_map[RSS_TABLE_SIZE];
576
577 for (i = 0; i < SGE_QSETS; ++i)
578 cpus[i] = i;
579 cpus[SGE_QSETS] = 0xff; /* terminator */
580
581 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
582 rspq_map[i] = i % nq0;
583 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
584 }
585
586 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
587 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
588 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
589}
590
591static void ring_dbs(struct adapter *adap)
592{
593 int i, j;
594
595 for (i = 0; i < SGE_QSETS; i++) {
596 struct sge_qset *qs = &adap->sge.qs[i];
597
598 if (qs->adap)
599 for (j = 0; j < SGE_TXQ_PER_SET; j++)
600 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
601 }
602}
603
604static void init_napi(struct adapter *adap)
605{
606 int i;
607
608 for (i = 0; i < SGE_QSETS; i++) {
609 struct sge_qset *qs = &adap->sge.qs[i];
610
611 if (qs->adap)
612 netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
613 64);
614 }
615
616 /*
617 * netif_napi_add() can be called only once per napi_struct because it
618 * adds each new napi_struct to a list. We note that it has been done
619 * (NAPI_INIT) so it is not called again, e.g., during EEH recovery.
620 */
621 adap->flags |= NAPI_INIT;
622}
623
624/*
625 * Wait until all NAPI handlers are descheduled. This includes the handlers of
626 * both netdevices representing interfaces and the dummy ones for the extra
627 * queues.
628 */
629static void quiesce_rx(struct adapter *adap)
630{
631 int i;
632
633 for (i = 0; i < SGE_QSETS; i++)
634 if (adap->sge.qs[i].adap)
635 napi_disable(&adap->sge.qs[i].napi);
636}
637
638static void enable_all_napi(struct adapter *adap)
639{
640 int i;
641 for (i = 0; i < SGE_QSETS; i++)
642 if (adap->sge.qs[i].adap)
643 napi_enable(&adap->sge.qs[i].napi);
644}
645
646/**
647 * setup_sge_qsets - configure SGE Tx/Rx/response queues
648 * @adap: the adapter
649 *
650 * Determines how many sets of SGE queues to use and initializes them.
651 * We support multiple queue sets per port if we have MSI-X, otherwise
652 * just one queue set per port.
653 */
654static int setup_sge_qsets(struct adapter *adap)
655{
656 int i, j, err, irq_idx = 0, qset_idx = 0;
657 unsigned int ntxq = SGE_TXQ_PER_SET;
658
659 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
660 irq_idx = -1;
661
662 for_each_port(adap, i) {
663 struct net_device *dev = adap->port[i];
664 struct port_info *pi = netdev_priv(dev);
665
666 pi->qs = &adap->sge.qs[pi->first_qset];
667 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
668 err = t3_sge_alloc_qset(adap, qset_idx, 1,
669 (adap->flags & USING_MSIX) ? qset_idx + 1 :
670 irq_idx,
671 &adap->params.sge.qset[qset_idx], ntxq, dev,
672 netdev_get_tx_queue(dev, j));
673 if (err) {
674 t3_free_sge_resources(adap);
675 return err;
676 }
677 }
678 }
679
680 return 0;
681}
682
683static ssize_t attr_show(struct device *d, char *buf,
684 ssize_t(*format) (struct net_device *, char *))
685{
686 ssize_t len;
687
688 /* Synchronize with ioctls that may shut down the device */
689 rtnl_lock();
690 len = (*format) (to_net_dev(d), buf);
691 rtnl_unlock();
692 return len;
693}
694
695static ssize_t attr_store(struct device *d,
696 const char *buf, size_t len,
697 ssize_t(*set) (struct net_device *, unsigned int),
698 unsigned int min_val, unsigned int max_val)
699{
700 char *endp;
701 ssize_t ret;
702 unsigned int val;
703
704 if (!capable(CAP_NET_ADMIN))
705 return -EPERM;
706
707 val = simple_strtoul(buf, &endp, 0);
708 if (endp == buf || val < min_val || val > max_val)
709 return -EINVAL;
710
711 rtnl_lock();
712 ret = (*set) (to_net_dev(d), val);
713 if (!ret)
714 ret = len;
715 rtnl_unlock();
716 return ret;
717}
718
719#define CXGB3_SHOW(name, val_expr) \
720static ssize_t format_##name(struct net_device *dev, char *buf) \
721{ \
722 struct port_info *pi = netdev_priv(dev); \
723 struct adapter *adap = pi->adapter; \
724 return sprintf(buf, "%u\n", val_expr); \
725} \
726static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
727 char *buf) \
728{ \
729 return attr_show(d, buf, format_##name); \
730}
731
732static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
733{
734 struct port_info *pi = netdev_priv(dev);
735 struct adapter *adap = pi->adapter;
736 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
737
738 if (adap->flags & FULL_INIT_DONE)
739 return -EBUSY;
740 if (val && adap->params.rev == 0)
741 return -EINVAL;
742 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
743 min_tids)
744 return -EINVAL;
745 adap->params.mc5.nfilters = val;
746 return 0;
747}
748
749static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
750 const char *buf, size_t len)
751{
752 return attr_store(d, buf, len, set_nfilters, 0, ~0);
753}
754
755static ssize_t set_nservers(struct net_device *dev, unsigned int val)
756{
757 struct port_info *pi = netdev_priv(dev);
758 struct adapter *adap = pi->adapter;
759
760 if (adap->flags & FULL_INIT_DONE)
761 return -EBUSY;
762 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
763 MC5_MIN_TIDS)
764 return -EINVAL;
765 adap->params.mc5.nservers = val;
766 return 0;
767}
768
769static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
770 const char *buf, size_t len)
771{
772 return attr_store(d, buf, len, set_nservers, 0, ~0);
773}
774
775#define CXGB3_ATTR_R(name, val_expr) \
776CXGB3_SHOW(name, val_expr) \
777static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
778
779#define CXGB3_ATTR_RW(name, val_expr, store_method) \
780CXGB3_SHOW(name, val_expr) \
781static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
782
783CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
784CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
785CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
786
787static struct attribute *cxgb3_attrs[] = {
788 &dev_attr_cam_size.attr,
789 &dev_attr_nfilters.attr,
790 &dev_attr_nservers.attr,
791 NULL
792};
793
794static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
795
796static ssize_t tm_attr_show(struct device *d,
797 char *buf, int sched)
798{
799 struct port_info *pi = netdev_priv(to_net_dev(d));
800 struct adapter *adap = pi->adapter;
801 unsigned int v, addr, bpt, cpt;
802 ssize_t len;
803
804 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
805 rtnl_lock();
806 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
807 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
808 if (sched & 1)
809 v >>= 16;
810 bpt = (v >> 8) & 0xff;
811 cpt = v & 0xff;
812 if (!cpt)
813 len = sprintf(buf, "disabled\n");
814 else {
815 v = (adap->params.vpd.cclk * 1000) / cpt;
816 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
817 }
818 rtnl_unlock();
819 return len;
820}
821
822static ssize_t tm_attr_store(struct device *d,
823 const char *buf, size_t len, int sched)
824{
825 struct port_info *pi = netdev_priv(to_net_dev(d));
826 struct adapter *adap = pi->adapter;
827 unsigned int val;
828 char *endp;
829 ssize_t ret;
830
831 if (!capable(CAP_NET_ADMIN))
832 return -EPERM;
833
834 val = simple_strtoul(buf, &endp, 0);
835 if (endp == buf || val > 10000000)
836 return -EINVAL;
837
838 rtnl_lock();
839 ret = t3_config_sched(adap, val, sched);
840 if (!ret)
841 ret = len;
842 rtnl_unlock();
843 return ret;
844}
845
846#define TM_ATTR(name, sched) \
847static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
848 char *buf) \
849{ \
850 return tm_attr_show(d, buf, sched); \
851} \
852static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
853 const char *buf, size_t len) \
854{ \
855 return tm_attr_store(d, buf, len, sched); \
856} \
857static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
858
859TM_ATTR(sched0, 0);
860TM_ATTR(sched1, 1);
861TM_ATTR(sched2, 2);
862TM_ATTR(sched3, 3);
863TM_ATTR(sched4, 4);
864TM_ATTR(sched5, 5);
865TM_ATTR(sched6, 6);
866TM_ATTR(sched7, 7);
867
868static struct attribute *offload_attrs[] = {
869 &dev_attr_sched0.attr,
870 &dev_attr_sched1.attr,
871 &dev_attr_sched2.attr,
872 &dev_attr_sched3.attr,
873 &dev_attr_sched4.attr,
874 &dev_attr_sched5.attr,
875 &dev_attr_sched6.attr,
876 &dev_attr_sched7.attr,
877 NULL
878};
879
880static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
881
882/*
883 * Sends an sk_buff to an offload queue driver
884 * after dealing with any active network taps.
885 */
886static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
887{
888 int ret;
889
890 local_bh_disable();
891 ret = t3_offload_tx(tdev, skb);
892 local_bh_enable();
893 return ret;
894}
895
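/* Program the port's source MAC table (SMT) entry via a CPL_SMT_WRITE_REQ. */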
896static int write_smt_entry(struct adapter *adapter, int idx)
897{
898 struct cpl_smt_write_req *req;
899 struct port_info *pi = netdev_priv(adapter->port[idx]);
900 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
901
902 if (!skb)
903 return -ENOMEM;
904
905 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
906 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
907 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
908 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
909 req->iff = idx;
910 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
911 memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
912 skb->priority = 1;
913 offload_tx(&adapter->tdev, skb);
914 return 0;
915}
916
917static int init_smt(struct adapter *adapter)
918{
919 int i;
920
921 for_each_port(adapter, i)
922 write_smt_entry(adapter, i);
923 return 0;
924}
925
926static void init_port_mtus(struct adapter *adapter)
927{
928 unsigned int mtus = adapter->port[0]->mtu;
929
930 if (adapter->port[1])
931 mtus |= adapter->port[1]->mtu << 16;
932 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
933}
934
935static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
936 int hi, int port)
937{
938 struct sk_buff *skb;
939 struct mngt_pktsched_wr *req;
940 int ret;
941
942 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
943 if (!skb)
944 skb = adap->nofail_skb;
945 if (!skb)
946 return -ENOMEM;
947
948 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
949 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
950 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
951 req->sched = sched;
952 req->idx = qidx;
953 req->min = lo;
954 req->max = hi;
955 req->binding = port;
956 ret = t3_mgmt_tx(adap, skb);
957 if (skb == adap->nofail_skb) {
958 adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
959 GFP_KERNEL);
960 if (!adap->nofail_skb)
961 ret = -ENOMEM;
962 }
963
964 return ret;
965}
966
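/*
 * Bind each port's queue sets to that port by sending one packet-scheduler
 * management command per qset.
 */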
967static int bind_qsets(struct adapter *adap)
968{
969 int i, j, err = 0;
970
971 for_each_port(adap, i) {
972 const struct port_info *pi = adap2pinfo(adap, i);
973
974 for (j = 0; j < pi->nqsets; ++j) {
975 int ret = send_pktsched_cmd(adap, 1,
976 pi->first_qset + j, -1,
977 -1, i);
978 if (ret)
979 err = ret;
980 }
981 }
982
983 return err;
984}
985
986#define FW_VERSION __stringify(FW_VERSION_MAJOR) "." \
987 __stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
988#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
989#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "." \
990 __stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
991#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
992#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
993#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
994#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
995MODULE_FIRMWARE(FW_FNAME);
996MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
997MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
998MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
999MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
1000MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);
1001
1002static inline const char *get_edc_fw_name(int edc_idx)
1003{
1004 const char *fw_name = NULL;
1005
1006 switch (edc_idx) {
1007 case EDC_OPT_AEL2005:
1008 fw_name = AEL2005_OPT_EDC_NAME;
1009 break;
1010 case EDC_TWX_AEL2005:
1011 fw_name = AEL2005_TWX_EDC_NAME;
1012 break;
1013 case EDC_TWX_AEL2020:
1014 fw_name = AEL2020_TWX_EDC_NAME;
1015 break;
1016 }
1017 return fw_name;
1018}
1019
1020int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
1021{
1022 struct adapter *adapter = phy->adapter;
1023 const struct firmware *fw;
1024 char buf[64];
1025 u32 csum;
1026 const __be32 *p;
1027 u16 *cache = phy->phy_cache;
1028 int i, ret;
1029
1030 snprintf(buf, sizeof(buf), "%s", get_edc_fw_name(edc_idx));
1031
1032 ret = request_firmware(&fw, buf, &adapter->pdev->dev);
1033 if (ret < 0) {
1034 dev_err(&adapter->pdev->dev,
1035 "could not upgrade firmware: unable to load %s\n",
1036 buf);
1037 return ret;
1038 }
1039
1040 /* check size, taking the checksum into account */
1041 if (fw->size > size + 4) {
1042 CH_ERR(adapter, "firmware image too large %u, expected %d\n",
1043 (unsigned int)fw->size, size + 4);
1044 ret = -EINVAL;
1045 }
1046
1047 /* compute checksum */
1048 p = (const __be32 *)fw->data;
1049 for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
1050 csum += ntohl(p[i]);
1051
1052 if (csum != 0xffffffff) {
1053 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1054 csum);
1055 ret = -EINVAL;
1056 }
1057
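	/* Unpack each 32-bit firmware word into two 16-bit PHY cache entries. */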
1058 for (i = 0; i < size / 4 ; i++) {
1059 *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
1060 *cache++ = be32_to_cpu(p[i]) & 0xffff;
1061 }
1062
1063 release_firmware(fw);
1064
1065 return ret;
1066}
1067
1068static int upgrade_fw(struct adapter *adap)
1069{
1070 int ret;
1071 const struct firmware *fw;
1072 struct device *dev = &adap->pdev->dev;
1073
1074 ret = request_firmware(&fw, FW_FNAME, dev);
1075 if (ret < 0) {
1076 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
1077 FW_FNAME);
1078 return ret;
1079 }
1080 ret = t3_load_fw(adap, fw->data, fw->size);
1081 release_firmware(fw);
1082
1083 if (ret == 0)
1084 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
1085 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
1086 else
1087 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
1088 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
1089
1090 return ret;
1091}
1092
1093static inline char t3rev2char(struct adapter *adapter)
1094{
1095 char rev = 0;
1096
1097 switch(adapter->params.rev) {
1098 case T3_REV_B:
1099 case T3_REV_B2:
1100 rev = 'b';
1101 break;
1102 case T3_REV_C:
1103 rev = 'c';
1104 break;
1105 }
1106 return rev;
1107}
1108
1109static int update_tpsram(struct adapter *adap)
1110{
1111 const struct firmware *tpsram;
1112 char buf[64];
1113 struct device *dev = &adap->pdev->dev;
1114 int ret;
1115 char rev;
1116
1117 rev = t3rev2char(adap);
1118 if (!rev)
1119 return 0;
1120
1121 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);
1122
1123 ret = request_firmware(&tpsram, buf, dev);
1124 if (ret < 0) {
1125 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
1126 buf);
1127 return ret;
1128 }
1129
1130 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
1131 if (ret)
1132 goto release_tpsram;
1133
1134 ret = t3_set_proto_sram(adap, tpsram->data);
1135 if (ret == 0)
1136 dev_info(dev,
1137 "successful update of protocol engine "
1138 "to %d.%d.%d\n",
1139 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1140 else
1141 dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
1142 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1143 if (ret)
1144 dev_err(dev, "loading protocol SRAM failed\n");
1145
1146release_tpsram:
1147 release_firmware(tpsram);
1148
1149 return ret;
1150}
1151
1152/**
1153 * cxgb_up - enable the adapter
1154 * @adapter: adapter being enabled
1155 *
1156 * Called when the first port is enabled, this function performs the
1157 * actions necessary to make an adapter operational, such as completing
1158 * the initialization of HW modules, and enabling interrupts.
1159 *
1160 * Must be called with the rtnl lock held.
1161 */
1162static int cxgb_up(struct adapter *adap)
1163{
1164 int err;
1165
1166 if (!(adap->flags & FULL_INIT_DONE)) {
1167 err = t3_check_fw_version(adap);
1168 if (err == -EINVAL) {
1169 err = upgrade_fw(adap);
1170 CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
1171 FW_VERSION_MAJOR, FW_VERSION_MINOR,
1172 FW_VERSION_MICRO, err ? "failed" : "succeeded");
1173 }
1174
1175 err = t3_check_tpsram_version(adap);
1176 if (err == -EINVAL) {
1177 err = update_tpsram(adap);
1178 CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
1179 TP_VERSION_MAJOR, TP_VERSION_MINOR,
1180 TP_VERSION_MICRO, err ? "failed" : "succeeded");
1181 }
1182
1183 /*
1184 * Clear interrupts now to catch errors if t3_init_hw fails.
1185 * We clear them again later as initialization may trigger
1186 * conditions that can interrupt.
1187 */
1188 t3_intr_clear(adap);
1189
1190 err = t3_init_hw(adap, 0);
1191 if (err)
1192 goto out;
1193
1194 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
1195 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1196
1197 err = setup_sge_qsets(adap);
1198 if (err)
1199 goto out;
1200
1201 setup_rss(adap);
1202 if (!(adap->flags & NAPI_INIT))
1203 init_napi(adap);
1204
1205 t3_start_sge_timers(adap);
1206 adap->flags |= FULL_INIT_DONE;
1207 }
1208
1209 t3_intr_clear(adap);
1210
1211 if (adap->flags & USING_MSIX) {
1212 name_msix_vecs(adap);
1213 err = request_irq(adap->msix_info[0].vec,
1214 t3_async_intr_handler, 0,
1215 adap->msix_info[0].desc, adap);
1216 if (err)
1217 goto irq_err;
1218
1219 err = request_msix_data_irqs(adap);
1220 if (err) {
1221 free_irq(adap->msix_info[0].vec, adap);
1222 goto irq_err;
1223 }
1224 } else if ((err = request_irq(adap->pdev->irq,
1225 t3_intr_handler(adap,
1226 adap->sge.qs[0].rspq.
1227 polling),
1228 (adap->flags & USING_MSI) ?
1229 0 : IRQF_SHARED,
1230 adap->name, adap)))
1231 goto irq_err;
1232
1233 enable_all_napi(adap);
1234 t3_sge_start(adap);
1235 t3_intr_enable(adap);
1236
1237 if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
1238 is_offload(adap) && init_tp_parity(adap) == 0)
1239 adap->flags |= TP_PARITY_INIT;
1240
1241 if (adap->flags & TP_PARITY_INIT) {
1242 t3_write_reg(adap, A_TP_INT_CAUSE,
1243 F_CMCACHEPERR | F_ARPLUTPERR);
1244 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
1245 }
1246
1247 if (!(adap->flags & QUEUES_BOUND)) {
1248 int ret = bind_qsets(adap);
1249
1250 if (ret < 0) {
1251 CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
1252 t3_intr_disable(adap);
1253 free_irq_resources(adap);
1254 err = ret;
1255 goto out;
1256 }
1257 adap->flags |= QUEUES_BOUND;
1258 }
1259
1260out:
1261 return err;
1262irq_err:
1263 CH_ERR(adap, "request_irq failed, err %d\n", err);
1264 goto out;
1265}
1266
1267/*
1268 * Release resources when all the ports and offloading have been stopped.
1269 */
1270static void cxgb_down(struct adapter *adapter, int on_wq)
1271{
1272 t3_sge_stop(adapter);
1273 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
1274 t3_intr_disable(adapter);
1275 spin_unlock_irq(&adapter->work_lock);
1276
1277 free_irq_resources(adapter);
1278 quiesce_rx(adapter);
1279 t3_sge_stop(adapter);
1280 if (!on_wq)
1281 flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */
1282}
1283
1284static void schedule_chk_task(struct adapter *adap)
1285{
1286 unsigned int timeo;
1287
1288 timeo = adap->params.linkpoll_period ?
1289 (HZ * adap->params.linkpoll_period) / 10 :
1290 adap->params.stats_update_period * HZ;
1291 if (timeo)
1292 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
1293}
1294
1295static int offload_open(struct net_device *dev)
1296{
1297 struct port_info *pi = netdev_priv(dev);
1298 struct adapter *adapter = pi->adapter;
1299 struct t3cdev *tdev = dev2t3cdev(dev);
1300 int adap_up = adapter->open_device_map & PORT_MASK;
1301 int err;
1302
1303 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1304 return 0;
1305
1306 if (!adap_up && (err = cxgb_up(adapter)) < 0)
1307 goto out;
1308
1309 t3_tp_set_offload_mode(adapter, 1);
1310 tdev->lldev = adapter->port[0];
1311 err = cxgb3_offload_activate(adapter);
1312 if (err)
1313 goto out;
1314
1315 init_port_mtus(adapter);
1316 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1317 adapter->params.b_wnd,
1318 adapter->params.rev == 0 ?
1319 adapter->port[0]->mtu : 0xffff);
1320 init_smt(adapter);
1321
1322 if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1323 dev_dbg(&dev->dev, "cannot create sysfs group\n");
1324
1325 /* Call back all registered clients */
1326 cxgb3_add_clients(tdev);
1327
1328out:
1329 /* restore them in case the offload module has changed them */
1330 if (err) {
1331 t3_tp_set_offload_mode(adapter, 0);
1332 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1333 cxgb3_set_dummy_ops(tdev);
1334 }
1335 return err;
1336}
1337
1338static int offload_close(struct t3cdev *tdev)
1339{
1340 struct adapter *adapter = tdev2adap(tdev);
1341 struct t3c_data *td = T3C_DATA(tdev);
1342
1343 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1344 return 0;
1345
1346 /* Call back all registered clients */
1347 cxgb3_remove_clients(tdev);
1348
1349 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1350
1351 /* Flush work scheduled while releasing TIDs */
1352 flush_work_sync(&td->tid_release_task);
1353
1354 tdev->lldev = NULL;
1355 cxgb3_set_dummy_ops(tdev);
1356 t3_tp_set_offload_mode(adapter, 0);
1357 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1358
1359 if (!adapter->open_device_map)
1360 cxgb_down(adapter, 0);
1361
1362 cxgb3_offload_deactivate(adapter);
1363 return 0;
1364}
1365
1366static int cxgb_open(struct net_device *dev)
1367{
1368 struct port_info *pi = netdev_priv(dev);
1369 struct adapter *adapter = pi->adapter;
1370 int other_ports = adapter->open_device_map & PORT_MASK;
1371 int err;
1372
1373 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1374 return err;
1375
1376 set_bit(pi->port_id, &adapter->open_device_map);
1377 if (is_offload(adapter) && !ofld_disable) {
1378 err = offload_open(dev);
1379 if (err)
1380 printk(KERN_WARNING
1381 "Could not initialize offload capabilities\n");
1382 }
1383
1384 netif_set_real_num_tx_queues(dev, pi->nqsets);
1385 err = netif_set_real_num_rx_queues(dev, pi->nqsets);
1386 if (err)
1387 return err;
1388 link_start(dev);
1389 t3_port_intr_enable(adapter, pi->port_id);
1390 netif_tx_start_all_queues(dev);
1391 if (!other_ports)
1392 schedule_chk_task(adapter);
1393
1394 cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
1395 return 0;
1396}
1397
1398static int __cxgb_close(struct net_device *dev, int on_wq)
1399{
1400 struct port_info *pi = netdev_priv(dev);
1401 struct adapter *adapter = pi->adapter;
1402
1404 if (!adapter->open_device_map)
1405 return 0;
1406
1407 /* Stop link fault interrupts */
1408 t3_xgm_intr_disable(adapter, pi->port_id);
1409 t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
1410
1411 t3_port_intr_disable(adapter, pi->port_id);
1412 netif_tx_stop_all_queues(dev);
1413 pi->phy.ops->power_down(&pi->phy, 1);
1414 netif_carrier_off(dev);
1415 t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1416
1417 spin_lock_irq(&adapter->work_lock); /* sync with update task */
1418 clear_bit(pi->port_id, &adapter->open_device_map);
1419 spin_unlock_irq(&adapter->work_lock);
1420
1421 if (!(adapter->open_device_map & PORT_MASK))
1422 cancel_delayed_work_sync(&adapter->adap_check_task);
1423
1424 if (!adapter->open_device_map)
1425 cxgb_down(adapter, on_wq);
1426
1427 cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
1428 return 0;
1429}
1430
1431static int cxgb_close(struct net_device *dev)
1432{
1433 return __cxgb_close(dev, 0);
1434}
1435
1436static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1437{
1438 struct port_info *pi = netdev_priv(dev);
1439 struct adapter *adapter = pi->adapter;
1440 struct net_device_stats *ns = &pi->netstats;
1441 const struct mac_stats *pstats;
1442
1443 spin_lock(&adapter->stats_lock);
1444 pstats = t3_mac_update_stats(&pi->mac);
1445 spin_unlock(&adapter->stats_lock);
1446
1447 ns->tx_bytes = pstats->tx_octets;
1448 ns->tx_packets = pstats->tx_frames;
1449 ns->rx_bytes = pstats->rx_octets;
1450 ns->rx_packets = pstats->rx_frames;
1451 ns->multicast = pstats->rx_mcast_frames;
1452
1453 ns->tx_errors = pstats->tx_underrun;
1454 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1455 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1456 pstats->rx_fifo_ovfl;
1457
1458 /* detailed rx_errors */
1459 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1460 ns->rx_over_errors = 0;
1461 ns->rx_crc_errors = pstats->rx_fcs_errs;
1462 ns->rx_frame_errors = pstats->rx_symbol_errs;
1463 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1464 ns->rx_missed_errors = pstats->rx_cong_drops;
1465
1466 /* detailed tx_errors */
1467 ns->tx_aborted_errors = 0;
1468 ns->tx_carrier_errors = 0;
1469 ns->tx_fifo_errors = pstats->tx_underrun;
1470 ns->tx_heartbeat_errors = 0;
1471 ns->tx_window_errors = 0;
1472 return ns;
1473}
1474
1475static u32 get_msglevel(struct net_device *dev)
1476{
1477 struct port_info *pi = netdev_priv(dev);
1478 struct adapter *adapter = pi->adapter;
1479
1480 return adapter->msg_enable;
1481}
1482
1483static void set_msglevel(struct net_device *dev, u32 val)
1484{
1485 struct port_info *pi = netdev_priv(dev);
1486 struct adapter *adapter = pi->adapter;
1487
1488 adapter->msg_enable = val;
1489}
1490
1491static char stats_strings[][ETH_GSTRING_LEN] = {
1492 "TxOctetsOK ",
1493 "TxFramesOK ",
1494 "TxMulticastFramesOK",
1495 "TxBroadcastFramesOK",
1496 "TxPauseFrames ",
1497 "TxUnderrun ",
1498 "TxExtUnderrun ",
1499
1500 "TxFrames64 ",
1501 "TxFrames65To127 ",
1502 "TxFrames128To255 ",
1503 "TxFrames256To511 ",
1504 "TxFrames512To1023 ",
1505 "TxFrames1024To1518 ",
1506 "TxFrames1519ToMax ",
1507
1508 "RxOctetsOK ",
1509 "RxFramesOK ",
1510 "RxMulticastFramesOK",
1511 "RxBroadcastFramesOK",
1512 "RxPauseFrames ",
1513 "RxFCSErrors ",
1514 "RxSymbolErrors ",
1515 "RxShortErrors ",
1516 "RxJabberErrors ",
1517 "RxLengthErrors ",
1518 "RxFIFOoverflow ",
1519
1520 "RxFrames64 ",
1521 "RxFrames65To127 ",
1522 "RxFrames128To255 ",
1523 "RxFrames256To511 ",
1524 "RxFrames512To1023 ",
1525 "RxFrames1024To1518 ",
1526 "RxFrames1519ToMax ",
1527
1528 "PhyFIFOErrors ",
1529 "TSO ",
1530 "VLANextractions ",
1531 "VLANinsertions ",
1532 "TxCsumOffload ",
1533 "RxCsumGood ",
1534 "LroAggregated ",
1535 "LroFlushed ",
1536 "LroNoDesc ",
1537 "RxDrops ",
1538
1539 "CheckTXEnToggled ",
1540 "CheckResets ",
1541
1542 "LinkFaults ",
1543};
1544
1545static int get_sset_count(struct net_device *dev, int sset)
1546{
1547 switch (sset) {
1548 case ETH_SS_STATS:
1549 return ARRAY_SIZE(stats_strings);
1550 default:
1551 return -EOPNOTSUPP;
1552 }
1553}
1554
1555#define T3_REGMAP_SIZE (3 * 1024)
1556
1557static int get_regs_len(struct net_device *dev)
1558{
1559 return T3_REGMAP_SIZE;
1560}
1561
1562static int get_eeprom_len(struct net_device *dev)
1563{
1564 return EEPROMSIZE;
1565}
1566
1567static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1568{
1569 struct port_info *pi = netdev_priv(dev);
1570 struct adapter *adapter = pi->adapter;
1571 u32 fw_vers = 0;
1572 u32 tp_vers = 0;
1573
1574 spin_lock(&adapter->stats_lock);
1575 t3_get_fw_version(adapter, &fw_vers);
1576 t3_get_tp_version(adapter, &tp_vers);
1577 spin_unlock(&adapter->stats_lock);
1578
1579 strcpy(info->driver, DRV_NAME);
1580 strcpy(info->version, DRV_VERSION);
1581 strcpy(info->bus_info, pci_name(adapter->pdev));
1582 if (!fw_vers)
1583 strcpy(info->fw_version, "N/A");
1584 else {
1585 snprintf(info->fw_version, sizeof(info->fw_version),
1586 "%s %u.%u.%u TP %u.%u.%u",
1587 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1588 G_FW_VERSION_MAJOR(fw_vers),
1589 G_FW_VERSION_MINOR(fw_vers),
1590 G_FW_VERSION_MICRO(fw_vers),
1591 G_TP_VERSION_MAJOR(tp_vers),
1592 G_TP_VERSION_MINOR(tp_vers),
1593 G_TP_VERSION_MICRO(tp_vers));
1594 }
1595}
1596
1597static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1598{
1599 if (stringset == ETH_SS_STATS)
1600 memcpy(data, stats_strings, sizeof(stats_strings));
1601}
1602
1603static unsigned long collect_sge_port_stats(struct adapter *adapter,
1604 struct port_info *p, int idx)
1605{
1606 int i;
1607 unsigned long tot = 0;
1608
1609 for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1610 tot += adapter->sge.qs[i].port_stats[idx];
1611 return tot;
1612}
1613
1614static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1615 u64 *data)
1616{
1617 struct port_info *pi = netdev_priv(dev);
1618 struct adapter *adapter = pi->adapter;
1619 const struct mac_stats *s;
1620
1621 spin_lock(&adapter->stats_lock);
1622 s = t3_mac_update_stats(&pi->mac);
1623 spin_unlock(&adapter->stats_lock);
1624
1625 *data++ = s->tx_octets;
1626 *data++ = s->tx_frames;
1627 *data++ = s->tx_mcast_frames;
1628 *data++ = s->tx_bcast_frames;
1629 *data++ = s->tx_pause;
1630 *data++ = s->tx_underrun;
1631 *data++ = s->tx_fifo_urun;
1632
1633 *data++ = s->tx_frames_64;
1634 *data++ = s->tx_frames_65_127;
1635 *data++ = s->tx_frames_128_255;
1636 *data++ = s->tx_frames_256_511;
1637 *data++ = s->tx_frames_512_1023;
1638 *data++ = s->tx_frames_1024_1518;
1639 *data++ = s->tx_frames_1519_max;
1640
1641 *data++ = s->rx_octets;
1642 *data++ = s->rx_frames;
1643 *data++ = s->rx_mcast_frames;
1644 *data++ = s->rx_bcast_frames;
1645 *data++ = s->rx_pause;
1646 *data++ = s->rx_fcs_errs;
1647 *data++ = s->rx_symbol_errs;
1648 *data++ = s->rx_short;
1649 *data++ = s->rx_jabber;
1650 *data++ = s->rx_too_long;
1651 *data++ = s->rx_fifo_ovfl;
1652
1653 *data++ = s->rx_frames_64;
1654 *data++ = s->rx_frames_65_127;
1655 *data++ = s->rx_frames_128_255;
1656 *data++ = s->rx_frames_256_511;
1657 *data++ = s->rx_frames_512_1023;
1658 *data++ = s->rx_frames_1024_1518;
1659 *data++ = s->rx_frames_1519_max;
1660
1661 *data++ = pi->phy.fifo_errors;
1662
1663 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1664 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1665 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1666 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1667 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1668 *data++ = 0;
1669 *data++ = 0;
1670 *data++ = 0;
1671 *data++ = s->rx_cong_drops;
1672
1673 *data++ = s->num_toggled;
1674 *data++ = s->num_resets;
1675
1676 *data++ = s->link_faults;
1677}
1678
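/* Copy registers [start, end] into the snapshot buffer at their own offsets. */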
1679static inline void reg_block_dump(struct adapter *ap, void *buf,
1680 unsigned int start, unsigned int end)
1681{
1682 u32 *p = buf + start;
1683
1684 for (; start <= end; start += sizeof(u32))
1685 *p++ = t3_read_reg(ap, start);
1686}
1687
1688static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1689 void *buf)
1690{
1691 struct port_info *pi = netdev_priv(dev);
1692 struct adapter *ap = pi->adapter;
1693
1694 /*
1695 * Version scheme:
1696 * bits 0..9: chip version
1697 * bits 10..15: chip revision
1698 * bit 31: set for PCIe cards
1699 */
1700 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1701
1702 /*
1703 * We skip the MAC statistics registers because they are clear-on-read.
1704 * Also reading multi-register stats would need to synchronize with the
1705 * periodic mac stats accumulation. Hard to justify the complexity.
1706 */
1707 memset(buf, 0, T3_REGMAP_SIZE);
1708 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1709 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1710 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1711 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1712 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1713 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1714 XGM_REG(A_XGM_SERDES_STAT3, 1));
1715 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1716 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1717}
1718
1719static int restart_autoneg(struct net_device *dev)
1720{
1721 struct port_info *p = netdev_priv(dev);
1722
1723 if (!netif_running(dev))
1724 return -EAGAIN;
1725 if (p->link_config.autoneg != AUTONEG_ENABLE)
1726 return -EINVAL;
1727 p->phy.ops->autoneg_restart(&p->phy);
1728 return 0;
1729}
1730
1731static int set_phys_id(struct net_device *dev,
1732 enum ethtool_phys_id_state state)
1733{
1734 struct port_info *pi = netdev_priv(dev);
1735 struct adapter *adapter = pi->adapter;
1736
1737 switch (state) {
1738 case ETHTOOL_ID_ACTIVE:
1739 return 1; /* cycle on/off once per second */
1740
1741 case ETHTOOL_ID_OFF:
1742 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1743 break;
1744
1745 case ETHTOOL_ID_ON:
1746 case ETHTOOL_ID_INACTIVE:
1747 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1748 F_GPIO0_OUT_VAL);
1749 }
1750
1751 return 0;
1752}
1753
1754static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1755{
1756 struct port_info *p = netdev_priv(dev);
1757
1758 cmd->supported = p->link_config.supported;
1759 cmd->advertising = p->link_config.advertising;
1760
1761 if (netif_carrier_ok(dev)) {
1762 ethtool_cmd_speed_set(cmd, p->link_config.speed);
1763 cmd->duplex = p->link_config.duplex;
1764 } else {
1765 ethtool_cmd_speed_set(cmd, -1);
1766 cmd->duplex = -1;
1767 }
1768
1769 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1770 cmd->phy_address = p->phy.mdio.prtad;
1771 cmd->transceiver = XCVR_EXTERNAL;
1772 cmd->autoneg = p->link_config.autoneg;
1773 cmd->maxtxpkt = 0;
1774 cmd->maxrxpkt = 0;
1775 return 0;
1776}
1777
1778static int speed_duplex_to_caps(int speed, int duplex)
1779{
1780 int cap = 0;
1781
1782 switch (speed) {
1783 case SPEED_10:
1784 if (duplex == DUPLEX_FULL)
1785 cap = SUPPORTED_10baseT_Full;
1786 else
1787 cap = SUPPORTED_10baseT_Half;
1788 break;
1789 case SPEED_100:
1790 if (duplex == DUPLEX_FULL)
1791 cap = SUPPORTED_100baseT_Full;
1792 else
1793 cap = SUPPORTED_100baseT_Half;
1794 break;
1795 case SPEED_1000:
1796 if (duplex == DUPLEX_FULL)
1797 cap = SUPPORTED_1000baseT_Full;
1798 else
1799 cap = SUPPORTED_1000baseT_Half;
1800 break;
1801 case SPEED_10000:
1802 if (duplex == DUPLEX_FULL)
1803 cap = SUPPORTED_10000baseT_Full;
1804 }
1805 return cap;
1806}
1807
1808#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1809 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1810 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1811 ADVERTISED_10000baseT_Full)
1812
1813static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1814{
1815 struct port_info *p = netdev_priv(dev);
1816 struct link_config *lc = &p->link_config;
1817
1818 if (!(lc->supported & SUPPORTED_Autoneg)) {
1819 /*
1820 * PHY offers a single speed/duplex. See if that's what's
1821 * being requested.
1822 */
1823 if (cmd->autoneg == AUTONEG_DISABLE) {
1824 u32 speed = ethtool_cmd_speed(cmd);
1825 int cap = speed_duplex_to_caps(speed, cmd->duplex);
1826 if (lc->supported & cap)
1827 return 0;
1828 }
1829 return -EINVAL;
1830 }
1831
1832 if (cmd->autoneg == AUTONEG_DISABLE) {
1833 u32 speed = ethtool_cmd_speed(cmd);
1834 int cap = speed_duplex_to_caps(speed, cmd->duplex);
1835
1836 if (!(lc->supported & cap) || (speed == SPEED_1000))
1837 return -EINVAL;
1838 lc->requested_speed = speed;
1839 lc->requested_duplex = cmd->duplex;
1840 lc->advertising = 0;
1841 } else {
1842 cmd->advertising &= ADVERTISED_MASK;
1843 cmd->advertising &= lc->supported;
1844 if (!cmd->advertising)
1845 return -EINVAL;
1846 lc->requested_speed = SPEED_INVALID;
1847 lc->requested_duplex = DUPLEX_INVALID;
1848 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1849 }
1850 lc->autoneg = cmd->autoneg;
1851 if (netif_running(dev))
1852 t3_link_start(&p->phy, &p->mac, lc);
1853 return 0;
1854}
1855
1856static void get_pauseparam(struct net_device *dev,
1857 struct ethtool_pauseparam *epause)
1858{
1859 struct port_info *p = netdev_priv(dev);
1860
1861 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1862 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1863 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1864}
1865
1866static int set_pauseparam(struct net_device *dev,
1867 struct ethtool_pauseparam *epause)
1868{
1869 struct port_info *p = netdev_priv(dev);
1870 struct link_config *lc = &p->link_config;
1871
1872 if (epause->autoneg == AUTONEG_DISABLE)
1873 lc->requested_fc = 0;
1874 else if (lc->supported & SUPPORTED_Autoneg)
1875 lc->requested_fc = PAUSE_AUTONEG;
1876 else
1877 return -EINVAL;
1878
1879 if (epause->rx_pause)
1880 lc->requested_fc |= PAUSE_RX;
1881 if (epause->tx_pause)
1882 lc->requested_fc |= PAUSE_TX;
1883 if (lc->autoneg == AUTONEG_ENABLE) {
1884 if (netif_running(dev))
1885 t3_link_start(&p->phy, &p->mac, lc);
1886 } else {
1887 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1888 if (netif_running(dev))
1889 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1890 }
1891 return 0;
1892}
1893
1894static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1895{
1896 struct port_info *pi = netdev_priv(dev);
1897 struct adapter *adapter = pi->adapter;
1898 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1899
1900 e->rx_max_pending = MAX_RX_BUFFERS;
1901 e->rx_mini_max_pending = 0;
1902 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1903 e->tx_max_pending = MAX_TXQ_ENTRIES;
1904
1905 e->rx_pending = q->fl_size;
1906 e->rx_mini_pending = q->rspq_size;
1907 e->rx_jumbo_pending = q->jumbo_size;
1908 e->tx_pending = q->txq_size[0];
1909}
1910
1911static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1912{
1913 struct port_info *pi = netdev_priv(dev);
1914 struct adapter *adapter = pi->adapter;
1915 struct qset_params *q;
1916 int i;
1917
1918 if (e->rx_pending > MAX_RX_BUFFERS ||
1919 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1920 e->tx_pending > MAX_TXQ_ENTRIES ||
1921 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1922 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1923 e->rx_pending < MIN_FL_ENTRIES ||
1924 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1925 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1926 return -EINVAL;
1927
1928 if (adapter->flags & FULL_INIT_DONE)
1929 return -EBUSY;
1930
1931 q = &adapter->params.sge.qset[pi->first_qset];
1932 for (i = 0; i < pi->nqsets; ++i, ++q) {
1933 q->rspq_size = e->rx_mini_pending;
1934 q->fl_size = e->rx_pending;
1935 q->jumbo_size = e->rx_jumbo_pending;
1936 q->txq_size[0] = e->tx_pending;
1937 q->txq_size[1] = e->tx_pending;
1938 q->txq_size[2] = e->tx_pending;
1939 }
1940 return 0;
1941}
1942
1943static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1944{
1945 struct port_info *pi = netdev_priv(dev);
1946 struct adapter *adapter = pi->adapter;
1947 struct qset_params *qsp;
1948 struct sge_qset *qs;
1949 int i;
1950
1951 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1952 return -EINVAL;
1953
1954 for (i = 0; i < pi->nqsets; i++) {
1955 qsp = &adapter->params.sge.qset[i];
1956 qs = &adapter->sge.qs[i];
1957 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1958 t3_update_qset_coalesce(qs, qsp);
1959 }
1960
1961 return 0;
1962}
1963
1964static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1965{
1966 struct port_info *pi = netdev_priv(dev);
1967 struct adapter *adapter = pi->adapter;
1968 struct qset_params *q = adapter->params.sge.qset;
1969
1970 c->rx_coalesce_usecs = q->coalesce_usecs;
1971 return 0;
1972}
1973
1974static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1975 u8 * data)
1976{
1977 struct port_info *pi = netdev_priv(dev);
1978 struct adapter *adapter = pi->adapter;
1979 int i, err = 0;
1980
1981 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1982 if (!buf)
1983 return -ENOMEM;
1984
1985 e->magic = EEPROM_MAGIC;
1986 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1987 err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
1988
1989 if (!err)
1990 memcpy(data, buf + e->offset, e->len);
1991 kfree(buf);
1992 return err;
1993}
1994
1995static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1996 u8 * data)
1997{
1998 struct port_info *pi = netdev_priv(dev);
1999 struct adapter *adapter = pi->adapter;
2000 u32 aligned_offset, aligned_len;
2001 __le32 *p;
2002 u8 *buf;
2003 int err;
2004
2005 if (eeprom->magic != EEPROM_MAGIC)
2006 return -EINVAL;
2007
2008 aligned_offset = eeprom->offset & ~3;
2009 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2010
2011 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2012 buf = kmalloc(aligned_len, GFP_KERNEL);
2013 if (!buf)
2014 return -ENOMEM;
2015 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
2016 if (!err && aligned_len > 4)
2017 err = t3_seeprom_read(adapter,
2018 aligned_offset + aligned_len - 4,
2019 (__le32 *) & buf[aligned_len - 4]);
2020 if (err)
2021 goto out;
2022 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2023 } else
2024 buf = data;
2025
2026 err = t3_seeprom_wp(adapter, 0);
2027 if (err)
2028 goto out;
2029
2030 for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
2031 err = t3_seeprom_write(adapter, aligned_offset, *p);
2032 aligned_offset += 4;
2033 }
2034
2035 if (!err)
2036 err = t3_seeprom_wp(adapter, 1);
2037out:
2038 if (buf != data)
2039 kfree(buf);
2040 return err;
2041}
2042
2043static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2044{
2045 wol->supported = 0;
2046 wol->wolopts = 0;
2047 memset(&wol->sopass, 0, sizeof(wol->sopass));
2048}
2049
2050static const struct ethtool_ops cxgb_ethtool_ops = {
2051 .get_settings = get_settings,
2052 .set_settings = set_settings,
2053 .get_drvinfo = get_drvinfo,
2054 .get_msglevel = get_msglevel,
2055 .set_msglevel = set_msglevel,
2056 .get_ringparam = get_sge_param,
2057 .set_ringparam = set_sge_param,
2058 .get_coalesce = get_coalesce,
2059 .set_coalesce = set_coalesce,
2060 .get_eeprom_len = get_eeprom_len,
2061 .get_eeprom = get_eeprom,
2062 .set_eeprom = set_eeprom,
2063 .get_pauseparam = get_pauseparam,
2064 .set_pauseparam = set_pauseparam,
2065 .get_link = ethtool_op_get_link,
2066 .get_strings = get_strings,
2067 .set_phys_id = set_phys_id,
2068 .nway_reset = restart_autoneg,
2069 .get_sset_count = get_sset_count,
2070 .get_ethtool_stats = get_stats,
2071 .get_regs_len = get_regs_len,
2072 .get_regs = get_regs,
2073 .get_wol = get_wol,
2074};
2075
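/* A negative value means "parameter not specified" and always passes. */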
2076static int in_range(int val, int lo, int hi)
2077{
2078 return val < 0 || (val <= hi && val >= lo);
2079}
2080
2081static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2082{
2083 struct port_info *pi = netdev_priv(dev);
2084 struct adapter *adapter = pi->adapter;
2085 u32 cmd;
2086 int ret;
2087
2088 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2089 return -EFAULT;
2090
2091 switch (cmd) {
2092 case CHELSIO_SET_QSET_PARAMS:{
2093 int i;
2094 struct qset_params *q;
2095 struct ch_qset_params t;
2096 int q1 = pi->first_qset;
2097 int nqsets = pi->nqsets;
2098
2099 if (!capable(CAP_NET_ADMIN))
2100 return -EPERM;
2101 if (copy_from_user(&t, useraddr, sizeof(t)))
2102 return -EFAULT;
2103 if (t.qset_idx >= SGE_QSETS)
2104 return -EINVAL;
2105 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2106 !in_range(t.cong_thres, 0, 255) ||
2107 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2108 MAX_TXQ_ENTRIES) ||
2109 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2110 MAX_TXQ_ENTRIES) ||
2111 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2112 MAX_CTRL_TXQ_ENTRIES) ||
2113 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2114 MAX_RX_BUFFERS) ||
2115 !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2116 MAX_RX_JUMBO_BUFFERS) ||
2117 !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2118 MAX_RSPQ_ENTRIES))
2119 return -EINVAL;
2120
2121 if ((adapter->flags & FULL_INIT_DONE) &&
2122 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2123 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2124 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2125 t.polling >= 0 || t.cong_thres >= 0))
2126 return -EBUSY;
2127
2128 /* Allow setting of any available qset when offload enabled */
2129 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2130 q1 = 0;
2131 for_each_port(adapter, i) {
2132 pi = adap2pinfo(adapter, i);
2133 nqsets += pi->first_qset + pi->nqsets;
2134 }
2135 }
2136
2137 if (t.qset_idx < q1)
2138 return -EINVAL;
2139 if (t.qset_idx > q1 + nqsets - 1)
2140 return -EINVAL;
2141
2142 q = &adapter->params.sge.qset[t.qset_idx];
2143
2144 if (t.rspq_size >= 0)
2145 q->rspq_size = t.rspq_size;
2146 if (t.fl_size[0] >= 0)
2147 q->fl_size = t.fl_size[0];
2148 if (t.fl_size[1] >= 0)
2149 q->jumbo_size = t.fl_size[1];
2150 if (t.txq_size[0] >= 0)
2151 q->txq_size[0] = t.txq_size[0];
2152 if (t.txq_size[1] >= 0)
2153 q->txq_size[1] = t.txq_size[1];
2154 if (t.txq_size[2] >= 0)
2155 q->txq_size[2] = t.txq_size[2];
2156 if (t.cong_thres >= 0)
2157 q->cong_thres = t.cong_thres;
2158 if (t.intr_lat >= 0) {
2159 struct sge_qset *qs =
2160 &adapter->sge.qs[t.qset_idx];
2161
2162 q->coalesce_usecs = t.intr_lat;
2163 t3_update_qset_coalesce(qs, q);
2164 }
2165 if (t.polling >= 0) {
2166 if (adapter->flags & USING_MSIX)
2167 q->polling = t.polling;
2168 else {
2169 /* No polling with INTx for T3A */
2170 if (adapter->params.rev == 0 &&
2171 !(adapter->flags & USING_MSI))
2172 t.polling = 0;
2173
2174 for (i = 0; i < SGE_QSETS; i++) {
2175					q = &adapter->params.sge.qset[i];
2177 q->polling = t.polling;
2178 }
2179 }
2180 }
2181
2182 if (t.lro >= 0) {
2183 if (t.lro)
2184 dev->wanted_features |= NETIF_F_GRO;
2185 else
2186 dev->wanted_features &= ~NETIF_F_GRO;
2187 netdev_update_features(dev);
2188 }
2189
2190 break;
2191 }
2192 case CHELSIO_GET_QSET_PARAMS:{
2193 struct qset_params *q;
2194 struct ch_qset_params t;
2195 int q1 = pi->first_qset;
2196 int nqsets = pi->nqsets;
2197 int i;
2198
2199 if (copy_from_user(&t, useraddr, sizeof(t)))
2200 return -EFAULT;
2201
2202 /* Display qsets for all ports when offload enabled */
2203 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2204 q1 = 0;
2205 for_each_port(adapter, i) {
2206 pi = adap2pinfo(adapter, i);
2207 nqsets = pi->first_qset + pi->nqsets;
2208 }
2209 }
2210
2211 if (t.qset_idx >= nqsets)
2212 return -EINVAL;
2213
2214 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2215 t.rspq_size = q->rspq_size;
2216 t.txq_size[0] = q->txq_size[0];
2217 t.txq_size[1] = q->txq_size[1];
2218 t.txq_size[2] = q->txq_size[2];
2219 t.fl_size[0] = q->fl_size;
2220 t.fl_size[1] = q->jumbo_size;
2221 t.polling = q->polling;
2222 t.lro = !!(dev->features & NETIF_F_GRO);
2223 t.intr_lat = q->coalesce_usecs;
2224 t.cong_thres = q->cong_thres;
2225 t.qnum = q1;
2226
2227 if (adapter->flags & USING_MSIX)
2228 t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2229 else
2230 t.vector = adapter->pdev->irq;
2231
2232 if (copy_to_user(useraddr, &t, sizeof(t)))
2233 return -EFAULT;
2234 break;
2235 }
2236 case CHELSIO_SET_QSET_NUM:{
2237 struct ch_reg edata;
2238 unsigned int i, first_qset = 0, other_qsets = 0;
2239
2240 if (!capable(CAP_NET_ADMIN))
2241 return -EPERM;
2242 if (adapter->flags & FULL_INIT_DONE)
2243 return -EBUSY;
2244 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2245 return -EFAULT;
2246 if (edata.val < 1 ||
2247 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2248 return -EINVAL;
2249
2250 for_each_port(adapter, i)
2251 if (adapter->port[i] && adapter->port[i] != dev)
2252 other_qsets += adap2pinfo(adapter, i)->nqsets;
2253
2254 if (edata.val + other_qsets > SGE_QSETS)
2255 return -EINVAL;
2256
2257 pi->nqsets = edata.val;
2258
2259 for_each_port(adapter, i)
2260 if (adapter->port[i]) {
2261 pi = adap2pinfo(adapter, i);
2262 pi->first_qset = first_qset;
2263 first_qset += pi->nqsets;
2264 }
2265 break;
2266 }
2267 case CHELSIO_GET_QSET_NUM:{
2268 struct ch_reg edata;
2269
2270 memset(&edata, 0, sizeof(struct ch_reg));
2271
2272 edata.cmd = CHELSIO_GET_QSET_NUM;
2273 edata.val = pi->nqsets;
2274 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2275 return -EFAULT;
2276 break;
2277 }
2278 case CHELSIO_LOAD_FW:{
2279 u8 *fw_data;
2280 struct ch_mem_range t;
2281
2282 if (!capable(CAP_SYS_RAWIO))
2283 return -EPERM;
2284 if (copy_from_user(&t, useraddr, sizeof(t)))
2285 return -EFAULT;
2286 /* Check t.len sanity ? */
2287 fw_data = memdup_user(useraddr + sizeof(t), t.len);
2288 if (IS_ERR(fw_data))
2289 return PTR_ERR(fw_data);
2290
2291 ret = t3_load_fw(adapter, fw_data, t.len);
2292 kfree(fw_data);
2293 if (ret)
2294 return ret;
2295 break;
2296 }
2297 case CHELSIO_SETMTUTAB:{
2298 struct ch_mtus m;
2299 int i;
2300
2301 if (!is_offload(adapter))
2302 return -EOPNOTSUPP;
2303 if (!capable(CAP_NET_ADMIN))
2304 return -EPERM;
2305 if (offload_running(adapter))
2306 return -EBUSY;
2307 if (copy_from_user(&m, useraddr, sizeof(m)))
2308 return -EFAULT;
2309 if (m.nmtus != NMTUS)
2310 return -EINVAL;
2311 if (m.mtus[0] < 81) /* accommodate SACK */
2312 return -EINVAL;
2313
2314 /* MTUs must be in ascending order */
2315 for (i = 1; i < NMTUS; ++i)
2316 if (m.mtus[i] < m.mtus[i - 1])
2317 return -EINVAL;
2318
2319 memcpy(adapter->params.mtus, m.mtus,
2320 sizeof(adapter->params.mtus));
2321 break;
2322 }
2323 case CHELSIO_GET_PM:{
2324 struct tp_params *p = &adapter->params.tp;
2325 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2326
2327 if (!is_offload(adapter))
2328 return -EOPNOTSUPP;
2329 m.tx_pg_sz = p->tx_pg_size;
2330 m.tx_num_pg = p->tx_num_pgs;
2331 m.rx_pg_sz = p->rx_pg_size;
2332 m.rx_num_pg = p->rx_num_pgs;
2333 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2334 if (copy_to_user(useraddr, &m, sizeof(m)))
2335 return -EFAULT;
2336 break;
2337 }
2338 case CHELSIO_SET_PM:{
2339 struct ch_pm m;
2340 struct tp_params *p = &adapter->params.tp;
2341
2342 if (!is_offload(adapter))
2343 return -EOPNOTSUPP;
2344 if (!capable(CAP_NET_ADMIN))
2345 return -EPERM;
2346 if (adapter->flags & FULL_INIT_DONE)
2347 return -EBUSY;
2348 if (copy_from_user(&m, useraddr, sizeof(m)))
2349 return -EFAULT;
2350 if (!is_power_of_2(m.rx_pg_sz) ||
2351 !is_power_of_2(m.tx_pg_sz))
2352 return -EINVAL; /* not power of 2 */
2353 if (!(m.rx_pg_sz & 0x14000))
2354 return -EINVAL; /* not 16KB or 64KB */
2355 if (!(m.tx_pg_sz & 0x1554000))
2356 return -EINVAL;
2357 if (m.tx_num_pg == -1)
2358 m.tx_num_pg = p->tx_num_pgs;
2359 if (m.rx_num_pg == -1)
2360 m.rx_num_pg = p->rx_num_pgs;
2361 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2362 return -EINVAL;
2363 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2364 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2365 return -EINVAL;
2366 p->rx_pg_size = m.rx_pg_sz;
2367 p->tx_pg_size = m.tx_pg_sz;
2368 p->rx_num_pgs = m.rx_num_pg;
2369 p->tx_num_pgs = m.tx_num_pg;
2370 break;
2371 }
2372 case CHELSIO_GET_MEM:{
2373 struct ch_mem_range t;
2374 struct mc7 *mem;
2375 u64 buf[32];
2376
2377 if (!is_offload(adapter))
2378 return -EOPNOTSUPP;
2379 if (!(adapter->flags & FULL_INIT_DONE))
2380 return -EIO; /* need the memory controllers */
2381 if (copy_from_user(&t, useraddr, sizeof(t)))
2382 return -EFAULT;
2383 if ((t.addr & 7) || (t.len & 7))
2384 return -EINVAL;
2385 if (t.mem_id == MEM_CM)
2386 mem = &adapter->cm;
2387 else if (t.mem_id == MEM_PMRX)
2388 mem = &adapter->pmrx;
2389 else if (t.mem_id == MEM_PMTX)
2390 mem = &adapter->pmtx;
2391 else
2392 return -EINVAL;
2393
2394 /*
2395 * Version scheme:
2396 * bits 0..9: chip version
2397 * bits 10..15: chip revision
2398 */
2399 t.version = 3 | (adapter->params.rev << 10);
2400 if (copy_to_user(useraddr, &t, sizeof(t)))
2401 return -EFAULT;
2402
2403 /*
2404 * Read 256 bytes at a time as len can be large and we don't
2405 * want to use huge intermediate buffers.
2406 */
2407 useraddr += sizeof(t); /* advance to start of buffer */
2408 while (t.len) {
2409 unsigned int chunk =
2410 min_t(unsigned int, t.len, sizeof(buf));
2411
2412 ret =
2413 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2414 buf);
2415 if (ret)
2416 return ret;
2417 if (copy_to_user(useraddr, buf, chunk))
2418 return -EFAULT;
2419 useraddr += chunk;
2420 t.addr += chunk;
2421 t.len -= chunk;
2422 }
2423 break;
2424 }
2425 case CHELSIO_SET_TRACE_FILTER:{
2426 struct ch_trace t;
2427 const struct trace_params *tp;
2428
2429 if (!capable(CAP_NET_ADMIN))
2430 return -EPERM;
2431 if (!offload_running(adapter))
2432 return -EAGAIN;
2433 if (copy_from_user(&t, useraddr, sizeof(t)))
2434 return -EFAULT;
2435
2436 tp = (const struct trace_params *)&t.sip;
2437 if (t.config_tx)
2438 t3_config_trace_filter(adapter, tp, 0,
2439 t.invert_match,
2440 t.trace_tx);
2441 if (t.config_rx)
2442 t3_config_trace_filter(adapter, tp, 1,
2443 t.invert_match,
2444 t.trace_rx);
2445 break;
2446 }
2447 default:
2448 return -EOPNOTSUPP;
2449 }
2450 return 0;
2451}
2452
2453static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2454{
2455 struct mii_ioctl_data *data = if_mii(req);
2456 struct port_info *pi = netdev_priv(dev);
2457 struct adapter *adapter = pi->adapter;
2458
2459 switch (cmd) {
2460 case SIOCGMIIREG:
2461 case SIOCSMIIREG:
2462 /* Convert phy_id from older PRTAD/DEVAD format */
2463 if (is_10G(adapter) &&
2464 !mdio_phy_id_is_c45(data->phy_id) &&
2465 (data->phy_id & 0x1f00) &&
2466 !(data->phy_id & 0xe0e0))
2467 data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2468 data->phy_id & 0x1f);
2469 /* FALLTHRU */
2470 case SIOCGMIIPHY:
2471 return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2472 case SIOCCHIOCTL:
2473 return cxgb_extension_ioctl(dev, req->ifr_data);
2474 default:
2475 return -EOPNOTSUPP;
2476 }
2477}
2478
2479static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2480{
2481 struct port_info *pi = netdev_priv(dev);
2482 struct adapter *adapter = pi->adapter;
2483 int ret;
2484
2485 if (new_mtu < 81) /* accommodate SACK */
2486 return -EINVAL;
2487 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2488 return ret;
2489 dev->mtu = new_mtu;
2490 init_port_mtus(adapter);
2491 if (adapter->params.rev == 0 && offload_running(adapter))
2492 t3_load_mtus(adapter, adapter->params.mtus,
2493 adapter->params.a_wnd, adapter->params.b_wnd,
2494 adapter->port[0]->mtu);
2495 return 0;
2496}
2497
2498static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2499{
2500 struct port_info *pi = netdev_priv(dev);
2501 struct adapter *adapter = pi->adapter;
2502 struct sockaddr *addr = p;
2503
2504 if (!is_valid_ether_addr(addr->sa_data))
2505 return -EINVAL;
2506
2507 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2508 t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2509 if (offload_running(adapter))
2510 write_smt_entry(adapter, pi->port_id);
2511 return 0;
2512}
2513
2514/**
2515 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2516 * @adap: the adapter
2517 * @p: the port
2518 *
2519 * Ensures that current Rx processing on any of the queues associated with
2520 * the given port completes before returning. We do this by acquiring and
2521 * releasing the locks of the response queues associated with the port.
2522 */
2523static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2524{
2525 int i;
2526
2527 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2528 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2529
2530 spin_lock_irq(&q->lock);
2531 spin_unlock_irq(&q->lock);
2532 }
2533}
2534
2535static void cxgb_vlan_mode(struct net_device *dev, u32 features)
2536{
2537 struct port_info *pi = netdev_priv(dev);
2538 struct adapter *adapter = pi->adapter;
2539
2540 if (adapter->params.rev > 0) {
2541 t3_set_vlan_accel(adapter, 1 << pi->port_id,
2542 features & NETIF_F_HW_VLAN_RX);
2543 } else {
2544 /* single control for all ports */
2545 unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX;
2546
2547 for_each_port(adapter, i)
2548 have_vlans |=
2549 adapter->port[i]->features & NETIF_F_HW_VLAN_RX;
2550
2551 t3_set_vlan_accel(adapter, 1, have_vlans);
2552 }
2553 t3_synchronize_rx(adapter, pi);
2554}
2555
2556static u32 cxgb_fix_features(struct net_device *dev, u32 features)
2557{
2558 /*
2559 * Since there is no support for separate rx/tx vlan accel
2560 * enable/disable, make sure the tx flag is always in the same state as rx.
2561 */
2562 if (features & NETIF_F_HW_VLAN_RX)
2563 features |= NETIF_F_HW_VLAN_TX;
2564 else
2565 features &= ~NETIF_F_HW_VLAN_TX;
2566
2567 return features;
2568}
2569
2570static int cxgb_set_features(struct net_device *dev, u32 features)
2571{
2572 u32 changed = dev->features ^ features;
2573
2574 if (changed & NETIF_F_HW_VLAN_RX)
2575 cxgb_vlan_mode(dev, features);
2576
2577 return 0;
2578}
2579
2580#ifdef CONFIG_NET_POLL_CONTROLLER
2581static void cxgb_netpoll(struct net_device *dev)
2582{
2583 struct port_info *pi = netdev_priv(dev);
2584 struct adapter *adapter = pi->adapter;
2585 int qidx;
2586
2587 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2588 struct sge_qset *qs = &adapter->sge.qs[qidx];
2589 void *source;
2590
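		/*
		 * MSI-X data interrupts were registered with the qset as their
		 * handler argument; INTx/MSI use the adapter itself.
		 */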
2591 if (adapter->flags & USING_MSIX)
2592 source = qs;
2593 else
2594 source = adapter;
2595
2596 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2597 }
2598}
2599#endif
2600
2601/*
2602 * Periodic accumulation of MAC statistics.
2603 */
2604static void mac_stats_update(struct adapter *adapter)
2605{
2606 int i;
2607
2608 for_each_port(adapter, i) {
2609 struct net_device *dev = adapter->port[i];
2610 struct port_info *p = netdev_priv(dev);
2611
2612 if (netif_running(dev)) {
2613 spin_lock(&adapter->stats_lock);
2614 t3_mac_update_stats(&p->mac);
2615 spin_unlock(&adapter->stats_lock);
2616 }
2617 }
2618}
2619
2620static void check_link_status(struct adapter *adapter)
2621{
2622 int i;
2623
2624 for_each_port(adapter, i) {
2625 struct net_device *dev = adapter->port[i];
2626 struct port_info *p = netdev_priv(dev);
2627 int link_fault;
2628
2629 spin_lock_irq(&adapter->work_lock);
2630 link_fault = p->link_fault;
2631 spin_unlock_irq(&adapter->work_lock);
2632
2633 if (link_fault) {
2634 t3_link_fault(adapter, i);
2635 continue;
2636 }
2637
2638 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2639 t3_xgm_intr_disable(adapter, i);
2640 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2641
2642 t3_link_changed(adapter, i);
2643 t3_xgm_intr_enable(adapter, i);
2644 }
2645 }
2646}
2647
2648static void check_t3b2_mac(struct adapter *adapter)
2649{
2650 int i;
2651
2652 if (!rtnl_trylock()) /* synchronize with ifdown */
2653 return;
2654
2655 for_each_port(adapter, i) {
2656 struct net_device *dev = adapter->port[i];
2657 struct port_info *p = netdev_priv(dev);
2658 int status;
2659
2660 if (!netif_running(dev))
2661 continue;
2662
2663 status = 0;
2664 if (netif_running(dev) && netif_carrier_ok(dev))
2665 status = t3b2_mac_watchdog_task(&p->mac);
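		/* status 1: count a Tx toggle; 2: fully reprogram the MAC */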
2666 if (status == 1)
2667 p->mac.stats.num_toggled++;
2668 else if (status == 2) {
2669 struct cmac *mac = &p->mac;
2670
2671 t3_mac_set_mtu(mac, dev->mtu);
2672 t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2673 cxgb_set_rxmode(dev);
2674 t3_link_start(&p->phy, mac, &p->link_config);
2675 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2676 t3_port_intr_enable(adapter, p->port_id);
2677 p->mac.stats.num_resets++;
2678 }
2679 }
2680 rtnl_unlock();
2681}
2682
2683
2684static void t3_adap_check_task(struct work_struct *work)
2685{
2686 struct adapter *adapter = container_of(work, struct adapter,
2687 adap_check_task.work);
2688 const struct adapter_params *p = &adapter->params;
2689 int port;
2690 unsigned int v, status, reset;
2691
2692 adapter->check_task_cnt++;
2693
2694 check_link_status(adapter);
2695
2696 /* Accumulate MAC stats if needed */
2697 if (!p->linkpoll_period ||
2698 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2699 p->stats_update_period) {
2700 mac_stats_update(adapter);
2701 adapter->check_task_cnt = 0;
2702 }
2703
2704 if (p->rev == T3_REV_B2)
2705 check_t3b2_mac(adapter);
2706
2707 /*
2708 * Scan the XGMACs to check for various conditions which we want to
2709 * monitor in a periodic polling manner rather than via an interrupt
2710 * condition. This is used for conditions which would otherwise flood
2711 * the system with interrupts and we only really need to know that the
2712 * conditions are "happening" ... For each condition we count the
2713 * detection of the condition and reset it for the next polling loop.
2714 */
2715 for_each_port(adapter, port) {
2716 struct cmac *mac = &adap2pinfo(adapter, port)->mac;
2717 u32 cause;
2718
2719 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2720 reset = 0;
2721 if (cause & F_RXFIFO_OVERFLOW) {
2722 mac->stats.rx_fifo_ovfl++;
2723 reset |= F_RXFIFO_OVERFLOW;
2724 }
2725
2726 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2727 }
2728
2729 /*
2730 * We do the same as above for FL_EMPTY interrupts.
2731 */
2732 status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2733 reset = 0;
2734
2735 if (status & F_FLEMPTY) {
2736 struct sge_qset *qs = &adapter->sge.qs[0];
2737 int i = 0;
2738
2739 reset |= F_FLEMPTY;
2740
2741 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2742 0xffff;
2743
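		/*
		 * The status word holds two FL-empty bits per qset (one per
		 * free list); walk them pairwise and bump the matching counter.
		 */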
2744 while (v) {
2745 qs->fl[i].empty += (v & 1);
2746 if (i)
2747 qs++;
2748 i ^= 1;
2749 v >>= 1;
2750 }
2751 }
2752
2753 t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2754
2755 /* Schedule the next check update if any port is active. */
2756 spin_lock_irq(&adapter->work_lock);
2757 if (adapter->open_device_map & PORT_MASK)
2758 schedule_chk_task(adapter);
2759 spin_unlock_irq(&adapter->work_lock);
2760}
2761
2762static void db_full_task(struct work_struct *work)
2763{
2764 struct adapter *adapter = container_of(work, struct adapter,
2765 db_full_task);
2766
2767 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2768}
2769
2770static void db_empty_task(struct work_struct *work)
2771{
2772 struct adapter *adapter = container_of(work, struct adapter,
2773 db_empty_task);
2774
2775 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2776}
2777
2778static void db_drop_task(struct work_struct *work)
2779{
2780 struct adapter *adapter = container_of(work, struct adapter,
2781 db_drop_task);
2782 unsigned long delay = 1000;
2783 unsigned short r;
2784
2785 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2786
2787 /*
2788 * Sleep a while before ringing the driver qset dbs.
2789 * The delay is between 1000 and 2023 usecs.
2790 */
2791 get_random_bytes(&r, 2);
2792 delay += r & 1023;
2793 set_current_state(TASK_UNINTERRUPTIBLE);
2794 schedule_timeout(usecs_to_jiffies(delay));
2795 ring_dbs(adapter);
2796}
2797
2798/*
2799 * Processes external (PHY) interrupts in process context.
2800 */
2801static void ext_intr_task(struct work_struct *work)
2802{
2803 struct adapter *adapter = container_of(work, struct adapter,
2804 ext_intr_handler_task);
2805 int i;
2806
2807 /* Disable link fault interrupts */
2808 for_each_port(adapter, i) {
2809 struct net_device *dev = adapter->port[i];
2810 struct port_info *p = netdev_priv(dev);
2811
2812 t3_xgm_intr_disable(adapter, i);
2813 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2814 }
2815
2816 /* Re-enable link fault interrupts */
2817 t3_phy_intr_handler(adapter);
2818
2819 for_each_port(adapter, i)
2820 t3_xgm_intr_enable(adapter, i);
2821
2822 /* Now reenable external interrupts */
2823 spin_lock_irq(&adapter->work_lock);
2824 if (adapter->slow_intr_mask) {
2825 adapter->slow_intr_mask |= F_T3DBG;
2826 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2827 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2828 adapter->slow_intr_mask);
2829 }
2830 spin_unlock_irq(&adapter->work_lock);
2831}
2832
2833/*
2834 * Interrupt-context handler for external (PHY) interrupts.
2835 */
2836void t3_os_ext_intr_handler(struct adapter *adapter)
2837{
2838 /*
2839 * Schedule a task to handle external interrupts as they may be slow
2840 * and we use a mutex to protect MDIO registers. We disable PHY
2841 * interrupts in the meantime and let the task reenable them when
2842 * it's done.
2843 */
2844 spin_lock(&adapter->work_lock);
2845 if (adapter->slow_intr_mask) {
2846 adapter->slow_intr_mask &= ~F_T3DBG;
2847 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2848 adapter->slow_intr_mask);
2849 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2850 }
2851 spin_unlock(&adapter->work_lock);
2852}
2853
2854void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2855{
2856 struct net_device *netdev = adapter->port[port_id];
2857 struct port_info *pi = netdev_priv(netdev);
2858
2859 spin_lock(&adapter->work_lock);
2860 pi->link_fault = 1;
2861 spin_unlock(&adapter->work_lock);
2862}
2863
2864static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2865{
2866 int i, ret = 0;
2867
2868 if (is_offload(adapter) &&
2869 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2870 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2871 offload_close(&adapter->tdev);
2872 }
2873
2874 /* Stop all ports */
2875 for_each_port(adapter, i) {
2876 struct net_device *netdev = adapter->port[i];
2877
2878 if (netif_running(netdev))
2879 __cxgb_close(netdev, on_wq);
2880 }
2881
2882 /* Stop SGE timers */
2883 t3_stop_sge_timers(adapter);
2884
2885 adapter->flags &= ~FULL_INIT_DONE;
2886
2887 if (reset)
2888 ret = t3_reset_adapter(adapter);
2889
2890 pci_disable_device(adapter->pdev);
2891
2892 return ret;
2893}
2894
2895static int t3_reenable_adapter(struct adapter *adapter)
2896{
2897 if (pci_enable_device(adapter->pdev)) {
2898 dev_err(&adapter->pdev->dev,
2899 "Cannot re-enable PCI device after reset.\n");
2900 goto err;
2901 }
2902 pci_set_master(adapter->pdev);
2903 pci_restore_state(adapter->pdev);
2904 pci_save_state(adapter->pdev);
2905
2906 /* Free sge resources */
2907 t3_free_sge_resources(adapter);
2908
2909 if (t3_replay_prep_adapter(adapter))
2910 goto err;
2911
2912 return 0;
2913err:
2914 return -1;
2915}
2916
2917static void t3_resume_ports(struct adapter *adapter)
2918{
2919 int i;
2920
2921 /* Restart the ports */
2922 for_each_port(adapter, i) {
2923 struct net_device *netdev = adapter->port[i];
2924
2925 if (netif_running(netdev)) {
2926 if (cxgb_open(netdev)) {
2927 dev_err(&adapter->pdev->dev,
2928 "can't bring device back up"
2929 " after reset\n");
2930 continue;
2931 }
2932 }
2933 }
2934
2935 if (is_offload(adapter) && !ofld_disable)
2936 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2937}
2938
2939/*
2940 * Process a fatal error: bring the ports down, reset the chip, and
2941 * bring the ports back up.
2942 */
2943static void fatal_error_task(struct work_struct *work)
2944{
2945 struct adapter *adapter = container_of(work, struct adapter,
2946 fatal_error_handler_task);
2947 int err = 0;
2948
2949 rtnl_lock();
2950 err = t3_adapter_error(adapter, 1, 1);
2951 if (!err)
2952 err = t3_reenable_adapter(adapter);
2953 if (!err)
2954 t3_resume_ports(adapter);
2955
2956 CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2957 rtnl_unlock();
2958}
2959
2960void t3_fatal_err(struct adapter *adapter)
2961{
2962 unsigned int fw_status[4];
2963
2964 if (adapter->flags & FULL_INIT_DONE) {
2965 t3_sge_stop(adapter);
2966 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2967 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2968 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2969 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2970
2971 spin_lock(&adapter->work_lock);
2972 t3_intr_disable(adapter);
2973 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2974 spin_unlock(&adapter->work_lock);
2975 }
2976 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2977 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2978 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2979 fw_status[0], fw_status[1],
2980 fw_status[2], fw_status[3]);
2981}
2982
2983/**
2984 * t3_io_error_detected - called when PCI error is detected
2985 * @pdev: Pointer to PCI device
2986 * @state: The current pci connection state
2987 *
2988 * This function is called after a PCI bus error affecting
2989 * this device has been detected.
2990 */
2991static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2992 pci_channel_state_t state)
2993{
2994 struct adapter *adapter = pci_get_drvdata(pdev);
2995
2996 if (state == pci_channel_io_perm_failure)
2997 return PCI_ERS_RESULT_DISCONNECT;
2998
2999 t3_adapter_error(adapter, 0, 0);
3000
3001 /* Request a slot reset. */
3002 return PCI_ERS_RESULT_NEED_RESET;
3003}
3004
3005/**
3006 * t3_io_slot_reset - called after the pci bus has been reset.
3007 * @pdev: Pointer to PCI device
3008 *
3009 * Restart the card from scratch, as if from a cold-boot.
3010 */
3011static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
3012{
3013 struct adapter *adapter = pci_get_drvdata(pdev);
3014
3015 if (!t3_reenable_adapter(adapter))
3016 return PCI_ERS_RESULT_RECOVERED;
3017
3018 return PCI_ERS_RESULT_DISCONNECT;
3019}
3020
3021/**
3022 * t3_io_resume - called when traffic can start flowing again.
3023 * @pdev: Pointer to PCI device
3024 *
3025 * This callback is called when the error recovery driver tells us that
3026 * its OK to resume normal operation.
3027 */
3028static void t3_io_resume(struct pci_dev *pdev)
3029{
3030 struct adapter *adapter = pci_get_drvdata(pdev);
3031
3032 CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
3033 t3_read_reg(adapter, A_PCIE_PEX_ERR));
3034
3035 t3_resume_ports(adapter);
3036}
3037
3038static struct pci_error_handlers t3_err_handler = {
3039 .error_detected = t3_io_error_detected,
3040 .slot_reset = t3_io_slot_reset,
3041 .resume = t3_io_resume,
3042};
3043
3044/*
3045 * Set the number of qsets based on the number of CPUs and the number of ports,
3046 * not to exceed the number of available qsets, assuming there are enough qsets
3047 * per port in HW.
3048 */
3049static void set_nqsets(struct adapter *adap)
3050{
3051 int i, j = 0;
3052 int num_cpus = num_online_cpus();
3053 int hwports = adap->params.nports;
3054 int nqsets = adap->msix_nvectors - 1;
3055
3056 if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
3057 if (hwports == 2 &&
3058 (hwports * nqsets > SGE_QSETS ||
3059 num_cpus >= nqsets / hwports))
3060 nqsets /= hwports;
3061 if (nqsets > num_cpus)
3062 nqsets = num_cpus;
3063 if (nqsets < 1 || hwports == 4)
3064 nqsets = 1;
3065 } else
3066 nqsets = 1;
3067
3068 for_each_port(adap, i) {
3069 struct port_info *pi = adap2pinfo(adap, i);
3070
3071 pi->first_qset = j;
3072 pi->nqsets = nqsets;
3073 j = pi->first_qset + nqsets;
3074
3075 dev_info(&adap->pdev->dev,
3076 "Port %d using %d queue sets.\n", i, nqsets);
3077 }
3078}
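
/*
 * Illustrative walk-through of set_nqsets() (figures assumed, not taken from
 * this driver): on a rev > 0, MSI-X capable adapter with 2 ports and 16
 * vectors, nqsets starts at 15; hwports * nqsets will typically exceed
 * SGE_QSETS, so nqsets is halved to 7 and then clamped to the number of
 * online CPUs (say 4), leaving port 0 with qsets 0-3 and port 1 with 4-7.
 */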
3079
3080static int __devinit cxgb_enable_msix(struct adapter *adap)
3081{
3082 struct msix_entry entries[SGE_QSETS + 1];
3083 int vectors;
3084 int i, err;
3085
3086 vectors = ARRAY_SIZE(entries);
3087 for (i = 0; i < vectors; ++i)
3088 entries[i].entry = i;
3089
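	/*
	 * pci_enable_msix() returns 0 on success, a negative errno on failure,
	 * or, when it cannot satisfy the full request, the number of vectors
	 * it could have allocated; the loop below retries with that smaller
	 * count until it either succeeds or fails outright.
	 */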
3090 while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
3091 vectors = err;
3092
3093 if (err < 0)
3094 pci_disable_msix(adap->pdev);
3095
3096 if (!err && vectors < (adap->params.nports + 1)) {
3097 pci_disable_msix(adap->pdev);
3098 err = -1;
3099 }
3100
3101 if (!err) {
3102 for (i = 0; i < vectors; ++i)
3103 adap->msix_info[i].vec = entries[i].vector;
3104 adap->msix_nvectors = vectors;
3105 }
3106
3107 return err;
3108}
3109
3110static void __devinit print_port_info(struct adapter *adap,
3111 const struct adapter_info *ai)
3112{
3113 static const char *pci_variant[] = {
3114 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
3115 };
3116
3117 int i;
3118 char buf[80];
3119
3120 if (is_pcie(adap))
3121 snprintf(buf, sizeof(buf), "%s x%d",
3122 pci_variant[adap->params.pci.variant],
3123 adap->params.pci.width);
3124 else
3125 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
3126 pci_variant[adap->params.pci.variant],
3127 adap->params.pci.speed, adap->params.pci.width);
3128
3129 for_each_port(adap, i) {
3130 struct net_device *dev = adap->port[i];
3131 const struct port_info *pi = netdev_priv(dev);
3132
3133 if (!test_bit(i, &adap->registered_device_map))
3134 continue;
3135 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
3136 dev->name, ai->desc, pi->phy.desc,
3137 is_offload(adap) ? "R" : "", adap->params.rev, buf,
3138 (adap->flags & USING_MSIX) ? " MSI-X" :
3139 (adap->flags & USING_MSI) ? " MSI" : "");
3140 if (adap->name == dev->name && adap->params.vpd.mclk)
3141 printk(KERN_INFO
3142 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
3143 adap->name, t3_mc7_size(&adap->cm) >> 20,
3144 t3_mc7_size(&adap->pmtx) >> 20,
3145 t3_mc7_size(&adap->pmrx) >> 20,
3146 adap->params.vpd.sn);
3147 }
3148}
3149
3150static const struct net_device_ops cxgb_netdev_ops = {
3151 .ndo_open = cxgb_open,
3152 .ndo_stop = cxgb_close,
3153 .ndo_start_xmit = t3_eth_xmit,
3154 .ndo_get_stats = cxgb_get_stats,
3155 .ndo_validate_addr = eth_validate_addr,
3156 .ndo_set_multicast_list = cxgb_set_rxmode,
3157 .ndo_do_ioctl = cxgb_ioctl,
3158 .ndo_change_mtu = cxgb_change_mtu,
3159 .ndo_set_mac_address = cxgb_set_mac_addr,
3160 .ndo_fix_features = cxgb_fix_features,
3161 .ndo_set_features = cxgb_set_features,
3162#ifdef CONFIG_NET_POLL_CONTROLLER
3163 .ndo_poll_controller = cxgb_netpoll,
3164#endif
3165};
3166
3167static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
3168{
3169 struct port_info *pi = netdev_priv(dev);
3170
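	/*
	 * Derive the iSCSI MAC from the port's own MAC address by setting the
	 * top bit of byte 3, so the iSCSI function gets a distinct but related
	 * address.
	 */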
3171 memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
3172 pi->iscsic.mac_addr[3] |= 0x80;
3173}
3174
3175static int __devinit init_one(struct pci_dev *pdev,
3176 const struct pci_device_id *ent)
3177{
3178 static int version_printed;
3179
3180 int i, err, pci_using_dac = 0;
3181 resource_size_t mmio_start, mmio_len;
3182 const struct adapter_info *ai;
3183 struct adapter *adapter = NULL;
3184 struct port_info *pi;
3185
3186 if (!version_printed) {
3187 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3188 ++version_printed;
3189 }
3190
3191 if (!cxgb3_wq) {
3192 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3193 if (!cxgb3_wq) {
3194 printk(KERN_ERR DRV_NAME
3195 ": cannot initialize work queue\n");
3196 return -ENOMEM;
3197 }
3198 }
3199
3200 err = pci_enable_device(pdev);
3201 if (err) {
3202 dev_err(&pdev->dev, "cannot enable PCI device\n");
3203 goto out;
3204 }
3205
3206 err = pci_request_regions(pdev, DRV_NAME);
3207 if (err) {
3208 /* Just info, some other driver may have claimed the device. */
3209 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3210 goto out_disable_device;
3211 }
3212
3213 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3214 pci_using_dac = 1;
3215 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3216 if (err) {
3217 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3218 "coherent allocations\n");
3219 goto out_release_regions;
3220 }
3221 } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3222 dev_err(&pdev->dev, "no usable DMA configuration\n");
3223 goto out_release_regions;
3224 }
3225
3226 pci_set_master(pdev);
3227 pci_save_state(pdev);
3228
3229 mmio_start = pci_resource_start(pdev, 0);
3230 mmio_len = pci_resource_len(pdev, 0);
3231 ai = t3_get_adapter_info(ent->driver_data);
3232
3233 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3234 if (!adapter) {
3235 err = -ENOMEM;
3236 goto out_release_regions;
3237 }
3238
3239 adapter->nofail_skb =
3240 alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3241 if (!adapter->nofail_skb) {
3242 dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3243 err = -ENOMEM;
3244 goto out_free_adapter;
3245 }
3246
3247 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3248 if (!adapter->regs) {
3249 dev_err(&pdev->dev, "cannot map device registers\n");
3250 err = -ENOMEM;
3251 goto out_free_adapter;
3252 }
3253
3254 adapter->pdev = pdev;
3255 adapter->name = pci_name(pdev);
3256 adapter->msg_enable = dflt_msg_enable;
3257 adapter->mmio_len = mmio_len;
3258
3259 mutex_init(&adapter->mdio_lock);
3260 spin_lock_init(&adapter->work_lock);
3261 spin_lock_init(&adapter->stats_lock);
3262
3263 INIT_LIST_HEAD(&adapter->adapter_list);
3264 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3265 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3266
3267 INIT_WORK(&adapter->db_full_task, db_full_task);
3268 INIT_WORK(&adapter->db_empty_task, db_empty_task);
3269 INIT_WORK(&adapter->db_drop_task, db_drop_task);
3270
3271 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3272
3273 for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3274 struct net_device *netdev;
3275
3276 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3277 if (!netdev) {
3278 err = -ENOMEM;
3279 goto out_free_dev;
3280 }
3281
3282 SET_NETDEV_DEV(netdev, &pdev->dev);
3283
3284 adapter->port[i] = netdev;
3285 pi = netdev_priv(netdev);
3286 pi->adapter = adapter;
3287 pi->port_id = i;
3288 netif_carrier_off(netdev);
3289 netdev->irq = pdev->irq;
3290 netdev->mem_start = mmio_start;
3291 netdev->mem_end = mmio_start + mmio_len - 1;
3292 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
3293 NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX;
3294 netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_TX;
3295 if (pci_using_dac)
3296 netdev->features |= NETIF_F_HIGHDMA;
3297
3298 netdev->netdev_ops = &cxgb_netdev_ops;
3299 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3300 }
3301
3302 pci_set_drvdata(pdev, adapter);
3303 if (t3_prep_adapter(adapter, ai, 1) < 0) {
3304 err = -ENODEV;
3305 goto out_free_dev;
3306 }
3307
3308 /*
3309 * The card is now ready to go. If any errors occur during device
3310 * registration we do not fail the whole card but rather proceed only
3311 * with the ports we manage to register successfully. However we must
3312 * register at least one net device.
3313 */
3314 for_each_port(adapter, i) {
3315 err = register_netdev(adapter->port[i]);
3316 if (err)
3317 dev_warn(&pdev->dev,
3318 "cannot register net device %s, skipping\n",
3319 adapter->port[i]->name);
3320 else {
3321 /*
3322 * Change the name we use for messages to the name of
3323 * the first successfully registered interface.
3324 */
3325 if (!adapter->registered_device_map)
3326 adapter->name = adapter->port[i]->name;
3327
3328 __set_bit(i, &adapter->registered_device_map);
3329 }
3330 }
3331 if (!adapter->registered_device_map) {
3332 dev_err(&pdev->dev, "could not register any net devices\n");
3333 goto out_free_dev;
3334 }
3335
3336 for_each_port(adapter, i)
3337 cxgb3_init_iscsi_mac(adapter->port[i]);
3338
3339 /* Driver's ready. Reflect it on LEDs */
3340 t3_led_ready(adapter);
3341
3342 if (is_offload(adapter)) {
3343 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3344 cxgb3_adapter_ofld(adapter);
3345 }
3346
3347 /* See what interrupts we'll be using */
3348 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3349 adapter->flags |= USING_MSIX;
3350 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3351 adapter->flags |= USING_MSI;
3352
3353 set_nqsets(adapter);
3354
3355 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3356 &cxgb3_attr_group);
3357
3358 for_each_port(adapter, i)
3359 cxgb_vlan_mode(adapter->port[i], adapter->port[i]->features);
3360
3361 print_port_info(adapter, ai);
3362 return 0;
3363
3364out_free_dev:
3365 iounmap(adapter->regs);
3366 for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3367 if (adapter->port[i])
3368 free_netdev(adapter->port[i]);
3369
3370out_free_adapter:
3371 kfree(adapter);
3372
3373out_release_regions:
3374 pci_release_regions(pdev);
3375out_disable_device:
3376 pci_disable_device(pdev);
3377 pci_set_drvdata(pdev, NULL);
3378out:
3379 return err;
3380}
3381
3382static void __devexit remove_one(struct pci_dev *pdev)
3383{
3384 struct adapter *adapter = pci_get_drvdata(pdev);
3385
3386 if (adapter) {
3387 int i;
3388
3389 t3_sge_stop(adapter);
3390 sysfs_remove_group(&adapter->port[0]->dev.kobj,
3391 &cxgb3_attr_group);
3392
3393 if (is_offload(adapter)) {
3394 cxgb3_adapter_unofld(adapter);
3395 if (test_bit(OFFLOAD_DEVMAP_BIT,
3396 &adapter->open_device_map))
3397 offload_close(&adapter->tdev);
3398 }
3399
3400 for_each_port(adapter, i)
3401 if (test_bit(i, &adapter->registered_device_map))
3402 unregister_netdev(adapter->port[i]);
3403
3404 t3_stop_sge_timers(adapter);
3405 t3_free_sge_resources(adapter);
3406 cxgb_disable_msi(adapter);
3407
3408 for_each_port(adapter, i)
3409 if (adapter->port[i])
3410 free_netdev(adapter->port[i]);
3411
3412 iounmap(adapter->regs);
3413 if (adapter->nofail_skb)
3414 kfree_skb(adapter->nofail_skb);
3415 kfree(adapter);
3416 pci_release_regions(pdev);
3417 pci_disable_device(pdev);
3418 pci_set_drvdata(pdev, NULL);
3419 }
3420}
3421
3422static struct pci_driver driver = {
3423 .name = DRV_NAME,
3424 .id_table = cxgb3_pci_tbl,
3425 .probe = init_one,
3426 .remove = __devexit_p(remove_one),
3427 .err_handler = &t3_err_handler,
3428};
3429
3430static int __init cxgb3_init_module(void)
3431{
3432 int ret;
3433
3434 cxgb3_offload_init();
3435
3436 ret = pci_register_driver(&driver);
3437 return ret;
3438}
3439
3440static void __exit cxgb3_cleanup_module(void)
3441{
3442 pci_unregister_driver(&driver);
3443 if (cxgb3_wq)
3444 destroy_workqueue(cxgb3_wq);
3445}
3446
3447module_init(cxgb3_init_module);
3448module_exit(cxgb3_cleanup_module);
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
new file mode 100644
index 00000000000..da5a5d9b8af
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -0,0 +1,1431 @@
1/*
2 * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/list.h>
34#include <linux/slab.h>
35#include <net/neighbour.h>
36#include <linux/notifier.h>
37#include <linux/atomic.h>
38#include <linux/proc_fs.h>
39#include <linux/if_vlan.h>
40#include <net/netevent.h>
41#include <linux/highmem.h>
42#include <linux/vmalloc.h>
43
44#include "common.h"
45#include "regs.h"
46#include "cxgb3_ioctl.h"
47#include "cxgb3_ctl_defs.h"
48#include "cxgb3_defs.h"
49#include "l2t.h"
50#include "firmware_exports.h"
51#include "cxgb3_offload.h"
52
53static LIST_HEAD(client_list);
54static LIST_HEAD(ofld_dev_list);
55static DEFINE_MUTEX(cxgb3_db_lock);
56
57static DEFINE_RWLOCK(adapter_list_lock);
58static LIST_HEAD(adapter_list);
59
60static const unsigned int MAX_ATIDS = 64 * 1024;
61static const unsigned int ATID_BASE = 0x10000;
62
63static void cxgb_neigh_update(struct neighbour *neigh);
64static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new);
65
66static inline int offload_activated(struct t3cdev *tdev)
67{
68 const struct adapter *adapter = tdev2adap(tdev);
69
70 return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
71}
72
73/**
74 * cxgb3_register_client - register an offload client
75 * @client: the client
76 *
77 * Add the client to the client list and call back the client for
78 * each activated offload device.
79 */
80void cxgb3_register_client(struct cxgb3_client *client)
81{
82 struct t3cdev *tdev;
83
84 mutex_lock(&cxgb3_db_lock);
85 list_add_tail(&client->client_list, &client_list);
86
87 if (client->add) {
88 list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
89 if (offload_activated(tdev))
90 client->add(tdev);
91 }
92 }
93 mutex_unlock(&cxgb3_db_lock);
94}
95
96EXPORT_SYMBOL(cxgb3_register_client);
97
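/*
 * Hypothetical usage sketch (illustrative only, not part of this driver):
 * an upper-layer module such as an iSCSI or RDMA driver would typically
 * register itself along these lines --
 *
 *	static struct cxgb3_client my_client = {
 *		.add	= my_add,
 *		.remove	= my_remove,
 *	};
 *
 *	cxgb3_register_client(&my_client);
 *
 * where my_client, my_add() and my_remove() are made-up names for the
 * client's own callbacks, invoked for each activated offload device as
 * described above.
 */
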
98/**
99 * cxgb3_unregister_client - unregister an offload client
100 * @client: the client
101 *
102 * Remove the client from the client list and call back the client for
103 * each activated offload device.
104 */
105void cxgb3_unregister_client(struct cxgb3_client *client)
106{
107 struct t3cdev *tdev;
108
109 mutex_lock(&cxgb3_db_lock);
110 list_del(&client->client_list);
111
112 if (client->remove) {
113 list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
114 if (offload_activated(tdev))
115 client->remove(tdev);
116 }
117 }
118 mutex_unlock(&cxgb3_db_lock);
119}
120
121EXPORT_SYMBOL(cxgb3_unregister_client);
122
123/**
124 * cxgb3_add_clients - activate registered clients for an offload device
125 * @tdev: the offload device
126 *
127 * Calls back all registered clients once an offload device is activated
128 */
129void cxgb3_add_clients(struct t3cdev *tdev)
130{
131 struct cxgb3_client *client;
132
133 mutex_lock(&cxgb3_db_lock);
134 list_for_each_entry(client, &client_list, client_list) {
135 if (client->add)
136 client->add(tdev);
137 }
138 mutex_unlock(&cxgb3_db_lock);
139}
140
141/**
142 * cxgb3_remove_clients - deactivates registered clients
143 * for an offload device
144 * @tdev: the offload device
145 *
146 * Calls back all registered clients once an offload device is deactivated
147 */
148void cxgb3_remove_clients(struct t3cdev *tdev)
149{
150 struct cxgb3_client *client;
151
152 mutex_lock(&cxgb3_db_lock);
153 list_for_each_entry(client, &client_list, client_list) {
154 if (client->remove)
155 client->remove(tdev);
156 }
157 mutex_unlock(&cxgb3_db_lock);
158}
159
160void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
161{
162 struct cxgb3_client *client;
163
164 mutex_lock(&cxgb3_db_lock);
165 list_for_each_entry(client, &client_list, client_list) {
166 if (client->event_handler)
167 client->event_handler(tdev, event, port);
168 }
169 mutex_unlock(&cxgb3_db_lock);
170}
171
172static struct net_device *get_iff_from_mac(struct adapter *adapter,
173 const unsigned char *mac,
174 unsigned int vlan)
175{
176 int i;
177
178 for_each_port(adapter, i) {
179 struct net_device *dev = adapter->port[i];
180
181 if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
182 if (vlan && vlan != VLAN_VID_MASK) {
183 rcu_read_lock();
184 dev = __vlan_find_dev_deep(dev, vlan);
185 rcu_read_unlock();
186 } else if (netif_is_bond_slave(dev)) {
187 while (dev->master)
188 dev = dev->master;
189 }
190 return dev;
191 }
192 }
193 return NULL;
194}
195
196static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
197 void *data)
198{
199 int i;
200 int ret = 0;
201 unsigned int val = 0;
202 struct ulp_iscsi_info *uiip = data;
203
204 switch (req) {
205 case ULP_ISCSI_GET_PARAMS:
206 uiip->pdev = adapter->pdev;
207 uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
208 uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
209 uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
210
211 val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ);
212 for (i = 0; i < 4; i++, val >>= 8)
213 uiip->pgsz_factor[i] = val & 0xFF;
214
215 val = t3_read_reg(adapter, A_TP_PARA_REG7);
216 uiip->max_txsz =
217 uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0)&M_PMMAXXFERLEN0,
218 (val >> S_PMMAXXFERLEN1)&M_PMMAXXFERLEN1);
219 /*
220 * On tx, the iscsi pdu has to be <= tx page size and has to
221 * fit into the Tx PM FIFO.
222 */
223 val = min(adapter->params.tp.tx_pg_size,
224 t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
225 uiip->max_txsz = min(val, uiip->max_txsz);
226
227 /* set MaxRxData to 16224 */
228 val = t3_read_reg(adapter, A_TP_PARA_REG2);
229 if ((val >> S_MAXRXDATA) != 0x3f60) {
230 val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE);
231 val |= V_MAXRXDATA(0x3f60);
232 printk(KERN_INFO
233 "%s, iscsi set MaxRxData to 16224 (0x%x).\n",
234 adapter->name, val);
235 t3_write_reg(adapter, A_TP_PARA_REG2, val);
236 }
237
238 /*
239 * On rx, the iscsi pdu has to be < the rx page size and the
240 * max rx data length programmed in TP.
241 */
242 val = min(adapter->params.tp.rx_pg_size,
243 ((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
244 S_MAXRXDATA) & M_MAXRXDATA);
245 uiip->max_rxsz = min(val, uiip->max_rxsz);
246 break;
247 case ULP_ISCSI_SET_PARAMS:
248 t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
249 /* program the ddp page sizes */
250 for (i = 0; i < 4; i++)
251 val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
252 if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) {
253 printk(KERN_INFO
254 "%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u.\n",
255 adapter->name, val, uiip->pgsz_factor[0],
256 uiip->pgsz_factor[1], uiip->pgsz_factor[2],
257 uiip->pgsz_factor[3]);
258 t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
259 }
260 break;
261 default:
262 ret = -EOPNOTSUPP;
263 }
264 return ret;
265}
266
267/* Response queue used for RDMA events. */
268#define ASYNC_NOTIF_RSPQ 0
269
270static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
271{
272 int ret = 0;
273
274 switch (req) {
275 case RDMA_GET_PARAMS: {
276 struct rdma_info *rdma = data;
277 struct pci_dev *pdev = adapter->pdev;
278
279 rdma->udbell_physbase = pci_resource_start(pdev, 2);
280 rdma->udbell_len = pci_resource_len(pdev, 2);
281 rdma->tpt_base =
282 t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
283 rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
284 rdma->pbl_base =
285 t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
286 rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
287 rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
288 rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
289 rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
290 rdma->pdev = pdev;
291 break;
292 }
293 case RDMA_CQ_OP:{
294 unsigned long flags;
295 struct rdma_cq_op *rdma = data;
296
297 /* may be called in any context */
298 spin_lock_irqsave(&adapter->sge.reg_lock, flags);
299 ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
300 rdma->credits);
301 spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
302 break;
303 }
304 case RDMA_GET_MEM:{
305 struct ch_mem_range *t = data;
306 struct mc7 *mem;
307
308 if ((t->addr & 7) || (t->len & 7))
309 return -EINVAL;
310 if (t->mem_id == MEM_CM)
311 mem = &adapter->cm;
312 else if (t->mem_id == MEM_PMRX)
313 mem = &adapter->pmrx;
314 else if (t->mem_id == MEM_PMTX)
315 mem = &adapter->pmtx;
316 else
317 return -EINVAL;
318
319 ret =
320 t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
321 (u64 *) t->buf);
322 if (ret)
323 return ret;
324 break;
325 }
326 case RDMA_CQ_SETUP:{
327 struct rdma_cq_setup *rdma = data;
328
329 spin_lock_irq(&adapter->sge.reg_lock);
330 ret =
331 t3_sge_init_cqcntxt(adapter, rdma->id,
332 rdma->base_addr, rdma->size,
333 ASYNC_NOTIF_RSPQ,
334 rdma->ovfl_mode, rdma->credits,
335 rdma->credit_thres);
336 spin_unlock_irq(&adapter->sge.reg_lock);
337 break;
338 }
339 case RDMA_CQ_DISABLE:
340 spin_lock_irq(&adapter->sge.reg_lock);
341 ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
342 spin_unlock_irq(&adapter->sge.reg_lock);
343 break;
344 case RDMA_CTRL_QP_SETUP:{
345 struct rdma_ctrlqp_setup *rdma = data;
346
347 spin_lock_irq(&adapter->sge.reg_lock);
348 ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
349 SGE_CNTXT_RDMA,
350 ASYNC_NOTIF_RSPQ,
351 rdma->base_addr, rdma->size,
352 FW_RI_TID_START, 1, 0);
353 spin_unlock_irq(&adapter->sge.reg_lock);
354 break;
355 }
356 case RDMA_GET_MIB: {
357 spin_lock(&adapter->stats_lock);
358 t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data);
359 spin_unlock(&adapter->stats_lock);
360 break;
361 }
362 default:
363 ret = -EOPNOTSUPP;
364 }
365 return ret;
366}
367
368static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
369{
370 struct adapter *adapter = tdev2adap(tdev);
371 struct tid_range *tid;
372 struct mtutab *mtup;
373 struct iff_mac *iffmacp;
374 struct ddp_params *ddpp;
375 struct adap_ports *ports;
376 struct ofld_page_info *rx_page_info;
377 struct tp_params *tp = &adapter->params.tp;
378 int i;
379
380 switch (req) {
381 case GET_MAX_OUTSTANDING_WR:
382 *(unsigned int *)data = FW_WR_NUM;
383 break;
384 case GET_WR_LEN:
385 *(unsigned int *)data = WR_FLITS;
386 break;
387 case GET_TX_MAX_CHUNK:
388 *(unsigned int *)data = 1 << 20; /* 1MB */
389 break;
390 case GET_TID_RANGE:
391 tid = data;
392 tid->num = t3_mc5_size(&adapter->mc5) -
393 adapter->params.mc5.nroutes -
394 adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
395 tid->base = 0;
396 break;
397 case GET_STID_RANGE:
398 tid = data;
399 tid->num = adapter->params.mc5.nservers;
400 tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
401 adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
402 break;
403 case GET_L2T_CAPACITY:
404 *(unsigned int *)data = 2048;
405 break;
406 case GET_MTUS:
407 mtup = data;
408 mtup->size = NMTUS;
409 mtup->mtus = adapter->params.mtus;
410 break;
411 case GET_IFF_FROM_MAC:
412 iffmacp = data;
413 iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
414 iffmacp->vlan_tag &
415 VLAN_VID_MASK);
416 break;
417 case GET_DDP_PARAMS:
418 ddpp = data;
419 ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
420 ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
421 ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
422 break;
423 case GET_PORTS:
424 ports = data;
425 ports->nports = adapter->params.nports;
426 for_each_port(adapter, i)
427 ports->lldevs[i] = adapter->port[i];
428 break;
429 case ULP_ISCSI_GET_PARAMS:
430 case ULP_ISCSI_SET_PARAMS:
431 if (!offload_running(adapter))
432 return -EAGAIN;
433 return cxgb_ulp_iscsi_ctl(adapter, req, data);
434 case RDMA_GET_PARAMS:
435 case RDMA_CQ_OP:
436 case RDMA_CQ_SETUP:
437 case RDMA_CQ_DISABLE:
438 case RDMA_CTRL_QP_SETUP:
439 case RDMA_GET_MEM:
440 case RDMA_GET_MIB:
441 if (!offload_running(adapter))
442 return -EAGAIN;
443 return cxgb_rdma_ctl(adapter, req, data);
444 case GET_RX_PAGE_INFO:
445 rx_page_info = data;
446 rx_page_info->page_size = tp->rx_pg_size;
447 rx_page_info->num = tp->rx_num_pgs;
448 break;
449 case GET_ISCSI_IPV4ADDR: {
450 struct iscsi_ipv4addr *p = data;
451 struct port_info *pi = netdev_priv(p->dev);
452 p->ipv4addr = pi->iscsi_ipv4addr;
453 break;
454 }
455 case GET_EMBEDDED_INFO: {
456 struct ch_embedded_info *e = data;
457
458 spin_lock(&adapter->stats_lock);
459 t3_get_fw_version(adapter, &e->fw_vers);
460 t3_get_tp_version(adapter, &e->tp_vers);
461 spin_unlock(&adapter->stats_lock);
462 break;
463 }
464 default:
465 return -EOPNOTSUPP;
466 }
467 return 0;
468}
469
470/*
471 * Dummy handler for Rx offload packets in case we get an offload packet before
472 * proper processing is set up. This drops the packet, as it isn't normal
473 * to get offload packets at this stage.
474 */
475static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
476 int n)
477{
478 while (n--)
479 dev_kfree_skb_any(skbs[n]);
480 return 0;
481}
482
483static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
484{
485}
486
487void cxgb3_set_dummy_ops(struct t3cdev *dev)
488{
489 dev->recv = rx_offload_blackhole;
490 dev->neigh_update = dummy_neigh_update;
491}
492
493/*
494 * Free an active-open TID.
495 */
496void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
497{
498 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
499 union active_open_entry *p = atid2entry(t, atid);
500 void *ctx = p->t3c_tid.ctx;
501
502 spin_lock_bh(&t->atid_lock);
503 p->next = t->afree;
504 t->afree = p;
505 t->atids_in_use--;
506 spin_unlock_bh(&t->atid_lock);
507
508 return ctx;
509}
510
511EXPORT_SYMBOL(cxgb3_free_atid);
512
513/*
514 * Free a server TID and return it to the free pool.
515 */
516void cxgb3_free_stid(struct t3cdev *tdev, int stid)
517{
518 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
519 union listen_entry *p = stid2entry(t, stid);
520
521 spin_lock_bh(&t->stid_lock);
522 p->next = t->sfree;
523 t->sfree = p;
524 t->stids_in_use--;
525 spin_unlock_bh(&t->stid_lock);
526}
527
528EXPORT_SYMBOL(cxgb3_free_stid);
529
530void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
531 void *ctx, unsigned int tid)
532{
533 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
534
535 t->tid_tab[tid].client = client;
536 t->tid_tab[tid].ctx = ctx;
537 atomic_inc(&t->tids_in_use);
538}
539
540EXPORT_SYMBOL(cxgb3_insert_tid);
541
542/*
543 * Populate a TID_RELEASE WR. The skb must already be properly sized.
544 */
545static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
546{
547 struct cpl_tid_release *req;
548
549 skb->priority = CPL_PRIORITY_SETUP;
550 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
551 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
552 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
553}
554
555static void t3_process_tid_release_list(struct work_struct *work)
556{
557 struct t3c_data *td = container_of(work, struct t3c_data,
558 tid_release_task);
559 struct sk_buff *skb;
560 struct t3cdev *tdev = td->dev;
561
562
563 spin_lock_bh(&td->tid_release_lock);
564 while (td->tid_release_list) {
565 struct t3c_tid_entry *p = td->tid_release_list;
566
567 td->tid_release_list = p->ctx;
568 spin_unlock_bh(&td->tid_release_lock);
569
570 skb = alloc_skb(sizeof(struct cpl_tid_release),
571 GFP_KERNEL);
572 if (!skb)
573 skb = td->nofail_skb;
574 if (!skb) {
575 spin_lock_bh(&td->tid_release_lock);
576 p->ctx = (void *)td->tid_release_list;
577 td->tid_release_list = (struct t3c_tid_entry *)p;
578 break;
579 }
580 mk_tid_release(skb, p - td->tid_maps.tid_tab);
581 cxgb3_ofld_send(tdev, skb);
582 p->ctx = NULL;
583 if (skb == td->nofail_skb)
584 td->nofail_skb =
585 alloc_skb(sizeof(struct cpl_tid_release),
586 GFP_KERNEL);
587 spin_lock_bh(&td->tid_release_lock);
588 }
589 td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1;
590 spin_unlock_bh(&td->tid_release_lock);
591
592 if (!td->nofail_skb)
593 td->nofail_skb =
594 alloc_skb(sizeof(struct cpl_tid_release),
595 GFP_KERNEL);
596}
597
598/* use ctx as a next pointer in the tid release list */
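/*
 * Entries queued here are chained through their ->ctx field (hence the casts
 * below) and drained by t3_process_tid_release_list(), which allocates a
 * CPL_TID_RELEASE skb for each entry and falls back to the preallocated
 * td->nofail_skb when that allocation fails.
 */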
599void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
600{
601 struct t3c_data *td = T3C_DATA(tdev);
602 struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];
603
604 spin_lock_bh(&td->tid_release_lock);
605 p->ctx = (void *)td->tid_release_list;
606 p->client = NULL;
607 td->tid_release_list = p;
608 if (!p->ctx || td->release_list_incomplete)
609 schedule_work(&td->tid_release_task);
610 spin_unlock_bh(&td->tid_release_lock);
611}
612
613EXPORT_SYMBOL(cxgb3_queue_tid_release);
614
615/*
616 * Remove a tid from the TID table. A client may defer processing its last
617 * CPL message if it is locked at the time it arrives, and while the message
618 * sits in the client's backlog the TID may be reused for another connection.
619 * To handle this we atomically switch the TID association if it still points
620 * to the original client context.
621 */
622void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
623{
624 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
625
626 BUG_ON(tid >= t->ntids);
627 if (tdev->type == T3A)
628 (void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
629 else {
630 struct sk_buff *skb;
631
632 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
633 if (likely(skb)) {
634 mk_tid_release(skb, tid);
635 cxgb3_ofld_send(tdev, skb);
636 t->tid_tab[tid].ctx = NULL;
637 } else
638 cxgb3_queue_tid_release(tdev, tid);
639 }
640 atomic_dec(&t->tids_in_use);
641}
642
643EXPORT_SYMBOL(cxgb3_remove_tid);
644
645int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
646 void *ctx)
647{
648 int atid = -1;
649 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
650
651 spin_lock_bh(&t->atid_lock);
652 if (t->afree &&
653 t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
654 t->ntids) {
655 union active_open_entry *p = t->afree;
656
657 atid = (p - t->atid_tab) + t->atid_base;
658 t->afree = p->next;
659 p->t3c_tid.ctx = ctx;
660 p->t3c_tid.client = client;
661 t->atids_in_use++;
662 }
663 spin_unlock_bh(&t->atid_lock);
664 return atid;
665}
666
667EXPORT_SYMBOL(cxgb3_alloc_atid);
668
669int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
670 void *ctx)
671{
672 int stid = -1;
673 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
674
675 spin_lock_bh(&t->stid_lock);
676 if (t->sfree) {
677 union listen_entry *p = t->sfree;
678
679 stid = (p - t->stid_tab) + t->stid_base;
680 t->sfree = p->next;
681 p->t3c_tid.ctx = ctx;
682 p->t3c_tid.client = client;
683 t->stids_in_use++;
684 }
685 spin_unlock_bh(&t->stid_lock);
686 return stid;
687}
688
689EXPORT_SYMBOL(cxgb3_alloc_stid);
690
691/* Get the t3cdev associated with a net_device */
692struct t3cdev *dev2t3cdev(struct net_device *dev)
693{
694 const struct port_info *pi = netdev_priv(dev);
695
696 return (struct t3cdev *)pi->adapter;
697}
698
699EXPORT_SYMBOL(dev2t3cdev);
700
701static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
702{
703 struct cpl_smt_write_rpl *rpl = cplhdr(skb);
704
705 if (rpl->status != CPL_ERR_NONE)
706 printk(KERN_ERR
707 "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
708 rpl->status, GET_TID(rpl));
709
710 return CPL_RET_BUF_DONE;
711}
712
713static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
714{
715 struct cpl_l2t_write_rpl *rpl = cplhdr(skb);
716
717 if (rpl->status != CPL_ERR_NONE)
718 printk(KERN_ERR
719 "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
720 rpl->status, GET_TID(rpl));
721
722 return CPL_RET_BUF_DONE;
723}
724
725static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
726{
727 struct cpl_rte_write_rpl *rpl = cplhdr(skb);
728
729 if (rpl->status != CPL_ERR_NONE)
730 printk(KERN_ERR
731 "Unexpected RTE_WRITE_RPL status %u for entry %u\n",
732 rpl->status, GET_TID(rpl));
733
734 return CPL_RET_BUF_DONE;
735}
736
737static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
738{
739 struct cpl_act_open_rpl *rpl = cplhdr(skb);
740 unsigned int atid = G_TID(ntohl(rpl->atid));
741 struct t3c_tid_entry *t3c_tid;
742
743 t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
744 if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
745 t3c_tid->client->handlers &&
746 t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
747 return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
748 t3c_tid->
749 ctx);
750 } else {
751 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
752 dev->name, CPL_ACT_OPEN_RPL);
753 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
754 }
755}
756
757static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
758{
759 union opcode_tid *p = cplhdr(skb);
760 unsigned int stid = G_TID(ntohl(p->opcode_tid));
761 struct t3c_tid_entry *t3c_tid;
762
763 t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
764 if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
765 t3c_tid->client->handlers[p->opcode]) {
766 return t3c_tid->client->handlers[p->opcode] (dev, skb,
767 t3c_tid->ctx);
768 } else {
769 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
770 dev->name, p->opcode);
771 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
772 }
773}
774
775static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
776{
777 union opcode_tid *p = cplhdr(skb);
778 unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
779 struct t3c_tid_entry *t3c_tid;
780
781 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
782 if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
783 t3c_tid->client->handlers[p->opcode]) {
784 return t3c_tid->client->handlers[p->opcode]
785 (dev, skb, t3c_tid->ctx);
786 } else {
787 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
788 dev->name, p->opcode);
789 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
790 }
791}
792
793static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
794{
795 struct cpl_pass_accept_req *req = cplhdr(skb);
796 unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
797 struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
798 struct t3c_tid_entry *t3c_tid;
799 unsigned int tid = GET_TID(req);
800
801 if (unlikely(tid >= t->ntids)) {
802 printk("%s: passive open TID %u too large\n",
803 dev->name, tid);
804 t3_fatal_err(tdev2adap(dev));
805 return CPL_RET_BUF_DONE;
806 }
807
808 t3c_tid = lookup_stid(t, stid);
809 if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
810 t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
811 return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
812 (dev, skb, t3c_tid->ctx);
813 } else {
814 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
815 dev->name, CPL_PASS_ACCEPT_REQ);
816 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
817 }
818}
819
820/*
821 * Returns an sk_buff for a reply CPL message of size len. If the input
822 * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
823 * is allocated. The input skb must be of size at least len. Note that this
824 * operation does not destroy the original skb data even if it decides to reuse
825 * the buffer.
826 */
827static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
828 gfp_t gfp)
829{
830 if (likely(!skb_cloned(skb))) {
831 BUG_ON(skb->len < len);
832 __skb_trim(skb, len);
833 skb_get(skb);
834 } else {
835 skb = alloc_skb(len, gfp);
836 if (skb)
837 __skb_put(skb, len);
838 }
839 return skb;
840}
841
842static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
843{
844 union opcode_tid *p = cplhdr(skb);
845 unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
846 struct t3c_tid_entry *t3c_tid;
847
848 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
849 if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
850 t3c_tid->client->handlers[p->opcode]) {
851 return t3c_tid->client->handlers[p->opcode]
852 (dev, skb, t3c_tid->ctx);
853 } else {
854 struct cpl_abort_req_rss *req = cplhdr(skb);
855 struct cpl_abort_rpl *rpl;
856 struct sk_buff *reply_skb;
857 unsigned int tid = GET_TID(req);
858 u8 cmd = req->status;
859
860 if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
861 req->status == CPL_ERR_PERSIST_NEG_ADVICE)
862 goto out;
863
864 reply_skb = cxgb3_get_cpl_reply_skb(skb,
865 sizeof(struct
866 cpl_abort_rpl),
867 GFP_ATOMIC);
868
869 if (!reply_skb) {
870 printk("do_abort_req_rss: couldn't get skb!\n");
871 goto out;
872 }
873 reply_skb->priority = CPL_PRIORITY_DATA;
874 __skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
875 rpl = cplhdr(reply_skb);
876 rpl->wr.wr_hi =
877 htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
878 rpl->wr.wr_lo = htonl(V_WR_TID(tid));
879 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
880 rpl->cmd = cmd;
881 cxgb3_ofld_send(dev, reply_skb);
882out:
883 return CPL_RET_BUF_DONE;
884 }
885}
886
887static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
888{
889 struct cpl_act_establish *req = cplhdr(skb);
890 unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
891 struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
892 struct t3c_tid_entry *t3c_tid;
893 unsigned int tid = GET_TID(req);
894
895 if (unlikely(tid >= t->ntids)) {
896 printk("%s: active establish TID %u too large\n",
897 dev->name, tid);
898 t3_fatal_err(tdev2adap(dev));
899 return CPL_RET_BUF_DONE;
900 }
901
902 t3c_tid = lookup_atid(t, atid);
903 if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
904 t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
905 return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
906 (dev, skb, t3c_tid->ctx);
907 } else {
908 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
909 dev->name, CPL_ACT_ESTABLISH);
910 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
911 }
912}
913
914static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
915{
916 struct cpl_trace_pkt *p = cplhdr(skb);
917
918 skb->protocol = htons(0xffff);
919 skb->dev = dev->lldev;
920 skb_pull(skb, sizeof(*p));
921 skb_reset_mac_header(skb);
922 netif_receive_skb(skb);
923 return 0;
924}
925
926/*
927 * These skbs are expected to come from process_responses(), where we abuse
928 * ->priority and ->csum to carry our data. NB: if ->csum ever becomes
929 * per-arch, things might get really interesting here.
930 */
931
932static inline u32 get_hwtid(struct sk_buff *skb)
933{
934 return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
935}
936
937static inline u32 get_opcode(struct sk_buff *skb)
938{
939 return G_OPCODE(ntohl((__force __be32)skb->csum));
940}
941
942static int do_term(struct t3cdev *dev, struct sk_buff *skb)
943{
944 unsigned int hwtid = get_hwtid(skb);
945 unsigned int opcode = get_opcode(skb);
946 struct t3c_tid_entry *t3c_tid;
947
948 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
949 if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
950 t3c_tid->client->handlers[opcode]) {
951 return t3c_tid->client->handlers[opcode] (dev, skb,
952 t3c_tid->ctx);
953 } else {
954 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
955 dev->name, opcode);
956 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
957 }
958}
959
960static int nb_callback(struct notifier_block *self, unsigned long event,
961 void *ctx)
962{
963 switch (event) {
964 case (NETEVENT_NEIGH_UPDATE):{
965 cxgb_neigh_update((struct neighbour *)ctx);
966 break;
967 }
968 case (NETEVENT_REDIRECT):{
969 struct netevent_redirect *nr = ctx;
970 cxgb_redirect(nr->old, nr->new);
971 cxgb_neigh_update(dst_get_neighbour(nr->new));
972 break;
973 }
974 default:
975 break;
976 }
977 return 0;
978}
979
980static struct notifier_block nb = {
981 .notifier_call = nb_callback
982};
983
984/*
985 * Process a received packet with an unknown/unexpected CPL opcode.
986 */
987static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
988{
989 printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
990 *skb->data);
991 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
992}
993
994/*
995 * Handlers for each CPL opcode
996 */
997static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];
998
999/*
1000 * Add a new handler to the CPL dispatch table. A NULL handler may be supplied
1001 * to unregister an existing handler.
1002 */
1003void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
1004{
1005 if (opcode < NUM_CPL_CMDS)
1006 cpl_handlers[opcode] = h ? h : do_bad_cpl;
1007 else
1008 printk(KERN_ERR "T3C: handler registration for "
1009 "opcode %x failed\n", opcode);
1010}
1011
1012EXPORT_SYMBOL(t3_register_cpl_handler);
1013
1014/*
1015 * T3CDEV's receive method.
1016 */
1017static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
1018{
1019 while (n--) {
1020 struct sk_buff *skb = *skbs++;
1021 unsigned int opcode = get_opcode(skb);
1022 int ret = cpl_handlers[opcode] (dev, skb);
1023
1024#if VALIDATE_TID
1025 if (ret & CPL_RET_UNKNOWN_TID) {
1026 union opcode_tid *p = cplhdr(skb);
1027
1028 printk(KERN_ERR "%s: CPL message (opcode %u) had "
1029 "unknown TID %u\n", dev->name, opcode,
1030 G_TID(ntohl(p->opcode_tid)));
1031 }
1032#endif
1033 if (ret & CPL_RET_BUF_DONE)
1034 kfree_skb(skb);
1035 }
1036 return 0;
1037}
1038
1039/*
1040 * Sends an sk_buff to a T3C driver after dealing with any active network taps.
1041 */
1042int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
1043{
1044 int r;
1045
1046 local_bh_disable();
1047 r = dev->send(dev, skb);
1048 local_bh_enable();
1049 return r;
1050}
1051
1052EXPORT_SYMBOL(cxgb3_ofld_send);
1053
1054static int is_offloading(struct net_device *dev)
1055{
1056 struct adapter *adapter;
1057 int i;
1058
1059 read_lock_bh(&adapter_list_lock);
1060 list_for_each_entry(adapter, &adapter_list, adapter_list) {
1061 for_each_port(adapter, i) {
1062 if (dev == adapter->port[i]) {
1063 read_unlock_bh(&adapter_list_lock);
1064 return 1;
1065 }
1066 }
1067 }
1068 read_unlock_bh(&adapter_list_lock);
1069 return 0;
1070}
1071
1072static void cxgb_neigh_update(struct neighbour *neigh)
1073{
1074 struct net_device *dev = neigh->dev;
1075
1076 if (dev && (is_offloading(dev))) {
1077 struct t3cdev *tdev = dev2t3cdev(dev);
1078
1079 BUG_ON(!tdev);
1080 t3_l2t_update(tdev, neigh);
1081 }
1082}
1083
1084static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
1085{
1086 struct sk_buff *skb;
1087 struct cpl_set_tcb_field *req;
1088
1089 skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
1090 if (!skb) {
1091 printk(KERN_ERR "%s: cannot allocate skb!\n", __func__);
1092 return;
1093 }
1094 skb->priority = CPL_PRIORITY_CONTROL;
1095 req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
1096 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1097 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
1098 req->reply = 0;
1099 req->cpu_idx = 0;
1100 req->word = htons(W_TCB_L2T_IX);
1101 req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
1102 req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
1103 tdev->send(tdev, skb);
1104}
1105
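/*
 * Handle a routing redirect: flows that used to resolve through @old now go
 * through @new.  Grab an L2T entry for the new next hop, let each offloaded
 * connection's client decide whether its TCB should be switched over to it,
 * and drop the temporary reference once the TID walk is done.
 */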
1106static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
1107{
1108 struct net_device *olddev, *newdev;
1109 struct tid_info *ti;
1110 struct t3cdev *tdev;
1111 u32 tid;
1112 int update_tcb;
1113 struct l2t_entry *e;
1114 struct t3c_tid_entry *te;
1115
1116 olddev = dst_get_neighbour(old)->dev;
1117 newdev = dst_get_neighbour(new)->dev;
1118 if (!is_offloading(olddev))
1119 return;
1120 if (!is_offloading(newdev)) {
1121 printk(KERN_WARNING "%s: Redirect to non-offload "
1122 "device ignored.\n", __func__);
1123 return;
1124 }
1125 tdev = dev2t3cdev(olddev);
1126 BUG_ON(!tdev);
1127 if (tdev != dev2t3cdev(newdev)) {
1128 printk(KERN_WARNING "%s: Redirect to different "
1129 "offload device ignored.\n", __func__);
1130 return;
1131 }
1132
1133 /* Add new L2T entry */
1134 e = t3_l2t_get(tdev, dst_get_neighbour(new), newdev);
1135 if (!e) {
1136 printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
1137 __func__);
1138 return;
1139 }
1140
1141 /* Walk tid table and notify clients of dst change. */
1142 ti = &(T3C_DATA(tdev))->tid_maps;
1143 for (tid = 0; tid < ti->ntids; tid++) {
1144 te = lookup_tid(ti, tid);
1145 BUG_ON(!te);
1146 if (te && te->ctx && te->client && te->client->redirect) {
1147 update_tcb = te->client->redirect(te->ctx, old, new, e);
1148 if (update_tcb) {
1149 rcu_read_lock();
1150 l2t_hold(L2DATA(tdev), e);
1151 rcu_read_unlock();
1152 set_l2t_ix(tdev, tid, e);
1153 }
1154 }
1155 }
1156 l2t_release(tdev, e);
1157}
1158
1159/*
1160 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1161 * The allocated memory is cleared.
1162 */
1163void *cxgb_alloc_mem(unsigned long size)
1164{
1165 void *p = kzalloc(size, GFP_KERNEL);
1166
1167 if (!p)
1168 p = vzalloc(size);
1169 return p;
1170}
1171
1172/*
1173 * Free memory allocated through cxgb_alloc_mem().
1174 */
1175void cxgb_free_mem(void *addr)
1176{
1177 if (is_vmalloc_addr(addr))
1178 vfree(addr);
1179 else
1180 kfree(addr);
1181}
1182
1183/*
1184 * Allocate and initialize the TID tables. Returns 0 on success.
1185 */
1186static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
1187 unsigned int natids, unsigned int nstids,
1188 unsigned int atid_base, unsigned int stid_base)
1189{
1190 unsigned long size = ntids * sizeof(*t->tid_tab) +
1191 natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);
1192
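	/*
	 * All three tables live in a single allocation, laid out back to back:
	 * ntids hardware TID entries, then nstids listen (server) entries,
	 * then natids active-open entries; the pointers below carve it up.
	 */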
1193 t->tid_tab = cxgb_alloc_mem(size);
1194 if (!t->tid_tab)
1195 return -ENOMEM;
1196
1197 t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
1198 t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
1199 t->ntids = ntids;
1200 t->nstids = nstids;
1201 t->stid_base = stid_base;
1202 t->sfree = NULL;
1203 t->natids = natids;
1204 t->atid_base = atid_base;
1205 t->afree = NULL;
1206 t->stids_in_use = t->atids_in_use = 0;
1207 atomic_set(&t->tids_in_use, 0);
1208 spin_lock_init(&t->stid_lock);
1209 spin_lock_init(&t->atid_lock);
1210
1211 /*
1212 * Setup the free lists for stid_tab and atid_tab.
1213 */
1214 if (nstids) {
1215 while (--nstids)
1216 t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
1217 t->sfree = t->stid_tab;
1218 }
1219 if (natids) {
1220 while (--natids)
1221 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
1222 t->afree = t->atid_tab;
1223 }
1224 return 0;
1225}
1226
1227static void free_tid_maps(struct tid_info *t)
1228{
1229 cxgb_free_mem(t->tid_tab);
1230}
1231
1232static inline void add_adapter(struct adapter *adap)
1233{
1234 write_lock_bh(&adapter_list_lock);
1235 list_add_tail(&adap->adapter_list, &adapter_list);
1236 write_unlock_bh(&adapter_list_lock);
1237}
1238
1239static inline void remove_adapter(struct adapter *adap)
1240{
1241 write_lock_bh(&adapter_list_lock);
1242 list_del(&adap->adapter_list);
1243 write_unlock_bh(&adapter_list_lock);
1244}
1245
1246int cxgb3_offload_activate(struct adapter *adapter)
1247{
1248 struct t3cdev *dev = &adapter->tdev;
1249 int natids, err;
1250 struct t3c_data *t;
1251 struct tid_range stid_range, tid_range;
1252 struct mtutab mtutab;
1253 unsigned int l2t_capacity;
1254
1255 t = kzalloc(sizeof(*t), GFP_KERNEL);
1256 if (!t)
1257 return -ENOMEM;
1258
1259 err = -EOPNOTSUPP;
1260 if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
1261 dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
1262 dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
1263 dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
1264 dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
1265 dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
1266 goto out_free;
1267
1268 err = -ENOMEM;
1269 RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity));
1270 if (!L2DATA(dev))
1271 goto out_free;
1272
1273 natids = min(tid_range.num / 2, MAX_ATIDS);
1274 err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
1275 stid_range.num, ATID_BASE, stid_range.base);
1276 if (err)
1277 goto out_free_l2t;
1278
1279 t->mtus = mtutab.mtus;
1280 t->nmtus = mtutab.size;
1281
1282 INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
1283 spin_lock_init(&t->tid_release_lock);
1284 INIT_LIST_HEAD(&t->list_node);
1285 t->dev = dev;
1286
1287 T3C_DATA(dev) = t;
1288 dev->recv = process_rx;
1289 dev->neigh_update = t3_l2t_update;
1290
1291 /* Register netevent handler once */
1292 if (list_empty(&adapter_list))
1293 register_netevent_notifier(&nb);
1294
1295 t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
1296 t->release_list_incomplete = 0;
1297
1298 add_adapter(adapter);
1299 return 0;
1300
1301out_free_l2t:
1302 t3_free_l2t(L2DATA(dev));
1303 rcu_assign_pointer(dev->l2opt, NULL);
1304out_free:
1305 kfree(t);
1306 return err;
1307}
1308
1309static void clean_l2_data(struct rcu_head *head)
1310{
1311 struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
1312 t3_free_l2t(d);
1313}
1314
1315
1316void cxgb3_offload_deactivate(struct adapter *adapter)
1317{
1318 struct t3cdev *tdev = &adapter->tdev;
1319 struct t3c_data *t = T3C_DATA(tdev);
1320 struct l2t_data *d;
1321
1322 remove_adapter(adapter);
1323 if (list_empty(&adapter_list))
1324 unregister_netevent_notifier(&nb);
1325
1326 free_tid_maps(&t->tid_maps);
1327 T3C_DATA(tdev) = NULL;
1328 rcu_read_lock();
1329 d = L2DATA(tdev);
1330 rcu_read_unlock();
1331 rcu_assign_pointer(tdev->l2opt, NULL);
1332 call_rcu(&d->rcu_head, clean_l2_data);
1333 if (t->nofail_skb)
1334 kfree_skb(t->nofail_skb);
1335 kfree(t);
1336}
1337
1338static inline void register_tdev(struct t3cdev *tdev)
1339{
1340 static int unit;
1341
1342 mutex_lock(&cxgb3_db_lock);
1343 snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
1344 list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
1345 mutex_unlock(&cxgb3_db_lock);
1346}
1347
1348static inline void unregister_tdev(struct t3cdev *tdev)
1349{
1350 mutex_lock(&cxgb3_db_lock);
1351 list_del(&tdev->ofld_dev_list);
1352 mutex_unlock(&cxgb3_db_lock);
1353}
1354
1355static inline int adap2type(struct adapter *adapter)
1356{
1357 int type = 0;
1358
1359 switch (adapter->params.rev) {
1360 case T3_REV_A:
1361 type = T3A;
1362 break;
1363 case T3_REV_B:
1364 case T3_REV_B2:
1365 type = T3B;
1366 break;
1367 case T3_REV_C:
1368 type = T3C;
1369 break;
1370 }
1371 return type;
1372}
1373
1374void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
1375{
1376 struct t3cdev *tdev = &adapter->tdev;
1377
1378 INIT_LIST_HEAD(&tdev->ofld_dev_list);
1379
1380 cxgb3_set_dummy_ops(tdev);
1381 tdev->send = t3_offload_tx;
1382 tdev->ctl = cxgb_offload_ctl;
1383 tdev->type = adap2type(adapter);
1384
1385 register_tdev(tdev);
1386}
1387
1388void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
1389{
1390 struct t3cdev *tdev = &adapter->tdev;
1391
1392 tdev->recv = NULL;
1393 tdev->neigh_update = NULL;
1394
1395 unregister_tdev(tdev);
1396}
1397
1398void __init cxgb3_offload_init(void)
1399{
1400 int i;
1401
1402 for (i = 0; i < NUM_CPL_CMDS; ++i)
1403 cpl_handlers[i] = do_bad_cpl;
1404
1405 t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
1406 t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
1407 t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
1408 t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
1409 t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
1410 t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
1411 t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
1412 t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
1413 t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
1414 t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
1415 t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
1416 t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
1417 t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
1418 t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
1419 t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
1420 t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
1421 t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
1422 t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
1423 t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
1424 t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
1425 t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
1426 t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
1427 t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
1428 t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
1429 t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
1430 t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
1431}
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
new file mode 100644
index 00000000000..929c298115c
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -0,0 +1,209 @@
1/*
2 * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef _CXGB3_OFFLOAD_H
33#define _CXGB3_OFFLOAD_H
34
35#include <linux/list.h>
36#include <linux/skbuff.h>
37
38#include "l2t.h"
39
40#include "t3cdev.h"
41#include "t3_cpl.h"
42
43struct adapter;
44
45void cxgb3_offload_init(void);
46
47void cxgb3_adapter_ofld(struct adapter *adapter);
48void cxgb3_adapter_unofld(struct adapter *adapter);
49int cxgb3_offload_activate(struct adapter *adapter);
50void cxgb3_offload_deactivate(struct adapter *adapter);
51
52void cxgb3_set_dummy_ops(struct t3cdev *dev);
53
54struct t3cdev *dev2t3cdev(struct net_device *dev);
55
56/*
57 * Client registration. Users of T3 driver must register themselves.
58 * The T3 driver will call the add function of every client for each T3
59 * adapter activated, passing up the t3cdev ptr. Each client fills out an
60 * array of callback functions to process CPL messages.
61 */
62
63void cxgb3_register_client(struct cxgb3_client *client);
64void cxgb3_unregister_client(struct cxgb3_client *client);
65void cxgb3_add_clients(struct t3cdev *tdev);
66void cxgb3_remove_clients(struct t3cdev *tdev);
67void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port);
68
69typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev,
70 struct sk_buff *skb, void *ctx);
71
72enum {
73 OFFLOAD_STATUS_UP,
74 OFFLOAD_STATUS_DOWN,
75 OFFLOAD_PORT_DOWN,
76 OFFLOAD_PORT_UP,
77 OFFLOAD_DB_FULL,
78 OFFLOAD_DB_EMPTY,
79 OFFLOAD_DB_DROP
80};
81
82struct cxgb3_client {
83 char *name;
84 void (*add) (struct t3cdev *);
85 void (*remove) (struct t3cdev *);
86 cxgb3_cpl_handler_func *handlers;
87 int (*redirect)(void *ctx, struct dst_entry *old,
88 struct dst_entry *new, struct l2t_entry *l2t);
89 struct list_head client_list;
90 void (*event_handler)(struct t3cdev *tdev, u32 event, u32 port);
91};
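As a rough illustration of the client interface declared above, a minimal hypothetical offload client might look like the sketch below. The example_* names are invented and not part of this patch; the handler table is indexed by CPL opcode, and each handler returns the CPL_RET_* flags defined later in this header.

#include <linux/module.h>
#include <linux/skbuff.h>
#include "cxgb3_offload.h"

/* Hypothetical handler: consume an ACT_ESTABLISH CPL and tell the core
 * that the buffer may be freed. */
static int example_act_establish(struct t3cdev *dev, struct sk_buff *skb,
				 void *ctx)
{
	return CPL_RET_BUF_DONE;
}

/* Per-opcode dispatch table; only the opcodes this client cares about are
 * filled in, the rest stay NULL. */
static cxgb3_cpl_handler_func example_handlers[NUM_CPL_CMDS];

static void example_add(struct t3cdev *tdev)
{
	/* called once for every activated T3 adapter */
}

static void example_remove(struct t3cdev *tdev)
{
}

static struct cxgb3_client example_client = {
	.name     = "example",
	.add      = example_add,
	.remove   = example_remove,
	.handlers = example_handlers,
};

static int __init example_init(void)
{
	example_handlers[CPL_ACT_ESTABLISH] = example_act_establish;
	cxgb3_register_client(&example_client);
	return 0;
}

static void __exit example_exit(void)
{
	cxgb3_unregister_client(&example_client);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");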
92
93/*
94 * TID allocation services.
95 */
96int cxgb3_alloc_atid(struct t3cdev *dev, struct cxgb3_client *client,
97 void *ctx);
98int cxgb3_alloc_stid(struct t3cdev *dev, struct cxgb3_client *client,
99 void *ctx);
100void *cxgb3_free_atid(struct t3cdev *dev, int atid);
101void cxgb3_free_stid(struct t3cdev *dev, int stid);
102void cxgb3_insert_tid(struct t3cdev *dev, struct cxgb3_client *client,
103 void *ctx, unsigned int tid);
104void cxgb3_queue_tid_release(struct t3cdev *dev, unsigned int tid);
105void cxgb3_remove_tid(struct t3cdev *dev, void *ctx, unsigned int tid);
106
107struct t3c_tid_entry {
108 struct cxgb3_client *client;
109 void *ctx;
110};
111
112/* CPL message priority levels */
113enum {
114 CPL_PRIORITY_DATA = 0, /* data messages */
115 CPL_PRIORITY_SETUP = 1, /* connection setup messages */
116 CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */
117 CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */
118 CPL_PRIORITY_ACK = 1, /* RX ACK messages */
119 CPL_PRIORITY_CONTROL = 1 /* offload control messages */
120};
121
122/* Flags for return value of CPL message handlers */
123enum {
124 CPL_RET_BUF_DONE = 1, /* buffer processing done, buffer may be freed */
125 CPL_RET_BAD_MSG = 2, /* bad CPL message (e.g., unknown opcode) */
126 CPL_RET_UNKNOWN_TID = 4 /* unexpected unknown TID */
127};
128
129typedef int (*cpl_handler_func)(struct t3cdev *dev, struct sk_buff *skb);
130
131/*
132 * Returns a pointer to the first byte of the CPL header in an sk_buff that
133 * contains a CPL message.
134 */
135static inline void *cplhdr(struct sk_buff *skb)
136{
137 return skb->data;
138}
139
140void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h);
141
142union listen_entry {
143 struct t3c_tid_entry t3c_tid;
144 union listen_entry *next;
145};
146
147union active_open_entry {
148 struct t3c_tid_entry t3c_tid;
149 union active_open_entry *next;
150};
151
152/*
153 * Holds the size, base address, free list start, etc. of the TID, server TID,
154 * and active-open TID tables for an offload device.
155 * The tables themselves are allocated dynamically.
156 */
157struct tid_info {
158 struct t3c_tid_entry *tid_tab;
159 unsigned int ntids;
160 atomic_t tids_in_use;
161
162 union listen_entry *stid_tab;
163 unsigned int nstids;
164 unsigned int stid_base;
165
166 union active_open_entry *atid_tab;
167 unsigned int natids;
168 unsigned int atid_base;
169
170 /*
171 * The following members are accessed R/W so we put them in their own
172 * cache lines.
173 *
174 * XXX We could combine the atid fields above with the lock here since
175 * atids are used once (unlike other tids). OTOH the above fields are
176 * usually in cache due to tid_tab.
177 */
178 spinlock_t atid_lock ____cacheline_aligned_in_smp;
179 union active_open_entry *afree;
180 unsigned int atids_in_use;
181
182 spinlock_t stid_lock ____cacheline_aligned;
183 union listen_entry *sfree;
184 unsigned int stids_in_use;
185};
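The union-based free lists above work by threading unused entries through the union's next pointer; allocation pops the head under the matching lock. Below is a simplified, hypothetical sketch of a server-TID allocator that mirrors (but does not copy) the stid allocation helper in cxgb3_offload.c.

#include <linux/spinlock.h>
#include "cxgb3_offload.h"

/* Hypothetical helper: pop a free server TID entry and bind it to a client.
 * Returns the stid, or -1 if the free list is empty. */
static int example_alloc_stid(struct tid_info *t, struct cxgb3_client *client,
			      void *ctx)
{
	int stid = -1;

	spin_lock_bh(&t->stid_lock);
	if (t->sfree) {
		union listen_entry *p = t->sfree;

		stid = (p - t->stid_tab) + t->stid_base;
		t->sfree = p->next;		/* unlink from the free list */
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}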
186
187struct t3c_data {
188 struct list_head list_node;
189 struct t3cdev *dev;
190 unsigned int tx_max_chunk; /* max payload for TX_DATA */
191 unsigned int max_wrs; /* max in-flight WRs per connection */
192 unsigned int nmtus;
193 const unsigned short *mtus;
194 struct tid_info tid_maps;
195
196 struct t3c_tid_entry *tid_release_list;
197 spinlock_t tid_release_lock;
198 struct work_struct tid_release_task;
199
200 struct sk_buff *nofail_skb;
201 unsigned int release_list_incomplete;
202};
203
204/*
205 * t3cdev -> t3c_data accessor
206 */
207#define T3C_DATA(dev) (*(struct t3c_data **)&(dev)->l4opt)
208
209#endif
diff --git a/drivers/net/cxgb3/firmware_exports.h b/drivers/net/cxgb3/firmware_exports.h
new file mode 100644
index 00000000000..0d9b0e6dccf
--- /dev/null
+++ b/drivers/net/cxgb3/firmware_exports.h
@@ -0,0 +1,177 @@
1/*
2 * Copyright (c) 2004-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef _FIRMWARE_EXPORTS_H_
33#define _FIRMWARE_EXPORTS_H_
34
35/* WR OPCODES supported by the firmware.
36 */
37#define FW_WROPCODE_FORWARD 0x01
38#define FW_WROPCODE_BYPASS 0x05
39
40#define FW_WROPCODE_TUNNEL_TX_PKT 0x03
41
42#define FW_WROPOCDE_ULPTX_DATA_SGL 0x00
43#define FW_WROPCODE_ULPTX_MEM_READ 0x02
44#define FW_WROPCODE_ULPTX_PKT 0x04
45#define FW_WROPCODE_ULPTX_INVALIDATE 0x06
46
47#define FW_WROPCODE_TUNNEL_RX_PKT 0x07
48
49#define FW_WROPCODE_OFLD_GETTCB_RPL 0x08
50#define FW_WROPCODE_OFLD_CLOSE_CON 0x09
51#define FW_WROPCODE_OFLD_TP_ABORT_CON_REQ 0x0A
52#define FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL 0x0F
53#define FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ 0x0B
54#define FW_WROPCODE_OFLD_TP_ABORT_CON_RPL 0x0C
55#define FW_WROPCODE_OFLD_TX_DATA 0x0D
56#define FW_WROPCODE_OFLD_TX_DATA_ACK 0x0E
57
58#define FW_WROPCODE_RI_RDMA_INIT 0x10
59#define FW_WROPCODE_RI_RDMA_WRITE 0x11
60#define FW_WROPCODE_RI_RDMA_READ_REQ 0x12
61#define FW_WROPCODE_RI_RDMA_READ_RESP 0x13
62#define FW_WROPCODE_RI_SEND 0x14
63#define FW_WROPCODE_RI_TERMINATE 0x15
64#define FW_WROPCODE_RI_RDMA_READ 0x16
65#define FW_WROPCODE_RI_RECEIVE 0x17
66#define FW_WROPCODE_RI_BIND_MW 0x18
67#define FW_WROPCODE_RI_FASTREGISTER_MR 0x19
68#define FW_WROPCODE_RI_LOCAL_INV 0x1A
69#define FW_WROPCODE_RI_MODIFY_QP 0x1B
70#define FW_WROPCODE_RI_BYPASS 0x1C
71
72#define FW_WROPOCDE_RSVD 0x1E
73
74#define FW_WROPCODE_SGE_EGRESSCONTEXT_RR 0x1F
75
76#define FW_WROPCODE_MNGT 0x1D
77#define FW_MNGTOPCODE_PKTSCHED_SET 0x00
78
79/* Maximum size of a WR sent from the host, limited by the SGE.
80 *
81 * Note: WRs coming from ULP or TP are limited only by the CIM.
82 */
83#define FW_WR_SIZE 128
84
85/* Maximum number of outstanding WRs sent from the host. Value must be
86 * programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by
87 * offload modules to limit the number of WRs per connection.
88 */
89#define FW_T3_WR_NUM 16
90#define FW_N3_WR_NUM 7
91
92#ifndef N3
93# define FW_WR_NUM FW_T3_WR_NUM
94#else
95# define FW_WR_NUM FW_N3_WR_NUM
96#endif
97
98/* FW_TUNNEL_NUM corresponds to the number of supported TUNNEL Queues. These
99 * queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START and must
100 * start at 'TID' (or 'uP Token') FW_TUNNEL_TID_START.
101 *
102 * Ingress Traffic (e.g. DMA completion credit) for TUNNEL Queue[i] is sent
103 * to RESP Queue[i].
104 */
105#define FW_TUNNEL_NUM 8
106#define FW_TUNNEL_SGEEC_START 8
107#define FW_TUNNEL_TID_START 65544
108
109/* FW_CTRL_NUM corresponds to the number of supported CTRL Queues. These queues
110 * must start at SGE Egress Context FW_CTRL_SGEEC_START and must start at 'TID'
111 * (or 'uP Token') FW_CTRL_TID_START.
112 *
113 * Ingress Traffic for CTRL Queue[i] is sent to RESP Queue[i].
114 */
115#define FW_CTRL_NUM 8
116#define FW_CTRL_SGEEC_START 65528
117#define FW_CTRL_TID_START 65536
118
119/* FW_OFLD_NUM corresponds to the number of supported OFFLOAD Queues. These
120 * queues must start at SGE Egress Context FW_OFLD_SGEEC_START.
121 *
122 * Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for
123 * OFFLOAD Queues, as the host is responsible for providing the correct TID in
124 * every WR.
125 *
126 * Ingress Traffic for OFFLOAD Queue[i] is sent to RESP Queue[i].
127 */
128#define FW_OFLD_NUM 8
129#define FW_OFLD_SGEEC_START 0
130
131/*
132 *
133 */
134#define FW_RI_NUM 1
135#define FW_RI_SGEEC_START 65527
136#define FW_RI_TID_START 65552
137
138/*
139 * The RX_PKT_TID
140 */
141#define FW_RX_PKT_NUM 1
142#define FW_RX_PKT_TID_START 65553
143
144/* FW_WRC_NUM corresponds to the number of Work Request Contexts supported
145 * by the firmware.
146 */
147#define FW_WRC_NUM \
148 (65536 + FW_TUNNEL_NUM + FW_CTRL_NUM + FW_RI_NUM + FW_RX_PKT_NUM)
149
150/*
151 * FW type and version.
152 */
153#define S_FW_VERSION_TYPE 28
154#define M_FW_VERSION_TYPE 0xF
155#define V_FW_VERSION_TYPE(x) ((x) << S_FW_VERSION_TYPE)
156#define G_FW_VERSION_TYPE(x) \
157 (((x) >> S_FW_VERSION_TYPE) & M_FW_VERSION_TYPE)
158
159#define S_FW_VERSION_MAJOR 16
160#define M_FW_VERSION_MAJOR 0xFFF
161#define V_FW_VERSION_MAJOR(x) ((x) << S_FW_VERSION_MAJOR)
162#define G_FW_VERSION_MAJOR(x) \
163 (((x) >> S_FW_VERSION_MAJOR) & M_FW_VERSION_MAJOR)
164
165#define S_FW_VERSION_MINOR 8
166#define M_FW_VERSION_MINOR 0xFF
167#define V_FW_VERSION_MINOR(x) ((x) << S_FW_VERSION_MINOR)
168#define G_FW_VERSION_MINOR(x) \
169 (((x) >> S_FW_VERSION_MINOR) & M_FW_VERSION_MINOR)
170
171#define S_FW_VERSION_MICRO 0
172#define M_FW_VERSION_MICRO 0xFF
173#define V_FW_VERSION_MICRO(x) ((x) << S_FW_VERSION_MICRO)
174#define G_FW_VERSION_MICRO(x) \
175 (((x) >> S_FW_VERSION_MICRO) & M_FW_VERSION_MICRO)
176
177#endif /* _FIRMWARE_EXPORTS_H_ */
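The version accessors above follow the usual S_/M_/V_/G_ pattern (shift, mask, set, get). Since the header contains only macros, it can be exercised stand-alone; the type and version numbers below are made up purely for illustration.

#include <stdio.h>
#include "firmware_exports.h"

int main(void)
{
	/* Pack a hypothetical type-1, 7.12.0 version into one 32-bit word. */
	unsigned int vers = V_FW_VERSION_TYPE(1) |
			    V_FW_VERSION_MAJOR(7) |
			    V_FW_VERSION_MINOR(12) |
			    V_FW_VERSION_MICRO(0);

	/* ... and unpack it again with the G_ accessors. */
	printf("type %u, version %u.%u.%u\n",
	       G_FW_VERSION_TYPE(vers),
	       G_FW_VERSION_MAJOR(vers),
	       G_FW_VERSION_MINOR(vers),
	       G_FW_VERSION_MICRO(vers));
	return 0;
}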
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
new file mode 100644
index 00000000000..41540978a17
--- /dev/null
+++ b/drivers/net/cxgb3/l2t.c
@@ -0,0 +1,454 @@
1/*
2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/skbuff.h>
33#include <linux/netdevice.h>
34#include <linux/if.h>
35#include <linux/if_vlan.h>
36#include <linux/jhash.h>
37#include <linux/slab.h>
38#include <net/neighbour.h>
39#include "common.h"
40#include "t3cdev.h"
41#include "cxgb3_defs.h"
42#include "l2t.h"
43#include "t3_cpl.h"
44#include "firmware_exports.h"
45
46#define VLAN_NONE 0xfff
47
48/*
49 * Module locking notes: There is a RW lock protecting the L2 table as a
50 * whole plus a spinlock per L2T entry. Entry lookups and allocations happen
51 * under the protection of the table lock, individual entry changes happen
52 * while holding that entry's spinlock. The table lock nests outside the
53 * entry locks. Allocations of new entries take the table lock as writers so
54 * no other lookups can happen while allocating new entries. Entry updates
55 * take the table lock as readers so multiple entries can be updated in
56 * parallel. An L2T entry can be dropped by decrementing its reference count
57 * and therefore can happen in parallel with entry allocation but no entry
58 * can change state or increment its ref count during allocation as both of
59 * these perform lookups.
60 */
61
62static inline unsigned int vlan_prio(const struct l2t_entry *e)
63{
64 return e->vlan >> 13;
65}
66
67static inline unsigned int arp_hash(u32 key, int ifindex,
68 const struct l2t_data *d)
69{
70 return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
71}
72
73static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
74{
75 neigh_hold(n);
76 if (e->neigh)
77 neigh_release(e->neigh);
78 e->neigh = n;
79}
80
81/*
82 * Set up an L2T entry and send any packets waiting in the arp queue. The
83 * supplied skb is used for the CPL_L2T_WRITE_REQ. Must be called with the
84 * entry locked.
85 */
86static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
87 struct l2t_entry *e)
88{
89 struct cpl_l2t_write_req *req;
90 struct sk_buff *tmp;
91
92 if (!skb) {
93 skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
94 if (!skb)
95 return -ENOMEM;
96 }
97
98 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
99 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
100 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
101 req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
102 V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
103 V_L2T_W_PRIO(vlan_prio(e)));
104 memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
105 memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
106 skb->priority = CPL_PRIORITY_CONTROL;
107 cxgb3_ofld_send(dev, skb);
108
109 skb_queue_walk_safe(&e->arpq, skb, tmp) {
110 __skb_unlink(skb, &e->arpq);
111 cxgb3_ofld_send(dev, skb);
112 }
113 e->state = L2T_STATE_VALID;
114
115 return 0;
116}
117
118/*
119 * Add a packet to an L2T entry's queue of packets awaiting resolution.
120 * Must be called with the entry's lock held.
121 */
122static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
123{
124 __skb_queue_tail(&e->arpq, skb);
125}
126
127int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
128 struct l2t_entry *e)
129{
130again:
131 switch (e->state) {
132 case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
133 neigh_event_send(e->neigh, NULL);
134 spin_lock_bh(&e->lock);
135 if (e->state == L2T_STATE_STALE)
136 e->state = L2T_STATE_VALID;
137 spin_unlock_bh(&e->lock);
138 case L2T_STATE_VALID: /* fast-path, send the packet on */
139 return cxgb3_ofld_send(dev, skb);
140 case L2T_STATE_RESOLVING:
141 spin_lock_bh(&e->lock);
142 if (e->state != L2T_STATE_RESOLVING) {
143 /* ARP already completed */
144 spin_unlock_bh(&e->lock);
145 goto again;
146 }
147 arpq_enqueue(e, skb);
148 spin_unlock_bh(&e->lock);
149
150 /*
151 * Only the first packet added to the arpq should kick off
152 * resolution. However, because the alloc_skb below can fail,
153 * we allow each packet added to the arpq to retry resolution
154 * as a way of recovering from transient memory exhaustion.
155 * A better way would be to use a work request to retry L2T
156 * entries when there's no memory.
157 */
158 if (!neigh_event_send(e->neigh, NULL)) {
159 skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
160 GFP_ATOMIC);
161 if (!skb)
162 break;
163
164 spin_lock_bh(&e->lock);
165 if (!skb_queue_empty(&e->arpq))
166 setup_l2e_send_pending(dev, skb, e);
167 else /* we lost the race */
168 __kfree_skb(skb);
169 spin_unlock_bh(&e->lock);
170 }
171 }
172 return 0;
173}
174
175EXPORT_SYMBOL(t3_l2t_send_slow);
176
177void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
178{
179again:
180 switch (e->state) {
181 case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
182 neigh_event_send(e->neigh, NULL);
183 spin_lock_bh(&e->lock);
184 if (e->state == L2T_STATE_STALE) {
185 e->state = L2T_STATE_VALID;
186 }
187 spin_unlock_bh(&e->lock);
188 return;
189 case L2T_STATE_VALID: /* fast-path, send the packet on */
190 return;
191 case L2T_STATE_RESOLVING:
192 spin_lock_bh(&e->lock);
193 if (e->state != L2T_STATE_RESOLVING) {
194 /* ARP already completed */
195 spin_unlock_bh(&e->lock);
196 goto again;
197 }
198 spin_unlock_bh(&e->lock);
199
200 /*
201 * Only the first packet added to the arpq should kick off
202 * resolution. However, because the alloc_skb below can fail,
203 * we allow each packet added to the arpq to retry resolution
204 * as a way of recovering from transient memory exhaustion.
205 * A better way would be to use a work request to retry L2T
206 * entries when there's no memory.
207 */
208 neigh_event_send(e->neigh, NULL);
209 }
210}
211
212EXPORT_SYMBOL(t3_l2t_send_event);
213
214/*
215 * Allocate a free L2T entry. Must be called with l2t_data.lock held.
216 */
217static struct l2t_entry *alloc_l2e(struct l2t_data *d)
218{
219 struct l2t_entry *end, *e, **p;
220
221 if (!atomic_read(&d->nfree))
222 return NULL;
223
224 /* there's definitely a free entry */
225 for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
226 if (atomic_read(&e->refcnt) == 0)
227 goto found;
228
229 for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e) ;
230found:
231 d->rover = e + 1;
232 atomic_dec(&d->nfree);
233
234 /*
235 * The entry we found may be an inactive entry that is
236 * presently in the hash table. We need to remove it.
237 */
238 if (e->state != L2T_STATE_UNUSED) {
239 int hash = arp_hash(e->addr, e->ifindex, d);
240
241 for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
242 if (*p == e) {
243 *p = e->next;
244 break;
245 }
246 e->state = L2T_STATE_UNUSED;
247 }
248 return e;
249}
250
251/*
252 * Called when an L2T entry has no more users. The entry is left in the hash
253 * table since it is likely to be reused but we also bump nfree to indicate
254 * that the entry can be reallocated for a different neighbor. We also drop
255 * the existing neighbor reference in case the neighbor is going away and is
256 * waiting on our reference.
257 *
258 * Because entries can be reallocated to other neighbors once their ref count
259 * drops to 0 we need to take the entry's lock to avoid races with a new
260 * incarnation.
261 */
262void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
263{
264 spin_lock_bh(&e->lock);
265 if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
266 if (e->neigh) {
267 neigh_release(e->neigh);
268 e->neigh = NULL;
269 }
270 }
271 spin_unlock_bh(&e->lock);
272 atomic_inc(&d->nfree);
273}
274
275EXPORT_SYMBOL(t3_l2e_free);
276
277/*
278 * Update an L2T entry that was previously used for the same next hop as neigh.
279 * Must be called with softirqs disabled.
280 */
281static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
282{
283 unsigned int nud_state;
284
285 spin_lock(&e->lock); /* avoid race with t3_l2t_free */
286
287 if (neigh != e->neigh)
288 neigh_replace(e, neigh);
289 nud_state = neigh->nud_state;
290 if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
291 !(nud_state & NUD_VALID))
292 e->state = L2T_STATE_RESOLVING;
293 else if (nud_state & NUD_CONNECTED)
294 e->state = L2T_STATE_VALID;
295 else
296 e->state = L2T_STATE_STALE;
297 spin_unlock(&e->lock);
298}
299
300struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
301 struct net_device *dev)
302{
303 struct l2t_entry *e = NULL;
304 struct l2t_data *d;
305 int hash;
306 u32 addr = *(u32 *) neigh->primary_key;
307 int ifidx = neigh->dev->ifindex;
308 struct port_info *p = netdev_priv(dev);
309 int smt_idx = p->port_id;
310
311 rcu_read_lock();
312 d = L2DATA(cdev);
313 if (!d)
314 goto done_rcu;
315
316 hash = arp_hash(addr, ifidx, d);
317
318 write_lock_bh(&d->lock);
319 for (e = d->l2tab[hash].first; e; e = e->next)
320 if (e->addr == addr && e->ifindex == ifidx &&
321 e->smt_idx == smt_idx) {
322 l2t_hold(d, e);
323 if (atomic_read(&e->refcnt) == 1)
324 reuse_entry(e, neigh);
325 goto done;
326 }
327
328 /* Need to allocate a new entry */
329 e = alloc_l2e(d);
330 if (e) {
331 spin_lock(&e->lock); /* avoid race with t3_l2t_free */
332 e->next = d->l2tab[hash].first;
333 d->l2tab[hash].first = e;
334 e->state = L2T_STATE_RESOLVING;
335 e->addr = addr;
336 e->ifindex = ifidx;
337 e->smt_idx = smt_idx;
338 atomic_set(&e->refcnt, 1);
339 neigh_replace(e, neigh);
340 if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
341 e->vlan = vlan_dev_vlan_id(neigh->dev);
342 else
343 e->vlan = VLAN_NONE;
344 spin_unlock(&e->lock);
345 }
346done:
347 write_unlock_bh(&d->lock);
348done_rcu:
349 rcu_read_unlock();
350 return e;
351}
352
353EXPORT_SYMBOL(t3_l2t_get);
354
355/*
356 * Called when address resolution fails for an L2T entry to handle packets
357 * on the arpq head. If a packet specifies a failure handler it is invoked,
358 * otherwise the packet is sent to the offload device.
359 *
360 * XXX: maybe we should abandon the latter behavior and just require a failure
361 * handler.
362 */
363static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff_head *arpq)
364{
365 struct sk_buff *skb, *tmp;
366
367 skb_queue_walk_safe(arpq, skb, tmp) {
368 struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
369
370 __skb_unlink(skb, arpq);
371 if (cb->arp_failure_handler)
372 cb->arp_failure_handler(dev, skb);
373 else
374 cxgb3_ofld_send(dev, skb);
375 }
376}
377
378/*
379 * Called when the host's ARP layer makes a change to some entry that is
380 * loaded into the HW L2 table.
381 */
382void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
383{
384 struct sk_buff_head arpq;
385 struct l2t_entry *e;
386 struct l2t_data *d = L2DATA(dev);
387 u32 addr = *(u32 *) neigh->primary_key;
388 int ifidx = neigh->dev->ifindex;
389 int hash = arp_hash(addr, ifidx, d);
390
391 read_lock_bh(&d->lock);
392 for (e = d->l2tab[hash].first; e; e = e->next)
393 if (e->addr == addr && e->ifindex == ifidx) {
394 spin_lock(&e->lock);
395 goto found;
396 }
397 read_unlock_bh(&d->lock);
398 return;
399
400found:
401 __skb_queue_head_init(&arpq);
402
403 read_unlock(&d->lock);
404 if (atomic_read(&e->refcnt)) {
405 if (neigh != e->neigh)
406 neigh_replace(e, neigh);
407
408 if (e->state == L2T_STATE_RESOLVING) {
409 if (neigh->nud_state & NUD_FAILED) {
410 skb_queue_splice_init(&e->arpq, &arpq);
411 } else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
412 setup_l2e_send_pending(dev, NULL, e);
413 } else {
414 e->state = neigh->nud_state & NUD_CONNECTED ?
415 L2T_STATE_VALID : L2T_STATE_STALE;
416 if (memcmp(e->dmac, neigh->ha, 6))
417 setup_l2e_send_pending(dev, NULL, e);
418 }
419 }
420 spin_unlock_bh(&e->lock);
421
422 if (!skb_queue_empty(&arpq))
423 handle_failed_resolution(dev, &arpq);
424}
425
426struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
427{
428 struct l2t_data *d;
429 int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
430
431 d = cxgb_alloc_mem(size);
432 if (!d)
433 return NULL;
434
435 d->nentries = l2t_capacity;
436 d->rover = &d->l2tab[1]; /* entry 0 is not used */
437 atomic_set(&d->nfree, l2t_capacity - 1);
438 rwlock_init(&d->lock);
439
440 for (i = 0; i < l2t_capacity; ++i) {
441 d->l2tab[i].idx = i;
442 d->l2tab[i].state = L2T_STATE_UNUSED;
443 __skb_queue_head_init(&d->l2tab[i].arpq);
444 spin_lock_init(&d->l2tab[i].lock);
445 atomic_set(&d->l2tab[i].refcnt, 0);
446 }
447 return d;
448}
449
450void t3_free_l2t(struct l2t_data *d)
451{
452 cxgb_free_mem(d);
453}
454
diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
new file mode 100644
index 00000000000..c5f54796e2c
--- /dev/null
+++ b/drivers/net/cxgb3/l2t.h
@@ -0,0 +1,149 @@
1/*
2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef _CHELSIO_L2T_H
33#define _CHELSIO_L2T_H
34
35#include <linux/spinlock.h>
36#include "t3cdev.h"
37#include <linux/atomic.h>
38
39enum {
40 L2T_STATE_VALID, /* entry is up to date */
41 L2T_STATE_STALE, /* entry may be used but needs revalidation */
42 L2T_STATE_RESOLVING, /* entry needs address resolution */
43 L2T_STATE_UNUSED /* entry not in use */
44};
45
46struct neighbour;
47struct sk_buff;
48
49/*
50 * Each L2T entry plays multiple roles. First of all, it keeps state for the
51 * corresponding entry of the HW L2 table and maintains a queue of offload
52 * packets awaiting address resolution. Second, it is a node of a hash table
53 * chain, where the nodes of the chain are linked together through their next
54 * pointer. Finally, each node is a bucket of a hash table, pointing to the
55 * first element in its chain through its first pointer.
56 */
57struct l2t_entry {
58 u16 state; /* entry state */
59 u16 idx; /* entry index */
60 u32 addr; /* dest IP address */
61 int ifindex; /* neighbor's net_device's ifindex */
62 u16 smt_idx; /* SMT index */
63 u16 vlan; /* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
64 struct neighbour *neigh; /* associated neighbour */
65 struct l2t_entry *first; /* start of hash chain */
66 struct l2t_entry *next; /* next l2t_entry on chain */
67 struct sk_buff_head arpq; /* queue of packets awaiting resolution */
68 spinlock_t lock;
69 atomic_t refcnt; /* entry reference count */
70 u8 dmac[6]; /* neighbour's MAC address */
71};
72
73struct l2t_data {
74 unsigned int nentries; /* number of entries */
75 struct l2t_entry *rover; /* starting point for next allocation */
76 atomic_t nfree; /* number of free entries */
77 rwlock_t lock;
78 struct l2t_entry l2tab[0];
79 struct rcu_head rcu_head; /* to handle rcu cleanup */
80};
81
82typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
83 struct sk_buff * skb);
84
85/*
86 * Callback stored in an skb to handle address resolution failure.
87 */
88struct l2t_skb_cb {
89 arp_failure_handler_func arp_failure_handler;
90};
91
92#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
93
94static inline void set_arp_failure_handler(struct sk_buff *skb,
95 arp_failure_handler_func hnd)
96{
97 L2T_SKB_CB(skb)->arp_failure_handler = hnd;
98}
99
100/*
101 * Getting to the L2 data from an offload device.
102 */
103#define L2DATA(cdev) (rcu_dereference((cdev)->l2opt))
104
105#define W_TCB_L2T_IX 0
106#define S_TCB_L2T_IX 7
107#define M_TCB_L2T_IX 0x7ffULL
108#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
109
110void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
111void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
112struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
113 struct net_device *dev);
114int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
115 struct l2t_entry *e);
116void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
117struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
118void t3_free_l2t(struct l2t_data *d);
119
120int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
121
122static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
123 struct l2t_entry *e)
124{
125 if (likely(e->state == L2T_STATE_VALID))
126 return cxgb3_ofld_send(dev, skb);
127 return t3_l2t_send_slow(dev, skb, e);
128}
129
130static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e)
131{
132 struct l2t_data *d;
133
134 rcu_read_lock();
135 d = L2DATA(t);
136
137 if (atomic_dec_and_test(&e->refcnt) && d)
138 t3_l2e_free(d, e);
139
140 rcu_read_unlock();
141}
142
143static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
144{
145 if (d && atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
146 atomic_dec(&d->nfree);
147}
148
149#endif
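Pulling the declarations above together, a typical client flow is: look up (or allocate) an entry with t3_l2t_get(), attach an ARP failure handler to each skb, transmit through l2t_send(), and drop the reference with l2t_release(). A condensed, hypothetical sketch follows; real clients normally hold the entry for the lifetime of a connection rather than per packet.

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/neighbour.h>
#include "l2t.h"

/* Hypothetical ARP-failure callback: simply drop the packet. */
static void example_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	kfree_skb(skb);
}

static int example_send_via_l2t(struct t3cdev *tdev, struct neighbour *neigh,
				struct net_device *egress_dev,
				struct sk_buff *skb)
{
	struct l2t_entry *e;
	int ret;

	e = t3_l2t_get(tdev, neigh, egress_dev);	/* takes a reference */
	if (!e) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	set_arp_failure_handler(skb, example_arp_failure);
	ret = l2t_send(tdev, skb, e);	/* fast path if the entry is VALID */

	l2t_release(tdev, e);		/* drop our reference */
	return ret;
}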
diff --git a/drivers/net/cxgb3/mc5.c b/drivers/net/cxgb3/mc5.c
new file mode 100644
index 00000000000..e13b7fe9d08
--- /dev/null
+++ b/drivers/net/cxgb3/mc5.c
@@ -0,0 +1,438 @@
1/*
2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33#include "regs.h"
34
35enum {
36 IDT75P52100 = 4,
37 IDT75N43102 = 5
38};
39
40/* DBGI command mode */
41enum {
42 DBGI_MODE_MBUS = 0,
43 DBGI_MODE_IDT52100 = 5
44};
45
46/* IDT 75P52100 commands */
47#define IDT_CMD_READ 0
48#define IDT_CMD_WRITE 1
49#define IDT_CMD_SEARCH 2
50#define IDT_CMD_LEARN 3
51
52/* IDT LAR register address and value for 144-bit mode (low 32 bits) */
53#define IDT_LAR_ADR0 0x180006
54#define IDT_LAR_MODE144 0xffff0000
55
56/* IDT SCR and SSR addresses (low 32 bits) */
57#define IDT_SCR_ADR0 0x180000
58#define IDT_SSR0_ADR0 0x180002
59#define IDT_SSR1_ADR0 0x180004
60
61/* IDT GMR base address (low 32 bits) */
62#define IDT_GMR_BASE_ADR0 0x180020
63
64/* IDT data and mask array base addresses (low 32 bits) */
65#define IDT_DATARY_BASE_ADR0 0
66#define IDT_MSKARY_BASE_ADR0 0x80000
67
68/* IDT 75N43102 commands */
69#define IDT4_CMD_SEARCH144 3
70#define IDT4_CMD_WRITE 4
71#define IDT4_CMD_READ 5
72
73/* IDT 75N43102 SCR address (low 32 bits) */
74#define IDT4_SCR_ADR0 0x3
75
76/* IDT 75N43102 GMR base addresses (low 32 bits) */
77#define IDT4_GMR_BASE0 0x10
78#define IDT4_GMR_BASE1 0x20
79#define IDT4_GMR_BASE2 0x30
80
81/* IDT 75N43102 data and mask array base addresses (low 32 bits) */
82#define IDT4_DATARY_BASE_ADR0 0x1000000
83#define IDT4_MSKARY_BASE_ADR0 0x2000000
84
85#define MAX_WRITE_ATTEMPTS 5
86
87#define MAX_ROUTES 2048
88
89/*
90 * Issue a command to the TCAM and wait for its completion. The address and
91 * any data required by the command must have been set up by the caller.
92 */
93static int mc5_cmd_write(struct adapter *adapter, u32 cmd)
94{
95 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_CMD, cmd);
96 return t3_wait_op_done(adapter, A_MC5_DB_DBGI_RSP_STATUS,
97 F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
98}
99
100static inline void dbgi_wr_addr3(struct adapter *adapter, u32 v1, u32 v2,
101 u32 v3)
102{
103 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, v1);
104 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR1, v2);
105 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR2, v3);
106}
107
108static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
109 u32 v3)
110{
111 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA0, v1);
112 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA1, v2);
113 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3);
114}
115
116static inline void dbgi_rd_rsp3(struct adapter *adapter, u32 *v1, u32 *v2,
117 u32 *v3)
118{
119 *v1 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA0);
120 *v2 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA1);
121 *v3 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA2);
122}
123
124/*
125 * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
126 * command cmd. The data to be written must have been set up by the caller.
127 * Returns -1 on failure, 0 on success.
128 */
129static int mc5_write(struct adapter *adapter, u32 addr_lo, u32 cmd)
130{
131 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, addr_lo);
132 if (mc5_cmd_write(adapter, cmd) == 0)
133 return 0;
134 CH_ERR(adapter, "MC5 timeout writing to TCAM address 0x%x\n",
135 addr_lo);
136 return -1;
137}
138
139static int init_mask_data_array(struct mc5 *mc5, u32 mask_array_base,
140 u32 data_array_base, u32 write_cmd,
141 int addr_shift)
142{
143 unsigned int i;
144 struct adapter *adap = mc5->adapter;
145
146 /*
147 * We need the size of the TCAM data and mask arrays in terms of
148 * 72-bit entries.
149 */
150 unsigned int size72 = mc5->tcam_size;
151 unsigned int server_base = t3_read_reg(adap, A_MC5_DB_SERVER_INDEX);
152
153 if (mc5->mode == MC5_MODE_144_BIT) {
154 size72 *= 2; /* 1 144-bit entry is 2 72-bit entries */
155 server_base *= 2;
156 }
157
158 /* Clear the data array */
159 dbgi_wr_data3(adap, 0, 0, 0);
160 for (i = 0; i < size72; i++)
161 if (mc5_write(adap, data_array_base + (i << addr_shift),
162 write_cmd))
163 return -1;
164
165 /* Initialize the mask array. */
166 dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
167 for (i = 0; i < size72; i++) {
168 if (i == server_base) /* entering server or routing region */
169 t3_write_reg(adap, A_MC5_DB_DBGI_REQ_DATA0,
170 mc5->mode == MC5_MODE_144_BIT ?
171 0xfffffff9 : 0xfffffffd);
172 if (mc5_write(adap, mask_array_base + (i << addr_shift),
173 write_cmd))
174 return -1;
175 }
176 return 0;
177}
178
179static int init_idt52100(struct mc5 *mc5)
180{
181 int i;
182 struct adapter *adap = mc5->adapter;
183
184 t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
185 V_RDLAT(0x15) | V_LRNLAT(0x15) | V_SRCHLAT(0x15));
186 t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 2);
187
188 /*
189 * Use GMRs 14-15 for ELOOKUP, GMRs 12-13 for SYN lookups, and
190 * GMRs 8-9 for ACK- and AOPEN searches.
191 */
192 t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT_CMD_WRITE);
193 t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT_CMD_WRITE);
194 t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, IDT_CMD_SEARCH);
195 t3_write_reg(adap, A_MC5_DB_AOPEN_LRN_CMD, IDT_CMD_LEARN);
196 t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT_CMD_SEARCH | 0x6000);
197 t3_write_reg(adap, A_MC5_DB_SYN_LRN_CMD, IDT_CMD_LEARN);
198 t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT_CMD_SEARCH);
199 t3_write_reg(adap, A_MC5_DB_ACK_LRN_CMD, IDT_CMD_LEARN);
200 t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT_CMD_SEARCH);
201 t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT_CMD_SEARCH | 0x7000);
202 t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT_CMD_WRITE);
203 t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT_CMD_READ);
204
205 /* Set DBGI command mode for IDT TCAM. */
206 t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
207
208 /* Set up LAR */
209 dbgi_wr_data3(adap, IDT_LAR_MODE144, 0, 0);
210 if (mc5_write(adap, IDT_LAR_ADR0, IDT_CMD_WRITE))
211 goto err;
212
213 /* Set up SSRs */
214 dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0);
215 if (mc5_write(adap, IDT_SSR0_ADR0, IDT_CMD_WRITE) ||
216 mc5_write(adap, IDT_SSR1_ADR0, IDT_CMD_WRITE))
217 goto err;
218
219 /* Set up GMRs */
220 for (i = 0; i < 32; ++i) {
221 if (i >= 12 && i < 15)
222 dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
223 else if (i == 15)
224 dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
225 else
226 dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
227
228 if (mc5_write(adap, IDT_GMR_BASE_ADR0 + i, IDT_CMD_WRITE))
229 goto err;
230 }
231
232 /* Set up SCR */
233 dbgi_wr_data3(adap, 1, 0, 0);
234 if (mc5_write(adap, IDT_SCR_ADR0, IDT_CMD_WRITE))
235 goto err;
236
237 return init_mask_data_array(mc5, IDT_MSKARY_BASE_ADR0,
238 IDT_DATARY_BASE_ADR0, IDT_CMD_WRITE, 0);
239err:
240 return -EIO;
241}
242
243static int init_idt43102(struct mc5 *mc5)
244{
245 int i;
246 struct adapter *adap = mc5->adapter;
247
248 t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
249 adap->params.rev == 0 ? V_RDLAT(0xd) | V_SRCHLAT(0x11) :
250 V_RDLAT(0xd) | V_SRCHLAT(0x12));
251
252 /*
253 * Use GMRs 24-25 for ELOOKUP, GMRs 20-21 for SYN lookups, and no mask
254 * for ACK- and AOPEN searches.
255 */
256 t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT4_CMD_WRITE);
257 t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT4_CMD_WRITE);
258 t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD,
259 IDT4_CMD_SEARCH144 | 0x3800);
260 t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT4_CMD_SEARCH144);
261 t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT4_CMD_SEARCH144 | 0x3800);
262 t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x3800);
263 t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x800);
264 t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT4_CMD_WRITE);
265 t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT4_CMD_READ);
266
267 t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 3);
268
269 /* Set DBGI command mode for IDT TCAM. */
270 t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
271
272 /* Set up GMRs */
273 dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
274 for (i = 0; i < 7; ++i)
275 if (mc5_write(adap, IDT4_GMR_BASE0 + i, IDT4_CMD_WRITE))
276 goto err;
277
278 for (i = 0; i < 4; ++i)
279 if (mc5_write(adap, IDT4_GMR_BASE2 + i, IDT4_CMD_WRITE))
280 goto err;
281
282 dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
283 if (mc5_write(adap, IDT4_GMR_BASE1, IDT4_CMD_WRITE) ||
284 mc5_write(adap, IDT4_GMR_BASE1 + 1, IDT4_CMD_WRITE) ||
285 mc5_write(adap, IDT4_GMR_BASE1 + 4, IDT4_CMD_WRITE))
286 goto err;
287
288 dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
289 if (mc5_write(adap, IDT4_GMR_BASE1 + 5, IDT4_CMD_WRITE))
290 goto err;
291
292 /* Set up SCR */
293 dbgi_wr_data3(adap, 0xf0000000, 0, 0);
294 if (mc5_write(adap, IDT4_SCR_ADR0, IDT4_CMD_WRITE))
295 goto err;
296
297 return init_mask_data_array(mc5, IDT4_MSKARY_BASE_ADR0,
298 IDT4_DATARY_BASE_ADR0, IDT4_CMD_WRITE, 1);
299err:
300 return -EIO;
301}
302
303/* Put MC5 in DBGI mode. */
304static inline void mc5_dbgi_mode_enable(const struct mc5 *mc5)
305{
306 t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
307 V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_DBGIEN);
308}
309
310/* Put MC5 in M-Bus mode. */
311static void mc5_dbgi_mode_disable(const struct mc5 *mc5)
312{
313 t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
314 V_TMMODE(mc5->mode == MC5_MODE_72_BIT) |
315 V_COMPEN(mc5->mode == MC5_MODE_72_BIT) |
316 V_PRTYEN(mc5->parity_enabled) | F_MBUSEN);
317}
318
319/*
320 * Initialization that requires the OS and protocol layers to already
321 * be initialized goes here.
322 */
323int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
324 unsigned int nroutes)
325{
326 u32 cfg;
327 int err;
328 unsigned int tcam_size = mc5->tcam_size;
329 struct adapter *adap = mc5->adapter;
330
331 if (!tcam_size)
332 return 0;
333
334 if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size)
335 return -EINVAL;
336
337 /* Reset the TCAM */
338 cfg = t3_read_reg(adap, A_MC5_DB_CONFIG) & ~F_TMMODE;
339 cfg |= V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_TMRST;
340 t3_write_reg(adap, A_MC5_DB_CONFIG, cfg);
341 if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) {
342 CH_ERR(adap, "TCAM reset timed out\n");
343 return -1;
344 }
345
346 t3_write_reg(adap, A_MC5_DB_ROUTING_TABLE_INDEX, tcam_size - nroutes);
347 t3_write_reg(adap, A_MC5_DB_FILTER_TABLE,
348 tcam_size - nroutes - nfilters);
349 t3_write_reg(adap, A_MC5_DB_SERVER_INDEX,
350 tcam_size - nroutes - nfilters - nservers);
351
352 mc5->parity_enabled = 1;
353
354 /* All the TCAM addresses we access have only the low 32 bits non-zero */
355 t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0);
356 t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0);
357
358 mc5_dbgi_mode_enable(mc5);
359
360 switch (mc5->part_type) {
361 case IDT75P52100:
362 err = init_idt52100(mc5);
363 break;
364 case IDT75N43102:
365 err = init_idt43102(mc5);
366 break;
367 default:
368 CH_ERR(adap, "Unsupported TCAM type %d\n", mc5->part_type);
369 err = -EINVAL;
370 break;
371 }
372
373 mc5_dbgi_mode_disable(mc5);
374 return err;
375}
376
377
378#define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR)
379
380/*
381 * MC5 interrupt handler
382 */
383void t3_mc5_intr_handler(struct mc5 *mc5)
384{
385 struct adapter *adap = mc5->adapter;
386 u32 cause = t3_read_reg(adap, A_MC5_DB_INT_CAUSE);
387
388 if ((cause & F_PARITYERR) && mc5->parity_enabled) {
389 CH_ALERT(adap, "MC5 parity error\n");
390 mc5->stats.parity_err++;
391 }
392
393 if (cause & F_REQQPARERR) {
394 CH_ALERT(adap, "MC5 request queue parity error\n");
395 mc5->stats.reqq_parity_err++;
396 }
397
398 if (cause & F_DISPQPARERR) {
399 CH_ALERT(adap, "MC5 dispatch queue parity error\n");
400 mc5->stats.dispq_parity_err++;
401 }
402
403 if (cause & F_ACTRGNFULL)
404 mc5->stats.active_rgn_full++;
405 if (cause & F_NFASRCHFAIL)
406 mc5->stats.nfa_srch_err++;
407 if (cause & F_UNKNOWNCMD)
408 mc5->stats.unknown_cmd++;
409 if (cause & F_DELACTEMPTY)
410 mc5->stats.del_act_empty++;
411 if (cause & MC5_INT_FATAL)
412 t3_fatal_err(adap);
413
414 t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause);
415}
416
417void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode)
418{
419#define K * 1024
420
421 static unsigned int tcam_part_size[] = { /* in K 72-bit entries */
422 64 K, 128 K, 256 K, 32 K
423 };
424
425#undef K
426
427 u32 cfg = t3_read_reg(adapter, A_MC5_DB_CONFIG);
428
429 mc5->adapter = adapter;
430 mc5->mode = (unsigned char)mode;
431 mc5->part_type = (unsigned char)G_TMTYPE(cfg);
432 if (cfg & F_TMTYPEHI)
433 mc5->part_type |= 4;
434
435 mc5->tcam_size = tcam_part_size[G_TMPARTSIZE(cfg)];
436 if (mode == MC5_MODE_144_BIT)
437 mc5->tcam_size /= 2;
438}
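To make the sizing above concrete with hypothetical numbers: a part reporting G_TMPARTSIZE(cfg) == 1 has 128K 72-bit entries, so in 144-bit mode t3_mc5_prep() sets tcam_size = 65536. If t3_mc5_init() is then called with nroutes = 2048, nfilters = 8192 and nservers = 16384, the routing region starts at index 65536 - 2048 = 63488, the filter region at 63488 - 8192 = 55296, and the server region at 55296 - 16384 = 38912.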
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
new file mode 100644
index 00000000000..6990f6c6522
--- /dev/null
+++ b/drivers/net/cxgb3/regs.h
@@ -0,0 +1,2598 @@
1#define A_SG_CONTROL 0x0
2
3#define S_CONGMODE 29
4#define V_CONGMODE(x) ((x) << S_CONGMODE)
5#define F_CONGMODE V_CONGMODE(1U)
6
7#define S_TNLFLMODE 28
8#define V_TNLFLMODE(x) ((x) << S_TNLFLMODE)
9#define F_TNLFLMODE V_TNLFLMODE(1U)
10
11#define S_FATLPERREN 27
12#define V_FATLPERREN(x) ((x) << S_FATLPERREN)
13#define F_FATLPERREN V_FATLPERREN(1U)
14
15#define S_DROPPKT 20
16#define V_DROPPKT(x) ((x) << S_DROPPKT)
17#define F_DROPPKT V_DROPPKT(1U)
18
19#define S_EGRGENCTRL 19
20#define V_EGRGENCTRL(x) ((x) << S_EGRGENCTRL)
21#define F_EGRGENCTRL V_EGRGENCTRL(1U)
22
23#define S_USERSPACESIZE 14
24#define M_USERSPACESIZE 0x1f
25#define V_USERSPACESIZE(x) ((x) << S_USERSPACESIZE)
26
27#define S_HOSTPAGESIZE 11
28#define M_HOSTPAGESIZE 0x7
29#define V_HOSTPAGESIZE(x) ((x) << S_HOSTPAGESIZE)
30
31#define S_FLMODE 9
32#define V_FLMODE(x) ((x) << S_FLMODE)
33#define F_FLMODE V_FLMODE(1U)
34
35#define S_PKTSHIFT 6
36#define M_PKTSHIFT 0x7
37#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT)
38
39#define S_ONEINTMULTQ 5
40#define V_ONEINTMULTQ(x) ((x) << S_ONEINTMULTQ)
41#define F_ONEINTMULTQ V_ONEINTMULTQ(1U)
42
43#define S_BIGENDIANINGRESS 2
44#define V_BIGENDIANINGRESS(x) ((x) << S_BIGENDIANINGRESS)
45#define F_BIGENDIANINGRESS V_BIGENDIANINGRESS(1U)
46
47#define S_ISCSICOALESCING 1
48#define V_ISCSICOALESCING(x) ((x) << S_ISCSICOALESCING)
49#define F_ISCSICOALESCING V_ISCSICOALESCING(1U)
50
51#define S_GLOBALENABLE 0
52#define V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE)
53#define F_GLOBALENABLE V_GLOBALENABLE(1U)
54
55#define S_AVOIDCQOVFL 24
56#define V_AVOIDCQOVFL(x) ((x) << S_AVOIDCQOVFL)
57#define F_AVOIDCQOVFL V_AVOIDCQOVFL(1U)
58
59#define S_OPTONEINTMULTQ 23
60#define V_OPTONEINTMULTQ(x) ((x) << S_OPTONEINTMULTQ)
61#define F_OPTONEINTMULTQ V_OPTONEINTMULTQ(1U)
62
63#define S_CQCRDTCTRL 22
64#define V_CQCRDTCTRL(x) ((x) << S_CQCRDTCTRL)
65#define F_CQCRDTCTRL V_CQCRDTCTRL(1U)
66
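All of regs.h follows one naming convention: A_* is a register address, S_* a field's bit offset, M_* its mask, V_*(x) shifts a value into place, F_* is a single-bit flag, and G_*(x) (where provided) extracts a field. Below is a hypothetical example using only the A_SG_CONTROL fields defined above; the values written are arbitrary and purely illustrative.

#include "common.h"
#include "regs.h"

static void example_sg_control(struct adapter *adapter)
{
	u32 ctrl;
	unsigned int pktshift;

	/* Compose a value: F_ macros set one-bit flags, V_ macros place a
	 * value into a multi-bit field (width given by the matching M_ mask). */
	ctrl = F_GLOBALENABLE | V_HOSTPAGESIZE(2) | V_PKTSHIFT(2);
	t3_write_reg(adapter, A_SG_CONTROL, ctrl);

	/* Extract a field: shift by S_ and mask with M_ (some fields also
	 * provide a ready-made G_ accessor for this). */
	pktshift = (t3_read_reg(adapter, A_SG_CONTROL) >> S_PKTSHIFT) &
		   M_PKTSHIFT;
	(void)pktshift;
}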
67#define A_SG_KDOORBELL 0x4
68
69#define S_SELEGRCNTX 31
70#define V_SELEGRCNTX(x) ((x) << S_SELEGRCNTX)
71#define F_SELEGRCNTX V_SELEGRCNTX(1U)
72
73#define S_EGRCNTX 0
74#define M_EGRCNTX 0xffff
75#define V_EGRCNTX(x) ((x) << S_EGRCNTX)
76
77#define A_SG_GTS 0x8
78
79#define S_RSPQ 29
80#define M_RSPQ 0x7
81#define V_RSPQ(x) ((x) << S_RSPQ)
82#define G_RSPQ(x) (((x) >> S_RSPQ) & M_RSPQ)
83
84#define S_NEWTIMER 16
85#define M_NEWTIMER 0x1fff
86#define V_NEWTIMER(x) ((x) << S_NEWTIMER)
87
88#define S_NEWINDEX 0
89#define M_NEWINDEX 0xffff
90#define V_NEWINDEX(x) ((x) << S_NEWINDEX)
91
92#define A_SG_CONTEXT_CMD 0xc
93
94#define S_CONTEXT_CMD_OPCODE 28
95#define M_CONTEXT_CMD_OPCODE 0xf
96#define V_CONTEXT_CMD_OPCODE(x) ((x) << S_CONTEXT_CMD_OPCODE)
97
98#define S_CONTEXT_CMD_BUSY 27
99#define V_CONTEXT_CMD_BUSY(x) ((x) << S_CONTEXT_CMD_BUSY)
100#define F_CONTEXT_CMD_BUSY V_CONTEXT_CMD_BUSY(1U)
101
102#define S_CQ_CREDIT 20
103
104#define M_CQ_CREDIT 0x7f
105
106#define V_CQ_CREDIT(x) ((x) << S_CQ_CREDIT)
107
108#define G_CQ_CREDIT(x) (((x) >> S_CQ_CREDIT) & M_CQ_CREDIT)
109
110#define S_CQ 19
111
112#define V_CQ(x) ((x) << S_CQ)
113#define F_CQ V_CQ(1U)
114
115#define S_RESPONSEQ 18
116#define V_RESPONSEQ(x) ((x) << S_RESPONSEQ)
117#define F_RESPONSEQ V_RESPONSEQ(1U)
118
119#define S_EGRESS 17
120#define V_EGRESS(x) ((x) << S_EGRESS)
121#define F_EGRESS V_EGRESS(1U)
122
123#define S_FREELIST 16
124#define V_FREELIST(x) ((x) << S_FREELIST)
125#define F_FREELIST V_FREELIST(1U)
126
127#define S_CONTEXT 0
128#define M_CONTEXT 0xffff
129#define V_CONTEXT(x) ((x) << S_CONTEXT)
130
131#define G_CONTEXT(x) (((x) >> S_CONTEXT) & M_CONTEXT)
132
133#define A_SG_CONTEXT_DATA0 0x10
134
135#define A_SG_CONTEXT_DATA1 0x14
136
137#define A_SG_CONTEXT_DATA2 0x18
138
139#define A_SG_CONTEXT_DATA3 0x1c
140
141#define A_SG_CONTEXT_MASK0 0x20
142
143#define A_SG_CONTEXT_MASK1 0x24
144
145#define A_SG_CONTEXT_MASK2 0x28
146
147#define A_SG_CONTEXT_MASK3 0x2c
148
149#define A_SG_RSPQ_CREDIT_RETURN 0x30
150
151#define S_CREDITS 0
152#define M_CREDITS 0xffff
153#define V_CREDITS(x) ((x) << S_CREDITS)
154
155#define A_SG_DATA_INTR 0x34
156
157#define S_ERRINTR 31
158#define V_ERRINTR(x) ((x) << S_ERRINTR)
159#define F_ERRINTR V_ERRINTR(1U)
160
161#define A_SG_HI_DRB_HI_THRSH 0x38
162
163#define A_SG_HI_DRB_LO_THRSH 0x3c
164
165#define A_SG_LO_DRB_HI_THRSH 0x40
166
167#define A_SG_LO_DRB_LO_THRSH 0x44
168
169#define A_SG_RSPQ_FL_STATUS 0x4c
170
171#define S_RSPQ0DISABLED 8
172
173#define S_FL0EMPTY 16
174#define V_FL0EMPTY(x) ((x) << S_FL0EMPTY)
175#define F_FL0EMPTY V_FL0EMPTY(1U)
176
177#define A_SG_EGR_RCQ_DRB_THRSH 0x54
178
179#define S_HIRCQDRBTHRSH 16
180#define M_HIRCQDRBTHRSH 0x7ff
181#define V_HIRCQDRBTHRSH(x) ((x) << S_HIRCQDRBTHRSH)
182
183#define S_LORCQDRBTHRSH 0
184#define M_LORCQDRBTHRSH 0x7ff
185#define V_LORCQDRBTHRSH(x) ((x) << S_LORCQDRBTHRSH)
186
187#define A_SG_EGR_CNTX_BADDR 0x58
188
189#define A_SG_INT_CAUSE 0x5c
190
191#define S_HIRCQPARITYERROR 31
192#define V_HIRCQPARITYERROR(x) ((x) << S_HIRCQPARITYERROR)
193#define F_HIRCQPARITYERROR V_HIRCQPARITYERROR(1U)
194
195#define S_LORCQPARITYERROR 30
196#define V_LORCQPARITYERROR(x) ((x) << S_LORCQPARITYERROR)
197#define F_LORCQPARITYERROR V_LORCQPARITYERROR(1U)
198
199#define S_HIDRBPARITYERROR 29
200#define V_HIDRBPARITYERROR(x) ((x) << S_HIDRBPARITYERROR)
201#define F_HIDRBPARITYERROR V_HIDRBPARITYERROR(1U)
202
203#define S_LODRBPARITYERROR 28
204#define V_LODRBPARITYERROR(x) ((x) << S_LODRBPARITYERROR)
205#define F_LODRBPARITYERROR V_LODRBPARITYERROR(1U)
206
207#define S_FLPARITYERROR 22
208#define M_FLPARITYERROR 0x3f
209#define V_FLPARITYERROR(x) ((x) << S_FLPARITYERROR)
210#define G_FLPARITYERROR(x) (((x) >> S_FLPARITYERROR) & M_FLPARITYERROR)
211
212#define S_ITPARITYERROR 20
213#define M_ITPARITYERROR 0x3
214#define V_ITPARITYERROR(x) ((x) << S_ITPARITYERROR)
215#define G_ITPARITYERROR(x) (((x) >> S_ITPARITYERROR) & M_ITPARITYERROR)
216
217#define S_IRPARITYERROR 19
218#define V_IRPARITYERROR(x) ((x) << S_IRPARITYERROR)
219#define F_IRPARITYERROR V_IRPARITYERROR(1U)
220
221#define S_RCPARITYERROR 18
222#define V_RCPARITYERROR(x) ((x) << S_RCPARITYERROR)
223#define F_RCPARITYERROR V_RCPARITYERROR(1U)
224
225#define S_OCPARITYERROR 17
226#define V_OCPARITYERROR(x) ((x) << S_OCPARITYERROR)
227#define F_OCPARITYERROR V_OCPARITYERROR(1U)
228
229#define S_CPPARITYERROR 16
230#define V_CPPARITYERROR(x) ((x) << S_CPPARITYERROR)
231#define F_CPPARITYERROR V_CPPARITYERROR(1U)
232
233#define S_R_REQ_FRAMINGERROR 15
234#define V_R_REQ_FRAMINGERROR(x) ((x) << S_R_REQ_FRAMINGERROR)
235#define F_R_REQ_FRAMINGERROR V_R_REQ_FRAMINGERROR(1U)
236
237#define S_UC_REQ_FRAMINGERROR 14
238#define V_UC_REQ_FRAMINGERROR(x) ((x) << S_UC_REQ_FRAMINGERROR)
239#define F_UC_REQ_FRAMINGERROR V_UC_REQ_FRAMINGERROR(1U)
240
241#define S_HICTLDRBDROPERR 13
242#define V_HICTLDRBDROPERR(x) ((x) << S_HICTLDRBDROPERR)
243#define F_HICTLDRBDROPERR V_HICTLDRBDROPERR(1U)
244
245#define S_LOCTLDRBDROPERR 12
246#define V_LOCTLDRBDROPERR(x) ((x) << S_LOCTLDRBDROPERR)
247#define F_LOCTLDRBDROPERR V_LOCTLDRBDROPERR(1U)
248
249#define S_HIPIODRBDROPERR 11
250#define V_HIPIODRBDROPERR(x) ((x) << S_HIPIODRBDROPERR)
251#define F_HIPIODRBDROPERR V_HIPIODRBDROPERR(1U)
252
253#define S_LOPIODRBDROPERR 10
254#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR)
255#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U)
256
257#define S_HIPRIORITYDBFULL 7
258#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL)
259#define F_HIPRIORITYDBFULL V_HIPRIORITYDBFULL(1U)
260
261#define S_HIPRIORITYDBEMPTY 6
262#define V_HIPRIORITYDBEMPTY(x) ((x) << S_HIPRIORITYDBEMPTY)
263#define F_HIPRIORITYDBEMPTY V_HIPRIORITYDBEMPTY(1U)
264
265#define S_LOPRIORITYDBFULL 5
266#define V_LOPRIORITYDBFULL(x) ((x) << S_LOPRIORITYDBFULL)
267#define F_LOPRIORITYDBFULL V_LOPRIORITYDBFULL(1U)
268
269#define S_LOPRIORITYDBEMPTY 4
270#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY)
271#define F_LOPRIORITYDBEMPTY V_LOPRIORITYDBEMPTY(1U)
272
273#define S_RSPQDISABLED 3
274#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
275#define F_RSPQDISABLED V_RSPQDISABLED(1U)
276
277#define S_RSPQCREDITOVERFOW 2
278#define V_RSPQCREDITOVERFOW(x) ((x) << S_RSPQCREDITOVERFOW)
279#define F_RSPQCREDITOVERFOW V_RSPQCREDITOVERFOW(1U)
280
281#define S_FLEMPTY 1
282#define V_FLEMPTY(x) ((x) << S_FLEMPTY)
283#define F_FLEMPTY V_FLEMPTY(1U)
284
285#define A_SG_INT_ENABLE 0x60
286
287#define A_SG_CMDQ_CREDIT_TH 0x64
288
289#define S_TIMEOUT 8
290#define M_TIMEOUT 0xffffff
291#define V_TIMEOUT(x) ((x) << S_TIMEOUT)
292
293#define S_THRESHOLD 0
294#define M_THRESHOLD 0xff
295#define V_THRESHOLD(x) ((x) << S_THRESHOLD)
296
297#define A_SG_TIMER_TICK 0x68
298
299#define A_SG_CQ_CONTEXT_BADDR 0x6c
300
301#define A_SG_OCO_BASE 0x70
302
303#define S_BASE1 16
304#define M_BASE1 0xffff
305#define V_BASE1(x) ((x) << S_BASE1)
306
307#define A_SG_DRB_PRI_THRESH 0x74
308
309#define A_PCIX_INT_ENABLE 0x80
310
311#define S_MSIXPARERR 22
312#define M_MSIXPARERR 0x7
313
314#define V_MSIXPARERR(x) ((x) << S_MSIXPARERR)
315
316#define S_CFPARERR 18
317#define M_CFPARERR 0xf
318
319#define V_CFPARERR(x) ((x) << S_CFPARERR)
320
321#define S_RFPARERR 14
322#define M_RFPARERR 0xf
323
324#define V_RFPARERR(x) ((x) << S_RFPARERR)
325
326#define S_WFPARERR 12
327#define M_WFPARERR 0x3
328
329#define V_WFPARERR(x) ((x) << S_WFPARERR)
330
331#define S_PIOPARERR 11
332#define V_PIOPARERR(x) ((x) << S_PIOPARERR)
333#define F_PIOPARERR V_PIOPARERR(1U)
334
335#define S_DETUNCECCERR 10
336#define V_DETUNCECCERR(x) ((x) << S_DETUNCECCERR)
337#define F_DETUNCECCERR V_DETUNCECCERR(1U)
338
339#define S_DETCORECCERR 9
340#define V_DETCORECCERR(x) ((x) << S_DETCORECCERR)
341#define F_DETCORECCERR V_DETCORECCERR(1U)
342
343#define S_RCVSPLCMPERR 8
344#define V_RCVSPLCMPERR(x) ((x) << S_RCVSPLCMPERR)
345#define F_RCVSPLCMPERR V_RCVSPLCMPERR(1U)
346
347#define S_UNXSPLCMP 7
348#define V_UNXSPLCMP(x) ((x) << S_UNXSPLCMP)
349#define F_UNXSPLCMP V_UNXSPLCMP(1U)
350
351#define S_SPLCMPDIS 6
352#define V_SPLCMPDIS(x) ((x) << S_SPLCMPDIS)
353#define F_SPLCMPDIS V_SPLCMPDIS(1U)
354
355#define S_DETPARERR 5
356#define V_DETPARERR(x) ((x) << S_DETPARERR)
357#define F_DETPARERR V_DETPARERR(1U)
358
359#define S_SIGSYSERR 4
360#define V_SIGSYSERR(x) ((x) << S_SIGSYSERR)
361#define F_SIGSYSERR V_SIGSYSERR(1U)
362
363#define S_RCVMSTABT 3
364#define V_RCVMSTABT(x) ((x) << S_RCVMSTABT)
365#define F_RCVMSTABT V_RCVMSTABT(1U)
366
367#define S_RCVTARABT 2
368#define V_RCVTARABT(x) ((x) << S_RCVTARABT)
369#define F_RCVTARABT V_RCVTARABT(1U)
370
371#define S_SIGTARABT 1
372#define V_SIGTARABT(x) ((x) << S_SIGTARABT)
373#define F_SIGTARABT V_SIGTARABT(1U)
374
375#define S_MSTDETPARERR 0
376#define V_MSTDETPARERR(x) ((x) << S_MSTDETPARERR)
377#define F_MSTDETPARERR V_MSTDETPARERR(1U)
378
379#define A_PCIX_INT_CAUSE 0x84
380
381#define A_PCIX_CFG 0x88
382
383#define S_DMASTOPEN 19
384#define V_DMASTOPEN(x) ((x) << S_DMASTOPEN)
385#define F_DMASTOPEN V_DMASTOPEN(1U)
386
387#define S_CLIDECEN 18
388#define V_CLIDECEN(x) ((x) << S_CLIDECEN)
389#define F_CLIDECEN V_CLIDECEN(1U)
390
391#define A_PCIX_MODE 0x8c
392
393#define S_PCLKRANGE 6
394#define M_PCLKRANGE 0x3
395#define V_PCLKRANGE(x) ((x) << S_PCLKRANGE)
396#define G_PCLKRANGE(x) (((x) >> S_PCLKRANGE) & M_PCLKRANGE)
397
398#define S_PCIXINITPAT 2
399#define M_PCIXINITPAT 0xf
400#define V_PCIXINITPAT(x) ((x) << S_PCIXINITPAT)
401#define G_PCIXINITPAT(x) (((x) >> S_PCIXINITPAT) & M_PCIXINITPAT)
402
403#define S_64BIT 0
404#define V_64BIT(x) ((x) << S_64BIT)
405#define F_64BIT V_64BIT(1U)
406
407#define A_PCIE_INT_ENABLE 0x80
408
409#define S_BISTERR 15
410#define M_BISTERR 0xff
411
412#define V_BISTERR(x) ((x) << S_BISTERR)
413
414#define S_TXPARERR 18
415#define V_TXPARERR(x) ((x) << S_TXPARERR)
416#define F_TXPARERR V_TXPARERR(1U)
417
418#define S_RXPARERR 17
419#define V_RXPARERR(x) ((x) << S_RXPARERR)
420#define F_RXPARERR V_RXPARERR(1U)
421
422#define S_RETRYLUTPARERR 16
423#define V_RETRYLUTPARERR(x) ((x) << S_RETRYLUTPARERR)
424#define F_RETRYLUTPARERR V_RETRYLUTPARERR(1U)
425
426#define S_RETRYBUFPARERR 15
427#define V_RETRYBUFPARERR(x) ((x) << S_RETRYBUFPARERR)
428#define F_RETRYBUFPARERR V_RETRYBUFPARERR(1U)
429
430#define S_PCIE_MSIXPARERR 12
431#define M_PCIE_MSIXPARERR 0x7
432
433#define V_PCIE_MSIXPARERR(x) ((x) << S_PCIE_MSIXPARERR)
434
435#define S_PCIE_CFPARERR 11
436#define V_PCIE_CFPARERR(x) ((x) << S_PCIE_CFPARERR)
437#define F_PCIE_CFPARERR V_PCIE_CFPARERR(1U)
438
439#define S_PCIE_RFPARERR 10
440#define V_PCIE_RFPARERR(x) ((x) << S_PCIE_RFPARERR)
441#define F_PCIE_RFPARERR V_PCIE_RFPARERR(1U)
442
443#define S_PCIE_WFPARERR 9
444#define V_PCIE_WFPARERR(x) ((x) << S_PCIE_WFPARERR)
445#define F_PCIE_WFPARERR V_PCIE_WFPARERR(1U)
446
447#define S_PCIE_PIOPARERR 8
448#define V_PCIE_PIOPARERR(x) ((x) << S_PCIE_PIOPARERR)
449#define F_PCIE_PIOPARERR V_PCIE_PIOPARERR(1U)
450
451#define S_UNXSPLCPLERRC 7
452#define V_UNXSPLCPLERRC(x) ((x) << S_UNXSPLCPLERRC)
453#define F_UNXSPLCPLERRC V_UNXSPLCPLERRC(1U)
454
455#define S_UNXSPLCPLERRR 6
456#define V_UNXSPLCPLERRR(x) ((x) << S_UNXSPLCPLERRR)
457#define F_UNXSPLCPLERRR V_UNXSPLCPLERRR(1U)
458
459#define S_PEXERR 0
460#define V_PEXERR(x) ((x) << S_PEXERR)
461#define F_PEXERR V_PEXERR(1U)
462
463#define A_PCIE_INT_CAUSE 0x84
464
465#define S_PCIE_DMASTOPEN 24
466#define V_PCIE_DMASTOPEN(x) ((x) << S_PCIE_DMASTOPEN)
467#define F_PCIE_DMASTOPEN V_PCIE_DMASTOPEN(1U)
468
469#define A_PCIE_CFG 0x88
470
471#define S_ENABLELINKDWNDRST 21
472#define V_ENABLELINKDWNDRST(x) ((x) << S_ENABLELINKDWNDRST)
473#define F_ENABLELINKDWNDRST V_ENABLELINKDWNDRST(1U)
474
475#define S_ENABLELINKDOWNRST 20
476#define V_ENABLELINKDOWNRST(x) ((x) << S_ENABLELINKDOWNRST)
477#define F_ENABLELINKDOWNRST V_ENABLELINKDOWNRST(1U)
478
479#define S_PCIE_CLIDECEN 16
480#define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
481#define F_PCIE_CLIDECEN V_PCIE_CLIDECEN(1U)
482
483#define S_CRSTWRMMODE 0
484#define V_CRSTWRMMODE(x) ((x) << S_CRSTWRMMODE)
485#define F_CRSTWRMMODE V_CRSTWRMMODE(1U)
486
487#define A_PCIE_MODE 0x8c
488
489#define S_NUMFSTTRNSEQRX 10
490#define M_NUMFSTTRNSEQRX 0xff
491#define V_NUMFSTTRNSEQRX(x) ((x) << S_NUMFSTTRNSEQRX)
492#define G_NUMFSTTRNSEQRX(x) (((x) >> S_NUMFSTTRNSEQRX) & M_NUMFSTTRNSEQRX)
493
494#define A_PCIE_PEX_CTRL0 0x98
495
496#define S_NUMFSTTRNSEQ 22
497#define M_NUMFSTTRNSEQ 0xff
498#define V_NUMFSTTRNSEQ(x) ((x) << S_NUMFSTTRNSEQ)
499#define G_NUMFSTTRNSEQ(x) (((x) >> S_NUMFSTTRNSEQ) & M_NUMFSTTRNSEQ)
500
501#define S_REPLAYLMT 2
502#define M_REPLAYLMT 0xfffff
503
504#define V_REPLAYLMT(x) ((x) << S_REPLAYLMT)
505
506#define A_PCIE_PEX_CTRL1 0x9c
507
508#define S_T3A_ACKLAT 0
509#define M_T3A_ACKLAT 0x7ff
510
511#define V_T3A_ACKLAT(x) ((x) << S_T3A_ACKLAT)
512
513#define S_ACKLAT 0
514#define M_ACKLAT 0x1fff
515
516#define V_ACKLAT(x) ((x) << S_ACKLAT)
517
518#define A_PCIE_PEX_ERR 0xa4
519
520#define A_T3DBG_GPIO_EN 0xd0
521
522#define S_GPIO11_OEN 27
523#define V_GPIO11_OEN(x) ((x) << S_GPIO11_OEN)
524#define F_GPIO11_OEN V_GPIO11_OEN(1U)
525
526#define S_GPIO10_OEN 26
527#define V_GPIO10_OEN(x) ((x) << S_GPIO10_OEN)
528#define F_GPIO10_OEN V_GPIO10_OEN(1U)
529
530#define S_GPIO7_OEN 23
531#define V_GPIO7_OEN(x) ((x) << S_GPIO7_OEN)
532#define F_GPIO7_OEN V_GPIO7_OEN(1U)
533
534#define S_GPIO6_OEN 22
535#define V_GPIO6_OEN(x) ((x) << S_GPIO6_OEN)
536#define F_GPIO6_OEN V_GPIO6_OEN(1U)
537
538#define S_GPIO5_OEN 21
539#define V_GPIO5_OEN(x) ((x) << S_GPIO5_OEN)
540#define F_GPIO5_OEN V_GPIO5_OEN(1U)
541
542#define S_GPIO4_OEN 20
543#define V_GPIO4_OEN(x) ((x) << S_GPIO4_OEN)
544#define F_GPIO4_OEN V_GPIO4_OEN(1U)
545
546#define S_GPIO2_OEN 18
547#define V_GPIO2_OEN(x) ((x) << S_GPIO2_OEN)
548#define F_GPIO2_OEN V_GPIO2_OEN(1U)
549
550#define S_GPIO1_OEN 17
551#define V_GPIO1_OEN(x) ((x) << S_GPIO1_OEN)
552#define F_GPIO1_OEN V_GPIO1_OEN(1U)
553
554#define S_GPIO0_OEN 16
555#define V_GPIO0_OEN(x) ((x) << S_GPIO0_OEN)
556#define F_GPIO0_OEN V_GPIO0_OEN(1U)
557
558#define S_GPIO10_OUT_VAL 10
559#define V_GPIO10_OUT_VAL(x) ((x) << S_GPIO10_OUT_VAL)
560#define F_GPIO10_OUT_VAL V_GPIO10_OUT_VAL(1U)
561
562#define S_GPIO7_OUT_VAL 7
563#define V_GPIO7_OUT_VAL(x) ((x) << S_GPIO7_OUT_VAL)
564#define F_GPIO7_OUT_VAL V_GPIO7_OUT_VAL(1U)
565
566#define S_GPIO6_OUT_VAL 6
567#define V_GPIO6_OUT_VAL(x) ((x) << S_GPIO6_OUT_VAL)
568#define F_GPIO6_OUT_VAL V_GPIO6_OUT_VAL(1U)
569
570#define S_GPIO5_OUT_VAL 5
571#define V_GPIO5_OUT_VAL(x) ((x) << S_GPIO5_OUT_VAL)
572#define F_GPIO5_OUT_VAL V_GPIO5_OUT_VAL(1U)
573
574#define S_GPIO4_OUT_VAL 4
575#define V_GPIO4_OUT_VAL(x) ((x) << S_GPIO4_OUT_VAL)
576#define F_GPIO4_OUT_VAL V_GPIO4_OUT_VAL(1U)
577
578#define S_GPIO2_OUT_VAL 2
579#define V_GPIO2_OUT_VAL(x) ((x) << S_GPIO2_OUT_VAL)
580#define F_GPIO2_OUT_VAL V_GPIO2_OUT_VAL(1U)
581
582#define S_GPIO1_OUT_VAL 1
583#define V_GPIO1_OUT_VAL(x) ((x) << S_GPIO1_OUT_VAL)
584#define F_GPIO1_OUT_VAL V_GPIO1_OUT_VAL(1U)
585
586#define S_GPIO0_OUT_VAL 0
587#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL)
588#define F_GPIO0_OUT_VAL V_GPIO0_OUT_VAL(1U)
589
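/*
 * The output-enable and output-value flags above combine with a simple OR.
 * For example, driving GPIO5 high could be done by setting both bits in
 * A_T3DBG_GPIO_EN (a sketch only; t3_set_reg_field is assumed here to do a
 * read-modify-write of the named register, with the mask as its third
 * argument and the new bit values as its fourth):
 *
 *	t3_set_reg_field(adap, A_T3DBG_GPIO_EN,
 *			 F_GPIO5_OEN | F_GPIO5_OUT_VAL,
 *			 F_GPIO5_OEN | F_GPIO5_OUT_VAL);
 */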
590#define A_T3DBG_INT_ENABLE 0xd8
591
592#define S_GPIO11 11
593#define V_GPIO11(x) ((x) << S_GPIO11)
594#define F_GPIO11 V_GPIO11(1U)
595
596#define S_GPIO10 10
597#define V_GPIO10(x) ((x) << S_GPIO10)
598#define F_GPIO10 V_GPIO10(1U)
599
600#define S_GPIO9 9
601#define V_GPIO9(x) ((x) << S_GPIO9)
602#define F_GPIO9 V_GPIO9(1U)
603
604#define S_GPIO7 7
605#define V_GPIO7(x) ((x) << S_GPIO7)
606#define F_GPIO7 V_GPIO7(1U)
607
608#define S_GPIO6 6
609#define V_GPIO6(x) ((x) << S_GPIO6)
610#define F_GPIO6 V_GPIO6(1U)
611
612#define S_GPIO5 5
613#define V_GPIO5(x) ((x) << S_GPIO5)
614#define F_GPIO5 V_GPIO5(1U)
615
616#define S_GPIO4 4
617#define V_GPIO4(x) ((x) << S_GPIO4)
618#define F_GPIO4 V_GPIO4(1U)
619
620#define S_GPIO3 3
621#define V_GPIO3(x) ((x) << S_GPIO3)
622#define F_GPIO3 V_GPIO3(1U)
623
624#define S_GPIO2 2
625#define V_GPIO2(x) ((x) << S_GPIO2)
626#define F_GPIO2 V_GPIO2(1U)
627
628#define S_GPIO1 1
629#define V_GPIO1(x) ((x) << S_GPIO1)
630#define F_GPIO1 V_GPIO1(1U)
631
632#define S_GPIO0 0
633#define V_GPIO0(x) ((x) << S_GPIO0)
634#define F_GPIO0 V_GPIO0(1U)
635
636#define A_T3DBG_INT_CAUSE 0xdc
637
638#define A_T3DBG_GPIO_ACT_LOW 0xf0
639
640#define MC7_PMRX_BASE_ADDR 0x100
641
642#define A_MC7_CFG 0x100
643
644#define S_IFEN 13
645#define V_IFEN(x) ((x) << S_IFEN)
646#define F_IFEN V_IFEN(1U)
647
648#define S_TERM150 11
649#define V_TERM150(x) ((x) << S_TERM150)
650#define F_TERM150 V_TERM150(1U)
651
652#define S_SLOW 10
653#define V_SLOW(x) ((x) << S_SLOW)
654#define F_SLOW V_SLOW(1U)
655
656#define S_WIDTH 8
657#define M_WIDTH 0x3
658#define V_WIDTH(x) ((x) << S_WIDTH)
659#define G_WIDTH(x) (((x) >> S_WIDTH) & M_WIDTH)
660
661#define S_BKS 6
662#define V_BKS(x) ((x) << S_BKS)
663#define F_BKS V_BKS(1U)
664
665#define S_ORG 5
666#define V_ORG(x) ((x) << S_ORG)
667#define F_ORG V_ORG(1U)
668
669#define S_DEN 2
670#define M_DEN 0x7
671#define V_DEN(x) ((x) << S_DEN)
672#define G_DEN(x) (((x) >> S_DEN) & M_DEN)
673
674#define S_RDY 1
675#define V_RDY(x) ((x) << S_RDY)
676#define F_RDY V_RDY(1U)
677
678#define S_CLKEN 0
679#define V_CLKEN(x) ((x) << S_CLKEN)
680#define F_CLKEN V_CLKEN(1U)
681
682#define A_MC7_MODE 0x104
683
684#define S_BUSY 31
685#define V_BUSY(x) ((x) << S_BUSY)
686#define F_BUSY V_BUSY(1U)
687
692#define A_MC7_EXT_MODE1 0x108
693
694#define A_MC7_EXT_MODE2 0x10c
695
696#define A_MC7_EXT_MODE3 0x110
697
698#define A_MC7_PRE 0x114
699
700#define A_MC7_REF 0x118
701
702#define S_PREREFDIV 1
703#define M_PREREFDIV 0x3fff
704#define V_PREREFDIV(x) ((x) << S_PREREFDIV)
705
706#define S_PERREFEN 0
707#define V_PERREFEN(x) ((x) << S_PERREFEN)
708#define F_PERREFEN V_PERREFEN(1U)
709
710#define A_MC7_DLL 0x11c
711
712#define S_DLLENB 1
713#define V_DLLENB(x) ((x) << S_DLLENB)
714#define F_DLLENB V_DLLENB(1U)
715
716#define S_DLLRST 0
717#define V_DLLRST(x) ((x) << S_DLLRST)
718#define F_DLLRST V_DLLRST(1U)
719
720#define A_MC7_PARM 0x120
721
722#define S_ACTTOPREDLY 26
723#define M_ACTTOPREDLY 0xf
724#define V_ACTTOPREDLY(x) ((x) << S_ACTTOPREDLY)
725
726#define S_ACTTORDWRDLY 23
727#define M_ACTTORDWRDLY 0x7
728#define V_ACTTORDWRDLY(x) ((x) << S_ACTTORDWRDLY)
729
730#define S_PRECYC 20
731#define M_PRECYC 0x7
732#define V_PRECYC(x) ((x) << S_PRECYC)
733
734#define S_REFCYC 13
735#define M_REFCYC 0x7f
736#define V_REFCYC(x) ((x) << S_REFCYC)
737
738#define S_BKCYC 8
739#define M_BKCYC 0x1f
740#define V_BKCYC(x) ((x) << S_BKCYC)
741
742#define S_WRTORDDLY 4
743#define M_WRTORDDLY 0xf
744#define V_WRTORDDLY(x) ((x) << S_WRTORDDLY)
745
746#define S_RDTOWRDLY 0
747#define M_RDTOWRDLY 0xf
748#define V_RDTOWRDLY(x) ((x) << S_RDTOWRDLY)
749
750#define A_MC7_CAL 0x128
751
752#define S_BUSY 31
753#define V_BUSY(x) ((x) << S_BUSY)
754#define F_BUSY V_BUSY(1U)
755
760#define S_CAL_FAULT 30
761#define V_CAL_FAULT(x) ((x) << S_CAL_FAULT)
762#define F_CAL_FAULT V_CAL_FAULT(1U)
763
764#define S_SGL_CAL_EN 20
765#define V_SGL_CAL_EN(x) ((x) << S_SGL_CAL_EN)
766#define F_SGL_CAL_EN V_SGL_CAL_EN(1U)
767
768#define A_MC7_ERR_ADDR 0x12c
769
770#define A_MC7_ECC 0x130
771
772#define S_ECCCHKEN 1
773#define V_ECCCHKEN(x) ((x) << S_ECCCHKEN)
774#define F_ECCCHKEN V_ECCCHKEN(1U)
775
776#define S_ECCGENEN 0
777#define V_ECCGENEN(x) ((x) << S_ECCGENEN)
778#define F_ECCGENEN V_ECCGENEN(1U)
779
780#define A_MC7_CE_ADDR 0x134
781
782#define A_MC7_CE_DATA0 0x138
783
784#define A_MC7_CE_DATA1 0x13c
785
786#define A_MC7_CE_DATA2 0x140
787
788#define S_DATA 0
789#define M_DATA 0xff
790
791#define G_DATA(x) (((x) >> S_DATA) & M_DATA)
792
793#define A_MC7_UE_ADDR 0x144
794
795#define A_MC7_UE_DATA0 0x148
796
797#define A_MC7_UE_DATA1 0x14c
798
799#define A_MC7_UE_DATA2 0x150
800
801#define A_MC7_BD_ADDR 0x154
802
803#define S_ADDR 3
804
805#define M_ADDR 0x1fffffff
806
807#define A_MC7_BD_DATA0 0x158
808
809#define A_MC7_BD_DATA1 0x15c
810
811#define A_MC7_BD_OP 0x164
812
813#define S_OP 0
814
815#define V_OP(x) ((x) << S_OP)
816#define F_OP V_OP(1U)
817
821#define A_MC7_BIST_ADDR_BEG 0x168
822
823#define A_MC7_BIST_ADDR_END 0x16c
824
825#define A_MC7_BIST_DATA 0x170
826
827#define A_MC7_BIST_OP 0x174
828
829#define S_CONT 3
830#define V_CONT(x) ((x) << S_CONT)
831#define F_CONT V_CONT(1U)
832
835#define A_MC7_INT_ENABLE 0x178
836
837#define S_AE 17
838#define V_AE(x) ((x) << S_AE)
839#define F_AE V_AE(1U)
840
841#define S_PE 2
842#define M_PE 0x7fff
843
844#define V_PE(x) ((x) << S_PE)
845
846#define G_PE(x) (((x) >> S_PE) & M_PE)
847
848#define S_UE 1
849#define V_UE(x) ((x) << S_UE)
850#define F_UE V_UE(1U)
851
852#define S_CE 0
853#define V_CE(x) ((x) << S_CE)
854#define F_CE V_CE(1U)
855
856#define A_MC7_INT_CAUSE 0x17c
857
858#define MC7_PMTX_BASE_ADDR 0x180
859
860#define MC7_CM_BASE_ADDR 0x200
861
862#define A_CIM_BOOT_CFG 0x280
863
864#define S_BOOTADDR 2
865#define M_BOOTADDR 0x3fffffff
866#define V_BOOTADDR(x) ((x) << S_BOOTADDR)
867
868#define A_CIM_SDRAM_BASE_ADDR 0x28c
869
870#define A_CIM_SDRAM_ADDR_SIZE 0x290
871
872#define A_CIM_HOST_INT_ENABLE 0x298
873
874#define S_DTAGPARERR 28
875#define V_DTAGPARERR(x) ((x) << S_DTAGPARERR)
876#define F_DTAGPARERR V_DTAGPARERR(1U)
877
878#define S_ITAGPARERR 27
879#define V_ITAGPARERR(x) ((x) << S_ITAGPARERR)
880#define F_ITAGPARERR V_ITAGPARERR(1U)
881
882#define S_IBQTPPARERR 26
883#define V_IBQTPPARERR(x) ((x) << S_IBQTPPARERR)
884#define F_IBQTPPARERR V_IBQTPPARERR(1U)
885
886#define S_IBQULPPARERR 25
887#define V_IBQULPPARERR(x) ((x) << S_IBQULPPARERR)
888#define F_IBQULPPARERR V_IBQULPPARERR(1U)
889
890#define S_IBQSGEHIPARERR 24
891#define V_IBQSGEHIPARERR(x) ((x) << S_IBQSGEHIPARERR)
892#define F_IBQSGEHIPARERR V_IBQSGEHIPARERR(1U)
893
894#define S_IBQSGELOPARERR 23
895#define V_IBQSGELOPARERR(x) ((x) << S_IBQSGELOPARERR)
896#define F_IBQSGELOPARERR V_IBQSGELOPARERR(1U)
897
898#define S_OBQULPLOPARERR 22
899#define V_OBQULPLOPARERR(x) ((x) << S_OBQULPLOPARERR)
900#define F_OBQULPLOPARERR V_OBQULPLOPARERR(1U)
901
902#define S_OBQULPHIPARERR 21
903#define V_OBQULPHIPARERR(x) ((x) << S_OBQULPHIPARERR)
904#define F_OBQULPHIPARERR V_OBQULPHIPARERR(1U)
905
906#define S_OBQSGEPARERR 20
907#define V_OBQSGEPARERR(x) ((x) << S_OBQSGEPARERR)
908#define F_OBQSGEPARERR V_OBQSGEPARERR(1U)
909
910#define S_DCACHEPARERR 19
911#define V_DCACHEPARERR(x) ((x) << S_DCACHEPARERR)
912#define F_DCACHEPARERR V_DCACHEPARERR(1U)
913
914#define S_ICACHEPARERR 18
915#define V_ICACHEPARERR(x) ((x) << S_ICACHEPARERR)
916#define F_ICACHEPARERR V_ICACHEPARERR(1U)
917
918#define S_DRAMPARERR 17
919#define V_DRAMPARERR(x) ((x) << S_DRAMPARERR)
920#define F_DRAMPARERR V_DRAMPARERR(1U)
921
922#define A_CIM_HOST_INT_CAUSE 0x29c
923
924#define S_BLKWRPLINT 12
925#define V_BLKWRPLINT(x) ((x) << S_BLKWRPLINT)
926#define F_BLKWRPLINT V_BLKWRPLINT(1U)
927
928#define S_BLKRDPLINT 11
929#define V_BLKRDPLINT(x) ((x) << S_BLKRDPLINT)
930#define F_BLKRDPLINT V_BLKRDPLINT(1U)
931
932#define S_BLKWRCTLINT 10
933#define V_BLKWRCTLINT(x) ((x) << S_BLKWRCTLINT)
934#define F_BLKWRCTLINT V_BLKWRCTLINT(1U)
935
936#define S_BLKRDCTLINT 9
937#define V_BLKRDCTLINT(x) ((x) << S_BLKRDCTLINT)
938#define F_BLKRDCTLINT V_BLKRDCTLINT(1U)
939
940#define S_BLKWRFLASHINT 8
941#define V_BLKWRFLASHINT(x) ((x) << S_BLKWRFLASHINT)
942#define F_BLKWRFLASHINT V_BLKWRFLASHINT(1U)
943
944#define S_BLKRDFLASHINT 7
945#define V_BLKRDFLASHINT(x) ((x) << S_BLKRDFLASHINT)
946#define F_BLKRDFLASHINT V_BLKRDFLASHINT(1U)
947
948#define S_SGLWRFLASHINT 6
949#define V_SGLWRFLASHINT(x) ((x) << S_SGLWRFLASHINT)
950#define F_SGLWRFLASHINT V_SGLWRFLASHINT(1U)
951
952#define S_WRBLKFLASHINT 5
953#define V_WRBLKFLASHINT(x) ((x) << S_WRBLKFLASHINT)
954#define F_WRBLKFLASHINT V_WRBLKFLASHINT(1U)
955
956#define S_BLKWRBOOTINT 4
957#define V_BLKWRBOOTINT(x) ((x) << S_BLKWRBOOTINT)
958#define F_BLKWRBOOTINT V_BLKWRBOOTINT(1U)
959
960#define S_FLASHRANGEINT 2
961#define V_FLASHRANGEINT(x) ((x) << S_FLASHRANGEINT)
962#define F_FLASHRANGEINT V_FLASHRANGEINT(1U)
963
964#define S_SDRAMRANGEINT 1
965#define V_SDRAMRANGEINT(x) ((x) << S_SDRAMRANGEINT)
966#define F_SDRAMRANGEINT V_SDRAMRANGEINT(1U)
967
968#define S_RSVDSPACEINT 0
969#define V_RSVDSPACEINT(x) ((x) << S_RSVDSPACEINT)
970#define F_RSVDSPACEINT V_RSVDSPACEINT(1U)
971
972#define A_CIM_HOST_ACC_CTRL 0x2b0
973
974#define S_HOSTBUSY 17
975#define V_HOSTBUSY(x) ((x) << S_HOSTBUSY)
976#define F_HOSTBUSY V_HOSTBUSY(1U)
977
978#define A_CIM_HOST_ACC_DATA 0x2b4
979
980#define A_CIM_IBQ_DBG_CFG 0x2c0
981
982#define S_IBQDBGADDR 16
983#define M_IBQDBGADDR 0x1ff
984#define V_IBQDBGADDR(x) ((x) << S_IBQDBGADDR)
985#define G_IBQDBGADDR(x) (((x) >> S_IBQDBGADDR) & M_IBQDBGADDR)
986
987#define S_IBQDBGQID 3
988#define M_IBQDBGQID 0x3
989#define V_IBQDBGQID(x) ((x) << S_IBQDBGQID)
990#define G_IBQDBGQID(x) (((x) >> S_IBQDBGQID) & M_IBQDBGQID)
991
992#define S_IBQDBGWR 2
993#define V_IBQDBGWR(x) ((x) << S_IBQDBGWR)
994#define F_IBQDBGWR V_IBQDBGWR(1U)
995
996#define S_IBQDBGBUSY 1
997#define V_IBQDBGBUSY(x) ((x) << S_IBQDBGBUSY)
998#define F_IBQDBGBUSY V_IBQDBGBUSY(1U)
999
1000#define S_IBQDBGEN 0
1001#define V_IBQDBGEN(x) ((x) << S_IBQDBGEN)
1002#define F_IBQDBGEN V_IBQDBGEN(1U)
1003
1004#define A_CIM_IBQ_DBG_DATA 0x2c8
1005
1006#define A_TP_IN_CONFIG 0x300
1007
1008#define S_RXFBARBPRIO 25
1009#define V_RXFBARBPRIO(x) ((x) << S_RXFBARBPRIO)
1010#define F_RXFBARBPRIO V_RXFBARBPRIO(1U)
1011
1012#define S_TXFBARBPRIO 24
1013#define V_TXFBARBPRIO(x) ((x) << S_TXFBARBPRIO)
1014#define F_TXFBARBPRIO V_TXFBARBPRIO(1U)
1015
1016#define S_NICMODE 14
1017#define V_NICMODE(x) ((x) << S_NICMODE)
1018#define F_NICMODE V_NICMODE(1U)
1019
1022#define S_IPV6ENABLE 15
1023#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE)
1024#define F_IPV6ENABLE V_IPV6ENABLE(1U)
1025
1026#define A_TP_OUT_CONFIG 0x304
1027
1028#define S_VLANEXTRACTIONENABLE 12
1029
1030#define A_TP_GLOBAL_CONFIG 0x308
1031
1032#define S_TXPACINGENABLE 24
1033#define V_TXPACINGENABLE(x) ((x) << S_TXPACINGENABLE)
1034#define F_TXPACINGENABLE V_TXPACINGENABLE(1U)
1035
1036#define S_PATHMTU 15
1037#define V_PATHMTU(x) ((x) << S_PATHMTU)
1038#define F_PATHMTU V_PATHMTU(1U)
1039
1040#define S_IPCHECKSUMOFFLOAD 13
1041#define V_IPCHECKSUMOFFLOAD(x) ((x) << S_IPCHECKSUMOFFLOAD)
1042#define F_IPCHECKSUMOFFLOAD V_IPCHECKSUMOFFLOAD(1U)
1043
1044#define S_UDPCHECKSUMOFFLOAD 12
1045#define V_UDPCHECKSUMOFFLOAD(x) ((x) << S_UDPCHECKSUMOFFLOAD)
1046#define F_UDPCHECKSUMOFFLOAD V_UDPCHECKSUMOFFLOAD(1U)
1047
1048#define S_TCPCHECKSUMOFFLOAD 11
1049#define V_TCPCHECKSUMOFFLOAD(x) ((x) << S_TCPCHECKSUMOFFLOAD)
1050#define F_TCPCHECKSUMOFFLOAD V_TCPCHECKSUMOFFLOAD(1U)
1051
1052#define S_IPTTL 0
1053#define M_IPTTL 0xff
1054#define V_IPTTL(x) ((x) << S_IPTTL)
1055
1056#define A_TP_CMM_MM_BASE 0x314
1057
1058#define A_TP_CMM_TIMER_BASE 0x318
1059
1060#define S_CMTIMERMAXNUM 28
1061#define M_CMTIMERMAXNUM 0x3
1062#define V_CMTIMERMAXNUM(x) ((x) << S_CMTIMERMAXNUM)
1063
1064#define A_TP_PMM_SIZE 0x31c
1065
1066#define A_TP_PMM_TX_BASE 0x320
1067
1068#define A_TP_PMM_RX_BASE 0x328
1069
1070#define A_TP_PMM_RX_PAGE_SIZE 0x32c
1071
1072#define A_TP_PMM_RX_MAX_PAGE 0x330
1073
1074#define A_TP_PMM_TX_PAGE_SIZE 0x334
1075
1076#define A_TP_PMM_TX_MAX_PAGE 0x338
1077
1078#define A_TP_TCP_OPTIONS 0x340
1079
1080#define S_MTUDEFAULT 16
1081#define M_MTUDEFAULT 0xffff
1082#define V_MTUDEFAULT(x) ((x) << S_MTUDEFAULT)
1083
1084#define S_MTUENABLE 10
1085#define V_MTUENABLE(x) ((x) << S_MTUENABLE)
1086#define F_MTUENABLE V_MTUENABLE(1U)
1087
1088#define S_SACKRX 8
1089#define V_SACKRX(x) ((x) << S_SACKRX)
1090#define F_SACKRX V_SACKRX(1U)
1091
1092#define S_SACKMODE 4
1093
1094#define M_SACKMODE 0x3
1095
1096#define V_SACKMODE(x) ((x) << S_SACKMODE)
1097
1098#define S_WINDOWSCALEMODE 2
1099#define M_WINDOWSCALEMODE 0x3
1100#define V_WINDOWSCALEMODE(x) ((x) << S_WINDOWSCALEMODE)
1101
1102#define S_TIMESTAMPSMODE 0
1103
1104#define M_TIMESTAMPSMODE 0x3
1105
1106#define V_TIMESTAMPSMODE(x) ((x) << S_TIMESTAMPSMODE)
1107
1108#define A_TP_DACK_CONFIG 0x344
1109
1110#define S_AUTOSTATE3 30
1111#define M_AUTOSTATE3 0x3
1112#define V_AUTOSTATE3(x) ((x) << S_AUTOSTATE3)
1113
1114#define S_AUTOSTATE2 28
1115#define M_AUTOSTATE2 0x3
1116#define V_AUTOSTATE2(x) ((x) << S_AUTOSTATE2)
1117
1118#define S_AUTOSTATE1 26
1119#define M_AUTOSTATE1 0x3
1120#define V_AUTOSTATE1(x) ((x) << S_AUTOSTATE1)
1121
1122#define S_BYTETHRESHOLD 5
1123#define M_BYTETHRESHOLD 0xfffff
1124#define V_BYTETHRESHOLD(x) ((x) << S_BYTETHRESHOLD)
1125
1126#define S_MSSTHRESHOLD 3
1127#define M_MSSTHRESHOLD 0x3
1128#define V_MSSTHRESHOLD(x) ((x) << S_MSSTHRESHOLD)
1129
1130#define S_AUTOCAREFUL 2
1131#define V_AUTOCAREFUL(x) ((x) << S_AUTOCAREFUL)
1132#define F_AUTOCAREFUL V_AUTOCAREFUL(1U)
1133
1134#define S_AUTOENABLE 1
1135#define V_AUTOENABLE(x) ((x) << S_AUTOENABLE)
1136#define F_AUTOENABLE V_AUTOENABLE(1U)
1137
1138#define S_DACK_MODE 0
1139#define V_DACK_MODE(x) ((x) << S_DACK_MODE)
1140#define F_DACK_MODE V_DACK_MODE(1U)
1141
1142#define A_TP_PC_CONFIG 0x348
1143
1144#define S_TXTOSQUEUEMAPMODE 26
1145#define V_TXTOSQUEUEMAPMODE(x) ((x) << S_TXTOSQUEUEMAPMODE)
1146#define F_TXTOSQUEUEMAPMODE V_TXTOSQUEUEMAPMODE(1U)
1147
1148#define S_ENABLEEPCMDAFULL 23
1149#define V_ENABLEEPCMDAFULL(x) ((x) << S_ENABLEEPCMDAFULL)
1150#define F_ENABLEEPCMDAFULL V_ENABLEEPCMDAFULL(1U)
1151
1152#define S_MODULATEUNIONMODE 22
1153#define V_MODULATEUNIONMODE(x) ((x) << S_MODULATEUNIONMODE)
1154#define F_MODULATEUNIONMODE V_MODULATEUNIONMODE(1U)
1155
1156#define S_TXDEFERENABLE 20
1157#define V_TXDEFERENABLE(x) ((x) << S_TXDEFERENABLE)
1158#define F_TXDEFERENABLE V_TXDEFERENABLE(1U)
1159
1160#define S_RXCONGESTIONMODE 19
1161#define V_RXCONGESTIONMODE(x) ((x) << S_RXCONGESTIONMODE)
1162#define F_RXCONGESTIONMODE V_RXCONGESTIONMODE(1U)
1163
1164#define S_HEARBEATDACK 16
1165#define V_HEARBEATDACK(x) ((x) << S_HEARBEATDACK)
1166#define F_HEARBEATDACK V_HEARBEATDACK(1U)
1167
1168#define S_TXCONGESTIONMODE 15
1169#define V_TXCONGESTIONMODE(x) ((x) << S_TXCONGESTIONMODE)
1170#define F_TXCONGESTIONMODE V_TXCONGESTIONMODE(1U)
1171
1172#define S_ENABLEOCSPIFULL 30
1173#define V_ENABLEOCSPIFULL(x) ((x) << S_ENABLEOCSPIFULL)
1174#define F_ENABLEOCSPIFULL V_ENABLEOCSPIFULL(1U)
1175
1176#define S_LOCKTID 28
1177#define V_LOCKTID(x) ((x) << S_LOCKTID)
1178#define F_LOCKTID V_LOCKTID(1U)
1179
1180#define S_TABLELATENCYDELTA 0
1181#define M_TABLELATENCYDELTA 0xf
1182#define V_TABLELATENCYDELTA(x) ((x) << S_TABLELATENCYDELTA)
1183#define G_TABLELATENCYDELTA(x) \
1184 (((x) >> S_TABLELATENCYDELTA) & M_TABLELATENCYDELTA)
1185
1186#define A_TP_PC_CONFIG2 0x34c
1187
1188#define S_DISBLEDAPARBIT0 15
1189#define V_DISBLEDAPARBIT0(x) ((x) << S_DISBLEDAPARBIT0)
1190#define F_DISBLEDAPARBIT0 V_DISBLEDAPARBIT0(1U)
1191
1192#define S_ENABLEARPMISS 13
1193#define V_ENABLEARPMISS(x) ((x) << S_ENABLEARPMISS)
1194#define F_ENABLEARPMISS V_ENABLEARPMISS(1U)
1195
1196#define S_ENABLENONOFDTNLSYN 12
1197#define V_ENABLENONOFDTNLSYN(x) ((x) << S_ENABLENONOFDTNLSYN)
1198#define F_ENABLENONOFDTNLSYN V_ENABLENONOFDTNLSYN(1U)
1199
1200#define S_ENABLEIPV6RSS 11
1201#define V_ENABLEIPV6RSS(x) ((x) << S_ENABLEIPV6RSS)
1202#define F_ENABLEIPV6RSS V_ENABLEIPV6RSS(1U)
1203
1204#define S_CHDRAFULL 4
1205#define V_CHDRAFULL(x) ((x) << S_CHDRAFULL)
1206#define F_CHDRAFULL V_CHDRAFULL(1U)
1207
1208#define A_TP_TCP_BACKOFF_REG0 0x350
1209
1210#define A_TP_TCP_BACKOFF_REG1 0x354
1211
1212#define A_TP_TCP_BACKOFF_REG2 0x358
1213
1214#define A_TP_TCP_BACKOFF_REG3 0x35c
1215
1216#define A_TP_PARA_REG2 0x368
1217
1218#define S_MAXRXDATA 16
1219#define M_MAXRXDATA 0xffff
1220#define V_MAXRXDATA(x) ((x) << S_MAXRXDATA)
1221
1222#define S_RXCOALESCESIZE 0
1223#define M_RXCOALESCESIZE 0xffff
1224#define V_RXCOALESCESIZE(x) ((x) << S_RXCOALESCESIZE)
1225
1226#define A_TP_PARA_REG3 0x36c
1227
1228#define S_TXDATAACKIDX 16
1229#define M_TXDATAACKIDX 0xf
1230
1231#define V_TXDATAACKIDX(x) ((x) << S_TXDATAACKIDX)
1232
1233#define S_TXPACEAUTOSTRICT 10
1234#define V_TXPACEAUTOSTRICT(x) ((x) << S_TXPACEAUTOSTRICT)
1235#define F_TXPACEAUTOSTRICT V_TXPACEAUTOSTRICT(1U)
1236
1237#define S_TXPACEFIXED 9
1238#define V_TXPACEFIXED(x) ((x) << S_TXPACEFIXED)
1239#define F_TXPACEFIXED V_TXPACEFIXED(1U)
1240
1241#define S_TXPACEAUTO 8
1242#define V_TXPACEAUTO(x) ((x) << S_TXPACEAUTO)
1243#define F_TXPACEAUTO V_TXPACEAUTO(1U)
1244
1245#define S_RXCOALESCEENABLE 1
1246#define V_RXCOALESCEENABLE(x) ((x) << S_RXCOALESCEENABLE)
1247#define F_RXCOALESCEENABLE V_RXCOALESCEENABLE(1U)
1248
1249#define S_RXCOALESCEPSHEN 0
1250#define V_RXCOALESCEPSHEN(x) ((x) << S_RXCOALESCEPSHEN)
1251#define F_RXCOALESCEPSHEN V_RXCOALESCEPSHEN(1U)
1252
1253#define A_TP_PARA_REG4 0x370
1254
1255#define A_TP_PARA_REG5 0x374
1256
1257#define S_RXDDPOFFINIT 3
1258#define V_RXDDPOFFINIT(x) ((x) << S_RXDDPOFFINIT)
1259#define F_RXDDPOFFINIT V_RXDDPOFFINIT(1U)
1260
1261#define A_TP_PARA_REG6 0x378
1262
1263#define S_T3A_ENABLEESND 13
1264#define V_T3A_ENABLEESND(x) ((x) << S_T3A_ENABLEESND)
1265#define F_T3A_ENABLEESND V_T3A_ENABLEESND(1U)
1266
1267#define S_ENABLEESND 11
1268#define V_ENABLEESND(x) ((x) << S_ENABLEESND)
1269#define F_ENABLEESND V_ENABLEESND(1U)
1270
1271#define A_TP_PARA_REG7 0x37c
1272
1273#define S_PMMAXXFERLEN1 16
1274#define M_PMMAXXFERLEN1 0xffff
1275#define V_PMMAXXFERLEN1(x) ((x) << S_PMMAXXFERLEN1)
1276
1277#define S_PMMAXXFERLEN0 0
1278#define M_PMMAXXFERLEN0 0xffff
1279#define V_PMMAXXFERLEN0(x) ((x) << S_PMMAXXFERLEN0)
1280
1281#define A_TP_TIMER_RESOLUTION 0x390
1282
1283#define S_TIMERRESOLUTION 16
1284#define M_TIMERRESOLUTION 0xff
1285#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION)
1286
1287#define S_TIMESTAMPRESOLUTION 8
1288#define M_TIMESTAMPRESOLUTION 0xff
1289#define V_TIMESTAMPRESOLUTION(x) ((x) << S_TIMESTAMPRESOLUTION)
1290
1291#define S_DELAYEDACKRESOLUTION 0
1292#define M_DELAYEDACKRESOLUTION 0xff
1293#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION)
1294
1295#define A_TP_MSL 0x394
1296
1297#define A_TP_RXT_MIN 0x398
1298
1299#define A_TP_RXT_MAX 0x39c
1300
1301#define A_TP_PERS_MIN 0x3a0
1302
1303#define A_TP_PERS_MAX 0x3a4
1304
1305#define A_TP_KEEP_IDLE 0x3a8
1306
1307#define A_TP_KEEP_INTVL 0x3ac
1308
1309#define A_TP_INIT_SRTT 0x3b0
1310
1311#define A_TP_DACK_TIMER 0x3b4
1312
1313#define A_TP_FINWAIT2_TIMER 0x3b8
1314
1315#define A_TP_SHIFT_CNT 0x3c0
1316
1317#define S_SYNSHIFTMAX 24
1318
1319#define M_SYNSHIFTMAX 0xff
1320
1321#define V_SYNSHIFTMAX(x) ((x) << S_SYNSHIFTMAX)
1322
1323#define S_RXTSHIFTMAXR1 20
1324
1325#define M_RXTSHIFTMAXR1 0xf
1326
1327#define V_RXTSHIFTMAXR1(x) ((x) << S_RXTSHIFTMAXR1)
1328
1329#define S_RXTSHIFTMAXR2 16
1330
1331#define M_RXTSHIFTMAXR2 0xf
1332
1333#define V_RXTSHIFTMAXR2(x) ((x) << S_RXTSHIFTMAXR2)
1334
1335#define S_PERSHIFTBACKOFFMAX 12
1336#define M_PERSHIFTBACKOFFMAX 0xf
1337#define V_PERSHIFTBACKOFFMAX(x) ((x) << S_PERSHIFTBACKOFFMAX)
1338
1339#define S_PERSHIFTMAX 8
1340#define M_PERSHIFTMAX 0xf
1341#define V_PERSHIFTMAX(x) ((x) << S_PERSHIFTMAX)
1342
1343#define S_KEEPALIVEMAX 0
1344
1345#define M_KEEPALIVEMAX 0xff
1346
1347#define V_KEEPALIVEMAX(x) ((x) << S_KEEPALIVEMAX)
1348
1349#define A_TP_MTU_PORT_TABLE 0x3d0
1350
1351#define A_TP_CCTRL_TABLE 0x3dc
1352
1353#define A_TP_MTU_TABLE 0x3e4
1354
1355#define A_TP_RSS_MAP_TABLE 0x3e8
1356
1357#define A_TP_RSS_LKP_TABLE 0x3ec
1358
1359#define A_TP_RSS_CONFIG 0x3f0
1360
1361#define S_TNL4TUPEN 29
1362#define V_TNL4TUPEN(x) ((x) << S_TNL4TUPEN)
1363#define F_TNL4TUPEN V_TNL4TUPEN(1U)
1364
1365#define S_TNL2TUPEN 28
1366#define V_TNL2TUPEN(x) ((x) << S_TNL2TUPEN)
1367#define F_TNL2TUPEN V_TNL2TUPEN(1U)
1368
1369#define S_TNLPRTEN 26
1370#define V_TNLPRTEN(x) ((x) << S_TNLPRTEN)
1371#define F_TNLPRTEN V_TNLPRTEN(1U)
1372
1373#define S_TNLMAPEN 25
1374#define V_TNLMAPEN(x) ((x) << S_TNLMAPEN)
1375#define F_TNLMAPEN V_TNLMAPEN(1U)
1376
1377#define S_TNLLKPEN 24
1378#define V_TNLLKPEN(x) ((x) << S_TNLLKPEN)
1379#define F_TNLLKPEN V_TNLLKPEN(1U)
1380
1381#define S_RRCPLMAPEN 7
1382#define V_RRCPLMAPEN(x) ((x) << S_RRCPLMAPEN)
1383#define F_RRCPLMAPEN V_RRCPLMAPEN(1U)
1384
1385#define S_RRCPLCPUSIZE 4
1386#define M_RRCPLCPUSIZE 0x7
1387#define V_RRCPLCPUSIZE(x) ((x) << S_RRCPLCPUSIZE)
1388
1389#define S_RQFEEDBACKENABLE 3
1390#define V_RQFEEDBACKENABLE(x) ((x) << S_RQFEEDBACKENABLE)
1391#define F_RQFEEDBACKENABLE V_RQFEEDBACKENABLE(1U)
1392
1393#define S_HASHTOEPLITZ 2
1394#define V_HASHTOEPLITZ(x) ((x) << S_HASHTOEPLITZ)
1395#define F_HASHTOEPLITZ V_HASHTOEPLITZ(1U)
1396
1397#define S_DISABLE 0
1398
1399#define A_TP_TM_PIO_ADDR 0x418
1400
1401#define A_TP_TM_PIO_DATA 0x41c
1402
1403#define A_TP_TX_MOD_QUE_TABLE 0x420
1404
1405#define A_TP_TX_RESOURCE_LIMIT 0x424
1406
1407#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x428
1408
1409#define S_TX_MOD_QUEUE_REQ_MAP 0
1410#define M_TX_MOD_QUEUE_REQ_MAP 0xff
1411#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
1412
1413#define A_TP_TX_MOD_QUEUE_WEIGHT1 0x42c
1414
1415#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x430
1416
1417#define A_TP_MOD_CHANNEL_WEIGHT 0x434
1418
1419#define A_TP_MOD_RATE_LIMIT 0x438
1420
1421#define A_TP_PIO_ADDR 0x440
1422
1423#define A_TP_PIO_DATA 0x444
1424
1425#define A_TP_RESET 0x44c
1426
1427#define S_FLSTINITENABLE 1
1428#define V_FLSTINITENABLE(x) ((x) << S_FLSTINITENABLE)
1429#define F_FLSTINITENABLE V_FLSTINITENABLE(1U)
1430
1431#define S_TPRESET 0
1432#define V_TPRESET(x) ((x) << S_TPRESET)
1433#define F_TPRESET V_TPRESET(1U)
1434
1435#define A_TP_CMM_MM_RX_FLST_BASE 0x460
1436
1437#define A_TP_CMM_MM_TX_FLST_BASE 0x464
1438
1439#define A_TP_CMM_MM_PS_FLST_BASE 0x468
1440
1441#define A_TP_MIB_INDEX 0x450
1442
1443#define A_TP_MIB_RDATA 0x454
1444
1445#define A_TP_CMM_MM_MAX_PSTRUCT 0x46c
1446
1447#define A_TP_INT_ENABLE 0x470
1448
1449#define S_FLMTXFLSTEMPTY 30
1450#define V_FLMTXFLSTEMPTY(x) ((x) << S_FLMTXFLSTEMPTY)
1451#define F_FLMTXFLSTEMPTY V_FLMTXFLSTEMPTY(1U)
1452
1453#define S_FLMRXFLSTEMPTY 29
1454#define V_FLMRXFLSTEMPTY(x) ((x) << S_FLMRXFLSTEMPTY)
1455#define F_FLMRXFLSTEMPTY V_FLMRXFLSTEMPTY(1U)
1456
1457#define S_ARPLUTPERR 26
1458#define V_ARPLUTPERR(x) ((x) << S_ARPLUTPERR)
1459#define F_ARPLUTPERR V_ARPLUTPERR(1U)
1460
1461#define S_CMCACHEPERR 24
1462#define V_CMCACHEPERR(x) ((x) << S_CMCACHEPERR)
1463#define F_CMCACHEPERR V_CMCACHEPERR(1U)
1464
1465#define A_TP_INT_CAUSE 0x474
1466
1467#define A_TP_TX_MOD_Q1_Q0_RATE_LIMIT 0x8
1468
1469#define A_TP_TX_DROP_CFG_CH0 0x12b
1470
1471#define A_TP_TX_DROP_MODE 0x12f
1472
1473#define A_TP_EGRESS_CONFIG 0x145
1474
1475#define S_REWRITEFORCETOSIZE 0
1476#define V_REWRITEFORCETOSIZE(x) ((x) << S_REWRITEFORCETOSIZE)
1477#define F_REWRITEFORCETOSIZE V_REWRITEFORCETOSIZE(1U)
1478
1479#define A_TP_TX_TRC_KEY0 0x20
1480
1481#define A_TP_RX_TRC_KEY0 0x120
1482
1483#define A_TP_TX_DROP_CNT_CH0 0x12d
1484
1485#define S_TXDROPCNTCH0RCVD 0
1486#define M_TXDROPCNTCH0RCVD 0xffff
1487#define V_TXDROPCNTCH0RCVD(x) ((x) << S_TXDROPCNTCH0RCVD)
1488#define G_TXDROPCNTCH0RCVD(x) (((x) >> S_TXDROPCNTCH0RCVD) & \
1489 M_TXDROPCNTCH0RCVD)
1490
1491#define A_TP_PROXY_FLOW_CNTL 0x4b0
1492
1493#define A_TP_EMBED_OP_FIELD0 0x4e8
1494#define A_TP_EMBED_OP_FIELD1 0x4ec
1495#define A_TP_EMBED_OP_FIELD2 0x4f0
1496#define A_TP_EMBED_OP_FIELD3 0x4f4
1497#define A_TP_EMBED_OP_FIELD4 0x4f8
1498#define A_TP_EMBED_OP_FIELD5 0x4fc
1499
1500#define A_ULPRX_CTL 0x500
1501
1502#define S_ROUND_ROBIN 4
1503#define V_ROUND_ROBIN(x) ((x) << S_ROUND_ROBIN)
1504#define F_ROUND_ROBIN V_ROUND_ROBIN(1U)
1505
1506#define A_ULPRX_INT_ENABLE 0x504
1507
1508#define S_DATASELFRAMEERR0 7
1509#define V_DATASELFRAMEERR0(x) ((x) << S_DATASELFRAMEERR0)
1510#define F_DATASELFRAMEERR0 V_DATASELFRAMEERR0(1U)
1511
1512#define S_DATASELFRAMEERR1 6
1513#define V_DATASELFRAMEERR1(x) ((x) << S_DATASELFRAMEERR1)
1514#define F_DATASELFRAMEERR1 V_DATASELFRAMEERR1(1U)
1515
1516#define S_PCMDMUXPERR 5
1517#define V_PCMDMUXPERR(x) ((x) << S_PCMDMUXPERR)
1518#define F_PCMDMUXPERR V_PCMDMUXPERR(1U)
1519
1520#define S_ARBFPERR 4
1521#define V_ARBFPERR(x) ((x) << S_ARBFPERR)
1522#define F_ARBFPERR V_ARBFPERR(1U)
1523
1524#define S_ARBPF0PERR 3
1525#define V_ARBPF0PERR(x) ((x) << S_ARBPF0PERR)
1526#define F_ARBPF0PERR V_ARBPF0PERR(1U)
1527
1528#define S_ARBPF1PERR 2
1529#define V_ARBPF1PERR(x) ((x) << S_ARBPF1PERR)
1530#define F_ARBPF1PERR V_ARBPF1PERR(1U)
1531
1532#define S_PARERRPCMD 1
1533#define V_PARERRPCMD(x) ((x) << S_PARERRPCMD)
1534#define F_PARERRPCMD V_PARERRPCMD(1U)
1535
1536#define S_PARERRDATA 0
1537#define V_PARERRDATA(x) ((x) << S_PARERRDATA)
1538#define F_PARERRDATA V_PARERRDATA(1U)
1539
1540#define A_ULPRX_INT_CAUSE 0x508
1541
1542#define A_ULPRX_ISCSI_LLIMIT 0x50c
1543
1544#define A_ULPRX_ISCSI_ULIMIT 0x510
1545
1546#define A_ULPRX_ISCSI_TAGMASK 0x514
1547
1548#define A_ULPRX_ISCSI_PSZ 0x518
1549
1550#define A_ULPRX_TDDP_LLIMIT 0x51c
1551
1552#define A_ULPRX_TDDP_ULIMIT 0x520
1553#define A_ULPRX_TDDP_PSZ 0x528
1554
1555#define S_HPZ0 0
1556#define M_HPZ0 0xf
1557#define V_HPZ0(x) ((x) << S_HPZ0)
1558#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0)
1559
1560#define A_ULPRX_STAG_LLIMIT 0x52c
1561
1562#define A_ULPRX_STAG_ULIMIT 0x530
1563
1564#define A_ULPRX_RQ_LLIMIT 0x534
1566
1567#define A_ULPRX_RQ_ULIMIT 0x538
1569
1570#define A_ULPRX_PBL_LLIMIT 0x53c
1571
1572#define A_ULPRX_PBL_ULIMIT 0x540
1574
1575#define A_ULPRX_TDDP_TAGMASK 0x524
1576
1586#define A_ULPTX_CONFIG 0x580
1587
1588#define S_CFG_CQE_SOP_MASK 1
1589#define V_CFG_CQE_SOP_MASK(x) ((x) << S_CFG_CQE_SOP_MASK)
1590#define F_CFG_CQE_SOP_MASK V_CFG_CQE_SOP_MASK(1U)
1591
1592#define S_CFG_RR_ARB 0
1593#define V_CFG_RR_ARB(x) ((x) << S_CFG_RR_ARB)
1594#define F_CFG_RR_ARB V_CFG_RR_ARB(1U)
1595
1596#define A_ULPTX_INT_ENABLE 0x584
1597
1598#define S_PBL_BOUND_ERR_CH1 1
1599#define V_PBL_BOUND_ERR_CH1(x) ((x) << S_PBL_BOUND_ERR_CH1)
1600#define F_PBL_BOUND_ERR_CH1 V_PBL_BOUND_ERR_CH1(1U)
1601
1602#define S_PBL_BOUND_ERR_CH0 0
1603#define V_PBL_BOUND_ERR_CH0(x) ((x) << S_PBL_BOUND_ERR_CH0)
1604#define F_PBL_BOUND_ERR_CH0 V_PBL_BOUND_ERR_CH0(1U)
1605
1606#define A_ULPTX_INT_CAUSE 0x588
1607
1608#define A_ULPTX_TPT_LLIMIT 0x58c
1609
1610#define A_ULPTX_TPT_ULIMIT 0x590
1611
1612#define A_ULPTX_PBL_LLIMIT 0x594
1613
1614#define A_ULPTX_PBL_ULIMIT 0x598
1615
1616#define A_ULPTX_DMA_WEIGHT 0x5ac
1617
1618#define S_D1_WEIGHT 16
1619#define M_D1_WEIGHT 0xffff
1620#define V_D1_WEIGHT(x) ((x) << S_D1_WEIGHT)
1621
1622#define S_D0_WEIGHT 0
1623#define M_D0_WEIGHT 0xffff
1624#define V_D0_WEIGHT(x) ((x) << S_D0_WEIGHT)
1625
1626#define A_PM1_RX_CFG 0x5c0
1627#define A_PM1_RX_MODE 0x5c4
1628
1629#define A_PM1_RX_INT_ENABLE 0x5d8
1630
1631#define S_ZERO_E_CMD_ERROR 18
1632#define V_ZERO_E_CMD_ERROR(x) ((x) << S_ZERO_E_CMD_ERROR)
1633#define F_ZERO_E_CMD_ERROR V_ZERO_E_CMD_ERROR(1U)
1634
1635#define S_IESPI0_FIFO2X_RX_FRAMING_ERROR 17
1636#define V_IESPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_FIFO2X_RX_FRAMING_ERROR)
1637#define F_IESPI0_FIFO2X_RX_FRAMING_ERROR V_IESPI0_FIFO2X_RX_FRAMING_ERROR(1U)
1638
1639#define S_IESPI1_FIFO2X_RX_FRAMING_ERROR 16
1640#define V_IESPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_FIFO2X_RX_FRAMING_ERROR)
1641#define F_IESPI1_FIFO2X_RX_FRAMING_ERROR V_IESPI1_FIFO2X_RX_FRAMING_ERROR(1U)
1642
1643#define S_IESPI0_RX_FRAMING_ERROR 15
1644#define V_IESPI0_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_RX_FRAMING_ERROR)
1645#define F_IESPI0_RX_FRAMING_ERROR V_IESPI0_RX_FRAMING_ERROR(1U)
1646
1647#define S_IESPI1_RX_FRAMING_ERROR 14
1648#define V_IESPI1_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_RX_FRAMING_ERROR)
1649#define F_IESPI1_RX_FRAMING_ERROR V_IESPI1_RX_FRAMING_ERROR(1U)
1650
1651#define S_IESPI0_TX_FRAMING_ERROR 13
1652#define V_IESPI0_TX_FRAMING_ERROR(x) ((x) << S_IESPI0_TX_FRAMING_ERROR)
1653#define F_IESPI0_TX_FRAMING_ERROR V_IESPI0_TX_FRAMING_ERROR(1U)
1654
1655#define S_IESPI1_TX_FRAMING_ERROR 12
1656#define V_IESPI1_TX_FRAMING_ERROR(x) ((x) << S_IESPI1_TX_FRAMING_ERROR)
1657#define F_IESPI1_TX_FRAMING_ERROR V_IESPI1_TX_FRAMING_ERROR(1U)
1658
1659#define S_OCSPI0_RX_FRAMING_ERROR 11
1660#define V_OCSPI0_RX_FRAMING_ERROR(x) ((x) << S_OCSPI0_RX_FRAMING_ERROR)
1661#define F_OCSPI0_RX_FRAMING_ERROR V_OCSPI0_RX_FRAMING_ERROR(1U)
1662
1663#define S_OCSPI1_RX_FRAMING_ERROR 10
1664#define V_OCSPI1_RX_FRAMING_ERROR(x) ((x) << S_OCSPI1_RX_FRAMING_ERROR)
1665#define F_OCSPI1_RX_FRAMING_ERROR V_OCSPI1_RX_FRAMING_ERROR(1U)
1666
1667#define S_OCSPI0_TX_FRAMING_ERROR 9
1668#define V_OCSPI0_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_TX_FRAMING_ERROR)
1669#define F_OCSPI0_TX_FRAMING_ERROR V_OCSPI0_TX_FRAMING_ERROR(1U)
1670
1671#define S_OCSPI1_TX_FRAMING_ERROR 8
1672#define V_OCSPI1_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_TX_FRAMING_ERROR)
1673#define F_OCSPI1_TX_FRAMING_ERROR V_OCSPI1_TX_FRAMING_ERROR(1U)
1674
1675#define S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR 7
1676#define V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR)
1677#define F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
1678
1679#define S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR 6
1680#define V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1681#define F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
1682
1683#define S_IESPI_PAR_ERROR 3
1684#define M_IESPI_PAR_ERROR 0x7
1685
1686#define V_IESPI_PAR_ERROR(x) ((x) << S_IESPI_PAR_ERROR)
1687
1688#define S_OCSPI_PAR_ERROR 0
1689#define M_OCSPI_PAR_ERROR 0x7
1690
1691#define V_OCSPI_PAR_ERROR(x) ((x) << S_OCSPI_PAR_ERROR)
1692
1693#define A_PM1_RX_INT_CAUSE 0x5dc
1694
1695#define A_PM1_TX_CFG 0x5e0
1696#define A_PM1_TX_MODE 0x5e4
1697
1698#define A_PM1_TX_INT_ENABLE 0x5f8
1699
1700#define S_ZERO_C_CMD_ERROR 18
1701#define V_ZERO_C_CMD_ERROR(x) ((x) << S_ZERO_C_CMD_ERROR)
1702#define F_ZERO_C_CMD_ERROR V_ZERO_C_CMD_ERROR(1U)
1703
1704#define S_ICSPI0_FIFO2X_RX_FRAMING_ERROR 17
1705#define V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_FIFO2X_RX_FRAMING_ERROR)
1706#define F_ICSPI0_FIFO2X_RX_FRAMING_ERROR V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(1U)
1707
1708#define S_ICSPI1_FIFO2X_RX_FRAMING_ERROR 16
1709#define V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_FIFO2X_RX_FRAMING_ERROR)
1710#define F_ICSPI1_FIFO2X_RX_FRAMING_ERROR V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(1U)
1711
1712#define S_ICSPI0_RX_FRAMING_ERROR 15
1713#define V_ICSPI0_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_RX_FRAMING_ERROR)
1714#define F_ICSPI0_RX_FRAMING_ERROR V_ICSPI0_RX_FRAMING_ERROR(1U)
1715
1716#define S_ICSPI1_RX_FRAMING_ERROR 14
1717#define V_ICSPI1_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_RX_FRAMING_ERROR)
1718#define F_ICSPI1_RX_FRAMING_ERROR V_ICSPI1_RX_FRAMING_ERROR(1U)
1719
1720#define S_ICSPI0_TX_FRAMING_ERROR 13
1721#define V_ICSPI0_TX_FRAMING_ERROR(x) ((x) << S_ICSPI0_TX_FRAMING_ERROR)
1722#define F_ICSPI0_TX_FRAMING_ERROR V_ICSPI0_TX_FRAMING_ERROR(1U)
1723
1724#define S_ICSPI1_TX_FRAMING_ERROR 12
1725#define V_ICSPI1_TX_FRAMING_ERROR(x) ((x) << S_ICSPI1_TX_FRAMING_ERROR)
1726#define F_ICSPI1_TX_FRAMING_ERROR V_ICSPI1_TX_FRAMING_ERROR(1U)
1727
1728#define S_OESPI0_RX_FRAMING_ERROR 11
1729#define V_OESPI0_RX_FRAMING_ERROR(x) ((x) << S_OESPI0_RX_FRAMING_ERROR)
1730#define F_OESPI0_RX_FRAMING_ERROR V_OESPI0_RX_FRAMING_ERROR(1U)
1731
1732#define S_OESPI1_RX_FRAMING_ERROR 10
1733#define V_OESPI1_RX_FRAMING_ERROR(x) ((x) << S_OESPI1_RX_FRAMING_ERROR)
1734#define F_OESPI1_RX_FRAMING_ERROR V_OESPI1_RX_FRAMING_ERROR(1U)
1735
1736#define S_OESPI0_TX_FRAMING_ERROR 9
1737#define V_OESPI0_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_TX_FRAMING_ERROR)
1738#define F_OESPI0_TX_FRAMING_ERROR V_OESPI0_TX_FRAMING_ERROR(1U)
1739
1740#define S_OESPI1_TX_FRAMING_ERROR 8
1741#define V_OESPI1_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_TX_FRAMING_ERROR)
1742#define F_OESPI1_TX_FRAMING_ERROR V_OESPI1_TX_FRAMING_ERROR(1U)
1743
1744#define S_OESPI0_OFIFO2X_TX_FRAMING_ERROR 7
1745#define V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_OFIFO2X_TX_FRAMING_ERROR)
1746#define F_OESPI0_OFIFO2X_TX_FRAMING_ERROR V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
1747
1748#define S_OESPI1_OFIFO2X_TX_FRAMING_ERROR 6
1749#define V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1750#define F_OESPI1_OFIFO2X_TX_FRAMING_ERROR V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
1751
1752#define S_ICSPI_PAR_ERROR 3
1753#define M_ICSPI_PAR_ERROR 0x7
1754
1755#define V_ICSPI_PAR_ERROR(x) ((x) << S_ICSPI_PAR_ERROR)
1756
1757#define S_OESPI_PAR_ERROR 0
1758#define M_OESPI_PAR_ERROR 0x7
1759
1760#define V_OESPI_PAR_ERROR(x) ((x) << S_OESPI_PAR_ERROR)
1761
1762#define A_PM1_TX_INT_CAUSE 0x5fc
1763
1764#define A_MPS_CFG 0x600
1765
1766#define S_TPRXPORTEN 4
1767#define V_TPRXPORTEN(x) ((x) << S_TPRXPORTEN)
1768#define F_TPRXPORTEN V_TPRXPORTEN(1U)
1769
1770#define S_TPTXPORT1EN 3
1771#define V_TPTXPORT1EN(x) ((x) << S_TPTXPORT1EN)
1772#define F_TPTXPORT1EN V_TPTXPORT1EN(1U)
1773
1774#define S_TPTXPORT0EN 2
1775#define V_TPTXPORT0EN(x) ((x) << S_TPTXPORT0EN)
1776#define F_TPTXPORT0EN V_TPTXPORT0EN(1U)
1777
1778#define S_PORT1ACTIVE 1
1779#define V_PORT1ACTIVE(x) ((x) << S_PORT1ACTIVE)
1780#define F_PORT1ACTIVE V_PORT1ACTIVE(1U)
1781
1782#define S_PORT0ACTIVE 0
1783#define V_PORT0ACTIVE(x) ((x) << S_PORT0ACTIVE)
1784#define F_PORT0ACTIVE V_PORT0ACTIVE(1U)
1785
1786#define S_ENFORCEPKT 11
1787#define V_ENFORCEPKT(x) ((x) << S_ENFORCEPKT)
1788#define F_ENFORCEPKT V_ENFORCEPKT(1U)
1789
1790#define A_MPS_INT_ENABLE 0x61c
1791
1792#define S_MCAPARERRENB 6
1793#define M_MCAPARERRENB 0x7
1794
1795#define V_MCAPARERRENB(x) ((x) << S_MCAPARERRENB)
1796
1797#define S_RXTPPARERRENB 4
1798#define M_RXTPPARERRENB 0x3
1799
1800#define V_RXTPPARERRENB(x) ((x) << S_RXTPPARERRENB)
1801
1802#define S_TX1TPPARERRENB 2
1803#define M_TX1TPPARERRENB 0x3
1804
1805#define V_TX1TPPARERRENB(x) ((x) << S_TX1TPPARERRENB)
1806
1807#define S_TX0TPPARERRENB 0
1808#define M_TX0TPPARERRENB 0x3
1809
1810#define V_TX0TPPARERRENB(x) ((x) << S_TX0TPPARERRENB)
1811
1812#define A_MPS_INT_CAUSE 0x620
1813
1814#define S_MCAPARERR 6
1815#define M_MCAPARERR 0x7
1816
1817#define V_MCAPARERR(x) ((x) << S_MCAPARERR)
1818
1819#define S_RXTPPARERR 4
1820#define M_RXTPPARERR 0x3
1821
1822#define V_RXTPPARERR(x) ((x) << S_RXTPPARERR)
1823
1824#define S_TX1TPPARERR 2
1825#define M_TX1TPPARERR 0x3
1826
1827#define V_TX1TPPARERR(x) ((x) << S_TX1TPPARERR)
1828
1829#define S_TX0TPPARERR 0
1830#define M_TX0TPPARERR 0x3
1831
1832#define V_TX0TPPARERR(x) ((x) << S_TX0TPPARERR)
1833
1834#define A_CPL_SWITCH_CNTRL 0x640
1835
1836#define A_CPL_INTR_ENABLE 0x650
1837
1838#define S_CIM_OP_MAP_PERR 5
1839#define V_CIM_OP_MAP_PERR(x) ((x) << S_CIM_OP_MAP_PERR)
1840#define F_CIM_OP_MAP_PERR V_CIM_OP_MAP_PERR(1U)
1841
1842#define S_CIM_OVFL_ERROR 4
1843#define V_CIM_OVFL_ERROR(x) ((x) << S_CIM_OVFL_ERROR)
1844#define F_CIM_OVFL_ERROR V_CIM_OVFL_ERROR(1U)
1845
1846#define S_TP_FRAMING_ERROR 3
1847#define V_TP_FRAMING_ERROR(x) ((x) << S_TP_FRAMING_ERROR)
1848#define F_TP_FRAMING_ERROR V_TP_FRAMING_ERROR(1U)
1849
1850#define S_SGE_FRAMING_ERROR 2
1851#define V_SGE_FRAMING_ERROR(x) ((x) << S_SGE_FRAMING_ERROR)
1852#define F_SGE_FRAMING_ERROR V_SGE_FRAMING_ERROR(1U)
1853
1854#define S_CIM_FRAMING_ERROR 1
1855#define V_CIM_FRAMING_ERROR(x) ((x) << S_CIM_FRAMING_ERROR)
1856#define F_CIM_FRAMING_ERROR V_CIM_FRAMING_ERROR(1U)
1857
1858#define S_ZERO_SWITCH_ERROR 0
1859#define V_ZERO_SWITCH_ERROR(x) ((x) << S_ZERO_SWITCH_ERROR)
1860#define F_ZERO_SWITCH_ERROR V_ZERO_SWITCH_ERROR(1U)
1861
1862#define A_CPL_INTR_CAUSE 0x654
1863
1864#define A_CPL_MAP_TBL_DATA 0x65c
1865
1866#define A_SMB_GLOBAL_TIME_CFG 0x660
1867
1868#define A_I2C_CFG 0x6a0
1869
1870#define S_I2C_CLKDIV 0
1871#define M_I2C_CLKDIV 0xfff
1872#define V_I2C_CLKDIV(x) ((x) << S_I2C_CLKDIV)
1873
1874#define A_MI1_CFG 0x6b0
1875
1876#define S_CLKDIV 5
1877#define M_CLKDIV 0xff
1878#define V_CLKDIV(x) ((x) << S_CLKDIV)
1879
1880#define S_ST 3
1881
1882#define M_ST 0x3
1883
1884#define V_ST(x) ((x) << S_ST)
1885
1886#define G_ST(x) (((x) >> S_ST) & M_ST)
1887
1888#define S_PREEN 2
1889#define V_PREEN(x) ((x) << S_PREEN)
1890#define F_PREEN V_PREEN(1U)
1891
1892#define S_MDIINV 1
1893#define V_MDIINV(x) ((x) << S_MDIINV)
1894#define F_MDIINV V_MDIINV(1U)
1895
1896#define S_MDIEN 0
1897#define V_MDIEN(x) ((x) << S_MDIEN)
1898#define F_MDIEN V_MDIEN(1U)
1899
1900#define A_MI1_ADDR 0x6b4
1901
1902#define S_PHYADDR 5
1903#define M_PHYADDR 0x1f
1904#define V_PHYADDR(x) ((x) << S_PHYADDR)
1905
1906#define S_REGADDR 0
1907#define M_REGADDR 0x1f
1908#define V_REGADDR(x) ((x) << S_REGADDR)
1909
1910#define A_MI1_DATA 0x6b8
1911
1912#define A_MI1_OP 0x6bc
1913
1914#define S_MDI_OP 0
1915#define M_MDI_OP 0x3
1916#define V_MDI_OP(x) ((x) << S_MDI_OP)
1917
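/*
 * Illustrative composition of an MDIO access with the MI1 fields above;
 * phy_addr, reg and op stand for the caller's values, and t3_write_reg is
 * assumed to be the driver's register write helper. A PHY access would
 * typically program A_MI1_ADDR this way before issuing the operation
 * through A_MI1_OP (the mi1 helpers in t3_hw.c follow a similar pattern):
 *
 *	t3_write_reg(adap, A_MI1_ADDR, V_PHYADDR(phy_addr) | V_REGADDR(reg));
 *	t3_write_reg(adap, A_MI1_OP, V_MDI_OP(op));
 */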
1918#define A_SF_DATA 0x6d8
1919
1920#define A_SF_OP 0x6dc
1921
1922#define S_BYTECNT 1
1923#define M_BYTECNT 0x3
1924#define V_BYTECNT(x) ((x) << S_BYTECNT)
1925
1926#define A_PL_INT_ENABLE0 0x6e0
1927
1928#define S_T3DBG 23
1929#define V_T3DBG(x) ((x) << S_T3DBG)
1930#define F_T3DBG V_T3DBG(1U)
1931
1932#define S_XGMAC0_1 20
1933#define V_XGMAC0_1(x) ((x) << S_XGMAC0_1)
1934#define F_XGMAC0_1 V_XGMAC0_1(1U)
1935
1936#define S_XGMAC0_0 19
1937#define V_XGMAC0_0(x) ((x) << S_XGMAC0_0)
1938#define F_XGMAC0_0 V_XGMAC0_0(1U)
1939
1940#define S_MC5A 18
1941#define V_MC5A(x) ((x) << S_MC5A)
1942#define F_MC5A V_MC5A(1U)
1943
1944#define S_CPL_SWITCH 12
1945#define V_CPL_SWITCH(x) ((x) << S_CPL_SWITCH)
1946#define F_CPL_SWITCH V_CPL_SWITCH(1U)
1947
1948#define S_MPS0 11
1949#define V_MPS0(x) ((x) << S_MPS0)
1950#define F_MPS0 V_MPS0(1U)
1951
1952#define S_PM1_TX 10
1953#define V_PM1_TX(x) ((x) << S_PM1_TX)
1954#define F_PM1_TX V_PM1_TX(1U)
1955
1956#define S_PM1_RX 9
1957#define V_PM1_RX(x) ((x) << S_PM1_RX)
1958#define F_PM1_RX V_PM1_RX(1U)
1959
1960#define S_ULP2_TX 8
1961#define V_ULP2_TX(x) ((x) << S_ULP2_TX)
1962#define F_ULP2_TX V_ULP2_TX(1U)
1963
1964#define S_ULP2_RX 7
1965#define V_ULP2_RX(x) ((x) << S_ULP2_RX)
1966#define F_ULP2_RX V_ULP2_RX(1U)
1967
1968#define S_TP1 6
1969#define V_TP1(x) ((x) << S_TP1)
1970#define F_TP1 V_TP1(1U)
1971
1972#define S_CIM 5
1973#define V_CIM(x) ((x) << S_CIM)
1974#define F_CIM V_CIM(1U)
1975
1976#define S_MC7_CM 4
1977#define V_MC7_CM(x) ((x) << S_MC7_CM)
1978#define F_MC7_CM V_MC7_CM(1U)
1979
1980#define S_MC7_PMTX 3
1981#define V_MC7_PMTX(x) ((x) << S_MC7_PMTX)
1982#define F_MC7_PMTX V_MC7_PMTX(1U)
1983
1984#define S_MC7_PMRX 2
1985#define V_MC7_PMRX(x) ((x) << S_MC7_PMRX)
1986#define F_MC7_PMRX V_MC7_PMRX(1U)
1987
1988#define S_PCIM0 1
1989#define V_PCIM0(x) ((x) << S_PCIM0)
1990#define F_PCIM0 V_PCIM0(1U)
1991
1992#define S_SGE3 0
1993#define V_SGE3(x) ((x) << S_SGE3)
1994#define F_SGE3 V_SGE3(1U)
1995
1996#define A_PL_INT_CAUSE0 0x6e4
1997
1998#define A_PL_RST 0x6f0
1999
2000#define S_FATALPERREN 4
2001#define V_FATALPERREN(x) ((x) << S_FATALPERREN)
2002#define F_FATALPERREN V_FATALPERREN(1U)
2003
2004#define S_CRSTWRM 1
2005#define V_CRSTWRM(x) ((x) << S_CRSTWRM)
2006#define F_CRSTWRM V_CRSTWRM(1U)
2007
2008#define A_PL_REV 0x6f4
2009
2010#define A_PL_CLI 0x6f8
2011
2012#define A_MC5_DB_CONFIG 0x704
2013
2014#define S_TMTYPEHI 30
2015#define V_TMTYPEHI(x) ((x) << S_TMTYPEHI)
2016#define F_TMTYPEHI V_TMTYPEHI(1U)
2017
2018#define S_TMPARTSIZE 28
2019#define M_TMPARTSIZE 0x3
2020#define V_TMPARTSIZE(x) ((x) << S_TMPARTSIZE)
2021#define G_TMPARTSIZE(x) (((x) >> S_TMPARTSIZE) & M_TMPARTSIZE)
2022
2023#define S_TMTYPE 26
2024#define M_TMTYPE 0x3
2025#define V_TMTYPE(x) ((x) << S_TMTYPE)
2026#define G_TMTYPE(x) (((x) >> S_TMTYPE) & M_TMTYPE)
2027
2028#define S_COMPEN 17
2029#define V_COMPEN(x) ((x) << S_COMPEN)
2030#define F_COMPEN V_COMPEN(1U)
2031
2032#define S_PRTYEN 6
2033#define V_PRTYEN(x) ((x) << S_PRTYEN)
2034#define F_PRTYEN V_PRTYEN(1U)
2035
2036#define S_MBUSEN 5
2037#define V_MBUSEN(x) ((x) << S_MBUSEN)
2038#define F_MBUSEN V_MBUSEN(1U)
2039
2040#define S_DBGIEN 4
2041#define V_DBGIEN(x) ((x) << S_DBGIEN)
2042#define F_DBGIEN V_DBGIEN(1U)
2043
2044#define S_TMRDY 2
2045#define V_TMRDY(x) ((x) << S_TMRDY)
2046#define F_TMRDY V_TMRDY(1U)
2047
2048#define S_TMRST 1
2049#define V_TMRST(x) ((x) << S_TMRST)
2050#define F_TMRST V_TMRST(1U)
2051
2052#define S_TMMODE 0
2053#define V_TMMODE(x) ((x) << S_TMMODE)
2054#define F_TMMODE V_TMMODE(1U)
2055
2058#define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c
2059
2060#define A_MC5_DB_FILTER_TABLE 0x710
2061
2062#define A_MC5_DB_SERVER_INDEX 0x714
2063
2064#define A_MC5_DB_RSP_LATENCY 0x720
2065
2066#define S_RDLAT 16
2067#define M_RDLAT 0x1f
2068#define V_RDLAT(x) ((x) << S_RDLAT)
2069
2070#define S_LRNLAT 8
2071#define M_LRNLAT 0x1f
2072#define V_LRNLAT(x) ((x) << S_LRNLAT)
2073
2074#define S_SRCHLAT 0
2075#define M_SRCHLAT 0x1f
2076#define V_SRCHLAT(x) ((x) << S_SRCHLAT)
2077
2078#define A_MC5_DB_PART_ID_INDEX 0x72c
2079
2080#define A_MC5_DB_INT_ENABLE 0x740
2081
2082#define S_DELACTEMPTY 18
2083#define V_DELACTEMPTY(x) ((x) << S_DELACTEMPTY)
2084#define F_DELACTEMPTY V_DELACTEMPTY(1U)
2085
2086#define S_DISPQPARERR 17
2087#define V_DISPQPARERR(x) ((x) << S_DISPQPARERR)
2088#define F_DISPQPARERR V_DISPQPARERR(1U)
2089
2090#define S_REQQPARERR 16
2091#define V_REQQPARERR(x) ((x) << S_REQQPARERR)
2092#define F_REQQPARERR V_REQQPARERR(1U)
2093
2094#define S_UNKNOWNCMD 15
2095#define V_UNKNOWNCMD(x) ((x) << S_UNKNOWNCMD)
2096#define F_UNKNOWNCMD V_UNKNOWNCMD(1U)
2097
2098#define S_NFASRCHFAIL 8
2099#define V_NFASRCHFAIL(x) ((x) << S_NFASRCHFAIL)
2100#define F_NFASRCHFAIL V_NFASRCHFAIL(1U)
2101
2102#define S_ACTRGNFULL 7
2103#define V_ACTRGNFULL(x) ((x) << S_ACTRGNFULL)
2104#define F_ACTRGNFULL V_ACTRGNFULL(1U)
2105
2106#define S_PARITYERR 6
2107#define V_PARITYERR(x) ((x) << S_PARITYERR)
2108#define F_PARITYERR V_PARITYERR(1U)
2109
2110#define A_MC5_DB_INT_CAUSE 0x744
2111
2112#define A_MC5_DB_DBGI_CONFIG 0x774
2113
2114#define A_MC5_DB_DBGI_REQ_CMD 0x778
2115
2116#define A_MC5_DB_DBGI_REQ_ADDR0 0x77c
2117
2118#define A_MC5_DB_DBGI_REQ_ADDR1 0x780
2119
2120#define A_MC5_DB_DBGI_REQ_ADDR2 0x784
2121
2122#define A_MC5_DB_DBGI_REQ_DATA0 0x788
2123
2124#define A_MC5_DB_DBGI_REQ_DATA1 0x78c
2125
2126#define A_MC5_DB_DBGI_REQ_DATA2 0x790
2127
2128#define A_MC5_DB_DBGI_RSP_STATUS 0x7b0
2129
2130#define S_DBGIRSPVALID 0
2131#define V_DBGIRSPVALID(x) ((x) << S_DBGIRSPVALID)
2132#define F_DBGIRSPVALID V_DBGIRSPVALID(1U)
2133
2134#define A_MC5_DB_DBGI_RSP_DATA0 0x7b4
2135
2136#define A_MC5_DB_DBGI_RSP_DATA1 0x7b8
2137
2138#define A_MC5_DB_DBGI_RSP_DATA2 0x7bc
2139
2140#define A_MC5_DB_POPEN_DATA_WR_CMD 0x7cc
2141
2142#define A_MC5_DB_POPEN_MASK_WR_CMD 0x7d0
2143
2144#define A_MC5_DB_AOPEN_SRCH_CMD 0x7d4
2145
2146#define A_MC5_DB_AOPEN_LRN_CMD 0x7d8
2147
2148#define A_MC5_DB_SYN_SRCH_CMD 0x7dc
2149
2150#define A_MC5_DB_SYN_LRN_CMD 0x7e0
2151
2152#define A_MC5_DB_ACK_SRCH_CMD 0x7e4
2153
2154#define A_MC5_DB_ACK_LRN_CMD 0x7e8
2155
2156#define A_MC5_DB_ILOOKUP_CMD 0x7ec
2157
2158#define A_MC5_DB_ELOOKUP_CMD 0x7f0
2159
2160#define A_MC5_DB_DATA_WRITE_CMD 0x7f4
2161
2162#define A_MC5_DB_DATA_READ_CMD 0x7f8
2163
2164#define XGMAC0_0_BASE_ADDR 0x800
2165
2166#define A_XGM_TX_CTRL 0x800
2167
2168#define S_TXEN 0
2169#define V_TXEN(x) ((x) << S_TXEN)
2170#define F_TXEN V_TXEN(1U)
2171
2172#define A_XGM_TX_CFG 0x804
2173
2174#define S_TXPAUSEEN 0
2175#define V_TXPAUSEEN(x) ((x) << S_TXPAUSEEN)
2176#define F_TXPAUSEEN V_TXPAUSEEN(1U)
2177
2178#define A_XGM_TX_PAUSE_QUANTA 0x808
2179
2180#define A_XGM_RX_CTRL 0x80c
2181
2182#define S_RXEN 0
2183#define V_RXEN(x) ((x) << S_RXEN)
2184#define F_RXEN V_RXEN(1U)
2185
2186#define A_XGM_RX_CFG 0x810
2187
2188#define S_DISPAUSEFRAMES 9
2189#define V_DISPAUSEFRAMES(x) ((x) << S_DISPAUSEFRAMES)
2190#define F_DISPAUSEFRAMES V_DISPAUSEFRAMES(1U)
2191
2192#define S_EN1536BFRAMES 8
2193#define V_EN1536BFRAMES(x) ((x) << S_EN1536BFRAMES)
2194#define F_EN1536BFRAMES V_EN1536BFRAMES(1U)
2195
2196#define S_ENJUMBO 7
2197#define V_ENJUMBO(x) ((x) << S_ENJUMBO)
2198#define F_ENJUMBO V_ENJUMBO(1U)
2199
2200#define S_RMFCS 6
2201#define V_RMFCS(x) ((x) << S_RMFCS)
2202#define F_RMFCS V_RMFCS(1U)
2203
2204#define S_ENHASHMCAST 2
2205#define V_ENHASHMCAST(x) ((x) << S_ENHASHMCAST)
2206#define F_ENHASHMCAST V_ENHASHMCAST(1U)
2207
2208#define S_COPYALLFRAMES 0
2209#define V_COPYALLFRAMES(x) ((x) << S_COPYALLFRAMES)
2210#define F_COPYALLFRAMES V_COPYALLFRAMES(1U)
2211
2212#define S_DISBCAST 1
2213#define V_DISBCAST(x) ((x) << S_DISBCAST)
2214#define F_DISBCAST V_DISBCAST(1U)
2215
2216#define A_XGM_RX_HASH_LOW 0x814
2217
2218#define A_XGM_RX_HASH_HIGH 0x818
2219
2220#define A_XGM_RX_EXACT_MATCH_LOW_1 0x81c
2221
2222#define A_XGM_RX_EXACT_MATCH_HIGH_1 0x820
2223
2224#define A_XGM_RX_EXACT_MATCH_LOW_2 0x824
2225
2226#define A_XGM_RX_EXACT_MATCH_LOW_3 0x82c
2227
2228#define A_XGM_RX_EXACT_MATCH_LOW_4 0x834
2229
2230#define A_XGM_RX_EXACT_MATCH_LOW_5 0x83c
2231
2232#define A_XGM_RX_EXACT_MATCH_LOW_6 0x844
2233
2234#define A_XGM_RX_EXACT_MATCH_LOW_7 0x84c
2235
2236#define A_XGM_RX_EXACT_MATCH_LOW_8 0x854
2237
2238#define A_XGM_INT_STATUS 0x86c
2239
2240#define S_LINKFAULTCHANGE 9
2241#define V_LINKFAULTCHANGE(x) ((x) << S_LINKFAULTCHANGE)
2242#define F_LINKFAULTCHANGE V_LINKFAULTCHANGE(1U)
2243
2244#define A_XGM_XGM_INT_ENABLE 0x874
2245#define A_XGM_XGM_INT_DISABLE 0x878
2246
2247#define A_XGM_STAT_CTRL 0x880
2248
2249#define S_CLRSTATS 2
2250#define V_CLRSTATS(x) ((x) << S_CLRSTATS)
2251#define F_CLRSTATS V_CLRSTATS(1U)
2252
2253#define A_XGM_RXFIFO_CFG 0x884
2254
2255#define S_RXFIFO_EMPTY 31
2256#define V_RXFIFO_EMPTY(x) ((x) << S_RXFIFO_EMPTY)
2257#define F_RXFIFO_EMPTY V_RXFIFO_EMPTY(1U)
2258
2259#define S_RXFIFOPAUSEHWM 17
2260#define M_RXFIFOPAUSEHWM 0xfff
2261
2262#define V_RXFIFOPAUSEHWM(x) ((x) << S_RXFIFOPAUSEHWM)
2263
2264#define G_RXFIFOPAUSEHWM(x) (((x) >> S_RXFIFOPAUSEHWM) & M_RXFIFOPAUSEHWM)
2265
2266#define S_RXFIFOPAUSELWM 5
2267#define M_RXFIFOPAUSELWM 0xfff
2268
2269#define V_RXFIFOPAUSELWM(x) ((x) << S_RXFIFOPAUSELWM)
2270
2271#define G_RXFIFOPAUSELWM(x) (((x) >> S_RXFIFOPAUSELWM) & M_RXFIFOPAUSELWM)
2272
2273#define S_RXSTRFRWRD 1
2274#define V_RXSTRFRWRD(x) ((x) << S_RXSTRFRWRD)
2275#define F_RXSTRFRWRD V_RXSTRFRWRD(1U)
2276
2277#define S_DISERRFRAMES 0
2278#define V_DISERRFRAMES(x) ((x) << S_DISERRFRAMES)
2279#define F_DISERRFRAMES V_DISERRFRAMES(1U)
2280
2281#define A_XGM_TXFIFO_CFG 0x888
2282
2283#define S_UNDERUNFIX 22
2284#define V_UNDERUNFIX(x) ((x) << S_UNDERUNFIX)
2285#define F_UNDERUNFIX V_UNDERUNFIX(1U)
2286
2287#define S_TXIPG 13
2288#define M_TXIPG 0xff
2289#define V_TXIPG(x) ((x) << S_TXIPG)
2290#define G_TXIPG(x) (((x) >> S_TXIPG) & M_TXIPG)
2291
2292#define S_TXFIFOTHRESH 4
2293#define M_TXFIFOTHRESH 0x1ff
2294
2295#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH)
2296
2297#define S_ENDROPPKT 21
2298#define V_ENDROPPKT(x) ((x) << S_ENDROPPKT)
2299#define F_ENDROPPKT V_ENDROPPKT(1U)
2300
2301#define A_XGM_SERDES_CTRL 0x890
2302#define A_XGM_SERDES_CTRL0 0x8e0
2303
2304#define S_SERDESRESET_ 24
2305#define V_SERDESRESET_(x) ((x) << S_SERDESRESET_)
2306#define F_SERDESRESET_ V_SERDESRESET_(1U)
2307
2308#define S_RXENABLE 4
2309#define V_RXENABLE(x) ((x) << S_RXENABLE)
2310#define F_RXENABLE V_RXENABLE(1U)
2311
2312#define S_TXENABLE 3
2313#define V_TXENABLE(x) ((x) << S_TXENABLE)
2314#define F_TXENABLE V_TXENABLE(1U)
2315
2316#define A_XGM_PAUSE_TIMER 0x890
2317
2318#define A_XGM_RGMII_IMP 0x89c
2319
2320#define S_XGM_IMPSETUPDATE 6
2321#define V_XGM_IMPSETUPDATE(x) ((x) << S_XGM_IMPSETUPDATE)
2322#define F_XGM_IMPSETUPDATE V_XGM_IMPSETUPDATE(1U)
2323
2324#define S_RGMIIIMPPD 3
2325#define M_RGMIIIMPPD 0x7
2326#define V_RGMIIIMPPD(x) ((x) << S_RGMIIIMPPD)
2327
2328#define S_RGMIIIMPPU 0
2329#define M_RGMIIIMPPU 0x7
2330#define V_RGMIIIMPPU(x) ((x) << S_RGMIIIMPPU)
2331
2332#define S_CALRESET 8
2333#define V_CALRESET(x) ((x) << S_CALRESET)
2334#define F_CALRESET V_CALRESET(1U)
2335
2336#define S_CALUPDATE 7
2337#define V_CALUPDATE(x) ((x) << S_CALUPDATE)
2338#define F_CALUPDATE V_CALUPDATE(1U)
2339
2340#define A_XGM_XAUI_IMP 0x8a0
2341
2342#define S_CALBUSY 31
2343#define V_CALBUSY(x) ((x) << S_CALBUSY)
2344#define F_CALBUSY V_CALBUSY(1U)
2345
2346#define S_XGM_CALFAULT 29
2347#define V_XGM_CALFAULT(x) ((x) << S_XGM_CALFAULT)
2348#define F_XGM_CALFAULT V_XGM_CALFAULT(1U)
2349
2350#define S_CALIMP 24
2351#define M_CALIMP 0x1f
2352#define V_CALIMP(x) ((x) << S_CALIMP)
2353#define G_CALIMP(x) (((x) >> S_CALIMP) & M_CALIMP)
2354
2355#define S_XAUIIMP 0
2356#define M_XAUIIMP 0x7
2357#define V_XAUIIMP(x) ((x) << S_XAUIIMP)
2358
2359#define A_XGM_RX_MAX_PKT_SIZE 0x8a8
2360
2361#define S_RXMAXFRAMERSIZE 17
2362#define M_RXMAXFRAMERSIZE 0x3fff
2363#define V_RXMAXFRAMERSIZE(x) ((x) << S_RXMAXFRAMERSIZE)
2364#define G_RXMAXFRAMERSIZE(x) (((x) >> S_RXMAXFRAMERSIZE) & M_RXMAXFRAMERSIZE)
2365
2366#define S_RXENFRAMER 14
2367#define V_RXENFRAMER(x) ((x) << S_RXENFRAMER)
2368#define F_RXENFRAMER V_RXENFRAMER(1U)
2369
2370#define S_RXMAXPKTSIZE 0
2371#define M_RXMAXPKTSIZE 0x3fff
2372#define V_RXMAXPKTSIZE(x) ((x) << S_RXMAXPKTSIZE)
2373#define G_RXMAXPKTSIZE(x) (((x) >> S_RXMAXPKTSIZE) & M_RXMAXPKTSIZE)
2374
2375#define A_XGM_RESET_CTRL 0x8ac
2376
2377#define S_XGMAC_STOP_EN 4
2378#define V_XGMAC_STOP_EN(x) ((x) << S_XGMAC_STOP_EN)
2379#define F_XGMAC_STOP_EN V_XGMAC_STOP_EN(1U)
2380
2381#define S_XG2G_RESET_ 3
2382#define V_XG2G_RESET_(x) ((x) << S_XG2G_RESET_)
2383#define F_XG2G_RESET_ V_XG2G_RESET_(1U)
2384
2385#define S_RGMII_RESET_ 2
2386#define V_RGMII_RESET_(x) ((x) << S_RGMII_RESET_)
2387#define F_RGMII_RESET_ V_RGMII_RESET_(1U)
2388
2389#define S_PCS_RESET_ 1
2390#define V_PCS_RESET_(x) ((x) << S_PCS_RESET_)
2391#define F_PCS_RESET_ V_PCS_RESET_(1U)
2392
2393#define S_MAC_RESET_ 0
2394#define V_MAC_RESET_(x) ((x) << S_MAC_RESET_)
2395#define F_MAC_RESET_ V_MAC_RESET_(1U)
2396
2397#define A_XGM_PORT_CFG 0x8b8
2398
2399#define S_CLKDIVRESET_ 3
2400#define V_CLKDIVRESET_(x) ((x) << S_CLKDIVRESET_)
2401#define F_CLKDIVRESET_ V_CLKDIVRESET_(1U)
2402
2403#define S_PORTSPEED 1
2404#define M_PORTSPEED 0x3
2405
2406#define V_PORTSPEED(x) ((x) << S_PORTSPEED)
2407
2408#define S_ENRGMII 0
2409#define V_ENRGMII(x) ((x) << S_ENRGMII)
2410#define F_ENRGMII V_ENRGMII(1U)
2411
2412#define A_XGM_INT_ENABLE 0x8d4
2413
2414#define S_TXFIFO_PRTY_ERR 17
2415#define M_TXFIFO_PRTY_ERR 0x7
2416
2417#define V_TXFIFO_PRTY_ERR(x) ((x) << S_TXFIFO_PRTY_ERR)
2418
2419#define S_RXFIFO_PRTY_ERR 14
2420#define M_RXFIFO_PRTY_ERR 0x7
2421
2422#define V_RXFIFO_PRTY_ERR(x) ((x) << S_RXFIFO_PRTY_ERR)
2423
2424#define S_TXFIFO_UNDERRUN 13
2425#define V_TXFIFO_UNDERRUN(x) ((x) << S_TXFIFO_UNDERRUN)
2426#define F_TXFIFO_UNDERRUN V_TXFIFO_UNDERRUN(1U)
2427
2428#define S_RXFIFO_OVERFLOW 12
2429#define V_RXFIFO_OVERFLOW(x) ((x) << S_RXFIFO_OVERFLOW)
2430#define F_RXFIFO_OVERFLOW V_RXFIFO_OVERFLOW(1U)
2431
2432#define S_SERDES_LOS 4
2433#define M_SERDES_LOS 0xf
2434
2435#define V_SERDES_LOS(x) ((x) << S_SERDES_LOS)
2436
2437#define S_XAUIPCSCTCERR 3
2438#define V_XAUIPCSCTCERR(x) ((x) << S_XAUIPCSCTCERR)
2439#define F_XAUIPCSCTCERR V_XAUIPCSCTCERR(1U)
2440
2441#define S_XAUIPCSALIGNCHANGE 2
2442#define V_XAUIPCSALIGNCHANGE(x) ((x) << S_XAUIPCSALIGNCHANGE)
2443#define F_XAUIPCSALIGNCHANGE V_XAUIPCSALIGNCHANGE(1U)
2444
2445#define S_XGM_INT 0
2446#define V_XGM_INT(x) ((x) << S_XGM_INT)
2447#define F_XGM_INT V_XGM_INT(1U)
2448
2449#define A_XGM_INT_CAUSE 0x8d8
2450
2451#define A_XGM_XAUI_ACT_CTRL 0x8dc
2452
2453#define S_TXACTENABLE 1
2454#define V_TXACTENABLE(x) ((x) << S_TXACTENABLE)
2455#define F_TXACTENABLE V_TXACTENABLE(1U)
2456
2457#define A_XGM_SERDES_CTRL0 0x8e0
2458
2459#define S_RESET3 23
2460#define V_RESET3(x) ((x) << S_RESET3)
2461#define F_RESET3 V_RESET3(1U)
2462
2463#define S_RESET2 22
2464#define V_RESET2(x) ((x) << S_RESET2)
2465#define F_RESET2 V_RESET2(1U)
2466
2467#define S_RESET1 21
2468#define V_RESET1(x) ((x) << S_RESET1)
2469#define F_RESET1 V_RESET1(1U)
2470
2471#define S_RESET0 20
2472#define V_RESET0(x) ((x) << S_RESET0)
2473#define F_RESET0 V_RESET0(1U)
2474
2475#define S_PWRDN3 19
2476#define V_PWRDN3(x) ((x) << S_PWRDN3)
2477#define F_PWRDN3 V_PWRDN3(1U)
2478
2479#define S_PWRDN2 18
2480#define V_PWRDN2(x) ((x) << S_PWRDN2)
2481#define F_PWRDN2 V_PWRDN2(1U)
2482
2483#define S_PWRDN1 17
2484#define V_PWRDN1(x) ((x) << S_PWRDN1)
2485#define F_PWRDN1 V_PWRDN1(1U)
2486
2487#define S_PWRDN0 16
2488#define V_PWRDN0(x) ((x) << S_PWRDN0)
2489#define F_PWRDN0 V_PWRDN0(1U)
2490
2491#define S_RESETPLL23 15
2492#define V_RESETPLL23(x) ((x) << S_RESETPLL23)
2493#define F_RESETPLL23 V_RESETPLL23(1U)
2494
2495#define S_RESETPLL01 14
2496#define V_RESETPLL01(x) ((x) << S_RESETPLL01)
2497#define F_RESETPLL01 V_RESETPLL01(1U)
2498
2499#define A_XGM_SERDES_STAT0 0x8f0
2500#define A_XGM_SERDES_STAT1 0x8f4
2501#define A_XGM_SERDES_STAT2 0x8f8
2502
2503#define S_LOWSIG0 0
2504#define V_LOWSIG0(x) ((x) << S_LOWSIG0)
2505#define F_LOWSIG0 V_LOWSIG0(1U)
2506
2507#define A_XGM_SERDES_STAT3 0x8fc
2508
2509#define A_XGM_STAT_TX_BYTE_LOW 0x900
2510
2511#define A_XGM_STAT_TX_BYTE_HIGH 0x904
2512
2513#define A_XGM_STAT_TX_FRAME_LOW 0x908
2514
2515#define A_XGM_STAT_TX_FRAME_HIGH 0x90c
2516
2517#define A_XGM_STAT_TX_BCAST 0x910
2518
2519#define A_XGM_STAT_TX_MCAST 0x914
2520
2521#define A_XGM_STAT_TX_PAUSE 0x918
2522
2523#define A_XGM_STAT_TX_64B_FRAMES 0x91c
2524
2525#define A_XGM_STAT_TX_65_127B_FRAMES 0x920
2526
2527#define A_XGM_STAT_TX_128_255B_FRAMES 0x924
2528
2529#define A_XGM_STAT_TX_256_511B_FRAMES 0x928
2530
2531#define A_XGM_STAT_TX_512_1023B_FRAMES 0x92c
2532
2533#define A_XGM_STAT_TX_1024_1518B_FRAMES 0x930
2534
2535#define A_XGM_STAT_TX_1519_MAXB_FRAMES 0x934
2536
2537#define A_XGM_STAT_TX_ERR_FRAMES 0x938
2538
2539#define A_XGM_STAT_RX_BYTES_LOW 0x93c
2540
2541#define A_XGM_STAT_RX_BYTES_HIGH 0x940
2542
2543#define A_XGM_STAT_RX_FRAMES_LOW 0x944
2544
2545#define A_XGM_STAT_RX_FRAMES_HIGH 0x948
2546
2547#define A_XGM_STAT_RX_BCAST_FRAMES 0x94c
2548
2549#define A_XGM_STAT_RX_MCAST_FRAMES 0x950
2550
2551#define A_XGM_STAT_RX_PAUSE_FRAMES 0x954
2552
2553#define A_XGM_STAT_RX_64B_FRAMES 0x958
2554
2555#define A_XGM_STAT_RX_65_127B_FRAMES 0x95c
2556
2557#define A_XGM_STAT_RX_128_255B_FRAMES 0x960
2558
2559#define A_XGM_STAT_RX_256_511B_FRAMES 0x964
2560
2561#define A_XGM_STAT_RX_512_1023B_FRAMES 0x968
2562
2563#define A_XGM_STAT_RX_1024_1518B_FRAMES 0x96c
2564
2565#define A_XGM_STAT_RX_1519_MAXB_FRAMES 0x970
2566
2567#define A_XGM_STAT_RX_SHORT_FRAMES 0x974
2568
2569#define A_XGM_STAT_RX_OVERSIZE_FRAMES 0x978
2570
2571#define A_XGM_STAT_RX_JABBER_FRAMES 0x97c
2572
2573#define A_XGM_STAT_RX_CRC_ERR_FRAMES 0x980
2574
2575#define A_XGM_STAT_RX_LENGTH_ERR_FRAMES 0x984
2576
2577#define A_XGM_STAT_RX_SYM_CODE_ERR_FRAMES 0x988
2578
2579#define A_XGM_SERDES_STATUS0 0x98c
2580
2581#define A_XGM_SERDES_STATUS1 0x990
2582
2583#define S_CMULOCK 31
2584#define V_CMULOCK(x) ((x) << S_CMULOCK)
2585#define F_CMULOCK V_CMULOCK(1U)
2586
2587#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
2588
2589#define A_XGM_TX_SPI4_SOP_EOP_CNT 0x9a8
2590
2591#define S_TXSPI4SOPCNT 16
2592#define M_TXSPI4SOPCNT 0xffff
2593#define V_TXSPI4SOPCNT(x) ((x) << S_TXSPI4SOPCNT)
2594#define G_TXSPI4SOPCNT(x) (((x) >> S_TXSPI4SOPCNT) & M_TXSPI4SOPCNT)
2595
2596#define A_XGM_RX_SPI4_SOP_EOP_CNT 0x9ac
2597
2598#define XGMAC0_1_BASE_ADDR 0xa00
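The register names above follow one convention: for a field FIELD, S_FIELD is its bit offset, M_FIELD its unshifted mask, V_FIELD(x) shifts a value into position, F_FIELD is the single-bit form, and G_FIELD(x) extracts the field from a register word. A minimal sketch of composing them, assuming the driver's t3_read_reg()/t3_write_reg() accessors and purely illustrative watermark values:

static void example_set_rx_pause_watermarks(struct adapter *adap)
{
	u32 v = t3_read_reg(adap, A_XGM_RXFIFO_CFG);

	/* clear both pause watermark fields, then set placeholder values */
	v &= ~(V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM) |
	       V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM));
	v |= V_RXFIFOPAUSEHWM(0x100) | V_RXFIFOPAUSELWM(0x80);
	t3_write_reg(adap, A_XGM_RXFIFO_CFG, v);
}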
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
new file mode 100644
index 00000000000..d6fa1777a34
--- /dev/null
+++ b/drivers/net/cxgb3/sge.c
@@ -0,0 +1,3303 @@
1/*
2 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/skbuff.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/dma-mapping.h>
39#include <linux/slab.h>
40#include <linux/prefetch.h>
41#include <net/arp.h>
42#include "common.h"
43#include "regs.h"
44#include "sge_defs.h"
45#include "t3_cpl.h"
46#include "firmware_exports.h"
47#include "cxgb3_offload.h"
48
49#define USE_GTS 0
50
51#define SGE_RX_SM_BUF_SIZE 1536
52
53#define SGE_RX_COPY_THRES 256
54#define SGE_RX_PULL_LEN 128
55
56#define SGE_PG_RSVD SMP_CACHE_BYTES
57/*
58 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
59 * It must be a divisor of PAGE_SIZE. If set to 0, FL0 will use sk_buffs
60 * directly.
61 */
62#define FL0_PG_CHUNK_SIZE 2048
63#define FL0_PG_ORDER 0
64#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
65#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
66#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
67#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
68
69#define SGE_RX_DROP_THRES 16
70#define RX_RECLAIM_PERIOD (HZ/4)
71
72/*
73 * Max number of Rx buffers we replenish at a time.
74 */
75#define MAX_RX_REFILL 16U
76/*
77 * Period of the Tx buffer reclaim timer. This timer does not need to run
78 * frequently as Tx buffers are usually reclaimed by new Tx packets.
79 */
80#define TX_RECLAIM_PERIOD (HZ / 4)
81#define TX_RECLAIM_TIMER_CHUNK 64U
82#define TX_RECLAIM_CHUNK 16U
83
84/* WR size in bytes */
85#define WR_LEN (WR_FLITS * 8)
86
87/*
88 * Types of Tx queues in each queue set. Order here matters; do not change.
89 */
90enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
91
92/* Values for sge_txq.flags */
93enum {
94 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
95 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
96};
97
98struct tx_desc {
99 __be64 flit[TX_DESC_FLITS];
100};
101
102struct rx_desc {
103 __be32 addr_lo;
104 __be32 len_gen;
105 __be32 gen2;
106 __be32 addr_hi;
107};
108
109struct tx_sw_desc { /* SW state per Tx descriptor */
110 struct sk_buff *skb;
111 u8 eop; /* set if last descriptor for packet */
112 u8 addr_idx; /* buffer index of first SGL entry in descriptor */
113 u8 fragidx; /* first page fragment associated with descriptor */
114 s8 sflit; /* start flit of first SGL entry in descriptor */
115};
116
117struct rx_sw_desc { /* SW state per Rx descriptor */
118 union {
119 struct sk_buff *skb;
120 struct fl_pg_chunk pg_chunk;
121 };
122 DEFINE_DMA_UNMAP_ADDR(dma_addr);
123};
124
125struct rsp_desc { /* response queue descriptor */
126 struct rss_header rss_hdr;
127 __be32 flags;
128 __be32 len_cq;
129 u8 imm_data[47];
130 u8 intr_gen;
131};
132
133/*
134 * Holds unmapping information for Tx packets that need deferred unmapping.
135 * This structure lives at skb->head and must be allocated by callers.
136 */
137struct deferred_unmap_info {
138 struct pci_dev *pdev;
139 dma_addr_t addr[MAX_SKB_FRAGS + 1];
140};
141
142/*
143 * Maps a number of flits to the number of Tx descriptors that can hold them.
144 * The formula is
145 *
146 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
147 *
148 * HW allows up to 4 descriptors to be combined into a WR.
149 */
150static u8 flit_desc_map[] = {
151 0,
152#if SGE_NUM_GENBITS == 1
153 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
154 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
155 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
156 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
157#elif SGE_NUM_GENBITS == 2
158 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
159 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
160 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
161 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
162#else
163# error "SGE_NUM_GENBITS must be 1 or 2"
164#endif
165};
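/*
 * Worked example for SGE_NUM_GENBITS == 2: the table above maps 1-15 flits
 * to one descriptor, 16-29 to two, 30-43 to three and 44-57 to four, i.e.
 * it encodes desc = 1 + (flits - 2) / (WR_FLITS - 1) with WR_FLITS == 15;
 * each continuation descriptor spends one flit on its WR header (see
 * write_wr_hdr_sgl() below).
 */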
166
167static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
168{
169 return container_of(q, struct sge_qset, fl[qidx]);
170}
171
172static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
173{
174 return container_of(q, struct sge_qset, rspq);
175}
176
177static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
178{
179 return container_of(q, struct sge_qset, txq[qidx]);
180}
181
182/**
183 * refill_rspq - replenish an SGE response queue
184 * @adapter: the adapter
185 * @q: the response queue to replenish
186 * @credits: how many new responses to make available
187 *
188 * Replenishes a response queue by making the supplied number of responses
189 * available to HW.
190 */
191static inline void refill_rspq(struct adapter *adapter,
192 const struct sge_rspq *q, unsigned int credits)
193{
194 rmb();
195 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
196 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
197}
198
199/**
200 * need_skb_unmap - does the platform need unmapping of sk_buffs?
201 *
202 * Returns true if the platform needs sk_buff unmapping. The result is a
203 * compile-time constant, so unnecessary unmapping code is optimized away.
204 */
205static inline int need_skb_unmap(void)
206{
207#ifdef CONFIG_NEED_DMA_MAP_STATE
208 return 1;
209#else
210 return 0;
211#endif
212}
213
214/**
215 * unmap_skb - unmap a packet main body and its page fragments
216 * @skb: the packet
217 * @q: the Tx queue containing Tx descriptors for the packet
218 * @cidx: index of Tx descriptor
219 * @pdev: the PCI device
220 *
221 * Unmap the main body of an sk_buff and its page fragments, if any.
222 * Because of the fairly complicated structure of our SGLs and the desire
223 * to conserve space for metadata, the information necessary to unmap an
224 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
225 * descriptors (the physical addresses of the various data buffers), and
226 * the SW descriptor state (assorted indices). The send functions
227 * initialize the indices for the first packet descriptor so we can unmap
228 * the buffers held in the first Tx descriptor here, and we have enough
229 * information at this point to set the state for the next Tx descriptor.
230 *
231 * Note that it is possible to clean up the first descriptor of a packet
232 * before the send routines have written the next descriptors, but this
233 * race does not cause any problem. We just end up writing the unmapping
234 * info for the descriptor first.
235 */
236static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
237 unsigned int cidx, struct pci_dev *pdev)
238{
239 const struct sg_ent *sgp;
240 struct tx_sw_desc *d = &q->sdesc[cidx];
241 int nfrags, frag_idx, curflit, j = d->addr_idx;
242
243 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
244 frag_idx = d->fragidx;
245
246 if (frag_idx == 0 && skb_headlen(skb)) {
247 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
248 skb_headlen(skb), PCI_DMA_TODEVICE);
249 j = 1;
250 }
251
252 curflit = d->sflit + 1 + j;
253 nfrags = skb_shinfo(skb)->nr_frags;
254
255 while (frag_idx < nfrags && curflit < WR_FLITS) {
256 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
257 skb_shinfo(skb)->frags[frag_idx].size,
258 PCI_DMA_TODEVICE);
259 j ^= 1;
260 if (j == 0) {
261 sgp++;
262 curflit++;
263 }
264 curflit++;
265 frag_idx++;
266 }
267
268 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
269 d = cidx + 1 == q->size ? q->sdesc : d + 1;
270 d->fragidx = frag_idx;
271 d->addr_idx = j;
272 d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
273 }
274}
275
276/**
277 * free_tx_desc - reclaims Tx descriptors and their buffers
278 * @adapter: the adapter
279 * @q: the Tx queue to reclaim descriptors from
280 * @n: the number of descriptors to reclaim
281 *
282 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
283 * Tx buffers. Called with the Tx queue lock held.
284 */
285static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
286 unsigned int n)
287{
288 struct tx_sw_desc *d;
289 struct pci_dev *pdev = adapter->pdev;
290 unsigned int cidx = q->cidx;
291
292 const int need_unmap = need_skb_unmap() &&
293 q->cntxt_id >= FW_TUNNEL_SGEEC_START;
294
295 d = &q->sdesc[cidx];
296 while (n--) {
297 if (d->skb) { /* an SGL is present */
298 if (need_unmap)
299 unmap_skb(d->skb, q, cidx, pdev);
300 if (d->eop) {
301 kfree_skb(d->skb);
302 d->skb = NULL;
303 }
304 }
305 ++d;
306 if (++cidx == q->size) {
307 cidx = 0;
308 d = q->sdesc;
309 }
310 }
311 q->cidx = cidx;
312}
313
314/**
315 * reclaim_completed_tx - reclaims completed Tx descriptors
316 * @adapter: the adapter
317 * @q: the Tx queue to reclaim completed descriptors from
318 * @chunk: maximum number of descriptors to reclaim
319 *
320 * Reclaims Tx descriptors that the SGE has indicated it has processed,
321 * and frees the associated buffers if possible. Called with the Tx
322 * queue's lock held.
323 */
324static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
325 struct sge_txq *q,
326 unsigned int chunk)
327{
328 unsigned int reclaim = q->processed - q->cleaned;
329
330 reclaim = min(chunk, reclaim);
331 if (reclaim) {
332 free_tx_desc(adapter, q, reclaim);
333 q->cleaned += reclaim;
334 q->in_use -= reclaim;
335 }
336 return q->processed - q->cleaned;
337}
338
339/**
340 * should_restart_tx - are there enough resources to restart a Tx queue?
341 * @q: the Tx queue
342 *
343 * Checks if there are enough descriptors to restart a suspended Tx queue.
344 */
345static inline int should_restart_tx(const struct sge_txq *q)
346{
347 unsigned int r = q->processed - q->cleaned;
348
349 return q->in_use - r < (q->size >> 1);
350}
351
352static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
353 struct rx_sw_desc *d)
354{
355 if (q->use_pages && d->pg_chunk.page) {
356 (*d->pg_chunk.p_cnt)--;
357 if (!*d->pg_chunk.p_cnt)
358 pci_unmap_page(pdev,
359 d->pg_chunk.mapping,
360 q->alloc_size, PCI_DMA_FROMDEVICE);
361
362 put_page(d->pg_chunk.page);
363 d->pg_chunk.page = NULL;
364 } else {
365 pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
366 q->buf_size, PCI_DMA_FROMDEVICE);
367 kfree_skb(d->skb);
368 d->skb = NULL;
369 }
370}
371
372/**
373 * free_rx_bufs - free the Rx buffers on an SGE free list
374 * @pdev: the PCI device associated with the adapter
375 * @q: the SGE free list to clean up
376 *
377 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
378 * this queue should be stopped before calling this function.
379 */
380static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
381{
382 unsigned int cidx = q->cidx;
383
384 while (q->credits--) {
385 struct rx_sw_desc *d = &q->sdesc[cidx];
386
387
388 clear_rx_desc(pdev, q, d);
389 if (++cidx == q->size)
390 cidx = 0;
391 }
392
393 if (q->pg_chunk.page) {
394 __free_pages(q->pg_chunk.page, q->order);
395 q->pg_chunk.page = NULL;
396 }
397}
398
399/**
400 * add_one_rx_buf - add a packet buffer to a free-buffer list
401 * @va: buffer start VA
402 * @len: the buffer length
403 * @d: the HW Rx descriptor to write
404 * @sd: the SW Rx descriptor to write
405 * @gen: the generation bit value
406 * @pdev: the PCI device associated with the adapter
407 *
408 * Add a buffer of the given length to the supplied HW and SW Rx
409 * descriptors.
410 */
411static inline int add_one_rx_buf(void *va, unsigned int len,
412 struct rx_desc *d, struct rx_sw_desc *sd,
413 unsigned int gen, struct pci_dev *pdev)
414{
415 dma_addr_t mapping;
416
417 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
418 if (unlikely(pci_dma_mapping_error(pdev, mapping)))
419 return -ENOMEM;
420
421 dma_unmap_addr_set(sd, dma_addr, mapping);
422
423 d->addr_lo = cpu_to_be32(mapping);
424 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
425 wmb();
426 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
427 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
428 return 0;
429}
430
431static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
432 unsigned int gen)
433{
434 d->addr_lo = cpu_to_be32(mapping);
435 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
436 wmb();
437 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
438 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
439 return 0;
440}
441
442static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
443 struct rx_sw_desc *sd, gfp_t gfp,
444 unsigned int order)
445{
446 if (!q->pg_chunk.page) {
447 dma_addr_t mapping;
448
449 q->pg_chunk.page = alloc_pages(gfp, order);
450 if (unlikely(!q->pg_chunk.page))
451 return -ENOMEM;
452 q->pg_chunk.va = page_address(q->pg_chunk.page);
453 q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
454 SGE_PG_RSVD;
455 q->pg_chunk.offset = 0;
456 mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
457 0, q->alloc_size, PCI_DMA_FROMDEVICE);
458 q->pg_chunk.mapping = mapping;
459 }
460 sd->pg_chunk = q->pg_chunk;
461
462 prefetch(sd->pg_chunk.p_cnt);
463
464 q->pg_chunk.offset += q->buf_size;
465 if (q->pg_chunk.offset == (PAGE_SIZE << order))
466 q->pg_chunk.page = NULL;
467 else {
468 q->pg_chunk.va += q->buf_size;
469 get_page(q->pg_chunk.page);
470 }
471
472 if (sd->pg_chunk.offset == 0)
473 *sd->pg_chunk.p_cnt = 1;
474 else
475 *sd->pg_chunk.p_cnt += 1;
476
477 return 0;
478}
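/*
 * The page-chunk scheme above carves each (possibly compound) page into
 * buf_size chunks and keeps a host-side reference count, p_cnt, in the
 * last SGE_PG_RSVD bytes of the page.  The page is DMA-mapped once, when
 * it is first carved up, and is unmapped again only when the last
 * outstanding chunk is released (see clear_rx_desc() and get_packet_pg()).
 */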
479
480static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
481{
482 if (q->pend_cred >= q->credits / 4) {
483 q->pend_cred = 0;
484 wmb();
485 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
486 }
487}
488
489/**
490 * refill_fl - refill an SGE free-buffer list
491 * @adap: the adapter
492 * @q: the free-list to refill
493 * @n: the number of new buffers to allocate
494 * @gfp: the gfp flags for allocating new buffers
495 *
496 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
497 * allocated with the supplied gfp flags. The caller must ensure that
498 * @n does not exceed the queue's capacity.
499 */
500static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
501{
502 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
503 struct rx_desc *d = &q->desc[q->pidx];
504 unsigned int count = 0;
505
506 while (n--) {
507 dma_addr_t mapping;
508 int err;
509
510 if (q->use_pages) {
511 if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
512 q->order))) {
513nomem: q->alloc_failed++;
514 break;
515 }
516 mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
517 dma_unmap_addr_set(sd, dma_addr, mapping);
518
519 add_one_rx_chunk(mapping, d, q->gen);
520 pci_dma_sync_single_for_device(adap->pdev, mapping,
521 q->buf_size - SGE_PG_RSVD,
522 PCI_DMA_FROMDEVICE);
523 } else {
524 void *buf_start;
525
526 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
527 if (!skb)
528 goto nomem;
529
530 sd->skb = skb;
531 buf_start = skb->data;
532 err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
533 q->gen, adap->pdev);
534 if (unlikely(err)) {
535 clear_rx_desc(adap->pdev, q, sd);
536 break;
537 }
538 }
539
540 d++;
541 sd++;
542 if (++q->pidx == q->size) {
543 q->pidx = 0;
544 q->gen ^= 1;
545 sd = q->sdesc;
546 d = q->desc;
547 }
548 count++;
549 }
550
551 q->credits += count;
552 q->pend_cred += count;
553 ring_fl_db(adap, q);
554
555 return count;
556}
557
558static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
559{
560 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
561 GFP_ATOMIC | __GFP_COMP);
562}
563
564/**
565 * recycle_rx_buf - recycle a receive buffer
566 * @adap: the adapter
567 * @q: the SGE free list
568 * @idx: index of buffer to recycle
569 *
570 * Recycles the specified buffer on the given free list by adding it at
571 * the next available slot on the list.
572 */
573static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
574 unsigned int idx)
575{
576 struct rx_desc *from = &q->desc[idx];
577 struct rx_desc *to = &q->desc[q->pidx];
578
579 q->sdesc[q->pidx] = q->sdesc[idx];
580 to->addr_lo = from->addr_lo; /* already big endian */
581 to->addr_hi = from->addr_hi; /* likewise */
582 wmb();
583 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
584 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
585
586 if (++q->pidx == q->size) {
587 q->pidx = 0;
588 q->gen ^= 1;
589 }
590
591 q->credits++;
592 q->pend_cred++;
593 ring_fl_db(adap, q);
594}
595
596/**
597 * alloc_ring - allocate resources for an SGE descriptor ring
598 * @pdev: the PCI device
599 * @nelem: the number of descriptors
600 * @elem_size: the size of each descriptor
601 * @sw_size: the size of the SW state associated with each ring element
602 * @phys: the physical address of the allocated ring
603 * @metadata: address of the array holding the SW state for the ring
604 *
605 * Allocates resources for an SGE descriptor ring, such as Tx queues,
606 * free buffer lists, or response queues. Each SGE ring requires
607 * space for its HW descriptors plus, optionally, space for the SW state
608 * associated with each HW entry (the metadata). The function returns
609 * three values: the virtual address for the HW ring (the return value
610 * of the function), the physical address of the HW ring, and the address
611 * of the SW ring.
612 */
613static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
614 size_t sw_size, dma_addr_t * phys, void *metadata)
615{
616 size_t len = nelem * elem_size;
617 void *s = NULL;
618 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
619
620 if (!p)
621 return NULL;
622 if (sw_size && metadata) {
623 s = kcalloc(nelem, sw_size, GFP_KERNEL);
624
625 if (!s) {
626 dma_free_coherent(&pdev->dev, len, p, *phys);
627 return NULL;
628 }
629 *(void **)metadata = s;
630 }
631 memset(p, 0, len);
632 return p;
633}
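A sketch of a typical call, showing the metadata convention (the final argument receives the kcalloc'ed SW ring through a void ** store); the sge_fl field names follow their usage elsewhere in this file:

	fl->desc = alloc_ring(adap->pdev, fl->size, sizeof(struct rx_desc),
			      sizeof(struct rx_sw_desc), &fl->phys_addr,
			      &fl->sdesc);
	if (!fl->desc)
		return -ENOMEM;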
634
635/**
636 * t3_reset_qset - reset a sge qset
637 * @q: the queue set
638 *
639 * Reset the qset structure.
640 * The NAPI structure is preserved in the event of the qset's
641 * reincarnation, for example during EEH recovery.
642 */
643static void t3_reset_qset(struct sge_qset *q)
644{
645 if (q->adap &&
646 !(q->adap->flags & NAPI_INIT)) {
647 memset(q, 0, sizeof(*q));
648 return;
649 }
650
651 q->adap = NULL;
652 memset(&q->rspq, 0, sizeof(q->rspq));
653 memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
654 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
655 q->txq_stopped = 0;
656 q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
657 q->rx_reclaim_timer.function = NULL;
658 q->nomem = 0;
659 napi_free_frags(&q->napi);
660}
661
662
663/**
664 * free_qset - free the resources of an SGE queue set
665 * @adapter: the adapter owning the queue set
666 * @q: the queue set
667 *
668 * Release the HW and SW resources associated with an SGE queue set, such
669 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
670 * queue set must be quiesced prior to calling this.
671 */
672static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
673{
674 int i;
675 struct pci_dev *pdev = adapter->pdev;
676
677 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
678 if (q->fl[i].desc) {
679 spin_lock_irq(&adapter->sge.reg_lock);
680 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
681 spin_unlock_irq(&adapter->sge.reg_lock);
682 free_rx_bufs(pdev, &q->fl[i]);
683 kfree(q->fl[i].sdesc);
684 dma_free_coherent(&pdev->dev,
685 q->fl[i].size *
686 sizeof(struct rx_desc), q->fl[i].desc,
687 q->fl[i].phys_addr);
688 }
689
690 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
691 if (q->txq[i].desc) {
692 spin_lock_irq(&adapter->sge.reg_lock);
693 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
694 spin_unlock_irq(&adapter->sge.reg_lock);
695 if (q->txq[i].sdesc) {
696 free_tx_desc(adapter, &q->txq[i],
697 q->txq[i].in_use);
698 kfree(q->txq[i].sdesc);
699 }
700 dma_free_coherent(&pdev->dev,
701 q->txq[i].size *
702 sizeof(struct tx_desc),
703 q->txq[i].desc, q->txq[i].phys_addr);
704 __skb_queue_purge(&q->txq[i].sendq);
705 }
706
707 if (q->rspq.desc) {
708 spin_lock_irq(&adapter->sge.reg_lock);
709 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
710 spin_unlock_irq(&adapter->sge.reg_lock);
711 dma_free_coherent(&pdev->dev,
712 q->rspq.size * sizeof(struct rsp_desc),
713 q->rspq.desc, q->rspq.phys_addr);
714 }
715
716 t3_reset_qset(q);
717}
718
719/**
720 * init_qset_cntxt - initialize an SGE queue set context info
721 * @qs: the queue set
722 * @id: the queue set id
723 *
724 * Initializes the TIDs and context ids for the queues of a queue set.
725 */
726static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
727{
728 qs->rspq.cntxt_id = id;
729 qs->fl[0].cntxt_id = 2 * id;
730 qs->fl[1].cntxt_id = 2 * id + 1;
731 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
732 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
733 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
734 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
735 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
736}
737
738/**
739 * sgl_len - calculates the size of an SGL of the given capacity
740 * @n: the number of SGL entries
741 *
742 * Calculates the number of flits needed for a scatter/gather list that
743 * can hold the given number of entries.
744 */
745static inline unsigned int sgl_len(unsigned int n)
746{
747 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
748 return (3 * n) / 2 + (n & 1);
749}
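/*
 * Two SGL entries fit in three flits: the two 32-bit lengths share one
 * flit and each 64-bit address takes its own, while a trailing odd entry
 * still needs its length flit plus one address flit.  Hence sgl_len(1) = 2,
 * sgl_len(2) = 3, sgl_len(3) = 5, sgl_len(4) = 6, and so on.
 */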
750
751/**
752 * flits_to_desc - returns the num of Tx descriptors for the given flits
753 * @n: the number of flits
754 *
755 * Calculates the number of Tx descriptors needed for the supplied number
756 * of flits.
757 */
758static inline unsigned int flits_to_desc(unsigned int n)
759{
760 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
761 return flit_desc_map[n];
762}
763
764/**
765 * get_packet - return the next ingress packet buffer from a free list
766 * @adap: the adapter that received the packet
767 * @fl: the SGE free list holding the packet
768 * @len: the packet length including any SGE padding
769 * @drop_thres: # of remaining buffers before we start dropping packets
770 *
771 * Get the next packet from a free list and complete setup of the
772 * sk_buff. If the packet is small we make a copy and recycle the
773 * original buffer, otherwise we use the original buffer itself. If a
774 * positive drop threshold is supplied packets are dropped and their
775 * buffers recycled if (a) the number of remaining buffers is under the
776 * threshold and the packet is too big to copy, or (b) the packet should
777 * be copied but there is no memory for the copy.
778 */
779static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
780 unsigned int len, unsigned int drop_thres)
781{
782 struct sk_buff *skb = NULL;
783 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
784
785 prefetch(sd->skb->data);
786 fl->credits--;
787
788 if (len <= SGE_RX_COPY_THRES) {
789 skb = alloc_skb(len, GFP_ATOMIC);
790 if (likely(skb != NULL)) {
791 __skb_put(skb, len);
792 pci_dma_sync_single_for_cpu(adap->pdev,
793 dma_unmap_addr(sd, dma_addr), len,
794 PCI_DMA_FROMDEVICE);
795 memcpy(skb->data, sd->skb->data, len);
796 pci_dma_sync_single_for_device(adap->pdev,
797 dma_unmap_addr(sd, dma_addr), len,
798 PCI_DMA_FROMDEVICE);
799 } else if (!drop_thres)
800 goto use_orig_buf;
801recycle:
802 recycle_rx_buf(adap, fl, fl->cidx);
803 return skb;
804 }
805
806 if (unlikely(fl->credits < drop_thres) &&
807 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
808 GFP_ATOMIC | __GFP_COMP) == 0)
809 goto recycle;
810
811use_orig_buf:
812 pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
813 fl->buf_size, PCI_DMA_FROMDEVICE);
814 skb = sd->skb;
815 skb_put(skb, len);
816 __refill_fl(adap, fl);
817 return skb;
818}
819
820/**
821 * get_packet_pg - return the next ingress packet buffer from a free list
822 * @adap: the adapter that received the packet
823 * @fl: the SGE free list holding the packet
824 * @len: the packet length including any SGE padding
825 * @drop_thres: # of remaining buffers before we start dropping packets
826 *
827 * Get the next packet from a free list populated with page chunks.
828 * If the packet is small we make a copy and recycle the original buffer,
829 * otherwise we attach the original buffer as a page fragment to a fresh
830 * sk_buff. If a positive drop threshold is supplied packets are dropped
831 * and their buffers recycled if (a) the number of remaining buffers is
832 * under the threshold and the packet is too big to copy, or (b) there's
833 * no system memory.
834 *
835 * Note: this function is similar to @get_packet but deals with Rx buffers
836 * that are page chunks rather than sk_buffs.
837 */
838static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
839 struct sge_rspq *q, unsigned int len,
840 unsigned int drop_thres)
841{
842 struct sk_buff *newskb, *skb;
843 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
844
845 dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);
846
847 newskb = skb = q->pg_skb;
848 if (!skb && (len <= SGE_RX_COPY_THRES)) {
849 newskb = alloc_skb(len, GFP_ATOMIC);
850 if (likely(newskb != NULL)) {
851 __skb_put(newskb, len);
852 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
853 PCI_DMA_FROMDEVICE);
854 memcpy(newskb->data, sd->pg_chunk.va, len);
855 pci_dma_sync_single_for_device(adap->pdev, dma_addr,
856 len,
857 PCI_DMA_FROMDEVICE);
858 } else if (!drop_thres)
859 return NULL;
860recycle:
861 fl->credits--;
862 recycle_rx_buf(adap, fl, fl->cidx);
863 q->rx_recycle_buf++;
864 return newskb;
865 }
866
867 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
868 goto recycle;
869
870 prefetch(sd->pg_chunk.p_cnt);
871
872 if (!skb)
873 newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
874
875 if (unlikely(!newskb)) {
876 if (!drop_thres)
877 return NULL;
878 goto recycle;
879 }
880
881 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
882 PCI_DMA_FROMDEVICE);
883 (*sd->pg_chunk.p_cnt)--;
884 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
885 pci_unmap_page(adap->pdev,
886 sd->pg_chunk.mapping,
887 fl->alloc_size,
888 PCI_DMA_FROMDEVICE);
889 if (!skb) {
890 __skb_put(newskb, SGE_RX_PULL_LEN);
891 memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
892 skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
893 sd->pg_chunk.offset + SGE_RX_PULL_LEN,
894 len - SGE_RX_PULL_LEN);
895 newskb->len = len;
896 newskb->data_len = len - SGE_RX_PULL_LEN;
897 newskb->truesize += newskb->data_len;
898 } else {
899 skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
900 sd->pg_chunk.page,
901 sd->pg_chunk.offset, len);
902 newskb->len += len;
903 newskb->data_len += len;
904 newskb->truesize += len;
905 }
906
907 fl->credits--;
908 /*
909 * We do not refill FLs here, we let the caller do it to overlap a
910 * prefetch.
911 */
912 return newskb;
913}
914
915/**
916 * get_imm_packet - return the next ingress packet buffer from a response
917 * @resp: the response descriptor containing the packet data
918 *
919 * Return a packet containing the immediate data of the given response.
920 */
921static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
922{
923 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
924
925 if (skb) {
926 __skb_put(skb, IMMED_PKT_SIZE);
927 skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
928 }
929 return skb;
930}
931
932/**
933 * calc_tx_descs - calculate the number of Tx descriptors for a packet
934 * @skb: the packet
935 *
936 * Returns the number of Tx descriptors needed for the given Ethernet
937 * packet. Ethernet packets require addition of WR and CPL headers.
938 */
939static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
940{
941 unsigned int flits;
942
943 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
944 return 1;
945
946 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
947 if (skb_shinfo(skb)->gso_size)
948 flits++;
949 return flits_to_desc(flits);
950}
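/*
 * Example: a packet with linear data and three page fragments needs
 * sgl_len(4) + 2 = 8 flits (9 with TSO), which flits_to_desc() maps to a
 * single Tx descriptor; only heavily fragmented packets span several.
 */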
951
952/**
953 * make_sgl - populate a scatter/gather list for a packet
954 * @skb: the packet
955 * @sgp: the SGL to populate
956 * @start: start address of skb main body data to include in the SGL
957 * @len: length of skb main body data to include in the SGL
958 * @pdev: the PCI device
959 *
960 * Generates a scatter/gather list for the buffers that make up a packet
961 * and returns the SGL size in 8-byte words. The caller must size the SGL
962 * appropriately.
963 */
964static inline unsigned int make_sgl(const struct sk_buff *skb,
965 struct sg_ent *sgp, unsigned char *start,
966 unsigned int len, struct pci_dev *pdev)
967{
968 dma_addr_t mapping;
969 unsigned int i, j = 0, nfrags;
970
971 if (len) {
972 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
973 sgp->len[0] = cpu_to_be32(len);
974 sgp->addr[0] = cpu_to_be64(mapping);
975 j = 1;
976 }
977
978 nfrags = skb_shinfo(skb)->nr_frags;
979 for (i = 0; i < nfrags; i++) {
980 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
981
982 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
983 frag->size, PCI_DMA_TODEVICE);
984 sgp->len[j] = cpu_to_be32(frag->size);
985 sgp->addr[j] = cpu_to_be64(mapping);
986 j ^= 1;
987 if (j == 0)
988 ++sgp;
989 }
990 if (j)
991 sgp->len[j] = 0;
992 return ((nfrags + (len != 0)) * 3) / 2 + j;
993}
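/*
 * The value returned above equals sgl_len(nfrags + (len != 0)): j ends up
 * as the parity of the number of SGL entries written.
 */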
994
995/**
996 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
997 * @adap: the adapter
998 * @q: the Tx queue
999 *
1000 * Ring the doorbell if a Tx queue is asleep. There is a natural race
1001 * where the HW may go to sleep just after we check; in that case the
1002 * interrupt handler will detect the outstanding Tx packet and ring the
1003 * doorbell for us.
1004 *
1005 * When GTS is disabled we unconditionally ring the doorbell.
1006 */
1007static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
1008{
1009#if USE_GTS
1010 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
1011 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
1012 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1013 t3_write_reg(adap, A_SG_KDOORBELL,
1014 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1015 }
1016#else
1017 wmb(); /* write descriptors before telling HW */
1018 t3_write_reg(adap, A_SG_KDOORBELL,
1019 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1020#endif
1021}
1022
1023static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
1024{
1025#if SGE_NUM_GENBITS == 2
1026 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
1027#endif
1028}
1029
1030/**
1031 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
1032 * @ndesc: number of Tx descriptors spanned by the SGL
1033 * @skb: the packet corresponding to the WR
1034 * @d: first Tx descriptor to be written
1035 * @pidx: index of above descriptors
1036 * @q: the SGE Tx queue
1037 * @sgl: the SGL
1038 * @flits: number of flits to the start of the SGL in the first descriptor
1039 * @sgl_flits: the SGL size in flits
1040 * @gen: the Tx descriptor generation
1041 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
1042 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
1043 *
1044 * Write a work request header and an associated SGL. If the SGL is
1045 * small enough to fit into one Tx descriptor it has already been written
1046 * and we just need to write the WR header. Otherwise we distribute the
1047 * SGL across the number of descriptors it spans.
1048 */
1049static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
1050 struct tx_desc *d, unsigned int pidx,
1051 const struct sge_txq *q,
1052 const struct sg_ent *sgl,
1053 unsigned int flits, unsigned int sgl_flits,
1054 unsigned int gen, __be32 wr_hi,
1055 __be32 wr_lo)
1056{
1057 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
1058 struct tx_sw_desc *sd = &q->sdesc[pidx];
1059
1060 sd->skb = skb;
1061 if (need_skb_unmap()) {
1062 sd->fragidx = 0;
1063 sd->addr_idx = 0;
1064 sd->sflit = flits;
1065 }
1066
1067 if (likely(ndesc == 1)) {
1068 sd->eop = 1;
1069 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1070 V_WR_SGLSFLT(flits)) | wr_hi;
1071 wmb();
1072 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
1073 V_WR_GEN(gen)) | wr_lo;
1074 wr_gen2(d, gen);
1075 } else {
1076 unsigned int ogen = gen;
1077 const u64 *fp = (const u64 *)sgl;
1078 struct work_request_hdr *wp = wrp;
1079
1080 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1081 V_WR_SGLSFLT(flits)) | wr_hi;
1082
1083 while (sgl_flits) {
1084 unsigned int avail = WR_FLITS - flits;
1085
1086 if (avail > sgl_flits)
1087 avail = sgl_flits;
1088 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
1089 sgl_flits -= avail;
1090 ndesc--;
1091 if (!sgl_flits)
1092 break;
1093
1094 fp += avail;
1095 d++;
1096 sd->eop = 0;
1097 sd++;
1098 if (++pidx == q->size) {
1099 pidx = 0;
1100 gen ^= 1;
1101 d = q->desc;
1102 sd = q->sdesc;
1103 }
1104
1105 sd->skb = skb;
1106 wrp = (struct work_request_hdr *)d;
1107 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
1108 V_WR_SGLSFLT(1)) | wr_hi;
1109 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
1110 sgl_flits + 1)) |
1111 V_WR_GEN(gen)) | wr_lo;
1112 wr_gen2(d, gen);
1113 flits = 1;
1114 }
1115 sd->eop = 1;
1116 wrp->wr_hi |= htonl(F_WR_EOP);
1117 wmb();
1118 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1119 wr_gen2((struct tx_desc *)wp, ogen);
1120 WARN_ON(ndesc != 0);
1121 }
1122}
1123
1124/**
1125 * write_tx_pkt_wr - write a TX_PKT work request
1126 * @adap: the adapter
1127 * @skb: the packet to send
1128 * @pi: the egress interface
1129 * @pidx: index of the first Tx descriptor to write
1130 * @gen: the generation value to use
1131 * @q: the Tx queue
1132 * @ndesc: number of descriptors the packet will occupy
1133 * @compl: the value of the COMPL bit to use
1134 *
1135 * Generate a TX_PKT work request to send the supplied packet.
1136 */
1137static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1138 const struct port_info *pi,
1139 unsigned int pidx, unsigned int gen,
1140 struct sge_txq *q, unsigned int ndesc,
1141 unsigned int compl)
1142{
1143 unsigned int flits, sgl_flits, cntrl, tso_info;
1144 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1145 struct tx_desc *d = &q->desc[pidx];
1146 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1147
1148 cpl->len = htonl(skb->len);
1149 cntrl = V_TXPKT_INTF(pi->port_id);
1150
1151 if (vlan_tx_tag_present(skb))
1152 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
1153
1154 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1155 if (tso_info) {
1156 int eth_type;
1157 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
1158
1159 d->flit[2] = 0;
1160 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1161 hdr->cntrl = htonl(cntrl);
1162 eth_type = skb_network_offset(skb) == ETH_HLEN ?
1163 CPL_ETH_II : CPL_ETH_II_VLAN;
1164 tso_info |= V_LSO_ETH_TYPE(eth_type) |
1165 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
1166 V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
1167 hdr->lso_info = htonl(tso_info);
1168 flits = 3;
1169 } else {
1170 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1171 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
1172 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
1173 cpl->cntrl = htonl(cntrl);
1174
1175 if (skb->len <= WR_LEN - sizeof(*cpl)) {
1176 q->sdesc[pidx].skb = NULL;
1177 if (!skb->data_len)
1178 skb_copy_from_linear_data(skb, &d->flit[2],
1179 skb->len);
1180 else
1181 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
1182
1183 flits = (skb->len + 7) / 8 + 2;
1184 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
1185 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
1186 | F_WR_SOP | F_WR_EOP | compl);
1187 wmb();
1188 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1189 V_WR_TID(q->token));
1190 wr_gen2(d, gen);
1191 kfree_skb(skb);
1192 return;
1193 }
1194
1195 flits = 2;
1196 }
1197
1198 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1199 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
1200
1201 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1202 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
1203 htonl(V_WR_TID(q->token)));
1204}
1205
1206static inline void t3_stop_tx_queue(struct netdev_queue *txq,
1207 struct sge_qset *qs, struct sge_txq *q)
1208{
1209 netif_tx_stop_queue(txq);
1210 set_bit(TXQ_ETH, &qs->txq_stopped);
1211 q->stops++;
1212}
1213
1214/**
1215 * t3_eth_xmit - add a packet to the Ethernet Tx queue
1216 * @skb: the packet
1217 * @dev: the egress net device
1218 *
1219 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
1220 */
1221netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1222{
1223 int qidx;
1224 unsigned int ndesc, pidx, credits, gen, compl;
1225 const struct port_info *pi = netdev_priv(dev);
1226 struct adapter *adap = pi->adapter;
1227 struct netdev_queue *txq;
1228 struct sge_qset *qs;
1229 struct sge_txq *q;
1230
1231 /*
1232 * The chip min packet length is 9 octets but play safe and reject
1233 * anything shorter than an Ethernet header.
1234 */
1235 if (unlikely(skb->len < ETH_HLEN)) {
1236 dev_kfree_skb(skb);
1237 return NETDEV_TX_OK;
1238 }
1239
1240 qidx = skb_get_queue_mapping(skb);
1241 qs = &pi->qs[qidx];
1242 q = &qs->txq[TXQ_ETH];
1243 txq = netdev_get_tx_queue(dev, qidx);
1244
1245 reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1246
1247 credits = q->size - q->in_use;
1248 ndesc = calc_tx_descs(skb);
1249
1250 if (unlikely(credits < ndesc)) {
1251 t3_stop_tx_queue(txq, qs, q);
1252 dev_err(&adap->pdev->dev,
1253 "%s: Tx ring %u full while queue awake!\n",
1254 dev->name, q->cntxt_id & 7);
1255 return NETDEV_TX_BUSY;
1256 }
1257
1258 q->in_use += ndesc;
1259 if (unlikely(credits - ndesc < q->stop_thres)) {
1260 t3_stop_tx_queue(txq, qs, q);
1261
1262 if (should_restart_tx(q) &&
1263 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1264 q->restarts++;
1265 netif_tx_start_queue(txq);
1266 }
1267 }
1268
1269 gen = q->gen;
1270 q->unacked += ndesc;
1271 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
1272 q->unacked &= 7;
1273 pidx = q->pidx;
1274 q->pidx += ndesc;
1275 if (q->pidx >= q->size) {
1276 q->pidx -= q->size;
1277 q->gen ^= 1;
1278 }
1279
1280 /* update port statistics */
1281 if (skb->ip_summed == CHECKSUM_COMPLETE)
1282 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1283 if (skb_shinfo(skb)->gso_size)
1284 qs->port_stats[SGE_PSTAT_TSO]++;
1285 if (vlan_tx_tag_present(skb))
1286 qs->port_stats[SGE_PSTAT_VLANINS]++;
1287
1288 /*
1289 * We do not use Tx completion interrupts to free DMAd Tx packets.
1290 * This is good for performance but means that we rely on new Tx
1291 * packets arriving to run the destructors of completed packets,
1292 * which open up space in their sockets' send queues. Sometimes
1293 * we do not get such new packets, causing Tx to stall. A single
1294 * UDP transmitter is a good example of this situation. We have
1295 * a clean up timer that periodically reclaims completed packets
1296 * but it doesn't run often enough (nor do we want it to) to prevent
1297 * lengthy stalls. A solution to this problem is to run the
1298 * destructor early, after the packet is queued but before it's DMAd.
1299 * A downside is that we lie to socket memory accounting, but the amount
1300 * of extra memory is reasonable (limited by the number of Tx
1301 * descriptors), in practice packets are almost always freed quickly by
1302 * subsequent traffic, and for protocols like TCP that wait for acks to
1303 * really free up the data the extra memory is even less.
1304 * On the positive side we run the destructors on the sending CPU
1305 * rather than on a potentially different completing CPU, usually a
1306 * good thing. We also run them without holding our Tx queue lock,
1307 * unlike what reclaim_completed_tx() would otherwise do.
1308 *
1309 * Run the destructor before telling the DMA engine about the packet
1310 * to make sure it doesn't complete and get freed prematurely.
1311 */
1312 if (likely(!skb_shared(skb)))
1313 skb_orphan(skb);
1314
1315 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1316 check_ring_tx_db(adap, q);
1317 return NETDEV_TX_OK;
1318}
1319
1320/**
1321 * write_imm - write a packet into a Tx descriptor as immediate data
1322 * @d: the Tx descriptor to write
1323 * @skb: the packet
1324 * @len: the length of packet data to write as immediate data
1325 * @gen: the generation bit value to write
1326 *
1327 * Writes a packet as immediate data into a Tx descriptor. The packet
1328 * contains a work request at its beginning. We must write the packet
1329 * carefully so the SGE doesn't read it accidentally before it's written
1330 * in its entirety.
1331 */
1332static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1333 unsigned int len, unsigned int gen)
1334{
1335 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1336 struct work_request_hdr *to = (struct work_request_hdr *)d;
1337
1338 if (likely(!skb->data_len))
1339 memcpy(&to[1], &from[1], len - sizeof(*from));
1340 else
1341 skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
1342
1343 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1344 V_WR_BCNTLFLT(len & 7));
1345 wmb();
1346 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1347 V_WR_LEN((len + 7) / 8));
1348 wr_gen2(d, gen);
1349 kfree_skb(skb);
1350}
1351
1352/**
1353 * check_desc_avail - check descriptor availability on a send queue
1354 * @adap: the adapter
1355 * @q: the send queue
1356 * @skb: the packet needing the descriptors
1357 * @ndesc: the number of Tx descriptors needed
1358 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1359 *
1360 * Checks if the requested number of Tx descriptors is available on an
1361 * SGE send queue. If the queue is already suspended or not enough
1362 * descriptors are available the packet is queued for later transmission.
1363 * Must be called with the Tx queue locked.
1364 *
1365 * Returns 0 if enough descriptors are available, 1 if there aren't
1366 * enough descriptors and the packet has been queued, and 2 if the caller
1367 * needs to retry because there weren't enough descriptors at the
1368 * beginning of the call but some freed up in the mean time.
1369 */
1370static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1371 struct sk_buff *skb, unsigned int ndesc,
1372 unsigned int qid)
1373{
1374 if (unlikely(!skb_queue_empty(&q->sendq))) {
1375 addq_exit:__skb_queue_tail(&q->sendq, skb);
1376 return 1;
1377 }
1378 if (unlikely(q->size - q->in_use < ndesc)) {
1379 struct sge_qset *qs = txq_to_qset(q, qid);
1380
1381 set_bit(qid, &qs->txq_stopped);
1382 smp_mb__after_clear_bit();
1383
1384 if (should_restart_tx(q) &&
1385 test_and_clear_bit(qid, &qs->txq_stopped))
1386 return 2;
1387
1388 q->stops++;
1389 goto addq_exit;
1390 }
1391 return 0;
1392}
1393
1394/**
1395 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1396 * @q: the SGE control Tx queue
1397 *
1398 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1399 * that send only immediate data (presently just the control queues) and
1400 * thus do not have any sk_buffs to release.
1401 */
1402static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1403{
1404 unsigned int reclaim = q->processed - q->cleaned;
1405
1406 q->in_use -= reclaim;
1407 q->cleaned += reclaim;
1408}
1409
1410static inline int immediate(const struct sk_buff *skb)
1411{
1412 return skb->len <= WR_LEN;
1413}
1414
1415/**
1416 * ctrl_xmit - send a packet through an SGE control Tx queue
1417 * @adap: the adapter
1418 * @q: the control queue
1419 * @skb: the packet
1420 *
1421 * Send a packet through an SGE control Tx queue. Packets sent through
1422 * a control queue must fit entirely as immediate data in a single Tx
1423 * descriptor and have no page fragments.
1424 */
1425static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1426 struct sk_buff *skb)
1427{
1428 int ret;
1429 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1430
1431 if (unlikely(!immediate(skb))) {
1432 WARN_ON(1);
1433 dev_kfree_skb(skb);
1434 return NET_XMIT_SUCCESS;
1435 }
1436
1437 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1438 wrp->wr_lo = htonl(V_WR_TID(q->token));
1439
1440 spin_lock(&q->lock);
1441 again:reclaim_completed_tx_imm(q);
1442
1443 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1444 if (unlikely(ret)) {
1445 if (ret == 1) {
1446 spin_unlock(&q->lock);
1447 return NET_XMIT_CN;
1448 }
1449 goto again;
1450 }
1451
1452 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1453
1454 q->in_use++;
1455 if (++q->pidx >= q->size) {
1456 q->pidx = 0;
1457 q->gen ^= 1;
1458 }
1459 spin_unlock(&q->lock);
1460 wmb();
1461 t3_write_reg(adap, A_SG_KDOORBELL,
1462 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1463 return NET_XMIT_SUCCESS;
1464}
1465
1466/**
1467 * restart_ctrlq - restart a suspended control queue
1468 * @qs: the queue set containing the control queue
1469 *
1470 * Resumes transmission on a suspended Tx control queue.
1471 */
1472static void restart_ctrlq(unsigned long data)
1473{
1474 struct sk_buff *skb;
1475 struct sge_qset *qs = (struct sge_qset *)data;
1476 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1477
1478 spin_lock(&q->lock);
1479 again:reclaim_completed_tx_imm(q);
1480
1481 while (q->in_use < q->size &&
1482 (skb = __skb_dequeue(&q->sendq)) != NULL) {
1483
1484 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1485
1486 if (++q->pidx >= q->size) {
1487 q->pidx = 0;
1488 q->gen ^= 1;
1489 }
1490 q->in_use++;
1491 }
1492
1493 if (!skb_queue_empty(&q->sendq)) {
1494 set_bit(TXQ_CTRL, &qs->txq_stopped);
1495 smp_mb__after_clear_bit();
1496
1497 if (should_restart_tx(q) &&
1498 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1499 goto again;
1500 q->stops++;
1501 }
1502
1503 spin_unlock(&q->lock);
1504 wmb();
1505 t3_write_reg(qs->adap, A_SG_KDOORBELL,
1506 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1507}
1508
1509/*
1510 * Send a management message through control queue 0
1511 */
1512int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1513{
1514 int ret;
1515 local_bh_disable();
1516 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1517 local_bh_enable();
1518
1519 return ret;
1520}
1521
1522/**
1523 * deferred_unmap_destructor - unmap a packet when it is freed
1524 * @skb: the packet
1525 *
1526 * This is the packet destructor used for Tx packets that need to remain
1527 * mapped until they are freed rather than until their Tx descriptors are
1528 * freed.
1529 */
1530static void deferred_unmap_destructor(struct sk_buff *skb)
1531{
1532 int i;
1533 const dma_addr_t *p;
1534 const struct skb_shared_info *si;
1535 const struct deferred_unmap_info *dui;
1536
1537 dui = (struct deferred_unmap_info *)skb->head;
1538 p = dui->addr;
1539
1540 if (skb->tail - skb->transport_header)
1541 pci_unmap_single(dui->pdev, *p++,
1542 skb->tail - skb->transport_header,
1543 PCI_DMA_TODEVICE);
1544
1545 si = skb_shinfo(skb);
1546 for (i = 0; i < si->nr_frags; i++)
1547 pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
1548 PCI_DMA_TODEVICE);
1549}
1550
1551static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1552 const struct sg_ent *sgl, int sgl_flits)
1553{
1554 dma_addr_t *p;
1555 struct deferred_unmap_info *dui;
1556
1557 dui = (struct deferred_unmap_info *)skb->head;
1558 dui->pdev = pdev;
1559 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1560 *p++ = be64_to_cpu(sgl->addr[0]);
1561 *p++ = be64_to_cpu(sgl->addr[1]);
1562 }
1563 if (sgl_flits)
1564 *p = be64_to_cpu(sgl->addr[0]);
1565}
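/*
 * Layout note: the SGL packs two DMA address/length pairs into every three
 * 8-byte flits, which is why the loop above harvests two addresses per three
 * flits and a final partial group contributes only a single address.
 */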
1566
1567/**
1568 * write_ofld_wr - write an offload work request
1569 * @adap: the adapter
1570 * @skb: the packet to send
1571 * @q: the Tx queue
1572 * @pidx: index of the first Tx descriptor to write
1573 * @gen: the generation value to use
1574 * @ndesc: number of descriptors the packet will occupy
1575 *
1576 * Write an offload work request to send the supplied packet. The packet
1577 * data already carry the work request with most fields populated.
1578 */
1579static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1580 struct sge_txq *q, unsigned int pidx,
1581 unsigned int gen, unsigned int ndesc)
1582{
1583 unsigned int sgl_flits, flits;
1584 struct work_request_hdr *from;
1585 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1586 struct tx_desc *d = &q->desc[pidx];
1587
1588 if (immediate(skb)) {
1589 q->sdesc[pidx].skb = NULL;
1590 write_imm(d, skb, skb->len, gen);
1591 return;
1592 }
1593
1594 /* Only TX_DATA builds SGLs */
1595
1596 from = (struct work_request_hdr *)skb->data;
1597 memcpy(&d->flit[1], &from[1],
1598 skb_transport_offset(skb) - sizeof(*from));
1599
1600 flits = skb_transport_offset(skb) / 8;
1601 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1602 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
1603 skb->tail - skb->transport_header,
1604 adap->pdev);
1605 if (need_skb_unmap()) {
1606 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1607 skb->destructor = deferred_unmap_destructor;
1608 }
1609
1610 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1611 gen, from->wr_hi, from->wr_lo);
1612}
1613
1614/**
1615 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1616 * @skb: the packet
1617 *
1618 * Returns the number of Tx descriptors needed for the given offload
1619 * packet. These packets are already fully constructed.
1620 */
1621static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1622{
1623 unsigned int flits, cnt;
1624
1625 if (skb->len <= WR_LEN)
1626 return 1; /* packet fits as immediate data */
1627
1628 flits = skb_transport_offset(skb) / 8; /* headers */
1629 cnt = skb_shinfo(skb)->nr_frags;
1630 if (skb->tail != skb->transport_header)
1631 cnt++;
1632 return flits_to_desc(flits + sgl_len(cnt));
1633}
1634
1635/**
1636 * ofld_xmit - send a packet through an offload queue
1637 * @adap: the adapter
1638 * @q: the Tx offload queue
1639 * @skb: the packet
1640 *
1641 * Send an offload packet through an SGE offload queue.
1642 */
1643static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1644 struct sk_buff *skb)
1645{
1646 int ret;
1647 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1648
1649 spin_lock(&q->lock);
1650again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1651
1652 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1653 if (unlikely(ret)) {
1654 if (ret == 1) {
1655 skb->priority = ndesc; /* save for restart */
1656 spin_unlock(&q->lock);
1657 return NET_XMIT_CN;
1658 }
1659 goto again;
1660 }
1661
1662 gen = q->gen;
1663 q->in_use += ndesc;
1664 pidx = q->pidx;
1665 q->pidx += ndesc;
1666 if (q->pidx >= q->size) {
1667 q->pidx -= q->size;
1668 q->gen ^= 1;
1669 }
1670 spin_unlock(&q->lock);
1671
1672 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1673 check_ring_tx_db(adap, q);
1674 return NET_XMIT_SUCCESS;
1675}
1676
1677/**
1678 * restart_offloadq - restart a suspended offload queue
1679 * @qs: the queue set containing the offload queue
1680 *
1681 * Resumes transmission on a suspended Tx offload queue.
1682 */
1683static void restart_offloadq(unsigned long data)
1684{
1685 struct sk_buff *skb;
1686 struct sge_qset *qs = (struct sge_qset *)data;
1687 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1688 const struct port_info *pi = netdev_priv(qs->netdev);
1689 struct adapter *adap = pi->adapter;
1690
1691 spin_lock(&q->lock);
1692again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1693
1694 while ((skb = skb_peek(&q->sendq)) != NULL) {
1695 unsigned int gen, pidx;
1696 unsigned int ndesc = skb->priority;
1697
1698 if (unlikely(q->size - q->in_use < ndesc)) {
1699 set_bit(TXQ_OFLD, &qs->txq_stopped);
1700 smp_mb__after_clear_bit();
1701
1702 if (should_restart_tx(q) &&
1703 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1704 goto again;
1705 q->stops++;
1706 break;
1707 }
1708
1709 gen = q->gen;
1710 q->in_use += ndesc;
1711 pidx = q->pidx;
1712 q->pidx += ndesc;
1713 if (q->pidx >= q->size) {
1714 q->pidx -= q->size;
1715 q->gen ^= 1;
1716 }
1717 __skb_unlink(skb, &q->sendq);
1718 spin_unlock(&q->lock);
1719
1720 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1721 spin_lock(&q->lock);
1722 }
1723 spin_unlock(&q->lock);
1724
1725#if USE_GTS
1726 set_bit(TXQ_RUNNING, &q->flags);
1727 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1728#endif
1729 wmb();
1730 t3_write_reg(adap, A_SG_KDOORBELL,
1731 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1732}
1733
1734/**
1735 * queue_set - return the queue set a packet should use
1736 * @skb: the packet
1737 *
1738 * Maps a packet to the SGE queue set it should use. The desired queue
1739 * set is carried in bits 1-3 in the packet's priority.
1740 */
1741static inline int queue_set(const struct sk_buff *skb)
1742{
1743 return skb->priority >> 1;
1744}
1745
1746/**
1747 * is_ctrl_pkt - return whether an offload packet is a control packet
1748 * @skb: the packet
1749 *
1750 * Determines whether an offload packet should use an OFLD or a CTRL
1751 * Tx queue. This is indicated by bit 0 in the packet's priority.
1752 */
1753static inline int is_ctrl_pkt(const struct sk_buff *skb)
1754{
1755 return skb->priority & 1;
1756}
1757
1758/**
1759 * t3_offload_tx - send an offload packet
1760 * @tdev: the offload device to send to
1761 * @skb: the packet
1762 *
1763 * Sends an offload packet. We use the packet priority to select the
1764 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1765 * should be sent as regular or control, bits 1-3 select the queue set.
1766 */
1767int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1768{
1769 struct adapter *adap = tdev2adap(tdev);
1770 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1771
1772 if (unlikely(is_ctrl_pkt(skb)))
1773 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1774
1775 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1776}
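/*
 * Illustrative caller-side sketch (hypothetical variable names) of the
 * priority encoding consumed above: bit 0 selects the CTRL vs. OFLD queue,
 * bits 1-3 select the queue set.
 *
 *	skb->priority = (qset_idx << 1) | use_ctrl_queue;
 *	t3_offload_tx(tdev, skb);
 */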
1777
1778/**
1779 * offload_enqueue - add an offload packet to an SGE offload receive queue
1780 * @q: the SGE response queue
1781 * @skb: the packet
1782 *
1783 * Add a new offload packet to an SGE response queue's offload packet
1784 * queue. If the packet is the first on the queue it schedules the RX
1785 * softirq to process the queue.
1786 */
1787static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1788{
1789 int was_empty = skb_queue_empty(&q->rx_queue);
1790
1791 __skb_queue_tail(&q->rx_queue, skb);
1792
1793 if (was_empty) {
1794 struct sge_qset *qs = rspq_to_qset(q);
1795
1796 napi_schedule(&qs->napi);
1797 }
1798}
1799
1800/**
1801 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1802 * @tdev: the offload device that will be receiving the packets
1803 * @q: the SGE response queue that assembled the bundle
1804 * @skbs: the partial bundle
1805 * @n: the number of packets in the bundle
1806 *
1807 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1808 */
1809static inline void deliver_partial_bundle(struct t3cdev *tdev,
1810 struct sge_rspq *q,
1811 struct sk_buff *skbs[], int n)
1812{
1813 if (n) {
1814 q->offload_bundles++;
1815 tdev->recv(tdev, skbs, n);
1816 }
1817}
1818
1819/**
1820 * ofld_poll - NAPI handler for offload packets in interrupt mode
1821 * @napi: the NAPI instance doing the polling
1822 * @budget: polling budget
1823 *
1824 * The NAPI handler for offload packets when a response queue is serviced
1825 * by the hard interrupt handler, i.e., when it's operating in non-polling
1826 * mode. Creates small packet batches and sends them through the offload
1827 * receive handler. Batches need to be of modest size as we do prefetches
1828 * on the packets in each.
1829 */
1830static int ofld_poll(struct napi_struct *napi, int budget)
1831{
1832 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1833 struct sge_rspq *q = &qs->rspq;
1834 struct adapter *adapter = qs->adap;
1835 int work_done = 0;
1836
1837 while (work_done < budget) {
1838 struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1839 struct sk_buff_head queue;
1840 int ngathered;
1841
1842 spin_lock_irq(&q->lock);
1843 __skb_queue_head_init(&queue);
1844 skb_queue_splice_init(&q->rx_queue, &queue);
1845 if (skb_queue_empty(&queue)) {
1846 napi_complete(napi);
1847 spin_unlock_irq(&q->lock);
1848 return work_done;
1849 }
1850 spin_unlock_irq(&q->lock);
1851
1852 ngathered = 0;
1853 skb_queue_walk_safe(&queue, skb, tmp) {
1854 if (work_done >= budget)
1855 break;
1856 work_done++;
1857
1858 __skb_unlink(skb, &queue);
1859 prefetch(skb->data);
1860 skbs[ngathered] = skb;
1861 if (++ngathered == RX_BUNDLE_SIZE) {
1862 q->offload_bundles++;
1863 adapter->tdev.recv(&adapter->tdev, skbs,
1864 ngathered);
1865 ngathered = 0;
1866 }
1867 }
1868 if (!skb_queue_empty(&queue)) {
1869 /* splice remaining packets back onto Rx queue */
1870 spin_lock_irq(&q->lock);
1871 skb_queue_splice(&queue, &q->rx_queue);
1872 spin_unlock_irq(&q->lock);
1873 }
1874 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1875 }
1876
1877 return work_done;
1878}
1879
1880/**
1881 * rx_offload - process a received offload packet
1882 * @tdev: the offload device receiving the packet
1883 * @rq: the response queue that received the packet
1884 * @skb: the packet
1885 * @rx_gather: a gather list of packets if we are building a bundle
1886 * @gather_idx: index of the next available slot in the bundle
1887 *
1888 * Process an ingress offload packet and add it to the offload ingress
1889 * queue. Returns the index of the next available slot in the bundle.
1890 */
1891static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1892 struct sk_buff *skb, struct sk_buff *rx_gather[],
1893 unsigned int gather_idx)
1894{
1895 skb_reset_mac_header(skb);
1896 skb_reset_network_header(skb);
1897 skb_reset_transport_header(skb);
1898
1899 if (rq->polling) {
1900 rx_gather[gather_idx++] = skb;
1901 if (gather_idx == RX_BUNDLE_SIZE) {
1902 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1903 gather_idx = 0;
1904 rq->offload_bundles++;
1905 }
1906 } else
1907 offload_enqueue(rq, skb);
1908
1909 return gather_idx;
1910}
1911
1912/**
1913 * restart_tx - check whether to restart suspended Tx queues
1914 * @qs: the queue set to resume
1915 *
1916 * Restarts suspended Tx queues of an SGE queue set if they have enough
1917 * free resources to resume operation.
1918 */
1919static void restart_tx(struct sge_qset *qs)
1920{
1921 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1922 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1923 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1924 qs->txq[TXQ_ETH].restarts++;
1925 if (netif_running(qs->netdev))
1926 netif_tx_wake_queue(qs->tx_q);
1927 }
1928
1929 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1930 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1931 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1932 qs->txq[TXQ_OFLD].restarts++;
1933 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1934 }
1935 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1936 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1937 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1938 qs->txq[TXQ_CTRL].restarts++;
1939 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1940 }
1941}
1942
1943/**
1944 * cxgb3_arp_process - process an ARP request probing a private IP address
1945 * @pi: the port the ARP request arrived on
1946 * @skb: the skbuff containing the ARP request
1947 *
1948 * Check if the ARP request is probing the private IP address
1949 * dedicated to iSCSI, generate an ARP reply if so.
1950 */
1951static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
1952{
1953 struct net_device *dev = skb->dev;
1954 struct arphdr *arp;
1955 unsigned char *arp_ptr;
1956 unsigned char *sha;
1957 __be32 sip, tip;
1958
1959 if (!dev)
1960 return;
1961
1962 skb_reset_network_header(skb);
1963 arp = arp_hdr(skb);
1964
1965 if (arp->ar_op != htons(ARPOP_REQUEST))
1966 return;
1967
1968 arp_ptr = (unsigned char *)(arp + 1);
1969 sha = arp_ptr;
1970 arp_ptr += dev->addr_len;
1971 memcpy(&sip, arp_ptr, sizeof(sip));
1972 arp_ptr += sizeof(sip);
1973 arp_ptr += dev->addr_len;
1974 memcpy(&tip, arp_ptr, sizeof(tip));
1975
1976 if (tip != pi->iscsi_ipv4addr)
1977 return;
1978
1979 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
1980 pi->iscsic.mac_addr, sha);
1981
1982}
1983
1984static inline int is_arp(struct sk_buff *skb)
1985{
1986 return skb->protocol == htons(ETH_P_ARP);
1987}
1988
1989static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
1990 struct sk_buff *skb)
1991{
1992 if (is_arp(skb)) {
1993 cxgb3_arp_process(pi, skb);
1994 return;
1995 }
1996
1997 if (pi->iscsic.recv)
1998 pi->iscsic.recv(pi, skb);
1999
2000}
2001
2002/**
2003 * rx_eth - process an ingress ethernet packet
2004 * @adap: the adapter
2005 * @rq: the response queue that received the packet
2006 * @skb: the packet
2007 * @pad: amount of padding at the start of the buffer
2008 *
2009 * Process an ingress ethernet packet and deliver it to the stack.
2010 * The padding is 2 if the packet was delivered in an Rx buffer and 0
2011 * if it was immediate data in a response.
2012 */
2013static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2014 struct sk_buff *skb, int pad, int lro)
2015{
2016 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
2017 struct sge_qset *qs = rspq_to_qset(rq);
2018 struct port_info *pi;
2019
2020 skb_pull(skb, sizeof(*p) + pad);
2021 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
2022 pi = netdev_priv(skb->dev);
2023 if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
2024 p->csum == htons(0xffff) && !p->fragment) {
2025 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2026 skb->ip_summed = CHECKSUM_UNNECESSARY;
2027 } else
2028 skb_checksum_none_assert(skb);
2029 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2030
2031 if (p->vlan_valid) {
2032 qs->port_stats[SGE_PSTAT_VLANEX]++;
2033 __vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
2034 }
2035 if (rq->polling) {
2036 if (lro)
2037 napi_gro_receive(&qs->napi, skb);
2038 else {
2039 if (unlikely(pi->iscsic.flags))
2040 cxgb3_process_iscsi_prov_pack(pi, skb);
2041 netif_receive_skb(skb);
2042 }
2043 } else
2044 netif_rx(skb);
2045}
2046
2047static inline int is_eth_tcp(u32 rss)
2048{
2049 return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
2050}
2051
2052/**
2053 * lro_add_page - add a page chunk to an LRO session
2054 * @adap: the adapter
2055 * @qs: the associated queue set
2056 * @fl: the free list containing the page chunk to add
2057 * @len: packet length
2058 * @complete: Indicates the last fragment of a frame
2059 *
2060 * Add a received packet contained in a page chunk to an existing LRO
2061 * session.
2062 */
2063static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2064 struct sge_fl *fl, int len, int complete)
2065{
2066 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2067 struct port_info *pi = netdev_priv(qs->netdev);
2068 struct sk_buff *skb = NULL;
2069 struct cpl_rx_pkt *cpl;
2070 struct skb_frag_struct *rx_frag;
2071 int nr_frags;
2072 int offset = 0;
2073
2074 if (!qs->nomem) {
2075 skb = napi_get_frags(&qs->napi);
2076 qs->nomem = !skb;
2077 }
2078
2079 fl->credits--;
2080
2081 pci_dma_sync_single_for_cpu(adap->pdev,
2082 dma_unmap_addr(sd, dma_addr),
2083 fl->buf_size - SGE_PG_RSVD,
2084 PCI_DMA_FROMDEVICE);
2085
2086 (*sd->pg_chunk.p_cnt)--;
2087 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
2088 pci_unmap_page(adap->pdev,
2089 sd->pg_chunk.mapping,
2090 fl->alloc_size,
2091 PCI_DMA_FROMDEVICE);
2092
2093 if (!skb) {
2094 put_page(sd->pg_chunk.page);
2095 if (complete)
2096 qs->nomem = 0;
2097 return;
2098 }
2099
2100 rx_frag = skb_shinfo(skb)->frags;
2101 nr_frags = skb_shinfo(skb)->nr_frags;
2102
2103 if (!nr_frags) {
2104 offset = 2 + sizeof(struct cpl_rx_pkt);
2105 cpl = qs->lro_va = sd->pg_chunk.va + 2;
2106
2107 if ((qs->netdev->features & NETIF_F_RXCSUM) &&
2108 cpl->csum_valid && cpl->csum == htons(0xffff)) {
2109 skb->ip_summed = CHECKSUM_UNNECESSARY;
2110 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2111 } else
2112 skb->ip_summed = CHECKSUM_NONE;
2113 } else
2114 cpl = qs->lro_va;
2115
2116 len -= offset;
2117
2118 rx_frag += nr_frags;
2119 rx_frag->page = sd->pg_chunk.page;
2120 rx_frag->page_offset = sd->pg_chunk.offset + offset;
2121 rx_frag->size = len;
2122
2123 skb->len += len;
2124 skb->data_len += len;
2125 skb->truesize += len;
2126 skb_shinfo(skb)->nr_frags++;
2127
2128 if (!complete)
2129 return;
2130
2131 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2132
2133 if (cpl->vlan_valid)
2134 __vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan));
2135 napi_gro_frags(&qs->napi);
2136}
2137
2138/**
2139 * handle_rsp_cntrl_info - handles control information in a response
2140 * @qs: the queue set corresponding to the response
2141 * @flags: the response control flags
2142 *
2143 * Handles the control information of an SGE response, such as GTS
2144 * indications and completion credits for the queue set's Tx queues.
2145 * HW coalesces credits; we don't do any extra SW coalescing.
2146 */
2147static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
2148{
2149 unsigned int credits;
2150
2151#if USE_GTS
2152 if (flags & F_RSPD_TXQ0_GTS)
2153 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2154#endif
2155
2156 credits = G_RSPD_TXQ0_CR(flags);
2157 if (credits)
2158 qs->txq[TXQ_ETH].processed += credits;
2159
2160 credits = G_RSPD_TXQ2_CR(flags);
2161 if (credits)
2162 qs->txq[TXQ_CTRL].processed += credits;
2163
2164# if USE_GTS
2165 if (flags & F_RSPD_TXQ1_GTS)
2166 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2167# endif
2168 credits = G_RSPD_TXQ1_CR(flags);
2169 if (credits)
2170 qs->txq[TXQ_OFLD].processed += credits;
2171}
2172
2173/**
2174 * check_ring_db - check if we need to ring any doorbells
2175 * @adap: the adapter
2176 * @qs: the queue set whose Tx queues are to be examined
2177 * @sleeping: indicates which Tx queue sent GTS
2178 *
2179 * Checks if some of a queue set's Tx queues need to ring their doorbells
2180 * to resume transmission after idling while they still have unprocessed
2181 * descriptors.
2182 */
2183static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2184 unsigned int sleeping)
2185{
2186 if (sleeping & F_RSPD_TXQ0_GTS) {
2187 struct sge_txq *txq = &qs->txq[TXQ_ETH];
2188
2189 if (txq->cleaned + txq->in_use != txq->processed &&
2190 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2191 set_bit(TXQ_RUNNING, &txq->flags);
2192 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2193 V_EGRCNTX(txq->cntxt_id));
2194 }
2195 }
2196
2197 if (sleeping & F_RSPD_TXQ1_GTS) {
2198 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2199
2200 if (txq->cleaned + txq->in_use != txq->processed &&
2201 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2202 set_bit(TXQ_RUNNING, &txq->flags);
2203 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2204 V_EGRCNTX(txq->cntxt_id));
2205 }
2206 }
2207}
2208
2209/**
2210 * is_new_response - check if a response is newly written
2211 * @r: the response descriptor
2212 * @q: the response queue
2213 *
2214 * Returns true if a response descriptor contains a yet unprocessed
2215 * response.
2216 */
2217static inline int is_new_response(const struct rsp_desc *r,
2218 const struct sge_rspq *q)
2219{
2220 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2221}
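/*
 * Note on the generation bit: hardware tags each response it writes with the
 * generation for the current pass over the ring, while q->gen is toggled
 * whenever the software consumer index wraps (see process_responses()), so a
 * descriptor whose F_RSPD_GEN2 bit matches q->gen is new.
 */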
2222
2223static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2224{
2225 q->pg_skb = NULL;
2226 q->rx_recycle_buf = 0;
2227}
2228
2229#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2230#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2231 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2232 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2233 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2234
2235/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2236#define NOMEM_INTR_DELAY 2500
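/* 2500 ticks of 0.1 us each, i.e. a 250 us holdoff while short of memory */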
2237
2238/**
2239 * process_responses - process responses from an SGE response queue
2240 * @adap: the adapter
2241 * @qs: the queue set to which the response queue belongs
2242 * @budget: how many responses can be processed in this round
2243 *
2244 * Process responses from an SGE response queue up to the supplied budget.
2245 * Responses include received packets as well as credits and other events
2246 * for the queues that belong to the response queue's queue set.
2247 * A negative budget is effectively unlimited.
2248 *
2249 * Additionally choose the interrupt holdoff time for the next interrupt
2250 * on this queue. If the system is short of memory, use a fairly long
2251 * delay to help recovery.
2252 */
2253static int process_responses(struct adapter *adap, struct sge_qset *qs,
2254 int budget)
2255{
2256 struct sge_rspq *q = &qs->rspq;
2257 struct rsp_desc *r = &q->desc[q->cidx];
2258 int budget_left = budget;
2259 unsigned int sleeping = 0;
2260 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
2261 int ngathered = 0;
2262
2263 q->next_holdoff = q->holdoff_tmr;
2264
2265 while (likely(budget_left && is_new_response(r, q))) {
2266 int packet_complete, eth, ethpad = 2;
2267 int lro = !!(qs->netdev->features & NETIF_F_GRO);
2268 struct sk_buff *skb = NULL;
2269 u32 len, flags;
2270 __be32 rss_hi, rss_lo;
2271
2272 rmb();
2273 eth = r->rss_hdr.opcode == CPL_RX_PKT;
2274 rss_hi = *(const __be32 *)r;
2275 rss_lo = r->rss_hdr.rss_hash_val;
2276 flags = ntohl(r->flags);
2277
2278 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2279 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
2280 if (!skb)
2281 goto no_mem;
2282
2283 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
2284 skb->data[0] = CPL_ASYNC_NOTIF;
2285 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2286 q->async_notif++;
2287 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2288 skb = get_imm_packet(r);
2289 if (unlikely(!skb)) {
2290no_mem:
2291 q->next_holdoff = NOMEM_INTR_DELAY;
2292 q->nomem++;
2293 /* consume one credit since we tried */
2294 budget_left--;
2295 break;
2296 }
2297 q->imm_data++;
2298 ethpad = 0;
2299 } else if ((len = ntohl(r->len_cq)) != 0) {
2300 struct sge_fl *fl;
2301
2302 lro &= eth && is_eth_tcp(rss_hi);
2303
2304 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2305 if (fl->use_pages) {
2306 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
2307
2308 prefetch(addr);
2309#if L1_CACHE_BYTES < 128
2310 prefetch(addr + L1_CACHE_BYTES);
2311#endif
2312 __refill_fl(adap, fl);
2313 if (lro > 0) {
2314 lro_add_page(adap, qs, fl,
2315 G_RSPD_LEN(len),
2316 flags & F_RSPD_EOP);
2317 goto next_fl;
2318 }
2319
2320 skb = get_packet_pg(adap, fl, q,
2321 G_RSPD_LEN(len),
2322 eth ?
2323 SGE_RX_DROP_THRES : 0);
2324 q->pg_skb = skb;
2325 } else
2326 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2327 eth ? SGE_RX_DROP_THRES : 0);
2328 if (unlikely(!skb)) {
2329 if (!eth)
2330 goto no_mem;
2331 q->rx_drops++;
2332 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2333 __skb_pull(skb, 2);
2334next_fl:
2335 if (++fl->cidx == fl->size)
2336 fl->cidx = 0;
2337 } else
2338 q->pure_rsps++;
2339
2340 if (flags & RSPD_CTRL_MASK) {
2341 sleeping |= flags & RSPD_GTS_MASK;
2342 handle_rsp_cntrl_info(qs, flags);
2343 }
2344
2345 r++;
2346 if (unlikely(++q->cidx == q->size)) {
2347 q->cidx = 0;
2348 q->gen ^= 1;
2349 r = q->desc;
2350 }
2351 prefetch(r);
2352
2353 if (++q->credits >= (q->size / 4)) {
2354 refill_rspq(adap, q, q->credits);
2355 q->credits = 0;
2356 }
2357
2358 packet_complete = flags &
2359 (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2360 F_RSPD_ASYNC_NOTIF);
2361
2362 if (skb != NULL && packet_complete) {
2363 if (eth)
2364 rx_eth(adap, q, skb, ethpad, lro);
2365 else {
2366 q->offload_pkts++;
2367 /* Preserve the RSS info in csum & priority */
2368 skb->csum = rss_hi;
2369 skb->priority = rss_lo;
2370 ngathered = rx_offload(&adap->tdev, q, skb,
2371 offload_skbs,
2372 ngathered);
2373 }
2374
2375 if (flags & F_RSPD_EOP)
2376 clear_rspq_bufstate(q);
2377 }
2378 --budget_left;
2379 }
2380
2381 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2382
2383 if (sleeping)
2384 check_ring_db(adap, qs, sleeping);
2385
2386 smp_mb(); /* commit Tx queue .processed updates */
2387 if (unlikely(qs->txq_stopped != 0))
2388 restart_tx(qs);
2389
2390 budget -= budget_left;
2391 return budget;
2392}
2393
2394static inline int is_pure_response(const struct rsp_desc *r)
2395{
2396 __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2397
2398 return (n | r->len_cq) == 0;
2399}
2400
2401/**
2402 * napi_rx_handler - the NAPI handler for Rx processing
2403 * @napi: the napi instance
2404 * @budget: how many packets we can process in this round
2405 *
2406 * Handler for new data events when using NAPI.
2407 */
2408static int napi_rx_handler(struct napi_struct *napi, int budget)
2409{
2410 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2411 struct adapter *adap = qs->adap;
2412 int work_done = process_responses(adap, qs, budget);
2413
2414 if (likely(work_done < budget)) {
2415 napi_complete(napi);
2416
2417 /*
2418 * Because we don't atomically flush the following
2419 * write it is possible that in very rare cases it can
2420 * reach the device in a way that races with a new
2421 * response being written plus an error interrupt
2422 * causing the NAPI interrupt handler below to return
2423 * unhandled status to the OS. To protect against
2424 * this would require flushing the write and doing
2425 * both the write and the flush with interrupts off.
2426 * Way too expensive and unjustifiable given the
2427 * rarity of the race.
2428 *
2429 * The race cannot happen at all with MSI-X.
2430 */
2431 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2432 V_NEWTIMER(qs->rspq.next_holdoff) |
2433 V_NEWINDEX(qs->rspq.cidx));
2434 }
2435 return work_done;
2436}
2437
2438/*
2439 * Returns true if the device is already scheduled for polling.
2440 */
2441static inline int napi_is_scheduled(struct napi_struct *napi)
2442{
2443 return test_bit(NAPI_STATE_SCHED, &napi->state);
2444}
2445
2446/**
2447 * process_pure_responses - process pure responses from a response queue
2448 * @adap: the adapter
2449 * @qs: the queue set owning the response queue
2450 * @r: the first pure response to process
2451 *
2452 * A simpler version of process_responses() that handles only pure (i.e.,
2453 * non-data-carrying) responses. Such responses are too lightweight to
2454 * justify calling a softirq under NAPI, so we handle them specially in
2455 * the interrupt handler. The function is called with a pointer to a
2456 * response, which the caller must ensure is a valid pure response.
2457 *
2458 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2459 */
2460static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2461 struct rsp_desc *r)
2462{
2463 struct sge_rspq *q = &qs->rspq;
2464 unsigned int sleeping = 0;
2465
2466 do {
2467 u32 flags = ntohl(r->flags);
2468
2469 r++;
2470 if (unlikely(++q->cidx == q->size)) {
2471 q->cidx = 0;
2472 q->gen ^= 1;
2473 r = q->desc;
2474 }
2475 prefetch(r);
2476
2477 if (flags & RSPD_CTRL_MASK) {
2478 sleeping |= flags & RSPD_GTS_MASK;
2479 handle_rsp_cntrl_info(qs, flags);
2480 }
2481
2482 q->pure_rsps++;
2483 if (++q->credits >= (q->size / 4)) {
2484 refill_rspq(adap, q, q->credits);
2485 q->credits = 0;
2486 }
2487 if (!is_new_response(r, q))
2488 break;
2489 rmb();
2490 } while (is_pure_response(r));
2491
2492 if (sleeping)
2493 check_ring_db(adap, qs, sleeping);
2494
2495 smp_mb(); /* commit Tx queue .processed updates */
2496 if (unlikely(qs->txq_stopped != 0))
2497 restart_tx(qs);
2498
2499 return is_new_response(r, q);
2500}
2501
2502/**
2503 * handle_responses - decide what to do with new responses in NAPI mode
2504 * @adap: the adapter
2505 * @q: the response queue
2506 *
2507 * This is used by the NAPI interrupt handlers to decide what to do with
2508 * new SGE responses. If there are no new responses it returns -1. If
2509 * there are new responses and they are pure (i.e., non-data carrying)
2510 * it handles them straight in hard interrupt context as they are very
2511 * cheap and don't deliver any packets. Finally, if there are any data
2512 * signaling responses it schedules the NAPI handler. Returns 1 if it
2513 * schedules NAPI, 0 if all new responses were pure.
2514 *
2515 * The caller must ascertain NAPI is not already running.
2516 */
2517static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2518{
2519 struct sge_qset *qs = rspq_to_qset(q);
2520 struct rsp_desc *r = &q->desc[q->cidx];
2521
2522 if (!is_new_response(r, q))
2523 return -1;
2524 rmb();
2525 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2526 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2527 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2528 return 0;
2529 }
2530 napi_schedule(&qs->napi);
2531 return 1;
2532}
2533
2534/*
2535 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2536 * (i.e., response queue serviced in hard interrupt).
2537 */
2538static irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2539{
2540 struct sge_qset *qs = cookie;
2541 struct adapter *adap = qs->adap;
2542 struct sge_rspq *q = &qs->rspq;
2543
2544 spin_lock(&q->lock);
2545 if (process_responses(adap, qs, -1) == 0)
2546 q->unhandled_irqs++;
2547 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2548 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2549 spin_unlock(&q->lock);
2550 return IRQ_HANDLED;
2551}
2552
2553/*
2554 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2555 * (i.e., response queue serviced by NAPI polling).
2556 */
2557static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2558{
2559 struct sge_qset *qs = cookie;
2560 struct sge_rspq *q = &qs->rspq;
2561
2562 spin_lock(&q->lock);
2563
2564 if (handle_responses(qs->adap, q) < 0)
2565 q->unhandled_irqs++;
2566 spin_unlock(&q->lock);
2567 return IRQ_HANDLED;
2568}
2569
2570/*
2571 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2572 * SGE response queues as well as error and other async events as they all use
2573 * the same MSI vector. We use one SGE response queue per port in this mode
2574 * and protect all response queues with queue 0's lock.
2575 */
2576static irqreturn_t t3_intr_msi(int irq, void *cookie)
2577{
2578 int new_packets = 0;
2579 struct adapter *adap = cookie;
2580 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2581
2582 spin_lock(&q->lock);
2583
2584 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2585 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2586 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2587 new_packets = 1;
2588 }
2589
2590 if (adap->params.nports == 2 &&
2591 process_responses(adap, &adap->sge.qs[1], -1)) {
2592 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2593
2594 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2595 V_NEWTIMER(q1->next_holdoff) |
2596 V_NEWINDEX(q1->cidx));
2597 new_packets = 1;
2598 }
2599
2600 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2601 q->unhandled_irqs++;
2602
2603 spin_unlock(&q->lock);
2604 return IRQ_HANDLED;
2605}
2606
2607static int rspq_check_napi(struct sge_qset *qs)
2608{
2609 struct sge_rspq *q = &qs->rspq;
2610
2611 if (!napi_is_scheduled(&qs->napi) &&
2612 is_new_response(&q->desc[q->cidx], q)) {
2613 napi_schedule(&qs->napi);
2614 return 1;
2615 }
2616 return 0;
2617}
2618
2619/*
2620 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2621 * by NAPI polling). Handles data events from SGE response queues as well as
2622 * error and other async events as they all use the same MSI vector. We use
2623 * one SGE response queue per port in this mode and protect all response
2624 * queues with queue 0's lock.
2625 */
2626static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2627{
2628 int new_packets;
2629 struct adapter *adap = cookie;
2630 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2631
2632 spin_lock(&q->lock);
2633
2634 new_packets = rspq_check_napi(&adap->sge.qs[0]);
2635 if (adap->params.nports == 2)
2636 new_packets += rspq_check_napi(&adap->sge.qs[1]);
2637 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2638 q->unhandled_irqs++;
2639
2640 spin_unlock(&q->lock);
2641 return IRQ_HANDLED;
2642}
2643
2644/*
2645 * A helper function that processes responses and issues GTS.
2646 */
2647static inline int process_responses_gts(struct adapter *adap,
2648 struct sge_rspq *rq)
2649{
2650 int work;
2651
2652 work = process_responses(adap, rspq_to_qset(rq), -1);
2653 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2654 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2655 return work;
2656}
2657
2658/*
2659 * The legacy INTx interrupt handler. This needs to handle data events from
2660 * SGE response queues as well as error and other async events as they all use
2661 * the same interrupt pin. We use one SGE response queue per port in this mode
2662 * and protect all response queues with queue 0's lock.
2663 */
2664static irqreturn_t t3_intr(int irq, void *cookie)
2665{
2666 int work_done, w0, w1;
2667 struct adapter *adap = cookie;
2668 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2669 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2670
2671 spin_lock(&q0->lock);
2672
2673 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2674 w1 = adap->params.nports == 2 &&
2675 is_new_response(&q1->desc[q1->cidx], q1);
2676
2677 if (likely(w0 | w1)) {
2678 t3_write_reg(adap, A_PL_CLI, 0);
2679 t3_read_reg(adap, A_PL_CLI); /* flush */
2680
2681 if (likely(w0))
2682 process_responses_gts(adap, q0);
2683
2684 if (w1)
2685 process_responses_gts(adap, q1);
2686
2687 work_done = w0 | w1;
2688 } else
2689 work_done = t3_slow_intr_handler(adap);
2690
2691 spin_unlock(&q0->lock);
2692 return IRQ_RETVAL(work_done != 0);
2693}
2694
2695/*
2696 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2697 * Handles data events from SGE response queues as well as error and other
2698 * async events as they all use the same interrupt pin. We use one SGE
2699 * response queue per port in this mode and protect all response queues with
2700 * queue 0's lock.
2701 */
2702static irqreturn_t t3b_intr(int irq, void *cookie)
2703{
2704 u32 map;
2705 struct adapter *adap = cookie;
2706 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2707
2708 t3_write_reg(adap, A_PL_CLI, 0);
2709 map = t3_read_reg(adap, A_SG_DATA_INTR);
2710
2711 if (unlikely(!map)) /* shared interrupt, most likely */
2712 return IRQ_NONE;
2713
2714 spin_lock(&q0->lock);
2715
2716 if (unlikely(map & F_ERRINTR))
2717 t3_slow_intr_handler(adap);
2718
2719 if (likely(map & 1))
2720 process_responses_gts(adap, q0);
2721
2722 if (map & 2)
2723 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2724
2725 spin_unlock(&q0->lock);
2726 return IRQ_HANDLED;
2727}
2728
2729/*
2730 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2731 * Handles data events from SGE response queues as well as error and other
2732 * async events as they all use the same interrupt pin. We use one SGE
2733 * response queue per port in this mode and protect all response queues with
2734 * queue 0's lock.
2735 */
2736static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2737{
2738 u32 map;
2739 struct adapter *adap = cookie;
2740 struct sge_qset *qs0 = &adap->sge.qs[0];
2741 struct sge_rspq *q0 = &qs0->rspq;
2742
2743 t3_write_reg(adap, A_PL_CLI, 0);
2744 map = t3_read_reg(adap, A_SG_DATA_INTR);
2745
2746 if (unlikely(!map)) /* shared interrupt, most likely */
2747 return IRQ_NONE;
2748
2749 spin_lock(&q0->lock);
2750
2751 if (unlikely(map & F_ERRINTR))
2752 t3_slow_intr_handler(adap);
2753
2754 if (likely(map & 1))
2755 napi_schedule(&qs0->napi);
2756
2757 if (map & 2)
2758 napi_schedule(&adap->sge.qs[1].napi);
2759
2760 spin_unlock(&q0->lock);
2761 return IRQ_HANDLED;
2762}
2763
2764/**
2765 * t3_intr_handler - select the top-level interrupt handler
2766 * @adap: the adapter
2767 * @polling: whether using NAPI to service response queues
2768 *
2769 * Selects the top-level interrupt handler based on the type of interrupts
2770 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2771 * response queues.
2772 */
2773irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
2774{
2775 if (adap->flags & USING_MSIX)
2776 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2777 if (adap->flags & USING_MSI)
2778 return polling ? t3_intr_msi_napi : t3_intr_msi;
2779 if (adap->params.rev > 0)
2780 return polling ? t3b_intr_napi : t3b_intr;
2781 return t3_intr;
2782}
2783
2784#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2785 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2786 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
2787 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
2788 F_HIRCQPARITYERROR)
2789#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
2790#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
2791 F_RSPQDISABLED)
2792
2793/**
2794 * t3_sge_err_intr_handler - SGE async event interrupt handler
2795 * @adapter: the adapter
2796 *
2797 * Interrupt handler for SGE asynchronous (non-data) events.
2798 */
2799void t3_sge_err_intr_handler(struct adapter *adapter)
2800{
2801 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
2802 ~F_FLEMPTY;
2803
2804 if (status & SGE_PARERR)
2805 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2806 status & SGE_PARERR);
2807 if (status & SGE_FRAMINGERR)
2808 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2809 status & SGE_FRAMINGERR);
2810
2811 if (status & F_RSPQCREDITOVERFOW)
2812 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2813
2814 if (status & F_RSPQDISABLED) {
2815 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2816
2817 CH_ALERT(adapter,
2818 "packet delivered to disabled response queue "
2819 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2820 }
2821
2822 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2823 queue_work(cxgb3_wq, &adapter->db_drop_task);
2824
2825 if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
2826 queue_work(cxgb3_wq, &adapter->db_full_task);
2827
2828 if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
2829 queue_work(cxgb3_wq, &adapter->db_empty_task);
2830
2831 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2832 if (status & SGE_FATALERR)
2833 t3_fatal_err(adapter);
2834}
2835
2836/**
2837 * sge_timer_tx - perform periodic maintenance of an SGE qset
2838 * @data: the SGE queue set to maintain
2839 *
2840 * Runs periodically from a timer to perform maintenance of an SGE queue
2841 * set. Its single task is to clean up any completed Tx descriptors
2842 * that may still be pending.
2843 *
2844 * Normal descriptor cleanup happens when new packets are added to a Tx
2845 * queue so this timer is relatively infrequent and does any cleanup only
2846 * if the Tx queue has not seen any new packets in a while. We make a
2847 * best effort attempt to reclaim descriptors, in that we don't wait
2848 * around if we cannot get a queue's lock (which most likely is because
2849 * someone else is queueing new packets and so will also handle the clean
2850 * up). Since control queues use immediate data exclusively we don't
2851 * bother cleaning them up here.
2852 *
2853 */
2854static void sge_timer_tx(unsigned long data)
2855{
2856 struct sge_qset *qs = (struct sge_qset *)data;
2857 struct port_info *pi = netdev_priv(qs->netdev);
2858 struct adapter *adap = pi->adapter;
2859 unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
2860 unsigned long next_period;
2861
2862 if (__netif_tx_trylock(qs->tx_q)) {
2863 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2864 TX_RECLAIM_TIMER_CHUNK);
2865 __netif_tx_unlock(qs->tx_q);
2866 }
2867
2868 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2869 tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2870 TX_RECLAIM_TIMER_CHUNK);
2871 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2872 }
2873
2874 next_period = TX_RECLAIM_PERIOD >>
2875 (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
2876 TX_RECLAIM_TIMER_CHUNK);
2877 mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
2878}
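/*
 * Scheduling note: the shift above halves the reclaim period for every full
 * TX_RECLAIM_TIMER_CHUNK of descriptors reclaimed in this run, so a queue
 * that is retiring work quickly is revisited sooner.
 */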
2879
2880/*
2881 * sge_timer_rx - perform periodic maintenance of an SGE qset
2882 * @data: the SGE queue set to maintain
2883 *
2884 * a) Replenishes Rx queues that have run out due to memory shortage.
2885 * Normally new Rx buffers are added when existing ones are consumed but
2886 * when out of memory a queue can become empty. We try to add only a few
2887 * buffers here; the queue will be replenished fully as these new buffers
2888 * are used up, provided the memory shortage has subsided.
2889 *
2890 * b) Return coalesced response queue credits in case a response queue is
2891 * starved.
2892 *
2893 */
2894static void sge_timer_rx(unsigned long data)
2895{
2896 spinlock_t *lock;
2897 struct sge_qset *qs = (struct sge_qset *)data;
2898 struct port_info *pi = netdev_priv(qs->netdev);
2899 struct adapter *adap = pi->adapter;
2900 u32 status;
2901
2902 lock = adap->params.rev > 0 ?
2903 &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
2904
2905 if (!spin_trylock_irq(lock))
2906 goto out;
2907
2908 if (napi_is_scheduled(&qs->napi))
2909 goto unlock;
2910
2911 if (adap->params.rev < 4) {
2912 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2913
2914 if (status & (1 << qs->rspq.cntxt_id)) {
2915 qs->rspq.starved++;
2916 if (qs->rspq.credits) {
2917 qs->rspq.credits--;
2918 refill_rspq(adap, &qs->rspq, 1);
2919 qs->rspq.restarted++;
2920 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2921 1 << qs->rspq.cntxt_id);
2922 }
2923 }
2924 }
2925
2926 if (qs->fl[0].credits < qs->fl[0].size)
2927 __refill_fl(adap, &qs->fl[0]);
2928 if (qs->fl[1].credits < qs->fl[1].size)
2929 __refill_fl(adap, &qs->fl[1]);
2930
2931unlock:
2932 spin_unlock_irq(lock);
2933out:
2934 mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
2935}
2936
2937/**
2938 * t3_update_qset_coalesce - update coalescing settings for a queue set
2939 * @qs: the SGE queue set
2940 * @p: new queue set parameters
2941 *
2942 * Update the coalescing settings for an SGE queue set. Nothing is done
2943 * if the queue set is not initialized yet.
2944 */
2945void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2946{
2947 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
2948 qs->rspq.polling = p->polling;
2949 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
2950}
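/*
 * Unit note: holdoff_tmr is kept in units of 0.1 us (the same units as
 * NOMEM_INTR_DELAY above), so the default 5 us set up in t3_sge_prep()
 * becomes a holdoff value of 50.
 */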
2951
2952/**
2953 * t3_sge_alloc_qset - initialize an SGE queue set
2954 * @adapter: the adapter
2955 * @id: the queue set id
2956 * @nports: how many Ethernet ports will be using this queue set
2957 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2958 * @p: configuration parameters for this queue set
2959 * @ntxq: number of Tx queues for the queue set
2961 * @dev: net device associated with this queue set
2961 * @netdevq: net device TX queue associated with this queue set
2962 *
2963 * Allocate resources and initialize an SGE queue set. A queue set
2964 * comprises a response queue, two Rx free-buffer queues, and up to 3
2965 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2966 * queue, offload queue, and control queue.
2967 */
2968int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2969 int irq_vec_idx, const struct qset_params *p,
2970 int ntxq, struct net_device *dev,
2971 struct netdev_queue *netdevq)
2972{
2973 int i, avail, ret = -ENOMEM;
2974 struct sge_qset *q = &adapter->sge.qs[id];
2975
2976 init_qset_cntxt(q, id);
2977 setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q);
2978 setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q);
2979
2980 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2981 sizeof(struct rx_desc),
2982 sizeof(struct rx_sw_desc),
2983 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2984 if (!q->fl[0].desc)
2985 goto err;
2986
2987 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2988 sizeof(struct rx_desc),
2989 sizeof(struct rx_sw_desc),
2990 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2991 if (!q->fl[1].desc)
2992 goto err;
2993
2994 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2995 sizeof(struct rsp_desc), 0,
2996 &q->rspq.phys_addr, NULL);
2997 if (!q->rspq.desc)
2998 goto err;
2999
3000 for (i = 0; i < ntxq; ++i) {
3001 /*
3002 * The control queue always uses immediate data so does not
3003 * need to keep track of any sk_buffs.
3004 */
3005 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
3006
3007 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
3008 sizeof(struct tx_desc), sz,
3009 &q->txq[i].phys_addr,
3010 &q->txq[i].sdesc);
3011 if (!q->txq[i].desc)
3012 goto err;
3013
3014 q->txq[i].gen = 1;
3015 q->txq[i].size = p->txq_size[i];
3016 spin_lock_init(&q->txq[i].lock);
3017 skb_queue_head_init(&q->txq[i].sendq);
3018 }
3019
3020 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
3021 (unsigned long)q);
3022 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
3023 (unsigned long)q);
3024
3025 q->fl[0].gen = q->fl[1].gen = 1;
3026 q->fl[0].size = p->fl_size;
3027 q->fl[1].size = p->jumbo_size;
3028
3029 q->rspq.gen = 1;
3030 q->rspq.size = p->rspq_size;
3031 spin_lock_init(&q->rspq.lock);
3032 skb_queue_head_init(&q->rspq.rx_queue);
3033
3034 q->txq[TXQ_ETH].stop_thres = nports *
3035 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
3036
3037#if FL0_PG_CHUNK_SIZE > 0
3038 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
3039#else
3040 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
3041#endif
3042#if FL1_PG_CHUNK_SIZE > 0
3043 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
3044#else
3045 q->fl[1].buf_size = is_offload(adapter) ?
3046 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
3047 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
3048#endif
3049
3050 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
3051 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
3052 q->fl[0].order = FL0_PG_ORDER;
3053 q->fl[1].order = FL1_PG_ORDER;
3054 q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
3055 q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
3056
3057 spin_lock_irq(&adapter->sge.reg_lock);
3058
3059 /* FL threshold comparison uses < */
3060 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
3061 q->rspq.phys_addr, q->rspq.size,
3062 q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
3063 if (ret)
3064 goto err_unlock;
3065
3066 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
3067 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
3068 q->fl[i].phys_addr, q->fl[i].size,
3069 q->fl[i].buf_size - SGE_PG_RSVD,
3070 p->cong_thres, 1, 0);
3071 if (ret)
3072 goto err_unlock;
3073 }
3074
3075 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
3076 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
3077 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
3078 1, 0);
3079 if (ret)
3080 goto err_unlock;
3081
3082 if (ntxq > 1) {
3083 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
3084 USE_GTS, SGE_CNTXT_OFLD, id,
3085 q->txq[TXQ_OFLD].phys_addr,
3086 q->txq[TXQ_OFLD].size, 0, 1, 0);
3087 if (ret)
3088 goto err_unlock;
3089 }
3090
3091 if (ntxq > 2) {
3092 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
3093 SGE_CNTXT_CTRL, id,
3094 q->txq[TXQ_CTRL].phys_addr,
3095 q->txq[TXQ_CTRL].size,
3096 q->txq[TXQ_CTRL].token, 1, 0);
3097 if (ret)
3098 goto err_unlock;
3099 }
3100
3101 spin_unlock_irq(&adapter->sge.reg_lock);
3102
3103 q->adap = adapter;
3104 q->netdev = dev;
3105 q->tx_q = netdevq;
3106 t3_update_qset_coalesce(q, p);
3107
3108 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
3109 GFP_KERNEL | __GFP_COMP);
3110 if (!avail) {
3111 CH_ALERT(adapter, "free list queue 0 initialization failed\n");
3112 goto err;
3113 }
3114 if (avail < q->fl[0].size)
3115 CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
3116 avail);
3117
3118 avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
3119 GFP_KERNEL | __GFP_COMP);
3120 if (avail < q->fl[1].size)
3121 CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
3122 avail);
3123 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
3124
3125 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
3126 V_NEWTIMER(q->rspq.holdoff_tmr));
3127
3128 return 0;
3129
3130err_unlock:
3131 spin_unlock_irq(&adapter->sge.reg_lock);
3132err:
3133 t3_free_qset(adapter, q);
3134 return ret;
3135}
3136
3137/**
3138 * t3_start_sge_timers - start SGE timer call backs
3139 * @adap: the adapter
3140 *
3141 * Starts each SGE queue set's timer call back
3142 */
3143void t3_start_sge_timers(struct adapter *adap)
3144{
3145 int i;
3146
3147 for (i = 0; i < SGE_QSETS; ++i) {
3148 struct sge_qset *q = &adap->sge.qs[i];
3149
3150 if (q->tx_reclaim_timer.function)
3151 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
3152
3153 if (q->rx_reclaim_timer.function)
3154 mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
3155 }
3156}
3157
3158/**
3159 * t3_stop_sge_timers - stop SGE timer call backs
3160 * @adap: the adapter
3161 *
3162 * Stops each SGE queue set's timer call back
3163 */
3164void t3_stop_sge_timers(struct adapter *adap)
3165{
3166 int i;
3167
3168 for (i = 0; i < SGE_QSETS; ++i) {
3169 struct sge_qset *q = &adap->sge.qs[i];
3170
3171 if (q->tx_reclaim_timer.function)
3172 del_timer_sync(&q->tx_reclaim_timer);
3173 if (q->rx_reclaim_timer.function)
3174 del_timer_sync(&q->rx_reclaim_timer);
3175 }
3176}
3177
3178/**
3179 * t3_free_sge_resources - free SGE resources
3180 * @adap: the adapter
3181 *
3182 * Frees resources used by the SGE queue sets.
3183 */
3184void t3_free_sge_resources(struct adapter *adap)
3185{
3186 int i;
3187
3188 for (i = 0; i < SGE_QSETS; ++i)
3189 t3_free_qset(adap, &adap->sge.qs[i]);
3190}
3191
3192/**
3193 * t3_sge_start - enable SGE
3194 * @adap: the adapter
3195 *
3196 * Enables the SGE for DMAs. This is the last step in starting packet
3197 * transfers.
3198 */
3199void t3_sge_start(struct adapter *adap)
3200{
3201 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
3202}
3203
3204/**
3205 * t3_sge_stop - disable SGE operation
3206 * @adap: the adapter
3207 *
3208 * Disables the DMA engine. This can be called in emergencies (e.g.,
3209 * from error interrupts) or from normal process context. In the latter
3210 * case it also disables any pending queue restart tasklets. Note that
3211 * if it is called in interrupt context it cannot disable the restart
3212 * tasklets as it cannot wait; however, the tasklets will have no effect
3213 * since the doorbells are disabled and the driver will call this again
3214 * later from process context, at which time the tasklets will be stopped
3215 * if they are still running.
3216 */
3217void t3_sge_stop(struct adapter *adap)
3218{
3219 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
3220 if (!in_interrupt()) {
3221 int i;
3222
3223 for (i = 0; i < SGE_QSETS; ++i) {
3224 struct sge_qset *qs = &adap->sge.qs[i];
3225
3226 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
3227 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
3228 }
3229 }
3230}
3231
3232/**
3233 * t3_sge_init - initialize SGE
3234 * @adap: the adapter
3235 * @p: the SGE parameters
3236 *
3237 * Performs SGE initialization needed every time after a chip reset.
3238 * We do not initialize any of the queue sets here, instead the driver
3239 * top-level must request those individually. We also do not enable DMA
3240 * here, that should be done after the queues have been set up.
3241 */
3242void t3_sge_init(struct adapter *adap, struct sge_params *p)
3243{
3244 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
3245
3246 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
3247 F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
3248 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
3249 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
3250#if SGE_NUM_GENBITS == 1
3251 ctrl |= F_EGRGENCTRL;
3252#endif
3253 if (adap->params.rev > 0) {
3254 if (!(adap->flags & (USING_MSIX | USING_MSI)))
3255 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
3256 }
3257 t3_write_reg(adap, A_SG_CONTROL, ctrl);
3258 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
3259 V_LORCQDRBTHRSH(512));
3260 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
3261 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
3262 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
3263 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
3264 adap->params.rev < T3_REV_C ? 1000 : 500);
3265 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
3266 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
3267 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
3268 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
3269 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
3270}
3271
3272/**
3273 * t3_sge_prep - one-time SGE initialization
3274 * @adap: the associated adapter
3275 * @p: SGE parameters
3276 *
3277 * Performs one-time initialization of SGE SW state. Includes determining
3278 * defaults for the assorted SGE parameters, which admins can change until
3279 * they are used to initialize the SGE.
3280 */
3281void t3_sge_prep(struct adapter *adap, struct sge_params *p)
3282{
3283 int i;
3284
3285 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
3286 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3287
3288 for (i = 0; i < SGE_QSETS; ++i) {
3289 struct qset_params *q = p->qset + i;
3290
3291 q->polling = adap->params.rev > 0;
3292 q->coalesce_usecs = 5;
3293 q->rspq_size = 1024;
3294 q->fl_size = 1024;
3295 q->jumbo_size = 512;
3296 q->txq_size[TXQ_ETH] = 1024;
3297 q->txq_size[TXQ_OFLD] = 1024;
3298 q->txq_size[TXQ_CTRL] = 256;
3299 q->cong_thres = 0;
3300 }
3301
3302 spin_lock_init(&adap->sge.reg_lock);
3303}
diff --git a/drivers/net/cxgb3/sge_defs.h b/drivers/net/cxgb3/sge_defs.h
new file mode 100644
index 00000000000..29b6c800b23
--- /dev/null
+++ b/drivers/net/cxgb3/sge_defs.h
@@ -0,0 +1,255 @@
1/*
2 * This file is automatically generated --- any changes will be lost.
3 */
4
5#ifndef _SGE_DEFS_H
6#define _SGE_DEFS_H
7
8#define S_EC_CREDITS 0
9#define M_EC_CREDITS 0x7FFF
10#define V_EC_CREDITS(x) ((x) << S_EC_CREDITS)
11#define G_EC_CREDITS(x) (((x) >> S_EC_CREDITS) & M_EC_CREDITS)
12
13#define S_EC_GTS 15
14#define V_EC_GTS(x) ((x) << S_EC_GTS)
15#define F_EC_GTS V_EC_GTS(1U)
16
17#define S_EC_INDEX 16
18#define M_EC_INDEX 0xFFFF
19#define V_EC_INDEX(x) ((x) << S_EC_INDEX)
20#define G_EC_INDEX(x) (((x) >> S_EC_INDEX) & M_EC_INDEX)
21
22#define S_EC_SIZE 0
23#define M_EC_SIZE 0xFFFF
24#define V_EC_SIZE(x) ((x) << S_EC_SIZE)
25#define G_EC_SIZE(x) (((x) >> S_EC_SIZE) & M_EC_SIZE)
26
27#define S_EC_BASE_LO 16
28#define M_EC_BASE_LO 0xFFFF
29#define V_EC_BASE_LO(x) ((x) << S_EC_BASE_LO)
30#define G_EC_BASE_LO(x) (((x) >> S_EC_BASE_LO) & M_EC_BASE_LO)
31
32#define S_EC_BASE_HI 0
33#define M_EC_BASE_HI 0xF
34#define V_EC_BASE_HI(x) ((x) << S_EC_BASE_HI)
35#define G_EC_BASE_HI(x) (((x) >> S_EC_BASE_HI) & M_EC_BASE_HI)
36
37#define S_EC_RESPQ 4
38#define M_EC_RESPQ 0x7
39#define V_EC_RESPQ(x) ((x) << S_EC_RESPQ)
40#define G_EC_RESPQ(x) (((x) >> S_EC_RESPQ) & M_EC_RESPQ)
41
42#define S_EC_TYPE 7
43#define M_EC_TYPE 0x7
44#define V_EC_TYPE(x) ((x) << S_EC_TYPE)
45#define G_EC_TYPE(x) (((x) >> S_EC_TYPE) & M_EC_TYPE)
46
47#define S_EC_GEN 10
48#define V_EC_GEN(x) ((x) << S_EC_GEN)
49#define F_EC_GEN V_EC_GEN(1U)
50
51#define S_EC_UP_TOKEN 11
52#define M_EC_UP_TOKEN 0xFFFFF
53#define V_EC_UP_TOKEN(x) ((x) << S_EC_UP_TOKEN)
54#define G_EC_UP_TOKEN(x) (((x) >> S_EC_UP_TOKEN) & M_EC_UP_TOKEN)
55
56#define S_EC_VALID 31
57#define V_EC_VALID(x) ((x) << S_EC_VALID)
58#define F_EC_VALID V_EC_VALID(1U)
59
60#define S_RQ_MSI_VEC 20
61#define M_RQ_MSI_VEC 0x3F
62#define V_RQ_MSI_VEC(x) ((x) << S_RQ_MSI_VEC)
63#define G_RQ_MSI_VEC(x) (((x) >> S_RQ_MSI_VEC) & M_RQ_MSI_VEC)
64
65#define S_RQ_INTR_EN 26
66#define V_RQ_INTR_EN(x) ((x) << S_RQ_INTR_EN)
67#define F_RQ_INTR_EN V_RQ_INTR_EN(1U)
68
69#define S_RQ_GEN 28
70#define V_RQ_GEN(x) ((x) << S_RQ_GEN)
71#define F_RQ_GEN V_RQ_GEN(1U)
72
73#define S_CQ_INDEX 0
74#define M_CQ_INDEX 0xFFFF
75#define V_CQ_INDEX(x) ((x) << S_CQ_INDEX)
76#define G_CQ_INDEX(x) (((x) >> S_CQ_INDEX) & M_CQ_INDEX)
77
78#define S_CQ_SIZE 16
79#define M_CQ_SIZE 0xFFFF
80#define V_CQ_SIZE(x) ((x) << S_CQ_SIZE)
81#define G_CQ_SIZE(x) (((x) >> S_CQ_SIZE) & M_CQ_SIZE)
82
83#define S_CQ_BASE_HI 0
84#define M_CQ_BASE_HI 0xFFFFF
85#define V_CQ_BASE_HI(x) ((x) << S_CQ_BASE_HI)
86#define G_CQ_BASE_HI(x) (((x) >> S_CQ_BASE_HI) & M_CQ_BASE_HI)
87
88#define S_CQ_RSPQ 20
89#define M_CQ_RSPQ 0x3F
90#define V_CQ_RSPQ(x) ((x) << S_CQ_RSPQ)
91#define G_CQ_RSPQ(x) (((x) >> S_CQ_RSPQ) & M_CQ_RSPQ)
92
93#define S_CQ_ASYNC_NOTIF 26
94#define V_CQ_ASYNC_NOTIF(x) ((x) << S_CQ_ASYNC_NOTIF)
95#define F_CQ_ASYNC_NOTIF V_CQ_ASYNC_NOTIF(1U)
96
97#define S_CQ_ARMED 27
98#define V_CQ_ARMED(x) ((x) << S_CQ_ARMED)
99#define F_CQ_ARMED V_CQ_ARMED(1U)
100
101#define S_CQ_ASYNC_NOTIF_SOL 28
102#define V_CQ_ASYNC_NOTIF_SOL(x) ((x) << S_CQ_ASYNC_NOTIF_SOL)
103#define F_CQ_ASYNC_NOTIF_SOL V_CQ_ASYNC_NOTIF_SOL(1U)
104
105#define S_CQ_GEN 29
106#define V_CQ_GEN(x) ((x) << S_CQ_GEN)
107#define F_CQ_GEN V_CQ_GEN(1U)
108
109#define S_CQ_ERR 30
110#define V_CQ_ERR(x) ((x) << S_CQ_ERR)
111#define F_CQ_ERR V_CQ_ERR(1U)
112
113#define S_CQ_OVERFLOW_MODE 31
114#define V_CQ_OVERFLOW_MODE(x) ((x) << S_CQ_OVERFLOW_MODE)
115#define F_CQ_OVERFLOW_MODE V_CQ_OVERFLOW_MODE(1U)
116
117#define S_CQ_CREDITS 0
118#define M_CQ_CREDITS 0xFFFF
119#define V_CQ_CREDITS(x) ((x) << S_CQ_CREDITS)
120#define G_CQ_CREDITS(x) (((x) >> S_CQ_CREDITS) & M_CQ_CREDITS)
121
122#define S_CQ_CREDIT_THRES 16
123#define M_CQ_CREDIT_THRES 0x1FFF
124#define V_CQ_CREDIT_THRES(x) ((x) << S_CQ_CREDIT_THRES)
125#define G_CQ_CREDIT_THRES(x) (((x) >> S_CQ_CREDIT_THRES) & M_CQ_CREDIT_THRES)
126
127#define S_FL_BASE_HI 0
128#define M_FL_BASE_HI 0xFFFFF
129#define V_FL_BASE_HI(x) ((x) << S_FL_BASE_HI)
130#define G_FL_BASE_HI(x) (((x) >> S_FL_BASE_HI) & M_FL_BASE_HI)
131
132#define S_FL_INDEX_LO 20
133#define M_FL_INDEX_LO 0xFFF
134#define V_FL_INDEX_LO(x) ((x) << S_FL_INDEX_LO)
135#define G_FL_INDEX_LO(x) (((x) >> S_FL_INDEX_LO) & M_FL_INDEX_LO)
136
137#define S_FL_INDEX_HI 0
138#define M_FL_INDEX_HI 0xF
139#define V_FL_INDEX_HI(x) ((x) << S_FL_INDEX_HI)
140#define G_FL_INDEX_HI(x) (((x) >> S_FL_INDEX_HI) & M_FL_INDEX_HI)
141
142#define S_FL_SIZE 4
143#define M_FL_SIZE 0xFFFF
144#define V_FL_SIZE(x) ((x) << S_FL_SIZE)
145#define G_FL_SIZE(x) (((x) >> S_FL_SIZE) & M_FL_SIZE)
146
147#define S_FL_GEN 20
148#define V_FL_GEN(x) ((x) << S_FL_GEN)
149#define F_FL_GEN V_FL_GEN(1U)
150
151#define S_FL_ENTRY_SIZE_LO 21
152#define M_FL_ENTRY_SIZE_LO 0x7FF
153#define V_FL_ENTRY_SIZE_LO(x) ((x) << S_FL_ENTRY_SIZE_LO)
154#define G_FL_ENTRY_SIZE_LO(x) (((x) >> S_FL_ENTRY_SIZE_LO) & M_FL_ENTRY_SIZE_LO)
155
156#define S_FL_ENTRY_SIZE_HI 0
157#define M_FL_ENTRY_SIZE_HI 0x1FFFFF
158#define V_FL_ENTRY_SIZE_HI(x) ((x) << S_FL_ENTRY_SIZE_HI)
159#define G_FL_ENTRY_SIZE_HI(x) (((x) >> S_FL_ENTRY_SIZE_HI) & M_FL_ENTRY_SIZE_HI)
160
161#define S_FL_CONG_THRES 21
162#define M_FL_CONG_THRES 0x3FF
163#define V_FL_CONG_THRES(x) ((x) << S_FL_CONG_THRES)
164#define G_FL_CONG_THRES(x) (((x) >> S_FL_CONG_THRES) & M_FL_CONG_THRES)
165
166#define S_FL_GTS 31
167#define V_FL_GTS(x) ((x) << S_FL_GTS)
168#define F_FL_GTS V_FL_GTS(1U)
169
170#define S_FLD_GEN1 31
171#define V_FLD_GEN1(x) ((x) << S_FLD_GEN1)
172#define F_FLD_GEN1 V_FLD_GEN1(1U)
173
174#define S_FLD_GEN2 0
175#define V_FLD_GEN2(x) ((x) << S_FLD_GEN2)
176#define F_FLD_GEN2 V_FLD_GEN2(1U)
177
178#define S_RSPD_TXQ1_CR 0
179#define M_RSPD_TXQ1_CR 0x7F
180#define V_RSPD_TXQ1_CR(x) ((x) << S_RSPD_TXQ1_CR)
181#define G_RSPD_TXQ1_CR(x) (((x) >> S_RSPD_TXQ1_CR) & M_RSPD_TXQ1_CR)
182
183#define S_RSPD_TXQ1_GTS 7
184#define V_RSPD_TXQ1_GTS(x) ((x) << S_RSPD_TXQ1_GTS)
185#define F_RSPD_TXQ1_GTS V_RSPD_TXQ1_GTS(1U)
186
187#define S_RSPD_TXQ2_CR 8
188#define M_RSPD_TXQ2_CR 0x7F
189#define V_RSPD_TXQ2_CR(x) ((x) << S_RSPD_TXQ2_CR)
190#define G_RSPD_TXQ2_CR(x) (((x) >> S_RSPD_TXQ2_CR) & M_RSPD_TXQ2_CR)
191
192#define S_RSPD_TXQ2_GTS 15
193#define V_RSPD_TXQ2_GTS(x) ((x) << S_RSPD_TXQ2_GTS)
194#define F_RSPD_TXQ2_GTS V_RSPD_TXQ2_GTS(1U)
195
196#define S_RSPD_TXQ0_CR 16
197#define M_RSPD_TXQ0_CR 0x7F
198#define V_RSPD_TXQ0_CR(x) ((x) << S_RSPD_TXQ0_CR)
199#define G_RSPD_TXQ0_CR(x) (((x) >> S_RSPD_TXQ0_CR) & M_RSPD_TXQ0_CR)
200
201#define S_RSPD_TXQ0_GTS 23
202#define V_RSPD_TXQ0_GTS(x) ((x) << S_RSPD_TXQ0_GTS)
203#define F_RSPD_TXQ0_GTS V_RSPD_TXQ0_GTS(1U)
204
205#define S_RSPD_EOP 24
206#define V_RSPD_EOP(x) ((x) << S_RSPD_EOP)
207#define F_RSPD_EOP V_RSPD_EOP(1U)
208
209#define S_RSPD_SOP 25
210#define V_RSPD_SOP(x) ((x) << S_RSPD_SOP)
211#define F_RSPD_SOP V_RSPD_SOP(1U)
212
213#define S_RSPD_ASYNC_NOTIF 26
214#define V_RSPD_ASYNC_NOTIF(x) ((x) << S_RSPD_ASYNC_NOTIF)
215#define F_RSPD_ASYNC_NOTIF V_RSPD_ASYNC_NOTIF(1U)
216
217#define S_RSPD_FL0_GTS 27
218#define V_RSPD_FL0_GTS(x) ((x) << S_RSPD_FL0_GTS)
219#define F_RSPD_FL0_GTS V_RSPD_FL0_GTS(1U)
220
221#define S_RSPD_FL1_GTS 28
222#define V_RSPD_FL1_GTS(x) ((x) << S_RSPD_FL1_GTS)
223#define F_RSPD_FL1_GTS V_RSPD_FL1_GTS(1U)
224
225#define S_RSPD_IMM_DATA_VALID 29
226#define V_RSPD_IMM_DATA_VALID(x) ((x) << S_RSPD_IMM_DATA_VALID)
227#define F_RSPD_IMM_DATA_VALID V_RSPD_IMM_DATA_VALID(1U)
228
229#define S_RSPD_OFFLOAD 30
230#define V_RSPD_OFFLOAD(x) ((x) << S_RSPD_OFFLOAD)
231#define F_RSPD_OFFLOAD V_RSPD_OFFLOAD(1U)
232
233#define S_RSPD_GEN1 31
234#define V_RSPD_GEN1(x) ((x) << S_RSPD_GEN1)
235#define F_RSPD_GEN1 V_RSPD_GEN1(1U)
236
237#define S_RSPD_LEN 0
238#define M_RSPD_LEN 0x7FFFFFFF
239#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN)
240#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN)
241
242#define S_RSPD_FLQ 31
243#define V_RSPD_FLQ(x) ((x) << S_RSPD_FLQ)
244#define F_RSPD_FLQ V_RSPD_FLQ(1U)
245
246#define S_RSPD_GEN2 0
247#define V_RSPD_GEN2(x) ((x) << S_RSPD_GEN2)
248#define F_RSPD_GEN2 V_RSPD_GEN2(1U)
249
250#define S_RSPD_INR_VEC 1
251#define M_RSPD_INR_VEC 0x7F
252#define V_RSPD_INR_VEC(x) ((x) << S_RSPD_INR_VEC)
253#define G_RSPD_INR_VEC(x) (((x) >> S_RSPD_INR_VEC) & M_RSPD_INR_VEC)
254
255#endif /* _SGE_DEFS_H */
diff --git a/drivers/net/cxgb3/t3_cpl.h b/drivers/net/cxgb3/t3_cpl.h
new file mode 100644
index 00000000000..852c399a8b0
--- /dev/null
+++ b/drivers/net/cxgb3/t3_cpl.h
@@ -0,0 +1,1495 @@
1/*
2 * Copyright (c) 2004-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef T3_CPL_H
33#define T3_CPL_H
34
35#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
36# include <asm/byteorder.h>
37#endif
38
39enum CPL_opcode {
40 CPL_PASS_OPEN_REQ = 0x1,
41 CPL_PASS_ACCEPT_RPL = 0x2,
42 CPL_ACT_OPEN_REQ = 0x3,
43 CPL_SET_TCB = 0x4,
44 CPL_SET_TCB_FIELD = 0x5,
45 CPL_GET_TCB = 0x6,
46 CPL_PCMD = 0x7,
47 CPL_CLOSE_CON_REQ = 0x8,
48 CPL_CLOSE_LISTSRV_REQ = 0x9,
49 CPL_ABORT_REQ = 0xA,
50 CPL_ABORT_RPL = 0xB,
51 CPL_TX_DATA = 0xC,
52 CPL_RX_DATA_ACK = 0xD,
53 CPL_TX_PKT = 0xE,
54 CPL_RTE_DELETE_REQ = 0xF,
55 CPL_RTE_WRITE_REQ = 0x10,
56 CPL_RTE_READ_REQ = 0x11,
57 CPL_L2T_WRITE_REQ = 0x12,
58 CPL_L2T_READ_REQ = 0x13,
59 CPL_SMT_WRITE_REQ = 0x14,
60 CPL_SMT_READ_REQ = 0x15,
61 CPL_TX_PKT_LSO = 0x16,
62 CPL_PCMD_READ = 0x17,
63 CPL_BARRIER = 0x18,
64 CPL_TID_RELEASE = 0x1A,
65
66 CPL_CLOSE_LISTSRV_RPL = 0x20,
67 CPL_ERROR = 0x21,
68 CPL_GET_TCB_RPL = 0x22,
69 CPL_L2T_WRITE_RPL = 0x23,
70 CPL_PCMD_READ_RPL = 0x24,
71 CPL_PCMD_RPL = 0x25,
72 CPL_PEER_CLOSE = 0x26,
73 CPL_RTE_DELETE_RPL = 0x27,
74 CPL_RTE_WRITE_RPL = 0x28,
75 CPL_RX_DDP_COMPLETE = 0x29,
76 CPL_RX_PHYS_ADDR = 0x2A,
77 CPL_RX_PKT = 0x2B,
78 CPL_RX_URG_NOTIFY = 0x2C,
79 CPL_SET_TCB_RPL = 0x2D,
80 CPL_SMT_WRITE_RPL = 0x2E,
81 CPL_TX_DATA_ACK = 0x2F,
82
83 CPL_ABORT_REQ_RSS = 0x30,
84 CPL_ABORT_RPL_RSS = 0x31,
85 CPL_CLOSE_CON_RPL = 0x32,
86 CPL_ISCSI_HDR = 0x33,
87 CPL_L2T_READ_RPL = 0x34,
88 CPL_RDMA_CQE = 0x35,
89 CPL_RDMA_CQE_READ_RSP = 0x36,
90 CPL_RDMA_CQE_ERR = 0x37,
91 CPL_RTE_READ_RPL = 0x38,
92 CPL_RX_DATA = 0x39,
93
94 CPL_ACT_OPEN_RPL = 0x40,
95 CPL_PASS_OPEN_RPL = 0x41,
96 CPL_RX_DATA_DDP = 0x42,
97 CPL_SMT_READ_RPL = 0x43,
98
99 CPL_ACT_ESTABLISH = 0x50,
100 CPL_PASS_ESTABLISH = 0x51,
101
102 CPL_PASS_ACCEPT_REQ = 0x70,
103
104 CPL_ASYNC_NOTIF = 0x80, /* fake opcode for async notifications */
105
106 CPL_TX_DMA_ACK = 0xA0,
107 CPL_RDMA_READ_REQ = 0xA1,
108 CPL_RDMA_TERMINATE = 0xA2,
109 CPL_TRACE_PKT = 0xA3,
110 CPL_RDMA_EC_STATUS = 0xA5,
111
112 NUM_CPL_CMDS /* must be last and previous entries must be sorted */
113};
114
115enum CPL_error {
116 CPL_ERR_NONE = 0,
117 CPL_ERR_TCAM_PARITY = 1,
118 CPL_ERR_TCAM_FULL = 3,
119 CPL_ERR_CONN_RESET = 20,
120 CPL_ERR_CONN_EXIST = 22,
121 CPL_ERR_ARP_MISS = 23,
122 CPL_ERR_BAD_SYN = 24,
123 CPL_ERR_CONN_TIMEDOUT = 30,
124 CPL_ERR_XMIT_TIMEDOUT = 31,
125 CPL_ERR_PERSIST_TIMEDOUT = 32,
126 CPL_ERR_FINWAIT2_TIMEDOUT = 33,
127 CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
128 CPL_ERR_RTX_NEG_ADVICE = 35,
129 CPL_ERR_PERSIST_NEG_ADVICE = 36,
130 CPL_ERR_ABORT_FAILED = 42,
131 CPL_ERR_GENERAL = 99
132};
133
134enum {
135 CPL_CONN_POLICY_AUTO = 0,
136 CPL_CONN_POLICY_ASK = 1,
137 CPL_CONN_POLICY_DENY = 3
138};
139
140enum {
141 ULP_MODE_NONE = 0,
142 ULP_MODE_ISCSI = 2,
143 ULP_MODE_RDMA = 4,
144 ULP_MODE_TCPDDP = 5
145};
146
147enum {
148 ULP_CRC_HEADER = 1 << 0,
149 ULP_CRC_DATA = 1 << 1
150};
151
152enum {
153 CPL_PASS_OPEN_ACCEPT,
154 CPL_PASS_OPEN_REJECT
155};
156
157enum {
158 CPL_ABORT_SEND_RST = 0,
159 CPL_ABORT_NO_RST,
160 CPL_ABORT_POST_CLOSE_REQ = 2
161};
162
163enum { /* TX_PKT_LSO ethernet types */
164 CPL_ETH_II,
165 CPL_ETH_II_VLAN,
166 CPL_ETH_802_3,
167 CPL_ETH_802_3_VLAN
168};
169
170enum { /* TCP congestion control algorithms */
171 CONG_ALG_RENO,
172 CONG_ALG_TAHOE,
173 CONG_ALG_NEWRENO,
174 CONG_ALG_HIGHSPEED
175};
176
177enum { /* RSS hash type */
178 RSS_HASH_NONE = 0,
179 RSS_HASH_2_TUPLE = 1,
180 RSS_HASH_4_TUPLE = 2,
181 RSS_HASH_TCPV6 = 3
182};
183
184union opcode_tid {
185 __be32 opcode_tid;
186 __u8 opcode;
187};
188
189#define S_OPCODE 24
190#define V_OPCODE(x) ((x) << S_OPCODE)
191#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
192#define G_TID(x) ((x) & 0xFFFFFF)
193
194#define S_QNUM 0
195#define G_QNUM(x) (((x) >> S_QNUM) & 0xFFFF)
196
197#define S_HASHTYPE 22
198#define M_HASHTYPE 0x3
199#define G_HASHTYPE(x) (((x) >> S_HASHTYPE) & M_HASHTYPE)
200
201/* tid is assumed to be 24-bits */
202#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))
203
204#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
205
206/* extract the TID from a CPL command */
207#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
208
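/*
 * Illustrative sketch (assumption, not part of the original header): the
 * first CPL word packs the 8-bit opcode above the 24-bit tid, so a sender
 * builds it with MK_OPCODE_TID() and a receiver recovers the tid with
 * GET_TID().  A hypothetical helper:
 */
static inline void example_set_opcode_tid(union opcode_tid *ot,
					  unsigned int opcode, unsigned int tid)
{
	ot->opcode_tid = htonl(MK_OPCODE_TID(opcode, tid));
}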
209struct tcp_options {
210 __be16 mss;
211 __u8 wsf;
212#if defined(__LITTLE_ENDIAN_BITFIELD)
213 __u8:5;
214 __u8 ecn:1;
215 __u8 sack:1;
216 __u8 tstamp:1;
217#else
218 __u8 tstamp:1;
219 __u8 sack:1;
220 __u8 ecn:1;
221 __u8:5;
222#endif
223};
224
225struct rss_header {
226 __u8 opcode;
227#if defined(__LITTLE_ENDIAN_BITFIELD)
228 __u8 cpu_idx:6;
229 __u8 hash_type:2;
230#else
231 __u8 hash_type:2;
232 __u8 cpu_idx:6;
233#endif
234 __be16 cq_idx;
235 __be32 rss_hash_val;
236};
237
238#ifndef CHELSIO_FW
239struct work_request_hdr {
240 __be32 wr_hi;
241 __be32 wr_lo;
242};
243
244/* wr_hi fields */
245#define S_WR_SGE_CREDITS 0
246#define M_WR_SGE_CREDITS 0xFF
247#define V_WR_SGE_CREDITS(x) ((x) << S_WR_SGE_CREDITS)
248#define G_WR_SGE_CREDITS(x) (((x) >> S_WR_SGE_CREDITS) & M_WR_SGE_CREDITS)
249
250#define S_WR_SGLSFLT 8
251#define M_WR_SGLSFLT 0xFF
252#define V_WR_SGLSFLT(x) ((x) << S_WR_SGLSFLT)
253#define G_WR_SGLSFLT(x) (((x) >> S_WR_SGLSFLT) & M_WR_SGLSFLT)
254
255#define S_WR_BCNTLFLT 16
256#define M_WR_BCNTLFLT 0xF
257#define V_WR_BCNTLFLT(x) ((x) << S_WR_BCNTLFLT)
258#define G_WR_BCNTLFLT(x) (((x) >> S_WR_BCNTLFLT) & M_WR_BCNTLFLT)
259
260#define S_WR_DATATYPE 20
261#define V_WR_DATATYPE(x) ((x) << S_WR_DATATYPE)
262#define F_WR_DATATYPE V_WR_DATATYPE(1U)
263
264#define S_WR_COMPL 21
265#define V_WR_COMPL(x) ((x) << S_WR_COMPL)
266#define F_WR_COMPL V_WR_COMPL(1U)
267
268#define S_WR_EOP 22
269#define V_WR_EOP(x) ((x) << S_WR_EOP)
270#define F_WR_EOP V_WR_EOP(1U)
271
272#define S_WR_SOP 23
273#define V_WR_SOP(x) ((x) << S_WR_SOP)
274#define F_WR_SOP V_WR_SOP(1U)
275
276#define S_WR_OP 24
277#define M_WR_OP 0xFF
278#define V_WR_OP(x) ((x) << S_WR_OP)
279#define G_WR_OP(x) (((x) >> S_WR_OP) & M_WR_OP)
280
281/* wr_lo fields */
282#define S_WR_LEN 0
283#define M_WR_LEN 0xFF
284#define V_WR_LEN(x) ((x) << S_WR_LEN)
285#define G_WR_LEN(x) (((x) >> S_WR_LEN) & M_WR_LEN)
286
287#define S_WR_TID 8
288#define M_WR_TID 0xFFFFF
289#define V_WR_TID(x) ((x) << S_WR_TID)
290#define G_WR_TID(x) (((x) >> S_WR_TID) & M_WR_TID)
291
292#define S_WR_CR_FLUSH 30
293#define V_WR_CR_FLUSH(x) ((x) << S_WR_CR_FLUSH)
294#define F_WR_CR_FLUSH V_WR_CR_FLUSH(1U)
295
296#define S_WR_GEN 31
297#define V_WR_GEN(x) ((x) << S_WR_GEN)
298#define F_WR_GEN V_WR_GEN(1U)
299
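/*
 * Illustrative sketch (assumption): the wr_hi/wr_lo words of a work
 * request header are assembled from the fields above.  The opcode, flit
 * count, tid and generation values here are hypothetical inputs, not
 * constants taken from this driver.
 */
static inline void example_fill_wr_hdr(struct work_request_hdr *wr,
				       unsigned int op, unsigned int flits,
				       unsigned int tid, unsigned int gen)
{
	wr->wr_hi = htonl(V_WR_OP(op) | F_WR_SOP | F_WR_EOP |
			  V_WR_SGE_CREDITS(flits));
	wr->wr_lo = htonl(V_WR_TID(tid) | V_WR_LEN(flits) | V_WR_GEN(gen));
}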
300# define WR_HDR struct work_request_hdr wr
301# define RSS_HDR
302#else
303# define WR_HDR
304# define RSS_HDR struct rss_header rss_hdr;
305#endif
306
307/* option 0 lower-half fields */
308#define S_CPL_STATUS 0
309#define M_CPL_STATUS 0xFF
310#define V_CPL_STATUS(x) ((x) << S_CPL_STATUS)
311#define G_CPL_STATUS(x) (((x) >> S_CPL_STATUS) & M_CPL_STATUS)
312
313#define S_INJECT_TIMER 6
314#define V_INJECT_TIMER(x) ((x) << S_INJECT_TIMER)
315#define F_INJECT_TIMER V_INJECT_TIMER(1U)
316
317#define S_NO_OFFLOAD 7
318#define V_NO_OFFLOAD(x) ((x) << S_NO_OFFLOAD)
319#define F_NO_OFFLOAD V_NO_OFFLOAD(1U)
320
321#define S_ULP_MODE 8
322#define M_ULP_MODE 0xF
323#define V_ULP_MODE(x) ((x) << S_ULP_MODE)
324#define G_ULP_MODE(x) (((x) >> S_ULP_MODE) & M_ULP_MODE)
325
326#define S_RCV_BUFSIZ 12
327#define M_RCV_BUFSIZ 0x3FFF
328#define V_RCV_BUFSIZ(x) ((x) << S_RCV_BUFSIZ)
329#define G_RCV_BUFSIZ(x) (((x) >> S_RCV_BUFSIZ) & M_RCV_BUFSIZ)
330
331#define S_TOS 26
332#define M_TOS 0x3F
333#define V_TOS(x) ((x) << S_TOS)
334#define G_TOS(x) (((x) >> S_TOS) & M_TOS)
335
336/* option 0 upper-half fields */
337#define S_DELACK 0
338#define V_DELACK(x) ((x) << S_DELACK)
339#define F_DELACK V_DELACK(1U)
340
341#define S_NO_CONG 1
342#define V_NO_CONG(x) ((x) << S_NO_CONG)
343#define F_NO_CONG V_NO_CONG(1U)
344
345#define S_SRC_MAC_SEL 2
346#define M_SRC_MAC_SEL 0x3
347#define V_SRC_MAC_SEL(x) ((x) << S_SRC_MAC_SEL)
348#define G_SRC_MAC_SEL(x) (((x) >> S_SRC_MAC_SEL) & M_SRC_MAC_SEL)
349
350#define S_L2T_IDX 4
351#define M_L2T_IDX 0x7FF
352#define V_L2T_IDX(x) ((x) << S_L2T_IDX)
353#define G_L2T_IDX(x) (((x) >> S_L2T_IDX) & M_L2T_IDX)
354
355#define S_TX_CHANNEL 15
356#define V_TX_CHANNEL(x) ((x) << S_TX_CHANNEL)
357#define F_TX_CHANNEL V_TX_CHANNEL(1U)
358
359#define S_TCAM_BYPASS 16
360#define V_TCAM_BYPASS(x) ((x) << S_TCAM_BYPASS)
361#define F_TCAM_BYPASS V_TCAM_BYPASS(1U)
362
363#define S_NAGLE 17
364#define V_NAGLE(x) ((x) << S_NAGLE)
365#define F_NAGLE V_NAGLE(1U)
366
367#define S_WND_SCALE 18
368#define M_WND_SCALE 0xF
369#define V_WND_SCALE(x) ((x) << S_WND_SCALE)
370#define G_WND_SCALE(x) (((x) >> S_WND_SCALE) & M_WND_SCALE)
371
372#define S_KEEP_ALIVE 22
373#define V_KEEP_ALIVE(x) ((x) << S_KEEP_ALIVE)
374#define F_KEEP_ALIVE V_KEEP_ALIVE(1U)
375
376#define S_MAX_RETRANS 23
377#define M_MAX_RETRANS 0xF
378#define V_MAX_RETRANS(x) ((x) << S_MAX_RETRANS)
379#define G_MAX_RETRANS(x) (((x) >> S_MAX_RETRANS) & M_MAX_RETRANS)
380
381#define S_MAX_RETRANS_OVERRIDE 27
382#define V_MAX_RETRANS_OVERRIDE(x) ((x) << S_MAX_RETRANS_OVERRIDE)
383#define F_MAX_RETRANS_OVERRIDE V_MAX_RETRANS_OVERRIDE(1U)
384
385#define S_MSS_IDX 28
386#define M_MSS_IDX 0xF
387#define V_MSS_IDX(x) ((x) << S_MSS_IDX)
388#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX)
389
390/* option 1 fields */
391#define S_RSS_ENABLE 0
392#define V_RSS_ENABLE(x) ((x) << S_RSS_ENABLE)
393#define F_RSS_ENABLE V_RSS_ENABLE(1U)
394
395#define S_RSS_MASK_LEN 1
396#define M_RSS_MASK_LEN 0x7
397#define V_RSS_MASK_LEN(x) ((x) << S_RSS_MASK_LEN)
398#define G_RSS_MASK_LEN(x) (((x) >> S_RSS_MASK_LEN) & M_RSS_MASK_LEN)
399
400#define S_CPU_IDX 4
401#define M_CPU_IDX 0x3F
402#define V_CPU_IDX(x) ((x) << S_CPU_IDX)
403#define G_CPU_IDX(x) (((x) >> S_CPU_IDX) & M_CPU_IDX)
404
405#define S_MAC_MATCH_VALID 18
406#define V_MAC_MATCH_VALID(x) ((x) << S_MAC_MATCH_VALID)
407#define F_MAC_MATCH_VALID V_MAC_MATCH_VALID(1U)
408
409#define S_CONN_POLICY 19
410#define M_CONN_POLICY 0x3
411#define V_CONN_POLICY(x) ((x) << S_CONN_POLICY)
412#define G_CONN_POLICY(x) (((x) >> S_CONN_POLICY) & M_CONN_POLICY)
413
414#define S_SYN_DEFENSE 21
415#define V_SYN_DEFENSE(x) ((x) << S_SYN_DEFENSE)
416#define F_SYN_DEFENSE V_SYN_DEFENSE(1U)
417
418#define S_VLAN_PRI 22
419#define M_VLAN_PRI 0x3
420#define V_VLAN_PRI(x) ((x) << S_VLAN_PRI)
421#define G_VLAN_PRI(x) (((x) >> S_VLAN_PRI) & M_VLAN_PRI)
422
423#define S_VLAN_PRI_VALID 24
424#define V_VLAN_PRI_VALID(x) ((x) << S_VLAN_PRI_VALID)
425#define F_VLAN_PRI_VALID V_VLAN_PRI_VALID(1U)
426
427#define S_PKT_TYPE 25
428#define M_PKT_TYPE 0x3
429#define V_PKT_TYPE(x) ((x) << S_PKT_TYPE)
430#define G_PKT_TYPE(x) (((x) >> S_PKT_TYPE) & M_PKT_TYPE)
431
432#define S_MAC_MATCH 27
433#define M_MAC_MATCH 0x1F
434#define V_MAC_MATCH(x) ((x) << S_MAC_MATCH)
435#define G_MAC_MATCH(x) (((x) >> S_MAC_MATCH) & M_MAC_MATCH)
436
437/* option 2 fields */
438#define S_CPU_INDEX 0
439#define M_CPU_INDEX 0x7F
440#define V_CPU_INDEX(x) ((x) << S_CPU_INDEX)
441#define G_CPU_INDEX(x) (((x) >> S_CPU_INDEX) & M_CPU_INDEX)
442
443#define S_CPU_INDEX_VALID 7
444#define V_CPU_INDEX_VALID(x) ((x) << S_CPU_INDEX_VALID)
445#define F_CPU_INDEX_VALID V_CPU_INDEX_VALID(1U)
446
447#define S_RX_COALESCE 8
448#define M_RX_COALESCE 0x3
449#define V_RX_COALESCE(x) ((x) << S_RX_COALESCE)
450#define G_RX_COALESCE(x) (((x) >> S_RX_COALESCE) & M_RX_COALESCE)
451
452#define S_RX_COALESCE_VALID 10
453#define V_RX_COALESCE_VALID(x) ((x) << S_RX_COALESCE_VALID)
454#define F_RX_COALESCE_VALID V_RX_COALESCE_VALID(1U)
455
456#define S_CONG_CONTROL_FLAVOR 11
457#define M_CONG_CONTROL_FLAVOR 0x3
458#define V_CONG_CONTROL_FLAVOR(x) ((x) << S_CONG_CONTROL_FLAVOR)
459#define G_CONG_CONTROL_FLAVOR(x) (((x) >> S_CONG_CONTROL_FLAVOR) & M_CONG_CONTROL_FLAVOR)
460
461#define S_PACING_FLAVOR 13
462#define M_PACING_FLAVOR 0x3
463#define V_PACING_FLAVOR(x) ((x) << S_PACING_FLAVOR)
464#define G_PACING_FLAVOR(x) (((x) >> S_PACING_FLAVOR) & M_PACING_FLAVOR)
465
466#define S_FLAVORS_VALID 15
467#define V_FLAVORS_VALID(x) ((x) << S_FLAVORS_VALID)
468#define F_FLAVORS_VALID V_FLAVORS_VALID(1U)
469
470#define S_RX_FC_DISABLE 16
471#define V_RX_FC_DISABLE(x) ((x) << S_RX_FC_DISABLE)
472#define F_RX_FC_DISABLE V_RX_FC_DISABLE(1U)
473
474#define S_RX_FC_VALID 17
475#define V_RX_FC_VALID(x) ((x) << S_RX_FC_VALID)
476#define F_RX_FC_VALID V_RX_FC_VALID(1U)
477
478struct cpl_pass_open_req {
479 WR_HDR;
480 union opcode_tid ot;
481 __be16 local_port;
482 __be16 peer_port;
483 __be32 local_ip;
484 __be32 peer_ip;
485 __be32 opt0h;
486 __be32 opt0l;
487 __be32 peer_netmask;
488 __be32 opt1;
489};
490
491struct cpl_pass_open_rpl {
492 RSS_HDR union opcode_tid ot;
493 __be16 local_port;
494 __be16 peer_port;
495 __be32 local_ip;
496 __be32 peer_ip;
497 __u8 resvd[7];
498 __u8 status;
499};
500
501struct cpl_pass_establish {
502 RSS_HDR union opcode_tid ot;
503 __be16 local_port;
504 __be16 peer_port;
505 __be32 local_ip;
506 __be32 peer_ip;
507 __be32 tos_tid;
508 __be16 l2t_idx;
509 __be16 tcp_opt;
510 __be32 snd_isn;
511 __be32 rcv_isn;
512};
513
514/* cpl_pass_establish.tos_tid fields */
515#define S_PASS_OPEN_TID 0
516#define M_PASS_OPEN_TID 0xFFFFFF
517#define V_PASS_OPEN_TID(x) ((x) << S_PASS_OPEN_TID)
518#define G_PASS_OPEN_TID(x) (((x) >> S_PASS_OPEN_TID) & M_PASS_OPEN_TID)
519
520#define S_PASS_OPEN_TOS 24
521#define M_PASS_OPEN_TOS 0xFF
522#define V_PASS_OPEN_TOS(x) ((x) << S_PASS_OPEN_TOS)
523#define G_PASS_OPEN_TOS(x) (((x) >> S_PASS_OPEN_TOS) & M_PASS_OPEN_TOS)
524
525/* cpl_pass_establish.l2t_idx fields */
526#define S_L2T_IDX16 5
527#define M_L2T_IDX16 0x7FF
528#define V_L2T_IDX16(x) ((x) << S_L2T_IDX16)
529#define G_L2T_IDX16(x) (((x) >> S_L2T_IDX16) & M_L2T_IDX16)
530
531/* cpl_pass_establish.tcp_opt fields (also applies to cpl_act_establish) */
532#define G_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1)
533#define G_TCPOPT_SACK(x) (((x) >> 6) & 1)
534#define G_TCPOPT_TSTAMP(x) (((x) >> 7) & 1)
535#define G_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
536#define G_TCPOPT_MSS(x) (((x) >> 12) & 0xf)
537
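/*
 * Illustrative sketch (assumption): the negotiated TCP options arrive
 * packed in the 16-bit tcp_opt word of the establish messages, and the
 * G_TCPOPT_* accessors above unpack them.  A hypothetical consumer:
 */
static inline unsigned int example_peer_wscale(const struct cpl_pass_establish *p)
{
	unsigned int opt = ntohs(p->tcp_opt);

	/* the advertised window scale only counts if both sides agreed */
	return G_TCPOPT_WSCALE_OK(opt) ? G_TCPOPT_SND_WSCALE(opt) : 0;
}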
538struct cpl_pass_accept_req {
539 RSS_HDR union opcode_tid ot;
540 __be16 local_port;
541 __be16 peer_port;
542 __be32 local_ip;
543 __be32 peer_ip;
544 __be32 tos_tid;
545 struct tcp_options tcp_options;
546 __u8 dst_mac[6];
547 __be16 vlan_tag;
548 __u8 src_mac[6];
549#if defined(__LITTLE_ENDIAN_BITFIELD)
550 __u8:3;
551 __u8 addr_idx:3;
552 __u8 port_idx:1;
553 __u8 exact_match:1;
554#else
555 __u8 exact_match:1;
556 __u8 port_idx:1;
557 __u8 addr_idx:3;
558 __u8:3;
559#endif
560 __u8 rsvd;
561 __be32 rcv_isn;
562 __be32 rsvd2;
563};
564
565struct cpl_pass_accept_rpl {
566 WR_HDR;
567 union opcode_tid ot;
568 __be32 opt2;
569 __be32 rsvd;
570 __be32 peer_ip;
571 __be32 opt0h;
572 __be32 opt0l_status;
573};
574
575struct cpl_act_open_req {
576 WR_HDR;
577 union opcode_tid ot;
578 __be16 local_port;
579 __be16 peer_port;
580 __be32 local_ip;
581 __be32 peer_ip;
582 __be32 opt0h;
583 __be32 opt0l;
584 __be32 params;
585 __be32 opt2;
586};
587
588/* cpl_act_open_req.params fields */
589#define S_AOPEN_VLAN_PRI 9
590#define M_AOPEN_VLAN_PRI 0x3
591#define V_AOPEN_VLAN_PRI(x) ((x) << S_AOPEN_VLAN_PRI)
592#define G_AOPEN_VLAN_PRI(x) (((x) >> S_AOPEN_VLAN_PRI) & M_AOPEN_VLAN_PRI)
593
594#define S_AOPEN_VLAN_PRI_VALID 11
595#define V_AOPEN_VLAN_PRI_VALID(x) ((x) << S_AOPEN_VLAN_PRI_VALID)
596#define F_AOPEN_VLAN_PRI_VALID V_AOPEN_VLAN_PRI_VALID(1U)
597
598#define S_AOPEN_PKT_TYPE 12
599#define M_AOPEN_PKT_TYPE 0x3
600#define V_AOPEN_PKT_TYPE(x) ((x) << S_AOPEN_PKT_TYPE)
601#define G_AOPEN_PKT_TYPE(x) (((x) >> S_AOPEN_PKT_TYPE) & M_AOPEN_PKT_TYPE)
602
603#define S_AOPEN_MAC_MATCH 14
604#define M_AOPEN_MAC_MATCH 0x1F
605#define V_AOPEN_MAC_MATCH(x) ((x) << S_AOPEN_MAC_MATCH)
606#define G_AOPEN_MAC_MATCH(x) (((x) >> S_AOPEN_MAC_MATCH) & M_AOPEN_MAC_MATCH)
607
608#define S_AOPEN_MAC_MATCH_VALID 19
609#define V_AOPEN_MAC_MATCH_VALID(x) ((x) << S_AOPEN_MAC_MATCH_VALID)
610#define F_AOPEN_MAC_MATCH_VALID V_AOPEN_MAC_MATCH_VALID(1U)
611
612#define S_AOPEN_IFF_VLAN 20
613#define M_AOPEN_IFF_VLAN 0xFFF
614#define V_AOPEN_IFF_VLAN(x) ((x) << S_AOPEN_IFF_VLAN)
615#define G_AOPEN_IFF_VLAN(x) (((x) >> S_AOPEN_IFF_VLAN) & M_AOPEN_IFF_VLAN)
616
617struct cpl_act_open_rpl {
618 RSS_HDR union opcode_tid ot;
619 __be16 local_port;
620 __be16 peer_port;
621 __be32 local_ip;
622 __be32 peer_ip;
623 __be32 atid;
624 __u8 rsvd[3];
625 __u8 status;
626};
627
628struct cpl_act_establish {
629 RSS_HDR union opcode_tid ot;
630 __be16 local_port;
631 __be16 peer_port;
632 __be32 local_ip;
633 __be32 peer_ip;
634 __be32 tos_tid;
635 __be16 l2t_idx;
636 __be16 tcp_opt;
637 __be32 snd_isn;
638 __be32 rcv_isn;
639};
640
641struct cpl_get_tcb {
642 WR_HDR;
643 union opcode_tid ot;
644 __be16 cpuno;
645 __be16 rsvd;
646};
647
648struct cpl_get_tcb_rpl {
649 RSS_HDR union opcode_tid ot;
650 __u8 rsvd;
651 __u8 status;
652 __be16 len;
653};
654
655struct cpl_set_tcb {
656 WR_HDR;
657 union opcode_tid ot;
658 __u8 reply;
659 __u8 cpu_idx;
660 __be16 len;
661};
662
663/* cpl_set_tcb.reply fields */
664#define S_NO_REPLY 7
665#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
666#define F_NO_REPLY V_NO_REPLY(1U)
667
668struct cpl_set_tcb_field {
669 WR_HDR;
670 union opcode_tid ot;
671 __u8 reply;
672 __u8 cpu_idx;
673 __be16 word;
674 __be64 mask;
675 __be64 val;
676};
677
678struct cpl_set_tcb_rpl {
679 RSS_HDR union opcode_tid ot;
680 __u8 rsvd[3];
681 __u8 status;
682};
683
684struct cpl_pcmd {
685 WR_HDR;
686 union opcode_tid ot;
687 __u8 rsvd[3];
688#if defined(__LITTLE_ENDIAN_BITFIELD)
689 __u8 src:1;
690 __u8 bundle:1;
691 __u8 channel:1;
692 __u8:5;
693#else
694 __u8:5;
695 __u8 channel:1;
696 __u8 bundle:1;
697 __u8 src:1;
698#endif
699 __be32 pcmd_parm[2];
700};
701
702struct cpl_pcmd_reply {
703 RSS_HDR union opcode_tid ot;
704 __u8 status;
705 __u8 rsvd;
706 __be16 len;
707};
708
709struct cpl_close_con_req {
710 WR_HDR;
711 union opcode_tid ot;
712 __be32 rsvd;
713};
714
715struct cpl_close_con_rpl {
716 RSS_HDR union opcode_tid ot;
717 __u8 rsvd[3];
718 __u8 status;
719 __be32 snd_nxt;
720 __be32 rcv_nxt;
721};
722
723struct cpl_close_listserv_req {
724 WR_HDR;
725 union opcode_tid ot;
726 __u8 rsvd0;
727 __u8 cpu_idx;
728 __be16 rsvd1;
729};
730
731struct cpl_close_listserv_rpl {
732 RSS_HDR union opcode_tid ot;
733 __u8 rsvd[3];
734 __u8 status;
735};
736
737struct cpl_abort_req_rss {
738 RSS_HDR union opcode_tid ot;
739 __be32 rsvd0;
740 __u8 rsvd1;
741 __u8 status;
742 __u8 rsvd2[6];
743};
744
745struct cpl_abort_req {
746 WR_HDR;
747 union opcode_tid ot;
748 __be32 rsvd0;
749 __u8 rsvd1;
750 __u8 cmd;
751 __u8 rsvd2[6];
752};
753
754struct cpl_abort_rpl_rss {
755 RSS_HDR union opcode_tid ot;
756 __be32 rsvd0;
757 __u8 rsvd1;
758 __u8 status;
759 __u8 rsvd2[6];
760};
761
762struct cpl_abort_rpl {
763 WR_HDR;
764 union opcode_tid ot;
765 __be32 rsvd0;
766 __u8 rsvd1;
767 __u8 cmd;
768 __u8 rsvd2[6];
769};
770
771struct cpl_peer_close {
772 RSS_HDR union opcode_tid ot;
773 __be32 rcv_nxt;
774};
775
776struct tx_data_wr {
777 __be32 wr_hi;
778 __be32 wr_lo;
779 __be32 len;
780 __be32 flags;
781 __be32 sndseq;
782 __be32 param;
783};
784
785/* tx_data_wr.flags fields */
786#define S_TX_ACK_PAGES 21
787#define M_TX_ACK_PAGES 0x7
788#define V_TX_ACK_PAGES(x) ((x) << S_TX_ACK_PAGES)
789#define G_TX_ACK_PAGES(x) (((x) >> S_TX_ACK_PAGES) & M_TX_ACK_PAGES)
790
791/* tx_data_wr.param fields */
792#define S_TX_PORT 0
793#define M_TX_PORT 0x7
794#define V_TX_PORT(x) ((x) << S_TX_PORT)
795#define G_TX_PORT(x) (((x) >> S_TX_PORT) & M_TX_PORT)
796
797#define S_TX_MSS 4
798#define M_TX_MSS 0xF
799#define V_TX_MSS(x) ((x) << S_TX_MSS)
800#define G_TX_MSS(x) (((x) >> S_TX_MSS) & M_TX_MSS)
801
802#define S_TX_QOS 8
803#define M_TX_QOS 0xFF
804#define V_TX_QOS(x) ((x) << S_TX_QOS)
805#define G_TX_QOS(x) (((x) >> S_TX_QOS) & M_TX_QOS)
806
807#define S_TX_SNDBUF 16
808#define M_TX_SNDBUF 0xFFFF
809#define V_TX_SNDBUF(x) ((x) << S_TX_SNDBUF)
810#define G_TX_SNDBUF(x) (((x) >> S_TX_SNDBUF) & M_TX_SNDBUF)
811
812struct cpl_tx_data {
813 union opcode_tid ot;
814 __be32 len;
815 __be32 rsvd;
816 __be16 urg;
817 __be16 flags;
818};
819
820/* cpl_tx_data.flags fields */
821#define S_TX_ULP_SUBMODE 6
822#define M_TX_ULP_SUBMODE 0xF
823#define V_TX_ULP_SUBMODE(x) ((x) << S_TX_ULP_SUBMODE)
824#define G_TX_ULP_SUBMODE(x) (((x) >> S_TX_ULP_SUBMODE) & M_TX_ULP_SUBMODE)
825
826#define S_TX_ULP_MODE 10
827#define M_TX_ULP_MODE 0xF
828#define V_TX_ULP_MODE(x) ((x) << S_TX_ULP_MODE)
829#define G_TX_ULP_MODE(x) (((x) >> S_TX_ULP_MODE) & M_TX_ULP_MODE)
830
831#define S_TX_SHOVE 14
832#define V_TX_SHOVE(x) ((x) << S_TX_SHOVE)
833#define F_TX_SHOVE V_TX_SHOVE(1U)
834
835#define S_TX_MORE 15
836#define V_TX_MORE(x) ((x) << S_TX_MORE)
837#define F_TX_MORE V_TX_MORE(1U)
838
839/* additional tx_data_wr.flags fields */
840#define S_TX_CPU_IDX 0
841#define M_TX_CPU_IDX 0x3F
842#define V_TX_CPU_IDX(x) ((x) << S_TX_CPU_IDX)
843#define G_TX_CPU_IDX(x) (((x) >> S_TX_CPU_IDX) & M_TX_CPU_IDX)
844
845#define S_TX_URG 16
846#define V_TX_URG(x) ((x) << S_TX_URG)
847#define F_TX_URG V_TX_URG(1U)
848
849#define S_TX_CLOSE 17
850#define V_TX_CLOSE(x) ((x) << S_TX_CLOSE)
851#define F_TX_CLOSE V_TX_CLOSE(1U)
852
853#define S_TX_INIT 18
854#define V_TX_INIT(x) ((x) << S_TX_INIT)
855#define F_TX_INIT V_TX_INIT(1U)
856
857#define S_TX_IMM_ACK 19
858#define V_TX_IMM_ACK(x) ((x) << S_TX_IMM_ACK)
859#define F_TX_IMM_ACK V_TX_IMM_ACK(1U)
860
861#define S_TX_IMM_DMA 20
862#define V_TX_IMM_DMA(x) ((x) << S_TX_IMM_DMA)
863#define F_TX_IMM_DMA V_TX_IMM_DMA(1U)
864
865struct cpl_tx_data_ack {
866 RSS_HDR union opcode_tid ot;
867 __be32 ack_seq;
868};
869
870struct cpl_wr_ack {
871 RSS_HDR union opcode_tid ot;
872 __be16 credits;
873 __be16 rsvd;
874 __be32 snd_nxt;
875 __be32 snd_una;
876};
877
878struct cpl_rdma_ec_status {
879 RSS_HDR union opcode_tid ot;
880 __u8 rsvd[3];
881 __u8 status;
882};
883
884struct mngt_pktsched_wr {
885 __be32 wr_hi;
886 __be32 wr_lo;
887 __u8 mngt_opcode;
888 __u8 rsvd[7];
889 __u8 sched;
890 __u8 idx;
891 __u8 min;
892 __u8 max;
893 __u8 binding;
894 __u8 rsvd1[3];
895};
896
897struct cpl_iscsi_hdr {
898 RSS_HDR union opcode_tid ot;
899 __be16 pdu_len_ddp;
900 __be16 len;
901 __be32 seq;
902 __be16 urg;
903 __u8 rsvd;
904 __u8 status;
905};
906
907/* cpl_iscsi_hdr.pdu_len_ddp fields */
908#define S_ISCSI_PDU_LEN 0
909#define M_ISCSI_PDU_LEN 0x7FFF
910#define V_ISCSI_PDU_LEN(x) ((x) << S_ISCSI_PDU_LEN)
911#define G_ISCSI_PDU_LEN(x) (((x) >> S_ISCSI_PDU_LEN) & M_ISCSI_PDU_LEN)
912
913#define S_ISCSI_DDP 15
914#define V_ISCSI_DDP(x) ((x) << S_ISCSI_DDP)
915#define F_ISCSI_DDP V_ISCSI_DDP(1U)
916
917struct cpl_rx_data {
918 RSS_HDR union opcode_tid ot;
919 __be16 rsvd;
920 __be16 len;
921 __be32 seq;
922 __be16 urg;
923#if defined(__LITTLE_ENDIAN_BITFIELD)
924 __u8 dack_mode:2;
925 __u8 psh:1;
926 __u8 heartbeat:1;
927 __u8:4;
928#else
929 __u8:4;
930 __u8 heartbeat:1;
931 __u8 psh:1;
932 __u8 dack_mode:2;
933#endif
934 __u8 status;
935};
936
937struct cpl_rx_data_ack {
938 WR_HDR;
939 union opcode_tid ot;
940 __be32 credit_dack;
941};
942
943/* cpl_rx_data_ack.credit_dack fields */
944#define S_RX_CREDITS 0
945#define M_RX_CREDITS 0x7FFFFFF
946#define V_RX_CREDITS(x) ((x) << S_RX_CREDITS)
947#define G_RX_CREDITS(x) (((x) >> S_RX_CREDITS) & M_RX_CREDITS)
948
949#define S_RX_MODULATE 27
950#define V_RX_MODULATE(x) ((x) << S_RX_MODULATE)
951#define F_RX_MODULATE V_RX_MODULATE(1U)
952
953#define S_RX_FORCE_ACK 28
954#define V_RX_FORCE_ACK(x) ((x) << S_RX_FORCE_ACK)
955#define F_RX_FORCE_ACK V_RX_FORCE_ACK(1U)
956
957#define S_RX_DACK_MODE 29
958#define M_RX_DACK_MODE 0x3
959#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
960#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
961
962#define S_RX_DACK_CHANGE 31
963#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
964#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
965
966struct cpl_rx_urg_notify {
967 RSS_HDR union opcode_tid ot;
968 __be32 seq;
969};
970
971struct cpl_rx_ddp_complete {
972 RSS_HDR union opcode_tid ot;
973 __be32 ddp_report;
974};
975
976struct cpl_rx_data_ddp {
977 RSS_HDR union opcode_tid ot;
978 __be16 urg;
979 __be16 len;
980 __be32 seq;
981 union {
982 __be32 nxt_seq;
983 __be32 ddp_report;
984 };
985 __be32 ulp_crc;
986 __be32 ddpvld_status;
987};
988
989/* cpl_rx_data_ddp.ddpvld_status fields */
990#define S_DDP_STATUS 0
991#define M_DDP_STATUS 0xFF
992#define V_DDP_STATUS(x) ((x) << S_DDP_STATUS)
993#define G_DDP_STATUS(x) (((x) >> S_DDP_STATUS) & M_DDP_STATUS)
994
995#define S_DDP_VALID 15
996#define M_DDP_VALID 0x1FFFF
997#define V_DDP_VALID(x) ((x) << S_DDP_VALID)
998#define G_DDP_VALID(x) (((x) >> S_DDP_VALID) & M_DDP_VALID)
999
1000#define S_DDP_PPOD_MISMATCH 15
1001#define V_DDP_PPOD_MISMATCH(x) ((x) << S_DDP_PPOD_MISMATCH)
1002#define F_DDP_PPOD_MISMATCH V_DDP_PPOD_MISMATCH(1U)
1003
1004#define S_DDP_PDU 16
1005#define V_DDP_PDU(x) ((x) << S_DDP_PDU)
1006#define F_DDP_PDU V_DDP_PDU(1U)
1007
1008#define S_DDP_LLIMIT_ERR 17
1009#define V_DDP_LLIMIT_ERR(x) ((x) << S_DDP_LLIMIT_ERR)
1010#define F_DDP_LLIMIT_ERR V_DDP_LLIMIT_ERR(1U)
1011
1012#define S_DDP_PPOD_PARITY_ERR 18
1013#define V_DDP_PPOD_PARITY_ERR(x) ((x) << S_DDP_PPOD_PARITY_ERR)
1014#define F_DDP_PPOD_PARITY_ERR V_DDP_PPOD_PARITY_ERR(1U)
1015
1016#define S_DDP_PADDING_ERR 19
1017#define V_DDP_PADDING_ERR(x) ((x) << S_DDP_PADDING_ERR)
1018#define F_DDP_PADDING_ERR V_DDP_PADDING_ERR(1U)
1019
1020#define S_DDP_HDRCRC_ERR 20
1021#define V_DDP_HDRCRC_ERR(x) ((x) << S_DDP_HDRCRC_ERR)
1022#define F_DDP_HDRCRC_ERR V_DDP_HDRCRC_ERR(1U)
1023
1024#define S_DDP_DATACRC_ERR 21
1025#define V_DDP_DATACRC_ERR(x) ((x) << S_DDP_DATACRC_ERR)
1026#define F_DDP_DATACRC_ERR V_DDP_DATACRC_ERR(1U)
1027
1028#define S_DDP_INVALID_TAG 22
1029#define V_DDP_INVALID_TAG(x) ((x) << S_DDP_INVALID_TAG)
1030#define F_DDP_INVALID_TAG V_DDP_INVALID_TAG(1U)
1031
1032#define S_DDP_ULIMIT_ERR 23
1033#define V_DDP_ULIMIT_ERR(x) ((x) << S_DDP_ULIMIT_ERR)
1034#define F_DDP_ULIMIT_ERR V_DDP_ULIMIT_ERR(1U)
1035
1036#define S_DDP_OFFSET_ERR 24
1037#define V_DDP_OFFSET_ERR(x) ((x) << S_DDP_OFFSET_ERR)
1038#define F_DDP_OFFSET_ERR V_DDP_OFFSET_ERR(1U)
1039
1040#define S_DDP_COLOR_ERR 25
1041#define V_DDP_COLOR_ERR(x) ((x) << S_DDP_COLOR_ERR)
1042#define F_DDP_COLOR_ERR V_DDP_COLOR_ERR(1U)
1043
1044#define S_DDP_TID_MISMATCH 26
1045#define V_DDP_TID_MISMATCH(x) ((x) << S_DDP_TID_MISMATCH)
1046#define F_DDP_TID_MISMATCH V_DDP_TID_MISMATCH(1U)
1047
1048#define S_DDP_INVALID_PPOD 27
1049#define V_DDP_INVALID_PPOD(x) ((x) << S_DDP_INVALID_PPOD)
1050#define F_DDP_INVALID_PPOD V_DDP_INVALID_PPOD(1U)
1051
1052#define S_DDP_ULP_MODE 28
1053#define M_DDP_ULP_MODE 0xF
1054#define V_DDP_ULP_MODE(x) ((x) << S_DDP_ULP_MODE)
1055#define G_DDP_ULP_MODE(x) (((x) >> S_DDP_ULP_MODE) & M_DDP_ULP_MODE)
1056
1057/* cpl_rx_data_ddp.ddp_report fields */
1058#define S_DDP_OFFSET 0
1059#define M_DDP_OFFSET 0x3FFFFF
1060#define V_DDP_OFFSET(x) ((x) << S_DDP_OFFSET)
1061#define G_DDP_OFFSET(x) (((x) >> S_DDP_OFFSET) & M_DDP_OFFSET)
1062
1063#define S_DDP_URG 24
1064#define V_DDP_URG(x) ((x) << S_DDP_URG)
1065#define F_DDP_URG V_DDP_URG(1U)
1066
1067#define S_DDP_PSH 25
1068#define V_DDP_PSH(x) ((x) << S_DDP_PSH)
1069#define F_DDP_PSH V_DDP_PSH(1U)
1070
1071#define S_DDP_BUF_COMPLETE 26
1072#define V_DDP_BUF_COMPLETE(x) ((x) << S_DDP_BUF_COMPLETE)
1073#define F_DDP_BUF_COMPLETE V_DDP_BUF_COMPLETE(1U)
1074
1075#define S_DDP_BUF_TIMED_OUT 27
1076#define V_DDP_BUF_TIMED_OUT(x) ((x) << S_DDP_BUF_TIMED_OUT)
1077#define F_DDP_BUF_TIMED_OUT V_DDP_BUF_TIMED_OUT(1U)
1078
1079#define S_DDP_BUF_IDX 28
1080#define V_DDP_BUF_IDX(x) ((x) << S_DDP_BUF_IDX)
1081#define F_DDP_BUF_IDX V_DDP_BUF_IDX(1U)
1082
1083struct cpl_tx_pkt {
1084 WR_HDR;
1085 __be32 cntrl;
1086 __be32 len;
1087};
1088
1089struct cpl_tx_pkt_lso {
1090 WR_HDR;
1091 __be32 cntrl;
1092 __be32 len;
1093
1094 __be32 rsvd;
1095 __be32 lso_info;
1096};
1097
1098/* cpl_tx_pkt*.cntrl fields */
1099#define S_TXPKT_VLAN 0
1100#define M_TXPKT_VLAN 0xFFFF
1101#define V_TXPKT_VLAN(x) ((x) << S_TXPKT_VLAN)
1102#define G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN)
1103
1104#define S_TXPKT_INTF 16
1105#define M_TXPKT_INTF 0xF
1106#define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF)
1107#define G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF)
1108
1109#define S_TXPKT_IPCSUM_DIS 20
1110#define V_TXPKT_IPCSUM_DIS(x) ((x) << S_TXPKT_IPCSUM_DIS)
1111#define F_TXPKT_IPCSUM_DIS V_TXPKT_IPCSUM_DIS(1U)
1112
1113#define S_TXPKT_L4CSUM_DIS 21
1114#define V_TXPKT_L4CSUM_DIS(x) ((x) << S_TXPKT_L4CSUM_DIS)
1115#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1U)
1116
1117#define S_TXPKT_VLAN_VLD 22
1118#define V_TXPKT_VLAN_VLD(x) ((x) << S_TXPKT_VLAN_VLD)
1119#define F_TXPKT_VLAN_VLD V_TXPKT_VLAN_VLD(1U)
1120
1121#define S_TXPKT_LOOPBACK 23
1122#define V_TXPKT_LOOPBACK(x) ((x) << S_TXPKT_LOOPBACK)
1123#define F_TXPKT_LOOPBACK V_TXPKT_LOOPBACK(1U)
1124
1125#define S_TXPKT_OPCODE 24
1126#define M_TXPKT_OPCODE 0xFF
1127#define V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE)
1128#define G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE)
1129
1130/* cpl_tx_pkt_lso.lso_info fields */
1131#define S_LSO_MSS 0
1132#define M_LSO_MSS 0x3FFF
1133#define V_LSO_MSS(x) ((x) << S_LSO_MSS)
1134#define G_LSO_MSS(x) (((x) >> S_LSO_MSS) & M_LSO_MSS)
1135
1136#define S_LSO_ETH_TYPE 14
1137#define M_LSO_ETH_TYPE 0x3
1138#define V_LSO_ETH_TYPE(x) ((x) << S_LSO_ETH_TYPE)
1139#define G_LSO_ETH_TYPE(x) (((x) >> S_LSO_ETH_TYPE) & M_LSO_ETH_TYPE)
1140
1141#define S_LSO_TCPHDR_WORDS 16
1142#define M_LSO_TCPHDR_WORDS 0xF
1143#define V_LSO_TCPHDR_WORDS(x) ((x) << S_LSO_TCPHDR_WORDS)
1144#define G_LSO_TCPHDR_WORDS(x) (((x) >> S_LSO_TCPHDR_WORDS) & M_LSO_TCPHDR_WORDS)
1145
1146#define S_LSO_IPHDR_WORDS 20
1147#define M_LSO_IPHDR_WORDS 0xF
1148#define V_LSO_IPHDR_WORDS(x) ((x) << S_LSO_IPHDR_WORDS)
1149#define G_LSO_IPHDR_WORDS(x) (((x) >> S_LSO_IPHDR_WORDS) & M_LSO_IPHDR_WORDS)
1150
1151#define S_LSO_IPV6 24
1152#define V_LSO_IPV6(x) ((x) << S_LSO_IPV6)
1153#define F_LSO_IPV6 V_LSO_IPV6(1U)
1154
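/*
 * Illustrative sketch (assumption, modelled on how an SGE transmit path
 * would typically fill this descriptor): the LSO variant carries both the
 * per-packet control word and the lso_info word describing the headers the
 * hardware must replicate; header lengths are expressed in 32-bit words.
 * The port, mss, eth_type and header-length parameters are hypothetical
 * inputs.
 */
static inline void example_fill_tx_pkt_lso(struct cpl_tx_pkt_lso *cpl,
					   unsigned int port, unsigned int mss,
					   unsigned int eth_type,
					   unsigned int iphdr_len,
					   unsigned int tcphdr_len)
{
	cpl->cntrl = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_LSO) |
			   V_TXPKT_INTF(port));
	cpl->lso_info = htonl(V_LSO_MSS(mss) | V_LSO_ETH_TYPE(eth_type) |
			      V_LSO_IPHDR_WORDS(iphdr_len / 4) |
			      V_LSO_TCPHDR_WORDS(tcphdr_len / 4));
}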
1155struct cpl_trace_pkt {
1156#ifdef CHELSIO_FW
1157 __u8 rss_opcode;
1158#if defined(__LITTLE_ENDIAN_BITFIELD)
1159 __u8 err:1;
1160 __u8:7;
1161#else
1162 __u8:7;
1163 __u8 err:1;
1164#endif
1165 __u8 rsvd0;
1166#if defined(__LITTLE_ENDIAN_BITFIELD)
1167 __u8 qid:4;
1168 __u8:4;
1169#else
1170 __u8:4;
1171 __u8 qid:4;
1172#endif
1173 __be32 tstamp;
1174#endif /* CHELSIO_FW */
1175
1176 __u8 opcode;
1177#if defined(__LITTLE_ENDIAN_BITFIELD)
1178 __u8 iff:4;
1179 __u8:4;
1180#else
1181 __u8:4;
1182 __u8 iff:4;
1183#endif
1184 __u8 rsvd[4];
1185 __be16 len;
1186};
1187
1188struct cpl_rx_pkt {
1189 RSS_HDR __u8 opcode;
1190#if defined(__LITTLE_ENDIAN_BITFIELD)
1191 __u8 iff:4;
1192 __u8 csum_valid:1;
1193 __u8 ipmi_pkt:1;
1194 __u8 vlan_valid:1;
1195 __u8 fragment:1;
1196#else
1197 __u8 fragment:1;
1198 __u8 vlan_valid:1;
1199 __u8 ipmi_pkt:1;
1200 __u8 csum_valid:1;
1201 __u8 iff:4;
1202#endif
1203 __be16 csum;
1204 __be16 vlan;
1205 __be16 len;
1206};
1207
1208struct cpl_l2t_write_req {
1209 WR_HDR;
1210 union opcode_tid ot;
1211 __be32 params;
1212 __u8 rsvd[2];
1213 __u8 dst_mac[6];
1214};
1215
1216/* cpl_l2t_write_req.params fields */
1217#define S_L2T_W_IDX 0
1218#define M_L2T_W_IDX 0x7FF
1219#define V_L2T_W_IDX(x) ((x) << S_L2T_W_IDX)
1220#define G_L2T_W_IDX(x) (((x) >> S_L2T_W_IDX) & M_L2T_W_IDX)
1221
1222#define S_L2T_W_VLAN 11
1223#define M_L2T_W_VLAN 0xFFF
1224#define V_L2T_W_VLAN(x) ((x) << S_L2T_W_VLAN)
1225#define G_L2T_W_VLAN(x) (((x) >> S_L2T_W_VLAN) & M_L2T_W_VLAN)
1226
1227#define S_L2T_W_IFF 23
1228#define M_L2T_W_IFF 0xF
1229#define V_L2T_W_IFF(x) ((x) << S_L2T_W_IFF)
1230#define G_L2T_W_IFF(x) (((x) >> S_L2T_W_IFF) & M_L2T_W_IFF)
1231
1232#define S_L2T_W_PRIO 27
1233#define M_L2T_W_PRIO 0x7
1234#define V_L2T_W_PRIO(x) ((x) << S_L2T_W_PRIO)
1235#define G_L2T_W_PRIO(x) (((x) >> S_L2T_W_PRIO) & M_L2T_W_PRIO)
1236
1237struct cpl_l2t_write_rpl {
1238 RSS_HDR union opcode_tid ot;
1239 __u8 status;
1240 __u8 rsvd[3];
1241};
1242
1243struct cpl_l2t_read_req {
1244 WR_HDR;
1245 union opcode_tid ot;
1246 __be16 rsvd;
1247 __be16 l2t_idx;
1248};
1249
1250struct cpl_l2t_read_rpl {
1251 RSS_HDR union opcode_tid ot;
1252 __be32 params;
1253 __u8 rsvd[2];
1254 __u8 dst_mac[6];
1255};
1256
1257/* cpl_l2t_read_rpl.params fields */
1258#define S_L2T_R_PRIO 0
1259#define M_L2T_R_PRIO 0x7
1260#define V_L2T_R_PRIO(x) ((x) << S_L2T_R_PRIO)
1261#define G_L2T_R_PRIO(x) (((x) >> S_L2T_R_PRIO) & M_L2T_R_PRIO)
1262
1263#define S_L2T_R_VLAN 8
1264#define M_L2T_R_VLAN 0xFFF
1265#define V_L2T_R_VLAN(x) ((x) << S_L2T_R_VLAN)
1266#define G_L2T_R_VLAN(x) (((x) >> S_L2T_R_VLAN) & M_L2T_R_VLAN)
1267
1268#define S_L2T_R_IFF 20
1269#define M_L2T_R_IFF 0xF
1270#define V_L2T_R_IFF(x) ((x) << S_L2T_R_IFF)
1271#define G_L2T_R_IFF(x) (((x) >> S_L2T_R_IFF) & M_L2T_R_IFF)
1272
1273#define S_L2T_STATUS 24
1274#define M_L2T_STATUS 0xFF
1275#define V_L2T_STATUS(x) ((x) << S_L2T_STATUS)
1276#define G_L2T_STATUS(x) (((x) >> S_L2T_STATUS) & M_L2T_STATUS)
1277
1278struct cpl_smt_write_req {
1279 WR_HDR;
1280 union opcode_tid ot;
1281 __u8 rsvd0;
1282#if defined(__LITTLE_ENDIAN_BITFIELD)
1283 __u8 mtu_idx:4;
1284 __u8 iff:4;
1285#else
1286 __u8 iff:4;
1287 __u8 mtu_idx:4;
1288#endif
1289 __be16 rsvd2;
1290 __be16 rsvd3;
1291 __u8 src_mac1[6];
1292 __be16 rsvd4;
1293 __u8 src_mac0[6];
1294};
1295
1296struct cpl_smt_write_rpl {
1297 RSS_HDR union opcode_tid ot;
1298 __u8 status;
1299 __u8 rsvd[3];
1300};
1301
1302struct cpl_smt_read_req {
1303 WR_HDR;
1304 union opcode_tid ot;
1305 __u8 rsvd0;
1306#if defined(__LITTLE_ENDIAN_BITFIELD)
1307 __u8:4;
1308 __u8 iff:4;
1309#else
1310 __u8 iff:4;
1311 __u8:4;
1312#endif
1313 __be16 rsvd2;
1314};
1315
1316struct cpl_smt_read_rpl {
1317 RSS_HDR union opcode_tid ot;
1318 __u8 status;
1319#if defined(__LITTLE_ENDIAN_BITFIELD)
1320 __u8 mtu_idx:4;
1321 __u8:4;
1322#else
1323 __u8:4;
1324 __u8 mtu_idx:4;
1325#endif
1326 __be16 rsvd2;
1327 __be16 rsvd3;
1328 __u8 src_mac1[6];
1329 __be16 rsvd4;
1330 __u8 src_mac0[6];
1331};
1332
1333struct cpl_rte_delete_req {
1334 WR_HDR;
1335 union opcode_tid ot;
1336 __be32 params;
1337};
1338
1339/* { cpl_rte_delete_req, cpl_rte_read_req }.params fields */
1340#define S_RTE_REQ_LUT_IX 8
1341#define M_RTE_REQ_LUT_IX 0x7FF
1342#define V_RTE_REQ_LUT_IX(x) ((x) << S_RTE_REQ_LUT_IX)
1343#define G_RTE_REQ_LUT_IX(x) (((x) >> S_RTE_REQ_LUT_IX) & M_RTE_REQ_LUT_IX)
1344
1345#define S_RTE_REQ_LUT_BASE 19
1346#define M_RTE_REQ_LUT_BASE 0x7FF
1347#define V_RTE_REQ_LUT_BASE(x) ((x) << S_RTE_REQ_LUT_BASE)
1348#define G_RTE_REQ_LUT_BASE(x) (((x) >> S_RTE_REQ_LUT_BASE) & M_RTE_REQ_LUT_BASE)
1349
1350#define S_RTE_READ_REQ_SELECT 31
1351#define V_RTE_READ_REQ_SELECT(x) ((x) << S_RTE_READ_REQ_SELECT)
1352#define F_RTE_READ_REQ_SELECT V_RTE_READ_REQ_SELECT(1U)
1353
1354struct cpl_rte_delete_rpl {
1355 RSS_HDR union opcode_tid ot;
1356 __u8 status;
1357 __u8 rsvd[3];
1358};
1359
1360struct cpl_rte_write_req {
1361 WR_HDR;
1362 union opcode_tid ot;
1363#if defined(__LITTLE_ENDIAN_BITFIELD)
1364 __u8:6;
1365 __u8 write_tcam:1;
1366 __u8 write_l2t_lut:1;
1367#else
1368 __u8 write_l2t_lut:1;
1369 __u8 write_tcam:1;
1370 __u8:6;
1371#endif
1372 __u8 rsvd[3];
1373 __be32 lut_params;
1374 __be16 rsvd2;
1375 __be16 l2t_idx;
1376 __be32 netmask;
1377 __be32 faddr;
1378};
1379
1380/* cpl_rte_write_req.lut_params fields */
1381#define S_RTE_WRITE_REQ_LUT_IX 10
1382#define M_RTE_WRITE_REQ_LUT_IX 0x7FF
1383#define V_RTE_WRITE_REQ_LUT_IX(x) ((x) << S_RTE_WRITE_REQ_LUT_IX)
1384#define G_RTE_WRITE_REQ_LUT_IX(x) (((x) >> S_RTE_WRITE_REQ_LUT_IX) & M_RTE_WRITE_REQ_LUT_IX)
1385
1386#define S_RTE_WRITE_REQ_LUT_BASE 21
1387#define M_RTE_WRITE_REQ_LUT_BASE 0x7FF
1388#define V_RTE_WRITE_REQ_LUT_BASE(x) ((x) << S_RTE_WRITE_REQ_LUT_BASE)
1389#define G_RTE_WRITE_REQ_LUT_BASE(x) (((x) >> S_RTE_WRITE_REQ_LUT_BASE) & M_RTE_WRITE_REQ_LUT_BASE)
1390
1391struct cpl_rte_write_rpl {
1392 RSS_HDR union opcode_tid ot;
1393 __u8 status;
1394 __u8 rsvd[3];
1395};
1396
1397struct cpl_rte_read_req {
1398 WR_HDR;
1399 union opcode_tid ot;
1400 __be32 params;
1401};
1402
1403struct cpl_rte_read_rpl {
1404 RSS_HDR union opcode_tid ot;
1405 __u8 status;
1406 __u8 rsvd0;
1407 __be16 l2t_idx;
1408#if defined(__LITTLE_ENDIAN_BITFIELD)
1409 __u8:7;
1410 __u8 select:1;
1411#else
1412 __u8 select:1;
1413 __u8:7;
1414#endif
1415 __u8 rsvd2[3];
1416 __be32 addr;
1417};
1418
1419struct cpl_tid_release {
1420 WR_HDR;
1421 union opcode_tid ot;
1422 __be32 rsvd;
1423};
1424
1425struct cpl_barrier {
1426 WR_HDR;
1427 __u8 opcode;
1428 __u8 rsvd[7];
1429};
1430
1431struct cpl_rdma_read_req {
1432 __u8 opcode;
1433 __u8 rsvd[15];
1434};
1435
1436struct cpl_rdma_terminate {
1437#ifdef CHELSIO_FW
1438 __u8 opcode;
1439 __u8 rsvd[2];
1440#if defined(__LITTLE_ENDIAN_BITFIELD)
1441 __u8 rspq:3;
1442 __u8:5;
1443#else
1444 __u8:5;
1445 __u8 rspq:3;
1446#endif
1447 __be32 tid_len;
1448#endif
1449 __be32 msn;
1450 __be32 mo;
1451 __u8 data[0];
1452};
1453
1454/* cpl_rdma_terminate.tid_len fields */
1455#define S_FLIT_CNT 0
1456#define M_FLIT_CNT 0xFF
1457#define V_FLIT_CNT(x) ((x) << S_FLIT_CNT)
1458#define G_FLIT_CNT(x) (((x) >> S_FLIT_CNT) & M_FLIT_CNT)
1459
1460#define S_TERM_TID 8
1461#define M_TERM_TID 0xFFFFF
1462#define V_TERM_TID(x) ((x) << S_TERM_TID)
1463#define G_TERM_TID(x) (((x) >> S_TERM_TID) & M_TERM_TID)
1464
1465/* ULP_TX opcodes */
1466enum { ULP_MEM_READ = 2, ULP_MEM_WRITE = 3, ULP_TXPKT = 4 };
1467
1468#define S_ULPTX_CMD 28
1469#define M_ULPTX_CMD 0xF
1470#define V_ULPTX_CMD(x) ((x) << S_ULPTX_CMD)
1471
1472#define S_ULPTX_NFLITS 0
1473#define M_ULPTX_NFLITS 0xFF
1474#define V_ULPTX_NFLITS(x) ((x) << S_ULPTX_NFLITS)
1475
1476struct ulp_mem_io {
1477 WR_HDR;
1478 __be32 cmd_lock_addr;
1479 __be32 len;
1480};
1481
1482/* ulp_mem_io.cmd_lock_addr fields */
1483#define S_ULP_MEMIO_ADDR 0
1484#define M_ULP_MEMIO_ADDR 0x7FFFFFF
1485#define V_ULP_MEMIO_ADDR(x) ((x) << S_ULP_MEMIO_ADDR)
1486#define S_ULP_MEMIO_LOCK 27
1487#define V_ULP_MEMIO_LOCK(x) ((x) << S_ULP_MEMIO_LOCK)
1488#define F_ULP_MEMIO_LOCK V_ULP_MEMIO_LOCK(1U)
1489
1490/* ulp_mem_io.len fields */
1491#define S_ULP_MEMIO_DATA_LEN 28
1492#define M_ULP_MEMIO_DATA_LEN 0xF
1493#define V_ULP_MEMIO_DATA_LEN(x) ((x) << S_ULP_MEMIO_DATA_LEN)
1494
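/*
 * Illustrative sketch (assumption): a ULP_TX memory-write command word
 * combines the opcode with the target address, and the length word carries
 * the data length in its upper nibble.  The addr and data_len units used
 * here are hypothetical; the real caller may well scale them first.
 */
static inline void example_fill_ulp_mem_io(struct ulp_mem_io *req,
					   unsigned int addr,
					   unsigned int data_len)
{
	req->cmd_lock_addr = htonl(V_ULPTX_CMD(ULP_MEM_WRITE) |
				   V_ULP_MEMIO_ADDR(addr));
	req->len = htonl(V_ULP_MEMIO_DATA_LEN(data_len));
}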
1495#endif /* T3_CPL_H */
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
new file mode 100644
index 00000000000..44ac2f40b64
--- /dev/null
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -0,0 +1,3785 @@
1/*
2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33#include "regs.h"
34#include "sge_defs.h"
35#include "firmware_exports.h"
36
37static void t3_port_intr_clear(struct adapter *adapter, int idx);
38
39/**
40 * t3_wait_op_done_val - wait until an operation is completed
41 * @adapter: the adapter performing the operation
42 * @reg: the register to check for completion
43 * @mask: a single-bit field within @reg that indicates completion
44 * @polarity: the value of the field when the operation is completed
45 * @attempts: number of check iterations
46 * @delay: delay in usecs between iterations
47 * @valp: where to store the value of the register at completion time
48 *
49 * Wait until an operation is completed by checking a bit in a register
50 * up to @attempts times. If @valp is not NULL the value of the register
51 * at the time it indicated completion is stored there. Returns 0 if the
52 * operation completes and -EAGAIN otherwise.
53 */
54
55int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
56 int polarity, int attempts, int delay, u32 *valp)
57{
58 while (1) {
59 u32 val = t3_read_reg(adapter, reg);
60
61 if (!!(val & mask) == polarity) {
62 if (valp)
63 *valp = val;
64 return 0;
65 }
66 if (--attempts == 0)
67 return -EAGAIN;
68 if (delay)
69 udelay(delay);
70 }
71}
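/*
 * Illustrative sketch (assumption): the t3_wait_op_done() calls used
 * throughout the rest of this file behave like the helper above with the
 * completion value discarded, i.e. a thin wrapper presumably provided by
 * common.h along these lines:
 */
static inline int example_wait_op_done(struct adapter *adapter, int reg,
				       u32 mask, int polarity, int attempts,
				       int delay)
{
	return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}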
72
73/**
74 * t3_write_regs - write a bunch of registers
75 * @adapter: the adapter to program
76 * @p: an array of register address/register value pairs
77 * @n: the number of address/value pairs
78 * @offset: register address offset
79 *
80 * Takes an array of register address/register value pairs and writes each
81 * value to the corresponding register. Register addresses are adjusted
82 * by the supplied offset.
83 */
84void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
85 int n, unsigned int offset)
86{
87 while (n--) {
88 t3_write_reg(adapter, p->reg_addr + offset, p->val);
89 p++;
90 }
91}
92
93/**
94 * t3_set_reg_field - set a register field to a value
95 * @adapter: the adapter to program
96 * @addr: the register address
97 * @mask: specifies the portion of the register to modify
98 * @val: the new value for the register field
99 *
100 * Sets a register field specified by the supplied mask to the
101 * given value.
102 */
103void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
104 u32 val)
105{
106 u32 v = t3_read_reg(adapter, addr) & ~mask;
107
108 t3_write_reg(adapter, addr, v | val);
109 t3_read_reg(adapter, addr); /* flush */
110}
111
112/**
113 * t3_read_indirect - read indirectly addressed registers
114 * @adap: the adapter
115 * @addr_reg: register holding the indirect address
116 * @data_reg: register holding the value of the indirect register
117 * @vals: where the read register values are stored
118 * @nregs: how many indirect registers to read
119 * @start_idx: index of first indirect register to read
120 *
121 * Reads registers that are accessed indirectly through an address/data
122 * register pair.
123 */
124static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
125 unsigned int data_reg, u32 *vals,
126 unsigned int nregs, unsigned int start_idx)
127{
128 while (nregs--) {
129 t3_write_reg(adap, addr_reg, start_idx);
130 *vals++ = t3_read_reg(adap, data_reg);
131 start_idx++;
132 }
133}
134
135/**
136 * t3_mc7_bd_read - read from MC7 through backdoor accesses
137 * @mc7: identifies MC7 to read from
138 * @start: index of first 64-bit word to read
139 * @n: number of 64-bit words to read
140 * @buf: where to store the read result
141 *
142 * Read n 64-bit words from MC7 starting at word start, using backdoor
143 * accesses.
144 */
145int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
146 u64 *buf)
147{
148 static const int shift[] = { 0, 0, 16, 24 };
149 static const int step[] = { 0, 32, 16, 8 };
150
151 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
152 struct adapter *adap = mc7->adapter;
153
154 if (start >= size64 || start + n > size64)
155 return -EINVAL;
156
157 start *= (8 << mc7->width);
158 while (n--) {
159 int i;
160 u64 val64 = 0;
161
162 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
163 int attempts = 10;
164 u32 val;
165
166 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
167 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
168 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
169 while ((val & F_BUSY) && attempts--)
170 val = t3_read_reg(adap,
171 mc7->offset + A_MC7_BD_OP);
172 if (val & F_BUSY)
173 return -EIO;
174
175 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
176 if (mc7->width == 0) {
177 val64 = t3_read_reg(adap,
178 mc7->offset +
179 A_MC7_BD_DATA0);
180 val64 |= (u64) val << 32;
181 } else {
182 if (mc7->width > 1)
183 val >>= shift[mc7->width];
184 val64 |= (u64) val << (step[mc7->width] * i);
185 }
186 start += 8;
187 }
188 *buf++ = val64;
189 }
190 return 0;
191}
192
193/*
194 * Initialize MI1.
195 */
196static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
197{
198 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
199 u32 val = F_PREEN | V_CLKDIV(clkdiv);
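	/*
	 * The divider above follows from MDC = cclk / (2 * (clkdiv + 1));
	 * for example, a 125 MHz core clock with a 2.5 MHz MDC target
	 * gives clkdiv = 125000 / (2 * 2500) - 1 = 24.
	 */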
200
201 t3_write_reg(adap, A_MI1_CFG, val);
202}
203
204#define MDIO_ATTEMPTS 20
205
206/*
207 * MI1 read/write operations for clause 22 PHYs.
208 */
209static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
210 u16 reg_addr)
211{
212 struct port_info *pi = netdev_priv(dev);
213 struct adapter *adapter = pi->adapter;
214 int ret;
215 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
216
217 mutex_lock(&adapter->mdio_lock);
218 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
219 t3_write_reg(adapter, A_MI1_ADDR, addr);
220 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
221 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
222 if (!ret)
223 ret = t3_read_reg(adapter, A_MI1_DATA);
224 mutex_unlock(&adapter->mdio_lock);
225 return ret;
226}
227
228static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
229 u16 reg_addr, u16 val)
230{
231 struct port_info *pi = netdev_priv(dev);
232 struct adapter *adapter = pi->adapter;
233 int ret;
234 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
235
236 mutex_lock(&adapter->mdio_lock);
237 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
238 t3_write_reg(adapter, A_MI1_ADDR, addr);
239 t3_write_reg(adapter, A_MI1_DATA, val);
240 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
241 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
242 mutex_unlock(&adapter->mdio_lock);
243 return ret;
244}
245
246static const struct mdio_ops mi1_mdio_ops = {
247 .read = t3_mi1_read,
248 .write = t3_mi1_write,
249 .mode_support = MDIO_SUPPORTS_C22
250};
251
252/*
253 * Performs the address cycle for clause 45 PHYs.
254 * Must be called with the MDIO_LOCK held.
255 */
256static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
257 int reg_addr)
258{
259 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
260
261 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
262 t3_write_reg(adapter, A_MI1_ADDR, addr);
263 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
264 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
265 return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
266 MDIO_ATTEMPTS, 10);
267}
268
269/*
270 * MI1 read/write operations for indirect-addressed PHYs.
271 */
272static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
273 u16 reg_addr)
274{
275 struct port_info *pi = netdev_priv(dev);
276 struct adapter *adapter = pi->adapter;
277 int ret;
278
279 mutex_lock(&adapter->mdio_lock);
280 ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
281 if (!ret) {
282 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
283 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
284 MDIO_ATTEMPTS, 10);
285 if (!ret)
286 ret = t3_read_reg(adapter, A_MI1_DATA);
287 }
288 mutex_unlock(&adapter->mdio_lock);
289 return ret;
290}
291
292static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
293 u16 reg_addr, u16 val)
294{
295 struct port_info *pi = netdev_priv(dev);
296 struct adapter *adapter = pi->adapter;
297 int ret;
298
299 mutex_lock(&adapter->mdio_lock);
300 ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
301 if (!ret) {
302 t3_write_reg(adapter, A_MI1_DATA, val);
303 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
304 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
305 MDIO_ATTEMPTS, 10);
306 }
307 mutex_unlock(&adapter->mdio_lock);
308 return ret;
309}
310
311static const struct mdio_ops mi1_mdio_ext_ops = {
312 .read = mi1_ext_read,
313 .write = mi1_ext_write,
314 .mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
315};
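/*
 * To summarize the two access styles implemented above: the clause 22
 * operations program V_ST(1) and carry the register address directly in
 * A_MI1_ADDR, while the indirect (clause 45) operations clear the ST field
 * and issue a separate address cycle (MDI_OP 0) before the data cycle
 * (MDI_OP 3 for reads, MDI_OP 1 for writes), all under mdio_lock.
 */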
316
317/**
318 * t3_mdio_change_bits - modify the value of a PHY register
319 * @phy: the PHY to operate on
320 * @mmd: the device address
321 * @reg: the register address
322 * @clear: what part of the register value to mask off
323 * @set: what part of the register value to set
324 *
325 * Changes the value of a PHY register by applying a mask to its current
326 * value and ORing the result with a new value.
327 */
328int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
329 unsigned int set)
330{
331 int ret;
332 unsigned int val;
333
334 ret = t3_mdio_read(phy, mmd, reg, &val);
335 if (!ret) {
336 val &= ~clear;
337 ret = t3_mdio_write(phy, mmd, reg, val | set);
338 }
339 return ret;
340}
341
342/**
343 * t3_phy_reset - reset a PHY block
344 * @phy: the PHY to operate on
345 * @mmd: the device address of the PHY block to reset
346 * @wait: how long to wait for the reset to complete in 1ms increments
347 *
348 * Resets a PHY block and optionally waits for the reset to complete.
349 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
350 * for 10G PHYs.
351 */
352int t3_phy_reset(struct cphy *phy, int mmd, int wait)
353{
354 int err;
355 unsigned int ctl;
356
357 err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
358 MDIO_CTRL1_RESET);
359 if (err || !wait)
360 return err;
361
362 do {
363 err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
364 if (err)
365 return err;
366 ctl &= MDIO_CTRL1_RESET;
367 if (ctl)
368 msleep(1);
369 } while (ctl && --wait);
370
371 return ctl ? -1 : 0;
372}
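/*
 * Hypothetical usage, following the description above:
 * t3_phy_reset(phy, MDIO_MMD_PMAPMD, 100) resets the PMA/PMD block of a
 * 10G PHY and polls MDIO_CTRL1 for up to roughly 100 ms, while
 * t3_phy_reset(phy, 0, 0) issues the reset to a 10/100/1000 PHY without
 * waiting for completion.
 */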
373
374/**
375 * t3_phy_advertise - set the PHY advertisement registers for autoneg
376 * @phy: the PHY to operate on
377 * @advert: bitmap of capabilities the PHY should advertise
378 *
379 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
380 * requested capabilities.
381 */
382int t3_phy_advertise(struct cphy *phy, unsigned int advert)
383{
384 int err;
385 unsigned int val = 0;
386
387 err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
388 if (err)
389 return err;
390
391 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
392 if (advert & ADVERTISED_1000baseT_Half)
393 val |= ADVERTISE_1000HALF;
394 if (advert & ADVERTISED_1000baseT_Full)
395 val |= ADVERTISE_1000FULL;
396
397 err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
398 if (err)
399 return err;
400
401 val = 1;
402 if (advert & ADVERTISED_10baseT_Half)
403 val |= ADVERTISE_10HALF;
404 if (advert & ADVERTISED_10baseT_Full)
405 val |= ADVERTISE_10FULL;
406 if (advert & ADVERTISED_100baseT_Half)
407 val |= ADVERTISE_100HALF;
408 if (advert & ADVERTISED_100baseT_Full)
409 val |= ADVERTISE_100FULL;
410 if (advert & ADVERTISED_Pause)
411 val |= ADVERTISE_PAUSE_CAP;
412 if (advert & ADVERTISED_Asym_Pause)
413 val |= ADVERTISE_PAUSE_ASYM;
414 return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
415}
416
417/**
418 * t3_phy_advertise_fiber - set fiber PHY advertisement register
419 * @phy: the PHY to operate on
420 * @advert: bitmap of capabilities the PHY should advertise
421 *
422 * Sets a fiber PHY's advertisement register to advertise the
423 * requested capabilities.
424 */
425int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
426{
427 unsigned int val = 0;
428
429 if (advert & ADVERTISED_1000baseT_Half)
430 val |= ADVERTISE_1000XHALF;
431 if (advert & ADVERTISED_1000baseT_Full)
432 val |= ADVERTISE_1000XFULL;
433 if (advert & ADVERTISED_Pause)
434 val |= ADVERTISE_1000XPAUSE;
435 if (advert & ADVERTISED_Asym_Pause)
436 val |= ADVERTISE_1000XPSE_ASYM;
437 return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
438}
439
440/**
441 * t3_set_phy_speed_duplex - force PHY speed and duplex
442 * @phy: the PHY to operate on
443 * @speed: requested PHY speed
444 * @duplex: requested PHY duplex
445 *
446 * Force a 10/100/1000 PHY's speed and duplex. This also disables
447 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
448 */
449int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
450{
451 int err;
452 unsigned int ctl;
453
454 err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
455 if (err)
456 return err;
457
458 if (speed >= 0) {
459 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
460 if (speed == SPEED_100)
461 ctl |= BMCR_SPEED100;
462 else if (speed == SPEED_1000)
463 ctl |= BMCR_SPEED1000;
464 }
465 if (duplex >= 0) {
466 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
467 if (duplex == DUPLEX_FULL)
468 ctl |= BMCR_FULLDPLX;
469 }
470 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
471 ctl |= BMCR_ANENABLE;
472 return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
473}
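/*
 * Illustrative call (speed/duplex constants are the standard ethtool
 * values): t3_set_phy_speed_duplex(phy, SPEED_100, DUPLEX_FULL) forces
 * 100 Mb/s full duplex and clears BMCR_ANENABLE, whereas passing -1 for
 * either argument leaves that aspect of BMCR untouched.
 */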
474
475int t3_phy_lasi_intr_enable(struct cphy *phy)
476{
477 return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
478 MDIO_PMA_LASI_LSALARM);
479}
480
481int t3_phy_lasi_intr_disable(struct cphy *phy)
482{
483 return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
484}
485
486int t3_phy_lasi_intr_clear(struct cphy *phy)
487{
488 u32 val;
489
490 return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
491}
492
493int t3_phy_lasi_intr_handler(struct cphy *phy)
494{
495 unsigned int status;
496 int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
497 &status);
498
499 if (err)
500 return err;
501 return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
502}
503
504static const struct adapter_info t3_adap_info[] = {
505 {1, 1, 0,
506 F_GPIO2_OEN | F_GPIO4_OEN |
507 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
508 &mi1_mdio_ops, "Chelsio PE9000"},
509 {1, 1, 0,
510 F_GPIO2_OEN | F_GPIO4_OEN |
511 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
512 &mi1_mdio_ops, "Chelsio T302"},
513 {1, 0, 0,
514 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
515 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
516 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
517 &mi1_mdio_ext_ops, "Chelsio T310"},
518 {1, 1, 0,
519 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
520 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
521 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
522 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
523 &mi1_mdio_ext_ops, "Chelsio T320"},
524 {},
525 {},
526 {1, 0, 0,
527 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
528 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
529 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
530 &mi1_mdio_ext_ops, "Chelsio T310" },
531 {1, 0, 0,
532 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
533 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
534 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
535 &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
536};
537
538/*
539 * Return the adapter_info structure with a given index. Out-of-range indices
540 * return NULL.
541 */
542const struct adapter_info *t3_get_adapter_info(unsigned int id)
543{
544 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
545}
546
547struct port_type_info {
548 int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
549 int phy_addr, const struct mdio_ops *ops);
550};
551
552static const struct port_type_info port_types[] = {
553 { NULL },
554 { t3_ael1002_phy_prep },
555 { t3_vsc8211_phy_prep },
556 { NULL},
557 { t3_xaui_direct_phy_prep },
558 { t3_ael2005_phy_prep },
559 { t3_qt2045_phy_prep },
560 { t3_ael1006_phy_prep },
561 { NULL },
562 { t3_aq100x_phy_prep },
563 { t3_ael2020_phy_prep },
564};
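/*
 * port_types[] is indexed by the per-port "PHY complex" value read from the
 * VPD (see get_vpd_params() below); for instance the legacy defaults chosen
 * there, 1, 2 and 6, select the AEL1002, VSC8211 and QT2045 preparation
 * routines respectively. NULL entries are port types with no preparation
 * routine.
 */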
565
566#define VPD_ENTRY(name, len) \
567 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
568
569/*
570 * Partial EEPROM Vital Product Data structure. Includes only the ID and
571 * VPD-R sections.
572 */
573struct t3_vpd {
574 u8 id_tag;
575 u8 id_len[2];
576 u8 id_data[16];
577 u8 vpdr_tag;
578 u8 vpdr_len[2];
579 VPD_ENTRY(pn, 16); /* part number */
580 VPD_ENTRY(ec, 16); /* EC level */
581 VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
582 VPD_ENTRY(na, 12); /* MAC address base */
583 VPD_ENTRY(cclk, 6); /* core clock */
584 VPD_ENTRY(mclk, 6); /* mem clock */
585 VPD_ENTRY(uclk, 6); /* uP clk */
586 VPD_ENTRY(mdc, 6); /* MDIO clk */
587 VPD_ENTRY(mt, 2); /* mem timing */
588 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
589 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
590 VPD_ENTRY(port0, 2); /* PHY0 complex */
591 VPD_ENTRY(port1, 2); /* PHY1 complex */
592 VPD_ENTRY(port2, 2); /* PHY2 complex */
593 VPD_ENTRY(port3, 2); /* PHY3 complex */
594 VPD_ENTRY(rv, 1); /* csum */
595 u32 pad; /* for multiple-of-4 sizing and alignment */
596};
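/*
 * For reference, each VPD_ENTRY(name, len) above expands to three members:
 * a two-byte keyword, a one-byte length and a len-byte payload. For
 * example, VPD_ENTRY(cclk, 6) becomes "u8 cclk_kword[2]; u8 cclk_len;
 * u8 cclk_data[6];", which is why get_vpd_params() below reads fields such
 * as vpd.cclk_data and vpd.sn_data.
 */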
597
598#define EEPROM_MAX_POLL 40
599#define EEPROM_STAT_ADDR 0x4000
600#define VPD_BASE 0xc00
601
602/**
603 * t3_seeprom_read - read a VPD EEPROM location
604 * @adapter: adapter to read
605 * @addr: EEPROM address
606 * @data: where to store the read data
607 *
608 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
609 * VPD ROM capability. A zero is written to the flag bit when the
610 * address is written to the control register. The hardware device will
611 * set the flag to 1 when 4 bytes have been read into the data register.
612 */
613int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
614{
615 u16 val;
616 int attempts = EEPROM_MAX_POLL;
617 u32 v;
618 unsigned int base = adapter->params.pci.vpd_cap_addr;
619
620 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
621 return -EINVAL;
622
623 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
624 do {
625 udelay(10);
626 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
627 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
628
629 if (!(val & PCI_VPD_ADDR_F)) {
630 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
631 return -EIO;
632 }
633 pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
634 *data = cpu_to_le32(v);
635 return 0;
636}
637
638/**
639 * t3_seeprom_write - write a VPD EEPROM location
640 * @adapter: adapter to write
641 * @addr: EEPROM address
642 * @data: value to write
643 *
644 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
645 * VPD ROM capability.
646 */
647int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
648{
649 u16 val;
650 int attempts = EEPROM_MAX_POLL;
651 unsigned int base = adapter->params.pci.vpd_cap_addr;
652
653 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
654 return -EINVAL;
655
656 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
657 le32_to_cpu(data));
658	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
659 addr | PCI_VPD_ADDR_F);
660 do {
661 msleep(1);
662 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
663 } while ((val & PCI_VPD_ADDR_F) && --attempts);
664
665 if (val & PCI_VPD_ADDR_F) {
666 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
667 return -EIO;
668 }
669 return 0;
670}
671
672/**
673 * t3_seeprom_wp - enable/disable EEPROM write protection
674 * @adapter: the adapter
675 * @enable: 1 to enable write protection, 0 to disable it
676 *
677 * Enables or disables write protection on the serial EEPROM.
678 */
679int t3_seeprom_wp(struct adapter *adapter, int enable)
680{
681 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
682}
683
684/**
685 * get_vpd_params - read VPD parameters from VPD EEPROM
686 * @adapter: adapter to read
687 * @p: where to store the parameters
688 *
689 * Reads card parameters stored in VPD EEPROM.
690 */
691static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
692{
693 int i, addr, ret;
694 struct t3_vpd vpd;
695
696 /*
697 * Card information is normally at VPD_BASE but some early cards had
698 * it at 0.
699 */
700 ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
701 if (ret)
702 return ret;
703 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
704
705 for (i = 0; i < sizeof(vpd); i += 4) {
706 ret = t3_seeprom_read(adapter, addr + i,
707 (__le32 *)((u8 *)&vpd + i));
708 if (ret)
709 return ret;
710 }
711
712 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
713 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
714 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
715 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
716 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
717 memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
718
719 /* Old eeproms didn't have port information */
720 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
721 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
722 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
723 } else {
724 p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
725 p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
726 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
727 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
728 }
729
730 for (i = 0; i < 6; i++)
731 p->eth_base[i] = hex_to_bin(vpd.na_data[2 * i]) * 16 +
732 hex_to_bin(vpd.na_data[2 * i + 1]);
733 return 0;
734}
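/*
 * The MAC address is stored in the VPD as ASCII hex digits; for a purely
 * hypothetical na_data of "0007430000A1", the loop above yields
 * eth_base[] = { 0x00, 0x07, 0x43, 0x00, 0x00, 0xa1 }, i.e. the address
 * 00:07:43:00:00:a1. As the VPD field name suggests, this is the base from
 * which the per-port addresses are derived elsewhere in the driver.
 */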
735
736/* serial flash and firmware constants */
737enum {
738 SF_ATTEMPTS = 5, /* max retries for SF1 operations */
739 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
740 SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
741
742 /* flash command opcodes */
743 SF_PROG_PAGE = 2, /* program page */
744 SF_WR_DISABLE = 4, /* disable writes */
745 SF_RD_STATUS = 5, /* read status register */
746 SF_WR_ENABLE = 6, /* enable writes */
747 SF_RD_DATA_FAST = 0xb, /* read flash */
748 SF_ERASE_SECTOR = 0xd8, /* erase sector */
749
750 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
751 FW_VERS_ADDR = 0x7fffc, /* flash address holding FW version */
752 FW_MIN_SIZE = 8 /* at least version and csum */
753};
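/*
 * With the constants above, the serial flash is 8 x 64 KB = 512 KB.
 * FW_FLASH_BOOT_ADDR = 0x70000 therefore falls in sector 7
 * (0x70000 >> 16), the last sector, and FW_VERS_ADDR = 0x7fffc holds the
 * version word in the final 4 bytes of that sector; t3_load_fw() below
 * erases only that single sector before programming the image.
 */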
754
755/**
756 * sf1_read - read data from the serial flash
757 * @adapter: the adapter
758 * @byte_cnt: number of bytes to read
759 * @cont: whether another operation will be chained
760 * @valp: where to store the read data
761 *
762 * Reads up to 4 bytes of data from the serial flash. The location of
763 * the read needs to be specified prior to calling this by issuing the
764 * appropriate commands to the serial flash.
765 */
766static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
767 u32 *valp)
768{
769 int ret;
770
771 if (!byte_cnt || byte_cnt > 4)
772 return -EINVAL;
773 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
774 return -EBUSY;
775 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
776 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
777 if (!ret)
778 *valp = t3_read_reg(adapter, A_SF_DATA);
779 return ret;
780}
781
782/**
783 * sf1_write - write data to the serial flash
784 * @adapter: the adapter
785 * @byte_cnt: number of bytes to write
786 * @cont: whether another operation will be chained
787 * @val: value to write
788 *
789 * Writes up to 4 bytes of data to the serial flash. The location of
790 * the write needs to be specified prior to calling this by issuing the
791 * appropriate commands to the serial flash.
792 */
793static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
794 u32 val)
795{
796 if (!byte_cnt || byte_cnt > 4)
797 return -EINVAL;
798 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
799 return -EBUSY;
800 t3_write_reg(adapter, A_SF_DATA, val);
801 t3_write_reg(adapter, A_SF_OP,
802 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
803 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
804}
805
806/**
807 * flash_wait_op - wait for a flash operation to complete
808 * @adapter: the adapter
809 * @attempts: max number of polls of the status register
810 * @delay: delay between polls in ms
811 *
812 * Wait for a flash operation to complete by polling the status register.
813 */
814static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
815{
816 int ret;
817 u32 status;
818
819 while (1) {
820 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
821 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
822 return ret;
823 if (!(status & 1))
824 return 0;
825 if (--attempts == 0)
826 return -EAGAIN;
827 if (delay)
828 msleep(delay);
829 }
830}
831
832/**
833 * t3_read_flash - read words from serial flash
834 * @adapter: the adapter
835 * @addr: the start address for the read
836 * @nwords: how many 32-bit words to read
837 * @data: where to store the read data
838 * @byte_oriented: whether to store data as bytes or as words
839 *
840 * Read the specified number of 32-bit words from the serial flash.
841 * If @byte_oriented is set the read data is stored as a byte array
842 * (i.e., big-endian), otherwise as 32-bit words in the platform's
843 * natural endianness.
844 */
845static int t3_read_flash(struct adapter *adapter, unsigned int addr,
846 unsigned int nwords, u32 *data, int byte_oriented)
847{
848 int ret;
849
850 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
851 return -EINVAL;
852
853 addr = swab32(addr) | SF_RD_DATA_FAST;
854
855 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
856 (ret = sf1_read(adapter, 1, 1, data)) != 0)
857 return ret;
858
859 for (; nwords; nwords--, data++) {
860 ret = sf1_read(adapter, 4, nwords > 1, data);
861 if (ret)
862 return ret;
863 if (byte_oriented)
864 *data = htonl(*data);
865 }
866 return 0;
867}
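/*
 * A note on the address encoding above, assuming the SF interface shifts
 * A_SF_DATA out least-significant byte first: for addr = 0x70000,
 * swab32(addr) | SF_RD_DATA_FAST = 0x0000070b, which goes on the wire as
 * 0x0b, 0x07, 0x00, 0x00, i.e. the fast-read opcode followed by the 24-bit
 * address most-significant byte first. The initial 1-byte sf1_read(),
 * whose result is overwritten in the loop, appears to supply the dummy
 * cycle the fast-read command requires.
 */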
868
869/**
870 * t3_write_flash - write up to a page of data to the serial flash
871 * @adapter: the adapter
872 * @addr: the start address to write
873 * @n: length of data to write
874 * @data: the data to write
875 *
876 * Writes up to a page of data (256 bytes) to the serial flash starting
877 * at the given address.
878 */
879static int t3_write_flash(struct adapter *adapter, unsigned int addr,
880 unsigned int n, const u8 *data)
881{
882 int ret;
883 u32 buf[64];
884 unsigned int i, c, left, val, offset = addr & 0xff;
885
886 if (addr + n > SF_SIZE || offset + n > 256)
887 return -EINVAL;
888
889 val = swab32(addr) | SF_PROG_PAGE;
890
891 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
892 (ret = sf1_write(adapter, 4, 1, val)) != 0)
893 return ret;
894
895 for (left = n; left; left -= c) {
896 c = min(left, 4U);
897 for (val = 0, i = 0; i < c; ++i)
898 val = (val << 8) + *data++;
899
900 ret = sf1_write(adapter, c, c != left, val);
901 if (ret)
902 return ret;
903 }
904 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
905 return ret;
906
907 /* Read the page to verify the write succeeded */
908 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
909 if (ret)
910 return ret;
911
912 if (memcmp(data - n, (u8 *) buf + offset, n))
913 return -EIO;
914 return 0;
915}
916
917/**
918 * t3_get_tp_version - read the tp sram version
919 * @adapter: the adapter
920 * @vers: where to place the version
921 *
922 * Reads the protocol SRAM version from SRAM.
923 */
924int t3_get_tp_version(struct adapter *adapter, u32 *vers)
925{
926 int ret;
927
928 /* Get version loaded in SRAM */
929 t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
930 ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
931 1, 1, 5, 1);
932 if (ret)
933 return ret;
934
935 *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
936
937 return 0;
938}
939
940/**
941 * t3_check_tpsram_version - check the TP SRAM version
942 * @adapter: the adapter
943 *
944 * Reads the protocol SRAM version from SRAM and checks it against the driver.
945 */
946int t3_check_tpsram_version(struct adapter *adapter)
947{
948 int ret;
949 u32 vers;
950 unsigned int major, minor;
951
952 if (adapter->params.rev == T3_REV_A)
953 return 0;
954
955
956 ret = t3_get_tp_version(adapter, &vers);
957 if (ret)
958 return ret;
959
960 major = G_TP_VERSION_MAJOR(vers);
961 minor = G_TP_VERSION_MINOR(vers);
962
963 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
964 return 0;
965 else {
966 CH_ERR(adapter, "found wrong TP version (%u.%u), "
967 "driver compiled for version %d.%d\n", major, minor,
968 TP_VERSION_MAJOR, TP_VERSION_MINOR);
969 }
970 return -EINVAL;
971}
972
973/**
974 * t3_check_tpsram - check if provided protocol SRAM
975 * is compatible with this driver
976 * @adapter: the adapter
977 * @tp_sram: the protocol SRAM image to check
978 * @size: image size
979 *
980 * Verifies the checksum of the provided protocol SRAM image.
981 * Returns 0 if the image is valid, a negative error otherwise.
982 */
983int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
984 unsigned int size)
985{
986 u32 csum;
987 unsigned int i;
988 const __be32 *p = (const __be32 *)tp_sram;
989
990 /* Verify checksum */
991 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
992 csum += ntohl(p[i]);
993 if (csum != 0xffffffff) {
994 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
995 csum);
996 return -EINVAL;
997 }
998
999 return 0;
1000}
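/*
 * The checksum convention used here (and again in t3_load_fw() below) is
 * that the big-endian 32-bit words of the image, including the trailing
 * checksum word, sum to 0xffffffff; equivalently, the last word is the
 * one's complement of the 32-bit sum of everything before it.
 */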
1001
1002enum fw_version_type {
1003 FW_VERSION_N3,
1004 FW_VERSION_T3
1005};
1006
1007/**
1008 * t3_get_fw_version - read the firmware version
1009 * @adapter: the adapter
1010 * @vers: where to place the version
1011 *
1012 * Reads the FW version from flash.
1013 */
1014int t3_get_fw_version(struct adapter *adapter, u32 *vers)
1015{
1016 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
1017}
1018
1019/**
1020 * t3_check_fw_version - check if the FW is compatible with this driver
1021 * @adapter: the adapter
1022 *
1023 * Checks if an adapter's FW is compatible with the driver. Returns 0
1024 * if the versions are compatible, a negative error otherwise.
1025 */
1026int t3_check_fw_version(struct adapter *adapter)
1027{
1028 int ret;
1029 u32 vers;
1030 unsigned int type, major, minor;
1031
1032 ret = t3_get_fw_version(adapter, &vers);
1033 if (ret)
1034 return ret;
1035
1036 type = G_FW_VERSION_TYPE(vers);
1037 major = G_FW_VERSION_MAJOR(vers);
1038 minor = G_FW_VERSION_MINOR(vers);
1039
1040 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1041 minor == FW_VERSION_MINOR)
1042 return 0;
1043 else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
1044		CH_WARN(adapter, "found old FW minor version (%u.%u), "
1045 "driver compiled for version %u.%u\n", major, minor,
1046 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1047 else {
1048		CH_WARN(adapter, "found newer FW version (%u.%u), "
1049 "driver compiled for version %u.%u\n", major, minor,
1050 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1051 return 0;
1052 }
1053 return -EINVAL;
1054}
1055
1056/**
1057 * t3_flash_erase_sectors - erase a range of flash sectors
1058 * @adapter: the adapter
1059 * @start: the first sector to erase
1060 * @end: the last sector to erase
1061 *
1062 * Erases the sectors in the given range.
1063 */
1064static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1065{
1066 while (start <= end) {
1067 int ret;
1068
1069 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1070 (ret = sf1_write(adapter, 4, 0,
1071 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1072 (ret = flash_wait_op(adapter, 5, 500)) != 0)
1073 return ret;
1074 start++;
1075 }
1076 return 0;
1077}
1078
1079/**
1080 * t3_load_fw - download firmware
1081 * @adapter: the adapter
1082 * @fw_data: the firmware image to write
1083 * @size: image size
1084 *
1085 * Write the supplied firmware image to the card's serial flash.
1086 * The FW image has the following sections: @size - 8 bytes of code and
1087 * data, followed by 4 bytes of FW version, followed by the 32-bit
1088 * 1's complement checksum of the whole image.
1089 */
1090int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
1091{
1092 u32 csum;
1093 unsigned int i;
1094 const __be32 *p = (const __be32 *)fw_data;
1095 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1096
1097 if ((size & 3) || size < FW_MIN_SIZE)
1098 return -EINVAL;
1099 if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
1100 return -EFBIG;
1101
1102 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1103 csum += ntohl(p[i]);
1104 if (csum != 0xffffffff) {
1105 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1106 csum);
1107 return -EINVAL;
1108 }
1109
1110 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1111 if (ret)
1112 goto out;
1113
1114 size -= 8; /* trim off version and checksum */
1115 for (addr = FW_FLASH_BOOT_ADDR; size;) {
1116 unsigned int chunk_size = min(size, 256U);
1117
1118 ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
1119 if (ret)
1120 goto out;
1121
1122 addr += chunk_size;
1123 fw_data += chunk_size;
1124 size -= chunk_size;
1125 }
1126
1127 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
1128out:
1129 if (ret)
1130 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1131 return ret;
1132}
1133
1134#define CIM_CTL_BASE 0x2000
1135
1136/**
1137 * t3_cim_ctl_blk_read - read a block from CIM control region
1138 *
1139 * @adap: the adapter
1140 * @addr: the start address within the CIM control region
1141 * @n: number of words to read
1142 * @valp: where to store the result
1143 *
1144 * Reads a block of 4-byte words from the CIM control region.
1145 */
1146int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
1147 unsigned int n, unsigned int *valp)
1148{
1149 int ret = 0;
1150
1151 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1152 return -EBUSY;
1153
1154 for ( ; !ret && n--; addr += 4) {
1155 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1156 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1157 0, 5, 2);
1158 if (!ret)
1159 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1160 }
1161 return ret;
1162}
1163
1164static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
1165 u32 *rx_hash_high, u32 *rx_hash_low)
1166{
1167 /* stop Rx unicast traffic */
1168 t3_mac_disable_exact_filters(mac);
1169
1170 /* stop broadcast, multicast, promiscuous mode traffic */
1171 *rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
1172 t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
1173 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1174 F_DISBCAST);
1175
1176 *rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
1177 t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);
1178
1179 *rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
1180 t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);
1181
1182 /* Leave time to drain max RX fifo */
1183 msleep(1);
1184}
1185
1186static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
1187 u32 rx_hash_high, u32 rx_hash_low)
1188{
1189 t3_mac_enable_exact_filters(mac);
1190 t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
1191 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1192 rx_cfg);
1193 t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
1194 t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
1195}
1196
1197/**
1198 * t3_link_changed - handle interface link changes
1199 * @adapter: the adapter
1200 * @port_id: the port index that changed link state
1201 *
1202 * Called when a port's link settings change to propagate the new values
1203 * to the associated PHY and MAC. After performing the common tasks it
1204 * invokes an OS-specific handler.
1205 */
1206void t3_link_changed(struct adapter *adapter, int port_id)
1207{
1208 int link_ok, speed, duplex, fc;
1209 struct port_info *pi = adap2pinfo(adapter, port_id);
1210 struct cphy *phy = &pi->phy;
1211 struct cmac *mac = &pi->mac;
1212 struct link_config *lc = &pi->link_config;
1213
1214 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1215
1216 if (!lc->link_ok && link_ok) {
1217 u32 rx_cfg, rx_hash_high, rx_hash_low;
1218 u32 status;
1219
1220 t3_xgm_intr_enable(adapter, port_id);
1221 t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
1222 t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1223 t3_mac_enable(mac, MAC_DIRECTION_RX);
1224
1225 status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
1226 if (status & F_LINKFAULTCHANGE) {
1227 mac->stats.link_faults++;
1228 pi->link_fault = 1;
1229 }
1230 t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
1231 }
1232
1233 if (lc->requested_fc & PAUSE_AUTONEG)
1234 fc &= lc->requested_fc;
1235 else
1236 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1237
1238 if (link_ok == lc->link_ok && speed == lc->speed &&
1239 duplex == lc->duplex && fc == lc->fc)
1240 return; /* nothing changed */
1241
1242 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1243 uses_xaui(adapter)) {
1244 if (link_ok)
1245 t3b_pcs_reset(mac);
1246 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1247 link_ok ? F_TXACTENABLE | F_RXEN : 0);
1248 }
1249 lc->link_ok = link_ok;
1250 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1251 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1252
1253 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1254 /* Set MAC speed, duplex, and flow control to match PHY. */
1255 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1256 lc->fc = fc;
1257 }
1258
1259 t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
1260 speed, duplex, fc);
1261}
1262
1263void t3_link_fault(struct adapter *adapter, int port_id)
1264{
1265 struct port_info *pi = adap2pinfo(adapter, port_id);
1266 struct cmac *mac = &pi->mac;
1267 struct cphy *phy = &pi->phy;
1268 struct link_config *lc = &pi->link_config;
1269 int link_ok, speed, duplex, fc, link_fault;
1270 u32 rx_cfg, rx_hash_high, rx_hash_low;
1271
1272 t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
1273
1274 if (adapter->params.rev > 0 && uses_xaui(adapter))
1275 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);
1276
1277 t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1278 t3_mac_enable(mac, MAC_DIRECTION_RX);
1279
1280 t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
1281
1282 link_fault = t3_read_reg(adapter,
1283 A_XGM_INT_STATUS + mac->offset);
1284 link_fault &= F_LINKFAULTCHANGE;
1285
1286 link_ok = lc->link_ok;
1287 speed = lc->speed;
1288 duplex = lc->duplex;
1289 fc = lc->fc;
1290
1291 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1292
1293 if (link_fault) {
1294 lc->link_ok = 0;
1295 lc->speed = SPEED_INVALID;
1296 lc->duplex = DUPLEX_INVALID;
1297
1298 t3_os_link_fault(adapter, port_id, 0);
1299
1300 /* Account link faults only when the phy reports a link up */
1301 if (link_ok)
1302 mac->stats.link_faults++;
1303 } else {
1304 if (link_ok)
1305 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1306 F_TXACTENABLE | F_RXEN);
1307
1308 pi->link_fault = 0;
1309 lc->link_ok = (unsigned char)link_ok;
1310 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1311 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1312 t3_os_link_fault(adapter, port_id, link_ok);
1313 }
1314}
1315
1316/**
1317 * t3_link_start - apply link configuration to MAC/PHY
1318 * @phy: the PHY to setup
1319 * @mac: the MAC to setup
1320 * @lc: the requested link configuration
1321 *
1322 * Set up a port's MAC and PHY according to a desired link configuration.
1323 * - If the PHY can auto-negotiate first decide what to advertise, then
1324 * enable/disable auto-negotiation as desired, and reset.
1325 * - If the PHY does not auto-negotiate just reset it.
1326 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1327 * otherwise do it later based on the outcome of auto-negotiation.
1328 */
1329int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1330{
1331 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1332
1333 lc->link_ok = 0;
1334 if (lc->supported & SUPPORTED_Autoneg) {
1335 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1336 if (fc) {
1337 lc->advertising |= ADVERTISED_Asym_Pause;
1338 if (fc & PAUSE_RX)
1339 lc->advertising |= ADVERTISED_Pause;
1340 }
1341 phy->ops->advertise(phy, lc->advertising);
1342
1343 if (lc->autoneg == AUTONEG_DISABLE) {
1344 lc->speed = lc->requested_speed;
1345 lc->duplex = lc->requested_duplex;
1346 lc->fc = (unsigned char)fc;
1347 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1348 fc);
1349 /* Also disables autoneg */
1350 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1351 } else
1352 phy->ops->autoneg_enable(phy);
1353 } else {
1354 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1355 lc->fc = (unsigned char)fc;
1356 phy->ops->reset(phy, 0);
1357 }
1358 return 0;
1359}
1360
1361/**
1362 * t3_set_vlan_accel - control HW VLAN extraction
1363 * @adapter: the adapter
1364 * @ports: bitmap of adapter ports to operate on
1365 * @on: enable (1) or disable (0) HW VLAN extraction
1366 *
1367 * Enables or disables HW extraction of VLAN tags for the given ports.
1368 */
1369void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1370{
1371 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1372 ports << S_VLANEXTRACTIONENABLE,
1373 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1374}
1375
1376struct intr_info {
1377 unsigned int mask; /* bits to check in interrupt status */
1378 const char *msg; /* message to print or NULL */
1379 short stat_idx; /* stat counter to increment or -1 */
1380 unsigned short fatal; /* whether the condition reported is fatal */
1381};
1382
1383/**
1384 * t3_handle_intr_status - table driven interrupt handler
1385 * @adapter: the adapter that generated the interrupt
1386 * @reg: the interrupt status register to process
1387 * @mask: a mask to apply to the interrupt status
1388 * @acts: table of interrupt actions
1389 * @stats: statistics counters tracking interrupt occurrences
1390 *
1391 * A table driven interrupt handler that applies a set of masks to an
1392 * interrupt status word and performs the corresponding actions if the
1393 * interrupts described by the mask have occurred. The actions include
1394 * optionally printing a warning or alert message, and optionally
1395 * incrementing a stat counter. The table is terminated by an entry
1396 * specifying mask 0. Returns the number of fatal interrupt conditions.
1397 */
1398static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1399 unsigned int mask,
1400 const struct intr_info *acts,
1401 unsigned long *stats)
1402{
1403 int fatal = 0;
1404 unsigned int status = t3_read_reg(adapter, reg) & mask;
1405
1406 for (; acts->mask; ++acts) {
1407 if (!(status & acts->mask))
1408 continue;
1409 if (acts->fatal) {
1410 fatal++;
1411 CH_ALERT(adapter, "%s (0x%x)\n",
1412 acts->msg, status & acts->mask);
1413 status &= ~acts->mask;
1414 } else if (acts->msg)
1415 CH_WARN(adapter, "%s (0x%x)\n",
1416 acts->msg, status & acts->mask);
1417 if (acts->stat_idx >= 0)
1418 stats[acts->stat_idx]++;
1419 }
1420 if (status) /* clear processed interrupts */
1421 t3_write_reg(adapter, reg, status);
1422 return fatal;
1423}
1424
1425#define SGE_INTR_MASK (F_RSPQDISABLED | \
1426 F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1427 F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1428 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1429 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1430 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1431 F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
1432 F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
1433 F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
1434 F_LOPIODRBDROPERR)
1435#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1436 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1437 F_NFASRCHFAIL)
1438#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1439#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1440 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1441 F_TXFIFO_UNDERRUN)
1442#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1443 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1444 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1445 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1446 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1447 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1448#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1449 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1450 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1451 F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1452 F_TXPARERR | V_BISTERR(M_BISTERR))
1453#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1454 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1455 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1456#define ULPTX_INTR_MASK 0xfc
1457#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1458 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1459 F_ZERO_SWITCH_ERROR)
1460#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1461 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1462 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1463 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1464 F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1465 F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1466 F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1467 F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1468#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1469 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1470 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1471#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1472 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1473 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1474#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1475 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1476 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1477 V_MCAPARERRENB(M_MCAPARERRENB))
1478#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
1479#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1480 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1481 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1482 F_MPS0 | F_CPL_SWITCH)
1483/*
1484 * Interrupt handler for the PCIX1 module.
1485 */
1486static void pci_intr_handler(struct adapter *adapter)
1487{
1488 static const struct intr_info pcix1_intr_info[] = {
1489 {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1490 {F_SIGTARABT, "PCI signaled target abort", -1, 1},
1491 {F_RCVTARABT, "PCI received target abort", -1, 1},
1492 {F_RCVMSTABT, "PCI received master abort", -1, 1},
1493 {F_SIGSYSERR, "PCI signaled system error", -1, 1},
1494 {F_DETPARERR, "PCI detected parity error", -1, 1},
1495 {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1496 {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1497 {F_RCVSPLCMPERR, "PCI received split completion error", -1,
1498 1},
1499 {F_DETCORECCERR, "PCI correctable ECC error",
1500 STAT_PCI_CORR_ECC, 0},
1501 {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1502 {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1503 {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1504 1},
1505 {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1506 1},
1507 {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1508 1},
1509 {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1510 "error", -1, 1},
1511 {0}
1512 };
1513
1514 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1515 pcix1_intr_info, adapter->irq_stats))
1516 t3_fatal_err(adapter);
1517}
1518
1519/*
1520 * Interrupt handler for the PCIE module.
1521 */
1522static void pcie_intr_handler(struct adapter *adapter)
1523{
1524 static const struct intr_info pcie_intr_info[] = {
1525 {F_PEXERR, "PCI PEX error", -1, 1},
1526 {F_UNXSPLCPLERRR,
1527 "PCI unexpected split completion DMA read error", -1, 1},
1528 {F_UNXSPLCPLERRC,
1529 "PCI unexpected split completion DMA command error", -1, 1},
1530 {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1531 {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1532 {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1533 {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1534 {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1535 "PCI MSI-X table/PBA parity error", -1, 1},
1536 {F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
1537 {F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
1538 {F_RXPARERR, "PCI Rx parity error", -1, 1},
1539 {F_TXPARERR, "PCI Tx parity error", -1, 1},
1540 {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
1541 {0}
1542 };
1543
1544 if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1545 CH_ALERT(adapter, "PEX error code 0x%x\n",
1546 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1547
1548 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1549 pcie_intr_info, adapter->irq_stats))
1550 t3_fatal_err(adapter);
1551}
1552
1553/*
1554 * TP interrupt handler.
1555 */
1556static void tp_intr_handler(struct adapter *adapter)
1557{
1558 static const struct intr_info tp_intr_info[] = {
1559 {0xffffff, "TP parity error", -1, 1},
1560 {0x1000000, "TP out of Rx pages", -1, 1},
1561 {0x2000000, "TP out of Tx pages", -1, 1},
1562 {0}
1563 };
1564
1565 static const struct intr_info tp_intr_info_t3c[] = {
1566 {0x1fffffff, "TP parity error", -1, 1},
1567 {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
1568 {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
1569 {0}
1570 };
1571
1572 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1573 adapter->params.rev < T3_REV_C ?
1574 tp_intr_info : tp_intr_info_t3c, NULL))
1575 t3_fatal_err(adapter);
1576}
1577
1578/*
1579 * CIM interrupt handler.
1580 */
1581static void cim_intr_handler(struct adapter *adapter)
1582{
1583 static const struct intr_info cim_intr_info[] = {
1584 {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1585 {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1586 {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1587 {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1588 {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1589 {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1590 {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1591 {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1592 {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1593 {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1594 {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1595 {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1596 {F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
1597 {F_ICACHEPARERR, "CIM icache parity error", -1, 1},
1598 {F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
1599 {F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
1600 {F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
1601 {F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
1602 {F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
1603 {F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
1604 {F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
1605 {F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
1606 {F_ITAGPARERR, "CIM itag parity error", -1, 1},
1607 {F_DTAGPARERR, "CIM dtag parity error", -1, 1},
1608 {0}
1609 };
1610
1611 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1612 cim_intr_info, NULL))
1613 t3_fatal_err(adapter);
1614}
1615
1616/*
1617 * ULP RX interrupt handler.
1618 */
1619static void ulprx_intr_handler(struct adapter *adapter)
1620{
1621 static const struct intr_info ulprx_intr_info[] = {
1622 {F_PARERRDATA, "ULP RX data parity error", -1, 1},
1623 {F_PARERRPCMD, "ULP RX command parity error", -1, 1},
1624 {F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
1625 {F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
1626 {F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
1627 {F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
1628 {F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
1629 {F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
1630 {0}
1631 };
1632
1633 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1634 ulprx_intr_info, NULL))
1635 t3_fatal_err(adapter);
1636}
1637
1638/*
1639 * ULP TX interrupt handler.
1640 */
1641static void ulptx_intr_handler(struct adapter *adapter)
1642{
1643 static const struct intr_info ulptx_intr_info[] = {
1644 {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1645 STAT_ULP_CH0_PBL_OOB, 0},
1646 {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1647 STAT_ULP_CH1_PBL_OOB, 0},
1648 {0xfc, "ULP TX parity error", -1, 1},
1649 {0}
1650 };
1651
1652 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1653 ulptx_intr_info, adapter->irq_stats))
1654 t3_fatal_err(adapter);
1655}
1656
1657#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1658 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1659 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1660 F_ICSPI1_TX_FRAMING_ERROR)
1661#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1662 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1663 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1664 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1665
1666/*
1667 * PM TX interrupt handler.
1668 */
1669static void pmtx_intr_handler(struct adapter *adapter)
1670{
1671 static const struct intr_info pmtx_intr_info[] = {
1672 {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1673 {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1674 {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1675 {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1676 "PMTX ispi parity error", -1, 1},
1677 {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1678 "PMTX ospi parity error", -1, 1},
1679 {0}
1680 };
1681
1682 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1683 pmtx_intr_info, NULL))
1684 t3_fatal_err(adapter);
1685}
1686
1687#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1688 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1689 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1690 F_IESPI1_TX_FRAMING_ERROR)
1691#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1692 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1693 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1694 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1695
1696/*
1697 * PM RX interrupt handler.
1698 */
1699static void pmrx_intr_handler(struct adapter *adapter)
1700{
1701 static const struct intr_info pmrx_intr_info[] = {
1702 {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1703 {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1704 {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1705 {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1706 "PMRX ispi parity error", -1, 1},
1707 {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1708 "PMRX ospi parity error", -1, 1},
1709 {0}
1710 };
1711
1712 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1713 pmrx_intr_info, NULL))
1714 t3_fatal_err(adapter);
1715}
1716
1717/*
1718 * CPL switch interrupt handler.
1719 */
1720static void cplsw_intr_handler(struct adapter *adapter)
1721{
1722 static const struct intr_info cplsw_intr_info[] = {
1723 {F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
1724 {F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
1725 {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1726 {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1727 {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1728 {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1729 {0}
1730 };
1731
1732 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1733 cplsw_intr_info, NULL))
1734 t3_fatal_err(adapter);
1735}
1736
1737/*
1738 * MPS interrupt handler.
1739 */
1740static void mps_intr_handler(struct adapter *adapter)
1741{
1742 static const struct intr_info mps_intr_info[] = {
1743 {0x1ff, "MPS parity error", -1, 1},
1744 {0}
1745 };
1746
1747 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1748 mps_intr_info, NULL))
1749 t3_fatal_err(adapter);
1750}
1751
1752#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1753
1754/*
1755 * MC7 interrupt handler.
1756 */
1757static void mc7_intr_handler(struct mc7 *mc7)
1758{
1759 struct adapter *adapter = mc7->adapter;
1760 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1761
1762 if (cause & F_CE) {
1763 mc7->stats.corr_err++;
1764 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1765 "data 0x%x 0x%x 0x%x\n", mc7->name,
1766 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1767 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1768 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1769 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1770 }
1771
1772 if (cause & F_UE) {
1773 mc7->stats.uncorr_err++;
1774 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1775 "data 0x%x 0x%x 0x%x\n", mc7->name,
1776 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1777 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1778 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1779 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1780 }
1781
1782 if (G_PE(cause)) {
1783 mc7->stats.parity_err++;
1784 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1785 mc7->name, G_PE(cause));
1786 }
1787
1788 if (cause & F_AE) {
1789 u32 addr = 0;
1790
1791 if (adapter->params.rev > 0)
1792 addr = t3_read_reg(adapter,
1793 mc7->offset + A_MC7_ERR_ADDR);
1794 mc7->stats.addr_err++;
1795 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1796 mc7->name, addr);
1797 }
1798
1799 if (cause & MC7_INTR_FATAL)
1800 t3_fatal_err(adapter);
1801
1802 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1803}
1804
1805#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1806 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1807/*
1808 * XGMAC interrupt handler.
1809 */
1810static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1811{
1812 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1813 /*
1814 * We mask out interrupt causes for which we're not taking interrupts.
1815 * This allows us to use polling logic to monitor some of the other
1816 * conditions when taking interrupts would impose too much load on the
1817 * system.
1818 */
1819 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
1820 ~F_RXFIFO_OVERFLOW;
1821
1822 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1823 mac->stats.tx_fifo_parity_err++;
1824 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1825 }
1826 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1827 mac->stats.rx_fifo_parity_err++;
1828 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1829 }
1830 if (cause & F_TXFIFO_UNDERRUN)
1831 mac->stats.tx_fifo_urun++;
1832 if (cause & F_RXFIFO_OVERFLOW)
1833 mac->stats.rx_fifo_ovfl++;
1834 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1835 mac->stats.serdes_signal_loss++;
1836 if (cause & F_XAUIPCSCTCERR)
1837 mac->stats.xaui_pcs_ctc_err++;
1838 if (cause & F_XAUIPCSALIGNCHANGE)
1839 mac->stats.xaui_pcs_align_change++;
1840 if (cause & F_XGM_INT) {
1841 t3_set_reg_field(adap,
1842 A_XGM_INT_ENABLE + mac->offset,
1843 F_XGM_INT, 0);
1844 mac->stats.link_faults++;
1845
1846 t3_os_link_fault_handler(adap, idx);
1847 }
1848
1849 if (cause & XGM_INTR_FATAL)
1850 t3_fatal_err(adap);
1851
1852 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1853 return cause != 0;
1854}
1855
1856/*
1857 * Interrupt handler for PHY events.
1858 */
1859int t3_phy_intr_handler(struct adapter *adapter)
1860{
1861 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1862
1863 for_each_port(adapter, i) {
1864 struct port_info *p = adap2pinfo(adapter, i);
1865
1866 if (!(p->phy.caps & SUPPORTED_IRQ))
1867 continue;
1868
1869 if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1870 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1871
1872 if (phy_cause & cphy_cause_link_change)
1873 t3_link_changed(adapter, i);
1874 if (phy_cause & cphy_cause_fifo_error)
1875 p->phy.fifo_errors++;
1876 if (phy_cause & cphy_cause_module_change)
1877 t3_os_phymod_changed(adapter, i);
1878 }
1879 }
1880
1881 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1882 return 0;
1883}
1884
1885/*
1886 * T3 slow path (non-data) interrupt handler.
1887 */
1888int t3_slow_intr_handler(struct adapter *adapter)
1889{
1890 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1891
1892 cause &= adapter->slow_intr_mask;
1893 if (!cause)
1894 return 0;
1895 if (cause & F_PCIM0) {
1896 if (is_pcie(adapter))
1897 pcie_intr_handler(adapter);
1898 else
1899 pci_intr_handler(adapter);
1900 }
1901 if (cause & F_SGE3)
1902 t3_sge_err_intr_handler(adapter);
1903 if (cause & F_MC7_PMRX)
1904 mc7_intr_handler(&adapter->pmrx);
1905 if (cause & F_MC7_PMTX)
1906 mc7_intr_handler(&adapter->pmtx);
1907 if (cause & F_MC7_CM)
1908 mc7_intr_handler(&adapter->cm);
1909 if (cause & F_CIM)
1910 cim_intr_handler(adapter);
1911 if (cause & F_TP1)
1912 tp_intr_handler(adapter);
1913 if (cause & F_ULP2_RX)
1914 ulprx_intr_handler(adapter);
1915 if (cause & F_ULP2_TX)
1916 ulptx_intr_handler(adapter);
1917 if (cause & F_PM1_RX)
1918 pmrx_intr_handler(adapter);
1919 if (cause & F_PM1_TX)
1920 pmtx_intr_handler(adapter);
1921 if (cause & F_CPL_SWITCH)
1922 cplsw_intr_handler(adapter);
1923 if (cause & F_MPS0)
1924 mps_intr_handler(adapter);
1925 if (cause & F_MC5A)
1926 t3_mc5_intr_handler(&adapter->mc5);
1927 if (cause & F_XGMAC0_0)
1928 mac_intr_handler(adapter, 0);
1929 if (cause & F_XGMAC0_1)
1930 mac_intr_handler(adapter, 1);
1931 if (cause & F_T3DBG)
1932 t3_os_ext_intr_handler(adapter);
1933
1934 /* Clear the interrupts just processed. */
1935 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1936 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1937 return 1;
1938}
1939
1940static unsigned int calc_gpio_intr(struct adapter *adap)
1941{
1942 unsigned int i, gpi_intr = 0;
1943
1944 for_each_port(adap, i)
1945 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1946 adapter_info(adap)->gpio_intr[i])
1947 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1948 return gpi_intr;
1949}
1950
1951/**
1952 * t3_intr_enable - enable interrupts
1953 * @adapter: the adapter whose interrupts should be enabled
1954 *
1955 * Enable interrupts by setting the interrupt enable registers of the
1956 * various HW modules and then enabling the top-level interrupt
1957 * concentrator.
1958 */
1959void t3_intr_enable(struct adapter *adapter)
1960{
1961 static const struct addr_val_pair intr_en_avp[] = {
1962 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1963 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1964 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1965 MC7_INTR_MASK},
1966 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1967 MC7_INTR_MASK},
1968 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1969 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1970 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1971 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1972 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1973 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1974 };
1975
1976 adapter->slow_intr_mask = PL_INTR_MASK;
1977
1978 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1979 t3_write_reg(adapter, A_TP_INT_ENABLE,
1980 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1981
1982 if (adapter->params.rev > 0) {
1983 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1984 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1985 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1986 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1987 F_PBL_BOUND_ERR_CH1);
1988 } else {
1989 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1990 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1991 }
1992
1993 t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
1994
1995 if (is_pcie(adapter))
1996 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1997 else
1998 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1999 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
2000 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
2001}
2002
2003/**
2004 * t3_intr_disable - disable a card's interrupts
2005 * @adapter: the adapter whose interrupts should be disabled
2006 *
2007 * Disable interrupts. We only disable the top-level interrupt
2008 * concentrator and the SGE data interrupts.
2009 */
2010void t3_intr_disable(struct adapter *adapter)
2011{
2012 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
2013 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
2014 adapter->slow_intr_mask = 0;
2015}
2016
2017/**
2018 * t3_intr_clear - clear all interrupts
2019 * @adapter: the adapter whose interrupts should be cleared
2020 *
2021 * Clears all interrupts.
2022 */
2023void t3_intr_clear(struct adapter *adapter)
2024{
2025 static const unsigned int cause_reg_addr[] = {
2026 A_SG_INT_CAUSE,
2027 A_SG_RSPQ_FL_STATUS,
2028 A_PCIX_INT_CAUSE,
2029 A_MC7_INT_CAUSE,
2030 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2031 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2032 A_CIM_HOST_INT_CAUSE,
2033 A_TP_INT_CAUSE,
2034 A_MC5_DB_INT_CAUSE,
2035 A_ULPRX_INT_CAUSE,
2036 A_ULPTX_INT_CAUSE,
2037 A_CPL_INTR_CAUSE,
2038 A_PM1_TX_INT_CAUSE,
2039 A_PM1_RX_INT_CAUSE,
2040 A_MPS_INT_CAUSE,
2041 A_T3DBG_INT_CAUSE,
2042 };
2043 unsigned int i;
2044
2045 /* Clear PHY and MAC interrupts for each port. */
2046 for_each_port(adapter, i)
2047 t3_port_intr_clear(adapter, i);
2048
2049 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2050 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
2051
2052 if (is_pcie(adapter))
2053 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2054 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2055 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
2056}
2057
2058void t3_xgm_intr_enable(struct adapter *adapter, int idx)
2059{
2060 struct port_info *pi = adap2pinfo(adapter, idx);
2061
2062 t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2063 XGM_EXTRA_INTR_MASK);
2064}
2065
2066void t3_xgm_intr_disable(struct adapter *adapter, int idx)
2067{
2068 struct port_info *pi = adap2pinfo(adapter, idx);
2069
2070 t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2071 0x7ff);
2072}
2073
2074/**
2075 * t3_port_intr_enable - enable port-specific interrupts
2076 * @adapter: associated adapter
2077 * @idx: index of port whose interrupts should be enabled
2078 *
2079 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
2080 * adapter port.
2081 */
2082void t3_port_intr_enable(struct adapter *adapter, int idx)
2083{
2084 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2085
2086 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
2087 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2088 phy->ops->intr_enable(phy);
2089}
2090
2091/**
2092 * t3_port_intr_disable - disable port-specific interrupts
2093 * @adapter: associated adapter
2094 * @idx: index of port whose interrupts should be disabled
2095 *
2096 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
2097 * adapter port.
2098 */
2099void t3_port_intr_disable(struct adapter *adapter, int idx)
2100{
2101 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2102
2103 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
2104 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2105 phy->ops->intr_disable(phy);
2106}
2107
2108/**
2109 * t3_port_intr_clear - clear port-specific interrupts
2110 * @adapter: associated adapter
2111 * @idx: index of port whose interrupts to clear
2112 *
2113 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
2114 * adapter port.
2115 */
2116static void t3_port_intr_clear(struct adapter *adapter, int idx)
2117{
2118 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2119
2120 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
2121 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
2122 phy->ops->intr_clear(phy);
2123}
2124
2125#define SG_CONTEXT_CMD_ATTEMPTS 100
2126
2127/**
2128 * t3_sge_write_context - write an SGE context
2129 * @adapter: the adapter
2130 * @id: the context id
2131 * @type: the context type
2132 *
2133 * Program an SGE context with the values already loaded in the
2134 * CONTEXT_DATA0-3 registers.
2135 */
2136static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
2137 unsigned int type)
2138{
2139 if (type == F_RESPONSEQ) {
2140 /*
2141 * Can't write the Response Queue Context bits for
2142 * Interrupt Armed or the Reserve bits after the chip
2143 * has been initialized out of reset. Writing to these
2144 * bits can confuse the hardware.
2145 */
2146 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2147 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2148 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2149 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2150 } else {
2151 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2152 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2153 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2154 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2155 }
2156 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2157 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2158 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2159 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2160}
2161
2162/**
2163 * clear_sge_ctxt - completely clear an SGE context
2164 * @adapter: the adapter
2165 * @id: the context id
2166 * @type: the context type
2167 *
2168 * Completely clear an SGE context. Used predominantly at post-reset
2169 * initialization. Note in particular that we don't skip writing to any
2170 * "sensitive bits" in the contexts the way that t3_sge_write_context()
2171 * does ...
2172 */
2173static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
2174 unsigned int type)
2175{
2176 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2177 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2178 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2179 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2180 t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2181 t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2182 t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2183 t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2184 t3_write_reg(adap, A_SG_CONTEXT_CMD,
2185 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2186 return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2187 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2188}
2189
2190/**
2191 * t3_sge_init_ecntxt - initialize an SGE egress context
2192 * @adapter: the adapter to configure
2193 * @id: the context id
2194 * @gts_enable: whether to enable GTS for the context
2195 * @type: the egress context type
2196 * @respq: associated response queue
2197 * @base_addr: base address of queue
2198 * @size: number of queue entries
2199 * @token: uP token
2200 * @gen: initial generation value for the context
2201 * @cidx: consumer pointer
2202 *
2203 * Initialize an SGE egress context and make it ready for use. If the
2204 * platform allows concurrent context operations, the caller is
2205 * responsible for appropriate locking.
2206 */
2207int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
2208 enum sge_context_type type, int respq, u64 base_addr,
2209 unsigned int size, unsigned int token, int gen,
2210 unsigned int cidx)
2211{
2212 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2213
2214 if (base_addr & 0xfff) /* must be 4K aligned */
2215 return -EINVAL;
2216 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2217 return -EBUSY;
2218
2219 base_addr >>= 12;
2220 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2221 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2222 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2223 V_EC_BASE_LO(base_addr & 0xffff));
2224 base_addr >>= 16;
2225 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
2226 base_addr >>= 32;
2227 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2228 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
2229 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2230 F_EC_VALID);
2231 return t3_sge_write_context(adapter, id, F_EGRESS);
2232}
2233
2234/**
2235 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2236 * @adapter: the adapter to configure
2237 * @id: the context id
2238 * @gts_enable: whether to enable GTS for the context
2239 * @base_addr: base address of queue
2240 * @size: number of queue entries
2241 * @bsize: size of each buffer for this queue
2242 * @cong_thres: threshold to signal congestion to upstream producers
2243 * @gen: initial generation value for the context
2244 * @cidx: consumer pointer
2245 *
2246 * Initialize an SGE free list context and make it ready for use. The
2247 * caller is responsible for ensuring only one context operation occurs
2248 * at a time.
2249 */
2250int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2251 int gts_enable, u64 base_addr, unsigned int size,
2252 unsigned int bsize, unsigned int cong_thres, int gen,
2253 unsigned int cidx)
2254{
2255 if (base_addr & 0xfff) /* must be 4K aligned */
2256 return -EINVAL;
2257 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2258 return -EBUSY;
2259
2260 base_addr >>= 12;
2261 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2262 base_addr >>= 32;
2263 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2264 V_FL_BASE_HI((u32) base_addr) |
2265 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2266 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2267 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2268 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2269 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2270 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2271 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2272 return t3_sge_write_context(adapter, id, F_FREELIST);
2273}
2274
2275/**
2276 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2277 * @adapter: the adapter to configure
2278 * @id: the context id
2279 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2280 * @base_addr: base address of queue
2281 * @size: number of queue entries
2282 * @fl_thres: threshold for selecting the normal or jumbo free list
2283 * @gen: initial generation value for the context
2284 * @cidx: consumer pointer
2285 *
2286 * Initialize an SGE response queue context and make it ready for use.
2287 * The caller is responsible for ensuring only one context operation
2288 * occurs at a time.
2289 */
2290int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2291 int irq_vec_idx, u64 base_addr, unsigned int size,
2292 unsigned int fl_thres, int gen, unsigned int cidx)
2293{
2294 unsigned int intr = 0;
2295
2296 if (base_addr & 0xfff) /* must be 4K aligned */
2297 return -EINVAL;
2298 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2299 return -EBUSY;
2300
2301 base_addr >>= 12;
2302 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2303 V_CQ_INDEX(cidx));
2304 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2305 base_addr >>= 32;
2306 if (irq_vec_idx >= 0)
2307 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2308 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2309 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2310 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2311 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2312}
2313
2314/**
2315 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2316 * @adapter: the adapter to configure
2317 * @id: the context id
2318 * @base_addr: base address of queue
2319 * @size: number of queue entries
2320 * @rspq: response queue for async notifications
2321 * @ovfl_mode: CQ overflow mode
2322 * @credits: completion queue credits
2323 * @credit_thres: the credit threshold
2324 *
2325 * Initialize an SGE completion queue context and make it ready for use.
2326 * The caller is responsible for ensuring only one context operation
2327 * occurs at a time.
2328 */
2329int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2330 unsigned int size, int rspq, int ovfl_mode,
2331 unsigned int credits, unsigned int credit_thres)
2332{
2333 if (base_addr & 0xfff) /* must be 4K aligned */
2334 return -EINVAL;
2335 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2336 return -EBUSY;
2337
2338 base_addr >>= 12;
2339 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2340 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2341 base_addr >>= 32;
2342 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2343 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2344 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2345 V_CQ_ERR(ovfl_mode));
2346 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2347 V_CQ_CREDIT_THRES(credit_thres));
2348 return t3_sge_write_context(adapter, id, F_CQ);
2349}
2350
2351/**
2352 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2353 * @adapter: the adapter
2354 * @id: the egress context id
2355 * @enable: enable (1) or disable (0) the context
2356 *
2357 * Enable or disable an SGE egress context. The caller is responsible for
2358 * ensuring only one context operation occurs at a time.
2359 */
2360int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2361{
2362 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2363 return -EBUSY;
2364
2365 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2366 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2367 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2368 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2369 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2370 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2371 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2372 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2373 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2374}
2375
2376/**
2377 * t3_sge_disable_fl - disable an SGE free-buffer list
2378 * @adapter: the adapter
2379 * @id: the free list context id
2380 *
2381 * Disable an SGE free-buffer list. The caller is responsible for
2382 * ensuring only one context operation occurs at a time.
2383 */
2384int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2385{
2386 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2387 return -EBUSY;
2388
2389 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2390 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2391 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2392 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2393 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2394 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2395 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2396 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2397 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2398}
2399
2400/**
2401 * t3_sge_disable_rspcntxt - disable an SGE response queue
2402 * @adapter: the adapter
2403 * @id: the response queue context id
2404 *
2405 * Disable an SGE response queue. The caller is responsible for
2406 * ensuring only one context operation occurs at a time.
2407 */
2408int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2409{
2410 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2411 return -EBUSY;
2412
2413 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2414 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2415 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2416 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2417 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2418 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2419 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2420 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2421 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2422}
2423
2424/**
2425 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2426 * @adapter: the adapter
2427 * @id: the completion queue context id
2428 *
2429 * Disable an SGE completion queue. The caller is responsible for
2430 * ensuring only one context operation occurs at a time.
2431 */
2432int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2433{
2434 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2435 return -EBUSY;
2436
2437 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2438 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2439 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2440 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2441 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2442 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2443 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2444 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2445 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2446}
2447
2448/**
2449 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2450 * @adapter: the adapter
2451 * @id: the context id
2452 * @op: the operation to perform
2453 *
2454 * Perform the selected operation on an SGE completion queue context.
2455 * The caller is responsible for ensuring only one context operation
2456 * occurs at a time.
2457 */
2458int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2459 unsigned int credits)
2460{
2461 u32 val;
2462
2463 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2464 return -EBUSY;
2465
2466 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2467 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2468 V_CONTEXT(id) | F_CQ);
2469 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2470 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2471 return -EIO;
2472
2473 if (op >= 2 && op < 7) {
2474 if (adapter->params.rev > 0)
2475 return G_CQ_INDEX(val);
2476
2477 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2478 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2479 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2480 F_CONTEXT_CMD_BUSY, 0,
2481 SG_CONTEXT_CMD_ATTEMPTS, 1))
2482 return -EIO;
2483 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2484 }
2485 return 0;
2486}
2487
2488/**
2489 * t3_config_rss - configure Rx packet steering
2490 * @adapter: the adapter
2491 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2492 * @cpus: values for the CPU lookup table (0xff terminated)
2493 * @rspq: values for the response queue lookup table (0xffff terminated)
2494 *
2495 * Programs the receive packet steering logic. @cpus and @rspq provide
2496 * the values for the CPU and response queue lookup tables. If they
2497 * provide fewer values than the size of the tables the supplied values
2498 * are used repeatedly until the tables are fully populated.
2499 */
2500void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2501 const u8 *cpus, const u16 *rspq)
2502{
2503 int i, j, cpu_idx = 0, q_idx = 0;
2504
2505 if (cpus)
2506 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2507 u32 val = i << 16;
2508
2509 for (j = 0; j < 2; ++j) {
2510 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2511 if (cpus[cpu_idx] == 0xff)
2512 cpu_idx = 0;
2513 }
2514 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2515 }
2516
2517 if (rspq)
2518 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2519 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2520 (i << 16) | rspq[q_idx++]);
2521 if (rspq[q_idx] == 0xffff)
2522 q_idx = 0;
2523 }
2524
2525 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2526}
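/*
 * Illustrative note (added; not in the original source): each write to
 * A_TP_RSS_LKP_TABLE carries the table index in the upper 16 bits and two
 * 6-bit CPU values in the low bytes, and cpu_idx wraps back to the start of
 * @cpus when the 0xff terminator is next, so a short table such as
 * {0, 1, 2, 3, 0xff} simply repeats across all RSS_TABLE_SIZE entries.  The
 * response queue map is filled the same way from @rspq using the 0xffff
 * terminator.
 */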
2527
2528/**
2529 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2530 * @adap: the adapter
2531 * @enable: 1 to select offload mode, 0 for regular NIC
2532 *
2533 * Switches TP to NIC/offload mode.
2534 */
2535void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2536{
2537 if (is_offload(adap) || !enable)
2538 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2539 V_NICMODE(!enable));
2540}
2541
2542/**
2543 * pm_num_pages - calculate the number of pages of the payload memory
2544 * @mem_size: the size of the payload memory
2545 * @pg_size: the size of each payload memory page
2546 *
2547 * Calculate the number of pages, each of the given size, that fit in a
2548 * memory of the specified size, respecting the HW requirement that the
2549 * number of pages must be a multiple of 24.
2550 */
2551static inline unsigned int pm_num_pages(unsigned int mem_size,
2552 unsigned int pg_size)
2553{
2554 unsigned int n = mem_size / pg_size;
2555
2556 return n - n % 24;
2557}
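/*
 * Worked example (added; illustrative values only): with a 64 MB channel and
 * 64 KB pages, n = 1024 and the function returns 1024 - 1024 % 24 = 1008
 * pages (42 * 24), leaving 16 pages unused; at most 23 pages can ever be
 * sacrificed to the multiple-of-24 requirement.
 */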
2558
2559#define mem_region(adap, start, size, reg) \
2560 t3_write_reg((adap), A_ ## reg, (start)); \
2561 start += size
2562
2563/**
2564 * partition_mem - partition memory and configure TP memory settings
2565 * @adap: the adapter
2566 * @p: the TP parameters
2567 *
2568 * Partitions context and payload memory and configures TP's memory
2569 * registers.
2570 */
2571static void partition_mem(struct adapter *adap, const struct tp_params *p)
2572{
2573 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2574 unsigned int timers = 0, timers_shift = 22;
2575
2576 if (adap->params.rev > 0) {
2577 if (tids <= 16 * 1024) {
2578 timers = 1;
2579 timers_shift = 16;
2580 } else if (tids <= 64 * 1024) {
2581 timers = 2;
2582 timers_shift = 18;
2583 } else if (tids <= 256 * 1024) {
2584 timers = 3;
2585 timers_shift = 20;
2586 }
2587 }
2588
2589 t3_write_reg(adap, A_TP_PMM_SIZE,
2590 p->chan_rx_size | (p->chan_tx_size >> 16));
2591
2592 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2593 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2594 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2595 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2596 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2597
2598 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2599 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2600 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2601
2602 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2603 /* Add a bit of headroom and make it a multiple of 24 */
2604 pstructs += 48;
2605 pstructs -= pstructs % 24;
2606 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2607
2608 m = tids * TCB_SIZE;
2609 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2610 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2611 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2612 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2613 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2614 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2615 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2616 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2617
2618 m = (m + 4095) & ~0xfff;
2619 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2620 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2621
2622 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2623 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2624 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2625 if (tids < m)
2626 adap->params.mc5.nservers += m - tids;
2627}
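/*
 * Summary note (added for clarity; not in the original source): the CM memory
 * is carved up in order as TCBs (tids * TCB_SIZE), the egress and CQ context
 * regions, the timer queues, the pstruct pool and its free list, the Rx and
 * Tx page-pool free lists, and finally the CIM SDRAM region.  If the space
 * left for connection state supports fewer TIDs than the MC5 table has
 * earmarked, the surplus MC5 entries are reassigned to the server region.
 */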
2628
2629static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2630 u32 val)
2631{
2632 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2633 t3_write_reg(adap, A_TP_PIO_DATA, val);
2634}
2635
2636static void tp_config(struct adapter *adap, const struct tp_params *p)
2637{
2638 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2639 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2640 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2641 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2642 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2643 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2644 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2645 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2646 V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
2647 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2648 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2649 F_IPV6ENABLE | F_NICMODE);
2650 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2651 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2652 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2653 adap->params.rev > 0 ? F_ENABLEESND :
2654 F_T3A_ENABLEESND);
2655
2656 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2657 F_ENABLEEPCMDAFULL,
2658 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
2659 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2660 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2661 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2662 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
2663 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2664 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2665
2666 if (adap->params.rev > 0) {
2667 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2668 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2669 F_TXPACEAUTO);
2670 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2671 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2672 } else
2673 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2674
2675 if (adap->params.rev == T3_REV_C)
2676 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2677 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2678 V_TABLELATENCYDELTA(4));
2679
2680 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2681 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2682 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2683 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2684}
2685
2686/* Desired TP timer resolution in usec */
2687#define TP_TMR_RES 50
2688
2689/* TCP timer values in ms */
2690#define TP_DACK_TIMER 50
2691#define TP_RTO_MIN 250
2692
2693/**
2694 * tp_set_timers - set TP timing parameters
2695 * @adap: the adapter to set
2696 * @core_clk: the core clock frequency in Hz
2697 *
2698 * Set TP's timing parameters, such as the various timer resolutions and
2699 * the TCP timer values.
2700 */
2701static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2702{
2703 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2704 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2705 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2706 unsigned int tps = core_clk >> tre;
2707
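	/*
	 * Worked example (added; a 200 MHz core clock is assumed purely for
	 * illustration): core_clk / (1000000 / TP_TMR_RES) = 200000000 / 20000
	 * = 10000, so tre = fls(10000) - 1 = 13 and one timer tick is
	 * 2^13 / 200 MHz ~= 41 us, the largest power-of-two tick that stays
	 * within the desired 50 us resolution.
	 */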
2708 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2709 V_DELAYEDACKRESOLUTION(dack_re) |
2710 V_TIMESTAMPRESOLUTION(tstamp_re));
2711 t3_write_reg(adap, A_TP_DACK_TIMER,
2712 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2713 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2714 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2715 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2716 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2717 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2718 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2719 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2720 V_KEEPALIVEMAX(9));
2721
2722#define SECONDS * tps
2723
2724 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2725 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2726 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2727 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2728 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2729 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2730 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2731 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2732 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2733
2734#undef SECONDS
2735}
2736
2737/**
2738 * t3_tp_set_coalescing_size - set receive coalescing size
2739 * @adap: the adapter
2740 * @size: the receive coalescing size
2741 * @psh: whether a set PSH bit should deliver coalesced data
2742 *
2743 * Set the receive coalescing size and PSH bit handling.
2744 */
2745static int t3_tp_set_coalescing_size(struct adapter *adap,
2746 unsigned int size, int psh)
2747{
2748 u32 val;
2749
2750 if (size > MAX_RX_COALESCING_LEN)
2751 return -EINVAL;
2752
2753 val = t3_read_reg(adap, A_TP_PARA_REG3);
2754 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2755
2756 if (size) {
2757 val |= F_RXCOALESCEENABLE;
2758 if (psh)
2759 val |= F_RXCOALESCEPSHEN;
2760 size = min(MAX_RX_COALESCING_LEN, size);
2761 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2762 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2763 }
2764 t3_write_reg(adap, A_TP_PARA_REG3, val);
2765 return 0;
2766}
2767
2768/**
2769 * t3_tp_set_max_rxsize - set the max receive size
2770 * @adap: the adapter
2771 * @size: the max receive size
2772 *
2773 * Set TP's max receive size. This is the limit that applies when
2774 * receive coalescing is disabled.
2775 */
2776static void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2777{
2778 t3_write_reg(adap, A_TP_PARA_REG7,
2779 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2780}
2781
2782static void init_mtus(unsigned short mtus[])
2783{
2784 /*
2785 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2786 * it can accommodate max size TCP/IP headers when SACK and timestamps
2787 * are enabled and still have at least 8 bytes of payload.
2788 */
2789 mtus[0] = 88;
2790 mtus[1] = 88;
2791 mtus[2] = 256;
2792 mtus[3] = 512;
2793 mtus[4] = 576;
2794 mtus[5] = 1024;
2795 mtus[6] = 1280;
2796 mtus[7] = 1492;
2797 mtus[8] = 1500;
2798 mtus[9] = 2002;
2799 mtus[10] = 2048;
2800 mtus[11] = 4096;
2801 mtus[12] = 4352;
2802 mtus[13] = 8192;
2803 mtus[14] = 9000;
2804 mtus[15] = 9600;
2805}
2806
2807/*
2808 * Initial congestion control parameters.
2809 */
2810static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2811{
2812 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2813 a[9] = 2;
2814 a[10] = 3;
2815 a[11] = 4;
2816 a[12] = 5;
2817 a[13] = 6;
2818 a[14] = 7;
2819 a[15] = 8;
2820 a[16] = 9;
2821 a[17] = 10;
2822 a[18] = 14;
2823 a[19] = 17;
2824 a[20] = 21;
2825 a[21] = 25;
2826 a[22] = 30;
2827 a[23] = 35;
2828 a[24] = 45;
2829 a[25] = 60;
2830 a[26] = 80;
2831 a[27] = 100;
2832 a[28] = 200;
2833 a[29] = 300;
2834 a[30] = 400;
2835 a[31] = 500;
2836
2837 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2838 b[9] = b[10] = 1;
2839 b[11] = b[12] = 2;
2840 b[13] = b[14] = b[15] = b[16] = 3;
2841 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2842 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2843 b[28] = b[29] = 6;
2844 b[30] = b[31] = 7;
2845}
2846
2847/* The minimum additive increment value for the congestion control table */
2848#define CC_MIN_INCR 2U
2849
2850/**
2851 * t3_load_mtus - write the MTU and congestion control HW tables
2852 * @adap: the adapter
2853 * @mtus: the unrestricted values for the MTU table
2854 * @alpha: the values for the congestion control alpha parameter
2855 * @beta: the values for the congestion control beta parameter
2856 * @mtu_cap: the maximum permitted effective MTU
2857 *
2858 * Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
2859 * Update the high-speed congestion control table with the supplied alpha,
2860 * beta, and MTUs.
2861 */
2862void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2863 unsigned short alpha[NCCTRL_WIN],
2864 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2865{
2866 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2867 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2868 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2869 28672, 40960, 57344, 81920, 114688, 163840, 229376
2870 };
2871
2872 unsigned int i, w;
2873
2874 for (i = 0; i < NMTUS; ++i) {
2875 unsigned int mtu = min(mtus[i], mtu_cap);
2876 unsigned int log2 = fls(mtu);
2877
2878 if (!(mtu & ((1 << log2) >> 2))) /* round */
2879 log2--;
2880 t3_write_reg(adap, A_TP_MTU_TABLE,
2881 (i << 24) | (log2 << 16) | mtu);
2882
2883 for (w = 0; w < NCCTRL_WIN; ++w) {
2884 unsigned int inc;
2885
2886 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2887 CC_MIN_INCR);
2888
2889 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2890 (w << 16) | (beta[w] << 13) | inc);
2891 }
2892 }
2893}
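/*
 * Worked example (added; illustrative values only): for mtu = 1500 and a
 * congestion-control window with alpha = 2 and avg_pkts = 20, the additive
 * increment written to A_TP_CCTRL_TABLE is ((1500 - 40) * 2) / 20 = 146,
 * clamped from below by CC_MIN_INCR.
 */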
2894
2895/**
2896 * t3_tp_get_mib_stats - read TP's MIB counters
2897 * @adap: the adapter
2898 * @tps: holds the returned counter values
2899 *
2900 * Returns the values of TP's MIB counters.
2901 */
2902void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2903{
2904 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2905 sizeof(*tps) / sizeof(u32), 0);
2906}
2907
2908#define ulp_region(adap, name, start, len) \
2909 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2910 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2911 (start) + (len) - 1); \
2912 start += len
2913
2914#define ulptx_region(adap, name, start, len) \
2915 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2916 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2917 (start) + (len) - 1)
2918
2919static void ulp_config(struct adapter *adap, const struct tp_params *p)
2920{
2921 unsigned int m = p->chan_rx_size;
2922
2923 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2924 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2925 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2926 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2927 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2928 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2929 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2930 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2931}
2932
2933/**
2934 * t3_set_proto_sram - set the contents of the protocol SRAM
2935 * @adap: the adapter
2936 * @data: the protocol image
2937 *
2938 * Write the contents of the protocol SRAM.
2939 */
2940int t3_set_proto_sram(struct adapter *adap, const u8 *data)
2941{
2942 int i;
2943 const __be32 *buf = (const __be32 *)data;
2944
2945 for (i = 0; i < PROTO_SRAM_LINES; i++) {
2946 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
2947 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
2948 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
2949 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
2950 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
2951
2952 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2953 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2954 return -EIO;
2955 }
2956 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2957
2958 return 0;
2959}
2960
2961void t3_config_trace_filter(struct adapter *adapter,
2962 const struct trace_params *tp, int filter_index,
2963 int invert, int enable)
2964{
2965 u32 addr, key[4], mask[4];
2966
2967 key[0] = tp->sport | (tp->sip << 16);
2968 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2969 key[2] = tp->dip;
2970 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2971
2972 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2973 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2974 mask[2] = tp->dip_mask;
2975 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2976
2977 if (invert)
2978 key[3] |= (1 << 29);
2979 if (enable)
2980 key[3] |= (1 << 28);
2981
2982 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2983 tp_wr_indirect(adapter, addr++, key[0]);
2984 tp_wr_indirect(adapter, addr++, mask[0]);
2985 tp_wr_indirect(adapter, addr++, key[1]);
2986 tp_wr_indirect(adapter, addr++, mask[1]);
2987 tp_wr_indirect(adapter, addr++, key[2]);
2988 tp_wr_indirect(adapter, addr++, mask[2]);
2989 tp_wr_indirect(adapter, addr++, key[3]);
2990 tp_wr_indirect(adapter, addr, mask[3]);
2991 t3_read_reg(adapter, A_TP_PIO_DATA);
2992}
2993
2994/**
2995 * t3_config_sched - configure a HW traffic scheduler
2996 * @adap: the adapter
2997 * @kbps: target rate in Kbps
2998 * @sched: the scheduler index
2999 *
3000 * Configure a HW scheduler for the target rate.
3001 */
3002int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
3003{
3004 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3005 unsigned int clk = adap->params.vpd.cclk * 1000;
3006 unsigned int selected_cpt = 0, selected_bpt = 0;
3007
3008 if (kbps > 0) {
3009 kbps *= 125; /* -> bytes */
3010 for (cpt = 1; cpt <= 255; cpt++) {
3011 tps = clk / cpt;
3012 bpt = (kbps + tps / 2) / tps;
3013 if (bpt > 0 && bpt <= 255) {
3014 v = bpt * tps;
3015 delta = v >= kbps ? v - kbps : kbps - v;
3016 if (delta <= mindelta) {
3017 mindelta = delta;
3018 selected_cpt = cpt;
3019 selected_bpt = bpt;
3020 }
3021 } else if (selected_cpt)
3022 break;
3023 }
3024 if (!selected_cpt)
3025 return -EINVAL;
3026 }
3027 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3028 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3029 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3030 if (sched & 1)
3031 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3032 else
3033 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3034 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3035 return 0;
3036}
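/*
 * Worked example (added; a 200 MHz core clock is assumed purely for
 * illustration): a 10000 kbps target is 1250000 bytes/sec, and the search
 * settles on cpt = 160, bpt = 1 because (200000000 / 160) ticks/sec * 1 byte
 * per tick is exactly 1250000 bytes/sec; the pair is then packed into the
 * upper or lower half of the rate-limit word depending on whether the
 * scheduler index is odd or even.
 */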
3037
3038static int tp_init(struct adapter *adap, const struct tp_params *p)
3039{
3040 int busy = 0;
3041
3042 tp_config(adap, p);
3043 t3_set_vlan_accel(adap, 3, 0);
3044
3045 if (is_offload(adap)) {
3046 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3047 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3048 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3049 0, 1000, 5);
3050 if (busy)
3051 CH_ERR(adap, "TP initialization timed out\n");
3052 }
3053
3054 if (!busy)
3055 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3056 return busy;
3057}
3058
3059/*
3060 * Perform the bits of HW initialization that are dependent on the Tx
3061 * channels being used.
3062 */
3063static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
3064{
3065 int i;
3066
3067 if (chan_map != 3) { /* one channel */
3068 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3069 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3070 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3071 (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3072 F_TPTXPORT1EN | F_PORT1ACTIVE));
3073 t3_write_reg(adap, A_PM1_TX_CFG,
3074 chan_map == 1 ? 0xffffffff : 0);
3075 } else { /* two channels */
3076 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3077 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3078 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3079 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3080 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3081 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3082 F_ENFORCEPKT);
3083 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3084 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3085 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3086 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3087 for (i = 0; i < 16; i++)
3088 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3089 (i << 16) | 0x1010);
3090 }
3091}
3092
3093static int calibrate_xgm(struct adapter *adapter)
3094{
3095 if (uses_xaui(adapter)) {
3096 unsigned int v, i;
3097
3098 for (i = 0; i < 5; ++i) {
3099 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3100 t3_read_reg(adapter, A_XGM_XAUI_IMP);
3101 msleep(1);
3102 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3103 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3104 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3105 V_XAUIIMP(G_CALIMP(v) >> 2));
3106 return 0;
3107 }
3108 }
3109 CH_ERR(adapter, "MAC calibration failed\n");
3110 return -1;
3111 } else {
3112 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3113 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3114 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3115 F_XGM_IMPSETUPDATE);
3116 }
3117 return 0;
3118}
3119
3120static void calibrate_xgm_t3b(struct adapter *adapter)
3121{
3122 if (!uses_xaui(adapter)) {
3123 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3124 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3125 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3126 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3127 F_XGM_IMPSETUPDATE);
3128 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3129 0);
3130 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3131 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3132 }
3133}
3134
3135struct mc7_timing_params {
3136 unsigned char ActToPreDly;
3137 unsigned char ActToRdWrDly;
3138 unsigned char PreCyc;
3139 unsigned char RefCyc[5];
3140 unsigned char BkCyc;
3141 unsigned char WrToRdDly;
3142 unsigned char RdToWrDly;
3143};
3144
3145/*
3146 * Write a value to a register and check that the write completed. These
3147 * writes normally complete in a cycle or two, so one read should suffice.
3148 * The very first read exists to flush the posted write to the device.
3149 */
3150static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3151{
3152 t3_write_reg(adapter, addr, val);
3153 t3_read_reg(adapter, addr); /* flush */
3154 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3155 return 0;
3156 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3157 return -EIO;
3158}
3159
3160static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3161{
3162 static const unsigned int mc7_mode[] = {
3163 0x632, 0x642, 0x652, 0x432, 0x442
3164 };
3165 static const struct mc7_timing_params mc7_timings[] = {
3166 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3167 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3168 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3169 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3170 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3171 };
3172
3173 u32 val;
3174 unsigned int width, density, slow, attempts;
3175 struct adapter *adapter = mc7->adapter;
3176 const struct mc7_timing_params *p = &mc7_timings[mem_type];
3177
3178 if (!mc7->size)
3179 return 0;
3180
3181 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3182 slow = val & F_SLOW;
3183 width = G_WIDTH(val);
3184 density = G_DEN(val);
3185
3186 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3187 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3188 msleep(1);
3189
3190 if (!slow) {
3191 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3192 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3193 msleep(1);
3194 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3195 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3196 CH_ERR(adapter, "%s MC7 calibration timed out\n",
3197 mc7->name);
3198 goto out_fail;
3199 }
3200 }
3201
3202 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3203 V_ACTTOPREDLY(p->ActToPreDly) |
3204 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3205 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3206 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3207
3208 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3209 val | F_CLKEN | F_TERM150);
3210 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3211
3212 if (!slow)
3213 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3214 F_DLLENB);
3215 udelay(1);
3216
3217 val = slow ? 3 : 6;
3218 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3219 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3220 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3221 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3222 goto out_fail;
3223
3224 if (!slow) {
3225 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3226 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3227 udelay(5);
3228 }
3229
3230 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3231 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3232 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3233 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3234 mc7_mode[mem_type]) ||
3235 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3236 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3237 goto out_fail;
3238
3239 /* clock value is in KHz */
3240 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3241 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
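	/*
	 * Clarifying note (added): the two statements above multiply the KHz
	 * clock by 7812.5 and rescale, giving the number of memory-clock
	 * cycles in one 7.8125 us refresh interval, which is the value
	 * programmed via V_PREREFDIV() below.
	 */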
3242
3243 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3244 F_PERREFEN | V_PREREFDIV(mc7_clock));
3245 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
3246
3247 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3248 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3249 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3250 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3251 (mc7->size << width) - 1);
3252 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3253 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3254
3255 attempts = 50;
3256 do {
3257 msleep(250);
3258 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3259 } while ((val & F_BUSY) && --attempts);
3260 if (val & F_BUSY) {
3261 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3262 goto out_fail;
3263 }
3264
3265 /* Enable normal memory accesses. */
3266 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3267 return 0;
3268
3269out_fail:
3270 return -1;
3271}
3272
3273static void config_pcie(struct adapter *adap)
3274{
3275 static const u16 ack_lat[4][6] = {
3276 {237, 416, 559, 1071, 2095, 4143},
3277 {128, 217, 289, 545, 1057, 2081},
3278 {73, 118, 154, 282, 538, 1050},
3279 {67, 107, 86, 150, 278, 534}
3280 };
3281 static const u16 rpl_tmr[4][6] = {
3282 {711, 1248, 1677, 3213, 6285, 12429},
3283 {384, 651, 867, 1635, 3171, 6243},
3284 {219, 354, 462, 846, 1614, 3150},
3285 {201, 321, 258, 450, 834, 1602}
3286 };
3287
3288 u16 val, devid;
3289 unsigned int log2_width, pldsize;
3290 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3291
3292 pci_read_config_word(adap->pdev,
3293 adap->pdev->pcie_cap + PCI_EXP_DEVCTL,
3294 &val);
3295 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3296
3297 pci_read_config_word(adap->pdev, 0x2, &devid);
3298 if (devid == 0x37) {
3299 pci_write_config_word(adap->pdev,
3300 adap->pdev->pcie_cap + PCI_EXP_DEVCTL,
3301 val & ~PCI_EXP_DEVCTL_READRQ &
3302 ~PCI_EXP_DEVCTL_PAYLOAD);
3303 pldsize = 0;
3304 }
3305
3306 pci_read_config_word(adap->pdev, adap->pdev->pcie_cap + PCI_EXP_LNKCTL,
3307 &val);
3308
3309 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3310 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3311 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3312 log2_width = fls(adap->params.pci.width) - 1;
3313 acklat = ack_lat[log2_width][pldsize];
3314 if (val & 1) /* check LOsEnable */
3315 acklat += fst_trn_tx * 4;
3316 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3317
3318 if (adap->params.rev == 0)
3319 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3320 V_T3A_ACKLAT(M_T3A_ACKLAT),
3321 V_T3A_ACKLAT(acklat));
3322 else
3323 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3324 V_ACKLAT(acklat));
3325
3326 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3327 V_REPLAYLMT(rpllmt));
3328
3329 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3330 t3_set_reg_field(adap, A_PCIE_CFG, 0,
3331 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
3332 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3333}
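/*
 * Note (added for clarity; interpretation of the tables above): ack_lat[][]
 * and rpl_tmr[][] are indexed by log2 of the negotiated link width
 * (x1/x2/x4/x8) and by the max payload size code from PCI_EXP_DEVCTL
 * (0 = 128 bytes ... 5 = 4096 bytes), with additional allowance for fast
 * training sequences; the results become the ACK latency and replay-timer
 * limits written to the PEX control registers.
 */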
3334
3335/*
3336 * Initialize and configure T3 HW modules. This performs the
3337 * initialization steps that need to be done once after a card is reset.
3338 * MAC and PHY initialization is handled separately whenever a port is enabled.
3339 *
3340 * fw_params are passed to FW and their value is platform dependent. Only the
3341 * top 8 bits are available for use, the rest must be 0.
3342 */
3343int t3_init_hw(struct adapter *adapter, u32 fw_params)
3344{
3345 int err = -EIO, attempts, i;
3346 const struct vpd_params *vpd = &adapter->params.vpd;
3347
3348 if (adapter->params.rev > 0)
3349 calibrate_xgm_t3b(adapter);
3350 else if (calibrate_xgm(adapter))
3351 goto out_err;
3352
3353 if (vpd->mclk) {
3354 partition_mem(adapter, &adapter->params.tp);
3355
3356 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3357 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3358 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3359 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3360 adapter->params.mc5.nfilters,
3361 adapter->params.mc5.nroutes))
3362 goto out_err;
3363
3364 for (i = 0; i < 32; i++)
3365 if (clear_sge_ctxt(adapter, i, F_CQ))
3366 goto out_err;
3367 }
3368
3369 if (tp_init(adapter, &adapter->params.tp))
3370 goto out_err;
3371
3372 t3_tp_set_coalescing_size(adapter,
3373 min(adapter->params.sge.max_pkt_size,
3374 MAX_RX_COALESCING_LEN), 1);
3375 t3_tp_set_max_rxsize(adapter,
3376 min(adapter->params.sge.max_pkt_size, 16384U));
3377 ulp_config(adapter, &adapter->params.tp);
3378
3379 if (is_pcie(adapter))
3380 config_pcie(adapter);
3381 else
3382 t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3383 F_DMASTOPEN | F_CLIDECEN);
3384
3385 if (adapter->params.rev == T3_REV_C)
3386 t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3387 F_CFG_CQE_SOP_MASK);
3388
3389 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3390 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3391 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3392 chan_init_hw(adapter, adapter->params.chan_map);
3393 t3_sge_init(adapter, &adapter->params.sge);
3394 t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);
3395
3396 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
3397
3398 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3399 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3400 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3401 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3402
3403 attempts = 100;
3404 do { /* wait for uP to initialize */
3405 msleep(20);
3406 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3407 if (!attempts) {
3408 CH_ERR(adapter, "uP initialization timed out\n");
3409 goto out_err;
3410 }
3411
3412 err = 0;
3413out_err:
3414 return err;
3415}
3416
3417/**
3418 * get_pci_mode - determine a card's PCI mode
3419 * @adapter: the adapter
3420 * @p: where to store the PCI settings
3421 *
3422 * Determines a card's PCI mode and associated parameters, such as speed
3423 * and width.
3424 */
3425static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3426{
3427 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3428 u32 pci_mode, pcie_cap;
3429
3430 pcie_cap = pci_pcie_cap(adapter->pdev);
3431 if (pcie_cap) {
3432 u16 val;
3433
3434 p->variant = PCI_VARIANT_PCIE;
3435 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3436 &val);
3437 p->width = (val >> 4) & 0x3f;
3438 return;
3439 }
3440
3441 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3442 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3443 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3444 pci_mode = G_PCIXINITPAT(pci_mode);
3445 if (pci_mode == 0)
3446 p->variant = PCI_VARIANT_PCI;
3447 else if (pci_mode < 4)
3448 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3449 else if (pci_mode < 8)
3450 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3451 else
3452 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3453}
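/*
 * Note (added for clarity): for PCIe the negotiated link width is taken from
 * bits [9:4] of the Link Status register, i.e. (val >> 4) & 0x3f; for
 * PCI/PCI-X the bus speed and width are instead decoded from the chip's
 * A_PCIX_MODE register.
 */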
3454
3455/**
3456 * init_link_config - initialize a link's SW state
3457 * @lc: structure holding the link state
3458 * @ai: information about the current card
3459 *
3460 * Initializes the SW state maintained for each link, including the link's
3461 * capabilities and default speed/duplex/flow-control/autonegotiation
3462 * settings.
3463 */
3464static void init_link_config(struct link_config *lc, unsigned int caps)
3465{
3466 lc->supported = caps;
3467 lc->requested_speed = lc->speed = SPEED_INVALID;
3468 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3469 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3470 if (lc->supported & SUPPORTED_Autoneg) {
3471 lc->advertising = lc->supported;
3472 lc->autoneg = AUTONEG_ENABLE;
3473 lc->requested_fc |= PAUSE_AUTONEG;
3474 } else {
3475 lc->advertising = 0;
3476 lc->autoneg = AUTONEG_DISABLE;
3477 }
3478}
3479
3480/**
3481 * mc7_calc_size - calculate MC7 memory size
3482 * @cfg: the MC7 configuration
3483 *
3484 * Calculates the size of an MC7 memory in bytes from the value of its
3485 * configuration register.
3486 */
3487static unsigned int mc7_calc_size(u32 cfg)
3488{
3489 unsigned int width = G_WIDTH(cfg);
3490 unsigned int banks = !!(cfg & F_BKS) + 1;
3491 unsigned int org = !!(cfg & F_ORG) + 1;
3492 unsigned int density = G_DEN(cfg);
3493 unsigned int MBs = ((256 << density) * banks) / (org << width);
3494
3495 return MBs << 20;
3496}
3497
3498static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3499 unsigned int base_addr, const char *name)
3500{
3501 u32 cfg;
3502
3503 mc7->adapter = adapter;
3504 mc7->name = name;
3505 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3506 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3507 mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3508 mc7->width = G_WIDTH(cfg);
3509}
3510
3511static void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3512{
3513 u16 devid;
3514
3515 mac->adapter = adapter;
3516 pci_read_config_word(adapter->pdev, 0x2, &devid);
3517
3518 if (devid == 0x37 && !adapter->params.vpd.xauicfg[1])
3519 index = 0;
3520 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3521 mac->nucast = 1;
3522
3523 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3524 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3525 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3526 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3527 F_ENRGMII, 0);
3528 }
3529}
3530
3531static void early_hw_init(struct adapter *adapter,
3532 const struct adapter_info *ai)
3533{
3534 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3535
3536 mi1_init(adapter, ai);
3537 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3538 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3539 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3540 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3541 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3542 t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
3543
3544 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3545 val |= F_ENRGMII;
3546
3547 /* Enable MAC clocks so we can access the registers */
3548 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3549 t3_read_reg(adapter, A_XGM_PORT_CFG);
3550
3551 val |= F_CLKDIVRESET_;
3552 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3553 t3_read_reg(adapter, A_XGM_PORT_CFG);
3554 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3555 t3_read_reg(adapter, A_XGM_PORT_CFG);
3556}
3557
3558/*
3559 * Reset the adapter.
3560 * Older PCIe cards lose their config space during reset; PCI-X
3561 * ones don't.
3562 */
3563int t3_reset_adapter(struct adapter *adapter)
3564{
3565 int i, save_and_restore_pcie =
3566 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3567 uint16_t devid = 0;
3568
3569 if (save_and_restore_pcie)
3570 pci_save_state(adapter->pdev);
3571 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3572
3573 /*
3574 * Delay. Give the device some time to reset fully.
3575 * XXX The delay time should be modified.
3576 */
3577 for (i = 0; i < 10; i++) {
3578 msleep(50);
3579 pci_read_config_word(adapter->pdev, 0x00, &devid);
3580 if (devid == 0x1425)
3581 break;
3582 }
3583
3584 if (devid != 0x1425)
3585 return -1;
3586
3587 if (save_and_restore_pcie)
3588 pci_restore_state(adapter->pdev);
3589 return 0;
3590}
3591
3592static int init_parity(struct adapter *adap)
3593{
3594 int i, err, addr;
3595
3596 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3597 return -EBUSY;
3598
3599 for (err = i = 0; !err && i < 16; i++)
3600 err = clear_sge_ctxt(adap, i, F_EGRESS);
3601 for (i = 0xfff0; !err && i <= 0xffff; i++)
3602 err = clear_sge_ctxt(adap, i, F_EGRESS);
3603 for (i = 0; !err && i < SGE_QSETS; i++)
3604 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3605 if (err)
3606 return err;
3607
3608 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3609 for (i = 0; i < 4; i++)
3610 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3611 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3612 F_IBQDBGWR | V_IBQDBGQID(i) |
3613 V_IBQDBGADDR(addr));
3614 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3615 F_IBQDBGBUSY, 0, 2, 1);
3616 if (err)
3617 return err;
3618 }
3619 return 0;
3620}
3621
3622/*
3623 * Initialize adapter SW state for the various HW modules, set initial values
3624 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3625 * interface.
3626 */
3627int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3628 int reset)
3629{
3630 int ret;
3631 unsigned int i, j = -1;
3632
3633 get_pci_mode(adapter, &adapter->params.pci);
3634
3635 adapter->params.info = ai;
3636 adapter->params.nports = ai->nports0 + ai->nports1;
3637 adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
3638 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3639 /*
3640 * We used to only run the "adapter check task" once a second if
3641 * we had PHYs which didn't support interrupts (we would check
3642 * their link status once a second). Now we check other conditions
3643 * in that routine which could potentially impose a very high
3644 * interrupt load on the system. As such, we now always scan the
3645 * adapter state once a second ...
3646 */
3647 adapter->params.linkpoll_period = 10;
3648 adapter->params.stats_update_period = is_10G(adapter) ?
3649 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3650 adapter->params.pci.vpd_cap_addr =
3651 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3652 ret = get_vpd_params(adapter, &adapter->params.vpd);
3653 if (ret < 0)
3654 return ret;
3655
3656 if (reset && t3_reset_adapter(adapter))
3657 return -1;
3658
3659 t3_sge_prep(adapter, &adapter->params.sge);
3660
3661 if (adapter->params.vpd.mclk) {
3662 struct tp_params *p = &adapter->params.tp;
3663
3664 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3665 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3666 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3667
3668 p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
3669 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3670 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3671 p->cm_size = t3_mc7_size(&adapter->cm);
3672 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3673 p->chan_tx_size = p->pmtx_size / p->nchan;
3674 p->rx_pg_size = 64 * 1024;
3675 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3676 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3677 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3678 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3679 adapter->params.rev > 0 ? 12 : 6;
3680 }
3681
3682 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3683 t3_mc7_size(&adapter->pmtx) &&
3684 t3_mc7_size(&adapter->cm);
3685
3686 if (is_offload(adapter)) {
3687 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3688 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3689 DEFAULT_NFILTERS : 0;
3690 adapter->params.mc5.nroutes = 0;
3691 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3692
3693 init_mtus(adapter->params.mtus);
3694 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3695 }
3696
3697 early_hw_init(adapter, ai);
3698 ret = init_parity(adapter);
3699 if (ret)
3700 return ret;
3701
3702 for_each_port(adapter, i) {
3703 u8 hw_addr[6];
3704 const struct port_type_info *pti;
3705 struct port_info *p = adap2pinfo(adapter, i);
3706
3707 while (!adapter->params.vpd.port_type[++j])
3708 ;
3709
3710 pti = &port_types[adapter->params.vpd.port_type[j]];
3711 if (!pti->phy_prep) {
3712 CH_ALERT(adapter, "Invalid port type index %d\n",
3713 adapter->params.vpd.port_type[j]);
3714 return -EINVAL;
3715 }
3716
3717 p->phy.mdio.dev = adapter->port[i];
3718 ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3719 ai->mdio_ops);
3720 if (ret)
3721 return ret;
3722 mac_prep(&p->mac, adapter, j);
3723
3724 /*
3725 * The VPD EEPROM stores the base Ethernet address for the
3726 * card. A port's address is derived from the base by adding
3727 * the port's index to the base's low octet.
3728 */
3729 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3730 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3731
3732 memcpy(adapter->port[i]->dev_addr, hw_addr,
3733 ETH_ALEN);
3734 memcpy(adapter->port[i]->perm_addr, hw_addr,
3735 ETH_ALEN);
3736 init_link_config(&p->link_config, p->phy.caps);
3737 p->phy.ops->power_down(&p->phy, 1);
3738
3739 /*
3740 * If the PHY doesn't support interrupts for link status
3741 * changes, schedule a scan of the adapter links at least
3742 * once a second.
3743 */
3744 if (!(p->phy.caps & SUPPORTED_IRQ) &&
3745 adapter->params.linkpoll_period > 10)
3746 adapter->params.linkpoll_period = 10;
3747 }
3748
3749 return 0;
3750}
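/*
 * Editor's note, not part of the original source: a minimal sketch of how the
 * PCI probe path in cxgb3_main.c is expected to use the function above
 * (error handling and unrelated setup omitted):
 *
 *	const struct adapter_info *ai = t3_get_adapter_info(ent->driver_data);
 *	...
 *	err = t3_prep_adapter(adapter, ai, 1);
 *	if (err)
 *		goto out_free_dev;
 *
 * Firmware-dependent hardware programming (t3_init_hw()) happens later, when
 * the first interface is brought up.
 */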
3751
3752void t3_led_ready(struct adapter *adapter)
3753{
3754 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3755 F_GPIO0_OUT_VAL);
3756}
3757
3758int t3_replay_prep_adapter(struct adapter *adapter)
3759{
3760 const struct adapter_info *ai = adapter->params.info;
3761 unsigned int i, j = -1;
3762 int ret;
3763
3764 early_hw_init(adapter, ai);
3765 ret = init_parity(adapter);
3766 if (ret)
3767 return ret;
3768
3769 for_each_port(adapter, i) {
3770 const struct port_type_info *pti;
3771 struct port_info *p = adap2pinfo(adapter, i);
3772
3773 while (!adapter->params.vpd.port_type[++j])
3774 ;
3775
3776 pti = &port_types[adapter->params.vpd.port_type[j]];
3777 ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
3778 if (ret)
3779 return ret;
3780 p->phy.ops->power_down(&p->phy, 1);
3781 }
3782
3783	return 0;
3784}
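/*
 * Editor's note, not part of the original source: t3_replay_prep_adapter()
 * redoes only the PHY/MAC portion of t3_prep_adapter(); it is intended for
 * paths that must re-prime an already-probed adapter, such as the PCI
 * error-recovery (slot reset) handling in cxgb3_main.c.
 */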
3785
diff --git a/drivers/net/cxgb3/t3cdev.h b/drivers/net/cxgb3/t3cdev.h
new file mode 100644
index 00000000000..705713b5663
--- /dev/null
+++ b/drivers/net/cxgb3/t3cdev.h
@@ -0,0 +1,70 @@
1/*
2 * Copyright (C) 2006-2008 Chelsio Communications. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef _T3CDEV_H_
33#define _T3CDEV_H_
34
35#include <linux/list.h>
36#include <linux/atomic.h>
37#include <linux/netdevice.h>
38#include <linux/proc_fs.h>
39#include <linux/skbuff.h>
40#include <net/neighbour.h>
41
42#define T3CNAMSIZ 16
43
44struct cxgb3_client;
45
46enum t3ctype {
47 T3A = 0,
48 T3B,
49 T3C,
50};
51
52struct t3cdev {
53 char name[T3CNAMSIZ]; /* T3C device name */
54 enum t3ctype type;
55 struct list_head ofld_dev_list; /* for list linking */
56 struct net_device *lldev; /* LL dev associated with T3C messages */
57 struct proc_dir_entry *proc_dir; /* root of proc dir for this T3C */
58 int (*send)(struct t3cdev *dev, struct sk_buff *skb);
59 int (*recv)(struct t3cdev *dev, struct sk_buff **skb, int n);
60 int (*ctl)(struct t3cdev *dev, unsigned int req, void *data);
61 void (*neigh_update)(struct t3cdev *dev, struct neighbour *neigh);
62 void *priv; /* driver private data */
63 void *l2opt; /* optional layer 2 data */
64 void *l3opt; /* optional layer 3 data */
65 void *l4opt; /* optional layer 4 data */
66 void *ulp; /* ulp stuff */
67 void *ulp_iscsi; /* ulp iscsi */
68};
69
70#endif /* _T3CDEV_H_ */
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
new file mode 100644
index 00000000000..8bda06e366c
--- /dev/null
+++ b/drivers/net/cxgb3/version.h
@@ -0,0 +1,44 @@
1/*
2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32/* $Date: 2006/10/31 18:57:51 $ $RCSfile: version.h,v $ $Revision: 1.3 $ */
33#ifndef __CHELSIO_VERSION_H
34#define __CHELSIO_VERSION_H
35#define DRV_DESC "Chelsio T3 Network Driver"
36#define DRV_NAME "cxgb3"
37/* Driver version */
38#define DRV_VERSION "1.1.4-ko"
39
40/* Firmware version */
41#define FW_VERSION_MAJOR 7
42#define FW_VERSION_MINOR 10
43#define FW_VERSION_MICRO 0
44#endif /* __CHELSIO_VERSION_H */
diff --git a/drivers/net/cxgb3/vsc8211.c b/drivers/net/cxgb3/vsc8211.c
new file mode 100644
index 00000000000..4f9a1c2724f
--- /dev/null
+++ b/drivers/net/cxgb3/vsc8211.c
@@ -0,0 +1,416 @@
1/*
2 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33
34/* VSC8211 PHY specific registers. */
35enum {
36 VSC8211_SIGDET_CTRL = 19,
37 VSC8211_EXT_CTRL = 23,
38 VSC8211_INTR_ENABLE = 25,
39 VSC8211_INTR_STATUS = 26,
40 VSC8211_LED_CTRL = 27,
41 VSC8211_AUX_CTRL_STAT = 28,
42 VSC8211_EXT_PAGE_AXS = 31,
43};
44
45enum {
46 VSC_INTR_RX_ERR = 1 << 0,
47 VSC_INTR_MS_ERR = 1 << 1, /* master/slave resolution error */
48 VSC_INTR_CABLE = 1 << 2, /* cable impairment */
49 VSC_INTR_FALSE_CARR = 1 << 3, /* false carrier */
50 VSC_INTR_MEDIA_CHG = 1 << 4, /* AMS media change */
51 VSC_INTR_RX_FIFO = 1 << 5, /* Rx FIFO over/underflow */
52 VSC_INTR_TX_FIFO = 1 << 6, /* Tx FIFO over/underflow */
53 VSC_INTR_DESCRAMBL = 1 << 7, /* descrambler lock-lost */
54 VSC_INTR_SYMBOL_ERR = 1 << 8, /* symbol error */
55 VSC_INTR_NEG_DONE = 1 << 10, /* autoneg done */
56 VSC_INTR_NEG_ERR = 1 << 11, /* autoneg error */
57 VSC_INTR_DPLX_CHG = 1 << 12, /* duplex change */
58 VSC_INTR_LINK_CHG = 1 << 13, /* link change */
59 VSC_INTR_SPD_CHG = 1 << 14, /* speed change */
60 VSC_INTR_ENABLE = 1 << 15, /* interrupt enable */
61};
62
63enum {
64 VSC_CTRL_CLAUSE37_VIEW = 1 << 4, /* Switch to Clause 37 view */
65 VSC_CTRL_MEDIA_MODE_HI = 0xf000 /* High part of media mode select */
66};
67
68#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \
69 VSC_INTR_DPLX_CHG | VSC_INTR_SPD_CHG | \
70 VSC_INTR_NEG_DONE)
71#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \
72 VSC_INTR_ENABLE)
73
74/* PHY specific auxiliary control & status register fields */
75#define S_ACSR_ACTIPHY_TMR 0
76#define M_ACSR_ACTIPHY_TMR 0x3
77#define V_ACSR_ACTIPHY_TMR(x) ((x) << S_ACSR_ACTIPHY_TMR)
78
79#define S_ACSR_SPEED 3
80#define M_ACSR_SPEED 0x3
81#define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED)
82
83#define S_ACSR_DUPLEX 5
84#define F_ACSR_DUPLEX (1 << S_ACSR_DUPLEX)
85
86#define S_ACSR_ACTIPHY 6
87#define F_ACSR_ACTIPHY (1 << S_ACSR_ACTIPHY)
88
89/*
90 * Reset the PHY. This PHY completes reset immediately so we never wait.
91 */
92static int vsc8211_reset(struct cphy *cphy, int wait)
93{
94 return t3_phy_reset(cphy, MDIO_DEVAD_NONE, 0);
95}
96
97static int vsc8211_intr_enable(struct cphy *cphy)
98{
99 return t3_mdio_write(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_ENABLE,
100 INTR_MASK);
101}
102
103static int vsc8211_intr_disable(struct cphy *cphy)
104{
105 return t3_mdio_write(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_ENABLE, 0);
106}
107
108static int vsc8211_intr_clear(struct cphy *cphy)
109{
110 u32 val;
111
112 /* Clear PHY interrupts by reading the register. */
113 return t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_STATUS, &val);
114}
115
116static int vsc8211_autoneg_enable(struct cphy *cphy)
117{
118 return t3_mdio_change_bits(cphy, MDIO_DEVAD_NONE, MII_BMCR,
119 BMCR_PDOWN | BMCR_ISOLATE,
120 BMCR_ANENABLE | BMCR_ANRESTART);
121}
122
123static int vsc8211_autoneg_restart(struct cphy *cphy)
124{
125 return t3_mdio_change_bits(cphy, MDIO_DEVAD_NONE, MII_BMCR,
126 BMCR_PDOWN | BMCR_ISOLATE,
127 BMCR_ANRESTART);
128}
129
130static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok,
131 int *speed, int *duplex, int *fc)
132{
133 unsigned int bmcr, status, lpa, adv;
134 int err, sp = -1, dplx = -1, pause = 0;
135
136 err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMCR, &bmcr);
137 if (!err)
138 err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, &status);
139 if (err)
140 return err;
141
142 if (link_ok) {
143 /*
144 * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
145 * once more to get the current link state.
146 */
147 if (!(status & BMSR_LSTATUS))
148 err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR,
149 &status);
150 if (err)
151 return err;
152 *link_ok = (status & BMSR_LSTATUS) != 0;
153 }
154 if (!(bmcr & BMCR_ANENABLE)) {
155 dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
156 if (bmcr & BMCR_SPEED1000)
157 sp = SPEED_1000;
158 else if (bmcr & BMCR_SPEED100)
159 sp = SPEED_100;
160 else
161 sp = SPEED_10;
162 } else if (status & BMSR_ANEGCOMPLETE) {
163 err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_AUX_CTRL_STAT,
164 &status);
165 if (err)
166 return err;
167
168 dplx = (status & F_ACSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
169 sp = G_ACSR_SPEED(status);
170 if (sp == 0)
171 sp = SPEED_10;
172 else if (sp == 1)
173 sp = SPEED_100;
174 else
175 sp = SPEED_1000;
176
177 if (fc && dplx == DUPLEX_FULL) {
178 err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_LPA,
179 &lpa);
180 if (!err)
181 err = t3_mdio_read(cphy, MDIO_DEVAD_NONE,
182 MII_ADVERTISE, &adv);
183 if (err)
184 return err;
185
186 if (lpa & adv & ADVERTISE_PAUSE_CAP)
187 pause = PAUSE_RX | PAUSE_TX;
188 else if ((lpa & ADVERTISE_PAUSE_CAP) &&
189 (lpa & ADVERTISE_PAUSE_ASYM) &&
190 (adv & ADVERTISE_PAUSE_ASYM))
191 pause = PAUSE_TX;
192 else if ((lpa & ADVERTISE_PAUSE_ASYM) &&
193 (adv & ADVERTISE_PAUSE_CAP))
194 pause = PAUSE_RX;
195 }
196 }
197 if (speed)
198 *speed = sp;
199 if (duplex)
200 *duplex = dplx;
201 if (fc)
202 *fc = pause;
203 return 0;
204}
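/*
 * Editor's illustration, not part of the original source: the pause
 * negotiation above follows the usual 802.3 symmetric/asymmetric resolution.
 * The unused helper below restates that decision in isolation, using the same
 * mii.h advertisement bits and the driver's PAUSE_* flags, purely as a
 * reference for the three branches in vsc8211_get_link_status().
 */
static inline int vsc8211_example_resolve_pause(unsigned int adv,
						unsigned int lpa)
{
	if (lpa & adv & ADVERTISE_PAUSE_CAP)		/* both symmetric */
		return PAUSE_RX | PAUSE_TX;
	if ((lpa & ADVERTISE_PAUSE_CAP) && (lpa & ADVERTISE_PAUSE_ASYM) &&
	    (adv & ADVERTISE_PAUSE_ASYM))		/* asymmetric, Tx only */
		return PAUSE_TX;
	if ((lpa & ADVERTISE_PAUSE_ASYM) && (adv & ADVERTISE_PAUSE_CAP))
		return PAUSE_RX;			/* asymmetric, Rx only */
	return 0;
}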
205
206static int vsc8211_get_link_status_fiber(struct cphy *cphy, int *link_ok,
207 int *speed, int *duplex, int *fc)
208{
209 unsigned int bmcr, status, lpa, adv;
210 int err, sp = -1, dplx = -1, pause = 0;
211
212 err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMCR, &bmcr);
213 if (!err)
214 err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, &status);
215 if (err)
216 return err;
217
218 if (link_ok) {
219 /*
220 * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
221 * once more to get the current link state.
222 */
223 if (!(status & BMSR_LSTATUS))
224 err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR,
225 &status);
226 if (err)
227 return err;
228 *link_ok = (status & BMSR_LSTATUS) != 0;
229 }
230 if (!(bmcr & BMCR_ANENABLE)) {
231 dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
232 if (bmcr & BMCR_SPEED1000)
233 sp = SPEED_1000;
234 else if (bmcr & BMCR_SPEED100)
235 sp = SPEED_100;
236 else
237 sp = SPEED_10;
238 } else if (status & BMSR_ANEGCOMPLETE) {
239 err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_LPA, &lpa);
240 if (!err)
241 err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_ADVERTISE,
242 &adv);
243 if (err)
244 return err;
245
246 if (adv & lpa & ADVERTISE_1000XFULL) {
247 dplx = DUPLEX_FULL;
248 sp = SPEED_1000;
249 } else if (adv & lpa & ADVERTISE_1000XHALF) {
250 dplx = DUPLEX_HALF;
251 sp = SPEED_1000;
252 }
253
254 if (fc && dplx == DUPLEX_FULL) {
255 if (lpa & adv & ADVERTISE_1000XPAUSE)
256 pause = PAUSE_RX | PAUSE_TX;
257 else if ((lpa & ADVERTISE_1000XPAUSE) &&
258 (adv & lpa & ADVERTISE_1000XPSE_ASYM))
259 pause = PAUSE_TX;
260 else if ((lpa & ADVERTISE_1000XPSE_ASYM) &&
261 (adv & ADVERTISE_1000XPAUSE))
262 pause = PAUSE_RX;
263 }
264 }
265 if (speed)
266 *speed = sp;
267 if (duplex)
268 *duplex = dplx;
269 if (fc)
270 *fc = pause;
271 return 0;
272}
273
274#ifdef UNUSED
275/*
276 * Enable/disable auto MDI/MDI-X in forced link speed mode.
277 */
278static int vsc8211_set_automdi(struct cphy *phy, int enable)
279{
280 int err;
281
282 err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0x52b5);
283 if (err)
284 return err;
285
286 err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 18, 0x12);
287 if (err)
288 return err;
289
290 err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 17, enable ? 0x2803 : 0x3003);
291 if (err)
292 return err;
293
294 err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 16, 0x87fa);
295 if (err)
296 return err;
297
298 err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0);
299 if (err)
300 return err;
301
302 return 0;
303}
304
305int vsc8211_set_speed_duplex(struct cphy *phy, int speed, int duplex)
306{
307 int err;
308
309 err = t3_set_phy_speed_duplex(phy, speed, duplex);
310 if (!err)
311 err = vsc8211_set_automdi(phy, 1);
312 return err;
313}
314#endif /* UNUSED */
315
316static int vsc8211_power_down(struct cphy *cphy, int enable)
317{
318 return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN,
319 enable ? BMCR_PDOWN : 0);
320}
321
322static int vsc8211_intr_handler(struct cphy *cphy)
323{
324 unsigned int cause;
325 int err, cphy_cause = 0;
326
327 err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_STATUS, &cause);
328 if (err)
329 return err;
330
331 cause &= INTR_MASK;
332 if (cause & CFG_CHG_INTR_MASK)
333 cphy_cause |= cphy_cause_link_change;
334 if (cause & (VSC_INTR_RX_FIFO | VSC_INTR_TX_FIFO))
335 cphy_cause |= cphy_cause_fifo_error;
336 return cphy_cause;
337}
338
339static struct cphy_ops vsc8211_ops = {
340 .reset = vsc8211_reset,
341 .intr_enable = vsc8211_intr_enable,
342 .intr_disable = vsc8211_intr_disable,
343 .intr_clear = vsc8211_intr_clear,
344 .intr_handler = vsc8211_intr_handler,
345 .autoneg_enable = vsc8211_autoneg_enable,
346 .autoneg_restart = vsc8211_autoneg_restart,
347 .advertise = t3_phy_advertise,
348 .set_speed_duplex = t3_set_phy_speed_duplex,
349 .get_link_status = vsc8211_get_link_status,
350 .power_down = vsc8211_power_down,
351};
352
353static struct cphy_ops vsc8211_fiber_ops = {
354 .reset = vsc8211_reset,
355 .intr_enable = vsc8211_intr_enable,
356 .intr_disable = vsc8211_intr_disable,
357 .intr_clear = vsc8211_intr_clear,
358 .intr_handler = vsc8211_intr_handler,
359 .autoneg_enable = vsc8211_autoneg_enable,
360 .autoneg_restart = vsc8211_autoneg_restart,
361 .advertise = t3_phy_advertise_fiber,
362 .set_speed_duplex = t3_set_phy_speed_duplex,
363 .get_link_status = vsc8211_get_link_status_fiber,
364 .power_down = vsc8211_power_down,
365};
366
367int t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
368 int phy_addr, const struct mdio_ops *mdio_ops)
369{
370 int err;
371 unsigned int val;
372
373 cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops,
374 SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full |
375 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII |
376 SUPPORTED_TP | SUPPORTED_IRQ, "10/100/1000BASE-T");
377 msleep(20); /* PHY needs ~10ms to start responding to MDIO */
378
379 err = t3_mdio_read(phy, MDIO_DEVAD_NONE, VSC8211_EXT_CTRL, &val);
380 if (err)
381 return err;
382 if (val & VSC_CTRL_MEDIA_MODE_HI) {
383 /* copper interface, just need to configure the LEDs */
384 return t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_LED_CTRL,
385 0x100);
386 }
387
388 phy->caps = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
389 SUPPORTED_MII | SUPPORTED_FIBRE | SUPPORTED_IRQ;
390 phy->desc = "1000BASE-X";
391 phy->ops = &vsc8211_fiber_ops;
392
393 err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 1);
394 if (err)
395 return err;
396
397 err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_SIGDET_CTRL, 1);
398 if (err)
399 return err;
400
401 err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0);
402 if (err)
403 return err;
404
405 err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_CTRL,
406 val | VSC_CTRL_CLAUSE37_VIEW);
407 if (err)
408 return err;
409
410 err = vsc8211_reset(phy, 0);
411 if (err)
412 return err;
413
414 udelay(5); /* delay after reset before next SMI */
415 return 0;
416}
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
new file mode 100644
index 00000000000..3af19a55037
--- /dev/null
+++ b/drivers/net/cxgb3/xgmac.c
@@ -0,0 +1,657 @@
1/*
2 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33#include "regs.h"
34
35/*
36 * # of exact address filters. The first one is used for the station address,
37 * the rest are available for multicast addresses.
38 */
39#define EXACT_ADDR_FILTERS 8
40
41static inline int macidx(const struct cmac *mac)
42{
43 return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
44}
45
46static void xaui_serdes_reset(struct cmac *mac)
47{
48 static const unsigned int clear[] = {
49 F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
50 F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
51 };
52
53 int i;
54 struct adapter *adap = mac->adapter;
55 u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;
56
57 t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
58 F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
59 F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
60 F_RESETPLL23 | F_RESETPLL01);
61 t3_read_reg(adap, ctrl);
62 udelay(15);
63
64 for (i = 0; i < ARRAY_SIZE(clear); i++) {
65 t3_set_reg_field(adap, ctrl, clear[i], 0);
66 udelay(15);
67 }
68}
69
70void t3b_pcs_reset(struct cmac *mac)
71{
72 t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
73 F_PCS_RESET_, 0);
74 udelay(20);
75 t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
76 F_PCS_RESET_);
77}
78
79int t3_mac_reset(struct cmac *mac)
80{
81 static const struct addr_val_pair mac_reset_avp[] = {
82 {A_XGM_TX_CTRL, 0},
83 {A_XGM_RX_CTRL, 0},
84 {A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
85 F_RMFCS | F_ENJUMBO | F_ENHASHMCAST},
86 {A_XGM_RX_HASH_LOW, 0},
87 {A_XGM_RX_HASH_HIGH, 0},
88 {A_XGM_RX_EXACT_MATCH_LOW_1, 0},
89 {A_XGM_RX_EXACT_MATCH_LOW_2, 0},
90 {A_XGM_RX_EXACT_MATCH_LOW_3, 0},
91 {A_XGM_RX_EXACT_MATCH_LOW_4, 0},
92 {A_XGM_RX_EXACT_MATCH_LOW_5, 0},
93 {A_XGM_RX_EXACT_MATCH_LOW_6, 0},
94 {A_XGM_RX_EXACT_MATCH_LOW_7, 0},
95 {A_XGM_RX_EXACT_MATCH_LOW_8, 0},
96 {A_XGM_STAT_CTRL, F_CLRSTATS}
97 };
98 u32 val;
99 struct adapter *adap = mac->adapter;
100 unsigned int oft = mac->offset;
101
102 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
103 t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
104
105 t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
106 t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
107 F_RXSTRFRWRD | F_DISERRFRAMES,
108 uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
109 t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft, 0, F_UNDERUNFIX);
110
111 if (uses_xaui(adap)) {
112 if (adap->params.rev == 0) {
113 t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
114 F_RXENABLE | F_TXENABLE);
115 if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
116 F_CMULOCK, 1, 5, 2)) {
117 CH_ERR(adap,
118 "MAC %d XAUI SERDES CMU lock failed\n",
119 macidx(mac));
120 return -1;
121 }
122 t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
123 F_SERDESRESET_);
124 } else
125 xaui_serdes_reset(mac);
126 }
127
128 t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + oft,
129 V_RXMAXFRAMERSIZE(M_RXMAXFRAMERSIZE),
130 V_RXMAXFRAMERSIZE(MAX_FRAME_SIZE) | F_RXENFRAMER);
131 val = F_MAC_RESET_ | F_XGMAC_STOP_EN;
132
133 if (is_10G(adap))
134 val |= F_PCS_RESET_;
135 else if (uses_xaui(adap))
136 val |= F_PCS_RESET_ | F_XG2G_RESET_;
137 else
138 val |= F_RGMII_RESET_ | F_XG2G_RESET_;
139 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
140 t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
141 if ((val & F_PCS_RESET_) && adap->params.rev) {
142 msleep(1);
143 t3b_pcs_reset(mac);
144 }
145
146 memset(&mac->stats, 0, sizeof(mac->stats));
147 return 0;
148}
149
150static int t3b2_mac_reset(struct cmac *mac)
151{
152 struct adapter *adap = mac->adapter;
153 unsigned int oft = mac->offset, store;
154 int idx = macidx(mac);
155 u32 val;
156
157 if (!macidx(mac))
158 t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
159 else
160 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);
161
162 /* Stop NIC traffic to reduce the number of TXTOGGLES */
163 t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 0);
164 /* Ensure TX drains */
165 t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN, 0);
166
167 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
168 t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
169
170 /* Store A_TP_TX_DROP_CFG_CH0 */
171 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
172 store = t3_read_reg(adap, A_TP_TX_DROP_CFG_CH0 + idx);
173
174 msleep(10);
175
176 /* Change DROP_CFG to 0xc0000011 */
177 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
178 t3_write_reg(adap, A_TP_PIO_DATA, 0xc0000011);
179
180 /* Check for xgm Rx fifo empty */
181	/* Loop count increased from 5 to 1000 to cover the 1G and 100Mbps cases */
182 if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
183 0x80000000, 1, 1000, 2)) {
184 CH_ERR(adap, "MAC %d Rx fifo drain failed\n",
185 macidx(mac));
186 return -1;
187 }
188
189 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0);
190 t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
191
192 val = F_MAC_RESET_;
193 if (is_10G(adap))
194 val |= F_PCS_RESET_;
195 else if (uses_xaui(adap))
196 val |= F_PCS_RESET_ | F_XG2G_RESET_;
197 else
198 val |= F_RGMII_RESET_ | F_XG2G_RESET_;
199 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
200 t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
201 if ((val & F_PCS_RESET_) && adap->params.rev) {
202 msleep(1);
203 t3b_pcs_reset(mac);
204 }
205 t3_write_reg(adap, A_XGM_RX_CFG + oft,
206 F_DISPAUSEFRAMES | F_EN1536BFRAMES |
207 F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);
208
209 /* Restore the DROP_CFG */
210 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
211 t3_write_reg(adap, A_TP_PIO_DATA, store);
212
213 if (!idx)
214 t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
215 else
216 t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);
217
218 /* re-enable nic traffic */
219 t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 1);
220
221 /* Set: re-enable NIC traffic */
222 t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 1);
223
224 return 0;
225}
226
227/*
228 * Set the exact match register 'idx' to recognize the given Ethernet address.
229 */
230static void set_addr_filter(struct cmac *mac, int idx, const u8 * addr)
231{
232 u32 addr_lo, addr_hi;
233 unsigned int oft = mac->offset + idx * 8;
234
235 addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
236 addr_hi = (addr[5] << 8) | addr[4];
237
238 t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
239 t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
240}
241
242/* Set one of the station's unicast MAC addresses. */
243int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
244{
245 if (idx >= mac->nucast)
246 return -EINVAL;
247 set_addr_filter(mac, idx, addr);
248 return 0;
249}
250
251/*
252 * Specify the number of exact address filters that should be reserved for
253 * unicast addresses. Caller should reload the unicast and multicast addresses
254 * after calling this.
255 */
256int t3_mac_set_num_ucast(struct cmac *mac, int n)
257{
258 if (n > EXACT_ADDR_FILTERS)
259 return -EINVAL;
260 mac->nucast = n;
261 return 0;
262}
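/*
 * Editor's note, not part of the original source: a minimal usage sketch for
 * the two functions above.  Changing the unicast reservation invalidates the
 * current filter layout, so the caller reloads the addresses afterwards
 * (the second address and its source are illustrative only):
 *
 *	t3_mac_set_num_ucast(mac, 2);
 *	t3_mac_set_address(mac, 0, dev->dev_addr);
 *	t3_mac_set_address(mac, 1, extra_ucast_addr);
 *	t3_mac_set_rx_mode(mac, dev);
 */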
263
264void t3_mac_disable_exact_filters(struct cmac *mac)
265{
266 unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_LOW_1;
267
268 for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
269 u32 v = t3_read_reg(mac->adapter, reg);
270 t3_write_reg(mac->adapter, reg, v);
271 }
272 t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1); /* flush */
273}
274
275void t3_mac_enable_exact_filters(struct cmac *mac)
276{
277 unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_HIGH_1;
278
279 for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
280 u32 v = t3_read_reg(mac->adapter, reg);
281 t3_write_reg(mac->adapter, reg, v);
282 }
283 t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1); /* flush */
284}
285
286/* Calculate the RX hash filter index of an Ethernet address */
287static int hash_hw_addr(const u8 * addr)
288{
289 int hash = 0, octet, bit, i = 0, c;
290
291 for (octet = 0; octet < 6; ++octet)
292 for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
293 hash ^= (c & 1) << i;
294 if (++i == 6)
295 i = 0;
296 }
297 return hash;
298}
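/*
 * Editor's illustration, not part of the original source: hash_hw_addr()
 * folds the 48 address bits, LSB first within each octet, into a 6-bit bucket
 * index by XOR.  The bucket selects one bit of the 64-bit multicast hash
 * split across A_XGM_RX_HASH_LOW/HIGH, exactly as t3_mac_set_rx_mode() below
 * does.  (A quick check: ff:ff:ff:ff:ff:ff XORs each of the six positions
 * eight times and therefore hashes to bucket 0.)  The unused helper below
 * restates the bucket-to-register mapping on its own.
 */
static inline void example_hash_to_regs(int hash, u32 *hash_lo, u32 *hash_hi)
{
	if (hash < 32)
		*hash_lo |= 1 << hash;		/* buckets 0-31 -> HASH_LOW */
	else
		*hash_hi |= 1 << (hash - 32);	/* buckets 32-63 -> HASH_HIGH */
}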
299
300int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev)
301{
302 u32 val, hash_lo, hash_hi;
303 struct adapter *adap = mac->adapter;
304 unsigned int oft = mac->offset;
305
306 val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
307 if (dev->flags & IFF_PROMISC)
308 val |= F_COPYALLFRAMES;
309 t3_write_reg(adap, A_XGM_RX_CFG + oft, val);
310
311 if (dev->flags & IFF_ALLMULTI)
312 hash_lo = hash_hi = 0xffffffff;
313 else {
314 struct netdev_hw_addr *ha;
315 int exact_addr_idx = mac->nucast;
316
317 hash_lo = hash_hi = 0;
318 netdev_for_each_mc_addr(ha, dev)
319 if (exact_addr_idx < EXACT_ADDR_FILTERS)
320 set_addr_filter(mac, exact_addr_idx++,
321 ha->addr);
322 else {
323 int hash = hash_hw_addr(ha->addr);
324
325 if (hash < 32)
326 hash_lo |= (1 << hash);
327 else
328 hash_hi |= (1 << (hash - 32));
329 }
330 }
331
332 t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
333 t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
334 return 0;
335}
336
337static int rx_fifo_hwm(int mtu)
338{
339 int hwm;
340
341 hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, (MAC_RXFIFO_SIZE * 38) / 100);
342 return min(hwm, MAC_RXFIFO_SIZE - 8192);
343}
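/*
 * Editor's note, not part of the original source: worked numbers for the
 * watermark above, assuming the 32768-byte MAC_RXFIFO_SIZE from common.h:
 *
 *	frame size 1518: max(32768 - 4554, 12451) = 28214, capped to 24576
 *	frame size 9018: max(32768 - 27054, 12451) = 12451, below the cap
 *
 * so large frames pull the PAUSE high watermark down toward 38% of the FIFO,
 * while small frames leave the full FIFO minus 8 KB available.
 */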
344
345int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
346{
347 int hwm, lwm, divisor;
348 int ipg;
349 unsigned int thres, v, reg;
350 struct adapter *adap = mac->adapter;
351
352 /*
353	 * MAX_FRAME_SIZE includes header + FCS, mtu doesn't.  The HW max
354 * packet size register includes header, but not FCS.
355 */
356 mtu += 14;
357 if (mtu > 1536)
358 mtu += 4;
359
360 if (mtu > MAX_FRAME_SIZE - 4)
361 return -EINVAL;
362 t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
363
364 if (adap->params.rev >= T3_REV_B2 &&
365 (t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) {
366 t3_mac_disable_exact_filters(mac);
367 v = t3_read_reg(adap, A_XGM_RX_CFG + mac->offset);
368 t3_set_reg_field(adap, A_XGM_RX_CFG + mac->offset,
369 F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST);
370
371 reg = adap->params.rev == T3_REV_B2 ?
372 A_XGM_RX_MAX_PKT_SIZE_ERR_CNT : A_XGM_RXFIFO_CFG;
373
374 /* drain RX FIFO */
375 if (t3_wait_op_done(adap, reg + mac->offset,
376 F_RXFIFO_EMPTY, 1, 20, 5)) {
377 t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
378 t3_mac_enable_exact_filters(mac);
379 return -EIO;
380 }
381 t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
382 V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
383 V_RXMAXPKTSIZE(mtu));
384 t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
385 t3_mac_enable_exact_filters(mac);
386 } else
387 t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
388 V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
389 V_RXMAXPKTSIZE(mtu));
390
391 /*
392 * Adjust the PAUSE frame watermarks. We always set the LWM, and the
393 * HWM only if flow-control is enabled.
394 */
395 hwm = rx_fifo_hwm(mtu);
396 lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4);
397 v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
398 v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
399 v |= V_RXFIFOPAUSELWM(lwm / 8);
400 if (G_RXFIFOPAUSEHWM(v))
401 v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
402 V_RXFIFOPAUSEHWM(hwm / 8);
403
404 t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);
405
406 /* Adjust the TX FIFO threshold based on the MTU */
407 thres = (adap->params.vpd.cclk * 1000) / 15625;
408 thres = (thres * mtu) / 1000;
409 if (is_10G(adap))
410 thres /= 10;
411 thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
412 thres = max(thres, 8U); /* need at least 8 */
413 ipg = (adap->params.rev == T3_REV_C) ? 0 : 1;
414 t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
415 V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG),
416 V_TXFIFOTHRESH(thres) | V_TXIPG(ipg));
417
418 if (adap->params.rev > 0) {
419 divisor = (adap->params.rev == T3_REV_C) ? 64 : 8;
420 t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset,
421 (hwm - lwm) * 4 / divisor);
422 }
423 t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset,
424 MAC_RXFIFO_SIZE * 4 * 8 / 512);
425 return 0;
426}
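/*
 * Editor's note, not part of the original source: the adjustment at the top
 * of t3_mac_set_mtu() turns an IP MTU into an on-the-wire frame size.  For a
 * standard 1500-byte MTU this gives 1500 + 14 = 1514 (Ethernet header, no
 * FCS); a 9000-byte jumbo MTU gives 9000 + 14 + 4 = 9018, the extra 4 bytes
 * leaving room for a VLAN tag once the frame exceeds 1536 bytes.
 */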
427
428int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
429{
430 u32 val;
431 struct adapter *adap = mac->adapter;
432 unsigned int oft = mac->offset;
433
434 if (duplex >= 0 && duplex != DUPLEX_FULL)
435 return -EINVAL;
436 if (speed >= 0) {
437 if (speed == SPEED_10)
438 val = V_PORTSPEED(0);
439 else if (speed == SPEED_100)
440 val = V_PORTSPEED(1);
441 else if (speed == SPEED_1000)
442 val = V_PORTSPEED(2);
443 else if (speed == SPEED_10000)
444 val = V_PORTSPEED(3);
445 else
446 return -EINVAL;
447
448 t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
449 V_PORTSPEED(M_PORTSPEED), val);
450 }
451
452 val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
453 val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
454 if (fc & PAUSE_TX) {
455 u32 rx_max_pkt_size =
456 G_RXMAXPKTSIZE(t3_read_reg(adap,
457 A_XGM_RX_MAX_PKT_SIZE + oft));
458 val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(rx_max_pkt_size) / 8);
459 }
460 t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
461
462 t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
463 (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
464 return 0;
465}
466
467int t3_mac_enable(struct cmac *mac, int which)
468{
469 int idx = macidx(mac);
470 struct adapter *adap = mac->adapter;
471 unsigned int oft = mac->offset;
472 struct mac_stats *s = &mac->stats;
473
474 if (which & MAC_DIRECTION_TX) {
475 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
476 t3_write_reg(adap, A_TP_PIO_DATA,
477 adap->params.rev == T3_REV_C ?
478 0xc4ffff01 : 0xc0ede401);
479 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
480 t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx,
481 adap->params.rev == T3_REV_C ? 0 : 1 << idx);
482
483 t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
484
485 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx);
486 mac->tx_mcnt = s->tx_frames;
487 mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
488 A_TP_PIO_DATA)));
489 mac->tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
490 A_XGM_TX_SPI4_SOP_EOP_CNT +
491 oft)));
492 mac->rx_mcnt = s->rx_frames;
493 mac->rx_pause = s->rx_pause;
494 mac->rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
495 A_XGM_RX_SPI4_SOP_EOP_CNT +
496 oft)));
497 mac->rx_ocnt = s->rx_fifo_ovfl;
498 mac->txen = F_TXEN;
499 mac->toggle_cnt = 0;
500 }
501 if (which & MAC_DIRECTION_RX)
502 t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
503 return 0;
504}
505
506int t3_mac_disable(struct cmac *mac, int which)
507{
508 struct adapter *adap = mac->adapter;
509
510 if (which & MAC_DIRECTION_TX) {
511 t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
512 mac->txen = 0;
513 }
514 if (which & MAC_DIRECTION_RX) {
515 int val = F_MAC_RESET_;
516
517 t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
518 F_PCS_RESET_, 0);
519 msleep(100);
520 t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
521 if (is_10G(adap))
522 val |= F_PCS_RESET_;
523 else if (uses_xaui(adap))
524 val |= F_PCS_RESET_ | F_XG2G_RESET_;
525 else
526 val |= F_RGMII_RESET_ | F_XG2G_RESET_;
527 t3_write_reg(mac->adapter, A_XGM_RESET_CTRL + mac->offset, val);
528 }
529 return 0;
530}
531
532int t3b2_mac_watchdog_task(struct cmac *mac)
533{
534 struct adapter *adap = mac->adapter;
535 struct mac_stats *s = &mac->stats;
536 unsigned int tx_tcnt, tx_xcnt;
537 u64 tx_mcnt = s->tx_frames;
538 int status;
539
540 status = 0;
541 tx_xcnt = 1; /* By default tx_xcnt is making progress */
542	tx_tcnt = mac->tx_tcnt; /* If tx_mcnt is progressing, ignore tx_tcnt */
543 if (tx_mcnt == mac->tx_mcnt && mac->rx_pause == s->rx_pause) {
544 tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
545 A_XGM_TX_SPI4_SOP_EOP_CNT +
546 mac->offset)));
547 if (tx_xcnt == 0) {
548 t3_write_reg(adap, A_TP_PIO_ADDR,
549 A_TP_TX_DROP_CNT_CH0 + macidx(mac));
550 tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
551 A_TP_PIO_DATA)));
552 } else {
553 goto out;
554 }
555 } else {
556 mac->toggle_cnt = 0;
557 goto out;
558 }
559
560 if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) {
561 if (mac->toggle_cnt > 4) {
562 status = 2;
563 goto out;
564 } else {
565 status = 1;
566 goto out;
567 }
568 } else {
569 mac->toggle_cnt = 0;
570 goto out;
571 }
572
573out:
574 mac->tx_tcnt = tx_tcnt;
575 mac->tx_xcnt = tx_xcnt;
576 mac->tx_mcnt = s->tx_frames;
577 mac->rx_pause = s->rx_pause;
578 if (status == 1) {
579 t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
580 t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */
581 t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, mac->txen);
582 t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */
583 mac->toggle_cnt++;
584 } else if (status == 2) {
585 t3b2_mac_reset(mac);
586 mac->toggle_cnt = 0;
587 }
588 return status;
589}
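/*
 * Editor's note, not part of the original source: the return value above is a
 * three-way status for the caller's periodic check: 0 means Tx is making
 * progress, 1 means the Tx enable was toggled here to nudge a MAC whose SPI-4
 * SOP/EOP counter has stalled, and 2 means the MAC was reset outright after
 * more than four consecutive toggles.
 */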
590
591/*
592 * This function is called periodically to accumulate the current values of the
593 * RMON counters into the port statistics. Since the packet counters are only
594 * 32 bits wide, they can overflow in ~286 secs at 10G, so the function should
595 * be called more frequently than that.  The byte counters are 45 bits wide and
596 * would overflow in ~7.8 hours.
597 */
598const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
599{
600#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
601#define RMON_UPDATE(mac, name, reg) \
602 (mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
603#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
604 (mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
605 ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)
606
607 u32 v, lo;
608
609 RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
610 RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
611 RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
612 RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
613 RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
614 RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
615 RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
616 RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
617 RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);
618
619 RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);
620
621 v = RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
622 if (mac->adapter->params.rev == T3_REV_B2)
623 v &= 0x7fffffff;
624 mac->stats.rx_too_long += v;
625
626 RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
627 RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
628 RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
629 RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
630 RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
631 RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
632 RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);
633
634 RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
635 RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
636 RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
637 RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
638 RMON_UPDATE(mac, tx_pause, TX_PAUSE);
639 /* This counts error frames in general (bad FCS, underrun, etc). */
640 RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);
641
642 RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
643 RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
644 RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
645 RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
646 RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
647 RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
648 RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);
649
650 /* The next stat isn't clear-on-read. */
651 t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
652 v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
653 lo = (u32) mac->stats.rx_cong_drops;
654 mac->stats.rx_cong_drops += (u64) (v - lo);
655
656 return &mac->stats;
657}
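/*
 * Editor's note, not part of the original source: the "~286 secs at 10G"
 * figure in the comment above corresponds to minimum-size frames: 64 bytes
 * plus 20 bytes of preamble and inter-frame gap is 672 bits, so 10 Gb/s
 * carries about 14.9 million frames per second, and a 32-bit frame counter
 * wraps after roughly 2^32 / 14.9e6 seconds, i.e. a little under five
 * minutes.
 */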