path: root/drivers/net/cxgb3
Diffstat (limited to 'drivers/net/cxgb3')
-rw-r--r--  drivers/net/cxgb3/Makefile                 8
-rw-r--r--  drivers/net/cxgb3/adapter.h              279
-rw-r--r--  drivers/net/cxgb3/ael1002.c              251
-rw-r--r--  drivers/net/cxgb3/common.h               729
-rw-r--r--  drivers/net/cxgb3/cxgb3_ctl_defs.h       164
-rw-r--r--  drivers/net/cxgb3/cxgb3_defs.h            99
-rw-r--r--  drivers/net/cxgb3/cxgb3_ioctl.h          185
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c          2515
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.c       1222
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.h        193
-rw-r--r--  drivers/net/cxgb3/firmware_exports.h     177
-rw-r--r--  drivers/net/cxgb3/l2t.c                  450
-rw-r--r--  drivers/net/cxgb3/l2t.h                  143
-rw-r--r--  drivers/net/cxgb3/mc5.c                  473
-rw-r--r--  drivers/net/cxgb3/regs.h                2195
-rw-r--r--  drivers/net/cxgb3/sge.c                 2681
-rw-r--r--  drivers/net/cxgb3/sge_defs.h             251
-rw-r--r--  drivers/net/cxgb3/t3_cpl.h              1444
-rw-r--r--  drivers/net/cxgb3/t3_hw.c               3375
-rw-r--r--  drivers/net/cxgb3/t3cdev.h                73
-rw-r--r--  drivers/net/cxgb3/version.h               39
-rw-r--r--  drivers/net/cxgb3/vsc8211.c              228
-rw-r--r--  drivers/net/cxgb3/xgmac.c                409
23 files changed, 17583 insertions, 0 deletions
diff --git a/drivers/net/cxgb3/Makefile b/drivers/net/cxgb3/Makefile
new file mode 100644
index 000000000000..343467985321
--- /dev/null
+++ b/drivers/net/cxgb3/Makefile
@@ -0,0 +1,8 @@
1#
2# Chelsio T3 driver
3#
4
5obj-$(CONFIG_CHELSIO_T3) += cxgb3.o
6
7cxgb3-objs := cxgb3_main.o ael1002.o vsc8211.o t3_hw.o mc5.o \
8 xgmac.o sge.o l2t.o cxgb3_offload.o
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
new file mode 100644
index 000000000000..5c97a64451ce
--- /dev/null
+++ b/drivers/net/cxgb3/adapter.h
@@ -0,0 +1,279 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/* This file should not be included directly. Include common.h instead. */
34
35#ifndef __T3_ADAPTER_H__
36#define __T3_ADAPTER_H__
37
38#include <linux/pci.h>
39#include <linux/spinlock.h>
40#include <linux/interrupt.h>
41#include <linux/timer.h>
42#include <linux/cache.h>
43#include <linux/mutex.h>
44#include "t3cdev.h"
45#include <asm/semaphore.h>
46#include <asm/bitops.h>
47#include <asm/io.h>
48
49typedef irqreturn_t (*intr_handler_t)(int, void *);
50
51struct vlan_group;
52
53struct port_info {
54 struct vlan_group *vlan_grp;
55 const struct port_type_info *port_type;
56 u8 port_id;
57 u8 rx_csum_offload;
58 u8 nqsets;
59 u8 first_qset;
60 struct cphy phy;
61 struct cmac mac;
62 struct link_config link_config;
63 struct net_device_stats netstats;
64 int activity;
65};
66
67enum { /* adapter flags */
68 FULL_INIT_DONE = (1 << 0),
69 USING_MSI = (1 << 1),
70 USING_MSIX = (1 << 2),
71 QUEUES_BOUND = (1 << 3),
72};
73
74struct rx_desc;
75struct rx_sw_desc;
76
77struct sge_fl { /* SGE per free-buffer list state */
78 unsigned int buf_size; /* size of each Rx buffer */
79 unsigned int credits; /* # of available Rx buffers */
80 unsigned int size; /* capacity of free list */
81 unsigned int cidx; /* consumer index */
82 unsigned int pidx; /* producer index */
83 unsigned int gen; /* free list generation */
84 struct rx_desc *desc; /* address of HW Rx descriptor ring */
85 struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
86 dma_addr_t phys_addr; /* physical address of HW ring start */
87 unsigned int cntxt_id; /* SGE context id for the free list */
88 unsigned long empty; /* # of times queue ran out of buffers */
89};
90
91/*
92 * Bundle size for grouping offload RX packets for delivery to the stack.
93 * Don't make this too big as we do prefetch on each packet in a bundle.
94 */
95# define RX_BUNDLE_SIZE 8
96
97struct rsp_desc;
98
99struct sge_rspq { /* state for an SGE response queue */
100 unsigned int credits; /* # of pending response credits */
101 unsigned int size; /* capacity of response queue */
102 unsigned int cidx; /* consumer index */
103 unsigned int gen; /* current generation bit */
104 unsigned int polling; /* is the queue serviced through NAPI? */
105 unsigned int holdoff_tmr; /* interrupt holdoff timer in 100ns */
106 unsigned int next_holdoff; /* holdoff time for next interrupt */
107 struct rsp_desc *desc; /* address of HW response ring */
108 dma_addr_t phys_addr; /* physical address of the ring */
109 unsigned int cntxt_id; /* SGE context id for the response q */
110 spinlock_t lock; /* guards response processing */
111 struct sk_buff *rx_head; /* offload packet receive queue head */
112 struct sk_buff *rx_tail; /* offload packet receive queue tail */
113
114 unsigned long offload_pkts;
115 unsigned long offload_bundles;
116 unsigned long eth_pkts; /* # of ethernet packets */
117 unsigned long pure_rsps; /* # of pure (non-data) responses */
118 unsigned long imm_data; /* responses with immediate data */
119 unsigned long rx_drops; /* # of packets dropped due to no mem */
120 unsigned long async_notif; /* # of asynchronous notification events */
121 unsigned long empty; /* # of times queue ran out of credits */
122 unsigned long nomem; /* # of responses deferred due to no mem */
123 unsigned long unhandled_irqs; /* # of spurious intrs */
124};
125
126struct tx_desc;
127struct tx_sw_desc;
128
129struct sge_txq { /* state for an SGE Tx queue */
130 unsigned long flags; /* HW DMA fetch status */
131 unsigned int in_use; /* # of in-use Tx descriptors */
132 unsigned int size; /* # of descriptors */
133 unsigned int processed; /* total # of descs HW has processed */
134 unsigned int cleaned; /* total # of descs SW has reclaimed */
135 unsigned int stop_thres; /* SW TX queue suspend threshold */
136 unsigned int cidx; /* consumer index */
137 unsigned int pidx; /* producer index */
138 unsigned int gen; /* current value of generation bit */
139 unsigned int unacked; /* Tx descriptors used since last COMPL */
140 struct tx_desc *desc; /* address of HW Tx descriptor ring */
141 struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
142 spinlock_t lock; /* guards enqueueing of new packets */
143 unsigned int token; /* WR token */
144 dma_addr_t phys_addr; /* physical address of the ring */
145 struct sk_buff_head sendq; /* List of backpressured offload packets */
146 struct tasklet_struct qresume_tsk; /* restarts the queue */
147 unsigned int cntxt_id; /* SGE context id for the Tx q */
148 unsigned long stops; /* # of times q has been stopped */
149 unsigned long restarts; /* # of queue restarts */
150};
151
152enum { /* per port SGE statistics */
153 SGE_PSTAT_TSO, /* # of TSO requests */
154 SGE_PSTAT_RX_CSUM_GOOD, /* # of successful RX csum offloads */
155 SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
156 SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
157 SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
158
159 SGE_PSTAT_MAX /* must be last */
160};
161
162struct sge_qset { /* an SGE queue set */
163 struct sge_rspq rspq;
164 struct sge_fl fl[SGE_RXQ_PER_SET];
165 struct sge_txq txq[SGE_TXQ_PER_SET];
166 struct net_device *netdev; /* associated net device */
167 unsigned long txq_stopped; /* which Tx queues are stopped */
168 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
169 unsigned long port_stats[SGE_PSTAT_MAX];
170} ____cacheline_aligned;
171
172struct sge {
173 struct sge_qset qs[SGE_QSETS];
174 spinlock_t reg_lock; /* guards non-atomic SGE registers (eg context) */
175};
176
177struct adapter {
178 struct t3cdev tdev;
179 struct list_head adapter_list;
180 void __iomem *regs;
181 struct pci_dev *pdev;
182 unsigned long registered_device_map;
183 unsigned long open_device_map;
184 unsigned long flags;
185
186 const char *name;
187 int msg_enable;
188 unsigned int mmio_len;
189
190 struct adapter_params params;
191 unsigned int slow_intr_mask;
192 unsigned long irq_stats[IRQ_NUM_STATS];
193
194 struct {
195 unsigned short vec;
196 char desc[22];
197 } msix_info[SGE_QSETS + 1];
198
199 /* T3 modules */
200 struct sge sge;
201 struct mc7 pmrx;
202 struct mc7 pmtx;
203 struct mc7 cm;
204 struct mc5 mc5;
205
206 struct net_device *port[MAX_NPORTS];
207 unsigned int check_task_cnt;
208 struct delayed_work adap_check_task;
209 struct work_struct ext_intr_handler_task;
210
211 /*
212 * Dummy netdevices are needed when using multiple receive queues with
213 * NAPI as each netdevice can service only one queue.
214 */
215 struct net_device *dummy_netdev[SGE_QSETS - 1];
216
217 struct dentry *debugfs_root;
218
219 struct mutex mdio_lock;
220 spinlock_t stats_lock;
221 spinlock_t work_lock;
222};
223
224static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr)
225{
226 u32 val = readl(adapter->regs + reg_addr);
227
228 CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr, val);
229 return val;
230}
231
232static inline void t3_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
233{
234 CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr, val);
235 writel(val, adapter->regs + reg_addr);
236}
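
/*
 * Editorial sketch, not part of this patch: a read-modify-write helper
 * composed from the two accessors above. This mirrors what
 * t3_set_reg_field() (declared in common.h) is expected to do; the
 * name t3_rmw_reg is hypothetical.
 */
static inline void t3_rmw_reg(struct adapter *adapter, u32 reg_addr,
			      u32 mask, u32 val)
{
	u32 v = t3_read_reg(adapter, reg_addr);	/* current contents */

	t3_write_reg(adapter, reg_addr, (v & ~mask) | val);
}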
237
238static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
239{
240 return netdev_priv(adap->port[idx]);
241}
242
243/*
244 * We use the spare atalk_ptr to map a net device to its SGE queue set.
245 * This is a macro so it can be used as an l-value.
246 */
247#define dev2qset(netdev) ((netdev)->atalk_ptr)
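
/*
 * Editorial usage sketch (hypothetical helpers, not part of this
 * patch): dev2qset() expands to a plain member access, so it works
 * both as an l-value and as an r-value.
 */
static inline void example_bind_qset(struct net_device *dev,
				     struct sge_qset *qs)
{
	dev2qset(dev) = qs;			/* assignment: l-value */
}

static inline struct sge_qset *example_get_qset(struct net_device *dev)
{
	return dev2qset(dev);			/* read: r-value */
}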
248
249#define OFFLOAD_DEVMAP_BIT 15
250
251#define tdev2adap(d) container_of(d, struct adapter, tdev)
252
253static inline int offload_running(struct adapter *adapter)
254{
255 return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
256}
257
258int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb);
259
260void t3_os_ext_intr_handler(struct adapter *adapter);
261void t3_os_link_changed(struct adapter *adapter, int port_id, int link_status,
262 int speed, int duplex, int fc);
263
264void t3_sge_start(struct adapter *adap);
265void t3_sge_stop(struct adapter *adap);
266void t3_free_sge_resources(struct adapter *adap);
267void t3_sge_err_intr_handler(struct adapter *adapter);
268intr_handler_t t3_intr_handler(struct adapter *adap, int polling);
269int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev);
270int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
271void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
272int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
273 int irq_vec_idx, const struct qset_params *p,
274 int ntxq, struct net_device *netdev);
275int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
276 unsigned char *data);
277irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
278
279#endif /* __T3_ADAPTER_H__ */
diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c
new file mode 100644
index 000000000000..73a41e6a5bfc
--- /dev/null
+++ b/drivers/net/cxgb3/ael1002.c
@@ -0,0 +1,251 @@
1/*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33#include "regs.h"
34
35enum {
36 AEL100X_TX_DISABLE = 9,
37 AEL100X_TX_CONFIG1 = 0xc002,
38 AEL1002_PWR_DOWN_HI = 0xc011,
39 AEL1002_PWR_DOWN_LO = 0xc012,
40 AEL1002_XFI_EQL = 0xc015,
41 AEL1002_LB_EN = 0xc017,
42
43 LASI_CTRL = 0x9002,
44 LASI_STAT = 0x9005
45};
46
47static void ael100x_txon(struct cphy *phy)
48{
49 int tx_on_gpio = phy->addr == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL;
50
51 msleep(100);
52 t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, 0, tx_on_gpio);
53 msleep(30);
54}
55
56static int ael1002_power_down(struct cphy *phy, int enable)
57{
58 int err;
59
60 err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_DISABLE, !!enable);
61 if (!err)
62 err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
63 BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
64 return err;
65}
66
67static int ael1002_reset(struct cphy *phy, int wait)
68{
69 int err;
70
71 if ((err = ael1002_power_down(phy, 0)) ||
72 (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_CONFIG1, 1)) ||
73 (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_HI, 0)) ||
74 (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_LO, 0)) ||
75 (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_XFI_EQL, 0x18)) ||
76 (err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, AEL1002_LB_EN,
77 0, 1 << 5)))
78 return err;
79 return 0;
80}
81
82static int ael1002_intr_noop(struct cphy *phy)
83{
84 return 0;
85}
86
87static int ael100x_get_link_status(struct cphy *phy, int *link_ok,
88 int *speed, int *duplex, int *fc)
89{
90 if (link_ok) {
91 unsigned int status;
92 int err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &status);
93
94 /*
95 * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
96 * once more to get the current link state.
97 */
98 if (!err && !(status & BMSR_LSTATUS))
99 err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR,
100 &status);
101 if (err)
102 return err;
103 *link_ok = !!(status & BMSR_LSTATUS);
104 }
105 if (speed)
106 *speed = SPEED_10000;
107 if (duplex)
108 *duplex = DUPLEX_FULL;
109 return 0;
110}
111
112static struct cphy_ops ael1002_ops = {
113 .reset = ael1002_reset,
114 .intr_enable = ael1002_intr_noop,
115 .intr_disable = ael1002_intr_noop,
116 .intr_clear = ael1002_intr_noop,
117 .intr_handler = ael1002_intr_noop,
118 .get_link_status = ael100x_get_link_status,
119 .power_down = ael1002_power_down,
120};
121
122void t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
123 int phy_addr, const struct mdio_ops *mdio_ops)
124{
125 cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops);
126 ael100x_txon(phy);
127}
128
129static int ael1006_reset(struct cphy *phy, int wait)
130{
131 return t3_phy_reset(phy, MDIO_DEV_PMA_PMD, wait);
132}
133
134static int ael1006_intr_enable(struct cphy *phy)
135{
136 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
137}
138
139static int ael1006_intr_disable(struct cphy *phy)
140{
141 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
142}
143
144static int ael1006_intr_clear(struct cphy *phy)
145{
146 u32 val;
147
148 return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
149}
150
151static int ael1006_intr_handler(struct cphy *phy)
152{
153 unsigned int status;
154 int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
155
156 if (err)
157 return err;
158 return (status & 1) ? cphy_cause_link_change : 0;
159}
160
161static int ael1006_power_down(struct cphy *phy, int enable)
162{
163 return t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
164 BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
165}
166
167static struct cphy_ops ael1006_ops = {
168 .reset = ael1006_reset,
169 .intr_enable = ael1006_intr_enable,
170 .intr_disable = ael1006_intr_disable,
171 .intr_clear = ael1006_intr_clear,
172 .intr_handler = ael1006_intr_handler,
173 .get_link_status = ael100x_get_link_status,
174 .power_down = ael1006_power_down,
175};
176
177void t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
178 int phy_addr, const struct mdio_ops *mdio_ops)
179{
180 cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops);
181 ael100x_txon(phy);
182}
183
184static struct cphy_ops qt2045_ops = {
185 .reset = ael1006_reset,
186 .intr_enable = ael1006_intr_enable,
187 .intr_disable = ael1006_intr_disable,
188 .intr_clear = ael1006_intr_clear,
189 .intr_handler = ael1006_intr_handler,
190 .get_link_status = ael100x_get_link_status,
191 .power_down = ael1006_power_down,
192};
193
194void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter,
195 int phy_addr, const struct mdio_ops *mdio_ops)
196{
197 unsigned int stat;
198
199 cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops);
200
201 /*
202 * Some cards where the PHY is supposed to be at address 0 actually
203 * have it at 1.
204 */
205 if (!phy_addr && !mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &stat) &&
206 stat == 0xffff)
207 phy->addr = 1;
208}
209
210static int xaui_direct_reset(struct cphy *phy, int wait)
211{
212 return 0;
213}
214
215static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok,
216 int *speed, int *duplex, int *fc)
217{
218 if (link_ok) {
219 unsigned int status;
220
221 status = t3_read_reg(phy->adapter,
222 XGM_REG(A_XGM_SERDES_STAT0, phy->addr));
223 *link_ok = !(status & F_LOWSIG0);
224 }
225 if (speed)
226 *speed = SPEED_10000;
227 if (duplex)
228 *duplex = DUPLEX_FULL;
229 return 0;
230}
231
232static int xaui_direct_power_down(struct cphy *phy, int enable)
233{
234 return 0;
235}
236
237static struct cphy_ops xaui_direct_ops = {
238 .reset = xaui_direct_reset,
239 .intr_enable = ael1002_intr_noop,
240 .intr_disable = ael1002_intr_noop,
241 .intr_clear = ael1002_intr_noop,
242 .intr_handler = ael1002_intr_noop,
243 .get_link_status = xaui_direct_get_link_status,
244 .power_down = xaui_direct_power_down,
245};
246
247void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
248 int phy_addr, const struct mdio_ops *mdio_ops)
249{
250 cphy_init(phy, adapter, 1, &xaui_direct_ops, mdio_ops);
251}
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
new file mode 100644
index 000000000000..e23deeb7d06d
--- /dev/null
+++ b/drivers/net/cxgb3/common.h
@@ -0,0 +1,729 @@
1/*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef __CHELSIO_COMMON_H
33#define __CHELSIO_COMMON_H
34
35#include <linux/kernel.h>
36#include <linux/types.h>
37#include <linux/ctype.h>
38#include <linux/delay.h>
39#include <linux/init.h>
40#include <linux/netdevice.h>
41#include <linux/ethtool.h>
42#include <linux/mii.h>
43#include "version.h"
44
45#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__)
46#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__)
47#define CH_ALERT(adap, fmt, ...) \
48 dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__)
49
50/*
51 * A more powerful macro that selectively prints messages based on
52 * msg_enable; used for info and debugging messages.
53 */
54#define CH_MSG(adapter, level, category, fmt, ...) do { \
55 if ((adapter)->msg_enable & NETIF_MSG_##category) \
56 dev_printk(KERN_##level, &adapter->pdev->dev, fmt, \
57 ## __VA_ARGS__); \
58} while (0)
59
60#ifdef DEBUG
61# define CH_DBG(adapter, category, fmt, ...) \
62 CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
63#else
64# define CH_DBG(adapter, category, fmt, ...)
65#endif
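
/*
 * Editorial usage sketch (not part of this patch): a call such as
 *
 *	CH_MSG(adapter, INFO, LINK, "port %d link up\n", port_id);
 *
 * reaches the log only when NETIF_MSG_LINK is set in
 * adapter->msg_enable, and is printed at KERN_INFO level.
 */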
66
67/* Additional NETIF_MSG_* categories */
68#define NETIF_MSG_MMIO 0x8000000
69
70struct t3_rx_mode {
71 struct net_device *dev;
72 struct dev_mc_list *mclist;
73 unsigned int idx;
74};
75
76static inline void init_rx_mode(struct t3_rx_mode *p, struct net_device *dev,
77 struct dev_mc_list *mclist)
78{
79 p->dev = dev;
80 p->mclist = mclist;
81 p->idx = 0;
82}
83
84static inline u8 *t3_get_next_mcaddr(struct t3_rx_mode *rm)
85{
86 u8 *addr = NULL;
87
88 if (rm->mclist && rm->idx < rm->dev->mc_count) {
89 addr = rm->mclist->dmi_addr;
90 rm->mclist = rm->mclist->next;
91 rm->idx++;
92 }
93 return addr;
94}
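
/*
 * Editorial usage sketch (hypothetical function, not part of this
 * patch): walk every multicast address supplied through a t3_rx_mode
 * iterator. t3_mac_set_rx_mode() is the intended consumer and would
 * program each address into a MAC exact-match filter.
 */
static inline unsigned int example_count_mcaddrs(struct t3_rx_mode *rm)
{
	unsigned int n = 0;

	while (t3_get_next_mcaddr(rm))		/* NULL ends the walk */
		n++;
	return n;
}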
95
96enum {
97 MAX_NPORTS = 2, /* max # of ports */
98 MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */
99 EEPROMSIZE = 8192, /* Serial EEPROM size */
100 RSS_TABLE_SIZE = 64, /* size of RSS lookup and mapping tables */
101 TCB_SIZE = 128, /* TCB size */
102 NMTUS = 16, /* size of MTU table */
103 NCCTRL_WIN = 32, /* # of congestion control windows */
104};
105
106#define MAX_RX_COALESCING_LEN 16224U
107
108enum {
109 PAUSE_RX = 1 << 0,
110 PAUSE_TX = 1 << 1,
111 PAUSE_AUTONEG = 1 << 2
112};
113
114enum {
115 SUPPORTED_OFFLOAD = 1 << 24,
116 SUPPORTED_IRQ = 1 << 25
117};
118
119enum { /* adapter interrupt-maintained statistics */
120 STAT_ULP_CH0_PBL_OOB,
121 STAT_ULP_CH1_PBL_OOB,
122 STAT_PCI_CORR_ECC,
123
124 IRQ_NUM_STATS /* keep last */
125};
126
127enum {
128 SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */
129 SGE_RXQ_PER_SET = 2, /* # of Rx queues per set */
130 SGE_TXQ_PER_SET = 3 /* # of Tx queues per set */
131};
132
133enum sge_context_type { /* SGE egress context types */
134 SGE_CNTXT_RDMA = 0,
135 SGE_CNTXT_ETH = 2,
136 SGE_CNTXT_OFLD = 4,
137 SGE_CNTXT_CTRL = 5
138};
139
140enum {
141 AN_PKT_SIZE = 32, /* async notification packet size */
142 IMMED_PKT_SIZE = 48 /* packet size for immediate data */
143};
144
145struct sg_ent { /* SGE scatter/gather entry */
146 u32 len[2];
147 u64 addr[2];
148};
149
150#ifndef SGE_NUM_GENBITS
151/* Must be 1 or 2 */
152# define SGE_NUM_GENBITS 2
153#endif
154
155#define TX_DESC_FLITS 16U
156#define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS)
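
/*
 * Editorial note: with the default SGE_NUM_GENBITS of 2 this works out
 * to WR_FLITS = 16 + 1 - 2 = 15 eight-byte flits per work request.
 */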
157
158struct cphy;
159struct adapter;
160
161struct mdio_ops {
162 int (*read)(struct adapter *adapter, int phy_addr, int mmd_addr,
163 int reg_addr, unsigned int *val);
164 int (*write)(struct adapter *adapter, int phy_addr, int mmd_addr,
165 int reg_addr, unsigned int val);
166};
167
168struct adapter_info {
169 unsigned char nports; /* # of ports */
170 unsigned char phy_base_addr; /* MDIO PHY base address */
171 unsigned char mdien;
172 unsigned char mdiinv;
173 unsigned int gpio_out; /* GPIO output settings */
174 unsigned int gpio_intr; /* GPIO IRQ enable mask */
175 unsigned long caps; /* adapter capabilities */
176 const struct mdio_ops *mdio_ops; /* MDIO operations */
177 const char *desc; /* product description */
178};
179
180struct port_type_info {
181 void (*phy_prep)(struct cphy *phy, struct adapter *adapter,
182 int phy_addr, const struct mdio_ops *ops);
183 unsigned int caps;
184 const char *desc;
185};
186
187struct mc5_stats {
188 unsigned long parity_err;
189 unsigned long active_rgn_full;
190 unsigned long nfa_srch_err;
191 unsigned long unknown_cmd;
192 unsigned long reqq_parity_err;
193 unsigned long dispq_parity_err;
194 unsigned long del_act_empty;
195};
196
197struct mc7_stats {
198 unsigned long corr_err;
199 unsigned long uncorr_err;
200 unsigned long parity_err;
201 unsigned long addr_err;
202};
203
204struct mac_stats {
205 u64 tx_octets; /* total # of octets in good frames */
206 u64 tx_octets_bad; /* total # of octets in error frames */
207 u64 tx_frames; /* all good frames */
208 u64 tx_mcast_frames; /* good multicast frames */
209 u64 tx_bcast_frames; /* good broadcast frames */
210 u64 tx_pause; /* # of transmitted pause frames */
211 u64 tx_deferred; /* frames with deferred transmissions */
212 u64 tx_late_collisions; /* # of late collisions */
213 u64 tx_total_collisions; /* # of total collisions */
214 u64 tx_excess_collisions; /* frame errors from excessive collisions */
215 u64 tx_underrun; /* # of Tx FIFO underruns */
216 u64 tx_len_errs; /* # of Tx length errors */
217 u64 tx_mac_internal_errs; /* # of internal MAC errors on Tx */
218 u64 tx_excess_deferral; /* # of frames with excessive deferral */
219 u64 tx_fcs_errs; /* # of frames with bad FCS */
220
221 u64 tx_frames_64; /* # of Tx frames in a particular range */
222 u64 tx_frames_65_127;
223 u64 tx_frames_128_255;
224 u64 tx_frames_256_511;
225 u64 tx_frames_512_1023;
226 u64 tx_frames_1024_1518;
227 u64 tx_frames_1519_max;
228
229 u64 rx_octets; /* total # of octets in good frames */
230 u64 rx_octets_bad; /* total # of octets in error frames */
231 u64 rx_frames; /* all good frames */
232 u64 rx_mcast_frames; /* good multicast frames */
233 u64 rx_bcast_frames; /* good broadcast frames */
234 u64 rx_pause; /* # of received pause frames */
235 u64 rx_fcs_errs; /* # of received frames with bad FCS */
236 u64 rx_align_errs; /* alignment errors */
237 u64 rx_symbol_errs; /* symbol errors */
238 u64 rx_data_errs; /* data errors */
239 u64 rx_sequence_errs; /* sequence errors */
240 u64 rx_runt; /* # of runt frames */
241 u64 rx_jabber; /* # of jabber frames */
242 u64 rx_short; /* # of short frames */
243 u64 rx_too_long; /* # of oversized frames */
244 u64 rx_mac_internal_errs; /* # of internal MAC errors on Rx */
245
246 u64 rx_frames_64; /* # of Rx frames in a particular range */
247 u64 rx_frames_65_127;
248 u64 rx_frames_128_255;
249 u64 rx_frames_256_511;
250 u64 rx_frames_512_1023;
251 u64 rx_frames_1024_1518;
252 u64 rx_frames_1519_max;
253
254 u64 rx_cong_drops; /* # of Rx drops due to SGE congestion */
255
256 unsigned long tx_fifo_parity_err;
257 unsigned long rx_fifo_parity_err;
258 unsigned long tx_fifo_urun;
259 unsigned long rx_fifo_ovfl;
260 unsigned long serdes_signal_loss;
261 unsigned long xaui_pcs_ctc_err;
262 unsigned long xaui_pcs_align_change;
263};
264
265struct tp_mib_stats {
266 u32 ipInReceive_hi;
267 u32 ipInReceive_lo;
268 u32 ipInHdrErrors_hi;
269 u32 ipInHdrErrors_lo;
270 u32 ipInAddrErrors_hi;
271 u32 ipInAddrErrors_lo;
272 u32 ipInUnknownProtos_hi;
273 u32 ipInUnknownProtos_lo;
274 u32 ipInDiscards_hi;
275 u32 ipInDiscards_lo;
276 u32 ipInDelivers_hi;
277 u32 ipInDelivers_lo;
278 u32 ipOutRequests_hi;
279 u32 ipOutRequests_lo;
280 u32 ipOutDiscards_hi;
281 u32 ipOutDiscards_lo;
282 u32 ipOutNoRoutes_hi;
283 u32 ipOutNoRoutes_lo;
284 u32 ipReasmTimeout;
285 u32 ipReasmReqds;
286 u32 ipReasmOKs;
287 u32 ipReasmFails;
288
289 u32 reserved[8];
290
291 u32 tcpActiveOpens;
292 u32 tcpPassiveOpens;
293 u32 tcpAttemptFails;
294 u32 tcpEstabResets;
295 u32 tcpOutRsts;
296 u32 tcpCurrEstab;
297 u32 tcpInSegs_hi;
298 u32 tcpInSegs_lo;
299 u32 tcpOutSegs_hi;
300 u32 tcpOutSegs_lo;
301 u32 tcpRetransSeg_hi;
302 u32 tcpRetransSeg_lo;
303 u32 tcpInErrs_hi;
304 u32 tcpInErrs_lo;
305 u32 tcpRtoMin;
306 u32 tcpRtoMax;
307};
308
309struct tp_params {
310 unsigned int nchan; /* # of channels */
311 unsigned int pmrx_size; /* total PMRX capacity */
312 unsigned int pmtx_size; /* total PMTX capacity */
313 unsigned int cm_size; /* total CM capacity */
314 unsigned int chan_rx_size; /* per channel Rx size */
315 unsigned int chan_tx_size; /* per channel Tx size */
316 unsigned int rx_pg_size; /* Rx page size */
317 unsigned int tx_pg_size; /* Tx page size */
318 unsigned int rx_num_pgs; /* # of Rx pages */
319 unsigned int tx_num_pgs; /* # of Tx pages */
320 unsigned int ntimer_qs; /* # of timer queues */
321};
322
323struct qset_params { /* SGE queue set parameters */
324 unsigned int polling; /* polling/interrupt service for rspq */
325 unsigned int coalesce_usecs; /* irq coalescing timer */
326 unsigned int rspq_size; /* # of entries in response queue */
327 unsigned int fl_size; /* # of entries in regular free list */
328 unsigned int jumbo_size; /* # of entries in jumbo free list */
329 unsigned int txq_size[SGE_TXQ_PER_SET]; /* Tx queue sizes */
330 unsigned int cong_thres; /* FL congestion threshold */
331};
332
333struct sge_params {
334 unsigned int max_pkt_size; /* max offload pkt size */
335 struct qset_params qset[SGE_QSETS];
336};
337
338struct mc5_params {
339 unsigned int mode; /* selects MC5 width */
340 unsigned int nservers; /* size of server region */
341 unsigned int nfilters; /* size of filter region */
342 unsigned int nroutes; /* size of routing region */
343};
344
345/* Default MC5 region sizes */
346enum {
347 DEFAULT_NSERVERS = 512,
348 DEFAULT_NFILTERS = 128
349};
350
351/* MC5 modes, these must be non-0 */
352enum {
353 MC5_MODE_144_BIT = 1,
354 MC5_MODE_72_BIT = 2
355};
356
357struct vpd_params {
358 unsigned int cclk;
359 unsigned int mclk;
360 unsigned int uclk;
361 unsigned int mdc;
362 unsigned int mem_timing;
363 u8 eth_base[6];
364 u8 port_type[MAX_NPORTS];
365 unsigned short xauicfg[2];
366};
367
368struct pci_params {
369 unsigned int vpd_cap_addr;
370 unsigned int pcie_cap_addr;
371 unsigned short speed;
372 unsigned char width;
373 unsigned char variant;
374};
375
376enum {
377 PCI_VARIANT_PCI,
378 PCI_VARIANT_PCIX_MODE1_PARITY,
379 PCI_VARIANT_PCIX_MODE1_ECC,
380 PCI_VARIANT_PCIX_266_MODE2,
381 PCI_VARIANT_PCIE
382};
383
384struct adapter_params {
385 struct sge_params sge;
386 struct mc5_params mc5;
387 struct tp_params tp;
388 struct vpd_params vpd;
389 struct pci_params pci;
390
391 const struct adapter_info *info;
392
393 unsigned short mtus[NMTUS];
394 unsigned short a_wnd[NCCTRL_WIN];
395 unsigned short b_wnd[NCCTRL_WIN];
396
397 unsigned int nports; /* # of ethernet ports */
398 unsigned int stats_update_period; /* MAC stats accumulation period */
399 unsigned int linkpoll_period; /* link poll period in 0.1s */
400 unsigned int rev; /* chip revision */
401};
402
403struct trace_params {
404 u32 sip;
405 u32 sip_mask;
406 u32 dip;
407 u32 dip_mask;
408 u16 sport;
409 u16 sport_mask;
410 u16 dport;
411 u16 dport_mask;
412 u32 vlan:12;
413 u32 vlan_mask:12;
414 u32 intf:4;
415 u32 intf_mask:4;
416 u8 proto;
417 u8 proto_mask;
418};
419
420struct link_config {
421 unsigned int supported; /* link capabilities */
422 unsigned int advertising; /* advertised capabilities */
423 unsigned short requested_speed; /* speed user has requested */
424 unsigned short speed; /* actual link speed */
425 unsigned char requested_duplex; /* duplex user has requested */
426 unsigned char duplex; /* actual link duplex */
427 unsigned char requested_fc; /* flow control user has requested */
428 unsigned char fc; /* actual link flow control */
429 unsigned char autoneg; /* autonegotiating? */
430 unsigned int link_ok; /* link up? */
431};
432
433#define SPEED_INVALID 0xffff
434#define DUPLEX_INVALID 0xff
435
436struct mc5 {
437 struct adapter *adapter;
438 unsigned int tcam_size;
439 unsigned char part_type;
440 unsigned char parity_enabled;
441 unsigned char mode;
442 struct mc5_stats stats;
443};
444
445static inline unsigned int t3_mc5_size(const struct mc5 *p)
446{
447 return p->tcam_size;
448}
449
450struct mc7 {
451 struct adapter *adapter; /* backpointer to adapter */
452 unsigned int size; /* memory size in bytes */
453 unsigned int width; /* MC7 interface width */
454 unsigned int offset; /* register address offset for MC7 instance */
455 const char *name; /* name of MC7 instance */
456 struct mc7_stats stats; /* MC7 statistics */
457};
458
459static inline unsigned int t3_mc7_size(const struct mc7 *p)
460{
461 return p->size;
462}
463
464struct cmac {
465 struct adapter *adapter;
466 unsigned int offset;
467 unsigned int nucast; /* # of address filters for unicast MACs */
468 struct mac_stats stats;
469};
470
471enum {
472 MAC_DIRECTION_RX = 1,
473 MAC_DIRECTION_TX = 2,
474 MAC_RXFIFO_SIZE = 32768
475};
476
477/* IEEE 802.3ae specified MDIO devices */
478enum {
479 MDIO_DEV_PMA_PMD = 1,
480 MDIO_DEV_WIS = 2,
481 MDIO_DEV_PCS = 3,
482 MDIO_DEV_XGXS = 4
483};
484
485/* PHY loopback direction */
486enum {
487 PHY_LOOPBACK_TX = 1,
488 PHY_LOOPBACK_RX = 2
489};
490
491/* PHY interrupt types */
492enum {
493 cphy_cause_link_change = 1,
494 cphy_cause_fifo_error = 2
495};
496
497/* PHY operations */
498struct cphy_ops {
499 void (*destroy)(struct cphy *phy);
500 int (*reset)(struct cphy *phy, int wait);
501
502 int (*intr_enable)(struct cphy *phy);
503 int (*intr_disable)(struct cphy *phy);
504 int (*intr_clear)(struct cphy *phy);
505 int (*intr_handler)(struct cphy *phy);
506
507 int (*autoneg_enable)(struct cphy *phy);
508 int (*autoneg_restart)(struct cphy *phy);
509
510 int (*advertise)(struct cphy *phy, unsigned int advertise_map);
511 int (*set_loopback)(struct cphy *phy, int mmd, int dir, int enable);
512 int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
513 int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
514 int *duplex, int *fc);
515 int (*power_down)(struct cphy *phy, int enable);
516};
517
518/* A PHY instance */
519struct cphy {
520 int addr; /* PHY address */
521 struct adapter *adapter; /* associated adapter */
522 unsigned long fifo_errors; /* FIFO over/under-flows */
523 const struct cphy_ops *ops; /* PHY operations */
524 int (*mdio_read)(struct adapter *adapter, int phy_addr, int mmd_addr,
525 int reg_addr, unsigned int *val);
526 int (*mdio_write)(struct adapter *adapter, int phy_addr, int mmd_addr,
527 int reg_addr, unsigned int val);
528};
529
530/* Convenience MDIO read/write wrappers */
531static inline int mdio_read(struct cphy *phy, int mmd, int reg,
532 unsigned int *valp)
533{
534 return phy->mdio_read(phy->adapter, phy->addr, mmd, reg, valp);
535}
536
537static inline int mdio_write(struct cphy *phy, int mmd, int reg,
538 unsigned int val)
539{
540 return phy->mdio_write(phy->adapter, phy->addr, mmd, reg, val);
541}
542
543/* Convenience initializer */
544static inline void cphy_init(struct cphy *phy, struct adapter *adapter,
545 int phy_addr, struct cphy_ops *phy_ops,
546 const struct mdio_ops *mdio_ops)
547{
548 phy->adapter = adapter;
549 phy->addr = phy_addr;
550 phy->ops = phy_ops;
551 if (mdio_ops) {
552 phy->mdio_read = mdio_ops->read;
553 phy->mdio_write = mdio_ops->write;
554 }
555}
556
557/* Accumulate MAC statistics every 180 seconds. For 1G we multiply by 10. */
558#define MAC_STATS_ACCUM_SECS 180
559
560#define XGM_REG(reg_addr, idx) \
561 ((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR))
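
/*
 * Editorial note: XGM_REG(reg, 1) addresses the second XGMAC's copy of
 * a per-MAC register, e.g. XGM_REG(A_XGM_SERDES_STAT0, 1) as used by
 * xaui_direct_get_link_status() in ael1002.c.
 */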
562
563struct addr_val_pair {
564 unsigned int reg_addr;
565 unsigned int val;
566};
567
568#include "adapter.h"
569
570#ifndef PCI_VENDOR_ID_CHELSIO
571# define PCI_VENDOR_ID_CHELSIO 0x1425
572#endif
573
574#define for_each_port(adapter, iter) \
575 for (iter = 0; iter < (adapter)->params.nports; ++iter)
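
/*
 * Editorial usage sketch (hypothetical function, not part of this
 * patch): iterate every Ethernet port on an adapter.
 */
static inline void example_carrier_off_all(struct adapter *adap)
{
	int i;

	for_each_port(adap, i)
		netif_carrier_off(adap->port[i]);
}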
576
577#define adapter_info(adap) ((adap)->params.info)
578
579static inline int uses_xaui(const struct adapter *adap)
580{
581 return adapter_info(adap)->caps & SUPPORTED_AUI;
582}
583
584static inline int is_10G(const struct adapter *adap)
585{
586 return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full;
587}
588
589static inline int is_offload(const struct adapter *adap)
590{
591 return adapter_info(adap)->caps & SUPPORTED_OFFLOAD;
592}
593
594static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
595{
596 return adap->params.vpd.cclk / 1000;
597}
598
599static inline unsigned int is_pcie(const struct adapter *adap)
600{
601 return adap->params.pci.variant == PCI_VARIANT_PCIE;
602}
603
604void t3_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
605 u32 val);
606void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
607 int n, unsigned int offset);
608int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
609 int polarity, int attempts, int delay, u32 *valp);
610static inline int t3_wait_op_done(struct adapter *adapter, int reg, u32 mask,
611 int polarity, int attempts, int delay)
612{
613 return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
614 delay, NULL);
615}
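
/*
 * Editorial usage sketch: poll a busy bit until it deasserts. The
 * register address and field below are placeholders, not real cxgb3
 * names; polarity 0 means "wait for the masked bits to read 0", with
 * 10 attempts spaced 5 us apart. A nonzero return indicates a timeout.
 */
#define A_EXAMPLE_CMD	0x0		/* placeholder register address */
#define F_EXAMPLE_BUSY	(1U << 31)	/* placeholder busy bit */

static inline int example_wait_cmd_idle(struct adapter *adapter)
{
	return t3_wait_op_done(adapter, A_EXAMPLE_CMD, F_EXAMPLE_BUSY,
			       0, 10, 5);
}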
616int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
617 unsigned int set);
618int t3_phy_reset(struct cphy *phy, int mmd, int wait);
619int t3_phy_advertise(struct cphy *phy, unsigned int advert);
620int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex);
621
622void t3_intr_enable(struct adapter *adapter);
623void t3_intr_disable(struct adapter *adapter);
624void t3_intr_clear(struct adapter *adapter);
625void t3_port_intr_enable(struct adapter *adapter, int idx);
626void t3_port_intr_disable(struct adapter *adapter, int idx);
627void t3_port_intr_clear(struct adapter *adapter, int idx);
628int t3_slow_intr_handler(struct adapter *adapter);
629int t3_phy_intr_handler(struct adapter *adapter);
630
631void t3_link_changed(struct adapter *adapter, int port_id);
632int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
633const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
634int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
635int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
636int t3_seeprom_wp(struct adapter *adapter, int enable);
637int t3_read_flash(struct adapter *adapter, unsigned int addr,
638 unsigned int nwords, u32 *data, int byte_oriented);
639int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size);
640int t3_get_fw_version(struct adapter *adapter, u32 *vers);
641int t3_check_fw_version(struct adapter *adapter);
642int t3_init_hw(struct adapter *adapter, u32 fw_params);
643void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
644void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
645int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
646 int reset);
647void t3_led_ready(struct adapter *adapter);
648void t3_fatal_err(struct adapter *adapter);
649void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
650void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
651 const u8 * cpus, const u16 *rspq);
652int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map);
653int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask);
654int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
655 unsigned int n, unsigned int *valp);
656int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
657 u64 *buf);
658
659int t3_mac_reset(struct cmac *mac);
660void t3b_pcs_reset(struct cmac *mac);
661int t3_mac_enable(struct cmac *mac, int which);
662int t3_mac_disable(struct cmac *mac, int which);
663int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
664int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm);
665int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
666int t3_mac_set_num_ucast(struct cmac *mac, int n);
667const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
668int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
669
670void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode);
671int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
672 unsigned int nroutes);
673void t3_mc5_intr_handler(struct mc5 *mc5);
674int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start, unsigned int n,
675 u32 *buf);
676
677int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh);
678void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size);
679void t3_tp_set_offload_mode(struct adapter *adap, int enable);
680void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps);
681void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
682 unsigned short alpha[NCCTRL_WIN],
683 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap);
684void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS]);
685void t3_get_cong_cntl_tab(struct adapter *adap,
686 unsigned short incr[NMTUS][NCCTRL_WIN]);
687void t3_config_trace_filter(struct adapter *adapter,
688 const struct trace_params *tp, int filter_index,
689 int invert, int enable);
690int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched);
691
692void t3_sge_prep(struct adapter *adap, struct sge_params *p);
693void t3_sge_init(struct adapter *adap, struct sge_params *p);
694int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
695 enum sge_context_type type, int respq, u64 base_addr,
696 unsigned int size, unsigned int token, int gen,
697 unsigned int cidx);
698int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
699 int gts_enable, u64 base_addr, unsigned int size,
700 unsigned int esize, unsigned int cong_thres, int gen,
701 unsigned int cidx);
702int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
703 int irq_vec_idx, u64 base_addr, unsigned int size,
704 unsigned int fl_thres, int gen, unsigned int cidx);
705int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
706 unsigned int size, int rspq, int ovfl_mode,
707 unsigned int credits, unsigned int credit_thres);
708int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable);
709int t3_sge_disable_fl(struct adapter *adapter, unsigned int id);
710int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id);
711int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id);
712int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4]);
713int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4]);
714int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4]);
715int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4]);
716int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
717 unsigned int credits);
718
719void t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
720 int phy_addr, const struct mdio_ops *mdio_ops);
721void t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
722 int phy_addr, const struct mdio_ops *mdio_ops);
723void t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
724 int phy_addr, const struct mdio_ops *mdio_ops);
725void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
726 const struct mdio_ops *mdio_ops);
727void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
728 int phy_addr, const struct mdio_ops *mdio_ops);
729#endif /* __CHELSIO_COMMON_H */
diff --git a/drivers/net/cxgb3/cxgb3_ctl_defs.h b/drivers/net/cxgb3/cxgb3_ctl_defs.h
new file mode 100644
index 000000000000..2095ddacff78
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_ctl_defs.h
@@ -0,0 +1,164 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef _CXGB3_OFFLOAD_CTL_DEFS_H
33#define _CXGB3_OFFLOAD_CTL_DEFS_H
34
35enum {
36 GET_MAX_OUTSTANDING_WR,
37 GET_TX_MAX_CHUNK,
38 GET_TID_RANGE,
39 GET_STID_RANGE,
40 GET_RTBL_RANGE,
41 GET_L2T_CAPACITY,
42 GET_MTUS,
43 GET_WR_LEN,
44 GET_IFF_FROM_MAC,
45 GET_DDP_PARAMS,
46 GET_PORTS,
47
48 ULP_ISCSI_GET_PARAMS,
49 ULP_ISCSI_SET_PARAMS,
50
51 RDMA_GET_PARAMS,
52 RDMA_CQ_OP,
53 RDMA_CQ_SETUP,
54 RDMA_CQ_DISABLE,
55 RDMA_CTRL_QP_SETUP,
56 RDMA_GET_MEM,
57};
58
59/*
60 * Structure used to describe a TID range. Valid TIDs are [base, base+num).
61 */
62struct tid_range {
63 unsigned int base; /* first TID */
64 unsigned int num; /* number of TIDs in range */
65};
66
67/*
68 * Structure used to request the size and contents of the MTU table.
69 */
70struct mtutab {
71 unsigned int size; /* # of entries in the MTU table */
72 const unsigned short *mtus; /* the MTU table values */
73};
74
75struct net_device;
76
77/*
78 * Structure used to request the adapter net_device owning a given MAC address.
79 */
80struct iff_mac {
81 struct net_device *dev; /* the net_device */
82 const unsigned char *mac_addr; /* MAC address to lookup */
83 u16 vlan_tag;
84};
85
86struct pci_dev;
87
88/*
89 * Structure used to request the TCP DDP parameters.
90 */
91struct ddp_params {
92 unsigned int llimit; /* TDDP region start address */
93 unsigned int ulimit; /* TDDP region end address */
94 unsigned int tag_mask; /* TDDP tag mask */
95 struct pci_dev *pdev;
96};
97
98struct adap_ports {
99 unsigned int nports; /* number of ports on this adapter */
100 struct net_device *lldevs[2];
101};
102
103/*
104 * Structure used to return information to the iSCSI layer.
105 */
106struct ulp_iscsi_info {
107 unsigned int offset;
108 unsigned int llimit;
109 unsigned int ulimit;
110 unsigned int tagmask;
111 unsigned int pgsz3;
112 unsigned int pgsz2;
113 unsigned int pgsz1;
114 unsigned int pgsz0;
115 unsigned int max_rxsz;
116 unsigned int max_txsz;
117 struct pci_dev *pdev;
118};
119
120/*
121 * Structure used to return information to the RDMA layer.
122 */
123struct rdma_info {
124 unsigned int tpt_base; /* TPT base address */
125 unsigned int tpt_top; /* TPT last entry address */
126 unsigned int pbl_base; /* PBL base address */
127 unsigned int pbl_top; /* PBL last entry address */
128 unsigned int rqt_base; /* RQT base address */
129 unsigned int rqt_top; /* RQT last entry address */
130 unsigned int udbell_len; /* user doorbell region length */
131 unsigned long udbell_physbase; /* user doorbell physical start addr */
132 void __iomem *kdb_addr; /* kernel doorbell register address */
133 struct pci_dev *pdev; /* associated PCI device */
134};
135
136/*
137 * Structure used to request an operation on an RDMA completion queue.
138 */
139struct rdma_cq_op {
140 unsigned int id;
141 unsigned int op;
142 unsigned int credits;
143};
144
145/*
146 * Structure used to setup RDMA completion queues.
147 */
148struct rdma_cq_setup {
149 unsigned int id;
150 unsigned long long base_addr;
151 unsigned int size;
152 unsigned int credits;
153 unsigned int credit_thres;
154 unsigned int ovfl_mode;
155};
156
157/*
158 * Structure used to setup the RDMA control egress context.
159 */
160struct rdma_ctrlqp_setup {
161 unsigned long long base_addr;
162 unsigned int size;
163};
164#endif /* _CXGB3_OFFLOAD_CTL_DEFS_H */
diff --git a/drivers/net/cxgb3/cxgb3_defs.h b/drivers/net/cxgb3/cxgb3_defs.h
new file mode 100644
index 000000000000..16e004990c59
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_defs.h
@@ -0,0 +1,99 @@
1/*
2 * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _CHELSIO_DEFS_H
34#define _CHELSIO_DEFS_H
35
36#include <linux/skbuff.h>
37#include <net/tcp.h>
38
39#include "t3cdev.h"
40
41#include "cxgb3_offload.h"
42
43#define VALIDATE_TID 1
44
45void *cxgb_alloc_mem(unsigned long size);
46void cxgb_free_mem(void *addr);
47void cxgb_neigh_update(struct neighbour *neigh);
48void cxgb_redirect(struct dst_entry *old, struct dst_entry *new);
49
50/*
51 * Map an ATID or STID to its entry in the corresponding TID table.
52 */
53static inline union active_open_entry *atid2entry(const struct tid_info *t,
54 unsigned int atid)
55{
56 return &t->atid_tab[atid - t->atid_base];
57}
58
59static inline union listen_entry *stid2entry(const struct tid_info *t,
60 unsigned int stid)
61{
62 return &t->stid_tab[stid - t->stid_base];
63}
64
65/*
66 * Find the connection corresponding to a TID.
67 */
68static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
69 unsigned int tid)
70{
71 return tid < t->ntids ? &(t->tid_tab[tid]) : NULL;
72}
73
74/*
75 * Find the connection corresponding to a server TID.
76 */
77static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t,
78 unsigned int tid)
79{
80 if (tid < t->stid_base || tid >= t->stid_base + t->nstids)
81 return NULL;
82 return &(stid2entry(t, tid)->t3c_tid);
83}
84
85/*
86 * Find the connection corresponding to an active-open TID.
87 */
88static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t,
89 unsigned int tid)
90{
91 if (tid < t->atid_base || tid >= t->atid_base + t->natids)
92 return NULL;
93 return &(atid2entry(t, tid)->t3c_tid);
94}
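
/*
 * Editorial usage sketch (hypothetical function, not part of this
 * patch): resolve a TID carried in a CPL message to its per-connection
 * context before dispatching. The ->ctx member is assumed to exist on
 * struct t3c_tid_entry (see cxgb3_offload.h); lookup_tid() returns
 * NULL for out-of-range TIDs, so the result must be checked.
 */
static inline void *example_tid_ctx(const struct tid_info *t,
				    unsigned int tid)
{
	struct t3c_tid_entry *e = lookup_tid(t, tid);

	return e ? e->ctx : NULL;
}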
95
96int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n);
97int attach_t3cdev(struct t3cdev *dev);
98void detach_t3cdev(struct t3cdev *dev);
99#endif
diff --git a/drivers/net/cxgb3/cxgb3_ioctl.h b/drivers/net/cxgb3/cxgb3_ioctl.h
new file mode 100644
index 000000000000..a94281861a66
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_ioctl.h
@@ -0,0 +1,185 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef __CHIOCTL_H__
33#define __CHIOCTL_H__
34
35/*
36 * Ioctl commands specific to this driver.
37 */
38enum {
39 CHELSIO_SETREG = 1024,
40 CHELSIO_GETREG,
41 CHELSIO_SETTPI,
42 CHELSIO_GETTPI,
43 CHELSIO_GETMTUTAB,
44 CHELSIO_SETMTUTAB,
45 CHELSIO_GETMTU,
46 CHELSIO_SET_PM,
47 CHELSIO_GET_PM,
48 CHELSIO_GET_TCAM,
49 CHELSIO_SET_TCAM,
50 CHELSIO_GET_TCB,
51 CHELSIO_GET_MEM,
52 CHELSIO_LOAD_FW,
53 CHELSIO_GET_PROTO,
54 CHELSIO_SET_PROTO,
55 CHELSIO_SET_TRACE_FILTER,
56 CHELSIO_SET_QSET_PARAMS,
57 CHELSIO_GET_QSET_PARAMS,
58 CHELSIO_SET_QSET_NUM,
59 CHELSIO_GET_QSET_NUM,
60 CHELSIO_SET_PKTSCHED,
61};
62
63struct ch_reg {
64 uint32_t cmd;
65 uint32_t addr;
66 uint32_t val;
67};
68
69struct ch_cntxt {
70 uint32_t cmd;
71 uint32_t cntxt_type;
72 uint32_t cntxt_id;
73 uint32_t data[4];
74};
75
76/* context types */
77enum { CNTXT_TYPE_EGRESS, CNTXT_TYPE_FL, CNTXT_TYPE_RSP, CNTXT_TYPE_CQ };
78
79struct ch_desc {
80 uint32_t cmd;
81 uint32_t queue_num;
82 uint32_t idx;
83 uint32_t size;
84 uint8_t data[128];
85};
86
87struct ch_mem_range {
88 uint32_t cmd;
89 uint32_t mem_id;
90 uint32_t addr;
91 uint32_t len;
92 uint32_t version;
93 uint8_t buf[0];
94};
95
96struct ch_qset_params {
97 uint32_t cmd;
98 uint32_t qset_idx;
99 int32_t txq_size[3];
100 int32_t rspq_size;
101 int32_t fl_size[2];
102 int32_t intr_lat;
103 int32_t polling;
104 int32_t cong_thres;
105};
106
107struct ch_pktsched_params {
108 uint32_t cmd;
109 uint8_t sched;
110 uint8_t idx;
111 uint8_t min;
112 uint8_t max;
113 uint8_t binding;
114};
115
116#ifndef TCB_SIZE
117# define TCB_SIZE 128
118#endif
119
120/* TCB size in 32-bit words */
121#define TCB_WORDS (TCB_SIZE / 4)
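
/* Editorial note: with TCB_SIZE 128 this gives TCB_WORDS = 128 / 4 = 32. */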
122
123enum { MEM_CM, MEM_PMRX, MEM_PMTX }; /* ch_mem_range.mem_id values */
124
125struct ch_mtus {
126 uint32_t cmd;
127 uint32_t nmtus;
128 uint16_t mtus[NMTUS];
129};
130
131struct ch_pm {
132 uint32_t cmd;
133 uint32_t tx_pg_sz;
134 uint32_t tx_num_pg;
135 uint32_t rx_pg_sz;
136 uint32_t rx_num_pg;
137 uint32_t pm_total;
138};
139
140struct ch_tcam {
141 uint32_t cmd;
142 uint32_t tcam_size;
143 uint32_t nservers;
144 uint32_t nroutes;
145 uint32_t nfilters;
146};
147
148struct ch_tcb {
149 uint32_t cmd;
150 uint32_t tcb_index;
151 uint32_t tcb_data[TCB_WORDS];
152};
153
154struct ch_tcam_word {
155 uint32_t cmd;
156 uint32_t addr;
157 uint32_t buf[3];
158};
159
160struct ch_trace {
161 uint32_t cmd;
162 uint32_t sip;
163 uint32_t sip_mask;
164 uint32_t dip;
165 uint32_t dip_mask;
166 uint16_t sport;
167 uint16_t sport_mask;
168 uint16_t dport;
169 uint16_t dport_mask;
170 uint32_t vlan:12;
171 uint32_t vlan_mask:12;
172 uint32_t intf:4;
173 uint32_t intf_mask:4;
174 uint8_t proto;
175 uint8_t proto_mask;
176 uint8_t invert_match:1;
177 uint8_t config_tx:1;
178 uint8_t config_rx:1;
179 uint8_t trace_tx:1;
180 uint8_t trace_rx:1;
181};
182
183#define SIOCCHIOCTL SIOCDEVPRIVATE
184
185#endif
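
The structures above travel through the driver's private ioctl: userspace fills in the command-specific structure, whose leading uint32_t must hold the CHELSIO_* command code, and passes its address in the ifr_data field of a SIOCCHIOCTL (i.e. SIOCDEVPRIVATE) request on the port's interface. A minimal userspace sketch, assuming these definitions are visible to the program (e.g. via a copied header) and using an illustrative interface name and register offset:

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <net/if.h>
#include "cxgb3_ioctl.h"	/* assumption: the definitions above are exported */

/* Read a 4-byte adapter register; returns 0 on success. */
static int t3_reg_read_ioctl(const char *ifname, uint32_t addr, uint32_t *val)
{
	struct ch_reg op = { .cmd = CHELSIO_GETREG, .addr = addr };
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&op;		/* whole struct travels here */
	ret = ioctl(fd, SIOCCHIOCTL, &ifr);	/* SIOCDEVPRIVATE */
	close(fd);
	if (ret == 0)
		*val = op.val;			/* driver copied the result back */
	return ret;
}

A call such as t3_reg_read_ioctl("eth0", 0x6c, &v) is illustrative only; the handler in cxgb3_main.c below rejects offsets that are unaligned or beyond the mapped register space, and CHELSIO_SETREG additionally requires CAP_NET_ADMIN.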
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
new file mode 100644
index 000000000000..dfa035a1ad45
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -0,0 +1,2515 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/dma-mapping.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/if_vlan.h>
40#include <linux/mii.h>
41#include <linux/sockios.h>
42#include <linux/workqueue.h>
43#include <linux/proc_fs.h>
44#include <linux/rtnetlink.h>
45#include <asm/uaccess.h>
46
47#include "common.h"
48#include "cxgb3_ioctl.h"
49#include "regs.h"
50#include "cxgb3_offload.h"
51#include "version.h"
52
53#include "cxgb3_ctl_defs.h"
54#include "t3_cpl.h"
55#include "firmware_exports.h"
56
57enum {
58 MAX_TXQ_ENTRIES = 16384,
59 MAX_CTRL_TXQ_ENTRIES = 1024,
60 MAX_RSPQ_ENTRIES = 16384,
61 MAX_RX_BUFFERS = 16384,
62 MAX_RX_JUMBO_BUFFERS = 16384,
63 MIN_TXQ_ENTRIES = 4,
64 MIN_CTRL_TXQ_ENTRIES = 4,
65 MIN_RSPQ_ENTRIES = 32,
66 MIN_FL_ENTRIES = 32
67};
68
69#define PORT_MASK ((1 << MAX_NPORTS) - 1)
70
71#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
72 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
73 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
74
75#define EEPROM_MAGIC 0x38E2F10C
76
77#define to_net_dev(class) container_of(class, struct net_device, class_dev)
78
79#define CH_DEVICE(devid, ssid, idx) \
80 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
81
82static const struct pci_device_id cxgb3_pci_tbl[] = {
83 CH_DEVICE(0x20, 1, 0), /* PE9000 */
84 CH_DEVICE(0x21, 1, 1), /* T302E */
85 CH_DEVICE(0x22, 1, 2), /* T310E */
86 CH_DEVICE(0x23, 1, 3), /* T320X */
87 CH_DEVICE(0x24, 1, 1), /* T302X */
88 CH_DEVICE(0x25, 1, 3), /* T320E */
89 CH_DEVICE(0x26, 1, 2), /* T310X */
90 CH_DEVICE(0x30, 1, 2), /* T3B10 */
91 CH_DEVICE(0x31, 1, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1, 1), /* T3B02 */
93 {0,}
94};
95
96MODULE_DESCRIPTION(DRV_DESC);
97MODULE_AUTHOR("Chelsio Communications");
98MODULE_LICENSE("Dual BSD/GPL");
99MODULE_VERSION(DRV_VERSION);
100MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
101
102static int dflt_msg_enable = DFLT_MSG_ENABLE;
103
104module_param(dflt_msg_enable, int, 0644);
105MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
106
107/*
108 * The driver uses the best interrupt scheme available on a platform in the
109 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
110 * of these schemes the driver may consider as follows:
111 *
112 * msi = 2: choose from among all three options
113 * msi = 1: only consider MSI and pin interrupts
114 * msi = 0: force pin interrupts
115 */
116static int msi = 2;
117
118module_param(msi, int, 0644);
119MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
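/*
 * Example (hypothetical invocation): loading with "modprobe cxgb3 msi=1"
 * excludes MSI-X, so the driver tries MSI and then legacy pin interrupts;
 * "msi=0" forces pin interrupts.
 */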
120
121/*
122 * The driver enables offload by default.
123 * To disable it, set ofld_disable = 1.
124 */
125
126static int ofld_disable = 0;
127
128module_param(ofld_disable, int, 0644);
129MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
130
131/*
132 * We have work elements that we need to cancel when an interface is taken
133 * down. Normally the work elements would be executed by keventd but that
134 * can deadlock because of linkwatch. If our close method takes the rtnl
135 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
136 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
137 * for our work to complete. Get our own work queue to solve this.
138 */
139static struct workqueue_struct *cxgb3_wq;
140
141/**
142 * link_report - show link status and link speed/duplex
 143 * @dev: the net device whose link status is to be reported
144 *
145 * Shows the link status, speed, and duplex of a port.
146 */
147static void link_report(struct net_device *dev)
148{
149 if (!netif_carrier_ok(dev))
150 printk(KERN_INFO "%s: link down\n", dev->name);
151 else {
152 const char *s = "10Mbps";
153 const struct port_info *p = netdev_priv(dev);
154
155 switch (p->link_config.speed) {
156 case SPEED_10000:
157 s = "10Gbps";
158 break;
159 case SPEED_1000:
160 s = "1000Mbps";
161 break;
162 case SPEED_100:
163 s = "100Mbps";
164 break;
165 }
166
167 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
168 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
169 }
170}
171
172/**
173 * t3_os_link_changed - handle link status changes
174 * @adapter: the adapter associated with the link change
 175 * @port_id: the port index whose link status has changed
176 * @link_stat: the new status of the link
177 * @speed: the new speed setting
178 * @duplex: the new duplex setting
179 * @pause: the new flow-control setting
180 *
181 * This is the OS-dependent handler for link status changes. The OS
182 * neutral handler takes care of most of the processing for these events,
183 * then calls this handler for any OS-specific processing.
184 */
185void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
186 int speed, int duplex, int pause)
187{
188 struct net_device *dev = adapter->port[port_id];
189
190 /* Skip changes from disabled ports. */
191 if (!netif_running(dev))
192 return;
193
194 if (link_stat != netif_carrier_ok(dev)) {
195 if (link_stat)
196 netif_carrier_on(dev);
197 else
198 netif_carrier_off(dev);
199 link_report(dev);
200 }
201}
202
203static void cxgb_set_rxmode(struct net_device *dev)
204{
205 struct t3_rx_mode rm;
206 struct port_info *pi = netdev_priv(dev);
207
208 init_rx_mode(&rm, dev, dev->mc_list);
209 t3_mac_set_rx_mode(&pi->mac, &rm);
210}
211
212/**
213 * link_start - enable a port
214 * @dev: the device to enable
215 *
216 * Performs the MAC and PHY actions needed to enable a port.
217 */
218static void link_start(struct net_device *dev)
219{
220 struct t3_rx_mode rm;
221 struct port_info *pi = netdev_priv(dev);
222 struct cmac *mac = &pi->mac;
223
224 init_rx_mode(&rm, dev, dev->mc_list);
225 t3_mac_reset(mac);
226 t3_mac_set_mtu(mac, dev->mtu);
227 t3_mac_set_address(mac, 0, dev->dev_addr);
228 t3_mac_set_rx_mode(mac, &rm);
229 t3_link_start(&pi->phy, mac, &pi->link_config);
230 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
231}
232
233static inline void cxgb_disable_msi(struct adapter *adapter)
234{
235 if (adapter->flags & USING_MSIX) {
236 pci_disable_msix(adapter->pdev);
237 adapter->flags &= ~USING_MSIX;
238 } else if (adapter->flags & USING_MSI) {
239 pci_disable_msi(adapter->pdev);
240 adapter->flags &= ~USING_MSI;
241 }
242}
243
244/*
245 * Interrupt handler for asynchronous events used with MSI-X.
246 */
247static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
248{
249 t3_slow_intr_handler(cookie);
250 return IRQ_HANDLED;
251}
252
253/*
254 * Name the MSI-X interrupts.
255 */
256static void name_msix_vecs(struct adapter *adap)
257{
258 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
259
260 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
261 adap->msix_info[0].desc[n] = 0;
262
263 for_each_port(adap, j) {
264 struct net_device *d = adap->port[j];
265 const struct port_info *pi = netdev_priv(d);
266
267 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
268 snprintf(adap->msix_info[msi_idx].desc, n,
269 "%s (queue %d)", d->name, i);
270 adap->msix_info[msi_idx].desc[n] = 0;
271 }
272 }
273}
274
275static int request_msix_data_irqs(struct adapter *adap)
276{
277 int i, j, err, qidx = 0;
278
279 for_each_port(adap, i) {
280 int nqsets = adap2pinfo(adap, i)->nqsets;
281
282 for (j = 0; j < nqsets; ++j) {
283 err = request_irq(adap->msix_info[qidx + 1].vec,
284 t3_intr_handler(adap,
285 adap->sge.qs[qidx].
286 rspq.polling), 0,
287 adap->msix_info[qidx + 1].desc,
288 &adap->sge.qs[qidx]);
289 if (err) {
290 while (--qidx >= 0)
291 free_irq(adap->msix_info[qidx + 1].vec,
292 &adap->sge.qs[qidx]);
293 return err;
294 }
295 qidx++;
296 }
297 }
298 return 0;
299}
300
301/**
302 * setup_rss - configure RSS
303 * @adap: the adapter
304 *
305 * Sets up RSS to distribute packets to multiple receive queues. We
306 * configure the RSS CPU lookup table to distribute to the number of HW
307 * receive queues, and the response queue lookup table to narrow that
308 * down to the response queues actually configured for each port.
309 * We always configure the RSS mapping for two ports since the mapping
310 * table has plenty of entries.
311 */
312static void setup_rss(struct adapter *adap)
313{
314 int i;
315 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
316 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
317 u8 cpus[SGE_QSETS + 1];
318 u16 rspq_map[RSS_TABLE_SIZE];
319
320 for (i = 0; i < SGE_QSETS; ++i)
321 cpus[i] = i;
322 cpus[SGE_QSETS] = 0xff; /* terminator */
323
324 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
325 rspq_map[i] = i % nq0;
326 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
327 }
328
329 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
330 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
331 V_RRCPLCPUSIZE(6), cpus, rspq_map);
332}
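/*
 * Worked example (assuming RSS_TABLE_SIZE is 128): with nq0 = 2 and
 * nq1 = 2, entries 0..63 of rspq_map alternate 0,1,0,1,... for port 0
 * and entries 64..127 alternate 2,3,2,3,... for port 1, so each half
 * of the table round-robins over its own port's response queues.
 */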
333
334/*
335 * If we have multiple receive queues per port serviced by NAPI we need one
336 * netdevice per queue as NAPI operates on netdevices. We already have one
337 * netdevice, namely the one associated with the interface, so we use dummy
338 * ones for any additional queues. Note that these netdevices exist purely
339 * so that NAPI has something to work with, they do not represent network
340 * ports and are not registered.
341 */
342static int init_dummy_netdevs(struct adapter *adap)
343{
344 int i, j, dummy_idx = 0;
345 struct net_device *nd;
346
347 for_each_port(adap, i) {
348 struct net_device *dev = adap->port[i];
349 const struct port_info *pi = netdev_priv(dev);
350
351 for (j = 0; j < pi->nqsets - 1; j++) {
352 if (!adap->dummy_netdev[dummy_idx]) {
353 nd = alloc_netdev(0, "", ether_setup);
354 if (!nd)
355 goto free_all;
356
357 nd->priv = adap;
358 nd->weight = 64;
359 set_bit(__LINK_STATE_START, &nd->state);
360 adap->dummy_netdev[dummy_idx] = nd;
361 }
362 strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
363 dummy_idx++;
364 }
365 }
366 return 0;
367
368free_all:
369 while (--dummy_idx >= 0) {
370 free_netdev(adap->dummy_netdev[dummy_idx]);
371 adap->dummy_netdev[dummy_idx] = NULL;
372 }
373 return -ENOMEM;
374}
375
376/*
377 * Wait until all NAPI handlers are descheduled. This includes the handlers of
378 * both netdevices representing interfaces and the dummy ones for the extra
379 * queues.
380 */
381static void quiesce_rx(struct adapter *adap)
382{
383 int i;
384 struct net_device *dev;
385
386 for_each_port(adap, i) {
387 dev = adap->port[i];
388 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
389 msleep(1);
390 }
391
392 for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
393 dev = adap->dummy_netdev[i];
394 if (dev)
395 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
396 msleep(1);
397 }
398}
399
400/**
401 * setup_sge_qsets - configure SGE Tx/Rx/response queues
402 * @adap: the adapter
403 *
404 * Determines how many sets of SGE queues to use and initializes them.
405 * We support multiple queue sets per port if we have MSI-X, otherwise
406 * just one queue set per port.
407 */
408static int setup_sge_qsets(struct adapter *adap)
409{
410 int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
411 unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;
412
413 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
414 irq_idx = -1;
415
416 for_each_port(adap, i) {
417 struct net_device *dev = adap->port[i];
418 const struct port_info *pi = netdev_priv(dev);
419
420 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
421 err = t3_sge_alloc_qset(adap, qset_idx, 1,
422 (adap->flags & USING_MSIX) ? qset_idx + 1 :
423 irq_idx,
424 &adap->params.sge.qset[qset_idx], ntxq,
425 j == 0 ? dev :
 426						adap->dummy_netdev[dummy_dev_idx++]);
427 if (err) {
428 t3_free_sge_resources(adap);
429 return err;
430 }
431 }
432 }
433
434 return 0;
435}
436
437static ssize_t attr_show(struct class_device *cd, char *buf,
 438			 ssize_t (*format)(struct adapter *, char *))
439{
440 ssize_t len;
441 struct adapter *adap = to_net_dev(cd)->priv;
442
443 /* Synchronize with ioctls that may shut down the device */
444 rtnl_lock();
 445	len = (*format)(adap, buf);
446 rtnl_unlock();
447 return len;
448}
449
450static ssize_t attr_store(struct class_device *cd, const char *buf, size_t len,
 451			  ssize_t (*set)(struct adapter *, unsigned int),
452 unsigned int min_val, unsigned int max_val)
453{
454 char *endp;
455 ssize_t ret;
456 unsigned int val;
457 struct adapter *adap = to_net_dev(cd)->priv;
458
459 if (!capable(CAP_NET_ADMIN))
460 return -EPERM;
461
462 val = simple_strtoul(buf, &endp, 0);
463 if (endp == buf || val < min_val || val > max_val)
464 return -EINVAL;
465
466 rtnl_lock();
 467	ret = (*set)(adap, val);
468 if (!ret)
469 ret = len;
470 rtnl_unlock();
471 return ret;
472}
473
474#define CXGB3_SHOW(name, val_expr) \
475static ssize_t format_##name(struct adapter *adap, char *buf) \
476{ \
477 return sprintf(buf, "%u\n", val_expr); \
478} \
479static ssize_t show_##name(struct class_device *cd, char *buf) \
480{ \
481 return attr_show(cd, buf, format_##name); \
482}
483
484static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
485{
486 if (adap->flags & FULL_INIT_DONE)
487 return -EBUSY;
488 if (val && adap->params.rev == 0)
489 return -EINVAL;
490 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
491 return -EINVAL;
492 adap->params.mc5.nfilters = val;
493 return 0;
494}
495
496static ssize_t store_nfilters(struct class_device *cd, const char *buf,
497 size_t len)
498{
499 return attr_store(cd, buf, len, set_nfilters, 0, ~0);
500}
501
502static ssize_t set_nservers(struct adapter *adap, unsigned int val)
503{
504 if (adap->flags & FULL_INIT_DONE)
505 return -EBUSY;
506 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
507 return -EINVAL;
508 adap->params.mc5.nservers = val;
509 return 0;
510}
511
512static ssize_t store_nservers(struct class_device *cd, const char *buf,
513 size_t len)
514{
515 return attr_store(cd, buf, len, set_nservers, 0, ~0);
516}
517
518#define CXGB3_ATTR_R(name, val_expr) \
519CXGB3_SHOW(name, val_expr) \
520static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
521
522#define CXGB3_ATTR_RW(name, val_expr, store_method) \
523CXGB3_SHOW(name, val_expr) \
524static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
525
526CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
527CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
528CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
529
530static struct attribute *cxgb3_attrs[] = {
531 &class_device_attr_cam_size.attr,
532 &class_device_attr_nfilters.attr,
533 &class_device_attr_nservers.attr,
534 NULL
535};
536
 537static struct attribute_group cxgb3_attr_group = { .attrs = cxgb3_attrs };
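/*
 * These appear as per-port sysfs files (paths assumed for illustration):
 *
 *	cat /sys/class/net/eth0/cam_size
 *	echo 8192 > /sys/class/net/eth0/nfilters
 *
 * Writes require CAP_NET_ADMIN and fail with -EBUSY once the adapter is
 * up (FULL_INIT_DONE), since the MC5 partitioning is fixed at init time.
 */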
538
539static ssize_t tm_attr_show(struct class_device *cd, char *buf, int sched)
540{
541 ssize_t len;
542 unsigned int v, addr, bpt, cpt;
543 struct adapter *adap = to_net_dev(cd)->priv;
544
545 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
546 rtnl_lock();
547 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
548 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
549 if (sched & 1)
550 v >>= 16;
551 bpt = (v >> 8) & 0xff;
552 cpt = v & 0xff;
553 if (!cpt)
554 len = sprintf(buf, "disabled\n");
555 else {
556 v = (adap->params.vpd.cclk * 1000) / cpt;
557 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
558 }
559 rtnl_unlock();
560 return len;
561}
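/*
 * Rate arithmetic sketch: bpt is bytes per token and cpt core clocks per
 * token, so a scheduler releases bpt bytes every cpt clocks.  Assuming
 * cclk is kept in kHz, v = cclk * 1000 / cpt is tokens per second, and
 * v * bpt / 125 converts bytes/s to kbit/s (1 kbit/s = 125 bytes/s).
 * With illustrative values cclk = 200000, cpt = 100 and bpt = 64 this
 * reports 1,024,000 Kbps.
 */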
562
563static ssize_t tm_attr_store(struct class_device *cd, const char *buf,
564 size_t len, int sched)
565{
566 char *endp;
567 ssize_t ret;
568 unsigned int val;
569 struct adapter *adap = to_net_dev(cd)->priv;
570
571 if (!capable(CAP_NET_ADMIN))
572 return -EPERM;
573
574 val = simple_strtoul(buf, &endp, 0);
575 if (endp == buf || val > 10000000)
576 return -EINVAL;
577
578 rtnl_lock();
579 ret = t3_config_sched(adap, val, sched);
580 if (!ret)
581 ret = len;
582 rtnl_unlock();
583 return ret;
584}
585
586#define TM_ATTR(name, sched) \
587static ssize_t show_##name(struct class_device *cd, char *buf) \
588{ \
589 return tm_attr_show(cd, buf, sched); \
590} \
591static ssize_t store_##name(struct class_device *cd, const char *buf, size_t len) \
592{ \
593 return tm_attr_store(cd, buf, len, sched); \
594} \
595static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
596
597TM_ATTR(sched0, 0);
598TM_ATTR(sched1, 1);
599TM_ATTR(sched2, 2);
600TM_ATTR(sched3, 3);
601TM_ATTR(sched4, 4);
602TM_ATTR(sched5, 5);
603TM_ATTR(sched6, 6);
604TM_ATTR(sched7, 7);
605
606static struct attribute *offload_attrs[] = {
607 &class_device_attr_sched0.attr,
608 &class_device_attr_sched1.attr,
609 &class_device_attr_sched2.attr,
610 &class_device_attr_sched3.attr,
611 &class_device_attr_sched4.attr,
612 &class_device_attr_sched5.attr,
613 &class_device_attr_sched6.attr,
614 &class_device_attr_sched7.attr,
615 NULL
616};
617
 618static struct attribute_group offload_attr_group = { .attrs = offload_attrs };
619
620/*
621 * Sends an sk_buff to an offload queue driver
622 * after dealing with any active network taps.
623 */
624static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
625{
626 int ret;
627
628 local_bh_disable();
629 ret = t3_offload_tx(tdev, skb);
630 local_bh_enable();
631 return ret;
632}
633
634static int write_smt_entry(struct adapter *adapter, int idx)
635{
636 struct cpl_smt_write_req *req;
637 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
638
639 if (!skb)
640 return -ENOMEM;
641
642 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
643 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
644 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
645 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
646 req->iff = idx;
647 memset(req->src_mac1, 0, sizeof(req->src_mac1));
648 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
649 skb->priority = 1;
650 offload_tx(&adapter->tdev, skb);
651 return 0;
652}
653
654static int init_smt(struct adapter *adapter)
655{
656 int i;
657
658 for_each_port(adapter, i)
659 write_smt_entry(adapter, i);
660 return 0;
661}
662
663static void init_port_mtus(struct adapter *adapter)
664{
665 unsigned int mtus = adapter->port[0]->mtu;
666
667 if (adapter->port[1])
668 mtus |= adapter->port[1]->mtu << 16;
669 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
670}
671
672static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
673 int hi, int port)
674{
675 struct sk_buff *skb;
676 struct mngt_pktsched_wr *req;
677
678 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
679 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
680 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
681 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
682 req->sched = sched;
683 req->idx = qidx;
684 req->min = lo;
685 req->max = hi;
686 req->binding = port;
687 t3_mgmt_tx(adap, skb);
688}
689
690static void bind_qsets(struct adapter *adap)
691{
692 int i, j;
693
694 for_each_port(adap, i) {
695 const struct port_info *pi = adap2pinfo(adap, i);
696
697 for (j = 0; j < pi->nqsets; ++j)
698 send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
699 -1, i);
700 }
701}
702
703/**
704 * cxgb_up - enable the adapter
705 * @adapter: adapter being enabled
706 *
707 * Called when the first port is enabled, this function performs the
708 * actions necessary to make an adapter operational, such as completing
709 * the initialization of HW modules, and enabling interrupts.
710 *
711 * Must be called with the rtnl lock held.
712 */
713static int cxgb_up(struct adapter *adap)
714{
715 int err = 0;
716
717 if (!(adap->flags & FULL_INIT_DONE)) {
718 err = t3_check_fw_version(adap);
719 if (err)
720 goto out;
721
722 err = init_dummy_netdevs(adap);
723 if (err)
724 goto out;
725
726 err = t3_init_hw(adap, 0);
727 if (err)
728 goto out;
729
730 err = setup_sge_qsets(adap);
731 if (err)
732 goto out;
733
734 setup_rss(adap);
735 adap->flags |= FULL_INIT_DONE;
736 }
737
738 t3_intr_clear(adap);
739
740 if (adap->flags & USING_MSIX) {
741 name_msix_vecs(adap);
742 err = request_irq(adap->msix_info[0].vec,
743 t3_async_intr_handler, 0,
744 adap->msix_info[0].desc, adap);
745 if (err)
746 goto irq_err;
747
748 if (request_msix_data_irqs(adap)) {
749 free_irq(adap->msix_info[0].vec, adap);
750 goto irq_err;
751 }
752 } else if ((err = request_irq(adap->pdev->irq,
753 t3_intr_handler(adap,
754 adap->sge.qs[0].rspq.
755 polling),
756 (adap->flags & USING_MSI) ? 0 : SA_SHIRQ,
757 adap->name, adap)))
758 goto irq_err;
759
760 t3_sge_start(adap);
761 t3_intr_enable(adap);
762
763 if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
764 bind_qsets(adap);
765 adap->flags |= QUEUES_BOUND;
766
767out:
768 return err;
769irq_err:
770 CH_ERR(adap, "request_irq failed, err %d\n", err);
771 goto out;
772}
773
774/*
775 * Release resources when all the ports and offloading have been stopped.
776 */
777static void cxgb_down(struct adapter *adapter)
778{
779 t3_sge_stop(adapter);
780 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
781 t3_intr_disable(adapter);
782 spin_unlock_irq(&adapter->work_lock);
783
784 if (adapter->flags & USING_MSIX) {
785 int i, n = 0;
786
787 free_irq(adapter->msix_info[0].vec, adapter);
788 for_each_port(adapter, i)
789 n += adap2pinfo(adapter, i)->nqsets;
790
791 for (i = 0; i < n; ++i)
792 free_irq(adapter->msix_info[i + 1].vec,
793 &adapter->sge.qs[i]);
794 } else
795 free_irq(adapter->pdev->irq, adapter);
796
797 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
798 quiesce_rx(adapter);
799}
800
801static void schedule_chk_task(struct adapter *adap)
802{
803 unsigned int timeo;
804
805 timeo = adap->params.linkpoll_period ?
806 (HZ * adap->params.linkpoll_period) / 10 :
807 adap->params.stats_update_period * HZ;
808 if (timeo)
809 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
810}
811
812static int offload_open(struct net_device *dev)
813{
814 struct adapter *adapter = dev->priv;
815 struct t3cdev *tdev = T3CDEV(dev);
816 int adap_up = adapter->open_device_map & PORT_MASK;
817 int err = 0;
818
819 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
820 return 0;
821
822 if (!adap_up && (err = cxgb_up(adapter)) < 0)
823 return err;
824
825 t3_tp_set_offload_mode(adapter, 1);
826 tdev->lldev = adapter->port[0];
827 err = cxgb3_offload_activate(adapter);
828 if (err)
829 goto out;
830
831 init_port_mtus(adapter);
832 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
833 adapter->params.b_wnd,
834 adapter->params.rev == 0 ?
835 adapter->port[0]->mtu : 0xffff);
836 init_smt(adapter);
837
838 /* Never mind if the next step fails */
839 sysfs_create_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);
840
841 /* Call back all registered clients */
842 cxgb3_add_clients(tdev);
843
844out:
 845	/* on failure, back out the setup above and restore the dummy ops
	 * the offload module may have replaced */
846 if (err) {
847 t3_tp_set_offload_mode(adapter, 0);
848 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
849 cxgb3_set_dummy_ops(tdev);
850 }
851 return err;
852}
853
854static int offload_close(struct t3cdev *tdev)
855{
856 struct adapter *adapter = tdev2adap(tdev);
857
858 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
859 return 0;
860
861 /* Call back all registered clients */
862 cxgb3_remove_clients(tdev);
863
864 sysfs_remove_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);
865
866 tdev->lldev = NULL;
867 cxgb3_set_dummy_ops(tdev);
868 t3_tp_set_offload_mode(adapter, 0);
869 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
870
871 if (!adapter->open_device_map)
872 cxgb_down(adapter);
873
874 cxgb3_offload_deactivate(adapter);
875 return 0;
876}
877
878static int cxgb_open(struct net_device *dev)
879{
880 int err;
881 struct adapter *adapter = dev->priv;
882 struct port_info *pi = netdev_priv(dev);
883 int other_ports = adapter->open_device_map & PORT_MASK;
884
885 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
886 return err;
887
888 set_bit(pi->port_id, &adapter->open_device_map);
889 if (!ofld_disable) {
890 err = offload_open(dev);
891 if (err)
892 printk(KERN_WARNING
893 "Could not initialize offload capabilities\n");
894 }
895
896 link_start(dev);
897 t3_port_intr_enable(adapter, pi->port_id);
898 netif_start_queue(dev);
899 if (!other_ports)
900 schedule_chk_task(adapter);
901
902 return 0;
903}
904
905static int cxgb_close(struct net_device *dev)
906{
907 struct adapter *adapter = dev->priv;
908 struct port_info *p = netdev_priv(dev);
909
910 t3_port_intr_disable(adapter, p->port_id);
911 netif_stop_queue(dev);
912 p->phy.ops->power_down(&p->phy, 1);
913 netif_carrier_off(dev);
914 t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
915
916 spin_lock(&adapter->work_lock); /* sync with update task */
917 clear_bit(p->port_id, &adapter->open_device_map);
918 spin_unlock(&adapter->work_lock);
919
920 if (!(adapter->open_device_map & PORT_MASK))
921 cancel_rearming_delayed_workqueue(cxgb3_wq,
922 &adapter->adap_check_task);
923
924 if (!adapter->open_device_map)
925 cxgb_down(adapter);
926
927 return 0;
928}
929
930static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
931{
932 struct adapter *adapter = dev->priv;
933 struct port_info *p = netdev_priv(dev);
934 struct net_device_stats *ns = &p->netstats;
935 const struct mac_stats *pstats;
936
937 spin_lock(&adapter->stats_lock);
938 pstats = t3_mac_update_stats(&p->mac);
939 spin_unlock(&adapter->stats_lock);
940
941 ns->tx_bytes = pstats->tx_octets;
942 ns->tx_packets = pstats->tx_frames;
943 ns->rx_bytes = pstats->rx_octets;
944 ns->rx_packets = pstats->rx_frames;
945 ns->multicast = pstats->rx_mcast_frames;
946
947 ns->tx_errors = pstats->tx_underrun;
948 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
949 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
950 pstats->rx_fifo_ovfl;
951
952 /* detailed rx_errors */
953 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
954 ns->rx_over_errors = 0;
955 ns->rx_crc_errors = pstats->rx_fcs_errs;
956 ns->rx_frame_errors = pstats->rx_symbol_errs;
957 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
958 ns->rx_missed_errors = pstats->rx_cong_drops;
959
960 /* detailed tx_errors */
961 ns->tx_aborted_errors = 0;
962 ns->tx_carrier_errors = 0;
963 ns->tx_fifo_errors = pstats->tx_underrun;
964 ns->tx_heartbeat_errors = 0;
965 ns->tx_window_errors = 0;
966 return ns;
967}
968
969static u32 get_msglevel(struct net_device *dev)
970{
971 struct adapter *adapter = dev->priv;
972
973 return adapter->msg_enable;
974}
975
976static void set_msglevel(struct net_device *dev, u32 val)
977{
978 struct adapter *adapter = dev->priv;
979
980 adapter->msg_enable = val;
981}
982
983static char stats_strings[][ETH_GSTRING_LEN] = {
984 "TxOctetsOK ",
985 "TxFramesOK ",
986 "TxMulticastFramesOK",
987 "TxBroadcastFramesOK",
988 "TxPauseFrames ",
989 "TxUnderrun ",
990 "TxExtUnderrun ",
991
992 "TxFrames64 ",
993 "TxFrames65To127 ",
994 "TxFrames128To255 ",
995 "TxFrames256To511 ",
996 "TxFrames512To1023 ",
997 "TxFrames1024To1518 ",
998 "TxFrames1519ToMax ",
999
1000 "RxOctetsOK ",
1001 "RxFramesOK ",
1002 "RxMulticastFramesOK",
1003 "RxBroadcastFramesOK",
1004 "RxPauseFrames ",
1005 "RxFCSErrors ",
1006 "RxSymbolErrors ",
1007 "RxShortErrors ",
1008 "RxJabberErrors ",
1009 "RxLengthErrors ",
1010 "RxFIFOoverflow ",
1011
1012 "RxFrames64 ",
1013 "RxFrames65To127 ",
1014 "RxFrames128To255 ",
1015 "RxFrames256To511 ",
1016 "RxFrames512To1023 ",
1017 "RxFrames1024To1518 ",
1018 "RxFrames1519ToMax ",
1019
1020 "PhyFIFOErrors ",
1021 "TSO ",
1022 "VLANextractions ",
1023 "VLANinsertions ",
1024 "TxCsumOffload ",
1025 "RxCsumGood ",
1026 "RxDrops "
1027};
1028
1029static int get_stats_count(struct net_device *dev)
1030{
1031 return ARRAY_SIZE(stats_strings);
1032}
1033
1034#define T3_REGMAP_SIZE (3 * 1024)
1035
1036static int get_regs_len(struct net_device *dev)
1037{
1038 return T3_REGMAP_SIZE;
1039}
1040
1041static int get_eeprom_len(struct net_device *dev)
1042{
1043 return EEPROMSIZE;
1044}
1045
1046static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1047{
1048 u32 fw_vers = 0;
1049 struct adapter *adapter = dev->priv;
1050
1051 t3_get_fw_version(adapter, &fw_vers);
1052
1053 strcpy(info->driver, DRV_NAME);
1054 strcpy(info->version, DRV_VERSION);
1055 strcpy(info->bus_info, pci_name(adapter->pdev));
1056 if (!fw_vers)
1057 strcpy(info->fw_version, "N/A");
1058 else {
1059 snprintf(info->fw_version, sizeof(info->fw_version),
1060 "%s %u.%u.%u",
1061 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1062 G_FW_VERSION_MAJOR(fw_vers),
1063 G_FW_VERSION_MINOR(fw_vers),
1064 G_FW_VERSION_MICRO(fw_vers));
1065 }
1066}
1067
1068static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1069{
1070 if (stringset == ETH_SS_STATS)
1071 memcpy(data, stats_strings, sizeof(stats_strings));
1072}
1073
1074static unsigned long collect_sge_port_stats(struct adapter *adapter,
1075 struct port_info *p, int idx)
1076{
1077 int i;
1078 unsigned long tot = 0;
1079
1080 for (i = 0; i < p->nqsets; ++i)
1081 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
1082 return tot;
1083}
1084
1085static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1086 u64 *data)
1087{
1088 struct adapter *adapter = dev->priv;
1089 struct port_info *pi = netdev_priv(dev);
1090 const struct mac_stats *s;
1091
1092 spin_lock(&adapter->stats_lock);
1093 s = t3_mac_update_stats(&pi->mac);
1094 spin_unlock(&adapter->stats_lock);
1095
1096 *data++ = s->tx_octets;
1097 *data++ = s->tx_frames;
1098 *data++ = s->tx_mcast_frames;
1099 *data++ = s->tx_bcast_frames;
1100 *data++ = s->tx_pause;
1101 *data++ = s->tx_underrun;
1102 *data++ = s->tx_fifo_urun;
1103
1104 *data++ = s->tx_frames_64;
1105 *data++ = s->tx_frames_65_127;
1106 *data++ = s->tx_frames_128_255;
1107 *data++ = s->tx_frames_256_511;
1108 *data++ = s->tx_frames_512_1023;
1109 *data++ = s->tx_frames_1024_1518;
1110 *data++ = s->tx_frames_1519_max;
1111
1112 *data++ = s->rx_octets;
1113 *data++ = s->rx_frames;
1114 *data++ = s->rx_mcast_frames;
1115 *data++ = s->rx_bcast_frames;
1116 *data++ = s->rx_pause;
1117 *data++ = s->rx_fcs_errs;
1118 *data++ = s->rx_symbol_errs;
1119 *data++ = s->rx_short;
1120 *data++ = s->rx_jabber;
1121 *data++ = s->rx_too_long;
1122 *data++ = s->rx_fifo_ovfl;
1123
1124 *data++ = s->rx_frames_64;
1125 *data++ = s->rx_frames_65_127;
1126 *data++ = s->rx_frames_128_255;
1127 *data++ = s->rx_frames_256_511;
1128 *data++ = s->rx_frames_512_1023;
1129 *data++ = s->rx_frames_1024_1518;
1130 *data++ = s->rx_frames_1519_max;
1131
1132 *data++ = pi->phy.fifo_errors;
1133
1134 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1135 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1136 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1137 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1138 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1139 *data++ = s->rx_cong_drops;
1140}
1141
1142static inline void reg_block_dump(struct adapter *ap, void *buf,
1143 unsigned int start, unsigned int end)
1144{
1145 u32 *p = buf + start;
1146
1147 for (; start <= end; start += sizeof(u32))
1148 *p++ = t3_read_reg(ap, start);
1149}
1150
1151static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1152 void *buf)
1153{
1154 struct adapter *ap = dev->priv;
1155
1156 /*
1157 * Version scheme:
1158 * bits 0..9: chip version
1159 * bits 10..15: chip revision
1160 * bit 31: set for PCIe cards
1161 */
1162 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1163
1164 /*
1165 * We skip the MAC statistics registers because they are clear-on-read.
1166 * Also reading multi-register stats would need to synchronize with the
1167 * periodic mac stats accumulation. Hard to justify the complexity.
1168 */
1169 memset(buf, 0, T3_REGMAP_SIZE);
1170 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1171 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1172 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1173 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1174 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1175 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1176 XGM_REG(A_XGM_SERDES_STAT3, 1));
1177 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1178 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1179}
1180
1181static int restart_autoneg(struct net_device *dev)
1182{
1183 struct port_info *p = netdev_priv(dev);
1184
1185 if (!netif_running(dev))
1186 return -EAGAIN;
1187 if (p->link_config.autoneg != AUTONEG_ENABLE)
1188 return -EINVAL;
1189 p->phy.ops->autoneg_restart(&p->phy);
1190 return 0;
1191}
1192
1193static int cxgb3_phys_id(struct net_device *dev, u32 data)
1194{
1195 int i;
1196 struct adapter *adapter = dev->priv;
1197
1198 if (data == 0)
1199 data = 2;
1200
1201 for (i = 0; i < data * 2; i++) {
1202 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1203 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1204 if (msleep_interruptible(500))
1205 break;
1206 }
1207 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1208 F_GPIO0_OUT_VAL);
1209 return 0;
1210}
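/*
 * Typically reached via "ethtool -p eth0 5" (interface name assumed),
 * which toggles the LED on GPIO0 at 1 Hz for 5 seconds and then leaves
 * the pin driven high.
 */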
1211
1212static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1213{
1214 struct port_info *p = netdev_priv(dev);
1215
1216 cmd->supported = p->link_config.supported;
1217 cmd->advertising = p->link_config.advertising;
1218
1219 if (netif_carrier_ok(dev)) {
1220 cmd->speed = p->link_config.speed;
1221 cmd->duplex = p->link_config.duplex;
1222 } else {
1223 cmd->speed = -1;
1224 cmd->duplex = -1;
1225 }
1226
1227 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1228 cmd->phy_address = p->phy.addr;
1229 cmd->transceiver = XCVR_EXTERNAL;
1230 cmd->autoneg = p->link_config.autoneg;
1231 cmd->maxtxpkt = 0;
1232 cmd->maxrxpkt = 0;
1233 return 0;
1234}
1235
1236static int speed_duplex_to_caps(int speed, int duplex)
1237{
1238 int cap = 0;
1239
1240 switch (speed) {
1241 case SPEED_10:
1242 if (duplex == DUPLEX_FULL)
1243 cap = SUPPORTED_10baseT_Full;
1244 else
1245 cap = SUPPORTED_10baseT_Half;
1246 break;
1247 case SPEED_100:
1248 if (duplex == DUPLEX_FULL)
1249 cap = SUPPORTED_100baseT_Full;
1250 else
1251 cap = SUPPORTED_100baseT_Half;
1252 break;
1253 case SPEED_1000:
1254 if (duplex == DUPLEX_FULL)
1255 cap = SUPPORTED_1000baseT_Full;
1256 else
1257 cap = SUPPORTED_1000baseT_Half;
1258 break;
1259 case SPEED_10000:
1260 if (duplex == DUPLEX_FULL)
1261 cap = SUPPORTED_10000baseT_Full;
1262 }
1263 return cap;
1264}
1265
1266#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1267 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1268 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1269 ADVERTISED_10000baseT_Full)
1270
1271static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1272{
1273 struct port_info *p = netdev_priv(dev);
1274 struct link_config *lc = &p->link_config;
1275
1276 if (!(lc->supported & SUPPORTED_Autoneg))
1277 return -EOPNOTSUPP; /* can't change speed/duplex */
1278
1279 if (cmd->autoneg == AUTONEG_DISABLE) {
1280 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1281
1282		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1283			return -EINVAL;	/* 1Gb/s requires autonegotiation */
1284 lc->requested_speed = cmd->speed;
1285 lc->requested_duplex = cmd->duplex;
1286 lc->advertising = 0;
1287 } else {
1288 cmd->advertising &= ADVERTISED_MASK;
1289 cmd->advertising &= lc->supported;
1290 if (!cmd->advertising)
1291 return -EINVAL;
1292 lc->requested_speed = SPEED_INVALID;
1293 lc->requested_duplex = DUPLEX_INVALID;
1294 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1295 }
1296 lc->autoneg = cmd->autoneg;
1297 if (netif_running(dev))
1298 t3_link_start(&p->phy, &p->mac, lc);
1299 return 0;
1300}
1301
1302static void get_pauseparam(struct net_device *dev,
1303 struct ethtool_pauseparam *epause)
1304{
1305 struct port_info *p = netdev_priv(dev);
1306
1307 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1308 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1309 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1310}
1311
1312static int set_pauseparam(struct net_device *dev,
1313 struct ethtool_pauseparam *epause)
1314{
1315 struct port_info *p = netdev_priv(dev);
1316 struct link_config *lc = &p->link_config;
1317
1318 if (epause->autoneg == AUTONEG_DISABLE)
1319 lc->requested_fc = 0;
1320 else if (lc->supported & SUPPORTED_Autoneg)
1321 lc->requested_fc = PAUSE_AUTONEG;
1322 else
1323 return -EINVAL;
1324
1325 if (epause->rx_pause)
1326 lc->requested_fc |= PAUSE_RX;
1327 if (epause->tx_pause)
1328 lc->requested_fc |= PAUSE_TX;
1329 if (lc->autoneg == AUTONEG_ENABLE) {
1330 if (netif_running(dev))
1331 t3_link_start(&p->phy, &p->mac, lc);
1332 } else {
1333 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1334 if (netif_running(dev))
1335 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1336 }
1337 return 0;
1338}
1339
1340static u32 get_rx_csum(struct net_device *dev)
1341{
1342 struct port_info *p = netdev_priv(dev);
1343
1344 return p->rx_csum_offload;
1345}
1346
1347static int set_rx_csum(struct net_device *dev, u32 data)
1348{
1349 struct port_info *p = netdev_priv(dev);
1350
1351 p->rx_csum_offload = data;
1352 return 0;
1353}
1354
1355static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1356{
1357 struct adapter *adapter = dev->priv;
1358
1359 e->rx_max_pending = MAX_RX_BUFFERS;
1360 e->rx_mini_max_pending = 0;
1361 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1362 e->tx_max_pending = MAX_TXQ_ENTRIES;
1363
1364 e->rx_pending = adapter->params.sge.qset[0].fl_size;
1365 e->rx_mini_pending = adapter->params.sge.qset[0].rspq_size;
1366 e->rx_jumbo_pending = adapter->params.sge.qset[0].jumbo_size;
1367 e->tx_pending = adapter->params.sge.qset[0].txq_size[0];
1368}
1369
1370static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1371{
1372 int i;
1373 struct adapter *adapter = dev->priv;
1374
1375 if (e->rx_pending > MAX_RX_BUFFERS ||
1376 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1377 e->tx_pending > MAX_TXQ_ENTRIES ||
1378 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1379 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1380 e->rx_pending < MIN_FL_ENTRIES ||
1381 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1382 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1383 return -EINVAL;
1384
1385 if (adapter->flags & FULL_INIT_DONE)
1386 return -EBUSY;
1387
1388 for (i = 0; i < SGE_QSETS; ++i) {
1389 struct qset_params *q = &adapter->params.sge.qset[i];
1390
1391 q->rspq_size = e->rx_mini_pending;
1392 q->fl_size = e->rx_pending;
1393 q->jumbo_size = e->rx_jumbo_pending;
1394 q->txq_size[0] = e->tx_pending;
1395 q->txq_size[1] = e->tx_pending;
1396 q->txq_size[2] = e->tx_pending;
1397 }
1398 return 0;
1399}
1400
1401static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1402{
1403 struct adapter *adapter = dev->priv;
1404 struct qset_params *qsp = &adapter->params.sge.qset[0];
1405 struct sge_qset *qs = &adapter->sge.qs[0];
1406
1407 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1408 return -EINVAL;
1409
1410 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1411 t3_update_qset_coalesce(qs, qsp);
1412 return 0;
1413}
1414
1415static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1416{
1417 struct adapter *adapter = dev->priv;
1418 struct qset_params *q = adapter->params.sge.qset;
1419
1420 c->rx_coalesce_usecs = q->coalesce_usecs;
1421 return 0;
1422}
1423
1424static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1425		      u8 *data)
1426{
1427 int i, err = 0;
1428 struct adapter *adapter = dev->priv;
1429
1430 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1431 if (!buf)
1432 return -ENOMEM;
1433
1434 e->magic = EEPROM_MAGIC;
1435 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1436		err = t3_seeprom_read(adapter, i, (u32 *)&buf[i]);
1437
1438 if (!err)
1439 memcpy(data, buf + e->offset, e->len);
1440 kfree(buf);
1441 return err;
1442}
1443
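/*
 * Worked example of the read-modify-write below (offsets assumed for
 * illustration): a 6-byte write at offset 3 yields aligned_offset = 0
 * and aligned_len = 12.  The first and last words are read back, the six
 * bytes are patched in at buf + 3, and all three words are rewritten;
 * the middle word is fully overwritten and so need not be read first.
 */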
1444static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1445		      u8 *data)
1446{
1447 u8 *buf;
1448 int err = 0;
1449 u32 aligned_offset, aligned_len, *p;
1450 struct adapter *adapter = dev->priv;
1451
1452 if (eeprom->magic != EEPROM_MAGIC)
1453 return -EINVAL;
1454
1455 aligned_offset = eeprom->offset & ~3;
1456 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1457
1458 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1459 buf = kmalloc(aligned_len, GFP_KERNEL);
1460 if (!buf)
1461 return -ENOMEM;
1462 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1463 if (!err && aligned_len > 4)
1464 err = t3_seeprom_read(adapter,
1465 aligned_offset + aligned_len - 4,
1466					      (u32 *)&buf[aligned_len - 4]);
1467 if (err)
1468 goto out;
1469 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1470 } else
1471 buf = data;
1472
1473 err = t3_seeprom_wp(adapter, 0);
1474 if (err)
1475 goto out;
1476
1477 for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1478 err = t3_seeprom_write(adapter, aligned_offset, *p);
1479 aligned_offset += 4;
1480 }
1481
1482 if (!err)
1483 err = t3_seeprom_wp(adapter, 1);
1484out:
1485 if (buf != data)
1486 kfree(buf);
1487 return err;
1488}
1489
1490static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1491{
1492 wol->supported = 0;
1493 wol->wolopts = 0;
1494 memset(&wol->sopass, 0, sizeof(wol->sopass));
1495}
1496
1497static const struct ethtool_ops cxgb_ethtool_ops = {
1498 .get_settings = get_settings,
1499 .set_settings = set_settings,
1500 .get_drvinfo = get_drvinfo,
1501 .get_msglevel = get_msglevel,
1502 .set_msglevel = set_msglevel,
1503 .get_ringparam = get_sge_param,
1504 .set_ringparam = set_sge_param,
1505 .get_coalesce = get_coalesce,
1506 .set_coalesce = set_coalesce,
1507 .get_eeprom_len = get_eeprom_len,
1508 .get_eeprom = get_eeprom,
1509 .set_eeprom = set_eeprom,
1510 .get_pauseparam = get_pauseparam,
1511 .set_pauseparam = set_pauseparam,
1512 .get_rx_csum = get_rx_csum,
1513 .set_rx_csum = set_rx_csum,
1514 .get_tx_csum = ethtool_op_get_tx_csum,
1515 .set_tx_csum = ethtool_op_set_tx_csum,
1516 .get_sg = ethtool_op_get_sg,
1517 .set_sg = ethtool_op_set_sg,
1518 .get_link = ethtool_op_get_link,
1519 .get_strings = get_strings,
1520 .phys_id = cxgb3_phys_id,
1521 .nway_reset = restart_autoneg,
1522 .get_stats_count = get_stats_count,
1523 .get_ethtool_stats = get_stats,
1524 .get_regs_len = get_regs_len,
1525 .get_regs = get_regs,
1526 .get_wol = get_wol,
1527 .get_tso = ethtool_op_get_tso,
1528 .set_tso = ethtool_op_set_tso,
1529 .get_perm_addr = ethtool_op_get_perm_addr
1530};
1531
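/*
 * A negative value means "parameter not supplied" and always passes the
 * range check; the CHELSIO_SET_QSET_PARAMS handler below relies on this
 * to update only the fields the caller actually set.
 */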
1532static int in_range(int val, int lo, int hi)
1533{
1534 return val < 0 || (val <= hi && val >= lo);
1535}
1536
1537static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1538{
1539 int ret;
1540 u32 cmd;
1541 struct adapter *adapter = dev->priv;
1542
1543 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1544 return -EFAULT;
1545
1546 switch (cmd) {
1547 case CHELSIO_SETREG:{
1548 struct ch_reg edata;
1549
1550 if (!capable(CAP_NET_ADMIN))
1551 return -EPERM;
1552 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1553 return -EFAULT;
1554 if ((edata.addr & 3) != 0
1555 || edata.addr >= adapter->mmio_len)
1556 return -EINVAL;
1557 writel(edata.val, adapter->regs + edata.addr);
1558 break;
1559 }
1560 case CHELSIO_GETREG:{
1561 struct ch_reg edata;
1562
1563 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1564 return -EFAULT;
1565 if ((edata.addr & 3) != 0
1566 || edata.addr >= adapter->mmio_len)
1567 return -EINVAL;
1568 edata.val = readl(adapter->regs + edata.addr);
1569 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1570 return -EFAULT;
1571 break;
1572 }
1573 case CHELSIO_SET_QSET_PARAMS:{
1574 int i;
1575 struct qset_params *q;
1576 struct ch_qset_params t;
1577
1578 if (!capable(CAP_NET_ADMIN))
1579 return -EPERM;
1580 if (copy_from_user(&t, useraddr, sizeof(t)))
1581 return -EFAULT;
1582 if (t.qset_idx >= SGE_QSETS)
1583 return -EINVAL;
1584 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1585 !in_range(t.cong_thres, 0, 255) ||
1586 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1587 MAX_TXQ_ENTRIES) ||
1588 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1589 MAX_TXQ_ENTRIES) ||
1590 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1591 MAX_CTRL_TXQ_ENTRIES) ||
1592 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1593 MAX_RX_BUFFERS)
1594 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1595 MAX_RX_JUMBO_BUFFERS)
1596 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1597 MAX_RSPQ_ENTRIES))
1598 return -EINVAL;
1599 if ((adapter->flags & FULL_INIT_DONE) &&
1600 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1601 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1602 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1603 t.polling >= 0 || t.cong_thres >= 0))
1604 return -EBUSY;
1605
1606 q = &adapter->params.sge.qset[t.qset_idx];
1607
1608 if (t.rspq_size >= 0)
1609 q->rspq_size = t.rspq_size;
1610 if (t.fl_size[0] >= 0)
1611 q->fl_size = t.fl_size[0];
1612 if (t.fl_size[1] >= 0)
1613 q->jumbo_size = t.fl_size[1];
1614 if (t.txq_size[0] >= 0)
1615 q->txq_size[0] = t.txq_size[0];
1616 if (t.txq_size[1] >= 0)
1617 q->txq_size[1] = t.txq_size[1];
1618 if (t.txq_size[2] >= 0)
1619 q->txq_size[2] = t.txq_size[2];
1620 if (t.cong_thres >= 0)
1621 q->cong_thres = t.cong_thres;
1622 if (t.intr_lat >= 0) {
1623 struct sge_qset *qs =
1624 &adapter->sge.qs[t.qset_idx];
1625
1626 q->coalesce_usecs = t.intr_lat;
1627 t3_update_qset_coalesce(qs, q);
1628 }
1629 if (t.polling >= 0) {
1630 if (adapter->flags & USING_MSIX)
1631 q->polling = t.polling;
1632 else {
1633 /* No polling with INTx for T3A */
1634 if (adapter->params.rev == 0 &&
1635 !(adapter->flags & USING_MSI))
1636 t.polling = 0;
1637
1638 for (i = 0; i < SGE_QSETS; i++) {
1639 q = &adapter->params.sge.
1640 qset[i];
1641 q->polling = t.polling;
1642 }
1643 }
1644 }
1645 break;
1646 }
1647 case CHELSIO_GET_QSET_PARAMS:{
1648 struct qset_params *q;
1649 struct ch_qset_params t;
1650
1651 if (copy_from_user(&t, useraddr, sizeof(t)))
1652 return -EFAULT;
1653 if (t.qset_idx >= SGE_QSETS)
1654 return -EINVAL;
1655
1656 q = &adapter->params.sge.qset[t.qset_idx];
1657 t.rspq_size = q->rspq_size;
1658 t.txq_size[0] = q->txq_size[0];
1659 t.txq_size[1] = q->txq_size[1];
1660 t.txq_size[2] = q->txq_size[2];
1661 t.fl_size[0] = q->fl_size;
1662 t.fl_size[1] = q->jumbo_size;
1663 t.polling = q->polling;
1664 t.intr_lat = q->coalesce_usecs;
1665 t.cong_thres = q->cong_thres;
1666
1667 if (copy_to_user(useraddr, &t, sizeof(t)))
1668 return -EFAULT;
1669 break;
1670 }
1671 case CHELSIO_SET_QSET_NUM:{
1672 struct ch_reg edata;
1673 struct port_info *pi = netdev_priv(dev);
1674 unsigned int i, first_qset = 0, other_qsets = 0;
1675
1676 if (!capable(CAP_NET_ADMIN))
1677 return -EPERM;
1678 if (adapter->flags & FULL_INIT_DONE)
1679 return -EBUSY;
1680 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1681 return -EFAULT;
1682 if (edata.val < 1 ||
1683 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1684 return -EINVAL;
1685
1686 for_each_port(adapter, i)
1687 if (adapter->port[i] && adapter->port[i] != dev)
1688 other_qsets += adap2pinfo(adapter, i)->nqsets;
1689
1690 if (edata.val + other_qsets > SGE_QSETS)
1691 return -EINVAL;
1692
1693 pi->nqsets = edata.val;
1694
1695 for_each_port(adapter, i)
1696 if (adapter->port[i]) {
1697 pi = adap2pinfo(adapter, i);
1698 pi->first_qset = first_qset;
1699 first_qset += pi->nqsets;
1700 }
1701 break;
1702 }
1703 case CHELSIO_GET_QSET_NUM:{
1704 struct ch_reg edata;
1705 struct port_info *pi = netdev_priv(dev);
1706
1707 edata.cmd = CHELSIO_GET_QSET_NUM;
1708 edata.val = pi->nqsets;
1709 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1710 return -EFAULT;
1711 break;
1712 }
1713 case CHELSIO_LOAD_FW:{
1714 u8 *fw_data;
1715 struct ch_mem_range t;
1716
1717 if (!capable(CAP_NET_ADMIN))
1718 return -EPERM;
1719 if (copy_from_user(&t, useraddr, sizeof(t)))
1720 return -EFAULT;
1721
1722 fw_data = kmalloc(t.len, GFP_KERNEL);
1723 if (!fw_data)
1724 return -ENOMEM;
1725
1726 if (copy_from_user
1727 (fw_data, useraddr + sizeof(t), t.len)) {
1728 kfree(fw_data);
1729 return -EFAULT;
1730 }
1731
1732 ret = t3_load_fw(adapter, fw_data, t.len);
1733 kfree(fw_data);
1734 if (ret)
1735 return ret;
1736 break;
1737 }
1738 case CHELSIO_SETMTUTAB:{
1739 struct ch_mtus m;
1740 int i;
1741
1742 if (!is_offload(adapter))
1743 return -EOPNOTSUPP;
1744 if (!capable(CAP_NET_ADMIN))
1745 return -EPERM;
1746 if (offload_running(adapter))
1747 return -EBUSY;
1748 if (copy_from_user(&m, useraddr, sizeof(m)))
1749 return -EFAULT;
1750 if (m.nmtus != NMTUS)
1751 return -EINVAL;
1752 if (m.mtus[0] < 81) /* accommodate SACK */
1753 return -EINVAL;
1754
1755 /* MTUs must be in ascending order */
1756 for (i = 1; i < NMTUS; ++i)
1757 if (m.mtus[i] < m.mtus[i - 1])
1758 return -EINVAL;
1759
1760 memcpy(adapter->params.mtus, m.mtus,
1761 sizeof(adapter->params.mtus));
1762 break;
1763 }
1764 case CHELSIO_GET_PM:{
1765 struct tp_params *p = &adapter->params.tp;
1766		struct ch_pm m = { .cmd = CHELSIO_GET_PM };
1767
1768 if (!is_offload(adapter))
1769 return -EOPNOTSUPP;
1770 m.tx_pg_sz = p->tx_pg_size;
1771 m.tx_num_pg = p->tx_num_pgs;
1772 m.rx_pg_sz = p->rx_pg_size;
1773 m.rx_num_pg = p->rx_num_pgs;
1774 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1775 if (copy_to_user(useraddr, &m, sizeof(m)))
1776 return -EFAULT;
1777 break;
1778 }
1779 case CHELSIO_SET_PM:{
1780 struct ch_pm m;
1781 struct tp_params *p = &adapter->params.tp;
1782
1783 if (!is_offload(adapter))
1784 return -EOPNOTSUPP;
1785 if (!capable(CAP_NET_ADMIN))
1786 return -EPERM;
1787 if (adapter->flags & FULL_INIT_DONE)
1788 return -EBUSY;
1789 if (copy_from_user(&m, useraddr, sizeof(m)))
1790 return -EFAULT;
1791 if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
1792 !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
1793 return -EINVAL; /* not power of 2 */
1794 if (!(m.rx_pg_sz & 0x14000))
1795 return -EINVAL; /* not 16KB or 64KB */
1796			if (!(m.tx_pg_sz & 0x1554000))
1797				return -EINVAL;	/* not a power-of-4 size from 16KB to 16MB */
1798 if (m.tx_num_pg == -1)
1799 m.tx_num_pg = p->tx_num_pgs;
1800 if (m.rx_num_pg == -1)
1801 m.rx_num_pg = p->rx_num_pgs;
1802 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1803 return -EINVAL;
1804 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1805 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1806 return -EINVAL;
1807 p->rx_pg_size = m.rx_pg_sz;
1808 p->tx_pg_size = m.tx_pg_sz;
1809 p->rx_num_pgs = m.rx_num_pg;
1810 p->tx_num_pgs = m.tx_num_pg;
1811 break;
1812 }
1813 case CHELSIO_GET_MEM:{
1814 struct ch_mem_range t;
1815 struct mc7 *mem;
1816 u64 buf[32];
1817
1818 if (!is_offload(adapter))
1819 return -EOPNOTSUPP;
1820 if (!(adapter->flags & FULL_INIT_DONE))
1821 return -EIO; /* need the memory controllers */
1822 if (copy_from_user(&t, useraddr, sizeof(t)))
1823 return -EFAULT;
1824 if ((t.addr & 7) || (t.len & 7))
1825 return -EINVAL;
1826 if (t.mem_id == MEM_CM)
1827 mem = &adapter->cm;
1828 else if (t.mem_id == MEM_PMRX)
1829 mem = &adapter->pmrx;
1830 else if (t.mem_id == MEM_PMTX)
1831 mem = &adapter->pmtx;
1832 else
1833 return -EINVAL;
1834
1835 /*
1836 * Version scheme:
1837 * bits 0..9: chip version
1838 * bits 10..15: chip revision
1839 */
1840 t.version = 3 | (adapter->params.rev << 10);
1841 if (copy_to_user(useraddr, &t, sizeof(t)))
1842 return -EFAULT;
1843
1844 /*
1845 * Read 256 bytes at a time as len can be large and we don't
1846 * want to use huge intermediate buffers.
1847 */
1848 useraddr += sizeof(t); /* advance to start of buffer */
1849 while (t.len) {
1850 unsigned int chunk =
1851 min_t(unsigned int, t.len, sizeof(buf));
1852
1853 ret =
1854 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
1855 buf);
1856 if (ret)
1857 return ret;
1858 if (copy_to_user(useraddr, buf, chunk))
1859 return -EFAULT;
1860 useraddr += chunk;
1861 t.addr += chunk;
1862 t.len -= chunk;
1863 }
1864 break;
1865 }
1866 case CHELSIO_SET_TRACE_FILTER:{
1867 struct ch_trace t;
1868 const struct trace_params *tp;
1869
1870 if (!capable(CAP_NET_ADMIN))
1871 return -EPERM;
1872 if (!offload_running(adapter))
1873 return -EAGAIN;
1874 if (copy_from_user(&t, useraddr, sizeof(t)))
1875 return -EFAULT;
1876
1877 tp = (const struct trace_params *)&t.sip;
1878 if (t.config_tx)
1879 t3_config_trace_filter(adapter, tp, 0,
1880 t.invert_match,
1881 t.trace_tx);
1882 if (t.config_rx)
1883 t3_config_trace_filter(adapter, tp, 1,
1884 t.invert_match,
1885 t.trace_rx);
1886 break;
1887 }
1888 case CHELSIO_SET_PKTSCHED:{
1889 struct ch_pktsched_params p;
1890
1891 if (!capable(CAP_NET_ADMIN))
1892 return -EPERM;
1893 if (!adapter->open_device_map)
1894 return -EAGAIN; /* uP and SGE must be running */
1895 if (copy_from_user(&p, useraddr, sizeof(p)))
1896 return -EFAULT;
1897 send_pktsched_cmd(adapter, p.sched, p.idx, p.min, p.max,
1898 p.binding);
1899 break;
1900
1901 }
1902 default:
1903 return -EOPNOTSUPP;
1904 }
1905 return 0;
1906}
1907
1908static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1909{
1910 int ret, mmd;
1911 struct adapter *adapter = dev->priv;
1912 struct port_info *pi = netdev_priv(dev);
1913 struct mii_ioctl_data *data = if_mii(req);
1914
1915 switch (cmd) {
1916 case SIOCGMIIPHY:
1917 data->phy_id = pi->phy.addr;
1918 /* FALLTHRU */
1919 case SIOCGMIIREG:{
1920 u32 val;
1921 struct cphy *phy = &pi->phy;
1922
1923 if (!phy->mdio_read)
1924 return -EOPNOTSUPP;
1925 if (is_10G(adapter)) {
1926 mmd = data->phy_id >> 8;
1927 if (!mmd)
1928 mmd = MDIO_DEV_PCS;
1929 else if (mmd > MDIO_DEV_XGXS)
1930 return -EINVAL;
1931
1932 ret =
1933 phy->mdio_read(adapter, data->phy_id & 0x1f,
1934 mmd, data->reg_num, &val);
1935 } else
1936 ret =
1937 phy->mdio_read(adapter, data->phy_id & 0x1f,
1938 0, data->reg_num & 0x1f,
1939 &val);
1940 if (!ret)
1941 data->val_out = val;
1942 break;
1943 }
1944 case SIOCSMIIREG:{
1945 struct cphy *phy = &pi->phy;
1946
1947 if (!capable(CAP_NET_ADMIN))
1948 return -EPERM;
1949 if (!phy->mdio_write)
1950 return -EOPNOTSUPP;
1951 if (is_10G(adapter)) {
1952 mmd = data->phy_id >> 8;
1953 if (!mmd)
1954 mmd = MDIO_DEV_PCS;
1955 else if (mmd > MDIO_DEV_XGXS)
1956 return -EINVAL;
1957
1958 ret =
1959 phy->mdio_write(adapter,
1960 data->phy_id & 0x1f, mmd,
1961 data->reg_num,
1962 data->val_in);
1963 } else
1964 ret =
1965 phy->mdio_write(adapter,
1966 data->phy_id & 0x1f, 0,
1967 data->reg_num & 0x1f,
1968 data->val_in);
1969 break;
1970 }
1971 case SIOCCHIOCTL:
1972 return cxgb_extension_ioctl(dev, req->ifr_data);
1973 default:
1974 return -EOPNOTSUPP;
1975 }
1976 return ret;
1977}
1978
1979static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1980{
1981 int ret;
1982 struct adapter *adapter = dev->priv;
1983 struct port_info *pi = netdev_priv(dev);
1984
1985 if (new_mtu < 81) /* accommodate SACK */
1986 return -EINVAL;
1987 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
1988 return ret;
1989 dev->mtu = new_mtu;
1990 init_port_mtus(adapter);
1991 if (adapter->params.rev == 0 && offload_running(adapter))
1992 t3_load_mtus(adapter, adapter->params.mtus,
1993 adapter->params.a_wnd, adapter->params.b_wnd,
1994 adapter->port[0]->mtu);
1995 return 0;
1996}
1997
1998static int cxgb_set_mac_addr(struct net_device *dev, void *p)
1999{
2000 struct adapter *adapter = dev->priv;
2001 struct port_info *pi = netdev_priv(dev);
2002 struct sockaddr *addr = p;
2003
2004 if (!is_valid_ether_addr(addr->sa_data))
2005 return -EINVAL;
2006
2007 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2008 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2009 if (offload_running(adapter))
2010 write_smt_entry(adapter, pi->port_id);
2011 return 0;
2012}
2013
2014/**
2015 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2016 * @adap: the adapter
2017 * @p: the port
2018 *
2019 * Ensures that current Rx processing on any of the queues associated with
2020 * the given port completes before returning. We do this by acquiring and
2021 * releasing the locks of the response queues associated with the port.
2022 */
2023static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2024{
2025 int i;
2026
2027 for (i = 0; i < p->nqsets; i++) {
2028 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2029
2030 spin_lock_irq(&q->lock);
2031 spin_unlock_irq(&q->lock);
2032 }
2033}
2034
2035static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2036{
2037 struct adapter *adapter = dev->priv;
2038 struct port_info *pi = netdev_priv(dev);
2039
2040 pi->vlan_grp = grp;
2041 if (adapter->params.rev > 0)
2042 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2043 else {
2044 /* single control for all ports */
2045 unsigned int i, have_vlans = 0;
2046 for_each_port(adapter, i)
2047 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2048
2049 t3_set_vlan_accel(adapter, 1, have_vlans);
2050 }
2051 t3_synchronize_rx(adapter, pi);
2052}
2053
2054static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2055{
2056 /* nothing */
2057}
2058
2059#ifdef CONFIG_NET_POLL_CONTROLLER
2060static void cxgb_netpoll(struct net_device *dev)
2061{
2062 struct adapter *adapter = dev->priv;
2063 struct sge_qset *qs = dev2qset(dev);
2064
2065 t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
2066 adapter);
2067}
2068#endif
2069
2070/*
2071 * Periodic accumulation of MAC statistics.
2072 */
2073static void mac_stats_update(struct adapter *adapter)
2074{
2075 int i;
2076
2077 for_each_port(adapter, i) {
2078 struct net_device *dev = adapter->port[i];
2079 struct port_info *p = netdev_priv(dev);
2080
2081 if (netif_running(dev)) {
2082 spin_lock(&adapter->stats_lock);
2083 t3_mac_update_stats(&p->mac);
2084 spin_unlock(&adapter->stats_lock);
2085 }
2086 }
2087}
2088
2089static void check_link_status(struct adapter *adapter)
2090{
2091 int i;
2092
2093 for_each_port(adapter, i) {
2094 struct net_device *dev = adapter->port[i];
2095 struct port_info *p = netdev_priv(dev);
2096
2097 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2098 t3_link_changed(adapter, i);
2099 }
2100}
2101
2102static void t3_adap_check_task(struct work_struct *work)
2103{
2104 struct adapter *adapter = container_of(work, struct adapter,
2105 adap_check_task.work);
2106 const struct adapter_params *p = &adapter->params;
2107
2108 adapter->check_task_cnt++;
2109
2110 /* Check link status for PHYs without interrupts */
2111 if (p->linkpoll_period)
2112 check_link_status(adapter);
2113
2114	/* Accumulate MAC stats if it's time (linkpoll_period is in 0.1s units) */
2115 if (!p->linkpoll_period ||
2116 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2117 p->stats_update_period) {
2118 mac_stats_update(adapter);
2119 adapter->check_task_cnt = 0;
2120 }
2121
2122	/* Schedule the next check if any port is active. */
2123 spin_lock(&adapter->work_lock);
2124 if (adapter->open_device_map & PORT_MASK)
2125 schedule_chk_task(adapter);
2126 spin_unlock(&adapter->work_lock);
2127}
2128
2129/*
2130 * Processes external (PHY) interrupts in process context.
2131 */
2132static void ext_intr_task(struct work_struct *work)
2133{
2134 struct adapter *adapter = container_of(work, struct adapter,
2135 ext_intr_handler_task);
2136
2137 t3_phy_intr_handler(adapter);
2138
2139 /* Now reenable external interrupts */
2140 spin_lock_irq(&adapter->work_lock);
2141 if (adapter->slow_intr_mask) {
2142 adapter->slow_intr_mask |= F_T3DBG;
2143 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2144 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2145 adapter->slow_intr_mask);
2146 }
2147 spin_unlock_irq(&adapter->work_lock);
2148}
2149
2150/*
2151 * Interrupt-context handler for external (PHY) interrupts.
2152 */
2153void t3_os_ext_intr_handler(struct adapter *adapter)
2154{
2155 /*
2156 * Schedule a task to handle external interrupts as they may be slow
2157 * and we use a mutex to protect MDIO registers. We disable PHY
2158 * interrupts in the meantime and let the task reenable them when
2159 * it's done.
2160 */
2161 spin_lock(&adapter->work_lock);
2162 if (adapter->slow_intr_mask) {
2163 adapter->slow_intr_mask &= ~F_T3DBG;
2164 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2165 adapter->slow_intr_mask);
2166 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2167 }
2168 spin_unlock(&adapter->work_lock);
2169}
2170
2171void t3_fatal_err(struct adapter *adapter)
2172{
2173 unsigned int fw_status[4];
2174
2175 if (adapter->flags & FULL_INIT_DONE) {
2176 t3_sge_stop(adapter);
2177 t3_intr_disable(adapter);
2178 }
2179 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2180 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2181 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2182 fw_status[0], fw_status[1],
2183 fw_status[2], fw_status[3]);
2184
2185}
2186
2187static int __devinit cxgb_enable_msix(struct adapter *adap)
2188{
2189 struct msix_entry entries[SGE_QSETS + 1];
2190 int i, err;
2191
2192 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2193 entries[i].entry = i;
2194
2195 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2196 if (!err) {
2197 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2198 adap->msix_info[i].vec = entries[i].vector;
2199 } else if (err > 0)
2200 dev_info(&adap->pdev->dev,
2201 "only %d MSI-X vectors left, not using MSI-X\n", err);
2202 return err;
2203}
2204
2205static void __devinit print_port_info(struct adapter *adap,
2206 const struct adapter_info *ai)
2207{
2208 static const char *pci_variant[] = {
2209 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2210 };
2211
2212 int i;
2213 char buf[80];
2214
2215 if (is_pcie(adap))
2216 snprintf(buf, sizeof(buf), "%s x%d",
2217 pci_variant[adap->params.pci.variant],
2218 adap->params.pci.width);
2219 else
2220 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2221 pci_variant[adap->params.pci.variant],
2222 adap->params.pci.speed, adap->params.pci.width);
2223
2224 for_each_port(adap, i) {
2225 struct net_device *dev = adap->port[i];
2226 const struct port_info *pi = netdev_priv(dev);
2227
2228 if (!test_bit(i, &adap->registered_device_map))
2229 continue;
2230 printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
2231 dev->name, ai->desc, pi->port_type->desc,
2232 adap->params.rev, buf,
2233 (adap->flags & USING_MSIX) ? " MSI-X" :
2234 (adap->flags & USING_MSI) ? " MSI" : "");
2235 if (adap->name == dev->name && adap->params.vpd.mclk)
2236 printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2237 adap->name, t3_mc7_size(&adap->cm) >> 20,
2238 t3_mc7_size(&adap->pmtx) >> 20,
2239 t3_mc7_size(&adap->pmrx) >> 20);
2240 }
2241}
2242
2243static int __devinit init_one(struct pci_dev *pdev,
2244 const struct pci_device_id *ent)
2245{
2246 static int version_printed;
2247
2248 int i, err, pci_using_dac = 0;
2249 unsigned long mmio_start, mmio_len;
2250 const struct adapter_info *ai;
2251 struct adapter *adapter = NULL;
2252 struct port_info *pi;
2253
2254 if (!version_printed) {
2255 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2256 ++version_printed;
2257 }
2258
2259 if (!cxgb3_wq) {
2260 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2261 if (!cxgb3_wq) {
2262 printk(KERN_ERR DRV_NAME
2263 ": cannot initialize work queue\n");
2264 return -ENOMEM;
2265 }
2266 }
2267
2268 err = pci_request_regions(pdev, DRV_NAME);
2269 if (err) {
2270 /* Just info, some other driver may have claimed the device. */
2271 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2272 return err;
2273 }
2274
2275 err = pci_enable_device(pdev);
2276 if (err) {
2277 dev_err(&pdev->dev, "cannot enable PCI device\n");
2278 goto out_release_regions;
2279 }
2280
2281 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2282 pci_using_dac = 1;
2283 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2284 if (err) {
2285 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2286 "coherent allocations\n");
2287 goto out_disable_device;
2288 }
2289 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2290 dev_err(&pdev->dev, "no usable DMA configuration\n");
2291 goto out_disable_device;
2292 }
2293
2294 pci_set_master(pdev);
2295
2296 mmio_start = pci_resource_start(pdev, 0);
2297 mmio_len = pci_resource_len(pdev, 0);
2298 ai = t3_get_adapter_info(ent->driver_data);
2299
2300 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2301 if (!adapter) {
2302 err = -ENOMEM;
2303 goto out_disable_device;
2304 }
2305
2306 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2307 if (!adapter->regs) {
2308 dev_err(&pdev->dev, "cannot map device registers\n");
2309 err = -ENOMEM;
2310 goto out_free_adapter;
2311 }
2312
2313 adapter->pdev = pdev;
2314 adapter->name = pci_name(pdev);
2315 adapter->msg_enable = dflt_msg_enable;
2316 adapter->mmio_len = mmio_len;
2317
2318 mutex_init(&adapter->mdio_lock);
2319 spin_lock_init(&adapter->work_lock);
2320 spin_lock_init(&adapter->stats_lock);
2321
2322 INIT_LIST_HEAD(&adapter->adapter_list);
2323 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2324 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2325
2326 for (i = 0; i < ai->nports; ++i) {
2327 struct net_device *netdev;
2328
2329 netdev = alloc_etherdev(sizeof(struct port_info));
2330 if (!netdev) {
2331 err = -ENOMEM;
2332 goto out_free_dev;
2333 }
2334
2335 SET_MODULE_OWNER(netdev);
2336 SET_NETDEV_DEV(netdev, &pdev->dev);
2337
2338 adapter->port[i] = netdev;
2339 pi = netdev_priv(netdev);
2340 pi->rx_csum_offload = 1;
2341 pi->nqsets = 1;
2342 pi->first_qset = i;
2343 pi->activity = 0;
2344 pi->port_id = i;
2345 netif_carrier_off(netdev);
2346 netdev->irq = pdev->irq;
2347 netdev->mem_start = mmio_start;
2348 netdev->mem_end = mmio_start + mmio_len - 1;
2349 netdev->priv = adapter;
2350 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2351 netdev->features |= NETIF_F_LLTX;
2352 if (pci_using_dac)
2353 netdev->features |= NETIF_F_HIGHDMA;
2354
2355 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2356 netdev->vlan_rx_register = vlan_rx_register;
2357 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
2358
2359 netdev->open = cxgb_open;
2360 netdev->stop = cxgb_close;
2361 netdev->hard_start_xmit = t3_eth_xmit;
2362 netdev->get_stats = cxgb_get_stats;
2363 netdev->set_multicast_list = cxgb_set_rxmode;
2364 netdev->do_ioctl = cxgb_ioctl;
2365 netdev->change_mtu = cxgb_change_mtu;
2366 netdev->set_mac_address = cxgb_set_mac_addr;
2367#ifdef CONFIG_NET_POLL_CONTROLLER
2368 netdev->poll_controller = cxgb_netpoll;
2369#endif
2370 netdev->weight = 64;
2371
2372 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2373 }
2374
2375 pci_set_drvdata(pdev, adapter->port[0]);
2376 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2377 err = -ENODEV;
2378 goto out_free_dev;
2379 }
2380
2381 /*
2382 * The card is now ready to go. If any errors occur during device
2383 * registration we do not fail the whole card but rather proceed only
2384 * with the ports we manage to register successfully. However we must
2385 * register at least one net device.
2386 */
2387 for_each_port(adapter, i) {
2388 err = register_netdev(adapter->port[i]);
2389 if (err)
2390 dev_warn(&pdev->dev,
2391 "cannot register net device %s, skipping\n",
2392 adapter->port[i]->name);
2393 else {
2394 /*
2395 * Change the name we use for messages to the name of
2396 * the first successfully registered interface.
2397 */
2398 if (!adapter->registered_device_map)
2399 adapter->name = adapter->port[i]->name;
2400
2401 __set_bit(i, &adapter->registered_device_map);
2402 }
2403 }
2404 if (!adapter->registered_device_map) {
2405 dev_err(&pdev->dev, "could not register any net devices\n");
2406 goto out_free_dev;
2407 }
2408
2409 /* Driver's ready. Reflect it on LEDs */
2410 t3_led_ready(adapter);
2411
2412 if (is_offload(adapter)) {
2413 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2414 cxgb3_adapter_ofld(adapter);
2415 }
2416
2417 /* See what interrupts we'll be using */
2418 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2419 adapter->flags |= USING_MSIX;
2420 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2421 adapter->flags |= USING_MSI;
2422
2423 err = sysfs_create_group(&adapter->port[0]->class_dev.kobj,
2424 &cxgb3_attr_group);
2425
2426 print_port_info(adapter, ai);
2427 return 0;
2428
2429out_free_dev:
2430 iounmap(adapter->regs);
2431 for (i = ai->nports - 1; i >= 0; --i)
2432 if (adapter->port[i])
2433 free_netdev(adapter->port[i]);
2434
2435out_free_adapter:
2436 kfree(adapter);
2437
2438out_disable_device:
2439 pci_disable_device(pdev);
2440out_release_regions:
2441 pci_release_regions(pdev);
2442 pci_set_drvdata(pdev, NULL);
2443 return err;
2444}
2445
2446static void __devexit remove_one(struct pci_dev *pdev)
2447{
2448 struct net_device *dev = pci_get_drvdata(pdev);
2449
2450 if (dev) {
2451 int i;
2452 struct adapter *adapter = dev->priv;
2453
2454 t3_sge_stop(adapter);
2455 sysfs_remove_group(&adapter->port[0]->class_dev.kobj,
2456 &cxgb3_attr_group);
2457
2458 for_each_port(adapter, i)
2459 if (test_bit(i, &adapter->registered_device_map))
2460 unregister_netdev(adapter->port[i]);
2461
2462 if (is_offload(adapter)) {
2463 cxgb3_adapter_unofld(adapter);
2464 if (test_bit(OFFLOAD_DEVMAP_BIT,
2465 &adapter->open_device_map))
2466 offload_close(&adapter->tdev);
2467 }
2468
2469 t3_free_sge_resources(adapter);
2470 cxgb_disable_msi(adapter);
2471
2472 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2473 if (adapter->dummy_netdev[i]) {
2474 free_netdev(adapter->dummy_netdev[i]);
2475 adapter->dummy_netdev[i] = NULL;
2476 }
2477
2478 for_each_port(adapter, i)
2479 if (adapter->port[i])
2480 free_netdev(adapter->port[i]);
2481
2482 iounmap(adapter->regs);
2483 kfree(adapter);
2484 pci_release_regions(pdev);
2485 pci_disable_device(pdev);
2486 pci_set_drvdata(pdev, NULL);
2487 }
2488}
2489
2490static struct pci_driver driver = {
2491 .name = DRV_NAME,
2492 .id_table = cxgb3_pci_tbl,
2493 .probe = init_one,
2494 .remove = __devexit_p(remove_one),
2495};
2496
2497static int __init cxgb3_init_module(void)
2498{
2499 int ret;
2500
2501 cxgb3_offload_init();
2502
2503 ret = pci_register_driver(&driver);
2504 return ret;
2505}
2506
2507static void __exit cxgb3_cleanup_module(void)
2508{
2509 pci_unregister_driver(&driver);
2510 if (cxgb3_wq)
2511 destroy_workqueue(cxgb3_wq);
2512}
2513
2514module_init(cxgb3_init_module);
2515module_exit(cxgb3_cleanup_module);
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
new file mode 100644
index 000000000000..c3a02d613382
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -0,0 +1,1222 @@
1/*
2 * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/list.h>
35#include <net/neighbour.h>
36#include <linux/notifier.h>
37#include <asm/atomic.h>
38#include <linux/proc_fs.h>
39#include <linux/if_vlan.h>
40#include <net/netevent.h>
41#include <linux/highmem.h>
42#include <linux/vmalloc.h>
43
44#include "common.h"
45#include "regs.h"
46#include "cxgb3_ioctl.h"
47#include "cxgb3_ctl_defs.h"
48#include "cxgb3_defs.h"
49#include "l2t.h"
50#include "firmware_exports.h"
51#include "cxgb3_offload.h"
52
53static LIST_HEAD(client_list);
54static LIST_HEAD(ofld_dev_list);
55static DEFINE_MUTEX(cxgb3_db_lock);
56
57static DEFINE_RWLOCK(adapter_list_lock);
58static LIST_HEAD(adapter_list);
59
60static const unsigned int MAX_ATIDS = 64 * 1024;
61static const unsigned int ATID_BASE = 0x100000;
62
63static inline int offload_activated(struct t3cdev *tdev)
64{
65 const struct adapter *adapter = tdev2adap(tdev);
66
67 return (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map));
68}
69
70/**
71 * cxgb3_register_client - register an offload client
72 * @client: the client
73 *
74 * Adds the client to the client list and calls back the client for
75 * each activated offload device.
76 */
77void cxgb3_register_client(struct cxgb3_client *client)
78{
79 struct t3cdev *tdev;
80
81 mutex_lock(&cxgb3_db_lock);
82 list_add_tail(&client->client_list, &client_list);
83
84 if (client->add) {
85 list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
86 if (offload_activated(tdev))
87 client->add(tdev);
88 }
89 }
90 mutex_unlock(&cxgb3_db_lock);
91}
92
93EXPORT_SYMBOL(cxgb3_register_client);
94
95/**
96 * cxgb3_unregister_client - unregister an offload client
97 * @client: the client
98 *
99 * Removes the client from the client list and calls back the client
100 * for each activated offload device.
101 */
102void cxgb3_unregister_client(struct cxgb3_client *client)
103{
104 struct t3cdev *tdev;
105
106 mutex_lock(&cxgb3_db_lock);
107 list_del(&client->client_list);
108
109 if (client->remove) {
110 list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
111 if (offload_activated(tdev))
112 client->remove(tdev);
113 }
114 }
115 mutex_unlock(&cxgb3_db_lock);
116}
117
118EXPORT_SYMBOL(cxgb3_unregister_client);
119
120/**
121 * cxgb3_add_clients - activate registered clients for an offload device
122 * @tdev: the offload device
123 *
124 * Calls back all registered clients once an offload device is activated.
125 */
126void cxgb3_add_clients(struct t3cdev *tdev)
127{
128 struct cxgb3_client *client;
129
130 mutex_lock(&cxgb3_db_lock);
131 list_for_each_entry(client, &client_list, client_list) {
132 if (client->add)
133 client->add(tdev);
134 }
135 mutex_unlock(&cxgb3_db_lock);
136}
137
138/**
139 * cxgb3_remove_clients - deactivates registered clients
140 * for an offload device
141 * @tdev: the offload device
142 *
143 * Calls back all registered clients once an offload device is deactivated.
144 */
145void cxgb3_remove_clients(struct t3cdev *tdev)
146{
147 struct cxgb3_client *client;
148
149 mutex_lock(&cxgb3_db_lock);
150 list_for_each_entry(client, &client_list, client_list) {
151 if (client->remove)
152 client->remove(tdev);
153 }
154 mutex_unlock(&cxgb3_db_lock);
155}
156
157static struct net_device *get_iff_from_mac(struct adapter *adapter,
158 const unsigned char *mac,
159 unsigned int vlan)
160{
161 int i;
162
163 for_each_port(adapter, i) {
164 const struct vlan_group *grp;
165 struct net_device *dev = adapter->port[i];
166 const struct port_info *p = netdev_priv(dev);
167
168 if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
169 if (vlan && vlan != VLAN_VID_MASK) {
170 grp = p->vlan_grp;
171 dev = grp ? grp->vlan_devices[vlan] : NULL;
172 } else
173 while (dev->master)
174 dev = dev->master;
175 return dev;
176 }
177 }
178 return NULL;
179}
180
181static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
182 void *data)
183{
184 int ret = 0;
185 struct ulp_iscsi_info *uiip = data;
186
187 switch (req) {
188 case ULP_ISCSI_GET_PARAMS:
189 uiip->pdev = adapter->pdev;
190 uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
191 uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
192 uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
193 /*
194 * On tx, the iscsi pdu has to be <= tx page size and has to
195 * fit into the Tx PM FIFO.
196 */
197 uiip->max_txsz = min(adapter->params.tp.tx_pg_size,
198 t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
199		/* On rx, the iscsi pdu has to be < rx page size and the whole
200		 * pdu + cpl headers has to fit into one sge buffer. */
201 uiip->max_rxsz = min_t(unsigned int,
202 adapter->params.tp.rx_pg_size,
203 (adapter->sge.qs[0].fl[1].buf_size -
204 sizeof(struct cpl_rx_data) * 2 -
205 sizeof(struct cpl_rx_data_ddp)));
206 break;
207 case ULP_ISCSI_SET_PARAMS:
208 t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
209 break;
210 default:
211 ret = -EOPNOTSUPP;
212 }
213 return ret;
214}
215
216/* Response queue used for RDMA events. */
217#define ASYNC_NOTIF_RSPQ 0
218
219static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
220{
221 int ret = 0;
222
223 switch (req) {
224 case RDMA_GET_PARAMS:{
225 struct rdma_info *req = data;
226 struct pci_dev *pdev = adapter->pdev;
227
228 req->udbell_physbase = pci_resource_start(pdev, 2);
229 req->udbell_len = pci_resource_len(pdev, 2);
230 req->tpt_base =
231 t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
232 req->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
233 req->pbl_base =
234 t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
235 req->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
236 req->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
237 req->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
238 req->kdb_addr = adapter->regs + A_SG_KDOORBELL;
239 req->pdev = pdev;
240 break;
241 }
242 case RDMA_CQ_OP:{
243 unsigned long flags;
244 struct rdma_cq_op *req = data;
245
246 /* may be called in any context */
247 spin_lock_irqsave(&adapter->sge.reg_lock, flags);
248 ret = t3_sge_cqcntxt_op(adapter, req->id, req->op,
249 req->credits);
250 spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
251 break;
252 }
253 case RDMA_GET_MEM:{
254 struct ch_mem_range *t = data;
255 struct mc7 *mem;
256
257 if ((t->addr & 7) || (t->len & 7))
258 return -EINVAL;
259 if (t->mem_id == MEM_CM)
260 mem = &adapter->cm;
261 else if (t->mem_id == MEM_PMRX)
262 mem = &adapter->pmrx;
263 else if (t->mem_id == MEM_PMTX)
264 mem = &adapter->pmtx;
265 else
266 return -EINVAL;
267
268 ret =
269 t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
270 (u64 *) t->buf);
271 if (ret)
272 return ret;
273 break;
274 }
275 case RDMA_CQ_SETUP:{
276 struct rdma_cq_setup *req = data;
277
278 spin_lock_irq(&adapter->sge.reg_lock);
279 ret =
280 t3_sge_init_cqcntxt(adapter, req->id,
281 req->base_addr, req->size,
282 ASYNC_NOTIF_RSPQ,
283 req->ovfl_mode, req->credits,
284 req->credit_thres);
285 spin_unlock_irq(&adapter->sge.reg_lock);
286 break;
287 }
288 case RDMA_CQ_DISABLE:
289 spin_lock_irq(&adapter->sge.reg_lock);
290 ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
291 spin_unlock_irq(&adapter->sge.reg_lock);
292 break;
293 case RDMA_CTRL_QP_SETUP:{
294 struct rdma_ctrlqp_setup *req = data;
295
296 spin_lock_irq(&adapter->sge.reg_lock);
297 ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
298 SGE_CNTXT_RDMA,
299 ASYNC_NOTIF_RSPQ,
300 req->base_addr, req->size,
301 FW_RI_TID_START, 1, 0);
302 spin_unlock_irq(&adapter->sge.reg_lock);
303 break;
304 }
305 default:
306 ret = -EOPNOTSUPP;
307 }
308 return ret;
309}
310
311static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
312{
313 struct adapter *adapter = tdev2adap(tdev);
314 struct tid_range *tid;
315 struct mtutab *mtup;
316 struct iff_mac *iffmacp;
317 struct ddp_params *ddpp;
318 struct adap_ports *ports;
319 int i;
320
321 switch (req) {
322 case GET_MAX_OUTSTANDING_WR:
323 *(unsigned int *)data = FW_WR_NUM;
324 break;
325 case GET_WR_LEN:
326 *(unsigned int *)data = WR_FLITS;
327 break;
328 case GET_TX_MAX_CHUNK:
329 *(unsigned int *)data = 1 << 20; /* 1MB */
330 break;
331 case GET_TID_RANGE:
332 tid = data;
333 tid->num = t3_mc5_size(&adapter->mc5) -
334 adapter->params.mc5.nroutes -
335 adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
336 tid->base = 0;
337 break;
338 case GET_STID_RANGE:
339 tid = data;
340 tid->num = adapter->params.mc5.nservers;
341 tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
342 adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
343 break;
344 case GET_L2T_CAPACITY:
345 *(unsigned int *)data = 2048;
346 break;
347 case GET_MTUS:
348 mtup = data;
349 mtup->size = NMTUS;
350 mtup->mtus = adapter->params.mtus;
351 break;
352 case GET_IFF_FROM_MAC:
353 iffmacp = data;
354 iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
355 iffmacp->vlan_tag &
356 VLAN_VID_MASK);
357 break;
358 case GET_DDP_PARAMS:
359 ddpp = data;
360 ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
361 ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
362 ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
363 break;
364 case GET_PORTS:
365 ports = data;
366 ports->nports = adapter->params.nports;
367 for_each_port(adapter, i)
368 ports->lldevs[i] = adapter->port[i];
369 break;
370 case ULP_ISCSI_GET_PARAMS:
371 case ULP_ISCSI_SET_PARAMS:
372 if (!offload_running(adapter))
373 return -EAGAIN;
374 return cxgb_ulp_iscsi_ctl(adapter, req, data);
375 case RDMA_GET_PARAMS:
376 case RDMA_CQ_OP:
377 case RDMA_CQ_SETUP:
378 case RDMA_CQ_DISABLE:
379 case RDMA_CTRL_QP_SETUP:
380 case RDMA_GET_MEM:
381 if (!offload_running(adapter))
382 return -EAGAIN;
383 return cxgb_rdma_ctl(adapter, req, data);
384 default:
385 return -EOPNOTSUPP;
386 }
387 return 0;
388}
389
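/*
 * Illustrative sketch (function name assumed, not part of this file): an
 * offload client might use the ctl hook above to query adapter parameters.
 * A real caller would go on to use the returned structures.
 */
static int example_query_params(struct t3cdev *tdev)
{
	struct adap_ports ports;
	struct mtutab mtus;
	int err;

	err = tdev->ctl(tdev, GET_PORTS, &ports);
	if (err)
		return err;
	err = tdev->ctl(tdev, GET_MTUS, &mtus);
	if (err)
		return err;
	printk(KERN_INFO "%s: %u ports, %u MTU table entries\n",
	       tdev->name, ports.nports, mtus.size);
	return 0;
}
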
390/*
391 * Dummy handler for Rx offload packets in case we get an offload packet before
392 * proper processing is set up. This complains and drops the packet as it isn't
393 * normal to get offload packets at this stage.
394 */
395static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
396 int n)
397{
398 CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data %u\n",
399 n, ntohl(*(u32 *)skbs[0]->data));
400 while (n--)
401 dev_kfree_skb_any(skbs[n]);
402 return 0;
403}
404
405static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
406{
407}
408
409void cxgb3_set_dummy_ops(struct t3cdev *dev)
410{
411 dev->recv = rx_offload_blackhole;
412 dev->neigh_update = dummy_neigh_update;
413}
414
415/*
416 * Free an active-open TID.
417 */
418void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
419{
420 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
421 union active_open_entry *p = atid2entry(t, atid);
422 void *ctx = p->t3c_tid.ctx;
423
424 spin_lock_bh(&t->atid_lock);
425 p->next = t->afree;
426 t->afree = p;
427 t->atids_in_use--;
428 spin_unlock_bh(&t->atid_lock);
429
430 return ctx;
431}
432
433EXPORT_SYMBOL(cxgb3_free_atid);
434
435/*
436 * Free a server TID and return it to the free pool.
437 */
438void cxgb3_free_stid(struct t3cdev *tdev, int stid)
439{
440 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
441 union listen_entry *p = stid2entry(t, stid);
442
443 spin_lock_bh(&t->stid_lock);
444 p->next = t->sfree;
445 t->sfree = p;
446 t->stids_in_use--;
447 spin_unlock_bh(&t->stid_lock);
448}
449
450EXPORT_SYMBOL(cxgb3_free_stid);
451
452void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
453 void *ctx, unsigned int tid)
454{
455 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
456
457 t->tid_tab[tid].client = client;
458 t->tid_tab[tid].ctx = ctx;
459 atomic_inc(&t->tids_in_use);
460}
461
462EXPORT_SYMBOL(cxgb3_insert_tid);
463
464/*
465 * Populate a TID_RELEASE WR. The skb must already be properly sized.
466 */
467static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
468{
469 struct cpl_tid_release *req;
470
471 skb->priority = CPL_PRIORITY_SETUP;
472 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
473 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
474 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
475}
476
477static void t3_process_tid_release_list(struct work_struct *work)
478{
479 struct t3c_data *td = container_of(work, struct t3c_data,
480 tid_release_task);
481 struct sk_buff *skb;
482 struct t3cdev *tdev = td->dev;
483
484
485 spin_lock_bh(&td->tid_release_lock);
486 while (td->tid_release_list) {
487 struct t3c_tid_entry *p = td->tid_release_list;
488
489 td->tid_release_list = (struct t3c_tid_entry *)p->ctx;
490 spin_unlock_bh(&td->tid_release_lock);
491
492 skb = alloc_skb(sizeof(struct cpl_tid_release),
493 GFP_KERNEL | __GFP_NOFAIL);
494 mk_tid_release(skb, p - td->tid_maps.tid_tab);
495 cxgb3_ofld_send(tdev, skb);
496 p->ctx = NULL;
497 spin_lock_bh(&td->tid_release_lock);
498 }
499 spin_unlock_bh(&td->tid_release_lock);
500}
501
502/* Use ctx as the next pointer in the deferred TID release list; the release task is scheduled only when the list becomes non-empty. */
503void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
504{
505 struct t3c_data *td = T3C_DATA(tdev);
506 struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];
507
508 spin_lock_bh(&td->tid_release_lock);
509 p->ctx = (void *)td->tid_release_list;
510 td->tid_release_list = p;
511 if (!p->ctx)
512 schedule_work(&td->tid_release_task);
513 spin_unlock_bh(&td->tid_release_lock);
514}
515
516EXPORT_SYMBOL(cxgb3_queue_tid_release);
517
518/*
519 * Remove a tid from the TID table. A client may defer processing its last
520 * CPL message if it is locked at the time it arrives, and while the message
521 * sits in the client's backlog the TID may be reused for another connection.
522 * To handle this we atomically switch the TID association if it still points
523 * to the original client context.
524 */
525void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
526{
527 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
528
529 BUG_ON(tid >= t->ntids);
530 if (tdev->type == T3A)
531 (void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
532 else {
533 struct sk_buff *skb;
534
535 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
536 if (likely(skb)) {
537 mk_tid_release(skb, tid);
538 cxgb3_ofld_send(tdev, skb);
539 t->tid_tab[tid].ctx = NULL;
540 } else
541 cxgb3_queue_tid_release(tdev, tid);
542 }
543 atomic_dec(&t->tids_in_use);
544}
545
546EXPORT_SYMBOL(cxgb3_remove_tid);
547
548int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
549 void *ctx)
550{
551 int atid = -1;
552 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
553
554 spin_lock_bh(&t->atid_lock);
555 if (t->afree) {
556 union active_open_entry *p = t->afree;
557
558 atid = (p - t->atid_tab) + t->atid_base;
559 t->afree = p->next;
560 p->t3c_tid.ctx = ctx;
561 p->t3c_tid.client = client;
562 t->atids_in_use++;
563 }
564 spin_unlock_bh(&t->atid_lock);
565 return atid;
566}
567
568EXPORT_SYMBOL(cxgb3_alloc_atid);
569
570int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
571 void *ctx)
572{
573 int stid = -1;
574 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
575
576 spin_lock_bh(&t->stid_lock);
577 if (t->sfree) {
578 union listen_entry *p = t->sfree;
579
580 stid = (p - t->stid_tab) + t->stid_base;
581 t->sfree = p->next;
582 p->t3c_tid.ctx = ctx;
583 p->t3c_tid.client = client;
584 t->stids_in_use++;
585 }
586 spin_unlock_bh(&t->stid_lock);
587 return stid;
588}
589
590EXPORT_SYMBOL(cxgb3_alloc_stid);
591
592static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
593{
594 struct cpl_smt_write_rpl *rpl = cplhdr(skb);
595
596 if (rpl->status != CPL_ERR_NONE)
597 printk(KERN_ERR
598 "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
599 rpl->status, GET_TID(rpl));
600
601 return CPL_RET_BUF_DONE;
602}
603
604static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
605{
606 struct cpl_l2t_write_rpl *rpl = cplhdr(skb);
607
608 if (rpl->status != CPL_ERR_NONE)
609 printk(KERN_ERR
610 "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
611 rpl->status, GET_TID(rpl));
612
613 return CPL_RET_BUF_DONE;
614}
615
616static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
617{
618 struct cpl_act_open_rpl *rpl = cplhdr(skb);
619 unsigned int atid = G_TID(ntohl(rpl->atid));
620 struct t3c_tid_entry *t3c_tid;
621
622 t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
623 if (t3c_tid->ctx && t3c_tid->client && t3c_tid->client->handlers &&
624 t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
625 return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
626 t3c_tid->
627 ctx);
628 } else {
629 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
630 dev->name, CPL_ACT_OPEN_RPL);
631 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
632 }
633}
634
635static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
636{
637 union opcode_tid *p = cplhdr(skb);
638 unsigned int stid = G_TID(ntohl(p->opcode_tid));
639 struct t3c_tid_entry *t3c_tid;
640
641 t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
642 if (t3c_tid->ctx && t3c_tid->client->handlers &&
643 t3c_tid->client->handlers[p->opcode]) {
644 return t3c_tid->client->handlers[p->opcode] (dev, skb,
645 t3c_tid->ctx);
646 } else {
647 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
648 dev->name, p->opcode);
649 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
650 }
651}
652
653static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
654{
655 union opcode_tid *p = cplhdr(skb);
656 unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
657 struct t3c_tid_entry *t3c_tid;
658
659 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
660 if (t3c_tid->ctx && t3c_tid->client->handlers &&
661 t3c_tid->client->handlers[p->opcode]) {
662 return t3c_tid->client->handlers[p->opcode]
663 (dev, skb, t3c_tid->ctx);
664 } else {
665 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
666 dev->name, p->opcode);
667 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
668 }
669}
670
671static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
672{
673 struct cpl_pass_accept_req *req = cplhdr(skb);
674 unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
675 struct t3c_tid_entry *t3c_tid;
676
677 t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
678 if (t3c_tid->ctx && t3c_tid->client->handlers &&
679 t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
680 return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
681 (dev, skb, t3c_tid->ctx);
682 } else {
683 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
684 dev->name, CPL_PASS_ACCEPT_REQ);
685 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
686 }
687}
688
689static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
690{
691 union opcode_tid *p = cplhdr(skb);
692 unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
693 struct t3c_tid_entry *t3c_tid;
694
695 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
696 if (t3c_tid->ctx && t3c_tid->client->handlers &&
697 t3c_tid->client->handlers[p->opcode]) {
698 return t3c_tid->client->handlers[p->opcode]
699 (dev, skb, t3c_tid->ctx);
700 } else {
701 struct cpl_abort_req_rss *req = cplhdr(skb);
702 struct cpl_abort_rpl *rpl;
703
704 struct sk_buff *skb =
705 alloc_skb(sizeof(struct cpl_abort_rpl), GFP_ATOMIC);
706 if (!skb) {
707				printk(KERN_ERR "do_abort_req_rss: couldn't get skb!\n");
708 goto out;
709 }
710 skb->priority = CPL_PRIORITY_DATA;
711 __skb_put(skb, sizeof(struct cpl_abort_rpl));
712 rpl = cplhdr(skb);
713 rpl->wr.wr_hi =
714 htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
715 rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
716 OPCODE_TID(rpl) =
717 htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
718 rpl->cmd = req->status;
719 cxgb3_ofld_send(dev, skb);
720out:
721 return CPL_RET_BUF_DONE;
722 }
723}
724
725static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
726{
727 struct cpl_act_establish *req = cplhdr(skb);
728 unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
729 struct t3c_tid_entry *t3c_tid;
730
731 t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
732 if (t3c_tid->ctx && t3c_tid->client->handlers &&
733 t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
734 return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
735 (dev, skb, t3c_tid->ctx);
736 } else {
737 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
738		       dev->name, CPL_ACT_ESTABLISH);
739 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
740 }
741}
742
743static int do_set_tcb_rpl(struct t3cdev *dev, struct sk_buff *skb)
744{
745 struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
746
747 if (rpl->status != CPL_ERR_NONE)
748 printk(KERN_ERR
749 "Unexpected SET_TCB_RPL status %u for tid %u\n",
750 rpl->status, GET_TID(rpl));
751 return CPL_RET_BUF_DONE;
752}
753
754static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
755{
756 struct cpl_trace_pkt *p = cplhdr(skb);
757
758 skb->protocol = 0xffff;
759 skb->dev = dev->lldev;
760 skb_pull(skb, sizeof(*p));
761 skb->mac.raw = skb->data;
762 netif_receive_skb(skb);
763 return 0;
764}
765
766static int do_term(struct t3cdev *dev, struct sk_buff *skb)
767{
768 unsigned int hwtid = ntohl(skb->priority) >> 8 & 0xfffff;
769 unsigned int opcode = G_OPCODE(ntohl(skb->csum));
770 struct t3c_tid_entry *t3c_tid;
771
772 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
773 if (t3c_tid->ctx && t3c_tid->client->handlers &&
774 t3c_tid->client->handlers[opcode]) {
775 return t3c_tid->client->handlers[opcode] (dev, skb,
776 t3c_tid->ctx);
777 } else {
778 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
779 dev->name, opcode);
780 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
781 }
782}
783
784static int nb_callback(struct notifier_block *self, unsigned long event,
785 void *ctx)
786{
787 switch (event) {
788 case (NETEVENT_NEIGH_UPDATE):{
789 cxgb_neigh_update((struct neighbour *)ctx);
790 break;
791 }
792 case (NETEVENT_PMTU_UPDATE):
793 break;
794 case (NETEVENT_REDIRECT):{
795 struct netevent_redirect *nr = ctx;
796 cxgb_redirect(nr->old, nr->new);
797 cxgb_neigh_update(nr->new->neighbour);
798 break;
799 }
800 default:
801 break;
802 }
803 return 0;
804}
805
806static struct notifier_block nb = {
807 .notifier_call = nb_callback
808};
809
810/*
811 * Process a received packet with an unknown/unexpected CPL opcode.
812 */
813static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
814{
815 printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
816 *skb->data);
817 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
818}
819
820/*
821 * Handlers for each CPL opcode
822 */
823static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];
824
825/*
826 * Add a new handler to the CPL dispatch table. A NULL handler may be supplied
827 * to unregister an existing handler.
828 */
829void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
830{
831 if (opcode < NUM_CPL_CMDS)
832 cpl_handlers[opcode] = h ? h : do_bad_cpl;
833 else
834 printk(KERN_ERR "T3C: handler registration for "
835 "opcode %x failed\n", opcode);
836}
837
838EXPORT_SYMBOL(t3_register_cpl_handler);
839
840/*
841 * T3CDEV's receive method.
842 */
843int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
844{
845 while (n--) {
846 struct sk_buff *skb = *skbs++;
847 unsigned int opcode = G_OPCODE(ntohl(skb->csum));
848 int ret = cpl_handlers[opcode] (dev, skb);
849
850#if VALIDATE_TID
851 if (ret & CPL_RET_UNKNOWN_TID) {
852 union opcode_tid *p = cplhdr(skb);
853
854 printk(KERN_ERR "%s: CPL message (opcode %u) had "
855 "unknown TID %u\n", dev->name, opcode,
856 G_TID(ntohl(p->opcode_tid)));
857 }
858#endif
859 if (ret & CPL_RET_BUF_DONE)
860 kfree_skb(skb);
861 }
862 return 0;
863}
864
865/*
866 * Sends an sk_buff to a T3C driver after dealing with any active network taps.
867 */
868int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
869{
870 int r;
871
872 local_bh_disable();
873 r = dev->send(dev, skb);
874 local_bh_enable();
875 return r;
876}
877
878EXPORT_SYMBOL(cxgb3_ofld_send);
879
880static int is_offloading(struct net_device *dev)
881{
882 struct adapter *adapter;
883 int i;
884
885 read_lock_bh(&adapter_list_lock);
886 list_for_each_entry(adapter, &adapter_list, adapter_list) {
887 for_each_port(adapter, i) {
888 if (dev == adapter->port[i]) {
889 read_unlock_bh(&adapter_list_lock);
890 return 1;
891 }
892 }
893 }
894 read_unlock_bh(&adapter_list_lock);
895 return 0;
896}
897
898void cxgb_neigh_update(struct neighbour *neigh)
899{
900 struct net_device *dev = neigh->dev;
901
902 if (dev && (is_offloading(dev))) {
903 struct t3cdev *tdev = T3CDEV(dev);
904
905 BUG_ON(!tdev);
906 t3_l2t_update(tdev, neigh);
907 }
908}
909
910static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
911{
912 struct sk_buff *skb;
913 struct cpl_set_tcb_field *req;
914
915 skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
916 if (!skb) {
917 printk(KERN_ERR "%s: cannot allocate skb!\n", __FUNCTION__);
918 return;
919 }
920 skb->priority = CPL_PRIORITY_CONTROL;
921 req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
922 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
923 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
924 req->reply = 0;
925 req->cpu_idx = 0;
926 req->word = htons(W_TCB_L2T_IX);
927 req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
928 req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
929 tdev->send(tdev, skb);
930}
931
932void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
933{
934 struct net_device *olddev, *newdev;
935 struct tid_info *ti;
936 struct t3cdev *tdev;
937 u32 tid;
938 int update_tcb;
939 struct l2t_entry *e;
940 struct t3c_tid_entry *te;
941
942 olddev = old->neighbour->dev;
943 newdev = new->neighbour->dev;
944 if (!is_offloading(olddev))
945 return;
946 if (!is_offloading(newdev)) {
947		printk(KERN_WARNING "%s: Redirect to non-offload "
948		       "device ignored.\n", __FUNCTION__);
949 return;
950 }
951 tdev = T3CDEV(olddev);
952 BUG_ON(!tdev);
953 if (tdev != T3CDEV(newdev)) {
954 printk(KERN_WARNING "%s: Redirect to different "
955 "offload device ignored.\n", __FUNCTION__);
956 return;
957 }
958
959 /* Add new L2T entry */
960 e = t3_l2t_get(tdev, new->neighbour, newdev);
961 if (!e) {
962 printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
963 __FUNCTION__);
964 return;
965 }
966
967 /* Walk tid table and notify clients of dst change. */
968 ti = &(T3C_DATA(tdev))->tid_maps;
969 for (tid = 0; tid < ti->ntids; tid++) {
970 te = lookup_tid(ti, tid);
971 BUG_ON(!te);
972 if (te->ctx && te->client && te->client->redirect) {
973 update_tcb = te->client->redirect(te->ctx, old, new, e);
974 if (update_tcb) {
975 l2t_hold(L2DATA(tdev), e);
976 set_l2t_ix(tdev, tid, e);
977 }
978 }
979 }
980 l2t_release(L2DATA(tdev), e);
981}
982
983/*
984 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
985 * The allocated memory is cleared.
986 */
987void *cxgb_alloc_mem(unsigned long size)
988{
989 void *p = kmalloc(size, GFP_KERNEL);
990
991 if (!p)
992 p = vmalloc(size);
993 if (p)
994 memset(p, 0, size);
995 return p;
996}
997
998/*
999 * Free memory allocated through cxgb_alloc_mem().
1000 */
1001void cxgb_free_mem(void *addr)
1002{
1003 unsigned long p = (unsigned long)addr;
1004
1005 if (p >= VMALLOC_START && p < VMALLOC_END)
1006 vfree(addr);
1007 else
1008 kfree(addr);
1009}
1010
1011/*
1012 * Allocate and initialize the TID tables. Returns 0 on success.
1013 */
1014static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
1015 unsigned int natids, unsigned int nstids,
1016 unsigned int atid_base, unsigned int stid_base)
1017{
1018 unsigned long size = ntids * sizeof(*t->tid_tab) +
1019 natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);
1020
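	/* A single allocation backs all three tables: tids, then stids, then atids. */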
1021 t->tid_tab = cxgb_alloc_mem(size);
1022 if (!t->tid_tab)
1023 return -ENOMEM;
1024
1025 t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
1026 t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
1027 t->ntids = ntids;
1028 t->nstids = nstids;
1029 t->stid_base = stid_base;
1030 t->sfree = NULL;
1031 t->natids = natids;
1032 t->atid_base = atid_base;
1033 t->afree = NULL;
1034 t->stids_in_use = t->atids_in_use = 0;
1035 atomic_set(&t->tids_in_use, 0);
1036 spin_lock_init(&t->stid_lock);
1037 spin_lock_init(&t->atid_lock);
1038
1039 /*
1040 * Setup the free lists for stid_tab and atid_tab.
1041 */
1042 if (nstids) {
1043 while (--nstids)
1044 t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
1045 t->sfree = t->stid_tab;
1046 }
1047 if (natids) {
1048 while (--natids)
1049 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
1050 t->afree = t->atid_tab;
1051 }
1052 return 0;
1053}
1054
1055static void free_tid_maps(struct tid_info *t)
1056{
1057 cxgb_free_mem(t->tid_tab);
1058}
1059
1060static inline void add_adapter(struct adapter *adap)
1061{
1062 write_lock_bh(&adapter_list_lock);
1063 list_add_tail(&adap->adapter_list, &adapter_list);
1064 write_unlock_bh(&adapter_list_lock);
1065}
1066
1067static inline void remove_adapter(struct adapter *adap)
1068{
1069 write_lock_bh(&adapter_list_lock);
1070 list_del(&adap->adapter_list);
1071 write_unlock_bh(&adapter_list_lock);
1072}
1073
1074int cxgb3_offload_activate(struct adapter *adapter)
1075{
1076 struct t3cdev *dev = &adapter->tdev;
1077 int natids, err;
1078 struct t3c_data *t;
1079 struct tid_range stid_range, tid_range;
1080 struct mtutab mtutab;
1081 unsigned int l2t_capacity;
1082
1083	t = kzalloc(sizeof(*t), GFP_KERNEL);
1084 if (!t)
1085 return -ENOMEM;
1086
1087 err = -EOPNOTSUPP;
1088 if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
1089 dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
1090 dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
1091 dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
1092 dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
1093 dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
1094 goto out_free;
1095
1096 err = -ENOMEM;
1097 L2DATA(dev) = t3_init_l2t(l2t_capacity);
1098 if (!L2DATA(dev))
1099 goto out_free;
1100
1101 natids = min(tid_range.num / 2, MAX_ATIDS);
1102 err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
1103 stid_range.num, ATID_BASE, stid_range.base);
1104 if (err)
1105 goto out_free_l2t;
1106
1107 t->mtus = mtutab.mtus;
1108 t->nmtus = mtutab.size;
1109
1110 INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
1111 spin_lock_init(&t->tid_release_lock);
1112 INIT_LIST_HEAD(&t->list_node);
1113 t->dev = dev;
1114
1115 T3C_DATA(dev) = t;
1116 dev->recv = process_rx;
1117 dev->neigh_update = t3_l2t_update;
1118
1119 /* Register netevent handler once */
1120 if (list_empty(&adapter_list))
1121 register_netevent_notifier(&nb);
1122
1123 add_adapter(adapter);
1124 return 0;
1125
1126out_free_l2t:
1127 t3_free_l2t(L2DATA(dev));
1128 L2DATA(dev) = NULL;
1129out_free:
1130 kfree(t);
1131 return err;
1132}
1133
1134void cxgb3_offload_deactivate(struct adapter *adapter)
1135{
1136 struct t3cdev *tdev = &adapter->tdev;
1137 struct t3c_data *t = T3C_DATA(tdev);
1138
1139 remove_adapter(adapter);
1140 if (list_empty(&adapter_list))
1141 unregister_netevent_notifier(&nb);
1142
1143 free_tid_maps(&t->tid_maps);
1144 T3C_DATA(tdev) = NULL;
1145 t3_free_l2t(L2DATA(tdev));
1146 L2DATA(tdev) = NULL;
1147 kfree(t);
1148}
1149
1150static inline void register_tdev(struct t3cdev *tdev)
1151{
1152 static int unit;
1153
1154 mutex_lock(&cxgb3_db_lock);
1155 snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
1156 list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
1157 mutex_unlock(&cxgb3_db_lock);
1158}
1159
1160static inline void unregister_tdev(struct t3cdev *tdev)
1161{
1162 mutex_lock(&cxgb3_db_lock);
1163 list_del(&tdev->ofld_dev_list);
1164 mutex_unlock(&cxgb3_db_lock);
1165}
1166
1167void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
1168{
1169 struct t3cdev *tdev = &adapter->tdev;
1170
1171 INIT_LIST_HEAD(&tdev->ofld_dev_list);
1172
1173 cxgb3_set_dummy_ops(tdev);
1174 tdev->send = t3_offload_tx;
1175 tdev->ctl = cxgb_offload_ctl;
1176 tdev->type = adapter->params.rev == 0 ? T3A : T3B;
1177
1178 register_tdev(tdev);
1179}
1180
1181void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
1182{
1183 struct t3cdev *tdev = &adapter->tdev;
1184
1185 tdev->recv = NULL;
1186 tdev->neigh_update = NULL;
1187
1188 unregister_tdev(tdev);
1189}
1190
1191void __init cxgb3_offload_init(void)
1192{
1193 int i;
1194
1195 for (i = 0; i < NUM_CPL_CMDS; ++i)
1196 cpl_handlers[i] = do_bad_cpl;
1197
1198 t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
1199 t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
1200 t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
1201 t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
1202 t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
1203 t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
1204 t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
1205 t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
1206 t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
1207 t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
1208 t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
1209 t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
1210 t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
1211 t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
1212 t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
1213 t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
1214 t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
1215 t3_register_cpl_handler(CPL_SET_TCB_RPL, do_set_tcb_rpl);
1216 t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
1217 t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
1218 t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
1219 t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
1220 t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
1221 t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
1222}
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
new file mode 100644
index 000000000000..0e6beb69ba17
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -0,0 +1,193 @@
1/*
2 * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _CXGB3_OFFLOAD_H
34#define _CXGB3_OFFLOAD_H
35
36#include <linux/list.h>
37#include <linux/skbuff.h>
38
39#include "l2t.h"
40
41#include "t3cdev.h"
42#include "t3_cpl.h"
43
44struct adapter;
45
46void cxgb3_offload_init(void);
47
48void cxgb3_adapter_ofld(struct adapter *adapter);
49void cxgb3_adapter_unofld(struct adapter *adapter);
50int cxgb3_offload_activate(struct adapter *adapter);
51void cxgb3_offload_deactivate(struct adapter *adapter);
52
53void cxgb3_set_dummy_ops(struct t3cdev *dev);
54
55/*
56 * Client registration. Users of the T3 driver must register themselves.
57 * The T3 driver will call the add function of every client for each T3
58 * adapter activated, passing up the t3cdev ptr. Each client fills out an
59 * array of callback functions to process CPL messages.
60 */
61
62void cxgb3_register_client(struct cxgb3_client *client);
63void cxgb3_unregister_client(struct cxgb3_client *client);
64void cxgb3_add_clients(struct t3cdev *tdev);
65void cxgb3_remove_clients(struct t3cdev *tdev);
66
67typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev,
68 struct sk_buff *skb, void *ctx);
69
70struct cxgb3_client {
71 char *name;
72 void (*add) (struct t3cdev *);
73 void (*remove) (struct t3cdev *);
74 cxgb3_cpl_handler_func *handlers;
75 int (*redirect)(void *ctx, struct dst_entry *old,
76 struct dst_entry *new, struct l2t_entry *l2t);
77 struct list_head client_list;
78};
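
/*
 * Illustrative sketch (all names assumed): a minimal client that just
 * logs device activation. Real clients also supply a CPL handler table
 * through .handlers.
 */
static void example_add(struct t3cdev *tdev)
{
	printk(KERN_INFO "offload device %s is up\n", tdev->name);
}

static struct cxgb3_client example_client = {
	.name = "example",
	.add = example_add,
};

/* Registered with cxgb3_register_client(&example_client) at module init. */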
79
80/*
81 * TID allocation services.
82 */
83int cxgb3_alloc_atid(struct t3cdev *dev, struct cxgb3_client *client,
84 void *ctx);
85int cxgb3_alloc_stid(struct t3cdev *dev, struct cxgb3_client *client,
86 void *ctx);
87void *cxgb3_free_atid(struct t3cdev *dev, int atid);
88void cxgb3_free_stid(struct t3cdev *dev, int stid);
89void cxgb3_insert_tid(struct t3cdev *dev, struct cxgb3_client *client,
90 void *ctx, unsigned int tid);
91void cxgb3_queue_tid_release(struct t3cdev *dev, unsigned int tid);
92void cxgb3_remove_tid(struct t3cdev *dev, void *ctx, unsigned int tid);
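
/*
 * Illustrative lifecycle sketch (names assumed): an active open first takes
 * an atid; once the connection is established the client binds the hardware
 * tid with cxgb3_insert_tid() and recycles the atid with cxgb3_free_atid().
 */
static inline int example_start_active_open(struct t3cdev *dev,
					    struct cxgb3_client *client,
					    void *conn_ctx)
{
	int atid = cxgb3_alloc_atid(dev, client, conn_ctx);

	if (atid < 0)
		return -ENOMEM;	/* atid table exhausted */
	/* ... build and send a CPL active-open request carrying atid ... */
	return atid;
}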
93
94struct t3c_tid_entry {
95 struct cxgb3_client *client;
96 void *ctx;
97};
98
99/* CPL message priority levels */
100enum {
101 CPL_PRIORITY_DATA = 0, /* data messages */
102 CPL_PRIORITY_SETUP = 1, /* connection setup messages */
103 CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */
104 CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */
105 CPL_PRIORITY_ACK = 1, /* RX ACK messages */
106 CPL_PRIORITY_CONTROL = 1 /* offload control messages */
107};
108
109/* Flags for return value of CPL message handlers */
110enum {
111 CPL_RET_BUF_DONE = 1, /* buffer processing done, buffer may be freed */
112 CPL_RET_BAD_MSG = 2, /* bad CPL message (e.g., unknown opcode) */
113 CPL_RET_UNKNOWN_TID = 4 /* unexpected unknown TID */
114};
115
116typedef int (*cpl_handler_func)(struct t3cdev *dev, struct sk_buff *skb);
117
118/*
119 * Returns a pointer to the first byte of the CPL header in an sk_buff that
120 * contains a CPL message.
121 */
122static inline void *cplhdr(struct sk_buff *skb)
123{
124 return skb->data;
125}
126
127void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h);
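
/*
 * Illustrative sketch (handler name assumed): a trivial CPL handler that
 * consumes the message and lets the dispatcher free the buffer.
 */
static inline int example_cpl_handler(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);

	printk(KERN_DEBUG "%s: CPL for tid %u\n", dev->name,
	       G_TID(ntohl(p->opcode_tid)));
	return CPL_RET_BUF_DONE;
}

/* Installed with, e.g., t3_register_cpl_handler(CPL_RX_DATA, example_cpl_handler). */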
128
129union listen_entry {
130 struct t3c_tid_entry t3c_tid;
131 union listen_entry *next;
132};
133
134union active_open_entry {
135 struct t3c_tid_entry t3c_tid;
136 union active_open_entry *next;
137};
138
139/*
140 * Holds the size, base address, free list start, etc. of the TID, server TID,
141 * and active-open TID tables for an offload device.
142 * The tables themselves are allocated dynamically.
143 */
144struct tid_info {
145 struct t3c_tid_entry *tid_tab;
146 unsigned int ntids;
147 atomic_t tids_in_use;
148
149 union listen_entry *stid_tab;
150 unsigned int nstids;
151 unsigned int stid_base;
152
153 union active_open_entry *atid_tab;
154 unsigned int natids;
155 unsigned int atid_base;
156
157 /*
158 * The following members are accessed R/W so we put them in their own
159 * cache lines.
160 *
161 * XXX We could combine the atid fields above with the lock here since
162 * atids are used once (unlike other tids). OTOH the above fields are
163 * usually in cache due to tid_tab.
164 */
165 spinlock_t atid_lock ____cacheline_aligned_in_smp;
166 union active_open_entry *afree;
167 unsigned int atids_in_use;
168
169 spinlock_t stid_lock ____cacheline_aligned;
170 union listen_entry *sfree;
171 unsigned int stids_in_use;
172};
173
174struct t3c_data {
175 struct list_head list_node;
176 struct t3cdev *dev;
177 unsigned int tx_max_chunk; /* max payload for TX_DATA */
178 unsigned int max_wrs; /* max in-flight WRs per connection */
179 unsigned int nmtus;
180 const unsigned short *mtus;
181 struct tid_info tid_maps;
182
183 struct t3c_tid_entry *tid_release_list;
184 spinlock_t tid_release_lock;
185 struct work_struct tid_release_task;
186};
187
188/*
189 * t3cdev -> t3c_data accessor; the state pointer lives in the t3cdev's l4opt slot.
190 */
191#define T3C_DATA(dev) (*(struct t3c_data **)&(dev)->l4opt)
192
193#endif
diff --git a/drivers/net/cxgb3/firmware_exports.h b/drivers/net/cxgb3/firmware_exports.h
new file mode 100644
index 000000000000..6a835f6a262a
--- /dev/null
+++ b/drivers/net/cxgb3/firmware_exports.h
@@ -0,0 +1,177 @@
1/*
2 * Copyright (c) 2004-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef _FIRMWARE_EXPORTS_H_
33#define _FIRMWARE_EXPORTS_H_
34
35/* WR OPCODES supported by the firmware.
36 */
37#define FW_WROPCODE_FORWARD 0x01
38#define FW_WROPCODE_BYPASS 0x05
39
40#define FW_WROPCODE_TUNNEL_TX_PKT 0x03
41
42#define FW_WROPCODE_ULPTX_DATA_SGL 0x00
43#define FW_WROPCODE_ULPTX_MEM_READ 0x02
44#define FW_WROPCODE_ULPTX_PKT 0x04
45#define FW_WROPCODE_ULPTX_INVALIDATE 0x06
46
47#define FW_WROPCODE_TUNNEL_RX_PKT 0x07
48
49#define FW_WROPCODE_OFLD_GETTCB_RPL 0x08
50#define FW_WROPCODE_OFLD_CLOSE_CON 0x09
51#define FW_WROPCODE_OFLD_TP_ABORT_CON_REQ 0x0A
52#define FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL 0x0F
53#define FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ 0x0B
54#define FW_WROPCODE_OFLD_TP_ABORT_CON_RPL 0x0C
55#define FW_WROPCODE_OFLD_TX_DATA 0x0D
56#define FW_WROPCODE_OFLD_TX_DATA_ACK 0x0E
57
58#define FW_WROPCODE_RI_RDMA_INIT 0x10
59#define FW_WROPCODE_RI_RDMA_WRITE 0x11
60#define FW_WROPCODE_RI_RDMA_READ_REQ 0x12
61#define FW_WROPCODE_RI_RDMA_READ_RESP 0x13
62#define FW_WROPCODE_RI_SEND 0x14
63#define FW_WROPCODE_RI_TERMINATE 0x15
64#define FW_WROPCODE_RI_RDMA_READ 0x16
65#define FW_WROPCODE_RI_RECEIVE 0x17
66#define FW_WROPCODE_RI_BIND_MW 0x18
67#define FW_WROPCODE_RI_FASTREGISTER_MR 0x19
68#define FW_WROPCODE_RI_LOCAL_INV 0x1A
69#define FW_WROPCODE_RI_MODIFY_QP 0x1B
70#define FW_WROPCODE_RI_BYPASS 0x1C
71
72#define FW_WROPCODE_RSVD 0x1E
73
74#define FW_WROPCODE_SGE_EGRESSCONTEXT_RR 0x1F
75
76#define FW_WROPCODE_MNGT 0x1D
77#define FW_MNGTOPCODE_PKTSCHED_SET 0x00
78
79/* Maximum size of a WR sent from the host, limited by the SGE.
80 *
81 * Note: WRs coming from ULP or TP are limited only by the CIM.
82 */
83#define FW_WR_SIZE 128
84
85/* Maximum number of outstanding WRs sent from the host. Value must be
86 * programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by
87 * offload modules to limit the number of WRs per connection.
88 */
89#define FW_T3_WR_NUM 16
90#define FW_N3_WR_NUM 7
91
92#ifndef N3
93# define FW_WR_NUM FW_T3_WR_NUM
94#else
95# define FW_WR_NUM FW_N3_WR_NUM
96#endif
97
98/* FW_TUNNEL_NUM corresponds to the number of supported TUNNEL Queues. These
99 * queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START, and their
100 * 'TIDs' (or 'uP Tokens') must start at FW_TUNNEL_TID_START.
101 *
102 * Ingress Traffic (e.g. DMA completion credit) for TUNNEL Queue[i] is sent
103 * to RESP Queue[i].
104 */
105#define FW_TUNNEL_NUM 8
106#define FW_TUNNEL_SGEEC_START 8
107#define FW_TUNNEL_TID_START 65544
108
109/* FW_CTRL_NUM corresponds to the number of supported CTRL Queues. These queues
110 * must start at SGE Egress Context FW_CTRL_SGEEC_START, and their 'TIDs'
111 * (or 'uP Tokens') must start at FW_CTRL_TID_START.
112 *
113 * Ingress Traffic for CTRL Queue[i] is sent to RESP Queue[i].
114 */
115#define FW_CTRL_NUM 8
116#define FW_CTRL_SGEEC_START 65528
117#define FW_CTRL_TID_START 65536
118
119/* FW_OFLD_NUM corresponds to the number of supported OFFLOAD Queues. These
120 * queues must start at SGE Egress Context FW_OFLD_SGEEC_START.
121 *
122 * Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for
123 * OFFLOAD Queues, as the host is responsible for providing the correct TID in
124 * every WR.
125 *
126 * Ingress Traffic for OFFLOAD Queue[i] is sent to RESP Queue[i].
127 */
128#define FW_OFLD_NUM 8
129#define FW_OFLD_SGEEC_START 0
130
131/* FW_RI_NUM corresponds to the number of supported RDMA Interface (RI) Queues,
132 * starting at SGE Egress Context FW_RI_SGEEC_START and 'TID' FW_RI_TID_START.
133 */
134#define FW_RI_NUM 1
135#define FW_RI_SGEEC_START 65527
136#define FW_RI_TID_START 65552
137
138/*
140 * The RX_PKT TID, used for tunneled RX packets; starts at FW_RX_PKT_TID_START.
140 */
141#define FW_RX_PKT_NUM 1
142#define FW_RX_PKT_TID_START 65553
143
144/* FW_WRC_NUM corresponds to the number of Work Request Contexts supported
145 * by the firmware (65554 with the values defined above).
146 */
147#define FW_WRC_NUM \
148 (65536 + FW_TUNNEL_NUM + FW_CTRL_NUM + FW_RI_NUM + FW_RX_PKT_NUM)
149
150/*
151 * FW type and version.
152 */
153#define S_FW_VERSION_TYPE 28
154#define M_FW_VERSION_TYPE 0xF
155#define V_FW_VERSION_TYPE(x) ((x) << S_FW_VERSION_TYPE)
156#define G_FW_VERSION_TYPE(x) \
157 (((x) >> S_FW_VERSION_TYPE) & M_FW_VERSION_TYPE)
158
159#define S_FW_VERSION_MAJOR 16
160#define M_FW_VERSION_MAJOR 0xFFF
161#define V_FW_VERSION_MAJOR(x) ((x) << S_FW_VERSION_MAJOR)
162#define G_FW_VERSION_MAJOR(x) \
163 (((x) >> S_FW_VERSION_MAJOR) & M_FW_VERSION_MAJOR)
164
165#define S_FW_VERSION_MINOR 8
166#define M_FW_VERSION_MINOR 0xFF
167#define V_FW_VERSION_MINOR(x) ((x) << S_FW_VERSION_MINOR)
168#define G_FW_VERSION_MINOR(x) \
169 (((x) >> S_FW_VERSION_MINOR) & M_FW_VERSION_MINOR)
170
171#define S_FW_VERSION_MICRO 0
172#define M_FW_VERSION_MICRO 0xFF
173#define V_FW_VERSION_MICRO(x) ((x) << S_FW_VERSION_MICRO)
174#define G_FW_VERSION_MICRO(x) \
175 (((x) >> S_FW_VERSION_MICRO) & M_FW_VERSION_MICRO)
176
177#endif /* _FIRMWARE_EXPORTS_H_ */
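
Throughout these headers each field is described by the same quadruple:
S_ (shift), M_ (mask), V_ (insert a value) and G_ (extract a value). A
small self-contained sketch, using an arbitrary 3.2.1 version, shows the
round trip through the macros above:

	#include <assert.h>

	static void fw_version_example(void)
	{
		unsigned int v = V_FW_VERSION_MAJOR(3) |
				 V_FW_VERSION_MINOR(2) |
				 V_FW_VERSION_MICRO(1);

		assert(G_FW_VERSION_MAJOR(v) == 3);	/* bits 16-27 */
		assert(G_FW_VERSION_MINOR(v) == 2);	/* bits 8-15  */
		assert(G_FW_VERSION_MICRO(v) == 1);	/* bits 0-7   */
	}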
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
new file mode 100644
index 000000000000..3c0cb8557058
--- /dev/null
+++ b/drivers/net/cxgb3/l2t.c
@@ -0,0 +1,450 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <linux/if.h>
36#include <linux/if_vlan.h>
37#include <linux/jhash.h>
38#include <net/neighbour.h>
39#include "common.h"
40#include "t3cdev.h"
41#include "cxgb3_defs.h"
42#include "l2t.h"
43#include "t3_cpl.h"
44#include "firmware_exports.h"
45
46#define VLAN_NONE 0xfff
47
48/*
49 * Module locking notes: There is a RW lock protecting the L2 table as a
50 * whole plus a spinlock per L2T entry. Entry lookups and allocations happen
51 * under the protection of the table lock, individual entry changes happen
52 * while holding that entry's spinlock. The table lock nests outside the
53 * entry locks. Allocations of new entries take the table lock as writers so
54 * no other lookups can happen while allocating new entries. Entry updates
55 * take the table lock as readers so multiple entries can be updated in
56 * parallel. An L2T entry can be dropped by decrementing its reference count
57 * and therefore can happen in parallel with entry allocation but no entry
58 * can change state or increment its ref count during allocation as both of
59 * these perform lookups.
60 */
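
The nesting described above, summarized as a quick reference (a sketch of
intent, not new code):

	/*
	 *   write_lock_bh(&d->lock)     allocate a new entry
	 *   read_lock_bh(&d->lock)      look up / update entries
	 *       spin_lock(&e->lock)     change one entry's state
	 *
	 * The table lock always nests outside the entry locks.
	 */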
61
62static inline unsigned int vlan_prio(const struct l2t_entry *e)
63{
64 return e->vlan >> 13;
65}
66
67static inline unsigned int arp_hash(u32 key, int ifindex,
68 const struct l2t_data *d)
69{
70 return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
71}
72
73static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
74{
75 neigh_hold(n);
76 if (e->neigh)
77 neigh_release(e->neigh);
78 e->neigh = n;
79}
80
81/*
82 * Set up an L2T entry and send any packets waiting in the arp queue. The
83 * supplied skb is used for the CPL_L2T_WRITE_REQ. Must be called with the
84 * entry locked.
85 */
86static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
87 struct l2t_entry *e)
88{
89 struct cpl_l2t_write_req *req;
90
91 if (!skb) {
92 skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
93 if (!skb)
94 return -ENOMEM;
95 }
96
97 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
98 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
99 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
100 req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
101 V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
102 V_L2T_W_PRIO(vlan_prio(e)));
103 memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
104 memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
105 skb->priority = CPL_PRIORITY_CONTROL;
106 cxgb3_ofld_send(dev, skb);
107 while (e->arpq_head) {
108 skb = e->arpq_head;
109 e->arpq_head = skb->next;
110 skb->next = NULL;
111 cxgb3_ofld_send(dev, skb);
112 }
113 e->arpq_tail = NULL;
114 e->state = L2T_STATE_VALID;
115
116 return 0;
117}
118
119/*
120 * Add a packet to an L2T entry's queue of packets awaiting resolution.
121 * Must be called with the entry's lock held.
122 */
123static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
124{
125 skb->next = NULL;
126 if (e->arpq_head)
127 e->arpq_tail->next = skb;
128 else
129 e->arpq_head = skb;
130 e->arpq_tail = skb;
131}
132
133int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
134 struct l2t_entry *e)
135{
136again:
137 switch (e->state) {
138 case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
139 neigh_event_send(e->neigh, NULL);
140 spin_lock_bh(&e->lock);
141 if (e->state == L2T_STATE_STALE)
142 e->state = L2T_STATE_VALID;
143		spin_unlock_bh(&e->lock);	/* fall through */
144 case L2T_STATE_VALID: /* fast-path, send the packet on */
145 return cxgb3_ofld_send(dev, skb);
146 case L2T_STATE_RESOLVING:
147 spin_lock_bh(&e->lock);
148 if (e->state != L2T_STATE_RESOLVING) {
149 /* ARP already completed */
150 spin_unlock_bh(&e->lock);
151 goto again;
152 }
153 arpq_enqueue(e, skb);
154 spin_unlock_bh(&e->lock);
155
156 /*
157 * Only the first packet added to the arpq should kick off
158 * resolution. However, because the alloc_skb below can fail,
159 * we allow each packet added to the arpq to retry resolution
160 * as a way of recovering from transient memory exhaustion.
161 * A better way would be to use a work request to retry L2T
162 * entries when there's no memory.
163 */
164 if (!neigh_event_send(e->neigh, NULL)) {
165 skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
166 GFP_ATOMIC);
167 if (!skb)
168 break;
169
170 spin_lock_bh(&e->lock);
171 if (e->arpq_head)
172 setup_l2e_send_pending(dev, skb, e);
173 else /* we lost the race */
174 __kfree_skb(skb);
175 spin_unlock_bh(&e->lock);
176 }
177 }
178 return 0;
179}
180
181EXPORT_SYMBOL(t3_l2t_send_slow);
182
183void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
184{
185again:
186 switch (e->state) {
187 case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
188 neigh_event_send(e->neigh, NULL);
189 spin_lock_bh(&e->lock);
190 if (e->state == L2T_STATE_STALE) {
191 e->state = L2T_STATE_VALID;
192 }
193 spin_unlock_bh(&e->lock);
194 return;
195 case L2T_STATE_VALID: /* fast-path, send the packet on */
196 return;
197 case L2T_STATE_RESOLVING:
198 spin_lock_bh(&e->lock);
199 if (e->state != L2T_STATE_RESOLVING) {
200 /* ARP already completed */
201 spin_unlock_bh(&e->lock);
202 goto again;
203 }
204 spin_unlock_bh(&e->lock);
205
206 /*
207 * Only the first packet added to the arpq should kick off
208 * resolution. However, because the alloc_skb below can fail,
209 * we allow each packet added to the arpq to retry resolution
210 * as a way of recovering from transient memory exhaustion.
211 * A better way would be to use a work request to retry L2T
212 * entries when there's no memory.
213 */
214 neigh_event_send(e->neigh, NULL);
215 }
216 return;
217}
218
219EXPORT_SYMBOL(t3_l2t_send_event);
220
221/*
222 * Allocate a free L2T entry. Must be called with l2t_data.lock held for writing.
223 */
224static struct l2t_entry *alloc_l2e(struct l2t_data *d)
225{
226 struct l2t_entry *end, *e, **p;
227
228 if (!atomic_read(&d->nfree))
229 return NULL;
230
231 /* there's definitely a free entry */
232 for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
233 if (atomic_read(&e->refcnt) == 0)
234 goto found;
235
236 for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e) ;
237found:
238 d->rover = e + 1;
239 atomic_dec(&d->nfree);
240
241 /*
242 * The entry we found may be an inactive entry that is
243 * presently in the hash table. We need to remove it.
244 */
245 if (e->state != L2T_STATE_UNUSED) {
246 int hash = arp_hash(e->addr, e->ifindex, d);
247
248 for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
249 if (*p == e) {
250 *p = e->next;
251 break;
252 }
253 e->state = L2T_STATE_UNUSED;
254 }
255 return e;
256}
257
258/*
259 * Called when an L2T entry has no more users. The entry is left in the hash
260 * table since it is likely to be reused but we also bump nfree to indicate
261 * that the entry can be reallocated for a different neighbor. We also drop
262 * the existing neighbor reference in case the neighbor is going away and is
263 * waiting on our reference.
264 *
265 * Because entries can be reallocated to other neighbors once their ref count
266 * drops to 0 we need to take the entry's lock to avoid races with a new
267 * incarnation.
268 */
269void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
270{
271 spin_lock_bh(&e->lock);
272 if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
273 if (e->neigh) {
274 neigh_release(e->neigh);
275 e->neigh = NULL;
276 }
277 }
278 spin_unlock_bh(&e->lock);
279 atomic_inc(&d->nfree);
280}
281
282EXPORT_SYMBOL(t3_l2e_free);
283
284/*
285 * Update an L2T entry that was previously used for the same next hop as neigh.
286 * Must be called with softirqs disabled.
287 */
288static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
289{
290 unsigned int nud_state;
291
292	spin_lock(&e->lock);	/* avoid race with t3_l2e_free */
293
294 if (neigh != e->neigh)
295 neigh_replace(e, neigh);
296 nud_state = neigh->nud_state;
297 if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
298 !(nud_state & NUD_VALID))
299 e->state = L2T_STATE_RESOLVING;
300 else if (nud_state & NUD_CONNECTED)
301 e->state = L2T_STATE_VALID;
302 else
303 e->state = L2T_STATE_STALE;
304 spin_unlock(&e->lock);
305}
306
307struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
308 struct net_device *dev)
309{
310 struct l2t_entry *e;
311 struct l2t_data *d = L2DATA(cdev);
312 u32 addr = *(u32 *) neigh->primary_key;
313 int ifidx = neigh->dev->ifindex;
314 int hash = arp_hash(addr, ifidx, d);
315 struct port_info *p = netdev_priv(dev);
316 int smt_idx = p->port_id;
317
318 write_lock_bh(&d->lock);
319 for (e = d->l2tab[hash].first; e; e = e->next)
320 if (e->addr == addr && e->ifindex == ifidx &&
321 e->smt_idx == smt_idx) {
322 l2t_hold(d, e);
323 if (atomic_read(&e->refcnt) == 1)
324 reuse_entry(e, neigh);
325 goto done;
326 }
327
328 /* Need to allocate a new entry */
329 e = alloc_l2e(d);
330 if (e) {
331		spin_lock(&e->lock);	/* avoid race with t3_l2e_free */
332 e->next = d->l2tab[hash].first;
333 d->l2tab[hash].first = e;
334 e->state = L2T_STATE_RESOLVING;
335 e->addr = addr;
336 e->ifindex = ifidx;
337 e->smt_idx = smt_idx;
338 atomic_set(&e->refcnt, 1);
339 neigh_replace(e, neigh);
340 if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
341 e->vlan = VLAN_DEV_INFO(neigh->dev)->vlan_id;
342 else
343 e->vlan = VLAN_NONE;
344 spin_unlock(&e->lock);
345 }
346done:
347 write_unlock_bh(&d->lock);
348 return e;
349}
350
351EXPORT_SYMBOL(t3_l2t_get);
352
353/*
354 * Called when address resolution fails for an L2T entry to handle packets
355 * on the arpq head. If a packet specifies a failure handler it is invoked,
356 * otherwise the packet is sent to the offload device.
357 *
358 * XXX: maybe we should abandon the latter behavior and just require a failure
359 * handler.
360 */
361static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff *arpq)
362{
363 while (arpq) {
364 struct sk_buff *skb = arpq;
365 struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
366
367 arpq = skb->next;
368 skb->next = NULL;
369 if (cb->arp_failure_handler)
370 cb->arp_failure_handler(dev, skb);
371 else
372 cxgb3_ofld_send(dev, skb);
373 }
374}
375
376/*
377 * Called when the host's ARP layer makes a change to some entry that is
378 * loaded into the HW L2 table.
379 */
380void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
381{
382 struct l2t_entry *e;
383 struct sk_buff *arpq = NULL;
384 struct l2t_data *d = L2DATA(dev);
385 u32 addr = *(u32 *) neigh->primary_key;
386 int ifidx = neigh->dev->ifindex;
387 int hash = arp_hash(addr, ifidx, d);
388
389 read_lock_bh(&d->lock);
390 for (e = d->l2tab[hash].first; e; e = e->next)
391 if (e->addr == addr && e->ifindex == ifidx) {
392 spin_lock(&e->lock);
393 goto found;
394 }
395 read_unlock_bh(&d->lock);
396 return;
397
398found:
399 read_unlock(&d->lock);
400 if (atomic_read(&e->refcnt)) {
401 if (neigh != e->neigh)
402 neigh_replace(e, neigh);
403
404 if (e->state == L2T_STATE_RESOLVING) {
405 if (neigh->nud_state & NUD_FAILED) {
406 arpq = e->arpq_head;
407 e->arpq_head = e->arpq_tail = NULL;
408 } else if (neigh_is_connected(neigh))
409 setup_l2e_send_pending(dev, NULL, e);
410 } else {
411 e->state = neigh_is_connected(neigh) ?
412 L2T_STATE_VALID : L2T_STATE_STALE;
413 if (memcmp(e->dmac, neigh->ha, 6))
414 setup_l2e_send_pending(dev, NULL, e);
415 }
416 }
417 spin_unlock_bh(&e->lock);
418
419 if (arpq)
420 handle_failed_resolution(dev, arpq);
421}
422
423struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
424{
425 struct l2t_data *d;
426 int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
427
428 d = cxgb_alloc_mem(size);
429 if (!d)
430 return NULL;
431
432 d->nentries = l2t_capacity;
433 d->rover = &d->l2tab[1]; /* entry 0 is not used */
434 atomic_set(&d->nfree, l2t_capacity - 1);
435 rwlock_init(&d->lock);
436
437 for (i = 0; i < l2t_capacity; ++i) {
438 d->l2tab[i].idx = i;
439 d->l2tab[i].state = L2T_STATE_UNUSED;
440 spin_lock_init(&d->l2tab[i].lock);
441 atomic_set(&d->l2tab[i].refcnt, 0);
442 }
443 return d;
444}
445
446void t3_free_l2t(struct l2t_data *d)
447{
448 cxgb_free_mem(d);
449}
450
diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
new file mode 100644
index 000000000000..ba5d2cbd7241
--- /dev/null
+++ b/drivers/net/cxgb3/l2t.h
@@ -0,0 +1,143 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _CHELSIO_L2T_H
34#define _CHELSIO_L2T_H
35
36#include <linux/spinlock.h>
37#include "t3cdev.h"
38#include <asm/atomic.h>
39
40enum {
41 L2T_STATE_VALID, /* entry is up to date */
42 L2T_STATE_STALE, /* entry may be used but needs revalidation */
43 L2T_STATE_RESOLVING, /* entry needs address resolution */
44 L2T_STATE_UNUSED /* entry not in use */
45};
46
47struct neighbour;
48struct sk_buff;
49
50/*
51 * Each L2T entry plays multiple roles. First of all, it keeps state for the
52 * corresponding entry of the HW L2 table and maintains a queue of offload
53 * packets awaiting address resolution. Second, it is a node of a hash table
54 * chain, where the nodes of the chain are linked together through their next
55 * pointer. Finally, each node is a bucket of a hash table, pointing to the
56 * first element in its chain through its first pointer.
57 */
58struct l2t_entry {
59 u16 state; /* entry state */
60 u16 idx; /* entry index */
61 u32 addr; /* dest IP address */
62 int ifindex; /* neighbor's net_device's ifindex */
63 u16 smt_idx; /* SMT index */
64	u16 vlan;		/* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
65 struct neighbour *neigh; /* associated neighbour */
66 struct l2t_entry *first; /* start of hash chain */
67 struct l2t_entry *next; /* next l2t_entry on chain */
68 struct sk_buff *arpq_head; /* queue of packets awaiting resolution */
69 struct sk_buff *arpq_tail;
70 spinlock_t lock;
71 atomic_t refcnt; /* entry reference count */
72 u8 dmac[6]; /* neighbour's MAC address */
73};
74
75struct l2t_data {
76 unsigned int nentries; /* number of entries */
77 struct l2t_entry *rover; /* starting point for next allocation */
78 atomic_t nfree; /* number of free entries */
79 rwlock_t lock;
80 struct l2t_entry l2tab[0];
81};
82
83typedef void (*arp_failure_handler_func)(struct t3cdev *dev,
84					   struct sk_buff *skb);
85
86/*
87 * Callback stored in an skb to handle address resolution failure.
88 */
89struct l2t_skb_cb {
90 arp_failure_handler_func arp_failure_handler;
91};
92
93#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
94
95static inline void set_arp_failure_handler(struct sk_buff *skb,
96 arp_failure_handler_func hnd)
97{
98 L2T_SKB_CB(skb)->arp_failure_handler = hnd;
99}
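
For illustration, a client could arm this callback before queueing a
packet on an entry; demo_arp_failure is hypothetical:

	/* Hypothetical handler: drop packets whose resolution failed. */
	static void demo_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
	{
		kfree_skb(skb);
	}

	/* ... before handing skb to the L2T code: */
	/*	set_arp_failure_handler(skb, demo_arp_failure);	*/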
100
101/*
102 * Getting to the L2 data from an offload device.
103 */
104#define L2DATA(dev) ((dev)->l2opt)
105
106#define W_TCB_L2T_IX 0
107#define S_TCB_L2T_IX 7
108#define M_TCB_L2T_IX 0x7ffULL
109#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
110
111void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
112void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
113struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
114 struct net_device *dev);
115int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
116 struct l2t_entry *e);
117void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
118struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
119void t3_free_l2t(struct l2t_data *d);
120
121int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
122
123static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
124 struct l2t_entry *e)
125{
126 if (likely(e->state == L2T_STATE_VALID))
127 return cxgb3_ofld_send(dev, skb);
128 return t3_l2t_send_slow(dev, skb, e);
129}
130
131static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e)
132{
133 if (atomic_dec_and_test(&e->refcnt))
134 t3_l2e_free(d, e);
135}
136
137static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
138{
139 if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
140 atomic_dec(&d->nfree);
141}
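
Tying the API together, a hedged sketch of the entry lifecycle from a
client's point of view; demo_xmit is hypothetical, and real clients
normally keep the reference for the life of a connection rather than
taking it per packet:

	static int demo_xmit(struct t3cdev *tdev, struct net_device *netdev,
			     struct neighbour *neigh, struct sk_buff *skb)
	{
		struct l2t_entry *e = t3_l2t_get(tdev, neigh, netdev);
		int ret;

		if (!e)
			return -ENOMEM;		/* L2 table exhausted */
		ret = l2t_send(tdev, skb, e);	/* fast path when VALID */
		l2t_release(L2DATA(tdev), e);	/* drop our reference */
		return ret;
	}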
142
143#endif
diff --git a/drivers/net/cxgb3/mc5.c b/drivers/net/cxgb3/mc5.c
new file mode 100644
index 000000000000..644d62ea86a6
--- /dev/null
+++ b/drivers/net/cxgb3/mc5.c
@@ -0,0 +1,473 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33#include "regs.h"
34
35enum {
36 IDT75P52100 = 4,
37 IDT75N43102 = 5
38};
39
40/* DBGI command mode */
41enum {
42 DBGI_MODE_MBUS = 0,
43 DBGI_MODE_IDT52100 = 5
44};
45
46/* IDT 75P52100 commands */
47#define IDT_CMD_READ 0
48#define IDT_CMD_WRITE 1
49#define IDT_CMD_SEARCH 2
50#define IDT_CMD_LEARN 3
51
52/* IDT LAR register address and value for 144-bit mode (low 32 bits) */
53#define IDT_LAR_ADR0 0x180006
54#define IDT_LAR_MODE144 0xffff0000
55
56/* IDT SCR and SSR addresses (low 32 bits) */
57#define IDT_SCR_ADR0 0x180000
58#define IDT_SSR0_ADR0 0x180002
59#define IDT_SSR1_ADR0 0x180004
60
61/* IDT GMR base address (low 32 bits) */
62#define IDT_GMR_BASE_ADR0 0x180020
63
64/* IDT data and mask array base addresses (low 32 bits) */
65#define IDT_DATARY_BASE_ADR0 0
66#define IDT_MSKARY_BASE_ADR0 0x80000
67
68/* IDT 75N43102 commands */
69#define IDT4_CMD_SEARCH144 3
70#define IDT4_CMD_WRITE 4
71#define IDT4_CMD_READ 5
72
73/* IDT 75N43102 SCR address (low 32 bits) */
74#define IDT4_SCR_ADR0 0x3
75
76/* IDT 75N43102 GMR base addresses (low 32 bits) */
77#define IDT4_GMR_BASE0 0x10
78#define IDT4_GMR_BASE1 0x20
79#define IDT4_GMR_BASE2 0x30
80
81/* IDT 75N43102 data and mask array base addresses (low 32 bits) */
82#define IDT4_DATARY_BASE_ADR0 0x1000000
83#define IDT4_MSKARY_BASE_ADR0 0x2000000
84
85#define MAX_WRITE_ATTEMPTS 5
86
87#define MAX_ROUTES 2048
88
89/*
90 * Issue a command to the TCAM and wait for its completion. The address and
91 * any data required by the command must have been set up by the caller.
92 */
93static int mc5_cmd_write(struct adapter *adapter, u32 cmd)
94{
95 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_CMD, cmd);
96 return t3_wait_op_done(adapter, A_MC5_DB_DBGI_RSP_STATUS,
97 F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
98}
99
100static inline void dbgi_wr_addr3(struct adapter *adapter, u32 v1, u32 v2,
101 u32 v3)
102{
103 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, v1);
104 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR1, v2);
105 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR2, v3);
106}
107
108static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
109 u32 v3)
110{
111 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA0, v1);
112 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA1, v2);
113 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3);
114}
115
116static inline void dbgi_rd_rsp3(struct adapter *adapter, u32 *v1, u32 *v2,
117 u32 *v3)
118{
119 *v1 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA0);
120 *v2 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA1);
121 *v3 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA2);
122}
123
124/*
125 * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
126 * command cmd. The data to be written must have been set up by the caller.
127 * Returns -1 on failure, 0 on success.
128 */
129static int mc5_write(struct adapter *adapter, u32 addr_lo, u32 cmd)
130{
131 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, addr_lo);
132 if (mc5_cmd_write(adapter, cmd) == 0)
133 return 0;
134 CH_ERR(adapter, "MC5 timeout writing to TCAM address 0x%x\n",
135 addr_lo);
136 return -1;
137}
138
139static int init_mask_data_array(struct mc5 *mc5, u32 mask_array_base,
140 u32 data_array_base, u32 write_cmd,
141 int addr_shift)
142{
143 unsigned int i;
144 struct adapter *adap = mc5->adapter;
145
146 /*
147 * We need the size of the TCAM data and mask arrays in terms of
148 * 72-bit entries.
149 */
150 unsigned int size72 = mc5->tcam_size;
151 unsigned int server_base = t3_read_reg(adap, A_MC5_DB_SERVER_INDEX);
152
153 if (mc5->mode == MC5_MODE_144_BIT) {
154 size72 *= 2; /* 1 144-bit entry is 2 72-bit entries */
155 server_base *= 2;
156 }
157
158 /* Clear the data array */
159 dbgi_wr_data3(adap, 0, 0, 0);
160 for (i = 0; i < size72; i++)
161 if (mc5_write(adap, data_array_base + (i << addr_shift),
162 write_cmd))
163 return -1;
164
165 /* Initialize the mask array. */
166 dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
167 for (i = 0; i < size72; i++) {
168 if (i == server_base) /* entering server or routing region */
169 t3_write_reg(adap, A_MC5_DB_DBGI_REQ_DATA0,
170 mc5->mode == MC5_MODE_144_BIT ?
171 0xfffffff9 : 0xfffffffd);
172 if (mc5_write(adap, mask_array_base + (i << addr_shift),
173 write_cmd))
174 return -1;
175 }
176 return 0;
177}
178
179static int init_idt52100(struct mc5 *mc5)
180{
181 int i;
182 struct adapter *adap = mc5->adapter;
183
184 t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
185 V_RDLAT(0x15) | V_LRNLAT(0x15) | V_SRCHLAT(0x15));
186 t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 2);
187
188 /*
189 * Use GMRs 14-15 for ELOOKUP, GMRs 12-13 for SYN lookups, and
190 * GMRs 8-9 for ACK- and AOPEN searches.
191 */
192 t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT_CMD_WRITE);
193 t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT_CMD_WRITE);
194 t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, IDT_CMD_SEARCH);
195 t3_write_reg(adap, A_MC5_DB_AOPEN_LRN_CMD, IDT_CMD_LEARN);
196 t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT_CMD_SEARCH | 0x6000);
197 t3_write_reg(adap, A_MC5_DB_SYN_LRN_CMD, IDT_CMD_LEARN);
198 t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT_CMD_SEARCH);
199 t3_write_reg(adap, A_MC5_DB_ACK_LRN_CMD, IDT_CMD_LEARN);
200 t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT_CMD_SEARCH);
201 t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT_CMD_SEARCH | 0x7000);
202 t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT_CMD_WRITE);
203 t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT_CMD_READ);
204
205 /* Set DBGI command mode for IDT TCAM. */
206 t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
207
208 /* Set up LAR */
209 dbgi_wr_data3(adap, IDT_LAR_MODE144, 0, 0);
210 if (mc5_write(adap, IDT_LAR_ADR0, IDT_CMD_WRITE))
211 goto err;
212
213 /* Set up SSRs */
214 dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0);
215 if (mc5_write(adap, IDT_SSR0_ADR0, IDT_CMD_WRITE) ||
216 mc5_write(adap, IDT_SSR1_ADR0, IDT_CMD_WRITE))
217 goto err;
218
219 /* Set up GMRs */
220 for (i = 0; i < 32; ++i) {
221 if (i >= 12 && i < 15)
222 dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
223 else if (i == 15)
224 dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
225 else
226 dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
227
228 if (mc5_write(adap, IDT_GMR_BASE_ADR0 + i, IDT_CMD_WRITE))
229 goto err;
230 }
231
232 /* Set up SCR */
233 dbgi_wr_data3(adap, 1, 0, 0);
234 if (mc5_write(adap, IDT_SCR_ADR0, IDT_CMD_WRITE))
235 goto err;
236
237 return init_mask_data_array(mc5, IDT_MSKARY_BASE_ADR0,
238 IDT_DATARY_BASE_ADR0, IDT_CMD_WRITE, 0);
239err:
240 return -EIO;
241}
242
243static int init_idt43102(struct mc5 *mc5)
244{
245 int i;
246 struct adapter *adap = mc5->adapter;
247
248 t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
249 adap->params.rev == 0 ? V_RDLAT(0xd) | V_SRCHLAT(0x11) :
250 V_RDLAT(0xd) | V_SRCHLAT(0x12));
251
252 /*
253 * Use GMRs 24-25 for ELOOKUP, GMRs 20-21 for SYN lookups, and no mask
254 * for ACK- and AOPEN searches.
255 */
256 t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT4_CMD_WRITE);
257 t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT4_CMD_WRITE);
258 t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD,
259 IDT4_CMD_SEARCH144 | 0x3800);
260 t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT4_CMD_SEARCH144);
261 t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT4_CMD_SEARCH144 | 0x3800);
262 t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x3800);
263 t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x800);
264 t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT4_CMD_WRITE);
265 t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT4_CMD_READ);
266
267 t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 3);
268
269 /* Set DBGI command mode for IDT TCAM. */
270 t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
271
272 /* Set up GMRs */
273 dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
274 for (i = 0; i < 7; ++i)
275 if (mc5_write(adap, IDT4_GMR_BASE0 + i, IDT4_CMD_WRITE))
276 goto err;
277
278 for (i = 0; i < 4; ++i)
279 if (mc5_write(adap, IDT4_GMR_BASE2 + i, IDT4_CMD_WRITE))
280 goto err;
281
282 dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
283 if (mc5_write(adap, IDT4_GMR_BASE1, IDT4_CMD_WRITE) ||
284 mc5_write(adap, IDT4_GMR_BASE1 + 1, IDT4_CMD_WRITE) ||
285 mc5_write(adap, IDT4_GMR_BASE1 + 4, IDT4_CMD_WRITE))
286 goto err;
287
288 dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
289 if (mc5_write(adap, IDT4_GMR_BASE1 + 5, IDT4_CMD_WRITE))
290 goto err;
291
292 /* Set up SCR */
293 dbgi_wr_data3(adap, 0xf0000000, 0, 0);
294 if (mc5_write(adap, IDT4_SCR_ADR0, IDT4_CMD_WRITE))
295 goto err;
296
297 return init_mask_data_array(mc5, IDT4_MSKARY_BASE_ADR0,
298 IDT4_DATARY_BASE_ADR0, IDT4_CMD_WRITE, 1);
299err:
300 return -EIO;
301}
302
303/* Put MC5 in DBGI mode. */
304static inline void mc5_dbgi_mode_enable(const struct mc5 *mc5)
305{
306 t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
307 V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_DBGIEN);
308}
309
310/* Put MC5 in M-Bus mode. */
311static void mc5_dbgi_mode_disable(const struct mc5 *mc5)
312{
313 t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
314 V_TMMODE(mc5->mode == MC5_MODE_72_BIT) |
315 V_COMPEN(mc5->mode == MC5_MODE_72_BIT) |
316 V_PRTYEN(mc5->parity_enabled) | F_MBUSEN);
317}
318
319/*
320 * Initialization that requires the OS and protocol layers to already
321 * be initialized goes here.
322 */
323int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
324 unsigned int nroutes)
325{
326 u32 cfg;
327 int err;
328 unsigned int tcam_size = mc5->tcam_size;
329 struct adapter *adap = mc5->adapter;
330
331 if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size)
332 return -EINVAL;
333
334 /* Reset the TCAM */
335 cfg = t3_read_reg(adap, A_MC5_DB_CONFIG) & ~F_TMMODE;
336 cfg |= V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_TMRST;
337 t3_write_reg(adap, A_MC5_DB_CONFIG, cfg);
338 if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) {
339 CH_ERR(adap, "TCAM reset timed out\n");
340 return -1;
341 }
342
343 t3_write_reg(adap, A_MC5_DB_ROUTING_TABLE_INDEX, tcam_size - nroutes);
344 t3_write_reg(adap, A_MC5_DB_FILTER_TABLE,
345 tcam_size - nroutes - nfilters);
346 t3_write_reg(adap, A_MC5_DB_SERVER_INDEX,
347 tcam_size - nroutes - nfilters - nservers);
348
349 mc5->parity_enabled = 1;
350
351 /* All the TCAM addresses we access have only the low 32 bits non 0 */
352 t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0);
353 t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0);
354
355 mc5_dbgi_mode_enable(mc5);
356
357 switch (mc5->part_type) {
358 case IDT75P52100:
359 err = init_idt52100(mc5);
360 break;
361 case IDT75N43102:
362 err = init_idt43102(mc5);
363 break;
364 default:
365 CH_ERR(adap, "Unsupported TCAM type %d\n", mc5->part_type);
366 err = -EINVAL;
367 break;
368 }
369
370 mc5_dbgi_mode_disable(mc5);
371 return err;
372}
373
374/**
375 * t3_read_mc5_range - dump a part of the memory managed by MC5
376 * @mc5: the MC5 handle
377 * @start: the start address for the dump
378 * @n: number of 72-bit words to read
379 * @buf: result buffer
380 *
381 * Read n 72-bit words from MC5 memory from the given start location.
382 */
383int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start,
384 unsigned int n, u32 *buf)
385{
386 u32 read_cmd;
387 int err = 0;
388 struct adapter *adap = mc5->adapter;
389
390 if (mc5->part_type == IDT75P52100)
391 read_cmd = IDT_CMD_READ;
392 else if (mc5->part_type == IDT75N43102)
393 read_cmd = IDT4_CMD_READ;
394 else
395 return -EINVAL;
396
397 mc5_dbgi_mode_enable(mc5);
398
399 while (n--) {
400 t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR0, start++);
401 if (mc5_cmd_write(adap, read_cmd)) {
402 err = -EIO;
403 break;
404 }
405 dbgi_rd_rsp3(adap, buf + 2, buf + 1, buf);
406 buf += 3;
407 }
408
409 mc5_dbgi_mode_disable(mc5);
410	return err;
411}
412
413#define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR)
414
415/*
416 * MC5 interrupt handler
417 */
418void t3_mc5_intr_handler(struct mc5 *mc5)
419{
420 struct adapter *adap = mc5->adapter;
421 u32 cause = t3_read_reg(adap, A_MC5_DB_INT_CAUSE);
422
423 if ((cause & F_PARITYERR) && mc5->parity_enabled) {
424 CH_ALERT(adap, "MC5 parity error\n");
425 mc5->stats.parity_err++;
426 }
427
428 if (cause & F_REQQPARERR) {
429 CH_ALERT(adap, "MC5 request queue parity error\n");
430 mc5->stats.reqq_parity_err++;
431 }
432
433 if (cause & F_DISPQPARERR) {
434 CH_ALERT(adap, "MC5 dispatch queue parity error\n");
435 mc5->stats.dispq_parity_err++;
436 }
437
438 if (cause & F_ACTRGNFULL)
439 mc5->stats.active_rgn_full++;
440 if (cause & F_NFASRCHFAIL)
441 mc5->stats.nfa_srch_err++;
442 if (cause & F_UNKNOWNCMD)
443 mc5->stats.unknown_cmd++;
444 if (cause & F_DELACTEMPTY)
445 mc5->stats.del_act_empty++;
446 if (cause & MC5_INT_FATAL)
447 t3_fatal_err(adap);
448
449 t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause);
450}
451
452void __devinit t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode)
453{
454#define K * 1024	/* so "64 K" below expands to 64 * 1024 */
455
456 static unsigned int tcam_part_size[] = { /* in K 72-bit entries */
457 64 K, 128 K, 256 K, 32 K
458 };
459
460#undef K
461
462 u32 cfg = t3_read_reg(adapter, A_MC5_DB_CONFIG);
463
464 mc5->adapter = adapter;
465 mc5->mode = (unsigned char)mode;
466 mc5->part_type = (unsigned char)G_TMTYPE(cfg);
467 if (cfg & F_TMTYPEHI)
468 mc5->part_type |= 4;
469
470 mc5->tcam_size = tcam_part_size[G_TMPARTSIZE(cfg)];
471 if (mode == MC5_MODE_144_BIT)
472 mc5->tcam_size /= 2;
473}
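
As a worked example of the computation above: if G_TMPARTSIZE(cfg) reads 1,
the part holds 128K 72-bit entries, and in MC5_MODE_144_BIT tcam_size is
halved to 64K because each 144-bit entry occupies two 72-bit words (the
same factor of two that init_mask_data_array applies in the other
direction).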
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
new file mode 100644
index 000000000000..b56c5f52bcdc
--- /dev/null
+++ b/drivers/net/cxgb3/regs.h
@@ -0,0 +1,2195 @@
1#define A_SG_CONTROL 0x0
2
3#define S_DROPPKT 20
4#define V_DROPPKT(x) ((x) << S_DROPPKT)
5#define F_DROPPKT V_DROPPKT(1U)
6
7#define S_EGRGENCTRL 19
8#define V_EGRGENCTRL(x) ((x) << S_EGRGENCTRL)
9#define F_EGRGENCTRL V_EGRGENCTRL(1U)
10
11#define S_USERSPACESIZE 14
12#define M_USERSPACESIZE 0x1f
13#define V_USERSPACESIZE(x) ((x) << S_USERSPACESIZE)
14
15#define S_HOSTPAGESIZE 11
16#define M_HOSTPAGESIZE 0x7
17#define V_HOSTPAGESIZE(x) ((x) << S_HOSTPAGESIZE)
18
19#define S_FLMODE 9
20#define V_FLMODE(x) ((x) << S_FLMODE)
21#define F_FLMODE V_FLMODE(1U)
22
23#define S_PKTSHIFT 6
24#define M_PKTSHIFT 0x7
25#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT)
26
27#define S_ONEINTMULTQ 5
28#define V_ONEINTMULTQ(x) ((x) << S_ONEINTMULTQ)
29#define F_ONEINTMULTQ V_ONEINTMULTQ(1U)
30
31#define S_BIGENDIANINGRESS 2
32#define V_BIGENDIANINGRESS(x) ((x) << S_BIGENDIANINGRESS)
33#define F_BIGENDIANINGRESS V_BIGENDIANINGRESS(1U)
34
35#define S_ISCSICOALESCING 1
36#define V_ISCSICOALESCING(x) ((x) << S_ISCSICOALESCING)
37#define F_ISCSICOALESCING V_ISCSICOALESCING(1U)
38
39#define S_GLOBALENABLE 0
40#define V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE)
41#define F_GLOBALENABLE V_GLOBALENABLE(1U)
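
The whole of regs.h follows one naming scheme: A_ register address, S_/M_
field shift and mask, V_ insert a field value, F_ single-bit flag, G_
extract a field. A self-contained sketch composing a value for
A_SG_CONTROL from the fields defined above (the field values are
arbitrary):

	/* Compose an SG_CONTROL value; constants come from this header. */
	unsigned int sg_control = F_GLOBALENABLE |	/* enable the SGE */
				  V_HOSTPAGESIZE(2) |	/* encoded size   */
				  V_PKTSHIFT(2) |	/* ingress shift  */
				  F_BIGENDIANINGRESS;

	/* Driver-side use would then be:
	 *	t3_write_reg(adapter, A_SG_CONTROL, sg_control);
	 */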
42
43#define S_AVOIDCQOVFL 24
44#define V_AVOIDCQOVFL(x) ((x) << S_AVOIDCQOVFL)
45#define F_AVOIDCQOVFL V_AVOIDCQOVFL(1U)
46
47#define S_OPTONEINTMULTQ 23
48#define V_OPTONEINTMULTQ(x) ((x) << S_OPTONEINTMULTQ)
49#define F_OPTONEINTMULTQ V_OPTONEINTMULTQ(1U)
50
51#define S_CQCRDTCTRL 22
52#define V_CQCRDTCTRL(x) ((x) << S_CQCRDTCTRL)
53#define F_CQCRDTCTRL V_CQCRDTCTRL(1U)
54
55#define A_SG_KDOORBELL 0x4
56
57#define S_SELEGRCNTX 31
58#define V_SELEGRCNTX(x) ((x) << S_SELEGRCNTX)
59#define F_SELEGRCNTX V_SELEGRCNTX(1U)
60
61#define S_EGRCNTX 0
62#define M_EGRCNTX 0xffff
63#define V_EGRCNTX(x) ((x) << S_EGRCNTX)
64
65#define A_SG_GTS 0x8
66
67#define S_RSPQ 29
68#define M_RSPQ 0x7
69#define V_RSPQ(x) ((x) << S_RSPQ)
70#define G_RSPQ(x) (((x) >> S_RSPQ) & M_RSPQ)
71
72#define S_NEWTIMER 16
73#define M_NEWTIMER 0x1fff
74#define V_NEWTIMER(x) ((x) << S_NEWTIMER)
75
76#define S_NEWINDEX 0
77#define M_NEWINDEX 0xffff
78#define V_NEWINDEX(x) ((x) << S_NEWINDEX)
79
80#define A_SG_CONTEXT_CMD 0xc
81
82#define S_CONTEXT_CMD_OPCODE 28
83#define M_CONTEXT_CMD_OPCODE 0xf
84#define V_CONTEXT_CMD_OPCODE(x) ((x) << S_CONTEXT_CMD_OPCODE)
85
86#define S_CONTEXT_CMD_BUSY 27
87#define V_CONTEXT_CMD_BUSY(x) ((x) << S_CONTEXT_CMD_BUSY)
88#define F_CONTEXT_CMD_BUSY V_CONTEXT_CMD_BUSY(1U)
89
90#define S_CQ_CREDIT 20
91
92#define M_CQ_CREDIT 0x7f
93
94#define V_CQ_CREDIT(x) ((x) << S_CQ_CREDIT)
95
96#define G_CQ_CREDIT(x) (((x) >> S_CQ_CREDIT) & M_CQ_CREDIT)
97
98#define S_CQ 19
99
100#define V_CQ(x) ((x) << S_CQ)
101#define F_CQ V_CQ(1U)
102
103#define S_RESPONSEQ 18
104#define V_RESPONSEQ(x) ((x) << S_RESPONSEQ)
105#define F_RESPONSEQ V_RESPONSEQ(1U)
106
107#define S_EGRESS 17
108#define V_EGRESS(x) ((x) << S_EGRESS)
109#define F_EGRESS V_EGRESS(1U)
110
111#define S_FREELIST 16
112#define V_FREELIST(x) ((x) << S_FREELIST)
113#define F_FREELIST V_FREELIST(1U)
114
115#define S_CONTEXT 0
116#define M_CONTEXT 0xffff
117#define V_CONTEXT(x) ((x) << S_CONTEXT)
118
119#define G_CONTEXT(x) (((x) >> S_CONTEXT) & M_CONTEXT)
120
121#define A_SG_CONTEXT_DATA0 0x10
122
123#define A_SG_CONTEXT_DATA1 0x14
124
125#define A_SG_CONTEXT_DATA2 0x18
126
127#define A_SG_CONTEXT_DATA3 0x1c
128
129#define A_SG_CONTEXT_MASK0 0x20
130
131#define A_SG_CONTEXT_MASK1 0x24
132
133#define A_SG_CONTEXT_MASK2 0x28
134
135#define A_SG_CONTEXT_MASK3 0x2c
136
137#define A_SG_RSPQ_CREDIT_RETURN 0x30
138
139#define S_CREDITS 0
140#define M_CREDITS 0xffff
141#define V_CREDITS(x) ((x) << S_CREDITS)
142
143#define A_SG_DATA_INTR 0x34
144
145#define S_ERRINTR 31
146#define V_ERRINTR(x) ((x) << S_ERRINTR)
147#define F_ERRINTR V_ERRINTR(1U)
148
149#define A_SG_HI_DRB_HI_THRSH 0x38
150
151#define A_SG_HI_DRB_LO_THRSH 0x3c
152
153#define A_SG_LO_DRB_HI_THRSH 0x40
154
155#define A_SG_LO_DRB_LO_THRSH 0x44
156
157#define A_SG_RSPQ_FL_STATUS 0x4c
158
159#define S_RSPQ0DISABLED 8
160
161#define A_SG_EGR_RCQ_DRB_THRSH 0x54
162
163#define S_HIRCQDRBTHRSH 16
164#define M_HIRCQDRBTHRSH 0x7ff
165#define V_HIRCQDRBTHRSH(x) ((x) << S_HIRCQDRBTHRSH)
166
167#define S_LORCQDRBTHRSH 0
168#define M_LORCQDRBTHRSH 0x7ff
169#define V_LORCQDRBTHRSH(x) ((x) << S_LORCQDRBTHRSH)
170
171#define A_SG_EGR_CNTX_BADDR 0x58
172
173#define A_SG_INT_CAUSE 0x5c
174
175#define S_RSPQDISABLED 3
176#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
177#define F_RSPQDISABLED V_RSPQDISABLED(1U)
178
179#define S_RSPQCREDITOVERFOW 2
180#define V_RSPQCREDITOVERFOW(x) ((x) << S_RSPQCREDITOVERFOW)
181#define F_RSPQCREDITOVERFOW V_RSPQCREDITOVERFOW(1U)
182
183#define A_SG_INT_ENABLE 0x60
184
185#define A_SG_CMDQ_CREDIT_TH 0x64
186
187#define S_TIMEOUT 8
188#define M_TIMEOUT 0xffffff
189#define V_TIMEOUT(x) ((x) << S_TIMEOUT)
190
191#define S_THRESHOLD 0
192#define M_THRESHOLD 0xff
193#define V_THRESHOLD(x) ((x) << S_THRESHOLD)
194
195#define A_SG_TIMER_TICK 0x68
196
197#define A_SG_CQ_CONTEXT_BADDR 0x6c
198
199#define A_SG_OCO_BASE 0x70
200
201#define S_BASE1 16
202#define M_BASE1 0xffff
203#define V_BASE1(x) ((x) << S_BASE1)
204
205#define A_SG_DRB_PRI_THRESH 0x74
206
207#define A_PCIX_INT_ENABLE 0x80
208
209#define S_MSIXPARERR 22
210#define M_MSIXPARERR 0x7
211
212#define V_MSIXPARERR(x) ((x) << S_MSIXPARERR)
213
214#define S_CFPARERR 18
215#define M_CFPARERR 0xf
216
217#define V_CFPARERR(x) ((x) << S_CFPARERR)
218
219#define S_RFPARERR 14
220#define M_RFPARERR 0xf
221
222#define V_RFPARERR(x) ((x) << S_RFPARERR)
223
224#define S_WFPARERR 12
225#define M_WFPARERR 0x3
226
227#define V_WFPARERR(x) ((x) << S_WFPARERR)
228
229#define S_PIOPARERR 11
230#define V_PIOPARERR(x) ((x) << S_PIOPARERR)
231#define F_PIOPARERR V_PIOPARERR(1U)
232
233#define S_DETUNCECCERR 10
234#define V_DETUNCECCERR(x) ((x) << S_DETUNCECCERR)
235#define F_DETUNCECCERR V_DETUNCECCERR(1U)
236
237#define S_DETCORECCERR 9
238#define V_DETCORECCERR(x) ((x) << S_DETCORECCERR)
239#define F_DETCORECCERR V_DETCORECCERR(1U)
240
241#define S_RCVSPLCMPERR 8
242#define V_RCVSPLCMPERR(x) ((x) << S_RCVSPLCMPERR)
243#define F_RCVSPLCMPERR V_RCVSPLCMPERR(1U)
244
245#define S_UNXSPLCMP 7
246#define V_UNXSPLCMP(x) ((x) << S_UNXSPLCMP)
247#define F_UNXSPLCMP V_UNXSPLCMP(1U)
248
249#define S_SPLCMPDIS 6
250#define V_SPLCMPDIS(x) ((x) << S_SPLCMPDIS)
251#define F_SPLCMPDIS V_SPLCMPDIS(1U)
252
253#define S_DETPARERR 5
254#define V_DETPARERR(x) ((x) << S_DETPARERR)
255#define F_DETPARERR V_DETPARERR(1U)
256
257#define S_SIGSYSERR 4
258#define V_SIGSYSERR(x) ((x) << S_SIGSYSERR)
259#define F_SIGSYSERR V_SIGSYSERR(1U)
260
261#define S_RCVMSTABT 3
262#define V_RCVMSTABT(x) ((x) << S_RCVMSTABT)
263#define F_RCVMSTABT V_RCVMSTABT(1U)
264
265#define S_RCVTARABT 2
266#define V_RCVTARABT(x) ((x) << S_RCVTARABT)
267#define F_RCVTARABT V_RCVTARABT(1U)
268
269#define S_SIGTARABT 1
270#define V_SIGTARABT(x) ((x) << S_SIGTARABT)
271#define F_SIGTARABT V_SIGTARABT(1U)
272
273#define S_MSTDETPARERR 0
274#define V_MSTDETPARERR(x) ((x) << S_MSTDETPARERR)
275#define F_MSTDETPARERR V_MSTDETPARERR(1U)
276
277#define A_PCIX_INT_CAUSE 0x84
278
279#define A_PCIX_CFG 0x88
280
281#define S_CLIDECEN 18
282#define V_CLIDECEN(x) ((x) << S_CLIDECEN)
283#define F_CLIDECEN V_CLIDECEN(1U)
284
285#define A_PCIX_MODE 0x8c
286
287#define S_PCLKRANGE 6
288#define M_PCLKRANGE 0x3
289#define V_PCLKRANGE(x) ((x) << S_PCLKRANGE)
290#define G_PCLKRANGE(x) (((x) >> S_PCLKRANGE) & M_PCLKRANGE)
291
292#define S_PCIXINITPAT 2
293#define M_PCIXINITPAT 0xf
294#define V_PCIXINITPAT(x) ((x) << S_PCIXINITPAT)
295#define G_PCIXINITPAT(x) (((x) >> S_PCIXINITPAT) & M_PCIXINITPAT)
296
297#define S_64BIT 0
298#define V_64BIT(x) ((x) << S_64BIT)
299#define F_64BIT V_64BIT(1U)
300
301#define A_PCIE_INT_ENABLE 0x80
302
303#define S_BISTERR 15
304#define M_BISTERR 0xff
305
306#define V_BISTERR(x) ((x) << S_BISTERR)
307
308#define S_PCIE_MSIXPARERR 12
309#define M_PCIE_MSIXPARERR 0x7
310
311#define V_PCIE_MSIXPARERR(x) ((x) << S_PCIE_MSIXPARERR)
312
313#define S_PCIE_CFPARERR 11
314#define V_PCIE_CFPARERR(x) ((x) << S_PCIE_CFPARERR)
315#define F_PCIE_CFPARERR V_PCIE_CFPARERR(1U)
316
317#define S_PCIE_RFPARERR 10
318#define V_PCIE_RFPARERR(x) ((x) << S_PCIE_RFPARERR)
319#define F_PCIE_RFPARERR V_PCIE_RFPARERR(1U)
320
321#define S_PCIE_WFPARERR 9
322#define V_PCIE_WFPARERR(x) ((x) << S_PCIE_WFPARERR)
323#define F_PCIE_WFPARERR V_PCIE_WFPARERR(1U)
324
325#define S_PCIE_PIOPARERR 8
326#define V_PCIE_PIOPARERR(x) ((x) << S_PCIE_PIOPARERR)
327#define F_PCIE_PIOPARERR V_PCIE_PIOPARERR(1U)
328
329#define S_UNXSPLCPLERRC 7
330#define V_UNXSPLCPLERRC(x) ((x) << S_UNXSPLCPLERRC)
331#define F_UNXSPLCPLERRC V_UNXSPLCPLERRC(1U)
332
333#define S_UNXSPLCPLERRR 6
334#define V_UNXSPLCPLERRR(x) ((x) << S_UNXSPLCPLERRR)
335#define F_UNXSPLCPLERRR V_UNXSPLCPLERRR(1U)
336
337#define S_PEXERR 0
338#define V_PEXERR(x) ((x) << S_PEXERR)
339#define F_PEXERR V_PEXERR(1U)
340
341#define A_PCIE_INT_CAUSE 0x84
342
343#define A_PCIE_CFG 0x88
344
345#define S_PCIE_CLIDECEN 16
346#define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
347#define F_PCIE_CLIDECEN V_PCIE_CLIDECEN(1U)
348
349#define S_CRSTWRMMODE 0
350#define V_CRSTWRMMODE(x) ((x) << S_CRSTWRMMODE)
351#define F_CRSTWRMMODE V_CRSTWRMMODE(1U)
352
353#define A_PCIE_MODE 0x8c
354
355#define S_NUMFSTTRNSEQRX 10
356#define M_NUMFSTTRNSEQRX 0xff
357#define V_NUMFSTTRNSEQRX(x) ((x) << S_NUMFSTTRNSEQRX)
358#define G_NUMFSTTRNSEQRX(x) (((x) >> S_NUMFSTTRNSEQRX) & M_NUMFSTTRNSEQRX)
359
360#define A_PCIE_PEX_CTRL0 0x98
361
362#define S_NUMFSTTRNSEQ 22
363#define M_NUMFSTTRNSEQ 0xff
364#define V_NUMFSTTRNSEQ(x) ((x) << S_NUMFSTTRNSEQ)
365#define G_NUMFSTTRNSEQ(x) (((x) >> S_NUMFSTTRNSEQ) & M_NUMFSTTRNSEQ)
366
367#define S_REPLAYLMT 2
368#define M_REPLAYLMT 0xfffff
369
370#define V_REPLAYLMT(x) ((x) << S_REPLAYLMT)
371
372#define A_PCIE_PEX_CTRL1 0x9c
373
374#define S_T3A_ACKLAT 0
375#define M_T3A_ACKLAT 0x7ff
376
377#define V_T3A_ACKLAT(x) ((x) << S_T3A_ACKLAT)
378
379#define S_ACKLAT 0
380#define M_ACKLAT 0x1fff
381
382#define V_ACKLAT(x) ((x) << S_ACKLAT)
383
384#define A_PCIE_PEX_ERR 0xa4
385
386#define A_T3DBG_GPIO_EN 0xd0
387
388#define S_GPIO11_OEN 27
389#define V_GPIO11_OEN(x) ((x) << S_GPIO11_OEN)
390#define F_GPIO11_OEN V_GPIO11_OEN(1U)
391
392#define S_GPIO10_OEN 26
393#define V_GPIO10_OEN(x) ((x) << S_GPIO10_OEN)
394#define F_GPIO10_OEN V_GPIO10_OEN(1U)
395
396#define S_GPIO7_OEN 23
397#define V_GPIO7_OEN(x) ((x) << S_GPIO7_OEN)
398#define F_GPIO7_OEN V_GPIO7_OEN(1U)
399
400#define S_GPIO6_OEN 22
401#define V_GPIO6_OEN(x) ((x) << S_GPIO6_OEN)
402#define F_GPIO6_OEN V_GPIO6_OEN(1U)
403
404#define S_GPIO5_OEN 21
405#define V_GPIO5_OEN(x) ((x) << S_GPIO5_OEN)
406#define F_GPIO5_OEN V_GPIO5_OEN(1U)
407
408#define S_GPIO4_OEN 20
409#define V_GPIO4_OEN(x) ((x) << S_GPIO4_OEN)
410#define F_GPIO4_OEN V_GPIO4_OEN(1U)
411
412#define S_GPIO2_OEN 18
413#define V_GPIO2_OEN(x) ((x) << S_GPIO2_OEN)
414#define F_GPIO2_OEN V_GPIO2_OEN(1U)
415
416#define S_GPIO1_OEN 17
417#define V_GPIO1_OEN(x) ((x) << S_GPIO1_OEN)
418#define F_GPIO1_OEN V_GPIO1_OEN(1U)
419
420#define S_GPIO0_OEN 16
421#define V_GPIO0_OEN(x) ((x) << S_GPIO0_OEN)
422#define F_GPIO0_OEN V_GPIO0_OEN(1U)
423
424#define S_GPIO10_OUT_VAL 10
425#define V_GPIO10_OUT_VAL(x) ((x) << S_GPIO10_OUT_VAL)
426#define F_GPIO10_OUT_VAL V_GPIO10_OUT_VAL(1U)
427
428#define S_GPIO7_OUT_VAL 7
429#define V_GPIO7_OUT_VAL(x) ((x) << S_GPIO7_OUT_VAL)
430#define F_GPIO7_OUT_VAL V_GPIO7_OUT_VAL(1U)
431
432#define S_GPIO6_OUT_VAL 6
433#define V_GPIO6_OUT_VAL(x) ((x) << S_GPIO6_OUT_VAL)
434#define F_GPIO6_OUT_VAL V_GPIO6_OUT_VAL(1U)
435
436#define S_GPIO5_OUT_VAL 5
437#define V_GPIO5_OUT_VAL(x) ((x) << S_GPIO5_OUT_VAL)
438#define F_GPIO5_OUT_VAL V_GPIO5_OUT_VAL(1U)
439
440#define S_GPIO4_OUT_VAL 4
441#define V_GPIO4_OUT_VAL(x) ((x) << S_GPIO4_OUT_VAL)
442#define F_GPIO4_OUT_VAL V_GPIO4_OUT_VAL(1U)
443
444#define S_GPIO2_OUT_VAL 2
445#define V_GPIO2_OUT_VAL(x) ((x) << S_GPIO2_OUT_VAL)
446#define F_GPIO2_OUT_VAL V_GPIO2_OUT_VAL(1U)
447
448#define S_GPIO1_OUT_VAL 1
449#define V_GPIO1_OUT_VAL(x) ((x) << S_GPIO1_OUT_VAL)
450#define F_GPIO1_OUT_VAL V_GPIO1_OUT_VAL(1U)
451
452#define S_GPIO0_OUT_VAL 0
453#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL)
454#define F_GPIO0_OUT_VAL V_GPIO0_OUT_VAL(1U)
455
456#define A_T3DBG_INT_ENABLE 0xd8
457
458#define S_GPIO11 11
459#define V_GPIO11(x) ((x) << S_GPIO11)
460#define F_GPIO11 V_GPIO11(1U)
461
462#define S_GPIO10 10
463#define V_GPIO10(x) ((x) << S_GPIO10)
464#define F_GPIO10 V_GPIO10(1U)
465
466#define S_GPIO7 7
467#define V_GPIO7(x) ((x) << S_GPIO7)
468#define F_GPIO7 V_GPIO7(1U)
469
470#define S_GPIO6 6
471#define V_GPIO6(x) ((x) << S_GPIO6)
472#define F_GPIO6 V_GPIO6(1U)
473
474#define S_GPIO5 5
475#define V_GPIO5(x) ((x) << S_GPIO5)
476#define F_GPIO5 V_GPIO5(1U)
477
478#define S_GPIO4 4
479#define V_GPIO4(x) ((x) << S_GPIO4)
480#define F_GPIO4 V_GPIO4(1U)
481
482#define S_GPIO3 3
483#define V_GPIO3(x) ((x) << S_GPIO3)
484#define F_GPIO3 V_GPIO3(1U)
485
486#define S_GPIO2 2
487#define V_GPIO2(x) ((x) << S_GPIO2)
488#define F_GPIO2 V_GPIO2(1U)
489
490#define S_GPIO1 1
491#define V_GPIO1(x) ((x) << S_GPIO1)
492#define F_GPIO1 V_GPIO1(1U)
493
494#define S_GPIO0 0
495#define V_GPIO0(x) ((x) << S_GPIO0)
496#define F_GPIO0 V_GPIO0(1U)
497
498#define A_T3DBG_INT_CAUSE 0xdc
499
500#define A_T3DBG_GPIO_ACT_LOW 0xf0
501
502#define MC7_PMRX_BASE_ADDR 0x100
503
504#define A_MC7_CFG 0x100
505
506#define S_IFEN 13
507#define V_IFEN(x) ((x) << S_IFEN)
508#define F_IFEN V_IFEN(1U)
509
510#define S_TERM150 11
511#define V_TERM150(x) ((x) << S_TERM150)
512#define F_TERM150 V_TERM150(1U)
513
514#define S_SLOW 10
515#define V_SLOW(x) ((x) << S_SLOW)
516#define F_SLOW V_SLOW(1U)
517
518#define S_WIDTH 8
519#define M_WIDTH 0x3
520#define V_WIDTH(x) ((x) << S_WIDTH)
521#define G_WIDTH(x) (((x) >> S_WIDTH) & M_WIDTH)
522
523#define S_BKS 6
524#define V_BKS(x) ((x) << S_BKS)
525#define F_BKS V_BKS(1U)
526
527#define S_ORG 5
528#define V_ORG(x) ((x) << S_ORG)
529#define F_ORG V_ORG(1U)
530
531#define S_DEN 2
532#define M_DEN 0x7
533#define V_DEN(x) ((x) << S_DEN)
534#define G_DEN(x) (((x) >> S_DEN) & M_DEN)
535
536#define S_RDY 1
537#define V_RDY(x) ((x) << S_RDY)
538#define F_RDY V_RDY(1U)
539
540#define S_CLKEN 0
541#define V_CLKEN(x) ((x) << S_CLKEN)
542#define F_CLKEN V_CLKEN(1U)
543
544#define A_MC7_MODE 0x104
545
546#define S_BUSY 31
547#define V_BUSY(x) ((x) << S_BUSY)
548#define F_BUSY V_BUSY(1U)
553
554#define A_MC7_EXT_MODE1 0x108
555
556#define A_MC7_EXT_MODE2 0x10c
557
558#define A_MC7_EXT_MODE3 0x110
559
560#define A_MC7_PRE 0x114
561
562#define A_MC7_REF 0x118
563
564#define S_PREREFDIV 1
565#define M_PREREFDIV 0x3fff
566#define V_PREREFDIV(x) ((x) << S_PREREFDIV)
567
568#define S_PERREFEN 0
569#define V_PERREFEN(x) ((x) << S_PERREFEN)
570#define F_PERREFEN V_PERREFEN(1U)
571
572#define A_MC7_DLL 0x11c
573
574#define S_DLLENB 1
575#define V_DLLENB(x) ((x) << S_DLLENB)
576#define F_DLLENB V_DLLENB(1U)
577
578#define S_DLLRST 0
579#define V_DLLRST(x) ((x) << S_DLLRST)
580#define F_DLLRST V_DLLRST(1U)
581
582#define A_MC7_PARM 0x120
583
584#define S_ACTTOPREDLY 26
585#define M_ACTTOPREDLY 0xf
586#define V_ACTTOPREDLY(x) ((x) << S_ACTTOPREDLY)
587
588#define S_ACTTORDWRDLY 23
589#define M_ACTTORDWRDLY 0x7
590#define V_ACTTORDWRDLY(x) ((x) << S_ACTTORDWRDLY)
591
592#define S_PRECYC 20
593#define M_PRECYC 0x7
594#define V_PRECYC(x) ((x) << S_PRECYC)
595
596#define S_REFCYC 13
597#define M_REFCYC 0x7f
598#define V_REFCYC(x) ((x) << S_REFCYC)
599
600#define S_BKCYC 8
601#define M_BKCYC 0x1f
602#define V_BKCYC(x) ((x) << S_BKCYC)
603
604#define S_WRTORDDLY 4
605#define M_WRTORDDLY 0xf
606#define V_WRTORDDLY(x) ((x) << S_WRTORDDLY)
607
608#define S_RDTOWRDLY 0
609#define M_RDTOWRDLY 0xf
610#define V_RDTOWRDLY(x) ((x) << S_RDTOWRDLY)
611
612#define A_MC7_CAL 0x128
613
614#define S_BUSY 31
615#define V_BUSY(x) ((x) << S_BUSY)
616#define F_BUSY V_BUSY(1U)
621
622#define S_CAL_FAULT 30
623#define V_CAL_FAULT(x) ((x) << S_CAL_FAULT)
624#define F_CAL_FAULT V_CAL_FAULT(1U)
625
626#define S_SGL_CAL_EN 20
627#define V_SGL_CAL_EN(x) ((x) << S_SGL_CAL_EN)
628#define F_SGL_CAL_EN V_SGL_CAL_EN(1U)
629
630#define A_MC7_ERR_ADDR 0x12c
631
632#define A_MC7_ECC 0x130
633
634#define S_ECCCHKEN 1
635#define V_ECCCHKEN(x) ((x) << S_ECCCHKEN)
636#define F_ECCCHKEN V_ECCCHKEN(1U)
637
638#define S_ECCGENEN 0
639#define V_ECCGENEN(x) ((x) << S_ECCGENEN)
640#define F_ECCGENEN V_ECCGENEN(1U)
641
642#define A_MC7_CE_ADDR 0x134
643
644#define A_MC7_CE_DATA0 0x138
645
646#define A_MC7_CE_DATA1 0x13c
647
648#define A_MC7_CE_DATA2 0x140
649
650#define S_DATA 0
651#define M_DATA 0xff
652
653#define G_DATA(x) (((x) >> S_DATA) & M_DATA)
654
655#define A_MC7_UE_ADDR 0x144
656
657#define A_MC7_UE_DATA0 0x148
658
659#define A_MC7_UE_DATA1 0x14c
660
661#define A_MC7_UE_DATA2 0x150
662
663#define A_MC7_BD_ADDR 0x154
664
665#define S_ADDR 3
666
667#define M_ADDR 0x1fffffff
668
669#define A_MC7_BD_DATA0 0x158
670
671#define A_MC7_BD_DATA1 0x15c
672
673#define A_MC7_BD_OP 0x164
674
675#define S_OP 0
676
677#define V_OP(x) ((x) << S_OP)
678#define F_OP V_OP(1U)
682
683#define A_MC7_BIST_ADDR_BEG 0x168
684
685#define A_MC7_BIST_ADDR_END 0x16c
686
687#define A_MC7_BIST_DATA 0x170
688
689#define A_MC7_BIST_OP 0x174
690
691#define S_CONT 3
692#define V_CONT(x) ((x) << S_CONT)
693#define F_CONT V_CONT(1U)
696
697#define A_MC7_INT_ENABLE 0x178
698
699#define S_AE 17
700#define V_AE(x) ((x) << S_AE)
701#define F_AE V_AE(1U)
702
703#define S_PE 2
704#define M_PE 0x7fff
705
706#define V_PE(x) ((x) << S_PE)
707
708#define G_PE(x) (((x) >> S_PE) & M_PE)
709
710#define S_UE 1
711#define V_UE(x) ((x) << S_UE)
712#define F_UE V_UE(1U)
713
714#define S_CE 0
715#define V_CE(x) ((x) << S_CE)
716#define F_CE V_CE(1U)
717
718#define A_MC7_INT_CAUSE 0x17c
719
720#define MC7_PMTX_BASE_ADDR 0x180
721
722#define MC7_CM_BASE_ADDR 0x200
723
724#define A_CIM_BOOT_CFG 0x280
725
726#define S_BOOTADDR 2
727#define M_BOOTADDR 0x3fffffff
728#define V_BOOTADDR(x) ((x) << S_BOOTADDR)
729
730#define A_CIM_SDRAM_BASE_ADDR 0x28c
731
732#define A_CIM_SDRAM_ADDR_SIZE 0x290
733
734#define A_CIM_HOST_INT_ENABLE 0x298
735
736#define A_CIM_HOST_INT_CAUSE 0x29c
737
738#define S_BLKWRPLINT 12
739#define V_BLKWRPLINT(x) ((x) << S_BLKWRPLINT)
740#define F_BLKWRPLINT V_BLKWRPLINT(1U)
741
742#define S_BLKRDPLINT 11
743#define V_BLKRDPLINT(x) ((x) << S_BLKRDPLINT)
744#define F_BLKRDPLINT V_BLKRDPLINT(1U)
745
746#define S_BLKWRCTLINT 10
747#define V_BLKWRCTLINT(x) ((x) << S_BLKWRCTLINT)
748#define F_BLKWRCTLINT V_BLKWRCTLINT(1U)
749
750#define S_BLKRDCTLINT 9
751#define V_BLKRDCTLINT(x) ((x) << S_BLKRDCTLINT)
752#define F_BLKRDCTLINT V_BLKRDCTLINT(1U)
753
754#define S_BLKWRFLASHINT 8
755#define V_BLKWRFLASHINT(x) ((x) << S_BLKWRFLASHINT)
756#define F_BLKWRFLASHINT V_BLKWRFLASHINT(1U)
757
758#define S_BLKRDFLASHINT 7
759#define V_BLKRDFLASHINT(x) ((x) << S_BLKRDFLASHINT)
760#define F_BLKRDFLASHINT V_BLKRDFLASHINT(1U)
761
762#define S_SGLWRFLASHINT 6
763#define V_SGLWRFLASHINT(x) ((x) << S_SGLWRFLASHINT)
764#define F_SGLWRFLASHINT V_SGLWRFLASHINT(1U)
765
766#define S_WRBLKFLASHINT 5
767#define V_WRBLKFLASHINT(x) ((x) << S_WRBLKFLASHINT)
768#define F_WRBLKFLASHINT V_WRBLKFLASHINT(1U)
769
770#define S_BLKWRBOOTINT 4
771#define V_BLKWRBOOTINT(x) ((x) << S_BLKWRBOOTINT)
772#define F_BLKWRBOOTINT V_BLKWRBOOTINT(1U)
773
774#define S_FLASHRANGEINT 2
775#define V_FLASHRANGEINT(x) ((x) << S_FLASHRANGEINT)
776#define F_FLASHRANGEINT V_FLASHRANGEINT(1U)
777
778#define S_SDRAMRANGEINT 1
779#define V_SDRAMRANGEINT(x) ((x) << S_SDRAMRANGEINT)
780#define F_SDRAMRANGEINT V_SDRAMRANGEINT(1U)
781
782#define S_RSVDSPACEINT 0
783#define V_RSVDSPACEINT(x) ((x) << S_RSVDSPACEINT)
784#define F_RSVDSPACEINT V_RSVDSPACEINT(1U)
785
786#define A_CIM_HOST_ACC_CTRL 0x2b0
787
788#define S_HOSTBUSY 17
789#define V_HOSTBUSY(x) ((x) << S_HOSTBUSY)
790#define F_HOSTBUSY V_HOSTBUSY(1U)
791
792#define A_CIM_HOST_ACC_DATA 0x2b4
793
794#define A_TP_IN_CONFIG 0x300
795
796#define S_NICMODE 14
797#define V_NICMODE(x) ((x) << S_NICMODE)
798#define F_NICMODE V_NICMODE(1U)
801
802#define S_IPV6ENABLE 15
803#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE)
804#define F_IPV6ENABLE V_IPV6ENABLE(1U)
805
806#define A_TP_OUT_CONFIG 0x304
807
808#define S_VLANEXTRACTIONENABLE 12
809
810#define A_TP_GLOBAL_CONFIG 0x308
811
812#define S_TXPACINGENABLE 24
813#define V_TXPACINGENABLE(x) ((x) << S_TXPACINGENABLE)
814#define F_TXPACINGENABLE V_TXPACINGENABLE(1U)
815
816#define S_PATHMTU 15
817#define V_PATHMTU(x) ((x) << S_PATHMTU)
818#define F_PATHMTU V_PATHMTU(1U)
819
820#define S_IPCHECKSUMOFFLOAD 13
821#define V_IPCHECKSUMOFFLOAD(x) ((x) << S_IPCHECKSUMOFFLOAD)
822#define F_IPCHECKSUMOFFLOAD V_IPCHECKSUMOFFLOAD(1U)
823
824#define S_UDPCHECKSUMOFFLOAD 12
825#define V_UDPCHECKSUMOFFLOAD(x) ((x) << S_UDPCHECKSUMOFFLOAD)
826#define F_UDPCHECKSUMOFFLOAD V_UDPCHECKSUMOFFLOAD(1U)
827
828#define S_TCPCHECKSUMOFFLOAD 11
829#define V_TCPCHECKSUMOFFLOAD(x) ((x) << S_TCPCHECKSUMOFFLOAD)
830#define F_TCPCHECKSUMOFFLOAD V_TCPCHECKSUMOFFLOAD(1U)
831
832#define S_IPTTL 0
833#define M_IPTTL 0xff
834#define V_IPTTL(x) ((x) << S_IPTTL)
835
836#define A_TP_CMM_MM_BASE 0x314
837
838#define A_TP_CMM_TIMER_BASE 0x318
839
840#define S_CMTIMERMAXNUM 28
841#define M_CMTIMERMAXNUM 0x3
842#define V_CMTIMERMAXNUM(x) ((x) << S_CMTIMERMAXNUM)
843
844#define A_TP_PMM_SIZE 0x31c
845
846#define A_TP_PMM_TX_BASE 0x320
847
848#define A_TP_PMM_RX_BASE 0x328
849
850#define A_TP_PMM_RX_PAGE_SIZE 0x32c
851
852#define A_TP_PMM_RX_MAX_PAGE 0x330
853
854#define A_TP_PMM_TX_PAGE_SIZE 0x334
855
856#define A_TP_PMM_TX_MAX_PAGE 0x338
857
858#define A_TP_TCP_OPTIONS 0x340
859
860#define S_MTUDEFAULT 16
861#define M_MTUDEFAULT 0xffff
862#define V_MTUDEFAULT(x) ((x) << S_MTUDEFAULT)
863
864#define S_MTUENABLE 10
865#define V_MTUENABLE(x) ((x) << S_MTUENABLE)
866#define F_MTUENABLE V_MTUENABLE(1U)
867
868#define S_SACKRX 8
869#define V_SACKRX(x) ((x) << S_SACKRX)
870#define F_SACKRX V_SACKRX(1U)
871
872#define S_SACKMODE 4
873
874#define M_SACKMODE 0x3
875
876#define V_SACKMODE(x) ((x) << S_SACKMODE)
877
878#define S_WINDOWSCALEMODE 2
879#define M_WINDOWSCALEMODE 0x3
880#define V_WINDOWSCALEMODE(x) ((x) << S_WINDOWSCALEMODE)
881
882#define S_TIMESTAMPSMODE 0
883
884#define M_TIMESTAMPSMODE 0x3
885
886#define V_TIMESTAMPSMODE(x) ((x) << S_TIMESTAMPSMODE)
887
888#define A_TP_DACK_CONFIG 0x344
889
890#define S_AUTOSTATE3 30
891#define M_AUTOSTATE3 0x3
892#define V_AUTOSTATE3(x) ((x) << S_AUTOSTATE3)
893
894#define S_AUTOSTATE2 28
895#define M_AUTOSTATE2 0x3
896#define V_AUTOSTATE2(x) ((x) << S_AUTOSTATE2)
897
898#define S_AUTOSTATE1 26
899#define M_AUTOSTATE1 0x3
900#define V_AUTOSTATE1(x) ((x) << S_AUTOSTATE1)
901
902#define S_BYTETHRESHOLD 5
903#define M_BYTETHRESHOLD 0xfffff
904#define V_BYTETHRESHOLD(x) ((x) << S_BYTETHRESHOLD)
905
906#define S_MSSTHRESHOLD 3
907#define M_MSSTHRESHOLD 0x3
908#define V_MSSTHRESHOLD(x) ((x) << S_MSSTHRESHOLD)
909
910#define S_AUTOCAREFUL 2
911#define V_AUTOCAREFUL(x) ((x) << S_AUTOCAREFUL)
912#define F_AUTOCAREFUL V_AUTOCAREFUL(1U)
913
914#define S_AUTOENABLE 1
915#define V_AUTOENABLE(x) ((x) << S_AUTOENABLE)
916#define F_AUTOENABLE V_AUTOENABLE(1U)
917
918#define S_DACK_MODE 0
919#define V_DACK_MODE(x) ((x) << S_DACK_MODE)
920#define F_DACK_MODE V_DACK_MODE(1U)
921
922#define A_TP_PC_CONFIG 0x348
923
924#define S_TXTOSQUEUEMAPMODE 26
925#define V_TXTOSQUEUEMAPMODE(x) ((x) << S_TXTOSQUEUEMAPMODE)
926#define F_TXTOSQUEUEMAPMODE V_TXTOSQUEUEMAPMODE(1U)
927
928#define S_ENABLEEPCMDAFULL 23
929#define V_ENABLEEPCMDAFULL(x) ((x) << S_ENABLEEPCMDAFULL)
930#define F_ENABLEEPCMDAFULL V_ENABLEEPCMDAFULL(1U)
931
932#define S_MODULATEUNIONMODE 22
933#define V_MODULATEUNIONMODE(x) ((x) << S_MODULATEUNIONMODE)
934#define F_MODULATEUNIONMODE V_MODULATEUNIONMODE(1U)
935
936#define S_TXDEFERENABLE 20
937#define V_TXDEFERENABLE(x) ((x) << S_TXDEFERENABLE)
938#define F_TXDEFERENABLE V_TXDEFERENABLE(1U)
939
940#define S_RXCONGESTIONMODE 19
941#define V_RXCONGESTIONMODE(x) ((x) << S_RXCONGESTIONMODE)
942#define F_RXCONGESTIONMODE V_RXCONGESTIONMODE(1U)
943
944#define S_HEARBEATDACK 16
945#define V_HEARBEATDACK(x) ((x) << S_HEARBEATDACK)
946#define F_HEARBEATDACK V_HEARBEATDACK(1U)
947
948#define S_TXCONGESTIONMODE 15
949#define V_TXCONGESTIONMODE(x) ((x) << S_TXCONGESTIONMODE)
950#define F_TXCONGESTIONMODE V_TXCONGESTIONMODE(1U)
951
952#define S_ENABLEOCSPIFULL 30
953#define V_ENABLEOCSPIFULL(x) ((x) << S_ENABLEOCSPIFULL)
954#define F_ENABLEOCSPIFULL V_ENABLEOCSPIFULL(1U)
955
956#define S_LOCKTID 28
957#define V_LOCKTID(x) ((x) << S_LOCKTID)
958#define F_LOCKTID V_LOCKTID(1U)
959
960#define A_TP_PC_CONFIG2 0x34c
961
962#define S_CHDRAFULL 4
963#define V_CHDRAFULL(x) ((x) << S_CHDRAFULL)
964#define F_CHDRAFULL V_CHDRAFULL(1U)
965
966#define A_TP_TCP_BACKOFF_REG0 0x350
967
968#define A_TP_TCP_BACKOFF_REG1 0x354
969
970#define A_TP_TCP_BACKOFF_REG2 0x358
971
972#define A_TP_TCP_BACKOFF_REG3 0x35c
973
974#define A_TP_PARA_REG2 0x368
975
976#define S_MAXRXDATA 16
977#define M_MAXRXDATA 0xffff
978#define V_MAXRXDATA(x) ((x) << S_MAXRXDATA)
979
980#define S_RXCOALESCESIZE 0
981#define M_RXCOALESCESIZE 0xffff
982#define V_RXCOALESCESIZE(x) ((x) << S_RXCOALESCESIZE)
983
984#define A_TP_PARA_REG3 0x36c
985
986#define S_TXDATAACKIDX 16
987#define M_TXDATAACKIDX 0xf
988
989#define V_TXDATAACKIDX(x) ((x) << S_TXDATAACKIDX)
990
991#define S_TXPACEAUTOSTRICT 10
992#define V_TXPACEAUTOSTRICT(x) ((x) << S_TXPACEAUTOSTRICT)
993#define F_TXPACEAUTOSTRICT V_TXPACEAUTOSTRICT(1U)
994
995#define S_TXPACEFIXED 9
996#define V_TXPACEFIXED(x) ((x) << S_TXPACEFIXED)
997#define F_TXPACEFIXED V_TXPACEFIXED(1U)
998
999#define S_TXPACEAUTO 8
1000#define V_TXPACEAUTO(x) ((x) << S_TXPACEAUTO)
1001#define F_TXPACEAUTO V_TXPACEAUTO(1U)
1002
1003#define S_RXCOALESCEENABLE 1
1004#define V_RXCOALESCEENABLE(x) ((x) << S_RXCOALESCEENABLE)
1005#define F_RXCOALESCEENABLE V_RXCOALESCEENABLE(1U)
1006
1007#define S_RXCOALESCEPSHEN 0
1008#define V_RXCOALESCEPSHEN(x) ((x) << S_RXCOALESCEPSHEN)
1009#define F_RXCOALESCEPSHEN V_RXCOALESCEPSHEN(1U)
1010
1011#define A_TP_PARA_REG4 0x370
1012
1013#define A_TP_PARA_REG6 0x378
1014
1015#define S_T3A_ENABLEESND 13
1016#define V_T3A_ENABLEESND(x) ((x) << S_T3A_ENABLEESND)
1017#define F_T3A_ENABLEESND V_T3A_ENABLEESND(1U)
1018
1019#define S_ENABLEESND 11
1020#define V_ENABLEESND(x) ((x) << S_ENABLEESND)
1021#define F_ENABLEESND V_ENABLEESND(1U)
1022
1023#define A_TP_PARA_REG7 0x37c
1024
1025#define S_PMMAXXFERLEN1 16
1026#define M_PMMAXXFERLEN1 0xffff
1027#define V_PMMAXXFERLEN1(x) ((x) << S_PMMAXXFERLEN1)
1028
1029#define S_PMMAXXFERLEN0 0
1030#define M_PMMAXXFERLEN0 0xffff
1031#define V_PMMAXXFERLEN0(x) ((x) << S_PMMAXXFERLEN0)
1032
1033#define A_TP_TIMER_RESOLUTION 0x390
1034
1035#define S_TIMERRESOLUTION 16
1036#define M_TIMERRESOLUTION 0xff
1037#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION)
1038
1039#define S_TIMESTAMPRESOLUTION 8
1040#define M_TIMESTAMPRESOLUTION 0xff
1041#define V_TIMESTAMPRESOLUTION(x) ((x) << S_TIMESTAMPRESOLUTION)
1042
1043#define S_DELAYEDACKRESOLUTION 0
1044#define M_DELAYEDACKRESOLUTION 0xff
1045#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION)
1046
1047#define A_TP_MSL 0x394
1048
1049#define A_TP_RXT_MIN 0x398
1050
1051#define A_TP_RXT_MAX 0x39c
1052
1053#define A_TP_PERS_MIN 0x3a0
1054
1055#define A_TP_PERS_MAX 0x3a4
1056
1057#define A_TP_KEEP_IDLE 0x3a8
1058
1059#define A_TP_KEEP_INTVL 0x3ac
1060
1061#define A_TP_INIT_SRTT 0x3b0
1062
1063#define A_TP_DACK_TIMER 0x3b4
1064
1065#define A_TP_FINWAIT2_TIMER 0x3b8
1066
1067#define A_TP_SHIFT_CNT 0x3c0
1068
1069#define S_SYNSHIFTMAX 24
1070
1071#define M_SYNSHIFTMAX 0xff
1072
1073#define V_SYNSHIFTMAX(x) ((x) << S_SYNSHIFTMAX)
1074
1075#define S_RXTSHIFTMAXR1 20
1076
1077#define M_RXTSHIFTMAXR1 0xf
1078
1079#define V_RXTSHIFTMAXR1(x) ((x) << S_RXTSHIFTMAXR1)
1080
1081#define S_RXTSHIFTMAXR2 16
1082
1083#define M_RXTSHIFTMAXR2 0xf
1084
1085#define V_RXTSHIFTMAXR2(x) ((x) << S_RXTSHIFTMAXR2)
1086
1087#define S_PERSHIFTBACKOFFMAX 12
1088#define M_PERSHIFTBACKOFFMAX 0xf
1089#define V_PERSHIFTBACKOFFMAX(x) ((x) << S_PERSHIFTBACKOFFMAX)
1090
1091#define S_PERSHIFTMAX 8
1092#define M_PERSHIFTMAX 0xf
1093#define V_PERSHIFTMAX(x) ((x) << S_PERSHIFTMAX)
1094
1095#define S_KEEPALIVEMAX 0
1096
1097#define M_KEEPALIVEMAX 0xff
1098
1099#define V_KEEPALIVEMAX(x) ((x) << S_KEEPALIVEMAX)
1100
1101#define A_TP_MTU_PORT_TABLE 0x3d0
1102
1103#define A_TP_CCTRL_TABLE 0x3dc
1104
1105#define A_TP_MTU_TABLE 0x3e4
1106
1107#define A_TP_RSS_MAP_TABLE 0x3e8
1108
1109#define A_TP_RSS_LKP_TABLE 0x3ec
1110
1111#define A_TP_RSS_CONFIG 0x3f0
1112
1113#define S_TNL4TUPEN 29
1114#define V_TNL4TUPEN(x) ((x) << S_TNL4TUPEN)
1115#define F_TNL4TUPEN V_TNL4TUPEN(1U)
1116
1117#define S_TNL2TUPEN 28
1118#define V_TNL2TUPEN(x) ((x) << S_TNL2TUPEN)
1119#define F_TNL2TUPEN V_TNL2TUPEN(1U)
1120
1121#define S_TNLPRTEN 26
1122#define V_TNLPRTEN(x) ((x) << S_TNLPRTEN)
1123#define F_TNLPRTEN V_TNLPRTEN(1U)
1124
1125#define S_TNLMAPEN 25
1126#define V_TNLMAPEN(x) ((x) << S_TNLMAPEN)
1127#define F_TNLMAPEN V_TNLMAPEN(1U)
1128
1129#define S_TNLLKPEN 24
1130#define V_TNLLKPEN(x) ((x) << S_TNLLKPEN)
1131#define F_TNLLKPEN V_TNLLKPEN(1U)
1132
1133#define S_RRCPLCPUSIZE 4
1134#define M_RRCPLCPUSIZE 0x7
1135#define V_RRCPLCPUSIZE(x) ((x) << S_RRCPLCPUSIZE)
1136
1137#define S_RQFEEDBACKENABLE 3
1138#define V_RQFEEDBACKENABLE(x) ((x) << S_RQFEEDBACKENABLE)
1139#define F_RQFEEDBACKENABLE V_RQFEEDBACKENABLE(1U)
1140
1141#define S_DISABLE 0
1142
1143#define A_TP_TM_PIO_ADDR 0x418
1144
1145#define A_TP_TM_PIO_DATA 0x41c
1146
1147#define A_TP_TX_MOD_QUE_TABLE 0x420
1148
1149#define A_TP_TX_RESOURCE_LIMIT 0x424
1150
1151#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x428
1152
1153#define S_TX_MOD_QUEUE_REQ_MAP 0
1154#define M_TX_MOD_QUEUE_REQ_MAP 0xff
1155#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
1156
1157#define A_TP_TX_MOD_QUEUE_WEIGHT1 0x42c
1158
1159#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x430
1160
1161#define A_TP_MOD_CHANNEL_WEIGHT 0x434
1162
1163#define A_TP_PIO_ADDR 0x440
1164
1165#define A_TP_PIO_DATA 0x444
1166
1167#define A_TP_RESET 0x44c
1168
1169#define S_FLSTINITENABLE 1
1170#define V_FLSTINITENABLE(x) ((x) << S_FLSTINITENABLE)
1171#define F_FLSTINITENABLE V_FLSTINITENABLE(1U)
1172
1173#define S_TPRESET 0
1174#define V_TPRESET(x) ((x) << S_TPRESET)
1175#define F_TPRESET V_TPRESET(1U)
1176
1177#define A_TP_CMM_MM_RX_FLST_BASE 0x460
1178
1179#define A_TP_CMM_MM_TX_FLST_BASE 0x464
1180
1181#define A_TP_CMM_MM_PS_FLST_BASE 0x468
1182
1183#define A_TP_MIB_INDEX 0x450
1184
1185#define A_TP_MIB_RDATA 0x454
1186
1187#define A_TP_CMM_MM_MAX_PSTRUCT 0x46c
1188
1189#define A_TP_INT_ENABLE 0x470
1190
1191#define A_TP_INT_CAUSE 0x474
1192
1193#define A_TP_TX_MOD_Q1_Q0_RATE_LIMIT 0x8
1194
1195#define A_TP_TX_DROP_CFG_CH0 0x12b
1196
1197#define A_TP_TX_DROP_MODE 0x12f
1198
1199#define A_TP_EGRESS_CONFIG 0x145
1200
1201#define S_REWRITEFORCETOSIZE 0
1202#define V_REWRITEFORCETOSIZE(x) ((x) << S_REWRITEFORCETOSIZE)
1203#define F_REWRITEFORCETOSIZE V_REWRITEFORCETOSIZE(1U)
1204
1205#define A_TP_TX_TRC_KEY0 0x20
1206
1207#define A_TP_RX_TRC_KEY0 0x120
1208
1209#define A_ULPRX_CTL 0x500
1210
1211#define S_ROUND_ROBIN 4
1212#define V_ROUND_ROBIN(x) ((x) << S_ROUND_ROBIN)
1213#define F_ROUND_ROBIN V_ROUND_ROBIN(1U)
1214
1215#define A_ULPRX_INT_ENABLE 0x504
1216
1217#define S_PARERR 0
1218#define V_PARERR(x) ((x) << S_PARERR)
1219#define F_PARERR V_PARERR(1U)
1220
1221#define A_ULPRX_INT_CAUSE 0x508
1222
1223#define A_ULPRX_ISCSI_LLIMIT 0x50c
1224
1225#define A_ULPRX_ISCSI_ULIMIT 0x510
1226
1227#define A_ULPRX_ISCSI_TAGMASK 0x514
1228
1229#define A_ULPRX_TDDP_LLIMIT 0x51c
1230
1231#define A_ULPRX_TDDP_ULIMIT 0x520
1232
1233#define A_ULPRX_STAG_LLIMIT 0x52c
1234
1235#define A_ULPRX_STAG_ULIMIT 0x530
1236
1237#define A_ULPRX_RQ_LLIMIT 0x534
1239
1240#define A_ULPRX_RQ_ULIMIT 0x538
1242
1243#define A_ULPRX_PBL_LLIMIT 0x53c
1244
1245#define A_ULPRX_PBL_ULIMIT 0x540
1247
1248#define A_ULPRX_TDDP_TAGMASK 0x524
1258
1259#define A_ULPTX_CONFIG 0x580
1260
1261#define S_CFG_RR_ARB 0
1262#define V_CFG_RR_ARB(x) ((x) << S_CFG_RR_ARB)
1263#define F_CFG_RR_ARB V_CFG_RR_ARB(1U)
1264
1265#define A_ULPTX_INT_ENABLE 0x584
1266
1267#define S_PBL_BOUND_ERR_CH1 1
1268#define V_PBL_BOUND_ERR_CH1(x) ((x) << S_PBL_BOUND_ERR_CH1)
1269#define F_PBL_BOUND_ERR_CH1 V_PBL_BOUND_ERR_CH1(1U)
1270
1271#define S_PBL_BOUND_ERR_CH0 0
1272#define V_PBL_BOUND_ERR_CH0(x) ((x) << S_PBL_BOUND_ERR_CH0)
1273#define F_PBL_BOUND_ERR_CH0 V_PBL_BOUND_ERR_CH0(1U)
1274
1275#define A_ULPTX_INT_CAUSE 0x588
1276
1277#define A_ULPTX_TPT_LLIMIT 0x58c
1278
1279#define A_ULPTX_TPT_ULIMIT 0x590
1280
1281#define A_ULPTX_PBL_LLIMIT 0x594
1282
1283#define A_ULPTX_PBL_ULIMIT 0x598
1284
1285#define A_ULPTX_DMA_WEIGHT 0x5ac
1286
1287#define S_D1_WEIGHT 16
1288#define M_D1_WEIGHT 0xffff
1289#define V_D1_WEIGHT(x) ((x) << S_D1_WEIGHT)
1290
1291#define S_D0_WEIGHT 0
1292#define M_D0_WEIGHT 0xffff
1293#define V_D0_WEIGHT(x) ((x) << S_D0_WEIGHT)
1294
1295#define A_PM1_RX_CFG 0x5c0
1296
1297#define A_PM1_RX_INT_ENABLE 0x5d8
1298
1299#define S_ZERO_E_CMD_ERROR 18
1300#define V_ZERO_E_CMD_ERROR(x) ((x) << S_ZERO_E_CMD_ERROR)
1301#define F_ZERO_E_CMD_ERROR V_ZERO_E_CMD_ERROR(1U)
1302
1303#define S_IESPI0_FIFO2X_RX_FRAMING_ERROR 17
1304#define V_IESPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_FIFO2X_RX_FRAMING_ERROR)
1305#define F_IESPI0_FIFO2X_RX_FRAMING_ERROR V_IESPI0_FIFO2X_RX_FRAMING_ERROR(1U)
1306
1307#define S_IESPI1_FIFO2X_RX_FRAMING_ERROR 16
1308#define V_IESPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_FIFO2X_RX_FRAMING_ERROR)
1309#define F_IESPI1_FIFO2X_RX_FRAMING_ERROR V_IESPI1_FIFO2X_RX_FRAMING_ERROR(1U)
1310
1311#define S_IESPI0_RX_FRAMING_ERROR 15
1312#define V_IESPI0_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_RX_FRAMING_ERROR)
1313#define F_IESPI0_RX_FRAMING_ERROR V_IESPI0_RX_FRAMING_ERROR(1U)
1314
1315#define S_IESPI1_RX_FRAMING_ERROR 14
1316#define V_IESPI1_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_RX_FRAMING_ERROR)
1317#define F_IESPI1_RX_FRAMING_ERROR V_IESPI1_RX_FRAMING_ERROR(1U)
1318
1319#define S_IESPI0_TX_FRAMING_ERROR 13
1320#define V_IESPI0_TX_FRAMING_ERROR(x) ((x) << S_IESPI0_TX_FRAMING_ERROR)
1321#define F_IESPI0_TX_FRAMING_ERROR V_IESPI0_TX_FRAMING_ERROR(1U)
1322
1323#define S_IESPI1_TX_FRAMING_ERROR 12
1324#define V_IESPI1_TX_FRAMING_ERROR(x) ((x) << S_IESPI1_TX_FRAMING_ERROR)
1325#define F_IESPI1_TX_FRAMING_ERROR V_IESPI1_TX_FRAMING_ERROR(1U)
1326
1327#define S_OCSPI0_RX_FRAMING_ERROR 11
1328#define V_OCSPI0_RX_FRAMING_ERROR(x) ((x) << S_OCSPI0_RX_FRAMING_ERROR)
1329#define F_OCSPI0_RX_FRAMING_ERROR V_OCSPI0_RX_FRAMING_ERROR(1U)
1330
1331#define S_OCSPI1_RX_FRAMING_ERROR 10
1332#define V_OCSPI1_RX_FRAMING_ERROR(x) ((x) << S_OCSPI1_RX_FRAMING_ERROR)
1333#define F_OCSPI1_RX_FRAMING_ERROR V_OCSPI1_RX_FRAMING_ERROR(1U)
1334
1335#define S_OCSPI0_TX_FRAMING_ERROR 9
1336#define V_OCSPI0_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_TX_FRAMING_ERROR)
1337#define F_OCSPI0_TX_FRAMING_ERROR V_OCSPI0_TX_FRAMING_ERROR(1U)
1338
1339#define S_OCSPI1_TX_FRAMING_ERROR 8
1340#define V_OCSPI1_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_TX_FRAMING_ERROR)
1341#define F_OCSPI1_TX_FRAMING_ERROR V_OCSPI1_TX_FRAMING_ERROR(1U)
1342
1343#define S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR 7
1344#define V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR)
1345#define F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
1346
1347#define S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR 6
1348#define V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1349#define F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
1350
1351#define S_IESPI_PAR_ERROR 3
1352#define M_IESPI_PAR_ERROR 0x7
1353
1354#define V_IESPI_PAR_ERROR(x) ((x) << S_IESPI_PAR_ERROR)
1355
1356#define S_OCSPI_PAR_ERROR 0
1357#define M_OCSPI_PAR_ERROR 0x7
1358
1359#define V_OCSPI_PAR_ERROR(x) ((x) << S_OCSPI_PAR_ERROR)
1360
1361#define A_PM1_RX_INT_CAUSE 0x5dc
1362
1363#define A_PM1_TX_CFG 0x5e0
1364
1365#define A_PM1_TX_INT_ENABLE 0x5f8
1366
1367#define S_ZERO_C_CMD_ERROR 18
1368#define V_ZERO_C_CMD_ERROR(x) ((x) << S_ZERO_C_CMD_ERROR)
1369#define F_ZERO_C_CMD_ERROR V_ZERO_C_CMD_ERROR(1U)
1370
1371#define S_ICSPI0_FIFO2X_RX_FRAMING_ERROR 17
1372#define V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_FIFO2X_RX_FRAMING_ERROR)
1373#define F_ICSPI0_FIFO2X_RX_FRAMING_ERROR V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(1U)
1374
1375#define S_ICSPI1_FIFO2X_RX_FRAMING_ERROR 16
1376#define V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_FIFO2X_RX_FRAMING_ERROR)
1377#define F_ICSPI1_FIFO2X_RX_FRAMING_ERROR V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(1U)
1378
1379#define S_ICSPI0_RX_FRAMING_ERROR 15
1380#define V_ICSPI0_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_RX_FRAMING_ERROR)
1381#define F_ICSPI0_RX_FRAMING_ERROR V_ICSPI0_RX_FRAMING_ERROR(1U)
1382
1383#define S_ICSPI1_RX_FRAMING_ERROR 14
1384#define V_ICSPI1_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_RX_FRAMING_ERROR)
1385#define F_ICSPI1_RX_FRAMING_ERROR V_ICSPI1_RX_FRAMING_ERROR(1U)
1386
1387#define S_ICSPI0_TX_FRAMING_ERROR 13
1388#define V_ICSPI0_TX_FRAMING_ERROR(x) ((x) << S_ICSPI0_TX_FRAMING_ERROR)
1389#define F_ICSPI0_TX_FRAMING_ERROR V_ICSPI0_TX_FRAMING_ERROR(1U)
1390
1391#define S_ICSPI1_TX_FRAMING_ERROR 12
1392#define V_ICSPI1_TX_FRAMING_ERROR(x) ((x) << S_ICSPI1_TX_FRAMING_ERROR)
1393#define F_ICSPI1_TX_FRAMING_ERROR V_ICSPI1_TX_FRAMING_ERROR(1U)
1394
1395#define S_OESPI0_RX_FRAMING_ERROR 11
1396#define V_OESPI0_RX_FRAMING_ERROR(x) ((x) << S_OESPI0_RX_FRAMING_ERROR)
1397#define F_OESPI0_RX_FRAMING_ERROR V_OESPI0_RX_FRAMING_ERROR(1U)
1398
1399#define S_OESPI1_RX_FRAMING_ERROR 10
1400#define V_OESPI1_RX_FRAMING_ERROR(x) ((x) << S_OESPI1_RX_FRAMING_ERROR)
1401#define F_OESPI1_RX_FRAMING_ERROR V_OESPI1_RX_FRAMING_ERROR(1U)
1402
1403#define S_OESPI0_TX_FRAMING_ERROR 9
1404#define V_OESPI0_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_TX_FRAMING_ERROR)
1405#define F_OESPI0_TX_FRAMING_ERROR V_OESPI0_TX_FRAMING_ERROR(1U)
1406
1407#define S_OESPI1_TX_FRAMING_ERROR 8
1408#define V_OESPI1_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_TX_FRAMING_ERROR)
1409#define F_OESPI1_TX_FRAMING_ERROR V_OESPI1_TX_FRAMING_ERROR(1U)
1410
1411#define S_OESPI0_OFIFO2X_TX_FRAMING_ERROR 7
1412#define V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_OFIFO2X_TX_FRAMING_ERROR)
1413#define F_OESPI0_OFIFO2X_TX_FRAMING_ERROR V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
1414
1415#define S_OESPI1_OFIFO2X_TX_FRAMING_ERROR 6
1416#define V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1417#define F_OESPI1_OFIFO2X_TX_FRAMING_ERROR V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
1418
1419#define S_ICSPI_PAR_ERROR 3
1420#define M_ICSPI_PAR_ERROR 0x7
1421
1422#define V_ICSPI_PAR_ERROR(x) ((x) << S_ICSPI_PAR_ERROR)
1423
1424#define S_OESPI_PAR_ERROR 0
1425#define M_OESPI_PAR_ERROR 0x7
1426
1427#define V_OESPI_PAR_ERROR(x) ((x) << S_OESPI_PAR_ERROR)
1428
1429#define A_PM1_TX_INT_CAUSE 0x5fc
1430
1431#define A_MPS_CFG 0x600
1432
1433#define S_TPRXPORTEN 4
1434#define V_TPRXPORTEN(x) ((x) << S_TPRXPORTEN)
1435#define F_TPRXPORTEN V_TPRXPORTEN(1U)
1436
1437#define S_TPTXPORT1EN 3
1438#define V_TPTXPORT1EN(x) ((x) << S_TPTXPORT1EN)
1439#define F_TPTXPORT1EN V_TPTXPORT1EN(1U)
1440
1441#define S_TPTXPORT0EN 2
1442#define V_TPTXPORT0EN(x) ((x) << S_TPTXPORT0EN)
1443#define F_TPTXPORT0EN V_TPTXPORT0EN(1U)
1444
1445#define S_PORT1ACTIVE 1
1446#define V_PORT1ACTIVE(x) ((x) << S_PORT1ACTIVE)
1447#define F_PORT1ACTIVE V_PORT1ACTIVE(1U)
1448
1449#define S_PORT0ACTIVE 0
1450#define V_PORT0ACTIVE(x) ((x) << S_PORT0ACTIVE)
1451#define F_PORT0ACTIVE V_PORT0ACTIVE(1U)
1452
1453#define S_ENFORCEPKT 11
1454#define V_ENFORCEPKT(x) ((x) << S_ENFORCEPKT)
1455#define F_ENFORCEPKT V_ENFORCEPKT(1U)
1456
1457#define A_MPS_INT_ENABLE 0x61c
1458
1459#define S_MCAPARERRENB 6
1460#define M_MCAPARERRENB 0x7
1461
1462#define V_MCAPARERRENB(x) ((x) << S_MCAPARERRENB)
1463
1464#define S_RXTPPARERRENB 4
1465#define M_RXTPPARERRENB 0x3
1466
1467#define V_RXTPPARERRENB(x) ((x) << S_RXTPPARERRENB)
1468
1469#define S_TX1TPPARERRENB 2
1470#define M_TX1TPPARERRENB 0x3
1471
1472#define V_TX1TPPARERRENB(x) ((x) << S_TX1TPPARERRENB)
1473
1474#define S_TX0TPPARERRENB 0
1475#define M_TX0TPPARERRENB 0x3
1476
1477#define V_TX0TPPARERRENB(x) ((x) << S_TX0TPPARERRENB)
1478
1479#define A_MPS_INT_CAUSE 0x620
1480
1481#define S_MCAPARERR 6
1482#define M_MCAPARERR 0x7
1483
1484#define V_MCAPARERR(x) ((x) << S_MCAPARERR)
1485
1486#define S_RXTPPARERR 4
1487#define M_RXTPPARERR 0x3
1488
1489#define V_RXTPPARERR(x) ((x) << S_RXTPPARERR)
1490
1491#define S_TX1TPPARERR 2
1492#define M_TX1TPPARERR 0x3
1493
1494#define V_TX1TPPARERR(x) ((x) << S_TX1TPPARERR)
1495
1496#define S_TX0TPPARERR 0
1497#define M_TX0TPPARERR 0x3
1498
1499#define V_TX0TPPARERR(x) ((x) << S_TX0TPPARERR)
1500
1501#define A_CPL_SWITCH_CNTRL 0x640
1502
1503#define A_CPL_INTR_ENABLE 0x650
1504
1505#define S_CIM_OVFL_ERROR 4
1506#define V_CIM_OVFL_ERROR(x) ((x) << S_CIM_OVFL_ERROR)
1507#define F_CIM_OVFL_ERROR V_CIM_OVFL_ERROR(1U)
1508
1509#define S_TP_FRAMING_ERROR 3
1510#define V_TP_FRAMING_ERROR(x) ((x) << S_TP_FRAMING_ERROR)
1511#define F_TP_FRAMING_ERROR V_TP_FRAMING_ERROR(1U)
1512
1513#define S_SGE_FRAMING_ERROR 2
1514#define V_SGE_FRAMING_ERROR(x) ((x) << S_SGE_FRAMING_ERROR)
1515#define F_SGE_FRAMING_ERROR V_SGE_FRAMING_ERROR(1U)
1516
1517#define S_CIM_FRAMING_ERROR 1
1518#define V_CIM_FRAMING_ERROR(x) ((x) << S_CIM_FRAMING_ERROR)
1519#define F_CIM_FRAMING_ERROR V_CIM_FRAMING_ERROR(1U)
1520
1521#define S_ZERO_SWITCH_ERROR 0
1522#define V_ZERO_SWITCH_ERROR(x) ((x) << S_ZERO_SWITCH_ERROR)
1523#define F_ZERO_SWITCH_ERROR V_ZERO_SWITCH_ERROR(1U)
1524
1525#define A_CPL_INTR_CAUSE 0x654
1526
1527#define A_CPL_MAP_TBL_DATA 0x65c
1528
1529#define A_SMB_GLOBAL_TIME_CFG 0x660
1530
1531#define A_I2C_CFG 0x6a0
1532
1533#define S_I2C_CLKDIV 0
1534#define M_I2C_CLKDIV 0xfff
1535#define V_I2C_CLKDIV(x) ((x) << S_I2C_CLKDIV)
1536
1537#define A_MI1_CFG 0x6b0
1538
1539#define S_CLKDIV 5
1540#define M_CLKDIV 0xff
1541#define V_CLKDIV(x) ((x) << S_CLKDIV)
1542
1543#define S_ST 3
1544
1545#define M_ST 0x3
1546
1547#define V_ST(x) ((x) << S_ST)
1548
1549#define G_ST(x) (((x) >> S_ST) & M_ST)
1550
1551#define S_PREEN 2
1552#define V_PREEN(x) ((x) << S_PREEN)
1553#define F_PREEN V_PREEN(1U)
1554
1555#define S_MDIINV 1
1556#define V_MDIINV(x) ((x) << S_MDIINV)
1557#define F_MDIINV V_MDIINV(1U)
1558
1559#define S_MDIEN 0
1560#define V_MDIEN(x) ((x) << S_MDIEN)
1561#define F_MDIEN V_MDIEN(1U)
1562
1563#define A_MI1_ADDR 0x6b4
1564
1565#define S_PHYADDR 5
1566#define M_PHYADDR 0x1f
1567#define V_PHYADDR(x) ((x) << S_PHYADDR)
1568
1569#define S_REGADDR 0
1570#define M_REGADDR 0x1f
1571#define V_REGADDR(x) ((x) << S_REGADDR)
1572
1573#define A_MI1_DATA 0x6b8
1574
1575#define A_MI1_OP 0x6bc
1576
1577#define S_MDI_OP 0
1578#define M_MDI_OP 0x3
1579#define V_MDI_OP(x) ((x) << S_MDI_OP)
1580
1581#define A_SF_DATA 0x6d8
1582
1583#define A_SF_OP 0x6dc
1584
1585#define S_BYTECNT 1
1586#define M_BYTECNT 0x3
1587#define V_BYTECNT(x) ((x) << S_BYTECNT)
1588
1589#define A_PL_INT_ENABLE0 0x6e0
1590
1591#define S_T3DBG 23
1592#define V_T3DBG(x) ((x) << S_T3DBG)
1593#define F_T3DBG V_T3DBG(1U)
1594
1595#define S_XGMAC0_1 20
1596#define V_XGMAC0_1(x) ((x) << S_XGMAC0_1)
1597#define F_XGMAC0_1 V_XGMAC0_1(1U)
1598
1599#define S_XGMAC0_0 19
1600#define V_XGMAC0_0(x) ((x) << S_XGMAC0_0)
1601#define F_XGMAC0_0 V_XGMAC0_0(1U)
1602
1603#define S_MC5A 18
1604#define V_MC5A(x) ((x) << S_MC5A)
1605#define F_MC5A V_MC5A(1U)
1606
1607#define S_CPL_SWITCH 12
1608#define V_CPL_SWITCH(x) ((x) << S_CPL_SWITCH)
1609#define F_CPL_SWITCH V_CPL_SWITCH(1U)
1610
1611#define S_MPS0 11
1612#define V_MPS0(x) ((x) << S_MPS0)
1613#define F_MPS0 V_MPS0(1U)
1614
1615#define S_PM1_TX 10
1616#define V_PM1_TX(x) ((x) << S_PM1_TX)
1617#define F_PM1_TX V_PM1_TX(1U)
1618
1619#define S_PM1_RX 9
1620#define V_PM1_RX(x) ((x) << S_PM1_RX)
1621#define F_PM1_RX V_PM1_RX(1U)
1622
1623#define S_ULP2_TX 8
1624#define V_ULP2_TX(x) ((x) << S_ULP2_TX)
1625#define F_ULP2_TX V_ULP2_TX(1U)
1626
1627#define S_ULP2_RX 7
1628#define V_ULP2_RX(x) ((x) << S_ULP2_RX)
1629#define F_ULP2_RX V_ULP2_RX(1U)
1630
1631#define S_TP1 6
1632#define V_TP1(x) ((x) << S_TP1)
1633#define F_TP1 V_TP1(1U)
1634
1635#define S_CIM 5
1636#define V_CIM(x) ((x) << S_CIM)
1637#define F_CIM V_CIM(1U)
1638
1639#define S_MC7_CM 4
1640#define V_MC7_CM(x) ((x) << S_MC7_CM)
1641#define F_MC7_CM V_MC7_CM(1U)
1642
1643#define S_MC7_PMTX 3
1644#define V_MC7_PMTX(x) ((x) << S_MC7_PMTX)
1645#define F_MC7_PMTX V_MC7_PMTX(1U)
1646
1647#define S_MC7_PMRX 2
1648#define V_MC7_PMRX(x) ((x) << S_MC7_PMRX)
1649#define F_MC7_PMRX V_MC7_PMRX(1U)
1650
1651#define S_PCIM0 1
1652#define V_PCIM0(x) ((x) << S_PCIM0)
1653#define F_PCIM0 V_PCIM0(1U)
1654
1655#define S_SGE3 0
1656#define V_SGE3(x) ((x) << S_SGE3)
1657#define F_SGE3 V_SGE3(1U)
1658
1659#define A_PL_INT_CAUSE0 0x6e4
1660
1661#define A_PL_RST 0x6f0
1662
1663#define S_CRSTWRM 1
1664#define V_CRSTWRM(x) ((x) << S_CRSTWRM)
1665#define F_CRSTWRM V_CRSTWRM(1U)
1666
1667#define A_PL_REV 0x6f4
1668
1669#define A_PL_CLI 0x6f8
1670
1671#define A_MC5_DB_CONFIG 0x704
1672
1673#define S_TMTYPEHI 30
1674#define V_TMTYPEHI(x) ((x) << S_TMTYPEHI)
1675#define F_TMTYPEHI V_TMTYPEHI(1U)
1676
1677#define S_TMPARTSIZE 28
1678#define M_TMPARTSIZE 0x3
1679#define V_TMPARTSIZE(x) ((x) << S_TMPARTSIZE)
1680#define G_TMPARTSIZE(x) (((x) >> S_TMPARTSIZE) & M_TMPARTSIZE)
1681
1682#define S_TMTYPE 26
1683#define M_TMTYPE 0x3
1684#define V_TMTYPE(x) ((x) << S_TMTYPE)
1685#define G_TMTYPE(x) (((x) >> S_TMTYPE) & M_TMTYPE)
1686
1687#define S_COMPEN 17
1688#define V_COMPEN(x) ((x) << S_COMPEN)
1689#define F_COMPEN V_COMPEN(1U)
1690
1691#define S_PRTYEN 6
1692#define V_PRTYEN(x) ((x) << S_PRTYEN)
1693#define F_PRTYEN V_PRTYEN(1U)
1694
1695#define S_MBUSEN 5
1696#define V_MBUSEN(x) ((x) << S_MBUSEN)
1697#define F_MBUSEN V_MBUSEN(1U)
1698
1699#define S_DBGIEN 4
1700#define V_DBGIEN(x) ((x) << S_DBGIEN)
1701#define F_DBGIEN V_DBGIEN(1U)
1702
1703#define S_TMRDY 2
1704#define V_TMRDY(x) ((x) << S_TMRDY)
1705#define F_TMRDY V_TMRDY(1U)
1706
1707#define S_TMRST 1
1708#define V_TMRST(x) ((x) << S_TMRST)
1709#define F_TMRST V_TMRST(1U)
1710
1711#define S_TMMODE 0
1712#define V_TMMODE(x) ((x) << S_TMMODE)
1713#define F_TMMODE V_TMMODE(1U)
1716
1717#define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c
1718
1719#define A_MC5_DB_FILTER_TABLE 0x710
1720
1721#define A_MC5_DB_SERVER_INDEX 0x714
1722
1723#define A_MC5_DB_RSP_LATENCY 0x720
1724
1725#define S_RDLAT 16
1726#define M_RDLAT 0x1f
1727#define V_RDLAT(x) ((x) << S_RDLAT)
1728
1729#define S_LRNLAT 8
1730#define M_LRNLAT 0x1f
1731#define V_LRNLAT(x) ((x) << S_LRNLAT)
1732
1733#define S_SRCHLAT 0
1734#define M_SRCHLAT 0x1f
1735#define V_SRCHLAT(x) ((x) << S_SRCHLAT)
1736
1737#define A_MC5_DB_PART_ID_INDEX 0x72c
1738
1739#define A_MC5_DB_INT_ENABLE 0x740
1740
1741#define S_DELACTEMPTY 18
1742#define V_DELACTEMPTY(x) ((x) << S_DELACTEMPTY)
1743#define F_DELACTEMPTY V_DELACTEMPTY(1U)
1744
1745#define S_DISPQPARERR 17
1746#define V_DISPQPARERR(x) ((x) << S_DISPQPARERR)
1747#define F_DISPQPARERR V_DISPQPARERR(1U)
1748
1749#define S_REQQPARERR 16
1750#define V_REQQPARERR(x) ((x) << S_REQQPARERR)
1751#define F_REQQPARERR V_REQQPARERR(1U)
1752
1753#define S_UNKNOWNCMD 15
1754#define V_UNKNOWNCMD(x) ((x) << S_UNKNOWNCMD)
1755#define F_UNKNOWNCMD V_UNKNOWNCMD(1U)
1756
1757#define S_NFASRCHFAIL 8
1758#define V_NFASRCHFAIL(x) ((x) << S_NFASRCHFAIL)
1759#define F_NFASRCHFAIL V_NFASRCHFAIL(1U)
1760
1761#define S_ACTRGNFULL 7
1762#define V_ACTRGNFULL(x) ((x) << S_ACTRGNFULL)
1763#define F_ACTRGNFULL V_ACTRGNFULL(1U)
1764
1765#define S_PARITYERR 6
1766#define V_PARITYERR(x) ((x) << S_PARITYERR)
1767#define F_PARITYERR V_PARITYERR(1U)
1768
1769#define A_MC5_DB_INT_CAUSE 0x744
1770
1771#define A_MC5_DB_DBGI_CONFIG 0x774
1772
1773#define A_MC5_DB_DBGI_REQ_CMD 0x778
1774
1775#define A_MC5_DB_DBGI_REQ_ADDR0 0x77c
1776
1777#define A_MC5_DB_DBGI_REQ_ADDR1 0x780
1778
1779#define A_MC5_DB_DBGI_REQ_ADDR2 0x784
1780
1781#define A_MC5_DB_DBGI_REQ_DATA0 0x788
1782
1783#define A_MC5_DB_DBGI_REQ_DATA1 0x78c
1784
1785#define A_MC5_DB_DBGI_REQ_DATA2 0x790
1786
1787#define A_MC5_DB_DBGI_RSP_STATUS 0x7b0
1788
1789#define S_DBGIRSPVALID 0
1790#define V_DBGIRSPVALID(x) ((x) << S_DBGIRSPVALID)
1791#define F_DBGIRSPVALID V_DBGIRSPVALID(1U)
1792
1793#define A_MC5_DB_DBGI_RSP_DATA0 0x7b4
1794
1795#define A_MC5_DB_DBGI_RSP_DATA1 0x7b8
1796
1797#define A_MC5_DB_DBGI_RSP_DATA2 0x7bc
1798
1799#define A_MC5_DB_POPEN_DATA_WR_CMD 0x7cc
1800
1801#define A_MC5_DB_POPEN_MASK_WR_CMD 0x7d0
1802
1803#define A_MC5_DB_AOPEN_SRCH_CMD 0x7d4
1804
1805#define A_MC5_DB_AOPEN_LRN_CMD 0x7d8
1806
1807#define A_MC5_DB_SYN_SRCH_CMD 0x7dc
1808
1809#define A_MC5_DB_SYN_LRN_CMD 0x7e0
1810
1811#define A_MC5_DB_ACK_SRCH_CMD 0x7e4
1812
1813#define A_MC5_DB_ACK_LRN_CMD 0x7e8
1814
1815#define A_MC5_DB_ILOOKUP_CMD 0x7ec
1816
1817#define A_MC5_DB_ELOOKUP_CMD 0x7f0
1818
1819#define A_MC5_DB_DATA_WRITE_CMD 0x7f4
1820
1821#define A_MC5_DB_DATA_READ_CMD 0x7f8
1822
1823#define XGMAC0_0_BASE_ADDR 0x800
1824
1825#define A_XGM_TX_CTRL 0x800
1826
1827#define S_TXEN 0
1828#define V_TXEN(x) ((x) << S_TXEN)
1829#define F_TXEN V_TXEN(1U)
1830
1831#define A_XGM_TX_CFG 0x804
1832
1833#define S_TXPAUSEEN 0
1834#define V_TXPAUSEEN(x) ((x) << S_TXPAUSEEN)
1835#define F_TXPAUSEEN V_TXPAUSEEN(1U)
1836
1837#define A_XGM_RX_CTRL 0x80c
1838
1839#define S_RXEN 0
1840#define V_RXEN(x) ((x) << S_RXEN)
1841#define F_RXEN V_RXEN(1U)
1842
1843#define A_XGM_RX_CFG 0x810
1844
1845#define S_DISPAUSEFRAMES 9
1846#define V_DISPAUSEFRAMES(x) ((x) << S_DISPAUSEFRAMES)
1847#define F_DISPAUSEFRAMES V_DISPAUSEFRAMES(1U)
1848
1849#define S_EN1536BFRAMES 8
1850#define V_EN1536BFRAMES(x) ((x) << S_EN1536BFRAMES)
1851#define F_EN1536BFRAMES V_EN1536BFRAMES(1U)
1852
1853#define S_ENJUMBO 7
1854#define V_ENJUMBO(x) ((x) << S_ENJUMBO)
1855#define F_ENJUMBO V_ENJUMBO(1U)
1856
1857#define S_RMFCS 6
1858#define V_RMFCS(x) ((x) << S_RMFCS)
1859#define F_RMFCS V_RMFCS(1U)
1860
1861#define S_ENHASHMCAST 2
1862#define V_ENHASHMCAST(x) ((x) << S_ENHASHMCAST)
1863#define F_ENHASHMCAST V_ENHASHMCAST(1U)
1864
1865#define S_COPYALLFRAMES 0
1866#define V_COPYALLFRAMES(x) ((x) << S_COPYALLFRAMES)
1867#define F_COPYALLFRAMES V_COPYALLFRAMES(1U)
1868
1869#define A_XGM_RX_HASH_LOW 0x814
1870
1871#define A_XGM_RX_HASH_HIGH 0x818
1872
1873#define A_XGM_RX_EXACT_MATCH_LOW_1 0x81c
1874
1875#define A_XGM_RX_EXACT_MATCH_HIGH_1 0x820
1876
1877#define A_XGM_RX_EXACT_MATCH_LOW_2 0x824
1878
1879#define A_XGM_RX_EXACT_MATCH_LOW_3 0x82c
1880
1881#define A_XGM_RX_EXACT_MATCH_LOW_4 0x834
1882
1883#define A_XGM_RX_EXACT_MATCH_LOW_5 0x83c
1884
1885#define A_XGM_RX_EXACT_MATCH_LOW_6 0x844
1886
1887#define A_XGM_RX_EXACT_MATCH_LOW_7 0x84c
1888
1889#define A_XGM_RX_EXACT_MATCH_LOW_8 0x854
1890
1891#define A_XGM_STAT_CTRL 0x880
1892
1893#define S_CLRSTATS 2
1894#define V_CLRSTATS(x) ((x) << S_CLRSTATS)
1895#define F_CLRSTATS V_CLRSTATS(1U)
1896
1897#define A_XGM_RXFIFO_CFG 0x884
1898
1899#define S_RXFIFOPAUSEHWM 17
1900#define M_RXFIFOPAUSEHWM 0xfff
1901
1902#define V_RXFIFOPAUSEHWM(x) ((x) << S_RXFIFOPAUSEHWM)
1903
1904#define G_RXFIFOPAUSEHWM(x) (((x) >> S_RXFIFOPAUSEHWM) & M_RXFIFOPAUSEHWM)
1905
1906#define S_RXFIFOPAUSELWM 5
1907#define M_RXFIFOPAUSELWM 0xfff
1908
1909#define V_RXFIFOPAUSELWM(x) ((x) << S_RXFIFOPAUSELWM)
1910
1911#define G_RXFIFOPAUSELWM(x) (((x) >> S_RXFIFOPAUSELWM) & M_RXFIFOPAUSELWM)
1912
1913#define S_RXSTRFRWRD 1
1914#define V_RXSTRFRWRD(x) ((x) << S_RXSTRFRWRD)
1915#define F_RXSTRFRWRD V_RXSTRFRWRD(1U)
1916
1917#define S_DISERRFRAMES 0
1918#define V_DISERRFRAMES(x) ((x) << S_DISERRFRAMES)
1919#define F_DISERRFRAMES V_DISERRFRAMES(1U)
1920
1921#define A_XGM_TXFIFO_CFG 0x888
1922
1923#define S_TXFIFOTHRESH 4
1924#define M_TXFIFOTHRESH 0x1ff
1925
1926#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH)
1927
1928#define A_XGM_SERDES_CTRL 0x890
1930
1931#define S_SERDESRESET_ 24
1932#define V_SERDESRESET_(x) ((x) << S_SERDESRESET_)
1933#define F_SERDESRESET_ V_SERDESRESET_(1U)
1934
1935#define S_RXENABLE 4
1936#define V_RXENABLE(x) ((x) << S_RXENABLE)
1937#define F_RXENABLE V_RXENABLE(1U)
1938
1939#define S_TXENABLE 3
1940#define V_TXENABLE(x) ((x) << S_TXENABLE)
1941#define F_TXENABLE V_TXENABLE(1U)
1942
1943#define A_XGM_PAUSE_TIMER 0x890
1944
1945#define A_XGM_RGMII_IMP 0x89c
1946
1947#define S_XGM_IMPSETUPDATE 6
1948#define V_XGM_IMPSETUPDATE(x) ((x) << S_XGM_IMPSETUPDATE)
1949#define F_XGM_IMPSETUPDATE V_XGM_IMPSETUPDATE(1U)
1950
1951#define S_RGMIIIMPPD 3
1952#define M_RGMIIIMPPD 0x7
1953#define V_RGMIIIMPPD(x) ((x) << S_RGMIIIMPPD)
1954
1955#define S_RGMIIIMPPU 0
1956#define M_RGMIIIMPPU 0x7
1957#define V_RGMIIIMPPU(x) ((x) << S_RGMIIIMPPU)
1958
1959#define S_CALRESET 8
1960#define V_CALRESET(x) ((x) << S_CALRESET)
1961#define F_CALRESET V_CALRESET(1U)
1962
1963#define S_CALUPDATE 7
1964#define V_CALUPDATE(x) ((x) << S_CALUPDATE)
1965#define F_CALUPDATE V_CALUPDATE(1U)
1966
1967#define A_XGM_XAUI_IMP 0x8a0
1968
1969#define S_CALBUSY 31
1970#define V_CALBUSY(x) ((x) << S_CALBUSY)
1971#define F_CALBUSY V_CALBUSY(1U)
1972
1973#define S_XGM_CALFAULT 29
1974#define V_XGM_CALFAULT(x) ((x) << S_XGM_CALFAULT)
1975#define F_XGM_CALFAULT V_XGM_CALFAULT(1U)
1976
1977#define S_CALIMP 24
1978#define M_CALIMP 0x1f
1979#define V_CALIMP(x) ((x) << S_CALIMP)
1980#define G_CALIMP(x) (((x) >> S_CALIMP) & M_CALIMP)
1981
1982#define S_XAUIIMP 0
1983#define M_XAUIIMP 0x7
1984#define V_XAUIIMP(x) ((x) << S_XAUIIMP)
1985
1986#define A_XGM_RX_MAX_PKT_SIZE 0x8a8
1988
1989#define A_XGM_RESET_CTRL 0x8ac
1990
1991#define S_XG2G_RESET_ 3
1992#define V_XG2G_RESET_(x) ((x) << S_XG2G_RESET_)
1993#define F_XG2G_RESET_ V_XG2G_RESET_(1U)
1994
1995#define S_RGMII_RESET_ 2
1996#define V_RGMII_RESET_(x) ((x) << S_RGMII_RESET_)
1997#define F_RGMII_RESET_ V_RGMII_RESET_(1U)
1998
1999#define S_PCS_RESET_ 1
2000#define V_PCS_RESET_(x) ((x) << S_PCS_RESET_)
2001#define F_PCS_RESET_ V_PCS_RESET_(1U)
2002
2003#define S_MAC_RESET_ 0
2004#define V_MAC_RESET_(x) ((x) << S_MAC_RESET_)
2005#define F_MAC_RESET_ V_MAC_RESET_(1U)
2006
2007#define A_XGM_PORT_CFG 0x8b8
2008
2009#define S_CLKDIVRESET_ 3
2010#define V_CLKDIVRESET_(x) ((x) << S_CLKDIVRESET_)
2011#define F_CLKDIVRESET_ V_CLKDIVRESET_(1U)
2012
2013#define S_PORTSPEED 1
2014#define M_PORTSPEED 0x3
2015
2016#define V_PORTSPEED(x) ((x) << S_PORTSPEED)
2017
2018#define S_ENRGMII 0
2019#define V_ENRGMII(x) ((x) << S_ENRGMII)
2020#define F_ENRGMII V_ENRGMII(1U)
2021
2022#define A_XGM_INT_ENABLE 0x8d4
2023
2024#define S_TXFIFO_PRTY_ERR 17
2025#define M_TXFIFO_PRTY_ERR 0x7
2026
2027#define V_TXFIFO_PRTY_ERR(x) ((x) << S_TXFIFO_PRTY_ERR)
2028
2029#define S_RXFIFO_PRTY_ERR 14
2030#define M_RXFIFO_PRTY_ERR 0x7
2031
2032#define V_RXFIFO_PRTY_ERR(x) ((x) << S_RXFIFO_PRTY_ERR)
2033
2034#define S_TXFIFO_UNDERRUN 13
2035#define V_TXFIFO_UNDERRUN(x) ((x) << S_TXFIFO_UNDERRUN)
2036#define F_TXFIFO_UNDERRUN V_TXFIFO_UNDERRUN(1U)
2037
2038#define S_RXFIFO_OVERFLOW 12
2039#define V_RXFIFO_OVERFLOW(x) ((x) << S_RXFIFO_OVERFLOW)
2040#define F_RXFIFO_OVERFLOW V_RXFIFO_OVERFLOW(1U)
2041
2042#define S_SERDES_LOS 4
2043#define M_SERDES_LOS 0xf
2044
2045#define V_SERDES_LOS(x) ((x) << S_SERDES_LOS)
2046
2047#define S_XAUIPCSCTCERR 3
2048#define V_XAUIPCSCTCERR(x) ((x) << S_XAUIPCSCTCERR)
2049#define F_XAUIPCSCTCERR V_XAUIPCSCTCERR(1U)
2050
2051#define S_XAUIPCSALIGNCHANGE 2
2052#define V_XAUIPCSALIGNCHANGE(x) ((x) << S_XAUIPCSALIGNCHANGE)
2053#define F_XAUIPCSALIGNCHANGE V_XAUIPCSALIGNCHANGE(1U)
2054
2055#define A_XGM_INT_CAUSE 0x8d8
2056
2057#define A_XGM_XAUI_ACT_CTRL 0x8dc
2058
2059#define S_TXACTENABLE 1
2060#define V_TXACTENABLE(x) ((x) << S_TXACTENABLE)
2061#define F_TXACTENABLE V_TXACTENABLE(1U)
2062
2063#define A_XGM_SERDES_CTRL0 0x8e0
2064
2065#define S_RESET3 23
2066#define V_RESET3(x) ((x) << S_RESET3)
2067#define F_RESET3 V_RESET3(1U)
2068
2069#define S_RESET2 22
2070#define V_RESET2(x) ((x) << S_RESET2)
2071#define F_RESET2 V_RESET2(1U)
2072
2073#define S_RESET1 21
2074#define V_RESET1(x) ((x) << S_RESET1)
2075#define F_RESET1 V_RESET1(1U)
2076
2077#define S_RESET0 20
2078#define V_RESET0(x) ((x) << S_RESET0)
2079#define F_RESET0 V_RESET0(1U)
2080
2081#define S_PWRDN3 19
2082#define V_PWRDN3(x) ((x) << S_PWRDN3)
2083#define F_PWRDN3 V_PWRDN3(1U)
2084
2085#define S_PWRDN2 18
2086#define V_PWRDN2(x) ((x) << S_PWRDN2)
2087#define F_PWRDN2 V_PWRDN2(1U)
2088
2089#define S_PWRDN1 17
2090#define V_PWRDN1(x) ((x) << S_PWRDN1)
2091#define F_PWRDN1 V_PWRDN1(1U)
2092
2093#define S_PWRDN0 16
2094#define V_PWRDN0(x) ((x) << S_PWRDN0)
2095#define F_PWRDN0 V_PWRDN0(1U)
2096
2097#define S_RESETPLL23 15
2098#define V_RESETPLL23(x) ((x) << S_RESETPLL23)
2099#define F_RESETPLL23 V_RESETPLL23(1U)
2100
2101#define S_RESETPLL01 14
2102#define V_RESETPLL01(x) ((x) << S_RESETPLL01)
2103#define F_RESETPLL01 V_RESETPLL01(1U)
2104
2105#define A_XGM_SERDES_STAT0 0x8f0
2106
2107#define S_LOWSIG0 0
2108#define V_LOWSIG0(x) ((x) << S_LOWSIG0)
2109#define F_LOWSIG0 V_LOWSIG0(1U)
2110
2111#define A_XGM_SERDES_STAT3 0x8fc
2112
2113#define A_XGM_STAT_TX_BYTE_LOW 0x900
2114
2115#define A_XGM_STAT_TX_BYTE_HIGH 0x904
2116
2117#define A_XGM_STAT_TX_FRAME_LOW 0x908
2118
2119#define A_XGM_STAT_TX_FRAME_HIGH 0x90c
2120
2121#define A_XGM_STAT_TX_BCAST 0x910
2122
2123#define A_XGM_STAT_TX_MCAST 0x914
2124
2125#define A_XGM_STAT_TX_PAUSE 0x918
2126
2127#define A_XGM_STAT_TX_64B_FRAMES 0x91c
2128
2129#define A_XGM_STAT_TX_65_127B_FRAMES 0x920
2130
2131#define A_XGM_STAT_TX_128_255B_FRAMES 0x924
2132
2133#define A_XGM_STAT_TX_256_511B_FRAMES 0x928
2134
2135#define A_XGM_STAT_TX_512_1023B_FRAMES 0x92c
2136
2137#define A_XGM_STAT_TX_1024_1518B_FRAMES 0x930
2138
2139#define A_XGM_STAT_TX_1519_MAXB_FRAMES 0x934
2140
2141#define A_XGM_STAT_TX_ERR_FRAMES 0x938
2142
2143#define A_XGM_STAT_RX_BYTES_LOW 0x93c
2144
2145#define A_XGM_STAT_RX_BYTES_HIGH 0x940
2146
2147#define A_XGM_STAT_RX_FRAMES_LOW 0x944
2148
2149#define A_XGM_STAT_RX_FRAMES_HIGH 0x948
2150
2151#define A_XGM_STAT_RX_BCAST_FRAMES 0x94c
2152
2153#define A_XGM_STAT_RX_MCAST_FRAMES 0x950
2154
2155#define A_XGM_STAT_RX_PAUSE_FRAMES 0x954
2156
2157#define A_XGM_STAT_RX_64B_FRAMES 0x958
2158
2159#define A_XGM_STAT_RX_65_127B_FRAMES 0x95c
2160
2161#define A_XGM_STAT_RX_128_255B_FRAMES 0x960
2162
2163#define A_XGM_STAT_RX_256_511B_FRAMES 0x964
2164
2165#define A_XGM_STAT_RX_512_1023B_FRAMES 0x968
2166
2167#define A_XGM_STAT_RX_1024_1518B_FRAMES 0x96c
2168
2169#define A_XGM_STAT_RX_1519_MAXB_FRAMES 0x970
2170
2171#define A_XGM_STAT_RX_SHORT_FRAMES 0x974
2172
2173#define A_XGM_STAT_RX_OVERSIZE_FRAMES 0x978
2174
2175#define A_XGM_STAT_RX_JABBER_FRAMES 0x97c
2176
2177#define A_XGM_STAT_RX_CRC_ERR_FRAMES 0x980
2178
2179#define A_XGM_STAT_RX_LENGTH_ERR_FRAMES 0x984
2180
2181#define A_XGM_STAT_RX_SYM_CODE_ERR_FRAMES 0x988
2182
2183#define A_XGM_SERDES_STATUS0 0x98c
2184
2185#define A_XGM_SERDES_STATUS1 0x990
2186
2187#define S_CMULOCK 31
2188#define V_CMULOCK(x) ((x) << S_CMULOCK)
2189#define F_CMULOCK V_CMULOCK(1U)
2190
2191#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
2192
2193#define A_XGM_RX_SPI4_SOP_EOP_CNT 0x9ac
2194
2195#define XGMAC0_1_BASE_ADDR 0xa00
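Throughout regs.h the prefixes follow one convention: S_<FIELD> is a field's starting bit position, M_<FIELD> the mask of a multi-bit field, V_<FIELD>(x) shifts a value into position, F_<FIELD> is the single-bit form V_<FIELD>(1U), and G_<FIELD>(x) extracts a field from a register word. A minimal sketch of how driver code combines these with the t3_read_reg()/t3_write_reg() accessors from adapter.h; the MC7 fields and the values written here are purely illustrative, not recommended settings:

/* Illustrative sketch: rewrite the WIDTH field and IFEN flag of A_MC7_CFG,
 * then poll the single-bit RDY flag.
 */
static void mc7_cfg_sketch(struct adapter *adap)
{
	u32 cfg = t3_read_reg(adap, A_MC7_CFG);

	cfg &= ~V_WIDTH(M_WIDTH);	/* clear the 2-bit WIDTH field */
	cfg |= V_WIDTH(2) | F_IFEN;	/* insert a new width, set IFEN */
	t3_write_reg(adap, A_MC7_CFG, cfg);

	while (!(t3_read_reg(adap, A_MC7_CFG) & F_RDY))
		cpu_relax();		/* busy-wait until memory is ready */
}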
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
new file mode 100644
index 000000000000..3f2cf8a07c61
--- /dev/null
+++ b/drivers/net/cxgb3/sge.c
@@ -0,0 +1,2681 @@
1/*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/skbuff.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/dma-mapping.h>
39#include "common.h"
40#include "regs.h"
41#include "sge_defs.h"
42#include "t3_cpl.h"
43#include "firmware_exports.h"
44
45#define USE_GTS 0
46
47#define SGE_RX_SM_BUF_SIZE 1536
48#define SGE_RX_COPY_THRES 256
49
50# define SGE_RX_DROP_THRES 16
51
52/*
53 * Period of the Tx buffer reclaim timer. This timer does not need to run
54 * frequently as Tx buffers are usually reclaimed by new Tx packets.
55 */
56#define TX_RECLAIM_PERIOD (HZ / 4)
57
58/* WR size in bytes */
59#define WR_LEN (WR_FLITS * 8)
60
61/*
62 * Types of Tx queues in each queue set. Order here matters; do not change it.
63 */
64enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
65
66/* Values for sge_txq.flags */
67enum {
68 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
69 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
70};
71
72struct tx_desc {
73 u64 flit[TX_DESC_FLITS];
74};
75
76struct rx_desc {
77 __be32 addr_lo;
78 __be32 len_gen;
79 __be32 gen2;
80 __be32 addr_hi;
81};
82
83struct tx_sw_desc { /* SW state per Tx descriptor */
84 struct sk_buff *skb;
85};
86
87struct rx_sw_desc { /* SW state per Rx descriptor */
88 struct sk_buff *skb;
89 DECLARE_PCI_UNMAP_ADDR(dma_addr);
90};
91
92struct rsp_desc { /* response queue descriptor */
93 struct rss_header rss_hdr;
94 __be32 flags;
95 __be32 len_cq;
96 u8 imm_data[47];
97 u8 intr_gen;
98};
99
100struct unmap_info { /* packet unmapping info, overlays skb->cb */
101 int sflit; /* start flit of first SGL entry in Tx descriptor */
102 u16 fragidx; /* first page fragment in current Tx descriptor */
103 u16 addr_idx; /* buffer index of first SGL entry in descriptor */
104 u32 len; /* mapped length of skb main body */
105};
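Because unmap_info overlays skb->cb, it must fit within the 48-byte cb[] array of struct sk_buff. A hypothetical compile-time guard, not in the original source, could enforce this, e.g. called once at init:

/* Hypothetical guard: unmap_info overlays skb->cb, so it must fit inside
 * the 48-byte cb[] array (sizeof is not evaluated, so the cast is safe).
 */
static inline void check_unmap_info_size(void)
{
	BUILD_BUG_ON(sizeof(struct unmap_info) >
		     sizeof(((struct sk_buff *)0)->cb));
}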
106
107/*
108 * Maps a number of flits to the number of Tx descriptors that can hold them.
109 * The formula is
110 *
111 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
112 *
113 * HW allows up to 4 descriptors to be combined into a WR.
114 */
115static u8 flit_desc_map[] = {
116 0,
117#if SGE_NUM_GENBITS == 1
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
119 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
120 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
121 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
122#elif SGE_NUM_GENBITS == 2
123 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
124 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
125 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
126 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
127#else
128# error "SGE_NUM_GENBITS must be 1 or 2"
129#endif
130};
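As a cross-check, the table agrees with the comment's formula: WR_FLITS equal to 16 reproduces the SGE_NUM_GENBITS == 1 rows, and an effective limit of 15 flits per descriptor reproduces the SGE_NUM_GENBITS == 2 rows. A hypothetical helper making the computation explicit (the driver itself indexes flit_desc_map directly):

/* Hypothetical: recompute a flit_desc_map entry from the stated formula. */
static unsigned int calc_tx_descs_sketch(unsigned int flits)
{
	if (flits <= 2)
		return flits ? 1 : 0;	/* zero flits need no descriptor */
	return 1 + (flits - 2) / (WR_FLITS - 1);
}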
131
132static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
133{
134 return container_of(q, struct sge_qset, fl[qidx]);
135}
136
137static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
138{
139 return container_of(q, struct sge_qset, rspq);
140}
141
142static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
143{
144 return container_of(q, struct sge_qset, txq[qidx]);
145}
146
147/**
148 * refill_rspq - replenish an SGE response queue
149 * @adapter: the adapter
150 * @q: the response queue to replenish
151 * @credits: how many new responses to make available
152 *
153 * Replenishes a response queue by making the supplied number of responses
154 * available to HW.
155 */
156static inline void refill_rspq(struct adapter *adapter,
157 const struct sge_rspq *q, unsigned int credits)
158{
159 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
160 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
161}
162
163/**
164 * need_skb_unmap - does the platform need unmapping of sk_buffs?
165 *
166 * Returns true if the platform needs sk_buff unmapping. The result is a
167 * compile-time constant, so the compiler optimizes away the unmapping code
168 * on platforms that do not need it.
168 */
169static inline int need_skb_unmap(void)
170{
171 /*
172 * This structure is used to tell if the platform needs buffer
173 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
174 */
175 struct dummy {
176 DECLARE_PCI_UNMAP_ADDR(addr);
177 };
178
179 return sizeof(struct dummy) != 0;
180}
181
182/**
183 * unmap_skb - unmap a packet main body and its page fragments
184 * @skb: the packet
185 * @q: the Tx queue containing Tx descriptors for the packet
186 * @cidx: index of Tx descriptor
187 * @pdev: the PCI device
188 *
189 * Unmap the main body of an sk_buff and its page fragments, if any.
190 * Because of the fairly complicated structure of our SGLs and the desire
191 * to conserve space for metadata, we keep the information necessary to
192 * unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
193 * in the Tx descriptors (the physical addresses of the various data
194 * buffers). The send functions initialize the state in skb->cb so we
195 * can unmap the buffers held in the first Tx descriptor here, and we
196 * have enough information at this point to update the state for the next
197 * Tx descriptor.
198 */
199static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
200 unsigned int cidx, struct pci_dev *pdev)
201{
202 const struct sg_ent *sgp;
203 struct unmap_info *ui = (struct unmap_info *)skb->cb;
204 int nfrags, frag_idx, curflit, j = ui->addr_idx;
205
206 sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];
207
208 if (ui->len) {
209 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
210 PCI_DMA_TODEVICE);
211 ui->len = 0; /* so we know for next descriptor for this skb */
212 j = 1;
213 }
214
215 frag_idx = ui->fragidx;
216 curflit = ui->sflit + 1 + j;
217 nfrags = skb_shinfo(skb)->nr_frags;
218
219 while (frag_idx < nfrags && curflit < WR_FLITS) {
220 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
221 skb_shinfo(skb)->frags[frag_idx].size,
222 PCI_DMA_TODEVICE);
223 j ^= 1;
224 if (j == 0) {
225 sgp++;
226 curflit++;
227 }
228 curflit++;
229 frag_idx++;
230 }
231
232 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
233 ui->fragidx = frag_idx;
234 ui->addr_idx = j;
235 ui->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
236 }
237}
238
239/**
240 * free_tx_desc - reclaims Tx descriptors and their buffers
241 * @adapter: the adapter
242 * @q: the Tx queue to reclaim descriptors from
243 * @n: the number of descriptors to reclaim
244 *
245 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
246 * Tx buffers. Called with the Tx queue lock held.
247 */
248static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
249 unsigned int n)
250{
251 struct tx_sw_desc *d;
252 struct pci_dev *pdev = adapter->pdev;
253 unsigned int cidx = q->cidx;
254
255 d = &q->sdesc[cidx];
256 while (n--) {
257 if (d->skb) { /* an SGL is present */
258 if (need_skb_unmap())
259 unmap_skb(d->skb, q, cidx, pdev);
260 if (d->skb->priority == cidx)
261 kfree_skb(d->skb);
262 }
263 ++d;
264 if (++cidx == q->size) {
265 cidx = 0;
266 d = q->sdesc;
267 }
268 }
269 q->cidx = cidx;
270}
271
272/**
273 * reclaim_completed_tx - reclaims completed Tx descriptors
274 * @adapter: the adapter
275 * @q: the Tx queue to reclaim completed descriptors from
276 *
277 * Reclaims Tx descriptors that the SGE has indicated it has processed,
278 * and frees the associated buffers if possible. Called with the Tx
279 * queue's lock held.
280 */
281static inline void reclaim_completed_tx(struct adapter *adapter,
282 struct sge_txq *q)
283{
284 unsigned int reclaim = q->processed - q->cleaned;
285
286 if (reclaim) {
287 free_tx_desc(adapter, q, reclaim);
288 q->cleaned += reclaim;
289 q->in_use -= reclaim;
290 }
291}
292
293/**
294 * should_restart_tx - are there enough resources to restart a Tx queue?
295 * @q: the Tx queue
296 *
297 * Checks if there are enough descriptors to restart a suspended Tx queue.
298 */
299static inline int should_restart_tx(const struct sge_txq *q)
300{
301 unsigned int r = q->processed - q->cleaned;
302
303 return q->in_use - r < (q->size >> 1);
304}
305
306/**
307 * free_rx_bufs - free the Rx buffers on an SGE free list
308 * @pdev: the PCI device associated with the adapter
309 * @rxq: the SGE free list to clean up
310 *
311 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
312 * this queue should be stopped before calling this function.
313 */
314static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
315{
316 unsigned int cidx = q->cidx;
317
318 while (q->credits--) {
319 struct rx_sw_desc *d = &q->sdesc[cidx];
320
321 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
322 q->buf_size, PCI_DMA_FROMDEVICE);
323 kfree_skb(d->skb);
324 d->skb = NULL;
325 if (++cidx == q->size)
326 cidx = 0;
327 }
328}
329
330/**
331 * add_one_rx_buf - add a packet buffer to a free-buffer list
332 * @skb: the buffer to add
333 * @len: the buffer length
334 * @d: the HW Rx descriptor to write
335 * @sd: the SW Rx descriptor to write
336 * @gen: the generation bit value
337 * @pdev: the PCI device associated with the adapter
338 *
339 * Add a buffer of the given length to the supplied HW and SW Rx
340 * descriptors.
341 */
342static inline void add_one_rx_buf(struct sk_buff *skb, unsigned int len,
343 struct rx_desc *d, struct rx_sw_desc *sd,
344 unsigned int gen, struct pci_dev *pdev)
345{
346 dma_addr_t mapping;
347
348 sd->skb = skb;
349 mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
350 pci_unmap_addr_set(sd, dma_addr, mapping);
351
352 d->addr_lo = cpu_to_be32(mapping);
353 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
354 wmb();
355 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
356 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
357}
358
359/**
360 * refill_fl - refill an SGE free-buffer list
361 * @adapter: the adapter
362 * @q: the free-list to refill
363 * @n: the number of new buffers to allocate
364 * @gfp: the gfp flags for allocating new buffers
365 *
366 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
367 * allocated with the supplied gfp flags. The caller must assure that
368 * @n does not exceed the queue's capacity.
369 */
370static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
371{
372 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
373 struct rx_desc *d = &q->desc[q->pidx];
374
375 while (n--) {
376 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
377
378 if (!skb)
379 break;
380
381 add_one_rx_buf(skb, q->buf_size, d, sd, q->gen, adap->pdev);
382 d++;
383 sd++;
384 if (++q->pidx == q->size) {
385 q->pidx = 0;
386 q->gen ^= 1;
387 sd = q->sdesc;
388 d = q->desc;
389 }
390 q->credits++;
391 }
392
393 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
394}
395
396static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
397{
398 refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
399}
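
/*
 * Refills are capped at 16 buffers per call here, presumably to bound
 * the work (and the number of GFP_ATOMIC allocations) done from the Rx
 * path; any shortfall is simply made up by a later refill.
 */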
400
401/**
402 * recycle_rx_buf - recycle a receive buffer
403 * @adap: the adapter
404 * @q: the SGE free list
405 * @idx: index of buffer to recycle
406 *
407 * Recycles the specified buffer on the given free list by adding it at
408 * the next available slot on the list.
409 */
410static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
411 unsigned int idx)
412{
413 struct rx_desc *from = &q->desc[idx];
414 struct rx_desc *to = &q->desc[q->pidx];
415
416 q->sdesc[q->pidx] = q->sdesc[idx];
417 to->addr_lo = from->addr_lo; /* already big endian */
418 to->addr_hi = from->addr_hi; /* likewise */
419 wmb();
420 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
421 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
422 q->credits++;
423
424 if (++q->pidx == q->size) {
425 q->pidx = 0;
426 q->gen ^= 1;
427 }
428 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
429}
430
431/**
432 * alloc_ring - allocate resources for an SGE descriptor ring
433 * @pdev: the PCI device
434 * @nelem: the number of descriptors
435 * @elem_size: the size of each descriptor
436 * @sw_size: the size of the SW state associated with each ring element
437 * @phys: the physical address of the allocated ring
438 * @metadata: address of the array holding the SW state for the ring
439 *
440 * Allocates resources for an SGE descriptor ring, such as Tx queues,
441 * free buffer lists, or response queues. Each SGE ring requires
442 * space for its HW descriptors plus, optionally, space for the SW state
443 * associated with each HW entry (the metadata). The function returns
444 * three values: the virtual address for the HW ring (the return value
445 * of the function), the physical address of the HW ring, and the address
446 * of the SW ring.
447 */
448static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
449 size_t sw_size, dma_addr_t *phys, void *metadata)
450{
451 size_t len = nelem * elem_size;
452 void *s = NULL;
453 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
454
455 if (!p)
456 return NULL;
457 if (sw_size) {
458 s = kcalloc(nelem, sw_size, GFP_KERNEL);
459
460 if (!s) {
461 dma_free_coherent(&pdev->dev, len, p, *phys);
462 return NULL;
463 }
464 }
465 if (metadata)
466 *(void **)metadata = s;
467 memset(p, 0, len);
468 return p;
469}
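
/*
 * A minimal usage sketch (illustrative only, sizes assumed): this is
 * roughly how a caller obtains the three results described above for a
 * 512-entry free-buffer list.
 */
#if 0	/* illustrative fragment, not compiled */
	struct rx_sw_desc *sdesc;	/* SW ring, returned via @metadata */
	dma_addr_t phys;		/* bus address of the HW ring */
	struct rx_desc *ring;

	ring = alloc_ring(pdev, 512, sizeof(struct rx_desc),
			  sizeof(struct rx_sw_desc), &phys, &sdesc);
#endif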
470
471/**
472 * free_qset - free the resources of an SGE queue set
473 * @adapter: the adapter owning the queue set
474 * @q: the queue set
475 *
476 * Release the HW and SW resources associated with an SGE queue set, such
477 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
478 * queue set must be quiesced prior to calling this.
479 */
480void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
481{
482 int i;
483 struct pci_dev *pdev = adapter->pdev;
484
485 if (q->tx_reclaim_timer.function)
486 del_timer_sync(&q->tx_reclaim_timer);
487
488 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
489 if (q->fl[i].desc) {
490 spin_lock(&adapter->sge.reg_lock);
491 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
492 spin_unlock(&adapter->sge.reg_lock);
493 free_rx_bufs(pdev, &q->fl[i]);
494 kfree(q->fl[i].sdesc);
495 dma_free_coherent(&pdev->dev,
496 q->fl[i].size *
497 sizeof(struct rx_desc), q->fl[i].desc,
498 q->fl[i].phys_addr);
499 }
500
501 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
502 if (q->txq[i].desc) {
503 spin_lock(&adapter->sge.reg_lock);
504 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
505 spin_unlock(&adapter->sge.reg_lock);
506 if (q->txq[i].sdesc) {
507 free_tx_desc(adapter, &q->txq[i],
508 q->txq[i].in_use);
509 kfree(q->txq[i].sdesc);
510 }
511 dma_free_coherent(&pdev->dev,
512 q->txq[i].size *
513 sizeof(struct tx_desc),
514 q->txq[i].desc, q->txq[i].phys_addr);
515 __skb_queue_purge(&q->txq[i].sendq);
516 }
517
518 if (q->rspq.desc) {
519 spin_lock(&adapter->sge.reg_lock);
520 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
521 spin_unlock(&adapter->sge.reg_lock);
522 dma_free_coherent(&pdev->dev,
523 q->rspq.size * sizeof(struct rsp_desc),
524 q->rspq.desc, q->rspq.phys_addr);
525 }
526
527 if (q->netdev)
528 q->netdev->atalk_ptr = NULL;
529
530 memset(q, 0, sizeof(*q));
531}
532
533/**
534 * init_qset_cntxt - initialize an SGE queue set context info
535 * @qs: the queue set
536 * @id: the queue set id
537 *
538 * Initializes the TIDs and context ids for the queues of a queue set.
539 */
540static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
541{
542 qs->rspq.cntxt_id = id;
543 qs->fl[0].cntxt_id = 2 * id;
544 qs->fl[1].cntxt_id = 2 * id + 1;
545 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
546 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
547 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
548 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
549 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
550}
551
552/**
553 * sgl_len - calculates the size of an SGL of the given capacity
554 * @n: the number of SGL entries
555 *
556 * Calculates the number of flits needed for a scatter/gather list that
557 * can hold the given number of entries.
558 */
559static inline unsigned int sgl_len(unsigned int n)
560{
561 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
562 return (3 * n) / 2 + (n & 1);
563}
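
/*
 * Worked example (assuming the 24-byte struct sg_ent used by this
 * driver, i.e. two length/address pairs spanning three 8-byte flits):
 * sgl_len(3) = (3 * 3) / 2 + (3 & 1) = 4 + 1 = 5 flits, one fully used
 * sg_ent plus one holding a single entry.
 */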
564
565/**
566 * flits_to_desc - returns the num of Tx descriptors for the given flits
567 * @n: the number of flits
568 *
569 * Calculates the number of Tx descriptors needed for the supplied number
570 * of flits.
571 */
572static inline unsigned int flits_to_desc(unsigned int n)
573{
574 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
575 return flit_desc_map[n];
576}
577
578/**
579 * get_packet - return the next ingress packet buffer from a free list
580 * @adap: the adapter that received the packet
581 * @fl: the SGE free list holding the packet
582 * @len: the packet length including any SGE padding
583 * @drop_thres: # of remaining buffers before we start dropping packets
584 *
585 * Get the next packet from a free list and complete setup of the
586 * sk_buff. If the packet is small we make a copy and recycle the
587 * original buffer, otherwise we use the original buffer itself. If a
588 * positive drop threshold is supplied packets are dropped and their
589 * buffers recycled if (a) the number of remaining buffers is under the
590 * threshold and the packet is too big to copy, or (b) the packet should
591 * be copied but there is no memory for the copy.
592 */
593static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
594 unsigned int len, unsigned int drop_thres)
595{
596 struct sk_buff *skb = NULL;
597 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
598
599 prefetch(sd->skb->data);
600
601 if (len <= SGE_RX_COPY_THRES) {
602 skb = alloc_skb(len, GFP_ATOMIC);
603 if (likely(skb != NULL)) {
604 __skb_put(skb, len);
605 pci_dma_sync_single_for_cpu(adap->pdev,
606 pci_unmap_addr(sd,
607 dma_addr),
608 len, PCI_DMA_FROMDEVICE);
609 memcpy(skb->data, sd->skb->data, len);
610 pci_dma_sync_single_for_device(adap->pdev,
611 pci_unmap_addr(sd,
612 dma_addr),
613 len, PCI_DMA_FROMDEVICE);
614 } else if (!drop_thres)
615 goto use_orig_buf;
616 recycle:
617 recycle_rx_buf(adap, fl, fl->cidx);
618 return skb;
619 }
620
621 if (unlikely(fl->credits < drop_thres))
622 goto recycle;
623
624 use_orig_buf:
625 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
626 fl->buf_size, PCI_DMA_FROMDEVICE);
627 skb = sd->skb;
628 skb_put(skb, len);
629 __refill_fl(adap, fl);
630 return skb;
631}
632
633/**
634 * get_imm_packet - return the next ingress packet buffer from a response
635 * @resp: the response descriptor containing the packet data
636 *
637 * Return a packet containing the immediate data of the given response.
638 */
639static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
640{
641 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
642
643 if (skb) {
644 __skb_put(skb, IMMED_PKT_SIZE);
645 memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE);
646 }
647 return skb;
648}
649
650/**
651 * calc_tx_descs - calculate the number of Tx descriptors for a packet
652 * @skb: the packet
653 *
654 * Returns the number of Tx descriptors needed for the given Ethernet
655 * packet. Ethernet packets require the addition of WR and CPL headers.
656 */
657static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
658{
659 unsigned int flits;
660
661 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
662 return 1;
663
664 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
665 if (skb_shinfo(skb)->gso_size)
666 flits++;
667 return flits_to_desc(flits);
668}
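
/*
 * Worked example (illustrative packet shape): a GSO packet with data in
 * the linear area plus two page fragments needs sgl_len(3) + 2 = 7
 * flits for its SGL and WR/CPL headers, plus one flit for the LSO
 * header, so it occupies flits_to_desc(8) descriptors.
 */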
669
670/**
671 * make_sgl - populate a scatter/gather list for a packet
672 * @skb: the packet
673 * @sgp: the SGL to populate
674 * @start: start address of skb main body data to include in the SGL
675 * @len: length of skb main body data to include in the SGL
676 * @pdev: the PCI device
677 *
678 * Generates a scatter/gather list for the buffers that make up a packet
679 * and returns the SGL size in 8-byte words. The caller must size the SGL
680 * appropriately.
681 */
682static inline unsigned int make_sgl(const struct sk_buff *skb,
683 struct sg_ent *sgp, unsigned char *start,
684 unsigned int len, struct pci_dev *pdev)
685{
686 dma_addr_t mapping;
687 unsigned int i, j = 0, nfrags;
688
689 if (len) {
690 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
691 sgp->len[0] = cpu_to_be32(len);
692 sgp->addr[0] = cpu_to_be64(mapping);
693 j = 1;
694 }
695
696 nfrags = skb_shinfo(skb)->nr_frags;
697 for (i = 0; i < nfrags; i++) {
698 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
699
700 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
701 frag->size, PCI_DMA_TODEVICE);
702 sgp->len[j] = cpu_to_be32(frag->size);
703 sgp->addr[j] = cpu_to_be64(mapping);
704 j ^= 1;
705 if (j == 0)
706 ++sgp;
707 }
708 if (j)
709 sgp->len[j] = 0;
710 return ((nfrags + (len != 0)) * 3) / 2 + j;
711}
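
/*
 * The value returned above works out to sgl_len(nfrags + (len != 0)):
 * the floored multiply and the final j together charge three flits per
 * full pair of SGL entries and two flits for a trailing odd entry.
 */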
712
713/**
714 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
715 * @adap: the adapter
716 * @q: the Tx queue
717 *
718 * Ring the doorbell if a Tx queue is asleep. There is a natural race,
719 * where the HW may go to sleep just after we checked; in that case
720 * the interrupt handler will detect the outstanding Tx packet
721 * and ring the doorbell for us.
722 *
723 * When GTS is disabled we unconditionally ring the doorbell.
724 */
725static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
726{
727#if USE_GTS
728 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
729 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
730 set_bit(TXQ_LAST_PKT_DB, &q->flags);
731 t3_write_reg(adap, A_SG_KDOORBELL,
732 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
733 }
734#else
735 wmb(); /* write descriptors before telling HW */
736 t3_write_reg(adap, A_SG_KDOORBELL,
737 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
738#endif
739}
740
741static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
742{
743#if SGE_NUM_GENBITS == 2
744 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
745#endif
746}
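
/*
 * Explanatory note (our reading of the scheme, not from hardware
 * documentation): duplicating the generation value in the last flit
 * lets the SGE recognize a descriptor whose first and last flits
 * disagree, i.e. one it raced with while the host was still writing
 * it, and defer processing until the descriptor is complete.
 */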
747
748/**
749 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
750 * @ndesc: number of Tx descriptors spanned by the SGL
751 * @skb: the packet corresponding to the WR
752 * @d: first Tx descriptor to be written
753 * @pidx: index of above descriptors
754 * @q: the SGE Tx queue
755 * @sgl: the SGL
756 * @flits: number of flits to the start of the SGL in the first descriptor
757 * @sgl_flits: the SGL size in flits
758 * @gen: the Tx descriptor generation
759 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
760 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
761 *
762 * Write a work request header and an associated SGL. If the SGL is
763 * small enough to fit into one Tx descriptor it has already been written
764 * and we just need to write the WR header. Otherwise we distribute the
765 * SGL across the number of descriptors it spans.
766 */
767static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
768 struct tx_desc *d, unsigned int pidx,
769 const struct sge_txq *q,
770 const struct sg_ent *sgl,
771 unsigned int flits, unsigned int sgl_flits,
772 unsigned int gen, unsigned int wr_hi,
773 unsigned int wr_lo)
774{
775 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
776 struct tx_sw_desc *sd = &q->sdesc[pidx];
777
778 sd->skb = skb;
779 if (need_skb_unmap()) {
780 struct unmap_info *ui = (struct unmap_info *)skb->cb;
781
782 ui->fragidx = 0;
783 ui->addr_idx = 0;
784 ui->sflit = flits;
785 }
786
787 if (likely(ndesc == 1)) {
788 skb->priority = pidx;
789 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
790 V_WR_SGLSFLT(flits)) | wr_hi;
791 wmb();
792 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
793 V_WR_GEN(gen)) | wr_lo;
794 wr_gen2(d, gen);
795 } else {
796 unsigned int ogen = gen;
797 const u64 *fp = (const u64 *)sgl;
798 struct work_request_hdr *wp = wrp;
799
800 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
801 V_WR_SGLSFLT(flits)) | wr_hi;
802
803 while (sgl_flits) {
804 unsigned int avail = WR_FLITS - flits;
805
806 if (avail > sgl_flits)
807 avail = sgl_flits;
808 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
809 sgl_flits -= avail;
810 ndesc--;
811 if (!sgl_flits)
812 break;
813
814 fp += avail;
815 d++;
816 sd++;
817 if (++pidx == q->size) {
818 pidx = 0;
819 gen ^= 1;
820 d = q->desc;
821 sd = q->sdesc;
822 }
823
824 sd->skb = skb;
825 wrp = (struct work_request_hdr *)d;
826 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
827 V_WR_SGLSFLT(1)) | wr_hi;
828 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
829 sgl_flits + 1)) |
830 V_WR_GEN(gen)) | wr_lo;
831 wr_gen2(d, gen);
832 flits = 1;
833 }
834 skb->priority = pidx;
835 wrp->wr_hi |= htonl(F_WR_EOP);
836 wmb();
837 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
838 wr_gen2((struct tx_desc *)wp, ogen);
839 WARN_ON(ndesc != 0);
840 }
841}
842
843/**
844 * write_tx_pkt_wr - write a TX_PKT work request
845 * @adap: the adapter
846 * @skb: the packet to send
847 * @pi: the egress interface
848 * @pidx: index of the first Tx descriptor to write
849 * @gen: the generation value to use
850 * @q: the Tx queue
851 * @ndesc: number of descriptors the packet will occupy
852 * @compl: the value of the COMPL bit to use
853 *
854 * Generate a TX_PKT work request to send the supplied packet.
855 */
856static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
857 const struct port_info *pi,
858 unsigned int pidx, unsigned int gen,
859 struct sge_txq *q, unsigned int ndesc,
860 unsigned int compl)
861{
862 unsigned int flits, sgl_flits, cntrl, tso_info;
863 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
864 struct tx_desc *d = &q->desc[pidx];
865 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
866
867 cpl->len = htonl(skb->len | 0x80000000);
868 cntrl = V_TXPKT_INTF(pi->port_id);
869
870 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
871 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
872
873 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
874 if (tso_info) {
875 int eth_type;
876 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
877
878 d->flit[2] = 0;
879 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
880 hdr->cntrl = htonl(cntrl);
881 eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
882 CPL_ETH_II : CPL_ETH_II_VLAN;
883 tso_info |= V_LSO_ETH_TYPE(eth_type) |
884 V_LSO_IPHDR_WORDS(skb->nh.iph->ihl) |
885 V_LSO_TCPHDR_WORDS(skb->h.th->doff);
886 hdr->lso_info = htonl(tso_info);
887 flits = 3;
888 } else {
889 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
890 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
891 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
892 cpl->cntrl = htonl(cntrl);
893
894 if (skb->len <= WR_LEN - sizeof(*cpl)) {
895 q->sdesc[pidx].skb = NULL;
896 if (!skb->data_len)
897 memcpy(&d->flit[2], skb->data, skb->len);
898 else
899 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
900
901 flits = (skb->len + 7) / 8 + 2;
902 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
903 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
904 | F_WR_SOP | F_WR_EOP | compl);
905 wmb();
906 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
907 V_WR_TID(q->token));
908 wr_gen2(d, gen);
909 kfree_skb(skb);
910 return;
911 }
912
913 flits = 2;
914 }
915
916 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
917 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
918 if (need_skb_unmap())
919 ((struct unmap_info *)skb->cb)->len = skb_headlen(skb);
920
921 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
922 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
923 htonl(V_WR_TID(q->token)));
924}
925
926/**
927 * t3_eth_xmit - add a packet to the Ethernet Tx queue
928 * @skb: the packet
929 * @dev: the egress net device
930 *
931 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
932 */
933int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
934{
935 unsigned int ndesc, pidx, credits, gen, compl;
936 const struct port_info *pi = netdev_priv(dev);
937 struct adapter *adap = dev->priv;
938 struct sge_qset *qs = dev2qset(dev);
939 struct sge_txq *q = &qs->txq[TXQ_ETH];
940
941 /*
942 * The chip's minimum packet length is 9 octets, but play safe and reject
943 * anything shorter than an Ethernet header.
944 */
945 if (unlikely(skb->len < ETH_HLEN)) {
946 dev_kfree_skb(skb);
947 return NETDEV_TX_OK;
948 }
949
950 spin_lock(&q->lock);
951 reclaim_completed_tx(adap, q);
952
953 credits = q->size - q->in_use;
954 ndesc = calc_tx_descs(skb);
955
956 if (unlikely(credits < ndesc)) {
957 if (!netif_queue_stopped(dev)) {
958 netif_stop_queue(dev);
959 set_bit(TXQ_ETH, &qs->txq_stopped);
960 q->stops++;
961 dev_err(&adap->pdev->dev,
962 "%s: Tx ring %u full while queue awake!\n",
963 dev->name, q->cntxt_id & 7);
964 }
965 spin_unlock(&q->lock);
966 return NETDEV_TX_BUSY;
967 }
968
969 q->in_use += ndesc;
970 if (unlikely(credits - ndesc < q->stop_thres)) {
971 q->stops++;
972 netif_stop_queue(dev);
973 set_bit(TXQ_ETH, &qs->txq_stopped);
974#if !USE_GTS
975 if (should_restart_tx(q) &&
976 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
977 q->restarts++;
978 netif_wake_queue(dev);
979 }
980#endif
981 }
982
983 gen = q->gen;
984 q->unacked += ndesc;
985 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
986 q->unacked &= 7;
987 pidx = q->pidx;
988 q->pidx += ndesc;
989 if (q->pidx >= q->size) {
990 q->pidx -= q->size;
991 q->gen ^= 1;
992 }
993
994 /* update port statistics */
995 if (skb->ip_summed == CHECKSUM_COMPLETE)
996 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
997 if (skb_shinfo(skb)->gso_size)
998 qs->port_stats[SGE_PSTAT_TSO]++;
999 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1000 qs->port_stats[SGE_PSTAT_VLANINS]++;
1001
1002 dev->trans_start = jiffies;
1003 spin_unlock(&q->lock);
1004
1005 /*
1006 * We do not use Tx completion interrupts to free DMAd Tx packets.
1007 * This is good for performance but means that we rely on new Tx
1008 * packets arriving to run the destructors of completed packets,
1009 * which open up space in their sockets' send queues. Sometimes
1010 * we do not get such new packets, causing Tx to stall. A single
1011 * UDP transmitter is a good example of this situation. We have
1012 * a clean up timer that periodically reclaims completed packets
1013 * but it doesn't run often enough (nor do we want it to) to prevent
1014 * lengthy stalls. A solution to this problem is to run the
1015 * destructor early, after the packet is queued but before it's DMAd.
1016 * A downside is that we lie to socket memory accounting, but the amount
1017 * of extra memory is reasonable (limited by the number of Tx
1018 * descriptors), the packets do actually get freed quickly by new
1019 * packets almost always, and for protocols like TCP that wait for
1020 * acks to really free up the data the extra memory is even less.
1021 * On the positive side we run the destructors on the sending CPU
1022 * rather than on a potentially different completing CPU, usually a
1023 * good thing. We also run them without holding our Tx queue lock,
1024 * unlike what reclaim_completed_tx() would otherwise do.
1025 *
1026 * Run the destructor before telling the DMA engine about the packet
1027 * to make sure it doesn't complete and get freed prematurely.
1028 */
1029 if (likely(!skb_shared(skb)))
1030 skb_orphan(skb);
1031
1032 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1033 check_ring_tx_db(adap, q);
1034 return NETDEV_TX_OK;
1035}
1036
1037/**
1038 * write_imm - write a packet into a Tx descriptor as immediate data
1039 * @d: the Tx descriptor to write
1040 * @skb: the packet
1041 * @len: the length of packet data to write as immediate data
1042 * @gen: the generation bit value to write
1043 *
1044 * Writes a packet as immediate data into a Tx descriptor. The packet
1045 * contains a work request at its beginning. We must write the packet
1046 * carefully so the SGE doesn't accidentally read it before it's
1047 * written in its entirety.
1048 */
1049static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1050 unsigned int len, unsigned int gen)
1051{
1052 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1053 struct work_request_hdr *to = (struct work_request_hdr *)d;
1054
1055 memcpy(&to[1], &from[1], len - sizeof(*from));
1056 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1057 V_WR_BCNTLFLT(len & 7));
1058 wmb();
1059 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1060 V_WR_LEN((len + 7) / 8));
1061 wr_gen2(d, gen);
1062 kfree_skb(skb);
1063}
1064
1065/**
1066 * check_desc_avail - check descriptor availability on a send queue
1067 * @adap: the adapter
1068 * @q: the send queue
1069 * @skb: the packet needing the descriptors
1070 * @ndesc: the number of Tx descriptors needed
1071 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1072 *
1073 * Checks if the requested number of Tx descriptors is available on an
1074 * SGE send queue. If the queue is already suspended or not enough
1075 * descriptors are available the packet is queued for later transmission.
1076 * Must be called with the Tx queue locked.
1077 *
1078 * Returns 0 if enough descriptors are available, 1 if there aren't
1079 * enough descriptors and the packet has been queued, and 2 if the caller
1080 * needs to retry because there weren't enough descriptors at the
1081 * beginning of the call but some freed up in the meantime.
1082 */
1083static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1084 struct sk_buff *skb, unsigned int ndesc,
1085 unsigned int qid)
1086{
1087 if (unlikely(!skb_queue_empty(&q->sendq))) {
1088 addq_exit:__skb_queue_tail(&q->sendq, skb);
1089 return 1;
1090 }
1091 if (unlikely(q->size - q->in_use < ndesc)) {
1092 struct sge_qset *qs = txq_to_qset(q, qid);
1093
1094 set_bit(qid, &qs->txq_stopped);
1095 smp_mb__after_clear_bit();
1096
1097 if (should_restart_tx(q) &&
1098 test_and_clear_bit(qid, &qs->txq_stopped))
1099 return 2;
1100
1101 q->stops++;
1102 goto addq_exit;
1103 }
1104 return 0;
1105}
1106
1107/**
1108 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1109 * @q: the SGE control Tx queue
1110 *
1111 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1112 * that send only immediate data (presently just the control queues) and
1113 * thus do not have any sk_buffs to release.
1114 */
1115static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1116{
1117 unsigned int reclaim = q->processed - q->cleaned;
1118
1119 q->in_use -= reclaim;
1120 q->cleaned += reclaim;
1121}
1122
1123static inline int immediate(const struct sk_buff *skb)
1124{
1125 return skb->len <= WR_LEN && !skb->data_len;
1126}
1127
1128/**
1129 * ctrl_xmit - send a packet through an SGE control Tx queue
1130 * @adap: the adapter
1131 * @q: the control queue
1132 * @skb: the packet
1133 *
1134 * Send a packet through an SGE control Tx queue. Packets sent through
1135 * a control queue must fit entirely as immediate data in a single Tx
1136 * descriptor and have no page fragments.
1137 */
1138static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1139 struct sk_buff *skb)
1140{
1141 int ret;
1142 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1143
1144 if (unlikely(!immediate(skb))) {
1145 WARN_ON(1);
1146 dev_kfree_skb(skb);
1147 return NET_XMIT_SUCCESS;
1148 }
1149
1150 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1151 wrp->wr_lo = htonl(V_WR_TID(q->token));
1152
1153 spin_lock(&q->lock);
1154 again:reclaim_completed_tx_imm(q);
1155
1156 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1157 if (unlikely(ret)) {
1158 if (ret == 1) {
1159 spin_unlock(&q->lock);
1160 return NET_XMIT_CN;
1161 }
1162 goto again;
1163 }
1164
1165 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1166
1167 q->in_use++;
1168 if (++q->pidx >= q->size) {
1169 q->pidx = 0;
1170 q->gen ^= 1;
1171 }
1172 spin_unlock(&q->lock);
1173 wmb();
1174 t3_write_reg(adap, A_SG_KDOORBELL,
1175 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1176 return NET_XMIT_SUCCESS;
1177}
1178
1179/**
1180 * restart_ctrlq - restart a suspended control queue
1181 * @qs: the queue set containing the control queue
1182 *
1183 * Resumes transmission on a suspended Tx control queue.
1184 */
1185static void restart_ctrlq(unsigned long data)
1186{
1187 struct sk_buff *skb;
1188 struct sge_qset *qs = (struct sge_qset *)data;
1189 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1190 struct adapter *adap = qs->netdev->priv;
1191
1192 spin_lock(&q->lock);
1193 again:reclaim_completed_tx_imm(q);
1194
1195 while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) {
1196
1197 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1198
1199 if (++q->pidx >= q->size) {
1200 q->pidx = 0;
1201 q->gen ^= 1;
1202 }
1203 q->in_use++;
1204 }
1205
1206 if (!skb_queue_empty(&q->sendq)) {
1207 set_bit(TXQ_CTRL, &qs->txq_stopped);
1208 smp_mb__after_clear_bit();
1209
1210 if (should_restart_tx(q) &&
1211 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1212 goto again;
1213 q->stops++;
1214 }
1215
1216 spin_unlock(&q->lock);
1217 t3_write_reg(adap, A_SG_KDOORBELL,
1218 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1219}
1220
1221/*
1222 * Send a management message through control queue 0
1223 */
1224int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1225{
1226 return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1227}
1228
1229/**
1230 * write_ofld_wr - write an offload work request
1231 * @adap: the adapter
1232 * @skb: the packet to send
1233 * @q: the Tx queue
1234 * @pidx: index of the first Tx descriptor to write
1235 * @gen: the generation value to use
1236 * @ndesc: number of descriptors the packet will occupy
1237 *
1238 * Write an offload work request to send the supplied packet. The packet
1239 * data already carry the work request with most fields populated.
1240 */
1241static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1242 struct sge_txq *q, unsigned int pidx,
1243 unsigned int gen, unsigned int ndesc)
1244{
1245 unsigned int sgl_flits, flits;
1246 struct work_request_hdr *from;
1247 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1248 struct tx_desc *d = &q->desc[pidx];
1249
1250 if (immediate(skb)) {
1251 q->sdesc[pidx].skb = NULL;
1252 write_imm(d, skb, skb->len, gen);
1253 return;
1254 }
1255
1256 /* Only TX_DATA builds SGLs */
1257
1258 from = (struct work_request_hdr *)skb->data;
1259 memcpy(&d->flit[1], &from[1], skb->h.raw - skb->data - sizeof(*from));
1260
1261 flits = (skb->h.raw - skb->data) / 8;
1262 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1263 sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
1264 adap->pdev);
1265 if (need_skb_unmap())
1266 ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
1267
1268 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1269 gen, from->wr_hi, from->wr_lo);
1270}
1271
1272/**
1273 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1274 * @skb: the packet
1275 *
1276 * Returns the number of Tx descriptors needed for the given offload
1277 * packet. These packets are already fully constructed.
1278 */
1279static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1280{
1281 unsigned int flits, cnt = skb_shinfo(skb)->nr_frags;
1282
1283 if (skb->len <= WR_LEN && cnt == 0)
1284 return 1; /* packet fits as immediate data */
1285
1286 flits = (skb->h.raw - skb->data) / 8; /* headers */
1287 if (skb->tail != skb->h.raw)
1288 cnt++;
1289 return flits_to_desc(flits + sgl_len(cnt));
1290}
1291
1292/**
1293 * ofld_xmit - send a packet through an offload queue
1294 * @adap: the adapter
1295 * @q: the Tx offload queue
1296 * @skb: the packet
1297 *
1298 * Send an offload packet through an SGE offload queue.
1299 */
1300static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1301 struct sk_buff *skb)
1302{
1303 int ret;
1304 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1305
1306 spin_lock(&q->lock);
1307 again:reclaim_completed_tx(adap, q);
1308
1309 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1310 if (unlikely(ret)) {
1311 if (ret == 1) {
1312 skb->priority = ndesc; /* save for restart */
1313 spin_unlock(&q->lock);
1314 return NET_XMIT_CN;
1315 }
1316 goto again;
1317 }
1318
1319 gen = q->gen;
1320 q->in_use += ndesc;
1321 pidx = q->pidx;
1322 q->pidx += ndesc;
1323 if (q->pidx >= q->size) {
1324 q->pidx -= q->size;
1325 q->gen ^= 1;
1326 }
1327 spin_unlock(&q->lock);
1328
1329 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1330 check_ring_tx_db(adap, q);
1331 return NET_XMIT_SUCCESS;
1332}
1333
1334/**
1335 * restart_offloadq - restart a suspended offload queue
1336 * @qs: the queue set containing the offload queue
1337 *
1338 * Resumes transmission on a suspended Tx offload queue.
1339 */
1340static void restart_offloadq(unsigned long data)
1341{
1342 struct sk_buff *skb;
1343 struct sge_qset *qs = (struct sge_qset *)data;
1344 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1345 struct adapter *adap = qs->netdev->priv;
1346
1347 spin_lock(&q->lock);
1348 again:reclaim_completed_tx(adap, q);
1349
1350 while ((skb = skb_peek(&q->sendq)) != NULL) {
1351 unsigned int gen, pidx;
1352 unsigned int ndesc = skb->priority;
1353
1354 if (unlikely(q->size - q->in_use < ndesc)) {
1355 set_bit(TXQ_OFLD, &qs->txq_stopped);
1356 smp_mb__after_clear_bit();
1357
1358 if (should_restart_tx(q) &&
1359 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1360 goto again;
1361 q->stops++;
1362 break;
1363 }
1364
1365 gen = q->gen;
1366 q->in_use += ndesc;
1367 pidx = q->pidx;
1368 q->pidx += ndesc;
1369 if (q->pidx >= q->size) {
1370 q->pidx -= q->size;
1371 q->gen ^= 1;
1372 }
1373 __skb_unlink(skb, &q->sendq);
1374 spin_unlock(&q->lock);
1375
1376 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1377 spin_lock(&q->lock);
1378 }
1379 spin_unlock(&q->lock);
1380
1381#if USE_GTS
1382 set_bit(TXQ_RUNNING, &q->flags);
1383 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1384#endif
1385 t3_write_reg(adap, A_SG_KDOORBELL,
1386 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1387}
1388
1389/**
1390 * queue_set - return the queue set a packet should use
1391 * @skb: the packet
1392 *
1393 * Maps a packet to the SGE queue set it should use. The desired queue
1394 * set is carried in bits 1-3 in the packet's priority.
1395 */
1396static inline int queue_set(const struct sk_buff *skb)
1397{
1398 return skb->priority >> 1;
1399}
1400
1401/**
1402 * is_ctrl_pkt - return whether an offload packet is a control packet
1403 * @skb: the packet
1404 *
1405 * Determines whether an offload packet should use an OFLD or a CTRL
1406 * Tx queue. This is indicated by bit 0 in the packet's priority.
1407 */
1408static inline int is_ctrl_pkt(const struct sk_buff *skb)
1409{
1410 return skb->priority & 1;
1411}
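
/*
 * A minimal sketch of the priority encoding described above (values
 * illustrative): an offload sender targeting queue set 2 that wants
 * the control queue would mark its packet like this.
 */
#if 0	/* illustrative fragment, not compiled */
	skb->priority = (2 << 1) | 1;	/* queue set 2, CTRL queue */
#endif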
1412
1413/**
1414 * t3_offload_tx - send an offload packet
1415 * @tdev: the offload device to send to
1416 * @skb: the packet
1417 *
1418 * Sends an offload packet. We use the packet priority to select the
1419 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1420 * should be sent as regular or control, bits 1-3 select the queue set.
1421 */
1422int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1423{
1424 struct adapter *adap = tdev2adap(tdev);
1425 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1426
1427 if (unlikely(is_ctrl_pkt(skb)))
1428 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1429
1430 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1431}
1432
1433/**
1434 * offload_enqueue - add an offload packet to an SGE offload receive queue
1435 * @q: the SGE response queue
1436 * @skb: the packet
1437 *
1438 * Add a new offload packet to an SGE response queue's offload packet
1439 * queue. If the packet is the first on the queue it schedules the RX
1440 * softirq to process the queue.
1441 */
1442static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1443{
1444 skb->next = skb->prev = NULL;
1445 if (q->rx_tail)
1446 q->rx_tail->next = skb;
1447 else {
1448 struct sge_qset *qs = rspq_to_qset(q);
1449
1450 if (__netif_rx_schedule_prep(qs->netdev))
1451 __netif_rx_schedule(qs->netdev);
1452 q->rx_head = skb;
1453 }
1454 q->rx_tail = skb;
1455}
1456
1457/**
1458 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1459 * @tdev: the offload device that will be receiving the packets
1460 * @q: the SGE response queue that assembled the bundle
1461 * @skbs: the partial bundle
1462 * @n: the number of packets in the bundle
1463 *
1464 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1465 */
1466static inline void deliver_partial_bundle(struct t3cdev *tdev,
1467 struct sge_rspq *q,
1468 struct sk_buff *skbs[], int n)
1469{
1470 if (n) {
1471 q->offload_bundles++;
1472 tdev->recv(tdev, skbs, n);
1473 }
1474}
1475
1476/**
1477 * ofld_poll - NAPI handler for offload packets in interrupt mode
1478 * @dev: the network device doing the polling
1479 * @budget: polling budget
1480 *
1481 * The NAPI handler for offload packets when a response queue is serviced
1482 * by the hard interrupt handler, i.e., when it's operating in non-polling
1483 * mode. Creates small packet batches and sends them through the offload
1484 * receive handler. Batches need to be of modest size as we do prefetches
1485 * on the packets in each.
1486 */
1487static int ofld_poll(struct net_device *dev, int *budget)
1488{
1489 struct adapter *adapter = dev->priv;
1490 struct sge_qset *qs = dev2qset(dev);
1491 struct sge_rspq *q = &qs->rspq;
1492 int work_done, limit = min(*budget, dev->quota), avail = limit;
1493
1494 while (avail) {
1495 struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
1496 int ngathered;
1497
1498 spin_lock_irq(&q->lock);
1499 head = q->rx_head;
1500 if (!head) {
1501 work_done = limit - avail;
1502 *budget -= work_done;
1503 dev->quota -= work_done;
1504 __netif_rx_complete(dev);
1505 spin_unlock_irq(&q->lock);
1506 return 0;
1507 }
1508
1509 tail = q->rx_tail;
1510 q->rx_head = q->rx_tail = NULL;
1511 spin_unlock_irq(&q->lock);
1512
1513 for (ngathered = 0; avail && head; avail--) {
1514 prefetch(head->data);
1515 skbs[ngathered] = head;
1516 head = head->next;
1517 skbs[ngathered]->next = NULL;
1518 if (++ngathered == RX_BUNDLE_SIZE) {
1519 q->offload_bundles++;
1520 adapter->tdev.recv(&adapter->tdev, skbs,
1521 ngathered);
1522 ngathered = 0;
1523 }
1524 }
1525 if (head) { /* splice remaining packets back onto Rx queue */
1526 spin_lock_irq(&q->lock);
1527 tail->next = q->rx_head;
1528 if (!q->rx_head)
1529 q->rx_tail = tail;
1530 q->rx_head = head;
1531 spin_unlock_irq(&q->lock);
1532 }
1533 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1534 }
1535 work_done = limit - avail;
1536 *budget -= work_done;
1537 dev->quota -= work_done;
1538 return 1;
1539}
1540
1541/**
1542 * rx_offload - process a received offload packet
1543 * @tdev: the offload device receiving the packet
1544 * @rq: the response queue that received the packet
1545 * @skb: the packet
1546 * @rx_gather: a gather list of packets if we are building a bundle
1547 * @gather_idx: index of the next available slot in the bundle
1548 *
1549 * Process an ingress offload packet and add it to the offload ingress
1550 * queue. Returns the index of the next available slot in the bundle.
1551 */
1552static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1553 struct sk_buff *skb, struct sk_buff *rx_gather[],
1554 unsigned int gather_idx)
1555{
1556 rq->offload_pkts++;
1557 skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data;
1558
1559 if (rq->polling) {
1560 rx_gather[gather_idx++] = skb;
1561 if (gather_idx == RX_BUNDLE_SIZE) {
1562 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1563 gather_idx = 0;
1564 rq->offload_bundles++;
1565 }
1566 } else
1567 offload_enqueue(rq, skb);
1568
1569 return gather_idx;
1570}
1571
1572/**
1573 * restart_tx - check whether to restart suspended Tx queues
1574 * @qs: the queue set to resume
1575 *
1576 * Restarts suspended Tx queues of an SGE queue set if they have enough
1577 * free resources to resume operation.
1578 */
1579static void restart_tx(struct sge_qset *qs)
1580{
1581 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1582 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1583 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1584 qs->txq[TXQ_ETH].restarts++;
1585 if (netif_running(qs->netdev))
1586 netif_wake_queue(qs->netdev);
1587 }
1588
1589 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1590 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1591 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1592 qs->txq[TXQ_OFLD].restarts++;
1593 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1594 }
1595 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1596 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1597 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1598 qs->txq[TXQ_CTRL].restarts++;
1599 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1600 }
1601}
1602
1603/**
1604 * rx_eth - process an ingress Ethernet packet
1605 * @adap: the adapter
1606 * @rq: the response queue that received the packet
1607 * @skb: the packet
1608 * @pad: amount of padding at the start of the buffer
1609 *
1610 * Process an ingress Ethernet packet and deliver it to the stack.
1611 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1612 * if it was immediate data in a response.
1613 */
1614static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1615 struct sk_buff *skb, int pad)
1616{
1617 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1618 struct port_info *pi;
1619
1620 rq->eth_pkts++;
1621 skb_pull(skb, sizeof(*p) + pad);
1622 skb->dev = adap->port[p->iff];
1623 skb->dev->last_rx = jiffies;
1624 skb->protocol = eth_type_trans(skb, skb->dev);
1625 pi = netdev_priv(skb->dev);
1626 if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
1627 !p->fragment) {
1628 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1629 skb->ip_summed = CHECKSUM_UNNECESSARY;
1630 } else
1631 skb->ip_summed = CHECKSUM_NONE;
1632
1633 if (unlikely(p->vlan_valid)) {
1634 struct vlan_group *grp = pi->vlan_grp;
1635
1636 rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
1637 if (likely(grp))
1638 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1639 rq->polling);
1640 else
1641 dev_kfree_skb_any(skb);
1642 } else if (rq->polling)
1643 netif_receive_skb(skb);
1644 else
1645 netif_rx(skb);
1646}
1647
1648/**
1649 * handle_rsp_cntrl_info - handles control information in a response
1650 * @qs: the queue set corresponding to the response
1651 * @flags: the response control flags
1652 *
1653 * Handles the control information of an SGE response, such as GTS
1654 * indications and completion credits for the queue set's Tx queues.
1655 * HW coalesces credits, we don't do any extra SW coalescing.
1656 */
1657static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
1658{
1659 unsigned int credits;
1660
1661#if USE_GTS
1662 if (flags & F_RSPD_TXQ0_GTS)
1663 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
1664#endif
1665
1666 credits = G_RSPD_TXQ0_CR(flags);
1667 if (credits)
1668 qs->txq[TXQ_ETH].processed += credits;
1669
1670 credits = G_RSPD_TXQ2_CR(flags);
1671 if (credits)
1672 qs->txq[TXQ_CTRL].processed += credits;
1673
1674# if USE_GTS
1675 if (flags & F_RSPD_TXQ1_GTS)
1676 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
1677# endif
1678 credits = G_RSPD_TXQ1_CR(flags);
1679 if (credits)
1680 qs->txq[TXQ_OFLD].processed += credits;
1681}
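
/*
 * The credits extracted above are the HW's count of additional Tx
 * descriptors it has finished with since the last report; accumulating
 * them into .processed is what later lets reclaim_completed_tx() and
 * reclaim_completed_tx_imm() free those descriptors.
 */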
1682
1683/**
1684 * check_ring_db - check if we need to ring any doorbells
1685 * @adap: the adapter
1686 * @qs: the queue set whose Tx queues are to be examined
1687 * @sleeping: indicates which Tx queue sent GTS
1688 *
1689 * Checks if some of a queue set's Tx queues need to ring their doorbells
1690 * to resume transmission after idling while they still have unprocessed
1691 * descriptors.
1692 */
1693static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
1694 unsigned int sleeping)
1695{
1696 if (sleeping & F_RSPD_TXQ0_GTS) {
1697 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1698
1699 if (txq->cleaned + txq->in_use != txq->processed &&
1700 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1701 set_bit(TXQ_RUNNING, &txq->flags);
1702 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1703 V_EGRCNTX(txq->cntxt_id));
1704 }
1705 }
1706
1707 if (sleeping & F_RSPD_TXQ1_GTS) {
1708 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
1709
1710 if (txq->cleaned + txq->in_use != txq->processed &&
1711 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1712 set_bit(TXQ_RUNNING, &txq->flags);
1713 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1714 V_EGRCNTX(txq->cntxt_id));
1715 }
1716 }
1717}
1718
1719/**
1720 * is_new_response - check if a response is newly written
1721 * @r: the response descriptor
1722 * @q: the response queue
1723 *
1724 * Returns true if a response descriptor contains a yet unprocessed
1725 * response.
1726 */
1727static inline int is_new_response(const struct rsp_desc *r,
1728 const struct sge_rspq *q)
1729{
1730 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
1731}
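
/*
 * Worked example of the generation check (sizes illustrative): HW
 * stamps each response it writes with its current generation and flips
 * that value each time it wraps the ring; SW flips q->gen whenever
 * q->cidx wraps. A descriptor left over from the previous pass still
 * carries the old generation, so it compares as not new until HW
 * overwrites it.
 */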
1732
1733#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
1734#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
1735 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
1736 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
1737 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
1738
1739/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
1740#define NOMEM_INTR_DELAY 2500
1741
1742/**
1743 * process_responses - process responses from an SGE response queue
1744 * @adap: the adapter
1745 * @qs: the queue set to which the response queue belongs
1746 * @budget: how many responses can be processed in this round
1747 *
1748 * Process responses from an SGE response queue up to the supplied budget.
1749 * Responses include received packets as well as credits and other events
1750 * for the queues that belong to the response queue's queue set.
1751 * A negative budget is effectively unlimited.
1752 *
1753 * Additionally, chooses the interrupt holdoff time for the next
1754 * interrupt on this queue. If the system is under memory pressure,
1755 * a fairly long delay is used to aid recovery.
1756 */
1757static int process_responses(struct adapter *adap, struct sge_qset *qs,
1758 int budget)
1759{
1760 struct sge_rspq *q = &qs->rspq;
1761 struct rsp_desc *r = &q->desc[q->cidx];
1762 int budget_left = budget;
1763 unsigned int sleeping = 0;
1764 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
1765 int ngathered = 0;
1766
1767 q->next_holdoff = q->holdoff_tmr;
1768
1769 while (likely(budget_left && is_new_response(r, q))) {
1770 int eth, ethpad = 0;
1771 struct sk_buff *skb = NULL;
1772 u32 len, flags = ntohl(r->flags);
1773 u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
1774
1775 eth = r->rss_hdr.opcode == CPL_RX_PKT;
1776
1777 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
1778 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
1779 if (!skb)
1780 goto no_mem;
1781
1782 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
1783 skb->data[0] = CPL_ASYNC_NOTIF;
1784 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
1785 q->async_notif++;
1786 } else if (flags & F_RSPD_IMM_DATA_VALID) {
1787 skb = get_imm_packet(r);
1788 if (unlikely(!skb)) {
1789 no_mem:
1790 q->next_holdoff = NOMEM_INTR_DELAY;
1791 q->nomem++;
1792 /* consume one credit since we tried */
1793 budget_left--;
1794 break;
1795 }
1796 q->imm_data++;
1797 } else if ((len = ntohl(r->len_cq)) != 0) {
1798 struct sge_fl *fl;
1799
1800 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
1801 fl->credits--;
1802 skb = get_packet(adap, fl, G_RSPD_LEN(len),
1803 eth ? SGE_RX_DROP_THRES : 0);
1804 if (!skb)
1805 q->rx_drops++;
1806 else if (r->rss_hdr.opcode == CPL_TRACE_PKT)
1807 __skb_pull(skb, 2);
1808 ethpad = 2;
1809 if (++fl->cidx == fl->size)
1810 fl->cidx = 0;
1811 } else
1812 q->pure_rsps++;
1813
1814 if (flags & RSPD_CTRL_MASK) {
1815 sleeping |= flags & RSPD_GTS_MASK;
1816 handle_rsp_cntrl_info(qs, flags);
1817 }
1818
1819 r++;
1820 if (unlikely(++q->cidx == q->size)) {
1821 q->cidx = 0;
1822 q->gen ^= 1;
1823 r = q->desc;
1824 }
1825 prefetch(r);
1826
1827 if (++q->credits >= (q->size / 4)) {
1828 refill_rspq(adap, q, q->credits);
1829 q->credits = 0;
1830 }
1831
1832 if (likely(skb != NULL)) {
1833 if (eth)
1834 rx_eth(adap, q, skb, ethpad);
1835 else {
1836 /* Preserve the RSS info in csum & priority */
1837 skb->csum = rss_hi;
1838 skb->priority = rss_lo;
1839 ngathered = rx_offload(&adap->tdev, q, skb,
1840 offload_skbs, ngathered);
1841 }
1842 }
1843
1844 --budget_left;
1845 }
1846
1847 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
1848 if (sleeping)
1849 check_ring_db(adap, qs, sleeping);
1850
1851 smp_mb(); /* commit Tx queue .processed updates */
1852 if (unlikely(qs->txq_stopped != 0))
1853 restart_tx(qs);
1854
1855 budget -= budget_left;
1856 return budget;
1857}
1858
1859static inline int is_pure_response(const struct rsp_desc *r)
1860{
1861 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
1862
1863 return (n | r->len_cq) == 0;
1864}
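
/*
 * Note: a response is "pure" exactly when it carries no data at all:
 * no async notification, no immediate data, and no free-list buffer
 * (len_cq == 0). OR-ing the fields lets a single compare test all
 * three conditions.
 */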
1865
1866/**
1867 * napi_rx_handler - the NAPI handler for Rx processing
1868 * @dev: the net device
1869 * @budget: how many packets we can process in this round
1870 *
1871 * Handler for new data events when using NAPI.
1872 */
1873static int napi_rx_handler(struct net_device *dev, int *budget)
1874{
1875 struct adapter *adap = dev->priv;
1876 struct sge_qset *qs = dev2qset(dev);
1877 int effective_budget = min(*budget, dev->quota);
1878
1879 int work_done = process_responses(adap, qs, effective_budget);
1880 *budget -= work_done;
1881 dev->quota -= work_done;
1882
1883 if (work_done >= effective_budget)
1884 return 1;
1885
1886 netif_rx_complete(dev);
1887
1888 /*
1889 * Because we don't atomically flush the following write, it is
1890 * possible that in very rare cases it can reach the device in a way
1891 * that races with a new response being written plus an error interrupt
1892 * causing the NAPI interrupt handler below to return unhandled status
1893 * to the OS. To protect against this would require flushing the write
1894 * and doing both the write and the flush with interrupts off. Way too
1895 * expensive and unjustifiable given the rarity of the race.
1896 *
1897 * The race cannot happen at all with MSI-X.
1898 */
1899 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
1900 V_NEWTIMER(qs->rspq.next_holdoff) |
1901 V_NEWINDEX(qs->rspq.cidx));
1902 return 0;
1903}
1904
1905/*
1906 * Returns true if the device is already scheduled for polling.
1907 */
1908static inline int napi_is_scheduled(struct net_device *dev)
1909{
1910 return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
1911}
1912
1913/**
1914 * process_pure_responses - process pure responses from a response queue
1915 * @adap: the adapter
1916 * @qs: the queue set owning the response queue
1917 * @r: the first pure response to process
1918 *
1919 * A simpler version of process_responses() that handles only pure (i.e.,
1920 * non-data-carrying) responses. Such responses are too lightweight to
1921 * justify calling a softirq under NAPI, so we handle them specially in
1922 * the interrupt handler. The function is called with a pointer to a
1923 * response, which the caller must ensure is a valid pure response.
1924 *
1925 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
1926 */
1927static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
1928 struct rsp_desc *r)
1929{
1930 struct sge_rspq *q = &qs->rspq;
1931 unsigned int sleeping = 0;
1932
1933 do {
1934 u32 flags = ntohl(r->flags);
1935
1936 r++;
1937 if (unlikely(++q->cidx == q->size)) {
1938 q->cidx = 0;
1939 q->gen ^= 1;
1940 r = q->desc;
1941 }
1942 prefetch(r);
1943
1944 if (flags & RSPD_CTRL_MASK) {
1945 sleeping |= flags & RSPD_GTS_MASK;
1946 handle_rsp_cntrl_info(qs, flags);
1947 }
1948
1949 q->pure_rsps++;
1950 if (++q->credits >= (q->size / 4)) {
1951 refill_rspq(adap, q, q->credits);
1952 q->credits = 0;
1953 }
1954 } while (is_new_response(r, q) && is_pure_response(r));
1955
1956 if (sleeping)
1957 check_ring_db(adap, qs, sleeping);
1958
1959 smp_mb(); /* commit Tx queue .processed updates */
1960 if (unlikely(qs->txq_stopped != 0))
1961 restart_tx(qs);
1962
1963 return is_new_response(r, q);
1964}
1965
1966/**
1967 * handle_responses - decide what to do with new responses in NAPI mode
1968 * @adap: the adapter
1969 * @q: the response queue
1970 *
1971 * This is used by the NAPI interrupt handlers to decide what to do with
1972 * new SGE responses. If there are no new responses it returns -1. If
1973 * there are new responses and they are pure (i.e., non-data carrying)
1974 * it handles them straight in hard interrupt context as they are very
1975 * cheap and don't deliver any packets. Finally, if there are any
1976 * data-signaling responses it schedules the NAPI handler. Returns 1 if it
1977 * schedules NAPI, 0 if all new responses were pure.
1978 *
1979 * The caller must ensure that NAPI is not already running.
1980 */
1981static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
1982{
1983 struct sge_qset *qs = rspq_to_qset(q);
1984 struct rsp_desc *r = &q->desc[q->cidx];
1985
1986 if (!is_new_response(r, q))
1987 return -1;
1988 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
1989 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
1990 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
1991 return 0;
1992 }
1993 if (likely(__netif_rx_schedule_prep(qs->netdev)))
1994 __netif_rx_schedule(qs->netdev);
1995 return 1;
1996}
1997
1998/*
1999 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2000 * (i.e., response queue serviced in hard interrupt).
2001 */
2002irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2003{
2004 struct sge_qset *qs = cookie;
2005 struct adapter *adap = qs->netdev->priv;
2006 struct sge_rspq *q = &qs->rspq;
2007
2008 spin_lock(&q->lock);
2009 if (process_responses(adap, qs, -1) == 0)
2010 q->unhandled_irqs++;
2011 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2012 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2013 spin_unlock(&q->lock);
2014 return IRQ_HANDLED;
2015}
2016
2017/*
2018 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2019 * (i.e., response queue serviced by NAPI polling).
2020 */
2021irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2022{
2023 struct sge_qset *qs = cookie;
2024 struct adapter *adap = qs->netdev->priv;
2025 struct sge_rspq *q = &qs->rspq;
2026
2027 spin_lock(&q->lock);
2028 BUG_ON(napi_is_scheduled(qs->netdev));
2029
2030 if (handle_responses(adap, q) < 0)
2031 q->unhandled_irqs++;
2032 spin_unlock(&q->lock);
2033 return IRQ_HANDLED;
2034}
2035
2036/*
2037 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2038 * SGE response queues as well as error and other async events as they all use
2039 * the same MSI vector. We use one SGE response queue per port in this mode
2040 * and protect all response queues with queue 0's lock.
2041 */
2042static irqreturn_t t3_intr_msi(int irq, void *cookie)
2043{
2044 int new_packets = 0;
2045 struct adapter *adap = cookie;
2046 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2047
2048 spin_lock(&q->lock);
2049
2050 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2051 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2052 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2053 new_packets = 1;
2054 }
2055
2056 if (adap->params.nports == 2 &&
2057 process_responses(adap, &adap->sge.qs[1], -1)) {
2058 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2059
2060 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2061 V_NEWTIMER(q1->next_holdoff) |
2062 V_NEWINDEX(q1->cidx));
2063 new_packets = 1;
2064 }
2065
2066 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2067 q->unhandled_irqs++;
2068
2069 spin_unlock(&q->lock);
2070 return IRQ_HANDLED;
2071}
2072
2073static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
2074{
2075 if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) {
2076 if (likely(__netif_rx_schedule_prep(dev)))
2077 __netif_rx_schedule(dev);
2078 return 1;
2079 }
2080 return 0;
2081}
2082
2083/*
2084 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2085 * by NAPI polling). Handles data events from SGE response queues as well as
2086 * error and other async events as they all use the same MSI vector. We use
2087 * one SGE response queue per port in this mode and protect all response
2088 * queues with queue 0's lock.
2089 */
2090irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2091{
2092 int new_packets;
2093 struct adapter *adap = cookie;
2094 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2095
2096 spin_lock(&q->lock);
2097
2098 new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q);
2099 if (adap->params.nports == 2)
2100 new_packets += rspq_check_napi(adap->sge.qs[1].netdev,
2101 &adap->sge.qs[1].rspq);
2102 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2103 q->unhandled_irqs++;
2104
2105 spin_unlock(&q->lock);
2106 return IRQ_HANDLED;
2107}
2108
2109/*
2110 * A helper function that processes responses and issues GTS.
2111 */
2112static inline int process_responses_gts(struct adapter *adap,
2113 struct sge_rspq *rq)
2114{
2115 int work;
2116
2117 work = process_responses(adap, rspq_to_qset(rq), -1);
2118 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2119 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2120 return work;
2121}
2122
2123/*
2124 * The legacy INTx interrupt handler. This needs to handle data events from
2125 * SGE response queues as well as error and other async events as they all use
2126 * the same interrupt pin. We use one SGE response queue per port in this mode
2127 * and protect all response queues with queue 0's lock.
2128 */
2129static irqreturn_t t3_intr(int irq, void *cookie)
2130{
2131 int work_done, w0, w1;
2132 struct adapter *adap = cookie;
2133 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2134 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2135
2136 spin_lock(&q0->lock);
2137
2138 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2139 w1 = adap->params.nports == 2 &&
2140 is_new_response(&q1->desc[q1->cidx], q1);
2141
2142 if (likely(w0 | w1)) {
2143 t3_write_reg(adap, A_PL_CLI, 0);
2144 t3_read_reg(adap, A_PL_CLI); /* flush */
2145
2146 if (likely(w0))
2147 process_responses_gts(adap, q0);
2148
2149 if (w1)
2150 process_responses_gts(adap, q1);
2151
2152 work_done = w0 | w1;
2153 } else
2154 work_done = t3_slow_intr_handler(adap);
2155
2156 spin_unlock(&q0->lock);
2157 return IRQ_RETVAL(work_done != 0);
2158}
2159
2160/*
2161 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2162 * Handles data events from SGE response queues as well as error and other
2163 * async events as they all use the same interrupt pin. We use one SGE
2164 * response queue per port in this mode and protect all response queues with
2165 * queue 0's lock.
2166 */
2167static irqreturn_t t3b_intr(int irq, void *cookie)
2168{
2169 u32 map;
2170 struct adapter *adap = cookie;
2171 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2172
2173 t3_write_reg(adap, A_PL_CLI, 0);
2174 map = t3_read_reg(adap, A_SG_DATA_INTR);
2175
2176 if (unlikely(!map)) /* shared interrupt, most likely */
2177 return IRQ_NONE;
2178
2179 spin_lock(&q0->lock);
2180
2181 if (unlikely(map & F_ERRINTR))
2182 t3_slow_intr_handler(adap);
2183
2184 if (likely(map & 1))
2185 process_responses_gts(adap, q0);
2186
2187 if (map & 2)
2188 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2189
2190 spin_unlock(&q0->lock);
2191 return IRQ_HANDLED;
2192}
2193
2194/*
2195 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2196 * Handles data events from SGE response queues as well as error and other
2197 * async events as they all use the same interrupt pin. We use one SGE
2198 * response queue per port in this mode and protect all response queues with
2199 * queue 0's lock.
2200 */
2201static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2202{
2203 u32 map;
2204 struct net_device *dev;
2205 struct adapter *adap = cookie;
2206 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2207
2208 t3_write_reg(adap, A_PL_CLI, 0);
2209 map = t3_read_reg(adap, A_SG_DATA_INTR);
2210
2211 if (unlikely(!map)) /* shared interrupt, most likely */
2212 return IRQ_NONE;
2213
2214 spin_lock(&q0->lock);
2215
2216 if (unlikely(map & F_ERRINTR))
2217 t3_slow_intr_handler(adap);
2218
2219 if (likely(map & 1)) {
2220 dev = adap->sge.qs[0].netdev;
2221
2222 if (likely(__netif_rx_schedule_prep(dev)))
2223 __netif_rx_schedule(dev);
2224 }
2225 if (map & 2) {
2226 dev = adap->sge.qs[1].netdev;
2227
2228 if (likely(__netif_rx_schedule_prep(dev)))
2229 __netif_rx_schedule(dev);
2230 }
2231
2232 spin_unlock(&q0->lock);
2233 return IRQ_HANDLED;
2234}
2235
2236/**
2237 * t3_intr_handler - select the top-level interrupt handler
2238 * @adap: the adapter
2239 * @polling: whether using NAPI to service response queues
2240 *
2241 * Selects the top-level interrupt handler based on the type of interrupts
2242 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2243 * response queues.
2244 */
2245intr_handler_t t3_intr_handler(struct adapter *adap, int polling)
2246{
2247 if (adap->flags & USING_MSIX)
2248 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2249 if (adap->flags & USING_MSI)
2250 return polling ? t3_intr_msi_napi : t3_intr_msi;
2251 if (adap->params.rev > 0)
2252 return polling ? t3b_intr_napi : t3b_intr;
2253 return t3_intr;
2254}
2255
2256/**
2257 * t3_sge_err_intr_handler - SGE async event interrupt handler
2258 * @adapter: the adapter
2259 *
2260 * Interrupt handler for SGE asynchronous (non-data) events.
2261 */
2262void t3_sge_err_intr_handler(struct adapter *adapter)
2263{
2264 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2265
2266 if (status & F_RSPQCREDITOVERFOW)
2267 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2268
2269 if (status & F_RSPQDISABLED) {
2270 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2271
2272 CH_ALERT(adapter,
2273 "packet delivered to disabled response queue "
2274 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2275 }
2276
2277 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2278 if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
2279 t3_fatal_err(adapter);
2280}
2281
2282/**
2283 * sge_timer_cb - perform periodic maintenance of an SGE qset
2284 * @data: the SGE queue set to maintain
2285 *
2286 * Runs periodically from a timer to perform maintenance of an SGE queue
2287 * set. It performs two tasks:
2288 *
2289 * a) Cleans up any completed Tx descriptors that may still be pending.
2290 * Normal descriptor cleanup happens when new packets are added to a Tx
2291 * queue so this timer is relatively infrequent and does any cleanup only
2292 * if the Tx queue has not seen any new packets in a while. We make a
2293 * best effort attempt to reclaim descriptors, in that we don't wait
2294 * around if we cannot get a queue's lock (which most likely is because
2295 * someone else is queueing new packets and so will also handle the clean
2296 * up). Since control queues use immediate data exclusively we don't
2297 * bother cleaning them up here.
2298 *
2299 * b) Replenishes Rx queues that have run out due to memory shortage.
2300 * Normally new Rx buffers are added when existing ones are consumed but
2301 * when out of memory a queue can become empty. We try to add only a few
2302 * buffers here; the queue will be replenished fully as these new buffers
2303 * are used up, provided the memory shortage has subsided.
2304 */
2305static void sge_timer_cb(unsigned long data)
2306{
2307 spinlock_t *lock;
2308 struct sge_qset *qs = (struct sge_qset *)data;
2309 struct adapter *adap = qs->netdev->priv;
2310
2311 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2312 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2313 spin_unlock(&qs->txq[TXQ_ETH].lock);
2314 }
2315 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2316 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2317 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2318 }
2319 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
2320 &adap->sge.qs[0].rspq.lock;
2321 if (spin_trylock_irq(lock)) {
2322 if (!napi_is_scheduled(qs->netdev)) {
2323 if (qs->fl[0].credits < qs->fl[0].size)
2324 __refill_fl(adap, &qs->fl[0]);
2325 if (qs->fl[1].credits < qs->fl[1].size)
2326 __refill_fl(adap, &qs->fl[1]);
2327 }
2328 spin_unlock_irq(lock);
2329 }
2330 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2331}
2332
2333/**
2334 * t3_update_qset_coalesce - update coalescing settings for a queue set
2335 * @qs: the SGE queue set
2336 * @p: new queue set parameters
2337 *
2338 * Update the coalescing settings for an SGE queue set. Nothing is done
2339 * if the queue set is not initialized yet.
2340 */
2341void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2342{
2343 if (!qs->netdev)
2344 return;
2345
2346 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
2347 qs->rspq.polling = p->polling;
2348 qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll;
2349}
2350
2351/**
2352 * t3_sge_alloc_qset - initialize an SGE queue set
2353 * @adapter: the adapter
2354 * @id: the queue set id
2355 * @nports: how many Ethernet ports will be using this queue set
2356 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2357 * @p: configuration parameters for this queue set
2358 * @ntxq: number of Tx queues for the queue set
2359 * @netdev: net device associated with this queue set
2360 *
2361 * Allocate resources and initialize an SGE queue set. A queue set
2362 * comprises a response queue, two Rx free-buffer queues, and up to 3
2363 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2364 * queue, offload queue, and control queue.
2365 */
2366int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2367 int irq_vec_idx, const struct qset_params *p,
2368 int ntxq, struct net_device *netdev)
2369{
2370 int i, ret = -ENOMEM;
2371 struct sge_qset *q = &adapter->sge.qs[id];
2372
2373 init_qset_cntxt(q, id);
2374 init_timer(&q->tx_reclaim_timer);
2375 q->tx_reclaim_timer.data = (unsigned long)q;
2376 q->tx_reclaim_timer.function = sge_timer_cb;
2377
2378 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2379 sizeof(struct rx_desc),
2380 sizeof(struct rx_sw_desc),
2381 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2382 if (!q->fl[0].desc)
2383 goto err;
2384
2385 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2386 sizeof(struct rx_desc),
2387 sizeof(struct rx_sw_desc),
2388 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2389 if (!q->fl[1].desc)
2390 goto err;
2391
2392 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2393 sizeof(struct rsp_desc), 0,
2394 &q->rspq.phys_addr, NULL);
2395 if (!q->rspq.desc)
2396 goto err;
2397
2398 for (i = 0; i < ntxq; ++i) {
2399 /*
2400 * The control queue always uses immediate data so does not
2401 * need to keep track of any sk_buffs.
2402 */
2403 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2404
2405 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2406 sizeof(struct tx_desc), sz,
2407 &q->txq[i].phys_addr,
2408 &q->txq[i].sdesc);
2409 if (!q->txq[i].desc)
2410 goto err;
2411
2412 q->txq[i].gen = 1;
2413 q->txq[i].size = p->txq_size[i];
2414 spin_lock_init(&q->txq[i].lock);
2415 skb_queue_head_init(&q->txq[i].sendq);
2416 }
2417
2418 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2419 (unsigned long)q);
2420 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2421 (unsigned long)q);
2422
2423 q->fl[0].gen = q->fl[1].gen = 1;
2424 q->fl[0].size = p->fl_size;
2425 q->fl[1].size = p->jumbo_size;
2426
2427 q->rspq.gen = 1;
2428 q->rspq.size = p->rspq_size;
2429 spin_lock_init(&q->rspq.lock);
2430
2431 q->txq[TXQ_ETH].stop_thres = nports *
2432 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2433
2434 if (ntxq == 1) {
2435 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
2436 sizeof(struct cpl_rx_pkt);
2437 q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
2438 sizeof(struct cpl_rx_pkt);
2439 } else {
2440 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
2441 sizeof(struct cpl_rx_data);
2442 q->fl[1].buf_size = (16 * 1024) -
2443 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2444 }
2445
2446 spin_lock(&adapter->sge.reg_lock);
2447
2448 /* FL threshold comparison uses < */
2449 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2450 q->rspq.phys_addr, q->rspq.size,
2451 q->fl[0].buf_size, 1, 0);
2452 if (ret)
2453 goto err_unlock;
2454
2455 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2456 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2457 q->fl[i].phys_addr, q->fl[i].size,
2458 q->fl[i].buf_size, p->cong_thres, 1,
2459 0);
2460 if (ret)
2461 goto err_unlock;
2462 }
2463
2464 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2465 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2466 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2467 1, 0);
2468 if (ret)
2469 goto err_unlock;
2470
2471 if (ntxq > 1) {
2472 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2473 USE_GTS, SGE_CNTXT_OFLD, id,
2474 q->txq[TXQ_OFLD].phys_addr,
2475 q->txq[TXQ_OFLD].size, 0, 1, 0);
2476 if (ret)
2477 goto err_unlock;
2478 }
2479
2480 if (ntxq > 2) {
2481 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2482 SGE_CNTXT_CTRL, id,
2483 q->txq[TXQ_CTRL].phys_addr,
2484 q->txq[TXQ_CTRL].size,
2485 q->txq[TXQ_CTRL].token, 1, 0);
2486 if (ret)
2487 goto err_unlock;
2488 }
2489
2490 spin_unlock(&adapter->sge.reg_lock);
2491 q->netdev = netdev;
2492 t3_update_qset_coalesce(q, p);
2493
2494 /*
2495 * We use atalk_ptr as a backpointer to a qset. If a device is
2496 * associated with multiple queue sets, only the first one sets
2497 * atalk_ptr.
2498 */
2499 if (netdev->atalk_ptr == NULL)
2500 netdev->atalk_ptr = q;
2501
2502 refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
2503 refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
2504 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2505
2506 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2507 V_NEWTIMER(q->rspq.holdoff_tmr));
2508
2509 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2510 return 0;
2511
2512 err_unlock:
2513 spin_unlock(&adapter->sge.reg_lock);
2514 err:
2515 t3_free_qset(adapter, q);
2516 return ret;
2517}
2518
2519/**
2520 * t3_free_sge_resources - free SGE resources
2521 * @adap: the adapter
2522 *
2523 * Frees resources used by the SGE queue sets.
2524 */
2525void t3_free_sge_resources(struct adapter *adap)
2526{
2527 int i;
2528
2529 for (i = 0; i < SGE_QSETS; ++i)
2530 t3_free_qset(adap, &adap->sge.qs[i]);
2531}
2532
2533/**
2534 * t3_sge_start - enable SGE
2535 * @adap: the adapter
2536 *
2537 * Enables the SGE for DMAs. This is the last step in starting packet
2538 * transfers.
2539 */
2540void t3_sge_start(struct adapter *adap)
2541{
2542 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2543}
2544
2545/**
2546 * t3_sge_stop - disable SGE operation
2547 * @adap: the adapter
2548 *
2549 * Disables the DMA engine. This can be called in emergencies (e.g.,
2550 * from error interrupts) or from normal process context. In the latter
2551 * case it also disables any pending queue restart tasklets. Note that
2552 * if it is called in interrupt context it cannot disable the restart
2553 * tasklets as it cannot wait; however, the tasklets will have no effect
2554 * since the doorbells are disabled and the driver will call this again
2555 * later from process context, at which time the tasklets will be stopped
2556 * if they are still running.
2557 */
2558void t3_sge_stop(struct adapter *adap)
2559{
2560 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
2561 if (!in_interrupt()) {
2562 int i;
2563
2564 for (i = 0; i < SGE_QSETS; ++i) {
2565 struct sge_qset *qs = &adap->sge.qs[i];
2566
2567 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
2568 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
2569 }
2570 }
2571}
2572
2573/**
2574 * t3_sge_init - initialize SGE
2575 * @adap: the adapter
2576 * @p: the SGE parameters
2577 *
2578 * Performs SGE initialization needed every time after a chip reset.
2579 * We do not initialize any of the queue sets here, instead the driver
2580 * top-level must request those individually. We also do not enable DMA
2581 * here, that should be done after the queues have been set up.
2582 */
2583void t3_sge_init(struct adapter *adap, struct sge_params *p)
2584{
2585 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
2586
2587 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
2588 F_CQCRDTCTRL |
2589 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
2590 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
2591#if SGE_NUM_GENBITS == 1
2592 ctrl |= F_EGRGENCTRL;
2593#endif
2594 if (adap->params.rev > 0) {
2595 if (!(adap->flags & (USING_MSIX | USING_MSI)))
2596 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
2597 ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
2598 }
2599 t3_write_reg(adap, A_SG_CONTROL, ctrl);
2600 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
2601 V_LORCQDRBTHRSH(512));
2602 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
2603 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
2604 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
2605 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
2606 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
2607 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
2608 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
2609 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
2610 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
2611}
2612
2613/**
2614 * t3_sge_prep - one-time SGE initialization
2615 * @adap: the associated adapter
2616 * @p: SGE parameters
2617 *
2618 * Performs one-time initialization of SGE SW state. Includes determining
2619 * defaults for the assorted SGE parameters, which admins can change until
2620 * they are used to initialize the SGE.
2621 */
2622void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
2623{
2624 int i;
2625
2626 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
2627 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2628
2629 for (i = 0; i < SGE_QSETS; ++i) {
2630 struct qset_params *q = p->qset + i;
2631
2632 q->polling = adap->params.rev > 0;
2633 q->coalesce_usecs = 5;
2634 q->rspq_size = 1024;
2635 q->fl_size = 4096;
2636 q->jumbo_size = 512;
2637 q->txq_size[TXQ_ETH] = 1024;
2638 q->txq_size[TXQ_OFLD] = 1024;
2639 q->txq_size[TXQ_CTRL] = 256;
2640 q->cong_thres = 0;
2641 }
2642
2643 spin_lock_init(&adap->sge.reg_lock);
2644}
2645
2646/**
2647 * t3_get_desc - dump an SGE descriptor for debugging purposes
2648 * @qs: the queue set
2649 * @qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
2650 * @idx: the descriptor index in the queue
2651 * @data: where to dump the descriptor contents
2652 *
2653 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
2654 * size of the descriptor.
2655 */
2656int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
2657 unsigned char *data)
2658{
2659 if (qnum >= 6)
2660 return -EINVAL;
2661
2662 if (qnum < 3) {
2663 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
2664 return -EINVAL;
2665 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
2666 return sizeof(struct tx_desc);
2667 }
2668
2669 if (qnum == 3) {
2670 if (!qs->rspq.desc || idx >= qs->rspq.size)
2671 return -EINVAL;
2672 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
2673 return sizeof(struct rsp_desc);
2674 }
2675
2676 qnum -= 4;
2677 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
2678 return -EINVAL;
2679 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
2680 return sizeof(struct rx_desc);
2681}
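
Taken together, the routines above fix a bring-up order for the SGE: one-time software defaults (t3_sge_prep), per-reset hardware programming (t3_sge_init), queue-set allocation (t3_sge_alloc_qset), and DMA enable (t3_sge_start) strictly last. Below is a minimal sketch of that sequence; adap->params.sge, adap->port[] and SGE_TXQ_PER_SET are assumed names not shown in this hunk, and error handling is trimmed.

/*
 * Illustrative only: bring up the SGE in the order the comments above
 * require.  adap->params.sge, adap->port[] and SGE_TXQ_PER_SET are
 * placeholder names, not taken from this patch.
 */
static int example_sge_bringup(struct adapter *adap)
{
	int i, err;

	t3_sge_prep(adap, &adap->params.sge);	/* one-time SW defaults */
	t3_sge_init(adap, &adap->params.sge);	/* per-reset HW setup */

	/* One queue set per port; queue set i uses IRQ vector index i. */
	for (i = 0; i < adap->params.nports; i++) {
		err = t3_sge_alloc_qset(adap, i, 1, i,
					&adap->params.sge.qset[i],
					SGE_TXQ_PER_SET, adap->port[i]);
		if (err) {
			t3_free_sge_resources(adap);
			return err;
		}
	}

	t3_sge_start(adap);			/* last step: enable DMA */
	return 0;
}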
diff --git a/drivers/net/cxgb3/sge_defs.h b/drivers/net/cxgb3/sge_defs.h
new file mode 100644
index 000000000000..514869e26a76
--- /dev/null
+++ b/drivers/net/cxgb3/sge_defs.h
@@ -0,0 +1,251 @@
1/*
2 * This file is automatically generated --- any changes will be lost.
3 */
4
5#ifndef _SGE_DEFS_H
6#define _SGE_DEFS_H
7
8#define S_EC_CREDITS 0
9#define M_EC_CREDITS 0x7FFF
10#define V_EC_CREDITS(x) ((x) << S_EC_CREDITS)
11#define G_EC_CREDITS(x) (((x) >> S_EC_CREDITS) & M_EC_CREDITS)
12
13#define S_EC_GTS 15
14#define V_EC_GTS(x) ((x) << S_EC_GTS)
15#define F_EC_GTS V_EC_GTS(1U)
16
17#define S_EC_INDEX 16
18#define M_EC_INDEX 0xFFFF
19#define V_EC_INDEX(x) ((x) << S_EC_INDEX)
20#define G_EC_INDEX(x) (((x) >> S_EC_INDEX) & M_EC_INDEX)
21
22#define S_EC_SIZE 0
23#define M_EC_SIZE 0xFFFF
24#define V_EC_SIZE(x) ((x) << S_EC_SIZE)
25#define G_EC_SIZE(x) (((x) >> S_EC_SIZE) & M_EC_SIZE)
26
27#define S_EC_BASE_LO 16
28#define M_EC_BASE_LO 0xFFFF
29#define V_EC_BASE_LO(x) ((x) << S_EC_BASE_LO)
30#define G_EC_BASE_LO(x) (((x) >> S_EC_BASE_LO) & M_EC_BASE_LO)
31
32#define S_EC_BASE_HI 0
33#define M_EC_BASE_HI 0xF
34#define V_EC_BASE_HI(x) ((x) << S_EC_BASE_HI)
35#define G_EC_BASE_HI(x) (((x) >> S_EC_BASE_HI) & M_EC_BASE_HI)
36
37#define S_EC_RESPQ 4
38#define M_EC_RESPQ 0x7
39#define V_EC_RESPQ(x) ((x) << S_EC_RESPQ)
40#define G_EC_RESPQ(x) (((x) >> S_EC_RESPQ) & M_EC_RESPQ)
41
42#define S_EC_TYPE 7
43#define M_EC_TYPE 0x7
44#define V_EC_TYPE(x) ((x) << S_EC_TYPE)
45#define G_EC_TYPE(x) (((x) >> S_EC_TYPE) & M_EC_TYPE)
46
47#define S_EC_GEN 10
48#define V_EC_GEN(x) ((x) << S_EC_GEN)
49#define F_EC_GEN V_EC_GEN(1U)
50
51#define S_EC_UP_TOKEN 11
52#define M_EC_UP_TOKEN 0xFFFFF
53#define V_EC_UP_TOKEN(x) ((x) << S_EC_UP_TOKEN)
54#define G_EC_UP_TOKEN(x) (((x) >> S_EC_UP_TOKEN) & M_EC_UP_TOKEN)
55
56#define S_EC_VALID 31
57#define V_EC_VALID(x) ((x) << S_EC_VALID)
58#define F_EC_VALID V_EC_VALID(1U)
59
60#define S_RQ_MSI_VEC 20
61#define M_RQ_MSI_VEC 0x3F
62#define V_RQ_MSI_VEC(x) ((x) << S_RQ_MSI_VEC)
63#define G_RQ_MSI_VEC(x) (((x) >> S_RQ_MSI_VEC) & M_RQ_MSI_VEC)
64
65#define S_RQ_INTR_EN 26
66#define V_RQ_INTR_EN(x) ((x) << S_RQ_INTR_EN)
67#define F_RQ_INTR_EN V_RQ_INTR_EN(1U)
68
69#define S_RQ_GEN 28
70#define V_RQ_GEN(x) ((x) << S_RQ_GEN)
71#define F_RQ_GEN V_RQ_GEN(1U)
72
73#define S_CQ_INDEX 0
74#define M_CQ_INDEX 0xFFFF
75#define V_CQ_INDEX(x) ((x) << S_CQ_INDEX)
76#define G_CQ_INDEX(x) (((x) >> S_CQ_INDEX) & M_CQ_INDEX)
77
78#define S_CQ_SIZE 16
79#define M_CQ_SIZE 0xFFFF
80#define V_CQ_SIZE(x) ((x) << S_CQ_SIZE)
81#define G_CQ_SIZE(x) (((x) >> S_CQ_SIZE) & M_CQ_SIZE)
82
83#define S_CQ_BASE_HI 0
84#define M_CQ_BASE_HI 0xFFFFF
85#define V_CQ_BASE_HI(x) ((x) << S_CQ_BASE_HI)
86#define G_CQ_BASE_HI(x) (((x) >> S_CQ_BASE_HI) & M_CQ_BASE_HI)
87
88#define S_CQ_RSPQ 20
89#define M_CQ_RSPQ 0x3F
90#define V_CQ_RSPQ(x) ((x) << S_CQ_RSPQ)
91#define G_CQ_RSPQ(x) (((x) >> S_CQ_RSPQ) & M_CQ_RSPQ)
92
93#define S_CQ_ASYNC_NOTIF 26
94#define V_CQ_ASYNC_NOTIF(x) ((x) << S_CQ_ASYNC_NOTIF)
95#define F_CQ_ASYNC_NOTIF V_CQ_ASYNC_NOTIF(1U)
96
97#define S_CQ_ARMED 27
98#define V_CQ_ARMED(x) ((x) << S_CQ_ARMED)
99#define F_CQ_ARMED V_CQ_ARMED(1U)
100
101#define S_CQ_ASYNC_NOTIF_SOL 28
102#define V_CQ_ASYNC_NOTIF_SOL(x) ((x) << S_CQ_ASYNC_NOTIF_SOL)
103#define F_CQ_ASYNC_NOTIF_SOL V_CQ_ASYNC_NOTIF_SOL(1U)
104
105#define S_CQ_GEN 29
106#define V_CQ_GEN(x) ((x) << S_CQ_GEN)
107#define F_CQ_GEN V_CQ_GEN(1U)
108
109#define S_CQ_OVERFLOW_MODE 31
110#define V_CQ_OVERFLOW_MODE(x) ((x) << S_CQ_OVERFLOW_MODE)
111#define F_CQ_OVERFLOW_MODE V_CQ_OVERFLOW_MODE(1U)
112
113#define S_CQ_CREDITS 0
114#define M_CQ_CREDITS 0xFFFF
115#define V_CQ_CREDITS(x) ((x) << S_CQ_CREDITS)
116#define G_CQ_CREDITS(x) (((x) >> S_CQ_CREDITS) & M_CQ_CREDITS)
117
118#define S_CQ_CREDIT_THRES 16
119#define M_CQ_CREDIT_THRES 0x1FFF
120#define V_CQ_CREDIT_THRES(x) ((x) << S_CQ_CREDIT_THRES)
121#define G_CQ_CREDIT_THRES(x) (((x) >> S_CQ_CREDIT_THRES) & M_CQ_CREDIT_THRES)
122
123#define S_FL_BASE_HI 0
124#define M_FL_BASE_HI 0xFFFFF
125#define V_FL_BASE_HI(x) ((x) << S_FL_BASE_HI)
126#define G_FL_BASE_HI(x) (((x) >> S_FL_BASE_HI) & M_FL_BASE_HI)
127
128#define S_FL_INDEX_LO 20
129#define M_FL_INDEX_LO 0xFFF
130#define V_FL_INDEX_LO(x) ((x) << S_FL_INDEX_LO)
131#define G_FL_INDEX_LO(x) (((x) >> S_FL_INDEX_LO) & M_FL_INDEX_LO)
132
133#define S_FL_INDEX_HI 0
134#define M_FL_INDEX_HI 0xF
135#define V_FL_INDEX_HI(x) ((x) << S_FL_INDEX_HI)
136#define G_FL_INDEX_HI(x) (((x) >> S_FL_INDEX_HI) & M_FL_INDEX_HI)
137
138#define S_FL_SIZE 4
139#define M_FL_SIZE 0xFFFF
140#define V_FL_SIZE(x) ((x) << S_FL_SIZE)
141#define G_FL_SIZE(x) (((x) >> S_FL_SIZE) & M_FL_SIZE)
142
143#define S_FL_GEN 20
144#define V_FL_GEN(x) ((x) << S_FL_GEN)
145#define F_FL_GEN V_FL_GEN(1U)
146
147#define S_FL_ENTRY_SIZE_LO 21
148#define M_FL_ENTRY_SIZE_LO 0x7FF
149#define V_FL_ENTRY_SIZE_LO(x) ((x) << S_FL_ENTRY_SIZE_LO)
150#define G_FL_ENTRY_SIZE_LO(x) (((x) >> S_FL_ENTRY_SIZE_LO) & M_FL_ENTRY_SIZE_LO)
151
152#define S_FL_ENTRY_SIZE_HI 0
153#define M_FL_ENTRY_SIZE_HI 0x1FFFFF
154#define V_FL_ENTRY_SIZE_HI(x) ((x) << S_FL_ENTRY_SIZE_HI)
155#define G_FL_ENTRY_SIZE_HI(x) (((x) >> S_FL_ENTRY_SIZE_HI) & M_FL_ENTRY_SIZE_HI)
156
157#define S_FL_CONG_THRES 21
158#define M_FL_CONG_THRES 0x3FF
159#define V_FL_CONG_THRES(x) ((x) << S_FL_CONG_THRES)
160#define G_FL_CONG_THRES(x) (((x) >> S_FL_CONG_THRES) & M_FL_CONG_THRES)
161
162#define S_FL_GTS 31
163#define V_FL_GTS(x) ((x) << S_FL_GTS)
164#define F_FL_GTS V_FL_GTS(1U)
165
166#define S_FLD_GEN1 31
167#define V_FLD_GEN1(x) ((x) << S_FLD_GEN1)
168#define F_FLD_GEN1 V_FLD_GEN1(1U)
169
170#define S_FLD_GEN2 0
171#define V_FLD_GEN2(x) ((x) << S_FLD_GEN2)
172#define F_FLD_GEN2 V_FLD_GEN2(1U)
173
174#define S_RSPD_TXQ1_CR 0
175#define M_RSPD_TXQ1_CR 0x7F
176#define V_RSPD_TXQ1_CR(x) ((x) << S_RSPD_TXQ1_CR)
177#define G_RSPD_TXQ1_CR(x) (((x) >> S_RSPD_TXQ1_CR) & M_RSPD_TXQ1_CR)
178
179#define S_RSPD_TXQ1_GTS 7
180#define V_RSPD_TXQ1_GTS(x) ((x) << S_RSPD_TXQ1_GTS)
181#define F_RSPD_TXQ1_GTS V_RSPD_TXQ1_GTS(1U)
182
183#define S_RSPD_TXQ2_CR 8
184#define M_RSPD_TXQ2_CR 0x7F
185#define V_RSPD_TXQ2_CR(x) ((x) << S_RSPD_TXQ2_CR)
186#define G_RSPD_TXQ2_CR(x) (((x) >> S_RSPD_TXQ2_CR) & M_RSPD_TXQ2_CR)
187
188#define S_RSPD_TXQ2_GTS 15
189#define V_RSPD_TXQ2_GTS(x) ((x) << S_RSPD_TXQ2_GTS)
190#define F_RSPD_TXQ2_GTS V_RSPD_TXQ2_GTS(1U)
191
192#define S_RSPD_TXQ0_CR 16
193#define M_RSPD_TXQ0_CR 0x7F
194#define V_RSPD_TXQ0_CR(x) ((x) << S_RSPD_TXQ0_CR)
195#define G_RSPD_TXQ0_CR(x) (((x) >> S_RSPD_TXQ0_CR) & M_RSPD_TXQ0_CR)
196
197#define S_RSPD_TXQ0_GTS 23
198#define V_RSPD_TXQ0_GTS(x) ((x) << S_RSPD_TXQ0_GTS)
199#define F_RSPD_TXQ0_GTS V_RSPD_TXQ0_GTS(1U)
200
201#define S_RSPD_EOP 24
202#define V_RSPD_EOP(x) ((x) << S_RSPD_EOP)
203#define F_RSPD_EOP V_RSPD_EOP(1U)
204
205#define S_RSPD_SOP 25
206#define V_RSPD_SOP(x) ((x) << S_RSPD_SOP)
207#define F_RSPD_SOP V_RSPD_SOP(1U)
208
209#define S_RSPD_ASYNC_NOTIF 26
210#define V_RSPD_ASYNC_NOTIF(x) ((x) << S_RSPD_ASYNC_NOTIF)
211#define F_RSPD_ASYNC_NOTIF V_RSPD_ASYNC_NOTIF(1U)
212
213#define S_RSPD_FL0_GTS 27
214#define V_RSPD_FL0_GTS(x) ((x) << S_RSPD_FL0_GTS)
215#define F_RSPD_FL0_GTS V_RSPD_FL0_GTS(1U)
216
217#define S_RSPD_FL1_GTS 28
218#define V_RSPD_FL1_GTS(x) ((x) << S_RSPD_FL1_GTS)
219#define F_RSPD_FL1_GTS V_RSPD_FL1_GTS(1U)
220
221#define S_RSPD_IMM_DATA_VALID 29
222#define V_RSPD_IMM_DATA_VALID(x) ((x) << S_RSPD_IMM_DATA_VALID)
223#define F_RSPD_IMM_DATA_VALID V_RSPD_IMM_DATA_VALID(1U)
224
225#define S_RSPD_OFFLOAD 30
226#define V_RSPD_OFFLOAD(x) ((x) << S_RSPD_OFFLOAD)
227#define F_RSPD_OFFLOAD V_RSPD_OFFLOAD(1U)
228
229#define S_RSPD_GEN1 31
230#define V_RSPD_GEN1(x) ((x) << S_RSPD_GEN1)
231#define F_RSPD_GEN1 V_RSPD_GEN1(1U)
232
233#define S_RSPD_LEN 0
234#define M_RSPD_LEN 0x7FFFFFFF
235#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN)
236#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN)
237
238#define S_RSPD_FLQ 31
239#define V_RSPD_FLQ(x) ((x) << S_RSPD_FLQ)
240#define F_RSPD_FLQ V_RSPD_FLQ(1U)
241
242#define S_RSPD_GEN2 0
243#define V_RSPD_GEN2(x) ((x) << S_RSPD_GEN2)
244#define F_RSPD_GEN2 V_RSPD_GEN2(1U)
245
246#define S_RSPD_INR_VEC 1
247#define M_RSPD_INR_VEC 0x7F
248#define V_RSPD_INR_VEC(x) ((x) << S_RSPD_INR_VEC)
249#define G_RSPD_INR_VEC(x) (((x) >> S_RSPD_INR_VEC) & M_RSPD_INR_VEC)
250
251#endif /* _SGE_DEFS_H */
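
Every field in this header follows one four-macro pattern: S_FOO gives the bit position, M_FOO the field mask (omitted for single-bit fields), V_FOO(x) shifts a value into place, G_FOO(x) extracts it, and F_FOO is the ready-shifted form of a one-bit flag. The standalone userspace sketch below copies two of the groups verbatim to show how a context word is packed and unpacked; it is illustration only, not driver code.

#include <stdio.h>
#include <stdint.h>

/* Two macro groups reproduced from sge_defs.h above. */
#define S_EC_CREDITS    0
#define M_EC_CREDITS    0x7FFF
#define V_EC_CREDITS(x) ((x) << S_EC_CREDITS)
#define G_EC_CREDITS(x) (((x) >> S_EC_CREDITS) & M_EC_CREDITS)

#define S_EC_INDEX    16
#define M_EC_INDEX    0xFFFF
#define V_EC_INDEX(x) ((x) << S_EC_INDEX)
#define G_EC_INDEX(x) (((x) >> S_EC_INDEX) & M_EC_INDEX)

int main(void)
{
	/* Pack an egress-context index and a credit count into one word. */
	uint32_t word = V_EC_INDEX(0x1A2Bu) | V_EC_CREDITS(100u);

	/* Unpack them again; prints "index=0x1a2b credits=100". */
	printf("index=0x%x credits=%u\n",
	       G_EC_INDEX(word), G_EC_CREDITS(word));
	return 0;
}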
diff --git a/drivers/net/cxgb3/t3_cpl.h b/drivers/net/cxgb3/t3_cpl.h
new file mode 100644
index 000000000000..b7a1a310dfd4
--- /dev/null
+++ b/drivers/net/cxgb3/t3_cpl.h
@@ -0,0 +1,1444 @@
1/*
2 * Copyright (c) 2004-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef T3_CPL_H
33#define T3_CPL_H
34
35#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
36# include <asm/byteorder.h>
37#endif
38
39enum CPL_opcode {
40 CPL_PASS_OPEN_REQ = 0x1,
41 CPL_PASS_ACCEPT_RPL = 0x2,
42 CPL_ACT_OPEN_REQ = 0x3,
43 CPL_SET_TCB = 0x4,
44 CPL_SET_TCB_FIELD = 0x5,
45 CPL_GET_TCB = 0x6,
46 CPL_PCMD = 0x7,
47 CPL_CLOSE_CON_REQ = 0x8,
48 CPL_CLOSE_LISTSRV_REQ = 0x9,
49 CPL_ABORT_REQ = 0xA,
50 CPL_ABORT_RPL = 0xB,
51 CPL_TX_DATA = 0xC,
52 CPL_RX_DATA_ACK = 0xD,
53 CPL_TX_PKT = 0xE,
54 CPL_RTE_DELETE_REQ = 0xF,
55 CPL_RTE_WRITE_REQ = 0x10,
56 CPL_RTE_READ_REQ = 0x11,
57 CPL_L2T_WRITE_REQ = 0x12,
58 CPL_L2T_READ_REQ = 0x13,
59 CPL_SMT_WRITE_REQ = 0x14,
60 CPL_SMT_READ_REQ = 0x15,
61 CPL_TX_PKT_LSO = 0x16,
62 CPL_PCMD_READ = 0x17,
63 CPL_BARRIER = 0x18,
64 CPL_TID_RELEASE = 0x1A,
65
66 CPL_CLOSE_LISTSRV_RPL = 0x20,
67 CPL_ERROR = 0x21,
68 CPL_GET_TCB_RPL = 0x22,
69 CPL_L2T_WRITE_RPL = 0x23,
70 CPL_PCMD_READ_RPL = 0x24,
71 CPL_PCMD_RPL = 0x25,
72 CPL_PEER_CLOSE = 0x26,
73 CPL_RTE_DELETE_RPL = 0x27,
74 CPL_RTE_WRITE_RPL = 0x28,
75 CPL_RX_DDP_COMPLETE = 0x29,
76 CPL_RX_PHYS_ADDR = 0x2A,
77 CPL_RX_PKT = 0x2B,
78 CPL_RX_URG_NOTIFY = 0x2C,
79 CPL_SET_TCB_RPL = 0x2D,
80 CPL_SMT_WRITE_RPL = 0x2E,
81 CPL_TX_DATA_ACK = 0x2F,
82
83 CPL_ABORT_REQ_RSS = 0x30,
84 CPL_ABORT_RPL_RSS = 0x31,
85 CPL_CLOSE_CON_RPL = 0x32,
86 CPL_ISCSI_HDR = 0x33,
87 CPL_L2T_READ_RPL = 0x34,
88 CPL_RDMA_CQE = 0x35,
89 CPL_RDMA_CQE_READ_RSP = 0x36,
90 CPL_RDMA_CQE_ERR = 0x37,
91 CPL_RTE_READ_RPL = 0x38,
92 CPL_RX_DATA = 0x39,
93
94 CPL_ACT_OPEN_RPL = 0x40,
95 CPL_PASS_OPEN_RPL = 0x41,
96 CPL_RX_DATA_DDP = 0x42,
97 CPL_SMT_READ_RPL = 0x43,
98
99 CPL_ACT_ESTABLISH = 0x50,
100 CPL_PASS_ESTABLISH = 0x51,
101
102 CPL_PASS_ACCEPT_REQ = 0x70,
103
104 CPL_ASYNC_NOTIF = 0x80, /* fake opcode for async notifications */
105
106 CPL_TX_DMA_ACK = 0xA0,
107 CPL_RDMA_READ_REQ = 0xA1,
108 CPL_RDMA_TERMINATE = 0xA2,
109 CPL_TRACE_PKT = 0xA3,
110 CPL_RDMA_EC_STATUS = 0xA5,
111
112 NUM_CPL_CMDS /* must be last and previous entries must be sorted */
113};
114
115enum CPL_error {
116 CPL_ERR_NONE = 0,
117 CPL_ERR_TCAM_PARITY = 1,
118 CPL_ERR_TCAM_FULL = 3,
119 CPL_ERR_CONN_RESET = 20,
120 CPL_ERR_CONN_EXIST = 22,
121 CPL_ERR_ARP_MISS = 23,
122 CPL_ERR_BAD_SYN = 24,
123 CPL_ERR_CONN_TIMEDOUT = 30,
124 CPL_ERR_XMIT_TIMEDOUT = 31,
125 CPL_ERR_PERSIST_TIMEDOUT = 32,
126 CPL_ERR_FINWAIT2_TIMEDOUT = 33,
127 CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
128 CPL_ERR_RTX_NEG_ADVICE = 35,
129 CPL_ERR_PERSIST_NEG_ADVICE = 36,
130 CPL_ERR_ABORT_FAILED = 42,
131 CPL_ERR_GENERAL = 99
132};
133
134enum {
135 CPL_CONN_POLICY_AUTO = 0,
136 CPL_CONN_POLICY_ASK = 1,
137 CPL_CONN_POLICY_DENY = 3
138};
139
140enum {
141 ULP_MODE_NONE = 0,
142 ULP_MODE_ISCSI = 2,
143 ULP_MODE_RDMA = 4,
144 ULP_MODE_TCPDDP = 5
145};
146
147enum {
148 ULP_CRC_HEADER = 1 << 0,
149 ULP_CRC_DATA = 1 << 1
150};
151
152enum {
153 CPL_PASS_OPEN_ACCEPT,
154 CPL_PASS_OPEN_REJECT
155};
156
157enum {
158 CPL_ABORT_SEND_RST = 0,
159 CPL_ABORT_NO_RST,
160 CPL_ABORT_POST_CLOSE_REQ = 2
161};
162
163enum { /* TX_PKT_LSO ethernet types */
164 CPL_ETH_II,
165 CPL_ETH_II_VLAN,
166 CPL_ETH_802_3,
167 CPL_ETH_802_3_VLAN
168};
169
170enum { /* TCP congestion control algorithms */
171 CONG_ALG_RENO,
172 CONG_ALG_TAHOE,
173 CONG_ALG_NEWRENO,
174 CONG_ALG_HIGHSPEED
175};
176
177union opcode_tid {
178 __be32 opcode_tid;
179 __u8 opcode;
180};
181
182#define S_OPCODE 24
183#define V_OPCODE(x) ((x) << S_OPCODE)
184#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
185#define G_TID(x) ((x) & 0xFFFFFF)
186
187/* tid is assumed to be 24 bits */
188#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))
189
190#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
191
192/* extract the TID from a CPL command */
193#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
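/*
 * Worked example (not in the original header): with CPL_ACT_OPEN_REQ == 0x3,
 * MK_OPCODE_TID(CPL_ACT_OPEN_REQ, 0x1234) evaluates to 0x03001234.  GET_TID()
 * byte-swaps the stored big-endian field with ntohl() and masks off the top
 * opcode byte, recovering 0x1234.
 */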
194
195struct tcp_options {
196 __be16 mss;
197 __u8 wsf;
198#if defined(__LITTLE_ENDIAN_BITFIELD)
199 __u8:5;
200 __u8 ecn:1;
201 __u8 sack:1;
202 __u8 tstamp:1;
203#else
204 __u8 tstamp:1;
205 __u8 sack:1;
206 __u8 ecn:1;
207 __u8:5;
208#endif
209};
210
211struct rss_header {
212 __u8 opcode;
213#if defined(__LITTLE_ENDIAN_BITFIELD)
214 __u8 cpu_idx:6;
215 __u8 hash_type:2;
216#else
217 __u8 hash_type:2;
218 __u8 cpu_idx:6;
219#endif
220 __be16 cq_idx;
221 __be32 rss_hash_val;
222};
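/*
 * The mirrored __LITTLE_ENDIAN_BITFIELD/__BIG_ENDIAN_BITFIELD blocks in
 * tcp_options and rss_header keep the wire format fixed: the hardware
 * defines which bits mean what, so the bit-fields are declared in reverse
 * order on big-endian hosts to land on the same bits.
 */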
223
224#ifndef CHELSIO_FW
225struct work_request_hdr {
226 __be32 wr_hi;
227 __be32 wr_lo;
228};
229
230/* wr_hi fields */
231#define S_WR_SGE_CREDITS 0
232#define M_WR_SGE_CREDITS 0xFF
233#define V_WR_SGE_CREDITS(x) ((x) << S_WR_SGE_CREDITS)
234#define G_WR_SGE_CREDITS(x) (((x) >> S_WR_SGE_CREDITS) & M_WR_SGE_CREDITS)
235
236#define S_WR_SGLSFLT 8
237#define M_WR_SGLSFLT 0xFF
238#define V_WR_SGLSFLT(x) ((x) << S_WR_SGLSFLT)
239#define G_WR_SGLSFLT(x) (((x) >> S_WR_SGLSFLT) & M_WR_SGLSFLT)
240
241#define S_WR_BCNTLFLT 16
242#define M_WR_BCNTLFLT 0xF
243#define V_WR_BCNTLFLT(x) ((x) << S_WR_BCNTLFLT)
244#define G_WR_BCNTLFLT(x) (((x) >> S_WR_BCNTLFLT) & M_WR_BCNTLFLT)
245
246#define S_WR_DATATYPE 20
247#define V_WR_DATATYPE(x) ((x) << S_WR_DATATYPE)
248#define F_WR_DATATYPE V_WR_DATATYPE(1U)
249
250#define S_WR_COMPL 21
251#define V_WR_COMPL(x) ((x) << S_WR_COMPL)
252#define F_WR_COMPL V_WR_COMPL(1U)
253
254#define S_WR_EOP 22
255#define V_WR_EOP(x) ((x) << S_WR_EOP)
256#define F_WR_EOP V_WR_EOP(1U)
257
258#define S_WR_SOP 23
259#define V_WR_SOP(x) ((x) << S_WR_SOP)
260#define F_WR_SOP V_WR_SOP(1U)
261
262#define S_WR_OP 24
263#define M_WR_OP 0xFF
264#define V_WR_OP(x) ((x) << S_WR_OP)
265#define G_WR_OP(x) (((x) >> S_WR_OP) & M_WR_OP)
266
267/* wr_lo fields */
268#define S_WR_LEN 0
269#define M_WR_LEN 0xFF
270#define V_WR_LEN(x) ((x) << S_WR_LEN)
271#define G_WR_LEN(x) (((x) >> S_WR_LEN) & M_WR_LEN)
272
273#define S_WR_TID 8
274#define M_WR_TID 0xFFFFF
275#define V_WR_TID(x) ((x) << S_WR_TID)
276#define G_WR_TID(x) (((x) >> S_WR_TID) & M_WR_TID)
277
278#define S_WR_CR_FLUSH 30
279#define V_WR_CR_FLUSH(x) ((x) << S_WR_CR_FLUSH)
280#define F_WR_CR_FLUSH V_WR_CR_FLUSH(1U)
281
282#define S_WR_GEN 31
283#define V_WR_GEN(x) ((x) << S_WR_GEN)
284#define F_WR_GEN V_WR_GEN(1U)
285
286# define WR_HDR struct work_request_hdr wr
287# define RSS_HDR
288#else
289# define WR_HDR
290# define RSS_HDR struct rss_header rss_hdr;
291#endif
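/*
 * Net effect of the conditionals above: in host-driver builds (CHELSIO_FW
 * undefined) every egress work request below starts with a struct
 * work_request_hdr and ingress replies carry no RSS header; firmware builds
 * invert this, prefixing ingress messages with struct rss_header instead.
 */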
292
293/* option 0 lower-half fields */
294#define S_CPL_STATUS 0
295#define M_CPL_STATUS 0xFF
296#define V_CPL_STATUS(x) ((x) << S_CPL_STATUS)
297#define G_CPL_STATUS(x) (((x) >> S_CPL_STATUS) & M_CPL_STATUS)
298
299#define S_INJECT_TIMER 6
300#define V_INJECT_TIMER(x) ((x) << S_INJECT_TIMER)
301#define F_INJECT_TIMER V_INJECT_TIMER(1U)
302
303#define S_NO_OFFLOAD 7
304#define V_NO_OFFLOAD(x) ((x) << S_NO_OFFLOAD)
305#define F_NO_OFFLOAD V_NO_OFFLOAD(1U)
306
307#define S_ULP_MODE 8
308#define M_ULP_MODE 0xF
309#define V_ULP_MODE(x) ((x) << S_ULP_MODE)
310#define G_ULP_MODE(x) (((x) >> S_ULP_MODE) & M_ULP_MODE)
311
312#define S_RCV_BUFSIZ 12
313#define M_RCV_BUFSIZ 0x3FFF
314#define V_RCV_BUFSIZ(x) ((x) << S_RCV_BUFSIZ)
315#define G_RCV_BUFSIZ(x) (((x) >> S_RCV_BUFSIZ) & M_RCV_BUFSIZ)
316
317#define S_TOS 26
318#define M_TOS 0x3F
319#define V_TOS(x) ((x) << S_TOS)
320#define G_TOS(x) (((x) >> S_TOS) & M_TOS)
321
322/* option 0 upper-half fields */
323#define S_DELACK 0
324#define V_DELACK(x) ((x) << S_DELACK)
325#define F_DELACK V_DELACK(1U)
326
327#define S_NO_CONG 1
328#define V_NO_CONG(x) ((x) << S_NO_CONG)
329#define F_NO_CONG V_NO_CONG(1U)
330
331#define S_SRC_MAC_SEL 2
332#define M_SRC_MAC_SEL 0x3
333#define V_SRC_MAC_SEL(x) ((x) << S_SRC_MAC_SEL)
334#define G_SRC_MAC_SEL(x) (((x) >> S_SRC_MAC_SEL) & M_SRC_MAC_SEL)
335
336#define S_L2T_IDX 4
337#define M_L2T_IDX 0x7FF
338#define V_L2T_IDX(x) ((x) << S_L2T_IDX)
339#define G_L2T_IDX(x) (((x) >> S_L2T_IDX) & M_L2T_IDX)
340
341#define S_TX_CHANNEL 15
342#define V_TX_CHANNEL(x) ((x) << S_TX_CHANNEL)
343#define F_TX_CHANNEL V_TX_CHANNEL(1U)
344
345#define S_TCAM_BYPASS 16
346#define V_TCAM_BYPASS(x) ((x) << S_TCAM_BYPASS)
347#define F_TCAM_BYPASS V_TCAM_BYPASS(1U)
348
349#define S_NAGLE 17
350#define V_NAGLE(x) ((x) << S_NAGLE)
351#define F_NAGLE V_NAGLE(1U)
352
353#define S_WND_SCALE 18
354#define M_WND_SCALE 0xF
355#define V_WND_SCALE(x) ((x) << S_WND_SCALE)
356#define G_WND_SCALE(x) (((x) >> S_WND_SCALE) & M_WND_SCALE)
357
358#define S_KEEP_ALIVE 22
359#define V_KEEP_ALIVE(x) ((x) << S_KEEP_ALIVE)
360#define F_KEEP_ALIVE V_KEEP_ALIVE(1U)
361
362#define S_MAX_RETRANS 23
363#define M_MAX_RETRANS 0xF
364#define V_MAX_RETRANS(x) ((x) << S_MAX_RETRANS)
365#define G_MAX_RETRANS(x) (((x) >> S_MAX_RETRANS) & M_MAX_RETRANS)
366
367#define S_MAX_RETRANS_OVERRIDE 27
368#define V_MAX_RETRANS_OVERRIDE(x) ((x) << S_MAX_RETRANS_OVERRIDE)
369#define F_MAX_RETRANS_OVERRIDE V_MAX_RETRANS_OVERRIDE(1U)
370
371#define S_MSS_IDX 28
372#define M_MSS_IDX 0xF
373#define V_MSS_IDX(x) ((x) << S_MSS_IDX)
374#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX)
375
376/* option 1 fields */
377#define S_RSS_ENABLE 0
378#define V_RSS_ENABLE(x) ((x) << S_RSS_ENABLE)
379#define F_RSS_ENABLE V_RSS_ENABLE(1U)
380
381#define S_RSS_MASK_LEN 1
382#define M_RSS_MASK_LEN 0x7
383#define V_RSS_MASK_LEN(x) ((x) << S_RSS_MASK_LEN)
384#define G_RSS_MASK_LEN(x) (((x) >> S_RSS_MASK_LEN) & M_RSS_MASK_LEN)
385
386#define S_CPU_IDX 4
387#define M_CPU_IDX 0x3F
388#define V_CPU_IDX(x) ((x) << S_CPU_IDX)
389#define G_CPU_IDX(x) (((x) >> S_CPU_IDX) & M_CPU_IDX)
390
391#define S_MAC_MATCH_VALID 18
392#define V_MAC_MATCH_VALID(x) ((x) << S_MAC_MATCH_VALID)
393#define F_MAC_MATCH_VALID V_MAC_MATCH_VALID(1U)
394
395#define S_CONN_POLICY 19
396#define M_CONN_POLICY 0x3
397#define V_CONN_POLICY(x) ((x) << S_CONN_POLICY)
398#define G_CONN_POLICY(x) (((x) >> S_CONN_POLICY) & M_CONN_POLICY)
399
400#define S_SYN_DEFENSE 21
401#define V_SYN_DEFENSE(x) ((x) << S_SYN_DEFENSE)
402#define F_SYN_DEFENSE V_SYN_DEFENSE(1U)
403
404#define S_VLAN_PRI 22
405#define M_VLAN_PRI 0x3
406#define V_VLAN_PRI(x) ((x) << S_VLAN_PRI)
407#define G_VLAN_PRI(x) (((x) >> S_VLAN_PRI) & M_VLAN_PRI)
408
409#define S_VLAN_PRI_VALID 24
410#define V_VLAN_PRI_VALID(x) ((x) << S_VLAN_PRI_VALID)
411#define F_VLAN_PRI_VALID V_VLAN_PRI_VALID(1U)
412
413#define S_PKT_TYPE 25
414#define M_PKT_TYPE 0x3
415#define V_PKT_TYPE(x) ((x) << S_PKT_TYPE)
416#define G_PKT_TYPE(x) (((x) >> S_PKT_TYPE) & M_PKT_TYPE)
417
418#define S_MAC_MATCH 27
419#define M_MAC_MATCH 0x1F
420#define V_MAC_MATCH(x) ((x) << S_MAC_MATCH)
421#define G_MAC_MATCH(x) (((x) >> S_MAC_MATCH) & M_MAC_MATCH)
422
423/* option 2 fields */
424#define S_CPU_INDEX 0
425#define M_CPU_INDEX 0x7F
426#define V_CPU_INDEX(x) ((x) << S_CPU_INDEX)
427#define G_CPU_INDEX(x) (((x) >> S_CPU_INDEX) & M_CPU_INDEX)
428
429#define S_CPU_INDEX_VALID 7
430#define V_CPU_INDEX_VALID(x) ((x) << S_CPU_INDEX_VALID)
431#define F_CPU_INDEX_VALID V_CPU_INDEX_VALID(1U)
432
433#define S_RX_COALESCE 8
434#define M_RX_COALESCE 0x3
435#define V_RX_COALESCE(x) ((x) << S_RX_COALESCE)
436#define G_RX_COALESCE(x) (((x) >> S_RX_COALESCE) & M_RX_COALESCE)
437
438#define S_RX_COALESCE_VALID 10
439#define V_RX_COALESCE_VALID(x) ((x) << S_RX_COALESCE_VALID)
440#define F_RX_COALESCE_VALID V_RX_COALESCE_VALID(1U)
441
442#define S_CONG_CONTROL_FLAVOR 11
443#define M_CONG_CONTROL_FLAVOR 0x3
444#define V_CONG_CONTROL_FLAVOR(x) ((x) << S_CONG_CONTROL_FLAVOR)
445#define G_CONG_CONTROL_FLAVOR(x) (((x) >> S_CONG_CONTROL_FLAVOR) & M_CONG_CONTROL_FLAVOR)
446
447#define S_PACING_FLAVOR 13
448#define M_PACING_FLAVOR 0x3
449#define V_PACING_FLAVOR(x) ((x) << S_PACING_FLAVOR)
450#define G_PACING_FLAVOR(x) (((x) >> S_PACING_FLAVOR) & M_PACING_FLAVOR)
451
452#define S_FLAVORS_VALID 15
453#define V_FLAVORS_VALID(x) ((x) << S_FLAVORS_VALID)
454#define F_FLAVORS_VALID V_FLAVORS_VALID(1U)
455
456#define S_RX_FC_DISABLE 16
457#define V_RX_FC_DISABLE(x) ((x) << S_RX_FC_DISABLE)
458#define F_RX_FC_DISABLE V_RX_FC_DISABLE(1U)
459
460#define S_RX_FC_VALID 17
461#define V_RX_FC_VALID(x) ((x) << S_RX_FC_VALID)
462#define F_RX_FC_VALID V_RX_FC_VALID(1U)
463
464struct cpl_pass_open_req {
465 WR_HDR;
466 union opcode_tid ot;
467 __be16 local_port;
468 __be16 peer_port;
469 __be32 local_ip;
470 __be32 peer_ip;
471 __be32 opt0h;
472 __be32 opt0l;
473 __be32 peer_netmask;
474 __be32 opt1;
475};
476
477struct cpl_pass_open_rpl {
478 RSS_HDR union opcode_tid ot;
479 __be16 local_port;
480 __be16 peer_port;
481 __be32 local_ip;
482 __be32 peer_ip;
483 __u8 resvd[7];
484 __u8 status;
485};
486
487struct cpl_pass_establish {
488 RSS_HDR union opcode_tid ot;
489 __be16 local_port;
490 __be16 peer_port;
491 __be32 local_ip;
492 __be32 peer_ip;
493 __be32 tos_tid;
494 __be16 l2t_idx;
495 __be16 tcp_opt;
496 __be32 snd_isn;
497 __be32 rcv_isn;
498};
499
500/* cpl_pass_establish.tos_tid fields */
501#define S_PASS_OPEN_TID 0
502#define M_PASS_OPEN_TID 0xFFFFFF
503#define V_PASS_OPEN_TID(x) ((x) << S_PASS_OPEN_TID)
504#define G_PASS_OPEN_TID(x) (((x) >> S_PASS_OPEN_TID) & M_PASS_OPEN_TID)
505
506#define S_PASS_OPEN_TOS 24
507#define M_PASS_OPEN_TOS 0xFF
508#define V_PASS_OPEN_TOS(x) ((x) << S_PASS_OPEN_TOS)
509#define G_PASS_OPEN_TOS(x) (((x) >> S_PASS_OPEN_TOS) & M_PASS_OPEN_TOS)
510
511/* cpl_pass_establish.l2t_idx fields */
512#define S_L2T_IDX16 5
513#define M_L2T_IDX16 0x7FF
514#define V_L2T_IDX16(x) ((x) << S_L2T_IDX16)
515#define G_L2T_IDX16(x) (((x) >> S_L2T_IDX16) & M_L2T_IDX16)
516
517/* cpl_pass_establish.tcp_opt fields (also applies to act_open_establish) */
518#define G_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1)
519#define G_TCPOPT_SACK(x) (((x) >> 6) & 1)
520#define G_TCPOPT_TSTAMP(x) (((x) >> 7) & 1)
521#define G_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
522#define G_TCPOPT_MSS(x) (((x) >> 12) & 0xf)
523
524struct cpl_pass_accept_req {
525 RSS_HDR union opcode_tid ot;
526 __be16 local_port;
527 __be16 peer_port;
528 __be32 local_ip;
529 __be32 peer_ip;
530 __be32 tos_tid;
531 struct tcp_options tcp_options;
532 __u8 dst_mac[6];
533 __be16 vlan_tag;
534 __u8 src_mac[6];
535#if defined(__LITTLE_ENDIAN_BITFIELD)
536 __u8:3;
537 __u8 addr_idx:3;
538 __u8 port_idx:1;
539 __u8 exact_match:1;
540#else
541 __u8 exact_match:1;
542 __u8 port_idx:1;
543 __u8 addr_idx:3;
544 __u8:3;
545#endif
546 __u8 rsvd;
547 __be32 rcv_isn;
548 __be32 rsvd2;
549};
550
551struct cpl_pass_accept_rpl {
552 WR_HDR;
553 union opcode_tid ot;
554 __be32 opt2;
555 __be32 rsvd;
556 __be32 peer_ip;
557 __be32 opt0h;
558 __be32 opt0l_status;
559};
560
561struct cpl_act_open_req {
562 WR_HDR;
563 union opcode_tid ot;
564 __be16 local_port;
565 __be16 peer_port;
566 __be32 local_ip;
567 __be32 peer_ip;
568 __be32 opt0h;
569 __be32 opt0l;
570 __be32 params;
571 __be32 opt2;
572};
573
574/* cpl_act_open_req.params fields */
575#define S_AOPEN_VLAN_PRI 9
576#define M_AOPEN_VLAN_PRI 0x3
577#define V_AOPEN_VLAN_PRI(x) ((x) << S_AOPEN_VLAN_PRI)
578#define G_AOPEN_VLAN_PRI(x) (((x) >> S_AOPEN_VLAN_PRI) & M_AOPEN_VLAN_PRI)
579
580#define S_AOPEN_VLAN_PRI_VALID 11
581#define V_AOPEN_VLAN_PRI_VALID(x) ((x) << S_AOPEN_VLAN_PRI_VALID)
582#define F_AOPEN_VLAN_PRI_VALID V_AOPEN_VLAN_PRI_VALID(1U)
583
584#define S_AOPEN_PKT_TYPE 12
585#define M_AOPEN_PKT_TYPE 0x3
586#define V_AOPEN_PKT_TYPE(x) ((x) << S_AOPEN_PKT_TYPE)
587#define G_AOPEN_PKT_TYPE(x) (((x) >> S_AOPEN_PKT_TYPE) & M_AOPEN_PKT_TYPE)
588
589#define S_AOPEN_MAC_MATCH 14
590#define M_AOPEN_MAC_MATCH 0x1F
591#define V_AOPEN_MAC_MATCH(x) ((x) << S_AOPEN_MAC_MATCH)
592#define G_AOPEN_MAC_MATCH(x) (((x) >> S_AOPEN_MAC_MATCH) & M_AOPEN_MAC_MATCH)
593
594#define S_AOPEN_MAC_MATCH_VALID 19
595#define V_AOPEN_MAC_MATCH_VALID(x) ((x) << S_AOPEN_MAC_MATCH_VALID)
596#define F_AOPEN_MAC_MATCH_VALID V_AOPEN_MAC_MATCH_VALID(1U)
597
598#define S_AOPEN_IFF_VLAN 20
599#define M_AOPEN_IFF_VLAN 0xFFF
600#define V_AOPEN_IFF_VLAN(x) ((x) << S_AOPEN_IFF_VLAN)
601#define G_AOPEN_IFF_VLAN(x) (((x) >> S_AOPEN_IFF_VLAN) & M_AOPEN_IFF_VLAN)
602
603struct cpl_act_open_rpl {
604 RSS_HDR union opcode_tid ot;
605 __be16 local_port;
606 __be16 peer_port;
607 __be32 local_ip;
608 __be32 peer_ip;
609 __be32 atid;
610 __u8 rsvd[3];
611 __u8 status;
612};
613
614struct cpl_act_establish {
615 RSS_HDR union opcode_tid ot;
616 __be16 local_port;
617 __be16 peer_port;
618 __be32 local_ip;
619 __be32 peer_ip;
620 __be32 tos_tid;
621 __be16 l2t_idx;
622 __be16 tcp_opt;
623 __be32 snd_isn;
624 __be32 rcv_isn;
625};
626
627struct cpl_get_tcb {
628 WR_HDR;
629 union opcode_tid ot;
630 __be16 cpuno;
631 __be16 rsvd;
632};
633
634struct cpl_get_tcb_rpl {
635 RSS_HDR union opcode_tid ot;
636 __u8 rsvd;
637 __u8 status;
638 __be16 len;
639};
640
641struct cpl_set_tcb {
642 WR_HDR;
643 union opcode_tid ot;
644 __u8 reply;
645 __u8 cpu_idx;
646 __be16 len;
647};
648
649/* cpl_set_tcb.reply fields */
650#define S_NO_REPLY 7
651#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
652#define F_NO_REPLY V_NO_REPLY(1U)
653
654struct cpl_set_tcb_field {
655 WR_HDR;
656 union opcode_tid ot;
657 __u8 reply;
658 __u8 cpu_idx;
659 __be16 word;
660 __be64 mask;
661 __be64 val;
662};
663
664struct cpl_set_tcb_rpl {
665 RSS_HDR union opcode_tid ot;
666 __u8 rsvd[3];
667 __u8 status;
668};
669
670struct cpl_pcmd {
671 WR_HDR;
672 union opcode_tid ot;
673 __u8 rsvd[3];
674#if defined(__LITTLE_ENDIAN_BITFIELD)
675 __u8 src:1;
676 __u8 bundle:1;
677 __u8 channel:1;
678 __u8:5;
679#else
680 __u8:5;
681 __u8 channel:1;
682 __u8 bundle:1;
683 __u8 src:1;
684#endif
685 __be32 pcmd_parm[2];
686};
687
688struct cpl_pcmd_reply {
689 RSS_HDR union opcode_tid ot;
690 __u8 status;
691 __u8 rsvd;
692 __be16 len;
693};
694
695struct cpl_close_con_req {
696 WR_HDR;
697 union opcode_tid ot;
698 __be32 rsvd;
699};
700
701struct cpl_close_con_rpl {
702 RSS_HDR union opcode_tid ot;
703 __u8 rsvd[3];
704 __u8 status;
705 __be32 snd_nxt;
706 __be32 rcv_nxt;
707};
708
709struct cpl_close_listserv_req {
710 WR_HDR;
711 union opcode_tid ot;
712 __u8 rsvd0;
713 __u8 cpu_idx;
714 __be16 rsvd1;
715};
716
717struct cpl_close_listserv_rpl {
718 RSS_HDR union opcode_tid ot;
719 __u8 rsvd[3];
720 __u8 status;
721};
722
723struct cpl_abort_req_rss {
724 RSS_HDR union opcode_tid ot;
725 __be32 rsvd0;
726 __u8 rsvd1;
727 __u8 status;
728 __u8 rsvd2[6];
729};
730
731struct cpl_abort_req {
732 WR_HDR;
733 union opcode_tid ot;
734 __be32 rsvd0;
735 __u8 rsvd1;
736 __u8 cmd;
737 __u8 rsvd2[6];
738};
739
740struct cpl_abort_rpl_rss {
741 RSS_HDR union opcode_tid ot;
742 __be32 rsvd0;
743 __u8 rsvd1;
744 __u8 status;
745 __u8 rsvd2[6];
746};
747
748struct cpl_abort_rpl {
749 WR_HDR;
750 union opcode_tid ot;
751 __be32 rsvd0;
752 __u8 rsvd1;
753 __u8 cmd;
754 __u8 rsvd2[6];
755};
756
757struct cpl_peer_close {
758 RSS_HDR union opcode_tid ot;
759 __be32 rcv_nxt;
760};
761
762struct tx_data_wr {
763 __be32 wr_hi;
764 __be32 wr_lo;
765 __be32 len;
766 __be32 flags;
767 __be32 sndseq;
768 __be32 param;
769};
770
771/* tx_data_wr.param fields */
772#define S_TX_PORT 0
773#define M_TX_PORT 0x7
774#define V_TX_PORT(x) ((x) << S_TX_PORT)
775#define G_TX_PORT(x) (((x) >> S_TX_PORT) & M_TX_PORT)
776
777#define S_TX_MSS 4
778#define M_TX_MSS 0xF
779#define V_TX_MSS(x) ((x) << S_TX_MSS)
780#define G_TX_MSS(x) (((x) >> S_TX_MSS) & M_TX_MSS)
781
782#define S_TX_QOS 8
783#define M_TX_QOS 0xFF
784#define V_TX_QOS(x) ((x) << S_TX_QOS)
785#define G_TX_QOS(x) (((x) >> S_TX_QOS) & M_TX_QOS)
786
787#define S_TX_SNDBUF 16
788#define M_TX_SNDBUF 0xFFFF
789#define V_TX_SNDBUF(x) ((x) << S_TX_SNDBUF)
790#define G_TX_SNDBUF(x) (((x) >> S_TX_SNDBUF) & M_TX_SNDBUF)
791
792struct cpl_tx_data {
793 union opcode_tid ot;
794 __be32 len;
795 __be32 rsvd;
796 __be16 urg;
797 __be16 flags;
798};
799
800/* cpl_tx_data.flags fields */
801#define S_TX_ULP_SUBMODE 6
802#define M_TX_ULP_SUBMODE 0xF
803#define V_TX_ULP_SUBMODE(x) ((x) << S_TX_ULP_SUBMODE)
804#define G_TX_ULP_SUBMODE(x) (((x) >> S_TX_ULP_SUBMODE) & M_TX_ULP_SUBMODE)
805
806#define S_TX_ULP_MODE 10
807#define M_TX_ULP_MODE 0xF
808#define V_TX_ULP_MODE(x) ((x) << S_TX_ULP_MODE)
809#define G_TX_ULP_MODE(x) (((x) >> S_TX_ULP_MODE) & M_TX_ULP_MODE)
810
811#define S_TX_SHOVE 14
812#define V_TX_SHOVE(x) ((x) << S_TX_SHOVE)
813#define F_TX_SHOVE V_TX_SHOVE(1U)
814
815#define S_TX_MORE 15
816#define V_TX_MORE(x) ((x) << S_TX_MORE)
817#define F_TX_MORE V_TX_MORE(1U)
818
819/* additional tx_data_wr.flags fields */
820#define S_TX_CPU_IDX 0
821#define M_TX_CPU_IDX 0x3F
822#define V_TX_CPU_IDX(x) ((x) << S_TX_CPU_IDX)
823#define G_TX_CPU_IDX(x) (((x) >> S_TX_CPU_IDX) & M_TX_CPU_IDX)
824
825#define S_TX_URG 16
826#define V_TX_URG(x) ((x) << S_TX_URG)
827#define F_TX_URG V_TX_URG(1U)
828
829#define S_TX_CLOSE 17
830#define V_TX_CLOSE(x) ((x) << S_TX_CLOSE)
831#define F_TX_CLOSE V_TX_CLOSE(1U)
832
833#define S_TX_INIT 18
834#define V_TX_INIT(x) ((x) << S_TX_INIT)
835#define F_TX_INIT V_TX_INIT(1U)
836
837#define S_TX_IMM_ACK 19
838#define V_TX_IMM_ACK(x) ((x) << S_TX_IMM_ACK)
839#define F_TX_IMM_ACK V_TX_IMM_ACK(1U)
840
841#define S_TX_IMM_DMA 20
842#define V_TX_IMM_DMA(x) ((x) << S_TX_IMM_DMA)
843#define F_TX_IMM_DMA V_TX_IMM_DMA(1U)
844
845struct cpl_tx_data_ack {
846 RSS_HDR union opcode_tid ot;
847 __be32 ack_seq;
848};
849
850struct cpl_wr_ack {
851 RSS_HDR union opcode_tid ot;
852 __be16 credits;
853 __be16 rsvd;
854 __be32 snd_nxt;
855 __be32 snd_una;
856};
857
858struct cpl_rdma_ec_status {
859 RSS_HDR union opcode_tid ot;
860 __u8 rsvd[3];
861 __u8 status;
862};
863
864struct mngt_pktsched_wr {
865 __be32 wr_hi;
866 __be32 wr_lo;
867 __u8 mngt_opcode;
868 __u8 rsvd[7];
869 __u8 sched;
870 __u8 idx;
871 __u8 min;
872 __u8 max;
873 __u8 binding;
874 __u8 rsvd1[3];
875};
876
877struct cpl_iscsi_hdr {
878 RSS_HDR union opcode_tid ot;
879 __be16 pdu_len_ddp;
880 __be16 len;
881 __be32 seq;
882 __be16 urg;
883 __u8 rsvd;
884 __u8 status;
885};
886
887/* cpl_iscsi_hdr.pdu_len_ddp fields */
888#define S_ISCSI_PDU_LEN 0
889#define M_ISCSI_PDU_LEN 0x7FFF
890#define V_ISCSI_PDU_LEN(x) ((x) << S_ISCSI_PDU_LEN)
891#define G_ISCSI_PDU_LEN(x) (((x) >> S_ISCSI_PDU_LEN) & M_ISCSI_PDU_LEN)
892
893#define S_ISCSI_DDP 15
894#define V_ISCSI_DDP(x) ((x) << S_ISCSI_DDP)
895#define F_ISCSI_DDP V_ISCSI_DDP(1U)
896
897struct cpl_rx_data {
898 RSS_HDR union opcode_tid ot;
899 __be16 rsvd;
900 __be16 len;
901 __be32 seq;
902 __be16 urg;
903#if defined(__LITTLE_ENDIAN_BITFIELD)
904 __u8 dack_mode:2;
905 __u8 psh:1;
906 __u8 heartbeat:1;
907 __u8:4;
908#else
909 __u8:4;
910 __u8 heartbeat:1;
911 __u8 psh:1;
912 __u8 dack_mode:2;
913#endif
914 __u8 status;
915};
916
917struct cpl_rx_data_ack {
918 WR_HDR;
919 union opcode_tid ot;
920 __be32 credit_dack;
921};
922
923/* cpl_rx_data_ack.credit_dack fields */
924#define S_RX_CREDITS 0
925#define M_RX_CREDITS 0x7FFFFFF
926#define V_RX_CREDITS(x) ((x) << S_RX_CREDITS)
927#define G_RX_CREDITS(x) (((x) >> S_RX_CREDITS) & M_RX_CREDITS)
928
929#define S_RX_MODULATE 27
930#define V_RX_MODULATE(x) ((x) << S_RX_MODULATE)
931#define F_RX_MODULATE V_RX_MODULATE(1U)
932
933#define S_RX_FORCE_ACK 28
934#define V_RX_FORCE_ACK(x) ((x) << S_RX_FORCE_ACK)
935#define F_RX_FORCE_ACK V_RX_FORCE_ACK(1U)
936
937#define S_RX_DACK_MODE 29
938#define M_RX_DACK_MODE 0x3
939#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
940#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
941
942#define S_RX_DACK_CHANGE 31
943#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
944#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
945
946struct cpl_rx_urg_notify {
947 RSS_HDR union opcode_tid ot;
948 __be32 seq;
949};
950
951struct cpl_rx_ddp_complete {
952 RSS_HDR union opcode_tid ot;
953 __be32 ddp_report;
954};
955
956struct cpl_rx_data_ddp {
957 RSS_HDR union opcode_tid ot;
958 __be16 urg;
959 __be16 len;
960 __be32 seq;
961 union {
962 __be32 nxt_seq;
963 __be32 ddp_report;
964 };
965 __be32 ulp_crc;
966 __be32 ddpvld_status;
967};
968
969/* cpl_rx_data_ddp.ddpvld_status fields */
970#define S_DDP_STATUS 0
971#define M_DDP_STATUS 0xFF
972#define V_DDP_STATUS(x) ((x) << S_DDP_STATUS)
973#define G_DDP_STATUS(x) (((x) >> S_DDP_STATUS) & M_DDP_STATUS)
974
975#define S_DDP_VALID 15
976#define M_DDP_VALID 0x1FFFF
977#define V_DDP_VALID(x) ((x) << S_DDP_VALID)
978#define G_DDP_VALID(x) (((x) >> S_DDP_VALID) & M_DDP_VALID)
979
980#define S_DDP_PPOD_MISMATCH 15
981#define V_DDP_PPOD_MISMATCH(x) ((x) << S_DDP_PPOD_MISMATCH)
982#define F_DDP_PPOD_MISMATCH V_DDP_PPOD_MISMATCH(1U)
983
984#define S_DDP_PDU 16
985#define V_DDP_PDU(x) ((x) << S_DDP_PDU)
986#define F_DDP_PDU V_DDP_PDU(1U)
987
988#define S_DDP_LLIMIT_ERR 17
989#define V_DDP_LLIMIT_ERR(x) ((x) << S_DDP_LLIMIT_ERR)
990#define F_DDP_LLIMIT_ERR V_DDP_LLIMIT_ERR(1U)
991
992#define S_DDP_PPOD_PARITY_ERR 18
993#define V_DDP_PPOD_PARITY_ERR(x) ((x) << S_DDP_PPOD_PARITY_ERR)
994#define F_DDP_PPOD_PARITY_ERR V_DDP_PPOD_PARITY_ERR(1U)
995
996#define S_DDP_PADDING_ERR 19
997#define V_DDP_PADDING_ERR(x) ((x) << S_DDP_PADDING_ERR)
998#define F_DDP_PADDING_ERR V_DDP_PADDING_ERR(1U)
999
1000#define S_DDP_HDRCRC_ERR 20
1001#define V_DDP_HDRCRC_ERR(x) ((x) << S_DDP_HDRCRC_ERR)
1002#define F_DDP_HDRCRC_ERR V_DDP_HDRCRC_ERR(1U)
1003
1004#define S_DDP_DATACRC_ERR 21
1005#define V_DDP_DATACRC_ERR(x) ((x) << S_DDP_DATACRC_ERR)
1006#define F_DDP_DATACRC_ERR V_DDP_DATACRC_ERR(1U)
1007
1008#define S_DDP_INVALID_TAG 22
1009#define V_DDP_INVALID_TAG(x) ((x) << S_DDP_INVALID_TAG)
1010#define F_DDP_INVALID_TAG V_DDP_INVALID_TAG(1U)
1011
1012#define S_DDP_ULIMIT_ERR 23
1013#define V_DDP_ULIMIT_ERR(x) ((x) << S_DDP_ULIMIT_ERR)
1014#define F_DDP_ULIMIT_ERR V_DDP_ULIMIT_ERR(1U)
1015
1016#define S_DDP_OFFSET_ERR 24
1017#define V_DDP_OFFSET_ERR(x) ((x) << S_DDP_OFFSET_ERR)
1018#define F_DDP_OFFSET_ERR V_DDP_OFFSET_ERR(1U)
1019
1020#define S_DDP_COLOR_ERR 25
1021#define V_DDP_COLOR_ERR(x) ((x) << S_DDP_COLOR_ERR)
1022#define F_DDP_COLOR_ERR V_DDP_COLOR_ERR(1U)
1023
1024#define S_DDP_TID_MISMATCH 26
1025#define V_DDP_TID_MISMATCH(x) ((x) << S_DDP_TID_MISMATCH)
1026#define F_DDP_TID_MISMATCH V_DDP_TID_MISMATCH(1U)
1027
1028#define S_DDP_INVALID_PPOD 27
1029#define V_DDP_INVALID_PPOD(x) ((x) << S_DDP_INVALID_PPOD)
1030#define F_DDP_INVALID_PPOD V_DDP_INVALID_PPOD(1U)
1031
1032#define S_DDP_ULP_MODE 28
1033#define M_DDP_ULP_MODE 0xF
1034#define V_DDP_ULP_MODE(x) ((x) << S_DDP_ULP_MODE)
1035#define G_DDP_ULP_MODE(x) (((x) >> S_DDP_ULP_MODE) & M_DDP_ULP_MODE)
1036
1037/* cpl_rx_data_ddp.ddp_report fields */
1038#define S_DDP_OFFSET 0
1039#define M_DDP_OFFSET 0x3FFFFF
1040#define V_DDP_OFFSET(x) ((x) << S_DDP_OFFSET)
1041#define G_DDP_OFFSET(x) (((x) >> S_DDP_OFFSET) & M_DDP_OFFSET)
1042
1043#define S_DDP_URG 24
1044#define V_DDP_URG(x) ((x) << S_DDP_URG)
1045#define F_DDP_URG V_DDP_URG(1U)
1046
1047#define S_DDP_PSH 25
1048#define V_DDP_PSH(x) ((x) << S_DDP_PSH)
1049#define F_DDP_PSH V_DDP_PSH(1U)
1050
1051#define S_DDP_BUF_COMPLETE 26
1052#define V_DDP_BUF_COMPLETE(x) ((x) << S_DDP_BUF_COMPLETE)
1053#define F_DDP_BUF_COMPLETE V_DDP_BUF_COMPLETE(1U)
1054
1055#define S_DDP_BUF_TIMED_OUT 27
1056#define V_DDP_BUF_TIMED_OUT(x) ((x) << S_DDP_BUF_TIMED_OUT)
1057#define F_DDP_BUF_TIMED_OUT V_DDP_BUF_TIMED_OUT(1U)
1058
1059#define S_DDP_BUF_IDX 28
1060#define V_DDP_BUF_IDX(x) ((x) << S_DDP_BUF_IDX)
1061#define F_DDP_BUF_IDX V_DDP_BUF_IDX(1U)
1062
1063struct cpl_tx_pkt {
1064 WR_HDR;
1065 __be32 cntrl;
1066 __be32 len;
1067};
1068
1069struct cpl_tx_pkt_lso {
1070 WR_HDR;
1071 __be32 cntrl;
1072 __be32 len;
1073
1074 __be32 rsvd;
1075 __be32 lso_info;
1076};
1077
1078/* cpl_tx_pkt*.cntrl fields */
1079#define S_TXPKT_VLAN 0
1080#define M_TXPKT_VLAN 0xFFFF
1081#define V_TXPKT_VLAN(x) ((x) << S_TXPKT_VLAN)
1082#define G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN)
1083
1084#define S_TXPKT_INTF 16
1085#define M_TXPKT_INTF 0xF
1086#define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF)
1087#define G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF)
1088
1089#define S_TXPKT_IPCSUM_DIS 20
1090#define V_TXPKT_IPCSUM_DIS(x) ((x) << S_TXPKT_IPCSUM_DIS)
1091#define F_TXPKT_IPCSUM_DIS V_TXPKT_IPCSUM_DIS(1U)
1092
1093#define S_TXPKT_L4CSUM_DIS 21
1094#define V_TXPKT_L4CSUM_DIS(x) ((x) << S_TXPKT_L4CSUM_DIS)
1095#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1U)
1096
1097#define S_TXPKT_VLAN_VLD 22
1098#define V_TXPKT_VLAN_VLD(x) ((x) << S_TXPKT_VLAN_VLD)
1099#define F_TXPKT_VLAN_VLD V_TXPKT_VLAN_VLD(1U)
1100
1101#define S_TXPKT_LOOPBACK 23
1102#define V_TXPKT_LOOPBACK(x) ((x) << S_TXPKT_LOOPBACK)
1103#define F_TXPKT_LOOPBACK V_TXPKT_LOOPBACK(1U)
1104
1105#define S_TXPKT_OPCODE 24
1106#define M_TXPKT_OPCODE 0xFF
1107#define V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE)
1108#define G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE)
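/*
 * Illustration (not from the original source): an ordinary egress frame on
 * port 0 with full checksum offload would use
 *	cntrl = htonl(V_TXPKT_OPCODE(CPL_TX_PKT) | V_TXPKT_INTF(0));
 * leaving F_TXPKT_IPCSUM_DIS and F_TXPKT_L4CSUM_DIS clear asks the hardware
 * to insert both the IP and L4 checksums.
 */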
1109
1110/* cpl_tx_pkt_lso.lso_info fields */
1111#define S_LSO_MSS 0
1112#define M_LSO_MSS 0x3FFF
1113#define V_LSO_MSS(x) ((x) << S_LSO_MSS)
1114#define G_LSO_MSS(x) (((x) >> S_LSO_MSS) & M_LSO_MSS)
1115
1116#define S_LSO_ETH_TYPE 14
1117#define M_LSO_ETH_TYPE 0x3
1118#define V_LSO_ETH_TYPE(x) ((x) << S_LSO_ETH_TYPE)
1119#define G_LSO_ETH_TYPE(x) (((x) >> S_LSO_ETH_TYPE) & M_LSO_ETH_TYPE)
1120
1121#define S_LSO_TCPHDR_WORDS 16
1122#define M_LSO_TCPHDR_WORDS 0xF
1123#define V_LSO_TCPHDR_WORDS(x) ((x) << S_LSO_TCPHDR_WORDS)
1124#define G_LSO_TCPHDR_WORDS(x) (((x) >> S_LSO_TCPHDR_WORDS) & M_LSO_TCPHDR_WORDS)
1125
1126#define S_LSO_IPHDR_WORDS 20
1127#define M_LSO_IPHDR_WORDS 0xF
1128#define V_LSO_IPHDR_WORDS(x) ((x) << S_LSO_IPHDR_WORDS)
1129#define G_LSO_IPHDR_WORDS(x) (((x) >> S_LSO_IPHDR_WORDS) & M_LSO_IPHDR_WORDS)
1130
1131#define S_LSO_IPV6 24
1132#define V_LSO_IPV6(x) ((x) << S_LSO_IPV6)
1133#define F_LSO_IPV6 V_LSO_IPV6(1U)
1134
1135struct cpl_trace_pkt {
1136#ifdef CHELSIO_FW
1137 __u8 rss_opcode;
1138#if defined(__LITTLE_ENDIAN_BITFIELD)
1139 __u8 err:1;
1140 __u8:7;
1141#else
1142 __u8:7;
1143 __u8 err:1;
1144#endif
1145 __u8 rsvd0;
1146#if defined(__LITTLE_ENDIAN_BITFIELD)
1147 __u8 qid:4;
1148 __u8:4;
1149#else
1150 __u8:4;
1151 __u8 qid:4;
1152#endif
1153 __be32 tstamp;
1154#endif /* CHELSIO_FW */
1155
1156 __u8 opcode;
1157#if defined(__LITTLE_ENDIAN_BITFIELD)
1158 __u8 iff:4;
1159 __u8:4;
1160#else
1161 __u8:4;
1162 __u8 iff:4;
1163#endif
1164 __u8 rsvd[4];
1165 __be16 len;
1166};
1167
1168struct cpl_rx_pkt {
1169 RSS_HDR __u8 opcode;
1170#if defined(__LITTLE_ENDIAN_BITFIELD)
1171 __u8 iff:4;
1172 __u8 csum_valid:1;
1173 __u8 ipmi_pkt:1;
1174 __u8 vlan_valid:1;
1175 __u8 fragment:1;
1176#else
1177 __u8 fragment:1;
1178 __u8 vlan_valid:1;
1179 __u8 ipmi_pkt:1;
1180 __u8 csum_valid:1;
1181 __u8 iff:4;
1182#endif
1183 __be16 csum;
1184 __be16 vlan;
1185 __be16 len;
1186};
1187
1188struct cpl_l2t_write_req {
1189 WR_HDR;
1190 union opcode_tid ot;
1191 __be32 params;
1192 __u8 rsvd[2];
1193 __u8 dst_mac[6];
1194};
1195
1196/* cpl_l2t_write_req.params fields */
1197#define S_L2T_W_IDX 0
1198#define M_L2T_W_IDX 0x7FF
1199#define V_L2T_W_IDX(x) ((x) << S_L2T_W_IDX)
1200#define G_L2T_W_IDX(x) (((x) >> S_L2T_W_IDX) & M_L2T_W_IDX)
1201
1202#define S_L2T_W_VLAN 11
1203#define M_L2T_W_VLAN 0xFFF
1204#define V_L2T_W_VLAN(x) ((x) << S_L2T_W_VLAN)
1205#define G_L2T_W_VLAN(x) (((x) >> S_L2T_W_VLAN) & M_L2T_W_VLAN)
1206
1207#define S_L2T_W_IFF 23
1208#define M_L2T_W_IFF 0xF
1209#define V_L2T_W_IFF(x) ((x) << S_L2T_W_IFF)
1210#define G_L2T_W_IFF(x) (((x) >> S_L2T_W_IFF) & M_L2T_W_IFF)
1211
1212#define S_L2T_W_PRIO 27
1213#define M_L2T_W_PRIO 0x7
1214#define V_L2T_W_PRIO(x) ((x) << S_L2T_W_PRIO)
1215#define G_L2T_W_PRIO(x) (((x) >> S_L2T_W_PRIO) & M_L2T_W_PRIO)
1216
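/*
 * Illustrative sketch: building the params word of a cpl_l2t_write_req
 * from the fields above; idx, vlan and iff are hypothetical values.
 */
static inline __be32 mk_l2t_write_params(unsigned int idx, unsigned int vlan,
					 unsigned int iff)
{
	return htonl(V_L2T_W_IDX(idx) | V_L2T_W_VLAN(vlan) | V_L2T_W_IFF(iff));
}
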
1217struct cpl_l2t_write_rpl {
1218 RSS_HDR union opcode_tid ot;
1219 __u8 status;
1220 __u8 rsvd[3];
1221};
1222
1223struct cpl_l2t_read_req {
1224 WR_HDR;
1225 union opcode_tid ot;
1226 __be16 rsvd;
1227 __be16 l2t_idx;
1228};
1229
1230struct cpl_l2t_read_rpl {
1231 RSS_HDR union opcode_tid ot;
1232 __be32 params;
1233 __u8 rsvd[2];
1234 __u8 dst_mac[6];
1235};
1236
1237/* cpl_l2t_read_rpl.params fields */
1238#define S_L2T_R_PRIO 0
1239#define M_L2T_R_PRIO 0x7
1240#define V_L2T_R_PRIO(x) ((x) << S_L2T_R_PRIO)
1241#define G_L2T_R_PRIO(x) (((x) >> S_L2T_R_PRIO) & M_L2T_R_PRIO)
1242
1243#define S_L2T_R_VLAN 8
1244#define M_L2T_R_VLAN 0xFFF
1245#define V_L2T_R_VLAN(x) ((x) << S_L2T_R_VLAN)
1246#define G_L2T_R_VLAN(x) (((x) >> S_L2T_R_VLAN) & M_L2T_R_VLAN)
1247
1248#define S_L2T_R_IFF 20
1249#define M_L2T_R_IFF 0xF
1250#define V_L2T_R_IFF(x) ((x) << S_L2T_R_IFF)
1251#define G_L2T_R_IFF(x) (((x) >> S_L2T_R_IFF) & M_L2T_R_IFF)
1252
1253#define S_L2T_STATUS 24
1254#define M_L2T_STATUS 0xFF
1255#define V_L2T_STATUS(x) ((x) << S_L2T_STATUS)
1256#define G_L2T_STATUS(x) (((x) >> S_L2T_STATUS) & M_L2T_STATUS)
1257
1258struct cpl_smt_write_req {
1259 WR_HDR;
1260 union opcode_tid ot;
1261 __u8 rsvd0;
1262#if defined(__LITTLE_ENDIAN_BITFIELD)
1263 __u8 mtu_idx:4;
1264 __u8 iff:4;
1265#else
1266 __u8 iff:4;
1267 __u8 mtu_idx:4;
1268#endif
1269 __be16 rsvd2;
1270 __be16 rsvd3;
1271 __u8 src_mac1[6];
1272 __be16 rsvd4;
1273 __u8 src_mac0[6];
1274};
1275
1276struct cpl_smt_write_rpl {
1277 RSS_HDR union opcode_tid ot;
1278 __u8 status;
1279 __u8 rsvd[3];
1280};
1281
1282struct cpl_smt_read_req {
1283 WR_HDR;
1284 union opcode_tid ot;
1285 __u8 rsvd0;
1286#if defined(__LITTLE_ENDIAN_BITFIELD)
1287 __u8:4;
1288 __u8 iff:4;
1289#else
1290 __u8 iff:4;
1291 __u8:4;
1292#endif
1293 __be16 rsvd2;
1294};
1295
1296struct cpl_smt_read_rpl {
1297 RSS_HDR union opcode_tid ot;
1298 __u8 status;
1299#if defined(__LITTLE_ENDIAN_BITFIELD)
1300 __u8 mtu_idx:4;
1301 __u8:4;
1302#else
1303 __u8:4;
1304 __u8 mtu_idx:4;
1305#endif
1306 __be16 rsvd2;
1307 __be16 rsvd3;
1308 __u8 src_mac1[6];
1309 __be16 rsvd4;
1310 __u8 src_mac0[6];
1311};
1312
1313struct cpl_rte_delete_req {
1314 WR_HDR;
1315 union opcode_tid ot;
1316 __be32 params;
1317};
1318
1319/* { cpl_rte_delete_req, cpl_rte_read_req }.params fields */
1320#define S_RTE_REQ_LUT_IX 8
1321#define M_RTE_REQ_LUT_IX 0x7FF
1322#define V_RTE_REQ_LUT_IX(x) ((x) << S_RTE_REQ_LUT_IX)
1323#define G_RTE_REQ_LUT_IX(x) (((x) >> S_RTE_REQ_LUT_IX) & M_RTE_REQ_LUT_IX)
1324
1325#define S_RTE_REQ_LUT_BASE 19
1326#define M_RTE_REQ_LUT_BASE 0x7FF
1327#define V_RTE_REQ_LUT_BASE(x) ((x) << S_RTE_REQ_LUT_BASE)
1328#define G_RTE_REQ_LUT_BASE(x) (((x) >> S_RTE_REQ_LUT_BASE) & M_RTE_REQ_LUT_BASE)
1329
1330#define S_RTE_READ_REQ_SELECT 31
1331#define V_RTE_READ_REQ_SELECT(x) ((x) << S_RTE_READ_REQ_SELECT)
1332#define F_RTE_READ_REQ_SELECT V_RTE_READ_REQ_SELECT(1U)
1333
1334struct cpl_rte_delete_rpl {
1335 RSS_HDR union opcode_tid ot;
1336 __u8 status;
1337 __u8 rsvd[3];
1338};
1339
1340struct cpl_rte_write_req {
1341 WR_HDR;
1342 union opcode_tid ot;
1343#if defined(__LITTLE_ENDIAN_BITFIELD)
1344 __u8:6;
1345 __u8 write_tcam:1;
1346 __u8 write_l2t_lut:1;
1347#else
1348 __u8 write_l2t_lut:1;
1349 __u8 write_tcam:1;
1350 __u8:6;
1351#endif
1352 __u8 rsvd[3];
1353 __be32 lut_params;
1354 __be16 rsvd2;
1355 __be16 l2t_idx;
1356 __be32 netmask;
1357 __be32 faddr;
1358};
1359
1360/* cpl_rte_write_req.lut_params fields */
1361#define S_RTE_WRITE_REQ_LUT_IX 10
1362#define M_RTE_WRITE_REQ_LUT_IX 0x7FF
1363#define V_RTE_WRITE_REQ_LUT_IX(x) ((x) << S_RTE_WRITE_REQ_LUT_IX)
1364#define G_RTE_WRITE_REQ_LUT_IX(x) (((x) >> S_RTE_WRITE_REQ_LUT_IX) & M_RTE_WRITE_REQ_LUT_IX)
1365
1366#define S_RTE_WRITE_REQ_LUT_BASE 21
1367#define M_RTE_WRITE_REQ_LUT_BASE 0x7FF
1368#define V_RTE_WRITE_REQ_LUT_BASE(x) ((x) << S_RTE_WRITE_REQ_LUT_BASE)
1369#define G_RTE_WRITE_REQ_LUT_BASE(x) (((x) >> S_RTE_WRITE_REQ_LUT_BASE) & M_RTE_WRITE_REQ_LUT_BASE)
1370
1371struct cpl_rte_write_rpl {
1372 RSS_HDR union opcode_tid ot;
1373 __u8 status;
1374 __u8 rsvd[3];
1375};
1376
1377struct cpl_rte_read_req {
1378 WR_HDR;
1379 union opcode_tid ot;
1380 __be32 params;
1381};
1382
1383struct cpl_rte_read_rpl {
1384 RSS_HDR union opcode_tid ot;
1385 __u8 status;
1386 __u8 rsvd0;
1387 __be16 l2t_idx;
1388#if defined(__LITTLE_ENDIAN_BITFIELD)
1389 __u8:7;
1390 __u8 select:1;
1391#else
1392 __u8 select:1;
1393 __u8:7;
1394#endif
1395 __u8 rsvd2[3];
1396 __be32 addr;
1397};
1398
1399struct cpl_tid_release {
1400 WR_HDR;
1401 union opcode_tid ot;
1402 __be32 rsvd;
1403};
1404
1405struct cpl_barrier {
1406 WR_HDR;
1407 __u8 opcode;
1408 __u8 rsvd[7];
1409};
1410
1411struct cpl_rdma_read_req {
1412 __u8 opcode;
1413 __u8 rsvd[15];
1414};
1415
1416struct cpl_rdma_terminate {
1417#ifdef CHELSIO_FW
1418 __u8 opcode;
1419 __u8 rsvd[2];
1420#if defined(__LITTLE_ENDIAN_BITFIELD)
1421 __u8 rspq:3;
1422 __u8:5;
1423#else
1424 __u8:5;
1425 __u8 rspq:3;
1426#endif
1427 __be32 tid_len;
1428#endif
1429 __be32 msn;
1430 __be32 mo;
1431 __u8 data[0];
1432};
1433
1434/* cpl_rdma_terminate.tid_len fields */
1435#define S_FLIT_CNT 0
1436#define M_FLIT_CNT 0xFF
1437#define V_FLIT_CNT(x) ((x) << S_FLIT_CNT)
1438#define G_FLIT_CNT(x) (((x) >> S_FLIT_CNT) & M_FLIT_CNT)
1439
1440#define S_TERM_TID 8
1441#define M_TERM_TID 0xFFFFF
1442#define V_TERM_TID(x) ((x) << S_TERM_TID)
1443#define G_TERM_TID(x) (((x) >> S_TERM_TID) & M_TERM_TID)
1444#endif /* T3_CPL_H */
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
new file mode 100644
index 000000000000..365a7f5b1f94
--- /dev/null
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -0,0 +1,3375 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33#include "regs.h"
34#include "sge_defs.h"
35#include "firmware_exports.h"
36
37/**
38 * t3_wait_op_done_val - wait until an operation is completed
39 * @adapter: the adapter performing the operation
40 * @reg: the register to check for completion
41 * @mask: a single-bit field within @reg that indicates completion
42 * @polarity: the value of the field when the operation is completed
43 * @attempts: number of check iterations
44 * @delay: delay in usecs between iterations
45 * @valp: where to store the value of the register at completion time
46 *
47 * Wait until an operation is completed by checking a bit in a register
48 * up to @attempts times. If @valp is not NULL the value of the register
49 * at the time it indicated completion is stored there. Returns 0 if the
50 * operation completes and -EAGAIN otherwise.
51 */
52
53int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
55{
56 while (1) {
57 u32 val = t3_read_reg(adapter, reg);
58
59 if (!!(val & mask) == polarity) {
60 if (valp)
61 *valp = val;
62 return 0;
63 }
64 if (--attempts == 0)
65 return -EAGAIN;
66 if (delay)
67 udelay(delay);
68 }
69}
70
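/*
 * Illustrative usage (assumes the serial-flash A_SF_OP/F_BUSY register and
 * bit used later in this file): poll the busy bit up to 10 times, 5 usecs
 * apart, and capture the register value seen at completion.
 *
 *	u32 v;
 *
 *	if (t3_wait_op_done_val(adapter, A_SF_OP, F_BUSY, 0, 10, 5, &v))
 *		return -EAGAIN;
 */
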
71/**
72 * t3_write_regs - write a bunch of registers
73 * @adapter: the adapter to program
74 * @p: an array of register address/register value pairs
75 * @n: the number of address/value pairs
76 * @offset: register address offset
77 *
78 * Takes an array of register address/register value pairs and writes each
79 * value to the corresponding register. Register addresses are adjusted
80 * by the supplied offset.
81 */
82void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83 int n, unsigned int offset)
84{
85 while (n--) {
86 t3_write_reg(adapter, p->reg_addr + offset, p->val);
87 p++;
88 }
89}
90
91/**
92 * t3_set_reg_field - set a register field to a value
93 * @adapter: the adapter to program
94 * @addr: the register address
95 * @mask: specifies the portion of the register to modify
96 * @val: the new value for the register field
97 *
98 * Sets a register field specified by the supplied mask to the
99 * given value.
100 */
101void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
102 u32 val)
103{
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
105
106 t3_write_reg(adapter, addr, v | val);
107 t3_read_reg(adapter, addr); /* flush */
108}
109
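/*
 * Illustrative usage: replace the CLKDIV field of the MI1 configuration
 * register with a new divider. M_CLKDIV is assumed to be the field's mask
 * constant from regs.h; new_div is a hypothetical value.
 *
 *	t3_set_reg_field(adapter, A_MI1_CFG, V_CLKDIV(M_CLKDIV),
 *			 V_CLKDIV(new_div));
 */
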
110/**
111 * t3_read_indirect - read indirectly addressed registers
112 * @adap: the adapter
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
116 * @nregs: how many indirect registers to read
117 * @start_idx: index of first indirect register to read
118 *
119 * Reads registers that are accessed indirectly through an address/data
120 * register pair.
121 */
122void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals, unsigned int nregs,
124 unsigned int start_idx)
125{
126 while (nregs--) {
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
129 start_idx++;
130 }
131}
132
133/**
134 * t3_mc7_bd_read - read from MC7 through backdoor accesses
135 * @mc7: identifies MC7 to read from
136 * @start: index of first 64-bit word to read
137 * @n: number of 64-bit words to read
138 * @buf: where to store the read result
139 *
140 * Read @n 64-bit words from MC7 starting at word @start, using backdoor
141 * accesses.
142 */
143int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
144 u64 *buf)
145{
146 static const int shift[] = { 0, 0, 16, 24 };
147 static const int step[] = { 0, 32, 16, 8 };
148
149 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
150 struct adapter *adap = mc7->adapter;
151
152 if (start >= size64 || start + n > size64)
153 return -EINVAL;
154
155 start *= (8 << mc7->width);
156 while (n--) {
157 int i;
158 u64 val64 = 0;
159
160 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
161 int attempts = 10;
162 u32 val;
163
164 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
165 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
166 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
167 while ((val & F_BUSY) && attempts--)
168 val = t3_read_reg(adap,
169 mc7->offset + A_MC7_BD_OP);
170 if (val & F_BUSY)
171 return -EIO;
172
173 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
174 if (mc7->width == 0) {
175 val64 = t3_read_reg(adap,
176 mc7->offset +
177 A_MC7_BD_DATA0);
178 val64 |= (u64) val << 32;
179 } else {
180 if (mc7->width > 1)
181 val >>= shift[mc7->width];
182 val64 |= (u64) val << (step[mc7->width] * i);
183 }
184 start += 8;
185 }
186 *buf++ = val64;
187 }
188 return 0;
189}
190
191/*
192 * Initialize MI1.
193 */
194static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
195{
196 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
197 u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
198 V_CLKDIV(clkdiv);
199
200 if (!(ai->caps & SUPPORTED_10000baseT_Full))
201 val |= V_ST(1);
202 t3_write_reg(adap, A_MI1_CFG, val);
203}
204
205#define MDIO_ATTEMPTS 10
206
207/*
208 * MI1 read/write operations for direct-addressed PHYs.
209 */
210static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
211 int reg_addr, unsigned int *valp)
212{
213 int ret;
214 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
215
216 if (mmd_addr)
217 return -EINVAL;
218
219 mutex_lock(&adapter->mdio_lock);
220 t3_write_reg(adapter, A_MI1_ADDR, addr);
221 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
222 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
223 if (!ret)
224 *valp = t3_read_reg(adapter, A_MI1_DATA);
225 mutex_unlock(&adapter->mdio_lock);
226 return ret;
227}
228
229static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
230 int reg_addr, unsigned int val)
231{
232 int ret;
233 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
234
235 if (mmd_addr)
236 return -EINVAL;
237
238 mutex_lock(&adapter->mdio_lock);
239 t3_write_reg(adapter, A_MI1_ADDR, addr);
240 t3_write_reg(adapter, A_MI1_DATA, val);
241 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
242 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
243 mutex_unlock(&adapter->mdio_lock);
244 return ret;
245}
246
247static const struct mdio_ops mi1_mdio_ops = {
248 mi1_read,
249 mi1_write
250};
251
252/*
253 * MI1 read/write operations for indirect-addressed PHYs.
254 */
255static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
256 int reg_addr, unsigned int *valp)
257{
258 int ret;
259 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
260
261 mutex_lock(&adapter->mdio_lock);
262 t3_write_reg(adapter, A_MI1_ADDR, addr);
263 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
264 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
265 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
266 if (!ret) {
267 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
268 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
269 MDIO_ATTEMPTS, 20);
270 if (!ret)
271 *valp = t3_read_reg(adapter, A_MI1_DATA);
272 }
273 mutex_unlock(&adapter->mdio_lock);
274 return ret;
275}
276
277static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
278 int reg_addr, unsigned int val)
279{
280 int ret;
281 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
282
283 mutex_lock(&adapter->mdio_lock);
284 t3_write_reg(adapter, A_MI1_ADDR, addr);
285 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
286 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
287 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
288 if (!ret) {
289 t3_write_reg(adapter, A_MI1_DATA, val);
290 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
291 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
292 MDIO_ATTEMPTS, 20);
293 }
294 mutex_unlock(&adapter->mdio_lock);
295 return ret;
296}
297
298static const struct mdio_ops mi1_mdio_ext_ops = {
299 mi1_ext_read,
300 mi1_ext_write
301};
302
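/*
 * Summary of the MI1 opcode encodings used by the routines above, as
 * inferred from their call sites (regs.h holds the authoritative
 * definitions):
 *
 *	V_MDI_OP(0) - address cycle (indirect access)
 *	V_MDI_OP(1) - write
 *	V_MDI_OP(2) - read (direct access)
 *	V_MDI_OP(3) - read following an address cycle (indirect access)
 */
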
303/**
304 * t3_mdio_change_bits - modify the value of a PHY register
305 * @phy: the PHY to operate on
306 * @mmd: the device address
307 * @reg: the register address
308 * @clear: what part of the register value to mask off
309 * @set: what part of the register value to set
310 *
311 * Changes the value of a PHY register by applying a mask to its current
312 * value and ORing the result with a new value.
313 */
314int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
315 unsigned int set)
316{
317 int ret;
318 unsigned int val;
319
320 ret = mdio_read(phy, mmd, reg, &val);
321 if (!ret) {
322 val &= ~clear;
323 ret = mdio_write(phy, mmd, reg, val | set);
324 }
325 return ret;
326}
327
328/**
329 * t3_phy_reset - reset a PHY block
330 * @phy: the PHY to operate on
331 * @mmd: the device address of the PHY block to reset
332 * @wait: how long to wait for the reset to complete in 1ms increments
333 *
334 * Resets a PHY block and optionally waits for the reset to complete.
335 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
336 * for 10G PHYs.
337 */
338int t3_phy_reset(struct cphy *phy, int mmd, int wait)
339{
340 int err;
341 unsigned int ctl;
342
343 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
344 if (err || !wait)
345 return err;
346
347 do {
348 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
349 if (err)
350 return err;
351 ctl &= BMCR_RESET;
352 if (ctl)
353 msleep(1);
354 } while (ctl && --wait);
355
356 return ctl ? -1 : 0;
357}
358
359/**
360 * t3_phy_advertise - set the PHY advertisement registers for autoneg
361 * @phy: the PHY to operate on
362 * @advert: bitmap of capabilities the PHY should advertise
363 *
364 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
365 * requested capabilities.
366 */
367int t3_phy_advertise(struct cphy *phy, unsigned int advert)
368{
369 int err;
370 unsigned int val = 0;
371
372 err = mdio_read(phy, 0, MII_CTRL1000, &val);
373 if (err)
374 return err;
375
376 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
377 if (advert & ADVERTISED_1000baseT_Half)
378 val |= ADVERTISE_1000HALF;
379 if (advert & ADVERTISED_1000baseT_Full)
380 val |= ADVERTISE_1000FULL;
381
382 err = mdio_write(phy, 0, MII_CTRL1000, val);
383 if (err)
384 return err;
385
386 val = 1;
387 if (advert & ADVERTISED_10baseT_Half)
388 val |= ADVERTISE_10HALF;
389 if (advert & ADVERTISED_10baseT_Full)
390 val |= ADVERTISE_10FULL;
391 if (advert & ADVERTISED_100baseT_Half)
392 val |= ADVERTISE_100HALF;
393 if (advert & ADVERTISED_100baseT_Full)
394 val |= ADVERTISE_100FULL;
395 if (advert & ADVERTISED_Pause)
396 val |= ADVERTISE_PAUSE_CAP;
397 if (advert & ADVERTISED_Asym_Pause)
398 val |= ADVERTISE_PAUSE_ASYM;
399 return mdio_write(phy, 0, MII_ADVERTISE, val);
400}
401
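/*
 * Illustrative usage: advertise 100/1000 full duplex plus symmetric pause,
 * using the standard ethtool ADVERTISED_* capability bits.
 *
 *	t3_phy_advertise(phy, ADVERTISED_100baseT_Full |
 *			 ADVERTISED_1000baseT_Full | ADVERTISED_Pause);
 */
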
402/**
403 * t3_set_phy_speed_duplex - force PHY speed and duplex
404 * @phy: the PHY to operate on
405 * @speed: requested PHY speed
406 * @duplex: requested PHY duplex
407 *
408 * Force a 10/100/1000 PHY's speed and duplex. This also disables
409 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
410 */
411int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
412{
413 int err;
414 unsigned int ctl;
415
416 err = mdio_read(phy, 0, MII_BMCR, &ctl);
417 if (err)
418 return err;
419
420 if (speed >= 0) {
421 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
422 if (speed == SPEED_100)
423 ctl |= BMCR_SPEED100;
424 else if (speed == SPEED_1000)
425 ctl |= BMCR_SPEED1000;
426 }
427 if (duplex >= 0) {
428 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
429 if (duplex == DUPLEX_FULL)
430 ctl |= BMCR_FULLDPLX;
431 }
432 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
433 ctl |= BMCR_ANENABLE;
434 return mdio_write(phy, 0, MII_BMCR, ctl);
435}
436
437static const struct adapter_info t3_adap_info[] = {
438 {2, 0, 0, 0,
439 F_GPIO2_OEN | F_GPIO4_OEN |
440 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
441 SUPPORTED_OFFLOAD,
442 &mi1_mdio_ops, "Chelsio PE9000"},
443 {2, 0, 0, 0,
444 F_GPIO2_OEN | F_GPIO4_OEN |
445 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
446 SUPPORTED_OFFLOAD,
447 &mi1_mdio_ops, "Chelsio T302"},
448 {1, 0, 0, 0,
449 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
450 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
451 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
452 &mi1_mdio_ext_ops, "Chelsio T310"},
453 {2, 0, 0, 0,
454 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
455 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
456 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
457 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
458 &mi1_mdio_ext_ops, "Chelsio T320"},
459};
460
461/*
462 * Return the adapter_info structure with a given index. Out-of-range indices
463 * return NULL.
464 */
465const struct adapter_info *t3_get_adapter_info(unsigned int id)
466{
467 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
468}
469
470#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
471 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
472#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)
473
474static const struct port_type_info port_types[] = {
475 {NULL},
476 {t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
477 "10GBASE-XR"},
478 {t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
479 "10/100/1000BASE-T"},
480 {NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
481 "10/100/1000BASE-T"},
482 {t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
483 {NULL, CAPS_10G, "10GBASE-KX4"},
484 {t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
485 {t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
486 "10GBASE-SR"},
487 {NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
488};
489
490#undef CAPS_1G
491#undef CAPS_10G
492
493#define VPD_ENTRY(name, len) \
494 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
495
496/*
497 * Partial EEPROM Vital Product Data structure. Includes only the ID and
498 * VPD-R sections.
499 */
500struct t3_vpd {
501 u8 id_tag;
502 u8 id_len[2];
503 u8 id_data[16];
504 u8 vpdr_tag;
505 u8 vpdr_len[2];
506 VPD_ENTRY(pn, 16); /* part number */
507 VPD_ENTRY(ec, 16); /* EC level */
508 VPD_ENTRY(sn, 16); /* serial number */
509 VPD_ENTRY(na, 12); /* MAC address base */
510 VPD_ENTRY(cclk, 6); /* core clock */
511 VPD_ENTRY(mclk, 6); /* mem clock */
512 VPD_ENTRY(uclk, 6); /* uP clk */
513 VPD_ENTRY(mdc, 6); /* MDIO clk */
514 VPD_ENTRY(mt, 2); /* mem timing */
515 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
516 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
517 VPD_ENTRY(port0, 2); /* PHY0 complex */
518 VPD_ENTRY(port1, 2); /* PHY1 complex */
519 VPD_ENTRY(port2, 2); /* PHY2 complex */
520 VPD_ENTRY(port3, 2); /* PHY3 complex */
521 VPD_ENTRY(rv, 1); /* csum */
522 u32 pad; /* for multiple-of-4 sizing and alignment */
523};
524
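/*
 * For reference, VPD_ENTRY(sn, 16) above expands to
 *
 *	u8 sn_kword[2]; u8 sn_len; u8 sn_data[16];
 *
 * i.e. the 2-byte keyword, 1-byte length, and value bytes of one VPD-R
 * keyword entry.
 */
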
525#define EEPROM_MAX_POLL 4
526#define EEPROM_STAT_ADDR 0x4000
527#define VPD_BASE 0xc00
528
529/**
530 * t3_seeprom_read - read a VPD EEPROM location
531 * @adapter: adapter to read
532 * @addr: EEPROM address
533 * @data: where to store the read data
534 *
535 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
536 * VPD ROM capability. A zero is written to the flag bit when the
537 * address is written to the control register. The hardware device will
538 * set the flag to 1 when 4 bytes have been read into the data register.
539 */
540int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
541{
542 u16 val;
543 int attempts = EEPROM_MAX_POLL;
544 unsigned int base = adapter->params.pci.vpd_cap_addr;
545
546 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
547 return -EINVAL;
548
549 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
550 do {
551 udelay(10);
552 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
553 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
554
555 if (!(val & PCI_VPD_ADDR_F)) {
556 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
557 return -EIO;
558 }
559 pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
560 *data = le32_to_cpu(*data);
561 return 0;
562}
563
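/*
 * Illustrative usage: read the first 32-bit word of the VPD structure.
 *
 *	u32 word;
 *	int err = t3_seeprom_read(adapter, VPD_BASE, &word);
 */
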
564/**
565 * t3_seeprom_write - write a VPD EEPROM location
566 * @adapter: adapter to write
567 * @addr: EEPROM address
568 * @data: value to write
569 *
570 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
571 * VPD ROM capability.
572 */
573int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
574{
575 u16 val;
576 int attempts = EEPROM_MAX_POLL;
577 unsigned int base = adapter->params.pci.vpd_cap_addr;
578
579 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
580 return -EINVAL;
581
582 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
583 cpu_to_le32(data));
584 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
585 addr | PCI_VPD_ADDR_F);
586 do {
587 msleep(1);
588 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
589 } while ((val & PCI_VPD_ADDR_F) && --attempts);
590
591 if (val & PCI_VPD_ADDR_F) {
592 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
593 return -EIO;
594 }
595 return 0;
596}
597
598/**
599 * t3_seeprom_wp - enable/disable EEPROM write protection
600 * @adapter: the adapter
601 * @enable: 1 to enable write protection, 0 to disable it
602 *
603 * Enables or disables write protection on the serial EEPROM.
604 */
605int t3_seeprom_wp(struct adapter *adapter, int enable)
606{
607 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
608}
609
610/*
611 * Convert a character holding a hex digit to a number.
612 */
613static unsigned int hex2int(unsigned char c)
614{
615 return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
616}
617
618/**
619 * get_vpd_params - read VPD parameters from VPD EEPROM
620 * @adapter: adapter to read
621 * @p: where to store the parameters
622 *
623 * Reads card parameters stored in VPD EEPROM.
624 */
625static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
626{
627 int i, addr, ret;
628 struct t3_vpd vpd;
629
630 /*
631 * Card information is normally at VPD_BASE but some early cards had
632 * it at 0.
633 */
634 ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
635 if (ret)
636 return ret;
637 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
638
639 for (i = 0; i < sizeof(vpd); i += 4) {
640 ret = t3_seeprom_read(adapter, addr + i,
641 (u32 *)((u8 *)&vpd + i));
642 if (ret)
643 return ret;
644 }
645
646 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
647 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
648 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
649 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
650 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
651
652 /* Old eeproms didn't have port information */
653 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
654 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
655 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
656 } else {
657 p->port_type[0] = hex2int(vpd.port0_data[0]);
658 p->port_type[1] = hex2int(vpd.port1_data[0]);
659 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
660 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
661 }
662
663 for (i = 0; i < 6; i++)
664 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
665 hex2int(vpd.na_data[2 * i + 1]);
666 return 0;
667}
668
669/* serial flash and firmware constants */
670enum {
671 SF_ATTEMPTS = 5, /* max retries for SF1 operations */
672 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
673 SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
674
675 /* flash command opcodes */
676 SF_PROG_PAGE = 2, /* program page */
677 SF_WR_DISABLE = 4, /* disable writes */
678 SF_RD_STATUS = 5, /* read status register */
679 SF_WR_ENABLE = 6, /* enable writes */
680 SF_RD_DATA_FAST = 0xb, /* read flash */
681 SF_ERASE_SECTOR = 0xd8, /* erase sector */
682
683 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
684 FW_VERS_ADDR = 0x77ffc /* flash address holding FW version */
685};
686
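/*
 * Illustrative note: commands are sent to the flash as a 4-byte word with
 * the opcode in the low byte and the 24-bit address byte-swapped above it,
 * as done below in t3_read_flash() and t3_write_flash():
 *
 *	u32 cmd = swab32(addr) | SF_RD_DATA_FAST;
 */
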
687/**
688 * sf1_read - read data from the serial flash
689 * @adapter: the adapter
690 * @byte_cnt: number of bytes to read
691 * @cont: whether another operation will be chained
692 * @valp: where to store the read data
693 *
694 * Reads up to 4 bytes of data from the serial flash. The location of
695 * the read needs to be specified prior to calling this by issuing the
696 * appropriate commands to the serial flash.
697 */
698static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
699 u32 *valp)
700{
701 int ret;
702
703 if (!byte_cnt || byte_cnt > 4)
704 return -EINVAL;
705 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
706 return -EBUSY;
707 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
708 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
709 if (!ret)
710 *valp = t3_read_reg(adapter, A_SF_DATA);
711 return ret;
712}
713
714/**
715 * sf1_write - write data to the serial flash
716 * @adapter: the adapter
717 * @byte_cnt: number of bytes to write
718 * @cont: whether another operation will be chained
719 * @val: value to write
720 *
721 * Writes up to 4 bytes of data to the serial flash. The location of
722 * the write needs to be specified prior to calling this by issuing the
723 * appropriate commands to the serial flash.
724 */
725static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
726 u32 val)
727{
728 if (!byte_cnt || byte_cnt > 4)
729 return -EINVAL;
730 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
731 return -EBUSY;
732 t3_write_reg(adapter, A_SF_DATA, val);
733 t3_write_reg(adapter, A_SF_OP,
734 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
735 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
736}
737
738/**
739 * flash_wait_op - wait for a flash operation to complete
740 * @adapter: the adapter
741 * @attempts: max number of polls of the status register
742 * @delay: delay between polls in ms
743 *
744 * Wait for a flash operation to complete by polling the status register.
745 */
746static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
747{
748 int ret;
749 u32 status;
750
751 while (1) {
752 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
753 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
754 return ret;
755 if (!(status & 1))
756 return 0;
757 if (--attempts == 0)
758 return -EAGAIN;
759 if (delay)
760 msleep(delay);
761 }
762}
763
764/**
765 * t3_read_flash - read words from serial flash
766 * @adapter: the adapter
767 * @addr: the start address for the read
768 * @nwords: how many 32-bit words to read
769 * @data: where to store the read data
770 * @byte_oriented: whether to store data as bytes or as words
771 *
772 * Read the specified number of 32-bit words from the serial flash.
773 * If @byte_oriented is set the read data is stored as a byte array
774 * (i.e., big-endian), otherwise as 32-bit words in the platform's
775 * natural endianness.
776 */
777int t3_read_flash(struct adapter *adapter, unsigned int addr,
778 unsigned int nwords, u32 *data, int byte_oriented)
779{
780 int ret;
781
782 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
783 return -EINVAL;
784
785 addr = swab32(addr) | SF_RD_DATA_FAST;
786
787 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
788 (ret = sf1_read(adapter, 1, 1, data)) != 0)
789 return ret;
790
791 for (; nwords; nwords--, data++) {
792 ret = sf1_read(adapter, 4, nwords > 1, data);
793 if (ret)
794 return ret;
795 if (byte_oriented)
796 *data = htonl(*data);
797 }
798 return 0;
799}
800
801/**
802 * t3_write_flash - write up to a page of data to the serial flash
803 * @adapter: the adapter
804 * @addr: the start address to write
805 * @n: length of data to write
806 * @data: the data to write
807 *
808 * Writes up to a page of data (256 bytes) to the serial flash starting
809 * at the given address.
810 */
811static int t3_write_flash(struct adapter *adapter, unsigned int addr,
812 unsigned int n, const u8 *data)
813{
814 int ret;
815 u32 buf[64];
816 unsigned int i, c, left, val, offset = addr & 0xff;
817
818 if (addr + n > SF_SIZE || offset + n > 256)
819 return -EINVAL;
820
821 val = swab32(addr) | SF_PROG_PAGE;
822
823 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
824 (ret = sf1_write(adapter, 4, 1, val)) != 0)
825 return ret;
826
827 for (left = n; left; left -= c) {
828 c = min(left, 4U);
829 for (val = 0, i = 0; i < c; ++i)
830 val = (val << 8) + *data++;
831
832 ret = sf1_write(adapter, c, c != left, val);
833 if (ret)
834 return ret;
835 }
836 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
837 return ret;
838
839 /* Read the page to verify the write succeeded */
840 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
841 if (ret)
842 return ret;
843
844 if (memcmp(data - n, (u8 *) buf + offset, n))
845 return -EIO;
846 return 0;
847}
848
849enum fw_version_type {
850 FW_VERSION_N3,
851 FW_VERSION_T3
852};
853
854/**
855 * t3_get_fw_version - read the firmware version
856 * @adapter: the adapter
857 * @vers: where to place the version
858 *
859 * Reads the FW version from flash.
860 */
861int t3_get_fw_version(struct adapter *adapter, u32 *vers)
862{
863 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
864}
865
866/**
867 * t3_check_fw_version - check if the FW is compatible with this driver
868 * @adapter: the adapter
869 *
870 * Checks if an adapter's FW is compatible with the driver. Returns 0
871 * if the versions are compatible, a negative error otherwise.
872 */
873int t3_check_fw_version(struct adapter *adapter)
874{
875 int ret;
876 u32 vers;
877 unsigned int type, major, minor;
878
879 ret = t3_get_fw_version(adapter, &vers);
880 if (ret)
881 return ret;
882
883 type = G_FW_VERSION_TYPE(vers);
884 major = G_FW_VERSION_MAJOR(vers);
885 minor = G_FW_VERSION_MINOR(vers);
886
887 if (type == FW_VERSION_T3 && major == 3 && minor == 1)
888 return 0;
889
890 CH_ERR(adapter, "found wrong FW version (%u.%u), "
891 "driver needs version 3.1\n", major, minor);
892 return -EINVAL;
893}
894
895/**
896 * t3_flash_erase_sectors - erase a range of flash sectors
897 * @adapter: the adapter
898 * @start: the first sector to erase
899 * @end: the last sector to erase
900 *
901 * Erases the sectors in the given range.
902 */
903static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
904{
905 while (start <= end) {
906 int ret;
907
908 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
909 (ret = sf1_write(adapter, 4, 0,
910 SF_ERASE_SECTOR | (start << 8))) != 0 ||
911 (ret = flash_wait_op(adapter, 5, 500)) != 0)
912 return ret;
913 start++;
914 }
915 return 0;
916}
917
918/**
919 * t3_load_fw - download firmware
920 * @adapter: the adapter
921 * @fw_data: the firmware image to write
922 * @size: image size
923 *
924 * Write the supplied firmware image to the card's serial flash.
925 * The FW image has the following sections: @size - 8 bytes of code and
926 * data, followed by 4 bytes of FW version, followed by the 32-bit
927 * 1's complement checksum of the whole image.
928 */
929int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
930{
931 u32 csum;
932 unsigned int i;
933 const u32 *p = (const u32 *)fw_data;
934 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
935
936 if (size & 3)
937 return -EINVAL;
938 if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
939 return -EFBIG;
940
941 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
942 csum += ntohl(p[i]);
943 if (csum != 0xffffffff) {
944 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
945 csum);
946 return -EINVAL;
947 }
948
949 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
950 if (ret)
951 goto out;
952
953 size -= 8; /* trim off version and checksum */
954 for (addr = FW_FLASH_BOOT_ADDR; size;) {
955 unsigned int chunk_size = min(size, 256U);
956
957 ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
958 if (ret)
959 goto out;
960
961 addr += chunk_size;
962 fw_data += chunk_size;
963 size -= chunk_size;
964 }
965
966 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
967out:
968 if (ret)
969 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
970 return ret;
971}
972
973#define CIM_CTL_BASE 0x2000
974
975/**
976 * t3_cim_ctl_blk_read - read a block from CIM control region
977 *
978 * @adap: the adapter
979 * @addr: the start address within the CIM control region
980 * @n: number of words to read
981 * @valp: where to store the result
982 *
983 * Reads a block of 4-byte words from the CIM control region.
984 */
985int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
986 unsigned int n, unsigned int *valp)
987{
988 int ret = 0;
989
990 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
991 return -EBUSY;
992
993 for ( ; !ret && n--; addr += 4) {
994 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
995 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
996 0, 5, 2);
997 if (!ret)
998 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
999 }
1000 return ret;
1001}
1002
1003
1004/**
1005 * t3_link_changed - handle interface link changes
1006 * @adapter: the adapter
1007 * @port_id: the port index that changed link state
1008 *
1009 * Called when a port's link settings change to propagate the new values
1010 * to the associated PHY and MAC. After performing the common tasks it
1011 * invokes an OS-specific handler.
1012 */
1013void t3_link_changed(struct adapter *adapter, int port_id)
1014{
1015 int link_ok, speed, duplex, fc;
1016 struct port_info *pi = adap2pinfo(adapter, port_id);
1017 struct cphy *phy = &pi->phy;
1018 struct cmac *mac = &pi->mac;
1019 struct link_config *lc = &pi->link_config;
1020
1021 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1022
1023 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1024 uses_xaui(adapter)) {
1025 if (link_ok)
1026 t3b_pcs_reset(mac);
1027 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1028 link_ok ? F_TXACTENABLE | F_RXEN : 0);
1029 }
1030 lc->link_ok = link_ok;
1031 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1032 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1033 if (lc->requested_fc & PAUSE_AUTONEG)
1034 fc &= lc->requested_fc;
1035 else
1036 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1037
1038 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1039 /* Set MAC speed, duplex, and flow control to match PHY. */
1040 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1041 lc->fc = fc;
1042 }
1043
1044 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1045}
1046
1047/**
1048 * t3_link_start - apply link configuration to MAC/PHY
1049 * @phy: the PHY to setup
1050 * @mac: the MAC to setup
1051 * @lc: the requested link configuration
1052 *
1053 * Set up a port's MAC and PHY according to a desired link configuration.
1054 * - If the PHY can auto-negotiate first decide what to advertise, then
1055 * enable/disable auto-negotiation as desired, and reset.
1056 * - If the PHY does not auto-negotiate just reset it.
1057 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1058 * otherwise do it later based on the outcome of auto-negotiation.
1059 */
1060int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1061{
1062 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1063
1064 lc->link_ok = 0;
1065 if (lc->supported & SUPPORTED_Autoneg) {
1066 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1067 if (fc) {
1068 lc->advertising |= ADVERTISED_Asym_Pause;
1069 if (fc & PAUSE_RX)
1070 lc->advertising |= ADVERTISED_Pause;
1071 }
1072 phy->ops->advertise(phy, lc->advertising);
1073
1074 if (lc->autoneg == AUTONEG_DISABLE) {
1075 lc->speed = lc->requested_speed;
1076 lc->duplex = lc->requested_duplex;
1077 lc->fc = (unsigned char)fc;
1078 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1079 fc);
1080 /* Also disables autoneg */
1081 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1082 phy->ops->reset(phy, 0);
1083 } else
1084 phy->ops->autoneg_enable(phy);
1085 } else {
1086 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1087 lc->fc = (unsigned char)fc;
1088 phy->ops->reset(phy, 0);
1089 }
1090 return 0;
1091}
1092
1093/**
1094 * t3_set_vlan_accel - control HW VLAN extraction
1095 * @adapter: the adapter
1096 * @ports: bitmap of adapter ports to operate on
1097 * @on: enable (1) or disable (0) HW VLAN extraction
1098 *
1099 * Enables or disables HW extraction of VLAN tags for the given ports.
1100 */
1101void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1102{
1103 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1104 ports << S_VLANEXTRACTIONENABLE,
1105 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1106}
1107
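/*
 * Illustrative usage: enable HW VLAN extraction on ports 0 and 1.
 *
 *	t3_set_vlan_accel(adapter, 0x3, 1);
 */
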
1108struct intr_info {
1109 unsigned int mask; /* bits to check in interrupt status */
1110 const char *msg; /* message to print or NULL */
1111 short stat_idx; /* stat counter to increment or -1 */
1112 unsigned short fatal:1; /* whether the condition reported is fatal */
1113};
1114
1115/**
1116 * t3_handle_intr_status - table driven interrupt handler
1117 * @adapter: the adapter that generated the interrupt
1118 * @reg: the interrupt status register to process
1119 * @mask: a mask to apply to the interrupt status
1120 * @acts: table of interrupt actions
1121 * @stats: statistics counters tracking interrupt occurrences
1122 *
1123 * A table driven interrupt handler that applies a set of masks to an
1124 * interrupt status word and performs the corresponding actions if the
1125 * interrupts described by the mask have occurred. The actions include
1126 * optionally printing a warning or alert message, and optionally
1127 * incrementing a stat counter. The table is terminated by an entry
1128 * specifying mask 0. Returns the number of fatal interrupt conditions.
1129 */
1130static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1131 unsigned int mask,
1132 const struct intr_info *acts,
1133 unsigned long *stats)
1134{
1135 int fatal = 0;
1136 unsigned int status = t3_read_reg(adapter, reg) & mask;
1137
1138 for (; acts->mask; ++acts) {
1139 if (!(status & acts->mask))
1140 continue;
1141 if (acts->fatal) {
1142 fatal++;
1143 CH_ALERT(adapter, "%s (0x%x)\n",
1144 acts->msg, status & acts->mask);
1145 } else if (acts->msg)
1146 CH_WARN(adapter, "%s (0x%x)\n",
1147 acts->msg, status & acts->mask);
1148 if (acts->stat_idx >= 0)
1149 stats[acts->stat_idx]++;
1150 }
1151 if (status) /* clear processed interrupts */
1152 t3_write_reg(adapter, reg, status);
1153 return fatal;
1154}
1155
1156#define SGE_INTR_MASK (F_RSPQDISABLED)
1157#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1158 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1159 F_NFASRCHFAIL)
1160#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1161#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1162 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1163 F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
1164#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1165 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1166 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1167 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1168 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1169 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1170#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1171 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1172 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1173 V_BISTERR(M_BISTERR) | F_PEXERR)
1174#define ULPRX_INTR_MASK F_PARERR
1175#define ULPTX_INTR_MASK 0
1176#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
1177 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1178 F_ZERO_SWITCH_ERROR)
1179#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1180 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1181 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1182 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
1183#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1184 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1185 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1186#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1187 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1188 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1189#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1190 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1191 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1192 V_MCAPARERRENB(M_MCAPARERRENB))
1193#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1194 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1195 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1196 F_MPS0 | F_CPL_SWITCH)
1197
1198/*
1199 * Interrupt handler for the PCIX1 module.
1200 */
1201static void pci_intr_handler(struct adapter *adapter)
1202{
1203 static const struct intr_info pcix1_intr_info[] = {
1204 {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1205 {F_SIGTARABT, "PCI signaled target abort", -1, 1},
1206 {F_RCVTARABT, "PCI received target abort", -1, 1},
1207 {F_RCVMSTABT, "PCI received master abort", -1, 1},
1208 {F_SIGSYSERR, "PCI signaled system error", -1, 1},
1209 {F_DETPARERR, "PCI detected parity error", -1, 1},
1210 {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1211 {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1212 {F_RCVSPLCMPERR, "PCI received split completion error", -1,
1213 1},
1214 {F_DETCORECCERR, "PCI correctable ECC error",
1215 STAT_PCI_CORR_ECC, 0},
1216 {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1217 {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1218 {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1219 1},
1220 {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1221 1},
1222 {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1223 1},
1224 {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1225 "error", -1, 1},
1226 {0}
1227 };
1228
1229 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1230 pcix1_intr_info, adapter->irq_stats))
1231 t3_fatal_err(adapter);
1232}
1233
1234/*
1235 * Interrupt handler for the PCIE module.
1236 */
1237static void pcie_intr_handler(struct adapter *adapter)
1238{
1239 static const struct intr_info pcie_intr_info[] = {
1240 {F_PEXERR, "PCI PEX error", -1, 1},
1241 {F_UNXSPLCPLERRR,
1242 "PCI unexpected split completion DMA read error", -1, 1},
1243 {F_UNXSPLCPLERRC,
1244 "PCI unexpected split completion DMA command error", -1, 1},
1245 {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1246 {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1247 {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1248 {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1249 {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1250 "PCI MSI-X table/PBA parity error", -1, 1},
1251 {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
1252 {0}
1253 };
1254
1255 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1256 pcie_intr_info, adapter->irq_stats))
1257 t3_fatal_err(adapter);
1258}
1259
1260/*
1261 * TP interrupt handler.
1262 */
1263static void tp_intr_handler(struct adapter *adapter)
1264{
1265 static const struct intr_info tp_intr_info[] = {
1266 {0xffffff, "TP parity error", -1, 1},
1267 {0x1000000, "TP out of Rx pages", -1, 1},
1268 {0x2000000, "TP out of Tx pages", -1, 1},
1269 {0}
1270 };
1271
1272 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1273 tp_intr_info, NULL))
1274 t3_fatal_err(adapter);
1275}
1276
1277/*
1278 * CIM interrupt handler.
1279 */
1280static void cim_intr_handler(struct adapter *adapter)
1281{
1282 static const struct intr_info cim_intr_info[] = {
1283 {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1284 {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1285 {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1286 {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1287 {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1288 {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1289 {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1290 {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1291 {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1292 {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1293 {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1294 {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1295 {0}
1296 };
1297
1298 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1299 cim_intr_info, NULL))
1300 t3_fatal_err(adapter);
1301}
1302
1303/*
1304 * ULP RX interrupt handler.
1305 */
1306static void ulprx_intr_handler(struct adapter *adapter)
1307{
1308 static const struct intr_info ulprx_intr_info[] = {
1309 {F_PARERR, "ULP RX parity error", -1, 1},
1310 {0}
1311 };
1312
1313 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1314 ulprx_intr_info, NULL))
1315 t3_fatal_err(adapter);
1316}
1317
1318/*
1319 * ULP TX interrupt handler.
1320 */
1321static void ulptx_intr_handler(struct adapter *adapter)
1322{
1323 static const struct intr_info ulptx_intr_info[] = {
1324 {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1325 STAT_ULP_CH0_PBL_OOB, 0},
1326 {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1327 STAT_ULP_CH1_PBL_OOB, 0},
1328 {0}
1329 };
1330
1331 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1332 ulptx_intr_info, adapter->irq_stats))
1333 t3_fatal_err(adapter);
1334}
1335
1336#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1337 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1338 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1339 F_ICSPI1_TX_FRAMING_ERROR)
1340#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1341 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1342 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1343 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1344
1345/*
1346 * PM TX interrupt handler.
1347 */
1348static void pmtx_intr_handler(struct adapter *adapter)
1349{
1350 static const struct intr_info pmtx_intr_info[] = {
1351 {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1352 {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1353 {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1354 {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1355 "PMTX ispi parity error", -1, 1},
1356 {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1357 "PMTX ospi parity error", -1, 1},
1358 {0}
1359 };
1360
1361 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1362 pmtx_intr_info, NULL))
1363 t3_fatal_err(adapter);
1364}
1365
1366#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1367 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1368 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1369 F_IESPI1_TX_FRAMING_ERROR)
1370#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1371 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1372 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1373 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1374
1375/*
1376 * PM RX interrupt handler.
1377 */
1378static void pmrx_intr_handler(struct adapter *adapter)
1379{
1380 static const struct intr_info pmrx_intr_info[] = {
1381 {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1382 {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1383 {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1384 {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1385 "PMRX ispi parity error", -1, 1},
1386 {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1387 "PMRX ospi parity error", -1, 1},
1388 {0}
1389 };
1390
1391 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1392 pmrx_intr_info, NULL))
1393 t3_fatal_err(adapter);
1394}
1395
1396/*
1397 * CPL switch interrupt handler.
1398 */
1399static void cplsw_intr_handler(struct adapter *adapter)
1400{
1401 static const struct intr_info cplsw_intr_info[] = {
1402/* { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, */
1403 {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1404 {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1405 {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1406 {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1407 {0}
1408 };
1409
1410 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1411 cplsw_intr_info, NULL))
1412 t3_fatal_err(adapter);
1413}
1414
1415/*
1416 * MPS interrupt handler.
1417 */
1418static void mps_intr_handler(struct adapter *adapter)
1419{
1420 static const struct intr_info mps_intr_info[] = {
1421 {0x1ff, "MPS parity error", -1, 1},
1422 {0}
1423 };
1424
1425 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1426 mps_intr_info, NULL))
1427 t3_fatal_err(adapter);
1428}
1429
1430#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1431
1432/*
1433 * MC7 interrupt handler.
1434 */
1435static void mc7_intr_handler(struct mc7 *mc7)
1436{
1437 struct adapter *adapter = mc7->adapter;
1438 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1439
1440 if (cause & F_CE) {
1441 mc7->stats.corr_err++;
1442 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1443 "data 0x%x 0x%x 0x%x\n", mc7->name,
1444 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1445 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1446 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1447 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1448 }
1449
1450 if (cause & F_UE) {
1451 mc7->stats.uncorr_err++;
1452 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1453 "data 0x%x 0x%x 0x%x\n", mc7->name,
1454 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1455 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1456 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1457 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1458 }
1459
1460 if (G_PE(cause)) {
1461 mc7->stats.parity_err++;
1462 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1463 mc7->name, G_PE(cause));
1464 }
1465
1466 if (cause & F_AE) {
1467 u32 addr = 0;
1468
1469 if (adapter->params.rev > 0)
1470 addr = t3_read_reg(adapter,
1471 mc7->offset + A_MC7_ERR_ADDR);
1472 mc7->stats.addr_err++;
1473 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1474 mc7->name, addr);
1475 }
1476
1477 if (cause & MC7_INTR_FATAL)
1478 t3_fatal_err(adapter);
1479
1480 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1481}
1482
1483#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1484 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1485/*
1486 * XGMAC interrupt handler.
1487 */
1488static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1489{
1490 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1491 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
1492
1493 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1494 mac->stats.tx_fifo_parity_err++;
1495 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1496 }
1497 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1498 mac->stats.rx_fifo_parity_err++;
1499 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1500 }
1501 if (cause & F_TXFIFO_UNDERRUN)
1502 mac->stats.tx_fifo_urun++;
1503 if (cause & F_RXFIFO_OVERFLOW)
1504 mac->stats.rx_fifo_ovfl++;
1505 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1506 mac->stats.serdes_signal_loss++;
1507 if (cause & F_XAUIPCSCTCERR)
1508 mac->stats.xaui_pcs_ctc_err++;
1509 if (cause & F_XAUIPCSALIGNCHANGE)
1510 mac->stats.xaui_pcs_align_change++;
1511
1512 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1513 if (cause & XGM_INTR_FATAL)
1514 t3_fatal_err(adap);
1515 return cause != 0;
1516}
1517
1518/*
1519 * Interrupt handler for PHY events.
1520 */
1521int t3_phy_intr_handler(struct adapter *adapter)
1522{
1523 static const int intr_gpio_bits[] = { 8, 0x20 };
1524
1525 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1526
1527 for_each_port(adapter, i) {
1528 if (cause & intr_gpio_bits[i]) {
1529 struct cphy *phy = &adap2pinfo(adapter, i)->phy;
1530 int phy_cause = phy->ops->intr_handler(phy);
1531
1532 if (phy_cause & cphy_cause_link_change)
1533 t3_link_changed(adapter, i);
1534 if (phy_cause & cphy_cause_fifo_error)
1535 phy->fifo_errors++;
1536 }
1537 }
1538
1539 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1540 return 0;
1541}
1542
1543/*
1544 * T3 slow path (non-data) interrupt handler.
1545 */
1546int t3_slow_intr_handler(struct adapter *adapter)
1547{
1548 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1549
1550 cause &= adapter->slow_intr_mask;
1551 if (!cause)
1552 return 0;
1553 if (cause & F_PCIM0) {
1554 if (is_pcie(adapter))
1555 pcie_intr_handler(adapter);
1556 else
1557 pci_intr_handler(adapter);
1558 }
1559 if (cause & F_SGE3)
1560 t3_sge_err_intr_handler(adapter);
1561 if (cause & F_MC7_PMRX)
1562 mc7_intr_handler(&adapter->pmrx);
1563 if (cause & F_MC7_PMTX)
1564 mc7_intr_handler(&adapter->pmtx);
1565 if (cause & F_MC7_CM)
1566 mc7_intr_handler(&adapter->cm);
1567 if (cause & F_CIM)
1568 cim_intr_handler(adapter);
1569 if (cause & F_TP1)
1570 tp_intr_handler(adapter);
1571 if (cause & F_ULP2_RX)
1572 ulprx_intr_handler(adapter);
1573 if (cause & F_ULP2_TX)
1574 ulptx_intr_handler(adapter);
1575 if (cause & F_PM1_RX)
1576 pmrx_intr_handler(adapter);
1577 if (cause & F_PM1_TX)
1578 pmtx_intr_handler(adapter);
1579 if (cause & F_CPL_SWITCH)
1580 cplsw_intr_handler(adapter);
1581 if (cause & F_MPS0)
1582 mps_intr_handler(adapter);
1583 if (cause & F_MC5A)
1584 t3_mc5_intr_handler(&adapter->mc5);
1585 if (cause & F_XGMAC0_0)
1586 mac_intr_handler(adapter, 0);
1587 if (cause & F_XGMAC0_1)
1588 mac_intr_handler(adapter, 1);
1589 if (cause & F_T3DBG)
1590 t3_os_ext_intr_handler(adapter);
1591
1592 /* Clear the interrupts just processed. */
1593 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1594 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1595 return 1;
1596}
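/*
 * Illustrative sketch only (not part of this patch): a minimal ISR
 * could dispatch to the slow-path handler above.  The driver's real
 * interrupt handlers also service the SGE data path.
 */
static irqreturn_t example_slow_isr(int irq, void *cookie)
{
	struct adapter *adapter = cookie;

	/* t3_slow_intr_handler() returns non-zero if it handled work */
	return IRQ_RETVAL(t3_slow_intr_handler(adapter));
}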
1597
1598/**
1599 * t3_intr_enable - enable interrupts
1600 * @adapter: the adapter whose interrupts should be enabled
1601 *
1602 * Enable interrupts by setting the interrupt enable registers of the
1603 * various HW modules and then enabling the top-level interrupt
1604 * concentrator.
1605 */
1606void t3_intr_enable(struct adapter *adapter)
1607{
1608 static const struct addr_val_pair intr_en_avp[] = {
1609 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1610 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1611 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1612 MC7_INTR_MASK},
1613 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1614 MC7_INTR_MASK},
1615 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1616 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1617 {A_TP_INT_ENABLE, 0x3bfffff},
1618 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1619 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1620 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1621 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1622 };
1623
1624 adapter->slow_intr_mask = PL_INTR_MASK;
1625
1626 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1627
1628 if (adapter->params.rev > 0) {
1629 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1630 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1631 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1632 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1633 F_PBL_BOUND_ERR_CH1);
1634 } else {
1635 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1636 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1637 }
1638
1639 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
1640 adapter_info(adapter)->gpio_intr);
1641 t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
1642 adapter_info(adapter)->gpio_intr);
1643 if (is_pcie(adapter))
1644 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1645 else
1646 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1647 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1648 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1649}
1650
1651/**
1652 * t3_intr_disable - disable a card's interrupts
1653 * @adapter: the adapter whose interrupts should be disabled
1654 *
1655 * Disable interrupts. We only disable the top-level interrupt
1656 * concentrator and the SGE data interrupts.
1657 */
1658void t3_intr_disable(struct adapter *adapter)
1659{
1660 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1661 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1662 adapter->slow_intr_mask = 0;
1663}
1664
1665/**
1666 * t3_intr_clear - clear all interrupts
1667 * @adapter: the adapter whose interrupts should be cleared
1668 *
1669 * Clears all interrupts.
1670 */
1671void t3_intr_clear(struct adapter *adapter)
1672{
1673 static const unsigned int cause_reg_addr[] = {
1674 A_SG_INT_CAUSE,
1675 A_SG_RSPQ_FL_STATUS,
1676 A_PCIX_INT_CAUSE,
1677 A_MC7_INT_CAUSE,
1678 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1679 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1680 A_CIM_HOST_INT_CAUSE,
1681 A_TP_INT_CAUSE,
1682 A_MC5_DB_INT_CAUSE,
1683 A_ULPRX_INT_CAUSE,
1684 A_ULPTX_INT_CAUSE,
1685 A_CPL_INTR_CAUSE,
1686 A_PM1_TX_INT_CAUSE,
1687 A_PM1_RX_INT_CAUSE,
1688 A_MPS_INT_CAUSE,
1689 A_T3DBG_INT_CAUSE,
1690 };
1691 unsigned int i;
1692
1693 /* Clear PHY and MAC interrupts for each port. */
1694 for_each_port(adapter, i)
1695 t3_port_intr_clear(adapter, i);
1696
1697 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1698 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1699
1700 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1701 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1702}
1703
1704/**
1705 * t3_port_intr_enable - enable port-specific interrupts
1706 * @adapter: associated adapter
1707 * @idx: index of port whose interrupts should be enabled
1708 *
1709 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1710 * adapter port.
1711 */
1712void t3_port_intr_enable(struct adapter *adapter, int idx)
1713{
1714 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1715
1716 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1717 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1718 phy->ops->intr_enable(phy);
1719}
1720
1721/**
1722 * t3_port_intr_disable - disable port-specific interrupts
1723 * @adapter: associated adapter
1724 * @idx: index of port whose interrupts should be disabled
1725 *
1726 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1727 * adapter port.
1728 */
1729void t3_port_intr_disable(struct adapter *adapter, int idx)
1730{
1731 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1732
1733 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1734 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1735 phy->ops->intr_disable(phy);
1736}
1737
1738/**
1739 * t3_port_intr_clear - clear port-specific interrupts
1740 * @adapter: associated adapter
1741 * @idx: index of port whose interrupts to clear
1742 *
1743 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1744 * adapter port.
1745 */
1746void t3_port_intr_clear(struct adapter *adapter, int idx)
1747{
1748 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1749
1750 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1751 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1752 phy->ops->intr_clear(phy);
1753}
1754
1755/**
1756 * t3_sge_write_context - write an SGE context
1757 * @adapter: the adapter
1758 * @id: the context id
1759 * @type: the context type
1760 *
1761 * Program an SGE context with the values already loaded in the
1762 * CONTEXT_DATA? registers.
1763 */
1764static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
1765 unsigned int type)
1766{
1767 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1768 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1769 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1770 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1771 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1772 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1773 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1774 0, 5, 1);
1775}
1776
1777/**
1778 * t3_sge_init_ecntxt - initialize an SGE egress context
1779 * @adapter: the adapter to configure
1780 * @id: the context id
1781 * @gts_enable: whether to enable GTS for the context
1782 * @type: the egress context type
1783 * @respq: associated response queue
1784 * @base_addr: base address of queue
1785 * @size: number of queue entries
1786 * @token: uP token
1787 * @gen: initial generation value for the context
1788 * @cidx: consumer pointer
1789 *
1790 * Initialize an SGE egress context and make it ready for use. If the
1791 * platform allows concurrent context operations, the caller is
1792 * responsible for appropriate locking.
1793 */
1794int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
1795 enum sge_context_type type, int respq, u64 base_addr,
1796 unsigned int size, unsigned int token, int gen,
1797 unsigned int cidx)
1798{
1799 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1800
1801 if (base_addr & 0xfff) /* must be 4K aligned */
1802 return -EINVAL;
1803 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1804 return -EBUSY;
1805
1806 base_addr >>= 12;
1807 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1808 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1809 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1810 V_EC_BASE_LO(base_addr & 0xffff));
1811 base_addr >>= 16;
1812 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
1813 base_addr >>= 32;
1814 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1815 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
1816 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
1817 F_EC_VALID);
1818 return t3_sge_write_context(adapter, id, F_EGRESS);
1819}
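/*
 * Example usage (hypothetical values): program egress context 0 as a
 * 1024-entry Ethernet TX queue bound to response queue 0, with GTS
 * enabled, generation 1, token 0, and a 4KB-aligned descriptor ring
 * at 'ring_dma':
 *
 *	err = t3_sge_init_ecntxt(adapter, 0, 1, SGE_CNTXT_ETH, 0,
 *				 ring_dma, 1024, 0, 1, 0);
 */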
1820
1821/**
1822 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
1823 * @adapter: the adapter to configure
1824 * @id: the context id
1825 * @gts_enable: whether to enable GTS for the context
1826 * @base_addr: base address of queue
1827 * @size: number of queue entries
1828 * @bsize: size of each buffer for this queue
1829 * @cong_thres: threshold to signal congestion to upstream producers
1830 * @gen: initial generation value for the context
1831 * @cidx: consumer pointer
1832 *
1833 * Initialize an SGE free list context and make it ready for use. The
1834 * caller is responsible for ensuring only one context operation occurs
1835 * at a time.
1836 */
1837int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
1838 int gts_enable, u64 base_addr, unsigned int size,
1839 unsigned int bsize, unsigned int cong_thres, int gen,
1840 unsigned int cidx)
1841{
1842 if (base_addr & 0xfff) /* must be 4K aligned */
1843 return -EINVAL;
1844 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1845 return -EBUSY;
1846
1847 base_addr >>= 12;
1848 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
1849 base_addr >>= 32;
1850 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
1851 V_FL_BASE_HI((u32) base_addr) |
1852 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
1853 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
1854 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
1855 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
1856 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1857 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
1858 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
1859 return t3_sge_write_context(adapter, id, F_FREELIST);
1860}
1861
1862/**
1863 * t3_sge_init_rspcntxt - initialize an SGE response queue context
1864 * @adapter: the adapter to configure
1865 * @id: the context id
1866 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
1867 * @base_addr: base address of queue
1868 * @size: number of queue entries
1869 * @fl_thres: threshold for selecting the normal or jumbo free list
1870 * @gen: initial generation value for the context
1871 * @cidx: consumer pointer
1872 *
1873 * Initialize an SGE response queue context and make it ready for use.
1874 * The caller is responsible for ensuring only one context operation
1875 * occurs at a time.
1876 */
1877int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
1878 int irq_vec_idx, u64 base_addr, unsigned int size,
1879 unsigned int fl_thres, int gen, unsigned int cidx)
1880{
1881 unsigned int intr = 0;
1882
1883 if (base_addr & 0xfff) /* must be 4K aligned */
1884 return -EINVAL;
1885 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1886 return -EBUSY;
1887
1888 base_addr >>= 12;
1889 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
1890 V_CQ_INDEX(cidx));
1891 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1892 base_addr >>= 32;
1893 if (irq_vec_idx >= 0)
1894 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
1895 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1896 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
1897 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
1898 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
1899}
1900
1901/**
1902 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
1903 * @adapter: the adapter to configure
1904 * @id: the context id
1905 * @base_addr: base address of queue
1906 * @size: number of queue entries
1907 * @rspq: response queue for async notifications
1908 * @ovfl_mode: CQ overflow mode
1909 * @credits: completion queue credits
1910 * @credit_thres: the credit threshold
1911 *
1912 * Initialize an SGE completion queue context and make it ready for use.
1913 * The caller is responsible for ensuring only one context operation
1914 * occurs at a time.
1915 */
1916int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
1917 unsigned int size, int rspq, int ovfl_mode,
1918 unsigned int credits, unsigned int credit_thres)
1919{
1920 if (base_addr & 0xfff) /* must be 4K aligned */
1921 return -EINVAL;
1922 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1923 return -EBUSY;
1924
1925 base_addr >>= 12;
1926 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
1927 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1928 base_addr >>= 32;
1929 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1930 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
1931 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode));
1932 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
1933 V_CQ_CREDIT_THRES(credit_thres));
1934 return t3_sge_write_context(adapter, id, F_CQ);
1935}
1936
1937/**
1938 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
1939 * @adapter: the adapter
1940 * @id: the egress context id
1941 * @enable: enable (1) or disable (0) the context
1942 *
1943 * Enable or disable an SGE egress context. The caller is responsible for
1944 * ensuring only one context operation occurs at a time.
1945 */
1946int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
1947{
1948 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1949 return -EBUSY;
1950
1951 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1952 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1953 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
1954 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
1955 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
1956 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1957 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
1958 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1959 0, 5, 1);
1960}
1961
1962/**
1963 * t3_sge_disable_fl - disable an SGE free-buffer list
1964 * @adapter: the adapter
1965 * @id: the free list context id
1966 *
1967 * Disable an SGE free-buffer list. The caller is responsible for
1968 * ensuring only one context operation occurs at a time.
1969 */
1970int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
1971{
1972 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1973 return -EBUSY;
1974
1975 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1976 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1977 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
1978 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
1979 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
1980 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1981 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
1982 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1983 0, 5, 1);
1984}
1985
1986/**
1987 * t3_sge_disable_rspcntxt - disable an SGE response queue
1988 * @adapter: the adapter
1989 * @id: the response queue context id
1990 *
1991 * Disable an SGE response queue. The caller is responsible for
1992 * ensuring only one context operation occurs at a time.
1993 */
1994int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
1995{
1996 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1997 return -EBUSY;
1998
1999 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2000 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2001 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2002 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2003 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2004 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2005 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2006 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2007 0, 5, 1);
2008}
2009
2010/**
2011 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2012 * @adapter: the adapter
2013 * @id: the completion queue context id
2014 *
2015 * Disable an SGE completion queue. The caller is responsible for
2016 * ensuring only one context operation occurs at a time.
2017 */
2018int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2019{
2020 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2021 return -EBUSY;
2022
2023 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2024 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2025 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2026 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2027 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2028 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2029 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2030 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2031 0, 5, 1);
2032}
2033
2034/**
2035 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2036 * @adapter: the adapter
2037 * @id: the context id
2038 * @op: the operation to perform
2039 *
2040 * Perform the selected operation on an SGE completion queue context.
2041 * The caller is responsible for ensuring only one context operation
2042 * occurs at a time.
2043 */
2044int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2045 unsigned int credits)
2046{
2047 u32 val;
2048
2049 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2050 return -EBUSY;
2051
2052 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2053 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2054 V_CONTEXT(id) | F_CQ);
2055 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2056 0, 5, 1, &val))
2057 return -EIO;
2058
2059 if (op >= 2 && op < 7) {
2060 if (adapter->params.rev > 0)
2061 return G_CQ_INDEX(val);
2062
2063 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2064 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2065 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2066 F_CONTEXT_CMD_BUSY, 0, 5, 1))
2067 return -EIO;
2068 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2069 }
2070 return 0;
2071}
2072
2073/**
2074 * t3_sge_read_context - read an SGE context
2075 * @type: the context type
2076 * @adapter: the adapter
2077 * @id: the context id
2078 * @data: holds the retrieved context
2079 *
 2080 * Read an SGE context of the given type. The caller is responsible for ensuring
2081 * only one context operation occurs at a time.
2082 */
2083static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2084 unsigned int id, u32 data[4])
2085{
2086 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2087 return -EBUSY;
2088
2089 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2090 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2091 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2092 5, 1))
2093 return -EIO;
2094 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2095 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2096 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2097 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2098 return 0;
2099}
2100
2101/**
2102 * t3_sge_read_ecntxt - read an SGE egress context
2103 * @adapter: the adapter
2104 * @id: the context id
2105 * @data: holds the retrieved context
2106 *
2107 * Read an SGE egress context. The caller is responsible for ensuring
2108 * only one context operation occurs at a time.
2109 */
2110int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2111{
2112 if (id >= 65536)
2113 return -EINVAL;
2114 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2115}
2116
2117/**
2118 * t3_sge_read_cq - read an SGE CQ context
2119 * @adapter: the adapter
2120 * @id: the context id
2121 * @data: holds the retrieved context
2122 *
2123 * Read an SGE CQ context. The caller is responsible for ensuring
2124 * only one context operation occurs at a time.
2125 */
2126int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2127{
2128 if (id >= 65536)
2129 return -EINVAL;
2130 return t3_sge_read_context(F_CQ, adapter, id, data);
2131}
2132
2133/**
2134 * t3_sge_read_fl - read an SGE free-list context
2135 * @adapter: the adapter
2136 * @id: the context id
2137 * @data: holds the retrieved context
2138 *
2139 * Read an SGE free-list context. The caller is responsible for ensuring
2140 * only one context operation occurs at a time.
2141 */
2142int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2143{
2144 if (id >= SGE_QSETS * 2)
2145 return -EINVAL;
2146 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2147}
2148
2149/**
2150 * t3_sge_read_rspq - read an SGE response queue context
2151 * @adapter: the adapter
2152 * @id: the context id
2153 * @data: holds the retrieved context
2154 *
2155 * Read an SGE response queue context. The caller is responsible for
2156 * ensuring only one context operation occurs at a time.
2157 */
2158int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2159{
2160 if (id >= SGE_QSETS)
2161 return -EINVAL;
2162 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2163}
2164
2165/**
2166 * t3_config_rss - configure Rx packet steering
2167 * @adapter: the adapter
2168 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2169 * @cpus: values for the CPU lookup table (0xff terminated)
2170 * @rspq: values for the response queue lookup table (0xffff terminated)
2171 *
2172 * Programs the receive packet steering logic. @cpus and @rspq provide
2173 * the values for the CPU and response queue lookup tables. If they
2174 * provide fewer values than the size of the tables the supplied values
2175 * are used repeatedly until the tables are fully populated.
2176 */
2177void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2178 const u8 * cpus, const u16 *rspq)
2179{
2180 int i, j, cpu_idx = 0, q_idx = 0;
2181
2182 if (cpus)
2183 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2184 u32 val = i << 16;
2185
2186 for (j = 0; j < 2; ++j) {
2187 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2188 if (cpus[cpu_idx] == 0xff)
2189 cpu_idx = 0;
2190 }
2191 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2192 }
2193
2194 if (rspq)
2195 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2196 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2197 (i << 16) | rspq[q_idx++]);
2198 if (rspq[q_idx] == 0xffff)
2199 q_idx = 0;
2200 }
2201
2202 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2203}
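/*
 * Example (hypothetical tables and illustrative flags): steer all RSS
 * traffic to CPU 0 and response queue 0, relying on the repeat-to-fill
 * behaviour described above.  See the RSS setup in cxgb3_main.c for
 * the flag combination the driver actually programs.
 *
 *	static const u8 cpus[] = { 0, 0xff };
 *	static const u16 rspq[] = { 0, 0xffff };
 *
 *	t3_config_rss(adapter, F_RQFEEDBACKENABLE | F_TNLLKPEN |
 *		      F_TNLMAPEN, cpus, rspq);
 */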
2204
2205/**
2206 * t3_read_rss - read the contents of the RSS tables
2207 * @adapter: the adapter
2208 * @lkup: holds the contents of the RSS lookup table
2209 * @map: holds the contents of the RSS map table
2210 *
2211 * Reads the contents of the receive packet steering tables.
2212 */
2213int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2214{
2215 int i;
2216 u32 val;
2217
2218 if (lkup)
2219 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2220 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2221 0xffff0000 | i);
2222 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2223 if (!(val & 0x80000000))
2224 return -EAGAIN;
2225 *lkup++ = val;
2226 *lkup++ = (val >> 8);
2227 }
2228
2229 if (map)
2230 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2231 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2232 0xffff0000 | i);
2233 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2234 if (!(val & 0x80000000))
2235 return -EAGAIN;
2236 *map++ = val;
2237 }
2238 return 0;
2239}
2240
2241/**
2242 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2243 * @adap: the adapter
2244 * @enable: 1 to select offload mode, 0 for regular NIC
2245 *
2246 * Switches TP to NIC/offload mode.
2247 */
2248void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2249{
2250 if (is_offload(adap) || !enable)
2251 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2252 V_NICMODE(!enable));
2253}
2254
2255/**
2256 * pm_num_pages - calculate the number of pages of the payload memory
2257 * @mem_size: the size of the payload memory
2258 * @pg_size: the size of each payload memory page
2259 *
2260 * Calculate the number of pages, each of the given size, that fit in a
2261 * memory of the specified size, respecting the HW requirement that the
2262 * number of pages must be a multiple of 24.
2263 */
2264static inline unsigned int pm_num_pages(unsigned int mem_size,
2265 unsigned int pg_size)
2266{
2267 unsigned int n = mem_size / pg_size;
2268
2269 return n - n % 24;
2270}
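/*
 * Worked example (illustrative numbers): 64MB of payload memory with
 * 16KB pages gives n = 4096; since 4096 % 24 == 16,
 * pm_num_pages(64 << 20, 16 << 10) returns 4080, a multiple of 24.
 */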
2271
2272#define mem_region(adap, start, size, reg) \
2273 t3_write_reg((adap), A_ ## reg, (start)); \
2274 start += size
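/*
 * Note that mem_region() expands to two statements without a
 * do { } while (0) wrapper, so it is only safe in straight-line code
 * such as partition_mem() below, never as the sole body of an
 * if/else.
 */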
2275
2276/*
2277 * partition_mem - partition memory and configure TP memory settings
2278 * @adap: the adapter
2279 * @p: the TP parameters
2280 *
2281 * Partitions context and payload memory and configures TP's memory
2282 * registers.
2283 */
2284static void partition_mem(struct adapter *adap, const struct tp_params *p)
2285{
2286 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2287 unsigned int timers = 0, timers_shift = 22;
2288
2289 if (adap->params.rev > 0) {
2290 if (tids <= 16 * 1024) {
2291 timers = 1;
2292 timers_shift = 16;
2293 } else if (tids <= 64 * 1024) {
2294 timers = 2;
2295 timers_shift = 18;
2296 } else if (tids <= 256 * 1024) {
2297 timers = 3;
2298 timers_shift = 20;
2299 }
2300 }
2301
2302 t3_write_reg(adap, A_TP_PMM_SIZE,
2303 p->chan_rx_size | (p->chan_tx_size >> 16));
2304
2305 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2306 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2307 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2308 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2309 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2310
2311 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2312 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2313 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2314
2315 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2316 /* Add a bit of headroom and make multiple of 24 */
2317 pstructs += 48;
2318 pstructs -= pstructs % 24;
2319 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2320
2321 m = tids * TCB_SIZE;
2322 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2323 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2324 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2325 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2326 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2327 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2328 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2329 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2330
2331 m = (m + 4095) & ~0xfff;
2332 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2333 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2334
2335 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2336 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2337 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2338 if (tids < m)
2339 adap->params.mc5.nservers += m - tids;
2340}
2341
2342static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2343 u32 val)
2344{
2345 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2346 t3_write_reg(adap, A_TP_PIO_DATA, val);
2347}
2348
2349static void tp_config(struct adapter *adap, const struct tp_params *p)
2350{
2351 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2352 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2353 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2354 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2355 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2356 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2357 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2358 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2359 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2360 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2361 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
2362 F_IPV6ENABLE | F_NICMODE);
2363 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2364 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2365 t3_set_reg_field(adap, A_TP_PARA_REG6,
2366 adap->params.rev > 0 ? F_ENABLEESND : F_T3A_ENABLEESND,
2367 0);
2368
2369 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2370 F_ENABLEEPCMDAFULL | F_ENABLEOCSPIFULL,
2371 F_TXDEFERENABLE | F_HEARBEATDACK | F_TXCONGESTIONMODE |
2372 F_RXCONGESTIONMODE);
2373 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
2374
2375 if (adap->params.rev > 0) {
2376 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2377 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2378 F_TXPACEAUTO);
2379 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2380 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2381 } else
2382 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2383
2384 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0x12121212);
2385 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0x12121212);
2386 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0x1212);
2387}
2388
2389/* Desired TP timer resolution in usec */
2390#define TP_TMR_RES 50
2391
2392/* TCP timer values in ms */
2393#define TP_DACK_TIMER 50
2394#define TP_RTO_MIN 250
2395
2396/**
2397 * tp_set_timers - set TP timing parameters
2398 * @adap: the adapter to set
2399 * @core_clk: the core clock frequency in Hz
2400 *
2401 * Set TP's timing parameters, such as the various timer resolutions and
2402 * the TCP timer values.
2403 */
2404static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2405{
2406 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2407 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2408 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2409 unsigned int tps = core_clk >> tre;
2410
2411 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2412 V_DELAYEDACKRESOLUTION(dack_re) |
2413 V_TIMESTAMPRESOLUTION(tstamp_re));
2414 t3_write_reg(adap, A_TP_DACK_TIMER,
2415 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2416 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2417 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2418 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2419 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2420 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2421 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2422 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2423 V_KEEPALIVEMAX(9));
2424
2425#define SECONDS * tps
2426
2427 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2428 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2429 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2430 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2431 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2432 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2433 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2434 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2435 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2436
2437#undef SECONDS
2438}
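/*
 * Worked example (illustrative clock): with a 175MHz core clock,
 * core_clk / (1000000 / TP_TMR_RES) = 8750, so tre = fls(8750) - 1
 * = 13 and one timer tick is 2^13 / 175MHz, about 46.8us, the
 * coarsest resolution not exceeding the desired 50us.  tps is then
 * 175000000 >> 13 = 21362 ticks per second, which the SECONDS macro
 * multiplies into the per-timer register values above.
 */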
2439
2440/**
2441 * t3_tp_set_coalescing_size - set receive coalescing size
2442 * @adap: the adapter
2443 * @size: the receive coalescing size
2444 * @psh: whether a set PSH bit should deliver coalesced data
2445 *
2446 * Set the receive coalescing size and PSH bit handling.
2447 */
2448int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2449{
2450 u32 val;
2451
2452 if (size > MAX_RX_COALESCING_LEN)
2453 return -EINVAL;
2454
2455 val = t3_read_reg(adap, A_TP_PARA_REG3);
2456 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2457
2458 if (size) {
2459 val |= F_RXCOALESCEENABLE;
2460 if (psh)
2461 val |= F_RXCOALESCEPSHEN;
2462 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2463 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2464 }
2465 t3_write_reg(adap, A_TP_PARA_REG3, val);
2466 return 0;
2467}
2468
2469/**
2470 * t3_tp_set_max_rxsize - set the max receive size
2471 * @adap: the adapter
2472 * @size: the max receive size
2473 *
2474 * Set TP's max receive size. This is the limit that applies when
2475 * receive coalescing is disabled.
2476 */
2477void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2478{
2479 t3_write_reg(adap, A_TP_PARA_REG7,
2480 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2481}
2482
2483static void __devinit init_mtus(unsigned short mtus[])
2484{
2485 /*
2486 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
 2487 * it can accommodate max size TCP/IP headers when SACK and timestamps
2488 * are enabled and still have at least 8 bytes of payload.
2489 */
2490 mtus[0] = 88;
2491 mtus[1] = 256;
2492 mtus[2] = 512;
2493 mtus[3] = 576;
2494 mtus[4] = 808;
2495 mtus[5] = 1024;
2496 mtus[6] = 1280;
2497 mtus[7] = 1492;
2498 mtus[8] = 1500;
2499 mtus[9] = 2002;
2500 mtus[10] = 2048;
2501 mtus[11] = 4096;
2502 mtus[12] = 4352;
2503 mtus[13] = 8192;
2504 mtus[14] = 9000;
2505 mtus[15] = 9600;
2506}
2507
2508/*
2509 * Initial congestion control parameters.
2510 */
2511static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2512{
2513 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2514 a[9] = 2;
2515 a[10] = 3;
2516 a[11] = 4;
2517 a[12] = 5;
2518 a[13] = 6;
2519 a[14] = 7;
2520 a[15] = 8;
2521 a[16] = 9;
2522 a[17] = 10;
2523 a[18] = 14;
2524 a[19] = 17;
2525 a[20] = 21;
2526 a[21] = 25;
2527 a[22] = 30;
2528 a[23] = 35;
2529 a[24] = 45;
2530 a[25] = 60;
2531 a[26] = 80;
2532 a[27] = 100;
2533 a[28] = 200;
2534 a[29] = 300;
2535 a[30] = 400;
2536 a[31] = 500;
2537
2538 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2539 b[9] = b[10] = 1;
2540 b[11] = b[12] = 2;
2541 b[13] = b[14] = b[15] = b[16] = 3;
2542 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2543 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2544 b[28] = b[29] = 6;
2545 b[30] = b[31] = 7;
2546}
2547
2548/* The minimum additive increment value for the congestion control table */
2549#define CC_MIN_INCR 2U
2550
2551/**
2552 * t3_load_mtus - write the MTU and congestion control HW tables
2553 * @adap: the adapter
2554 * @mtus: the unrestricted values for the MTU table
 2555 * @alpha: the values for the congestion control alpha parameter
2556 * @beta: the values for the congestion control beta parameter
2557 * @mtu_cap: the maximum permitted effective MTU
2558 *
 2559 * Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
2560 * Update the high-speed congestion control table with the supplied alpha,
2561 * beta, and MTUs.
2562 */
2563void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2564 unsigned short alpha[NCCTRL_WIN],
2565 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2566{
2567 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2568 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2569 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2570 28672, 40960, 57344, 81920, 114688, 163840, 229376
2571 };
2572
2573 unsigned int i, w;
2574
2575 for (i = 0; i < NMTUS; ++i) {
2576 unsigned int mtu = min(mtus[i], mtu_cap);
2577 unsigned int log2 = fls(mtu);
2578
2579 if (!(mtu & ((1 << log2) >> 2))) /* round */
2580 log2--;
2581 t3_write_reg(adap, A_TP_MTU_TABLE,
2582 (i << 24) | (log2 << 16) | mtu);
2583
2584 for (w = 0; w < NCCTRL_WIN; ++w) {
2585 unsigned int inc;
2586
2587 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2588 CC_MIN_INCR);
2589
2590 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2591 (w << 16) | (beta[w] << 13) | inc);
2592 }
2593 }
2594}
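/*
 * Worked example (illustrative): for mtu = 1500, fls(1500) = 11, but
 * bit 9 ((1 << 11) >> 2 = 512) is clear in 1500, so log2 is rounded
 * down to 10.  For window 0 with alpha[0] = 1 and avg_pkts[0] = 2,
 * the additive increment is max((1500 - 40) * 1 / 2, 2U) = 730.
 */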
2595
2596/**
2597 * t3_read_hw_mtus - returns the values in the HW MTU table
2598 * @adap: the adapter
2599 * @mtus: where to store the HW MTU values
2600 *
2601 * Reads the HW MTU table.
2602 */
2603void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2604{
2605 int i;
2606
2607 for (i = 0; i < NMTUS; ++i) {
2608 unsigned int val;
2609
2610 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2611 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2612 mtus[i] = val & 0x3fff;
2613 }
2614}
2615
2616/**
2617 * t3_get_cong_cntl_tab - reads the congestion control table
2618 * @adap: the adapter
2619 * @incr: where to store the alpha values
2620 *
2621 * Reads the additive increments programmed into the HW congestion
2622 * control table.
2623 */
2624void t3_get_cong_cntl_tab(struct adapter *adap,
2625 unsigned short incr[NMTUS][NCCTRL_WIN])
2626{
2627 unsigned int mtu, w;
2628
2629 for (mtu = 0; mtu < NMTUS; ++mtu)
2630 for (w = 0; w < NCCTRL_WIN; ++w) {
2631 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2632 0xffff0000 | (mtu << 5) | w);
2633 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2634 0x1fff;
2635 }
2636}
2637
2638/**
2639 * t3_tp_get_mib_stats - read TP's MIB counters
2640 * @adap: the adapter
2641 * @tps: holds the returned counter values
2642 *
2643 * Returns the values of TP's MIB counters.
2644 */
2645void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2646{
2647 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2648 sizeof(*tps) / sizeof(u32), 0);
2649}
2650
2651#define ulp_region(adap, name, start, len) \
2652 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2653 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2654 (start) + (len) - 1); \
2655 start += len
2656
2657#define ulptx_region(adap, name, start, len) \
2658 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2659 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2660 (start) + (len) - 1)
2661
2662static void ulp_config(struct adapter *adap, const struct tp_params *p)
2663{
2664 unsigned int m = p->chan_rx_size;
2665
2666 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2667 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2668 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2669 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2670 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2671 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2672 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2673 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2674}
2675
2676void t3_config_trace_filter(struct adapter *adapter,
2677 const struct trace_params *tp, int filter_index,
2678 int invert, int enable)
2679{
2680 u32 addr, key[4], mask[4];
2681
2682 key[0] = tp->sport | (tp->sip << 16);
2683 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2684 key[2] = tp->dip;
2685 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2686
2687 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2688 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2689 mask[2] = tp->dip_mask;
2690 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2691
2692 if (invert)
2693 key[3] |= (1 << 29);
2694 if (enable)
2695 key[3] |= (1 << 28);
2696
2697 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2698 tp_wr_indirect(adapter, addr++, key[0]);
2699 tp_wr_indirect(adapter, addr++, mask[0]);
2700 tp_wr_indirect(adapter, addr++, key[1]);
2701 tp_wr_indirect(adapter, addr++, mask[1]);
2702 tp_wr_indirect(adapter, addr++, key[2]);
2703 tp_wr_indirect(adapter, addr++, mask[2]);
2704 tp_wr_indirect(adapter, addr++, key[3]);
2705 tp_wr_indirect(adapter, addr, mask[3]);
2706 t3_read_reg(adapter, A_TP_PIO_DATA);
2707}
2708
2709/**
2710 * t3_config_sched - configure a HW traffic scheduler
2711 * @adap: the adapter
2712 * @kbps: target rate in Kbps
2713 * @sched: the scheduler index
2714 *
2715 * Configure a HW scheduler for the target rate
2716 */
2717int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2718{
2719 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2720 unsigned int clk = adap->params.vpd.cclk * 1000;
2721 unsigned int selected_cpt = 0, selected_bpt = 0;
2722
2723 if (kbps > 0) {
2724 kbps *= 125; /* -> bytes */
2725 for (cpt = 1; cpt <= 255; cpt++) {
2726 tps = clk / cpt;
2727 bpt = (kbps + tps / 2) / tps;
2728 if (bpt > 0 && bpt <= 255) {
2729 v = bpt * tps;
2730 delta = v >= kbps ? v - kbps : kbps - v;
2731 if (delta <= mindelta) {
2732 mindelta = delta;
2733 selected_cpt = cpt;
2734 selected_bpt = bpt;
2735 }
2736 } else if (selected_cpt)
2737 break;
2738 }
2739 if (!selected_cpt)
2740 return -EINVAL;
2741 }
2742 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2743 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2744 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2745 if (sched & 1)
2746 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2747 else
2748 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2749 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
2750 return 0;
2751}
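/*
 * Worked example (illustrative): to schedule 10000 kbps on a 200MHz
 * core clock, kbps * 125 = 1250000 bytes/sec.  The search settles on
 * cpt = 160 (tps = 200000000 / 160 = 1250000 ticks/sec) and bpt = 1,
 * i.e. one byte per tick, which matches the target rate exactly.
 */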
2752
2753static int tp_init(struct adapter *adap, const struct tp_params *p)
2754{
2755 int busy = 0;
2756
2757 tp_config(adap, p);
2758 t3_set_vlan_accel(adap, 3, 0);
2759
2760 if (is_offload(adap)) {
2761 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
2762 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
2763 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
2764 0, 1000, 5);
2765 if (busy)
2766 CH_ERR(adap, "TP initialization timed out\n");
2767 }
2768
2769 if (!busy)
2770 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
2771 return busy;
2772}
2773
2774int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
2775{
2776 if (port_mask & ~((1 << adap->params.nports) - 1))
2777 return -EINVAL;
2778 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
2779 port_mask << S_PORT0ACTIVE);
2780 return 0;
2781}
2782
2783/*
2784 * Perform the bits of HW initialization that are dependent on the number
2785 * of available ports.
2786 */
2787static void init_hw_for_avail_ports(struct adapter *adap, int nports)
2788{
2789 int i;
2790
2791 if (nports == 1) {
2792 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
2793 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
2794 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
2795 F_PORT0ACTIVE | F_ENFORCEPKT);
2796 t3_write_reg(adap, A_PM1_TX_CFG, 0xc000c000);
2797 } else {
2798 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
2799 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
2800 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
2801 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
2802 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
2803 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
2804 F_ENFORCEPKT);
2805 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
2806 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
2807 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
2808 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
2809 for (i = 0; i < 16; i++)
2810 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
2811 (i << 16) | 0x1010);
2812 }
2813}
2814
2815static int calibrate_xgm(struct adapter *adapter)
2816{
2817 if (uses_xaui(adapter)) {
2818 unsigned int v, i;
2819
2820 for (i = 0; i < 5; ++i) {
2821 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
2822 t3_read_reg(adapter, A_XGM_XAUI_IMP);
2823 msleep(1);
2824 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
2825 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
2826 t3_write_reg(adapter, A_XGM_XAUI_IMP,
2827 V_XAUIIMP(G_CALIMP(v) >> 2));
2828 return 0;
2829 }
2830 }
2831 CH_ERR(adapter, "MAC calibration failed\n");
2832 return -1;
2833 } else {
2834 t3_write_reg(adapter, A_XGM_RGMII_IMP,
2835 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2836 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2837 F_XGM_IMPSETUPDATE);
2838 }
2839 return 0;
2840}
2841
2842static void calibrate_xgm_t3b(struct adapter *adapter)
2843{
2844 if (!uses_xaui(adapter)) {
2845 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
2846 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2847 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
2848 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
2849 F_XGM_IMPSETUPDATE);
2850 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2851 0);
2852 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
2853 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
2854 }
2855}
2856
2857struct mc7_timing_params {
2858 unsigned char ActToPreDly;
2859 unsigned char ActToRdWrDly;
2860 unsigned char PreCyc;
2861 unsigned char RefCyc[5];
2862 unsigned char BkCyc;
2863 unsigned char WrToRdDly;
2864 unsigned char RdToWrDly;
2865};
2866
2867/*
2868 * Write a value to a register and check that the write completed. These
2869 * writes normally complete in a cycle or two, so one read should suffice.
2870 * The very first read exists to flush the posted write to the device.
2871 */
2872static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
2873{
2874 t3_write_reg(adapter, addr, val);
2875 t3_read_reg(adapter, addr); /* flush */
2876 if (!(t3_read_reg(adapter, addr) & F_BUSY))
2877 return 0;
2878 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
2879 return -EIO;
2880}
2881
2882static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
2883{
2884 static const unsigned int mc7_mode[] = {
2885 0x632, 0x642, 0x652, 0x432, 0x442
2886 };
2887 static const struct mc7_timing_params mc7_timings[] = {
2888 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
2889 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
2890 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
2891 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
2892 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
2893 };
2894
2895 u32 val;
2896 unsigned int width, density, slow, attempts;
2897 struct adapter *adapter = mc7->adapter;
2898 const struct mc7_timing_params *p = &mc7_timings[mem_type];
2899
2900 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
2901 slow = val & F_SLOW;
2902 width = G_WIDTH(val);
2903 density = G_DEN(val);
2904
2905 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
2906 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
2907 msleep(1);
2908
2909 if (!slow) {
2910 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
2911 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
2912 msleep(1);
2913 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
2914 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
2915 CH_ERR(adapter, "%s MC7 calibration timed out\n",
2916 mc7->name);
2917 goto out_fail;
2918 }
2919 }
2920
2921 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
2922 V_ACTTOPREDLY(p->ActToPreDly) |
2923 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
2924 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
2925 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
2926
2927 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
2928 val | F_CLKEN | F_TERM150);
2929 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
2930
2931 if (!slow)
2932 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
2933 F_DLLENB);
2934 udelay(1);
2935
2936 val = slow ? 3 : 6;
2937 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
2938 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
2939 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
2940 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
2941 goto out_fail;
2942
2943 if (!slow) {
2944 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
2945 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
2946 udelay(5);
2947 }
2948
2949 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
2950 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
2951 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
2952 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
2953 mc7_mode[mem_type]) ||
2954 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
2955 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
2956 goto out_fail;
2957
2958 /* clock value is in KHz */
2959 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
2960 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
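	/*
	 * Worked example (illustrative): a 200MHz memory clock enters
	 * as 200000 KHz; 200000 * 7812.5 / 1000000 = 1562, the number
	 * of memory clocks in the conventional 7.8125us DDR refresh
	 * interval programmed via V_PREREFDIV() below.
	 */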
2961
2962 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
2963 F_PERREFEN | V_PREREFDIV(mc7_clock));
2964 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
2965
2966 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
2967 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
2968 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
2969 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
2970 (mc7->size << width) - 1);
2971 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
2972 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
2973
2974 attempts = 50;
2975 do {
2976 msleep(250);
2977 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
2978 } while ((val & F_BUSY) && --attempts);
2979 if (val & F_BUSY) {
2980 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
2981 goto out_fail;
2982 }
2983
2984 /* Enable normal memory accesses. */
2985 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
2986 return 0;
2987
2988out_fail:
2989 return -1;
2990}
2991
2992static void config_pcie(struct adapter *adap)
2993{
2994 static const u16 ack_lat[4][6] = {
2995 {237, 416, 559, 1071, 2095, 4143},
2996 {128, 217, 289, 545, 1057, 2081},
2997 {73, 118, 154, 282, 538, 1050},
2998 {67, 107, 86, 150, 278, 534}
2999 };
3000 static const u16 rpl_tmr[4][6] = {
3001 {711, 1248, 1677, 3213, 6285, 12429},
3002 {384, 651, 867, 1635, 3171, 6243},
3003 {219, 354, 462, 846, 1614, 3150},
3004 {201, 321, 258, 450, 834, 1602}
3005 };
3006
3007 u16 val;
3008 unsigned int log2_width, pldsize;
3009 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3010
3011 pci_read_config_word(adap->pdev,
3012 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3013 &val);
3014 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3015 pci_read_config_word(adap->pdev,
3016 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3017 &val);
3018
3019 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3020 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3021 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3022 log2_width = fls(adap->params.pci.width) - 1;
3023 acklat = ack_lat[log2_width][pldsize];
3024 if (val & 1) /* check LOsEnable */
3025 acklat += fst_trn_tx * 4;
3026 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3027
3028 if (adap->params.rev == 0)
3029 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3030 V_T3A_ACKLAT(M_T3A_ACKLAT),
3031 V_T3A_ACKLAT(acklat));
3032 else
3033 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3034 V_ACKLAT(acklat));
3035
3036 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3037 V_REPLAYLMT(rpllmt));
3038
3039 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3040 t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
3041}
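/*
 * Worked example (illustrative): on a x8 link (log2_width = 3) with a
 * 128-byte max payload size (pldsize = 0), the tables above give a
 * base ack latency of ack_lat[3][0] = 67 and a base replay limit of
 * rpl_tmr[3][0] = 201, before the fast-training-sequence adjustments
 * are added in.
 */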
3042
3043/*
3044 * Initialize and configure T3 HW modules. This performs the
3045 * initialization steps that need to be done once after a card is reset.
 3046 * MAC and PHY initialization is handled separately whenever a port is enabled.
3047 *
3048 * fw_params are passed to FW and their value is platform dependent. Only the
3049 * top 8 bits are available for use, the rest must be 0.
3050 */
3051int t3_init_hw(struct adapter *adapter, u32 fw_params)
3052{
3053 int err = -EIO, attempts = 100;
3054 const struct vpd_params *vpd = &adapter->params.vpd;
3055
3056 if (adapter->params.rev > 0)
3057 calibrate_xgm_t3b(adapter);
3058 else if (calibrate_xgm(adapter))
3059 goto out_err;
3060
3061 if (vpd->mclk) {
3062 partition_mem(adapter, &adapter->params.tp);
3063
3064 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3065 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3066 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3067 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3068 adapter->params.mc5.nfilters,
3069 adapter->params.mc5.nroutes))
3070 goto out_err;
3071 }
3072
3073 if (tp_init(adapter, &adapter->params.tp))
3074 goto out_err;
3075
3076 t3_tp_set_coalescing_size(adapter,
3077 min(adapter->params.sge.max_pkt_size,
3078 MAX_RX_COALESCING_LEN), 1);
3079 t3_tp_set_max_rxsize(adapter,
3080 min(adapter->params.sge.max_pkt_size, 16384U));
3081 ulp_config(adapter, &adapter->params.tp);
3082
3083 if (is_pcie(adapter))
3084 config_pcie(adapter);
3085 else
3086 t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
3087
3088 t3_write_reg(adapter, A_PM1_RX_CFG, 0xf000f000);
3089 init_hw_for_avail_ports(adapter, adapter->params.nports);
3090 t3_sge_init(adapter, &adapter->params.sge);
3091
3092 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3093 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3094 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3095 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3096
3097 do { /* wait for uP to initialize */
3098 msleep(20);
3099 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3100 if (!attempts)
3101 goto out_err;
3102
3103 err = 0;
3104out_err:
3105 return err;
3106}
3107
3108/**
3109 * get_pci_mode - determine a card's PCI mode
3110 * @adapter: the adapter
3111 * @p: where to store the PCI settings
3112 *
3113 * Determines a card's PCI mode and associated parameters, such as speed
3114 * and width.
3115 */
3116static void __devinit get_pci_mode(struct adapter *adapter,
3117 struct pci_params *p)
3118{
3119 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3120 u32 pci_mode, pcie_cap;
3121
3122 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3123 if (pcie_cap) {
3124 u16 val;
3125
3126 p->variant = PCI_VARIANT_PCIE;
3127 p->pcie_cap_addr = pcie_cap;
3128 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3129 &val);
3130 p->width = (val >> 4) & 0x3f;
3131 return;
3132 }
3133
3134 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3135 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3136 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3137 pci_mode = G_PCIXINITPAT(pci_mode);
3138 if (pci_mode == 0)
3139 p->variant = PCI_VARIANT_PCI;
3140 else if (pci_mode < 4)
3141 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3142 else if (pci_mode < 8)
3143 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3144 else
3145 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3146}
3147
3148/**
3149 * init_link_config - initialize a link's SW state
3150 * @lc: structure holding the link state
 3151 * @caps: the link's initial capabilities (SUPPORTED_* bit mask)
3152 *
3153 * Initializes the SW state maintained for each link, including the link's
3154 * capabilities and default speed/duplex/flow-control/autonegotiation
3155 * settings.
3156 */
3157static void __devinit init_link_config(struct link_config *lc,
3158 unsigned int caps)
3159{
3160 lc->supported = caps;
3161 lc->requested_speed = lc->speed = SPEED_INVALID;
3162 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3163 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3164 if (lc->supported & SUPPORTED_Autoneg) {
3165 lc->advertising = lc->supported;
3166 lc->autoneg = AUTONEG_ENABLE;
3167 lc->requested_fc |= PAUSE_AUTONEG;
3168 } else {
3169 lc->advertising = 0;
3170 lc->autoneg = AUTONEG_DISABLE;
3171 }
3172}
3173
3174/**
3175 * mc7_calc_size - calculate MC7 memory size
3176 * @cfg: the MC7 configuration
3177 *
3178 * Calculates the size of an MC7 memory in bytes from the value of its
3179 * configuration register.
3180 */
3181static unsigned int __devinit mc7_calc_size(u32 cfg)
3182{
3183 unsigned int width = G_WIDTH(cfg);
3184 unsigned int banks = !!(cfg & F_BKS) + 1;
3185 unsigned int org = !!(cfg & F_ORG) + 1;
3186 unsigned int density = G_DEN(cfg);
3187 unsigned int MBs = ((256 << density) * banks) / (org << width);
3188
3189 return MBs << 20;
3190}
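/*
 * Worked example (illustrative): a configuration with density code 1,
 * a single bank, x1 organization and width code 2 yields
 * ((256 << 1) * 1) / (1 << 2) = 128, i.e. a 128MB part (128 << 20).
 */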
3191
3192static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3193 unsigned int base_addr, const char *name)
3194{
3195 u32 cfg;
3196
3197 mc7->adapter = adapter;
3198 mc7->name = name;
3199 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3200 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3201 mc7->size = mc7_calc_size(cfg);
3202 mc7->width = G_WIDTH(cfg);
3203}
3204
3205void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3206{
3207 mac->adapter = adapter;
3208 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3209 mac->nucast = 1;
3210
3211 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3212 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3213 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3214 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3215 F_ENRGMII, 0);
3216 }
3217}
3218
3219void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3220{
3221 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3222
3223 mi1_init(adapter, ai);
3224 	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80 kHz */
3225 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3226 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3227 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3228
3229 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3230 val |= F_ENRGMII;
3231
3232 /* Enable MAC clocks so we can access the registers */
3233 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3234 t3_read_reg(adapter, A_XGM_PORT_CFG);
3235
3236 val |= F_CLKDIVRESET_;
3237 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3238 t3_read_reg(adapter, A_XGM_PORT_CFG);
3239 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3240 t3_read_reg(adapter, A_XGM_PORT_CFG);
3241}
3242
3243/*
3244 * Reset the adapter. PCIe cards lose their config space during reset, PCI-X
3245 * ones don't.
3246 */
3247int t3_reset_adapter(struct adapter *adapter)
3248{
3249 int i;
3250 uint16_t devid = 0;
3251
3252 if (is_pcie(adapter))
3253 pci_save_state(adapter->pdev);
3254 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3255
3256 /*
3257 	 * Delay to give the device time to reset fully.
3258 	 * XXX The delay time should be tuned.
3259 */
3260 for (i = 0; i < 10; i++) {
3261 msleep(50);
3262 		pci_read_config_word(adapter->pdev, 0x00, &devid); /* vendor ID */
3263 		if (devid == 0x1425)	/* Chelsio's PCI vendor ID */
3264 break;
3265 }
3266
3267 if (devid != 0x1425)
3268 return -1;
3269
3270 if (is_pcie(adapter))
3271 pci_restore_state(adapter->pdev);
3272 return 0;
3273}
3274
3275/*
3276 * Initialize adapter SW state for the various HW modules, set initial values
3277 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3278 * interface.
3279 */
3280int __devinit t3_prep_adapter(struct adapter *adapter,
3281 const struct adapter_info *ai, int reset)
3282{
3283 int ret;
3284 unsigned int i, j = 0;
3285
3286 get_pci_mode(adapter, &adapter->params.pci);
3287
3288 adapter->params.info = ai;
3289 adapter->params.nports = ai->nports;
3290 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3291 adapter->params.linkpoll_period = 0;
3292 adapter->params.stats_update_period = is_10G(adapter) ?
3293 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3294 adapter->params.pci.vpd_cap_addr =
3295 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3296 ret = get_vpd_params(adapter, &adapter->params.vpd);
3297 if (ret < 0)
3298 return ret;
3299
3300 if (reset && t3_reset_adapter(adapter))
3301 return -1;
3302
3303 t3_sge_prep(adapter, &adapter->params.sge);
3304
3305 if (adapter->params.vpd.mclk) {
3306 struct tp_params *p = &adapter->params.tp;
3307
3308 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3309 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3310 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3311
3312 p->nchan = ai->nports;
3313 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3314 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3315 p->cm_size = t3_mc7_size(&adapter->cm);
3316 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3317 p->chan_tx_size = p->pmtx_size / p->nchan;
3318 p->rx_pg_size = 64 * 1024;
3319 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3320 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3321 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3322 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3323 adapter->params.rev > 0 ? 12 : 6;
3324
3325 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3326 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3327 DEFAULT_NFILTERS : 0;
3328 adapter->params.mc5.nroutes = 0;
3329 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3330
3331 init_mtus(adapter->params.mtus);
3332 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3333 }
3334
3335 early_hw_init(adapter, ai);
3336
3337 for_each_port(adapter, i) {
3338 u8 hw_addr[6];
3339 struct port_info *p = adap2pinfo(adapter, i);
3340
3341 while (!adapter->params.vpd.port_type[j])
3342 ++j;
3343
3344 p->port_type = &port_types[adapter->params.vpd.port_type[j]];
3345 p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3346 ai->mdio_ops);
3347 mac_prep(&p->mac, adapter, j);
3348 ++j;
3349
3350 /*
3351 * The VPD EEPROM stores the base Ethernet address for the
3352 * card. A port's address is derived from the base by adding
3353 * the port's index to the base's low octet.
3354 */
3355 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3356 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3357
3358 memcpy(adapter->port[i]->dev_addr, hw_addr,
3359 ETH_ALEN);
3360 memcpy(adapter->port[i]->perm_addr, hw_addr,
3361 ETH_ALEN);
3362 init_link_config(&p->link_config, p->port_type->caps);
3363 p->phy.ops->power_down(&p->phy, 1);
3364 if (!(p->port_type->caps & SUPPORTED_IRQ))
3365 adapter->params.linkpoll_period = 10;
3366 }
3367
3368 return 0;
3369}
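To illustrate the address derivation in the per-port loop above, take a hypothetical VPD base address:

/*
 * Example (hypothetical eth_base 00:07:43:12:34:50): port 0 is
 * assigned 00:07:43:12:34:50 and port 1 gets 00:07:43:12:34:51 --
 * the five high octets are shared, only the low octet is bumped by
 * the port index.
 */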
3370
3371void t3_led_ready(struct adapter *adapter)
3372{
3373 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3374 F_GPIO0_OUT_VAL);
3375}
diff --git a/drivers/net/cxgb3/t3cdev.h b/drivers/net/cxgb3/t3cdev.h
new file mode 100644
index 000000000000..9af3bcd64b3b
--- /dev/null
+++ b/drivers/net/cxgb3/t3cdev.h
@@ -0,0 +1,73 @@
1/*
2 * Copyright (C) 2006-2007 Chelsio Communications. All rights reserved.
3 * Copyright (C) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _T3CDEV_H_
34#define _T3CDEV_H_
35
36#include <linux/list.h>
37#include <asm/atomic.h>
38#include <asm/semaphore.h>
39#include <linux/netdevice.h>
40#include <linux/proc_fs.h>
41#include <linux/skbuff.h>
42#include <net/neighbour.h>
43
44#define T3CNAMSIZ 16
45
46/* Get the t3cdev associated with a net_device */
47#define T3CDEV(netdev) ((struct t3cdev *)(netdev)->priv)
48
49struct cxgb3_client;
50
51enum t3ctype {
52 T3A = 0,
53 T3B
54};
55
56struct t3cdev {
57 char name[T3CNAMSIZ]; /* T3C device name */
58 enum t3ctype type;
59 struct list_head ofld_dev_list; /* for list linking */
60 struct net_device *lldev; /* LL dev associated with T3C messages */
61 struct proc_dir_entry *proc_dir; /* root of proc dir for this T3C */
62 int (*send)(struct t3cdev *dev, struct sk_buff *skb);
63 int (*recv)(struct t3cdev *dev, struct sk_buff **skb, int n);
64 int (*ctl)(struct t3cdev *dev, unsigned int req, void *data);
65 void (*neigh_update)(struct t3cdev *dev, struct neighbour *neigh);
66 void *priv; /* driver private data */
67 void *l2opt; /* optional layer 2 data */
68 void *l3opt; /* optional layer 3 data */
69 void *l4opt; /* optional layer 4 data */
70 void *ulp; /* ulp stuff */
71};
72
73#endif /* _T3CDEV_H_ */
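A minimal sketch of how an offload client might hand a message to the T3C device behind a cxgb3 net_device; only T3CDEV() and the send hook come from the header above, the wrapper itself is hypothetical:

/* Hypothetical helper, assumes t3cdev.h is included. */
static int send_to_t3c(struct net_device *netdev, struct sk_buff *skb)
{
	struct t3cdev *tdev = T3CDEV(netdev);

	return tdev->send(tdev, skb);
}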
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
new file mode 100644
index 000000000000..2b67dd523cc1
--- /dev/null
+++ b/drivers/net/cxgb3/version.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32/* $Date: 2006/10/31 18:57:51 $ $RCSfile: version.h,v $ $Revision: 1.3 $ */
33#ifndef __CHELSIO_VERSION_H
34#define __CHELSIO_VERSION_H
35#define DRV_DESC "Chelsio T3 Network Driver"
36#define DRV_NAME "cxgb3"
37/* Driver version */
38#define DRV_VERSION "1.0"
39#endif /* __CHELSIO_VERSION_H */
diff --git a/drivers/net/cxgb3/vsc8211.c b/drivers/net/cxgb3/vsc8211.c
new file mode 100644
index 000000000000..eee4285b31be
--- /dev/null
+++ b/drivers/net/cxgb3/vsc8211.c
@@ -0,0 +1,228 @@
1/*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33
34/* VSC8211 PHY specific registers. */
35enum {
36 VSC8211_INTR_ENABLE = 25,
37 VSC8211_INTR_STATUS = 26,
38 VSC8211_AUX_CTRL_STAT = 28,
39};
40
41enum {
42 VSC_INTR_RX_ERR = 1 << 0,
43 VSC_INTR_MS_ERR = 1 << 1, /* master/slave resolution error */
44 VSC_INTR_CABLE = 1 << 2, /* cable impairment */
45 VSC_INTR_FALSE_CARR = 1 << 3, /* false carrier */
46 VSC_INTR_MEDIA_CHG = 1 << 4, /* AMS media change */
47 VSC_INTR_RX_FIFO = 1 << 5, /* Rx FIFO over/underflow */
48 VSC_INTR_TX_FIFO = 1 << 6, /* Tx FIFO over/underflow */
49 VSC_INTR_DESCRAMBL = 1 << 7, /* descrambler lock-lost */
50 VSC_INTR_SYMBOL_ERR = 1 << 8, /* symbol error */
51 VSC_INTR_NEG_DONE = 1 << 10, /* autoneg done */
52 VSC_INTR_NEG_ERR = 1 << 11, /* autoneg error */
53 VSC_INTR_LINK_CHG = 1 << 13, /* link change */
54 VSC_INTR_ENABLE = 1 << 15, /* interrupt enable */
55};
56
57#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \
58 VSC_INTR_NEG_DONE)
59#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \
60 VSC_INTR_ENABLE)
61
62/* PHY specific auxiliary control & status register fields */
63#define S_ACSR_ACTIPHY_TMR 0
64#define M_ACSR_ACTIPHY_TMR 0x3
65#define V_ACSR_ACTIPHY_TMR(x) ((x) << S_ACSR_ACTIPHY_TMR)
66
67#define S_ACSR_SPEED 3
68#define M_ACSR_SPEED 0x3
69#define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED)
70
71#define S_ACSR_DUPLEX 5
72#define F_ACSR_DUPLEX (1 << S_ACSR_DUPLEX)
73
74#define S_ACSR_ACTIPHY 6
75#define F_ACSR_ACTIPHY (1 << S_ACSR_ACTIPHY)
76
77/*
78 * Reset the PHY. This PHY completes reset immediately so we never wait.
79 */
80static int vsc8211_reset(struct cphy *cphy, int wait)
81{
82 return t3_phy_reset(cphy, 0, 0);
83}
84
85static int vsc8211_intr_enable(struct cphy *cphy)
86{
87 return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, INTR_MASK);
88}
89
90static int vsc8211_intr_disable(struct cphy *cphy)
91{
92 return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, 0);
93}
94
95static int vsc8211_intr_clear(struct cphy *cphy)
96{
97 u32 val;
98
99 /* Clear PHY interrupts by reading the register. */
100 return mdio_read(cphy, 0, VSC8211_INTR_STATUS, &val);
101}
102
103static int vsc8211_autoneg_enable(struct cphy *cphy)
104{
105 return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
106 BMCR_ANENABLE | BMCR_ANRESTART);
107}
108
109static int vsc8211_autoneg_restart(struct cphy *cphy)
110{
111 return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
112 BMCR_ANRESTART);
113}
114
115static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok,
116 int *speed, int *duplex, int *fc)
117{
118 unsigned int bmcr, status, lpa, adv;
119 int err, sp = -1, dplx = -1, pause = 0;
120
121 err = mdio_read(cphy, 0, MII_BMCR, &bmcr);
122 if (!err)
123 err = mdio_read(cphy, 0, MII_BMSR, &status);
124 if (err)
125 return err;
126
127 if (link_ok) {
128 /*
129 * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
130 * once more to get the current link state.
131 */
132 if (!(status & BMSR_LSTATUS))
133 err = mdio_read(cphy, 0, MII_BMSR, &status);
134 if (err)
135 return err;
136 *link_ok = (status & BMSR_LSTATUS) != 0;
137 }
138 if (!(bmcr & BMCR_ANENABLE)) {
139 dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
140 if (bmcr & BMCR_SPEED1000)
141 sp = SPEED_1000;
142 else if (bmcr & BMCR_SPEED100)
143 sp = SPEED_100;
144 else
145 sp = SPEED_10;
146 } else if (status & BMSR_ANEGCOMPLETE) {
147 err = mdio_read(cphy, 0, VSC8211_AUX_CTRL_STAT, &status);
148 if (err)
149 return err;
150
151 dplx = (status & F_ACSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
152 sp = G_ACSR_SPEED(status);
153 if (sp == 0)
154 sp = SPEED_10;
155 else if (sp == 1)
156 sp = SPEED_100;
157 else
158 sp = SPEED_1000;
159
160 if (fc && dplx == DUPLEX_FULL) {
161 err = mdio_read(cphy, 0, MII_LPA, &lpa);
162 if (!err)
163 err = mdio_read(cphy, 0, MII_ADVERTISE, &adv);
164 if (err)
165 return err;
166
167 if (lpa & adv & ADVERTISE_PAUSE_CAP)
168 pause = PAUSE_RX | PAUSE_TX;
169 else if ((lpa & ADVERTISE_PAUSE_CAP) &&
170 (lpa & ADVERTISE_PAUSE_ASYM) &&
171 (adv & ADVERTISE_PAUSE_ASYM))
172 pause = PAUSE_TX;
173 else if ((lpa & ADVERTISE_PAUSE_ASYM) &&
174 (adv & ADVERTISE_PAUSE_CAP))
175 pause = PAUSE_RX;
176 }
177 }
178 if (speed)
179 *speed = sp;
180 if (duplex)
181 *duplex = dplx;
182 if (fc)
183 *fc = pause;
184 return 0;
185}
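The flow-control block above implements the standard IEEE 802.3 pause resolution. Restated as a standalone helper for clarity (a sketch mirroring the code, not part of the driver):

/* Same truth table as the fc branch of vsc8211_get_link_status(). */
static int resolve_pause(unsigned int adv, unsigned int lpa)
{
	if (lpa & adv & ADVERTISE_PAUSE_CAP)	/* both ends symmetric */
		return PAUSE_RX | PAUSE_TX;
	if ((lpa & ADVERTISE_PAUSE_CAP) && (lpa & ADVERTISE_PAUSE_ASYM) &&
	    (adv & ADVERTISE_PAUSE_ASYM))	/* we advertised asymmetric */
		return PAUSE_TX;
	if ((lpa & ADVERTISE_PAUSE_ASYM) && (adv & ADVERTISE_PAUSE_CAP))
		return PAUSE_RX;
	return 0;				/* no pause negotiated */
}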
186
187static int vsc8211_power_down(struct cphy *cphy, int enable)
188{
189 return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN,
190 enable ? BMCR_PDOWN : 0);
191}
192
193static int vsc8211_intr_handler(struct cphy *cphy)
194{
195 unsigned int cause;
196 int err, cphy_cause = 0;
197
198 err = mdio_read(cphy, 0, VSC8211_INTR_STATUS, &cause);
199 if (err)
200 return err;
201
202 cause &= INTR_MASK;
203 if (cause & CFG_CHG_INTR_MASK)
204 cphy_cause |= cphy_cause_link_change;
205 if (cause & (VSC_INTR_RX_FIFO | VSC_INTR_TX_FIFO))
206 cphy_cause |= cphy_cause_fifo_error;
207 return cphy_cause;
208}
209
210static struct cphy_ops vsc8211_ops = {
211 .reset = vsc8211_reset,
212 .intr_enable = vsc8211_intr_enable,
213 .intr_disable = vsc8211_intr_disable,
214 .intr_clear = vsc8211_intr_clear,
215 .intr_handler = vsc8211_intr_handler,
216 .autoneg_enable = vsc8211_autoneg_enable,
217 .autoneg_restart = vsc8211_autoneg_restart,
218 .advertise = t3_phy_advertise,
219 .set_speed_duplex = t3_set_phy_speed_duplex,
220 .get_link_status = vsc8211_get_link_status,
221 .power_down = vsc8211_power_down,
222};
223
224void t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
225 int phy_addr, const struct mdio_ops *mdio_ops)
226{
227 cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops);
228}
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
new file mode 100644
index 000000000000..907a272ae32d
--- /dev/null
+++ b/drivers/net/cxgb3/xgmac.c
@@ -0,0 +1,409 @@
1/*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33#include "regs.h"
34
35/*
36 * # of exact address filters. The first one is used for the station address,
37 * the rest are available for multicast addresses.
38 */
39#define EXACT_ADDR_FILTERS 8
40
41static inline int macidx(const struct cmac *mac)
42{
43 return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
44}
45
46static void xaui_serdes_reset(struct cmac *mac)
47{
48 static const unsigned int clear[] = {
49 F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
50 F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
51 };
52
53 int i;
54 struct adapter *adap = mac->adapter;
55 u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;
56
57 t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
58 F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
59 F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
60 F_RESETPLL23 | F_RESETPLL01);
61 t3_read_reg(adap, ctrl);
62 udelay(15);
63
64 for (i = 0; i < ARRAY_SIZE(clear); i++) {
65 t3_set_reg_field(adap, ctrl, clear[i], 0);
66 udelay(15);
67 }
68}
69
70void t3b_pcs_reset(struct cmac *mac)
71{
72 t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
73 F_PCS_RESET_, 0);
74 udelay(20);
75 t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
76 F_PCS_RESET_);
77}
78
79int t3_mac_reset(struct cmac *mac)
80{
81 static const struct addr_val_pair mac_reset_avp[] = {
82 {A_XGM_TX_CTRL, 0},
83 {A_XGM_RX_CTRL, 0},
84 {A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
85 F_RMFCS | F_ENJUMBO | F_ENHASHMCAST},
86 {A_XGM_RX_HASH_LOW, 0},
87 {A_XGM_RX_HASH_HIGH, 0},
88 {A_XGM_RX_EXACT_MATCH_LOW_1, 0},
89 {A_XGM_RX_EXACT_MATCH_LOW_2, 0},
90 {A_XGM_RX_EXACT_MATCH_LOW_3, 0},
91 {A_XGM_RX_EXACT_MATCH_LOW_4, 0},
92 {A_XGM_RX_EXACT_MATCH_LOW_5, 0},
93 {A_XGM_RX_EXACT_MATCH_LOW_6, 0},
94 {A_XGM_RX_EXACT_MATCH_LOW_7, 0},
95 {A_XGM_RX_EXACT_MATCH_LOW_8, 0},
96 {A_XGM_STAT_CTRL, F_CLRSTATS}
97 };
98 u32 val;
99 struct adapter *adap = mac->adapter;
100 unsigned int oft = mac->offset;
101
102 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
103 t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
104
105 t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
106 t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
107 F_RXSTRFRWRD | F_DISERRFRAMES,
108 uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
109
110 if (uses_xaui(adap)) {
111 if (adap->params.rev == 0) {
112 t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
113 F_RXENABLE | F_TXENABLE);
114 if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
115 F_CMULOCK, 1, 5, 2)) {
116 CH_ERR(adap,
117 "MAC %d XAUI SERDES CMU lock failed\n",
118 macidx(mac));
119 return -1;
120 }
121 t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
122 F_SERDESRESET_);
123 } else
124 xaui_serdes_reset(mac);
125 }
126
127 if (adap->params.rev > 0)
128 t3_write_reg(adap, A_XGM_PAUSE_TIMER + oft, 0xf000);
129
130 val = F_MAC_RESET_;
131 if (is_10G(adap))
132 val |= F_PCS_RESET_;
133 else if (uses_xaui(adap))
134 val |= F_PCS_RESET_ | F_XG2G_RESET_;
135 else
136 val |= F_RGMII_RESET_ | F_XG2G_RESET_;
137 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
138 t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
139 if ((val & F_PCS_RESET_) && adap->params.rev) {
140 msleep(1);
141 t3b_pcs_reset(mac);
142 }
143
144 memset(&mac->stats, 0, sizeof(mac->stats));
145 return 0;
146}
147
148/*
149 * Set the exact match register 'idx' to recognize the given Ethernet address.
150 */
151static void set_addr_filter(struct cmac *mac, int idx, const u8 * addr)
152{
153 u32 addr_lo, addr_hi;
154 unsigned int oft = mac->offset + idx * 8;
155
156 addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
157 addr_hi = (addr[5] << 8) | addr[4];
158
159 t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
160 t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
161}
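For reference, the register packing above worked through with a hypothetical address:

/*
 * Example: for 00:07:43:12:34:56,
 *	addr_lo = 0x12430700	(addr[3..0], little-endian)
 *	addr_hi = 0x00005634	(addr[5..4], little-endian)
 */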
162
163/* Set one of the station's unicast MAC addresses. */
164int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
165{
166 if (idx >= mac->nucast)
167 return -EINVAL;
168 set_addr_filter(mac, idx, addr);
169 return 0;
170}
171
172/*
173 * Specify the number of exact address filters that should be reserved for
174 * unicast addresses. Caller should reload the unicast and multicast addresses
175 * after calling this.
176 */
177int t3_mac_set_num_ucast(struct cmac *mac, int n)
178{
179 if (n > EXACT_ADDR_FILTERS)
180 return -EINVAL;
181 mac->nucast = n;
182 return 0;
183}
184
185/* Calculate the RX hash filter index of an Ethernet address */
186static int hash_hw_addr(const u8 * addr)
187{
188 int hash = 0, octet, bit, i = 0, c;
189
190 for (octet = 0; octet < 6; ++octet)
191 for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
192 hash ^= (c & 1) << i;
193 if (++i == 6)
194 i = 0;
195 }
196 return hash;
197}
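The loop XOR-folds the 48 address bits, LSB of each octet first, into a 6-bit index; a quick check by hand:

/*
 * Example: for ff:ff:ff:ff:ff:ff each index bit i = 0..5 is XORed with
 * exactly eight 1s (48 bits spread over 6 positions), and an even
 * number of 1s XORs to 0, so hash_hw_addr() returns 0.
 */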
198
199int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm)
200{
201 u32 val, hash_lo, hash_hi;
202 struct adapter *adap = mac->adapter;
203 unsigned int oft = mac->offset;
204
205 val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
206 if (rm->dev->flags & IFF_PROMISC)
207 val |= F_COPYALLFRAMES;
208 t3_write_reg(adap, A_XGM_RX_CFG + oft, val);
209
210 if (rm->dev->flags & IFF_ALLMULTI)
211 hash_lo = hash_hi = 0xffffffff;
212 else {
213 u8 *addr;
214 int exact_addr_idx = mac->nucast;
215
216 hash_lo = hash_hi = 0;
217 while ((addr = t3_get_next_mcaddr(rm)))
218 if (exact_addr_idx < EXACT_ADDR_FILTERS)
219 set_addr_filter(mac, exact_addr_idx++, addr);
220 else {
221 int hash = hash_hw_addr(addr);
222
223 if (hash < 32)
224 hash_lo |= (1 << hash);
225 else
226 hash_hi |= (1 << (hash - 32));
227 }
228 }
229
230 t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
231 t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
232 return 0;
233}
234
235int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
236{
237 int hwm, lwm;
238 unsigned int thres, v;
239 struct adapter *adap = mac->adapter;
240
241 /*
242 	 * MAX_FRAME_SIZE includes header + FCS, mtu doesn't.  The HW max
243 * packet size register includes header, but not FCS.
244 */
245 mtu += 14;
246 if (mtu > MAX_FRAME_SIZE - 4)
247 return -EINVAL;
248 t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
249
250 /*
251 * Adjust the PAUSE frame watermarks. We always set the LWM, and the
252 * HWM only if flow-control is enabled.
253 */
254 hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, MAC_RXFIFO_SIZE / 2U);
255 hwm = min(hwm, 3 * MAC_RXFIFO_SIZE / 4 + 1024);
256 lwm = hwm - 1024;
257 v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
258 v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
259 v |= V_RXFIFOPAUSELWM(lwm / 8);
260 if (G_RXFIFOPAUSEHWM(v))
261 v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
262 V_RXFIFOPAUSEHWM(hwm / 8);
263 t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);
264
265 /* Adjust the TX FIFO threshold based on the MTU */
266 thres = (adap->params.vpd.cclk * 1000) / 15625;
267 thres = (thres * mtu) / 1000;
268 if (is_10G(adap))
269 thres /= 10;
270 thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
271 thres = max(thres, 8U); /* need at least 8 */
272 t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
273 V_TXFIFOTHRESH(M_TXFIFOTHRESH), V_TXFIFOTHRESH(thres));
274 return 0;
275}
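A worked pass through the watermark arithmetic, assuming MAC_RXFIFO_SIZE is 32768 bytes (see common.h for the real value) and a standard 1500-byte MTU:

/*
 * mtu = 1500 + 14 = 1514
 * hwm = max(32768 - 3 * 1514, 32768 / 2)  = max(28226, 16384) = 28226
 * hwm = min(28226, 3 * 32768 / 4 + 1024)  = min(28226, 25600) = 25600
 * lwm = 25600 - 1024 = 24576
 * and the fields are written in 8-byte units: LWM 3072, HWM 3200.
 */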
276
277int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
278{
279 u32 val;
280 struct adapter *adap = mac->adapter;
281 unsigned int oft = mac->offset;
282
283 if (duplex >= 0 && duplex != DUPLEX_FULL)
284 return -EINVAL;
285 if (speed >= 0) {
286 if (speed == SPEED_10)
287 val = V_PORTSPEED(0);
288 else if (speed == SPEED_100)
289 val = V_PORTSPEED(1);
290 else if (speed == SPEED_1000)
291 val = V_PORTSPEED(2);
292 else if (speed == SPEED_10000)
293 val = V_PORTSPEED(3);
294 else
295 return -EINVAL;
296
297 t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
298 V_PORTSPEED(M_PORTSPEED), val);
299 }
300
301 val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
302 val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
303 if (fc & PAUSE_TX)
304 val |= V_RXFIFOPAUSEHWM(G_RXFIFOPAUSELWM(val) + 128); /* +1KB */
305 t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
306
307 t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
308 (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
309 return 0;
310}
311
312int t3_mac_enable(struct cmac *mac, int which)
313{
314 int idx = macidx(mac);
315 struct adapter *adap = mac->adapter;
316 unsigned int oft = mac->offset;
317
318 if (which & MAC_DIRECTION_TX) {
319 t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
320 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
321 t3_write_reg(adap, A_TP_PIO_DATA, 0xbf000001);
322 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
323 t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
324 }
325 if (which & MAC_DIRECTION_RX)
326 t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
327 return 0;
328}
329
330int t3_mac_disable(struct cmac *mac, int which)
331{
332 int idx = macidx(mac);
333 struct adapter *adap = mac->adapter;
334
335 if (which & MAC_DIRECTION_TX) {
336 t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
337 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
338 t3_write_reg(adap, A_TP_PIO_DATA, 0xc000001f);
339 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
340 t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 0);
341 }
342 if (which & MAC_DIRECTION_RX)
343 t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
344 return 0;
345}
346
347/*
348 * This function is called periodically to accumulate the current values of the
349 * RMON counters into the port statistics.  Since the packet counters are
350 * only 32 bits wide they can overflow in ~286 secs at 10G, so the function
351 * should be called more frequently than that.  The byte counters are 45 bits
352 * wide and would overflow only after ~7.8 hours.
353 */
354const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
355{
356#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
357#define RMON_UPDATE(mac, name, reg) \
358 (mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
359#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
360 (mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
361 ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)
362
363 u32 v, lo;
364
365 RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
366 RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
367 RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
368 RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
369 RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
370 RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
371 RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
372 RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
373 RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);
374
375 RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);
376 mac->stats.rx_too_long += RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
377
378 RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
379 RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
380 RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
381 RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
382 RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
383 RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
384 RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);
385
386 RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
387 RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
388 RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
389 RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
390 RMON_UPDATE(mac, tx_pause, TX_PAUSE);
391 	/* This counts error frames in general (bad FCS, underrun, etc.). */
392 RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);
393
394 RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
395 RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
396 RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
397 RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
398 RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
399 RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
400 RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);
401
402 	/* The next stat isn't clear-on-read, so accumulate the 32-bit delta. */
403 t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
404 v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
405 lo = (u32) mac->stats.rx_cong_drops;
406 mac->stats.rx_cong_drops += (u64) (v - lo);
407
408 return &mac->stats;
409}
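The overflow figures in the comment before t3_mac_update_stats() check out at line rate (back-of-the-envelope, assuming minimum-size frames):

/*
 * At 10 Gb/s a minimum-size frame occupies 84 bytes on the wire
 * (64 B frame + 8 B preamble + 12 B inter-frame gap), i.e. about
 * 14.88 Mframes/s, so a 32-bit frame counter wraps after roughly
 * 2^32 / 14.88e6 ~= 289 s -- the ~286 s quoted above.  A 45-bit byte
 * counter wraps after 2^45 / 1.25e9 B/s ~= 28148 s, about 7.8 hours.
 */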