author    Divy Le Ray <divy@chelsio.com>    2007-01-18 22:04:14 -0500
committer Jeff Garzik <jeff@garzik.org>     2007-02-05 16:58:46 -0500
commit    4d22de3e6cc4a09c369b504cd8bcde3385a974cd (patch)
tree      af13a2ee582105d961c79fc4e55fce0b5e043310
parent    0bf94faf64afaba6e7b49fd11541b59d2ba06d0e (diff)
Add support for the latest 1G/10G Chelsio adapter, T3.
This driver is required by the Chelsio T3 RDMA driver posted by Steve Wise.

Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
-rw-r--r--  drivers/net/Kconfig                     |   18
-rw-r--r--  drivers/net/Makefile                    |    1
-rw-r--r--  drivers/net/cxgb3/Makefile              |    8
-rw-r--r--  drivers/net/cxgb3/adapter.h             |  255
-rw-r--r--  drivers/net/cxgb3/ael1002.c             |  231
-rw-r--r--  drivers/net/cxgb3/common.h              |  709
-rw-r--r--  drivers/net/cxgb3/cxgb3_ctl_defs.h      |  142
-rw-r--r--  drivers/net/cxgb3/cxgb3_defs.h          |   99
-rw-r--r--  drivers/net/cxgb3/cxgb3_ioctl.h         |  165
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c          | 2474
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.c       | 1222
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.h       |  193
-rw-r--r--  drivers/net/cxgb3/firmware_exports.h    |  144
-rw-r--r--  drivers/net/cxgb3/l2t.c                 |  450
-rw-r--r--  drivers/net/cxgb3/l2t.h                 |  143
-rw-r--r--  drivers/net/cxgb3/mc5.c                 |  453
-rw-r--r--  drivers/net/cxgb3/regs.h                | 2195
-rw-r--r--  drivers/net/cxgb3/sge.c                 | 2702
-rw-r--r--  drivers/net/cxgb3/sge_defs.h            |  251
-rw-r--r--  drivers/net/cxgb3/t3_cpl.h              | 1426
-rw-r--r--  drivers/net/cxgb3/t3_hw.c               | 3354
-rw-r--r--  drivers/net/cxgb3/t3cdev.h              |   72
-rw-r--r--  drivers/net/cxgb3/version.h             |   24
-rw-r--r--  drivers/net/cxgb3/vsc8211.c             |  208
-rw-r--r--  drivers/net/cxgb3/xgmac.c               |  389
25 files changed, 17328 insertions, 0 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b199456a7364..555b3ad66098 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2389,6 +2389,24 @@ config CHELSIO_T1_NAPI
2389 NAPI is a driver API designed to reduce CPU and interrupt load
2390 when the driver is receiving lots of packets from the card.
2391
2392config CHELSIO_T3
2393 tristate "Chelsio Communications T3 10Gb Ethernet support"
2394 depends on PCI
2395 help
2396 This driver supports Chelsio T3-based gigabit and 10Gb Ethernet
2397 adapters.
2398
2399 For general information about Chelsio and our products, visit
2400 our website at <http://www.chelsio.com>.
2401
2402 For customer support, please visit our customer support page at
2403 <http://www.chelsio.com/support.htm>.
2404
2405 Please send feedback to <linux-bugs@chelsio.com>.
2406
2407 To compile this driver as a module, choose M here: the module
2408 will be called cxgb3.
2409
2410config EHEA
2411 tristate "eHEA Ethernet support"
2412 depends on IBMEBUS
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 6c61f3e7396b..88b63369d783 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_E1000) += e1000/
6obj-$(CONFIG_IBM_EMAC) += ibm_emac/
7obj-$(CONFIG_IXGB) += ixgb/
8obj-$(CONFIG_CHELSIO_T1) += chelsio/
9obj-$(CONFIG_CHELSIO_T3) += cxgb3/
10obj-$(CONFIG_EHEA) += ehea/
11obj-$(CONFIG_BONDING) += bonding/
12obj-$(CONFIG_GIANFAR) += gianfar_driver.o
diff --git a/drivers/net/cxgb3/Makefile b/drivers/net/cxgb3/Makefile
new file mode 100644
index 000000000000..343467985321
--- /dev/null
+++ b/drivers/net/cxgb3/Makefile
@@ -0,0 +1,8 @@
1#
2# Chelsio T3 driver
3#
4
5obj-$(CONFIG_CHELSIO_T3) += cxgb3.o
6
7cxgb3-objs := cxgb3_main.o ael1002.o vsc8211.o t3_hw.o mc5.o \
8 xgmac.o sge.o l2t.o cxgb3_offload.o
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
new file mode 100644
index 000000000000..16643f6d00a9
--- /dev/null
+++ b/drivers/net/cxgb3/adapter.h
@@ -0,0 +1,255 @@
1/*
2 * This file is part of the Chelsio T3 Ethernet driver for Linux.
3 *
4 * Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 */
11
12/* This file should not be included directly. Include common.h instead. */
13
14#ifndef __T3_ADAPTER_H__
15#define __T3_ADAPTER_H__
16
17#include <linux/pci.h>
18#include <linux/spinlock.h>
19#include <linux/interrupt.h>
20#include <linux/timer.h>
21#include <linux/cache.h>
22#include "t3cdev.h"
23#include <asm/semaphore.h>
24#include <asm/bitops.h>
25#include <asm/io.h>
26
27typedef irqreturn_t(*intr_handler_t) (int, void *);
28
29struct vlan_group;
30
31struct port_info {
32 struct vlan_group *vlan_grp;
33 const struct port_type_info *port_type;
34 u8 port_id;
35 u8 rx_csum_offload;
36 u8 nqsets;
37 u8 first_qset;
38 struct cphy phy;
39 struct cmac mac;
40 struct link_config link_config;
41 struct net_device_stats netstats;
42 int activity;
43};
44
45enum { /* adapter flags */
46 FULL_INIT_DONE = (1 << 0),
47 USING_MSI = (1 << 1),
48 USING_MSIX = (1 << 2),
49};
50
51struct rx_desc;
52struct rx_sw_desc;
53
54struct sge_fl { /* SGE per free-buffer list state */
55 unsigned int buf_size; /* size of each Rx buffer */
56 unsigned int credits; /* # of available Rx buffers */
57 unsigned int size; /* capacity of free list */
58 unsigned int cidx; /* consumer index */
59 unsigned int pidx; /* producer index */
60 unsigned int gen; /* free list generation */
61 struct rx_desc *desc; /* address of HW Rx descriptor ring */
62 struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
63 dma_addr_t phys_addr; /* physical address of HW ring start */
64 unsigned int cntxt_id; /* SGE context id for the free list */
65 unsigned long empty; /* # of times queue ran out of buffers */
66};
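/*
 * Editorial sketch (not part of the patch): the free list above is a
 * producer/consumer ring.  The driver refills buffers at pidx, the hardware
 * consumes them at cidx, and the generation bit flips on every wrap so stale
 * descriptors can be told apart from fresh ones.  The helper name below is
 * hypothetical; sge.c contains the real refill code.
 */
static inline void example_fl_advance_pidx(struct sge_fl *fl)
{
	fl->credits++;
	if (++fl->pidx == fl->size) {
		fl->pidx = 0;
		fl->gen ^= 1;		/* flip generation on wrap */
	}
}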
67
68/*
69 * Bundle size for grouping offload RX packets for delivery to the stack.
70 * Don't make this too big as we do prefetch on each packet in a bundle.
71 */
72# define RX_BUNDLE_SIZE 8
73
74struct rsp_desc;
75
76struct sge_rspq { /* state for an SGE response queue */
77 unsigned int credits; /* # of pending response credits */
78 unsigned int size; /* capacity of response queue */
79 unsigned int cidx; /* consumer index */
80 unsigned int gen; /* current generation bit */
81 unsigned int polling; /* is the queue serviced through NAPI? */
82 unsigned int holdoff_tmr; /* interrupt holdoff timer in 100ns */
83 unsigned int next_holdoff; /* holdoff time for next interrupt */
84 struct rsp_desc *desc; /* address of HW response ring */
85 dma_addr_t phys_addr; /* physical address of the ring */
86 unsigned int cntxt_id; /* SGE context id for the response q */
87 spinlock_t lock; /* guards response processing */
88 struct sk_buff *rx_head; /* offload packet receive queue head */
89 struct sk_buff *rx_tail; /* offload packet receive queue tail */
90
91 unsigned long offload_pkts;
92 unsigned long offload_bundles;
93 unsigned long eth_pkts; /* # of ethernet packets */
94 unsigned long pure_rsps; /* # of pure (non-data) responses */
95 unsigned long imm_data; /* responses with immediate data */
96 unsigned long rx_drops; /* # of packets dropped due to no mem */
97 unsigned long async_notif; /* # of asynchronous notification events */
98 unsigned long empty; /* # of times queue ran out of credits */
99 unsigned long nomem; /* # of responses deferred due to no mem */
100 unsigned long unhandled_irqs; /* # of spurious intrs */
101};
102
103struct tx_desc;
104struct tx_sw_desc;
105
106struct sge_txq { /* state for an SGE Tx queue */
107 unsigned long flags; /* HW DMA fetch status */
108 unsigned int in_use; /* # of in-use Tx descriptors */
109 unsigned int size; /* # of descriptors */
110 unsigned int processed; /* total # of descs HW has processed */
111 unsigned int cleaned; /* total # of descs SW has reclaimed */
112 unsigned int stop_thres; /* SW TX queue suspend threshold */
113 unsigned int cidx; /* consumer index */
114 unsigned int pidx; /* producer index */
115 unsigned int gen; /* current value of generation bit */
116 unsigned int unacked; /* Tx descriptors used since last COMPL */
117 struct tx_desc *desc; /* address of HW Tx descriptor ring */
118 struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
119 spinlock_t lock; /* guards enqueueing of new packets */
120 unsigned int token; /* WR token */
121 dma_addr_t phys_addr; /* physical address of the ring */
122 struct sk_buff_head sendq; /* List of backpressured offload packets */
123 struct tasklet_struct qresume_tsk; /* restarts the queue */
124 unsigned int cntxt_id; /* SGE context id for the Tx q */
125 unsigned long stops; /* # of times q has been stopped */
126 unsigned long restarts; /* # of queue restarts */
127};
128
129enum { /* per port SGE statistics */
130 SGE_PSTAT_TSO, /* # of TSO requests */
131 SGE_PSTAT_RX_CSUM_GOOD, /* # of successful RX csum offloads */
132 SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
133 SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
134 SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
135
136 SGE_PSTAT_MAX /* must be last */
137};
138
139struct sge_qset { /* an SGE queue set */
140 struct sge_rspq rspq;
141 struct sge_fl fl[SGE_RXQ_PER_SET];
142 struct sge_txq txq[SGE_TXQ_PER_SET];
143 struct net_device *netdev; /* associated net device */
144 unsigned long txq_stopped; /* which Tx queues are stopped */
145 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
146 unsigned long port_stats[SGE_PSTAT_MAX];
147} ____cacheline_aligned;
148
149struct sge {
150 struct sge_qset qs[SGE_QSETS];
151 spinlock_t reg_lock; /* guards non-atomic SGE registers (eg context) */
152};
153
154struct adapter {
155 struct t3cdev tdev;
156 struct list_head adapter_list;
157 void __iomem *regs;
158 struct pci_dev *pdev;
159 unsigned long registered_device_map;
160 unsigned long open_device_map;
161 unsigned long flags;
162
163 const char *name;
164 int msg_enable;
165 unsigned int mmio_len;
166
167 struct adapter_params params;
168 unsigned int slow_intr_mask;
169 unsigned long irq_stats[IRQ_NUM_STATS];
170
171 struct {
172 unsigned short vec;
173 char desc[22];
174 } msix_info[SGE_QSETS + 1];
175
176 /* T3 modules */
177 struct sge sge;
178 struct mc7 pmrx;
179 struct mc7 pmtx;
180 struct mc7 cm;
181 struct mc5 mc5;
182
183 struct net_device *port[MAX_NPORTS];
184 unsigned int check_task_cnt;
185 struct delayed_work adap_check_task;
186 struct work_struct ext_intr_handler_task;
187
188 /*
189 * Dummy netdevices are needed when using multiple receive queues with
190 * NAPI as each netdevice can service only one queue.
191 */
192 struct net_device *dummy_netdev[SGE_QSETS - 1];
193
194 struct dentry *debugfs_root;
195
196 struct mutex mdio_lock;
197 spinlock_t stats_lock;
198 spinlock_t work_lock;
199};
200
201static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr)
202{
203 u32 val = readl(adapter->regs + reg_addr);
204
205 CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr, val);
206 return val;
207}
208
209static inline void t3_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
210{
211 CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr, val);
212 writel(val, adapter->regs + reg_addr);
213}
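/*
 * Editorial sketch: the two accessors above wrap readl()/writel() on the
 * BAR0 mapping and log each access when MMIO debugging is compiled in.  A
 * read-modify-write built on them would look roughly like this; the driver
 * declares t3_set_reg_field() in common.h for that purpose, so the helper
 * below is illustrative only.
 */
static inline void example_rmw_reg(struct adapter *adapter, u32 reg_addr,
				   u32 mask, u32 val)
{
	u32 v = t3_read_reg(adapter, reg_addr);

	t3_write_reg(adapter, reg_addr, (v & ~mask) | val);
}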
214
215static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
216{
217 return netdev_priv(adap->port[idx]);
218}
219
220/*
221 * We use the spare atalk_ptr to map a net device to its SGE queue set.
222 * This is a macro so it can be used as l-value.
223 */
224#define dev2qset(netdev) ((netdev)->atalk_ptr)
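/*
 * Editorial sketch: because dev2qset() expands to an l-value it can sit on
 * either side of an assignment.  The hypothetical helpers below show how a
 * net device could be bound to, and later looked up against, its queue set.
 */
static inline void example_bind_dev_to_qset(struct net_device *dev,
					    struct sge_qset *qs)
{
	dev2qset(dev) = qs;		/* store the association */
}

static inline struct sge_qset *example_dev_to_qset(struct net_device *dev)
{
	return dev2qset(dev);		/* read it back */
}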
225
226#define OFFLOAD_DEVMAP_BIT 15
227
228#define tdev2adap(d) container_of(d, struct adapter, tdev)
229
230static inline int offload_running(struct adapter *adapter)
231{
232 return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
233}
234
235int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb);
236
237void t3_os_ext_intr_handler(struct adapter *adapter);
238void t3_os_link_changed(struct adapter *adapter, int port_id, int link_status,
239 int speed, int duplex, int fc);
240
241void t3_sge_start(struct adapter *adap);
242void t3_sge_stop(struct adapter *adap);
243void t3_free_sge_resources(struct adapter *adap);
244void t3_sge_err_intr_handler(struct adapter *adapter);
245intr_handler_t t3_intr_handler(struct adapter *adap, int polling);
246int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev);
247void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
248int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
249 int irq_vec_idx, const struct qset_params *p,
250 int ntxq, struct net_device *netdev);
251int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
252 unsigned char *data);
253irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
254
255#endif /* __T3_ADAPTER_H__ */
diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c
new file mode 100644
index 000000000000..93a90d825d85
--- /dev/null
+++ b/drivers/net/cxgb3/ael1002.c
@@ -0,0 +1,231 @@
1/*
2 * This file is part of the Chelsio T3 Ethernet driver.
3 *
4 * Copyright (C) 2005-2006 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 */
11
12#include "common.h"
13#include "regs.h"
14
15enum {
16 AEL100X_TX_DISABLE = 9,
17 AEL100X_TX_CONFIG1 = 0xc002,
18 AEL1002_PWR_DOWN_HI = 0xc011,
19 AEL1002_PWR_DOWN_LO = 0xc012,
20 AEL1002_XFI_EQL = 0xc015,
21 AEL1002_LB_EN = 0xc017,
22
23 LASI_CTRL = 0x9002,
24 LASI_STAT = 0x9005
25};
26
27static void ael100x_txon(struct cphy *phy)
28{
29 int tx_on_gpio = phy->addr == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL;
30
31 msleep(100);
32 t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, 0, tx_on_gpio);
33 msleep(30);
34}
35
36static int ael1002_power_down(struct cphy *phy, int enable)
37{
38 int err;
39
40 err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_DISABLE, !!enable);
41 if (!err)
42 err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
43 BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
44 return err;
45}
46
47static int ael1002_reset(struct cphy *phy, int wait)
48{
49 int err;
50
51 if ((err = ael1002_power_down(phy, 0)) ||
52 (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_CONFIG1, 1)) ||
53 (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_HI, 0)) ||
54 (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_LO, 0)) ||
55 (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_XFI_EQL, 0x18)) ||
56 (err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, AEL1002_LB_EN,
57 0, 1 << 5)))
58 return err;
59 return 0;
60}
61
62static int ael1002_intr_noop(struct cphy *phy)
63{
64 return 0;
65}
66
67static int ael100x_get_link_status(struct cphy *phy, int *link_ok,
68 int *speed, int *duplex, int *fc)
69{
70 if (link_ok) {
71 unsigned int status;
72 int err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &status);
73
74 /*
75 * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
76 * once more to get the current link state.
77 */
78 if (!err && !(status & BMSR_LSTATUS))
79 err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR,
80 &status);
81 if (err)
82 return err;
83 *link_ok = !!(status & BMSR_LSTATUS);
84 }
85 if (speed)
86 *speed = SPEED_10000;
87 if (duplex)
88 *duplex = DUPLEX_FULL;
89 return 0;
90}
91
92static struct cphy_ops ael1002_ops = {
93 .reset = ael1002_reset,
94 .intr_enable = ael1002_intr_noop,
95 .intr_disable = ael1002_intr_noop,
96 .intr_clear = ael1002_intr_noop,
97 .intr_handler = ael1002_intr_noop,
98 .get_link_status = ael100x_get_link_status,
99 .power_down = ael1002_power_down,
100};
101
102void t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
103 int phy_addr, const struct mdio_ops *mdio_ops)
104{
105 cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops);
106 ael100x_txon(phy);
107}
108
109static int ael1006_reset(struct cphy *phy, int wait)
110{
111 return t3_phy_reset(phy, MDIO_DEV_PMA_PMD, wait);
112}
113
114static int ael1006_intr_enable(struct cphy *phy)
115{
116 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
117}
118
119static int ael1006_intr_disable(struct cphy *phy)
120{
121 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
122}
123
124static int ael1006_intr_clear(struct cphy *phy)
125{
126 u32 val;
127
128 return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
129}
130
131static int ael1006_intr_handler(struct cphy *phy)
132{
133 unsigned int status;
134 int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
135
136 if (err)
137 return err;
138 return (status & 1) ? cphy_cause_link_change : 0;
139}
140
141static int ael1006_power_down(struct cphy *phy, int enable)
142{
143 return t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
144 BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
145}
146
147static struct cphy_ops ael1006_ops = {
148 .reset = ael1006_reset,
149 .intr_enable = ael1006_intr_enable,
150 .intr_disable = ael1006_intr_disable,
151 .intr_clear = ael1006_intr_clear,
152 .intr_handler = ael1006_intr_handler,
153 .get_link_status = ael100x_get_link_status,
154 .power_down = ael1006_power_down,
155};
156
157void t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
158 int phy_addr, const struct mdio_ops *mdio_ops)
159{
160 cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops);
161 ael100x_txon(phy);
162}
163
164static struct cphy_ops qt2045_ops = {
165 .reset = ael1006_reset,
166 .intr_enable = ael1006_intr_enable,
167 .intr_disable = ael1006_intr_disable,
168 .intr_clear = ael1006_intr_clear,
169 .intr_handler = ael1006_intr_handler,
170 .get_link_status = ael100x_get_link_status,
171 .power_down = ael1006_power_down,
172};
173
174void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter,
175 int phy_addr, const struct mdio_ops *mdio_ops)
176{
177 unsigned int stat;
178
179 cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops);
180
181 /*
182 * Some cards where the PHY is supposed to be at address 0 actually
183 * have it at 1.
184 */
185 if (!phy_addr && !mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &stat) &&
186 stat == 0xffff)
187 phy->addr = 1;
188}
189
190static int xaui_direct_reset(struct cphy *phy, int wait)
191{
192 return 0;
193}
194
195static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok,
196 int *speed, int *duplex, int *fc)
197{
198 if (link_ok) {
199 unsigned int status;
200
201 status = t3_read_reg(phy->adapter,
202 XGM_REG(A_XGM_SERDES_STAT0, phy->addr));
203 *link_ok = !(status & F_LOWSIG0);
204 }
205 if (speed)
206 *speed = SPEED_10000;
207 if (duplex)
208 *duplex = DUPLEX_FULL;
209 return 0;
210}
211
212static int xaui_direct_power_down(struct cphy *phy, int enable)
213{
214 return 0;
215}
216
217static struct cphy_ops xaui_direct_ops = {
218 .reset = xaui_direct_reset,
219 .intr_enable = ael1002_intr_noop,
220 .intr_disable = ael1002_intr_noop,
221 .intr_clear = ael1002_intr_noop,
222 .intr_handler = ael1002_intr_noop,
223 .get_link_status = xaui_direct_get_link_status,
224 .power_down = xaui_direct_power_down,
225};
226
227void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
228 int phy_addr, const struct mdio_ops *mdio_ops)
229{
230 cphy_init(phy, adapter, 1, &xaui_direct_ops, mdio_ops);
231}
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
new file mode 100644
index 000000000000..60a979b62a65
--- /dev/null
+++ b/drivers/net/cxgb3/common.h
@@ -0,0 +1,709 @@
1/*
2 * This file is part of the Chelsio T3 Ethernet driver.
3 *
4 * Copyright (C) 2005-2006 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 */
11
12#ifndef __CHELSIO_COMMON_H
13#define __CHELSIO_COMMON_H
14
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/ctype.h>
18#include <linux/delay.h>
19#include <linux/init.h>
20#include <linux/netdevice.h>
21#include <linux/ethtool.h>
22#include <linux/mii.h>
23#include "version.h"
24
25#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__)
26#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__)
27#define CH_ALERT(adap, fmt, ...) \
28 dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__)
29
30/*
31 * More powerful macro that selectively prints messages based on msg_enable.
32 * For info and debugging messages.
33 */
34#define CH_MSG(adapter, level, category, fmt, ...) do { \
35 if ((adapter)->msg_enable & NETIF_MSG_##category) \
36 dev_printk(KERN_##level, &adapter->pdev->dev, fmt, \
37 ## __VA_ARGS__); \
38} while (0)
39
40#ifdef DEBUG
41# define CH_DBG(adapter, category, fmt, ...) \
42 CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
43#else
44# define CH_DBG(adapter, category, fmt, ...)
45#endif
46
47/* Additional NETIF_MSG_* categories */
48#define NETIF_MSG_MMIO 0x8000000
49
50struct t3_rx_mode {
51 struct net_device *dev;
52 struct dev_mc_list *mclist;
53 unsigned int idx;
54};
55
56static inline void init_rx_mode(struct t3_rx_mode *p, struct net_device *dev,
57 struct dev_mc_list *mclist)
58{
59 p->dev = dev;
60 p->mclist = mclist;
61 p->idx = 0;
62}
63
64static inline u8 *t3_get_next_mcaddr(struct t3_rx_mode *rm)
65{
66 u8 *addr = NULL;
67
68 if (rm->mclist && rm->idx < rm->dev->mc_count) {
69 addr = rm->mclist->dmi_addr;
70 rm->mclist = rm->mclist->next;
71 rm->idx++;
72 }
73 return addr;
74}
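/*
 * Editorial sketch: a MAC rx-mode update would walk the multicast list with
 * the iterator above, programming each returned address into the next
 * exact-match filter entry.  The loop below is illustrative only; the real
 * implementation is t3_mac_set_rx_mode() in xgmac.c.
 */
static inline void example_walk_mcaddrs(struct t3_rx_mode *rm)
{
	u8 *addr;

	while ((addr = t3_get_next_mcaddr(rm)) != NULL) {
		/* program 'addr' into the next exact-match entry here */
	}
}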
75
76enum {
77 MAX_NPORTS = 2, /* max # of ports */
78 MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */
79 EEPROMSIZE = 8192, /* Serial EEPROM size */
80 RSS_TABLE_SIZE = 64, /* size of RSS lookup and mapping tables */
81 TCB_SIZE = 128, /* TCB size */
82 NMTUS = 16, /* size of MTU table */
83 NCCTRL_WIN = 32, /* # of congestion control windows */
84};
85
86#define MAX_RX_COALESCING_LEN 16224U
87
88enum {
89 PAUSE_RX = 1 << 0,
90 PAUSE_TX = 1 << 1,
91 PAUSE_AUTONEG = 1 << 2
92};
93
94enum {
95 SUPPORTED_OFFLOAD = 1 << 24,
96 SUPPORTED_IRQ = 1 << 25
97};
98
99enum { /* adapter interrupt-maintained statistics */
100 STAT_ULP_CH0_PBL_OOB,
101 STAT_ULP_CH1_PBL_OOB,
102 STAT_PCI_CORR_ECC,
103
104 IRQ_NUM_STATS /* keep last */
105};
106
107enum {
108 SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */
109 SGE_RXQ_PER_SET = 2, /* # of Rx queues per set */
110 SGE_TXQ_PER_SET = 3 /* # of Tx queues per set */
111};
112
113enum sge_context_type { /* SGE egress context types */
114 SGE_CNTXT_RDMA = 0,
115 SGE_CNTXT_ETH = 2,
116 SGE_CNTXT_OFLD = 4,
117 SGE_CNTXT_CTRL = 5
118};
119
120enum {
121 AN_PKT_SIZE = 32, /* async notification packet size */
122 IMMED_PKT_SIZE = 48 /* packet size for immediate data */
123};
124
125struct sg_ent { /* SGE scatter/gather entry */
126 u32 len[2];
127 u64 addr[2];
128};
129
130#ifndef SGE_NUM_GENBITS
131/* Must be 1 or 2 */
132# define SGE_NUM_GENBITS 2
133#endif
134
135#define TX_DESC_FLITS 16U
136#define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS)
137
138struct cphy;
139struct adapter;
140
141struct mdio_ops {
142 int (*read)(struct adapter *adapter, int phy_addr, int mmd_addr,
143 int reg_addr, unsigned int *val);
144 int (*write)(struct adapter *adapter, int phy_addr, int mmd_addr,
145 int reg_addr, unsigned int val);
146};
147
148struct adapter_info {
149 unsigned char nports; /* # of ports */
150 unsigned char phy_base_addr; /* MDIO PHY base address */
151 unsigned char mdien;
152 unsigned char mdiinv;
153 unsigned int gpio_out; /* GPIO output settings */
154 unsigned int gpio_intr; /* GPIO IRQ enable mask */
155 unsigned long caps; /* adapter capabilities */
156 const struct mdio_ops *mdio_ops; /* MDIO operations */
157 const char *desc; /* product description */
158};
159
160struct port_type_info {
161 void (*phy_prep)(struct cphy *phy, struct adapter *adapter,
162 int phy_addr, const struct mdio_ops *ops);
163 unsigned int caps;
164 const char *desc;
165};
166
167struct mc5_stats {
168 unsigned long parity_err;
169 unsigned long active_rgn_full;
170 unsigned long nfa_srch_err;
171 unsigned long unknown_cmd;
172 unsigned long reqq_parity_err;
173 unsigned long dispq_parity_err;
174 unsigned long del_act_empty;
175};
176
177struct mc7_stats {
178 unsigned long corr_err;
179 unsigned long uncorr_err;
180 unsigned long parity_err;
181 unsigned long addr_err;
182};
183
184struct mac_stats {
185 u64 tx_octets; /* total # of octets in good frames */
186 u64 tx_octets_bad; /* total # of octets in error frames */
187 u64 tx_frames; /* all good frames */
188 u64 tx_mcast_frames; /* good multicast frames */
189 u64 tx_bcast_frames; /* good broadcast frames */
190 u64 tx_pause; /* # of transmitted pause frames */
191 u64 tx_deferred; /* frames with deferred transmissions */
192 u64 tx_late_collisions; /* # of late collisions */
193 u64 tx_total_collisions; /* # of total collisions */
194 u64 tx_excess_collisions; /* frame errors from excessive collisions */
195 u64 tx_underrun; /* # of Tx FIFO underruns */
196 u64 tx_len_errs; /* # of Tx length errors */
197 u64 tx_mac_internal_errs; /* # of internal MAC errors on Tx */
198 u64 tx_excess_deferral; /* # of frames with excessive deferral */
199 u64 tx_fcs_errs; /* # of frames with bad FCS */
200
201 u64 tx_frames_64; /* # of Tx frames in a particular range */
202 u64 tx_frames_65_127;
203 u64 tx_frames_128_255;
204 u64 tx_frames_256_511;
205 u64 tx_frames_512_1023;
206 u64 tx_frames_1024_1518;
207 u64 tx_frames_1519_max;
208
209 u64 rx_octets; /* total # of octets in good frames */
210 u64 rx_octets_bad; /* total # of octets in error frames */
211 u64 rx_frames; /* all good frames */
212 u64 rx_mcast_frames; /* good multicast frames */
213 u64 rx_bcast_frames; /* good broadcast frames */
214 u64 rx_pause; /* # of received pause frames */
215 u64 rx_fcs_errs; /* # of received frames with bad FCS */
216 u64 rx_align_errs; /* alignment errors */
217 u64 rx_symbol_errs; /* symbol errors */
218 u64 rx_data_errs; /* data errors */
219 u64 rx_sequence_errs; /* sequence errors */
220 u64 rx_runt; /* # of runt frames */
221 u64 rx_jabber; /* # of jabber frames */
222 u64 rx_short; /* # of short frames */
223 u64 rx_too_long; /* # of oversized frames */
224 u64 rx_mac_internal_errs; /* # of internal MAC errors on Rx */
225
226 u64 rx_frames_64; /* # of Rx frames in a particular range */
227 u64 rx_frames_65_127;
228 u64 rx_frames_128_255;
229 u64 rx_frames_256_511;
230 u64 rx_frames_512_1023;
231 u64 rx_frames_1024_1518;
232 u64 rx_frames_1519_max;
233
234 u64 rx_cong_drops; /* # of Rx drops due to SGE congestion */
235
236 unsigned long tx_fifo_parity_err;
237 unsigned long rx_fifo_parity_err;
238 unsigned long tx_fifo_urun;
239 unsigned long rx_fifo_ovfl;
240 unsigned long serdes_signal_loss;
241 unsigned long xaui_pcs_ctc_err;
242 unsigned long xaui_pcs_align_change;
243};
244
245struct tp_mib_stats {
246 u32 ipInReceive_hi;
247 u32 ipInReceive_lo;
248 u32 ipInHdrErrors_hi;
249 u32 ipInHdrErrors_lo;
250 u32 ipInAddrErrors_hi;
251 u32 ipInAddrErrors_lo;
252 u32 ipInUnknownProtos_hi;
253 u32 ipInUnknownProtos_lo;
254 u32 ipInDiscards_hi;
255 u32 ipInDiscards_lo;
256 u32 ipInDelivers_hi;
257 u32 ipInDelivers_lo;
258 u32 ipOutRequests_hi;
259 u32 ipOutRequests_lo;
260 u32 ipOutDiscards_hi;
261 u32 ipOutDiscards_lo;
262 u32 ipOutNoRoutes_hi;
263 u32 ipOutNoRoutes_lo;
264 u32 ipReasmTimeout;
265 u32 ipReasmReqds;
266 u32 ipReasmOKs;
267 u32 ipReasmFails;
268
269 u32 reserved[8];
270
271 u32 tcpActiveOpens;
272 u32 tcpPassiveOpens;
273 u32 tcpAttemptFails;
274 u32 tcpEstabResets;
275 u32 tcpOutRsts;
276 u32 tcpCurrEstab;
277 u32 tcpInSegs_hi;
278 u32 tcpInSegs_lo;
279 u32 tcpOutSegs_hi;
280 u32 tcpOutSegs_lo;
281 u32 tcpRetransSeg_hi;
282 u32 tcpRetransSeg_lo;
283 u32 tcpInErrs_hi;
284 u32 tcpInErrs_lo;
285 u32 tcpRtoMin;
286 u32 tcpRtoMax;
287};
288
289struct tp_params {
290 unsigned int nchan; /* # of channels */
291 unsigned int pmrx_size; /* total PMRX capacity */
292 unsigned int pmtx_size; /* total PMTX capacity */
293 unsigned int cm_size; /* total CM capacity */
294 unsigned int chan_rx_size; /* per channel Rx size */
295 unsigned int chan_tx_size; /* per channel Tx size */
296 unsigned int rx_pg_size; /* Rx page size */
297 unsigned int tx_pg_size; /* Tx page size */
298 unsigned int rx_num_pgs; /* # of Rx pages */
299 unsigned int tx_num_pgs; /* # of Tx pages */
300 unsigned int ntimer_qs; /* # of timer queues */
301};
302
303struct qset_params { /* SGE queue set parameters */
304 unsigned int polling; /* polling/interrupt service for rspq */
305 unsigned int coalesce_usecs; /* irq coalescing timer */
306 unsigned int rspq_size; /* # of entries in response queue */
307 unsigned int fl_size; /* # of entries in regular free list */
308 unsigned int jumbo_size; /* # of entries in jumbo free list */
309 unsigned int txq_size[SGE_TXQ_PER_SET]; /* Tx queue sizes */
310 unsigned int cong_thres; /* FL congestion threshold */
311};
312
313struct sge_params {
314 unsigned int max_pkt_size; /* max offload pkt size */
315 struct qset_params qset[SGE_QSETS];
316};
317
318struct mc5_params {
319 unsigned int mode; /* selects MC5 width */
320 unsigned int nservers; /* size of server region */
321 unsigned int nfilters; /* size of filter region */
322 unsigned int nroutes; /* size of routing region */
323};
324
325/* Default MC5 region sizes */
326enum {
327 DEFAULT_NSERVERS = 512,
328 DEFAULT_NFILTERS = 128
329};
330
331/* MC5 modes, these must be non-0 */
332enum {
333 MC5_MODE_144_BIT = 1,
334 MC5_MODE_72_BIT = 2
335};
336
337struct vpd_params {
338 unsigned int cclk;
339 unsigned int mclk;
340 unsigned int uclk;
341 unsigned int mdc;
342 unsigned int mem_timing;
343 u8 eth_base[6];
344 u8 port_type[MAX_NPORTS];
345 unsigned short xauicfg[2];
346};
347
348struct pci_params {
349 unsigned int vpd_cap_addr;
350 unsigned int pcie_cap_addr;
351 unsigned short speed;
352 unsigned char width;
353 unsigned char variant;
354};
355
356enum {
357 PCI_VARIANT_PCI,
358 PCI_VARIANT_PCIX_MODE1_PARITY,
359 PCI_VARIANT_PCIX_MODE1_ECC,
360 PCI_VARIANT_PCIX_266_MODE2,
361 PCI_VARIANT_PCIE
362};
363
364struct adapter_params {
365 struct sge_params sge;
366 struct mc5_params mc5;
367 struct tp_params tp;
368 struct vpd_params vpd;
369 struct pci_params pci;
370
371 const struct adapter_info *info;
372
373 unsigned short mtus[NMTUS];
374 unsigned short a_wnd[NCCTRL_WIN];
375 unsigned short b_wnd[NCCTRL_WIN];
376
377 unsigned int nports; /* # of ethernet ports */
378 unsigned int stats_update_period; /* MAC stats accumulation period */
379 unsigned int linkpoll_period; /* link poll period in 0.1s */
380 unsigned int rev; /* chip revision */
381};
382
383struct trace_params {
384 u32 sip;
385 u32 sip_mask;
386 u32 dip;
387 u32 dip_mask;
388 u16 sport;
389 u16 sport_mask;
390 u16 dport;
391 u16 dport_mask;
392 u32 vlan:12;
393 u32 vlan_mask:12;
394 u32 intf:4;
395 u32 intf_mask:4;
396 u8 proto;
397 u8 proto_mask;
398};
399
400struct link_config {
401 unsigned int supported; /* link capabilities */
402 unsigned int advertising; /* advertised capabilities */
403 unsigned short requested_speed; /* speed user has requested */
404 unsigned short speed; /* actual link speed */
405 unsigned char requested_duplex; /* duplex user has requested */
406 unsigned char duplex; /* actual link duplex */
407 unsigned char requested_fc; /* flow control user has requested */
408 unsigned char fc; /* actual link flow control */
409 unsigned char autoneg; /* autonegotiating? */
410 unsigned int link_ok; /* link up? */
411};
412
413#define SPEED_INVALID 0xffff
414#define DUPLEX_INVALID 0xff
415
416struct mc5 {
417 struct adapter *adapter;
418 unsigned int tcam_size;
419 unsigned char part_type;
420 unsigned char parity_enabled;
421 unsigned char mode;
422 struct mc5_stats stats;
423};
424
425static inline unsigned int t3_mc5_size(const struct mc5 *p)
426{
427 return p->tcam_size;
428}
429
430struct mc7 {
431 struct adapter *adapter; /* backpointer to adapter */
432 unsigned int size; /* memory size in bytes */
433 unsigned int width; /* MC7 interface width */
434 unsigned int offset; /* register address offset for MC7 instance */
435 const char *name; /* name of MC7 instance */
436 struct mc7_stats stats; /* MC7 statistics */
437};
438
439static inline unsigned int t3_mc7_size(const struct mc7 *p)
440{
441 return p->size;
442}
443
444struct cmac {
445 struct adapter *adapter;
446 unsigned int offset;
447 unsigned int nucast; /* # of address filters for unicast MACs */
448 struct mac_stats stats;
449};
450
451enum {
452 MAC_DIRECTION_RX = 1,
453 MAC_DIRECTION_TX = 2,
454 MAC_RXFIFO_SIZE = 32768
455};
456
457/* IEEE 802.3ae specified MDIO devices */
458enum {
459 MDIO_DEV_PMA_PMD = 1,
460 MDIO_DEV_WIS = 2,
461 MDIO_DEV_PCS = 3,
462 MDIO_DEV_XGXS = 4
463};
464
465/* PHY loopback direction */
466enum {
467 PHY_LOOPBACK_TX = 1,
468 PHY_LOOPBACK_RX = 2
469};
470
471/* PHY interrupt types */
472enum {
473 cphy_cause_link_change = 1,
474 cphy_cause_fifo_error = 2
475};
476
477/* PHY operations */
478struct cphy_ops {
479 void (*destroy)(struct cphy *phy);
480 int (*reset)(struct cphy *phy, int wait);
481
482 int (*intr_enable)(struct cphy *phy);
483 int (*intr_disable)(struct cphy *phy);
484 int (*intr_clear)(struct cphy *phy);
485 int (*intr_handler)(struct cphy *phy);
486
487 int (*autoneg_enable)(struct cphy *phy);
488 int (*autoneg_restart)(struct cphy *phy);
489
490 int (*advertise)(struct cphy *phy, unsigned int advertise_map);
491 int (*set_loopback)(struct cphy *phy, int mmd, int dir, int enable);
492 int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
493 int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
494 int *duplex, int *fc);
495 int (*power_down)(struct cphy *phy, int enable);
496};
497
498/* A PHY instance */
499struct cphy {
500 int addr; /* PHY address */
501 struct adapter *adapter; /* associated adapter */
502 unsigned long fifo_errors; /* FIFO over/under-flows */
503 const struct cphy_ops *ops; /* PHY operations */
504 int (*mdio_read)(struct adapter *adapter, int phy_addr, int mmd_addr,
505 int reg_addr, unsigned int *val);
506 int (*mdio_write)(struct adapter *adapter, int phy_addr, int mmd_addr,
507 int reg_addr, unsigned int val);
508};
509
510/* Convenience MDIO read/write wrappers */
511static inline int mdio_read(struct cphy *phy, int mmd, int reg,
512 unsigned int *valp)
513{
514 return phy->mdio_read(phy->adapter, phy->addr, mmd, reg, valp);
515}
516
517static inline int mdio_write(struct cphy *phy, int mmd, int reg,
518 unsigned int val)
519{
520 return phy->mdio_write(phy->adapter, phy->addr, mmd, reg, val);
521}
522
523/* Convenience initializer */
524static inline void cphy_init(struct cphy *phy, struct adapter *adapter,
525 int phy_addr, struct cphy_ops *phy_ops,
526 const struct mdio_ops *mdio_ops)
527{
528 phy->adapter = adapter;
529 phy->addr = phy_addr;
530 phy->ops = phy_ops;
531 if (mdio_ops) {
532 phy->mdio_read = mdio_ops->read;
533 phy->mdio_write = mdio_ops->write;
534 }
535}
536
537/* Accumulate MAC statistics every 180 seconds. For 1G we multiply by 10. */
538#define MAC_STATS_ACCUM_SECS 180
539
540#define XGM_REG(reg_addr, idx) \
541 ((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR))
542
543struct addr_val_pair {
544 unsigned int reg_addr;
545 unsigned int val;
546};
547
548#include "adapter.h"
549
550#ifndef PCI_VENDOR_ID_CHELSIO
551# define PCI_VENDOR_ID_CHELSIO 0x1425
552#endif
553
554#define for_each_port(adapter, iter) \
555 for (iter = 0; iter < (adapter)->params.nports; ++iter)
556
557#define adapter_info(adap) ((adap)->params.info)
558
559static inline int uses_xaui(const struct adapter *adap)
560{
561 return adapter_info(adap)->caps & SUPPORTED_AUI;
562}
563
564static inline int is_10G(const struct adapter *adap)
565{
566 return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full;
567}
568
569static inline int is_offload(const struct adapter *adap)
570{
571 return adapter_info(adap)->caps & SUPPORTED_OFFLOAD;
572}
573
574static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
575{
576 return adap->params.vpd.cclk / 1000;
577}
578
579static inline unsigned int is_pcie(const struct adapter *adap)
580{
581 return adap->params.pci.variant == PCI_VARIANT_PCIE;
582}
583
584void t3_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
585 u32 val);
586void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
587 int n, unsigned int offset);
588int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
589 int polarity, int attempts, int delay, u32 *valp);
590static inline int t3_wait_op_done(struct adapter *adapter, int reg, u32 mask,
591 int polarity, int attempts, int delay)
592{
593 return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
594 delay, NULL);
595}
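/*
 * Editorial sketch: t3_wait_op_done() polls a register until the bits in
 * 'mask' reach the requested polarity (0 = wait for the bits to clear,
 * non-zero = wait for them to be set), giving up after 'attempts' polls with
 * 'delay' between them (units follow t3_wait_op_done_val(), assumed to be
 * microseconds).  The wrapper name below is hypothetical.
 */
static inline int example_wait_for_idle(struct adapter *adap, int reg, u32 mask)
{
	/* up to 10 polls for all bits in 'mask' to clear */
	return t3_wait_op_done(adap, reg, mask, 0, 10, 1);
}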
596int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
597 unsigned int set);
598int t3_phy_reset(struct cphy *phy, int mmd, int wait);
599int t3_phy_advertise(struct cphy *phy, unsigned int advert);
600int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex);
601
602void t3_intr_enable(struct adapter *adapter);
603void t3_intr_disable(struct adapter *adapter);
604void t3_intr_clear(struct adapter *adapter);
605void t3_port_intr_enable(struct adapter *adapter, int idx);
606void t3_port_intr_disable(struct adapter *adapter, int idx);
607void t3_port_intr_clear(struct adapter *adapter, int idx);
608int t3_slow_intr_handler(struct adapter *adapter);
609int t3_phy_intr_handler(struct adapter *adapter);
610
611void t3_link_changed(struct adapter *adapter, int port_id);
612int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
613const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
614int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
615int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
616int t3_seeprom_wp(struct adapter *adapter, int enable);
617int t3_read_flash(struct adapter *adapter, unsigned int addr,
618 unsigned int nwords, u32 *data, int byte_oriented);
619int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size);
620int t3_get_fw_version(struct adapter *adapter, u32 *vers);
621int t3_check_fw_version(struct adapter *adapter);
622int t3_init_hw(struct adapter *adapter, u32 fw_params);
623void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
624void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
625int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
626 int reset);
627void t3_led_ready(struct adapter *adapter);
628void t3_fatal_err(struct adapter *adapter);
629void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
630void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
631 const u8 * cpus, const u16 *rspq);
632int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map);
633int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask);
634int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
635 unsigned int n, unsigned int *valp);
636int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
637 u64 *buf);
638
639int t3_mac_reset(struct cmac *mac);
640void t3b_pcs_reset(struct cmac *mac);
641int t3_mac_enable(struct cmac *mac, int which);
642int t3_mac_disable(struct cmac *mac, int which);
643int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
644int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm);
645int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
646int t3_mac_set_num_ucast(struct cmac *mac, int n);
647const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
648int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
649
650void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode);
651int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
652 unsigned int nroutes);
653void t3_mc5_intr_handler(struct mc5 *mc5);
654int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start, unsigned int n,
655 u32 *buf);
656
657int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh);
658void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size);
659void t3_tp_set_offload_mode(struct adapter *adap, int enable);
660void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps);
661void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
662 unsigned short alpha[NCCTRL_WIN],
663 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap);
664void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS]);
665void t3_get_cong_cntl_tab(struct adapter *adap,
666 unsigned short incr[NMTUS][NCCTRL_WIN]);
667void t3_config_trace_filter(struct adapter *adapter,
668 const struct trace_params *tp, int filter_index,
669 int invert, int enable);
670int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched);
671
672void t3_sge_prep(struct adapter *adap, struct sge_params *p);
673void t3_sge_init(struct adapter *adap, struct sge_params *p);
674int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
675 enum sge_context_type type, int respq, u64 base_addr,
676 unsigned int size, unsigned int token, int gen,
677 unsigned int cidx);
678int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
679 int gts_enable, u64 base_addr, unsigned int size,
680 unsigned int esize, unsigned int cong_thres, int gen,
681 unsigned int cidx);
682int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
683 int irq_vec_idx, u64 base_addr, unsigned int size,
684 unsigned int fl_thres, int gen, unsigned int cidx);
685int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
686 unsigned int size, int rspq, int ovfl_mode,
687 unsigned int credits, unsigned int credit_thres);
688int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable);
689int t3_sge_disable_fl(struct adapter *adapter, unsigned int id);
690int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id);
691int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id);
692int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4]);
693int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4]);
694int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4]);
695int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4]);
696int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
697 unsigned int credits);
698
699void t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
700 int phy_addr, const struct mdio_ops *mdio_ops);
701void t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
702 int phy_addr, const struct mdio_ops *mdio_ops);
703void t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
704 int phy_addr, const struct mdio_ops *mdio_ops);
705void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
706 const struct mdio_ops *mdio_ops);
707void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
708 int phy_addr, const struct mdio_ops *mdio_ops);
709#endif /* __CHELSIO_COMMON_H */
diff --git a/drivers/net/cxgb3/cxgb3_ctl_defs.h b/drivers/net/cxgb3/cxgb3_ctl_defs.h
new file mode 100644
index 000000000000..0fdc36529eb6
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_ctl_defs.h
@@ -0,0 +1,142 @@
1/*
2 * Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
3 *
4 * This program is distributed in the hope that it will be useful, but WITHOUT
5 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
6 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
7 * release for licensing terms and conditions.
8 */
9
10#ifndef _CXGB3_OFFLOAD_CTL_DEFS_H
11#define _CXGB3_OFFLOAD_CTL_DEFS_H
12
13enum {
14 GET_MAX_OUTSTANDING_WR,
15 GET_TX_MAX_CHUNK,
16 GET_TID_RANGE,
17 GET_STID_RANGE,
18 GET_RTBL_RANGE,
19 GET_L2T_CAPACITY,
20 GET_MTUS,
21 GET_WR_LEN,
22 GET_IFF_FROM_MAC,
23 GET_DDP_PARAMS,
24 GET_PORTS,
25
26 ULP_ISCSI_GET_PARAMS,
27 ULP_ISCSI_SET_PARAMS,
28
29 RDMA_GET_PARAMS,
30 RDMA_CQ_OP,
31 RDMA_CQ_SETUP,
32 RDMA_CQ_DISABLE,
33 RDMA_CTRL_QP_SETUP,
34 RDMA_GET_MEM,
35};
36
37/*
38 * Structure used to describe a TID range. Valid TIDs are [base, base+num).
39 */
40struct tid_range {
41 unsigned int base; /* first TID */
42 unsigned int num; /* number of TIDs in range */
43};
44
45/*
46 * Structure used to request the size and contents of the MTU table.
47 */
48struct mtutab {
49 unsigned int size; /* # of entries in the MTU table */
50 const unsigned short *mtus; /* the MTU table values */
51};
52
53struct net_device;
54
55/*
56 * Structure used to request the adapter net_device owning a given MAC address.
57 */
58struct iff_mac {
59 struct net_device *dev; /* the net_device */
60 const unsigned char *mac_addr; /* MAC address to lookup */
61 u16 vlan_tag;
62};
63
64struct pci_dev;
65
66/*
67 * Structure used to request the TCP DDP parameters.
68 */
69struct ddp_params {
70 unsigned int llimit; /* TDDP region start address */
71 unsigned int ulimit; /* TDDP region end address */
72 unsigned int tag_mask; /* TDDP tag mask */
73 struct pci_dev *pdev;
74};
75
76struct adap_ports {
77 unsigned int nports; /* number of ports on this adapter */
78 struct net_device *lldevs[2];
79};
80
81/*
82 * Structure used to return information to the iscsi layer.
83 */
84struct ulp_iscsi_info {
85 unsigned int offset;
86 unsigned int llimit;
87 unsigned int ulimit;
88 unsigned int tagmask;
89 unsigned int pgsz3;
90 unsigned int pgsz2;
91 unsigned int pgsz1;
92 unsigned int pgsz0;
93 unsigned int max_rxsz;
94 unsigned int max_txsz;
95 struct pci_dev *pdev;
96};
97
98/*
99 * Structure used to return information to the RDMA layer.
100 */
101struct rdma_info {
102 unsigned int tpt_base; /* TPT base address */
103 unsigned int tpt_top; /* TPT last entry address */
104 unsigned int pbl_base; /* PBL base address */
105 unsigned int pbl_top; /* PBL last entry address */
106 unsigned int rqt_base; /* RQT base address */
107 unsigned int rqt_top; /* RQT last entry address */
108 unsigned int udbell_len; /* user doorbell region length */
109 unsigned long udbell_physbase; /* user doorbell physical start addr */
110 void __iomem *kdb_addr; /* kernel doorbell register address */
111 struct pci_dev *pdev; /* associated PCI device */
112};
113
114/*
115 * Structure used to request an operation on an RDMA completion queue.
116 */
117struct rdma_cq_op {
118 unsigned int id;
119 unsigned int op;
120 unsigned int credits;
121};
122
123/*
124 * Structure used to setup RDMA completion queues.
125 */
126struct rdma_cq_setup {
127 unsigned int id;
128 unsigned long long base_addr;
129 unsigned int size;
130 unsigned int credits;
131 unsigned int credit_thres;
132 unsigned int ovfl_mode;
133};
134
135/*
136 * Structure used to setup the RDMA control egress context.
137 */
138struct rdma_ctrlqp_setup {
139 unsigned long long base_addr;
140 unsigned int size;
141};
142#endif /* _CXGB3_OFFLOAD_CTL_DEFS_H */
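/*
 * Editorial sketch: the GET_xxx and RDMA_xxx opcodes above form the control
 * interface that the offload and RDMA drivers use against a t3cdev.  This
 * assumes the t3cdev exposes a ctl(dev, opcode, data) callback, which
 * cxgb3_offload.c installs; the helper name below is hypothetical.
 */
static int example_get_tid_range(struct t3cdev *tdev, struct tid_range *tids)
{
	return tdev->ctl(tdev, GET_TID_RANGE, tids);
}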
diff --git a/drivers/net/cxgb3/cxgb3_defs.h b/drivers/net/cxgb3/cxgb3_defs.h
new file mode 100644
index 000000000000..82344c2bbc13
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_defs.h
@@ -0,0 +1,99 @@
1/*
2 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _CHELSIO_DEFS_H
34#define _CHELSIO_DEFS_H
35
36#include <linux/skbuff.h>
37#include <net/tcp.h>
38
39#include "t3cdev.h"
40
41#include "cxgb3_offload.h"
42
43#define VALIDATE_TID 1
44
45void *cxgb_alloc_mem(unsigned long size);
46void cxgb_free_mem(void *addr);
47void cxgb_neigh_update(struct neighbour *neigh);
48void cxgb_redirect(struct dst_entry *old, struct dst_entry *new);
49
50/*
51 * Map an ATID or STID to its entry in the corresponding TID table.
52 */
53static inline union active_open_entry *atid2entry(const struct tid_info *t,
54 unsigned int atid)
55{
56 return &t->atid_tab[atid - t->atid_base];
57}
58
59static inline union listen_entry *stid2entry(const struct tid_info *t,
60 unsigned int stid)
61{
62 return &t->stid_tab[stid - t->stid_base];
63}
64
65/*
66 * Find the connection corresponding to a TID.
67 */
68static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
69 unsigned int tid)
70{
71 return tid < t->ntids ? &(t->tid_tab[tid]) : NULL;
72}
73
74/*
75 * Find the connection corresponding to a server TID.
76 */
77static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t,
78 unsigned int tid)
79{
80 if (tid < t->stid_base || tid >= t->stid_base + t->nstids)
81 return NULL;
82 return &(stid2entry(t, tid)->t3c_tid);
83}
84
85/*
86 * Find the connection corresponding to an active-open TID.
87 */
88static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t,
89 unsigned int tid)
90{
91 if (tid < t->atid_base || tid >= t->atid_base + t->natids)
92 return NULL;
93 return &(atid2entry(t, tid)->t3c_tid);
94}
95
96int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n);
97int attach_t3cdev(struct t3cdev *dev);
98void detach_t3cdev(struct t3cdev *dev);
99#endif
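/*
 * Editorial sketch: a CPL message handler would map the hardware TID carried
 * in an incoming message back to its connection state with lookup_tid() and
 * bail out on an out-of-range TID.  The ctx field is assumed from
 * struct t3c_tid_entry in cxgb3_offload.h; the helper name is hypothetical.
 */
static inline void *example_tid_to_ctx(const struct tid_info *t,
				       unsigned int hwtid)
{
	struct t3c_tid_entry *e = lookup_tid(t, hwtid);

	return e ? e->ctx : NULL;
}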
diff --git a/drivers/net/cxgb3/cxgb3_ioctl.h b/drivers/net/cxgb3/cxgb3_ioctl.h
new file mode 100644
index 000000000000..1ee77b28cdf2
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_ioctl.h
@@ -0,0 +1,165 @@
1/*
2 * This file is part of the Chelsio T3 Ethernet driver for Linux.
3 *
4 * Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 */
11
12#ifndef __CHIOCTL_H__
13#define __CHIOCTL_H__
14
15/*
16 * Ioctl commands specific to this driver.
17 */
18enum {
19 CHELSIO_SETREG = 1024,
20 CHELSIO_GETREG,
21 CHELSIO_SETTPI,
22 CHELSIO_GETTPI,
23 CHELSIO_GETMTUTAB,
24 CHELSIO_SETMTUTAB,
25 CHELSIO_GETMTU,
26 CHELSIO_SET_PM,
27 CHELSIO_GET_PM,
28 CHELSIO_GET_TCAM,
29 CHELSIO_SET_TCAM,
30 CHELSIO_GET_TCB,
31 CHELSIO_GET_MEM,
32 CHELSIO_LOAD_FW,
33 CHELSIO_GET_PROTO,
34 CHELSIO_SET_PROTO,
35 CHELSIO_SET_TRACE_FILTER,
36 CHELSIO_SET_QSET_PARAMS,
37 CHELSIO_GET_QSET_PARAMS,
38 CHELSIO_SET_QSET_NUM,
39 CHELSIO_GET_QSET_NUM,
40 CHELSIO_SET_PKTSCHED,
41};
42
43struct ch_reg {
44 uint32_t cmd;
45 uint32_t addr;
46 uint32_t val;
47};
48
49struct ch_cntxt {
50 uint32_t cmd;
51 uint32_t cntxt_type;
52 uint32_t cntxt_id;
53 uint32_t data[4];
54};
55
56/* context types */
57enum { CNTXT_TYPE_EGRESS, CNTXT_TYPE_FL, CNTXT_TYPE_RSP, CNTXT_TYPE_CQ };
58
59struct ch_desc {
60 uint32_t cmd;
61 uint32_t queue_num;
62 uint32_t idx;
63 uint32_t size;
64 uint8_t data[128];
65};
66
67struct ch_mem_range {
68 uint32_t cmd;
69 uint32_t mem_id;
70 uint32_t addr;
71 uint32_t len;
72 uint32_t version;
73 uint8_t buf[0];
74};
75
76struct ch_qset_params {
77 uint32_t cmd;
78 uint32_t qset_idx;
79 int32_t txq_size[3];
80 int32_t rspq_size;
81 int32_t fl_size[2];
82 int32_t intr_lat;
83 int32_t polling;
84 int32_t cong_thres;
85};
86
87struct ch_pktsched_params {
88 uint32_t cmd;
89 uint8_t sched;
90 uint8_t idx;
91 uint8_t min;
92 uint8_t max;
93 uint8_t binding;
94};
95
96#ifndef TCB_SIZE
97# define TCB_SIZE 128
98#endif
99
100/* TCB size in 32-bit words */
101#define TCB_WORDS (TCB_SIZE / 4)
102
103enum { MEM_CM, MEM_PMRX, MEM_PMTX }; /* ch_mem_range.mem_id values */
104
105struct ch_mtus {
106 uint32_t cmd;
107 uint32_t nmtus;
108 uint16_t mtus[NMTUS];
109};
110
111struct ch_pm {
112 uint32_t cmd;
113 uint32_t tx_pg_sz;
114 uint32_t tx_num_pg;
115 uint32_t rx_pg_sz;
116 uint32_t rx_num_pg;
117 uint32_t pm_total;
118};
119
120struct ch_tcam {
121 uint32_t cmd;
122 uint32_t tcam_size;
123 uint32_t nservers;
124 uint32_t nroutes;
125 uint32_t nfilters;
126};
127
128struct ch_tcb {
129 uint32_t cmd;
130 uint32_t tcb_index;
131 uint32_t tcb_data[TCB_WORDS];
132};
133
134struct ch_tcam_word {
135 uint32_t cmd;
136 uint32_t addr;
137 uint32_t buf[3];
138};
139
140struct ch_trace {
141 uint32_t cmd;
142 uint32_t sip;
143 uint32_t sip_mask;
144 uint32_t dip;
145 uint32_t dip_mask;
146 uint16_t sport;
147 uint16_t sport_mask;
148 uint16_t dport;
149 uint16_t dport_mask;
150 uint32_t vlan:12;
151 uint32_t vlan_mask:12;
152 uint32_t intf:4;
153 uint32_t intf_mask:4;
154 uint8_t proto;
155 uint8_t proto_mask;
156 uint8_t invert_match:1;
157 uint8_t config_tx:1;
158 uint8_t config_rx:1;
159 uint8_t trace_tx:1;
160 uint8_t trace_rx:1;
161};
162
163#define SIOCCHIOCTL SIOCDEVPRIVATE
164
165#endif
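/*
 * Editorial user-space sketch (not part of the driver): the private commands
 * above are multiplexed over SIOCCHIOCTL.  A management tool embeds the
 * command code in the payload struct and passes it through ifr_data; the
 * function name read_t3_reg and the assumption that the driver copies the
 * struct back to user space are illustrative only.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>

static int read_t3_reg(int sock, const char *ifname, uint32_t addr,
		       uint32_t *val)
{
	struct ifreq ifr;
	struct ch_reg op = { .cmd = CHELSIO_GETREG, .addr = addr, .val = 0 };

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&op;

	if (ioctl(sock, SIOCCHIOCTL, &ifr) < 0)
		return -1;
	*val = op.val;
	return 0;
}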
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
new file mode 100644
index 000000000000..54c49acd86b4
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -0,0 +1,2474 @@
1/*
2 * This file is part of the Chelsio T3 Ethernet driver for Linux.
3 *
4 * Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 */
11
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/init.h>
15#include <linux/pci.h>
16#include <linux/dma-mapping.h>
17#include <linux/netdevice.h>
18#include <linux/etherdevice.h>
19#include <linux/if_vlan.h>
20#include <linux/mii.h>
21#include <linux/sockios.h>
22#include <linux/workqueue.h>
23#include <linux/proc_fs.h>
24#include <linux/rtnetlink.h>
25#include <asm/uaccess.h>
26
27#include "common.h"
28#include "cxgb3_ioctl.h"
29#include "regs.h"
30#include "cxgb3_offload.h"
31#include "version.h"
32
33#include "cxgb3_ctl_defs.h"
34#include "t3_cpl.h"
35#include "firmware_exports.h"
36
37enum {
38 MAX_TXQ_ENTRIES = 16384,
39 MAX_CTRL_TXQ_ENTRIES = 1024,
40 MAX_RSPQ_ENTRIES = 16384,
41 MAX_RX_BUFFERS = 16384,
42 MAX_RX_JUMBO_BUFFERS = 16384,
43 MIN_TXQ_ENTRIES = 4,
44 MIN_CTRL_TXQ_ENTRIES = 4,
45 MIN_RSPQ_ENTRIES = 32,
46 MIN_FL_ENTRIES = 32
47};
48
49#define PORT_MASK ((1 << MAX_NPORTS) - 1)
50
51#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
52 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
53 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
54
55#define EEPROM_MAGIC 0x38E2F10C
56
57#define to_net_dev(class) container_of(class, struct net_device, class_dev)
58
59#define CH_DEVICE(devid, ssid, idx) \
60 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
61
62static const struct pci_device_id cxgb3_pci_tbl[] = {
63 CH_DEVICE(0x20, 1, 0), /* PE9000 */
64 CH_DEVICE(0x21, 1, 1), /* T302E */
65 CH_DEVICE(0x22, 1, 2), /* T310E */
66 CH_DEVICE(0x23, 1, 3), /* T320X */
67 CH_DEVICE(0x24, 1, 1), /* T302X */
68 CH_DEVICE(0x25, 1, 3), /* T320E */
69 CH_DEVICE(0x26, 1, 2), /* T310X */
70 CH_DEVICE(0x30, 1, 2), /* T3B10 */
71 CH_DEVICE(0x31, 1, 3), /* T3B20 */
72 CH_DEVICE(0x32, 1, 1), /* T3B02 */
73 {0,}
74};
75
76MODULE_DESCRIPTION(DRV_DESC);
77MODULE_AUTHOR("Chelsio Communications");
78MODULE_LICENSE("GPL");
79MODULE_VERSION(DRV_VERSION);
80MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
81
82static int dflt_msg_enable = DFLT_MSG_ENABLE;
83
84module_param(dflt_msg_enable, int, 0644);
85MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
86
87/*
88 * The driver uses the best interrupt scheme available on a platform in the
89 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
90 * of these schemes the driver may consider as follows:
91 *
92 * msi = 2: choose from among all three options
93 * msi = 1: only consider MSI and pin interrupts
94 * msi = 0: force pin interrupts
95 */
96static int msi = 2;
97
98module_param(msi, int, 0644);
99MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
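/*
 * For example, assuming the driver is loaded as the cxgb3 module:
 *
 *	modprobe cxgb3 msi=1	(skip MSI-X, use MSI or pin interrupts)
 *	modprobe cxgb3 msi=0	(force legacy pin interrupts)
 */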
100
101/*
102 * The driver enables offload by default.
103 * To disable it, use ofld_disable = 1.
104 */
105
106static int ofld_disable = 0;
107
108module_param(ofld_disable, int, 0644);
109MODULE_PARM_DESC(ofld_disable, "whether to disable offload at init time or not");
110
111/*
112 * We have work elements that we need to cancel when an interface is taken
113 * down. Normally the work elements would be executed by keventd but that
114 * can deadlock because of linkwatch. If our close method takes the rtnl
115 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
116 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
117 * for our work to complete. Get our own work queue to solve this.
118 */
119static struct workqueue_struct *cxgb3_wq;
120
121/**
122 * link_report - show link status and link speed/duplex
123 * @dev: the port whose settings are to be reported
124 *
125 * Shows the link status, speed, and duplex of a port.
126 */
127static void link_report(struct net_device *dev)
128{
129 if (!netif_carrier_ok(dev))
130 printk(KERN_INFO "%s: link down\n", dev->name);
131 else {
132 const char *s = "10Mbps";
133 const struct port_info *p = netdev_priv(dev);
134
135 switch (p->link_config.speed) {
136 case SPEED_10000:
137 s = "10Gbps";
138 break;
139 case SPEED_1000:
140 s = "1000Mbps";
141 break;
142 case SPEED_100:
143 s = "100Mbps";
144 break;
145 }
146
147 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
148 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
149 }
150}
151
152/**
153 * t3_os_link_changed - handle link status changes
154 * @adapter: the adapter associated with the link change
155 * @port_id: the port index whose link status has changed
156 * @link_stat: the new status of the link
157 * @speed: the new speed setting
158 * @duplex: the new duplex setting
159 * @pause: the new flow-control setting
160 *
161 * This is the OS-dependent handler for link status changes. The OS
162 * neutral handler takes care of most of the processing for these events,
163 * then calls this handler for any OS-specific processing.
164 */
165void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
166 int speed, int duplex, int pause)
167{
168 struct net_device *dev = adapter->port[port_id];
169
170 /* Skip changes from disabled ports. */
171 if (!netif_running(dev))
172 return;
173
174 if (link_stat != netif_carrier_ok(dev)) {
175 if (link_stat)
176 netif_carrier_on(dev);
177 else
178 netif_carrier_off(dev);
179 link_report(dev);
180 }
181}
182
183static void cxgb_set_rxmode(struct net_device *dev)
184{
185 struct t3_rx_mode rm;
186 struct port_info *pi = netdev_priv(dev);
187
188 init_rx_mode(&rm, dev, dev->mc_list);
189 t3_mac_set_rx_mode(&pi->mac, &rm);
190}
191
192/**
193 * link_start - enable a port
194 * @dev: the device to enable
195 *
196 * Performs the MAC and PHY actions needed to enable a port.
197 */
198static void link_start(struct net_device *dev)
199{
200 struct t3_rx_mode rm;
201 struct port_info *pi = netdev_priv(dev);
202 struct cmac *mac = &pi->mac;
203
204 init_rx_mode(&rm, dev, dev->mc_list);
205 t3_mac_reset(mac);
206 t3_mac_set_mtu(mac, dev->mtu);
207 t3_mac_set_address(mac, 0, dev->dev_addr);
208 t3_mac_set_rx_mode(mac, &rm);
209 t3_link_start(&pi->phy, mac, &pi->link_config);
210 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
211}
212
213static inline void cxgb_disable_msi(struct adapter *adapter)
214{
215 if (adapter->flags & USING_MSIX) {
216 pci_disable_msix(adapter->pdev);
217 adapter->flags &= ~USING_MSIX;
218 } else if (adapter->flags & USING_MSI) {
219 pci_disable_msi(adapter->pdev);
220 adapter->flags &= ~USING_MSI;
221 }
222}
223
224/*
225 * Interrupt handler for asynchronous events used with MSI-X.
226 */
227static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
228{
229 t3_slow_intr_handler(cookie);
230 return IRQ_HANDLED;
231}
232
233/*
234 * Name the MSI-X interrupts.
235 */
236static void name_msix_vecs(struct adapter *adap)
237{
238 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
239
240 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
241 adap->msix_info[0].desc[n] = 0;
242
243 for_each_port(adap, j) {
244 struct net_device *d = adap->port[j];
245 const struct port_info *pi = netdev_priv(d);
246
247 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
248 snprintf(adap->msix_info[msi_idx].desc, n,
249 "%s (queue %d)", d->name, i);
250 adap->msix_info[msi_idx].desc[n] = 0;
251 }
252 }
253}
254
255static int request_msix_data_irqs(struct adapter *adap)
256{
257 int i, j, err, qidx = 0;
258
259 for_each_port(adap, i) {
260 int nqsets = adap2pinfo(adap, i)->nqsets;
261
262 for (j = 0; j < nqsets; ++j) {
263 err = request_irq(adap->msix_info[qidx + 1].vec,
264 t3_intr_handler(adap,
265 adap->sge.qs[qidx].
266 rspq.polling), 0,
267 adap->msix_info[qidx + 1].desc,
268 &adap->sge.qs[qidx]);
269 if (err) {
270 while (--qidx >= 0)
271 free_irq(adap->msix_info[qidx + 1].vec,
272 &adap->sge.qs[qidx]);
273 return err;
274 }
275 qidx++;
276 }
277 }
278 return 0;
279}
280
281/**
282 * setup_rss - configure RSS
283 * @adap: the adapter
284 *
285 * Sets up RSS to distribute packets to multiple receive queues. We
286 * configure the RSS CPU lookup table to distribute to the number of HW
287 * receive queues, and the response queue lookup table to narrow that
288 * down to the response queues actually configured for each port.
289 * We always configure the RSS mapping for two ports since the mapping
290 * table has plenty of entries.
291 */
292static void setup_rss(struct adapter *adap)
293{
294 int i;
295 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
296 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
297 u8 cpus[SGE_QSETS + 1];
298 u16 rspq_map[RSS_TABLE_SIZE];
299
300 for (i = 0; i < SGE_QSETS; ++i)
301 cpus[i] = i;
302 cpus[SGE_QSETS] = 0xff; /* terminator */
303
304 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
305 rspq_map[i] = i % nq0;
306 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
307 }
308
309 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
310 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
311 V_RRCPLCPUSIZE(6), cpus, rspq_map);
312}
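/*
 * Worked example for the mapping above, with hypothetical queue counts
 * nq0 = 2 and nq1 = 2: the first half of rspq_map cycles 0,1,0,1,... over
 * port 0's queue sets and the second half cycles 2,3,2,3,... over port 1's,
 * so RSS hash results landing in either half are spread across that port's
 * response queues.
 */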
313
314/*
315 * If we have multiple receive queues per port serviced by NAPI we need one
316 * netdevice per queue as NAPI operates on netdevices. We already have one
317 * netdevice, namely the one associated with the interface, so we use dummy
318 * ones for any additional queues. Note that these netdevices exist purely
319 * so that NAPI has something to work with, they do not represent network
320 * ports and are not registered.
321 */
322static int init_dummy_netdevs(struct adapter *adap)
323{
324 int i, j, dummy_idx = 0;
325 struct net_device *nd;
326
327 for_each_port(adap, i) {
328 struct net_device *dev = adap->port[i];
329 const struct port_info *pi = netdev_priv(dev);
330
331 for (j = 0; j < pi->nqsets - 1; j++) {
332 if (!adap->dummy_netdev[dummy_idx]) {
333 nd = alloc_netdev(0, "", ether_setup);
334 if (!nd)
335 goto free_all;
336
337 nd->priv = adap;
338 nd->weight = 64;
339 set_bit(__LINK_STATE_START, &nd->state);
340 adap->dummy_netdev[dummy_idx] = nd;
341 }
342 strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
343 dummy_idx++;
344 }
345 }
346 return 0;
347
348free_all:
349 while (--dummy_idx >= 0) {
350 free_netdev(adap->dummy_netdev[dummy_idx]);
351 adap->dummy_netdev[dummy_idx] = NULL;
352 }
353 return -ENOMEM;
354}
355
356/*
357 * Wait until all NAPI handlers are descheduled. This includes the handlers of
358 * both netdevices representing interfaces and the dummy ones for the extra
359 * queues.
360 */
361static void quiesce_rx(struct adapter *adap)
362{
363 int i;
364 struct net_device *dev;
365
366 for_each_port(adap, i) {
367 dev = adap->port[i];
368 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
369 msleep(1);
370 }
371
372 for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
373 dev = adap->dummy_netdev[i];
374 if (dev)
375 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
376 msleep(1);
377 }
378}
379
380/**
381 * setup_sge_qsets - configure SGE Tx/Rx/response queues
382 * @adap: the adapter
383 *
384 * Determines how many sets of SGE queues to use and initializes them.
385 * We support multiple queue sets per port if we have MSI-X, otherwise
386 * just one queue set per port.
387 */
388static int setup_sge_qsets(struct adapter *adap)
389{
390 int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
391 unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;
392
393 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
394 irq_idx = -1;
395
396 for_each_port(adap, i) {
397 struct net_device *dev = adap->port[i];
398 const struct port_info *pi = netdev_priv(dev);
399
400 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
401 err = t3_sge_alloc_qset(adap, qset_idx, 1,
402 (adap->flags & USING_MSIX) ? qset_idx + 1 :
403 irq_idx,
404 &adap->params.sge.qset[qset_idx], ntxq,
405 j == 0 ? dev :
406 adap->dummy_netdev[dummy_dev_idx++]);
407 if (err) {
408 t3_free_sge_resources(adap);
409 return err;
410 }
411 }
412 }
413
414 return 0;
415}
416
417static ssize_t attr_show(struct class_device *cd, char *buf,
418 ssize_t(*format) (struct adapter *, char *))
419{
420 ssize_t len;
421 struct adapter *adap = to_net_dev(cd)->priv;
422
423 /* Synchronize with ioctls that may shut down the device */
424 rtnl_lock();
425 len = (*format) (adap, buf);
426 rtnl_unlock();
427 return len;
428}
429
430static ssize_t attr_store(struct class_device *cd, const char *buf, size_t len,
431 ssize_t(*set) (struct adapter *, unsigned int),
432 unsigned int min_val, unsigned int max_val)
433{
434 char *endp;
435 ssize_t ret;
436 unsigned int val;
437 struct adapter *adap = to_net_dev(cd)->priv;
438
439 if (!capable(CAP_NET_ADMIN))
440 return -EPERM;
441
442 val = simple_strtoul(buf, &endp, 0);
443 if (endp == buf || val < min_val || val > max_val)
444 return -EINVAL;
445
446 rtnl_lock();
447 ret = (*set) (adap, val);
448 if (!ret)
449 ret = len;
450 rtnl_unlock();
451 return ret;
452}
453
454#define CXGB3_SHOW(name, val_expr) \
455static ssize_t format_##name(struct adapter *adap, char *buf) \
456{ \
457 return sprintf(buf, "%u\n", val_expr); \
458} \
459static ssize_t show_##name(struct class_device *cd, char *buf) \
460{ \
461 return attr_show(cd, buf, format_##name); \
462}
463
464static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
465{
466 if (adap->flags & FULL_INIT_DONE)
467 return -EBUSY;
468 if (val && adap->params.rev == 0)
469 return -EINVAL;
470 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
471 return -EINVAL;
472 adap->params.mc5.nfilters = val;
473 return 0;
474}
475
476static ssize_t store_nfilters(struct class_device *cd, const char *buf,
477 size_t len)
478{
479 return attr_store(cd, buf, len, set_nfilters, 0, ~0);
480}
481
482static ssize_t set_nservers(struct adapter *adap, unsigned int val)
483{
484 if (adap->flags & FULL_INIT_DONE)
485 return -EBUSY;
486 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
487 return -EINVAL;
488 adap->params.mc5.nservers = val;
489 return 0;
490}
491
492static ssize_t store_nservers(struct class_device *cd, const char *buf,
493 size_t len)
494{
495 return attr_store(cd, buf, len, set_nservers, 0, ~0);
496}
497
498#define CXGB3_ATTR_R(name, val_expr) \
499CXGB3_SHOW(name, val_expr) \
500static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
501
502#define CXGB3_ATTR_RW(name, val_expr, store_method) \
503CXGB3_SHOW(name, val_expr) \
504static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
505
506CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
507CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
508CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
509
510static struct attribute *cxgb3_attrs[] = {
511 &class_device_attr_cam_size.attr,
512 &class_device_attr_nfilters.attr,
513 &class_device_attr_nservers.attr,
514 NULL
515};
516
517static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
518
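/*
 * Rate arithmetic sketch for tm_attr_show() below, assuming vpd.cclk is the
 * core clock in kHz: with cclk = 200000, cpt = 100 and bpt = 64,
 * v = 200000 * 1000 / 100 = 2,000,000 ticks/s and v * bpt / 125 =
 * 1,024,000 Kbps (the / 125 is the * 8 / 1000 bytes-to-Kbps conversion).
 */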
519static ssize_t tm_attr_show(struct class_device *cd, char *buf, int sched)
520{
521 ssize_t len;
522 unsigned int v, addr, bpt, cpt;
523 struct adapter *adap = to_net_dev(cd)->priv;
524
525 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
526 rtnl_lock();
527 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
528 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
529 if (sched & 1)
530 v >>= 16;
531 bpt = (v >> 8) & 0xff;
532 cpt = v & 0xff;
533 if (!cpt)
534 len = sprintf(buf, "disabled\n");
535 else {
536 v = (adap->params.vpd.cclk * 1000) / cpt;
537 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
538 }
539 rtnl_unlock();
540 return len;
541}
542
543static ssize_t tm_attr_store(struct class_device *cd, const char *buf,
544 size_t len, int sched)
545{
546 char *endp;
547 ssize_t ret;
548 unsigned int val;
549 struct adapter *adap = to_net_dev(cd)->priv;
550
551 if (!capable(CAP_NET_ADMIN))
552 return -EPERM;
553
554 val = simple_strtoul(buf, &endp, 0);
555 if (endp == buf || val > 10000000)
556 return -EINVAL;
557
558 rtnl_lock();
559 ret = t3_config_sched(adap, val, sched);
560 if (!ret)
561 ret = len;
562 rtnl_unlock();
563 return ret;
564}
565
566#define TM_ATTR(name, sched) \
567static ssize_t show_##name(struct class_device *cd, char *buf) \
568{ \
569 return tm_attr_show(cd, buf, sched); \
570} \
571static ssize_t store_##name(struct class_device *cd, const char *buf, size_t len) \
572{ \
573 return tm_attr_store(cd, buf, len, sched); \
574} \
575static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
576
577TM_ATTR(sched0, 0);
578TM_ATTR(sched1, 1);
579TM_ATTR(sched2, 2);
580TM_ATTR(sched3, 3);
581TM_ATTR(sched4, 4);
582TM_ATTR(sched5, 5);
583TM_ATTR(sched6, 6);
584TM_ATTR(sched7, 7);
585
586static struct attribute *offload_attrs[] = {
587 &class_device_attr_sched0.attr,
588 &class_device_attr_sched1.attr,
589 &class_device_attr_sched2.attr,
590 &class_device_attr_sched3.attr,
591 &class_device_attr_sched4.attr,
592 &class_device_attr_sched5.attr,
593 &class_device_attr_sched6.attr,
594 &class_device_attr_sched7.attr,
595 NULL
596};
597
598static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
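/*
 * The sched* files are created on port 0's net device (see offload_open()),
 * so with a first interface named eth0 a scheduler could be set with, e.g.,
 *
 *	echo 100000 > /sys/class/net/eth0/sched0
 *
 * assuming the stored value is in Kbps, matching what tm_attr_show() reports.
 */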
599
600/*
601 * Sends an sk_buff to an offload queue driver
602 * after dealing with any active network taps.
603 */
604static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
605{
606 int ret;
607
608 local_bh_disable();
609 ret = t3_offload_tx(tdev, skb);
610 local_bh_enable();
611 return ret;
612}
613
614static int write_smt_entry(struct adapter *adapter, int idx)
615{
616 struct cpl_smt_write_req *req;
617 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
618
619 if (!skb)
620 return -ENOMEM;
621
622 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
623 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
624 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
625 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
626 req->iff = idx;
627 memset(req->src_mac1, 0, sizeof(req->src_mac1));
628 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
629 skb->priority = 1;
630 offload_tx(&adapter->tdev, skb);
631 return 0;
632}
633
634static int init_smt(struct adapter *adapter)
635{
636 int i;
637
638 for_each_port(adapter, i)
639 write_smt_entry(adapter, i);
640 return 0;
641}
642
643static void init_port_mtus(struct adapter *adapter)
644{
645 unsigned int mtus = adapter->port[0]->mtu;
646
647 if (adapter->port[1])
648 mtus |= adapter->port[1]->mtu << 16;
649 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
650}
651
652/**
653 * cxgb_up - enable the adapter
654 * @adap: adapter being enabled
655 *
656 * Called when the first port is enabled, this function performs the
657 * actions necessary to make an adapter operational, such as completing
658 * the initialization of HW modules, and enabling interrupts.
659 *
660 * Must be called with the rtnl lock held.
661 */
662static int cxgb_up(struct adapter *adap)
663{
664 int err = 0;
665
666 if (!(adap->flags & FULL_INIT_DONE)) {
667 err = t3_check_fw_version(adap);
668 if (err) {
669 dev_err(&adap->pdev->dev,
670 "adapter FW is not compatible with driver\n");
671 goto out;
672 }
673
674 err = init_dummy_netdevs(adap);
675 if (err)
676 goto out;
677
678 err = t3_init_hw(adap, 0);
679 if (err)
680 goto out;
681
682 err = setup_sge_qsets(adap);
683 if (err)
684 goto out;
685
686 setup_rss(adap);
687 adap->flags |= FULL_INIT_DONE;
688 }
689
690 t3_intr_clear(adap);
691
692 if (adap->flags & USING_MSIX) {
693 name_msix_vecs(adap);
694 err = request_irq(adap->msix_info[0].vec,
695 t3_async_intr_handler, 0,
696 adap->msix_info[0].desc, adap);
697 if (err)
698 goto irq_err;
699
700 if (request_msix_data_irqs(adap)) {
701 free_irq(adap->msix_info[0].vec, adap);
702 goto irq_err;
703 }
704 } else if ((err = request_irq(adap->pdev->irq,
705 t3_intr_handler(adap,
706 adap->sge.qs[0].rspq.
707 polling),
708 (adap->flags & USING_MSI) ? 0 : SA_SHIRQ,
709 adap->name, adap)))
710 goto irq_err;
711
712 t3_sge_start(adap);
713 t3_intr_enable(adap);
714out:
715 return err;
716irq_err:
717 CH_ERR(adap, "request_irq failed, err %d\n", err);
718 goto out;
719}
720
721/*
722 * Release resources when all the ports and offloading have been stopped.
723 */
724static void cxgb_down(struct adapter *adapter)
725{
726 t3_sge_stop(adapter);
727 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
728 t3_intr_disable(adapter);
729 spin_unlock_irq(&adapter->work_lock);
730
731 if (adapter->flags & USING_MSIX) {
732 int i, n = 0;
733
734 free_irq(adapter->msix_info[0].vec, adapter);
735 for_each_port(adapter, i)
736 n += adap2pinfo(adapter, i)->nqsets;
737
738 for (i = 0; i < n; ++i)
739 free_irq(adapter->msix_info[i + 1].vec,
740 &adapter->sge.qs[i]);
741 } else
742 free_irq(adapter->pdev->irq, adapter);
743
744 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
745 quiesce_rx(adapter);
746}
747
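/*
 * Note the units implied below: linkpoll_period is in tenths of a second,
 * stats_update_period in seconds.
 */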
748static void schedule_chk_task(struct adapter *adap)
749{
750 unsigned int timeo;
751
752 timeo = adap->params.linkpoll_period ?
753 (HZ * adap->params.linkpoll_period) / 10 :
754 adap->params.stats_update_period * HZ;
755 if (timeo)
756 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
757}
758
759static int offload_open(struct net_device *dev)
760{
761 struct adapter *adapter = dev->priv;
762 struct t3cdev *tdev = T3CDEV(dev);
763 int adap_up = adapter->open_device_map & PORT_MASK;
764 int err = 0;
765
766 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
767 return 0;
768
769 if (!adap_up && (err = cxgb_up(adapter)) < 0)
770 return err;
771
772 t3_tp_set_offload_mode(adapter, 1);
773 tdev->lldev = adapter->port[0];
774 err = cxgb3_offload_activate(adapter);
775 if (err)
776 goto out;
777
778 init_port_mtus(adapter);
779 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
780 adapter->params.b_wnd,
781 adapter->params.rev == 0 ?
782 adapter->port[0]->mtu : 0xffff);
783 init_smt(adapter);
784
785 /* Never mind if the next step fails */
786 sysfs_create_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);
787
788 /* Call back all registered clients */
789 cxgb3_add_clients(tdev);
790
791out:
792 /* restore them in case the offload module has changed them */
793 if (err) {
794 t3_tp_set_offload_mode(adapter, 0);
795 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
796 cxgb3_set_dummy_ops(tdev);
797 }
798 return err;
799}
800
801static int offload_close(struct t3cdev *tdev)
802{
803 struct adapter *adapter = tdev2adap(tdev);
804
805 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
806 return 0;
807
808 /* Call back all registered clients */
809 cxgb3_remove_clients(tdev);
810
811 sysfs_remove_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);
812
813 tdev->lldev = NULL;
814 cxgb3_set_dummy_ops(tdev);
815 t3_tp_set_offload_mode(adapter, 0);
816 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
817
818 if (!adapter->open_device_map)
819 cxgb_down(adapter);
820
821 cxgb3_offload_deactivate(adapter);
822 return 0;
823}
824
825static int cxgb_open(struct net_device *dev)
826{
827 int err;
828 struct adapter *adapter = dev->priv;
829 struct port_info *pi = netdev_priv(dev);
830 int other_ports = adapter->open_device_map & PORT_MASK;
831
832 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
833 return err;
834
835 set_bit(pi->port_id, &adapter->open_device_map);
836 if (!ofld_disable) {
837 err = offload_open(dev);
838 if (err)
839 printk(KERN_WARNING
840 "Could not initialize offload capabilities\n");
841 }
842
843 link_start(dev);
844 t3_port_intr_enable(adapter, pi->port_id);
845 netif_start_queue(dev);
846 if (!other_ports)
847 schedule_chk_task(adapter);
848
849 return 0;
850}
851
852static int cxgb_close(struct net_device *dev)
853{
854 struct adapter *adapter = dev->priv;
855 struct port_info *p = netdev_priv(dev);
856
857 t3_port_intr_disable(adapter, p->port_id);
858 netif_stop_queue(dev);
859 p->phy.ops->power_down(&p->phy, 1);
860 netif_carrier_off(dev);
861 t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
862
863 spin_lock(&adapter->work_lock); /* sync with update task */
864 clear_bit(p->port_id, &adapter->open_device_map);
865 spin_unlock(&adapter->work_lock);
866
867 if (!(adapter->open_device_map & PORT_MASK))
868 cancel_rearming_delayed_workqueue(cxgb3_wq,
869 &adapter->adap_check_task);
870
871 if (!adapter->open_device_map)
872 cxgb_down(adapter);
873
874 return 0;
875}
876
877static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
878{
879 struct adapter *adapter = dev->priv;
880 struct port_info *p = netdev_priv(dev);
881 struct net_device_stats *ns = &p->netstats;
882 const struct mac_stats *pstats;
883
884 spin_lock(&adapter->stats_lock);
885 pstats = t3_mac_update_stats(&p->mac);
886 spin_unlock(&adapter->stats_lock);
887
888 ns->tx_bytes = pstats->tx_octets;
889 ns->tx_packets = pstats->tx_frames;
890 ns->rx_bytes = pstats->rx_octets;
891 ns->rx_packets = pstats->rx_frames;
892 ns->multicast = pstats->rx_mcast_frames;
893
894 ns->tx_errors = pstats->tx_underrun;
895 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
896 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
897 pstats->rx_fifo_ovfl;
898
899 /* detailed rx_errors */
900 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
901 ns->rx_over_errors = 0;
902 ns->rx_crc_errors = pstats->rx_fcs_errs;
903 ns->rx_frame_errors = pstats->rx_symbol_errs;
904 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
905 ns->rx_missed_errors = pstats->rx_cong_drops;
906
907 /* detailed tx_errors */
908 ns->tx_aborted_errors = 0;
909 ns->tx_carrier_errors = 0;
910 ns->tx_fifo_errors = pstats->tx_underrun;
911 ns->tx_heartbeat_errors = 0;
912 ns->tx_window_errors = 0;
913 return ns;
914}
915
916static u32 get_msglevel(struct net_device *dev)
917{
918 struct adapter *adapter = dev->priv;
919
920 return adapter->msg_enable;
921}
922
923static void set_msglevel(struct net_device *dev, u32 val)
924{
925 struct adapter *adapter = dev->priv;
926
927 adapter->msg_enable = val;
928}
929
930static char stats_strings[][ETH_GSTRING_LEN] = {
931 "TxOctetsOK ",
932 "TxFramesOK ",
933 "TxMulticastFramesOK",
934 "TxBroadcastFramesOK",
935 "TxPauseFrames ",
936 "TxUnderrun ",
937 "TxExtUnderrun ",
938
939 "TxFrames64 ",
940 "TxFrames65To127 ",
941 "TxFrames128To255 ",
942 "TxFrames256To511 ",
943 "TxFrames512To1023 ",
944 "TxFrames1024To1518 ",
945 "TxFrames1519ToMax ",
946
947 "RxOctetsOK ",
948 "RxFramesOK ",
949 "RxMulticastFramesOK",
950 "RxBroadcastFramesOK",
951 "RxPauseFrames ",
952 "RxFCSErrors ",
953 "RxSymbolErrors ",
954 "RxShortErrors ",
955 "RxJabberErrors ",
956 "RxLengthErrors ",
957 "RxFIFOoverflow ",
958
959 "RxFrames64 ",
960 "RxFrames65To127 ",
961 "RxFrames128To255 ",
962 "RxFrames256To511 ",
963 "RxFrames512To1023 ",
964 "RxFrames1024To1518 ",
965 "RxFrames1519ToMax ",
966
967 "PhyFIFOErrors ",
968 "TSO ",
969 "VLANextractions ",
970 "VLANinsertions ",
971 "TxCsumOffload ",
972 "RxCsumGood ",
973 "RxDrops "
974};
975
976static int get_stats_count(struct net_device *dev)
977{
978 return ARRAY_SIZE(stats_strings);
979}
980
981#define T3_REGMAP_SIZE (3 * 1024)
982
983static int get_regs_len(struct net_device *dev)
984{
985 return T3_REGMAP_SIZE;
986}
987
988static int get_eeprom_len(struct net_device *dev)
989{
990 return EEPROMSIZE;
991}
992
993static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
994{
995 u32 fw_vers = 0;
996 struct adapter *adapter = dev->priv;
997
998 t3_get_fw_version(adapter, &fw_vers);
999
1000 strcpy(info->driver, DRV_NAME);
1001 strcpy(info->version, DRV_VERSION);
1002 strcpy(info->bus_info, pci_name(adapter->pdev));
1003 if (!fw_vers)
1004 strcpy(info->fw_version, "N/A");
1005 else
1006 snprintf(info->fw_version, sizeof(info->fw_version),
1007 "%s %u.%u", (fw_vers >> 24) ? "T" : "N",
1008 (fw_vers >> 12) & 0xfff, fw_vers & 0xfff);
1009}
1010
1011static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1012{
1013 if (stringset == ETH_SS_STATS)
1014 memcpy(data, stats_strings, sizeof(stats_strings));
1015}
1016
1017static unsigned long collect_sge_port_stats(struct adapter *adapter,
1018 struct port_info *p, int idx)
1019{
1020 int i;
1021 unsigned long tot = 0;
1022
1023 for (i = 0; i < p->nqsets; ++i)
1024 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
1025 return tot;
1026}
1027
1028static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1029 u64 *data)
1030{
1031 struct adapter *adapter = dev->priv;
1032 struct port_info *pi = netdev_priv(dev);
1033 const struct mac_stats *s;
1034
1035 spin_lock(&adapter->stats_lock);
1036 s = t3_mac_update_stats(&pi->mac);
1037 spin_unlock(&adapter->stats_lock);
1038
1039 *data++ = s->tx_octets;
1040 *data++ = s->tx_frames;
1041 *data++ = s->tx_mcast_frames;
1042 *data++ = s->tx_bcast_frames;
1043 *data++ = s->tx_pause;
1044 *data++ = s->tx_underrun;
1045 *data++ = s->tx_fifo_urun;
1046
1047 *data++ = s->tx_frames_64;
1048 *data++ = s->tx_frames_65_127;
1049 *data++ = s->tx_frames_128_255;
1050 *data++ = s->tx_frames_256_511;
1051 *data++ = s->tx_frames_512_1023;
1052 *data++ = s->tx_frames_1024_1518;
1053 *data++ = s->tx_frames_1519_max;
1054
1055 *data++ = s->rx_octets;
1056 *data++ = s->rx_frames;
1057 *data++ = s->rx_mcast_frames;
1058 *data++ = s->rx_bcast_frames;
1059 *data++ = s->rx_pause;
1060 *data++ = s->rx_fcs_errs;
1061 *data++ = s->rx_symbol_errs;
1062 *data++ = s->rx_short;
1063 *data++ = s->rx_jabber;
1064 *data++ = s->rx_too_long;
1065 *data++ = s->rx_fifo_ovfl;
1066
1067 *data++ = s->rx_frames_64;
1068 *data++ = s->rx_frames_65_127;
1069 *data++ = s->rx_frames_128_255;
1070 *data++ = s->rx_frames_256_511;
1071 *data++ = s->rx_frames_512_1023;
1072 *data++ = s->rx_frames_1024_1518;
1073 *data++ = s->rx_frames_1519_max;
1074
1075 *data++ = pi->phy.fifo_errors;
1076
1077 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1078 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1079 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1080 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1081 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1082 *data++ = s->rx_cong_drops;
1083}
1084
1085static inline void reg_block_dump(struct adapter *ap, void *buf,
1086 unsigned int start, unsigned int end)
1087{
1088 u32 *p = buf + start;
1089
1090 for (; start <= end; start += sizeof(u32))
1091 *p++ = t3_read_reg(ap, start);
1092}
1093
1094static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1095 void *buf)
1096{
1097 struct adapter *ap = dev->priv;
1098
1099 /*
1100 * Version scheme:
1101 * bits 0..9: chip version
1102 * bits 10..15: chip revision
1103 * bit 31: set for PCIe cards
1104 */
1105 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
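	/* e.g. a revision-2 chip on a PCIe card reports 0x80000803 here */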
1106
1107 /*
1108 * We skip the MAC statistics registers because they are clear-on-read.
1109 * Also reading multi-register stats would need to synchronize with the
1110 * periodic mac stats accumulation. Hard to justify the complexity.
1111 */
1112 memset(buf, 0, T3_REGMAP_SIZE);
1113 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1114 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1115 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1116 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1117 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1118 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1119 XGM_REG(A_XGM_SERDES_STAT3, 1));
1120 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1121 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1122}
1123
1124static int restart_autoneg(struct net_device *dev)
1125{
1126 struct port_info *p = netdev_priv(dev);
1127
1128 if (!netif_running(dev))
1129 return -EAGAIN;
1130 if (p->link_config.autoneg != AUTONEG_ENABLE)
1131 return -EINVAL;
1132 p->phy.ops->autoneg_restart(&p->phy);
1133 return 0;
1134}
1135
1136static int cxgb3_phys_id(struct net_device *dev, u32 data)
1137{
1138 int i;
1139 struct adapter *adapter = dev->priv;
1140
1141 if (data == 0)
1142 data = 2;
1143
1144 for (i = 0; i < data * 2; i++) {
1145 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1146 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1147 if (msleep_interruptible(500))
1148 break;
1149 }
1150 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1151 F_GPIO0_OUT_VAL);
1152 return 0;
1153}
1154
1155static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1156{
1157 struct port_info *p = netdev_priv(dev);
1158
1159 cmd->supported = p->link_config.supported;
1160 cmd->advertising = p->link_config.advertising;
1161
1162 if (netif_carrier_ok(dev)) {
1163 cmd->speed = p->link_config.speed;
1164 cmd->duplex = p->link_config.duplex;
1165 } else {
1166 cmd->speed = -1;
1167 cmd->duplex = -1;
1168 }
1169
1170 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1171 cmd->phy_address = p->phy.addr;
1172 cmd->transceiver = XCVR_EXTERNAL;
1173 cmd->autoneg = p->link_config.autoneg;
1174 cmd->maxtxpkt = 0;
1175 cmd->maxrxpkt = 0;
1176 return 0;
1177}
1178
1179static int speed_duplex_to_caps(int speed, int duplex)
1180{
1181 int cap = 0;
1182
1183 switch (speed) {
1184 case SPEED_10:
1185 if (duplex == DUPLEX_FULL)
1186 cap = SUPPORTED_10baseT_Full;
1187 else
1188 cap = SUPPORTED_10baseT_Half;
1189 break;
1190 case SPEED_100:
1191 if (duplex == DUPLEX_FULL)
1192 cap = SUPPORTED_100baseT_Full;
1193 else
1194 cap = SUPPORTED_100baseT_Half;
1195 break;
1196 case SPEED_1000:
1197 if (duplex == DUPLEX_FULL)
1198 cap = SUPPORTED_1000baseT_Full;
1199 else
1200 cap = SUPPORTED_1000baseT_Half;
1201 break;
1202 case SPEED_10000:
1203 if (duplex == DUPLEX_FULL)
1204 cap = SUPPORTED_10000baseT_Full;
1205 }
1206 return cap;
1207}
1208
1209#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1210 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1211 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1212 ADVERTISED_10000baseT_Full)
1213
1214static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1215{
1216 struct port_info *p = netdev_priv(dev);
1217 struct link_config *lc = &p->link_config;
1218
1219 if (!(lc->supported & SUPPORTED_Autoneg))
1220 return -EOPNOTSUPP; /* can't change speed/duplex */
1221
1222 if (cmd->autoneg == AUTONEG_DISABLE) {
1223 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1224
1225 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1226 return -EINVAL; /* forced 1Gbps not allowed, it needs autoneg */
1227 lc->requested_speed = cmd->speed;
1228 lc->requested_duplex = cmd->duplex;
1229 lc->advertising = 0;
1230 } else {
1231 cmd->advertising &= ADVERTISED_MASK;
1232 cmd->advertising &= lc->supported;
1233 if (!cmd->advertising)
1234 return -EINVAL;
1235 lc->requested_speed = SPEED_INVALID;
1236 lc->requested_duplex = DUPLEX_INVALID;
1237 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1238 }
1239 lc->autoneg = cmd->autoneg;
1240 if (netif_running(dev))
1241 t3_link_start(&p->phy, &p->mac, lc);
1242 return 0;
1243}
1244
1245static void get_pauseparam(struct net_device *dev,
1246 struct ethtool_pauseparam *epause)
1247{
1248 struct port_info *p = netdev_priv(dev);
1249
1250 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1251 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1252 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1253}
1254
1255static int set_pauseparam(struct net_device *dev,
1256 struct ethtool_pauseparam *epause)
1257{
1258 struct port_info *p = netdev_priv(dev);
1259 struct link_config *lc = &p->link_config;
1260
1261 if (epause->autoneg == AUTONEG_DISABLE)
1262 lc->requested_fc = 0;
1263 else if (lc->supported & SUPPORTED_Autoneg)
1264 lc->requested_fc = PAUSE_AUTONEG;
1265 else
1266 return -EINVAL;
1267
1268 if (epause->rx_pause)
1269 lc->requested_fc |= PAUSE_RX;
1270 if (epause->tx_pause)
1271 lc->requested_fc |= PAUSE_TX;
1272 if (lc->autoneg == AUTONEG_ENABLE) {
1273 if (netif_running(dev))
1274 t3_link_start(&p->phy, &p->mac, lc);
1275 } else {
1276 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1277 if (netif_running(dev))
1278 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1279 }
1280 return 0;
1281}
1282
1283static u32 get_rx_csum(struct net_device *dev)
1284{
1285 struct port_info *p = netdev_priv(dev);
1286
1287 return p->rx_csum_offload;
1288}
1289
1290static int set_rx_csum(struct net_device *dev, u32 data)
1291{
1292 struct port_info *p = netdev_priv(dev);
1293
1294 p->rx_csum_offload = data;
1295 return 0;
1296}
1297
1298static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1299{
1300 struct adapter *adapter = dev->priv;
1301
1302 e->rx_max_pending = MAX_RX_BUFFERS;
1303 e->rx_mini_max_pending = 0;
1304 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1305 e->tx_max_pending = MAX_TXQ_ENTRIES;
1306
1307 e->rx_pending = adapter->params.sge.qset[0].fl_size;
1308 e->rx_mini_pending = adapter->params.sge.qset[0].rspq_size;
1309 e->rx_jumbo_pending = adapter->params.sge.qset[0].jumbo_size;
1310 e->tx_pending = adapter->params.sge.qset[0].txq_size[0];
1311}
1312
1313static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1314{
1315 int i;
1316 struct adapter *adapter = dev->priv;
1317
1318 if (e->rx_pending > MAX_RX_BUFFERS ||
1319 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1320 e->tx_pending > MAX_TXQ_ENTRIES ||
1321 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1322 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1323 e->rx_pending < MIN_FL_ENTRIES ||
1324 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1325 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1326 return -EINVAL;
1327
1328 if (adapter->flags & FULL_INIT_DONE)
1329 return -EBUSY;
1330
1331 for (i = 0; i < SGE_QSETS; ++i) {
1332 struct qset_params *q = &adapter->params.sge.qset[i];
1333
1334 q->rspq_size = e->rx_mini_pending;
1335 q->fl_size = e->rx_pending;
1336 q->jumbo_size = e->rx_jumbo_pending;
1337 q->txq_size[0] = e->tx_pending;
1338 q->txq_size[1] = e->tx_pending;
1339 q->txq_size[2] = e->tx_pending;
1340 }
1341 return 0;
1342}
1343
1344static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1345{
1346 struct adapter *adapter = dev->priv;
1347 struct qset_params *qsp = &adapter->params.sge.qset[0];
1348 struct sge_qset *qs = &adapter->sge.qs[0];
1349
1350 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1351 return -EINVAL;
1352
1353 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1354 t3_update_qset_coalesce(qs, qsp);
1355 return 0;
1356}
1357
1358static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1359{
1360 struct adapter *adapter = dev->priv;
1361 struct qset_params *q = adapter->params.sge.qset;
1362
1363 c->rx_coalesce_usecs = q->coalesce_usecs;
1364 return 0;
1365}
1366
1367static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1368 u8 * data)
1369{
1370 int i, err = 0;
1371 struct adapter *adapter = dev->priv;
1372
1373 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1374 if (!buf)
1375 return -ENOMEM;
1376
1377 e->magic = EEPROM_MAGIC;
1378 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1379 err = t3_seeprom_read(adapter, i, (u32 *)&buf[i]);
1380
1381 if (!err)
1382 memcpy(data, buf + e->offset, e->len);
1383 kfree(buf);
1384 return err;
1385}
1386
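/*
 * Read-modify-write sketch for the unaligned case handled below: a request
 * with offset = 5 and len = 6 yields aligned_offset = 4 and aligned_len = 8;
 * the words at offsets 4 and 8 are read to preserve bytes 4 and 11, the user
 * data overwrites bytes 5..10, and the full 8-byte span is written back.
 */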
1387static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1388 u8 * data)
1389{
1390 u8 *buf;
1391 int err = 0;
1392 u32 aligned_offset, aligned_len, *p;
1393 struct adapter *adapter = dev->priv;
1394
1395 if (eeprom->magic != EEPROM_MAGIC)
1396 return -EINVAL;
1397
1398 aligned_offset = eeprom->offset & ~3;
1399 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1400
1401 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1402 buf = kmalloc(aligned_len, GFP_KERNEL);
1403 if (!buf)
1404 return -ENOMEM;
1405 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1406 if (!err && aligned_len > 4)
1407 err = t3_seeprom_read(adapter,
1408 aligned_offset + aligned_len - 4,
1409 (u32 *)&buf[aligned_len - 4]);
1410 if (err)
1411 goto out;
1412 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1413 } else
1414 buf = data;
1415
1416 err = t3_seeprom_wp(adapter, 0);
1417 if (err)
1418 goto out;
1419
1420 for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1421 err = t3_seeprom_write(adapter, aligned_offset, *p);
1422 aligned_offset += 4;
1423 }
1424
1425 if (!err)
1426 err = t3_seeprom_wp(adapter, 1);
1427out:
1428 if (buf != data)
1429 kfree(buf);
1430 return err;
1431}
1432
1433static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1434{
1435 wol->supported = 0;
1436 wol->wolopts = 0;
1437 memset(&wol->sopass, 0, sizeof(wol->sopass));
1438}
1439
1440static const struct ethtool_ops cxgb_ethtool_ops = {
1441 .get_settings = get_settings,
1442 .set_settings = set_settings,
1443 .get_drvinfo = get_drvinfo,
1444 .get_msglevel = get_msglevel,
1445 .set_msglevel = set_msglevel,
1446 .get_ringparam = get_sge_param,
1447 .set_ringparam = set_sge_param,
1448 .get_coalesce = get_coalesce,
1449 .set_coalesce = set_coalesce,
1450 .get_eeprom_len = get_eeprom_len,
1451 .get_eeprom = get_eeprom,
1452 .set_eeprom = set_eeprom,
1453 .get_pauseparam = get_pauseparam,
1454 .set_pauseparam = set_pauseparam,
1455 .get_rx_csum = get_rx_csum,
1456 .set_rx_csum = set_rx_csum,
1457 .get_tx_csum = ethtool_op_get_tx_csum,
1458 .set_tx_csum = ethtool_op_set_tx_csum,
1459 .get_sg = ethtool_op_get_sg,
1460 .set_sg = ethtool_op_set_sg,
1461 .get_link = ethtool_op_get_link,
1462 .get_strings = get_strings,
1463 .phys_id = cxgb3_phys_id,
1464 .nway_reset = restart_autoneg,
1465 .get_stats_count = get_stats_count,
1466 .get_ethtool_stats = get_stats,
1467 .get_regs_len = get_regs_len,
1468 .get_regs = get_regs,
1469 .get_wol = get_wol,
1470 .get_tso = ethtool_op_get_tso,
1471 .set_tso = ethtool_op_set_tso,
1472 .get_perm_addr = ethtool_op_get_perm_addr
1473};
1474
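/*
 * Range check for the ioctl parameters below: a negative value means
 * "leave this setting unchanged" and therefore passes the check.
 */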
1475static int in_range(int val, int lo, int hi)
1476{
1477 return val < 0 || (val <= hi && val >= lo);
1478}
1479
1480static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1481{
1482 int ret;
1483 u32 cmd;
1484 struct adapter *adapter = dev->priv;
1485
1486 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1487 return -EFAULT;
1488
1489 switch (cmd) {
1490 case CHELSIO_SETREG:{
1491 struct ch_reg edata;
1492
1493 if (!capable(CAP_NET_ADMIN))
1494 return -EPERM;
1495 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1496 return -EFAULT;
1497 if ((edata.addr & 3) != 0
1498 || edata.addr >= adapter->mmio_len)
1499 return -EINVAL;
1500 writel(edata.val, adapter->regs + edata.addr);
1501 break;
1502 }
1503 case CHELSIO_GETREG:{
1504 struct ch_reg edata;
1505
1506 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1507 return -EFAULT;
1508 if ((edata.addr & 3) != 0
1509 || edata.addr >= adapter->mmio_len)
1510 return -EINVAL;
1511 edata.val = readl(adapter->regs + edata.addr);
1512 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1513 return -EFAULT;
1514 break;
1515 }
1516 case CHELSIO_SET_QSET_PARAMS:{
1517 int i;
1518 struct qset_params *q;
1519 struct ch_qset_params t;
1520
1521 if (!capable(CAP_NET_ADMIN))
1522 return -EPERM;
1523 if (copy_from_user(&t, useraddr, sizeof(t)))
1524 return -EFAULT;
1525 if (t.qset_idx >= SGE_QSETS)
1526 return -EINVAL;
1527 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1528 !in_range(t.cong_thres, 0, 255) ||
1529 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1530 MAX_TXQ_ENTRIES) ||
1531 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1532 MAX_TXQ_ENTRIES) ||
1533 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1534 MAX_CTRL_TXQ_ENTRIES) ||
1535 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1536 MAX_RX_BUFFERS)
1537 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1538 MAX_RX_JUMBO_BUFFERS)
1539 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1540 MAX_RSPQ_ENTRIES))
1541 return -EINVAL;
1542 if ((adapter->flags & FULL_INIT_DONE) &&
1543 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1544 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1545 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1546 t.polling >= 0 || t.cong_thres >= 0))
1547 return -EBUSY;
1548
1549 q = &adapter->params.sge.qset[t.qset_idx];
1550
1551 if (t.rspq_size >= 0)
1552 q->rspq_size = t.rspq_size;
1553 if (t.fl_size[0] >= 0)
1554 q->fl_size = t.fl_size[0];
1555 if (t.fl_size[1] >= 0)
1556 q->jumbo_size = t.fl_size[1];
1557 if (t.txq_size[0] >= 0)
1558 q->txq_size[0] = t.txq_size[0];
1559 if (t.txq_size[1] >= 0)
1560 q->txq_size[1] = t.txq_size[1];
1561 if (t.txq_size[2] >= 0)
1562 q->txq_size[2] = t.txq_size[2];
1563 if (t.cong_thres >= 0)
1564 q->cong_thres = t.cong_thres;
1565 if (t.intr_lat >= 0) {
1566 struct sge_qset *qs =
1567 &adapter->sge.qs[t.qset_idx];
1568
1569 q->coalesce_usecs = t.intr_lat;
1570 t3_update_qset_coalesce(qs, q);
1571 }
1572 if (t.polling >= 0) {
1573 if (adapter->flags & USING_MSIX)
1574 q->polling = t.polling;
1575 else {
1576 /* No polling with INTx for T3A */
1577 if (adapter->params.rev == 0 &&
1578 !(adapter->flags & USING_MSI))
1579 t.polling = 0;
1580
1581 for (i = 0; i < SGE_QSETS; i++) {
1582 q = &adapter->params.sge.
1583 qset[i];
1584 q->polling = t.polling;
1585 }
1586 }
1587 }
1588 break;
1589 }
1590 case CHELSIO_GET_QSET_PARAMS:{
1591 struct qset_params *q;
1592 struct ch_qset_params t;
1593
1594 if (copy_from_user(&t, useraddr, sizeof(t)))
1595 return -EFAULT;
1596 if (t.qset_idx >= SGE_QSETS)
1597 return -EINVAL;
1598
1599 q = &adapter->params.sge.qset[t.qset_idx];
1600 t.rspq_size = q->rspq_size;
1601 t.txq_size[0] = q->txq_size[0];
1602 t.txq_size[1] = q->txq_size[1];
1603 t.txq_size[2] = q->txq_size[2];
1604 t.fl_size[0] = q->fl_size;
1605 t.fl_size[1] = q->jumbo_size;
1606 t.polling = q->polling;
1607 t.intr_lat = q->coalesce_usecs;
1608 t.cong_thres = q->cong_thres;
1609
1610 if (copy_to_user(useraddr, &t, sizeof(t)))
1611 return -EFAULT;
1612 break;
1613 }
1614 case CHELSIO_SET_QSET_NUM:{
1615 struct ch_reg edata;
1616 struct port_info *pi = netdev_priv(dev);
1617 unsigned int i, first_qset = 0, other_qsets = 0;
1618
1619 if (!capable(CAP_NET_ADMIN))
1620 return -EPERM;
1621 if (adapter->flags & FULL_INIT_DONE)
1622 return -EBUSY;
1623 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1624 return -EFAULT;
1625 if (edata.val < 1 ||
1626 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1627 return -EINVAL;
1628
1629 for_each_port(adapter, i)
1630 if (adapter->port[i] && adapter->port[i] != dev)
1631 other_qsets += adap2pinfo(adapter, i)->nqsets;
1632
1633 if (edata.val + other_qsets > SGE_QSETS)
1634 return -EINVAL;
1635
1636 pi->nqsets = edata.val;
1637
1638 for_each_port(adapter, i)
1639 if (adapter->port[i]) {
1640 pi = adap2pinfo(adapter, i);
1641 pi->first_qset = first_qset;
1642 first_qset += pi->nqsets;
1643 }
1644 break;
1645 }
1646 case CHELSIO_GET_QSET_NUM:{
1647 struct ch_reg edata;
1648 struct port_info *pi = netdev_priv(dev);
1649
1650 edata.cmd = CHELSIO_GET_QSET_NUM;
1651 edata.val = pi->nqsets;
1652 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1653 return -EFAULT;
1654 break;
1655 }
1656 case CHELSIO_LOAD_FW:{
1657 u8 *fw_data;
1658 struct ch_mem_range t;
1659
1660 if (!capable(CAP_NET_ADMIN))
1661 return -EPERM;
1662 if (copy_from_user(&t, useraddr, sizeof(t)))
1663 return -EFAULT;
1664
1665 fw_data = kmalloc(t.len, GFP_KERNEL);
1666 if (!fw_data)
1667 return -ENOMEM;
1668
1669 if (copy_from_user
1670 (fw_data, useraddr + sizeof(t), t.len)) {
1671 kfree(fw_data);
1672 return -EFAULT;
1673 }
1674
1675 ret = t3_load_fw(adapter, fw_data, t.len);
1676 kfree(fw_data);
1677 if (ret)
1678 return ret;
1679 break;
1680 }
1681 case CHELSIO_SETMTUTAB:{
1682 struct ch_mtus m;
1683 int i;
1684
1685 if (!is_offload(adapter))
1686 return -EOPNOTSUPP;
1687 if (!capable(CAP_NET_ADMIN))
1688 return -EPERM;
1689 if (offload_running(adapter))
1690 return -EBUSY;
1691 if (copy_from_user(&m, useraddr, sizeof(m)))
1692 return -EFAULT;
1693 if (m.nmtus != NMTUS)
1694 return -EINVAL;
1695 if (m.mtus[0] < 81) /* accommodate SACK */
1696 return -EINVAL;
1697
1698 /* MTUs must be in ascending order */
1699 for (i = 1; i < NMTUS; ++i)
1700 if (m.mtus[i] < m.mtus[i - 1])
1701 return -EINVAL;
1702
1703 memcpy(adapter->params.mtus, m.mtus,
1704 sizeof(adapter->params.mtus));
1705 break;
1706 }
1707 case CHELSIO_GET_PM:{
1708 struct tp_params *p = &adapter->params.tp;
1709 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1710
1711 if (!is_offload(adapter))
1712 return -EOPNOTSUPP;
1713 m.tx_pg_sz = p->tx_pg_size;
1714 m.tx_num_pg = p->tx_num_pgs;
1715 m.rx_pg_sz = p->rx_pg_size;
1716 m.rx_num_pg = p->rx_num_pgs;
1717 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1718 if (copy_to_user(useraddr, &m, sizeof(m)))
1719 return -EFAULT;
1720 break;
1721 }
1722 case CHELSIO_SET_PM:{
1723 struct ch_pm m;
1724 struct tp_params *p = &adapter->params.tp;
1725
1726 if (!is_offload(adapter))
1727 return -EOPNOTSUPP;
1728 if (!capable(CAP_NET_ADMIN))
1729 return -EPERM;
1730 if (adapter->flags & FULL_INIT_DONE)
1731 return -EBUSY;
1732 if (copy_from_user(&m, useraddr, sizeof(m)))
1733 return -EFAULT;
1734 if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
1735 !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
1736 return -EINVAL; /* not power of 2 */
1737 if (!(m.rx_pg_sz & 0x14000))
1738 return -EINVAL; /* not 16KB or 64KB */
1739 if (!(m.tx_pg_sz & 0x1554000))
1740 return -EINVAL; /* not 16KB-16MB as a power of 4 */
1741 if (m.tx_num_pg == -1)
1742 m.tx_num_pg = p->tx_num_pgs;
1743 if (m.rx_num_pg == -1)
1744 m.rx_num_pg = p->rx_num_pgs;
1745 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1746 return -EINVAL;
1747 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1748 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1749 return -EINVAL;
1750 p->rx_pg_size = m.rx_pg_sz;
1751 p->tx_pg_size = m.tx_pg_sz;
1752 p->rx_num_pgs = m.rx_num_pg;
1753 p->tx_num_pgs = m.tx_num_pg;
1754 break;
1755 }
1756 case CHELSIO_GET_MEM:{
1757 struct ch_mem_range t;
1758 struct mc7 *mem;
1759 u64 buf[32];
1760
1761 if (!is_offload(adapter))
1762 return -EOPNOTSUPP;
1763 if (!(adapter->flags & FULL_INIT_DONE))
1764 return -EIO; /* need the memory controllers */
1765 if (copy_from_user(&t, useraddr, sizeof(t)))
1766 return -EFAULT;
1767 if ((t.addr & 7) || (t.len & 7))
1768 return -EINVAL;
1769 if (t.mem_id == MEM_CM)
1770 mem = &adapter->cm;
1771 else if (t.mem_id == MEM_PMRX)
1772 mem = &adapter->pmrx;
1773 else if (t.mem_id == MEM_PMTX)
1774 mem = &adapter->pmtx;
1775 else
1776 return -EINVAL;
1777
1778 /*
1779 * Version scheme:
1780 * bits 0..9: chip version
1781 * bits 10..15: chip revision
1782 */
1783 t.version = 3 | (adapter->params.rev << 10);
1784 if (copy_to_user(useraddr, &t, sizeof(t)))
1785 return -EFAULT;
1786
1787 /*
1788 * Read 256 bytes at a time as len can be large and we don't
1789 * want to use huge intermediate buffers.
1790 */
1791 useraddr += sizeof(t); /* advance to start of buffer */
1792 while (t.len) {
1793 unsigned int chunk =
1794 min_t(unsigned int, t.len, sizeof(buf));
1795
1796 ret =
1797 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
1798 buf);
1799 if (ret)
1800 return ret;
1801 if (copy_to_user(useraddr, buf, chunk))
1802 return -EFAULT;
1803 useraddr += chunk;
1804 t.addr += chunk;
1805 t.len -= chunk;
1806 }
1807 break;
1808 }
1809 case CHELSIO_SET_TRACE_FILTER:{
1810 struct ch_trace t;
1811 const struct trace_params *tp;
1812
1813 if (!capable(CAP_NET_ADMIN))
1814 return -EPERM;
1815 if (!offload_running(adapter))
1816 return -EAGAIN;
1817 if (copy_from_user(&t, useraddr, sizeof(t)))
1818 return -EFAULT;
1819
1820 tp = (const struct trace_params *)&t.sip;
1821 if (t.config_tx)
1822 t3_config_trace_filter(adapter, tp, 0,
1823 t.invert_match,
1824 t.trace_tx);
1825 if (t.config_rx)
1826 t3_config_trace_filter(adapter, tp, 1,
1827 t.invert_match,
1828 t.trace_rx);
1829 break;
1830 }
1831 case CHELSIO_SET_PKTSCHED:{
1832 struct sk_buff *skb;
1833 struct ch_pktsched_params p;
1834 struct mngt_pktsched_wr *req;
1835
1836 if (!(adapter->flags & FULL_INIT_DONE))
1837 return -EIO; /* uP must be up and running */
1838 if (copy_from_user(&p, useraddr, sizeof(p)))
1839 return -EFAULT;
1840 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1841 if (!skb)
1842 return -ENOMEM;
1843 req =
1844 (struct mngt_pktsched_wr *)skb_put(skb,
1845 sizeof(*req));
1846 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
1847 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
1848 req->sched = p.sched;
1849 req->idx = p.idx;
1850 req->min = p.min;
1851 req->max = p.max;
1852 req->binding = p.binding;
1853 printk(KERN_INFO
1854 "pktsched: sched %u idx %u min %u max %u binding %u\n",
1855 req->sched, req->idx, req->min, req->max,
1856 req->binding);
1857 skb->priority = 1;
1858 offload_tx(&adapter->tdev, skb);
1859 break;
1860 }
1861 default:
1862 return -EOPNOTSUPP;
1863 }
1864 return 0;
1865}
1866
1867static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1868{
1869 int ret, mmd;
1870 struct adapter *adapter = dev->priv;
1871 struct port_info *pi = netdev_priv(dev);
1872 struct mii_ioctl_data *data = if_mii(req);
1873
1874 switch (cmd) {
1875 case SIOCGMIIPHY:
1876 data->phy_id = pi->phy.addr;
1877 /* FALLTHRU */
1878 case SIOCGMIIREG:{
1879 u32 val;
1880 struct cphy *phy = &pi->phy;
1881
1882 if (!phy->mdio_read)
1883 return -EOPNOTSUPP;
1884 if (is_10G(adapter)) {
1885 mmd = data->phy_id >> 8;
1886 if (!mmd)
1887 mmd = MDIO_DEV_PCS;
1888 else if (mmd > MDIO_DEV_XGXS)
1889 return -EINVAL;
1890
1891 ret =
1892 phy->mdio_read(adapter, data->phy_id & 0x1f,
1893 mmd, data->reg_num, &val);
1894 } else
1895 ret =
1896 phy->mdio_read(adapter, data->phy_id & 0x1f,
1897 0, data->reg_num & 0x1f,
1898 &val);
1899 if (!ret)
1900 data->val_out = val;
1901 break;
1902 }
1903 case SIOCSMIIREG:{
1904 struct cphy *phy = &pi->phy;
1905
1906 if (!capable(CAP_NET_ADMIN))
1907 return -EPERM;
1908 if (!phy->mdio_write)
1909 return -EOPNOTSUPP;
1910 if (is_10G(adapter)) {
1911 mmd = data->phy_id >> 8;
1912 if (!mmd)
1913 mmd = MDIO_DEV_PCS;
1914 else if (mmd > MDIO_DEV_XGXS)
1915 return -EINVAL;
1916
1917 ret =
1918 phy->mdio_write(adapter,
1919 data->phy_id & 0x1f, mmd,
1920 data->reg_num,
1921 data->val_in);
1922 } else
1923 ret =
1924 phy->mdio_write(adapter,
1925 data->phy_id & 0x1f, 0,
1926 data->reg_num & 0x1f,
1927 data->val_in);
1928 break;
1929 }
1930 case SIOCCHIOCTL:
1931 return cxgb_extension_ioctl(dev, req->ifr_data);
1932 default:
1933 return -EOPNOTSUPP;
1934 }
1935 return ret;
1936}
1937
1938static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1939{
1940 int ret;
1941 struct adapter *adapter = dev->priv;
1942 struct port_info *pi = netdev_priv(dev);
1943
1944 if (new_mtu < 81) /* accommodate SACK */
1945 return -EINVAL;
1946 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
1947 return ret;
1948 dev->mtu = new_mtu;
1949 init_port_mtus(adapter);
1950 if (adapter->params.rev == 0 && offload_running(adapter))
1951 t3_load_mtus(adapter, adapter->params.mtus,
1952 adapter->params.a_wnd, adapter->params.b_wnd,
1953 adapter->port[0]->mtu);
1954 return 0;
1955}
1956
1957static int cxgb_set_mac_addr(struct net_device *dev, void *p)
1958{
1959 struct adapter *adapter = dev->priv;
1960 struct port_info *pi = netdev_priv(dev);
1961 struct sockaddr *addr = p;
1962
1963 if (!is_valid_ether_addr(addr->sa_data))
1964 return -EINVAL;
1965
1966 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1967 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
1968 if (offload_running(adapter))
1969 write_smt_entry(adapter, pi->port_id);
1970 return 0;
1971}
1972
1973/**
1974 * t3_synchronize_rx - wait for current Rx processing on a port to complete
1975 * @adap: the adapter
1976 * @p: the port
1977 *
1978 * Ensures that current Rx processing on any of the queues associated with
1979 * the given port completes before returning. We do this by acquiring and
1980 * releasing the locks of the response queues associated with the port.
1981 */
1982static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
1983{
1984 int i;
1985
1986 for (i = 0; i < p->nqsets; i++) {
1987 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
1988
1989 spin_lock_irq(&q->lock);
1990 spin_unlock_irq(&q->lock);
1991 }
1992}
1993
1994static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
1995{
1996 struct adapter *adapter = dev->priv;
1997 struct port_info *pi = netdev_priv(dev);
1998
1999 pi->vlan_grp = grp;
2000 if (adapter->params.rev > 0)
2001 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2002 else {
2003 /* single control for all ports */
2004 unsigned int i, have_vlans = 0;
2005 for_each_port(adapter, i)
2006 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2007
2008 t3_set_vlan_accel(adapter, 1, have_vlans);
2009 }
2010 t3_synchronize_rx(adapter, pi);
2011}
2012
2013static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2014{
2015 /* nothing */
2016}
2017
2018#ifdef CONFIG_NET_POLL_CONTROLLER
2019static void cxgb_netpoll(struct net_device *dev)
2020{
2021 struct adapter *adapter = dev->priv;
2022 struct sge_qset *qs = dev2qset(dev);
2023
2024 t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
2025 adapter);
2026}
2027#endif
2028
2029/*
2030 * Periodic accumulation of MAC statistics.
2031 */
2032static void mac_stats_update(struct adapter *adapter)
2033{
2034 int i;
2035
2036 for_each_port(adapter, i) {
2037 struct net_device *dev = adapter->port[i];
2038 struct port_info *p = netdev_priv(dev);
2039
2040 if (netif_running(dev)) {
2041 spin_lock(&adapter->stats_lock);
2042 t3_mac_update_stats(&p->mac);
2043 spin_unlock(&adapter->stats_lock);
2044 }
2045 }
2046}
2047
2048static void check_link_status(struct adapter *adapter)
2049{
2050 int i;
2051
2052 for_each_port(adapter, i) {
2053 struct net_device *dev = adapter->port[i];
2054 struct port_info *p = netdev_priv(dev);
2055
2056 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2057 t3_link_changed(adapter, i);
2058 }
2059}
2060
2061static void t3_adap_check_task(struct work_struct *work)
2062{
2063 struct adapter *adapter = container_of(work, struct adapter,
2064 adap_check_task.work);
2065 const struct adapter_params *p = &adapter->params;
2066
2067 adapter->check_task_cnt++;
2068
2069 /* Check link status for PHYs without interrupts */
2070 if (p->linkpoll_period)
2071 check_link_status(adapter);
2072
2073 /* Accumulate MAC stats if needed */
2074 if (!p->linkpoll_period ||
2075 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2076 p->stats_update_period) {
2077 mac_stats_update(adapter);
2078 adapter->check_task_cnt = 0;
2079 }
2080
2081 /* Schedule the next check update if any port is active. */
2082 spin_lock(&adapter->work_lock);
2083 if (adapter->open_device_map & PORT_MASK)
2084 schedule_chk_task(adapter);
2085 spin_unlock(&adapter->work_lock);
2086}
2087
2088/*
2089 * Processes external (PHY) interrupts in process context.
2090 */
2091static void ext_intr_task(struct work_struct *work)
2092{
2093 struct adapter *adapter = container_of(work, struct adapter,
2094 ext_intr_handler_task);
2095
2096 t3_phy_intr_handler(adapter);
2097
2098 /* Now reenable external interrupts */
2099 spin_lock_irq(&adapter->work_lock);
2100 if (adapter->slow_intr_mask) {
2101 adapter->slow_intr_mask |= F_T3DBG;
2102 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2103 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2104 adapter->slow_intr_mask);
2105 }
2106 spin_unlock_irq(&adapter->work_lock);
2107}
2108
2109/*
2110 * Interrupt-context handler for external (PHY) interrupts.
2111 */
2112void t3_os_ext_intr_handler(struct adapter *adapter)
2113{
2114 /*
2115 * Schedule a task to handle external interrupts as they may be slow
2116 * and we use a mutex to protect MDIO registers. We disable PHY
2117 * interrupts in the meantime and let the task reenable them when
2118 * it's done.
2119 */
2120 spin_lock(&adapter->work_lock);
2121 if (adapter->slow_intr_mask) {
2122 adapter->slow_intr_mask &= ~F_T3DBG;
2123 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2124 adapter->slow_intr_mask);
2125 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2126 }
2127 spin_unlock(&adapter->work_lock);
2128}
2129
2130void t3_fatal_err(struct adapter *adapter)
2131{
2132 unsigned int fw_status[4];
2133
2134 if (adapter->flags & FULL_INIT_DONE) {
2135 t3_sge_stop(adapter);
2136 t3_intr_disable(adapter);
2137 }
2138 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2139 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2140 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2141 fw_status[0], fw_status[1],
2142 fw_status[2], fw_status[3]);
2143
2144}
2145
2146static int __devinit cxgb_enable_msix(struct adapter *adap)
2147{
2148 struct msix_entry entries[SGE_QSETS + 1];
2149 int i, err;
2150
2151 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2152 entries[i].entry = i;
2153
2154 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2155 if (!err) {
2156 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2157 adap->msix_info[i].vec = entries[i].vector;
2158 } else if (err > 0)
2159 dev_info(&adap->pdev->dev,
2160 "only %d MSI-X vectors left, not using MSI-X\n", err);
2161 return err;
2162}
2163
2164static void __devinit print_port_info(struct adapter *adap,
2165 const struct adapter_info *ai)
2166{
2167 static const char *pci_variant[] = {
2168 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2169 };
2170
2171 int i;
2172 char buf[80];
2173
2174 if (is_pcie(adap))
2175 snprintf(buf, sizeof(buf), "%s x%d",
2176 pci_variant[adap->params.pci.variant],
2177 adap->params.pci.width);
2178 else
2179 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2180 pci_variant[adap->params.pci.variant],
2181 adap->params.pci.speed, adap->params.pci.width);
2182
2183 for_each_port(adap, i) {
2184 struct net_device *dev = adap->port[i];
2185 const struct port_info *pi = netdev_priv(dev);
2186
2187 if (!test_bit(i, &adap->registered_device_map))
2188 continue;
2189 printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
2190 dev->name, ai->desc, pi->port_type->desc,
2191 adap->params.rev, buf,
2192 (adap->flags & USING_MSIX) ? " MSI-X" :
2193 (adap->flags & USING_MSI) ? " MSI" : "");
2194 if (adap->name == dev->name && adap->params.vpd.mclk)
2195 printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2196 adap->name, t3_mc7_size(&adap->cm) >> 20,
2197 t3_mc7_size(&adap->pmtx) >> 20,
2198 t3_mc7_size(&adap->pmrx) >> 20);
2199 }
2200}
2201
2202static int __devinit init_one(struct pci_dev *pdev,
2203 const struct pci_device_id *ent)
2204{
2205 static int version_printed;
2206
2207 int i, err, pci_using_dac = 0;
2208 unsigned long mmio_start, mmio_len;
2209 const struct adapter_info *ai;
2210 struct adapter *adapter = NULL;
2211 struct port_info *pi;
2212
2213 if (!version_printed) {
2214 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2215 ++version_printed;
2216 }
2217
2218 if (!cxgb3_wq) {
2219 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2220 if (!cxgb3_wq) {
2221 printk(KERN_ERR DRV_NAME
2222 ": cannot initialize work queue\n");
2223 return -ENOMEM;
2224 }
2225 }
2226
2227 err = pci_request_regions(pdev, DRV_NAME);
2228 if (err) {
2229 /* Just info, some other driver may have claimed the device. */
2230 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2231 return err;
2232 }
2233
2234 err = pci_enable_device(pdev);
2235 if (err) {
2236 dev_err(&pdev->dev, "cannot enable PCI device\n");
2237 goto out_release_regions;
2238 }
2239
2240 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2241 pci_using_dac = 1;
2242 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2243 if (err) {
2244 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2245 "coherent allocations\n");
2246 goto out_disable_device;
2247 }
2248 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2249 dev_err(&pdev->dev, "no usable DMA configuration\n");
2250 goto out_disable_device;
2251 }
2252
2253 pci_set_master(pdev);
2254
2255 mmio_start = pci_resource_start(pdev, 0);
2256 mmio_len = pci_resource_len(pdev, 0);
2257 ai = t3_get_adapter_info(ent->driver_data);
2258
2259 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2260 if (!adapter) {
2261 err = -ENOMEM;
2262 goto out_disable_device;
2263 }
2264
2265 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2266 if (!adapter->regs) {
2267 dev_err(&pdev->dev, "cannot map device registers\n");
2268 err = -ENOMEM;
2269 goto out_free_adapter;
2270 }
2271
2272 adapter->pdev = pdev;
2273 adapter->name = pci_name(pdev);
2274 adapter->msg_enable = dflt_msg_enable;
2275 adapter->mmio_len = mmio_len;
2276
2277 mutex_init(&adapter->mdio_lock);
2278 spin_lock_init(&adapter->work_lock);
2279 spin_lock_init(&adapter->stats_lock);
2280
2281 INIT_LIST_HEAD(&adapter->adapter_list);
2282 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2283 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2284
2285 for (i = 0; i < ai->nports; ++i) {
2286 struct net_device *netdev;
2287
2288 netdev = alloc_etherdev(sizeof(struct port_info));
2289 if (!netdev) {
2290 err = -ENOMEM;
2291 goto out_free_dev;
2292 }
2293
2294 SET_MODULE_OWNER(netdev);
2295 SET_NETDEV_DEV(netdev, &pdev->dev);
2296
2297 adapter->port[i] = netdev;
2298 pi = netdev_priv(netdev);
2299 pi->rx_csum_offload = 1;
2300 pi->nqsets = 1;
2301 pi->first_qset = i;
2302 pi->activity = 0;
2303 pi->port_id = i;
2304 netif_carrier_off(netdev);
2305 netdev->irq = pdev->irq;
2306 netdev->mem_start = mmio_start;
2307 netdev->mem_end = mmio_start + mmio_len - 1;
2308 netdev->priv = adapter;
2309 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2310 netdev->features |= NETIF_F_LLTX;
2311 if (pci_using_dac)
2312 netdev->features |= NETIF_F_HIGHDMA;
2313
2314 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2315 netdev->vlan_rx_register = vlan_rx_register;
2316 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
2317
2318 netdev->open = cxgb_open;
2319 netdev->stop = cxgb_close;
2320 netdev->hard_start_xmit = t3_eth_xmit;
2321 netdev->get_stats = cxgb_get_stats;
2322 netdev->set_multicast_list = cxgb_set_rxmode;
2323 netdev->do_ioctl = cxgb_ioctl;
2324 netdev->change_mtu = cxgb_change_mtu;
2325 netdev->set_mac_address = cxgb_set_mac_addr;
2326#ifdef CONFIG_NET_POLL_CONTROLLER
2327 netdev->poll_controller = cxgb_netpoll;
2328#endif
2329 netdev->weight = 64;
2330
2331 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2332 }
2333
2334 pci_set_drvdata(pdev, adapter->port[0]);
2335 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2336 err = -ENODEV;
2337 goto out_free_dev;
2338 }
2339
2340 /*
2341 * The card is now ready to go. If any errors occur during device
2342 * registration we do not fail the whole card but rather proceed only
2343 * with the ports we manage to register successfully. However we must
2344 * register at least one net device.
2345 */
2346 for_each_port(adapter, i) {
2347 err = register_netdev(adapter->port[i]);
2348 if (err)
2349 dev_warn(&pdev->dev,
2350 "cannot register net device %s, skipping\n",
2351 adapter->port[i]->name);
2352 else {
2353 /*
2354 * Change the name we use for messages to the name of
2355 * the first successfully registered interface.
2356 */
2357 if (!adapter->registered_device_map)
2358 adapter->name = adapter->port[i]->name;
2359
2360 __set_bit(i, &adapter->registered_device_map);
2361 }
2362 }
2363 if (!adapter->registered_device_map) {
2364 dev_err(&pdev->dev, "could not register any net devices\n");
2365 goto out_free_dev;
2366 }
2367
2368 /* Driver's ready. Reflect it on LEDs */
2369 t3_led_ready(adapter);
2370
2371 if (is_offload(adapter)) {
2372 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2373 cxgb3_adapter_ofld(adapter);
2374 }
2375
2376 /* See what interrupts we'll be using */
2377 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2378 adapter->flags |= USING_MSIX;
2379 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2380 adapter->flags |= USING_MSI;
2381
2382 err = sysfs_create_group(&adapter->port[0]->class_dev.kobj,
2383 &cxgb3_attr_group);
2384
2385 print_port_info(adapter, ai);
2386 return 0;
2387
2388out_free_dev:
2389 iounmap(adapter->regs);
2390 for (i = ai->nports - 1; i >= 0; --i)
2391 if (adapter->port[i])
2392 free_netdev(adapter->port[i]);
2393
2394out_free_adapter:
2395 kfree(adapter);
2396
2397out_disable_device:
2398 pci_disable_device(pdev);
2399out_release_regions:
2400 pci_release_regions(pdev);
2401 pci_set_drvdata(pdev, NULL);
2402 return err;
2403}
2404
2405static void __devexit remove_one(struct pci_dev *pdev)
2406{
2407 struct net_device *dev = pci_get_drvdata(pdev);
2408
2409 if (dev) {
2410 int i;
2411 struct adapter *adapter = dev->priv;
2412
2413 t3_sge_stop(adapter);
2414 sysfs_remove_group(&adapter->port[0]->class_dev.kobj,
2415 &cxgb3_attr_group);
2416
2417 for_each_port(adapter, i)
2418 if (test_bit(i, &adapter->registered_device_map))
2419 unregister_netdev(adapter->port[i]);
2420
2421 if (is_offload(adapter)) {
2422 cxgb3_adapter_unofld(adapter);
2423 if (test_bit(OFFLOAD_DEVMAP_BIT,
2424 &adapter->open_device_map))
2425 offload_close(&adapter->tdev);
2426 }
2427
2428 t3_free_sge_resources(adapter);
2429 cxgb_disable_msi(adapter);
2430
2431 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2432 if (adapter->dummy_netdev[i]) {
2433 free_netdev(adapter->dummy_netdev[i]);
2434 adapter->dummy_netdev[i] = NULL;
2435 }
2436
2437 for_each_port(adapter, i)
2438 if (adapter->port[i])
2439 free_netdev(adapter->port[i]);
2440
2441 iounmap(adapter->regs);
2442 kfree(adapter);
2443 pci_release_regions(pdev);
2444 pci_disable_device(pdev);
2445 pci_set_drvdata(pdev, NULL);
2446 }
2447}
2448
2449static struct pci_driver driver = {
2450 .name = DRV_NAME,
2451 .id_table = cxgb3_pci_tbl,
2452 .probe = init_one,
2453 .remove = __devexit_p(remove_one),
2454};
2455
2456static int __init cxgb3_init_module(void)
2457{
2458 int ret;
2459
2460 cxgb3_offload_init();
2461
2462 ret = pci_register_driver(&driver);
2463 return ret;
2464}
2465
2466static void __exit cxgb3_cleanup_module(void)
2467{
2468 pci_unregister_driver(&driver);
2469 if (cxgb3_wq)
2470 destroy_workqueue(cxgb3_wq);
2471}
2472
2473module_init(cxgb3_init_module);
2474module_exit(cxgb3_cleanup_module);
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
new file mode 100644
index 000000000000..3abd4d25c3b8
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -0,0 +1,1222 @@
1/*
2 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/list.h>
35#include <net/neighbour.h>
36#include <linux/notifier.h>
37#include <asm/atomic.h>
38#include <linux/proc_fs.h>
39#include <linux/if_vlan.h>
40#include <net/netevent.h>
41#include <linux/highmem.h>
42#include <linux/vmalloc.h>
43
44#include "common.h"
45#include "regs.h"
46#include "cxgb3_ioctl.h"
47#include "cxgb3_ctl_defs.h"
48#include "cxgb3_defs.h"
49#include "l2t.h"
50#include "firmware_exports.h"
51#include "cxgb3_offload.h"
52
53static LIST_HEAD(client_list);
54static LIST_HEAD(ofld_dev_list);
55static DEFINE_MUTEX(cxgb3_db_lock);
56
57static DEFINE_RWLOCK(adapter_list_lock);
58static LIST_HEAD(adapter_list);
59
60static const unsigned int MAX_ATIDS = 64 * 1024;
61static const unsigned int ATID_BASE = 0x100000;
62
63static inline int offload_activated(struct t3cdev *tdev)
64{
65 const struct adapter *adapter = tdev2adap(tdev);
66
67 return (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map));
68}
69
70/**
71 * cxgb3_register_client - register an offload client
72 * @client: the client
73 *
 74 * Add the client to the client list and call back the client's add
 75 * method for each activated offload device.
76 */
77void cxgb3_register_client(struct cxgb3_client *client)
78{
79 struct t3cdev *tdev;
80
81 mutex_lock(&cxgb3_db_lock);
82 list_add_tail(&client->client_list, &client_list);
83
84 if (client->add) {
85 list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
86 if (offload_activated(tdev))
87 client->add(tdev);
88 }
89 }
90 mutex_unlock(&cxgb3_db_lock);
91}
92
93EXPORT_SYMBOL(cxgb3_register_client);
94
95/**
96 * cxgb3_unregister_client - unregister an offload client
97 * @client: the client
98 *
 99 * Remove the client from the client list and call back the client's
 100 * remove method for each activated offload device.
101 */
102void cxgb3_unregister_client(struct cxgb3_client *client)
103{
104 struct t3cdev *tdev;
105
106 mutex_lock(&cxgb3_db_lock);
107 list_del(&client->client_list);
108
109 if (client->remove) {
110 list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
111 if (offload_activated(tdev))
112 client->remove(tdev);
113 }
114 }
115 mutex_unlock(&cxgb3_db_lock);
116}
117
118EXPORT_SYMBOL(cxgb3_unregister_client);
119
120/**
121 * cxgb3_add_clients - activate registered clients for an offload device
122 * @tdev: the offload device
123 *
 124 * Calls back all registered clients once an offload device is activated.
125 */
126void cxgb3_add_clients(struct t3cdev *tdev)
127{
128 struct cxgb3_client *client;
129
130 mutex_lock(&cxgb3_db_lock);
131 list_for_each_entry(client, &client_list, client_list) {
132 if (client->add)
133 client->add(tdev);
134 }
135 mutex_unlock(&cxgb3_db_lock);
136}
137
138/**
139 * cxgb3_remove_clients - deactivates registered clients
140 * for an offload device
141 * @tdev: the offload device
142 *
 143 * Calls back all registered clients once an offload device is deactivated.
144 */
145void cxgb3_remove_clients(struct t3cdev *tdev)
146{
147 struct cxgb3_client *client;
148
149 mutex_lock(&cxgb3_db_lock);
150 list_for_each_entry(client, &client_list, client_list) {
151 if (client->remove)
152 client->remove(tdev);
153 }
154 mutex_unlock(&cxgb3_db_lock);
155}
156
157static struct net_device *get_iff_from_mac(struct adapter *adapter,
158 const unsigned char *mac,
159 unsigned int vlan)
160{
161 int i;
162
163 for_each_port(adapter, i) {
164 const struct vlan_group *grp;
165 struct net_device *dev = adapter->port[i];
166 const struct port_info *p = netdev_priv(dev);
167
168 if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
169 if (vlan && vlan != VLAN_VID_MASK) {
170 grp = p->vlan_grp;
171 dev = grp ? grp->vlan_devices[vlan] : NULL;
172 } else
173 while (dev->master)
174 dev = dev->master;
175 return dev;
176 }
177 }
178 return NULL;
179}
180
181static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
182 void *data)
183{
184 int ret = 0;
185 struct ulp_iscsi_info *uiip = data;
186
187 switch (req) {
188 case ULP_ISCSI_GET_PARAMS:
189 uiip->pdev = adapter->pdev;
190 uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
191 uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
192 uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
193 /*
194 * On tx, the iscsi pdu has to be <= tx page size and has to
195 * fit into the Tx PM FIFO.
196 */
197 uiip->max_txsz = min(adapter->params.tp.tx_pg_size,
198 t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
199 /* on rx, the iscsi pdu has to be < rx page size and the
 200 whole pdu + cpl headers have to fit into one sge buffer */
201 uiip->max_rxsz = min_t(unsigned int,
202 adapter->params.tp.rx_pg_size,
203 (adapter->sge.qs[0].fl[1].buf_size -
204 sizeof(struct cpl_rx_data) * 2 -
205 sizeof(struct cpl_rx_data_ddp)));
206 break;
207 case ULP_ISCSI_SET_PARAMS:
208 t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
209 break;
210 default:
211 ret = -EOPNOTSUPP;
212 }
213 return ret;
214}
215
216/* Response queue used for RDMA events. */
217#define ASYNC_NOTIF_RSPQ 0
218
219static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
220{
221 int ret = 0;
222
223 switch (req) {
224 case RDMA_GET_PARAMS:{
225 struct rdma_info *req = data;
226 struct pci_dev *pdev = adapter->pdev;
227
228 req->udbell_physbase = pci_resource_start(pdev, 2);
229 req->udbell_len = pci_resource_len(pdev, 2);
230 req->tpt_base =
231 t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
232 req->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
233 req->pbl_base =
234 t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
235 req->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
236 req->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
237 req->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
238 req->kdb_addr = adapter->regs + A_SG_KDOORBELL;
239 req->pdev = pdev;
240 break;
241 }
242 case RDMA_CQ_OP:{
243 unsigned long flags;
244 struct rdma_cq_op *req = data;
245
246 /* may be called in any context */
247 spin_lock_irqsave(&adapter->sge.reg_lock, flags);
248 ret = t3_sge_cqcntxt_op(adapter, req->id, req->op,
249 req->credits);
250 spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
251 break;
252 }
253 case RDMA_GET_MEM:{
254 struct ch_mem_range *t = data;
255 struct mc7 *mem;
256
257 if ((t->addr & 7) || (t->len & 7))
258 return -EINVAL;
259 if (t->mem_id == MEM_CM)
260 mem = &adapter->cm;
261 else if (t->mem_id == MEM_PMRX)
262 mem = &adapter->pmrx;
263 else if (t->mem_id == MEM_PMTX)
264 mem = &adapter->pmtx;
265 else
266 return -EINVAL;
267
268 ret =
269 t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
270 (u64 *) t->buf);
271 if (ret)
272 return ret;
273 break;
274 }
275 case RDMA_CQ_SETUP:{
276 struct rdma_cq_setup *req = data;
277
278 spin_lock_irq(&adapter->sge.reg_lock);
279 ret =
280 t3_sge_init_cqcntxt(adapter, req->id,
281 req->base_addr, req->size,
282 ASYNC_NOTIF_RSPQ,
283 req->ovfl_mode, req->credits,
284 req->credit_thres);
285 spin_unlock_irq(&adapter->sge.reg_lock);
286 break;
287 }
288 case RDMA_CQ_DISABLE:
289 spin_lock_irq(&adapter->sge.reg_lock);
290 ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
291 spin_unlock_irq(&adapter->sge.reg_lock);
292 break;
293 case RDMA_CTRL_QP_SETUP:{
294 struct rdma_ctrlqp_setup *req = data;
295
296 spin_lock_irq(&adapter->sge.reg_lock);
297 ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
298 SGE_CNTXT_RDMA,
299 ASYNC_NOTIF_RSPQ,
300 req->base_addr, req->size,
301 FW_RI_TID_START, 1, 0);
302 spin_unlock_irq(&adapter->sge.reg_lock);
303 break;
304 }
305 default:
306 ret = -EOPNOTSUPP;
307 }
308 return ret;
309}
310
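/*
 * Control/query operations exported to offload clients through tdev->ctl():
 * TID/STID ranges, the MTU table, iSCSI, DDP and RDMA parameters, etc.
 */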
311static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
312{
313 struct adapter *adapter = tdev2adap(tdev);
314 struct tid_range *tid;
315 struct mtutab *mtup;
316 struct iff_mac *iffmacp;
317 struct ddp_params *ddpp;
318 struct adap_ports *ports;
319 int i;
320
321 switch (req) {
322 case GET_MAX_OUTSTANDING_WR:
323 *(unsigned int *)data = FW_WR_NUM;
324 break;
325 case GET_WR_LEN:
326 *(unsigned int *)data = WR_FLITS;
327 break;
328 case GET_TX_MAX_CHUNK:
329 *(unsigned int *)data = 1 << 20; /* 1MB */
330 break;
331 case GET_TID_RANGE:
332 tid = data;
333 tid->num = t3_mc5_size(&adapter->mc5) -
334 adapter->params.mc5.nroutes -
335 adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
336 tid->base = 0;
337 break;
338 case GET_STID_RANGE:
339 tid = data;
340 tid->num = adapter->params.mc5.nservers;
341 tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
342 adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
343 break;
344 case GET_L2T_CAPACITY:
345 *(unsigned int *)data = 2048;
346 break;
347 case GET_MTUS:
348 mtup = data;
349 mtup->size = NMTUS;
350 mtup->mtus = adapter->params.mtus;
351 break;
352 case GET_IFF_FROM_MAC:
353 iffmacp = data;
354 iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
355 iffmacp->vlan_tag &
356 VLAN_VID_MASK);
357 break;
358 case GET_DDP_PARAMS:
359 ddpp = data;
360 ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
361 ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
362 ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
363 break;
364 case GET_PORTS:
365 ports = data;
366 ports->nports = adapter->params.nports;
367 for_each_port(adapter, i)
368 ports->lldevs[i] = adapter->port[i];
369 break;
370 case ULP_ISCSI_GET_PARAMS:
371 case ULP_ISCSI_SET_PARAMS:
372 if (!offload_running(adapter))
373 return -EAGAIN;
374 return cxgb_ulp_iscsi_ctl(adapter, req, data);
375 case RDMA_GET_PARAMS:
376 case RDMA_CQ_OP:
377 case RDMA_CQ_SETUP:
378 case RDMA_CQ_DISABLE:
379 case RDMA_CTRL_QP_SETUP:
380 case RDMA_GET_MEM:
381 if (!offload_running(adapter))
382 return -EAGAIN;
383 return cxgb_rdma_ctl(adapter, req, data);
384 default:
385 return -EOPNOTSUPP;
386 }
387 return 0;
388}
389
390/*
391 * Dummy handler for Rx offload packets in case we get an offload packet before
 392 * proper processing is set up. This complains and drops the packet as it isn't
393 * normal to get offload packets at this stage.
394 */
395static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
396 int n)
397{
398 CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data %u\n",
399 n, ntohl(*(u32 *)skbs[0]->data));
400 while (n--)
401 dev_kfree_skb_any(skbs[n]);
402 return 0;
403}
404
405static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
406{
407}
408
409void cxgb3_set_dummy_ops(struct t3cdev *dev)
410{
411 dev->recv = rx_offload_blackhole;
412 dev->neigh_update = dummy_neigh_update;
413}
414
415/*
416 * Free an active-open TID.
417 */
418void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
419{
420 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
421 union active_open_entry *p = atid2entry(t, atid);
422 void *ctx = p->t3c_tid.ctx;
423
424 spin_lock_bh(&t->atid_lock);
425 p->next = t->afree;
426 t->afree = p;
427 t->atids_in_use--;
428 spin_unlock_bh(&t->atid_lock);
429
430 return ctx;
431}
432
433EXPORT_SYMBOL(cxgb3_free_atid);
434
435/*
436 * Free a server TID and return it to the free pool.
437 */
438void cxgb3_free_stid(struct t3cdev *tdev, int stid)
439{
440 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
441 union listen_entry *p = stid2entry(t, stid);
442
443 spin_lock_bh(&t->stid_lock);
444 p->next = t->sfree;
445 t->sfree = p;
446 t->stids_in_use--;
447 spin_unlock_bh(&t->stid_lock);
448}
449
450EXPORT_SYMBOL(cxgb3_free_stid);
451
452void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
453 void *ctx, unsigned int tid)
454{
455 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
456
457 t->tid_tab[tid].client = client;
458 t->tid_tab[tid].ctx = ctx;
459 atomic_inc(&t->tids_in_use);
460}
461
462EXPORT_SYMBOL(cxgb3_insert_tid);
463
464/*
 465 * Populate a TID_RELEASE WR. The skb must already be properly sized.
466 */
467static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
468{
469 struct cpl_tid_release *req;
470
471 skb->priority = CPL_PRIORITY_SETUP;
472 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
473 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
474 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
475}
476
477static void t3_process_tid_release_list(struct work_struct *work)
478{
479 struct t3c_data *td = container_of(work, struct t3c_data,
480 tid_release_task);
481 struct sk_buff *skb;
482 struct t3cdev *tdev = td->dev;
483
484
485 spin_lock_bh(&td->tid_release_lock);
486 while (td->tid_release_list) {
487 struct t3c_tid_entry *p = td->tid_release_list;
488
489 td->tid_release_list = (struct t3c_tid_entry *)p->ctx;
490 spin_unlock_bh(&td->tid_release_lock);
491
492 skb = alloc_skb(sizeof(struct cpl_tid_release),
493 GFP_KERNEL | __GFP_NOFAIL);
494 mk_tid_release(skb, p - td->tid_maps.tid_tab);
495 cxgb3_ofld_send(tdev, skb);
496 p->ctx = NULL;
497 spin_lock_bh(&td->tid_release_lock);
498 }
499 spin_unlock_bh(&td->tid_release_lock);
500}
501
502/* use ctx as a next pointer in the tid release list */
503void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
504{
505 struct t3c_data *td = T3C_DATA(tdev);
506 struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];
507
508 spin_lock_bh(&td->tid_release_lock);
509 p->ctx = (void *)td->tid_release_list;
510 td->tid_release_list = p;
511 if (!p->ctx)
512 schedule_work(&td->tid_release_task);
513 spin_unlock_bh(&td->tid_release_lock);
514}
515
516EXPORT_SYMBOL(cxgb3_queue_tid_release);
517
518/*
519 * Remove a tid from the TID table. A client may defer processing its last
520 * CPL message if it is locked at the time it arrives, and while the message
521 * sits in the client's backlog the TID may be reused for another connection.
522 * To handle this we atomically switch the TID association if it still points
523 * to the original client context.
524 */
525void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
526{
527 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
528
529 BUG_ON(tid >= t->ntids);
530 if (tdev->type == T3A)
531 (void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
532 else {
533 struct sk_buff *skb;
534
535 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
536 if (likely(skb)) {
537 mk_tid_release(skb, tid);
538 cxgb3_ofld_send(tdev, skb);
539 t->tid_tab[tid].ctx = NULL;
540 } else
541 cxgb3_queue_tid_release(tdev, tid);
542 }
543 atomic_dec(&t->tids_in_use);
544}
545
546EXPORT_SYMBOL(cxgb3_remove_tid);
547
548int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
549 void *ctx)
550{
551 int atid = -1;
552 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
553
554 spin_lock_bh(&t->atid_lock);
555 if (t->afree) {
556 union active_open_entry *p = t->afree;
557
558 atid = (p - t->atid_tab) + t->atid_base;
559 t->afree = p->next;
560 p->t3c_tid.ctx = ctx;
561 p->t3c_tid.client = client;
562 t->atids_in_use++;
563 }
564 spin_unlock_bh(&t->atid_lock);
565 return atid;
566}
567
568EXPORT_SYMBOL(cxgb3_alloc_atid);
569
570int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
571 void *ctx)
572{
573 int stid = -1;
574 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
575
576 spin_lock_bh(&t->stid_lock);
577 if (t->sfree) {
578 union listen_entry *p = t->sfree;
579
580 stid = (p - t->stid_tab) + t->stid_base;
581 t->sfree = p->next;
582 p->t3c_tid.ctx = ctx;
583 p->t3c_tid.client = client;
584 t->stids_in_use++;
585 }
586 spin_unlock_bh(&t->stid_lock);
587 return stid;
588}
589
590EXPORT_SYMBOL(cxgb3_alloc_stid);
591
592static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
593{
594 struct cpl_smt_write_rpl *rpl = cplhdr(skb);
595
596 if (rpl->status != CPL_ERR_NONE)
597 printk(KERN_ERR
598 "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
599 rpl->status, GET_TID(rpl));
600
601 return CPL_RET_BUF_DONE;
602}
603
604static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
605{
606 struct cpl_l2t_write_rpl *rpl = cplhdr(skb);
607
608 if (rpl->status != CPL_ERR_NONE)
609 printk(KERN_ERR
610 "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
611 rpl->status, GET_TID(rpl));
612
613 return CPL_RET_BUF_DONE;
614}
615
616static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
617{
618 struct cpl_act_open_rpl *rpl = cplhdr(skb);
619 unsigned int atid = G_TID(ntohl(rpl->atid));
620 struct t3c_tid_entry *t3c_tid;
621
622 t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
623 if (t3c_tid->ctx && t3c_tid->client && t3c_tid->client->handlers &&
624 t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
625 return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
 626 t3c_tid->ctx);
628 } else {
629 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
630 dev->name, CPL_ACT_OPEN_RPL);
631 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
632 }
633}
634
635static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
636{
637 union opcode_tid *p = cplhdr(skb);
638 unsigned int stid = G_TID(ntohl(p->opcode_tid));
639 struct t3c_tid_entry *t3c_tid;
640
641 t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
642 if (t3c_tid->ctx && t3c_tid->client->handlers &&
643 t3c_tid->client->handlers[p->opcode]) {
644 return t3c_tid->client->handlers[p->opcode] (dev, skb,
645 t3c_tid->ctx);
646 } else {
647 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
648 dev->name, p->opcode);
649 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
650 }
651}
652
653static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
654{
655 union opcode_tid *p = cplhdr(skb);
656 unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
657 struct t3c_tid_entry *t3c_tid;
658
659 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
660 if (t3c_tid->ctx && t3c_tid->client->handlers &&
661 t3c_tid->client->handlers[p->opcode]) {
662 return t3c_tid->client->handlers[p->opcode]
663 (dev, skb, t3c_tid->ctx);
664 } else {
665 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
666 dev->name, p->opcode);
667 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
668 }
669}
670
671static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
672{
673 struct cpl_pass_accept_req *req = cplhdr(skb);
674 unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
675 struct t3c_tid_entry *t3c_tid;
676
677 t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
678 if (t3c_tid->ctx && t3c_tid->client->handlers &&
679 t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
680 return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
681 (dev, skb, t3c_tid->ctx);
682 } else {
683 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
684 dev->name, CPL_PASS_ACCEPT_REQ);
685 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
686 }
687}
688
689static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
690{
691 union opcode_tid *p = cplhdr(skb);
692 unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
693 struct t3c_tid_entry *t3c_tid;
694
695 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
696 if (t3c_tid->ctx && t3c_tid->client->handlers &&
697 t3c_tid->client->handlers[p->opcode]) {
698 return t3c_tid->client->handlers[p->opcode]
699 (dev, skb, t3c_tid->ctx);
700 } else {
701 struct cpl_abort_req_rss *req = cplhdr(skb);
702 struct cpl_abort_rpl *rpl;
703
704 struct sk_buff *skb =
705 alloc_skb(sizeof(struct cpl_abort_rpl), GFP_ATOMIC);
706 if (!skb) {
707 printk("do_abort_req_rss: couldn't get skb!\n");
708 goto out;
709 }
710 skb->priority = CPL_PRIORITY_DATA;
711 __skb_put(skb, sizeof(struct cpl_abort_rpl));
712 rpl = cplhdr(skb);
713 rpl->wr.wr_hi =
714 htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
715 rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
716 OPCODE_TID(rpl) =
717 htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
718 rpl->cmd = req->status;
719 cxgb3_ofld_send(dev, skb);
720out:
721 return CPL_RET_BUF_DONE;
722 }
723}
724
725static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
726{
727 struct cpl_act_establish *req = cplhdr(skb);
728 unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
729 struct t3c_tid_entry *t3c_tid;
730
731 t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
732 if (t3c_tid->ctx && t3c_tid->client->handlers &&
733 t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
734 return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
735 (dev, skb, t3c_tid->ctx);
736 } else {
737 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
 738 dev->name, CPL_ACT_ESTABLISH);
739 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
740 }
741}
742
743static int do_set_tcb_rpl(struct t3cdev *dev, struct sk_buff *skb)
744{
745 struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
746
747 if (rpl->status != CPL_ERR_NONE)
748 printk(KERN_ERR
749 "Unexpected SET_TCB_RPL status %u for tid %u\n",
750 rpl->status, GET_TID(rpl));
751 return CPL_RET_BUF_DONE;
752}
753
754static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
755{
756 struct cpl_trace_pkt *p = cplhdr(skb);
757
758 skb->protocol = 0xffff;
759 skb->dev = dev->lldev;
760 skb_pull(skb, sizeof(*p));
761 skb->mac.raw = skb->data;
762 netif_receive_skb(skb);
763 return 0;
764}
765
766static int do_term(struct t3cdev *dev, struct sk_buff *skb)
767{
768 unsigned int hwtid = ntohl(skb->priority) >> 8 & 0xfffff;
769 unsigned int opcode = G_OPCODE(ntohl(skb->csum));
770 struct t3c_tid_entry *t3c_tid;
771
772 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
773 if (t3c_tid->ctx && t3c_tid->client->handlers &&
774 t3c_tid->client->handlers[opcode]) {
775 return t3c_tid->client->handlers[opcode] (dev, skb,
776 t3c_tid->ctx);
777 } else {
778 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
779 dev->name, opcode);
780 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
781 }
782}
783
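/*
 * netevent notifier callback: propagate neighbour updates and route
 * redirects into the offload L2T state.
 */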
784static int nb_callback(struct notifier_block *self, unsigned long event,
785 void *ctx)
786{
787 switch (event) {
788 case (NETEVENT_NEIGH_UPDATE):{
789 cxgb_neigh_update((struct neighbour *)ctx);
790 break;
791 }
792 case (NETEVENT_PMTU_UPDATE):
793 break;
794 case (NETEVENT_REDIRECT):{
795 struct netevent_redirect *nr = ctx;
796 cxgb_redirect(nr->old, nr->new);
797 cxgb_neigh_update(nr->new->neighbour);
798 break;
799 }
800 default:
801 break;
802 }
803 return 0;
804}
805
806static struct notifier_block nb = {
807 .notifier_call = nb_callback
808};
809
810/*
811 * Process a received packet with an unknown/unexpected CPL opcode.
812 */
813static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
814{
815 printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
816 *skb->data);
817 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
818}
819
820/*
821 * Handlers for each CPL opcode
822 */
823static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];
824
825/*
826 * Add a new handler to the CPL dispatch table. A NULL handler may be supplied
827 * to unregister an existing handler.
828 */
829void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
830{
831 if (opcode < NUM_CPL_CMDS)
832 cpl_handlers[opcode] = h ? h : do_bad_cpl;
833 else
834 printk(KERN_ERR "T3C: handler registration for "
835 "opcode %x failed\n", opcode);
836}
837
838EXPORT_SYMBOL(t3_register_cpl_handler);
839
840/*
841 * T3CDEV's receive method.
842 */
843int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
844{
845 while (n--) {
846 struct sk_buff *skb = *skbs++;
847 unsigned int opcode = G_OPCODE(ntohl(skb->csum));
848 int ret = cpl_handlers[opcode] (dev, skb);
849
850#if VALIDATE_TID
851 if (ret & CPL_RET_UNKNOWN_TID) {
852 union opcode_tid *p = cplhdr(skb);
853
854 printk(KERN_ERR "%s: CPL message (opcode %u) had "
855 "unknown TID %u\n", dev->name, opcode,
856 G_TID(ntohl(p->opcode_tid)));
857 }
858#endif
859 if (ret & CPL_RET_BUF_DONE)
860 kfree_skb(skb);
861 }
862 return 0;
863}
864
865/*
866 * Sends an sk_buff to a T3C driver after dealing with any active network taps.
867 */
868int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
869{
870 int r;
871
872 local_bh_disable();
873 r = dev->send(dev, skb);
874 local_bh_enable();
875 return r;
876}
877
878EXPORT_SYMBOL(cxgb3_ofld_send);
879
880static int is_offloading(struct net_device *dev)
881{
882 struct adapter *adapter;
883 int i;
884
885 read_lock_bh(&adapter_list_lock);
886 list_for_each_entry(adapter, &adapter_list, adapter_list) {
887 for_each_port(adapter, i) {
888 if (dev == adapter->port[i]) {
889 read_unlock_bh(&adapter_list_lock);
890 return 1;
891 }
892 }
893 }
894 read_unlock_bh(&adapter_list_lock);
895 return 0;
896}
897
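/*
 * Update the L2T entry for a neighbour whose device is one of this
 * driver's ports.
 */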
898void cxgb_neigh_update(struct neighbour *neigh)
899{
900 struct net_device *dev = neigh->dev;
901
902 if (dev && (is_offloading(dev))) {
903 struct t3cdev *tdev = T3CDEV(dev);
904
905 BUG_ON(!tdev);
906 t3_l2t_update(tdev, neigh);
907 }
908}
909
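/*
 * Send a CPL_SET_TCB_FIELD work request that points connection @tid at
 * L2T entry @e.
 */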
910static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
911{
912 struct sk_buff *skb;
913 struct cpl_set_tcb_field *req;
914
915 skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
916 if (!skb) {
917 printk(KERN_ERR "%s: cannot allocate skb!\n", __FUNCTION__);
918 return;
919 }
920 skb->priority = CPL_PRIORITY_CONTROL;
921 req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
922 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
923 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
924 req->reply = 0;
925 req->cpu_idx = 0;
926 req->word = htons(W_TCB_L2T_IX);
927 req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
928 req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
929 tdev->send(tdev, skb);
930}
931
932void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
933{
934 struct net_device *olddev, *newdev;
935 struct tid_info *ti;
936 struct t3cdev *tdev;
937 u32 tid;
938 int update_tcb;
939 struct l2t_entry *e;
940 struct t3c_tid_entry *te;
941
942 olddev = old->neighbour->dev;
943 newdev = new->neighbour->dev;
944 if (!is_offloading(olddev))
945 return;
946 if (!is_offloading(newdev)) {
 947 printk(KERN_WARNING "%s: Redirect to non-offload "
948 "device ignored.\n", __FUNCTION__);
949 return;
950 }
951 tdev = T3CDEV(olddev);
952 BUG_ON(!tdev);
953 if (tdev != T3CDEV(newdev)) {
954 printk(KERN_WARNING "%s: Redirect to different "
955 "offload device ignored.\n", __FUNCTION__);
956 return;
957 }
958
959 /* Add new L2T entry */
960 e = t3_l2t_get(tdev, new->neighbour, newdev);
961 if (!e) {
962 printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
963 __FUNCTION__);
964 return;
965 }
966
967 /* Walk tid table and notify clients of dst change. */
968 ti = &(T3C_DATA(tdev))->tid_maps;
969 for (tid = 0; tid < ti->ntids; tid++) {
970 te = lookup_tid(ti, tid);
971 BUG_ON(!te);
972 if (te->ctx && te->client && te->client->redirect) {
973 update_tcb = te->client->redirect(te->ctx, old, new, e);
974 if (update_tcb) {
975 l2t_hold(L2DATA(tdev), e);
976 set_l2t_ix(tdev, tid, e);
977 }
978 }
979 }
980 l2t_release(L2DATA(tdev), e);
981}
982
983/*
984 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
985 * The allocated memory is cleared.
986 */
987void *cxgb_alloc_mem(unsigned long size)
988{
989 void *p = kmalloc(size, GFP_KERNEL);
990
991 if (!p)
992 p = vmalloc(size);
993 if (p)
994 memset(p, 0, size);
995 return p;
996}
997
998/*
 999 * Free memory allocated through cxgb_alloc_mem().
1000 */
1001void cxgb_free_mem(void *addr)
1002{
1003 unsigned long p = (unsigned long)addr;
1004
1005 if (p >= VMALLOC_START && p < VMALLOC_END)
1006 vfree(addr);
1007 else
1008 kfree(addr);
1009}
1010
1011/*
1012 * Allocate and initialize the TID tables. Returns 0 on success.
1013 */
1014static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
1015 unsigned int natids, unsigned int nstids,
1016 unsigned int atid_base, unsigned int stid_base)
1017{
1018 unsigned long size = ntids * sizeof(*t->tid_tab) +
1019 natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);
1020
1021 t->tid_tab = cxgb_alloc_mem(size);
1022 if (!t->tid_tab)
1023 return -ENOMEM;
1024
1025 t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
1026 t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
1027 t->ntids = ntids;
1028 t->nstids = nstids;
1029 t->stid_base = stid_base;
1030 t->sfree = NULL;
1031 t->natids = natids;
1032 t->atid_base = atid_base;
1033 t->afree = NULL;
1034 t->stids_in_use = t->atids_in_use = 0;
1035 atomic_set(&t->tids_in_use, 0);
1036 spin_lock_init(&t->stid_lock);
1037 spin_lock_init(&t->atid_lock);
1038
1039 /*
1040 * Setup the free lists for stid_tab and atid_tab.
1041 */
1042 if (nstids) {
1043 while (--nstids)
1044 t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
1045 t->sfree = t->stid_tab;
1046 }
1047 if (natids) {
1048 while (--natids)
1049 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
1050 t->afree = t->atid_tab;
1051 }
1052 return 0;
1053}
1054
1055static void free_tid_maps(struct tid_info *t)
1056{
1057 cxgb_free_mem(t->tid_tab);
1058}
1059
1060static inline void add_adapter(struct adapter *adap)
1061{
1062 write_lock_bh(&adapter_list_lock);
1063 list_add_tail(&adap->adapter_list, &adapter_list);
1064 write_unlock_bh(&adapter_list_lock);
1065}
1066
1067static inline void remove_adapter(struct adapter *adap)
1068{
1069 write_lock_bh(&adapter_list_lock);
1070 list_del(&adap->adapter_list);
1071 write_unlock_bh(&adapter_list_lock);
1072}
1073
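/*
 * Bring up offload state for an adapter: query capacities through
 * tdev->ctl(), allocate the L2 table and TID tables, and install
 * process_rx() as the receive method.
 */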
1074int cxgb3_offload_activate(struct adapter *adapter)
1075{
1076 struct t3cdev *dev = &adapter->tdev;
1077 int natids, err;
1078 struct t3c_data *t;
1079 struct tid_range stid_range, tid_range;
1080 struct mtutab mtutab;
1081 unsigned int l2t_capacity;
1082
1083 t = kcalloc(1, sizeof(*t), GFP_KERNEL);
1084 if (!t)
1085 return -ENOMEM;
1086
1087 err = -EOPNOTSUPP;
1088 if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
1089 dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
1090 dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
1091 dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
1092 dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
1093 dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
1094 goto out_free;
1095
1096 err = -ENOMEM;
1097 L2DATA(dev) = t3_init_l2t(l2t_capacity);
1098 if (!L2DATA(dev))
1099 goto out_free;
1100
1101 natids = min(tid_range.num / 2, MAX_ATIDS);
1102 err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
1103 stid_range.num, ATID_BASE, stid_range.base);
1104 if (err)
1105 goto out_free_l2t;
1106
1107 t->mtus = mtutab.mtus;
1108 t->nmtus = mtutab.size;
1109
1110 INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
1111 spin_lock_init(&t->tid_release_lock);
1112 INIT_LIST_HEAD(&t->list_node);
1113 t->dev = dev;
1114
1115 T3C_DATA(dev) = t;
1116 dev->recv = process_rx;
1117 dev->neigh_update = t3_l2t_update;
1118
1119 /* Register netevent handler once */
1120 if (list_empty(&adapter_list))
1121 register_netevent_notifier(&nb);
1122
1123 add_adapter(adapter);
1124 return 0;
1125
1126out_free_l2t:
1127 t3_free_l2t(L2DATA(dev));
1128 L2DATA(dev) = NULL;
1129out_free:
1130 kfree(t);
1131 return err;
1132}
1133
1134void cxgb3_offload_deactivate(struct adapter *adapter)
1135{
1136 struct t3cdev *tdev = &adapter->tdev;
1137 struct t3c_data *t = T3C_DATA(tdev);
1138
1139 remove_adapter(adapter);
1140 if (list_empty(&adapter_list))
1141 unregister_netevent_notifier(&nb);
1142
1143 free_tid_maps(&t->tid_maps);
1144 T3C_DATA(tdev) = NULL;
1145 t3_free_l2t(L2DATA(tdev));
1146 L2DATA(tdev) = NULL;
1147 kfree(t);
1148}
1149
1150static inline void register_tdev(struct t3cdev *tdev)
1151{
1152 static int unit;
1153
1154 mutex_lock(&cxgb3_db_lock);
1155 snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
1156 list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
1157 mutex_unlock(&cxgb3_db_lock);
1158}
1159
1160static inline void unregister_tdev(struct t3cdev *tdev)
1161{
1162 mutex_lock(&cxgb3_db_lock);
1163 list_del(&tdev->ofld_dev_list);
1164 mutex_unlock(&cxgb3_db_lock);
1165}
1166
1167void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
1168{
1169 struct t3cdev *tdev = &adapter->tdev;
1170
1171 INIT_LIST_HEAD(&tdev->ofld_dev_list);
1172
1173 cxgb3_set_dummy_ops(tdev);
1174 tdev->send = t3_offload_tx;
1175 tdev->ctl = cxgb_offload_ctl;
1176 tdev->type = adapter->params.rev == 0 ? T3A : T3B;
1177
1178 register_tdev(tdev);
1179}
1180
1181void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
1182{
1183 struct t3cdev *tdev = &adapter->tdev;
1184
1185 tdev->recv = NULL;
1186 tdev->neigh_update = NULL;
1187
1188 unregister_tdev(tdev);
1189}
1190
1191void __init cxgb3_offload_init(void)
1192{
1193 int i;
1194
1195 for (i = 0; i < NUM_CPL_CMDS; ++i)
1196 cpl_handlers[i] = do_bad_cpl;
1197
1198 t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
1199 t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
1200 t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
1201 t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
1202 t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
1203 t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
1204 t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
1205 t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
1206 t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
1207 t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
1208 t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
1209 t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
1210 t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
1211 t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
1212 t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
1213 t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
1214 t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
1215 t3_register_cpl_handler(CPL_SET_TCB_RPL, do_set_tcb_rpl);
1216 t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
1217 t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
1218 t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
1219 t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
1220 t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
1221 t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
1222}
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
new file mode 100644
index 000000000000..7b7798384ae8
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -0,0 +1,193 @@
1/*
2 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _CXGB3_OFFLOAD_H
34#define _CXGB3_OFFLOAD_H
35
36#include <linux/list.h>
37#include <linux/skbuff.h>
38
39#include "l2t.h"
40
41#include "t3cdev.h"
42#include "t3_cpl.h"
43
44struct adapter;
45
46void cxgb3_offload_init(void);
47
48void cxgb3_adapter_ofld(struct adapter *adapter);
49void cxgb3_adapter_unofld(struct adapter *adapter);
50int cxgb3_offload_activate(struct adapter *adapter);
51void cxgb3_offload_deactivate(struct adapter *adapter);
52
53void cxgb3_set_dummy_ops(struct t3cdev *dev);
54
55/*
 56 * Client registration. Users of the T3 driver must register themselves.
 57 * The T3 driver will call each client's add function for every activated
 58 * T3 adapter, passing up the t3cdev ptr. Each client fills out an array of
 59 * callback functions to process CPL messages; see the sketch further below.
60 */
61
62void cxgb3_register_client(struct cxgb3_client *client);
63void cxgb3_unregister_client(struct cxgb3_client *client);
64void cxgb3_add_clients(struct t3cdev *tdev);
65void cxgb3_remove_clients(struct t3cdev *tdev);
66
67typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev,
68 struct sk_buff *skb, void *ctx);
69
70struct cxgb3_client {
71 char *name;
72 void (*add) (struct t3cdev *);
73 void (*remove) (struct t3cdev *);
74 cxgb3_cpl_handler_func *handlers;
75 int (*redirect)(void *ctx, struct dst_entry *old,
76 struct dst_entry *new, struct l2t_entry *l2t);
77 struct list_head client_list;
78};
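/*
 * Illustrative only: a minimal client might be wired up roughly as below.
 * The handler table is indexed by CPL opcode, matching the dispatch done by
 * process_rx() in cxgb3_offload.c, and the ctx argument a handler sees is
 * whatever the client stored through cxgb3_alloc_atid()/cxgb3_insert_tid().
 * All my_* names are hypothetical.
 *
 *	static int my_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb,
 *				   void *ctx)
 *	{
 *		return CPL_RET_BUF_DONE;
 *	}
 *
 *	static void my_add(struct t3cdev *tdev)
 *	{
 *	}
 *
 *	static cxgb3_cpl_handler_func my_handlers[NUM_CPL_CMDS] = {
 *		[CPL_ACT_OPEN_RPL] = my_act_open_rpl,
 *	};
 *
 *	static struct cxgb3_client my_client = {
 *		.name	  = "my_client",
 *		.add	  = my_add,
 *		.handlers = my_handlers,
 *	};
 *
 * cxgb3_register_client(&my_client) would then typically be called from the
 * client's module init, and cxgb3_unregister_client() from module exit.
 */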
79
80/*
81 * TID allocation services.
82 */
83int cxgb3_alloc_atid(struct t3cdev *dev, struct cxgb3_client *client,
84 void *ctx);
85int cxgb3_alloc_stid(struct t3cdev *dev, struct cxgb3_client *client,
86 void *ctx);
87void *cxgb3_free_atid(struct t3cdev *dev, int atid);
88void cxgb3_free_stid(struct t3cdev *dev, int stid);
89void cxgb3_insert_tid(struct t3cdev *dev, struct cxgb3_client *client,
90 void *ctx, unsigned int tid);
91void cxgb3_queue_tid_release(struct t3cdev *dev, unsigned int tid);
92void cxgb3_remove_tid(struct t3cdev *dev, void *ctx, unsigned int tid);
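/*
 * A plausible lifecycle, assuming typical client usage (the exact sequence is
 * defined by each client): cxgb3_alloc_atid() reserves an atid and binds it to
 * a client/context before an active open is issued; once the hardware TID is
 * known the client records it with cxgb3_insert_tid() and drops the atid with
 * cxgb3_free_atid(); cxgb3_remove_tid() releases the hardware TID when the
 * connection goes away.  Listening servers use the stid calls the same way.
 */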
93
94struct t3c_tid_entry {
95 struct cxgb3_client *client;
96 void *ctx;
97};
98
99/* CPL message priority levels */
100enum {
101 CPL_PRIORITY_DATA = 0, /* data messages */
102 CPL_PRIORITY_SETUP = 1, /* connection setup messages */
103 CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */
104 CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */
105 CPL_PRIORITY_ACK = 1, /* RX ACK messages */
106 CPL_PRIORITY_CONTROL = 1 /* offload control messages */
107};
108
109/* Flags for return value of CPL message handlers */
110enum {
111 CPL_RET_BUF_DONE = 1, /* buffer processing done, buffer may be freed */
112 CPL_RET_BAD_MSG = 2, /* bad CPL message (e.g., unknown opcode) */
113 CPL_RET_UNKNOWN_TID = 4 /* unexpected unknown TID */
114};
115
116typedef int (*cpl_handler_func)(struct t3cdev *dev, struct sk_buff *skb);
117
118/*
119 * Returns a pointer to the first byte of the CPL header in an sk_buff that
120 * contains a CPL message.
121 */
122static inline void *cplhdr(struct sk_buff *skb)
123{
124 return skb->data;
125}
126
127void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h);
128
129union listen_entry {
130 struct t3c_tid_entry t3c_tid;
131 union listen_entry *next;
132};
133
134union active_open_entry {
135 struct t3c_tid_entry t3c_tid;
136 union active_open_entry *next;
137};
138
139/*
 140 * Holds the size, base address, free list start, etc. of the TID, server TID,
 141 * and active-open TID tables for an offload device.
142 * The tables themselves are allocated dynamically.
143 */
144struct tid_info {
145 struct t3c_tid_entry *tid_tab;
146 unsigned int ntids;
147 atomic_t tids_in_use;
148
149 union listen_entry *stid_tab;
150 unsigned int nstids;
151 unsigned int stid_base;
152
153 union active_open_entry *atid_tab;
154 unsigned int natids;
155 unsigned int atid_base;
156
157 /*
158 * The following members are accessed R/W so we put them in their own
159 * cache lines.
160 *
161 * XXX We could combine the atid fields above with the lock here since
 162 * atids are used once (unlike other tids). OTOH the above fields are
163 * usually in cache due to tid_tab.
164 */
165 spinlock_t atid_lock ____cacheline_aligned_in_smp;
166 union active_open_entry *afree;
167 unsigned int atids_in_use;
168
169 spinlock_t stid_lock ____cacheline_aligned;
170 union listen_entry *sfree;
171 unsigned int stids_in_use;
172};
173
174struct t3c_data {
175 struct list_head list_node;
176 struct t3cdev *dev;
177 unsigned int tx_max_chunk; /* max payload for TX_DATA */
178 unsigned int max_wrs; /* max in-flight WRs per connection */
179 unsigned int nmtus;
180 const unsigned short *mtus;
181 struct tid_info tid_maps;
182
183 struct t3c_tid_entry *tid_release_list;
184 spinlock_t tid_release_lock;
185 struct work_struct tid_release_task;
186};
187
188/*
189 * t3cdev -> t3c_data accessor
190 */
191#define T3C_DATA(dev) (*(struct t3c_data **)&(dev)->l4opt)
192
193#endif
diff --git a/drivers/net/cxgb3/firmware_exports.h b/drivers/net/cxgb3/firmware_exports.h
new file mode 100644
index 000000000000..3565f481801b
--- /dev/null
+++ b/drivers/net/cxgb3/firmware_exports.h
@@ -0,0 +1,144 @@
1/*
2 * ----------------------------------------------------------------------------
3 * >>>>>>>>>>>>>>>>>>>>>>>>>>>>> COPYRIGHT NOTICE <<<<<<<<<<<<<<<<<<<<<<<<<<<<<
4 * ----------------------------------------------------------------------------
5 * Copyright 2004 (C) Chelsio Communications, Inc. (Chelsio)
6 *
7 * Chelsio Communications, Inc. owns the sole copyright to this software.
8 * You may not make a copy, you may not derive works herefrom, and you may
9 * not distribute this work to others. Other restrictions of rights may apply
10 * as well. This is unpublished, confidential information. All rights reserved.
11 * This software contains confidential information and trade secrets of Chelsio
12 * Communications, Inc. Use, disclosure, or reproduction is prohibited without
13 * the prior express written permission of Chelsio Communications, Inc.
14 * ----------------------------------------------------------------------------
15 * >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Warranty <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
16 * ----------------------------------------------------------------------------
17 * CHELSIO MAKES NO WARRANTY OF ANY KIND WITH REGARD TO THE USE OF THIS
18 * SOFTWARE, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
20 * ----------------------------------------------------------------------------
21 *
22 * This is the firmware_exports.h header file, firmware interface defines.
23 *
24 * Written January 2005 by felix marti (felix@chelsio.com)
25 */
26#ifndef _FIRMWARE_EXPORTS_H_
27#define _FIRMWARE_EXPORTS_H_
28
29/* WR OPCODES supported by the firmware.
30 */
31#define FW_WROPCODE_FORWARD 0x01
32#define FW_WROPCODE_BYPASS 0x05
33
34#define FW_WROPCODE_TUNNEL_TX_PKT 0x03
35
36#define FW_WROPOCDE_ULPTX_DATA_SGL 0x00
37#define FW_WROPCODE_ULPTX_MEM_READ 0x02
38#define FW_WROPCODE_ULPTX_PKT 0x04
39#define FW_WROPCODE_ULPTX_INVALIDATE 0x06
40
41#define FW_WROPCODE_TUNNEL_RX_PKT 0x07
42
43#define FW_WROPCODE_OFLD_GETTCB_RPL 0x08
44#define FW_WROPCODE_OFLD_CLOSE_CON 0x09
45#define FW_WROPCODE_OFLD_TP_ABORT_CON_REQ 0x0A
46#define FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL 0x0F
47#define FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ 0x0B
48#define FW_WROPCODE_OFLD_TP_ABORT_CON_RPL 0x0C
49#define FW_WROPCODE_OFLD_TX_DATA 0x0D
50#define FW_WROPCODE_OFLD_TX_DATA_ACK 0x0E
51
52#define FW_WROPCODE_RI_RDMA_INIT 0x10
53#define FW_WROPCODE_RI_RDMA_WRITE 0x11
54#define FW_WROPCODE_RI_RDMA_READ_REQ 0x12
55#define FW_WROPCODE_RI_RDMA_READ_RESP 0x13
56#define FW_WROPCODE_RI_SEND 0x14
57#define FW_WROPCODE_RI_TERMINATE 0x15
58#define FW_WROPCODE_RI_RDMA_READ 0x16
59#define FW_WROPCODE_RI_RECEIVE 0x17
60#define FW_WROPCODE_RI_BIND_MW 0x18
61#define FW_WROPCODE_RI_FASTREGISTER_MR 0x19
62#define FW_WROPCODE_RI_LOCAL_INV 0x1A
63#define FW_WROPCODE_RI_MODIFY_QP 0x1B
64#define FW_WROPCODE_RI_BYPASS 0x1C
65
66#define FW_WROPOCDE_RSVD 0x1E
67
68#define FW_WROPCODE_SGE_EGRESSCONTEXT_RR 0x1F
69
70#define FW_WROPCODE_MNGT 0x1D
71#define FW_MNGTOPCODE_PKTSCHED_SET 0x00
72
73/* Maximum size of a WR sent from the host, limited by the SGE.
74 *
75 * Note: WRs coming from ULP or TP are limited only by the CIM.
76 */
77#define FW_WR_SIZE 128
78
79/* Maximum number of outstanding WRs sent from the host. Value must be
80 * programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by
81 * offload modules to limit the number of WRs per connection.
82 */
83#define FW_T3_WR_NUM 16
84#define FW_N3_WR_NUM 7
85
86#ifndef N3
87# define FW_WR_NUM FW_T3_WR_NUM
88#else
89# define FW_WR_NUM FW_N3_WR_NUM
90#endif
91
92/* FW_TUNNEL_NUM corresponds to the number of supported TUNNEL Queues. These
93 * queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START and must
94 * start at 'TID' (or 'uP Token') FW_TUNNEL_TID_START.
95 *
96 * Ingress Traffic (e.g. DMA completion credit) for TUNNEL Queue[i] is sent
97 * to RESP Queue[i].
98 */
99#define FW_TUNNEL_NUM 8
100#define FW_TUNNEL_SGEEC_START 8
101#define FW_TUNNEL_TID_START 65544
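/* I.e., TUNNEL Queue[i] (0 <= i < FW_TUNNEL_NUM) is expected to use SGE Egress
 * Context FW_TUNNEL_SGEEC_START + i and uP Token FW_TUNNEL_TID_START + i.
 */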
102
103/* FW_CTRL_NUM corresponds to the number of supported CTRL Queues. These queues
104 * must start at SGE Egress Context FW_CTRL_SGEEC_START and must start at 'TID'
105 * (or 'uP Token') FW_CTRL_TID_START.
106 *
107 * Ingress Traffic for CTRL Queue[i] is sent to RESP Queue[i].
108 */
109#define FW_CTRL_NUM 8
110#define FW_CTRL_SGEEC_START 65528
111#define FW_CTRL_TID_START 65536
112
113/* FW_OFLD_NUM corresponds to the number of supported OFFLOAD Queues. These
114 * queues must start at SGE Egress Context FW_OFLD_SGEEC_START.
115 *
116 * Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for
117 * OFFLOAD Queues, as the host is responsible for providing the correct TID in
118 * every WR.
119 *
120 * Ingress Traffic for OFFLOAD Queue[i] is sent to RESP Queue[i].
121 */
122#define FW_OFLD_NUM 8
123#define FW_OFLD_SGEEC_START 0
124
125/* FW_RI_NUM corresponds to the number of supported RI Queues, which must
126 * start at SGE Egress Context FW_RI_SGEEC_START and at 'TID' FW_RI_TID_START.
127 */
128#define FW_RI_NUM 1
129#define FW_RI_SGEEC_START 65527
130#define FW_RI_TID_START 65552
131
132/*
133 * The RX_PKT_TID region starts at FW_RX_PKT_TID_START.
134 */
135#define FW_RX_PKT_NUM 1
136#define FW_RX_PKT_TID_START 65553
137
138/* FW_WRC_NUM corresponds to the number of Work Request Contexts supported
139 * by the firmware.
140 */
141#define FW_WRC_NUM \
142 (65536 + FW_TUNNEL_NUM + FW_CTRL_NUM + FW_RI_NUM + FW_RX_PKT_NUM)
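/* With the queue counts above this works out to 65536 + 8 + 8 + 1 + 1 = 65554
 * work request contexts.
 */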
143
144#endif /* _FIRMWARE_EXPORTS_H_ */
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
new file mode 100644
index 000000000000..9997138a4fdc
--- /dev/null
+++ b/drivers/net/cxgb3/l2t.c
@@ -0,0 +1,450 @@
1/*
2 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <linux/if.h>
36#include <linux/if_vlan.h>
37#include <linux/jhash.h>
38#include <net/neighbour.h>
39#include "common.h"
40#include "t3cdev.h"
41#include "cxgb3_defs.h"
42#include "l2t.h"
43#include "t3_cpl.h"
44#include "firmware_exports.h"
45
46#define VLAN_NONE 0xfff
47
48/*
49 * Module locking notes: There is a RW lock protecting the L2 table as a
50 * whole plus a spinlock per L2T entry. Entry lookups and allocations happen
51 * under the protection of the table lock, individual entry changes happen
52 * while holding that entry's spinlock. The table lock nests outside the
53 * entry locks. Allocations of new entries take the table lock as writers so
54 * no other lookups can happen while allocating new entries. Entry updates
55 * take the table lock as readers so multiple entries can be updated in
56 * parallel. An L2T entry is dropped by decrementing its reference count,
57 * which can therefore happen in parallel with entry allocation, but no entry
58 * can change state or increment its ref count during allocation as both of
59 * these perform lookups.
60 */
61
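To make the locking notes above concrete, a minimal sketch (not part of the driver; example_update_entry is a hypothetical helper) of an entry update nested inside the table lock taken as a reader:

static void example_update_entry(struct l2t_data *d, struct l2t_entry *e,
				 const u8 *new_dmac)
{
	read_lock_bh(&d->lock);		/* table lock taken as a reader */
	spin_lock(&e->lock);		/* per-entry lock nests inside */
	memcpy(e->dmac, new_dmac, sizeof(e->dmac));
	spin_unlock(&e->lock);
	read_unlock_bh(&d->lock);
}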
62static inline unsigned int vlan_prio(const struct l2t_entry *e)
63{
64 return e->vlan >> 13;
65}
66
67static inline unsigned int arp_hash(u32 key, int ifindex,
68 const struct l2t_data *d)
69{
70 return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
71}
72
73static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
74{
75 neigh_hold(n);
76 if (e->neigh)
77 neigh_release(e->neigh);
78 e->neigh = n;
79}
80
81/*
82 * Set up an L2T entry and send any packets waiting in the arp queue. The
83 * supplied skb is used for the CPL_L2T_WRITE_REQ. Must be called with the
84 * entry locked.
85 */
86static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
87 struct l2t_entry *e)
88{
89 struct cpl_l2t_write_req *req;
90
91 if (!skb) {
92 skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
93 if (!skb)
94 return -ENOMEM;
95 }
96
97 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
98 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
99 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
100 req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
101 V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
102 V_L2T_W_PRIO(vlan_prio(e)));
103 memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
104 memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
105 skb->priority = CPL_PRIORITY_CONTROL;
106 cxgb3_ofld_send(dev, skb);
107 while (e->arpq_head) {
108 skb = e->arpq_head;
109 e->arpq_head = skb->next;
110 skb->next = NULL;
111 cxgb3_ofld_send(dev, skb);
112 }
113 e->arpq_tail = NULL;
114 e->state = L2T_STATE_VALID;
115
116 return 0;
117}
118
119/*
120 * Add a packet to an L2T entry's queue of packets awaiting resolution.
121 * Must be called with the entry's lock held.
122 */
123static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
124{
125 skb->next = NULL;
126 if (e->arpq_head)
127 e->arpq_tail->next = skb;
128 else
129 e->arpq_head = skb;
130 e->arpq_tail = skb;
131}
132
133int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
134 struct l2t_entry *e)
135{
136again:
137 switch (e->state) {
138 case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
139 neigh_event_send(e->neigh, NULL);
140 spin_lock_bh(&e->lock);
141 if (e->state == L2T_STATE_STALE)
142 e->state = L2T_STATE_VALID;
143 spin_unlock_bh(&e->lock);
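		/* fall through: the entry is now valid, send the packet */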
144 case L2T_STATE_VALID: /* fast-path, send the packet on */
145 return cxgb3_ofld_send(dev, skb);
146 case L2T_STATE_RESOLVING:
147 spin_lock_bh(&e->lock);
148 if (e->state != L2T_STATE_RESOLVING) {
149 /* ARP already completed */
150 spin_unlock_bh(&e->lock);
151 goto again;
152 }
153 arpq_enqueue(e, skb);
154 spin_unlock_bh(&e->lock);
155
156 /*
157 * Only the first packet added to the arpq should kick off
158 * resolution. However, because the alloc_skb below can fail,
159 * we allow each packet added to the arpq to retry resolution
160 * as a way of recovering from transient memory exhaustion.
161 * A better way would be to use a work request to retry L2T
162 * entries when there's no memory.
163 */
164 if (!neigh_event_send(e->neigh, NULL)) {
165 skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
166 GFP_ATOMIC);
167 if (!skb)
168 break;
169
170 spin_lock_bh(&e->lock);
171 if (e->arpq_head)
172 setup_l2e_send_pending(dev, skb, e);
173 else /* we lost the race */
174 __kfree_skb(skb);
175 spin_unlock_bh(&e->lock);
176 }
177 }
178 return 0;
179}
180
181EXPORT_SYMBOL(t3_l2t_send_slow);
182
183void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
184{
185again:
186 switch (e->state) {
187 case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
188 neigh_event_send(e->neigh, NULL);
189 spin_lock_bh(&e->lock);
190 if (e->state == L2T_STATE_STALE) {
191 e->state = L2T_STATE_VALID;
192 }
193 spin_unlock_bh(&e->lock);
194 return;
195 case L2T_STATE_VALID: /* fast-path, send the packet on */
196 return;
197 case L2T_STATE_RESOLVING:
198 spin_lock_bh(&e->lock);
199 if (e->state != L2T_STATE_RESOLVING) {
200 /* ARP already completed */
201 spin_unlock_bh(&e->lock);
202 goto again;
203 }
204 spin_unlock_bh(&e->lock);
205
206 /*
207 * Only the first packet added to the arpq should kick off
208 * resolution. However, because the alloc_skb below can fail,
209 * we allow each packet added to the arpq to retry resolution
210 * as a way of recovering from transient memory exhaustion.
211 * A better way would be to use a work request to retry L2T
212 * entries when there's no memory.
213 */
214 neigh_event_send(e->neigh, NULL);
215 }
216 return;
217}
218
219EXPORT_SYMBOL(t3_l2t_send_event);
220
221/*
222 * Allocate a free L2T entry. Must be called with l2t_data.lock held.
223 */
224static struct l2t_entry *alloc_l2e(struct l2t_data *d)
225{
226 struct l2t_entry *end, *e, **p;
227
228 if (!atomic_read(&d->nfree))
229 return NULL;
230
231 /* there's definitely a free entry */
232 for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
233 if (atomic_read(&e->refcnt) == 0)
234 goto found;
235
236 for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e) ;
237found:
238 d->rover = e + 1;
239 atomic_dec(&d->nfree);
240
241 /*
242 * The entry we found may be an inactive entry that is
243 * presently in the hash table. We need to remove it.
244 */
245 if (e->state != L2T_STATE_UNUSED) {
246 int hash = arp_hash(e->addr, e->ifindex, d);
247
248 for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
249 if (*p == e) {
250 *p = e->next;
251 break;
252 }
253 e->state = L2T_STATE_UNUSED;
254 }
255 return e;
256}
257
258/*
259 * Called when an L2T entry has no more users. The entry is left in the hash
260 * table since it is likely to be reused but we also bump nfree to indicate
261 * that the entry can be reallocated for a different neighbor. We also drop
262 * the existing neighbor reference in case the neighbor is going away and is
263 * waiting on our reference.
264 *
265 * Because entries can be reallocated to other neighbors once their ref count
266 * drops to 0 we need to take the entry's lock to avoid races with a new
267 * incarnation.
268 */
269void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
270{
271 spin_lock_bh(&e->lock);
272 if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
273 if (e->neigh) {
274 neigh_release(e->neigh);
275 e->neigh = NULL;
276 }
277 }
278 spin_unlock_bh(&e->lock);
279 atomic_inc(&d->nfree);
280}
281
282EXPORT_SYMBOL(t3_l2e_free);
283
284/*
285 * Update an L2T entry that was previously used for the same next hop as neigh.
286 * Must be called with softirqs disabled.
287 */
288static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
289{
290 unsigned int nud_state;
291
292 spin_lock(&e->lock); /* avoid race with t3_l2t_free */
293
294 if (neigh != e->neigh)
295 neigh_replace(e, neigh);
296 nud_state = neigh->nud_state;
297 if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
298 !(nud_state & NUD_VALID))
299 e->state = L2T_STATE_RESOLVING;
300 else if (nud_state & NUD_CONNECTED)
301 e->state = L2T_STATE_VALID;
302 else
303 e->state = L2T_STATE_STALE;
304 spin_unlock(&e->lock);
305}
306
307struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
308 struct net_device *dev)
309{
310 struct l2t_entry *e;
311 struct l2t_data *d = L2DATA(cdev);
312 u32 addr = *(u32 *) neigh->primary_key;
313 int ifidx = neigh->dev->ifindex;
314 int hash = arp_hash(addr, ifidx, d);
315 struct port_info *p = netdev_priv(dev);
316 int smt_idx = p->port_id;
317
318 write_lock_bh(&d->lock);
319 for (e = d->l2tab[hash].first; e; e = e->next)
320 if (e->addr == addr && e->ifindex == ifidx &&
321 e->smt_idx == smt_idx) {
322 l2t_hold(d, e);
323 if (atomic_read(&e->refcnt) == 1)
324 reuse_entry(e, neigh);
325 goto done;
326 }
327
328 /* Need to allocate a new entry */
329 e = alloc_l2e(d);
330 if (e) {
331 spin_lock(&e->lock); /* avoid race with t3_l2t_free */
332 e->next = d->l2tab[hash].first;
333 d->l2tab[hash].first = e;
334 e->state = L2T_STATE_RESOLVING;
335 e->addr = addr;
336 e->ifindex = ifidx;
337 e->smt_idx = smt_idx;
338 atomic_set(&e->refcnt, 1);
339 neigh_replace(e, neigh);
340 if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
341 e->vlan = VLAN_DEV_INFO(neigh->dev)->vlan_id;
342 else
343 e->vlan = VLAN_NONE;
344 spin_unlock(&e->lock);
345 }
346done:
347 write_unlock_bh(&d->lock);
348 return e;
349}
350
351EXPORT_SYMBOL(t3_l2t_get);
352
353/*
354 * Called when address resolution fails for an L2T entry to handle packets
355 * on the arpq head. If a packet specifies a failure handler it is invoked,
356 * otherwise the packet is sent to the offload device.
357 *
358 * XXX: maybe we should abandon the latter behavior and just require a failure
359 * handler.
360 */
361static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff *arpq)
362{
363 while (arpq) {
364 struct sk_buff *skb = arpq;
365 struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
366
367 arpq = skb->next;
368 skb->next = NULL;
369 if (cb->arp_failure_handler)
370 cb->arp_failure_handler(dev, skb);
371 else
372 cxgb3_ofld_send(dev, skb);
373 }
374}
375
376/*
377 * Called when the host's ARP layer makes a change to some entry that is
378 * loaded into the HW L2 table.
379 */
380void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
381{
382 struct l2t_entry *e;
383 struct sk_buff *arpq = NULL;
384 struct l2t_data *d = L2DATA(dev);
385 u32 addr = *(u32 *) neigh->primary_key;
386 int ifidx = neigh->dev->ifindex;
387 int hash = arp_hash(addr, ifidx, d);
388
389 read_lock_bh(&d->lock);
390 for (e = d->l2tab[hash].first; e; e = e->next)
391 if (e->addr == addr && e->ifindex == ifidx) {
392 spin_lock(&e->lock);
393 goto found;
394 }
395 read_unlock_bh(&d->lock);
396 return;
397
398found:
399 read_unlock(&d->lock);
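	/*
	 * BHs stay disabled here: e->lock was taken under the _bh read lock
	 * above and is released with spin_unlock_bh() below.
	 */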
400 if (atomic_read(&e->refcnt)) {
401 if (neigh != e->neigh)
402 neigh_replace(e, neigh);
403
404 if (e->state == L2T_STATE_RESOLVING) {
405 if (neigh->nud_state & NUD_FAILED) {
406 arpq = e->arpq_head;
407 e->arpq_head = e->arpq_tail = NULL;
408 } else if (neigh_is_connected(neigh))
409 setup_l2e_send_pending(dev, NULL, e);
410 } else {
411 e->state = neigh_is_connected(neigh) ?
412 L2T_STATE_VALID : L2T_STATE_STALE;
413 if (memcmp(e->dmac, neigh->ha, 6))
414 setup_l2e_send_pending(dev, NULL, e);
415 }
416 }
417 spin_unlock_bh(&e->lock);
418
419 if (arpq)
420 handle_failed_resolution(dev, arpq);
421}
422
423struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
424{
425 struct l2t_data *d;
426 int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
427
428 d = cxgb_alloc_mem(size);
429 if (!d)
430 return NULL;
431
432 d->nentries = l2t_capacity;
433 d->rover = &d->l2tab[1]; /* entry 0 is not used */
434 atomic_set(&d->nfree, l2t_capacity - 1);
435 rwlock_init(&d->lock);
436
437 for (i = 0; i < l2t_capacity; ++i) {
438 d->l2tab[i].idx = i;
439 d->l2tab[i].state = L2T_STATE_UNUSED;
440 spin_lock_init(&d->l2tab[i].lock);
441 atomic_set(&d->l2tab[i].refcnt, 0);
442 }
443 return d;
444}
445
446void t3_free_l2t(struct l2t_data *d)
447{
448 cxgb_free_mem(d);
449}
450
diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
new file mode 100644
index 000000000000..51a9c1f2c58a
--- /dev/null
+++ b/drivers/net/cxgb3/l2t.h
@@ -0,0 +1,143 @@
1/*
2 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _CHELSIO_L2T_H
34#define _CHELSIO_L2T_H
35
36#include <linux/spinlock.h>
37#include "t3cdev.h"
38#include <asm/atomic.h>
39
40enum {
41 L2T_STATE_VALID, /* entry is up to date */
42 L2T_STATE_STALE, /* entry may be used but needs revalidation */
43 L2T_STATE_RESOLVING, /* entry needs address resolution */
44 L2T_STATE_UNUSED /* entry not in use */
45};
46
47struct neighbour;
48struct sk_buff;
49
50/*
51 * Each L2T entry plays multiple roles. First of all, it keeps state for the
52 * corresponding entry of the HW L2 table and maintains a queue of offload
53 * packets awaiting address resolution. Second, it is a node of a hash table
54 * chain, where the nodes of the chain are linked together through their next
55 * pointer. Finally, each node is a bucket of a hash table, pointing to the
56 * first element in its chain through its first pointer.
57 */
58struct l2t_entry {
59 u16 state; /* entry state */
60 u16 idx; /* entry index */
61 u32 addr; /* dest IP address */
62 int ifindex; /* neighbor's net_device's ifindex */
63 u16 smt_idx; /* SMT index */
64 u16 vlan; /* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
65 struct neighbour *neigh; /* associated neighbour */
66 struct l2t_entry *first; /* start of hash chain */
67 struct l2t_entry *next; /* next l2t_entry on chain */
68 struct sk_buff *arpq_head; /* queue of packets awaiting resolution */
69 struct sk_buff *arpq_tail;
70 spinlock_t lock;
71 atomic_t refcnt; /* entry reference count */
72 u8 dmac[6]; /* neighbour's MAC address */
73};
74
75struct l2t_data {
76 unsigned int nentries; /* number of entries */
77 struct l2t_entry *rover; /* starting point for next allocation */
78 atomic_t nfree; /* number of free entries */
79 rwlock_t lock;
80 struct l2t_entry l2tab[0];
81};
82
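To illustrate the dual role described above (bucket head via first, chain via next), a minimal lookup sketch; example_bucket_lookup is hypothetical and mirrors the loop in t3_l2t_get() in l2t.c, assuming the caller holds d->lock:

static struct l2t_entry *example_bucket_lookup(struct l2t_data *d, int hash,
					       u32 addr, int ifindex)
{
	struct l2t_entry *e;

	for (e = d->l2tab[hash].first; e; e = e->next)	/* walk the chain */
		if (e->addr == addr && e->ifindex == ifindex)
			return e;
	return NULL;
}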
83typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
84 struct sk_buff * skb);
85
86/*
87 * Callback stored in an skb to handle address resolution failure.
88 */
89struct l2t_skb_cb {
90 arp_failure_handler_func arp_failure_handler;
91};
92
93#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
94
95static inline void set_arp_failure_handler(struct sk_buff *skb,
96 arp_failure_handler_func hnd)
97{
98 L2T_SKB_CB(skb)->arp_failure_handler = hnd;
99}
100
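A hypothetical example of a caller-supplied handler that simply frees the packet when resolution fails, installed on the skb before it is handed to the L2T code:

/* Hypothetical handler: drop the packet if ARP resolution fails. */
static void drop_on_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	kfree_skb(skb);
}

	...
	set_arp_failure_handler(skb, drop_on_arp_failure);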
101/*
102 * Getting to the L2 data from an offload device.
103 */
104#define L2DATA(dev) ((dev)->l2opt)
105
106#define W_TCB_L2T_IX 0
107#define S_TCB_L2T_IX 7
108#define M_TCB_L2T_IX 0x7ffULL
109#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
110
111void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
112void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
113struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
114 struct net_device *dev);
115int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
116 struct l2t_entry *e);
117void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
118struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
119void t3_free_l2t(struct l2t_data *d);
120
121int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
122
123static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
124 struct l2t_entry *e)
125{
126 if (likely(e->state == L2T_STATE_VALID))
127 return cxgb3_ofld_send(dev, skb);
128 return t3_l2t_send_slow(dev, skb, e);
129}
130
131static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e)
132{
133 if (atomic_dec_and_test(&e->refcnt))
134 t3_l2e_free(d, e);
135}
136
137static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
138{
139 if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
140 atomic_dec(&d->nfree);
141}
142
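For orientation, a sketch of how an offload user might tie these helpers together for a single packet (illustrative only; example_l2t_tx is hypothetical, and real users typically hold the entry for the lifetime of a connection):

static int example_l2t_tx(struct t3cdev *cdev, struct net_device *dev,
			  struct neighbour *neigh, struct sk_buff *skb)
{
	struct l2t_entry *e;
	int ret;

	e = t3_l2t_get(cdev, neigh, dev);	/* takes a reference */
	if (!e)
		return -ENOMEM;
	ret = l2t_send(cdev, skb, e);		/* fast path when VALID */
	l2t_release(L2DATA(cdev), e);		/* drop the reference */
	return ret;
}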
143#endif
diff --git a/drivers/net/cxgb3/mc5.c b/drivers/net/cxgb3/mc5.c
new file mode 100644
index 000000000000..44fa9eaedcc3
--- /dev/null
+++ b/drivers/net/cxgb3/mc5.c
@@ -0,0 +1,453 @@
1/*
2 * This file is part of the Chelsio T3 Ethernet driver.
3 *
4 * Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 */
11
12#include "common.h"
13#include "regs.h"
14
15enum {
16 IDT75P52100 = 4,
17 IDT75N43102 = 5
18};
19
20/* DBGI command mode */
21enum {
22 DBGI_MODE_MBUS = 0,
23 DBGI_MODE_IDT52100 = 5
24};
25
26/* IDT 75P52100 commands */
27#define IDT_CMD_READ 0
28#define IDT_CMD_WRITE 1
29#define IDT_CMD_SEARCH 2
30#define IDT_CMD_LEARN 3
31
32/* IDT LAR register address and value for 144-bit mode (low 32 bits) */
33#define IDT_LAR_ADR0 0x180006
34#define IDT_LAR_MODE144 0xffff0000
35
36/* IDT SCR and SSR addresses (low 32 bits) */
37#define IDT_SCR_ADR0 0x180000
38#define IDT_SSR0_ADR0 0x180002
39#define IDT_SSR1_ADR0 0x180004
40
41/* IDT GMR base address (low 32 bits) */
42#define IDT_GMR_BASE_ADR0 0x180020
43
44/* IDT data and mask array base addresses (low 32 bits) */
45#define IDT_DATARY_BASE_ADR0 0
46#define IDT_MSKARY_BASE_ADR0 0x80000
47
48/* IDT 75N43102 commands */
49#define IDT4_CMD_SEARCH144 3
50#define IDT4_CMD_WRITE 4
51#define IDT4_CMD_READ 5
52
53/* IDT 75N43102 SCR address (low 32 bits) */
54#define IDT4_SCR_ADR0 0x3
55
56/* IDT 75N43102 GMR base addresses (low 32 bits) */
57#define IDT4_GMR_BASE0 0x10
58#define IDT4_GMR_BASE1 0x20
59#define IDT4_GMR_BASE2 0x30
60
61/* IDT 75N43102 data and mask array base addresses (low 32 bits) */
62#define IDT4_DATARY_BASE_ADR0 0x1000000
63#define IDT4_MSKARY_BASE_ADR0 0x2000000
64
65#define MAX_WRITE_ATTEMPTS 5
66
67#define MAX_ROUTES 2048
68
69/*
70 * Issue a command to the TCAM and wait for its completion. The address and
71 * any data required by the command must have been set up by the caller.
72 */
73static int mc5_cmd_write(struct adapter *adapter, u32 cmd)
74{
75 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_CMD, cmd);
76 return t3_wait_op_done(adapter, A_MC5_DB_DBGI_RSP_STATUS,
77 F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
78}
79
80static inline void dbgi_wr_addr3(struct adapter *adapter, u32 v1, u32 v2,
81 u32 v3)
82{
83 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, v1);
84 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR1, v2);
85 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR2, v3);
86}
87
88static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
89 u32 v3)
90{
91 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA0, v1);
92 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA1, v2);
93 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3);
94}
95
96static inline void dbgi_rd_rsp3(struct adapter *adapter, u32 *v1, u32 *v2,
97 u32 *v3)
98{
99 *v1 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA0);
100 *v2 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA1);
101 *v3 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA2);
102}
103
104/*
105 * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
106 * command cmd. The data to be written must have been set up by the caller.
107 * Returns -1 on failure, 0 on success.
108 */
109static int mc5_write(struct adapter *adapter, u32 addr_lo, u32 cmd)
110{
111 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, addr_lo);
112 if (mc5_cmd_write(adapter, cmd) == 0)
113 return 0;
114 CH_ERR(adapter, "MC5 timeout writing to TCAM address 0x%x\n",
115 addr_lo);
116 return -1;
117}
118
119static int init_mask_data_array(struct mc5 *mc5, u32 mask_array_base,
120 u32 data_array_base, u32 write_cmd,
121 int addr_shift)
122{
123 unsigned int i;
124 struct adapter *adap = mc5->adapter;
125
126 /*
127 * We need the size of the TCAM data and mask arrays in terms of
128 * 72-bit entries.
129 */
130 unsigned int size72 = mc5->tcam_size;
131 unsigned int server_base = t3_read_reg(adap, A_MC5_DB_SERVER_INDEX);
132
133 if (mc5->mode == MC5_MODE_144_BIT) {
134 size72 *= 2; /* 1 144-bit entry is 2 72-bit entries */
135 server_base *= 2;
136 }
137
138 /* Clear the data array */
139 dbgi_wr_data3(adap, 0, 0, 0);
140 for (i = 0; i < size72; i++)
141 if (mc5_write(adap, data_array_base + (i << addr_shift),
142 write_cmd))
143 return -1;
144
145 /* Initialize the mask array. */
146 dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
147 for (i = 0; i < size72; i++) {
148 if (i == server_base) /* entering server or routing region */
149 t3_write_reg(adap, A_MC5_DB_DBGI_REQ_DATA0,
150 mc5->mode == MC5_MODE_144_BIT ?
151 0xfffffff9 : 0xfffffffd);
152 if (mc5_write(adap, mask_array_base + (i << addr_shift),
153 write_cmd))
154 return -1;
155 }
156 return 0;
157}
158
159static int init_idt52100(struct mc5 *mc5)
160{
161 int i;
162 struct adapter *adap = mc5->adapter;
163
164 t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
165 V_RDLAT(0x15) | V_LRNLAT(0x15) | V_SRCHLAT(0x15));
166 t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 2);
167
168 /*
169 * Use GMRs 14-15 for ELOOKUP, GMRs 12-13 for SYN lookups, and
170 * GMRs 8-9 for ACK- and AOPEN searches.
171 */
172 t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT_CMD_WRITE);
173 t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT_CMD_WRITE);
174 t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, IDT_CMD_SEARCH);
175 t3_write_reg(adap, A_MC5_DB_AOPEN_LRN_CMD, IDT_CMD_LEARN);
176 t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT_CMD_SEARCH | 0x6000);
177 t3_write_reg(adap, A_MC5_DB_SYN_LRN_CMD, IDT_CMD_LEARN);
178 t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT_CMD_SEARCH);
179 t3_write_reg(adap, A_MC5_DB_ACK_LRN_CMD, IDT_CMD_LEARN);
180 t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT_CMD_SEARCH);
181 t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT_CMD_SEARCH | 0x7000);
182 t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT_CMD_WRITE);
183 t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT_CMD_READ);
184
185 /* Set DBGI command mode for IDT TCAM. */
186 t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
187
188 /* Set up LAR */
189 dbgi_wr_data3(adap, IDT_LAR_MODE144, 0, 0);
190 if (mc5_write(adap, IDT_LAR_ADR0, IDT_CMD_WRITE))
191 goto err;
192
193 /* Set up SSRs */
194 dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0);
195 if (mc5_write(adap, IDT_SSR0_ADR0, IDT_CMD_WRITE) ||
196 mc5_write(adap, IDT_SSR1_ADR0, IDT_CMD_WRITE))
197 goto err;
198
199 /* Set up GMRs */
200 for (i = 0; i < 32; ++i) {
201 if (i >= 12 && i < 15)
202 dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
203 else if (i == 15)
204 dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
205 else
206 dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
207
208 if (mc5_write(adap, IDT_GMR_BASE_ADR0 + i, IDT_CMD_WRITE))
209 goto err;
210 }
211
212 /* Set up SCR */
213 dbgi_wr_data3(adap, 1, 0, 0);
214 if (mc5_write(adap, IDT_SCR_ADR0, IDT_CMD_WRITE))
215 goto err;
216
217 return init_mask_data_array(mc5, IDT_MSKARY_BASE_ADR0,
218 IDT_DATARY_BASE_ADR0, IDT_CMD_WRITE, 0);
219err:
220 return -EIO;
221}
222
223static int init_idt43102(struct mc5 *mc5)
224{
225 int i;
226 struct adapter *adap = mc5->adapter;
227
228 t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
229 adap->params.rev == 0 ? V_RDLAT(0xd) | V_SRCHLAT(0x11) :
230 V_RDLAT(0xd) | V_SRCHLAT(0x12));
231
232 /*
233 * Use GMRs 24-25 for ELOOKUP, GMRs 20-21 for SYN lookups, and no mask
234 * for ACK- and AOPEN searches.
235 */
236 t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT4_CMD_WRITE);
237 t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT4_CMD_WRITE);
238 t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD,
239 IDT4_CMD_SEARCH144 | 0x3800);
240 t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT4_CMD_SEARCH144);
241 t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT4_CMD_SEARCH144 | 0x3800);
242 t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x3800);
243 t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x800);
244 t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT4_CMD_WRITE);
245 t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT4_CMD_READ);
246
247 t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 3);
248
249 /* Set DBGI command mode for IDT TCAM. */
250 t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
251
252 /* Set up GMRs */
253 dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
254 for (i = 0; i < 7; ++i)
255 if (mc5_write(adap, IDT4_GMR_BASE0 + i, IDT4_CMD_WRITE))
256 goto err;
257
258 for (i = 0; i < 4; ++i)
259 if (mc5_write(adap, IDT4_GMR_BASE2 + i, IDT4_CMD_WRITE))
260 goto err;
261
262 dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
263 if (mc5_write(adap, IDT4_GMR_BASE1, IDT4_CMD_WRITE) ||
264 mc5_write(adap, IDT4_GMR_BASE1 + 1, IDT4_CMD_WRITE) ||
265 mc5_write(adap, IDT4_GMR_BASE1 + 4, IDT4_CMD_WRITE))
266 goto err;
267
268 dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
269 if (mc5_write(adap, IDT4_GMR_BASE1 + 5, IDT4_CMD_WRITE))
270 goto err;
271
272 /* Set up SCR */
273 dbgi_wr_data3(adap, 0xf0000000, 0, 0);
274 if (mc5_write(adap, IDT4_SCR_ADR0, IDT4_CMD_WRITE))
275 goto err;
276
277 return init_mask_data_array(mc5, IDT4_MSKARY_BASE_ADR0,
278 IDT4_DATARY_BASE_ADR0, IDT4_CMD_WRITE, 1);
279err:
280 return -EIO;
281}
282
283/* Put MC5 in DBGI mode. */
284static inline void mc5_dbgi_mode_enable(const struct mc5 *mc5)
285{
286 t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
287 V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_DBGIEN);
288}
289
290/* Put MC5 in M-Bus mode. */
291static void mc5_dbgi_mode_disable(const struct mc5 *mc5)
292{
293 t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
294 V_TMMODE(mc5->mode == MC5_MODE_72_BIT) |
295 V_COMPEN(mc5->mode == MC5_MODE_72_BIT) |
296 V_PRTYEN(mc5->parity_enabled) | F_MBUSEN);
297}
298
299/*
300 * Initialization that requires the OS and protocol layers to already
301 * be initialized goes here.
302 */
303int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
304 unsigned int nroutes)
305{
306 u32 cfg;
307 int err;
308 unsigned int tcam_size = mc5->tcam_size;
309 struct adapter *adap = mc5->adapter;
310
311 if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size)
312 return -EINVAL;
313
314 /* Reset the TCAM */
315 cfg = t3_read_reg(adap, A_MC5_DB_CONFIG) & ~F_TMMODE;
316 cfg |= V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_TMRST;
317 t3_write_reg(adap, A_MC5_DB_CONFIG, cfg);
318 if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) {
319 CH_ERR(adap, "TCAM reset timed out\n");
320 return -1;
321 }
322
323 t3_write_reg(adap, A_MC5_DB_ROUTING_TABLE_INDEX, tcam_size - nroutes);
324 t3_write_reg(adap, A_MC5_DB_FILTER_TABLE,
325 tcam_size - nroutes - nfilters);
326 t3_write_reg(adap, A_MC5_DB_SERVER_INDEX,
327 tcam_size - nroutes - nfilters - nservers);
328
329 mc5->parity_enabled = 1;
330
331 /* All the TCAM addresses we access have only the low 32 bits non-zero */
332 t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0);
333 t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0);
334
335 mc5_dbgi_mode_enable(mc5);
336
337 switch (mc5->part_type) {
338 case IDT75P52100:
339 err = init_idt52100(mc5);
340 break;
341 case IDT75N43102:
342 err = init_idt43102(mc5);
343 break;
344 default:
345 CH_ERR(adap, "Unsupported TCAM type %d\n", mc5->part_type);
346 err = -EINVAL;
347 break;
348 }
349
350 mc5_dbgi_mode_disable(mc5);
351 return err;
352}
353
354/*
355 * t3_read_mc5_range - dump a part of the memory managed by MC5
356 * @mc5: the MC5 handle
357 * @start: the start address for the dump
358 * @n: number of 72-bit words to read
359 * @buf: result buffer
360 *
361 * Read n 72-bit words from MC5 memory starting at the given location.
362 */
363int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start,
364 unsigned int n, u32 *buf)
365{
366 u32 read_cmd;
367 int err = 0;
368 struct adapter *adap = mc5->adapter;
369
370 if (mc5->part_type == IDT75P52100)
371 read_cmd = IDT_CMD_READ;
372 else if (mc5->part_type == IDT75N43102)
373 read_cmd = IDT4_CMD_READ;
374 else
375 return -EINVAL;
376
377 mc5_dbgi_mode_enable(mc5);
378
379 while (n--) {
380 t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR0, start++);
381 if (mc5_cmd_write(adap, read_cmd)) {
382 err = -EIO;
383 break;
384 }
385 dbgi_rd_rsp3(adap, buf + 2, buf + 1, buf);
386 buf += 3;
387 }
388
389 mc5_dbgi_mode_disable(mc5);
390 return err;
391}
392
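Each 72-bit word comes back as three 32-bit words, so a caller reading n words needs a buffer of 3 * n u32s; for example (example_dump_tcam_start is hypothetical):

static int example_dump_tcam_start(const struct mc5 *mc5)
{
	u32 buf[4 * 3];		/* four 72-bit words, three u32s each */

	return t3_read_mc5_range(mc5, 0, 4, buf);
}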
393#define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR)
394
395/*
396 * MC5 interrupt handler
397 */
398void t3_mc5_intr_handler(struct mc5 *mc5)
399{
400 struct adapter *adap = mc5->adapter;
401 u32 cause = t3_read_reg(adap, A_MC5_DB_INT_CAUSE);
402
403 if ((cause & F_PARITYERR) && mc5->parity_enabled) {
404 CH_ALERT(adap, "MC5 parity error\n");
405 mc5->stats.parity_err++;
406 }
407
408 if (cause & F_REQQPARERR) {
409 CH_ALERT(adap, "MC5 request queue parity error\n");
410 mc5->stats.reqq_parity_err++;
411 }
412
413 if (cause & F_DISPQPARERR) {
414 CH_ALERT(adap, "MC5 dispatch queue parity error\n");
415 mc5->stats.dispq_parity_err++;
416 }
417
418 if (cause & F_ACTRGNFULL)
419 mc5->stats.active_rgn_full++;
420 if (cause & F_NFASRCHFAIL)
421 mc5->stats.nfa_srch_err++;
422 if (cause & F_UNKNOWNCMD)
423 mc5->stats.unknown_cmd++;
424 if (cause & F_DELACTEMPTY)
425 mc5->stats.del_act_empty++;
426 if (cause & MC5_INT_FATAL)
427 t3_fatal_err(adap);
428
429 t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause);
430}
431
432void __devinit t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode)
433{
434#define K * 1024
435
436 static unsigned int tcam_part_size[] = { /* in K 72-bit entries */
437 64 K, 128 K, 256 K, 32 K
438 };
439
440#undef K
441
442 u32 cfg = t3_read_reg(adapter, A_MC5_DB_CONFIG);
443
444 mc5->adapter = adapter;
445 mc5->mode = (unsigned char)mode;
446 mc5->part_type = (unsigned char)G_TMTYPE(cfg);
447 if (cfg & F_TMTYPEHI)
448 mc5->part_type |= 4;
449
450 mc5->tcam_size = tcam_part_size[G_TMPARTSIZE(cfg)];
451 if (mode == MC5_MODE_144_BIT)
452 mc5->tcam_size /= 2;
453}
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
new file mode 100644
index 000000000000..b56c5f52bcdc
--- /dev/null
+++ b/drivers/net/cxgb3/regs.h
@@ -0,0 +1,2195 @@
1#define A_SG_CONTROL 0x0
2
3#define S_DROPPKT 20
4#define V_DROPPKT(x) ((x) << S_DROPPKT)
5#define F_DROPPKT V_DROPPKT(1U)
6
7#define S_EGRGENCTRL 19
8#define V_EGRGENCTRL(x) ((x) << S_EGRGENCTRL)
9#define F_EGRGENCTRL V_EGRGENCTRL(1U)
10
11#define S_USERSPACESIZE 14
12#define M_USERSPACESIZE 0x1f
13#define V_USERSPACESIZE(x) ((x) << S_USERSPACESIZE)
14
15#define S_HOSTPAGESIZE 11
16#define M_HOSTPAGESIZE 0x7
17#define V_HOSTPAGESIZE(x) ((x) << S_HOSTPAGESIZE)
18
19#define S_FLMODE 9
20#define V_FLMODE(x) ((x) << S_FLMODE)
21#define F_FLMODE V_FLMODE(1U)
22
23#define S_PKTSHIFT 6
24#define M_PKTSHIFT 0x7
25#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT)
26
27#define S_ONEINTMULTQ 5
28#define V_ONEINTMULTQ(x) ((x) << S_ONEINTMULTQ)
29#define F_ONEINTMULTQ V_ONEINTMULTQ(1U)
30
31#define S_BIGENDIANINGRESS 2
32#define V_BIGENDIANINGRESS(x) ((x) << S_BIGENDIANINGRESS)
33#define F_BIGENDIANINGRESS V_BIGENDIANINGRESS(1U)
34
35#define S_ISCSICOALESCING 1
36#define V_ISCSICOALESCING(x) ((x) << S_ISCSICOALESCING)
37#define F_ISCSICOALESCING V_ISCSICOALESCING(1U)
38
39#define S_GLOBALENABLE 0
40#define V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE)
41#define F_GLOBALENABLE V_GLOBALENABLE(1U)
42
43#define S_AVOIDCQOVFL 24
44#define V_AVOIDCQOVFL(x) ((x) << S_AVOIDCQOVFL)
45#define F_AVOIDCQOVFL V_AVOIDCQOVFL(1U)
46
47#define S_OPTONEINTMULTQ 23
48#define V_OPTONEINTMULTQ(x) ((x) << S_OPTONEINTMULTQ)
49#define F_OPTONEINTMULTQ V_OPTONEINTMULTQ(1U)
50
51#define S_CQCRDTCTRL 22
52#define V_CQCRDTCTRL(x) ((x) << S_CQCRDTCTRL)
53#define F_CQCRDTCTRL V_CQCRDTCTRL(1U)
54
55#define A_SG_KDOORBELL 0x4
56
57#define S_SELEGRCNTX 31
58#define V_SELEGRCNTX(x) ((x) << S_SELEGRCNTX)
59#define F_SELEGRCNTX V_SELEGRCNTX(1U)
60
61#define S_EGRCNTX 0
62#define M_EGRCNTX 0xffff
63#define V_EGRCNTX(x) ((x) << S_EGRCNTX)
64
65#define A_SG_GTS 0x8
66
67#define S_RSPQ 29
68#define M_RSPQ 0x7
69#define V_RSPQ(x) ((x) << S_RSPQ)
70#define G_RSPQ(x) (((x) >> S_RSPQ) & M_RSPQ)
71
72#define S_NEWTIMER 16
73#define M_NEWTIMER 0x1fff
74#define V_NEWTIMER(x) ((x) << S_NEWTIMER)
75
76#define S_NEWINDEX 0
77#define M_NEWINDEX 0xffff
78#define V_NEWINDEX(x) ((x) << S_NEWINDEX)
79
80#define A_SG_CONTEXT_CMD 0xc
81
82#define S_CONTEXT_CMD_OPCODE 28
83#define M_CONTEXT_CMD_OPCODE 0xf
84#define V_CONTEXT_CMD_OPCODE(x) ((x) << S_CONTEXT_CMD_OPCODE)
85
86#define S_CONTEXT_CMD_BUSY 27
87#define V_CONTEXT_CMD_BUSY(x) ((x) << S_CONTEXT_CMD_BUSY)
88#define F_CONTEXT_CMD_BUSY V_CONTEXT_CMD_BUSY(1U)
89
90#define S_CQ_CREDIT 20
91
92#define M_CQ_CREDIT 0x7f
93
94#define V_CQ_CREDIT(x) ((x) << S_CQ_CREDIT)
95
96#define G_CQ_CREDIT(x) (((x) >> S_CQ_CREDIT) & M_CQ_CREDIT)
97
98#define S_CQ 19
99
100#define V_CQ(x) ((x) << S_CQ)
101#define F_CQ V_CQ(1U)
102
103#define S_RESPONSEQ 18
104#define V_RESPONSEQ(x) ((x) << S_RESPONSEQ)
105#define F_RESPONSEQ V_RESPONSEQ(1U)
106
107#define S_EGRESS 17
108#define V_EGRESS(x) ((x) << S_EGRESS)
109#define F_EGRESS V_EGRESS(1U)
110
111#define S_FREELIST 16
112#define V_FREELIST(x) ((x) << S_FREELIST)
113#define F_FREELIST V_FREELIST(1U)
114
115#define S_CONTEXT 0
116#define M_CONTEXT 0xffff
117#define V_CONTEXT(x) ((x) << S_CONTEXT)
118
119#define G_CONTEXT(x) (((x) >> S_CONTEXT) & M_CONTEXT)
120
121#define A_SG_CONTEXT_DATA0 0x10
122
123#define A_SG_CONTEXT_DATA1 0x14
124
125#define A_SG_CONTEXT_DATA2 0x18
126
127#define A_SG_CONTEXT_DATA3 0x1c
128
129#define A_SG_CONTEXT_MASK0 0x20
130
131#define A_SG_CONTEXT_MASK1 0x24
132
133#define A_SG_CONTEXT_MASK2 0x28
134
135#define A_SG_CONTEXT_MASK3 0x2c
136
137#define A_SG_RSPQ_CREDIT_RETURN 0x30
138
139#define S_CREDITS 0
140#define M_CREDITS 0xffff
141#define V_CREDITS(x) ((x) << S_CREDITS)
142
143#define A_SG_DATA_INTR 0x34
144
145#define S_ERRINTR 31
146#define V_ERRINTR(x) ((x) << S_ERRINTR)
147#define F_ERRINTR V_ERRINTR(1U)
148
149#define A_SG_HI_DRB_HI_THRSH 0x38
150
151#define A_SG_HI_DRB_LO_THRSH 0x3c
152
153#define A_SG_LO_DRB_HI_THRSH 0x40
154
155#define A_SG_LO_DRB_LO_THRSH 0x44
156
157#define A_SG_RSPQ_FL_STATUS 0x4c
158
159#define S_RSPQ0DISABLED 8
160
161#define A_SG_EGR_RCQ_DRB_THRSH 0x54
162
163#define S_HIRCQDRBTHRSH 16
164#define M_HIRCQDRBTHRSH 0x7ff
165#define V_HIRCQDRBTHRSH(x) ((x) << S_HIRCQDRBTHRSH)
166
167#define S_LORCQDRBTHRSH 0
168#define M_LORCQDRBTHRSH 0x7ff
169#define V_LORCQDRBTHRSH(x) ((x) << S_LORCQDRBTHRSH)
170
171#define A_SG_EGR_CNTX_BADDR 0x58
172
173#define A_SG_INT_CAUSE 0x5c
174
175#define S_RSPQDISABLED 3
176#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
177#define F_RSPQDISABLED V_RSPQDISABLED(1U)
178
179#define S_RSPQCREDITOVERFOW 2
180#define V_RSPQCREDITOVERFOW(x) ((x) << S_RSPQCREDITOVERFOW)
181#define F_RSPQCREDITOVERFOW V_RSPQCREDITOVERFOW(1U)
182
183#define A_SG_INT_ENABLE 0x60
184
185#define A_SG_CMDQ_CREDIT_TH 0x64
186
187#define S_TIMEOUT 8
188#define M_TIMEOUT 0xffffff
189#define V_TIMEOUT(x) ((x) << S_TIMEOUT)
190
191#define S_THRESHOLD 0
192#define M_THRESHOLD 0xff
193#define V_THRESHOLD(x) ((x) << S_THRESHOLD)
194
195#define A_SG_TIMER_TICK 0x68
196
197#define A_SG_CQ_CONTEXT_BADDR 0x6c
198
199#define A_SG_OCO_BASE 0x70
200
201#define S_BASE1 16
202#define M_BASE1 0xffff
203#define V_BASE1(x) ((x) << S_BASE1)
204
205#define A_SG_DRB_PRI_THRESH 0x74
206
207#define A_PCIX_INT_ENABLE 0x80
208
209#define S_MSIXPARERR 22
210#define M_MSIXPARERR 0x7
211
212#define V_MSIXPARERR(x) ((x) << S_MSIXPARERR)
213
214#define S_CFPARERR 18
215#define M_CFPARERR 0xf
216
217#define V_CFPARERR(x) ((x) << S_CFPARERR)
218
219#define S_RFPARERR 14
220#define M_RFPARERR 0xf
221
222#define V_RFPARERR(x) ((x) << S_RFPARERR)
223
224#define S_WFPARERR 12
225#define M_WFPARERR 0x3
226
227#define V_WFPARERR(x) ((x) << S_WFPARERR)
228
229#define S_PIOPARERR 11
230#define V_PIOPARERR(x) ((x) << S_PIOPARERR)
231#define F_PIOPARERR V_PIOPARERR(1U)
232
233#define S_DETUNCECCERR 10
234#define V_DETUNCECCERR(x) ((x) << S_DETUNCECCERR)
235#define F_DETUNCECCERR V_DETUNCECCERR(1U)
236
237#define S_DETCORECCERR 9
238#define V_DETCORECCERR(x) ((x) << S_DETCORECCERR)
239#define F_DETCORECCERR V_DETCORECCERR(1U)
240
241#define S_RCVSPLCMPERR 8
242#define V_RCVSPLCMPERR(x) ((x) << S_RCVSPLCMPERR)
243#define F_RCVSPLCMPERR V_RCVSPLCMPERR(1U)
244
245#define S_UNXSPLCMP 7
246#define V_UNXSPLCMP(x) ((x) << S_UNXSPLCMP)
247#define F_UNXSPLCMP V_UNXSPLCMP(1U)
248
249#define S_SPLCMPDIS 6
250#define V_SPLCMPDIS(x) ((x) << S_SPLCMPDIS)
251#define F_SPLCMPDIS V_SPLCMPDIS(1U)
252
253#define S_DETPARERR 5
254#define V_DETPARERR(x) ((x) << S_DETPARERR)
255#define F_DETPARERR V_DETPARERR(1U)
256
257#define S_SIGSYSERR 4
258#define V_SIGSYSERR(x) ((x) << S_SIGSYSERR)
259#define F_SIGSYSERR V_SIGSYSERR(1U)
260
261#define S_RCVMSTABT 3
262#define V_RCVMSTABT(x) ((x) << S_RCVMSTABT)
263#define F_RCVMSTABT V_RCVMSTABT(1U)
264
265#define S_RCVTARABT 2
266#define V_RCVTARABT(x) ((x) << S_RCVTARABT)
267#define F_RCVTARABT V_RCVTARABT(1U)
268
269#define S_SIGTARABT 1
270#define V_SIGTARABT(x) ((x) << S_SIGTARABT)
271#define F_SIGTARABT V_SIGTARABT(1U)
272
273#define S_MSTDETPARERR 0
274#define V_MSTDETPARERR(x) ((x) << S_MSTDETPARERR)
275#define F_MSTDETPARERR V_MSTDETPARERR(1U)
276
277#define A_PCIX_INT_CAUSE 0x84
278
279#define A_PCIX_CFG 0x88
280
281#define S_CLIDECEN 18
282#define V_CLIDECEN(x) ((x) << S_CLIDECEN)
283#define F_CLIDECEN V_CLIDECEN(1U)
284
285#define A_PCIX_MODE 0x8c
286
287#define S_PCLKRANGE 6
288#define M_PCLKRANGE 0x3
289#define V_PCLKRANGE(x) ((x) << S_PCLKRANGE)
290#define G_PCLKRANGE(x) (((x) >> S_PCLKRANGE) & M_PCLKRANGE)
291
292#define S_PCIXINITPAT 2
293#define M_PCIXINITPAT 0xf
294#define V_PCIXINITPAT(x) ((x) << S_PCIXINITPAT)
295#define G_PCIXINITPAT(x) (((x) >> S_PCIXINITPAT) & M_PCIXINITPAT)
296
297#define S_64BIT 0
298#define V_64BIT(x) ((x) << S_64BIT)
299#define F_64BIT V_64BIT(1U)
300
301#define A_PCIE_INT_ENABLE 0x80
302
303#define S_BISTERR 15
304#define M_BISTERR 0xff
305
306#define V_BISTERR(x) ((x) << S_BISTERR)
307
308#define S_PCIE_MSIXPARERR 12
309#define M_PCIE_MSIXPARERR 0x7
310
311#define V_PCIE_MSIXPARERR(x) ((x) << S_PCIE_MSIXPARERR)
312
313#define S_PCIE_CFPARERR 11
314#define V_PCIE_CFPARERR(x) ((x) << S_PCIE_CFPARERR)
315#define F_PCIE_CFPARERR V_PCIE_CFPARERR(1U)
316
317#define S_PCIE_RFPARERR 10
318#define V_PCIE_RFPARERR(x) ((x) << S_PCIE_RFPARERR)
319#define F_PCIE_RFPARERR V_PCIE_RFPARERR(1U)
320
321#define S_PCIE_WFPARERR 9
322#define V_PCIE_WFPARERR(x) ((x) << S_PCIE_WFPARERR)
323#define F_PCIE_WFPARERR V_PCIE_WFPARERR(1U)
324
325#define S_PCIE_PIOPARERR 8
326#define V_PCIE_PIOPARERR(x) ((x) << S_PCIE_PIOPARERR)
327#define F_PCIE_PIOPARERR V_PCIE_PIOPARERR(1U)
328
329#define S_UNXSPLCPLERRC 7
330#define V_UNXSPLCPLERRC(x) ((x) << S_UNXSPLCPLERRC)
331#define F_UNXSPLCPLERRC V_UNXSPLCPLERRC(1U)
332
333#define S_UNXSPLCPLERRR 6
334#define V_UNXSPLCPLERRR(x) ((x) << S_UNXSPLCPLERRR)
335#define F_UNXSPLCPLERRR V_UNXSPLCPLERRR(1U)
336
337#define S_PEXERR 0
338#define V_PEXERR(x) ((x) << S_PEXERR)
339#define F_PEXERR V_PEXERR(1U)
340
341#define A_PCIE_INT_CAUSE 0x84
342
343#define A_PCIE_CFG 0x88
344
345#define S_PCIE_CLIDECEN 16
346#define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
347#define F_PCIE_CLIDECEN V_PCIE_CLIDECEN(1U)
348
349#define S_CRSTWRMMODE 0
350#define V_CRSTWRMMODE(x) ((x) << S_CRSTWRMMODE)
351#define F_CRSTWRMMODE V_CRSTWRMMODE(1U)
352
353#define A_PCIE_MODE 0x8c
354
355#define S_NUMFSTTRNSEQRX 10
356#define M_NUMFSTTRNSEQRX 0xff
357#define V_NUMFSTTRNSEQRX(x) ((x) << S_NUMFSTTRNSEQRX)
358#define G_NUMFSTTRNSEQRX(x) (((x) >> S_NUMFSTTRNSEQRX) & M_NUMFSTTRNSEQRX)
359
360#define A_PCIE_PEX_CTRL0 0x98
361
362#define S_NUMFSTTRNSEQ 22
363#define M_NUMFSTTRNSEQ 0xff
364#define V_NUMFSTTRNSEQ(x) ((x) << S_NUMFSTTRNSEQ)
365#define G_NUMFSTTRNSEQ(x) (((x) >> S_NUMFSTTRNSEQ) & M_NUMFSTTRNSEQ)
366
367#define S_REPLAYLMT 2
368#define M_REPLAYLMT 0xfffff
369
370#define V_REPLAYLMT(x) ((x) << S_REPLAYLMT)
371
372#define A_PCIE_PEX_CTRL1 0x9c
373
374#define S_T3A_ACKLAT 0
375#define M_T3A_ACKLAT 0x7ff
376
377#define V_T3A_ACKLAT(x) ((x) << S_T3A_ACKLAT)
378
379#define S_ACKLAT 0
380#define M_ACKLAT 0x1fff
381
382#define V_ACKLAT(x) ((x) << S_ACKLAT)
383
384#define A_PCIE_PEX_ERR 0xa4
385
386#define A_T3DBG_GPIO_EN 0xd0
387
388#define S_GPIO11_OEN 27
389#define V_GPIO11_OEN(x) ((x) << S_GPIO11_OEN)
390#define F_GPIO11_OEN V_GPIO11_OEN(1U)
391
392#define S_GPIO10_OEN 26
393#define V_GPIO10_OEN(x) ((x) << S_GPIO10_OEN)
394#define F_GPIO10_OEN V_GPIO10_OEN(1U)
395
396#define S_GPIO7_OEN 23
397#define V_GPIO7_OEN(x) ((x) << S_GPIO7_OEN)
398#define F_GPIO7_OEN V_GPIO7_OEN(1U)
399
400#define S_GPIO6_OEN 22
401#define V_GPIO6_OEN(x) ((x) << S_GPIO6_OEN)
402#define F_GPIO6_OEN V_GPIO6_OEN(1U)
403
404#define S_GPIO5_OEN 21
405#define V_GPIO5_OEN(x) ((x) << S_GPIO5_OEN)
406#define F_GPIO5_OEN V_GPIO5_OEN(1U)
407
408#define S_GPIO4_OEN 20
409#define V_GPIO4_OEN(x) ((x) << S_GPIO4_OEN)
410#define F_GPIO4_OEN V_GPIO4_OEN(1U)
411
412#define S_GPIO2_OEN 18
413#define V_GPIO2_OEN(x) ((x) << S_GPIO2_OEN)
414#define F_GPIO2_OEN V_GPIO2_OEN(1U)
415
416#define S_GPIO1_OEN 17
417#define V_GPIO1_OEN(x) ((x) << S_GPIO1_OEN)
418#define F_GPIO1_OEN V_GPIO1_OEN(1U)
419
420#define S_GPIO0_OEN 16
421#define V_GPIO0_OEN(x) ((x) << S_GPIO0_OEN)
422#define F_GPIO0_OEN V_GPIO0_OEN(1U)
423
424#define S_GPIO10_OUT_VAL 10
425#define V_GPIO10_OUT_VAL(x) ((x) << S_GPIO10_OUT_VAL)
426#define F_GPIO10_OUT_VAL V_GPIO10_OUT_VAL(1U)
427
428#define S_GPIO7_OUT_VAL 7
429#define V_GPIO7_OUT_VAL(x) ((x) << S_GPIO7_OUT_VAL)
430#define F_GPIO7_OUT_VAL V_GPIO7_OUT_VAL(1U)
431
432#define S_GPIO6_OUT_VAL 6
433#define V_GPIO6_OUT_VAL(x) ((x) << S_GPIO6_OUT_VAL)
434#define F_GPIO6_OUT_VAL V_GPIO6_OUT_VAL(1U)
435
436#define S_GPIO5_OUT_VAL 5
437#define V_GPIO5_OUT_VAL(x) ((x) << S_GPIO5_OUT_VAL)
438#define F_GPIO5_OUT_VAL V_GPIO5_OUT_VAL(1U)
439
440#define S_GPIO4_OUT_VAL 4
441#define V_GPIO4_OUT_VAL(x) ((x) << S_GPIO4_OUT_VAL)
442#define F_GPIO4_OUT_VAL V_GPIO4_OUT_VAL(1U)
443
444#define S_GPIO2_OUT_VAL 2
445#define V_GPIO2_OUT_VAL(x) ((x) << S_GPIO2_OUT_VAL)
446#define F_GPIO2_OUT_VAL V_GPIO2_OUT_VAL(1U)
447
448#define S_GPIO1_OUT_VAL 1
449#define V_GPIO1_OUT_VAL(x) ((x) << S_GPIO1_OUT_VAL)
450#define F_GPIO1_OUT_VAL V_GPIO1_OUT_VAL(1U)
451
452#define S_GPIO0_OUT_VAL 0
453#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL)
454#define F_GPIO0_OUT_VAL V_GPIO0_OUT_VAL(1U)
455
456#define A_T3DBG_INT_ENABLE 0xd8
457
458#define S_GPIO11 11
459#define V_GPIO11(x) ((x) << S_GPIO11)
460#define F_GPIO11 V_GPIO11(1U)
461
462#define S_GPIO10 10
463#define V_GPIO10(x) ((x) << S_GPIO10)
464#define F_GPIO10 V_GPIO10(1U)
465
466#define S_GPIO7 7
467#define V_GPIO7(x) ((x) << S_GPIO7)
468#define F_GPIO7 V_GPIO7(1U)
469
470#define S_GPIO6 6
471#define V_GPIO6(x) ((x) << S_GPIO6)
472#define F_GPIO6 V_GPIO6(1U)
473
474#define S_GPIO5 5
475#define V_GPIO5(x) ((x) << S_GPIO5)
476#define F_GPIO5 V_GPIO5(1U)
477
478#define S_GPIO4 4
479#define V_GPIO4(x) ((x) << S_GPIO4)
480#define F_GPIO4 V_GPIO4(1U)
481
482#define S_GPIO3 3
483#define V_GPIO3(x) ((x) << S_GPIO3)
484#define F_GPIO3 V_GPIO3(1U)
485
486#define S_GPIO2 2
487#define V_GPIO2(x) ((x) << S_GPIO2)
488#define F_GPIO2 V_GPIO2(1U)
489
490#define S_GPIO1 1
491#define V_GPIO1(x) ((x) << S_GPIO1)
492#define F_GPIO1 V_GPIO1(1U)
493
494#define S_GPIO0 0
495#define V_GPIO0(x) ((x) << S_GPIO0)
496#define F_GPIO0 V_GPIO0(1U)
497
498#define A_T3DBG_INT_CAUSE 0xdc
499
500#define A_T3DBG_GPIO_ACT_LOW 0xf0
501
502#define MC7_PMRX_BASE_ADDR 0x100
503
504#define A_MC7_CFG 0x100
505
506#define S_IFEN 13
507#define V_IFEN(x) ((x) << S_IFEN)
508#define F_IFEN V_IFEN(1U)
509
510#define S_TERM150 11
511#define V_TERM150(x) ((x) << S_TERM150)
512#define F_TERM150 V_TERM150(1U)
513
514#define S_SLOW 10
515#define V_SLOW(x) ((x) << S_SLOW)
516#define F_SLOW V_SLOW(1U)
517
518#define S_WIDTH 8
519#define M_WIDTH 0x3
520#define V_WIDTH(x) ((x) << S_WIDTH)
521#define G_WIDTH(x) (((x) >> S_WIDTH) & M_WIDTH)
522
523#define S_BKS 6
524#define V_BKS(x) ((x) << S_BKS)
525#define F_BKS V_BKS(1U)
526
527#define S_ORG 5
528#define V_ORG(x) ((x) << S_ORG)
529#define F_ORG V_ORG(1U)
530
531#define S_DEN 2
532#define M_DEN 0x7
533#define V_DEN(x) ((x) << S_DEN)
534#define G_DEN(x) (((x) >> S_DEN) & M_DEN)
535
536#define S_RDY 1
537#define V_RDY(x) ((x) << S_RDY)
538#define F_RDY V_RDY(1U)
539
540#define S_CLKEN 0
541#define V_CLKEN(x) ((x) << S_CLKEN)
542#define F_CLKEN V_CLKEN(1U)
543
544#define A_MC7_MODE 0x104
545
546#define S_BUSY 31
547#define V_BUSY(x) ((x) << S_BUSY)
548#define F_BUSY V_BUSY(1U)
549
550#define S_BUSY 31
551#define V_BUSY(x) ((x) << S_BUSY)
552#define F_BUSY V_BUSY(1U)
553
554#define A_MC7_EXT_MODE1 0x108
555
556#define A_MC7_EXT_MODE2 0x10c
557
558#define A_MC7_EXT_MODE3 0x110
559
560#define A_MC7_PRE 0x114
561
562#define A_MC7_REF 0x118
563
564#define S_PREREFDIV 1
565#define M_PREREFDIV 0x3fff
566#define V_PREREFDIV(x) ((x) << S_PREREFDIV)
567
568#define S_PERREFEN 0
569#define V_PERREFEN(x) ((x) << S_PERREFEN)
570#define F_PERREFEN V_PERREFEN(1U)
571
572#define A_MC7_DLL 0x11c
573
574#define S_DLLENB 1
575#define V_DLLENB(x) ((x) << S_DLLENB)
576#define F_DLLENB V_DLLENB(1U)
577
578#define S_DLLRST 0
579#define V_DLLRST(x) ((x) << S_DLLRST)
580#define F_DLLRST V_DLLRST(1U)
581
582#define A_MC7_PARM 0x120
583
584#define S_ACTTOPREDLY 26
585#define M_ACTTOPREDLY 0xf
586#define V_ACTTOPREDLY(x) ((x) << S_ACTTOPREDLY)
587
588#define S_ACTTORDWRDLY 23
589#define M_ACTTORDWRDLY 0x7
590#define V_ACTTORDWRDLY(x) ((x) << S_ACTTORDWRDLY)
591
592#define S_PRECYC 20
593#define M_PRECYC 0x7
594#define V_PRECYC(x) ((x) << S_PRECYC)
595
596#define S_REFCYC 13
597#define M_REFCYC 0x7f
598#define V_REFCYC(x) ((x) << S_REFCYC)
599
600#define S_BKCYC 8
601#define M_BKCYC 0x1f
602#define V_BKCYC(x) ((x) << S_BKCYC)
603
604#define S_WRTORDDLY 4
605#define M_WRTORDDLY 0xf
606#define V_WRTORDDLY(x) ((x) << S_WRTORDDLY)
607
608#define S_RDTOWRDLY 0
609#define M_RDTOWRDLY 0xf
610#define V_RDTOWRDLY(x) ((x) << S_RDTOWRDLY)
611
612#define A_MC7_CAL 0x128
613
614#define S_BUSY 31
615#define V_BUSY(x) ((x) << S_BUSY)
616#define F_BUSY V_BUSY(1U)
617
618#define S_BUSY 31
619#define V_BUSY(x) ((x) << S_BUSY)
620#define F_BUSY V_BUSY(1U)
621
622#define S_CAL_FAULT 30
623#define V_CAL_FAULT(x) ((x) << S_CAL_FAULT)
624#define F_CAL_FAULT V_CAL_FAULT(1U)
625
626#define S_SGL_CAL_EN 20
627#define V_SGL_CAL_EN(x) ((x) << S_SGL_CAL_EN)
628#define F_SGL_CAL_EN V_SGL_CAL_EN(1U)
629
630#define A_MC7_ERR_ADDR 0x12c
631
632#define A_MC7_ECC 0x130
633
634#define S_ECCCHKEN 1
635#define V_ECCCHKEN(x) ((x) << S_ECCCHKEN)
636#define F_ECCCHKEN V_ECCCHKEN(1U)
637
638#define S_ECCGENEN 0
639#define V_ECCGENEN(x) ((x) << S_ECCGENEN)
640#define F_ECCGENEN V_ECCGENEN(1U)
641
642#define A_MC7_CE_ADDR 0x134
643
644#define A_MC7_CE_DATA0 0x138
645
646#define A_MC7_CE_DATA1 0x13c
647
648#define A_MC7_CE_DATA2 0x140
649
650#define S_DATA 0
651#define M_DATA 0xff
652
653#define G_DATA(x) (((x) >> S_DATA) & M_DATA)
654
655#define A_MC7_UE_ADDR 0x144
656
657#define A_MC7_UE_DATA0 0x148
658
659#define A_MC7_UE_DATA1 0x14c
660
661#define A_MC7_UE_DATA2 0x150
662
663#define A_MC7_BD_ADDR 0x154
664
665#define S_ADDR 3
666
667#define M_ADDR 0x1fffffff
668
669#define A_MC7_BD_DATA0 0x158
670
671#define A_MC7_BD_DATA1 0x15c
672
673#define A_MC7_BD_OP 0x164
674
675#define S_OP 0
676
677#define V_OP(x) ((x) << S_OP)
678#define F_OP V_OP(1U)
679
680#define F_OP V_OP(1U)
681#define A_SF_OP 0x6dc
682
683#define A_MC7_BIST_ADDR_BEG 0x168
684
685#define A_MC7_BIST_ADDR_END 0x16c
686
687#define A_MC7_BIST_DATA 0x170
688
689#define A_MC7_BIST_OP 0x174
690
691#define S_CONT 3
692#define V_CONT(x) ((x) << S_CONT)
693#define F_CONT V_CONT(1U)
694
695#define F_CONT V_CONT(1U)
696
697#define A_MC7_INT_ENABLE 0x178
698
699#define S_AE 17
700#define V_AE(x) ((x) << S_AE)
701#define F_AE V_AE(1U)
702
703#define S_PE 2
704#define M_PE 0x7fff
705
706#define V_PE(x) ((x) << S_PE)
707
708#define G_PE(x) (((x) >> S_PE) & M_PE)
709
710#define S_UE 1
711#define V_UE(x) ((x) << S_UE)
712#define F_UE V_UE(1U)
713
714#define S_CE 0
715#define V_CE(x) ((x) << S_CE)
716#define F_CE V_CE(1U)
717
718#define A_MC7_INT_CAUSE 0x17c
719
720#define MC7_PMTX_BASE_ADDR 0x180
721
722#define MC7_CM_BASE_ADDR 0x200
723
724#define A_CIM_BOOT_CFG 0x280
725
726#define S_BOOTADDR 2
727#define M_BOOTADDR 0x3fffffff
728#define V_BOOTADDR(x) ((x) << S_BOOTADDR)
729
730#define A_CIM_SDRAM_BASE_ADDR 0x28c
731
732#define A_CIM_SDRAM_ADDR_SIZE 0x290
733
734#define A_CIM_HOST_INT_ENABLE 0x298
735
736#define A_CIM_HOST_INT_CAUSE 0x29c
737
738#define S_BLKWRPLINT 12
739#define V_BLKWRPLINT(x) ((x) << S_BLKWRPLINT)
740#define F_BLKWRPLINT V_BLKWRPLINT(1U)
741
742#define S_BLKRDPLINT 11
743#define V_BLKRDPLINT(x) ((x) << S_BLKRDPLINT)
744#define F_BLKRDPLINT V_BLKRDPLINT(1U)
745
746#define S_BLKWRCTLINT 10
747#define V_BLKWRCTLINT(x) ((x) << S_BLKWRCTLINT)
748#define F_BLKWRCTLINT V_BLKWRCTLINT(1U)
749
750#define S_BLKRDCTLINT 9
751#define V_BLKRDCTLINT(x) ((x) << S_BLKRDCTLINT)
752#define F_BLKRDCTLINT V_BLKRDCTLINT(1U)
753
754#define S_BLKWRFLASHINT 8
755#define V_BLKWRFLASHINT(x) ((x) << S_BLKWRFLASHINT)
756#define F_BLKWRFLASHINT V_BLKWRFLASHINT(1U)
757
758#define S_BLKRDFLASHINT 7
759#define V_BLKRDFLASHINT(x) ((x) << S_BLKRDFLASHINT)
760#define F_BLKRDFLASHINT V_BLKRDFLASHINT(1U)
761
762#define S_SGLWRFLASHINT 6
763#define V_SGLWRFLASHINT(x) ((x) << S_SGLWRFLASHINT)
764#define F_SGLWRFLASHINT V_SGLWRFLASHINT(1U)
765
766#define S_WRBLKFLASHINT 5
767#define V_WRBLKFLASHINT(x) ((x) << S_WRBLKFLASHINT)
768#define F_WRBLKFLASHINT V_WRBLKFLASHINT(1U)
769
770#define S_BLKWRBOOTINT 4
771#define V_BLKWRBOOTINT(x) ((x) << S_BLKWRBOOTINT)
772#define F_BLKWRBOOTINT V_BLKWRBOOTINT(1U)
773
774#define S_FLASHRANGEINT 2
775#define V_FLASHRANGEINT(x) ((x) << S_FLASHRANGEINT)
776#define F_FLASHRANGEINT V_FLASHRANGEINT(1U)
777
778#define S_SDRAMRANGEINT 1
779#define V_SDRAMRANGEINT(x) ((x) << S_SDRAMRANGEINT)
780#define F_SDRAMRANGEINT V_SDRAMRANGEINT(1U)
781
782#define S_RSVDSPACEINT 0
783#define V_RSVDSPACEINT(x) ((x) << S_RSVDSPACEINT)
784#define F_RSVDSPACEINT V_RSVDSPACEINT(1U)
785
786#define A_CIM_HOST_ACC_CTRL 0x2b0
787
788#define S_HOSTBUSY 17
789#define V_HOSTBUSY(x) ((x) << S_HOSTBUSY)
790#define F_HOSTBUSY V_HOSTBUSY(1U)
791
792#define A_CIM_HOST_ACC_DATA 0x2b4
793
794#define A_TP_IN_CONFIG 0x300
795
796#define S_NICMODE 14
797#define V_NICMODE(x) ((x) << S_NICMODE)
798#define F_NICMODE V_NICMODE(1U)
799
800#define F_NICMODE V_NICMODE(1U)
801
802#define S_IPV6ENABLE 15
803#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE)
804#define F_IPV6ENABLE V_IPV6ENABLE(1U)
805
806#define A_TP_OUT_CONFIG 0x304
807
808#define S_VLANEXTRACTIONENABLE 12
809
810#define A_TP_GLOBAL_CONFIG 0x308
811
812#define S_TXPACINGENABLE 24
813#define V_TXPACINGENABLE(x) ((x) << S_TXPACINGENABLE)
814#define F_TXPACINGENABLE V_TXPACINGENABLE(1U)
815
816#define S_PATHMTU 15
817#define V_PATHMTU(x) ((x) << S_PATHMTU)
818#define F_PATHMTU V_PATHMTU(1U)
819
820#define S_IPCHECKSUMOFFLOAD 13
821#define V_IPCHECKSUMOFFLOAD(x) ((x) << S_IPCHECKSUMOFFLOAD)
822#define F_IPCHECKSUMOFFLOAD V_IPCHECKSUMOFFLOAD(1U)
823
824#define S_UDPCHECKSUMOFFLOAD 12
825#define V_UDPCHECKSUMOFFLOAD(x) ((x) << S_UDPCHECKSUMOFFLOAD)
826#define F_UDPCHECKSUMOFFLOAD V_UDPCHECKSUMOFFLOAD(1U)
827
828#define S_TCPCHECKSUMOFFLOAD 11
829#define V_TCPCHECKSUMOFFLOAD(x) ((x) << S_TCPCHECKSUMOFFLOAD)
830#define F_TCPCHECKSUMOFFLOAD V_TCPCHECKSUMOFFLOAD(1U)
831
832#define S_IPTTL 0
833#define M_IPTTL 0xff
834#define V_IPTTL(x) ((x) << S_IPTTL)
835
836#define A_TP_CMM_MM_BASE 0x314
837
838#define A_TP_CMM_TIMER_BASE 0x318
839
840#define S_CMTIMERMAXNUM 28
841#define M_CMTIMERMAXNUM 0x3
842#define V_CMTIMERMAXNUM(x) ((x) << S_CMTIMERMAXNUM)
843
844#define A_TP_PMM_SIZE 0x31c
845
846#define A_TP_PMM_TX_BASE 0x320
847
848#define A_TP_PMM_RX_BASE 0x328
849
850#define A_TP_PMM_RX_PAGE_SIZE 0x32c
851
852#define A_TP_PMM_RX_MAX_PAGE 0x330
853
854#define A_TP_PMM_TX_PAGE_SIZE 0x334
855
856#define A_TP_PMM_TX_MAX_PAGE 0x338
857
858#define A_TP_TCP_OPTIONS 0x340
859
860#define S_MTUDEFAULT 16
861#define M_MTUDEFAULT 0xffff
862#define V_MTUDEFAULT(x) ((x) << S_MTUDEFAULT)
863
864#define S_MTUENABLE 10
865#define V_MTUENABLE(x) ((x) << S_MTUENABLE)
866#define F_MTUENABLE V_MTUENABLE(1U)
867
868#define S_SACKRX 8
869#define V_SACKRX(x) ((x) << S_SACKRX)
870#define F_SACKRX V_SACKRX(1U)
871
872#define S_SACKMODE 4
873
874#define M_SACKMODE 0x3
875
876#define V_SACKMODE(x) ((x) << S_SACKMODE)
877
878#define S_WINDOWSCALEMODE 2
879#define M_WINDOWSCALEMODE 0x3
880#define V_WINDOWSCALEMODE(x) ((x) << S_WINDOWSCALEMODE)
881
882#define S_TIMESTAMPSMODE 0
883
884#define M_TIMESTAMPSMODE 0x3
885
886#define V_TIMESTAMPSMODE(x) ((x) << S_TIMESTAMPSMODE)
887
888#define A_TP_DACK_CONFIG 0x344
889
890#define S_AUTOSTATE3 30
891#define M_AUTOSTATE3 0x3
892#define V_AUTOSTATE3(x) ((x) << S_AUTOSTATE3)
893
894#define S_AUTOSTATE2 28
895#define M_AUTOSTATE2 0x3
896#define V_AUTOSTATE2(x) ((x) << S_AUTOSTATE2)
897
898#define S_AUTOSTATE1 26
899#define M_AUTOSTATE1 0x3
900#define V_AUTOSTATE1(x) ((x) << S_AUTOSTATE1)
901
902#define S_BYTETHRESHOLD 5
903#define M_BYTETHRESHOLD 0xfffff
904#define V_BYTETHRESHOLD(x) ((x) << S_BYTETHRESHOLD)
905
906#define S_MSSTHRESHOLD 3
907#define M_MSSTHRESHOLD 0x3
908#define V_MSSTHRESHOLD(x) ((x) << S_MSSTHRESHOLD)
909
910#define S_AUTOCAREFUL 2
911#define V_AUTOCAREFUL(x) ((x) << S_AUTOCAREFUL)
912#define F_AUTOCAREFUL V_AUTOCAREFUL(1U)
913
914#define S_AUTOENABLE 1
915#define V_AUTOENABLE(x) ((x) << S_AUTOENABLE)
916#define F_AUTOENABLE V_AUTOENABLE(1U)
917
918#define S_DACK_MODE 0
919#define V_DACK_MODE(x) ((x) << S_DACK_MODE)
920#define F_DACK_MODE V_DACK_MODE(1U)
921
922#define A_TP_PC_CONFIG 0x348
923
924#define S_TXTOSQUEUEMAPMODE 26
925#define V_TXTOSQUEUEMAPMODE(x) ((x) << S_TXTOSQUEUEMAPMODE)
926#define F_TXTOSQUEUEMAPMODE V_TXTOSQUEUEMAPMODE(1U)
927
928#define S_ENABLEEPCMDAFULL 23
929#define V_ENABLEEPCMDAFULL(x) ((x) << S_ENABLEEPCMDAFULL)
930#define F_ENABLEEPCMDAFULL V_ENABLEEPCMDAFULL(1U)
931
932#define S_MODULATEUNIONMODE 22
933#define V_MODULATEUNIONMODE(x) ((x) << S_MODULATEUNIONMODE)
934#define F_MODULATEUNIONMODE V_MODULATEUNIONMODE(1U)
935
936#define S_TXDEFERENABLE 20
937#define V_TXDEFERENABLE(x) ((x) << S_TXDEFERENABLE)
938#define F_TXDEFERENABLE V_TXDEFERENABLE(1U)
939
940#define S_RXCONGESTIONMODE 19
941#define V_RXCONGESTIONMODE(x) ((x) << S_RXCONGESTIONMODE)
942#define F_RXCONGESTIONMODE V_RXCONGESTIONMODE(1U)
943
944#define S_HEARBEATDACK 16
945#define V_HEARBEATDACK(x) ((x) << S_HEARBEATDACK)
946#define F_HEARBEATDACK V_HEARBEATDACK(1U)
947
948#define S_TXCONGESTIONMODE 15
949#define V_TXCONGESTIONMODE(x) ((x) << S_TXCONGESTIONMODE)
950#define F_TXCONGESTIONMODE V_TXCONGESTIONMODE(1U)
951
952#define S_ENABLEOCSPIFULL 30
953#define V_ENABLEOCSPIFULL(x) ((x) << S_ENABLEOCSPIFULL)
954#define F_ENABLEOCSPIFULL V_ENABLEOCSPIFULL(1U)
955
956#define S_LOCKTID 28
957#define V_LOCKTID(x) ((x) << S_LOCKTID)
958#define F_LOCKTID V_LOCKTID(1U)
959
960#define A_TP_PC_CONFIG2 0x34c
961
962#define S_CHDRAFULL 4
963#define V_CHDRAFULL(x) ((x) << S_CHDRAFULL)
964#define F_CHDRAFULL V_CHDRAFULL(1U)
965
966#define A_TP_TCP_BACKOFF_REG0 0x350
967
968#define A_TP_TCP_BACKOFF_REG1 0x354
969
970#define A_TP_TCP_BACKOFF_REG2 0x358
971
972#define A_TP_TCP_BACKOFF_REG3 0x35c
973
974#define A_TP_PARA_REG2 0x368
975
976#define S_MAXRXDATA 16
977#define M_MAXRXDATA 0xffff
978#define V_MAXRXDATA(x) ((x) << S_MAXRXDATA)
979
980#define S_RXCOALESCESIZE 0
981#define M_RXCOALESCESIZE 0xffff
982#define V_RXCOALESCESIZE(x) ((x) << S_RXCOALESCESIZE)
983
984#define A_TP_PARA_REG3 0x36c
985
986#define S_TXDATAACKIDX 16
987#define M_TXDATAACKIDX 0xf
988
989#define V_TXDATAACKIDX(x) ((x) << S_TXDATAACKIDX)
990
991#define S_TXPACEAUTOSTRICT 10
992#define V_TXPACEAUTOSTRICT(x) ((x) << S_TXPACEAUTOSTRICT)
993#define F_TXPACEAUTOSTRICT V_TXPACEAUTOSTRICT(1U)
994
995#define S_TXPACEFIXED 9
996#define V_TXPACEFIXED(x) ((x) << S_TXPACEFIXED)
997#define F_TXPACEFIXED V_TXPACEFIXED(1U)
998
999#define S_TXPACEAUTO 8
1000#define V_TXPACEAUTO(x) ((x) << S_TXPACEAUTO)
1001#define F_TXPACEAUTO V_TXPACEAUTO(1U)
1002
1003#define S_RXCOALESCEENABLE 1
1004#define V_RXCOALESCEENABLE(x) ((x) << S_RXCOALESCEENABLE)
1005#define F_RXCOALESCEENABLE V_RXCOALESCEENABLE(1U)
1006
1007#define S_RXCOALESCEPSHEN 0
1008#define V_RXCOALESCEPSHEN(x) ((x) << S_RXCOALESCEPSHEN)
1009#define F_RXCOALESCEPSHEN V_RXCOALESCEPSHEN(1U)
1010
1011#define A_TP_PARA_REG4 0x370
1012
1013#define A_TP_PARA_REG6 0x378
1014
1015#define S_T3A_ENABLEESND 13
1016#define V_T3A_ENABLEESND(x) ((x) << S_T3A_ENABLEESND)
1017#define F_T3A_ENABLEESND V_T3A_ENABLEESND(1U)
1018
1019#define S_ENABLEESND 11
1020#define V_ENABLEESND(x) ((x) << S_ENABLEESND)
1021#define F_ENABLEESND V_ENABLEESND(1U)
1022
1023#define A_TP_PARA_REG7 0x37c
1024
1025#define S_PMMAXXFERLEN1 16
1026#define M_PMMAXXFERLEN1 0xffff
1027#define V_PMMAXXFERLEN1(x) ((x) << S_PMMAXXFERLEN1)
1028
1029#define S_PMMAXXFERLEN0 0
1030#define M_PMMAXXFERLEN0 0xffff
1031#define V_PMMAXXFERLEN0(x) ((x) << S_PMMAXXFERLEN0)
1032
1033#define A_TP_TIMER_RESOLUTION 0x390
1034
1035#define S_TIMERRESOLUTION 16
1036#define M_TIMERRESOLUTION 0xff
1037#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION)
1038
1039#define S_TIMESTAMPRESOLUTION 8
1040#define M_TIMESTAMPRESOLUTION 0xff
1041#define V_TIMESTAMPRESOLUTION(x) ((x) << S_TIMESTAMPRESOLUTION)
1042
1043#define S_DELAYEDACKRESOLUTION 0
1044#define M_DELAYEDACKRESOLUTION 0xff
1045#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION)
1046
1047#define A_TP_MSL 0x394
1048
1049#define A_TP_RXT_MIN 0x398
1050
1051#define A_TP_RXT_MAX 0x39c
1052
1053#define A_TP_PERS_MIN 0x3a0
1054
1055#define A_TP_PERS_MAX 0x3a4
1056
1057#define A_TP_KEEP_IDLE 0x3a8
1058
1059#define A_TP_KEEP_INTVL 0x3ac
1060
1061#define A_TP_INIT_SRTT 0x3b0
1062
1063#define A_TP_DACK_TIMER 0x3b4
1064
1065#define A_TP_FINWAIT2_TIMER 0x3b8
1066
1067#define A_TP_SHIFT_CNT 0x3c0
1068
1069#define S_SYNSHIFTMAX 24
1070
1071#define M_SYNSHIFTMAX 0xff
1072
1073#define V_SYNSHIFTMAX(x) ((x) << S_SYNSHIFTMAX)
1074
1075#define S_RXTSHIFTMAXR1 20
1076
1077#define M_RXTSHIFTMAXR1 0xf
1078
1079#define V_RXTSHIFTMAXR1(x) ((x) << S_RXTSHIFTMAXR1)
1080
1081#define S_RXTSHIFTMAXR2 16
1082
1083#define M_RXTSHIFTMAXR2 0xf
1084
1085#define V_RXTSHIFTMAXR2(x) ((x) << S_RXTSHIFTMAXR2)
1086
1087#define S_PERSHIFTBACKOFFMAX 12
1088#define M_PERSHIFTBACKOFFMAX 0xf
1089#define V_PERSHIFTBACKOFFMAX(x) ((x) << S_PERSHIFTBACKOFFMAX)
1090
1091#define S_PERSHIFTMAX 8
1092#define M_PERSHIFTMAX 0xf
1093#define V_PERSHIFTMAX(x) ((x) << S_PERSHIFTMAX)
1094
1095#define S_KEEPALIVEMAX 0
1096
1097#define M_KEEPALIVEMAX 0xff
1098
1099#define V_KEEPALIVEMAX(x) ((x) << S_KEEPALIVEMAX)
1100
1101#define A_TP_MTU_PORT_TABLE 0x3d0
1102
1103#define A_TP_CCTRL_TABLE 0x3dc
1104
1105#define A_TP_MTU_TABLE 0x3e4
1106
1107#define A_TP_RSS_MAP_TABLE 0x3e8
1108
1109#define A_TP_RSS_LKP_TABLE 0x3ec
1110
1111#define A_TP_RSS_CONFIG 0x3f0
1112
1113#define S_TNL4TUPEN 29
1114#define V_TNL4TUPEN(x) ((x) << S_TNL4TUPEN)
1115#define F_TNL4TUPEN V_TNL4TUPEN(1U)
1116
1117#define S_TNL2TUPEN 28
1118#define V_TNL2TUPEN(x) ((x) << S_TNL2TUPEN)
1119#define F_TNL2TUPEN V_TNL2TUPEN(1U)
1120
1121#define S_TNLPRTEN 26
1122#define V_TNLPRTEN(x) ((x) << S_TNLPRTEN)
1123#define F_TNLPRTEN V_TNLPRTEN(1U)
1124
1125#define S_TNLMAPEN 25
1126#define V_TNLMAPEN(x) ((x) << S_TNLMAPEN)
1127#define F_TNLMAPEN V_TNLMAPEN(1U)
1128
1129#define S_TNLLKPEN 24
1130#define V_TNLLKPEN(x) ((x) << S_TNLLKPEN)
1131#define F_TNLLKPEN V_TNLLKPEN(1U)
1132
1133#define S_RRCPLCPUSIZE 4
1134#define M_RRCPLCPUSIZE 0x7
1135#define V_RRCPLCPUSIZE(x) ((x) << S_RRCPLCPUSIZE)
1136
1137#define S_RQFEEDBACKENABLE 3
1138#define V_RQFEEDBACKENABLE(x) ((x) << S_RQFEEDBACKENABLE)
1139#define F_RQFEEDBACKENABLE V_RQFEEDBACKENABLE(1U)
1140
1141#define S_DISABLE 0
1142
1143#define A_TP_TM_PIO_ADDR 0x418
1144
1145#define A_TP_TM_PIO_DATA 0x41c
1146
1147#define A_TP_TX_MOD_QUE_TABLE 0x420
1148
1149#define A_TP_TX_RESOURCE_LIMIT 0x424
1150
1151#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x428
1152
1153#define S_TX_MOD_QUEUE_REQ_MAP 0
1154#define M_TX_MOD_QUEUE_REQ_MAP 0xff
1155#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
1156
1157#define A_TP_TX_MOD_QUEUE_WEIGHT1 0x42c
1158
1159#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x430
1160
1161#define A_TP_MOD_CHANNEL_WEIGHT 0x434
1162
1163#define A_TP_PIO_ADDR 0x440
1164
1165#define A_TP_PIO_DATA 0x444
1166
1167#define A_TP_RESET 0x44c
1168
1169#define S_FLSTINITENABLE 1
1170#define V_FLSTINITENABLE(x) ((x) << S_FLSTINITENABLE)
1171#define F_FLSTINITENABLE V_FLSTINITENABLE(1U)
1172
1173#define S_TPRESET 0
1174#define V_TPRESET(x) ((x) << S_TPRESET)
1175#define F_TPRESET V_TPRESET(1U)
1176
1177#define A_TP_CMM_MM_RX_FLST_BASE 0x460
1178
1179#define A_TP_CMM_MM_TX_FLST_BASE 0x464
1180
1181#define A_TP_CMM_MM_PS_FLST_BASE 0x468
1182
1183#define A_TP_MIB_INDEX 0x450
1184
1185#define A_TP_MIB_RDATA 0x454
1186
1187#define A_TP_CMM_MM_MAX_PSTRUCT 0x46c
1188
1189#define A_TP_INT_ENABLE 0x470
1190
1191#define A_TP_INT_CAUSE 0x474
1192
1193#define A_TP_TX_MOD_Q1_Q0_RATE_LIMIT 0x8
1194
1195#define A_TP_TX_DROP_CFG_CH0 0x12b
1196
1197#define A_TP_TX_DROP_MODE 0x12f
1198
1199#define A_TP_EGRESS_CONFIG 0x145
1200
1201#define S_REWRITEFORCETOSIZE 0
1202#define V_REWRITEFORCETOSIZE(x) ((x) << S_REWRITEFORCETOSIZE)
1203#define F_REWRITEFORCETOSIZE V_REWRITEFORCETOSIZE(1U)
1204
1205#define A_TP_TX_TRC_KEY0 0x20
1206
1207#define A_TP_RX_TRC_KEY0 0x120
1208
1209#define A_ULPRX_CTL 0x500
1210
1211#define S_ROUND_ROBIN 4
1212#define V_ROUND_ROBIN(x) ((x) << S_ROUND_ROBIN)
1213#define F_ROUND_ROBIN V_ROUND_ROBIN(1U)
1214
1215#define A_ULPRX_INT_ENABLE 0x504
1216
1217#define S_PARERR 0
1218#define V_PARERR(x) ((x) << S_PARERR)
1219#define F_PARERR V_PARERR(1U)
1220
1221#define A_ULPRX_INT_CAUSE 0x508
1222
1223#define A_ULPRX_ISCSI_LLIMIT 0x50c
1224
1225#define A_ULPRX_ISCSI_ULIMIT 0x510
1226
1227#define A_ULPRX_ISCSI_TAGMASK 0x514
1228
1229#define A_ULPRX_TDDP_LLIMIT 0x51c
1230
1231#define A_ULPRX_TDDP_ULIMIT 0x520
1232
1233#define A_ULPRX_STAG_LLIMIT 0x52c
1234
1235#define A_ULPRX_STAG_ULIMIT 0x530
1236
1237#define A_ULPRX_RQ_LLIMIT 0x534
1239
1240#define A_ULPRX_RQ_ULIMIT 0x538
1242
1243#define A_ULPRX_PBL_LLIMIT 0x53c
1244
1245#define A_ULPRX_PBL_ULIMIT 0x540
1247
1248#define A_ULPRX_TDDP_TAGMASK 0x524
1249
1259#define A_ULPTX_CONFIG 0x580
1260
1261#define S_CFG_RR_ARB 0
1262#define V_CFG_RR_ARB(x) ((x) << S_CFG_RR_ARB)
1263#define F_CFG_RR_ARB V_CFG_RR_ARB(1U)
1264
1265#define A_ULPTX_INT_ENABLE 0x584
1266
1267#define S_PBL_BOUND_ERR_CH1 1
1268#define V_PBL_BOUND_ERR_CH1(x) ((x) << S_PBL_BOUND_ERR_CH1)
1269#define F_PBL_BOUND_ERR_CH1 V_PBL_BOUND_ERR_CH1(1U)
1270
1271#define S_PBL_BOUND_ERR_CH0 0
1272#define V_PBL_BOUND_ERR_CH0(x) ((x) << S_PBL_BOUND_ERR_CH0)
1273#define F_PBL_BOUND_ERR_CH0 V_PBL_BOUND_ERR_CH0(1U)
1274
1275#define A_ULPTX_INT_CAUSE 0x588
1276
1277#define A_ULPTX_TPT_LLIMIT 0x58c
1278
1279#define A_ULPTX_TPT_ULIMIT 0x590
1280
1281#define A_ULPTX_PBL_LLIMIT 0x594
1282
1283#define A_ULPTX_PBL_ULIMIT 0x598
1284
1285#define A_ULPTX_DMA_WEIGHT 0x5ac
1286
1287#define S_D1_WEIGHT 16
1288#define M_D1_WEIGHT 0xffff
1289#define V_D1_WEIGHT(x) ((x) << S_D1_WEIGHT)
1290
1291#define S_D0_WEIGHT 0
1292#define M_D0_WEIGHT 0xffff
1293#define V_D0_WEIGHT(x) ((x) << S_D0_WEIGHT)
1294
1295#define A_PM1_RX_CFG 0x5c0
1296
1297#define A_PM1_RX_INT_ENABLE 0x5d8
1298
1299#define S_ZERO_E_CMD_ERROR 18
1300#define V_ZERO_E_CMD_ERROR(x) ((x) << S_ZERO_E_CMD_ERROR)
1301#define F_ZERO_E_CMD_ERROR V_ZERO_E_CMD_ERROR(1U)
1302
1303#define S_IESPI0_FIFO2X_RX_FRAMING_ERROR 17
1304#define V_IESPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_FIFO2X_RX_FRAMING_ERROR)
1305#define F_IESPI0_FIFO2X_RX_FRAMING_ERROR V_IESPI0_FIFO2X_RX_FRAMING_ERROR(1U)
1306
1307#define S_IESPI1_FIFO2X_RX_FRAMING_ERROR 16
1308#define V_IESPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_FIFO2X_RX_FRAMING_ERROR)
1309#define F_IESPI1_FIFO2X_RX_FRAMING_ERROR V_IESPI1_FIFO2X_RX_FRAMING_ERROR(1U)
1310
1311#define S_IESPI0_RX_FRAMING_ERROR 15
1312#define V_IESPI0_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_RX_FRAMING_ERROR)
1313#define F_IESPI0_RX_FRAMING_ERROR V_IESPI0_RX_FRAMING_ERROR(1U)
1314
1315#define S_IESPI1_RX_FRAMING_ERROR 14
1316#define V_IESPI1_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_RX_FRAMING_ERROR)
1317#define F_IESPI1_RX_FRAMING_ERROR V_IESPI1_RX_FRAMING_ERROR(1U)
1318
1319#define S_IESPI0_TX_FRAMING_ERROR 13
1320#define V_IESPI0_TX_FRAMING_ERROR(x) ((x) << S_IESPI0_TX_FRAMING_ERROR)
1321#define F_IESPI0_TX_FRAMING_ERROR V_IESPI0_TX_FRAMING_ERROR(1U)
1322
1323#define S_IESPI1_TX_FRAMING_ERROR 12
1324#define V_IESPI1_TX_FRAMING_ERROR(x) ((x) << S_IESPI1_TX_FRAMING_ERROR)
1325#define F_IESPI1_TX_FRAMING_ERROR V_IESPI1_TX_FRAMING_ERROR(1U)
1326
1327#define S_OCSPI0_RX_FRAMING_ERROR 11
1328#define V_OCSPI0_RX_FRAMING_ERROR(x) ((x) << S_OCSPI0_RX_FRAMING_ERROR)
1329#define F_OCSPI0_RX_FRAMING_ERROR V_OCSPI0_RX_FRAMING_ERROR(1U)
1330
1331#define S_OCSPI1_RX_FRAMING_ERROR 10
1332#define V_OCSPI1_RX_FRAMING_ERROR(x) ((x) << S_OCSPI1_RX_FRAMING_ERROR)
1333#define F_OCSPI1_RX_FRAMING_ERROR V_OCSPI1_RX_FRAMING_ERROR(1U)
1334
1335#define S_OCSPI0_TX_FRAMING_ERROR 9
1336#define V_OCSPI0_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_TX_FRAMING_ERROR)
1337#define F_OCSPI0_TX_FRAMING_ERROR V_OCSPI0_TX_FRAMING_ERROR(1U)
1338
1339#define S_OCSPI1_TX_FRAMING_ERROR 8
1340#define V_OCSPI1_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_TX_FRAMING_ERROR)
1341#define F_OCSPI1_TX_FRAMING_ERROR V_OCSPI1_TX_FRAMING_ERROR(1U)
1342
1343#define S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR 7
1344#define V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR)
1345#define F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
1346
1347#define S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR 6
1348#define V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1349#define F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
1350
1351#define S_IESPI_PAR_ERROR 3
1352#define M_IESPI_PAR_ERROR 0x7
1353
1354#define V_IESPI_PAR_ERROR(x) ((x) << S_IESPI_PAR_ERROR)
1355
1356#define S_OCSPI_PAR_ERROR 0
1357#define M_OCSPI_PAR_ERROR 0x7
1358
1359#define V_OCSPI_PAR_ERROR(x) ((x) << S_OCSPI_PAR_ERROR)
1360
1361#define A_PM1_RX_INT_CAUSE 0x5dc
1362
1363#define A_PM1_TX_CFG 0x5e0
1364
1365#define A_PM1_TX_INT_ENABLE 0x5f8
1366
1367#define S_ZERO_C_CMD_ERROR 18
1368#define V_ZERO_C_CMD_ERROR(x) ((x) << S_ZERO_C_CMD_ERROR)
1369#define F_ZERO_C_CMD_ERROR V_ZERO_C_CMD_ERROR(1U)
1370
1371#define S_ICSPI0_FIFO2X_RX_FRAMING_ERROR 17
1372#define V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_FIFO2X_RX_FRAMING_ERROR)
1373#define F_ICSPI0_FIFO2X_RX_FRAMING_ERROR V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(1U)
1374
1375#define S_ICSPI1_FIFO2X_RX_FRAMING_ERROR 16
1376#define V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_FIFO2X_RX_FRAMING_ERROR)
1377#define F_ICSPI1_FIFO2X_RX_FRAMING_ERROR V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(1U)
1378
1379#define S_ICSPI0_RX_FRAMING_ERROR 15
1380#define V_ICSPI0_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_RX_FRAMING_ERROR)
1381#define F_ICSPI0_RX_FRAMING_ERROR V_ICSPI0_RX_FRAMING_ERROR(1U)
1382
1383#define S_ICSPI1_RX_FRAMING_ERROR 14
1384#define V_ICSPI1_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_RX_FRAMING_ERROR)
1385#define F_ICSPI1_RX_FRAMING_ERROR V_ICSPI1_RX_FRAMING_ERROR(1U)
1386
1387#define S_ICSPI0_TX_FRAMING_ERROR 13
1388#define V_ICSPI0_TX_FRAMING_ERROR(x) ((x) << S_ICSPI0_TX_FRAMING_ERROR)
1389#define F_ICSPI0_TX_FRAMING_ERROR V_ICSPI0_TX_FRAMING_ERROR(1U)
1390
1391#define S_ICSPI1_TX_FRAMING_ERROR 12
1392#define V_ICSPI1_TX_FRAMING_ERROR(x) ((x) << S_ICSPI1_TX_FRAMING_ERROR)
1393#define F_ICSPI1_TX_FRAMING_ERROR V_ICSPI1_TX_FRAMING_ERROR(1U)
1394
1395#define S_OESPI0_RX_FRAMING_ERROR 11
1396#define V_OESPI0_RX_FRAMING_ERROR(x) ((x) << S_OESPI0_RX_FRAMING_ERROR)
1397#define F_OESPI0_RX_FRAMING_ERROR V_OESPI0_RX_FRAMING_ERROR(1U)
1398
1399#define S_OESPI1_RX_FRAMING_ERROR 10
1400#define V_OESPI1_RX_FRAMING_ERROR(x) ((x) << S_OESPI1_RX_FRAMING_ERROR)
1401#define F_OESPI1_RX_FRAMING_ERROR V_OESPI1_RX_FRAMING_ERROR(1U)
1402
1403#define S_OESPI0_TX_FRAMING_ERROR 9
1404#define V_OESPI0_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_TX_FRAMING_ERROR)
1405#define F_OESPI0_TX_FRAMING_ERROR V_OESPI0_TX_FRAMING_ERROR(1U)
1406
1407#define S_OESPI1_TX_FRAMING_ERROR 8
1408#define V_OESPI1_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_TX_FRAMING_ERROR)
1409#define F_OESPI1_TX_FRAMING_ERROR V_OESPI1_TX_FRAMING_ERROR(1U)
1410
1411#define S_OESPI0_OFIFO2X_TX_FRAMING_ERROR 7
1412#define V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_OFIFO2X_TX_FRAMING_ERROR)
1413#define F_OESPI0_OFIFO2X_TX_FRAMING_ERROR V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
1414
1415#define S_OESPI1_OFIFO2X_TX_FRAMING_ERROR 6
1416#define V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1417#define F_OESPI1_OFIFO2X_TX_FRAMING_ERROR V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
1418
1419#define S_ICSPI_PAR_ERROR 3
1420#define M_ICSPI_PAR_ERROR 0x7
1421
1422#define V_ICSPI_PAR_ERROR(x) ((x) << S_ICSPI_PAR_ERROR)
1423
1424#define S_OESPI_PAR_ERROR 0
1425#define M_OESPI_PAR_ERROR 0x7
1426
1427#define V_OESPI_PAR_ERROR(x) ((x) << S_OESPI_PAR_ERROR)
1428
1429#define A_PM1_TX_INT_CAUSE 0x5fc
1430
1431#define A_MPS_CFG 0x600
1432
1433#define S_TPRXPORTEN 4
1434#define V_TPRXPORTEN(x) ((x) << S_TPRXPORTEN)
1435#define F_TPRXPORTEN V_TPRXPORTEN(1U)
1436
1437#define S_TPTXPORT1EN 3
1438#define V_TPTXPORT1EN(x) ((x) << S_TPTXPORT1EN)
1439#define F_TPTXPORT1EN V_TPTXPORT1EN(1U)
1440
1441#define S_TPTXPORT0EN 2
1442#define V_TPTXPORT0EN(x) ((x) << S_TPTXPORT0EN)
1443#define F_TPTXPORT0EN V_TPTXPORT0EN(1U)
1444
1445#define S_PORT1ACTIVE 1
1446#define V_PORT1ACTIVE(x) ((x) << S_PORT1ACTIVE)
1447#define F_PORT1ACTIVE V_PORT1ACTIVE(1U)
1448
1449#define S_PORT0ACTIVE 0
1450#define V_PORT0ACTIVE(x) ((x) << S_PORT0ACTIVE)
1451#define F_PORT0ACTIVE V_PORT0ACTIVE(1U)
1452
1453#define S_ENFORCEPKT 11
1454#define V_ENFORCEPKT(x) ((x) << S_ENFORCEPKT)
1455#define F_ENFORCEPKT V_ENFORCEPKT(1U)
1456
1457#define A_MPS_INT_ENABLE 0x61c
1458
1459#define S_MCAPARERRENB 6
1460#define M_MCAPARERRENB 0x7
1461
1462#define V_MCAPARERRENB(x) ((x) << S_MCAPARERRENB)
1463
1464#define S_RXTPPARERRENB 4
1465#define M_RXTPPARERRENB 0x3
1466
1467#define V_RXTPPARERRENB(x) ((x) << S_RXTPPARERRENB)
1468
1469#define S_TX1TPPARERRENB 2
1470#define M_TX1TPPARERRENB 0x3
1471
1472#define V_TX1TPPARERRENB(x) ((x) << S_TX1TPPARERRENB)
1473
1474#define S_TX0TPPARERRENB 0
1475#define M_TX0TPPARERRENB 0x3
1476
1477#define V_TX0TPPARERRENB(x) ((x) << S_TX0TPPARERRENB)
1478
1479#define A_MPS_INT_CAUSE 0x620
1480
1481#define S_MCAPARERR 6
1482#define M_MCAPARERR 0x7
1483
1484#define V_MCAPARERR(x) ((x) << S_MCAPARERR)
1485
1486#define S_RXTPPARERR 4
1487#define M_RXTPPARERR 0x3
1488
1489#define V_RXTPPARERR(x) ((x) << S_RXTPPARERR)
1490
1491#define S_TX1TPPARERR 2
1492#define M_TX1TPPARERR 0x3
1493
1494#define V_TX1TPPARERR(x) ((x) << S_TX1TPPARERR)
1495
1496#define S_TX0TPPARERR 0
1497#define M_TX0TPPARERR 0x3
1498
1499#define V_TX0TPPARERR(x) ((x) << S_TX0TPPARERR)
1500
1501#define A_CPL_SWITCH_CNTRL 0x640
1502
1503#define A_CPL_INTR_ENABLE 0x650
1504
1505#define S_CIM_OVFL_ERROR 4
1506#define V_CIM_OVFL_ERROR(x) ((x) << S_CIM_OVFL_ERROR)
1507#define F_CIM_OVFL_ERROR V_CIM_OVFL_ERROR(1U)
1508
1509#define S_TP_FRAMING_ERROR 3
1510#define V_TP_FRAMING_ERROR(x) ((x) << S_TP_FRAMING_ERROR)
1511#define F_TP_FRAMING_ERROR V_TP_FRAMING_ERROR(1U)
1512
1513#define S_SGE_FRAMING_ERROR 2
1514#define V_SGE_FRAMING_ERROR(x) ((x) << S_SGE_FRAMING_ERROR)
1515#define F_SGE_FRAMING_ERROR V_SGE_FRAMING_ERROR(1U)
1516
1517#define S_CIM_FRAMING_ERROR 1
1518#define V_CIM_FRAMING_ERROR(x) ((x) << S_CIM_FRAMING_ERROR)
1519#define F_CIM_FRAMING_ERROR V_CIM_FRAMING_ERROR(1U)
1520
1521#define S_ZERO_SWITCH_ERROR 0
1522#define V_ZERO_SWITCH_ERROR(x) ((x) << S_ZERO_SWITCH_ERROR)
1523#define F_ZERO_SWITCH_ERROR V_ZERO_SWITCH_ERROR(1U)
1524
1525#define A_CPL_INTR_CAUSE 0x654
1526
1527#define A_CPL_MAP_TBL_DATA 0x65c
1528
1529#define A_SMB_GLOBAL_TIME_CFG 0x660
1530
1531#define A_I2C_CFG 0x6a0
1532
1533#define S_I2C_CLKDIV 0
1534#define M_I2C_CLKDIV 0xfff
1535#define V_I2C_CLKDIV(x) ((x) << S_I2C_CLKDIV)
1536
1537#define A_MI1_CFG 0x6b0
1538
1539#define S_CLKDIV 5
1540#define M_CLKDIV 0xff
1541#define V_CLKDIV(x) ((x) << S_CLKDIV)
1542
1543#define S_ST 3
1544
1545#define M_ST 0x3
1546
1547#define V_ST(x) ((x) << S_ST)
1548
1549#define G_ST(x) (((x) >> S_ST) & M_ST)
1550
1551#define S_PREEN 2
1552#define V_PREEN(x) ((x) << S_PREEN)
1553#define F_PREEN V_PREEN(1U)
1554
1555#define S_MDIINV 1
1556#define V_MDIINV(x) ((x) << S_MDIINV)
1557#define F_MDIINV V_MDIINV(1U)
1558
1559#define S_MDIEN 0
1560#define V_MDIEN(x) ((x) << S_MDIEN)
1561#define F_MDIEN V_MDIEN(1U)
1562
1563#define A_MI1_ADDR 0x6b4
1564
1565#define S_PHYADDR 5
1566#define M_PHYADDR 0x1f
1567#define V_PHYADDR(x) ((x) << S_PHYADDR)
1568
1569#define S_REGADDR 0
1570#define M_REGADDR 0x1f
1571#define V_REGADDR(x) ((x) << S_REGADDR)
1572
1573#define A_MI1_DATA 0x6b8
1574
1575#define A_MI1_OP 0x6bc
1576
1577#define S_MDI_OP 0
1578#define M_MDI_OP 0x3
1579#define V_MDI_OP(x) ((x) << S_MDI_OP)
1580
1581#define A_SF_DATA 0x6d8
1582
1583#define A_SF_OP 0x6dc
1584
1585#define S_BYTECNT 1
1586#define M_BYTECNT 0x3
1587#define V_BYTECNT(x) ((x) << S_BYTECNT)
1588
1589#define A_PL_INT_ENABLE0 0x6e0
1590
1591#define S_T3DBG 23
1592#define V_T3DBG(x) ((x) << S_T3DBG)
1593#define F_T3DBG V_T3DBG(1U)
1594
1595#define S_XGMAC0_1 20
1596#define V_XGMAC0_1(x) ((x) << S_XGMAC0_1)
1597#define F_XGMAC0_1 V_XGMAC0_1(1U)
1598
1599#define S_XGMAC0_0 19
1600#define V_XGMAC0_0(x) ((x) << S_XGMAC0_0)
1601#define F_XGMAC0_0 V_XGMAC0_0(1U)
1602
1603#define S_MC5A 18
1604#define V_MC5A(x) ((x) << S_MC5A)
1605#define F_MC5A V_MC5A(1U)
1606
1607#define S_CPL_SWITCH 12
1608#define V_CPL_SWITCH(x) ((x) << S_CPL_SWITCH)
1609#define F_CPL_SWITCH V_CPL_SWITCH(1U)
1610
1611#define S_MPS0 11
1612#define V_MPS0(x) ((x) << S_MPS0)
1613#define F_MPS0 V_MPS0(1U)
1614
1615#define S_PM1_TX 10
1616#define V_PM1_TX(x) ((x) << S_PM1_TX)
1617#define F_PM1_TX V_PM1_TX(1U)
1618
1619#define S_PM1_RX 9
1620#define V_PM1_RX(x) ((x) << S_PM1_RX)
1621#define F_PM1_RX V_PM1_RX(1U)
1622
1623#define S_ULP2_TX 8
1624#define V_ULP2_TX(x) ((x) << S_ULP2_TX)
1625#define F_ULP2_TX V_ULP2_TX(1U)
1626
1627#define S_ULP2_RX 7
1628#define V_ULP2_RX(x) ((x) << S_ULP2_RX)
1629#define F_ULP2_RX V_ULP2_RX(1U)
1630
1631#define S_TP1 6
1632#define V_TP1(x) ((x) << S_TP1)
1633#define F_TP1 V_TP1(1U)
1634
1635#define S_CIM 5
1636#define V_CIM(x) ((x) << S_CIM)
1637#define F_CIM V_CIM(1U)
1638
1639#define S_MC7_CM 4
1640#define V_MC7_CM(x) ((x) << S_MC7_CM)
1641#define F_MC7_CM V_MC7_CM(1U)
1642
1643#define S_MC7_PMTX 3
1644#define V_MC7_PMTX(x) ((x) << S_MC7_PMTX)
1645#define F_MC7_PMTX V_MC7_PMTX(1U)
1646
1647#define S_MC7_PMRX 2
1648#define V_MC7_PMRX(x) ((x) << S_MC7_PMRX)
1649#define F_MC7_PMRX V_MC7_PMRX(1U)
1650
1651#define S_PCIM0 1
1652#define V_PCIM0(x) ((x) << S_PCIM0)
1653#define F_PCIM0 V_PCIM0(1U)
1654
1655#define S_SGE3 0
1656#define V_SGE3(x) ((x) << S_SGE3)
1657#define F_SGE3 V_SGE3(1U)
1658
1659#define A_PL_INT_CAUSE0 0x6e4
1660
1661#define A_PL_RST 0x6f0
1662
1663#define S_CRSTWRM 1
1664#define V_CRSTWRM(x) ((x) << S_CRSTWRM)
1665#define F_CRSTWRM V_CRSTWRM(1U)
1666
1667#define A_PL_REV 0x6f4
1668
1669#define A_PL_CLI 0x6f8
1670
1671#define A_MC5_DB_CONFIG 0x704
1672
1673#define S_TMTYPEHI 30
1674#define V_TMTYPEHI(x) ((x) << S_TMTYPEHI)
1675#define F_TMTYPEHI V_TMTYPEHI(1U)
1676
1677#define S_TMPARTSIZE 28
1678#define M_TMPARTSIZE 0x3
1679#define V_TMPARTSIZE(x) ((x) << S_TMPARTSIZE)
1680#define G_TMPARTSIZE(x) (((x) >> S_TMPARTSIZE) & M_TMPARTSIZE)
1681
1682#define S_TMTYPE 26
1683#define M_TMTYPE 0x3
1684#define V_TMTYPE(x) ((x) << S_TMTYPE)
1685#define G_TMTYPE(x) (((x) >> S_TMTYPE) & M_TMTYPE)
1686
1687#define S_COMPEN 17
1688#define V_COMPEN(x) ((x) << S_COMPEN)
1689#define F_COMPEN V_COMPEN(1U)
1690
1691#define S_PRTYEN 6
1692#define V_PRTYEN(x) ((x) << S_PRTYEN)
1693#define F_PRTYEN V_PRTYEN(1U)
1694
1695#define S_MBUSEN 5
1696#define V_MBUSEN(x) ((x) << S_MBUSEN)
1697#define F_MBUSEN V_MBUSEN(1U)
1698
1699#define S_DBGIEN 4
1700#define V_DBGIEN(x) ((x) << S_DBGIEN)
1701#define F_DBGIEN V_DBGIEN(1U)
1702
1703#define S_TMRDY 2
1704#define V_TMRDY(x) ((x) << S_TMRDY)
1705#define F_TMRDY V_TMRDY(1U)
1706
1707#define S_TMRST 1
1708#define V_TMRST(x) ((x) << S_TMRST)
1709#define F_TMRST V_TMRST(1U)
1710
1711#define S_TMMODE 0
1712#define V_TMMODE(x) ((x) << S_TMMODE)
1713#define F_TMMODE V_TMMODE(1U)
1714
1717#define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c
1718
1719#define A_MC5_DB_FILTER_TABLE 0x710
1720
1721#define A_MC5_DB_SERVER_INDEX 0x714
1722
1723#define A_MC5_DB_RSP_LATENCY 0x720
1724
1725#define S_RDLAT 16
1726#define M_RDLAT 0x1f
1727#define V_RDLAT(x) ((x) << S_RDLAT)
1728
1729#define S_LRNLAT 8
1730#define M_LRNLAT 0x1f
1731#define V_LRNLAT(x) ((x) << S_LRNLAT)
1732
1733#define S_SRCHLAT 0
1734#define M_SRCHLAT 0x1f
1735#define V_SRCHLAT(x) ((x) << S_SRCHLAT)
1736
1737#define A_MC5_DB_PART_ID_INDEX 0x72c
1738
1739#define A_MC5_DB_INT_ENABLE 0x740
1740
1741#define S_DELACTEMPTY 18
1742#define V_DELACTEMPTY(x) ((x) << S_DELACTEMPTY)
1743#define F_DELACTEMPTY V_DELACTEMPTY(1U)
1744
1745#define S_DISPQPARERR 17
1746#define V_DISPQPARERR(x) ((x) << S_DISPQPARERR)
1747#define F_DISPQPARERR V_DISPQPARERR(1U)
1748
1749#define S_REQQPARERR 16
1750#define V_REQQPARERR(x) ((x) << S_REQQPARERR)
1751#define F_REQQPARERR V_REQQPARERR(1U)
1752
1753#define S_UNKNOWNCMD 15
1754#define V_UNKNOWNCMD(x) ((x) << S_UNKNOWNCMD)
1755#define F_UNKNOWNCMD V_UNKNOWNCMD(1U)
1756
1757#define S_NFASRCHFAIL 8
1758#define V_NFASRCHFAIL(x) ((x) << S_NFASRCHFAIL)
1759#define F_NFASRCHFAIL V_NFASRCHFAIL(1U)
1760
1761#define S_ACTRGNFULL 7
1762#define V_ACTRGNFULL(x) ((x) << S_ACTRGNFULL)
1763#define F_ACTRGNFULL V_ACTRGNFULL(1U)
1764
1765#define S_PARITYERR 6
1766#define V_PARITYERR(x) ((x) << S_PARITYERR)
1767#define F_PARITYERR V_PARITYERR(1U)
1768
1769#define A_MC5_DB_INT_CAUSE 0x744
1770
1771#define A_MC5_DB_DBGI_CONFIG 0x774
1772
1773#define A_MC5_DB_DBGI_REQ_CMD 0x778
1774
1775#define A_MC5_DB_DBGI_REQ_ADDR0 0x77c
1776
1777#define A_MC5_DB_DBGI_REQ_ADDR1 0x780
1778
1779#define A_MC5_DB_DBGI_REQ_ADDR2 0x784
1780
1781#define A_MC5_DB_DBGI_REQ_DATA0 0x788
1782
1783#define A_MC5_DB_DBGI_REQ_DATA1 0x78c
1784
1785#define A_MC5_DB_DBGI_REQ_DATA2 0x790
1786
1787#define A_MC5_DB_DBGI_RSP_STATUS 0x7b0
1788
1789#define S_DBGIRSPVALID 0
1790#define V_DBGIRSPVALID(x) ((x) << S_DBGIRSPVALID)
1791#define F_DBGIRSPVALID V_DBGIRSPVALID(1U)
1792
1793#define A_MC5_DB_DBGI_RSP_DATA0 0x7b4
1794
1795#define A_MC5_DB_DBGI_RSP_DATA1 0x7b8
1796
1797#define A_MC5_DB_DBGI_RSP_DATA2 0x7bc
1798
1799#define A_MC5_DB_POPEN_DATA_WR_CMD 0x7cc
1800
1801#define A_MC5_DB_POPEN_MASK_WR_CMD 0x7d0
1802
1803#define A_MC5_DB_AOPEN_SRCH_CMD 0x7d4
1804
1805#define A_MC5_DB_AOPEN_LRN_CMD 0x7d8
1806
1807#define A_MC5_DB_SYN_SRCH_CMD 0x7dc
1808
1809#define A_MC5_DB_SYN_LRN_CMD 0x7e0
1810
1811#define A_MC5_DB_ACK_SRCH_CMD 0x7e4
1812
1813#define A_MC5_DB_ACK_LRN_CMD 0x7e8
1814
1815#define A_MC5_DB_ILOOKUP_CMD 0x7ec
1816
1817#define A_MC5_DB_ELOOKUP_CMD 0x7f0
1818
1819#define A_MC5_DB_DATA_WRITE_CMD 0x7f4
1820
1821#define A_MC5_DB_DATA_READ_CMD 0x7f8
1822
1823#define XGMAC0_0_BASE_ADDR 0x800
1824
1825#define A_XGM_TX_CTRL 0x800
1826
1827#define S_TXEN 0
1828#define V_TXEN(x) ((x) << S_TXEN)
1829#define F_TXEN V_TXEN(1U)
1830
1831#define A_XGM_TX_CFG 0x804
1832
1833#define S_TXPAUSEEN 0
1834#define V_TXPAUSEEN(x) ((x) << S_TXPAUSEEN)
1835#define F_TXPAUSEEN V_TXPAUSEEN(1U)
1836
1837#define A_XGM_RX_CTRL 0x80c
1838
1839#define S_RXEN 0
1840#define V_RXEN(x) ((x) << S_RXEN)
1841#define F_RXEN V_RXEN(1U)
1842
1843#define A_XGM_RX_CFG 0x810
1844
1845#define S_DISPAUSEFRAMES 9
1846#define V_DISPAUSEFRAMES(x) ((x) << S_DISPAUSEFRAMES)
1847#define F_DISPAUSEFRAMES V_DISPAUSEFRAMES(1U)
1848
1849#define S_EN1536BFRAMES 8
1850#define V_EN1536BFRAMES(x) ((x) << S_EN1536BFRAMES)
1851#define F_EN1536BFRAMES V_EN1536BFRAMES(1U)
1852
1853#define S_ENJUMBO 7
1854#define V_ENJUMBO(x) ((x) << S_ENJUMBO)
1855#define F_ENJUMBO V_ENJUMBO(1U)
1856
1857#define S_RMFCS 6
1858#define V_RMFCS(x) ((x) << S_RMFCS)
1859#define F_RMFCS V_RMFCS(1U)
1860
1861#define S_ENHASHMCAST 2
1862#define V_ENHASHMCAST(x) ((x) << S_ENHASHMCAST)
1863#define F_ENHASHMCAST V_ENHASHMCAST(1U)
1864
1865#define S_COPYALLFRAMES 0
1866#define V_COPYALLFRAMES(x) ((x) << S_COPYALLFRAMES)
1867#define F_COPYALLFRAMES V_COPYALLFRAMES(1U)
1868
1869#define A_XGM_RX_HASH_LOW 0x814
1870
1871#define A_XGM_RX_HASH_HIGH 0x818
1872
1873#define A_XGM_RX_EXACT_MATCH_LOW_1 0x81c
1874
1875#define A_XGM_RX_EXACT_MATCH_HIGH_1 0x820
1876
1877#define A_XGM_RX_EXACT_MATCH_LOW_2 0x824
1878
1879#define A_XGM_RX_EXACT_MATCH_LOW_3 0x82c
1880
1881#define A_XGM_RX_EXACT_MATCH_LOW_4 0x834
1882
1883#define A_XGM_RX_EXACT_MATCH_LOW_5 0x83c
1884
1885#define A_XGM_RX_EXACT_MATCH_LOW_6 0x844
1886
1887#define A_XGM_RX_EXACT_MATCH_LOW_7 0x84c
1888
1889#define A_XGM_RX_EXACT_MATCH_LOW_8 0x854
1890
1891#define A_XGM_STAT_CTRL 0x880
1892
1893#define S_CLRSTATS 2
1894#define V_CLRSTATS(x) ((x) << S_CLRSTATS)
1895#define F_CLRSTATS V_CLRSTATS(1U)
1896
1897#define A_XGM_RXFIFO_CFG 0x884
1898
1899#define S_RXFIFOPAUSEHWM 17
1900#define M_RXFIFOPAUSEHWM 0xfff
1901
1902#define V_RXFIFOPAUSEHWM(x) ((x) << S_RXFIFOPAUSEHWM)
1903
1904#define G_RXFIFOPAUSEHWM(x) (((x) >> S_RXFIFOPAUSEHWM) & M_RXFIFOPAUSEHWM)
1905
1906#define S_RXFIFOPAUSELWM 5
1907#define M_RXFIFOPAUSELWM 0xfff
1908
1909#define V_RXFIFOPAUSELWM(x) ((x) << S_RXFIFOPAUSELWM)
1910
1911#define G_RXFIFOPAUSELWM(x) (((x) >> S_RXFIFOPAUSELWM) & M_RXFIFOPAUSELWM)
1912
1913#define S_RXSTRFRWRD 1
1914#define V_RXSTRFRWRD(x) ((x) << S_RXSTRFRWRD)
1915#define F_RXSTRFRWRD V_RXSTRFRWRD(1U)
1916
1917#define S_DISERRFRAMES 0
1918#define V_DISERRFRAMES(x) ((x) << S_DISERRFRAMES)
1919#define F_DISERRFRAMES V_DISERRFRAMES(1U)
1920
1921#define A_XGM_TXFIFO_CFG 0x888
1922
1923#define S_TXFIFOTHRESH 4
1924#define M_TXFIFOTHRESH 0x1ff
1925
1926#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH)
1927
1928#define A_XGM_SERDES_CTRL 0x890
1929#define A_XGM_SERDES_CTRL0 0x8e0
1930
1931#define S_SERDESRESET_ 24
1932#define V_SERDESRESET_(x) ((x) << S_SERDESRESET_)
1933#define F_SERDESRESET_ V_SERDESRESET_(1U)
1934
1935#define S_RXENABLE 4
1936#define V_RXENABLE(x) ((x) << S_RXENABLE)
1937#define F_RXENABLE V_RXENABLE(1U)
1938
1939#define S_TXENABLE 3
1940#define V_TXENABLE(x) ((x) << S_TXENABLE)
1941#define F_TXENABLE V_TXENABLE(1U)
1942
1943#define A_XGM_PAUSE_TIMER 0x890
1944
1945#define A_XGM_RGMII_IMP 0x89c
1946
1947#define S_XGM_IMPSETUPDATE 6
1948#define V_XGM_IMPSETUPDATE(x) ((x) << S_XGM_IMPSETUPDATE)
1949#define F_XGM_IMPSETUPDATE V_XGM_IMPSETUPDATE(1U)
1950
1951#define S_RGMIIIMPPD 3
1952#define M_RGMIIIMPPD 0x7
1953#define V_RGMIIIMPPD(x) ((x) << S_RGMIIIMPPD)
1954
1955#define S_RGMIIIMPPU 0
1956#define M_RGMIIIMPPU 0x7
1957#define V_RGMIIIMPPU(x) ((x) << S_RGMIIIMPPU)
1958
1959#define S_CALRESET 8
1960#define V_CALRESET(x) ((x) << S_CALRESET)
1961#define F_CALRESET V_CALRESET(1U)
1962
1963#define S_CALUPDATE 7
1964#define V_CALUPDATE(x) ((x) << S_CALUPDATE)
1965#define F_CALUPDATE V_CALUPDATE(1U)
1966
1967#define A_XGM_XAUI_IMP 0x8a0
1968
1969#define S_CALBUSY 31
1970#define V_CALBUSY(x) ((x) << S_CALBUSY)
1971#define F_CALBUSY V_CALBUSY(1U)
1972
1973#define S_XGM_CALFAULT 29
1974#define V_XGM_CALFAULT(x) ((x) << S_XGM_CALFAULT)
1975#define F_XGM_CALFAULT V_XGM_CALFAULT(1U)
1976
1977#define S_CALIMP 24
1978#define M_CALIMP 0x1f
1979#define V_CALIMP(x) ((x) << S_CALIMP)
1980#define G_CALIMP(x) (((x) >> S_CALIMP) & M_CALIMP)
1981
1982#define S_XAUIIMP 0
1983#define M_XAUIIMP 0x7
1984#define V_XAUIIMP(x) ((x) << S_XAUIIMP)
1985
1986#define A_XGM_RX_MAX_PKT_SIZE 0x8a8
1987#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
1988
1989#define A_XGM_RESET_CTRL 0x8ac
1990
1991#define S_XG2G_RESET_ 3
1992#define V_XG2G_RESET_(x) ((x) << S_XG2G_RESET_)
1993#define F_XG2G_RESET_ V_XG2G_RESET_(1U)
1994
1995#define S_RGMII_RESET_ 2
1996#define V_RGMII_RESET_(x) ((x) << S_RGMII_RESET_)
1997#define F_RGMII_RESET_ V_RGMII_RESET_(1U)
1998
1999#define S_PCS_RESET_ 1
2000#define V_PCS_RESET_(x) ((x) << S_PCS_RESET_)
2001#define F_PCS_RESET_ V_PCS_RESET_(1U)
2002
2003#define S_MAC_RESET_ 0
2004#define V_MAC_RESET_(x) ((x) << S_MAC_RESET_)
2005#define F_MAC_RESET_ V_MAC_RESET_(1U)
2006
2007#define A_XGM_PORT_CFG 0x8b8
2008
2009#define S_CLKDIVRESET_ 3
2010#define V_CLKDIVRESET_(x) ((x) << S_CLKDIVRESET_)
2011#define F_CLKDIVRESET_ V_CLKDIVRESET_(1U)
2012
2013#define S_PORTSPEED 1
2014#define M_PORTSPEED 0x3
2015
2016#define V_PORTSPEED(x) ((x) << S_PORTSPEED)
2017
2018#define S_ENRGMII 0
2019#define V_ENRGMII(x) ((x) << S_ENRGMII)
2020#define F_ENRGMII V_ENRGMII(1U)
2021
2022#define A_XGM_INT_ENABLE 0x8d4
2023
2024#define S_TXFIFO_PRTY_ERR 17
2025#define M_TXFIFO_PRTY_ERR 0x7
2026
2027#define V_TXFIFO_PRTY_ERR(x) ((x) << S_TXFIFO_PRTY_ERR)
2028
2029#define S_RXFIFO_PRTY_ERR 14
2030#define M_RXFIFO_PRTY_ERR 0x7
2031
2032#define V_RXFIFO_PRTY_ERR(x) ((x) << S_RXFIFO_PRTY_ERR)
2033
2034#define S_TXFIFO_UNDERRUN 13
2035#define V_TXFIFO_UNDERRUN(x) ((x) << S_TXFIFO_UNDERRUN)
2036#define F_TXFIFO_UNDERRUN V_TXFIFO_UNDERRUN(1U)
2037
2038#define S_RXFIFO_OVERFLOW 12
2039#define V_RXFIFO_OVERFLOW(x) ((x) << S_RXFIFO_OVERFLOW)
2040#define F_RXFIFO_OVERFLOW V_RXFIFO_OVERFLOW(1U)
2041
2042#define S_SERDES_LOS 4
2043#define M_SERDES_LOS 0xf
2044
2045#define V_SERDES_LOS(x) ((x) << S_SERDES_LOS)
2046
2047#define S_XAUIPCSCTCERR 3
2048#define V_XAUIPCSCTCERR(x) ((x) << S_XAUIPCSCTCERR)
2049#define F_XAUIPCSCTCERR V_XAUIPCSCTCERR(1U)
2050
2051#define S_XAUIPCSALIGNCHANGE 2
2052#define V_XAUIPCSALIGNCHANGE(x) ((x) << S_XAUIPCSALIGNCHANGE)
2053#define F_XAUIPCSALIGNCHANGE V_XAUIPCSALIGNCHANGE(1U)
2054
2055#define A_XGM_INT_CAUSE 0x8d8
2056
2057#define A_XGM_XAUI_ACT_CTRL 0x8dc
2058
2059#define S_TXACTENABLE 1
2060#define V_TXACTENABLE(x) ((x) << S_TXACTENABLE)
2061#define F_TXACTENABLE V_TXACTENABLE(1U)
2062
2063#define A_XGM_SERDES_CTRL0 0x8e0
2064
2065#define S_RESET3 23
2066#define V_RESET3(x) ((x) << S_RESET3)
2067#define F_RESET3 V_RESET3(1U)
2068
2069#define S_RESET2 22
2070#define V_RESET2(x) ((x) << S_RESET2)
2071#define F_RESET2 V_RESET2(1U)
2072
2073#define S_RESET1 21
2074#define V_RESET1(x) ((x) << S_RESET1)
2075#define F_RESET1 V_RESET1(1U)
2076
2077#define S_RESET0 20
2078#define V_RESET0(x) ((x) << S_RESET0)
2079#define F_RESET0 V_RESET0(1U)
2080
2081#define S_PWRDN3 19
2082#define V_PWRDN3(x) ((x) << S_PWRDN3)
2083#define F_PWRDN3 V_PWRDN3(1U)
2084
2085#define S_PWRDN2 18
2086#define V_PWRDN2(x) ((x) << S_PWRDN2)
2087#define F_PWRDN2 V_PWRDN2(1U)
2088
2089#define S_PWRDN1 17
2090#define V_PWRDN1(x) ((x) << S_PWRDN1)
2091#define F_PWRDN1 V_PWRDN1(1U)
2092
2093#define S_PWRDN0 16
2094#define V_PWRDN0(x) ((x) << S_PWRDN0)
2095#define F_PWRDN0 V_PWRDN0(1U)
2096
2097#define S_RESETPLL23 15
2098#define V_RESETPLL23(x) ((x) << S_RESETPLL23)
2099#define F_RESETPLL23 V_RESETPLL23(1U)
2100
2101#define S_RESETPLL01 14
2102#define V_RESETPLL01(x) ((x) << S_RESETPLL01)
2103#define F_RESETPLL01 V_RESETPLL01(1U)
2104
2105#define A_XGM_SERDES_STAT0 0x8f0
2106
2107#define S_LOWSIG0 0
2108#define V_LOWSIG0(x) ((x) << S_LOWSIG0)
2109#define F_LOWSIG0 V_LOWSIG0(1U)
2110
2111#define A_XGM_SERDES_STAT3 0x8fc
2112
2113#define A_XGM_STAT_TX_BYTE_LOW 0x900
2114
2115#define A_XGM_STAT_TX_BYTE_HIGH 0x904
2116
2117#define A_XGM_STAT_TX_FRAME_LOW 0x908
2118
2119#define A_XGM_STAT_TX_FRAME_HIGH 0x90c
2120
2121#define A_XGM_STAT_TX_BCAST 0x910
2122
2123#define A_XGM_STAT_TX_MCAST 0x914
2124
2125#define A_XGM_STAT_TX_PAUSE 0x918
2126
2127#define A_XGM_STAT_TX_64B_FRAMES 0x91c
2128
2129#define A_XGM_STAT_TX_65_127B_FRAMES 0x920
2130
2131#define A_XGM_STAT_TX_128_255B_FRAMES 0x924
2132
2133#define A_XGM_STAT_TX_256_511B_FRAMES 0x928
2134
2135#define A_XGM_STAT_TX_512_1023B_FRAMES 0x92c
2136
2137#define A_XGM_STAT_TX_1024_1518B_FRAMES 0x930
2138
2139#define A_XGM_STAT_TX_1519_MAXB_FRAMES 0x934
2140
2141#define A_XGM_STAT_TX_ERR_FRAMES 0x938
2142
2143#define A_XGM_STAT_RX_BYTES_LOW 0x93c
2144
2145#define A_XGM_STAT_RX_BYTES_HIGH 0x940
2146
2147#define A_XGM_STAT_RX_FRAMES_LOW 0x944
2148
2149#define A_XGM_STAT_RX_FRAMES_HIGH 0x948
2150
2151#define A_XGM_STAT_RX_BCAST_FRAMES 0x94c
2152
2153#define A_XGM_STAT_RX_MCAST_FRAMES 0x950
2154
2155#define A_XGM_STAT_RX_PAUSE_FRAMES 0x954
2156
2157#define A_XGM_STAT_RX_64B_FRAMES 0x958
2158
2159#define A_XGM_STAT_RX_65_127B_FRAMES 0x95c
2160
2161#define A_XGM_STAT_RX_128_255B_FRAMES 0x960
2162
2163#define A_XGM_STAT_RX_256_511B_FRAMES 0x964
2164
2165#define A_XGM_STAT_RX_512_1023B_FRAMES 0x968
2166
2167#define A_XGM_STAT_RX_1024_1518B_FRAMES 0x96c
2168
2169#define A_XGM_STAT_RX_1519_MAXB_FRAMES 0x970
2170
2171#define A_XGM_STAT_RX_SHORT_FRAMES 0x974
2172
2173#define A_XGM_STAT_RX_OVERSIZE_FRAMES 0x978
2174
2175#define A_XGM_STAT_RX_JABBER_FRAMES 0x97c
2176
2177#define A_XGM_STAT_RX_CRC_ERR_FRAMES 0x980
2178
2179#define A_XGM_STAT_RX_LENGTH_ERR_FRAMES 0x984
2180
2181#define A_XGM_STAT_RX_SYM_CODE_ERR_FRAMES 0x988
2182
2183#define A_XGM_SERDES_STATUS0 0x98c
2184
2185#define A_XGM_SERDES_STATUS1 0x990
2186
2187#define S_CMULOCK 31
2188#define V_CMULOCK(x) ((x) << S_CMULOCK)
2189#define F_CMULOCK V_CMULOCK(1U)
2190
2191#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
2192
2193#define A_XGM_RX_SPI4_SOP_EOP_CNT 0x9ac
2194
2195#define XGMAC0_1_BASE_ADDR 0xa00
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
new file mode 100644
index 000000000000..6c77f4bab62f
--- /dev/null
+++ b/drivers/net/cxgb3/sge.c
@@ -0,0 +1,2702 @@
1/*
2 * This file is part of the Chelsio T3 Ethernet driver.
3 *
4 * Copyright (C) 2005-2006 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 */
11
12#include <linux/skbuff.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/if_vlan.h>
16#include <linux/ip.h>
17#include <linux/tcp.h>
18#include <linux/dma-mapping.h>
19#include "common.h"
20#include "regs.h"
21#include "sge_defs.h"
22#include "t3_cpl.h"
23#include "firmware_exports.h"
24
25#define USE_GTS 0
26
27#define SGE_RX_SM_BUF_SIZE 1536
28#define SGE_RX_COPY_THRES 256
29
30# define SGE_RX_DROP_THRES 16
31
32/*
33 * Period of the Tx buffer reclaim timer. This timer does not need to run
34 * frequently as Tx buffers are usually reclaimed by new Tx packets.
35 */
36#define TX_RECLAIM_PERIOD (HZ / 4)
37
38/* WR size in bytes */
39#define WR_LEN (WR_FLITS * 8)
40
41/*
42 * Types of Tx queues in each queue set. Order here matters, do not change.
43 */
44enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
45
46/* Values for sge_txq.flags */
47enum {
48 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
49 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
50};
51
52struct tx_desc {
53 u64 flit[TX_DESC_FLITS];
54};
55
56struct rx_desc {
57 __be32 addr_lo;
58 __be32 len_gen;
59 __be32 gen2;
60 __be32 addr_hi;
61};
62
63struct tx_sw_desc { /* SW state per Tx descriptor */
64 struct sk_buff *skb;
65};
66
67struct rx_sw_desc { /* SW state per Rx descriptor */
68 struct sk_buff *skb;
69 DECLARE_PCI_UNMAP_ADDR(dma_addr);
70};
71
72struct rsp_desc { /* response queue descriptor */
73 struct rss_header rss_hdr;
74 __be32 flags;
75 __be32 len_cq;
76 u8 imm_data[47];
77 u8 intr_gen;
78};
79
80struct unmap_info { /* packet unmapping info, overlays skb->cb */
81 int sflit; /* start flit of first SGL entry in Tx descriptor */
82 u16 fragidx; /* first page fragment in current Tx descriptor */
83 u16 addr_idx; /* buffer index of first SGL entry in descriptor */
84 u32 len; /* mapped length of skb main body */
85};
86
87/*
88 * Maps a number of flits to the number of Tx descriptors that can hold them.
89 * The formula is
90 *
91 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
92 *
93 * HW allows up to 4 descriptors to be combined into a WR.
94 */
95static u8 flit_desc_map[] = {
96 0,
97#if SGE_NUM_GENBITS == 1
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
101 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
102#elif SGE_NUM_GENBITS == 2
103 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
106 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
107#else
108# error "SGE_NUM_GENBITS must be 1 or 2"
109#endif
110};
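As a sanity check, the table entries are just the precomputed form of the formula in the comment above. A hedged worked illustration, assuming WR_FLITS == 16 (which is what the SGE_NUM_GENBITS == 1 table implies, i.e. one descriptor holds up to 16 flits):

	/*
	 * Illustrative arithmetic only, not part of the patch:
	 *
	 *	flits = 16  ->  1 + (16 - 2) / 15 = 1 descriptor
	 *	flits = 17  ->  1 + (17 - 2) / 15 = 2 descriptors
	 *	flits = 46  ->  1 + (46 - 2) / 15 = 3 descriptors
	 *	flits = 47  ->  1 + (47 - 2) / 15 = 4 descriptors
	 *
	 * which matches the 1/2/3/4 bands of the table above.
	 */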
111
112static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
113{
114 return container_of(q, struct sge_qset, fl[qidx]);
115}
116
117static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
118{
119 return container_of(q, struct sge_qset, rspq);
120}
121
122static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
123{
124 return container_of(q, struct sge_qset, txq[qidx]);
125}
126
127/**
128 * refill_rspq - replenish an SGE response queue
129 * @adapter: the adapter
130 * @q: the response queue to replenish
131 * @credits: how many new responses to make available
132 *
133 * Replenishes a response queue by making the supplied number of responses
134 * available to HW.
135 */
136static inline void refill_rspq(struct adapter *adapter,
137 const struct sge_rspq *q, unsigned int credits)
138{
139 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
140 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
141}
142
143/**
144 * need_skb_unmap - does the platform need unmapping of sk_buffs?
145 *
146 * Returns true if the platform needs sk_buff unmapping. The compiler
147 * optimizes away the unnecessary unmapping code when this returns false.
148 */
149static inline int need_skb_unmap(void)
150{
151 /*
152 * This structure is used to tell if the platform needs buffer
153 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
154 */
155 struct dummy {
156 DECLARE_PCI_UNMAP_ADDR(addr);
157 };
158
159 return sizeof(struct dummy) != 0;
160}
161
162/**
163 * unmap_skb - unmap a packet main body and its page fragments
164 * @skb: the packet
165 * @q: the Tx queue containing Tx descriptors for the packet
166 * @cidx: index of Tx descriptor
167 * @pdev: the PCI device
168 *
169 * Unmap the main body of an sk_buff and its page fragments, if any.
170 * Because of the fairly complicated structure of our SGLs and the desire
171 * to conserve space for metadata, we keep the information necessary to
172 * unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
173 * in the Tx descriptors (the physical addresses of the various data
174 * buffers). The send functions initialize the state in skb->cb so we
175 * can unmap the buffers held in the first Tx descriptor here, and we
176 * have enough information at this point to update the state for the next
177 * Tx descriptor.
178 */
179static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
180 unsigned int cidx, struct pci_dev *pdev)
181{
182 const struct sg_ent *sgp;
183 struct unmap_info *ui = (struct unmap_info *)skb->cb;
184 int nfrags, frag_idx, curflit, j = ui->addr_idx;
185
186 sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];
187
188 if (ui->len) {
189 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
190 PCI_DMA_TODEVICE);
191 ui->len = 0; /* so we know for next descriptor for this skb */
192 j = 1;
193 }
194
195 frag_idx = ui->fragidx;
196 curflit = ui->sflit + 1 + j;
197 nfrags = skb_shinfo(skb)->nr_frags;
198
199 while (frag_idx < nfrags && curflit < WR_FLITS) {
200 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
201 skb_shinfo(skb)->frags[frag_idx].size,
202 PCI_DMA_TODEVICE);
203 j ^= 1;
204 if (j == 0) {
205 sgp++;
206 curflit++;
207 }
208 curflit++;
209 frag_idx++;
210 }
211
212 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
213 ui->fragidx = frag_idx;
214 ui->addr_idx = j;
215 ui->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
216 }
217}
218
219/**
220 * free_tx_desc - reclaims Tx descriptors and their buffers
221 * @adapter: the adapter
222 * @q: the Tx queue to reclaim descriptors from
223 * @n: the number of descriptors to reclaim
224 *
225 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
226 * Tx buffers. Called with the Tx queue lock held.
227 */
228static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
229 unsigned int n)
230{
231 struct tx_sw_desc *d;
232 struct pci_dev *pdev = adapter->pdev;
233 unsigned int cidx = q->cidx;
234
235 d = &q->sdesc[cidx];
236 while (n--) {
237 if (d->skb) { /* an SGL is present */
238 if (need_skb_unmap())
239 unmap_skb(d->skb, q, cidx, pdev);
240 if (d->skb->priority == cidx)
241 kfree_skb(d->skb);
242 }
243 ++d;
244 if (++cidx == q->size) {
245 cidx = 0;
246 d = q->sdesc;
247 }
248 }
249 q->cidx = cidx;
250}
251
252/**
253 * reclaim_completed_tx - reclaims completed Tx descriptors
254 * @adapter: the adapter
255 * @q: the Tx queue to reclaim completed descriptors from
256 *
257 * Reclaims Tx descriptors that the SGE has indicated it has processed,
258 * and frees the associated buffers if possible. Called with the Tx
259 * queue's lock held.
260 */
261static inline void reclaim_completed_tx(struct adapter *adapter,
262 struct sge_txq *q)
263{
264 unsigned int reclaim = q->processed - q->cleaned;
265
266 if (reclaim) {
267 free_tx_desc(adapter, q, reclaim);
268 q->cleaned += reclaim;
269 q->in_use -= reclaim;
270 }
271}
272
273/**
274 * should_restart_tx - are there enough resources to restart a Tx queue?
275 * @q: the Tx queue
276 *
277 * Checks if there are enough descriptors to restart a suspended Tx queue.
278 */
279static inline int should_restart_tx(const struct sge_txq *q)
280{
281 unsigned int r = q->processed - q->cleaned;
282
283 return q->in_use - r < (q->size >> 1);
284}
285
286/**
287 * free_rx_bufs - free the Rx buffers on an SGE free list
288 * @pdev: the PCI device associated with the adapter
289 * @rxq: the SGE free list to clean up
290 *
291 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
292 * this queue should be stopped before calling this function.
293 */
294static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
295{
296 unsigned int cidx = q->cidx;
297
298 while (q->credits--) {
299 struct rx_sw_desc *d = &q->sdesc[cidx];
300
301 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
302 q->buf_size, PCI_DMA_FROMDEVICE);
303 kfree_skb(d->skb);
304 d->skb = NULL;
305 if (++cidx == q->size)
306 cidx = 0;
307 }
308}
309
310/**
311 * add_one_rx_buf - add a packet buffer to a free-buffer list
312 * @skb: the buffer to add
313 * @len: the buffer length
314 * @d: the HW Rx descriptor to write
315 * @sd: the SW Rx descriptor to write
316 * @gen: the generation bit value
317 * @pdev: the PCI device associated with the adapter
318 *
319 * Add a buffer of the given length to the supplied HW and SW Rx
320 * descriptors.
321 */
322static inline void add_one_rx_buf(struct sk_buff *skb, unsigned int len,
323 struct rx_desc *d, struct rx_sw_desc *sd,
324 unsigned int gen, struct pci_dev *pdev)
325{
326 dma_addr_t mapping;
327
328 sd->skb = skb;
329 mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
330 pci_unmap_addr_set(sd, dma_addr, mapping);
331
332 d->addr_lo = cpu_to_be32(mapping);
333 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
334 wmb();
335 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
336 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
337}
338
339/**
340 * refill_fl - refill an SGE free-buffer list
341 * @adapter: the adapter
342 * @q: the free-list to refill
343 * @n: the number of new buffers to allocate
344 * @gfp: the gfp flags for allocating new buffers
345 *
346 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
347 * allocated with the supplied gfp flags. The caller must ensure that
348 * @n does not exceed the queue's capacity.
349 */
350static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
351{
352 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
353 struct rx_desc *d = &q->desc[q->pidx];
354
355 while (n--) {
356 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
357
358 if (!skb)
359 break;
360
361 add_one_rx_buf(skb, q->buf_size, d, sd, q->gen, adap->pdev);
362 d++;
363 sd++;
364 if (++q->pidx == q->size) {
365 q->pidx = 0;
366 q->gen ^= 1;
367 sd = q->sdesc;
368 d = q->desc;
369 }
370 q->credits++;
371 }
372
373 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
374}
375
376static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
377{
378 refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
379}
380
381/**
382 * recycle_rx_buf - recycle a receive buffer
383 * @adapter: the adapter
384 * @q: the SGE free list
385 * @idx: index of buffer to recycle
386 *
387 * Recycles the specified buffer on the given free list by adding it at
388 * the next available slot on the list.
389 */
390static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
391 unsigned int idx)
392{
393 struct rx_desc *from = &q->desc[idx];
394 struct rx_desc *to = &q->desc[q->pidx];
395
396 q->sdesc[q->pidx] = q->sdesc[idx];
397 to->addr_lo = from->addr_lo; /* already big endian */
398 to->addr_hi = from->addr_hi; /* likewise */
399 wmb();
400 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
401 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
402 q->credits++;
403
404 if (++q->pidx == q->size) {
405 q->pidx = 0;
406 q->gen ^= 1;
407 }
408 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
409}
410
411/**
412 * alloc_ring - allocate resources for an SGE descriptor ring
413 * @pdev: the PCI device
414 * @nelem: the number of descriptors
415 * @elem_size: the size of each descriptor
416 * @sw_size: the size of the SW state associated with each ring element
417 * @phys: the physical address of the allocated ring
418 * @metadata: address of the array holding the SW state for the ring
419 *
420 * Allocates resources for an SGE descriptor ring, such as Tx queues,
421 * free buffer lists, or response queues. Each SGE ring requires
422 * space for its HW descriptors plus, optionally, space for the SW state
423 * associated with each HW entry (the metadata). The function returns
424 * three values: the virtual address for the HW ring (the return value
425 * of the function), the physical address of the HW ring, and the address
426 * of the SW ring.
427 */
428static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
429 size_t sw_size, dma_addr_t *phys, void *metadata)
430{
431 size_t len = nelem * elem_size;
432 void *s = NULL;
433 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
434
435 if (!p)
436 return NULL;
437 if (sw_size) {
438 s = kcalloc(nelem, sw_size, GFP_KERNEL);
439
440 if (!s) {
441 dma_free_coherent(&pdev->dev, len, p, *phys);
442 return NULL;
443 }
444 }
445 if (metadata)
446 *(void **)metadata = s;
447 memset(p, 0, len);
448 return p;
449}
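A hedged sketch of how a caller uses alloc_ring(); the actual call sites are in the queue-set allocation code later in this file, but the shape is the same: the HW descriptor ring comes back as the return value, its DMA address through @phys, and the software ring through @metadata. The field names below follow struct sge_fl as used in t3_free_qset() above.

	/* Illustrative sketch only, not lines from the patch. */
	q->fl[0].desc = alloc_ring(adapter->pdev, q->fl[0].size,
				   sizeof(struct rx_desc),
				   sizeof(struct rx_sw_desc),
				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
	if (!q->fl[0].desc)
		return -ENOMEM;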
450
451/**
452 * free_qset - free the resources of an SGE queue set
453 * @adapter: the adapter owning the queue set
454 * @q: the queue set
455 *
456 * Release the HW and SW resources associated with an SGE queue set, such
457 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
458 * queue set must be quiesced prior to calling this.
459 */
460void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
461{
462 int i;
463 struct pci_dev *pdev = adapter->pdev;
464
465 if (q->tx_reclaim_timer.function)
466 del_timer_sync(&q->tx_reclaim_timer);
467
468 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
469 if (q->fl[i].desc) {
470 spin_lock(&adapter->sge.reg_lock);
471 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
472 spin_unlock(&adapter->sge.reg_lock);
473 free_rx_bufs(pdev, &q->fl[i]);
474 kfree(q->fl[i].sdesc);
475 dma_free_coherent(&pdev->dev,
476 q->fl[i].size *
477 sizeof(struct rx_desc), q->fl[i].desc,
478 q->fl[i].phys_addr);
479 }
480
481 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
482 if (q->txq[i].desc) {
483 spin_lock(&adapter->sge.reg_lock);
484 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
485 spin_unlock(&adapter->sge.reg_lock);
486 if (q->txq[i].sdesc) {
487 free_tx_desc(adapter, &q->txq[i],
488 q->txq[i].in_use);
489 kfree(q->txq[i].sdesc);
490 }
491 dma_free_coherent(&pdev->dev,
492 q->txq[i].size *
493 sizeof(struct tx_desc),
494 q->txq[i].desc, q->txq[i].phys_addr);
495 __skb_queue_purge(&q->txq[i].sendq);
496 }
497
498 if (q->rspq.desc) {
499 spin_lock(&adapter->sge.reg_lock);
500 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
501 spin_unlock(&adapter->sge.reg_lock);
502 dma_free_coherent(&pdev->dev,
503 q->rspq.size * sizeof(struct rsp_desc),
504 q->rspq.desc, q->rspq.phys_addr);
505 }
506
507 if (q->netdev)
508 q->netdev->atalk_ptr = NULL;
509
510 memset(q, 0, sizeof(*q));
511}
512
513/**
514 * init_qset_cntxt - initialize an SGE queue set context info
515 * @qs: the queue set
516 * @id: the queue set id
517 *
518 * Initializes the TIDs and context ids for the queues of a queue set.
519 */
520static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
521{
522 qs->rspq.cntxt_id = id;
523 qs->fl[0].cntxt_id = 2 * id;
524 qs->fl[1].cntxt_id = 2 * id + 1;
525 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
526 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
527 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
528 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
529 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
530}
531
532/**
533 * sgl_len - calculates the size of an SGL of the given capacity
534 * @n: the number of SGL entries
535 *
536 * Calculates the number of flits needed for a scatter/gather list that
537 * can hold the given number of entries.
538 */
539static inline unsigned int sgl_len(unsigned int n)
540{
541 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
542 return (3 * n) / 2 + (n & 1);
543}
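The 3/2 factor reflects the SGL layout used by make_sgl() below: judging from how sg_ent is filled in this file, each entry pair packs two 32-bit lengths and two 64-bit addresses into three 8-byte flits, with a trailing odd entry taking two flits. A few worked values under that assumption:

	/*
	 * Illustrative values only, not part of the patch:
	 *
	 *	sgl_len(1) = 2 flits   (one len/addr pair, rounded up)
	 *	sgl_len(2) = 3 flits   (one full sg_ent)
	 *	sgl_len(3) = 5 flits
	 *	sgl_len(4) = 6 flits   (two full sg_ents)
	 */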
544
545/**
546 * flits_to_desc - returns the num of Tx descriptors for the given flits
547 * @n: the number of flits
548 *
549 * Calculates the number of Tx descriptors needed for the supplied number
550 * of flits.
551 */
552static inline unsigned int flits_to_desc(unsigned int n)
553{
554 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
555 return flit_desc_map[n];
556}
557
558/**
559 * get_packet - return the next ingress packet buffer from a free list
560 * @adap: the adapter that received the packet
561 * @fl: the SGE free list holding the packet
562 * @len: the packet length including any SGE padding
563 * @drop_thres: # of remaining buffers before we start dropping packets
564 *
565 * Get the next packet from a free list and complete setup of the
566 * sk_buff. If the packet is small we make a copy and recycle the
567 * original buffer, otherwise we use the original buffer itself. If a
568 * positive drop threshold is supplied packets are dropped and their
569 * buffers recycled if (a) the number of remaining buffers is under the
570 * threshold and the packet is too big to copy, or (b) the packet should
571 * be copied but there is no memory for the copy.
572 */
573static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
574 unsigned int len, unsigned int drop_thres)
575{
576 struct sk_buff *skb = NULL;
577 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
578
579 prefetch(sd->skb->data);
580
581 if (len <= SGE_RX_COPY_THRES) {
582 skb = alloc_skb(len, GFP_ATOMIC);
583 if (likely(skb != NULL)) {
584 __skb_put(skb, len);
585 pci_dma_sync_single_for_cpu(adap->pdev,
586 pci_unmap_addr(sd,
587 dma_addr),
588 len, PCI_DMA_FROMDEVICE);
589 memcpy(skb->data, sd->skb->data, len);
590 pci_dma_sync_single_for_device(adap->pdev,
591 pci_unmap_addr(sd,
592 dma_addr),
593 len, PCI_DMA_FROMDEVICE);
594 } else if (!drop_thres)
595 goto use_orig_buf;
596 recycle:
597 recycle_rx_buf(adap, fl, fl->cidx);
598 return skb;
599 }
600
601 if (unlikely(fl->credits < drop_thres))
602 goto recycle;
603
604 use_orig_buf:
605 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
606 fl->buf_size, PCI_DMA_FROMDEVICE);
607 skb = sd->skb;
608 skb_put(skb, len);
609 __refill_fl(adap, fl);
610 return skb;
611}
612
613/**
614 * get_imm_packet - return the next ingress packet buffer from a response
615 * @resp: the response descriptor containing the packet data
616 *
617 * Return a packet containing the immediate data of the given response.
618 */
619static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
620{
621 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
622
623 if (skb) {
624 __skb_put(skb, IMMED_PKT_SIZE);
625 memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE);
626 }
627 return skb;
628}
629
630/**
631 * calc_tx_descs - calculate the number of Tx descriptors for a packet
632 * @skb: the packet
633 *
634 * Returns the number of Tx descriptors needed for the given Ethernet
635 * packet. Ethernet packets require addition of WR and CPL headers.
636 */
637static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
638{
639 unsigned int flits;
640
641 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
642 return 1;
643
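	/*
	 * Non-immediate packets need an SGL covering the main body plus all
	 * page fragments, 2 flits for the CPL_TX_PKT header, and one extra
	 * flit when LSO uses the larger cpl_tx_pkt_lso header.
	 */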
644 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
645 if (skb_shinfo(skb)->gso_size)
646 flits++;
647 return flits_to_desc(flits);
648}
649
650/**
651 * make_sgl - populate a scatter/gather list for a packet
652 * @skb: the packet
653 * @sgp: the SGL to populate
654 * @start: start address of skb main body data to include in the SGL
655 * @len: length of skb main body data to include in the SGL
656 * @pdev: the PCI device
657 *
658 * Generates a scatter/gather list for the buffers that make up a packet
659 * and returns the SGL size in 8-byte words. The caller must size the SGL
660 * appropriately.
661 */
662static inline unsigned int make_sgl(const struct sk_buff *skb,
663 struct sg_ent *sgp, unsigned char *start,
664 unsigned int len, struct pci_dev *pdev)
665{
666 dma_addr_t mapping;
667 unsigned int i, j = 0, nfrags;
668
669 if (len) {
670 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
671 sgp->len[0] = cpu_to_be32(len);
672 sgp->addr[0] = cpu_to_be64(mapping);
673 j = 1;
674 }
675
676 nfrags = skb_shinfo(skb)->nr_frags;
677 for (i = 0; i < nfrags; i++) {
678 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
679
680 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
681 frag->size, PCI_DMA_TODEVICE);
682 sgp->len[j] = cpu_to_be32(frag->size);
683 sgp->addr[j] = cpu_to_be64(mapping);
684 j ^= 1;
685 if (j == 0)
686 ++sgp;
687 }
688 if (j)
689 sgp->len[j] = 0;
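	/* j is 1 iff the total entry count is odd, so this equals
	 * sgl_len(nfrags + (len != 0)). */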
690 return ((nfrags + (len != 0)) * 3) / 2 + j;
691}
692
693/**
694 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
695 * @adap: the adapter
696 * @q: the Tx queue
697 *
 698 *	Ring the doorbell if a Tx queue is asleep. There is a natural race
 699 *	where the HW could go to sleep just after we checked; in that case
 700 *	the interrupt handler will detect the outstanding TX packet
 701 *	and ring the doorbell for us.
702 *
703 * When GTS is disabled we unconditionally ring the doorbell.
704 */
705static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
706{
707#if USE_GTS
708 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
709 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
710 set_bit(TXQ_LAST_PKT_DB, &q->flags);
711 t3_write_reg(adap, A_SG_KDOORBELL,
712 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
713 }
714#else
715 wmb(); /* write descriptors before telling HW */
716 t3_write_reg(adap, A_SG_KDOORBELL,
717 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
718#endif
719}
720
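/*
 * With two generation bits the last flit of each Tx descriptor carries a
 * copy of the generation, which lets the hardware recognize a completely
 * written descriptor.
 */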
721static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
722{
723#if SGE_NUM_GENBITS == 2
724 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
725#endif
726}
727
728/**
729 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
730 * @ndesc: number of Tx descriptors spanned by the SGL
731 * @skb: the packet corresponding to the WR
732 * @d: first Tx descriptor to be written
733 * @pidx: index of above descriptors
734 * @q: the SGE Tx queue
735 * @sgl: the SGL
736 * @flits: number of flits to the start of the SGL in the first descriptor
737 * @sgl_flits: the SGL size in flits
738 * @gen: the Tx descriptor generation
739 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
740 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
741 *
742 * Write a work request header and an associated SGL. If the SGL is
743 * small enough to fit into one Tx descriptor it has already been written
744 * and we just need to write the WR header. Otherwise we distribute the
745 * SGL across the number of descriptors it spans.
746 */
747static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
748 struct tx_desc *d, unsigned int pidx,
749 const struct sge_txq *q,
750 const struct sg_ent *sgl,
751 unsigned int flits, unsigned int sgl_flits,
752 unsigned int gen, unsigned int wr_hi,
753 unsigned int wr_lo)
754{
755 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
756 struct tx_sw_desc *sd = &q->sdesc[pidx];
757
758 sd->skb = skb;
759 if (need_skb_unmap()) {
760 struct unmap_info *ui = (struct unmap_info *)skb->cb;
761
762 ui->fragidx = 0;
763 ui->addr_idx = 0;
764 ui->sflit = flits;
765 }
766
767 if (likely(ndesc == 1)) {
768 skb->priority = pidx;
769 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
770 V_WR_SGLSFLT(flits)) | wr_hi;
771 wmb();
772 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
773 V_WR_GEN(gen)) | wr_lo;
774 wr_gen2(d, gen);
775 } else {
776 unsigned int ogen = gen;
777 const u64 *fp = (const u64 *)sgl;
778 struct work_request_hdr *wp = wrp;
779
780 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
781 V_WR_SGLSFLT(flits)) | wr_hi;
782
783 while (sgl_flits) {
784 unsigned int avail = WR_FLITS - flits;
785
786 if (avail > sgl_flits)
787 avail = sgl_flits;
788 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
789 sgl_flits -= avail;
790 ndesc--;
791 if (!sgl_flits)
792 break;
793
794 fp += avail;
795 d++;
796 sd++;
797 if (++pidx == q->size) {
798 pidx = 0;
799 gen ^= 1;
800 d = q->desc;
801 sd = q->sdesc;
802 }
803
804 sd->skb = skb;
805 wrp = (struct work_request_hdr *)d;
806 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
807 V_WR_SGLSFLT(1)) | wr_hi;
808 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
809 sgl_flits + 1)) |
810 V_WR_GEN(gen)) | wr_lo;
811 wr_gen2(d, gen);
812 flits = 1;
813 }
814 skb->priority = pidx;
815 wrp->wr_hi |= htonl(F_WR_EOP);
816 wmb();
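		/* The first descriptor's wr_lo, with the original generation,
		 * is written last so the HW does not see the WR until all of
		 * its descriptors are in place. */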
817 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
818 wr_gen2((struct tx_desc *)wp, ogen);
819 WARN_ON(ndesc != 0);
820 }
821}
822
823/**
824 * write_tx_pkt_wr - write a TX_PKT work request
825 * @adap: the adapter
826 * @skb: the packet to send
827 * @pi: the egress interface
828 * @pidx: index of the first Tx descriptor to write
829 * @gen: the generation value to use
830 * @q: the Tx queue
831 * @ndesc: number of descriptors the packet will occupy
832 * @compl: the value of the COMPL bit to use
833 *
834 * Generate a TX_PKT work request to send the supplied packet.
835 */
836static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
837 const struct port_info *pi,
838 unsigned int pidx, unsigned int gen,
839 struct sge_txq *q, unsigned int ndesc,
840 unsigned int compl)
841{
842 unsigned int flits, sgl_flits, cntrl, tso_info;
843 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
844 struct tx_desc *d = &q->desc[pidx];
845 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
846
847 cpl->len = htonl(skb->len | 0x80000000);
848 cntrl = V_TXPKT_INTF(pi->port_id);
849
850 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
851 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
852
853 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
854 if (tso_info) {
855 int eth_type;
856 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
857
858 d->flit[2] = 0;
859 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
860 hdr->cntrl = htonl(cntrl);
861 eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
862 CPL_ETH_II : CPL_ETH_II_VLAN;
863 tso_info |= V_LSO_ETH_TYPE(eth_type) |
864 V_LSO_IPHDR_WORDS(skb->nh.iph->ihl) |
865 V_LSO_TCPHDR_WORDS(skb->h.th->doff);
866 hdr->lso_info = htonl(tso_info);
867 flits = 3;
868 } else {
869 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
870 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
871 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
872 cpl->cntrl = htonl(cntrl);
873
874 if (skb->len <= WR_LEN - sizeof(*cpl)) {
875 q->sdesc[pidx].skb = NULL;
876 if (!skb->data_len)
877 memcpy(&d->flit[2], skb->data, skb->len);
878 else
879 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
880
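			/* 2 header flits plus the payload rounded up to whole
			 * flits; the leftover byte count travels in the WR's
			 * BCNTLFLT field. */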
881 flits = (skb->len + 7) / 8 + 2;
882 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
883 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
884 | F_WR_SOP | F_WR_EOP | compl);
885 wmb();
886 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
887 V_WR_TID(q->token));
888 wr_gen2(d, gen);
889 kfree_skb(skb);
890 return;
891 }
892
893 flits = 2;
894 }
895
896 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
897 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
898 if (need_skb_unmap())
899 ((struct unmap_info *)skb->cb)->len = skb_headlen(skb);
900
901 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
902 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
903 htonl(V_WR_TID(q->token)));
904}
905
906/**
 907 *	t3_eth_xmit - add a packet to the Ethernet Tx queue
908 * @skb: the packet
909 * @dev: the egress net device
910 *
911 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
912 */
913int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
914{
915 unsigned int ndesc, pidx, credits, gen, compl;
916 const struct port_info *pi = netdev_priv(dev);
917 struct adapter *adap = dev->priv;
918 struct sge_qset *qs = dev2qset(dev);
919 struct sge_txq *q = &qs->txq[TXQ_ETH];
920
921 /*
922 * The chip min packet length is 9 octets but play safe and reject
923 * anything shorter than an Ethernet header.
924 */
925 if (unlikely(skb->len < ETH_HLEN)) {
926 dev_kfree_skb(skb);
927 return NETDEV_TX_OK;
928 }
929
930 spin_lock(&q->lock);
931 reclaim_completed_tx(adap, q);
932
933 credits = q->size - q->in_use;
934 ndesc = calc_tx_descs(skb);
935
936 if (unlikely(credits < ndesc)) {
937 if (!netif_queue_stopped(dev)) {
938 netif_stop_queue(dev);
939 set_bit(TXQ_ETH, &qs->txq_stopped);
940 q->stops++;
941 dev_err(&adap->pdev->dev,
942 "%s: Tx ring %u full while queue awake!\n",
943 dev->name, q->cntxt_id & 7);
944 }
945 spin_unlock(&q->lock);
946 return NETDEV_TX_BUSY;
947 }
948
949 q->in_use += ndesc;
950 if (unlikely(credits - ndesc < q->stop_thres)) {
951 q->stops++;
952 netif_stop_queue(dev);
953 set_bit(TXQ_ETH, &qs->txq_stopped);
954#if !USE_GTS
955 if (should_restart_tx(q) &&
956 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
957 q->restarts++;
958 netif_wake_queue(dev);
959 }
960#endif
961 }
962
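	/* Request a WR completion roughly every 8 descriptors: bit 3 of the
	 * running unacked count becomes the COMPL bit and the count is then
	 * kept modulo 8. */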
963 gen = q->gen;
964 q->unacked += ndesc;
965 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
966 q->unacked &= 7;
967 pidx = q->pidx;
968 q->pidx += ndesc;
969 if (q->pidx >= q->size) {
970 q->pidx -= q->size;
971 q->gen ^= 1;
972 }
973
974 /* update port statistics */
975 if (skb->ip_summed == CHECKSUM_COMPLETE)
976 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
977 if (skb_shinfo(skb)->gso_size)
978 qs->port_stats[SGE_PSTAT_TSO]++;
979 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
980 qs->port_stats[SGE_PSTAT_VLANINS]++;
981
982 dev->trans_start = jiffies;
983 spin_unlock(&q->lock);
984
985 /*
986 * We do not use Tx completion interrupts to free DMAd Tx packets.
 987	 * This is good for performance but means that we rely on new Tx
988 * packets arriving to run the destructors of completed packets,
989 * which open up space in their sockets' send queues. Sometimes
990 * we do not get such new packets causing Tx to stall. A single
991 * UDP transmitter is a good example of this situation. We have
992 * a clean up timer that periodically reclaims completed packets
993 * but it doesn't run often enough (nor do we want it to) to prevent
994 * lengthy stalls. A solution to this problem is to run the
995 * destructor early, after the packet is queued but before it's DMAd.
 996	 * A drawback is that we lie to socket memory accounting, but the amount
997 * of extra memory is reasonable (limited by the number of Tx
998 * descriptors), the packets do actually get freed quickly by new
999 * packets almost always, and for protocols like TCP that wait for
1000	 * acks to really free up the data, the extra memory is even less.
1001 * On the positive side we run the destructors on the sending CPU
1002 * rather than on a potentially different completing CPU, usually a
1003 * good thing. We also run them without holding our Tx queue lock,
1004 * unlike what reclaim_completed_tx() would otherwise do.
1005 *
1006 * Run the destructor before telling the DMA engine about the packet
1007 * to make sure it doesn't complete and get freed prematurely.
1008 */
1009 if (likely(!skb_shared(skb)))
1010 skb_orphan(skb);
1011
1012 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1013 check_ring_tx_db(adap, q);
1014 return NETDEV_TX_OK;
1015}
1016
1017/**
1018 * write_imm - write a packet into a Tx descriptor as immediate data
1019 * @d: the Tx descriptor to write
1020 * @skb: the packet
1021 * @len: the length of packet data to write as immediate data
1022 * @gen: the generation bit value to write
1023 *
1024 * Writes a packet as immediate data into a Tx descriptor. The packet
1025 * contains a work request at its beginning. We must write the packet
1026 *	carefully so the SGE doesn't accidentally read it before it's written in
1027 * its entirety.
1028 */
1029static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1030 unsigned int len, unsigned int gen)
1031{
1032 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1033 struct work_request_hdr *to = (struct work_request_hdr *)d;
1034
1035 memcpy(&to[1], &from[1], len - sizeof(*from));
1036 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1037 V_WR_BCNTLFLT(len & 7));
1038 wmb();
1039 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1040 V_WR_LEN((len + 7) / 8));
1041 wr_gen2(d, gen);
1042 kfree_skb(skb);
1043}
1044
1045/**
1046 * check_desc_avail - check descriptor availability on a send queue
1047 * @adap: the adapter
1048 * @q: the send queue
1049 * @skb: the packet needing the descriptors
1050 * @ndesc: the number of Tx descriptors needed
1051 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1052 *
1053 * Checks if the requested number of Tx descriptors is available on an
1054 * SGE send queue. If the queue is already suspended or not enough
1055 * descriptors are available the packet is queued for later transmission.
1056 * Must be called with the Tx queue locked.
1057 *
1058 * Returns 0 if enough descriptors are available, 1 if there aren't
1059 * enough descriptors and the packet has been queued, and 2 if the caller
1060 * needs to retry because there weren't enough descriptors at the
1061 *	beginning of the call but some freed up in the meantime.
1062 */
1063static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1064 struct sk_buff *skb, unsigned int ndesc,
1065 unsigned int qid)
1066{
1067 if (unlikely(!skb_queue_empty(&q->sendq))) {
1068 addq_exit:__skb_queue_tail(&q->sendq, skb);
1069 return 1;
1070 }
1071 if (unlikely(q->size - q->in_use < ndesc)) {
1072 struct sge_qset *qs = txq_to_qset(q, qid);
1073
1074 set_bit(qid, &qs->txq_stopped);
1075 smp_mb__after_clear_bit();
1076
1077 if (should_restart_tx(q) &&
1078 test_and_clear_bit(qid, &qs->txq_stopped))
1079 return 2;
1080
1081 q->stops++;
1082 goto addq_exit;
1083 }
1084 return 0;
1085}
1086
1087/**
1088 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1089 * @q: the SGE control Tx queue
1090 *
1091 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1092 * that send only immediate data (presently just the control queues) and
1093 * thus do not have any sk_buffs to release.
1094 */
1095static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1096{
1097 unsigned int reclaim = q->processed - q->cleaned;
1098
1099 q->in_use -= reclaim;
1100 q->cleaned += reclaim;
1101}
1102
1103static inline int immediate(const struct sk_buff *skb)
1104{
1105 return skb->len <= WR_LEN && !skb->data_len;
1106}
1107
1108/**
1109 * ctrl_xmit - send a packet through an SGE control Tx queue
1110 * @adap: the adapter
1111 * @q: the control queue
1112 * @skb: the packet
1113 *
1114 * Send a packet through an SGE control Tx queue. Packets sent through
1115 * a control queue must fit entirely as immediate data in a single Tx
1116 * descriptor and have no page fragments.
1117 */
1118static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1119 struct sk_buff *skb)
1120{
1121 int ret;
1122 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1123
1124 if (unlikely(!immediate(skb))) {
1125 WARN_ON(1);
1126 dev_kfree_skb(skb);
1127 return NET_XMIT_SUCCESS;
1128 }
1129
1130 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1131 wrp->wr_lo = htonl(V_WR_TID(q->token));
1132
1133 spin_lock(&q->lock);
1134 again:reclaim_completed_tx_imm(q);
1135
1136 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1137 if (unlikely(ret)) {
1138 if (ret == 1) {
1139 spin_unlock(&q->lock);
1140 return NET_XMIT_CN;
1141 }
1142 goto again;
1143 }
1144
1145 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1146
1147 q->in_use++;
1148 if (++q->pidx >= q->size) {
1149 q->pidx = 0;
1150 q->gen ^= 1;
1151 }
1152 spin_unlock(&q->lock);
1153 wmb();
1154 t3_write_reg(adap, A_SG_KDOORBELL,
1155 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1156 return NET_XMIT_SUCCESS;
1157}
1158
1159/**
1160 * restart_ctrlq - restart a suspended control queue
1161 *	@qs: the queue set containing the control queue
1162 *
1163 * Resumes transmission on a suspended Tx control queue.
1164 */
1165static void restart_ctrlq(unsigned long data)
1166{
1167 struct sk_buff *skb;
1168 struct sge_qset *qs = (struct sge_qset *)data;
1169 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1170 struct adapter *adap = qs->netdev->priv;
1171
1172 spin_lock(&q->lock);
1173 again:reclaim_completed_tx_imm(q);
1174
1175 while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) {
1176
1177 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1178
1179 if (++q->pidx >= q->size) {
1180 q->pidx = 0;
1181 q->gen ^= 1;
1182 }
1183 q->in_use++;
1184 }
1185
1186 if (!skb_queue_empty(&q->sendq)) {
1187 set_bit(TXQ_CTRL, &qs->txq_stopped);
1188 smp_mb__after_clear_bit();
1189
1190 if (should_restart_tx(q) &&
1191 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1192 goto again;
1193 q->stops++;
1194 }
1195
1196 spin_unlock(&q->lock);
1197 t3_write_reg(adap, A_SG_KDOORBELL,
1198 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1199}
1200
1201/**
1202 * write_ofld_wr - write an offload work request
1203 * @adap: the adapter
1204 * @skb: the packet to send
1205 * @q: the Tx queue
1206 * @pidx: index of the first Tx descriptor to write
1207 * @gen: the generation value to use
1208 * @ndesc: number of descriptors the packet will occupy
1209 *
1210 * Write an offload work request to send the supplied packet. The packet
1211 * data already carry the work request with most fields populated.
1212 */
1213static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1214 struct sge_txq *q, unsigned int pidx,
1215 unsigned int gen, unsigned int ndesc)
1216{
1217 unsigned int sgl_flits, flits;
1218 struct work_request_hdr *from;
1219 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1220 struct tx_desc *d = &q->desc[pidx];
1221
1222 if (immediate(skb)) {
1223 q->sdesc[pidx].skb = NULL;
1224 write_imm(d, skb, skb->len, gen);
1225 return;
1226 }
1227
1228 /* Only TX_DATA builds SGLs */
1229
1230 from = (struct work_request_hdr *)skb->data;
1231 memcpy(&d->flit[1], &from[1], skb->h.raw - skb->data - sizeof(*from));
1232
1233 flits = (skb->h.raw - skb->data) / 8;
1234 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1235 sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
1236 adap->pdev);
1237 if (need_skb_unmap())
1238 ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
1239
1240 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1241 gen, from->wr_hi, from->wr_lo);
1242}
1243
1244/**
1245 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1246 * @skb: the packet
1247 *
1248 * Returns the number of Tx descriptors needed for the given offload
1249 * packet. These packets are already fully constructed.
1250 */
1251static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1252{
1253 unsigned int flits, cnt = skb_shinfo(skb)->nr_frags;
1254
1255 if (skb->len <= WR_LEN && cnt == 0)
1256 return 1; /* packet fits as immediate data */
1257
1258 flits = (skb->h.raw - skb->data) / 8; /* headers */
1259 if (skb->tail != skb->h.raw)
1260 cnt++;
1261 return flits_to_desc(flits + sgl_len(cnt));
1262}
1263
1264/**
1265 * ofld_xmit - send a packet through an offload queue
1266 * @adap: the adapter
1267 * @q: the Tx offload queue
1268 * @skb: the packet
1269 *
1270 * Send an offload packet through an SGE offload queue.
1271 */
1272static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1273 struct sk_buff *skb)
1274{
1275 int ret;
1276 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1277
1278 spin_lock(&q->lock);
1279 again:reclaim_completed_tx(adap, q);
1280
1281 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1282 if (unlikely(ret)) {
1283 if (ret == 1) {
1284 skb->priority = ndesc; /* save for restart */
1285 spin_unlock(&q->lock);
1286 return NET_XMIT_CN;
1287 }
1288 goto again;
1289 }
1290
1291 gen = q->gen;
1292 q->in_use += ndesc;
1293 pidx = q->pidx;
1294 q->pidx += ndesc;
1295 if (q->pidx >= q->size) {
1296 q->pidx -= q->size;
1297 q->gen ^= 1;
1298 }
1299 spin_unlock(&q->lock);
1300
1301 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1302 check_ring_tx_db(adap, q);
1303 return NET_XMIT_SUCCESS;
1304}
1305
1306/**
1307 * restart_offloadq - restart a suspended offload queue
1308 *	@qs: the queue set containing the offload queue
1309 *
1310 * Resumes transmission on a suspended Tx offload queue.
1311 */
1312static void restart_offloadq(unsigned long data)
1313{
1314 struct sk_buff *skb;
1315 struct sge_qset *qs = (struct sge_qset *)data;
1316 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1317 struct adapter *adap = qs->netdev->priv;
1318
1319 spin_lock(&q->lock);
1320 again:reclaim_completed_tx(adap, q);
1321
1322 while ((skb = skb_peek(&q->sendq)) != NULL) {
1323 unsigned int gen, pidx;
1324 unsigned int ndesc = skb->priority;
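		/* ofld_xmit() stashed the descriptor count in skb->priority
		 * before queueing the packet. */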
1325
1326 if (unlikely(q->size - q->in_use < ndesc)) {
1327 set_bit(TXQ_OFLD, &qs->txq_stopped);
1328 smp_mb__after_clear_bit();
1329
1330 if (should_restart_tx(q) &&
1331 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1332 goto again;
1333 q->stops++;
1334 break;
1335 }
1336
1337 gen = q->gen;
1338 q->in_use += ndesc;
1339 pidx = q->pidx;
1340 q->pidx += ndesc;
1341 if (q->pidx >= q->size) {
1342 q->pidx -= q->size;
1343 q->gen ^= 1;
1344 }
1345 __skb_unlink(skb, &q->sendq);
1346 spin_unlock(&q->lock);
1347
1348 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1349 spin_lock(&q->lock);
1350 }
1351 spin_unlock(&q->lock);
1352
1353#if USE_GTS
1354 set_bit(TXQ_RUNNING, &q->flags);
1355 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1356#endif
1357 t3_write_reg(adap, A_SG_KDOORBELL,
1358 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1359}
1360
1361/**
1362 * queue_set - return the queue set a packet should use
1363 * @skb: the packet
1364 *
1365 * Maps a packet to the SGE queue set it should use. The desired queue
1366 * set is carried in bits 1-3 in the packet's priority.
1367 */
1368static inline int queue_set(const struct sk_buff *skb)
1369{
1370 return skb->priority >> 1;
1371}
1372
1373/**
1374 * is_ctrl_pkt - return whether an offload packet is a control packet
1375 * @skb: the packet
1376 *
1377 * Determines whether an offload packet should use an OFLD or a CTRL
1378 * Tx queue. This is indicated by bit 0 in the packet's priority.
1379 */
1380static inline int is_ctrl_pkt(const struct sk_buff *skb)
1381{
1382 return skb->priority & 1;
1383}
1384
1385/**
1386 * t3_offload_tx - send an offload packet
1387 * @tdev: the offload device to send to
1388 * @skb: the packet
1389 *
1390 * Sends an offload packet. We use the packet priority to select the
1391 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1392 * should be sent as regular or control, bits 1-3 select the queue set.
1393 */
1394int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1395{
1396 struct adapter *adap = tdev2adap(tdev);
1397 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1398
1399 if (unlikely(is_ctrl_pkt(skb)))
1400 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1401
1402 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1403}
1404
1405/**
1406 * offload_enqueue - add an offload packet to an SGE offload receive queue
1407 * @q: the SGE response queue
1408 * @skb: the packet
1409 *
1410 * Add a new offload packet to an SGE response queue's offload packet
1411 * queue. If the packet is the first on the queue it schedules the RX
1412 * softirq to process the queue.
1413 */
1414static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1415{
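	/*
	 * Offload packets are chained through skb->next into a simple FIFO
	 * (rx_head/rx_tail); the RX softirq is scheduled only when the queue
	 * goes from empty to non-empty.
	 */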
1416 skb->next = skb->prev = NULL;
1417 if (q->rx_tail)
1418 q->rx_tail->next = skb;
1419 else {
1420 struct sge_qset *qs = rspq_to_qset(q);
1421
1422 if (__netif_rx_schedule_prep(qs->netdev))
1423 __netif_rx_schedule(qs->netdev);
1424 q->rx_head = skb;
1425 }
1426 q->rx_tail = skb;
1427}
1428
1429/**
1430 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1431 * @tdev: the offload device that will be receiving the packets
1432 * @q: the SGE response queue that assembled the bundle
1433 * @skbs: the partial bundle
1434 * @n: the number of packets in the bundle
1435 *
1436 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1437 */
1438static inline void deliver_partial_bundle(struct t3cdev *tdev,
1439 struct sge_rspq *q,
1440 struct sk_buff *skbs[], int n)
1441{
1442 if (n) {
1443 q->offload_bundles++;
1444 tdev->recv(tdev, skbs, n);
1445 }
1446}
1447
1448/**
1449 * ofld_poll - NAPI handler for offload packets in interrupt mode
1450 * @dev: the network device doing the polling
1451 * @budget: polling budget
1452 *
1453 * The NAPI handler for offload packets when a response queue is serviced
1454 * by the hard interrupt handler, i.e., when it's operating in non-polling
1455 * mode. Creates small packet batches and sends them through the offload
1456 * receive handler. Batches need to be of modest size as we do prefetches
1457 * on the packets in each.
1458 */
1459static int ofld_poll(struct net_device *dev, int *budget)
1460{
1461 struct adapter *adapter = dev->priv;
1462 struct sge_qset *qs = dev2qset(dev);
1463 struct sge_rspq *q = &qs->rspq;
1464 int work_done, limit = min(*budget, dev->quota), avail = limit;
1465
1466 while (avail) {
1467 struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
1468 int ngathered;
1469
1470 spin_lock_irq(&q->lock);
1471 head = q->rx_head;
1472 if (!head) {
1473 work_done = limit - avail;
1474 *budget -= work_done;
1475 dev->quota -= work_done;
1476 __netif_rx_complete(dev);
1477 spin_unlock_irq(&q->lock);
1478 return 0;
1479 }
1480
1481 tail = q->rx_tail;
1482 q->rx_head = q->rx_tail = NULL;
1483 spin_unlock_irq(&q->lock);
1484
1485 for (ngathered = 0; avail && head; avail--) {
1486 prefetch(head->data);
1487 skbs[ngathered] = head;
1488 head = head->next;
1489 skbs[ngathered]->next = NULL;
1490 if (++ngathered == RX_BUNDLE_SIZE) {
1491 q->offload_bundles++;
1492 adapter->tdev.recv(&adapter->tdev, skbs,
1493 ngathered);
1494 ngathered = 0;
1495 }
1496 }
1497 if (head) { /* splice remaining packets back onto Rx queue */
1498 spin_lock_irq(&q->lock);
1499 tail->next = q->rx_head;
1500 if (!q->rx_head)
1501 q->rx_tail = tail;
1502 q->rx_head = head;
1503 spin_unlock_irq(&q->lock);
1504 }
1505 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1506 }
1507 work_done = limit - avail;
1508 *budget -= work_done;
1509 dev->quota -= work_done;
1510 return 1;
1511}
1512
1513/**
1514 * rx_offload - process a received offload packet
1515 * @tdev: the offload device receiving the packet
1516 * @rq: the response queue that received the packet
1517 * @skb: the packet
1518 * @rx_gather: a gather list of packets if we are building a bundle
1519 * @gather_idx: index of the next available slot in the bundle
1520 *
1521 *	Process an ingress offload packet and add it to the offload ingress
1522 * queue. Returns the index of the next available slot in the bundle.
1523 */
1524static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1525 struct sk_buff *skb, struct sk_buff *rx_gather[],
1526 unsigned int gather_idx)
1527{
1528 rq->offload_pkts++;
1529 skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data;
1530
1531 if (rq->polling) {
1532 rx_gather[gather_idx++] = skb;
1533 if (gather_idx == RX_BUNDLE_SIZE) {
1534 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1535 gather_idx = 0;
1536 rq->offload_bundles++;
1537 }
1538 } else
1539 offload_enqueue(rq, skb);
1540
1541 return gather_idx;
1542}
1543
1544/**
1545 * update_tx_completed - update the number of processed Tx descriptors
1546 * @qs: the queue set to update
1547 * @idx: which Tx queue within the set to update
1548 * @credits: number of new processed descriptors
1549 * @tx_completed: accumulates credits for the queues
1550 *
1551 * Updates the number of completed Tx descriptors for a queue set's Tx
1552 *	queue.  On UP systems we update the information immediately but on
1553 * MP we accumulate the credits locally and update the Tx queue when we
1554 * reach a threshold to avoid cache-line bouncing.
1555 */
1556static inline void update_tx_completed(struct sge_qset *qs, int idx,
1557 unsigned int credits,
1558 unsigned int tx_completed[])
1559{
1560#ifdef CONFIG_SMP
1561 tx_completed[idx] += credits;
1562 if (tx_completed[idx] > 32) {
1563 qs->txq[idx].processed += tx_completed[idx];
1564 tx_completed[idx] = 0;
1565 }
1566#else
1567 qs->txq[idx].processed += credits;
1568#endif
1569}
1570
1571/**
1572 * restart_tx - check whether to restart suspended Tx queues
1573 * @qs: the queue set to resume
1574 *
1575 * Restarts suspended Tx queues of an SGE queue set if they have enough
1576 * free resources to resume operation.
1577 */
1578static void restart_tx(struct sge_qset *qs)
1579{
1580 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1581 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1582 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1583 qs->txq[TXQ_ETH].restarts++;
1584 if (netif_running(qs->netdev))
1585 netif_wake_queue(qs->netdev);
1586 }
1587
1588 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1589 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1590 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1591 qs->txq[TXQ_OFLD].restarts++;
1592 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1593 }
1594 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1595 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1596 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1597 qs->txq[TXQ_CTRL].restarts++;
1598 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1599 }
1600}
1601
1602/**
1603 * rx_eth - process an ingress ethernet packet
1604 * @adap: the adapter
1605 * @rq: the response queue that received the packet
1606 * @skb: the packet
1607 * @pad: amount of padding at the start of the buffer
1608 *
1609 *	Process an ingress Ethernet packet and deliver it to the stack.
1610 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1611 * if it was immediate data in a response.
1612 */
1613static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1614 struct sk_buff *skb, int pad)
1615{
1616 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1617 struct port_info *pi;
1618
1619 rq->eth_pkts++;
1620 skb_pull(skb, sizeof(*p) + pad);
1621 skb->dev = adap->port[p->iff];
1622 skb->dev->last_rx = jiffies;
1623 skb->protocol = eth_type_trans(skb, skb->dev);
1624 pi = netdev_priv(skb->dev);
1625 if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
1626 !p->fragment) {
1627 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1628 skb->ip_summed = CHECKSUM_UNNECESSARY;
1629 } else
1630 skb->ip_summed = CHECKSUM_NONE;
1631
1632 if (unlikely(p->vlan_valid)) {
1633 struct vlan_group *grp = pi->vlan_grp;
1634
1635 rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
1636 if (likely(grp))
1637 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1638 rq->polling);
1639 else
1640 dev_kfree_skb_any(skb);
1641 } else if (rq->polling)
1642 netif_receive_skb(skb);
1643 else
1644 netif_rx(skb);
1645}
1646
1647/**
1648 * handle_rsp_cntrl_info - handles control information in a response
1649 * @qs: the queue set corresponding to the response
1650 * @flags: the response control flags
1651 * @tx_completed: accumulates completion credits for the Tx queues
1652 *
1653 * Handles the control information of an SGE response, such as GTS
1654 * indications and completion credits for the queue set's Tx queues.
1655 */
1656static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags,
1657 unsigned int tx_completed[])
1658{
1659 unsigned int credits;
1660
1661#if USE_GTS
1662 if (flags & F_RSPD_TXQ0_GTS)
1663 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
1664#endif
1665
1666 /* ETH credits are already coalesced, return them immediately. */
1667 credits = G_RSPD_TXQ0_CR(flags);
1668 if (credits)
1669 qs->txq[TXQ_ETH].processed += credits;
1670
1671# if USE_GTS
1672 if (flags & F_RSPD_TXQ1_GTS)
1673 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
1674# endif
1675 update_tx_completed(qs, TXQ_OFLD, G_RSPD_TXQ1_CR(flags), tx_completed);
1676 update_tx_completed(qs, TXQ_CTRL, G_RSPD_TXQ2_CR(flags), tx_completed);
1677}
1678
1679/**
1680 * flush_tx_completed - returns accumulated Tx completions to Tx queues
1681 * @qs: the queue set to update
1682 * @tx_completed: pending completion credits to return to Tx queues
1683 *
1684 * Updates the number of completed Tx descriptors for a queue set's Tx
1685 *	queues with the credits pending in @tx_completed.  This has an effect
1686 *	only on MP systems, as on UP systems the credits are returned immediately.
1687 */
1688static inline void flush_tx_completed(struct sge_qset *qs,
1689 unsigned int tx_completed[])
1690{
1691#if defined(CONFIG_SMP)
1692 if (tx_completed[TXQ_OFLD])
1693 qs->txq[TXQ_OFLD].processed += tx_completed[TXQ_OFLD];
1694 if (tx_completed[TXQ_CTRL])
1695 qs->txq[TXQ_CTRL].processed += tx_completed[TXQ_CTRL];
1696#endif
1697}
1698
1699/**
1700 * check_ring_db - check if we need to ring any doorbells
1701 * @adapter: the adapter
1702 * @qs: the queue set whose Tx queues are to be examined
1703 * @sleeping: indicates which Tx queue sent GTS
1704 *
1705 * Checks if some of a queue set's Tx queues need to ring their doorbells
1706 * to resume transmission after idling while they still have unprocessed
1707 * descriptors.
1708 */
1709static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
1710 unsigned int sleeping)
1711{
1712 if (sleeping & F_RSPD_TXQ0_GTS) {
1713 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1714
1715 if (txq->cleaned + txq->in_use != txq->processed &&
1716 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1717 set_bit(TXQ_RUNNING, &txq->flags);
1718 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1719 V_EGRCNTX(txq->cntxt_id));
1720 }
1721 }
1722
1723 if (sleeping & F_RSPD_TXQ1_GTS) {
1724 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
1725
1726 if (txq->cleaned + txq->in_use != txq->processed &&
1727 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1728 set_bit(TXQ_RUNNING, &txq->flags);
1729 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1730 V_EGRCNTX(txq->cntxt_id));
1731 }
1732 }
1733}
1734
1735/**
1736 * is_new_response - check if a response is newly written
1737 * @r: the response descriptor
1738 * @q: the response queue
1739 *
1740 * Returns true if a response descriptor contains a yet unprocessed
1741 * response.
1742 */
1743static inline int is_new_response(const struct rsp_desc *r,
1744 const struct sge_rspq *q)
1745{
1746 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
1747}
1748
1749#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
1750#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
1751 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
1752 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
1753 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
1754
1755/* How long to delay the next interrupt in case of memory shortage, in units of 0.1 us. */
1756#define NOMEM_INTR_DELAY 2500
1757
1758/**
1759 * process_responses - process responses from an SGE response queue
1760 * @adap: the adapter
1761 * @qs: the queue set to which the response queue belongs
1762 * @budget: how many responses can be processed in this round
1763 *
1764 * Process responses from an SGE response queue up to the supplied budget.
1765 * Responses include received packets as well as credits and other events
1766 * for the queues that belong to the response queue's queue set.
1767 * A negative budget is effectively unlimited.
1768 *
1769 * Additionally choose the interrupt holdoff time for the next interrupt
1770 * on this queue. If the system is under memory shortage use a fairly
1771 * long delay to help recovery.
1772 */
1773static int process_responses(struct adapter *adap, struct sge_qset *qs,
1774 int budget)
1775{
1776 struct sge_rspq *q = &qs->rspq;
1777 struct rsp_desc *r = &q->desc[q->cidx];
1778 int budget_left = budget;
1779 unsigned int sleeping = 0, tx_completed[3] = { 0, 0, 0 };
1780 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
1781 int ngathered = 0;
1782
1783 q->next_holdoff = q->holdoff_tmr;
1784
1785 while (likely(budget_left && is_new_response(r, q))) {
1786 int eth, ethpad = 0;
1787 struct sk_buff *skb = NULL;
1788 u32 len, flags = ntohl(r->flags);
1789 u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
1790
1791 eth = r->rss_hdr.opcode == CPL_RX_PKT;
1792
1793 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
1794 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
1795 if (!skb)
1796 goto no_mem;
1797
1798 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
1799 skb->data[0] = CPL_ASYNC_NOTIF;
1800 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
1801 q->async_notif++;
1802 } else if (flags & F_RSPD_IMM_DATA_VALID) {
1803 skb = get_imm_packet(r);
1804 if (unlikely(!skb)) {
1805 no_mem:
1806 q->next_holdoff = NOMEM_INTR_DELAY;
1807 q->nomem++;
1808 /* consume one credit since we tried */
1809 budget_left--;
1810 break;
1811 }
1812 q->imm_data++;
1813 } else if ((len = ntohl(r->len_cq)) != 0) {
1814 struct sge_fl *fl;
1815
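			/* F_RSPD_FLQ selects the jumbo free list (fl[1]);
			 * otherwise the buffer came from fl[0]. */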
1816 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
1817 fl->credits--;
1818 skb = get_packet(adap, fl, G_RSPD_LEN(len),
1819 eth ? SGE_RX_DROP_THRES : 0);
1820 if (!skb)
1821 q->rx_drops++;
1822 else if (r->rss_hdr.opcode == CPL_TRACE_PKT)
1823 __skb_pull(skb, 2);
1824 ethpad = 2;
1825 if (++fl->cidx == fl->size)
1826 fl->cidx = 0;
1827 } else
1828 q->pure_rsps++;
1829
1830 if (flags & RSPD_CTRL_MASK) {
1831 sleeping |= flags & RSPD_GTS_MASK;
1832 handle_rsp_cntrl_info(qs, flags, tx_completed);
1833 }
1834
1835 r++;
1836 if (unlikely(++q->cidx == q->size)) {
1837 q->cidx = 0;
1838 q->gen ^= 1;
1839 r = q->desc;
1840 }
1841 prefetch(r);
1842
1843 if (++q->credits >= (q->size / 4)) {
1844 refill_rspq(adap, q, q->credits);
1845 q->credits = 0;
1846 }
1847
1848 if (likely(skb != NULL)) {
1849 if (eth)
1850 rx_eth(adap, q, skb, ethpad);
1851 else {
1852 /* Preserve the RSS info in csum & priority */
1853 skb->csum = rss_hi;
1854 skb->priority = rss_lo;
1855 ngathered = rx_offload(&adap->tdev, q, skb,
1856 offload_skbs, ngathered);
1857 }
1858 }
1859
1860 --budget_left;
1861 }
1862
1863 flush_tx_completed(qs, tx_completed);
1864 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
1865 if (sleeping)
1866 check_ring_db(adap, qs, sleeping);
1867
1868 smp_mb(); /* commit Tx queue .processed updates */
1869 if (unlikely(qs->txq_stopped != 0))
1870 restart_tx(qs);
1871
1872 budget -= budget_left;
1873 return budget;
1874}
1875
1876static inline int is_pure_response(const struct rsp_desc *r)
1877{
1878 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
1879
1880 return (n | r->len_cq) == 0;
1881}
1882
1883/**
1884 * napi_rx_handler - the NAPI handler for Rx processing
1885 * @dev: the net device
1886 * @budget: how many packets we can process in this round
1887 *
1888 * Handler for new data events when using NAPI.
1889 */
1890static int napi_rx_handler(struct net_device *dev, int *budget)
1891{
1892 struct adapter *adap = dev->priv;
1893 struct sge_qset *qs = dev2qset(dev);
1894 int effective_budget = min(*budget, dev->quota);
1895
1896 int work_done = process_responses(adap, qs, effective_budget);
1897 *budget -= work_done;
1898 dev->quota -= work_done;
1899
1900 if (work_done >= effective_budget)
1901 return 1;
1902
1903 netif_rx_complete(dev);
1904
1905 /*
1906 * Because we don't atomically flush the following write it is
1907 * possible that in very rare cases it can reach the device in a way
1908 * that races with a new response being written plus an error interrupt
1909 * causing the NAPI interrupt handler below to return unhandled status
1910 * to the OS. To protect against this would require flushing the write
1911 * and doing both the write and the flush with interrupts off. Way too
1912 * expensive and unjustifiable given the rarity of the race.
1913 *
1914 * The race cannot happen at all with MSI-X.
1915 */
1916 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
1917 V_NEWTIMER(qs->rspq.next_holdoff) |
1918 V_NEWINDEX(qs->rspq.cidx));
1919 return 0;
1920}
1921
1922/*
1923 * Returns true if the device is already scheduled for polling.
1924 */
1925static inline int napi_is_scheduled(struct net_device *dev)
1926{
1927 return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
1928}
1929
1930/**
1931 * process_pure_responses - process pure responses from a response queue
1932 * @adap: the adapter
1933 * @qs: the queue set owning the response queue
1934 * @r: the first pure response to process
1935 *
1936 * A simpler version of process_responses() that handles only pure (i.e.,
1937 *	non data-carrying) responses.  Such responses are too lightweight to
1938 * justify calling a softirq under NAPI, so we handle them specially in
1939 * the interrupt handler. The function is called with a pointer to a
1940 * response, which the caller must ensure is a valid pure response.
1941 *
1942 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
1943 */
1944static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
1945 struct rsp_desc *r)
1946{
1947 struct sge_rspq *q = &qs->rspq;
1948 unsigned int sleeping = 0, tx_completed[3] = { 0, 0, 0 };
1949
1950 do {
1951 u32 flags = ntohl(r->flags);
1952
1953 r++;
1954 if (unlikely(++q->cidx == q->size)) {
1955 q->cidx = 0;
1956 q->gen ^= 1;
1957 r = q->desc;
1958 }
1959 prefetch(r);
1960
1961 if (flags & RSPD_CTRL_MASK) {
1962 sleeping |= flags & RSPD_GTS_MASK;
1963 handle_rsp_cntrl_info(qs, flags, tx_completed);
1964 }
1965
1966 q->pure_rsps++;
1967 if (++q->credits >= (q->size / 4)) {
1968 refill_rspq(adap, q, q->credits);
1969 q->credits = 0;
1970 }
1971 } while (is_new_response(r, q) && is_pure_response(r));
1972
1973 flush_tx_completed(qs, tx_completed);
1974
1975 if (sleeping)
1976 check_ring_db(adap, qs, sleeping);
1977
1978 smp_mb(); /* commit Tx queue .processed updates */
1979 if (unlikely(qs->txq_stopped != 0))
1980 restart_tx(qs);
1981
1982 return is_new_response(r, q);
1983}
1984
1985/**
1986 * handle_responses - decide what to do with new responses in NAPI mode
1987 * @adap: the adapter
1988 * @q: the response queue
1989 *
1990 * This is used by the NAPI interrupt handlers to decide what to do with
1991 * new SGE responses. If there are no new responses it returns -1. If
1992 * there are new responses and they are pure (i.e., non-data carrying)
1993 * it handles them straight in hard interrupt context as they are very
1994 * cheap and don't deliver any packets. Finally, if there are any data
1995 * signaling responses it schedules the NAPI handler. Returns 1 if it
1996 * schedules NAPI, 0 if all new responses were pure.
1997 *
1998 * The caller must ascertain NAPI is not already running.
1999 */
2000static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2001{
2002 struct sge_qset *qs = rspq_to_qset(q);
2003 struct rsp_desc *r = &q->desc[q->cidx];
2004
2005 if (!is_new_response(r, q))
2006 return -1;
2007 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2008 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2009 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2010 return 0;
2011 }
2012 if (likely(__netif_rx_schedule_prep(qs->netdev)))
2013 __netif_rx_schedule(qs->netdev);
2014 return 1;
2015}
2016
2017/*
2018 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2019 * (i.e., response queue serviced in hard interrupt).
2020 */
2021irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2022{
2023 struct sge_qset *qs = cookie;
2024 struct adapter *adap = qs->netdev->priv;
2025 struct sge_rspq *q = &qs->rspq;
2026
2027 spin_lock(&q->lock);
2028 if (process_responses(adap, qs, -1) == 0)
2029 q->unhandled_irqs++;
2030 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2031 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2032 spin_unlock(&q->lock);
2033 return IRQ_HANDLED;
2034}
2035
2036/*
2037 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2038 * (i.e., response queue serviced by NAPI polling).
2039 */
2040irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2041{
2042 struct sge_qset *qs = cookie;
2043 struct adapter *adap = qs->netdev->priv;
2044 struct sge_rspq *q = &qs->rspq;
2045
2046 spin_lock(&q->lock);
2047 BUG_ON(napi_is_scheduled(qs->netdev));
2048
2049 if (handle_responses(adap, q) < 0)
2050 q->unhandled_irqs++;
2051 spin_unlock(&q->lock);
2052 return IRQ_HANDLED;
2053}
2054
2055/*
2056 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2057 * SGE response queues as well as error and other async events as they all use
2058 * the same MSI vector. We use one SGE response queue per port in this mode
2059 * and protect all response queues with queue 0's lock.
2060 */
2061static irqreturn_t t3_intr_msi(int irq, void *cookie)
2062{
2063 int new_packets = 0;
2064 struct adapter *adap = cookie;
2065 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2066
2067 spin_lock(&q->lock);
2068
2069 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2070 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2071 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2072 new_packets = 1;
2073 }
2074
2075 if (adap->params.nports == 2 &&
2076 process_responses(adap, &adap->sge.qs[1], -1)) {
2077 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2078
2079 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2080 V_NEWTIMER(q1->next_holdoff) |
2081 V_NEWINDEX(q1->cidx));
2082 new_packets = 1;
2083 }
2084
2085 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2086 q->unhandled_irqs++;
2087
2088 spin_unlock(&q->lock);
2089 return IRQ_HANDLED;
2090}
2091
2092static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
2093{
2094 if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) {
2095 if (likely(__netif_rx_schedule_prep(dev)))
2096 __netif_rx_schedule(dev);
2097 return 1;
2098 }
2099 return 0;
2100}
2101
2102/*
2103 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2104 * by NAPI polling). Handles data events from SGE response queues as well as
2105 * error and other async events as they all use the same MSI vector. We use
2106 * one SGE response queue per port in this mode and protect all response
2107 * queues with queue 0's lock.
2108 */
2109irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2110{
2111 int new_packets;
2112 struct adapter *adap = cookie;
2113 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2114
2115 spin_lock(&q->lock);
2116
2117 new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q);
2118 if (adap->params.nports == 2)
2119 new_packets += rspq_check_napi(adap->sge.qs[1].netdev,
2120 &adap->sge.qs[1].rspq);
2121 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2122 q->unhandled_irqs++;
2123
2124 spin_unlock(&q->lock);
2125 return IRQ_HANDLED;
2126}
2127
2128/*
2129 * A helper function that processes responses and issues GTS.
2130 */
2131static inline int process_responses_gts(struct adapter *adap,
2132 struct sge_rspq *rq)
2133{
2134 int work;
2135
2136 work = process_responses(adap, rspq_to_qset(rq), -1);
2137 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2138 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2139 return work;
2140}
2141
2142/*
2143 * The legacy INTx interrupt handler. This needs to handle data events from
2144 * SGE response queues as well as error and other async events as they all use
2145 * the same interrupt pin. We use one SGE response queue per port in this mode
2146 * and protect all response queues with queue 0's lock.
2147 */
2148static irqreturn_t t3_intr(int irq, void *cookie)
2149{
2150 int work_done, w0, w1;
2151 struct adapter *adap = cookie;
2152 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2153 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2154
2155 spin_lock(&q0->lock);
2156
2157 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2158 w1 = adap->params.nports == 2 &&
2159 is_new_response(&q1->desc[q1->cidx], q1);
2160
2161 if (likely(w0 | w1)) {
2162 t3_write_reg(adap, A_PL_CLI, 0);
2163 t3_read_reg(adap, A_PL_CLI); /* flush */
2164
2165 if (likely(w0))
2166 process_responses_gts(adap, q0);
2167
2168 if (w1)
2169 process_responses_gts(adap, q1);
2170
2171 work_done = w0 | w1;
2172 } else
2173 work_done = t3_slow_intr_handler(adap);
2174
2175 spin_unlock(&q0->lock);
2176 return IRQ_RETVAL(work_done != 0);
2177}
2178
2179/*
2180 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2181 * Handles data events from SGE response queues as well as error and other
2182 * async events as they all use the same interrupt pin. We use one SGE
2183 * response queue per port in this mode and protect all response queues with
2184 * queue 0's lock.
2185 */
2186static irqreturn_t t3b_intr(int irq, void *cookie)
2187{
2188 u32 map;
2189 struct adapter *adap = cookie;
2190 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2191
2192 t3_write_reg(adap, A_PL_CLI, 0);
2193 map = t3_read_reg(adap, A_SG_DATA_INTR);
2194
2195 if (unlikely(!map)) /* shared interrupt, most likely */
2196 return IRQ_NONE;
2197
2198 spin_lock(&q0->lock);
2199
2200 if (unlikely(map & F_ERRINTR))
2201 t3_slow_intr_handler(adap);
2202
2203 if (likely(map & 1))
2204 process_responses_gts(adap, q0);
2205
2206 if (map & 2)
2207 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2208
2209 spin_unlock(&q0->lock);
2210 return IRQ_HANDLED;
2211}
2212
2213/*
2214 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2215 * Handles data events from SGE response queues as well as error and other
2216 * async events as they all use the same interrupt pin. We use one SGE
2217 * response queue per port in this mode and protect all response queues with
2218 * queue 0's lock.
2219 */
2220static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2221{
2222 u32 map;
2223 struct net_device *dev;
2224 struct adapter *adap = cookie;
2225 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2226
2227 t3_write_reg(adap, A_PL_CLI, 0);
2228 map = t3_read_reg(adap, A_SG_DATA_INTR);
2229
2230 if (unlikely(!map)) /* shared interrupt, most likely */
2231 return IRQ_NONE;
2232
2233 spin_lock(&q0->lock);
2234
2235 if (unlikely(map & F_ERRINTR))
2236 t3_slow_intr_handler(adap);
2237
2238 if (likely(map & 1)) {
2239 dev = adap->sge.qs[0].netdev;
2240
2241 BUG_ON(napi_is_scheduled(dev));
2242 if (likely(__netif_rx_schedule_prep(dev)))
2243 __netif_rx_schedule(dev);
2244 }
2245 if (map & 2) {
2246 dev = adap->sge.qs[1].netdev;
2247
2248 BUG_ON(napi_is_scheduled(dev));
2249 if (likely(__netif_rx_schedule_prep(dev)))
2250 __netif_rx_schedule(dev);
2251 }
2252
2253 spin_unlock(&q0->lock);
2254 return IRQ_HANDLED;
2255}
2256
2257/**
2258 * t3_intr_handler - select the top-level interrupt handler
2259 * @adap: the adapter
2260 * @polling: whether using NAPI to service response queues
2261 *
2262 * Selects the top-level interrupt handler based on the type of interrupts
2263 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2264 * response queues.
2265 */
2266intr_handler_t t3_intr_handler(struct adapter *adap, int polling)
2267{
2268 if (adap->flags & USING_MSIX)
2269 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2270 if (adap->flags & USING_MSI)
2271 return polling ? t3_intr_msi_napi : t3_intr_msi;
2272 if (adap->params.rev > 0)
2273 return polling ? t3b_intr_napi : t3b_intr;
2274 return t3_intr;
2275}
2276
2277/**
2278 * t3_sge_err_intr_handler - SGE async event interrupt handler
2279 * @adapter: the adapter
2280 *
2281 * Interrupt handler for SGE asynchronous (non-data) events.
2282 */
2283void t3_sge_err_intr_handler(struct adapter *adapter)
2284{
2285 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2286
2287 if (status & F_RSPQCREDITOVERFOW)
2288 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2289
2290 if (status & F_RSPQDISABLED) {
2291 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2292
2293 CH_ALERT(adapter,
2294 "packet delivered to disabled response queue "
2295 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2296 }
2297
2298 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2299 if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
2300 t3_fatal_err(adapter);
2301}
2302
2303/**
2304 * sge_timer_cb - perform periodic maintenance of an SGE qset
2305 * @data: the SGE queue set to maintain
2306 *
2307 * Runs periodically from a timer to perform maintenance of an SGE queue
2308 * set. It performs two tasks:
2309 *
2310 * a) Cleans up any completed Tx descriptors that may still be pending.
2311 * Normal descriptor cleanup happens when new packets are added to a Tx
2312 * queue so this timer is relatively infrequent and does any cleanup only
2313 * if the Tx queue has not seen any new packets in a while. We make a
2314 * best effort attempt to reclaim descriptors, in that we don't wait
2315 * around if we cannot get a queue's lock (which most likely is because
2316 * someone else is queueing new packets and so will also handle the clean
2317 * up). Since control queues use immediate data exclusively we don't
2318 * bother cleaning them up here.
2319 *
2320 * b) Replenishes Rx queues that have run out due to memory shortage.
2321 * Normally new Rx buffers are added when existing ones are consumed but
2322 * when out of memory a queue can become empty. We try to add only a few
2323 * buffers here, the queue will be replenished fully as these new buffers
2324 * are used up if memory shortage has subsided.
2325 */
2326static void sge_timer_cb(unsigned long data)
2327{
2328 spinlock_t *lock;
2329 struct sge_qset *qs = (struct sge_qset *)data;
2330 struct adapter *adap = qs->netdev->priv;
2331
2332 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2333 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2334 spin_unlock(&qs->txq[TXQ_ETH].lock);
2335 }
2336 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2337 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2338 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2339 }
2340 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
2341 &adap->sge.qs[0].rspq.lock;
2342 if (spin_trylock_irq(lock)) {
2343 if (!napi_is_scheduled(qs->netdev)) {
2344 if (qs->fl[0].credits < qs->fl[0].size)
2345 __refill_fl(adap, &qs->fl[0]);
2346 if (qs->fl[1].credits < qs->fl[1].size)
2347 __refill_fl(adap, &qs->fl[1]);
2348 }
2349 spin_unlock_irq(lock);
2350 }
2351 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2352}
2353
2354/**
2355 * t3_update_qset_coalesce - update coalescing settings for a queue set
2356 * @qs: the SGE queue set
2357 * @p: new queue set parameters
2358 *
2359 * Update the coalescing settings for an SGE queue set. Nothing is done
2360 * if the queue set is not initialized yet.
2361 */
2362void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2363{
2364 if (!qs->netdev)
2365 return;
2366
2367 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
2368 qs->rspq.polling = p->polling;
2369 qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll;
2370}
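
The conversion above stores the holdoff value in SGE timer ticks; t3_sge_init() further down programs the timer tick to core_ticks_per_usec() / 10, so each unit appears to correspond to 0.1 us. A small illustrative helper restating the same arithmetic:

/* Illustrative only: microseconds -> holdoff units of 0.1 us each.
 * Zero is bumped to 1 because a holdoff of 0 is not allowed.
 */
static inline unsigned int example_usecs_to_holdoff(unsigned int usecs)
{
	unsigned int tmr = usecs * 10;	/* 10 units per microsecond */

	return tmr ? tmr : 1;
}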
2371
2372/**
2373 * t3_sge_alloc_qset - initialize an SGE queue set
2374 * @adapter: the adapter
2375 * @id: the queue set id
2376 * @nports: how many Ethernet ports will be using this queue set
2377 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2378 * @p: configuration parameters for this queue set
2379 * @ntxq: number of Tx queues for the queue set
2380 * @netdev: net device associated with this queue set
2381 *
2382 * Allocate resources and initialize an SGE queue set. A queue set
2383 * comprises a response queue, two Rx free-buffer queues, and up to 3
2384 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2385 * queue, offload queue, and control queue.
2386 */
2387int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2388 int irq_vec_idx, const struct qset_params *p,
2389 int ntxq, struct net_device *netdev)
2390{
2391 int i, ret = -ENOMEM;
2392 struct sge_qset *q = &adapter->sge.qs[id];
2393
2394 init_qset_cntxt(q, id);
2395 init_timer(&q->tx_reclaim_timer);
2396 q->tx_reclaim_timer.data = (unsigned long)q;
2397 q->tx_reclaim_timer.function = sge_timer_cb;
2398
2399 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2400 sizeof(struct rx_desc),
2401 sizeof(struct rx_sw_desc),
2402 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2403 if (!q->fl[0].desc)
2404 goto err;
2405
2406 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2407 sizeof(struct rx_desc),
2408 sizeof(struct rx_sw_desc),
2409 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2410 if (!q->fl[1].desc)
2411 goto err;
2412
2413 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2414 sizeof(struct rsp_desc), 0,
2415 &q->rspq.phys_addr, NULL);
2416 if (!q->rspq.desc)
2417 goto err;
2418
2419 for (i = 0; i < ntxq; ++i) {
2420 /*
2421 * The control queue always uses immediate data so does not
2422 * need to keep track of any sk_buffs.
2423 */
2424 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2425
2426 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2427 sizeof(struct tx_desc), sz,
2428 &q->txq[i].phys_addr,
2429 &q->txq[i].sdesc);
2430 if (!q->txq[i].desc)
2431 goto err;
2432
2433 q->txq[i].gen = 1;
2434 q->txq[i].size = p->txq_size[i];
2435 spin_lock_init(&q->txq[i].lock);
2436 skb_queue_head_init(&q->txq[i].sendq);
2437 }
2438
2439 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2440 (unsigned long)q);
2441 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2442 (unsigned long)q);
2443
2444 q->fl[0].gen = q->fl[1].gen = 1;
2445 q->fl[0].size = p->fl_size;
2446 q->fl[1].size = p->jumbo_size;
2447
2448 q->rspq.gen = 1;
2449 q->rspq.size = p->rspq_size;
2450 spin_lock_init(&q->rspq.lock);
2451
2452 q->txq[TXQ_ETH].stop_thres = nports *
2453 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2454
2455 if (ntxq == 1) {
2456 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
2457 sizeof(struct cpl_rx_pkt);
2458 q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
2459 sizeof(struct cpl_rx_pkt);
2460 } else {
2461 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
2462 sizeof(struct cpl_rx_data);
2463 q->fl[1].buf_size = (16 * 1024) -
2464 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2465 }
2466
2467 spin_lock(&adapter->sge.reg_lock);
2468
2469 /* FL threshold comparison uses < */
2470 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2471 q->rspq.phys_addr, q->rspq.size,
2472 q->fl[0].buf_size, 1, 0);
2473 if (ret)
2474 goto err_unlock;
2475
2476 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2477 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2478 q->fl[i].phys_addr, q->fl[i].size,
2479 q->fl[i].buf_size, p->cong_thres, 1,
2480 0);
2481 if (ret)
2482 goto err_unlock;
2483 }
2484
2485 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2486 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2487 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2488 1, 0);
2489 if (ret)
2490 goto err_unlock;
2491
2492 if (ntxq > 1) {
2493 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2494 USE_GTS, SGE_CNTXT_OFLD, id,
2495 q->txq[TXQ_OFLD].phys_addr,
2496 q->txq[TXQ_OFLD].size, 0, 1, 0);
2497 if (ret)
2498 goto err_unlock;
2499 }
2500
2501 if (ntxq > 2) {
2502 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2503 SGE_CNTXT_CTRL, id,
2504 q->txq[TXQ_CTRL].phys_addr,
2505 q->txq[TXQ_CTRL].size,
2506 q->txq[TXQ_CTRL].token, 1, 0);
2507 if (ret)
2508 goto err_unlock;
2509 }
2510
2511 spin_unlock(&adapter->sge.reg_lock);
2512 q->netdev = netdev;
2513 t3_update_qset_coalesce(q, p);
2514
2515 /*
2516 * We use atalk_ptr as a backpointer to a qset. In case a device is
2517 * associated with multiple queue sets only the first one sets
2518 * atalk_ptr.
2519 */
2520 if (netdev->atalk_ptr == NULL)
2521 netdev->atalk_ptr = q;
2522
2523 refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
2524 refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
2525 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2526
2527 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2528 V_NEWTIMER(q->rspq.holdoff_tmr));
2529
2530 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2531 return 0;
2532
2533 err_unlock:
2534 spin_unlock(&adapter->sge.reg_lock);
2535 err:
2536 t3_free_qset(adapter, q);
2537 return ret;
2538}
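
As a hedged sketch of how the allocation above might be driven from the setup path: one queue set per port, with all response-queue interrupts funnelled through vector 0 when MSI-X is not in use. The ports[] array and the use of adap->params.sge.qset[] as the parameter source are assumptions for illustration, not code from this patch.

/* Hypothetical setup sketch: allocate one full queue set per port. */
static int example_setup_qsets(struct adapter *adap, int nports,
			       struct net_device **ports)
{
	int i, err;

	for (i = 0; i < nports; ++i) {
		err = t3_sge_alloc_qset(adap, i, 1, 0 /* irq_vec_idx */,
					&adap->params.sge.qset[i],
					3 /* ETH + OFLD + CTRL */, ports[i]);
		if (err) {
			t3_free_sge_resources(adap);
			return err;
		}
	}
	return 0;
}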
2539
2540/**
2541 * t3_free_sge_resources - free SGE resources
2542 * @adap: the adapter
2543 *
2544 * Frees resources used by the SGE queue sets.
2545 */
2546void t3_free_sge_resources(struct adapter *adap)
2547{
2548 int i;
2549
2550 for (i = 0; i < SGE_QSETS; ++i)
2551 t3_free_qset(adap, &adap->sge.qs[i]);
2552}
2553
2554/**
2555 * t3_sge_start - enable SGE
2556 * @adap: the adapter
2557 *
2558 * Enables the SGE for DMAs. This is the last step in starting packet
2559 * transfers.
2560 */
2561void t3_sge_start(struct adapter *adap)
2562{
2563 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2564}
2565
2566/**
2567 * t3_sge_stop - disable SGE operation
2568 * @adap: the adapter
2569 *
2570 * Disables the DMA engine. This can be called in emergencies (e.g.,
2571 * from error interrupts) or from normal process context. In the latter
2572 * case it also disables any pending queue restart tasklets. Note that
2573 * if it is called in interrupt context it cannot disable the restart
2574 * tasklets as it cannot wait; however, the tasklets will have no effect
2575 * since the doorbells are disabled and the driver will call this again
2576 * later from process context, at which time the tasklets will be stopped
2577 * if they are still running.
2578 */
2579void t3_sge_stop(struct adapter *adap)
2580{
2581 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
2582 if (!in_interrupt()) {
2583 int i;
2584
2585 for (i = 0; i < SGE_QSETS; ++i) {
2586 struct sge_qset *qs = &adap->sge.qs[i];
2587
2588 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
2589 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
2590 }
2591 }
2592}
2593
2594/**
2595 * t3_sge_init - initialize SGE
2596 * @adap: the adapter
2597 * @p: the SGE parameters
2598 *
2599 * Performs SGE initialization needed every time after a chip reset.
2600 * We do not initialize any of the queue sets here, instead the driver
2601 * top-level must request those individually. We also do not enable DMA
2602 * here, that should be done after the queues have been set up.
2603 */
2604void t3_sge_init(struct adapter *adap, struct sge_params *p)
2605{
2606 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
2607
2608 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
2609 F_CQCRDTCTRL |
2610 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
2611 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
2612#if SGE_NUM_GENBITS == 1
2613 ctrl |= F_EGRGENCTRL;
2614#endif
2615 if (adap->params.rev > 0) {
2616 if (!(adap->flags & (USING_MSIX | USING_MSI)))
2617 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
2618 ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
2619 }
2620 t3_write_reg(adap, A_SG_CONTROL, ctrl);
2621 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
2622 V_LORCQDRBTHRSH(512));
2623 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
2624 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
2625 V_TIMEOUT(100 * core_ticks_per_usec(adap)));
2626 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
2627 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
2628 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
2629 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
2630 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
2631 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
2632}
2633
2634/**
2635 * t3_sge_prep - one-time SGE initialization
2636 * @adap: the associated adapter
2637 * @p: SGE parameters
2638 *
2639 * Performs one-time initialization of SGE SW state. Includes determining
2640 * defaults for the assorted SGE parameters, which admins can change until
2641 * they are used to initialize the SGE.
2642 */
2643void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
2644{
2645 int i;
2646
2647 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
2648 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2649
2650 for (i = 0; i < SGE_QSETS; ++i) {
2651 struct qset_params *q = p->qset + i;
2652
2653 q->polling = adap->params.rev > 0;
2654 q->coalesce_usecs = 5;
2655 q->rspq_size = 1024;
2656 q->fl_size = 4096;
2657 q->jumbo_size = 512;
2658 q->txq_size[TXQ_ETH] = 1024;
2659 q->txq_size[TXQ_OFLD] = 1024;
2660 q->txq_size[TXQ_CTRL] = 256;
2661 q->cong_thres = 0;
2662 }
2663
2664 spin_lock_init(&adap->sge.reg_lock);
2665}
2666
2667/**
2668 * t3_get_desc - dump an SGE descriptor for debugging purposes
2669 * @qs: the queue set
2670 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
2671 * @idx: the descriptor index in the queue
2672 * @data: where to dump the descriptor contents
2673 *
2674 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
2675 * size of the descriptor.
2676 */
2677int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
2678 unsigned char *data)
2679{
2680 if (qnum >= 6)
2681 return -EINVAL;
2682
2683 if (qnum < 3) {
2684 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
2685 return -EINVAL;
2686 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
2687 return sizeof(struct tx_desc);
2688 }
2689
2690 if (qnum == 3) {
2691 if (!qs->rspq.desc || idx >= qs->rspq.size)
2692 return -EINVAL;
2693 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
2694 return sizeof(struct rsp_desc);
2695 }
2696
2697 qnum -= 4;
2698 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
2699 return -EINVAL;
2700 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
2701 return sizeof(struct rx_desc);
2702}
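
For completeness, a hedged example of a debug-style caller; qnum 3 selects the response queue per the mapping above, and the caller-supplied buffer must be large enough for the descriptor type being read.

/* Illustrative only: copy one response-queue descriptor into buf,
 * which must hold at least sizeof(struct rsp_desc) bytes.
 */
static int example_peek_rsp_desc(const struct sge_qset *qs, unsigned int idx,
				 unsigned char *buf)
{
	return t3_get_desc(qs, 3 /* response queue */, idx, buf);
}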
diff --git a/drivers/net/cxgb3/sge_defs.h b/drivers/net/cxgb3/sge_defs.h
new file mode 100644
index 000000000000..514869e26a76
--- /dev/null
+++ b/drivers/net/cxgb3/sge_defs.h
@@ -0,0 +1,251 @@
1/*
2 * This file is automatically generated --- any changes will be lost.
3 */
4
5#ifndef _SGE_DEFS_H
6#define _SGE_DEFS_H
7
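
Throughout these generated headers the field accessors follow one convention: S_<FIELD> is the bit offset, M_<FIELD> the right-justified mask, V_<FIELD>(x) shifts a value into position, G_<FIELD>(x) extracts it, and F_<FIELD> is the single-bit flag form. A brief hedged sketch using the EC_CREDITS field defined just below (the helper names are made up for illustration):

/* Illustrative only: pack and unpack a field with the S_/M_/V_/G_ macros. */
static inline u32 example_set_credits(u32 word, unsigned int credits)
{
	word &= ~V_EC_CREDITS(M_EC_CREDITS);	/* clear the field */
	return word | V_EC_CREDITS(credits);	/* insert the new value */
}

static inline unsigned int example_get_credits(u32 word)
{
	return G_EC_CREDITS(word);		/* shift down and mask */
}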
8#define S_EC_CREDITS 0
9#define M_EC_CREDITS 0x7FFF
10#define V_EC_CREDITS(x) ((x) << S_EC_CREDITS)
11#define G_EC_CREDITS(x) (((x) >> S_EC_CREDITS) & M_EC_CREDITS)
12
13#define S_EC_GTS 15
14#define V_EC_GTS(x) ((x) << S_EC_GTS)
15#define F_EC_GTS V_EC_GTS(1U)
16
17#define S_EC_INDEX 16
18#define M_EC_INDEX 0xFFFF
19#define V_EC_INDEX(x) ((x) << S_EC_INDEX)
20#define G_EC_INDEX(x) (((x) >> S_EC_INDEX) & M_EC_INDEX)
21
22#define S_EC_SIZE 0
23#define M_EC_SIZE 0xFFFF
24#define V_EC_SIZE(x) ((x) << S_EC_SIZE)
25#define G_EC_SIZE(x) (((x) >> S_EC_SIZE) & M_EC_SIZE)
26
27#define S_EC_BASE_LO 16
28#define M_EC_BASE_LO 0xFFFF
29#define V_EC_BASE_LO(x) ((x) << S_EC_BASE_LO)
30#define G_EC_BASE_LO(x) (((x) >> S_EC_BASE_LO) & M_EC_BASE_LO)
31
32#define S_EC_BASE_HI 0
33#define M_EC_BASE_HI 0xF
34#define V_EC_BASE_HI(x) ((x) << S_EC_BASE_HI)
35#define G_EC_BASE_HI(x) (((x) >> S_EC_BASE_HI) & M_EC_BASE_HI)
36
37#define S_EC_RESPQ 4
38#define M_EC_RESPQ 0x7
39#define V_EC_RESPQ(x) ((x) << S_EC_RESPQ)
40#define G_EC_RESPQ(x) (((x) >> S_EC_RESPQ) & M_EC_RESPQ)
41
42#define S_EC_TYPE 7
43#define M_EC_TYPE 0x7
44#define V_EC_TYPE(x) ((x) << S_EC_TYPE)
45#define G_EC_TYPE(x) (((x) >> S_EC_TYPE) & M_EC_TYPE)
46
47#define S_EC_GEN 10
48#define V_EC_GEN(x) ((x) << S_EC_GEN)
49#define F_EC_GEN V_EC_GEN(1U)
50
51#define S_EC_UP_TOKEN 11
52#define M_EC_UP_TOKEN 0xFFFFF
53#define V_EC_UP_TOKEN(x) ((x) << S_EC_UP_TOKEN)
54#define G_EC_UP_TOKEN(x) (((x) >> S_EC_UP_TOKEN) & M_EC_UP_TOKEN)
55
56#define S_EC_VALID 31
57#define V_EC_VALID(x) ((x) << S_EC_VALID)
58#define F_EC_VALID V_EC_VALID(1U)
59
60#define S_RQ_MSI_VEC 20
61#define M_RQ_MSI_VEC 0x3F
62#define V_RQ_MSI_VEC(x) ((x) << S_RQ_MSI_VEC)
63#define G_RQ_MSI_VEC(x) (((x) >> S_RQ_MSI_VEC) & M_RQ_MSI_VEC)
64
65#define S_RQ_INTR_EN 26
66#define V_RQ_INTR_EN(x) ((x) << S_RQ_INTR_EN)
67#define F_RQ_INTR_EN V_RQ_INTR_EN(1U)
68
69#define S_RQ_GEN 28
70#define V_RQ_GEN(x) ((x) << S_RQ_GEN)
71#define F_RQ_GEN V_RQ_GEN(1U)
72
73#define S_CQ_INDEX 0
74#define M_CQ_INDEX 0xFFFF
75#define V_CQ_INDEX(x) ((x) << S_CQ_INDEX)
76#define G_CQ_INDEX(x) (((x) >> S_CQ_INDEX) & M_CQ_INDEX)
77
78#define S_CQ_SIZE 16
79#define M_CQ_SIZE 0xFFFF
80#define V_CQ_SIZE(x) ((x) << S_CQ_SIZE)
81#define G_CQ_SIZE(x) (((x) >> S_CQ_SIZE) & M_CQ_SIZE)
82
83#define S_CQ_BASE_HI 0
84#define M_CQ_BASE_HI 0xFFFFF
85#define V_CQ_BASE_HI(x) ((x) << S_CQ_BASE_HI)
86#define G_CQ_BASE_HI(x) (((x) >> S_CQ_BASE_HI) & M_CQ_BASE_HI)
87
88#define S_CQ_RSPQ 20
89#define M_CQ_RSPQ 0x3F
90#define V_CQ_RSPQ(x) ((x) << S_CQ_RSPQ)
91#define G_CQ_RSPQ(x) (((x) >> S_CQ_RSPQ) & M_CQ_RSPQ)
92
93#define S_CQ_ASYNC_NOTIF 26
94#define V_CQ_ASYNC_NOTIF(x) ((x) << S_CQ_ASYNC_NOTIF)
95#define F_CQ_ASYNC_NOTIF V_CQ_ASYNC_NOTIF(1U)
96
97#define S_CQ_ARMED 27
98#define V_CQ_ARMED(x) ((x) << S_CQ_ARMED)
99#define F_CQ_ARMED V_CQ_ARMED(1U)
100
101#define S_CQ_ASYNC_NOTIF_SOL 28
102#define V_CQ_ASYNC_NOTIF_SOL(x) ((x) << S_CQ_ASYNC_NOTIF_SOL)
103#define F_CQ_ASYNC_NOTIF_SOL V_CQ_ASYNC_NOTIF_SOL(1U)
104
105#define S_CQ_GEN 29
106#define V_CQ_GEN(x) ((x) << S_CQ_GEN)
107#define F_CQ_GEN V_CQ_GEN(1U)
108
109#define S_CQ_OVERFLOW_MODE 31
110#define V_CQ_OVERFLOW_MODE(x) ((x) << S_CQ_OVERFLOW_MODE)
111#define F_CQ_OVERFLOW_MODE V_CQ_OVERFLOW_MODE(1U)
112
113#define S_CQ_CREDITS 0
114#define M_CQ_CREDITS 0xFFFF
115#define V_CQ_CREDITS(x) ((x) << S_CQ_CREDITS)
116#define G_CQ_CREDITS(x) (((x) >> S_CQ_CREDITS) & M_CQ_CREDITS)
117
118#define S_CQ_CREDIT_THRES 16
119#define M_CQ_CREDIT_THRES 0x1FFF
120#define V_CQ_CREDIT_THRES(x) ((x) << S_CQ_CREDIT_THRES)
121#define G_CQ_CREDIT_THRES(x) (((x) >> S_CQ_CREDIT_THRES) & M_CQ_CREDIT_THRES)
122
123#define S_FL_BASE_HI 0
124#define M_FL_BASE_HI 0xFFFFF
125#define V_FL_BASE_HI(x) ((x) << S_FL_BASE_HI)
126#define G_FL_BASE_HI(x) (((x) >> S_FL_BASE_HI) & M_FL_BASE_HI)
127
128#define S_FL_INDEX_LO 20
129#define M_FL_INDEX_LO 0xFFF
130#define V_FL_INDEX_LO(x) ((x) << S_FL_INDEX_LO)
131#define G_FL_INDEX_LO(x) (((x) >> S_FL_INDEX_LO) & M_FL_INDEX_LO)
132
133#define S_FL_INDEX_HI 0
134#define M_FL_INDEX_HI 0xF
135#define V_FL_INDEX_HI(x) ((x) << S_FL_INDEX_HI)
136#define G_FL_INDEX_HI(x) (((x) >> S_FL_INDEX_HI) & M_FL_INDEX_HI)
137
138#define S_FL_SIZE 4
139#define M_FL_SIZE 0xFFFF
140#define V_FL_SIZE(x) ((x) << S_FL_SIZE)
141#define G_FL_SIZE(x) (((x) >> S_FL_SIZE) & M_FL_SIZE)
142
143#define S_FL_GEN 20
144#define V_FL_GEN(x) ((x) << S_FL_GEN)
145#define F_FL_GEN V_FL_GEN(1U)
146
147#define S_FL_ENTRY_SIZE_LO 21
148#define M_FL_ENTRY_SIZE_LO 0x7FF
149#define V_FL_ENTRY_SIZE_LO(x) ((x) << S_FL_ENTRY_SIZE_LO)
150#define G_FL_ENTRY_SIZE_LO(x) (((x) >> S_FL_ENTRY_SIZE_LO) & M_FL_ENTRY_SIZE_LO)
151
152#define S_FL_ENTRY_SIZE_HI 0
153#define M_FL_ENTRY_SIZE_HI 0x1FFFFF
154#define V_FL_ENTRY_SIZE_HI(x) ((x) << S_FL_ENTRY_SIZE_HI)
155#define G_FL_ENTRY_SIZE_HI(x) (((x) >> S_FL_ENTRY_SIZE_HI) & M_FL_ENTRY_SIZE_HI)
156
157#define S_FL_CONG_THRES 21
158#define M_FL_CONG_THRES 0x3FF
159#define V_FL_CONG_THRES(x) ((x) << S_FL_CONG_THRES)
160#define G_FL_CONG_THRES(x) (((x) >> S_FL_CONG_THRES) & M_FL_CONG_THRES)
161
162#define S_FL_GTS 31
163#define V_FL_GTS(x) ((x) << S_FL_GTS)
164#define F_FL_GTS V_FL_GTS(1U)
165
166#define S_FLD_GEN1 31
167#define V_FLD_GEN1(x) ((x) << S_FLD_GEN1)
168#define F_FLD_GEN1 V_FLD_GEN1(1U)
169
170#define S_FLD_GEN2 0
171#define V_FLD_GEN2(x) ((x) << S_FLD_GEN2)
172#define F_FLD_GEN2 V_FLD_GEN2(1U)
173
174#define S_RSPD_TXQ1_CR 0
175#define M_RSPD_TXQ1_CR 0x7F
176#define V_RSPD_TXQ1_CR(x) ((x) << S_RSPD_TXQ1_CR)
177#define G_RSPD_TXQ1_CR(x) (((x) >> S_RSPD_TXQ1_CR) & M_RSPD_TXQ1_CR)
178
179#define S_RSPD_TXQ1_GTS 7
180#define V_RSPD_TXQ1_GTS(x) ((x) << S_RSPD_TXQ1_GTS)
181#define F_RSPD_TXQ1_GTS V_RSPD_TXQ1_GTS(1U)
182
183#define S_RSPD_TXQ2_CR 8
184#define M_RSPD_TXQ2_CR 0x7F
185#define V_RSPD_TXQ2_CR(x) ((x) << S_RSPD_TXQ2_CR)
186#define G_RSPD_TXQ2_CR(x) (((x) >> S_RSPD_TXQ2_CR) & M_RSPD_TXQ2_CR)
187
188#define S_RSPD_TXQ2_GTS 15
189#define V_RSPD_TXQ2_GTS(x) ((x) << S_RSPD_TXQ2_GTS)
190#define F_RSPD_TXQ2_GTS V_RSPD_TXQ2_GTS(1U)
191
192#define S_RSPD_TXQ0_CR 16
193#define M_RSPD_TXQ0_CR 0x7F
194#define V_RSPD_TXQ0_CR(x) ((x) << S_RSPD_TXQ0_CR)
195#define G_RSPD_TXQ0_CR(x) (((x) >> S_RSPD_TXQ0_CR) & M_RSPD_TXQ0_CR)
196
197#define S_RSPD_TXQ0_GTS 23
198#define V_RSPD_TXQ0_GTS(x) ((x) << S_RSPD_TXQ0_GTS)
199#define F_RSPD_TXQ0_GTS V_RSPD_TXQ0_GTS(1U)
200
201#define S_RSPD_EOP 24
202#define V_RSPD_EOP(x) ((x) << S_RSPD_EOP)
203#define F_RSPD_EOP V_RSPD_EOP(1U)
204
205#define S_RSPD_SOP 25
206#define V_RSPD_SOP(x) ((x) << S_RSPD_SOP)
207#define F_RSPD_SOP V_RSPD_SOP(1U)
208
209#define S_RSPD_ASYNC_NOTIF 26
210#define V_RSPD_ASYNC_NOTIF(x) ((x) << S_RSPD_ASYNC_NOTIF)
211#define F_RSPD_ASYNC_NOTIF V_RSPD_ASYNC_NOTIF(1U)
212
213#define S_RSPD_FL0_GTS 27
214#define V_RSPD_FL0_GTS(x) ((x) << S_RSPD_FL0_GTS)
215#define F_RSPD_FL0_GTS V_RSPD_FL0_GTS(1U)
216
217#define S_RSPD_FL1_GTS 28
218#define V_RSPD_FL1_GTS(x) ((x) << S_RSPD_FL1_GTS)
219#define F_RSPD_FL1_GTS V_RSPD_FL1_GTS(1U)
220
221#define S_RSPD_IMM_DATA_VALID 29
222#define V_RSPD_IMM_DATA_VALID(x) ((x) << S_RSPD_IMM_DATA_VALID)
223#define F_RSPD_IMM_DATA_VALID V_RSPD_IMM_DATA_VALID(1U)
224
225#define S_RSPD_OFFLOAD 30
226#define V_RSPD_OFFLOAD(x) ((x) << S_RSPD_OFFLOAD)
227#define F_RSPD_OFFLOAD V_RSPD_OFFLOAD(1U)
228
229#define S_RSPD_GEN1 31
230#define V_RSPD_GEN1(x) ((x) << S_RSPD_GEN1)
231#define F_RSPD_GEN1 V_RSPD_GEN1(1U)
232
233#define S_RSPD_LEN 0
234#define M_RSPD_LEN 0x7FFFFFFF
235#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN)
236#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN)
237
238#define S_RSPD_FLQ 31
239#define V_RSPD_FLQ(x) ((x) << S_RSPD_FLQ)
240#define F_RSPD_FLQ V_RSPD_FLQ(1U)
241
242#define S_RSPD_GEN2 0
243#define V_RSPD_GEN2(x) ((x) << S_RSPD_GEN2)
244#define F_RSPD_GEN2 V_RSPD_GEN2(1U)
245
246#define S_RSPD_INR_VEC 1
247#define M_RSPD_INR_VEC 0x7F
248#define V_RSPD_INR_VEC(x) ((x) << S_RSPD_INR_VEC)
249#define G_RSPD_INR_VEC(x) (((x) >> S_RSPD_INR_VEC) & M_RSPD_INR_VEC)
250
251#endif /* _SGE_DEFS_H */
diff --git a/drivers/net/cxgb3/t3_cpl.h b/drivers/net/cxgb3/t3_cpl.h
new file mode 100644
index 000000000000..b0df4ba94e02
--- /dev/null
+++ b/drivers/net/cxgb3/t3_cpl.h
@@ -0,0 +1,1426 @@
1/*
2 * Definitions of the CPL 5 commands and status codes.
3 *
4 * Copyright (C) 2004-2006 Chelsio Communications. All rights reserved.
5 *
6 * Written by Dimitris Michailidis (dm@chelsio.com)
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
11 * release for licensing terms and conditions.
12 */
13
14#ifndef T3_CPL_H
15#define T3_CPL_H
16
17#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
18# include <asm/byteorder.h>
19#endif
20
21enum CPL_opcode {
22 CPL_PASS_OPEN_REQ = 0x1,
23 CPL_PASS_ACCEPT_RPL = 0x2,
24 CPL_ACT_OPEN_REQ = 0x3,
25 CPL_SET_TCB = 0x4,
26 CPL_SET_TCB_FIELD = 0x5,
27 CPL_GET_TCB = 0x6,
28 CPL_PCMD = 0x7,
29 CPL_CLOSE_CON_REQ = 0x8,
30 CPL_CLOSE_LISTSRV_REQ = 0x9,
31 CPL_ABORT_REQ = 0xA,
32 CPL_ABORT_RPL = 0xB,
33 CPL_TX_DATA = 0xC,
34 CPL_RX_DATA_ACK = 0xD,
35 CPL_TX_PKT = 0xE,
36 CPL_RTE_DELETE_REQ = 0xF,
37 CPL_RTE_WRITE_REQ = 0x10,
38 CPL_RTE_READ_REQ = 0x11,
39 CPL_L2T_WRITE_REQ = 0x12,
40 CPL_L2T_READ_REQ = 0x13,
41 CPL_SMT_WRITE_REQ = 0x14,
42 CPL_SMT_READ_REQ = 0x15,
43 CPL_TX_PKT_LSO = 0x16,
44 CPL_PCMD_READ = 0x17,
45 CPL_BARRIER = 0x18,
46 CPL_TID_RELEASE = 0x1A,
47
48 CPL_CLOSE_LISTSRV_RPL = 0x20,
49 CPL_ERROR = 0x21,
50 CPL_GET_TCB_RPL = 0x22,
51 CPL_L2T_WRITE_RPL = 0x23,
52 CPL_PCMD_READ_RPL = 0x24,
53 CPL_PCMD_RPL = 0x25,
54 CPL_PEER_CLOSE = 0x26,
55 CPL_RTE_DELETE_RPL = 0x27,
56 CPL_RTE_WRITE_RPL = 0x28,
57 CPL_RX_DDP_COMPLETE = 0x29,
58 CPL_RX_PHYS_ADDR = 0x2A,
59 CPL_RX_PKT = 0x2B,
60 CPL_RX_URG_NOTIFY = 0x2C,
61 CPL_SET_TCB_RPL = 0x2D,
62 CPL_SMT_WRITE_RPL = 0x2E,
63 CPL_TX_DATA_ACK = 0x2F,
64
65 CPL_ABORT_REQ_RSS = 0x30,
66 CPL_ABORT_RPL_RSS = 0x31,
67 CPL_CLOSE_CON_RPL = 0x32,
68 CPL_ISCSI_HDR = 0x33,
69 CPL_L2T_READ_RPL = 0x34,
70 CPL_RDMA_CQE = 0x35,
71 CPL_RDMA_CQE_READ_RSP = 0x36,
72 CPL_RDMA_CQE_ERR = 0x37,
73 CPL_RTE_READ_RPL = 0x38,
74 CPL_RX_DATA = 0x39,
75
76 CPL_ACT_OPEN_RPL = 0x40,
77 CPL_PASS_OPEN_RPL = 0x41,
78 CPL_RX_DATA_DDP = 0x42,
79 CPL_SMT_READ_RPL = 0x43,
80
81 CPL_ACT_ESTABLISH = 0x50,
82 CPL_PASS_ESTABLISH = 0x51,
83
84 CPL_PASS_ACCEPT_REQ = 0x70,
85
86 CPL_ASYNC_NOTIF = 0x80, /* fake opcode for async notifications */
87
88 CPL_TX_DMA_ACK = 0xA0,
89 CPL_RDMA_READ_REQ = 0xA1,
90 CPL_RDMA_TERMINATE = 0xA2,
91 CPL_TRACE_PKT = 0xA3,
92 CPL_RDMA_EC_STATUS = 0xA5,
93
94 NUM_CPL_CMDS /* must be last and previous entries must be sorted */
95};
96
97enum CPL_error {
98 CPL_ERR_NONE = 0,
99 CPL_ERR_TCAM_PARITY = 1,
100 CPL_ERR_TCAM_FULL = 3,
101 CPL_ERR_CONN_RESET = 20,
102 CPL_ERR_CONN_EXIST = 22,
103 CPL_ERR_ARP_MISS = 23,
104 CPL_ERR_BAD_SYN = 24,
105 CPL_ERR_CONN_TIMEDOUT = 30,
106 CPL_ERR_XMIT_TIMEDOUT = 31,
107 CPL_ERR_PERSIST_TIMEDOUT = 32,
108 CPL_ERR_FINWAIT2_TIMEDOUT = 33,
109 CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
110 CPL_ERR_RTX_NEG_ADVICE = 35,
111 CPL_ERR_PERSIST_NEG_ADVICE = 36,
112 CPL_ERR_ABORT_FAILED = 42,
113 CPL_ERR_GENERAL = 99
114};
115
116enum {
117 CPL_CONN_POLICY_AUTO = 0,
118 CPL_CONN_POLICY_ASK = 1,
119 CPL_CONN_POLICY_DENY = 3
120};
121
122enum {
123 ULP_MODE_NONE = 0,
124 ULP_MODE_ISCSI = 2,
125 ULP_MODE_RDMA = 4,
126 ULP_MODE_TCPDDP = 5
127};
128
129enum {
130 ULP_CRC_HEADER = 1 << 0,
131 ULP_CRC_DATA = 1 << 1
132};
133
134enum {
135 CPL_PASS_OPEN_ACCEPT,
136 CPL_PASS_OPEN_REJECT
137};
138
139enum {
140 CPL_ABORT_SEND_RST = 0,
141 CPL_ABORT_NO_RST,
142 CPL_ABORT_POST_CLOSE_REQ = 2
143};
144
145enum { /* TX_PKT_LSO ethernet types */
146 CPL_ETH_II,
147 CPL_ETH_II_VLAN,
148 CPL_ETH_802_3,
149 CPL_ETH_802_3_VLAN
150};
151
152enum { /* TCP congestion control algorithms */
153 CONG_ALG_RENO,
154 CONG_ALG_TAHOE,
155 CONG_ALG_NEWRENO,
156 CONG_ALG_HIGHSPEED
157};
158
159union opcode_tid {
160 __be32 opcode_tid;
161 __u8 opcode;
162};
163
164#define S_OPCODE 24
165#define V_OPCODE(x) ((x) << S_OPCODE)
166#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
167#define G_TID(x) ((x) & 0xFFFFFF)
168
169/* tid is assumed to be 24-bits */
170#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))
171
172#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
173
174/* extract the TID from a CPL command */
175#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
176
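
A hedged sketch of how these opcode/TID helpers are typically used when building and parsing a CPL message; cpl_close_con_req (defined further down in this header) is used only as a convenient example.

/* Illustrative only: fill in and read back a CPL's opcode_tid word. */
static inline void example_mk_close_req(struct cpl_close_con_req *req,
					unsigned int tid)
{
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
}

static inline unsigned int example_tid_of(const struct cpl_close_con_req *msg)
{
	return GET_TID(msg);	/* masks off the opcode in the top byte */
}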
177struct tcp_options {
178 __be16 mss;
179 __u8 wsf;
180#if defined(__LITTLE_ENDIAN_BITFIELD)
181 __u8:5;
182 __u8 ecn:1;
183 __u8 sack:1;
184 __u8 tstamp:1;
185#else
186 __u8 tstamp:1;
187 __u8 sack:1;
188 __u8 ecn:1;
189 __u8:5;
190#endif
191};
192
193struct rss_header {
194 __u8 opcode;
195#if defined(__LITTLE_ENDIAN_BITFIELD)
196 __u8 cpu_idx:6;
197 __u8 hash_type:2;
198#else
199 __u8 hash_type:2;
200 __u8 cpu_idx:6;
201#endif
202 __be16 cq_idx;
203 __be32 rss_hash_val;
204};
205
206#ifndef CHELSIO_FW
207struct work_request_hdr {
208 __be32 wr_hi;
209 __be32 wr_lo;
210};
211
212/* wr_hi fields */
213#define S_WR_SGE_CREDITS 0
214#define M_WR_SGE_CREDITS 0xFF
215#define V_WR_SGE_CREDITS(x) ((x) << S_WR_SGE_CREDITS)
216#define G_WR_SGE_CREDITS(x) (((x) >> S_WR_SGE_CREDITS) & M_WR_SGE_CREDITS)
217
218#define S_WR_SGLSFLT 8
219#define M_WR_SGLSFLT 0xFF
220#define V_WR_SGLSFLT(x) ((x) << S_WR_SGLSFLT)
221#define G_WR_SGLSFLT(x) (((x) >> S_WR_SGLSFLT) & M_WR_SGLSFLT)
222
223#define S_WR_BCNTLFLT 16
224#define M_WR_BCNTLFLT 0xF
225#define V_WR_BCNTLFLT(x) ((x) << S_WR_BCNTLFLT)
226#define G_WR_BCNTLFLT(x) (((x) >> S_WR_BCNTLFLT) & M_WR_BCNTLFLT)
227
228#define S_WR_DATATYPE 20
229#define V_WR_DATATYPE(x) ((x) << S_WR_DATATYPE)
230#define F_WR_DATATYPE V_WR_DATATYPE(1U)
231
232#define S_WR_COMPL 21
233#define V_WR_COMPL(x) ((x) << S_WR_COMPL)
234#define F_WR_COMPL V_WR_COMPL(1U)
235
236#define S_WR_EOP 22
237#define V_WR_EOP(x) ((x) << S_WR_EOP)
238#define F_WR_EOP V_WR_EOP(1U)
239
240#define S_WR_SOP 23
241#define V_WR_SOP(x) ((x) << S_WR_SOP)
242#define F_WR_SOP V_WR_SOP(1U)
243
244#define S_WR_OP 24
245#define M_WR_OP 0xFF
246#define V_WR_OP(x) ((x) << S_WR_OP)
247#define G_WR_OP(x) (((x) >> S_WR_OP) & M_WR_OP)
248
249/* wr_lo fields */
250#define S_WR_LEN 0
251#define M_WR_LEN 0xFF
252#define V_WR_LEN(x) ((x) << S_WR_LEN)
253#define G_WR_LEN(x) (((x) >> S_WR_LEN) & M_WR_LEN)
254
255#define S_WR_TID 8
256#define M_WR_TID 0xFFFFF
257#define V_WR_TID(x) ((x) << S_WR_TID)
258#define G_WR_TID(x) (((x) >> S_WR_TID) & M_WR_TID)
259
260#define S_WR_CR_FLUSH 30
261#define V_WR_CR_FLUSH(x) ((x) << S_WR_CR_FLUSH)
262#define F_WR_CR_FLUSH V_WR_CR_FLUSH(1U)
263
264#define S_WR_GEN 31
265#define V_WR_GEN(x) ((x) << S_WR_GEN)
266#define F_WR_GEN V_WR_GEN(1U)
267
268# define WR_HDR struct work_request_hdr wr
269# define RSS_HDR
270#else
271# define WR_HDR
272# define RSS_HDR struct rss_header rss_hdr;
273#endif
274
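
Host-built CPLs therefore start with the work request header selected by WR_HDR; below is a hedged sketch of filling it using the wr_hi/wr_lo fields defined above, where the op, len, tid and gen values are placeholders rather than values from this patch.

/* Illustrative only: populate a generic work request header. */
static inline void example_fill_wr(struct work_request_hdr *wrp,
				   unsigned int op, unsigned int len,
				   unsigned int tid, unsigned int gen)
{
	wrp->wr_hi = htonl(V_WR_OP(op) | F_WR_SOP | F_WR_EOP);
	wrp->wr_lo = htonl(V_WR_LEN(len) | V_WR_TID(tid) | V_WR_GEN(gen));
}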
275/* option 0 lower-half fields */
276#define S_CPL_STATUS 0
277#define M_CPL_STATUS 0xFF
278#define V_CPL_STATUS(x) ((x) << S_CPL_STATUS)
279#define G_CPL_STATUS(x) (((x) >> S_CPL_STATUS) & M_CPL_STATUS)
280
281#define S_INJECT_TIMER 6
282#define V_INJECT_TIMER(x) ((x) << S_INJECT_TIMER)
283#define F_INJECT_TIMER V_INJECT_TIMER(1U)
284
285#define S_NO_OFFLOAD 7
286#define V_NO_OFFLOAD(x) ((x) << S_NO_OFFLOAD)
287#define F_NO_OFFLOAD V_NO_OFFLOAD(1U)
288
289#define S_ULP_MODE 8
290#define M_ULP_MODE 0xF
291#define V_ULP_MODE(x) ((x) << S_ULP_MODE)
292#define G_ULP_MODE(x) (((x) >> S_ULP_MODE) & M_ULP_MODE)
293
294#define S_RCV_BUFSIZ 12
295#define M_RCV_BUFSIZ 0x3FFF
296#define V_RCV_BUFSIZ(x) ((x) << S_RCV_BUFSIZ)
297#define G_RCV_BUFSIZ(x) (((x) >> S_RCV_BUFSIZ) & M_RCV_BUFSIZ)
298
299#define S_TOS 26
300#define M_TOS 0x3F
301#define V_TOS(x) ((x) << S_TOS)
302#define G_TOS(x) (((x) >> S_TOS) & M_TOS)
303
304/* option 0 upper-half fields */
305#define S_DELACK 0
306#define V_DELACK(x) ((x) << S_DELACK)
307#define F_DELACK V_DELACK(1U)
308
309#define S_NO_CONG 1
310#define V_NO_CONG(x) ((x) << S_NO_CONG)
311#define F_NO_CONG V_NO_CONG(1U)
312
313#define S_SRC_MAC_SEL 2
314#define M_SRC_MAC_SEL 0x3
315#define V_SRC_MAC_SEL(x) ((x) << S_SRC_MAC_SEL)
316#define G_SRC_MAC_SEL(x) (((x) >> S_SRC_MAC_SEL) & M_SRC_MAC_SEL)
317
318#define S_L2T_IDX 4
319#define M_L2T_IDX 0x7FF
320#define V_L2T_IDX(x) ((x) << S_L2T_IDX)
321#define G_L2T_IDX(x) (((x) >> S_L2T_IDX) & M_L2T_IDX)
322
323#define S_TX_CHANNEL 15
324#define V_TX_CHANNEL(x) ((x) << S_TX_CHANNEL)
325#define F_TX_CHANNEL V_TX_CHANNEL(1U)
326
327#define S_TCAM_BYPASS 16
328#define V_TCAM_BYPASS(x) ((x) << S_TCAM_BYPASS)
329#define F_TCAM_BYPASS V_TCAM_BYPASS(1U)
330
331#define S_NAGLE 17
332#define V_NAGLE(x) ((x) << S_NAGLE)
333#define F_NAGLE V_NAGLE(1U)
334
335#define S_WND_SCALE 18
336#define M_WND_SCALE 0xF
337#define V_WND_SCALE(x) ((x) << S_WND_SCALE)
338#define G_WND_SCALE(x) (((x) >> S_WND_SCALE) & M_WND_SCALE)
339
340#define S_KEEP_ALIVE 22
341#define V_KEEP_ALIVE(x) ((x) << S_KEEP_ALIVE)
342#define F_KEEP_ALIVE V_KEEP_ALIVE(1U)
343
344#define S_MAX_RETRANS 23
345#define M_MAX_RETRANS 0xF
346#define V_MAX_RETRANS(x) ((x) << S_MAX_RETRANS)
347#define G_MAX_RETRANS(x) (((x) >> S_MAX_RETRANS) & M_MAX_RETRANS)
348
349#define S_MAX_RETRANS_OVERRIDE 27
350#define V_MAX_RETRANS_OVERRIDE(x) ((x) << S_MAX_RETRANS_OVERRIDE)
351#define F_MAX_RETRANS_OVERRIDE V_MAX_RETRANS_OVERRIDE(1U)
352
353#define S_MSS_IDX 28
354#define M_MSS_IDX 0xF
355#define V_MSS_IDX(x) ((x) << S_MSS_IDX)
356#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX)
357
358/* option 1 fields */
359#define S_RSS_ENABLE 0
360#define V_RSS_ENABLE(x) ((x) << S_RSS_ENABLE)
361#define F_RSS_ENABLE V_RSS_ENABLE(1U)
362
363#define S_RSS_MASK_LEN 1
364#define M_RSS_MASK_LEN 0x7
365#define V_RSS_MASK_LEN(x) ((x) << S_RSS_MASK_LEN)
366#define G_RSS_MASK_LEN(x) (((x) >> S_RSS_MASK_LEN) & M_RSS_MASK_LEN)
367
368#define S_CPU_IDX 4
369#define M_CPU_IDX 0x3F
370#define V_CPU_IDX(x) ((x) << S_CPU_IDX)
371#define G_CPU_IDX(x) (((x) >> S_CPU_IDX) & M_CPU_IDX)
372
373#define S_MAC_MATCH_VALID 18
374#define V_MAC_MATCH_VALID(x) ((x) << S_MAC_MATCH_VALID)
375#define F_MAC_MATCH_VALID V_MAC_MATCH_VALID(1U)
376
377#define S_CONN_POLICY 19
378#define M_CONN_POLICY 0x3
379#define V_CONN_POLICY(x) ((x) << S_CONN_POLICY)
380#define G_CONN_POLICY(x) (((x) >> S_CONN_POLICY) & M_CONN_POLICY)
381
382#define S_SYN_DEFENSE 21
383#define V_SYN_DEFENSE(x) ((x) << S_SYN_DEFENSE)
384#define F_SYN_DEFENSE V_SYN_DEFENSE(1U)
385
386#define S_VLAN_PRI 22
387#define M_VLAN_PRI 0x3
388#define V_VLAN_PRI(x) ((x) << S_VLAN_PRI)
389#define G_VLAN_PRI(x) (((x) >> S_VLAN_PRI) & M_VLAN_PRI)
390
391#define S_VLAN_PRI_VALID 24
392#define V_VLAN_PRI_VALID(x) ((x) << S_VLAN_PRI_VALID)
393#define F_VLAN_PRI_VALID V_VLAN_PRI_VALID(1U)
394
395#define S_PKT_TYPE 25
396#define M_PKT_TYPE 0x3
397#define V_PKT_TYPE(x) ((x) << S_PKT_TYPE)
398#define G_PKT_TYPE(x) (((x) >> S_PKT_TYPE) & M_PKT_TYPE)
399
400#define S_MAC_MATCH 27
401#define M_MAC_MATCH 0x1F
402#define V_MAC_MATCH(x) ((x) << S_MAC_MATCH)
403#define G_MAC_MATCH(x) (((x) >> S_MAC_MATCH) & M_MAC_MATCH)
404
405/* option 2 fields */
406#define S_CPU_INDEX 0
407#define M_CPU_INDEX 0x7F
408#define V_CPU_INDEX(x) ((x) << S_CPU_INDEX)
409#define G_CPU_INDEX(x) (((x) >> S_CPU_INDEX) & M_CPU_INDEX)
410
411#define S_CPU_INDEX_VALID 7
412#define V_CPU_INDEX_VALID(x) ((x) << S_CPU_INDEX_VALID)
413#define F_CPU_INDEX_VALID V_CPU_INDEX_VALID(1U)
414
415#define S_RX_COALESCE 8
416#define M_RX_COALESCE 0x3
417#define V_RX_COALESCE(x) ((x) << S_RX_COALESCE)
418#define G_RX_COALESCE(x) (((x) >> S_RX_COALESCE) & M_RX_COALESCE)
419
420#define S_RX_COALESCE_VALID 10
421#define V_RX_COALESCE_VALID(x) ((x) << S_RX_COALESCE_VALID)
422#define F_RX_COALESCE_VALID V_RX_COALESCE_VALID(1U)
423
424#define S_CONG_CONTROL_FLAVOR 11
425#define M_CONG_CONTROL_FLAVOR 0x3
426#define V_CONG_CONTROL_FLAVOR(x) ((x) << S_CONG_CONTROL_FLAVOR)
427#define G_CONG_CONTROL_FLAVOR(x) (((x) >> S_CONG_CONTROL_FLAVOR) & M_CONG_CONTROL_FLAVOR)
428
429#define S_PACING_FLAVOR 13
430#define M_PACING_FLAVOR 0x3
431#define V_PACING_FLAVOR(x) ((x) << S_PACING_FLAVOR)
432#define G_PACING_FLAVOR(x) (((x) >> S_PACING_FLAVOR) & M_PACING_FLAVOR)
433
434#define S_FLAVORS_VALID 15
435#define V_FLAVORS_VALID(x) ((x) << S_FLAVORS_VALID)
436#define F_FLAVORS_VALID V_FLAVORS_VALID(1U)
437
438#define S_RX_FC_DISABLE 16
439#define V_RX_FC_DISABLE(x) ((x) << S_RX_FC_DISABLE)
440#define F_RX_FC_DISABLE V_RX_FC_DISABLE(1U)
441
442#define S_RX_FC_VALID 17
443#define V_RX_FC_VALID(x) ((x) << S_RX_FC_VALID)
444#define F_RX_FC_VALID V_RX_FC_VALID(1U)
445
446struct cpl_pass_open_req {
447 WR_HDR;
448 union opcode_tid ot;
449 __be16 local_port;
450 __be16 peer_port;
451 __be32 local_ip;
452 __be32 peer_ip;
453 __be32 opt0h;
454 __be32 opt0l;
455 __be32 peer_netmask;
456 __be32 opt1;
457};
458
459struct cpl_pass_open_rpl {
460 RSS_HDR union opcode_tid ot;
461 __be16 local_port;
462 __be16 peer_port;
463 __be32 local_ip;
464 __be32 peer_ip;
465 __u8 resvd[7];
466 __u8 status;
467};
468
469struct cpl_pass_establish {
470 RSS_HDR union opcode_tid ot;
471 __be16 local_port;
472 __be16 peer_port;
473 __be32 local_ip;
474 __be32 peer_ip;
475 __be32 tos_tid;
476 __be16 l2t_idx;
477 __be16 tcp_opt;
478 __be32 snd_isn;
479 __be32 rcv_isn;
480};
481
482/* cpl_pass_establish.tos_tid fields */
483#define S_PASS_OPEN_TID 0
484#define M_PASS_OPEN_TID 0xFFFFFF
485#define V_PASS_OPEN_TID(x) ((x) << S_PASS_OPEN_TID)
486#define G_PASS_OPEN_TID(x) (((x) >> S_PASS_OPEN_TID) & M_PASS_OPEN_TID)
487
488#define S_PASS_OPEN_TOS 24
489#define M_PASS_OPEN_TOS 0xFF
490#define V_PASS_OPEN_TOS(x) ((x) << S_PASS_OPEN_TOS)
491#define G_PASS_OPEN_TOS(x) (((x) >> S_PASS_OPEN_TOS) & M_PASS_OPEN_TOS)
492
493/* cpl_pass_establish.l2t_idx fields */
494#define S_L2T_IDX16 5
495#define M_L2T_IDX16 0x7FF
496#define V_L2T_IDX16(x) ((x) << S_L2T_IDX16)
497#define G_L2T_IDX16(x) (((x) >> S_L2T_IDX16) & M_L2T_IDX16)
498
499/* cpl_pass_establish.tcp_opt fields (also applies to act_open_establish) */
500#define G_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1)
501#define G_TCPOPT_SACK(x) (((x) >> 6) & 1)
502#define G_TCPOPT_TSTAMP(x) (((x) >> 7) & 1)
503#define G_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
504#define G_TCPOPT_MSS(x) (((x) >> 12) & 0xf)
505
506struct cpl_pass_accept_req {
507 RSS_HDR union opcode_tid ot;
508 __be16 local_port;
509 __be16 peer_port;
510 __be32 local_ip;
511 __be32 peer_ip;
512 __be32 tos_tid;
513 struct tcp_options tcp_options;
514 __u8 dst_mac[6];
515 __be16 vlan_tag;
516 __u8 src_mac[6];
517#if defined(__LITTLE_ENDIAN_BITFIELD)
518 __u8:3;
519 __u8 addr_idx:3;
520 __u8 port_idx:1;
521 __u8 exact_match:1;
522#else
523 __u8 exact_match:1;
524 __u8 port_idx:1;
525 __u8 addr_idx:3;
526 __u8:3;
527#endif
528 __u8 rsvd;
529 __be32 rcv_isn;
530 __be32 rsvd2;
531};
532
533struct cpl_pass_accept_rpl {
534 WR_HDR;
535 union opcode_tid ot;
536 __be32 opt2;
537 __be32 rsvd;
538 __be32 peer_ip;
539 __be32 opt0h;
540 __be32 opt0l_status;
541};
542
543struct cpl_act_open_req {
544 WR_HDR;
545 union opcode_tid ot;
546 __be16 local_port;
547 __be16 peer_port;
548 __be32 local_ip;
549 __be32 peer_ip;
550 __be32 opt0h;
551 __be32 opt0l;
552 __be32 params;
553 __be32 opt2;
554};
555
556/* cpl_act_open_req.params fields */
557#define S_AOPEN_VLAN_PRI 9
558#define M_AOPEN_VLAN_PRI 0x3
559#define V_AOPEN_VLAN_PRI(x) ((x) << S_AOPEN_VLAN_PRI)
560#define G_AOPEN_VLAN_PRI(x) (((x) >> S_AOPEN_VLAN_PRI) & M_AOPEN_VLAN_PRI)
561
562#define S_AOPEN_VLAN_PRI_VALID 11
563#define V_AOPEN_VLAN_PRI_VALID(x) ((x) << S_AOPEN_VLAN_PRI_VALID)
564#define F_AOPEN_VLAN_PRI_VALID V_AOPEN_VLAN_PRI_VALID(1U)
565
566#define S_AOPEN_PKT_TYPE 12
567#define M_AOPEN_PKT_TYPE 0x3
568#define V_AOPEN_PKT_TYPE(x) ((x) << S_AOPEN_PKT_TYPE)
569#define G_AOPEN_PKT_TYPE(x) (((x) >> S_AOPEN_PKT_TYPE) & M_AOPEN_PKT_TYPE)
570
571#define S_AOPEN_MAC_MATCH 14
572#define M_AOPEN_MAC_MATCH 0x1F
573#define V_AOPEN_MAC_MATCH(x) ((x) << S_AOPEN_MAC_MATCH)
574#define G_AOPEN_MAC_MATCH(x) (((x) >> S_AOPEN_MAC_MATCH) & M_AOPEN_MAC_MATCH)
575
576#define S_AOPEN_MAC_MATCH_VALID 19
577#define V_AOPEN_MAC_MATCH_VALID(x) ((x) << S_AOPEN_MAC_MATCH_VALID)
578#define F_AOPEN_MAC_MATCH_VALID V_AOPEN_MAC_MATCH_VALID(1U)
579
580#define S_AOPEN_IFF_VLAN 20
581#define M_AOPEN_IFF_VLAN 0xFFF
582#define V_AOPEN_IFF_VLAN(x) ((x) << S_AOPEN_IFF_VLAN)
583#define G_AOPEN_IFF_VLAN(x) (((x) >> S_AOPEN_IFF_VLAN) & M_AOPEN_IFF_VLAN)
584
585struct cpl_act_open_rpl {
586 RSS_HDR union opcode_tid ot;
587 __be16 local_port;
588 __be16 peer_port;
589 __be32 local_ip;
590 __be32 peer_ip;
591 __be32 atid;
592 __u8 rsvd[3];
593 __u8 status;
594};
595
596struct cpl_act_establish {
597 RSS_HDR union opcode_tid ot;
598 __be16 local_port;
599 __be16 peer_port;
600 __be32 local_ip;
601 __be32 peer_ip;
602 __be32 tos_tid;
603 __be16 l2t_idx;
604 __be16 tcp_opt;
605 __be32 snd_isn;
606 __be32 rcv_isn;
607};
608
609struct cpl_get_tcb {
610 WR_HDR;
611 union opcode_tid ot;
612 __be16 cpuno;
613 __be16 rsvd;
614};
615
616struct cpl_get_tcb_rpl {
617 RSS_HDR union opcode_tid ot;
618 __u8 rsvd;
619 __u8 status;
620 __be16 len;
621};
622
623struct cpl_set_tcb {
624 WR_HDR;
625 union opcode_tid ot;
626 __u8 reply;
627 __u8 cpu_idx;
628 __be16 len;
629};
630
631/* cpl_set_tcb.reply fields */
632#define S_NO_REPLY 7
633#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
634#define F_NO_REPLY V_NO_REPLY(1U)
635
636struct cpl_set_tcb_field {
637 WR_HDR;
638 union opcode_tid ot;
639 __u8 reply;
640 __u8 cpu_idx;
641 __be16 word;
642 __be64 mask;
643 __be64 val;
644};
645
646struct cpl_set_tcb_rpl {
647 RSS_HDR union opcode_tid ot;
648 __u8 rsvd[3];
649 __u8 status;
650};
651
652struct cpl_pcmd {
653 WR_HDR;
654 union opcode_tid ot;
655 __u8 rsvd[3];
656#if defined(__LITTLE_ENDIAN_BITFIELD)
657 __u8 src:1;
658 __u8 bundle:1;
659 __u8 channel:1;
660 __u8:5;
661#else
662 __u8:5;
663 __u8 channel:1;
664 __u8 bundle:1;
665 __u8 src:1;
666#endif
667 __be32 pcmd_parm[2];
668};
669
670struct cpl_pcmd_reply {
671 RSS_HDR union opcode_tid ot;
672 __u8 status;
673 __u8 rsvd;
674 __be16 len;
675};
676
677struct cpl_close_con_req {
678 WR_HDR;
679 union opcode_tid ot;
680 __be32 rsvd;
681};
682
683struct cpl_close_con_rpl {
684 RSS_HDR union opcode_tid ot;
685 __u8 rsvd[3];
686 __u8 status;
687 __be32 snd_nxt;
688 __be32 rcv_nxt;
689};
690
691struct cpl_close_listserv_req {
692 WR_HDR;
693 union opcode_tid ot;
694 __u8 rsvd0;
695 __u8 cpu_idx;
696 __be16 rsvd1;
697};
698
699struct cpl_close_listserv_rpl {
700 RSS_HDR union opcode_tid ot;
701 __u8 rsvd[3];
702 __u8 status;
703};
704
705struct cpl_abort_req_rss {
706 RSS_HDR union opcode_tid ot;
707 __be32 rsvd0;
708 __u8 rsvd1;
709 __u8 status;
710 __u8 rsvd2[6];
711};
712
713struct cpl_abort_req {
714 WR_HDR;
715 union opcode_tid ot;
716 __be32 rsvd0;
717 __u8 rsvd1;
718 __u8 cmd;
719 __u8 rsvd2[6];
720};
721
722struct cpl_abort_rpl_rss {
723 RSS_HDR union opcode_tid ot;
724 __be32 rsvd0;
725 __u8 rsvd1;
726 __u8 status;
727 __u8 rsvd2[6];
728};
729
730struct cpl_abort_rpl {
731 WR_HDR;
732 union opcode_tid ot;
733 __be32 rsvd0;
734 __u8 rsvd1;
735 __u8 cmd;
736 __u8 rsvd2[6];
737};
738
739struct cpl_peer_close {
740 RSS_HDR union opcode_tid ot;
741 __be32 rcv_nxt;
742};
743
744struct tx_data_wr {
745 __be32 wr_hi;
746 __be32 wr_lo;
747 __be32 len;
748 __be32 flags;
749 __be32 sndseq;
750 __be32 param;
751};
752
753/* tx_data_wr.param fields */
754#define S_TX_PORT 0
755#define M_TX_PORT 0x7
756#define V_TX_PORT(x) ((x) << S_TX_PORT)
757#define G_TX_PORT(x) (((x) >> S_TX_PORT) & M_TX_PORT)
758
759#define S_TX_MSS 4
760#define M_TX_MSS 0xF
761#define V_TX_MSS(x) ((x) << S_TX_MSS)
762#define G_TX_MSS(x) (((x) >> S_TX_MSS) & M_TX_MSS)
763
764#define S_TX_QOS 8
765#define M_TX_QOS 0xFF
766#define V_TX_QOS(x) ((x) << S_TX_QOS)
767#define G_TX_QOS(x) (((x) >> S_TX_QOS) & M_TX_QOS)
768
769#define S_TX_SNDBUF 16
770#define M_TX_SNDBUF 0xFFFF
771#define V_TX_SNDBUF(x) ((x) << S_TX_SNDBUF)
772#define G_TX_SNDBUF(x) (((x) >> S_TX_SNDBUF) & M_TX_SNDBUF)
773
774struct cpl_tx_data {
775 union opcode_tid ot;
776 __be32 len;
777 __be32 rsvd;
778 __be16 urg;
779 __be16 flags;
780};
781
782/* cpl_tx_data.flags fields */
783#define S_TX_ULP_SUBMODE 6
784#define M_TX_ULP_SUBMODE 0xF
785#define V_TX_ULP_SUBMODE(x) ((x) << S_TX_ULP_SUBMODE)
786#define G_TX_ULP_SUBMODE(x) (((x) >> S_TX_ULP_SUBMODE) & M_TX_ULP_SUBMODE)
787
788#define S_TX_ULP_MODE 10
789#define M_TX_ULP_MODE 0xF
790#define V_TX_ULP_MODE(x) ((x) << S_TX_ULP_MODE)
791#define G_TX_ULP_MODE(x) (((x) >> S_TX_ULP_MODE) & M_TX_ULP_MODE)
792
793#define S_TX_SHOVE 14
794#define V_TX_SHOVE(x) ((x) << S_TX_SHOVE)
795#define F_TX_SHOVE V_TX_SHOVE(1U)
796
797#define S_TX_MORE 15
798#define V_TX_MORE(x) ((x) << S_TX_MORE)
799#define F_TX_MORE V_TX_MORE(1U)
800
801/* additional tx_data_wr.flags fields */
802#define S_TX_CPU_IDX 0
803#define M_TX_CPU_IDX 0x3F
804#define V_TX_CPU_IDX(x) ((x) << S_TX_CPU_IDX)
805#define G_TX_CPU_IDX(x) (((x) >> S_TX_CPU_IDX) & M_TX_CPU_IDX)
806
807#define S_TX_URG 16
808#define V_TX_URG(x) ((x) << S_TX_URG)
809#define F_TX_URG V_TX_URG(1U)
810
811#define S_TX_CLOSE 17
812#define V_TX_CLOSE(x) ((x) << S_TX_CLOSE)
813#define F_TX_CLOSE V_TX_CLOSE(1U)
814
815#define S_TX_INIT 18
816#define V_TX_INIT(x) ((x) << S_TX_INIT)
817#define F_TX_INIT V_TX_INIT(1U)
818
819#define S_TX_IMM_ACK 19
820#define V_TX_IMM_ACK(x) ((x) << S_TX_IMM_ACK)
821#define F_TX_IMM_ACK V_TX_IMM_ACK(1U)
822
823#define S_TX_IMM_DMA 20
824#define V_TX_IMM_DMA(x) ((x) << S_TX_IMM_DMA)
825#define F_TX_IMM_DMA V_TX_IMM_DMA(1U)
826
827struct cpl_tx_data_ack {
828 RSS_HDR union opcode_tid ot;
829 __be32 ack_seq;
830};
831
832struct cpl_wr_ack {
833 RSS_HDR union opcode_tid ot;
834 __be16 credits;
835 __be16 rsvd;
836 __be32 snd_nxt;
837 __be32 snd_una;
838};
839
840struct cpl_rdma_ec_status {
841 RSS_HDR union opcode_tid ot;
842 __u8 rsvd[3];
843 __u8 status;
844};
845
846struct mngt_pktsched_wr {
847 __be32 wr_hi;
848 __be32 wr_lo;
849 __u8 mngt_opcode;
850 __u8 rsvd[7];
851 __u8 sched;
852 __u8 idx;
853 __u8 min;
854 __u8 max;
855 __u8 binding;
856 __u8 rsvd1[3];
857};
858
859struct cpl_iscsi_hdr {
860 RSS_HDR union opcode_tid ot;
861 __be16 pdu_len_ddp;
862 __be16 len;
863 __be32 seq;
864 __be16 urg;
865 __u8 rsvd;
866 __u8 status;
867};
868
869/* cpl_iscsi_hdr.pdu_len_ddp fields */
870#define S_ISCSI_PDU_LEN 0
871#define M_ISCSI_PDU_LEN 0x7FFF
872#define V_ISCSI_PDU_LEN(x) ((x) << S_ISCSI_PDU_LEN)
873#define G_ISCSI_PDU_LEN(x) (((x) >> S_ISCSI_PDU_LEN) & M_ISCSI_PDU_LEN)
874
875#define S_ISCSI_DDP 15
876#define V_ISCSI_DDP(x) ((x) << S_ISCSI_DDP)
877#define F_ISCSI_DDP V_ISCSI_DDP(1U)
878
879struct cpl_rx_data {
880 RSS_HDR union opcode_tid ot;
881 __be16 rsvd;
882 __be16 len;
883 __be32 seq;
884 __be16 urg;
885#if defined(__LITTLE_ENDIAN_BITFIELD)
886 __u8 dack_mode:2;
887 __u8 psh:1;
888 __u8 heartbeat:1;
889 __u8:4;
890#else
891 __u8:4;
892 __u8 heartbeat:1;
893 __u8 psh:1;
894 __u8 dack_mode:2;
895#endif
896 __u8 status;
897};
898
899struct cpl_rx_data_ack {
900 WR_HDR;
901 union opcode_tid ot;
902 __be32 credit_dack;
903};
904
905/* cpl_rx_data_ack.credit_dack fields */
906#define S_RX_CREDITS 0
907#define M_RX_CREDITS 0x7FFFFFF
908#define V_RX_CREDITS(x) ((x) << S_RX_CREDITS)
909#define G_RX_CREDITS(x) (((x) >> S_RX_CREDITS) & M_RX_CREDITS)
910
911#define S_RX_MODULATE 27
912#define V_RX_MODULATE(x) ((x) << S_RX_MODULATE)
913#define F_RX_MODULATE V_RX_MODULATE(1U)
914
915#define S_RX_FORCE_ACK 28
916#define V_RX_FORCE_ACK(x) ((x) << S_RX_FORCE_ACK)
917#define F_RX_FORCE_ACK V_RX_FORCE_ACK(1U)
918
919#define S_RX_DACK_MODE 29
920#define M_RX_DACK_MODE 0x3
921#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
922#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
923
924#define S_RX_DACK_CHANGE 31
925#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
926#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
927
928struct cpl_rx_urg_notify {
929 RSS_HDR union opcode_tid ot;
930 __be32 seq;
931};
932
933struct cpl_rx_ddp_complete {
934 RSS_HDR union opcode_tid ot;
935 __be32 ddp_report;
936};
937
938struct cpl_rx_data_ddp {
939 RSS_HDR union opcode_tid ot;
940 __be16 urg;
941 __be16 len;
942 __be32 seq;
943 union {
944 __be32 nxt_seq;
945 __be32 ddp_report;
946 };
947 __be32 ulp_crc;
948 __be32 ddpvld_status;
949};
950
951/* cpl_rx_data_ddp.ddpvld_status fields */
952#define S_DDP_STATUS 0
953#define M_DDP_STATUS 0xFF
954#define V_DDP_STATUS(x) ((x) << S_DDP_STATUS)
955#define G_DDP_STATUS(x) (((x) >> S_DDP_STATUS) & M_DDP_STATUS)
956
957#define S_DDP_VALID 15
958#define M_DDP_VALID 0x1FFFF
959#define V_DDP_VALID(x) ((x) << S_DDP_VALID)
960#define G_DDP_VALID(x) (((x) >> S_DDP_VALID) & M_DDP_VALID)
961
962#define S_DDP_PPOD_MISMATCH 15
963#define V_DDP_PPOD_MISMATCH(x) ((x) << S_DDP_PPOD_MISMATCH)
964#define F_DDP_PPOD_MISMATCH V_DDP_PPOD_MISMATCH(1U)
965
966#define S_DDP_PDU 16
967#define V_DDP_PDU(x) ((x) << S_DDP_PDU)
968#define F_DDP_PDU V_DDP_PDU(1U)
969
970#define S_DDP_LLIMIT_ERR 17
971#define V_DDP_LLIMIT_ERR(x) ((x) << S_DDP_LLIMIT_ERR)
972#define F_DDP_LLIMIT_ERR V_DDP_LLIMIT_ERR(1U)
973
974#define S_DDP_PPOD_PARITY_ERR 18
975#define V_DDP_PPOD_PARITY_ERR(x) ((x) << S_DDP_PPOD_PARITY_ERR)
976#define F_DDP_PPOD_PARITY_ERR V_DDP_PPOD_PARITY_ERR(1U)
977
978#define S_DDP_PADDING_ERR 19
979#define V_DDP_PADDING_ERR(x) ((x) << S_DDP_PADDING_ERR)
980#define F_DDP_PADDING_ERR V_DDP_PADDING_ERR(1U)
981
982#define S_DDP_HDRCRC_ERR 20
983#define V_DDP_HDRCRC_ERR(x) ((x) << S_DDP_HDRCRC_ERR)
984#define F_DDP_HDRCRC_ERR V_DDP_HDRCRC_ERR(1U)
985
986#define S_DDP_DATACRC_ERR 21
987#define V_DDP_DATACRC_ERR(x) ((x) << S_DDP_DATACRC_ERR)
988#define F_DDP_DATACRC_ERR V_DDP_DATACRC_ERR(1U)
989
990#define S_DDP_INVALID_TAG 22
991#define V_DDP_INVALID_TAG(x) ((x) << S_DDP_INVALID_TAG)
992#define F_DDP_INVALID_TAG V_DDP_INVALID_TAG(1U)
993
994#define S_DDP_ULIMIT_ERR 23
995#define V_DDP_ULIMIT_ERR(x) ((x) << S_DDP_ULIMIT_ERR)
996#define F_DDP_ULIMIT_ERR V_DDP_ULIMIT_ERR(1U)
997
998#define S_DDP_OFFSET_ERR 24
999#define V_DDP_OFFSET_ERR(x) ((x) << S_DDP_OFFSET_ERR)
1000#define F_DDP_OFFSET_ERR V_DDP_OFFSET_ERR(1U)
1001
1002#define S_DDP_COLOR_ERR 25
1003#define V_DDP_COLOR_ERR(x) ((x) << S_DDP_COLOR_ERR)
1004#define F_DDP_COLOR_ERR V_DDP_COLOR_ERR(1U)
1005
1006#define S_DDP_TID_MISMATCH 26
1007#define V_DDP_TID_MISMATCH(x) ((x) << S_DDP_TID_MISMATCH)
1008#define F_DDP_TID_MISMATCH V_DDP_TID_MISMATCH(1U)
1009
1010#define S_DDP_INVALID_PPOD 27
1011#define V_DDP_INVALID_PPOD(x) ((x) << S_DDP_INVALID_PPOD)
1012#define F_DDP_INVALID_PPOD V_DDP_INVALID_PPOD(1U)
1013
1014#define S_DDP_ULP_MODE 28
1015#define M_DDP_ULP_MODE 0xF
1016#define V_DDP_ULP_MODE(x) ((x) << S_DDP_ULP_MODE)
1017#define G_DDP_ULP_MODE(x) (((x) >> S_DDP_ULP_MODE) & M_DDP_ULP_MODE)
1018
1019/* cpl_rx_data_ddp.ddp_report fields */
1020#define S_DDP_OFFSET 0
1021#define M_DDP_OFFSET 0x3FFFFF
1022#define V_DDP_OFFSET(x) ((x) << S_DDP_OFFSET)
1023#define G_DDP_OFFSET(x) (((x) >> S_DDP_OFFSET) & M_DDP_OFFSET)
1024
1025#define S_DDP_URG 24
1026#define V_DDP_URG(x) ((x) << S_DDP_URG)
1027#define F_DDP_URG V_DDP_URG(1U)
1028
1029#define S_DDP_PSH 25
1030#define V_DDP_PSH(x) ((x) << S_DDP_PSH)
1031#define F_DDP_PSH V_DDP_PSH(1U)
1032
1033#define S_DDP_BUF_COMPLETE 26
1034#define V_DDP_BUF_COMPLETE(x) ((x) << S_DDP_BUF_COMPLETE)
1035#define F_DDP_BUF_COMPLETE V_DDP_BUF_COMPLETE(1U)
1036
1037#define S_DDP_BUF_TIMED_OUT 27
1038#define V_DDP_BUF_TIMED_OUT(x) ((x) << S_DDP_BUF_TIMED_OUT)
1039#define F_DDP_BUF_TIMED_OUT V_DDP_BUF_TIMED_OUT(1U)
1040
1041#define S_DDP_BUF_IDX 28
1042#define V_DDP_BUF_IDX(x) ((x) << S_DDP_BUF_IDX)
1043#define F_DDP_BUF_IDX V_DDP_BUF_IDX(1U)
1044
1045struct cpl_tx_pkt {
1046 WR_HDR;
1047 __be32 cntrl;
1048 __be32 len;
1049};
1050
1051struct cpl_tx_pkt_lso {
1052 WR_HDR;
1053 __be32 cntrl;
1054 __be32 len;
1055
1056 __be32 rsvd;
1057 __be32 lso_info;
1058};
1059
1060/* cpl_tx_pkt*.cntrl fields */
1061#define S_TXPKT_VLAN 0
1062#define M_TXPKT_VLAN 0xFFFF
1063#define V_TXPKT_VLAN(x) ((x) << S_TXPKT_VLAN)
1064#define G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN)
1065
1066#define S_TXPKT_INTF 16
1067#define M_TXPKT_INTF 0xF
1068#define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF)
1069#define G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF)
1070
1071#define S_TXPKT_IPCSUM_DIS 20
1072#define V_TXPKT_IPCSUM_DIS(x) ((x) << S_TXPKT_IPCSUM_DIS)
1073#define F_TXPKT_IPCSUM_DIS V_TXPKT_IPCSUM_DIS(1U)
1074
1075#define S_TXPKT_L4CSUM_DIS 21
1076#define V_TXPKT_L4CSUM_DIS(x) ((x) << S_TXPKT_L4CSUM_DIS)
1077#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1U)
1078
1079#define S_TXPKT_VLAN_VLD 22
1080#define V_TXPKT_VLAN_VLD(x) ((x) << S_TXPKT_VLAN_VLD)
1081#define F_TXPKT_VLAN_VLD V_TXPKT_VLAN_VLD(1U)
1082
1083#define S_TXPKT_LOOPBACK 23
1084#define V_TXPKT_LOOPBACK(x) ((x) << S_TXPKT_LOOPBACK)
1085#define F_TXPKT_LOOPBACK V_TXPKT_LOOPBACK(1U)
1086
1087#define S_TXPKT_OPCODE 24
1088#define M_TXPKT_OPCODE 0xFF
1089#define V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE)
1090#define G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE)
1091
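
A hedged sketch of composing the cntrl word of a CPL_TX_PKT from the fields above; whether checksum offload is disabled is a caller decision and is shown here purely for illustration.

/* Illustrative only: build cpl_tx_pkt.cntrl for egress port 'intf'. */
static inline __be32 example_tx_pkt_cntrl(unsigned int intf, int no_csum)
{
	u32 cntrl = V_TXPKT_OPCODE(CPL_TX_PKT) | V_TXPKT_INTF(intf);

	if (no_csum)	/* leave IP and L4 checksums to software */
		cntrl |= F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS;

	return htonl(cntrl);
}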
1092/* cpl_tx_pkt_lso.lso_info fields */
1093#define S_LSO_MSS 0
1094#define M_LSO_MSS 0x3FFF
1095#define V_LSO_MSS(x) ((x) << S_LSO_MSS)
1096#define G_LSO_MSS(x) (((x) >> S_LSO_MSS) & M_LSO_MSS)
1097
1098#define S_LSO_ETH_TYPE 14
1099#define M_LSO_ETH_TYPE 0x3
1100#define V_LSO_ETH_TYPE(x) ((x) << S_LSO_ETH_TYPE)
1101#define G_LSO_ETH_TYPE(x) (((x) >> S_LSO_ETH_TYPE) & M_LSO_ETH_TYPE)
1102
1103#define S_LSO_TCPHDR_WORDS 16
1104#define M_LSO_TCPHDR_WORDS 0xF
1105#define V_LSO_TCPHDR_WORDS(x) ((x) << S_LSO_TCPHDR_WORDS)
1106#define G_LSO_TCPHDR_WORDS(x) (((x) >> S_LSO_TCPHDR_WORDS) & M_LSO_TCPHDR_WORDS)
1107
1108#define S_LSO_IPHDR_WORDS 20
1109#define M_LSO_IPHDR_WORDS 0xF
1110#define V_LSO_IPHDR_WORDS(x) ((x) << S_LSO_IPHDR_WORDS)
1111#define G_LSO_IPHDR_WORDS(x) (((x) >> S_LSO_IPHDR_WORDS) & M_LSO_IPHDR_WORDS)
1112
1113#define S_LSO_IPV6 24
1114#define V_LSO_IPV6(x) ((x) << S_LSO_IPV6)
1115#define F_LSO_IPV6 V_LSO_IPV6(1U)
1116
1117struct cpl_trace_pkt {
1118#ifdef CHELSIO_FW
1119 __u8 rss_opcode;
1120#if defined(__LITTLE_ENDIAN_BITFIELD)
1121 __u8 err:1;
1122 __u8:7;
1123#else
1124 __u8:7;
1125 __u8 err:1;
1126#endif
1127 __u8 rsvd0;
1128#if defined(__LITTLE_ENDIAN_BITFIELD)
1129 __u8 qid:4;
1130 __u8:4;
1131#else
1132 __u8:4;
1133 __u8 qid:4;
1134#endif
1135 __be32 tstamp;
1136#endif /* CHELSIO_FW */
1137
1138 __u8 opcode;
1139#if defined(__LITTLE_ENDIAN_BITFIELD)
1140 __u8 iff:4;
1141 __u8:4;
1142#else
1143 __u8:4;
1144 __u8 iff:4;
1145#endif
1146 __u8 rsvd[4];
1147 __be16 len;
1148};
1149
1150struct cpl_rx_pkt {
1151 RSS_HDR __u8 opcode;
1152#if defined(__LITTLE_ENDIAN_BITFIELD)
1153 __u8 iff:4;
1154 __u8 csum_valid:1;
1155 __u8 ipmi_pkt:1;
1156 __u8 vlan_valid:1;
1157 __u8 fragment:1;
1158#else
1159 __u8 fragment:1;
1160 __u8 vlan_valid:1;
1161 __u8 ipmi_pkt:1;
1162 __u8 csum_valid:1;
1163 __u8 iff:4;
1164#endif
1165 __be16 csum;
1166 __be16 vlan;
1167 __be16 len;
1168};
1169
1170struct cpl_l2t_write_req {
1171 WR_HDR;
1172 union opcode_tid ot;
1173 __be32 params;
1174 __u8 rsvd[2];
1175 __u8 dst_mac[6];
1176};
1177
1178/* cpl_l2t_write_req.params fields */
1179#define S_L2T_W_IDX 0
1180#define M_L2T_W_IDX 0x7FF
1181#define V_L2T_W_IDX(x) ((x) << S_L2T_W_IDX)
1182#define G_L2T_W_IDX(x) (((x) >> S_L2T_W_IDX) & M_L2T_W_IDX)
1183
1184#define S_L2T_W_VLAN 11
1185#define M_L2T_W_VLAN 0xFFF
1186#define V_L2T_W_VLAN(x) ((x) << S_L2T_W_VLAN)
1187#define G_L2T_W_VLAN(x) (((x) >> S_L2T_W_VLAN) & M_L2T_W_VLAN)
1188
1189#define S_L2T_W_IFF 23
1190#define M_L2T_W_IFF 0xF
1191#define V_L2T_W_IFF(x) ((x) << S_L2T_W_IFF)
1192#define G_L2T_W_IFF(x) (((x) >> S_L2T_W_IFF) & M_L2T_W_IFF)
1193
1194#define S_L2T_W_PRIO 27
1195#define M_L2T_W_PRIO 0x7
1196#define V_L2T_W_PRIO(x) ((x) << S_L2T_W_PRIO)
1197#define G_L2T_W_PRIO(x) (((x) >> S_L2T_W_PRIO) & M_L2T_W_PRIO)
1198
1199struct cpl_l2t_write_rpl {
1200 RSS_HDR union opcode_tid ot;
1201 __u8 status;
1202 __u8 rsvd[3];
1203};
1204
1205struct cpl_l2t_read_req {
1206 WR_HDR;
1207 union opcode_tid ot;
1208 __be16 rsvd;
1209 __be16 l2t_idx;
1210};
1211
1212struct cpl_l2t_read_rpl {
1213 RSS_HDR union opcode_tid ot;
1214 __be32 params;
1215 __u8 rsvd[2];
1216 __u8 dst_mac[6];
1217};
1218
1219/* cpl_l2t_read_rpl.params fields */
1220#define S_L2T_R_PRIO 0
1221#define M_L2T_R_PRIO 0x7
1222#define V_L2T_R_PRIO(x) ((x) << S_L2T_R_PRIO)
1223#define G_L2T_R_PRIO(x) (((x) >> S_L2T_R_PRIO) & M_L2T_R_PRIO)
1224
1225#define S_L2T_R_VLAN 8
1226#define M_L2T_R_VLAN 0xFFF
1227#define V_L2T_R_VLAN(x) ((x) << S_L2T_R_VLAN)
1228#define G_L2T_R_VLAN(x) (((x) >> S_L2T_R_VLAN) & M_L2T_R_VLAN)
1229
1230#define S_L2T_R_IFF 20
1231#define M_L2T_R_IFF 0xF
1232#define V_L2T_R_IFF(x) ((x) << S_L2T_R_IFF)
1233#define G_L2T_R_IFF(x) (((x) >> S_L2T_R_IFF) & M_L2T_R_IFF)
1234
1235#define S_L2T_STATUS 24
1236#define M_L2T_STATUS 0xFF
1237#define V_L2T_STATUS(x) ((x) << S_L2T_STATUS)
1238#define G_L2T_STATUS(x) (((x) >> S_L2T_STATUS) & M_L2T_STATUS)
1239
1240struct cpl_smt_write_req {
1241 WR_HDR;
1242 union opcode_tid ot;
1243 __u8 rsvd0;
1244#if defined(__LITTLE_ENDIAN_BITFIELD)
1245 __u8 mtu_idx:4;
1246 __u8 iff:4;
1247#else
1248 __u8 iff:4;
1249 __u8 mtu_idx:4;
1250#endif
1251 __be16 rsvd2;
1252 __be16 rsvd3;
1253 __u8 src_mac1[6];
1254 __be16 rsvd4;
1255 __u8 src_mac0[6];
1256};
1257
1258struct cpl_smt_write_rpl {
1259 RSS_HDR union opcode_tid ot;
1260 __u8 status;
1261 __u8 rsvd[3];
1262};
1263
1264struct cpl_smt_read_req {
1265 WR_HDR;
1266 union opcode_tid ot;
1267 __u8 rsvd0;
1268#if defined(__LITTLE_ENDIAN_BITFIELD)
1269 __u8:4;
1270 __u8 iff:4;
1271#else
1272 __u8 iff:4;
1273 __u8:4;
1274#endif
1275 __be16 rsvd2;
1276};
1277
1278struct cpl_smt_read_rpl {
1279 RSS_HDR union opcode_tid ot;
1280 __u8 status;
1281#if defined(__LITTLE_ENDIAN_BITFIELD)
1282 __u8 mtu_idx:4;
1283 __u8:4;
1284#else
1285 __u8:4;
1286 __u8 mtu_idx:4;
1287#endif
1288 __be16 rsvd2;
1289 __be16 rsvd3;
1290 __u8 src_mac1[6];
1291 __be16 rsvd4;
1292 __u8 src_mac0[6];
1293};
1294
1295struct cpl_rte_delete_req {
1296 WR_HDR;
1297 union opcode_tid ot;
1298 __be32 params;
1299};
1300
1301/* { cpl_rte_delete_req, cpl_rte_read_req }.params fields */
1302#define S_RTE_REQ_LUT_IX 8
1303#define M_RTE_REQ_LUT_IX 0x7FF
1304#define V_RTE_REQ_LUT_IX(x) ((x) << S_RTE_REQ_LUT_IX)
1305#define G_RTE_REQ_LUT_IX(x) (((x) >> S_RTE_REQ_LUT_IX) & M_RTE_REQ_LUT_IX)
1306
1307#define S_RTE_REQ_LUT_BASE 19
1308#define M_RTE_REQ_LUT_BASE 0x7FF
1309#define V_RTE_REQ_LUT_BASE(x) ((x) << S_RTE_REQ_LUT_BASE)
1310#define G_RTE_REQ_LUT_BASE(x) (((x) >> S_RTE_REQ_LUT_BASE) & M_RTE_REQ_LUT_BASE)
1311
1312#define S_RTE_READ_REQ_SELECT 31
1313#define V_RTE_READ_REQ_SELECT(x) ((x) << S_RTE_READ_REQ_SELECT)
1314#define F_RTE_READ_REQ_SELECT V_RTE_READ_REQ_SELECT(1U)
1315
1316struct cpl_rte_delete_rpl {
1317 RSS_HDR union opcode_tid ot;
1318 __u8 status;
1319 __u8 rsvd[3];
1320};
1321
1322struct cpl_rte_write_req {
1323 WR_HDR;
1324 union opcode_tid ot;
1325#if defined(__LITTLE_ENDIAN_BITFIELD)
1326 __u8:6;
1327 __u8 write_tcam:1;
1328 __u8 write_l2t_lut:1;
1329#else
1330 __u8 write_l2t_lut:1;
1331 __u8 write_tcam:1;
1332 __u8:6;
1333#endif
1334 __u8 rsvd[3];
1335 __be32 lut_params;
1336 __be16 rsvd2;
1337 __be16 l2t_idx;
1338 __be32 netmask;
1339 __be32 faddr;
1340};
1341
1342/* cpl_rte_write_req.lut_params fields */
1343#define S_RTE_WRITE_REQ_LUT_IX 10
1344#define M_RTE_WRITE_REQ_LUT_IX 0x7FF
1345#define V_RTE_WRITE_REQ_LUT_IX(x) ((x) << S_RTE_WRITE_REQ_LUT_IX)
1346#define G_RTE_WRITE_REQ_LUT_IX(x) (((x) >> S_RTE_WRITE_REQ_LUT_IX) & M_RTE_WRITE_REQ_LUT_IX)
1347
1348#define S_RTE_WRITE_REQ_LUT_BASE 21
1349#define M_RTE_WRITE_REQ_LUT_BASE 0x7FF
1350#define V_RTE_WRITE_REQ_LUT_BASE(x) ((x) << S_RTE_WRITE_REQ_LUT_BASE)
1351#define G_RTE_WRITE_REQ_LUT_BASE(x) (((x) >> S_RTE_WRITE_REQ_LUT_BASE) & M_RTE_WRITE_REQ_LUT_BASE)
1352
1353struct cpl_rte_write_rpl {
1354 RSS_HDR union opcode_tid ot;
1355 __u8 status;
1356 __u8 rsvd[3];
1357};
1358
1359struct cpl_rte_read_req {
1360 WR_HDR;
1361 union opcode_tid ot;
1362 __be32 params;
1363};
1364
1365struct cpl_rte_read_rpl {
1366 RSS_HDR union opcode_tid ot;
1367 __u8 status;
1368 __u8 rsvd0;
1369 __be16 l2t_idx;
1370#if defined(__LITTLE_ENDIAN_BITFIELD)
1371 __u8:7;
1372 __u8 select:1;
1373#else
1374 __u8 select:1;
1375 __u8:7;
1376#endif
1377 __u8 rsvd2[3];
1378 __be32 addr;
1379};
1380
1381struct cpl_tid_release {
1382 WR_HDR;
1383 union opcode_tid ot;
1384 __be32 rsvd;
1385};
1386
1387struct cpl_barrier {
1388 WR_HDR;
1389 __u8 opcode;
1390 __u8 rsvd[7];
1391};
1392
1393struct cpl_rdma_read_req {
1394 __u8 opcode;
1395 __u8 rsvd[15];
1396};
1397
1398struct cpl_rdma_terminate {
1399#ifdef CHELSIO_FW
1400 __u8 opcode;
1401 __u8 rsvd[2];
1402#if defined(__LITTLE_ENDIAN_BITFIELD)
1403 __u8 rspq:3;
1404 __u8:5;
1405#else
1406 __u8:5;
1407 __u8 rspq:3;
1408#endif
1409 __be32 tid_len;
1410#endif
1411 __be32 msn;
1412 __be32 mo;
1413 __u8 data[0];
1414};
1415
1416/* cpl_rdma_terminate.tid_len fields */
1417#define S_FLIT_CNT 0
1418#define M_FLIT_CNT 0xFF
1419#define V_FLIT_CNT(x) ((x) << S_FLIT_CNT)
1420#define G_FLIT_CNT(x) (((x) >> S_FLIT_CNT) & M_FLIT_CNT)
1421
1422#define S_TERM_TID 8
1423#define M_TERM_TID 0xFFFFF
1424#define V_TERM_TID(x) ((x) << S_TERM_TID)
1425#define G_TERM_TID(x) (((x) >> S_TERM_TID) & M_TERM_TID)
1426#endif /* T3_CPL_H */
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
new file mode 100644
index 000000000000..a4e2e57e1465
--- /dev/null
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -0,0 +1,3354 @@
1/*
2 * This file is part of the Chelsio T3 Ethernet driver.
3 *
4 * Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 */
11
12#include "common.h"
13#include "regs.h"
14#include "sge_defs.h"
15#include "firmware_exports.h"
16
17 /**
18 * t3_wait_op_done_val - wait until an operation is completed
19 * @adapter: the adapter performing the operation
20 * @reg: the register to check for completion
21 * @mask: a single-bit field within @reg that indicates completion
22 * @polarity: the value of the field when the operation is completed
23 * @attempts: number of check iterations
24 * @delay: delay in usecs between iterations
25 * @valp: where to store the value of the register at completion time
26 *
27 * Wait until an operation is completed by checking a bit in a register
28 * up to @attempts times. If @valp is not NULL the value of the register
29 * at the time it indicated completion is stored there. Returns 0 if the
30 * operation completes and -EAGAIN otherwise.
31 */
32
33int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
34 int polarity, int attempts, int delay, u32 *valp)
35{
36 while (1) {
37 u32 val = t3_read_reg(adapter, reg);
38
39 if (!!(val & mask) == polarity) {
40 if (valp)
41 *valp = val;
42 return 0;
43 }
44 if (--attempts == 0)
45 return -EAGAIN;
46 if (delay)
47 udelay(delay);
48 }
49}
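A typical caller polls a busy bit until it reaches the expected polarity; a hedged sketch using register names that appear later in this file (not part of the patch):

	u32 val;
	/* wait up to 5 polls, 10 us apart, for the serial-flash BUSY bit to clear */
	if (t3_wait_op_done_val(adapter, A_SF_OP, F_BUSY, 0, 5, 10, &val))
		return -EAGAIN;	/* still busy after all attempts */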
50
51/**
52 * t3_write_regs - write a bunch of registers
53 * @adapter: the adapter to program
54 * @p: an array of register address/register value pairs
55 * @n: the number of address/value pairs
56 * @offset: register address offset
57 *
58 * Takes an array of register address/register value pairs and writes each
59 * value to the corresponding register. Register addresses are adjusted
60 * by the supplied offset.
61 */
62void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
63 int n, unsigned int offset)
64{
65 while (n--) {
66 t3_write_reg(adapter, p->reg_addr + offset, p->val);
67 p++;
68 }
69}
70
71/**
72 * t3_set_reg_field - set a register field to a value
73 * @adapter: the adapter to program
74 * @addr: the register address
75 * @mask: specifies the portion of the register to modify
76 * @val: the new value for the register field
77 *
78 * Sets a register field specified by the supplied mask to the
79 * given value.
80 */
81void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
82 u32 val)
83{
84 u32 v = t3_read_reg(adapter, addr) & ~mask;
85
86 t3_write_reg(adapter, addr, v | val);
87 t3_read_reg(adapter, addr); /* flush */
88}
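The mask selects the bits to be rewritten and val supplies their new contents, so a multi-bit field is usually updated by passing the field's full mask and new value; a sketch with a hypothetical field FOO built from the usual V_/M_ macros (names are illustrative, not from regs.h):

	/* rewrite only the FOO field of a hypothetical config register */
	t3_set_reg_field(adapter, A_FOO_CFG, V_FOO(M_FOO), V_FOO(new_foo));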
89
90/**
91 * t3_read_indirect - read indirectly addressed registers
92 * @adap: the adapter
93 * @addr_reg: register holding the indirect address
94 * @data_reg: register holding the value of the indirect register
95 * @vals: where the read register values are stored
96 * @nregs: how many indirect registers to read
97 * @start_idx: index of first indirect register to read
98 *
99 * Reads registers that are accessed indirectly through an address/data
100 * register pair.
101 */
102void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
103 unsigned int data_reg, u32 *vals, unsigned int nregs,
104 unsigned int start_idx)
105{
106 while (nregs--) {
107 t3_write_reg(adap, addr_reg, start_idx);
108 *vals++ = t3_read_reg(adap, data_reg);
109 start_idx++;
110 }
111}
112
113/**
114 * t3_mc7_bd_read - read from MC7 through backdoor accesses
115 * @mc7: identifies MC7 to read from
116 * @start: index of first 64-bit word to read
117 * @n: number of 64-bit words to read
118 * @buf: where to store the read result
119 *
120 * Read n 64-bit words from MC7 starting at word start, using backdoor
121 * accesses.
122 */
123int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
124 u64 *buf)
125{
126 static const int shift[] = { 0, 0, 16, 24 };
127 static const int step[] = { 0, 32, 16, 8 };
128
129 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
130 struct adapter *adap = mc7->adapter;
131
132 if (start >= size64 || start + n > size64)
133 return -EINVAL;
134
135 start *= (8 << mc7->width);
136 while (n--) {
137 int i;
138 u64 val64 = 0;
139
140 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
141 int attempts = 10;
142 u32 val;
143
144 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
145 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
146 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
147 while ((val & F_BUSY) && attempts--)
148 val = t3_read_reg(adap,
149 mc7->offset + A_MC7_BD_OP);
150 if (val & F_BUSY)
151 return -EIO;
152
153 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
154 if (mc7->width == 0) {
155 val64 = t3_read_reg(adap,
156 mc7->offset +
157 A_MC7_BD_DATA0);
158 val64 |= (u64) val << 32;
159 } else {
160 if (mc7->width > 1)
161 val >>= shift[mc7->width];
162 val64 |= (u64) val << (step[mc7->width] * i);
163 }
164 start += 8;
165 }
166 *buf++ = val64;
167 }
168 return 0;
169}
170
171/*
172 * Initialize MI1.
173 */
174static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
175{
176 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
177 u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
178 V_CLKDIV(clkdiv);
179
180 if (!(ai->caps & SUPPORTED_10000baseT_Full))
181 val |= V_ST(1);
182 t3_write_reg(adap, A_MI1_CFG, val);
183}
184
185#define MDIO_ATTEMPTS 10
186
187/*
188 * MI1 read/write operations for direct-addressed PHYs.
189 */
190static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
191 int reg_addr, unsigned int *valp)
192{
193 int ret;
194 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
195
196 if (mmd_addr)
197 return -EINVAL;
198
199 mutex_lock(&adapter->mdio_lock);
200 t3_write_reg(adapter, A_MI1_ADDR, addr);
201 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
202 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
203 if (!ret)
204 *valp = t3_read_reg(adapter, A_MI1_DATA);
205 mutex_unlock(&adapter->mdio_lock);
206 return ret;
207}
208
209static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
210 int reg_addr, unsigned int val)
211{
212 int ret;
213 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
214
215 if (mmd_addr)
216 return -EINVAL;
217
218 mutex_lock(&adapter->mdio_lock);
219 t3_write_reg(adapter, A_MI1_ADDR, addr);
220 t3_write_reg(adapter, A_MI1_DATA, val);
221 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
222 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
223 mutex_unlock(&adapter->mdio_lock);
224 return ret;
225}
226
227static const struct mdio_ops mi1_mdio_ops = {
228 mi1_read,
229 mi1_write
230};
231
232/*
233 * MI1 read/write operations for indirect-addressed PHYs.
234 */
235static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
236 int reg_addr, unsigned int *valp)
237{
238 int ret;
239 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
240
241 mutex_lock(&adapter->mdio_lock);
242 t3_write_reg(adapter, A_MI1_ADDR, addr);
243 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
244 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
245 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
246 if (!ret) {
247 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
248 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
249 MDIO_ATTEMPTS, 20);
250 if (!ret)
251 *valp = t3_read_reg(adapter, A_MI1_DATA);
252 }
253 mutex_unlock(&adapter->mdio_lock);
254 return ret;
255}
256
257static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
258 int reg_addr, unsigned int val)
259{
260 int ret;
261 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
262
263 mutex_lock(&adapter->mdio_lock);
264 t3_write_reg(adapter, A_MI1_ADDR, addr);
265 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
266 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
267 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
268 if (!ret) {
269 t3_write_reg(adapter, A_MI1_DATA, val);
270 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
271 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
272 MDIO_ATTEMPTS, 20);
273 }
274 mutex_unlock(&adapter->mdio_lock);
275 return ret;
276}
277
278static const struct mdio_ops mi1_mdio_ext_ops = {
279 mi1_ext_read,
280 mi1_ext_write
281};
282
283/**
284 * t3_mdio_change_bits - modify the value of a PHY register
285 * @phy: the PHY to operate on
286 * @mmd: the device address
287 * @reg: the register address
288 * @clear: what part of the register value to mask off
289 * @set: what part of the register value to set
290 *
291 * Changes the value of a PHY register by applying a mask to its current
292 * value and ORing the result with a new value.
293 */
294int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
295 unsigned int set)
296{
297 int ret;
298 unsigned int val;
299
300 ret = mdio_read(phy, mmd, reg, &val);
301 if (!ret) {
302 val &= ~clear;
303 ret = mdio_write(phy, mmd, reg, val | set);
304 }
305 return ret;
306}
307
308/**
309 * t3_phy_reset - reset a PHY block
310 * @phy: the PHY to operate on
311 * @mmd: the device address of the PHY block to reset
312 * @wait: how long to wait for the reset to complete in 1ms increments
313 *
314 * Resets a PHY block and optionally waits for the reset to complete.
315 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
316 * for 10G PHYs.
317 */
318int t3_phy_reset(struct cphy *phy, int mmd, int wait)
319{
320 int err;
321 unsigned int ctl;
322
323 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
324 if (err || !wait)
325 return err;
326
327 do {
328 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
329 if (err)
330 return err;
331 ctl &= BMCR_RESET;
332 if (ctl)
333 msleep(1);
334 } while (ctl && --wait);
335
336 return ctl ? -1 : 0;
337}
338
339/**
340 * t3_phy_advertise - set the PHY advertisement registers for autoneg
341 * @phy: the PHY to operate on
342 * @advert: bitmap of capabilities the PHY should advertise
343 *
344 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
345 * requested capabilities.
346 */
347int t3_phy_advertise(struct cphy *phy, unsigned int advert)
348{
349 int err;
350 unsigned int val = 0;
351
352 err = mdio_read(phy, 0, MII_CTRL1000, &val);
353 if (err)
354 return err;
355
356 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
357 if (advert & ADVERTISED_1000baseT_Half)
358 val |= ADVERTISE_1000HALF;
359 if (advert & ADVERTISED_1000baseT_Full)
360 val |= ADVERTISE_1000FULL;
361
362 err = mdio_write(phy, 0, MII_CTRL1000, val);
363 if (err)
364 return err;
365
366 val = 1;
367 if (advert & ADVERTISED_10baseT_Half)
368 val |= ADVERTISE_10HALF;
369 if (advert & ADVERTISED_10baseT_Full)
370 val |= ADVERTISE_10FULL;
371 if (advert & ADVERTISED_100baseT_Half)
372 val |= ADVERTISE_100HALF;
373 if (advert & ADVERTISED_100baseT_Full)
374 val |= ADVERTISE_100FULL;
375 if (advert & ADVERTISED_Pause)
376 val |= ADVERTISE_PAUSE_CAP;
377 if (advert & ADVERTISED_Asym_Pause)
378 val |= ADVERTISE_PAUSE_ASYM;
379 return mdio_write(phy, 0, MII_ADVERTISE, val);
380}
381
382/**
383 * t3_set_phy_speed_duplex - force PHY speed and duplex
384 * @phy: the PHY to operate on
385 * @speed: requested PHY speed
386 * @duplex: requested PHY duplex
387 *
388 * Force a 10/100/1000 PHY's speed and duplex. This also disables
389 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
390 */
391int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
392{
393 int err;
394 unsigned int ctl;
395
396 err = mdio_read(phy, 0, MII_BMCR, &ctl);
397 if (err)
398 return err;
399
400 if (speed >= 0) {
401 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
402 if (speed == SPEED_100)
403 ctl |= BMCR_SPEED100;
404 else if (speed == SPEED_1000)
405 ctl |= BMCR_SPEED1000;
406 }
407 if (duplex >= 0) {
408 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
409 if (duplex == DUPLEX_FULL)
410 ctl |= BMCR_FULLDPLX;
411 }
412 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
413 ctl |= BMCR_ANENABLE;
414 return mdio_write(phy, 0, MII_BMCR, ctl);
415}
416
417static const struct adapter_info t3_adap_info[] = {
418 {2, 0, 0, 0,
419 F_GPIO2_OEN | F_GPIO4_OEN |
420 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
421 SUPPORTED_OFFLOAD,
422 &mi1_mdio_ops, "Chelsio PE9000"},
423 {2, 0, 0, 0,
424 F_GPIO2_OEN | F_GPIO4_OEN |
425 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
426 SUPPORTED_OFFLOAD,
427 &mi1_mdio_ops, "Chelsio T302"},
428 {1, 0, 0, 0,
429 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
430 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
431 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
432 &mi1_mdio_ext_ops, "Chelsio T310"},
433 {2, 0, 0, 0,
434 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
435 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
436 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
437 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
438 &mi1_mdio_ext_ops, "Chelsio T320"},
439};
440
441/*
442 * Return the adapter_info structure with a given index. Out-of-range indices
443 * return NULL.
444 */
445const struct adapter_info *t3_get_adapter_info(unsigned int id)
446{
447 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
448}
449
450#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
451 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
452#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)
453
454static const struct port_type_info port_types[] = {
455 {NULL},
456 {t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
457 "10GBASE-XR"},
458 {t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
459 "10/100/1000BASE-T"},
460 {NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
461 "10/100/1000BASE-T"},
462 {t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
463 {NULL, CAPS_10G, "10GBASE-KX4"},
464 {t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
465 {t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
466 "10GBASE-SR"},
467 {NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
468};
469
470#undef CAPS_1G
471#undef CAPS_10G
472
473#define VPD_ENTRY(name, len) \
474 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
475
476/*
477 * Partial EEPROM Vital Product Data structure. Includes only the ID and
478 * VPD-R sections.
479 */
480struct t3_vpd {
481 u8 id_tag;
482 u8 id_len[2];
483 u8 id_data[16];
484 u8 vpdr_tag;
485 u8 vpdr_len[2];
486 VPD_ENTRY(pn, 16); /* part number */
487 VPD_ENTRY(ec, 16); /* EC level */
488 VPD_ENTRY(sn, 16); /* serial number */
489 VPD_ENTRY(na, 12); /* MAC address base */
490 VPD_ENTRY(cclk, 6); /* core clock */
491 VPD_ENTRY(mclk, 6); /* mem clock */
492 VPD_ENTRY(uclk, 6); /* uP clk */
493 VPD_ENTRY(mdc, 6); /* MDIO clk */
494 VPD_ENTRY(mt, 2); /* mem timing */
495 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
496 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
497 VPD_ENTRY(port0, 2); /* PHY0 complex */
498 VPD_ENTRY(port1, 2); /* PHY1 complex */
499 VPD_ENTRY(port2, 2); /* PHY2 complex */
500 VPD_ENTRY(port3, 2); /* PHY3 complex */
501 VPD_ENTRY(rv, 1); /* csum */
502 u32 pad; /* for multiple-of-4 sizing and alignment */
503};
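Each VPD_ENTRY(name, len) above expands to the standard VPD-R keyword layout: a 2-byte keyword, a 1-byte length, and len data bytes. For example, VPD_ENTRY(sn, 16) becomes:

	u8 sn_kword[2];	/* keyword bytes, presumably "SN" */
	u8 sn_len;	/* length of the data that follows */
	u8 sn_data[16];	/* serial number characters */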
504
505#define EEPROM_MAX_POLL 4
506#define EEPROM_STAT_ADDR 0x4000
507#define VPD_BASE 0xc00
508
509/**
510 * t3_seeprom_read - read a VPD EEPROM location
511 * @adapter: adapter to read
512 * @addr: EEPROM address
513 * @data: where to store the read data
514 *
515 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
516 * VPD ROM capability. A zero is written to the flag bit when the
517 * address is written to the control register. The hardware device will
518 * set the flag to 1 when 4 bytes have been read into the data register.
519 */
520int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
521{
522 u16 val;
523 int attempts = EEPROM_MAX_POLL;
524 unsigned int base = adapter->params.pci.vpd_cap_addr;
525
526 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
527 return -EINVAL;
528
529 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
530 do {
531 udelay(10);
532 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
533 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
534
535 if (!(val & PCI_VPD_ADDR_F)) {
536 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
537 return -EIO;
538 }
539 pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
540 *data = le32_to_cpu(*data);
541 return 0;
542}
543
544/**
545 * t3_seeprom_write - write a VPD EEPROM location
546 * @adapter: adapter to write
547 * @addr: EEPROM address
548 * @data: value to write
549 *
550 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
551 * VPD ROM capability.
552 */
553int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
554{
555 u16 val;
556 int attempts = EEPROM_MAX_POLL;
557 unsigned int base = adapter->params.pci.vpd_cap_addr;
558
559 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
560 return -EINVAL;
561
562 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
563 cpu_to_le32(data));
564 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
565 addr | PCI_VPD_ADDR_F);
566 do {
567 msleep(1);
568 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
569 } while ((val & PCI_VPD_ADDR_F) && --attempts);
570
571 if (val & PCI_VPD_ADDR_F) {
572 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
573 return -EIO;
574 }
575 return 0;
576}
577
578/**
579 * t3_seeprom_wp - enable/disable EEPROM write protection
580 * @adapter: the adapter
581 * @enable: 1 to enable write protection, 0 to disable it
582 *
583 * Enables or disables write protection on the serial EEPROM.
584 */
585int t3_seeprom_wp(struct adapter *adapter, int enable)
586{
587 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
588}
589
590/*
591 * Convert a character holding a hex digit to a number.
592 */
593static unsigned int hex2int(unsigned char c)
594{
595 return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
596}
597
598/**
599 * get_vpd_params - read VPD parameters from VPD EEPROM
600 * @adapter: adapter to read
601 * @p: where to store the parameters
602 *
603 * Reads card parameters stored in VPD EEPROM.
604 */
605static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
606{
607 int i, addr, ret;
608 struct t3_vpd vpd;
609
610 /*
611 * Card information is normally at VPD_BASE but some early cards had
612 * it at 0.
613 */
614 ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
615 if (ret)
616 return ret;
617 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
618
619 for (i = 0; i < sizeof(vpd); i += 4) {
620 ret = t3_seeprom_read(adapter, addr + i,
621 (u32 *)((u8 *)&vpd + i));
622 if (ret)
623 return ret;
624 }
625
626 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
627 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
628 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
629 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
630 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
631
632 /* Old eeproms didn't have port information */
633 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
634 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
635 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
636 } else {
637 p->port_type[0] = hex2int(vpd.port0_data[0]);
638 p->port_type[1] = hex2int(vpd.port1_data[0]);
639 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
640 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
641 }
642
643 for (i = 0; i < 6; i++)
644 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
645 hex2int(vpd.na_data[2 * i + 1]);
646 return 0;
647}
648
649/* serial flash and firmware constants */
650enum {
651 SF_ATTEMPTS = 5, /* max retries for SF1 operations */
652 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
653 SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
654
655 /* flash command opcodes */
656 SF_PROG_PAGE = 2, /* program page */
657 SF_WR_DISABLE = 4, /* disable writes */
658 SF_RD_STATUS = 5, /* read status register */
659 SF_WR_ENABLE = 6, /* enable writes */
660 SF_RD_DATA_FAST = 0xb, /* read flash */
661 SF_ERASE_SECTOR = 0xd8, /* erase sector */
662
663 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
664 FW_VERS_ADDR = 0x77ffc /* flash address holding FW version */
665};
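A quick worked check of the layout these constants imply (not part of the patch):

	/* SF_SIZE == 8 * SF_SEC_SIZE: 512 KB serial flash                    */
	/* FW_FLASH_BOOT_ADDR >> 16 == 7: FW image lives in the last sector   */
	/* FW_VERS_ADDR + 4 - FW_FLASH_BOOT_ADDR == 0x8000: 32 KB FW window   */

t3_load_fw below relies on the middle relation (FW_FLASH_BOOT_ADDR >> 16) to erase just that one sector before programming.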
666
667/**
668 * sf1_read - read data from the serial flash
669 * @adapter: the adapter
670 * @byte_cnt: number of bytes to read
671 * @cont: whether another operation will be chained
672 * @valp: where to store the read data
673 *
674 * Reads up to 4 bytes of data from the serial flash. The location of
675 * the read needs to be specified prior to calling this by issuing the
676 * appropriate commands to the serial flash.
677 */
678static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
679 u32 *valp)
680{
681 int ret;
682
683 if (!byte_cnt || byte_cnt > 4)
684 return -EINVAL;
685 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
686 return -EBUSY;
687 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
688 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
689 if (!ret)
690 *valp = t3_read_reg(adapter, A_SF_DATA);
691 return ret;
692}
693
694/**
695 * sf1_write - write data to the serial flash
696 * @adapter: the adapter
697 * @byte_cnt: number of bytes to write
698 * @cont: whether another operation will be chained
699 * @val: value to write
700 *
701 * Writes up to 4 bytes of data to the serial flash. The location of
702 * the write needs to be specified prior to calling this by issuing the
703 * appropriate commands to the serial flash.
704 */
705static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
706 u32 val)
707{
708 if (!byte_cnt || byte_cnt > 4)
709 return -EINVAL;
710 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
711 return -EBUSY;
712 t3_write_reg(adapter, A_SF_DATA, val);
713 t3_write_reg(adapter, A_SF_OP,
714 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
715 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
716}
717
718/**
719 * flash_wait_op - wait for a flash operation to complete
720 * @adapter: the adapter
721 * @attempts: max number of polls of the status register
722 * @delay: delay between polls in ms
723 *
724 * Wait for a flash operation to complete by polling the status register.
725 */
726static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
727{
728 int ret;
729 u32 status;
730
731 while (1) {
732 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
733 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
734 return ret;
735 if (!(status & 1))
736 return 0;
737 if (--attempts == 0)
738 return -EAGAIN;
739 if (delay)
740 msleep(delay);
741 }
742}
743
744/**
745 * t3_read_flash - read words from serial flash
746 * @adapter: the adapter
747 * @addr: the start address for the read
748 * @nwords: how many 32-bit words to read
749 * @data: where to store the read data
750 * @byte_oriented: whether to store data as bytes or as words
751 *
752 * Read the specified number of 32-bit words from the serial flash.
753 * If @byte_oriented is set the read data is stored as a byte array
754 * (i.e., big-endian), otherwise as 32-bit words in the platform's
755 * natural endianness.
756 */
757int t3_read_flash(struct adapter *adapter, unsigned int addr,
758 unsigned int nwords, u32 *data, int byte_oriented)
759{
760 int ret;
761
762 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
763 return -EINVAL;
764
765 addr = swab32(addr) | SF_RD_DATA_FAST;
766
767 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
768 (ret = sf1_read(adapter, 1, 1, data)) != 0)
769 return ret;
770
771 for (; nwords; nwords--, data++) {
772 ret = sf1_read(adapter, 4, nwords > 1, data);
773 if (ret)
774 return ret;
775 if (byte_oriented)
776 *data = htonl(*data);
777 }
778 return 0;
779}
780
781/**
782 * t3_write_flash - write up to a page of data to the serial flash
783 * @adapter: the adapter
784 * @addr: the start address to write
785 * @n: length of data to write
786 * @data: the data to write
787 *
788 * Writes up to a page of data (256 bytes) to the serial flash starting
789 * at the given address.
790 */
791static int t3_write_flash(struct adapter *adapter, unsigned int addr,
792 unsigned int n, const u8 *data)
793{
794 int ret;
795 u32 buf[64];
796 unsigned int i, c, left, val, offset = addr & 0xff;
797
798 if (addr + n > SF_SIZE || offset + n > 256)
799 return -EINVAL;
800
801 val = swab32(addr) | SF_PROG_PAGE;
802
803 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
804 (ret = sf1_write(adapter, 4, 1, val)) != 0)
805 return ret;
806
807 for (left = n; left; left -= c) {
808 c = min(left, 4U);
809 for (val = 0, i = 0; i < c; ++i)
810 val = (val << 8) + *data++;
811
812 ret = sf1_write(adapter, c, c != left, val);
813 if (ret)
814 return ret;
815 }
816 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
817 return ret;
818
819 /* Read the page to verify the write succeeded */
820 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
821 if (ret)
822 return ret;
823
824 if (memcmp(data - n, (u8 *) buf + offset, n))
825 return -EIO;
826 return 0;
827}
828
829/**
830 * t3_get_fw_version - read the firmware version
831 * @adapter: the adapter
832 * @vers: where to place the version
833 *
834 * Reads the FW version from flash.
835 */
836int t3_get_fw_version(struct adapter *adapter, u32 *vers)
837{
838 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
839}
840
841/**
842 * t3_check_fw_version - check if the FW is compatible with this driver
843 * @adapter: the adapter
844 *
845 * Checks if an adapter's FW is compatible with the driver. Returns 0
846 * if the versions are compatible, a negative error otherwise.
847 */
848int t3_check_fw_version(struct adapter *adapter)
849{
850 int ret;
851 u32 vers;
852
853 ret = t3_get_fw_version(adapter, &vers);
854 if (ret)
855 return ret;
856
857 /* Minor 0xfff means the FW is an internal development-only version. */
858 if ((vers & 0xfff) == 0xfff)
859 return 0;
860
861 if (vers == 0x1002009)
862 return 0;
863
864 CH_ERR(adapter, "found wrong FW version, driver needs version 2.9\n");
865 return -EINVAL;
866}
867
868/**
869 * t3_flash_erase_sectors - erase a range of flash sectors
870 * @adapter: the adapter
871 * @start: the first sector to erase
872 * @end: the last sector to erase
873 *
874 * Erases the sectors in the given range.
875 */
876static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
877{
878 while (start <= end) {
879 int ret;
880
881 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
882 (ret = sf1_write(adapter, 4, 0,
883 SF_ERASE_SECTOR | (start << 8))) != 0 ||
884 (ret = flash_wait_op(adapter, 5, 500)) != 0)
885 return ret;
886 start++;
887 }
888 return 0;
889}
890
891/**
892 * t3_load_fw - download firmware
893 * @adapter: the adapter
894 * @fw_data: the firmware image to write
895 * @size: image size
896 *
897 * Write the supplied firmware image to the card's serial flash.
898 * The FW image has the following sections: @size - 8 bytes of code and
899 * data, followed by 4 bytes of FW version, followed by the 32-bit
900 * 1's complement checksum of the whole image.
901 */
902int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
903{
904 u32 csum;
905 unsigned int i;
906 const u32 *p = (const u32 *)fw_data;
907 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
908
909 if (size & 3)
910 return -EINVAL;
911 if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
912 return -EFBIG;
913
914 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
915 csum += ntohl(p[i]);
916 if (csum != 0xffffffff) {
917 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
918 csum);
919 return -EINVAL;
920 }
921
922 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
923 if (ret)
924 goto out;
925
926 size -= 8; /* trim off version and checksum */
927 for (addr = FW_FLASH_BOOT_ADDR; size;) {
928 unsigned int chunk_size = min(size, 256U);
929
930 ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
931 if (ret)
932 goto out;
933
934 addr += chunk_size;
935 fw_data += chunk_size;
936 size -= chunk_size;
937 }
938
939 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
940out:
941 if (ret)
942 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
943 return ret;
944}
945
946#define CIM_CTL_BASE 0x2000
947
948/**
949 * t3_cim_ctl_blk_read - read a block from CIM control region
950 *
951 * @adap: the adapter
952 * @addr: the start address within the CIM control region
953 * @n: number of words to read
954 * @valp: where to store the result
955 *
956 * Reads a block of 4-byte words from the CIM control region.
957 */
958int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
959 unsigned int n, unsigned int *valp)
960{
961 int ret = 0;
962
963 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
964 return -EBUSY;
965
966 for ( ; !ret && n--; addr += 4) {
967 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
968 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
969 0, 5, 2);
970 if (!ret)
971 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
972 }
973 return ret;
974}
975
976
977/**
978 * t3_link_changed - handle interface link changes
979 * @adapter: the adapter
980 * @port_id: the port index that changed link state
981 *
982 * Called when a port's link settings change to propagate the new values
983 * to the associated PHY and MAC. After performing the common tasks it
984 * invokes an OS-specific handler.
985 */
986void t3_link_changed(struct adapter *adapter, int port_id)
987{
988 int link_ok, speed, duplex, fc;
989 struct port_info *pi = adap2pinfo(adapter, port_id);
990 struct cphy *phy = &pi->phy;
991 struct cmac *mac = &pi->mac;
992 struct link_config *lc = &pi->link_config;
993
994 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
995
996 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
997 uses_xaui(adapter)) {
998 if (link_ok)
999 t3b_pcs_reset(mac);
1000 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1001 link_ok ? F_TXACTENABLE | F_RXEN : 0);
1002 }
1003 lc->link_ok = link_ok;
1004 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1005 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1006 if (lc->requested_fc & PAUSE_AUTONEG)
1007 fc &= lc->requested_fc;
1008 else
1009 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1010
1011 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1012 /* Set MAC speed, duplex, and flow control to match PHY. */
1013 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1014 lc->fc = fc;
1015 }
1016
1017 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1018}
1019
1020/**
1021 * t3_link_start - apply link configuration to MAC/PHY
1022 * @phy: the PHY to setup
1023 * @mac: the MAC to setup
1024 * @lc: the requested link configuration
1025 *
1026 * Set up a port's MAC and PHY according to a desired link configuration.
1027 * - If the PHY can auto-negotiate, first decide what to advertise, then
1028 * enable/disable auto-negotiation as desired, and reset.
1029 * - If the PHY does not auto-negotiate, just reset it.
1030 * - If auto-negotiation is off, set the MAC to the proper speed/duplex/FC;
1031 * otherwise do it later based on the outcome of auto-negotiation.
1032 */
1033int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1034{
1035 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1036
1037 lc->link_ok = 0;
1038 if (lc->supported & SUPPORTED_Autoneg) {
1039 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1040 if (fc) {
1041 lc->advertising |= ADVERTISED_Asym_Pause;
1042 if (fc & PAUSE_RX)
1043 lc->advertising |= ADVERTISED_Pause;
1044 }
1045 phy->ops->advertise(phy, lc->advertising);
1046
1047 if (lc->autoneg == AUTONEG_DISABLE) {
1048 lc->speed = lc->requested_speed;
1049 lc->duplex = lc->requested_duplex;
1050 lc->fc = (unsigned char)fc;
1051 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1052 fc);
1053 /* Also disables autoneg */
1054 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1055 phy->ops->reset(phy, 0);
1056 } else
1057 phy->ops->autoneg_enable(phy);
1058 } else {
1059 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1060 lc->fc = (unsigned char)fc;
1061 phy->ops->reset(phy, 0);
1062 }
1063 return 0;
1064}
1065
1066/**
1067 * t3_set_vlan_accel - control HW VLAN extraction
1068 * @adapter: the adapter
1069 * @ports: bitmap of adapter ports to operate on
1070 * @on: enable (1) or disable (0) HW VLAN extraction
1071 *
1072 * Enables or disables HW extraction of VLAN tags for the given ports.
1073 */
1074void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1075{
1076 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1077 ports << S_VLANEXTRACTIONENABLE,
1078 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1079}
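For example, enabling extraction on port 0 only would look like this sketch:

	t3_set_vlan_accel(adapter, 1 << 0, 1);	/* bit 0 of the port bitmap = port 0 */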
1080
1081struct intr_info {
1082 unsigned int mask; /* bits to check in interrupt status */
1083 const char *msg; /* message to print or NULL */
1084 short stat_idx; /* stat counter to increment or -1 */
1085 unsigned short fatal:1; /* whether the condition reported is fatal */
1086};
1087
1088/**
1089 * t3_handle_intr_status - table driven interrupt handler
1090 * @adapter: the adapter that generated the interrupt
1091 * @reg: the interrupt status register to process
1092 * @mask: a mask to apply to the interrupt status
1093 * @acts: table of interrupt actions
1094 * @stats: statistics counters tracking interrupt occurrences
1095 *
1096 * A table driven interrupt handler that applies a set of masks to an
1097 * interrupt status word and performs the corresponding actions if the
1098 * interrupts described by the mask have occurred. The actions include
1099 * optionally printing a warning or alert message, and optionally
1100 * incrementing a stat counter. The table is terminated by an entry
1101 * specifying mask 0. Returns the number of fatal interrupt conditions.
1102 */
1103static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1104 unsigned int mask,
1105 const struct intr_info *acts,
1106 unsigned long *stats)
1107{
1108 int fatal = 0;
1109 unsigned int status = t3_read_reg(adapter, reg) & mask;
1110
1111 for (; acts->mask; ++acts) {
1112 if (!(status & acts->mask))
1113 continue;
1114 if (acts->fatal) {
1115 fatal++;
1116 CH_ALERT(adapter, "%s (0x%x)\n",
1117 acts->msg, status & acts->mask);
1118 } else if (acts->msg)
1119 CH_WARN(adapter, "%s (0x%x)\n",
1120 acts->msg, status & acts->mask);
1121 if (acts->stat_idx >= 0)
1122 stats[acts->stat_idx]++;
1123 }
1124 if (status) /* clear processed interrupts */
1125 t3_write_reg(adapter, reg, status);
1126 return fatal;
1127}
1128
1129#define SGE_INTR_MASK (F_RSPQDISABLED)
1130#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1131 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1132 F_NFASRCHFAIL)
1133#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1134#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1135 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1136 F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
1137#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1138 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1139 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1140 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1141 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1142 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1143#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1144 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1145 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1146 V_BISTERR(M_BISTERR) | F_PEXERR)
1147#define ULPRX_INTR_MASK F_PARERR
1148#define ULPTX_INTR_MASK 0
1149#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
1150 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1151 F_ZERO_SWITCH_ERROR)
1152#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1153 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1154 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1155 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
1156#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1157 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1158 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1159#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1160 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1161 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1162#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1163 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1164 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1165 V_MCAPARERRENB(M_MCAPARERRENB))
1166#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1167 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1168 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1169 F_MPS0 | F_CPL_SWITCH)
1170
1171/*
1172 * Interrupt handler for the PCIX1 module.
1173 */
1174static void pci_intr_handler(struct adapter *adapter)
1175{
1176 static const struct intr_info pcix1_intr_info[] = {
1177 { F_PEXERR, "PCI PEX error", -1, 1 },
1178 {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1179 {F_SIGTARABT, "PCI signaled target abort", -1, 1},
1180 {F_RCVTARABT, "PCI received target abort", -1, 1},
1181 {F_RCVMSTABT, "PCI received master abort", -1, 1},
1182 {F_SIGSYSERR, "PCI signaled system error", -1, 1},
1183 {F_DETPARERR, "PCI detected parity error", -1, 1},
1184 {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1185 {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1186 {F_RCVSPLCMPERR, "PCI received split completion error", -1,
1187 1},
1188 {F_DETCORECCERR, "PCI correctable ECC error",
1189 STAT_PCI_CORR_ECC, 0},
1190 {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1191 {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1192 {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1193 1},
1194 {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1195 1},
1196 {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1197 1},
1198 {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1199 "error", -1, 1},
1200 {0}
1201 };
1202
1203 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1204 pcix1_intr_info, adapter->irq_stats))
1205 t3_fatal_err(adapter);
1206}
1207
1208/*
1209 * Interrupt handler for the PCIE module.
1210 */
1211static void pcie_intr_handler(struct adapter *adapter)
1212{
1213 static const struct intr_info pcie_intr_info[] = {
1214 {F_UNXSPLCPLERRR,
1215 "PCI unexpected split completion DMA read error", -1, 1},
1216 {F_UNXSPLCPLERRC,
1217 "PCI unexpected split completion DMA command error", -1, 1},
1218 {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1219 {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1220 {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1221 {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1222 {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1223 "PCI MSI-X table/PBA parity error", -1, 1},
1224 {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
1225 {0}
1226 };
1227
1228 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1229 pcie_intr_info, adapter->irq_stats))
1230 t3_fatal_err(adapter);
1231}
1232
1233/*
1234 * TP interrupt handler.
1235 */
1236static void tp_intr_handler(struct adapter *adapter)
1237{
1238 static const struct intr_info tp_intr_info[] = {
1239 {0xffffff, "TP parity error", -1, 1},
1240 {0x1000000, "TP out of Rx pages", -1, 1},
1241 {0x2000000, "TP out of Tx pages", -1, 1},
1242 {0}
1243 };
1244
1245 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1246 tp_intr_info, NULL))
1247 t3_fatal_err(adapter);
1248}
1249
1250/*
1251 * CIM interrupt handler.
1252 */
1253static void cim_intr_handler(struct adapter *adapter)
1254{
1255 static const struct intr_info cim_intr_info[] = {
1256 {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1257 {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1258 {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1259 {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1260 {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1261 {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1262 {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1263 {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1264 {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1265 {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1266 {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1267 {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1268 {0}
1269 };
1270
1271 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1272 cim_intr_info, NULL))
1273 t3_fatal_err(adapter);
1274}
1275
1276/*
1277 * ULP RX interrupt handler.
1278 */
1279static void ulprx_intr_handler(struct adapter *adapter)
1280{
1281 static const struct intr_info ulprx_intr_info[] = {
1282 {F_PARERR, "ULP RX parity error", -1, 1},
1283 {0}
1284 };
1285
1286 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1287 ulprx_intr_info, NULL))
1288 t3_fatal_err(adapter);
1289}
1290
1291/*
1292 * ULP TX interrupt handler.
1293 */
1294static void ulptx_intr_handler(struct adapter *adapter)
1295{
1296 static const struct intr_info ulptx_intr_info[] = {
1297 {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1298 STAT_ULP_CH0_PBL_OOB, 0},
1299 {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1300 STAT_ULP_CH1_PBL_OOB, 0},
1301 {0}
1302 };
1303
1304 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1305 ulptx_intr_info, adapter->irq_stats))
1306 t3_fatal_err(adapter);
1307}
1308
1309#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1310 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1311 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1312 F_ICSPI1_TX_FRAMING_ERROR)
1313#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1314 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1315 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1316 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1317
1318/*
1319 * PM TX interrupt handler.
1320 */
1321static void pmtx_intr_handler(struct adapter *adapter)
1322{
1323 static const struct intr_info pmtx_intr_info[] = {
1324 {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1325 {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1326 {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1327 {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1328 "PMTX ispi parity error", -1, 1},
1329 {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1330 "PMTX ospi parity error", -1, 1},
1331 {0}
1332 };
1333
1334 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1335 pmtx_intr_info, NULL))
1336 t3_fatal_err(adapter);
1337}
1338
1339#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1340 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1341 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1342 F_IESPI1_TX_FRAMING_ERROR)
1343#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1344 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1345 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1346 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1347
1348/*
1349 * PM RX interrupt handler.
1350 */
1351static void pmrx_intr_handler(struct adapter *adapter)
1352{
1353 static const struct intr_info pmrx_intr_info[] = {
1354 {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1355 {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1356 {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1357 {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1358 "PMRX ispi parity error", -1, 1},
1359 {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1360 "PMRX ospi parity error", -1, 1},
1361 {0}
1362 };
1363
1364 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1365 pmrx_intr_info, NULL))
1366 t3_fatal_err(adapter);
1367}
1368
1369/*
1370 * CPL switch interrupt handler.
1371 */
1372static void cplsw_intr_handler(struct adapter *adapter)
1373{
1374 static const struct intr_info cplsw_intr_info[] = {
1375/* { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, */
1376 {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1377 {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1378 {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1379 {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1380 {0}
1381 };
1382
1383 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1384 cplsw_intr_info, NULL))
1385 t3_fatal_err(adapter);
1386}
1387
1388/*
1389 * MPS interrupt handler.
1390 */
1391static void mps_intr_handler(struct adapter *adapter)
1392{
1393 static const struct intr_info mps_intr_info[] = {
1394 {0x1ff, "MPS parity error", -1, 1},
1395 {0}
1396 };
1397
1398 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1399 mps_intr_info, NULL))
1400 t3_fatal_err(adapter);
1401}
1402
1403#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1404
1405/*
1406 * MC7 interrupt handler.
1407 */
1408static void mc7_intr_handler(struct mc7 *mc7)
1409{
1410 struct adapter *adapter = mc7->adapter;
1411 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1412
1413 if (cause & F_CE) {
1414 mc7->stats.corr_err++;
1415 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1416 "data 0x%x 0x%x 0x%x\n", mc7->name,
1417 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1418 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1419 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1420 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1421 }
1422
1423 if (cause & F_UE) {
1424 mc7->stats.uncorr_err++;
1425 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1426 "data 0x%x 0x%x 0x%x\n", mc7->name,
1427 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1428 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1429 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1430 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1431 }
1432
1433 if (G_PE(cause)) {
1434 mc7->stats.parity_err++;
1435 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1436 mc7->name, G_PE(cause));
1437 }
1438
1439 if (cause & F_AE) {
1440 u32 addr = 0;
1441
1442 if (adapter->params.rev > 0)
1443 addr = t3_read_reg(adapter,
1444 mc7->offset + A_MC7_ERR_ADDR);
1445 mc7->stats.addr_err++;
1446 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1447 mc7->name, addr);
1448 }
1449
1450 if (cause & MC7_INTR_FATAL)
1451 t3_fatal_err(adapter);
1452
1453 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1454}
1455
1456#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1457 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1458/*
1459 * XGMAC interrupt handler.
1460 */
1461static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1462{
1463 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1464 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
1465
1466 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1467 mac->stats.tx_fifo_parity_err++;
1468 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1469 }
1470 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1471 mac->stats.rx_fifo_parity_err++;
1472 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1473 }
1474 if (cause & F_TXFIFO_UNDERRUN)
1475 mac->stats.tx_fifo_urun++;
1476 if (cause & F_RXFIFO_OVERFLOW)
1477 mac->stats.rx_fifo_ovfl++;
1478 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1479 mac->stats.serdes_signal_loss++;
1480 if (cause & F_XAUIPCSCTCERR)
1481 mac->stats.xaui_pcs_ctc_err++;
1482 if (cause & F_XAUIPCSALIGNCHANGE)
1483 mac->stats.xaui_pcs_align_change++;
1484
1485 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1486 if (cause & XGM_INTR_FATAL)
1487 t3_fatal_err(adap);
1488 return cause != 0;
1489}
1490
1491/*
1492 * Interrupt handler for PHY events.
1493 */
1494int t3_phy_intr_handler(struct adapter *adapter)
1495{
1496 static const int intr_gpio_bits[] = { 8, 0x20 };
1497
1498 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1499
1500 for_each_port(adapter, i) {
1501 if (cause & intr_gpio_bits[i]) {
1502 struct cphy *phy = &adap2pinfo(adapter, i)->phy;
1503 int phy_cause = phy->ops->intr_handler(phy);
1504
1505 if (phy_cause & cphy_cause_link_change)
1506 t3_link_changed(adapter, i);
1507 if (phy_cause & cphy_cause_fifo_error)
1508 phy->fifo_errors++;
1509 }
1510 }
1511
1512 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1513 return 0;
1514}
1515
1516/*
1517 * T3 slow path (non-data) interrupt handler.
1518 */
1519int t3_slow_intr_handler(struct adapter *adapter)
1520{
1521 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1522
1523 cause &= adapter->slow_intr_mask;
1524 if (!cause)
1525 return 0;
1526 if (cause & F_PCIM0) {
1527 if (is_pcie(adapter))
1528 pcie_intr_handler(adapter);
1529 else
1530 pci_intr_handler(adapter);
1531 }
1532 if (cause & F_SGE3)
1533 t3_sge_err_intr_handler(adapter);
1534 if (cause & F_MC7_PMRX)
1535 mc7_intr_handler(&adapter->pmrx);
1536 if (cause & F_MC7_PMTX)
1537 mc7_intr_handler(&adapter->pmtx);
1538 if (cause & F_MC7_CM)
1539 mc7_intr_handler(&adapter->cm);
1540 if (cause & F_CIM)
1541 cim_intr_handler(adapter);
1542 if (cause & F_TP1)
1543 tp_intr_handler(adapter);
1544 if (cause & F_ULP2_RX)
1545 ulprx_intr_handler(adapter);
1546 if (cause & F_ULP2_TX)
1547 ulptx_intr_handler(adapter);
1548 if (cause & F_PM1_RX)
1549 pmrx_intr_handler(adapter);
1550 if (cause & F_PM1_TX)
1551 pmtx_intr_handler(adapter);
1552 if (cause & F_CPL_SWITCH)
1553 cplsw_intr_handler(adapter);
1554 if (cause & F_MPS0)
1555 mps_intr_handler(adapter);
1556 if (cause & F_MC5A)
1557 t3_mc5_intr_handler(&adapter->mc5);
1558 if (cause & F_XGMAC0_0)
1559 mac_intr_handler(adapter, 0);
1560 if (cause & F_XGMAC0_1)
1561 mac_intr_handler(adapter, 1);
1562 if (cause & F_T3DBG)
1563 t3_os_ext_intr_handler(adapter);
1564
1565 /* Clear the interrupts just processed. */
1566 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1567 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1568 return 1;
1569}
1570
1571/**
1572 * t3_intr_enable - enable interrupts
1573 * @adapter: the adapter whose interrupts should be enabled
1574 *
1575 * Enable interrupts by setting the interrupt enable registers of the
1576 * various HW modules and then enabling the top-level interrupt
1577 * concentrator.
1578 */
1579void t3_intr_enable(struct adapter *adapter)
1580{
1581 static const struct addr_val_pair intr_en_avp[] = {
1582 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1583 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1584 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1585 MC7_INTR_MASK},
1586 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1587 MC7_INTR_MASK},
1588 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1589 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1590 {A_TP_INT_ENABLE, 0x3bfffff},
1591 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1592 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1593 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1594 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1595 };
1596
1597 adapter->slow_intr_mask = PL_INTR_MASK;
1598
1599 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1600
1601 if (adapter->params.rev > 0) {
1602 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1603 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1604 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1605 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1606 F_PBL_BOUND_ERR_CH1);
1607 } else {
1608 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1609 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1610 }
1611
1612 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
1613 adapter_info(adapter)->gpio_intr);
1614 t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
1615 adapter_info(adapter)->gpio_intr);
1616 if (is_pcie(adapter))
1617 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1618 else
1619 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1620 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1621 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1622}
1623
1624/**
1625 * t3_intr_disable - disable a card's interrupts
1626 * @adapter: the adapter whose interrupts should be disabled
1627 *
1628 * Disable interrupts. We only disable the top-level interrupt
1629 * concentrator and the SGE data interrupts.
1630 */
1631void t3_intr_disable(struct adapter *adapter)
1632{
1633 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1634 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1635 adapter->slow_intr_mask = 0;
1636}
1637
1638/**
1639 * t3_intr_clear - clear all interrupts
1640 * @adapter: the adapter whose interrupts should be cleared
1641 *
1642 * Clears all interrupts.
1643 */
1644void t3_intr_clear(struct adapter *adapter)
1645{
1646 static const unsigned int cause_reg_addr[] = {
1647 A_SG_INT_CAUSE,
1648 A_SG_RSPQ_FL_STATUS,
1649 A_PCIX_INT_CAUSE,
1650 A_MC7_INT_CAUSE,
1651 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1652 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1653 A_CIM_HOST_INT_CAUSE,
1654 A_TP_INT_CAUSE,
1655 A_MC5_DB_INT_CAUSE,
1656 A_ULPRX_INT_CAUSE,
1657 A_ULPTX_INT_CAUSE,
1658 A_CPL_INTR_CAUSE,
1659 A_PM1_TX_INT_CAUSE,
1660 A_PM1_RX_INT_CAUSE,
1661 A_MPS_INT_CAUSE,
1662 A_T3DBG_INT_CAUSE,
1663 };
1664 unsigned int i;
1665
1666 /* Clear PHY and MAC interrupts for each port. */
1667 for_each_port(adapter, i)
1668 t3_port_intr_clear(adapter, i);
1669
1670 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1671 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1672
1673 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1674 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1675}
1676
1677/**
1678 * t3_port_intr_enable - enable port-specific interrupts
1679 * @adapter: associated adapter
1680 * @idx: index of port whose interrupts should be enabled
1681 *
1682 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1683 * adapter port.
1684 */
1685void t3_port_intr_enable(struct adapter *adapter, int idx)
1686{
1687 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1688
1689 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1690 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1691 phy->ops->intr_enable(phy);
1692}
1693
1694/**
1695 * t3_port_intr_disable - disable port-specific interrupts
1696 * @adapter: associated adapter
1697 * @idx: index of port whose interrupts should be disabled
1698 *
1699 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1700 * adapter port.
1701 */
1702void t3_port_intr_disable(struct adapter *adapter, int idx)
1703{
1704 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1705
1706 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1707 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1708 phy->ops->intr_disable(phy);
1709}
1710
1711/**
1712 * t3_port_intr_clear - clear port-specific interrupts
1713 * @adapter: associated adapter
1714 * @idx: index of port whose interrupts to clear
1715 *
1716 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1717 * adapter port.
1718 */
1719void t3_port_intr_clear(struct adapter *adapter, int idx)
1720{
1721 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1722
1723 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1724 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1725 phy->ops->intr_clear(phy);
1726}
1727
1728/**
1729 * t3_sge_write_context - write an SGE context
1730 * @adapter: the adapter
1731 * @id: the context id
1732 * @type: the context type
1733 *
1734 * Program an SGE context with the values already loaded in the
1735 * CONTEXT_DATA0-3 registers.
1736 */
1737static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
1738 unsigned int type)
1739{
1740 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1741 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1742 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1743 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1744 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1745 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1746 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1747 0, 5, 1);
1748}
1749
1750/**
1751 * t3_sge_init_ecntxt - initialize an SGE egress context
1752 * @adapter: the adapter to configure
1753 * @id: the context id
1754 * @gts_enable: whether to enable GTS for the context
1755 * @type: the egress context type
1756 * @respq: associated response queue
1757 * @base_addr: base address of queue
1758 * @size: number of queue entries
1759 * @token: uP token
1760 * @gen: initial generation value for the context
1761 * @cidx: consumer pointer
1762 *
1763 * Initialize an SGE egress context and make it ready for use. If the
1764 * platform allows concurrent context operations, the caller is
1765 * responsible for appropriate locking.
1766 */
1767int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
1768 enum sge_context_type type, int respq, u64 base_addr,
1769 unsigned int size, unsigned int token, int gen,
1770 unsigned int cidx)
1771{
1772 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1773
1774 if (base_addr & 0xfff) /* must be 4K aligned */
1775 return -EINVAL;
1776 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1777 return -EBUSY;
1778
1779 base_addr >>= 12;
1780 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1781 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1782 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1783 V_EC_BASE_LO(base_addr & 0xffff));
1784 base_addr >>= 16;
1785 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
1786 base_addr >>= 32;
1787 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1788 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
1789 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
1790 F_EC_VALID);
1791 return t3_sge_write_context(adapter, id, F_EGRESS);
1792}
1793
1794/**
1795 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
1796 * @adapter: the adapter to configure
1797 * @id: the context id
1798 * @gts_enable: whether to enable GTS for the context
1799 * @base_addr: base address of queue
1800 * @size: number of queue entries
1801 * @bsize: size of each buffer for this queue
1802 * @cong_thres: threshold to signal congestion to upstream producers
1803 * @gen: initial generation value for the context
1804 * @cidx: consumer pointer
1805 *
1806 * Initialize an SGE free list context and make it ready for use. The
1807 * caller is responsible for ensuring only one context operation occurs
1808 * at a time.
1809 */
1810int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
1811 int gts_enable, u64 base_addr, unsigned int size,
1812 unsigned int bsize, unsigned int cong_thres, int gen,
1813 unsigned int cidx)
1814{
1815 if (base_addr & 0xfff) /* must be 4K aligned */
1816 return -EINVAL;
1817 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1818 return -EBUSY;
1819
1820 base_addr >>= 12;
1821 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
1822 base_addr >>= 32;
1823 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
1824 V_FL_BASE_HI((u32) base_addr) |
1825 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
1826 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
1827 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
1828 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
1829 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1830 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
1831 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
1832 return t3_sge_write_context(adapter, id, F_FREELIST);
1833}
1834
1835/**
1836 * t3_sge_init_rspcntxt - initialize an SGE response queue context
1837 * @adapter: the adapter to configure
1838 * @id: the context id
1839 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
1840 * @base_addr: base address of queue
1841 * @size: number of queue entries
1842 * @fl_thres: threshold for selecting the normal or jumbo free list
1843 * @gen: initial generation value for the context
1844 * @cidx: consumer pointer
1845 *
1846 * Initialize an SGE response queue context and make it ready for use.
1847 * The caller is responsible for ensuring only one context operation
1848 * occurs at a time.
1849 */
1850int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
1851 int irq_vec_idx, u64 base_addr, unsigned int size,
1852 unsigned int fl_thres, int gen, unsigned int cidx)
1853{
1854 unsigned int intr = 0;
1855
1856 if (base_addr & 0xfff) /* must be 4K aligned */
1857 return -EINVAL;
1858 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1859 return -EBUSY;
1860
1861 base_addr >>= 12;
1862 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
1863 V_CQ_INDEX(cidx));
1864 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1865 base_addr >>= 32;
1866 if (irq_vec_idx >= 0)
1867 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
1868 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1869 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
1870 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
1871 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
1872}
1873
1874/**
1875 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
1876 * @adapter: the adapter to configure
1877 * @id: the context id
1878 * @base_addr: base address of queue
1879 * @size: number of queue entries
1880 * @rspq: response queue for async notifications
1881 * @ovfl_mode: CQ overflow mode
1882 * @credits: completion queue credits
1883 * @credit_thres: the credit threshold
1884 *
1885 * Initialize an SGE completion queue context and make it ready for use.
1886 * The caller is responsible for ensuring only one context operation
1887 * occurs at a time.
1888 */
1889int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
1890 unsigned int size, int rspq, int ovfl_mode,
1891 unsigned int credits, unsigned int credit_thres)
1892{
1893 if (base_addr & 0xfff) /* must be 4K aligned */
1894 return -EINVAL;
1895 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1896 return -EBUSY;
1897
1898 base_addr >>= 12;
1899 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
1900 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1901 base_addr >>= 32;
1902 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1903 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
1904 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode));
1905 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
1906 V_CQ_CREDIT_THRES(credit_thres));
1907 return t3_sge_write_context(adapter, id, F_CQ);
1908}
1909
1910/**
1911 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
1912 * @adapter: the adapter
1913 * @id: the egress context id
1914 * @enable: enable (1) or disable (0) the context
1915 *
1916 * Enable or disable an SGE egress context. The caller is responsible for
1917 * ensuring only one context operation occurs at a time.
1918 */
1919int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
1920{
1921 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1922 return -EBUSY;
1923
1924 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1925 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1926 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
1927 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
1928 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
1929 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1930 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
1931 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1932 0, 5, 1);
1933}
1934
1935/**
1936 * t3_sge_disable_fl - disable an SGE free-buffer list
1937 * @adapter: the adapter
1938 * @id: the free list context id
1939 *
1940 * Disable an SGE free-buffer list. The caller is responsible for
1941 * ensuring only one context operation occurs at a time.
1942 */
1943int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
1944{
1945 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1946 return -EBUSY;
1947
1948 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1949 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1950 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
1951 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
1952 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
1953 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1954 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
1955 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1956 0, 5, 1);
1957}
1958
1959/**
1960 * t3_sge_disable_rspcntxt - disable an SGE response queue
1961 * @adapter: the adapter
1962 * @id: the response queue context id
1963 *
1964 * Disable an SGE response queue. The caller is responsible for
1965 * ensuring only one context operation occurs at a time.
1966 */
1967int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
1968{
1969 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1970 return -EBUSY;
1971
1972 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
1973 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1974 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
1975 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
1976 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
1977 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1978 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
1979 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1980 0, 5, 1);
1981}
1982
1983/**
1984 * t3_sge_disable_cqcntxt - disable an SGE completion queue
1985 * @adapter: the adapter
1986 * @id: the completion queue context id
1987 *
1988 * Disable an SGE completion queue. The caller is responsible for
1989 * ensuring only one context operation occurs at a time.
1990 */
1991int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
1992{
1993 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1994 return -EBUSY;
1995
1996 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
1997 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1998 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
1999 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2000 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2001 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2002 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2003 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2004 0, 5, 1);
2005}
2006
2007/**
2008 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2009 * @adapter: the adapter
2010 * @id: the context id
2011 * @op: the operation to perform
2012 *
2013 * Perform the selected operation on an SGE completion queue context.
2014 * The caller is responsible for ensuring only one context operation
2015 * occurs at a time.
2016 */
2017int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2018 unsigned int credits)
2019{
2020 u32 val;
2021
2022 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2023 return -EBUSY;
2024
2025 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2026 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2027 V_CONTEXT(id) | F_CQ);
2028 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2029 0, 5, 1, &val))
2030 return -EIO;
2031
2032 if (op >= 2 && op < 7) {
2033 if (adapter->params.rev > 0)
2034 return G_CQ_INDEX(val);
2035
2036 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2037 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2038 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2039 F_CONTEXT_CMD_BUSY, 0, 5, 1))
2040 return -EIO;
2041 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2042 }
2043 return 0;
2044}
2045
2046/**
2047 * t3_sge_read_context - read an SGE context
2048 * @type: the context type
2049 * @adapter: the adapter
2050 * @id: the context id
2051 * @data: holds the retrieved context
2052 *
2053 * Read an SGE context of the given type. The caller is responsible for ensuring
2054 * only one context operation occurs at a time.
2055 */
2056static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2057 unsigned int id, u32 data[4])
2058{
2059 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2060 return -EBUSY;
2061
2062 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2063 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2064 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2065 5, 1))
2066 return -EIO;
2067 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2068 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2069 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2070 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2071 return 0;
2072}
2073
2074/**
2075 * t3_sge_read_ecntxt - read an SGE egress context
2076 * @adapter: the adapter
2077 * @id: the context id
2078 * @data: holds the retrieved context
2079 *
2080 * Read an SGE egress context. The caller is responsible for ensuring
2081 * only one context operation occurs at a time.
2082 */
2083int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2084{
2085 if (id >= 65536)
2086 return -EINVAL;
2087 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2088}
2089
2090/**
2091 * t3_sge_read_cq - read an SGE CQ context
2092 * @adapter: the adapter
2093 * @id: the context id
2094 * @data: holds the retrieved context
2095 *
2096 * Read an SGE CQ context. The caller is responsible for ensuring
2097 * only one context operation occurs at a time.
2098 */
2099int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2100{
2101 if (id >= 65536)
2102 return -EINVAL;
2103 return t3_sge_read_context(F_CQ, adapter, id, data);
2104}
2105
2106/**
2107 * t3_sge_read_fl - read an SGE free-list context
2108 * @adapter: the adapter
2109 * @id: the context id
2110 * @data: holds the retrieved context
2111 *
2112 * Read an SGE free-list context. The caller is responsible for ensuring
2113 * only one context operation occurs at a time.
2114 */
2115int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2116{
2117 if (id >= SGE_QSETS * 2)
2118 return -EINVAL;
2119 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2120}
2121
2122/**
2123 * t3_sge_read_rspq - read an SGE response queue context
2124 * @adapter: the adapter
2125 * @id: the context id
2126 * @data: holds the retrieved context
2127 *
2128 * Read an SGE response queue context. The caller is responsible for
2129 * ensuring only one context operation occurs at a time.
2130 */
2131int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2132{
2133 if (id >= SGE_QSETS)
2134 return -EINVAL;
2135 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2136}
2137
2138/**
2139 * t3_config_rss - configure Rx packet steering
2140 * @adapter: the adapter
2141 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2142 * @cpus: values for the CPU lookup table (0xff terminated)
2143 * @rspq: values for the response queue lookup table (0xffff terminated)
2144 *
2145 * Programs the receive packet steering logic. @cpus and @rspq provide
2146 * the values for the CPU and response queue lookup tables. If they
2147 * provide fewer values than the size of the tables, the supplied values
2148 * are used repeatedly until the tables are fully populated.
2149 */
2150void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2151 const u8 * cpus, const u16 *rspq)
2152{
2153 int i, j, cpu_idx = 0, q_idx = 0;
2154
2155 if (cpus)
2156 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2157 u32 val = i << 16;
2158
2159 for (j = 0; j < 2; ++j) {
2160 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2161 if (cpus[cpu_idx] == 0xff)
2162 cpu_idx = 0;
2163 }
2164 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2165 }
2166
2167 if (rspq)
2168 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2169 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2170 (i << 16) | rspq[q_idx++]);
2171 if (rspq[q_idx] == 0xffff)
2172 q_idx = 0;
2173 }
2174
2175 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2176}
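
For readers unfamiliar with the 0xff/0xffff-terminated lookup arrays, the wrap-around behaviour above can be illustrated with a small standalone sketch. This is not driver code; the RSS_TABLE_SIZE value and the queue IDs are assumptions chosen purely for illustration.

#include <stdio.h>

#define RSS_TABLE_SIZE 64		/* assumed table size for this sketch */

int main(void)
{
	/* Four queue-set IDs, 0xffff-terminated as t3_config_rss() expects. */
	static const unsigned short rspq[] = { 0, 1, 2, 3, 0xffff };
	unsigned int i, q_idx = 0;

	for (i = 0; i < RSS_TABLE_SIZE; ++i) {
		/* The driver would write (i << 16) | rspq[q_idx] to
		 * A_TP_RSS_MAP_TABLE here; we just print the mapping. */
		printf("map entry %2u -> response queue %u\n", i, rspq[q_idx++]);
		if (rspq[q_idx] == 0xffff)	/* terminator: wrap to the start */
			q_idx = 0;
	}
	return 0;
}
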
2177
2178/**
2179 * t3_read_rss - read the contents of the RSS tables
2180 * @adapter: the adapter
2181 * @lkup: holds the contents of the RSS lookup table
2182 * @map: holds the contents of the RSS map table
2183 *
2184 * Reads the contents of the receive packet steering tables.
2185 */
2186int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2187{
2188 int i;
2189 u32 val;
2190
2191 if (lkup)
2192 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2193 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2194 0xffff0000 | i);
2195 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2196 if (!(val & 0x80000000))
2197 return -EAGAIN;
2198 *lkup++ = val;
2199 *lkup++ = (val >> 8);
2200 }
2201
2202 if (map)
2203 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2204 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2205 0xffff0000 | i);
2206 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2207 if (!(val & 0x80000000))
2208 return -EAGAIN;
2209 *map++ = val;
2210 }
2211 return 0;
2212}
2213
2214/**
2215 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2216 * @adap: the adapter
2217 * @enable: 1 to select offload mode, 0 for regular NIC
2218 *
2219 * Switches TP to NIC/offload mode.
2220 */
2221void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2222{
2223 if (is_offload(adap) || !enable)
2224 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2225 V_NICMODE(!enable));
2226}
2227
2228/**
2229 * pm_num_pages - calculate the number of pages of the payload memory
2230 * @mem_size: the size of the payload memory
2231 * @pg_size: the size of each payload memory page
2232 *
2233 * Calculate the number of pages, each of the given size, that fit in a
2234 * memory of the specified size, respecting the HW requirement that the
2235 * number of pages must be a multiple of 24.
2236 */
2237static inline unsigned int pm_num_pages(unsigned int mem_size,
2238 unsigned int pg_size)
2239{
2240 unsigned int n = mem_size / pg_size;
2241
2242 return n - n % 24;
2243}
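
As a quick sanity check of the multiple-of-24 rule, here is a standalone sketch (not driver code) assuming a 64 MB payload memory with 64 KB pages:

#include <assert.h>

static unsigned int pm_num_pages_example(unsigned int mem_size,
					 unsigned int pg_size)
{
	unsigned int n = mem_size / pg_size;

	return n - n % 24;		/* round down to a multiple of 24 */
}

int main(void)
{
	/* 64 MB / 64 KB = 1024 raw pages, rounded down to 1008 (42 * 24). */
	assert(pm_num_pages_example(64 << 20, 64 << 10) == 1008);
	return 0;
}
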
2244
2245#define mem_region(adap, start, size, reg) \
2246 t3_write_reg((adap), A_ ## reg, (start)); \
2247 start += size
2248
2249/*
2250 * partition_mem - partition memory and configure TP memory settings
2251 * @adap: the adapter
2252 * @p: the TP parameters
2253 *
2254 * Partitions context and payload memory and configures TP's memory
2255 * registers.
2256 */
2257static void partition_mem(struct adapter *adap, const struct tp_params *p)
2258{
2259 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2260 unsigned int timers = 0, timers_shift = 22;
2261
2262 if (adap->params.rev > 0) {
2263 if (tids <= 16 * 1024) {
2264 timers = 1;
2265 timers_shift = 16;
2266 } else if (tids <= 64 * 1024) {
2267 timers = 2;
2268 timers_shift = 18;
2269 } else if (tids <= 256 * 1024) {
2270 timers = 3;
2271 timers_shift = 20;
2272 }
2273 }
2274
2275 t3_write_reg(adap, A_TP_PMM_SIZE,
2276 p->chan_rx_size | (p->chan_tx_size >> 16));
2277
2278 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2279 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2280 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2281 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2282 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2283
2284 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2285 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2286 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2287
2288 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2289 /* Add a bit of headroom and round to a multiple of 24 */
2290 pstructs += 48;
2291 pstructs -= pstructs % 24;
2292 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2293
2294 m = tids * TCB_SIZE;
2295 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2296 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2297 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2298 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2299 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2300 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2301 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2302 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2303
2304 m = (m + 4095) & ~0xfff;
2305 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2306 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2307
2308 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2309 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2310 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2311 if (tids < m)
2312 adap->params.mc5.nservers += m - tids;
2313}
2314
2315static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2316 u32 val)
2317{
2318 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2319 t3_write_reg(adap, A_TP_PIO_DATA, val);
2320}
2321
2322static void tp_config(struct adapter *adap, const struct tp_params *p)
2323{
2324 unsigned int v;
2325
2326 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2327 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2328 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2329 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2330 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2331 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2332 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2333 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2334 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2335 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2336 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
2337 F_IPV6ENABLE | F_NICMODE);
2338 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2339 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2340 t3_set_reg_field(adap, A_TP_PARA_REG6,
2341 adap->params.rev > 0 ? F_ENABLEESND : F_T3A_ENABLEESND,
2342 0);
2343
2344 v = t3_read_reg(adap, A_TP_PC_CONFIG);
2345 v &= ~(F_ENABLEEPCMDAFULL | F_ENABLEOCSPIFULL);
2346 t3_write_reg(adap, A_TP_PC_CONFIG, v | F_TXDEFERENABLE |
2347 F_MODULATEUNIONMODE | F_HEARBEATDACK |
2348 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2349
2350 v = t3_read_reg(adap, A_TP_PC_CONFIG2);
2351 v &= ~F_CHDRAFULL;
2352 t3_write_reg(adap, A_TP_PC_CONFIG2, v);
2353
2354 if (adap->params.rev > 0) {
2355 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2356 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2357 F_TXPACEAUTO);
2358 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2359 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2360 } else
2361 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2362
2363 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0x12121212);
2364 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0x12121212);
2365 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0x1212);
2366}
2367
2368/* Desired TP timer resolution in usec */
2369#define TP_TMR_RES 50
2370
2371/* TCP timer values in ms */
2372#define TP_DACK_TIMER 50
2373#define TP_RTO_MIN 250
2374
2375/**
2376 * tp_set_timers - set TP timing parameters
2377 * @adap: the adapter to set
2378 * @core_clk: the core clock frequency in Hz
2379 *
2380 * Set TP's timing parameters, such as the various timer resolutions and
2381 * the TCP timer values.
2382 */
2383static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2384{
2385 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2386 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2387 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2388 unsigned int tps = core_clk >> tre;
2389
2390 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2391 V_DELAYEDACKRESOLUTION(dack_re) |
2392 V_TIMESTAMPRESOLUTION(tstamp_re));
2393 t3_write_reg(adap, A_TP_DACK_TIMER,
2394 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2395 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2396 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2397 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2398 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2399 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2400 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2401 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2402 V_KEEPALIVEMAX(9));
2403
2404#define SECONDS * tps
2405
2406 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2407 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2408 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2409 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2410 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2411 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2412 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2413 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2414 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2415
2416#undef SECONDS
2417}
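
To make the resolution arithmetic concrete, here is a standalone sketch (not driver code) that evaluates the timer-resolution exponent for an assumed 200 MHz core clock; fls() is open-coded because the kernel helper is not available in user space.

#include <stdio.h>

#define TP_TMR_RES 50	/* desired TP timer resolution in usec, as above */

static int fls_local(unsigned int x)	/* 1-based index of the highest set bit */
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int core_clk = 200 * 1000 * 1000;	/* assumed 200 MHz */
	unsigned int tre = fls_local(core_clk / (1000000 / TP_TMR_RES)) - 1;
	unsigned int tps = core_clk >> tre;

	/* tre = 13, so one timer tick is core_clk / 2^13, roughly 41 us:
	 * the coarsest power-of-2 tick that stays within the 50 us target. */
	printf("tre=%u ticks/sec=%u usec/tick=%.1f\n",
	       tre, tps, 1000000.0 / tps);
	return 0;
}
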
2418
2419/**
2420 * t3_tp_set_coalescing_size - set receive coalescing size
2421 * @adap: the adapter
2422 * @size: the receive coalescing size
2423 * @psh: whether a set PSH bit should deliver coalesced data
2424 *
2425 * Set the receive coalescing size and PSH bit handling.
2426 */
2427int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2428{
2429 u32 val;
2430
2431 if (size > MAX_RX_COALESCING_LEN)
2432 return -EINVAL;
2433
2434 val = t3_read_reg(adap, A_TP_PARA_REG3);
2435 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2436
2437 if (size) {
2438 val |= F_RXCOALESCEENABLE;
2439 if (psh)
2440 val |= F_RXCOALESCEPSHEN;
2441 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2442 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2443 }
2444 t3_write_reg(adap, A_TP_PARA_REG3, val);
2445 return 0;
2446}
2447
2448/**
2449 * t3_tp_set_max_rxsize - set the max receive size
2450 * @adap: the adapter
2451 * @size: the max receive size
2452 *
2453 * Set TP's max receive size. This is the limit that applies when
2454 * receive coalescing is disabled.
2455 */
2456void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2457{
2458 t3_write_reg(adap, A_TP_PARA_REG7,
2459 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2460}
2461
2462static void __devinit init_mtus(unsigned short mtus[])
2463{
2464 /*
2465 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2466 * it can accommodate max size TCP/IP headers when SACK and timestamps
2467 * are enabled and still have at least 8 bytes of payload.
2468 */
2469 mtus[0] = 88;
2470 mtus[1] = 256;
2471 mtus[2] = 512;
2472 mtus[3] = 576;
2473 mtus[4] = 808;
2474 mtus[5] = 1024;
2475 mtus[6] = 1280;
2476 mtus[7] = 1492;
2477 mtus[8] = 1500;
2478 mtus[9] = 2002;
2479 mtus[10] = 2048;
2480 mtus[11] = 4096;
2481 mtus[12] = 4352;
2482 mtus[13] = 8192;
2483 mtus[14] = 9000;
2484 mtus[15] = 9600;
2485}
2486
2487/*
2488 * Initial congestion control parameters.
2489 */
2490static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2491{
2492 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2493 a[9] = 2;
2494 a[10] = 3;
2495 a[11] = 4;
2496 a[12] = 5;
2497 a[13] = 6;
2498 a[14] = 7;
2499 a[15] = 8;
2500 a[16] = 9;
2501 a[17] = 10;
2502 a[18] = 14;
2503 a[19] = 17;
2504 a[20] = 21;
2505 a[21] = 25;
2506 a[22] = 30;
2507 a[23] = 35;
2508 a[24] = 45;
2509 a[25] = 60;
2510 a[26] = 80;
2511 a[27] = 100;
2512 a[28] = 200;
2513 a[29] = 300;
2514 a[30] = 400;
2515 a[31] = 500;
2516
2517 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2518 b[9] = b[10] = 1;
2519 b[11] = b[12] = 2;
2520 b[13] = b[14] = b[15] = b[16] = 3;
2521 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2522 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2523 b[28] = b[29] = 6;
2524 b[30] = b[31] = 7;
2525}
2526
2527/* The minimum additive increment value for the congestion control table */
2528#define CC_MIN_INCR 2U
2529
2530/**
2531 * t3_load_mtus - write the MTU and congestion control HW tables
2532 * @adap: the adapter
2533 * @mtus: the unrestricted values for the MTU table
2534 * @alpha: the values for the congestion control alpha parameter
2535 * @beta: the values for the congestion control beta parameter
2536 * @mtu_cap: the maximum permitted effective MTU
2537 *
2538 * Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
2539 * Update the high-speed congestion control table with the supplied alpha,
2540 * beta, and MTUs.
2541 */
2542void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2543 unsigned short alpha[NCCTRL_WIN],
2544 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2545{
2546 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2547 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2548 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2549 28672, 40960, 57344, 81920, 114688, 163840, 229376
2550 };
2551
2552 unsigned int i, w;
2553
2554 for (i = 0; i < NMTUS; ++i) {
2555 unsigned int mtu = min(mtus[i], mtu_cap);
2556 unsigned int log2 = fls(mtu);
2557
2558 if (!(mtu & ((1 << log2) >> 2))) /* round */
2559 log2--;
2560 t3_write_reg(adap, A_TP_MTU_TABLE,
2561 (i << 24) | (log2 << 16) | mtu);
2562
2563 for (w = 0; w < NCCTRL_WIN; ++w) {
2564 unsigned int inc;
2565
2566 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2567 CC_MIN_INCR);
2568
2569 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2570 (w << 16) | (beta[w] << 13) | inc);
2571 }
2572 }
2573}
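
The additive-increment formula above can be evaluated by hand; the sketch below (not driver code) assumes an alpha of 1 and uses the avg_pkts entry of 40 from the table above.

#include <stdio.h>

#define CC_MIN_INCR 2U	/* minimum additive increment, as above */

int main(void)
{
	/* Assumed inputs: a 1500-byte MTU, alpha = 1, and the congestion
	 * window whose average packet count is 40. */
	unsigned int mtu = 1500, alpha = 1, avg_pkts = 40;
	unsigned int inc = (mtu - 40) * alpha / avg_pkts;

	if (inc < CC_MIN_INCR)
		inc = CC_MIN_INCR;
	/* (1500 - 40) * 1 / 40 = 36 bytes of additive increase per window */
	printf("additive increment = %u bytes\n", inc);
	return 0;
}
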
2574
2575/**
2576 * t3_read_hw_mtus - returns the values in the HW MTU table
2577 * @adap: the adapter
2578 * @mtus: where to store the HW MTU values
2579 *
2580 * Reads the HW MTU table.
2581 */
2582void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2583{
2584 int i;
2585
2586 for (i = 0; i < NMTUS; ++i) {
2587 unsigned int val;
2588
2589 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2590 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2591 mtus[i] = val & 0x3fff;
2592 }
2593}
2594
2595/**
2596 * t3_get_cong_cntl_tab - reads the congestion control table
2597 * @adap: the adapter
2598 * @incr: where to store the alpha values
2599 *
2600 * Reads the additive increments programmed into the HW congestion
2601 * control table.
2602 */
2603void t3_get_cong_cntl_tab(struct adapter *adap,
2604 unsigned short incr[NMTUS][NCCTRL_WIN])
2605{
2606 unsigned int mtu, w;
2607
2608 for (mtu = 0; mtu < NMTUS; ++mtu)
2609 for (w = 0; w < NCCTRL_WIN; ++w) {
2610 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2611 0xffff0000 | (mtu << 5) | w);
2612 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2613 0x1fff;
2614 }
2615}
2616
2617/**
2618 * t3_tp_get_mib_stats - read TP's MIB counters
2619 * @adap: the adapter
2620 * @tps: holds the returned counter values
2621 *
2622 * Reads the values of TP's MIB counters into @tps.
2623 */
2624void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2625{
2626 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2627 sizeof(*tps) / sizeof(u32), 0);
2628}
2629
2630#define ulp_region(adap, name, start, len) \
2631 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2632 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2633 (start) + (len) - 1); \
2634 start += len
2635
2636#define ulptx_region(adap, name, start, len) \
2637 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2638 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2639 (start) + (len) - 1)
2640
2641static void ulp_config(struct adapter *adap, const struct tp_params *p)
2642{
2643 unsigned int m = p->chan_rx_size;
2644
2645 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2646 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2647 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2648 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2649 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2650 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2651 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2652 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2653}
2654
2655void t3_config_trace_filter(struct adapter *adapter,
2656 const struct trace_params *tp, int filter_index,
2657 int invert, int enable)
2658{
2659 u32 addr, key[4], mask[4];
2660
2661 key[0] = tp->sport | (tp->sip << 16);
2662 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2663 key[2] = tp->dip;
2664 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2665
2666 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2667 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2668 mask[2] = tp->dip_mask;
2669 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2670
2671 if (invert)
2672 key[3] |= (1 << 29);
2673 if (enable)
2674 key[3] |= (1 << 28);
2675
2676 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2677 tp_wr_indirect(adapter, addr++, key[0]);
2678 tp_wr_indirect(adapter, addr++, mask[0]);
2679 tp_wr_indirect(adapter, addr++, key[1]);
2680 tp_wr_indirect(adapter, addr++, mask[1]);
2681 tp_wr_indirect(adapter, addr++, key[2]);
2682 tp_wr_indirect(adapter, addr++, mask[2]);
2683 tp_wr_indirect(adapter, addr++, key[3]);
2684 tp_wr_indirect(adapter, addr, mask[3]);
2685 t3_read_reg(adapter, A_TP_PIO_DATA);
2686}
2687
2688/**
2689 * t3_config_sched - configure a HW traffic scheduler
2690 * @adap: the adapter
2691 * @kbps: target rate in Kbps
2692 * @sched: the scheduler index
2693 *
2694 * Configure a HW scheduler for the target rate.
2695 */
2696int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2697{
2698 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2699 unsigned int clk = adap->params.vpd.cclk * 1000;
2700 unsigned int selected_cpt = 0, selected_bpt = 0;
2701
2702 if (kbps > 0) {
2703 kbps *= 125; /* -> bytes */
2704 for (cpt = 1; cpt <= 255; cpt++) {
2705 tps = clk / cpt;
2706 bpt = (kbps + tps / 2) / tps;
2707 if (bpt > 0 && bpt <= 255) {
2708 v = bpt * tps;
2709 delta = v >= kbps ? v - kbps : kbps - v;
2710 if (delta <= mindelta) {
2711 mindelta = delta;
2712 selected_cpt = cpt;
2713 selected_bpt = bpt;
2714 }
2715 } else if (selected_cpt)
2716 break;
2717 }
2718 if (!selected_cpt)
2719 return -EINVAL;
2720 }
2721 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2722 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2723 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2724 if (sched & 1)
2725 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2726 else
2727 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2728 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
2729 return 0;
2730}
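
The cpt/bpt search above is easiest to follow with concrete numbers. The standalone sketch below (not driver code) repeats the search for an assumed 200 MHz core clock and a 10 Mb/s target rate.

#include <stdio.h>

int main(void)
{
	unsigned int clk = 200 * 1000 * 1000;	/* assumed core clock, Hz */
	unsigned int kbps = 10000 * 125;	/* 10 Mb/s -> bytes/sec */
	unsigned int cpt, tps, bpt, v, delta, mindelta = ~0U;
	unsigned int sel_cpt = 0, sel_bpt = 0;

	for (cpt = 1; cpt <= 255; cpt++) {
		tps = clk / cpt;		/* scheduler ticks per second */
		bpt = (kbps + tps / 2) / tps;	/* bytes per tick, rounded */
		if (bpt > 0 && bpt <= 255) {
			v = bpt * tps;		/* achievable rate */
			delta = v >= kbps ? v - kbps : kbps - v;
			if (delta <= mindelta) {
				mindelta = delta;
				sel_cpt = cpt;
				sel_bpt = bpt;
			}
		} else if (sel_cpt)
			break;
	}
	/* For these inputs the search settles on cpt=160, bpt=1:
	 * 200 MHz / 160 = 1.25M ticks/sec * 1 byte = exactly 1.25 MB/s. */
	printf("cpt=%u bpt=%u error=%u bytes/sec\n", sel_cpt, sel_bpt, mindelta);
	return 0;
}
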
2731
2732static int tp_init(struct adapter *adap, const struct tp_params *p)
2733{
2734 int busy = 0;
2735
2736 tp_config(adap, p);
2737 t3_set_vlan_accel(adap, 3, 0);
2738
2739 if (is_offload(adap)) {
2740 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
2741 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
2742 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
2743 0, 1000, 5);
2744 if (busy)
2745 CH_ERR(adap, "TP initialization timed out\n");
2746 }
2747
2748 if (!busy)
2749 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
2750 return busy;
2751}
2752
2753int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
2754{
2755 if (port_mask & ~((1 << adap->params.nports) - 1))
2756 return -EINVAL;
2757 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
2758 port_mask << S_PORT0ACTIVE);
2759 return 0;
2760}
2761
2762/*
2763 * Perform the bits of HW initialization that are dependent on the number
2764 * of available ports.
2765 */
2766static void init_hw_for_avail_ports(struct adapter *adap, int nports)
2767{
2768 int i;
2769
2770 if (nports == 1) {
2771 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
2772 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
2773 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
2774 F_PORT0ACTIVE | F_ENFORCEPKT);
2775 t3_write_reg(adap, A_PM1_TX_CFG, 0xc000c000);
2776 } else {
2777 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
2778 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
2779 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
2780 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
2781 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
2782 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
2783 F_ENFORCEPKT);
2784 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
2785 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
2786 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
2787 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
2788 for (i = 0; i < 16; i++)
2789 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
2790 (i << 16) | 0x1010);
2791 }
2792}
2793
2794static int calibrate_xgm(struct adapter *adapter)
2795{
2796 if (uses_xaui(adapter)) {
2797 unsigned int v, i;
2798
2799 for (i = 0; i < 5; ++i) {
2800 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
2801 t3_read_reg(adapter, A_XGM_XAUI_IMP);
2802 msleep(1);
2803 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
2804 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
2805 t3_write_reg(adapter, A_XGM_XAUI_IMP,
2806 V_XAUIIMP(G_CALIMP(v) >> 2));
2807 return 0;
2808 }
2809 }
2810 CH_ERR(adapter, "MAC calibration failed\n");
2811 return -1;
2812 } else {
2813 t3_write_reg(adapter, A_XGM_RGMII_IMP,
2814 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2815 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2816 F_XGM_IMPSETUPDATE);
2817 }
2818 return 0;
2819}
2820
2821static void calibrate_xgm_t3b(struct adapter *adapter)
2822{
2823 if (!uses_xaui(adapter)) {
2824 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
2825 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2826 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
2827 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
2828 F_XGM_IMPSETUPDATE);
2829 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2830 0);
2831 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
2832 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
2833 }
2834}
2835
2836struct mc7_timing_params {
2837 unsigned char ActToPreDly;
2838 unsigned char ActToRdWrDly;
2839 unsigned char PreCyc;
2840 unsigned char RefCyc[5];
2841 unsigned char BkCyc;
2842 unsigned char WrToRdDly;
2843 unsigned char RdToWrDly;
2844};
2845
2846/*
2847 * Write a value to a register and check that the write completed. These
2848 * writes normally complete in a cycle or two, so one read should suffice.
2849 * The very first read exists to flush the posted write to the device.
2850 */
2851static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
2852{
2853 t3_write_reg(adapter, addr, val);
2854 t3_read_reg(adapter, addr); /* flush */
2855 if (!(t3_read_reg(adapter, addr) & F_BUSY))
2856 return 0;
2857 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
2858 return -EIO;
2859}
2860
2861static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
2862{
2863 static const unsigned int mc7_mode[] = {
2864 0x632, 0x642, 0x652, 0x432, 0x442
2865 };
2866 static const struct mc7_timing_params mc7_timings[] = {
2867 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
2868 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
2869 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
2870 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
2871 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
2872 };
2873
2874 u32 val;
2875 unsigned int width, density, slow, attempts;
2876 struct adapter *adapter = mc7->adapter;
2877 const struct mc7_timing_params *p = &mc7_timings[mem_type];
2878
2879 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
2880 slow = val & F_SLOW;
2881 width = G_WIDTH(val);
2882 density = G_DEN(val);
2883
2884 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
2885 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
2886 msleep(1);
2887
2888 if (!slow) {
2889 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
2890 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
2891 msleep(1);
2892 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
2893 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
2894 CH_ERR(adapter, "%s MC7 calibration timed out\n",
2895 mc7->name);
2896 goto out_fail;
2897 }
2898 }
2899
2900 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
2901 V_ACTTOPREDLY(p->ActToPreDly) |
2902 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
2903 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
2904 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
2905
2906 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
2907 val | F_CLKEN | F_TERM150);
2908 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
2909
2910 if (!slow)
2911 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
2912 F_DLLENB);
2913 udelay(1);
2914
2915 val = slow ? 3 : 6;
2916 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
2917 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
2918 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
2919 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
2920 goto out_fail;
2921
2922 if (!slow) {
2923 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
2924 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
2925 udelay(5);
2926 }
2927
2928 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
2929 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
2930 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
2931 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
2932 mc7_mode[mem_type]) ||
2933 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
2934 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
2935 goto out_fail;
2936
2937 /* clock value is in KHz */
2938 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
2939 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
2940
2941 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
2942 F_PERREFEN | V_PREREFDIV(mc7_clock));
2943 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
2944
2945 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
2946 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
2947 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
2948 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
2949 (mc7->size << width) - 1);
2950 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
2951 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
2952
2953 attempts = 50;
2954 do {
2955 msleep(250);
2956 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
2957 } while ((val & F_BUSY) && --attempts);
2958 if (val & F_BUSY) {
2959 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
2960 goto out_fail;
2961 }
2962
2963 /* Enable normal memory accesses. */
2964 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
2965 return 0;
2966
2967out_fail:
2968 return -1;
2969}
2970
2971static void config_pcie(struct adapter *adap)
2972{
2973 static const u16 ack_lat[4][6] = {
2974 {237, 416, 559, 1071, 2095, 4143},
2975 {128, 217, 289, 545, 1057, 2081},
2976 {73, 118, 154, 282, 538, 1050},
2977 {67, 107, 86, 150, 278, 534}
2978 };
2979 static const u16 rpl_tmr[4][6] = {
2980 {711, 1248, 1677, 3213, 6285, 12429},
2981 {384, 651, 867, 1635, 3171, 6243},
2982 {219, 354, 462, 846, 1614, 3150},
2983 {201, 321, 258, 450, 834, 1602}
2984 };
2985
2986 u16 val;
2987 unsigned int log2_width, pldsize;
2988 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
2989
2990 pci_read_config_word(adap->pdev,
2991 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
2992 &val);
2993 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
2994 pci_read_config_word(adap->pdev,
2995 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
2996 &val);
2997
2998 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
2999 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3000 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3001 log2_width = fls(adap->params.pci.width) - 1;
3002 acklat = ack_lat[log2_width][pldsize];
3003 if (val & 1) /* check LOsEnable */
3004 acklat += fst_trn_tx * 4;
3005 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3006
3007 if (adap->params.rev == 0)
3008 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3009 V_T3A_ACKLAT(M_T3A_ACKLAT),
3010 V_T3A_ACKLAT(acklat));
3011 else
3012 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3013 V_ACKLAT(acklat));
3014
3015 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3016 V_REPLAYLMT(rpllmt));
3017
3018 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3019 t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
3020}
3021
3022/*
3023 * Initialize and configure T3 HW modules. This performs the
3024 * initialization steps that need to be done once after a card is reset.
3025 * MAC and PHY initialization is handled separately whenever a port is enabled.
3026 *
3027 * fw_params is passed to the FW and its value is platform dependent. Only the
3028 * top 8 bits are available for use; the rest must be 0.
3029 */
3030int t3_init_hw(struct adapter *adapter, u32 fw_params)
3031{
3032 int err = -EIO, attempts = 100;
3033 const struct vpd_params *vpd = &adapter->params.vpd;
3034
3035 if (adapter->params.rev > 0)
3036 calibrate_xgm_t3b(adapter);
3037 else if (calibrate_xgm(adapter))
3038 goto out_err;
3039
3040 if (vpd->mclk) {
3041 partition_mem(adapter, &adapter->params.tp);
3042
3043 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3044 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3045 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3046 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3047 adapter->params.mc5.nfilters,
3048 adapter->params.mc5.nroutes))
3049 goto out_err;
3050 }
3051
3052 if (tp_init(adapter, &adapter->params.tp))
3053 goto out_err;
3054
3055 t3_tp_set_coalescing_size(adapter,
3056 min(adapter->params.sge.max_pkt_size,
3057 MAX_RX_COALESCING_LEN), 1);
3058 t3_tp_set_max_rxsize(adapter,
3059 min(adapter->params.sge.max_pkt_size, 16384U));
3060 ulp_config(adapter, &adapter->params.tp);
3061
3062 if (is_pcie(adapter))
3063 config_pcie(adapter);
3064 else
3065 t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
3066
3067 t3_write_reg(adapter, A_PM1_RX_CFG, 0xf000f000);
3068 init_hw_for_avail_ports(adapter, adapter->params.nports);
3069 t3_sge_init(adapter, &adapter->params.sge);
3070
3071 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3072 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3073 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3074 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3075
3076 do { /* wait for uP to initialize */
3077 msleep(20);
3078 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3079 if (!attempts)
3080 goto out_err;
3081
3082 err = 0;
3083out_err:
3084 return err;
3085}
3086
3087/**
3088 * get_pci_mode - determine a card's PCI mode
3089 * @adapter: the adapter
3090 * @p: where to store the PCI settings
3091 *
3092 * Determines a card's PCI mode and associated parameters, such as speed
3093 * and width.
3094 */
3095static void __devinit get_pci_mode(struct adapter *adapter,
3096 struct pci_params *p)
3097{
3098 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3099 u32 pci_mode, pcie_cap;
3100
3101 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3102 if (pcie_cap) {
3103 u16 val;
3104
3105 p->variant = PCI_VARIANT_PCIE;
3106 p->pcie_cap_addr = pcie_cap;
3107 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3108 &val);
3109 p->width = (val >> 4) & 0x3f;
3110 return;
3111 }
3112
3113 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3114 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3115 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3116 pci_mode = G_PCIXINITPAT(pci_mode);
3117 if (pci_mode == 0)
3118 p->variant = PCI_VARIANT_PCI;
3119 else if (pci_mode < 4)
3120 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3121 else if (pci_mode < 8)
3122 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3123 else
3124 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3125}
3126
3127/**
3128 * init_link_config - initialize a link's SW state
3129 * @lc: structure holding the link state
3130 * @ai: information about the current card
3131 *
3132 * Initializes the SW state maintained for each link, including the link's
3133 * capabilities and default speed/duplex/flow-control/autonegotiation
3134 * settings.
3135 */
3136static void __devinit init_link_config(struct link_config *lc,
3137 unsigned int caps)
3138{
3139 lc->supported = caps;
3140 lc->requested_speed = lc->speed = SPEED_INVALID;
3141 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3142 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3143 if (lc->supported & SUPPORTED_Autoneg) {
3144 lc->advertising = lc->supported;
3145 lc->autoneg = AUTONEG_ENABLE;
3146 lc->requested_fc |= PAUSE_AUTONEG;
3147 } else {
3148 lc->advertising = 0;
3149 lc->autoneg = AUTONEG_DISABLE;
3150 }
3151}
3152
3153/**
3154 * mc7_calc_size - calculate MC7 memory size
3155 * @cfg: the MC7 configuration
3156 *
3157 * Calculates the size of an MC7 memory in bytes from the value of its
3158 * configuration register.
3159 */
3160static unsigned int __devinit mc7_calc_size(u32 cfg)
3161{
3162 unsigned int width = G_WIDTH(cfg);
3163 unsigned int banks = !!(cfg & F_BKS) + 1;
3164 unsigned int org = !!(cfg & F_ORG) + 1;
3165 unsigned int density = G_DEN(cfg);
3166 unsigned int MBs = ((256 << density) * banks) / (org << width);
3167
3168 return MBs << 20;
3169}
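
A worked instance of the size formula, with field values chosen purely for illustration (in the driver the real values are decoded from the MC7 configuration register via G_WIDTH/G_DEN/F_BKS/F_ORG):

#include <stdio.h>

int main(void)
{
	/* Assumed decoded fields: width code 1, 2 banks, org 1, density code 1. */
	unsigned int width = 1, banks = 2, org = 1, density = 1;
	unsigned int MBs = ((256 << density) * banks) / (org << width);

	/* (256 << 1) * 2 / (1 << 1) = 512 MB */
	printf("MC7 size = %u MB (%u bytes)\n", MBs, MBs << 20);
	return 0;
}
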
3170
3171static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3172 unsigned int base_addr, const char *name)
3173{
3174 u32 cfg;
3175
3176 mc7->adapter = adapter;
3177 mc7->name = name;
3178 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3179 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3180 mc7->size = mc7_calc_size(cfg);
3181 mc7->width = G_WIDTH(cfg);
3182}
3183
3184void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3185{
3186 mac->adapter = adapter;
3187 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3188 mac->nucast = 1;
3189
3190 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3191 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3192 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3193 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3194 F_ENRGMII, 0);
3195 }
3196}
3197
3198void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3199{
3200 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3201
3202 mi1_init(adapter, ai);
3203 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3204 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3205 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3206 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3207
3208 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3209 val |= F_ENRGMII;
3210
3211 /* Enable MAC clocks so we can access the registers */
3212 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3213 t3_read_reg(adapter, A_XGM_PORT_CFG);
3214
3215 val |= F_CLKDIVRESET_;
3216 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3217 t3_read_reg(adapter, A_XGM_PORT_CFG);
3218 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3219 t3_read_reg(adapter, A_XGM_PORT_CFG);
3220}
3221
3222/*
3223 * Reset the adapter. PCIe cards lose their config space during reset, PCI-X
3224 * ones don't.
3225 */
3226int t3_reset_adapter(struct adapter *adapter)
3227{
3228 int i;
3229 uint16_t devid = 0;
3230
3231 if (is_pcie(adapter))
3232 pci_save_state(adapter->pdev);
3233 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3234
3235 /*
3236 * Delay. Give the device some time to reset fully.
3237 * XXX The delay time should be modified.
3238 */
3239 for (i = 0; i < 10; i++) {
3240 msleep(50);
3241 pci_read_config_word(adapter->pdev, 0x00, &devid);
3242 if (devid == 0x1425)
3243 break;
3244 }
3245
3246 if (devid != 0x1425)
3247 return -1;
3248
3249 if (is_pcie(adapter))
3250 pci_restore_state(adapter->pdev);
3251 return 0;
3252}
3253
3254/*
3255 * Initialize adapter SW state for the various HW modules, set initial values
3256 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3257 * interface.
3258 */
3259int __devinit t3_prep_adapter(struct adapter *adapter,
3260 const struct adapter_info *ai, int reset)
3261{
3262 int ret;
3263 unsigned int i, j = 0;
3264
3265 get_pci_mode(adapter, &adapter->params.pci);
3266
3267 adapter->params.info = ai;
3268 adapter->params.nports = ai->nports;
3269 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3270 adapter->params.linkpoll_period = 0;
3271 adapter->params.stats_update_period = is_10G(adapter) ?
3272 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3273 adapter->params.pci.vpd_cap_addr =
3274 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3275 ret = get_vpd_params(adapter, &adapter->params.vpd);
3276 if (ret < 0)
3277 return ret;
3278
3279 if (reset && t3_reset_adapter(adapter))
3280 return -1;
3281
3282 t3_sge_prep(adapter, &adapter->params.sge);
3283
3284 if (adapter->params.vpd.mclk) {
3285 struct tp_params *p = &adapter->params.tp;
3286
3287 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3288 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3289 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3290
3291 p->nchan = ai->nports;
3292 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3293 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3294 p->cm_size = t3_mc7_size(&adapter->cm);
3295 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3296 p->chan_tx_size = p->pmtx_size / p->nchan;
3297 p->rx_pg_size = 64 * 1024;
3298 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3299 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3300 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3301 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3302 adapter->params.rev > 0 ? 12 : 6;
3303
3304 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3305 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3306 DEFAULT_NFILTERS : 0;
3307 adapter->params.mc5.nroutes = 0;
3308 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3309
3310 init_mtus(adapter->params.mtus);
3311 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3312 }
3313
3314 early_hw_init(adapter, ai);
3315
3316 for_each_port(adapter, i) {
3317 u8 hw_addr[6];
3318 struct port_info *p = adap2pinfo(adapter, i);
3319
3320 while (!adapter->params.vpd.port_type[j])
3321 ++j;
3322
3323 p->port_type = &port_types[adapter->params.vpd.port_type[j]];
3324 p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3325 ai->mdio_ops);
3326 mac_prep(&p->mac, adapter, j);
3327 ++j;
3328
3329 /*
3330 * The VPD EEPROM stores the base Ethernet address for the
3331 * card. A port's address is derived from the base by adding
3332 * the port's index to the base's low octet.
3333 */
3334 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3335 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3336
3337 memcpy(adapter->port[i]->dev_addr, hw_addr,
3338 ETH_ALEN);
3339 memcpy(adapter->port[i]->perm_addr, hw_addr,
3340 ETH_ALEN);
3341 init_link_config(&p->link_config, p->port_type->caps);
3342 p->phy.ops->power_down(&p->phy, 1);
3343 if (!(p->port_type->caps & SUPPORTED_IRQ))
3344 adapter->params.linkpoll_period = 10;
3345 }
3346
3347 return 0;
3348}
3349
3350void t3_led_ready(struct adapter *adapter)
3351{
3352 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3353 F_GPIO0_OUT_VAL);
3354}
diff --git a/drivers/net/cxgb3/t3cdev.h b/drivers/net/cxgb3/t3cdev.h
new file mode 100644
index 000000000000..359584e7d582
--- /dev/null
+++ b/drivers/net/cxgb3/t3cdev.h
@@ -0,0 +1,72 @@
1/*
2 * Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef _T3CDEV_H_
33#define _T3CDEV_H_
34
35#include <linux/list.h>
36#include <asm/atomic.h>
37#include <asm/semaphore.h>
38#include <linux/netdevice.h>
39#include <linux/proc_fs.h>
40#include <linux/skbuff.h>
41#include <net/neighbour.h>
42
43#define T3CNAMSIZ 16
44
45/* Get the t3cdev associated with a net_device */
 46#define T3CDEV(netdev) ((struct t3cdev *)(netdev)->priv)
47
48struct cxgb3_client;
49
50enum t3ctype {
51 T3A = 0,
52 T3B
53};
54
55struct t3cdev {
56 char name[T3CNAMSIZ]; /* T3C device name */
57 enum t3ctype type;
58 struct list_head ofld_dev_list; /* for list linking */
59 struct net_device *lldev; /* LL dev associated with T3C messages */
60 struct proc_dir_entry *proc_dir; /* root of proc dir for this T3C */
61 int (*send)(struct t3cdev *dev, struct sk_buff *skb);
62 int (*recv)(struct t3cdev *dev, struct sk_buff **skb, int n);
63 int (*ctl)(struct t3cdev *dev, unsigned int req, void *data);
64 void (*neigh_update)(struct t3cdev *dev, struct neighbour *neigh);
65 void *priv; /* driver private data */
66 void *l2opt; /* optional layer 2 data */
67 void *l3opt; /* optional layer 3 data */
68 void *l4opt; /* optional layer 4 data */
 69	void *ulp;				/* upper-layer protocol (ULP) data */
70};
71
72#endif /* _T3CDEV_H_ */
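The t3cdev structure above is the handle offload clients use to reach the low-level driver through the send/recv/ctl/neigh_update hooks. A hypothetical usage sketch, not part of the patch, showing how a client might hand a pre-built message to the device (message construction and real error handling omitted):

    /* Hypothetical offload-client helper: pass a ready sk_buff to the
     * low-level driver via the t3cdev send hook. */
    static int send_ofld_msg(struct t3cdev *tdev, struct sk_buff *skb)
    {
            if (!tdev->send)
                    return -ENODEV;
            return tdev->send(tdev, skb);
    }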
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
new file mode 100644
index 000000000000..1413ea37f415
--- /dev/null
+++ b/drivers/net/cxgb3/version.h
@@ -0,0 +1,24 @@
1/*****************************************************************************
2 * *
3 * File: *
4 * version.h *
5 * *
6 * Description: *
7 * Chelsio driver version defines. *
8 * *
9 * Copyright (c) 2003 - 2006 Chelsio Communications, Inc. *
10 * All rights reserved. *
11 * *
12 * Maintainers: maintainers@chelsio.com *
13 * *
14 * http://www.chelsio.com *
15 * *
16 ****************************************************************************/
17/* $Date: 2006/10/31 18:57:51 $ $RCSfile: version.h,v $ $Revision: 1.3 $ */
18#ifndef __CHELSIO_VERSION_H
19#define __CHELSIO_VERSION_H
20#define DRV_DESC "Chelsio T3 Network Driver"
21#define DRV_NAME "cxgb3"
22/* Driver version */
23#define DRV_VERSION "1.0"
24#endif /* __CHELSIO_VERSION_H */
diff --git a/drivers/net/cxgb3/vsc8211.c b/drivers/net/cxgb3/vsc8211.c
new file mode 100644
index 000000000000..6a0a815b89c1
--- /dev/null
+++ b/drivers/net/cxgb3/vsc8211.c
@@ -0,0 +1,208 @@
1/*
2 * This file is part of the Chelsio T3 Ethernet driver.
3 *
4 * Copyright (C) 2005-2006 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 */
11
12#include "common.h"
13
14/* VSC8211 PHY specific registers. */
15enum {
16 VSC8211_INTR_ENABLE = 25,
17 VSC8211_INTR_STATUS = 26,
18 VSC8211_AUX_CTRL_STAT = 28,
19};
20
21enum {
22 VSC_INTR_RX_ERR = 1 << 0,
23 VSC_INTR_MS_ERR = 1 << 1, /* master/slave resolution error */
24 VSC_INTR_CABLE = 1 << 2, /* cable impairment */
25 VSC_INTR_FALSE_CARR = 1 << 3, /* false carrier */
26 VSC_INTR_MEDIA_CHG = 1 << 4, /* AMS media change */
27 VSC_INTR_RX_FIFO = 1 << 5, /* Rx FIFO over/underflow */
28 VSC_INTR_TX_FIFO = 1 << 6, /* Tx FIFO over/underflow */
29 VSC_INTR_DESCRAMBL = 1 << 7, /* descrambler lock-lost */
30 VSC_INTR_SYMBOL_ERR = 1 << 8, /* symbol error */
31 VSC_INTR_NEG_DONE = 1 << 10, /* autoneg done */
32 VSC_INTR_NEG_ERR = 1 << 11, /* autoneg error */
33 VSC_INTR_LINK_CHG = 1 << 13, /* link change */
34 VSC_INTR_ENABLE = 1 << 15, /* interrupt enable */
35};
36
37#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \
38 VSC_INTR_NEG_DONE)
39#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \
40 VSC_INTR_ENABLE)
41
42/* PHY specific auxiliary control & status register fields */
43#define S_ACSR_ACTIPHY_TMR 0
44#define M_ACSR_ACTIPHY_TMR 0x3
45#define V_ACSR_ACTIPHY_TMR(x) ((x) << S_ACSR_ACTIPHY_TMR)
46
47#define S_ACSR_SPEED 3
48#define M_ACSR_SPEED 0x3
49#define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED)
50
51#define S_ACSR_DUPLEX 5
52#define F_ACSR_DUPLEX (1 << S_ACSR_DUPLEX)
53
54#define S_ACSR_ACTIPHY 6
55#define F_ACSR_ACTIPHY (1 << S_ACSR_ACTIPHY)
56
57/*
58 * Reset the PHY. This PHY completes reset immediately so we never wait.
59 */
60static int vsc8211_reset(struct cphy *cphy, int wait)
61{
62 return t3_phy_reset(cphy, 0, 0);
63}
64
65static int vsc8211_intr_enable(struct cphy *cphy)
66{
67 return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, INTR_MASK);
68}
69
70static int vsc8211_intr_disable(struct cphy *cphy)
71{
72 return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, 0);
73}
74
75static int vsc8211_intr_clear(struct cphy *cphy)
76{
77 u32 val;
78
79 /* Clear PHY interrupts by reading the register. */
80 return mdio_read(cphy, 0, VSC8211_INTR_STATUS, &val);
81}
82
83static int vsc8211_autoneg_enable(struct cphy *cphy)
84{
85 return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
86 BMCR_ANENABLE | BMCR_ANRESTART);
87}
88
89static int vsc8211_autoneg_restart(struct cphy *cphy)
90{
91 return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
92 BMCR_ANRESTART);
93}
94
95static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok,
96 int *speed, int *duplex, int *fc)
97{
98 unsigned int bmcr, status, lpa, adv;
99 int err, sp = -1, dplx = -1, pause = 0;
100
101 err = mdio_read(cphy, 0, MII_BMCR, &bmcr);
102 if (!err)
103 err = mdio_read(cphy, 0, MII_BMSR, &status);
104 if (err)
105 return err;
106
107 if (link_ok) {
108 /*
109 * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
110 * once more to get the current link state.
111 */
112 if (!(status & BMSR_LSTATUS))
113 err = mdio_read(cphy, 0, MII_BMSR, &status);
114 if (err)
115 return err;
116 *link_ok = (status & BMSR_LSTATUS) != 0;
117 }
118 if (!(bmcr & BMCR_ANENABLE)) {
119 dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
120 if (bmcr & BMCR_SPEED1000)
121 sp = SPEED_1000;
122 else if (bmcr & BMCR_SPEED100)
123 sp = SPEED_100;
124 else
125 sp = SPEED_10;
126 } else if (status & BMSR_ANEGCOMPLETE) {
127 err = mdio_read(cphy, 0, VSC8211_AUX_CTRL_STAT, &status);
128 if (err)
129 return err;
130
131 dplx = (status & F_ACSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
132 sp = G_ACSR_SPEED(status);
133 if (sp == 0)
134 sp = SPEED_10;
135 else if (sp == 1)
136 sp = SPEED_100;
137 else
138 sp = SPEED_1000;
139
140 if (fc && dplx == DUPLEX_FULL) {
141 err = mdio_read(cphy, 0, MII_LPA, &lpa);
142 if (!err)
143 err = mdio_read(cphy, 0, MII_ADVERTISE, &adv);
144 if (err)
145 return err;
146
147 if (lpa & adv & ADVERTISE_PAUSE_CAP)
148 pause = PAUSE_RX | PAUSE_TX;
149 else if ((lpa & ADVERTISE_PAUSE_CAP) &&
150 (lpa & ADVERTISE_PAUSE_ASYM) &&
151 (adv & ADVERTISE_PAUSE_ASYM))
152 pause = PAUSE_TX;
153 else if ((lpa & ADVERTISE_PAUSE_ASYM) &&
154 (adv & ADVERTISE_PAUSE_CAP))
155 pause = PAUSE_RX;
156 }
157 }
158 if (speed)
159 *speed = sp;
160 if (duplex)
161 *duplex = dplx;
162 if (fc)
163 *fc = pause;
164 return 0;
165}
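The flow-control resolution in vsc8211_get_link_status applies the usual pause/asymmetric-pause rules to the local and partner advertisement registers. A minimal standalone sketch of the same truth table (the EX_* constants are stand-ins that mirror the MII ADVERTISE_PAUSE_CAP/ADVERTISE_PAUSE_ASYM bits and the driver's PAUSE_RX/PAUSE_TX flags):

    #define EX_PAUSE_RX   1
    #define EX_PAUSE_TX   2
    #define EX_PAUSE_CAP  0x0400   /* symmetric pause advertised   */
    #define EX_PAUSE_ASYM 0x0800   /* asymmetric pause advertised  */

    /* Resolve pause settings from our advertisement (adv) and the link
     * partner's (lpa), following the same branches as the code above. */
    static int resolve_pause(unsigned int adv, unsigned int lpa)
    {
            if (adv & lpa & EX_PAUSE_CAP)
                    return EX_PAUSE_RX | EX_PAUSE_TX;
            if ((lpa & EX_PAUSE_CAP) && (lpa & EX_PAUSE_ASYM) &&
                (adv & EX_PAUSE_ASYM))
                    return EX_PAUSE_TX;
            if ((lpa & EX_PAUSE_ASYM) && (adv & EX_PAUSE_CAP))
                    return EX_PAUSE_RX;
            return 0;
    }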
166
167static int vsc8211_power_down(struct cphy *cphy, int enable)
168{
169 return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN,
170 enable ? BMCR_PDOWN : 0);
171}
172
173static int vsc8211_intr_handler(struct cphy *cphy)
174{
175 unsigned int cause;
176 int err, cphy_cause = 0;
177
178 err = mdio_read(cphy, 0, VSC8211_INTR_STATUS, &cause);
179 if (err)
180 return err;
181
182 cause &= INTR_MASK;
183 if (cause & CFG_CHG_INTR_MASK)
184 cphy_cause |= cphy_cause_link_change;
185 if (cause & (VSC_INTR_RX_FIFO | VSC_INTR_TX_FIFO))
186 cphy_cause |= cphy_cause_fifo_error;
187 return cphy_cause;
188}
189
190static struct cphy_ops vsc8211_ops = {
191 .reset = vsc8211_reset,
192 .intr_enable = vsc8211_intr_enable,
193 .intr_disable = vsc8211_intr_disable,
194 .intr_clear = vsc8211_intr_clear,
195 .intr_handler = vsc8211_intr_handler,
196 .autoneg_enable = vsc8211_autoneg_enable,
197 .autoneg_restart = vsc8211_autoneg_restart,
198 .advertise = t3_phy_advertise,
199 .set_speed_duplex = t3_set_phy_speed_duplex,
200 .get_link_status = vsc8211_get_link_status,
201 .power_down = vsc8211_power_down,
202};
203
204void t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
205 int phy_addr, const struct mdio_ops *mdio_ops)
206{
207 cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops);
208}
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
new file mode 100644
index 000000000000..0f209c776929
--- /dev/null
+++ b/drivers/net/cxgb3/xgmac.c
@@ -0,0 +1,389 @@
1/*
2 * This file is part of the Chelsio T3 Ethernet driver.
3 *
4 * Copyright (C) 2005-2006 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 */
11
12#include "common.h"
13#include "regs.h"
14
15/*
 16 * # of exact address filters.  The first one is used for the station address;
17 * the rest are available for multicast addresses.
18 */
19#define EXACT_ADDR_FILTERS 8
20
21static inline int macidx(const struct cmac *mac)
22{
23 return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
24}
25
26static void xaui_serdes_reset(struct cmac *mac)
27{
28 static const unsigned int clear[] = {
29 F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
30 F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
31 };
32
33 int i;
34 struct adapter *adap = mac->adapter;
35 u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;
36
37 t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
38 F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
39 F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
40 F_RESETPLL23 | F_RESETPLL01);
41 t3_read_reg(adap, ctrl);
42 udelay(15);
43
44 for (i = 0; i < ARRAY_SIZE(clear); i++) {
45 t3_set_reg_field(adap, ctrl, clear[i], 0);
46 udelay(15);
47 }
48}
49
50void t3b_pcs_reset(struct cmac *mac)
51{
52 t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
53 F_PCS_RESET_, 0);
54 udelay(20);
55 t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
56 F_PCS_RESET_);
57}
58
59int t3_mac_reset(struct cmac *mac)
60{
61 static const struct addr_val_pair mac_reset_avp[] = {
62 {A_XGM_TX_CTRL, 0},
63 {A_XGM_RX_CTRL, 0},
64 {A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
65 F_RMFCS | F_ENJUMBO | F_ENHASHMCAST},
66 {A_XGM_RX_HASH_LOW, 0},
67 {A_XGM_RX_HASH_HIGH, 0},
68 {A_XGM_RX_EXACT_MATCH_LOW_1, 0},
69 {A_XGM_RX_EXACT_MATCH_LOW_2, 0},
70 {A_XGM_RX_EXACT_MATCH_LOW_3, 0},
71 {A_XGM_RX_EXACT_MATCH_LOW_4, 0},
72 {A_XGM_RX_EXACT_MATCH_LOW_5, 0},
73 {A_XGM_RX_EXACT_MATCH_LOW_6, 0},
74 {A_XGM_RX_EXACT_MATCH_LOW_7, 0},
75 {A_XGM_RX_EXACT_MATCH_LOW_8, 0},
76 {A_XGM_STAT_CTRL, F_CLRSTATS}
77 };
78 u32 val;
79 struct adapter *adap = mac->adapter;
80 unsigned int oft = mac->offset;
81
82 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
83 t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
84
85 t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
86 t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
87 F_RXSTRFRWRD | F_DISERRFRAMES,
88 uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
89
90 if (uses_xaui(adap)) {
91 if (adap->params.rev == 0) {
92 t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
93 F_RXENABLE | F_TXENABLE);
94 if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
95 F_CMULOCK, 1, 5, 2)) {
96 CH_ERR(adap,
97 "MAC %d XAUI SERDES CMU lock failed\n",
98 macidx(mac));
99 return -1;
100 }
101 t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
102 F_SERDESRESET_);
103 } else
104 xaui_serdes_reset(mac);
105 }
106
107 if (adap->params.rev > 0)
108 t3_write_reg(adap, A_XGM_PAUSE_TIMER + oft, 0xf000);
109
110 val = F_MAC_RESET_;
111 if (is_10G(adap))
112 val |= F_PCS_RESET_;
113 else if (uses_xaui(adap))
114 val |= F_PCS_RESET_ | F_XG2G_RESET_;
115 else
116 val |= F_RGMII_RESET_ | F_XG2G_RESET_;
117 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
118 t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
119 if ((val & F_PCS_RESET_) && adap->params.rev) {
120 msleep(1);
121 t3b_pcs_reset(mac);
122 }
123
124 memset(&mac->stats, 0, sizeof(mac->stats));
125 return 0;
126}
127
128/*
129 * Set the exact match register 'idx' to recognize the given Ethernet address.
130 */
131static void set_addr_filter(struct cmac *mac, int idx, const u8 * addr)
132{
133 u32 addr_lo, addr_hi;
134 unsigned int oft = mac->offset + idx * 8;
135
136 addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
137 addr_hi = (addr[5] << 8) | addr[4];
138
139 t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
140 t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
141}
142
143/* Set one of the station's unicast MAC addresses. */
144int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
145{
146 if (idx >= mac->nucast)
147 return -EINVAL;
148 set_addr_filter(mac, idx, addr);
149 return 0;
150}
151
152/*
153 * Specify the number of exact address filters that should be reserved for
154 * unicast addresses. Caller should reload the unicast and multicast addresses
155 * after calling this.
156 */
157int t3_mac_set_num_ucast(struct cmac *mac, int n)
158{
159 if (n > EXACT_ADDR_FILTERS)
160 return -EINVAL;
161 mac->nucast = n;
162 return 0;
163}
164
165/* Calculate the RX hash filter index of an Ethernet address */
166static int hash_hw_addr(const u8 * addr)
167{
168 int hash = 0, octet, bit, i = 0, c;
169
170 for (octet = 0; octet < 6; ++octet)
171 for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
172 hash ^= (c & 1) << i;
173 if (++i == 6)
174 i = 0;
175 }
176 return hash;
177}
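The hash above folds all 48 address bits into 6 bits by XOR, cycling through bit positions 0..5, so the result is always in 0..63 and indexes the 64-bit hash filter written to A_XGM_RX_HASH_LOW/HIGH below. As a worked example, the broadcast address ff:ff:ff:ff:ff:ff contributes eight 1-bits to each position, which cancel under XOR, giving hash 0. A small usage sketch (add_to_hash is a hypothetical helper mirroring the logic in t3_mac_set_rx_mode below):

    /* Select the hash-table bit for a multicast address that did not fit
     * in an exact-match filter. */
    static void add_to_hash(const unsigned char *addr, u32 *lo, u32 *hi)
    {
            int hash = hash_hw_addr(addr);   /* 0..63 */

            if (hash < 32)
                    *lo |= 1u << hash;
            else
                    *hi |= 1u << (hash - 32);
    }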
178
179int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm)
180{
181 u32 val, hash_lo, hash_hi;
182 struct adapter *adap = mac->adapter;
183 unsigned int oft = mac->offset;
184
185 val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
186 if (rm->dev->flags & IFF_PROMISC)
187 val |= F_COPYALLFRAMES;
188 t3_write_reg(adap, A_XGM_RX_CFG + oft, val);
189
190 if (rm->dev->flags & IFF_ALLMULTI)
191 hash_lo = hash_hi = 0xffffffff;
192 else {
193 u8 *addr;
194 int exact_addr_idx = mac->nucast;
195
196 hash_lo = hash_hi = 0;
197 while ((addr = t3_get_next_mcaddr(rm)))
198 if (exact_addr_idx < EXACT_ADDR_FILTERS)
199 set_addr_filter(mac, exact_addr_idx++, addr);
200 else {
201 int hash = hash_hw_addr(addr);
202
203 if (hash < 32)
204 hash_lo |= (1 << hash);
205 else
206 hash_hi |= (1 << (hash - 32));
207 }
208 }
209
210 t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
211 t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
212 return 0;
213}
214
215int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
216{
217 int hwm, lwm;
218 unsigned int thres, v;
219 struct adapter *adap = mac->adapter;
220
221 /*
 222	 * MAX_FRAME_SIZE includes header + FCS, mtu doesn't.  The HW max
223 * packet size register includes header, but not FCS.
224 */
225 mtu += 14;
226 if (mtu > MAX_FRAME_SIZE - 4)
227 return -EINVAL;
228 t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
229
230 /*
231 * Adjust the PAUSE frame watermarks. We always set the LWM, and the
232 * HWM only if flow-control is enabled.
233 */
234 hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, MAC_RXFIFO_SIZE / 2U);
235 hwm = min(hwm, 3 * MAC_RXFIFO_SIZE / 4 + 1024);
236 lwm = hwm - 1024;
237 v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
238 v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
239 v |= V_RXFIFOPAUSELWM(lwm / 8);
240 if (G_RXFIFOPAUSEHWM(v))
241 v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
242 V_RXFIFOPAUSEHWM(hwm / 8);
243 t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);
244
245 /* Adjust the TX FIFO threshold based on the MTU */
246 thres = (adap->params.vpd.cclk * 1000) / 15625;
247 thres = (thres * mtu) / 1000;
248 if (is_10G(adap))
249 thres /= 10;
250 thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
251 thres = max(thres, 8U); /* need at least 8 */
252 t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
253 V_TXFIFOTHRESH(M_TXFIFOTHRESH), V_TXFIFOTHRESH(thres));
254 return 0;
255}
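To make the watermark arithmetic in t3_mac_set_mtu concrete: a 1500-byte MTU becomes a 1514-byte frame, and with a hypothetical 32 KB RX FIFO the high watermark of 32768 - 3*1514 = 28226 bytes is capped at 3*32768/4 + 1024 = 25600 bytes, with the low watermark 1 KB below at 24576. A standalone sketch (the real MAC_RXFIFO_SIZE comes from the driver headers; 32768 here is an assumption for illustration only):

    #include <stdio.h>

    #define EX_RXFIFO_SIZE 32768u   /* assumed RX FIFO size, illustration only */

    int main(void)
    {
            unsigned int mtu = 1500 + 14;                 /* payload + Ethernet header */
            unsigned int hwm = EX_RXFIFO_SIZE - 3 * mtu;  /* leave room for ~3 frames  */

            if (hwm < EX_RXFIFO_SIZE / 2)                 /* never below half the FIFO */
                    hwm = EX_RXFIFO_SIZE / 2;
            if (hwm > 3 * EX_RXFIFO_SIZE / 4 + 1024)      /* cap at 3/4 FIFO + 1 KB    */
                    hwm = 3 * EX_RXFIFO_SIZE / 4 + 1024;

            printf("HWM %u bytes, LWM %u bytes (registers hold these / 8)\n",
                   hwm, hwm - 1024);
            return 0;
    }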
256
257int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
258{
259 u32 val;
260 struct adapter *adap = mac->adapter;
261 unsigned int oft = mac->offset;
262
263 if (duplex >= 0 && duplex != DUPLEX_FULL)
264 return -EINVAL;
265 if (speed >= 0) {
266 if (speed == SPEED_10)
267 val = V_PORTSPEED(0);
268 else if (speed == SPEED_100)
269 val = V_PORTSPEED(1);
270 else if (speed == SPEED_1000)
271 val = V_PORTSPEED(2);
272 else if (speed == SPEED_10000)
273 val = V_PORTSPEED(3);
274 else
275 return -EINVAL;
276
277 t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
278 V_PORTSPEED(M_PORTSPEED), val);
279 }
280
281 val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
282 val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
283 if (fc & PAUSE_TX)
284 val |= V_RXFIFOPAUSEHWM(G_RXFIFOPAUSELWM(val) + 128); /* +1KB */
285 t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
286
287 t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
288 (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
289 return 0;
290}
291
292int t3_mac_enable(struct cmac *mac, int which)
293{
294 int idx = macidx(mac);
295 struct adapter *adap = mac->adapter;
296 unsigned int oft = mac->offset;
297
298 if (which & MAC_DIRECTION_TX) {
299 t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
300 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
301 t3_write_reg(adap, A_TP_PIO_DATA, 0xbf000001);
302 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
303 t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
304 }
305 if (which & MAC_DIRECTION_RX)
306 t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
307 return 0;
308}
309
310int t3_mac_disable(struct cmac *mac, int which)
311{
312 int idx = macidx(mac);
313 struct adapter *adap = mac->adapter;
314
315 if (which & MAC_DIRECTION_TX) {
316 t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
317 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
318 t3_write_reg(adap, A_TP_PIO_DATA, 0xc000001f);
319 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
320 t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 0);
321 }
322 if (which & MAC_DIRECTION_RX)
323 t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
324 return 0;
325}
326
327/*
328 * This function is called periodically to accumulate the current values of the
329 * RMON counters into the port statistics. Since the packet counters are only
 330 * 32 bits, they can overflow in ~286 seconds at 10G, so the function should be
 331 * called more frequently than that.  The byte counters are 45 bits wide and
 332 * would overflow in ~7.8 hours.
333 */
334const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
335{
336#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
337#define RMON_UPDATE(mac, name, reg) \
338 (mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
339#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
340 (mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
341 ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)
342
343 u32 v, lo;
344
345 RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
346 RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
347 RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
348 RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
349 RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
350 RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
351 RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
352 RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
353 RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);
354
355 RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);
356 mac->stats.rx_too_long += RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
357
358 RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
359 RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
360 RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
361 RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
362 RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
363 RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
364 RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);
365
366 RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
367 RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
368 RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
369 RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
370 RMON_UPDATE(mac, tx_pause, TX_PAUSE);
 371	/* This counts error frames in general (bad FCS, underrun, etc.). */
372 RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);
373
374 RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
375 RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
376 RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
377 RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
378 RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
379 RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
380 RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);
381
382 /* The next stat isn't clear-on-read. */
383 t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
384 v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
385 lo = (u32) mac->stats.rx_cong_drops;
386 mac->stats.rx_cong_drops += (u64) (v - lo);
387
388 return &mac->stats;
389}
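The overflow estimates in the comment above t3_mac_update_stats can be reproduced with back-of-the-envelope arithmetic: at 10 Gb/s a minimum-size frame occupies 64 + 8 (preamble) + 12 (inter-frame gap) = 84 bytes on the wire, about 14.9M packets/s, so a 32-bit packet counter wraps in roughly 2^32 / 14.9e6, about 288 s, in line with the ~286 s figure; the 45-bit byte counters wrap in 2^45 / 1.25e9 bytes/s, about 28100 s, roughly 7.8 hours. A minimal standalone check under those assumptions:

    #include <stdio.h>

    int main(void)
    {
            double line_rate = 10e9;                     /* bits per second            */
            double min_wire_bits = (64 + 8 + 12) * 8;    /* min frame + preamble + IFG */
            double pps = line_rate / min_wire_bits;      /* ~14.88M packets per second */
            double bytes_per_sec = line_rate / 8;

            printf("32-bit packet counter wraps in ~%.0f s\n", 4294967296.0 / pps);
            printf("45-bit byte counter wraps in ~%.1f h\n",
                   35184372088832.0 / bytes_per_sec / 3600);
            return 0;
    }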