author    Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-22 10:38:37 -0500
committer Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-22 10:38:37 -0500
commit    fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
tree      a57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/net/cxgb3/adapter.h
parent    8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)

Diffstat (limited to 'drivers/net/cxgb3/adapter.h')
 -rw-r--r--  drivers/net/cxgb3/adapter.h  334
 1 files changed, 334 insertions, 0 deletions

diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
new file mode 100644
index 00000000000..8b395b53733
--- /dev/null
+++ b/drivers/net/cxgb3/adapter.h
@@ -0,0 +1,334 @@
/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/* This file should not be included directly. Include common.h instead. */

#ifndef __T3_ADAPTER_H__
#define __T3_ADAPTER_H__

#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/mutex.h>
#include <linux/bitops.h>
#include "t3cdev.h"
#include <asm/io.h>

struct adapter;
struct sge_qset;
struct port_info;

enum mac_idx_types {
        LAN_MAC_IDX = 0,
        SAN_MAC_IDX,

        MAX_MAC_IDX
};

struct iscsi_config {
        __u8 mac_addr[ETH_ALEN];
        __u32 flags;
        int (*send)(struct port_info *pi, struct sk_buff **skb);
        int (*recv)(struct port_info *pi, struct sk_buff *skb);
};

struct port_info {
        struct adapter *adapter;
        struct sge_qset *qs;
        u8 port_id;
        u8 nqsets;
        u8 first_qset;
        struct cphy phy;
        struct cmac mac;
        struct link_config link_config;
        struct net_device_stats netstats;
        int activity;
        __be32 iscsi_ipv4addr;
        struct iscsi_config iscsic;

        int link_fault;              /* link fault was detected */
};

enum {                               /* adapter flags */
        FULL_INIT_DONE = (1 << 0),
        USING_MSI      = (1 << 1),
        USING_MSIX     = (1 << 2),
        QUEUES_BOUND   = (1 << 3),
        TP_PARITY_INIT = (1 << 4),
        NAPI_INIT      = (1 << 5),
};

struct fl_pg_chunk {
        struct page *page;
        void *va;
        unsigned int offset;
        unsigned long *p_cnt;
        dma_addr_t mapping;
};

struct rx_desc;
struct rx_sw_desc;

struct sge_fl {                      /* SGE per free-buffer list state */
        unsigned int buf_size;       /* size of each Rx buffer */
        unsigned int credits;        /* # of available Rx buffers */
        unsigned int pend_cred;      /* new buffers since last FL DB ring */
        unsigned int size;           /* capacity of free list */
        unsigned int cidx;           /* consumer index */
        unsigned int pidx;           /* producer index */
        unsigned int gen;            /* free list generation */
        struct fl_pg_chunk pg_chunk; /* page chunk cache */
        unsigned int use_pages;      /* whether FL uses pages or sk_buffs */
        unsigned int order;          /* order of page allocations */
        unsigned int alloc_size;     /* size of allocated buffer */
        struct rx_desc *desc;        /* address of HW Rx descriptor ring */
        struct rx_sw_desc *sdesc;    /* address of SW Rx descriptor ring */
        dma_addr_t phys_addr;        /* physical address of HW ring start */
        unsigned int cntxt_id;       /* SGE context id for the free list */
        unsigned long empty;         /* # of times queue ran out of buffers */
        unsigned long alloc_failed;  /* # of times buffer allocation failed */
};

/*
 * Bundle size for grouping offload RX packets for delivery to the stack.
 * Don't make this too big as we do prefetch on each packet in a bundle.
 */
# define RX_BUNDLE_SIZE 8

struct rsp_desc;

struct sge_rspq {                    /* state for an SGE response queue */
        unsigned int credits;        /* # of pending response credits */
        unsigned int size;           /* capacity of response queue */
        unsigned int cidx;           /* consumer index */
        unsigned int gen;            /* current generation bit */
        unsigned int polling;        /* is the queue serviced through NAPI? */
        unsigned int holdoff_tmr;    /* interrupt holdoff timer in 100ns */
        unsigned int next_holdoff;   /* holdoff time for next interrupt */
        unsigned int rx_recycle_buf; /* whether recycling occurred
                                        within current sop-eop */
        struct rsp_desc *desc;       /* address of HW response ring */
        dma_addr_t phys_addr;        /* physical address of the ring */
        unsigned int cntxt_id;       /* SGE context id for the response q */
        spinlock_t lock;             /* guards response processing */
        struct sk_buff_head rx_queue; /* offload packet receive queue */
        struct sk_buff *pg_skb;      /* used to build frag list in napi handler */

        unsigned long offload_pkts;
        unsigned long offload_bundles;
        unsigned long eth_pkts;      /* # of ethernet packets */
        unsigned long pure_rsps;     /* # of pure (non-data) responses */
        unsigned long imm_data;      /* responses with immediate data */
        unsigned long rx_drops;      /* # of packets dropped due to no mem */
        unsigned long async_notif;   /* # of asynchronous notification events */
        unsigned long empty;         /* # of times queue ran out of credits */
        unsigned long nomem;         /* # of responses deferred due to no mem */
        unsigned long unhandled_irqs; /* # of spurious intrs */
        unsigned long starved;
        unsigned long restarted;
};

struct tx_desc;
struct tx_sw_desc;

struct sge_txq {                     /* state for an SGE Tx queue */
        unsigned long flags;         /* HW DMA fetch status */
        unsigned int in_use;         /* # of in-use Tx descriptors */
        unsigned int size;           /* # of descriptors */
        unsigned int processed;      /* total # of descs HW has processed */
        unsigned int cleaned;        /* total # of descs SW has reclaimed */
        unsigned int stop_thres;     /* SW TX queue suspend threshold */
        unsigned int cidx;           /* consumer index */
        unsigned int pidx;           /* producer index */
        unsigned int gen;            /* current value of generation bit */
        unsigned int unacked;        /* Tx descriptors used since last COMPL */
        struct tx_desc *desc;        /* address of HW Tx descriptor ring */
        struct tx_sw_desc *sdesc;    /* address of SW Tx descriptor ring */
        spinlock_t lock;             /* guards enqueueing of new packets */
        unsigned int token;          /* WR token */
        dma_addr_t phys_addr;        /* physical address of the ring */
        struct sk_buff_head sendq;   /* List of backpressured offload packets */
        struct tasklet_struct qresume_tsk; /* restarts the queue */
        unsigned int cntxt_id;       /* SGE context id for the Tx q */
        unsigned long stops;         /* # of times q has been stopped */
        unsigned long restarts;      /* # of queue restarts */
};

enum {                               /* per port SGE statistics */
        SGE_PSTAT_TSO,               /* # of TSO requests */
        SGE_PSTAT_RX_CSUM_GOOD,      /* # of successful RX csum offloads */
        SGE_PSTAT_TX_CSUM,           /* # of TX checksum offloads */
        SGE_PSTAT_VLANEX,            /* # of VLAN tag extractions */
        SGE_PSTAT_VLANINS,           /* # of VLAN tag insertions */

        SGE_PSTAT_MAX                /* must be last */
};

struct napi_gro_fraginfo;

struct sge_qset {                    /* an SGE queue set */
        struct adapter *adap;
        struct napi_struct napi;
        struct sge_rspq rspq;
        struct sge_fl fl[SGE_RXQ_PER_SET];
        struct sge_txq txq[SGE_TXQ_PER_SET];
        int nomem;
        void *lro_va;
        struct net_device *netdev;
        struct netdev_queue *tx_q;   /* associated netdev TX queue */
        unsigned long txq_stopped;   /* which Tx queues are stopped */
        struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
        struct timer_list rx_reclaim_timer; /* reclaims RX buffers */
        unsigned long port_stats[SGE_PSTAT_MAX];
} ____cacheline_aligned;

struct sge {
        struct sge_qset qs[SGE_QSETS];
        spinlock_t reg_lock;         /* guards non-atomic SGE registers (eg context) */
};

struct adapter {
        struct t3cdev tdev;
        struct list_head adapter_list;
        void __iomem *regs;
        struct pci_dev *pdev;
        unsigned long registered_device_map;
        unsigned long open_device_map;
        unsigned long flags;

        const char *name;
        int msg_enable;
        unsigned int mmio_len;

        struct adapter_params params;
        unsigned int slow_intr_mask;
        unsigned long irq_stats[IRQ_NUM_STATS];

        int msix_nvectors;
        struct {
                unsigned short vec;
                char desc[22];
        } msix_info[SGE_QSETS + 1];

        /* T3 modules */
        struct sge sge;
        struct mc7 pmrx;
        struct mc7 pmtx;
        struct mc7 cm;
        struct mc5 mc5;

        struct net_device *port[MAX_NPORTS];
        unsigned int check_task_cnt;
        struct delayed_work adap_check_task;
        struct work_struct ext_intr_handler_task;
        struct work_struct fatal_error_handler_task;
        struct work_struct link_fault_handler_task;

        struct work_struct db_full_task;
        struct work_struct db_empty_task;
        struct work_struct db_drop_task;

        struct dentry *debugfs_root;

        struct mutex mdio_lock;
        spinlock_t stats_lock;
        spinlock_t work_lock;

        struct sk_buff *nofail_skb;
};
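
/*
 * Illustrative sketch, not part of the original adapter.h: the adapter flag
 * bits defined earlier (FULL_INIT_DONE, USING_MSI, USING_MSIX, ...) are bit
 * masks meant to be tested against the "flags" word of struct adapter.  The
 * helper name below is hypothetical and only shows the usage pattern.
 */
static inline int example_adapter_uses_msix(const struct adapter *adap)
{
        /* non-zero if the adapter was brought up with MSI-X vectors */
        return (adap->flags & USING_MSIX) != 0;
}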

static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr)
{
        u32 val = readl(adapter->regs + reg_addr);

        CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr, val);
        return val;
}

static inline void t3_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
{
        CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr, val);
        writel(val, adapter->regs + reg_addr);
}
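
/*
 * Illustrative sketch, not part of the original adapter.h: a typical
 * read-modify-write of a chip register built on the t3_read_reg() and
 * t3_write_reg() accessors above.  The helper name is hypothetical.
 */
static inline void example_t3_set_reg_bits(struct adapter *adapter,
                                           u32 reg_addr, u32 mask)
{
        u32 val = t3_read_reg(adapter, reg_addr);

        t3_write_reg(adapter, reg_addr, val | mask);
}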

static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
{
        return netdev_priv(adap->port[idx]);
}

static inline int phy2portid(struct cphy *phy)
{
        struct adapter *adap = phy->adapter;
        struct port_info *port0 = adap2pinfo(adap, 0);

        return &port0->phy == phy ? 0 : 1;
}

#define OFFLOAD_DEVMAP_BIT 15

#define tdev2adap(d) container_of(d, struct adapter, tdev)

static inline int offload_running(struct adapter *adapter)
{
        return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
}
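
/*
 * Illustrative sketch, not part of the original adapter.h: offload_running()
 * reports whether the offload device bit is set in open_device_map, so a
 * caller might gate offload transmits on it.  The helper below is
 * hypothetical; t3_offload_tx() is only declared further down in this file.
 */
static inline int example_try_offload_tx(struct adapter *adapter,
                                         struct sk_buff *skb)
{
        if (!offload_running(adapter))
                return -ENODEV;      /* offload path is not active */

        return t3_offload_tx(&adapter->tdev, skb);
}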

int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb);

void t3_os_ext_intr_handler(struct adapter *adapter);
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_status,
                        int speed, int duplex, int fc);
void t3_os_phymod_changed(struct adapter *adap, int port_id);
void t3_os_link_fault(struct adapter *adapter, int port_id, int state);
void t3_os_link_fault_handler(struct adapter *adapter, int port_id);

void t3_sge_start(struct adapter *adap);
void t3_sge_stop(struct adapter *adap);
void t3_start_sge_timers(struct adapter *adap);
void t3_stop_sge_timers(struct adapter *adap);
void t3_free_sge_resources(struct adapter *adap);
void t3_sge_err_intr_handler(struct adapter *adapter);
irq_handler_t t3_intr_handler(struct adapter *adap, int polling);
netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev);
int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
                      int irq_vec_idx, const struct qset_params *p,
                      int ntxq, struct net_device *dev,
                      struct netdev_queue *netdevq);
extern struct workqueue_struct *cxgb3_wq;

int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size);

#endif                               /* __T3_ADAPTER_H__ */