Diffstat (limited to 'drivers/net/chelsio/sge.c')
 -rw-r--r--  drivers/net/chelsio/sge.c  1451
 1 file changed, 1451 insertions, 0 deletions
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
new file mode 100644
index 000000000000..bcf8b1e939b0
--- /dev/null
+++ b/drivers/net/chelsio/sge.c
@@ -0,0 +1,1451 @@
/*****************************************************************************
 *                                                                           *
 * File: sge.c                                                               *
 * $Revision: 1.13 $                                                         *
 * $Date: 2005/03/23 07:41:27 $                                              *
 * Description:                                                              *
 *  DMA engine.                                                              *
 *  part of the Chelsio 10Gb Ethernet Driver.                                *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify      *
 * it under the terms of the GNU General Public License, version 2, as       *
 * published by the Free Software Foundation.                                *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along   *
 * with this program; if not, write to the Free Software Foundation, Inc.,   *
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.                  *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
 *          Tina Yang               <tainay@chelsio.com>                     *
 *          Felix Marti             <felix@chelsio.com>                      *
 *          Scott Bardone           <sbardone@chelsio.com>                   *
 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
 *          Frank DiMambro          <frank@chelsio.com>                      *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#include "common.h"

#include <linux/config.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>

#include "cpl5_cmd.h"
#include "sge.h"
#include "regs.h"
#include "espi.h"

#include <linux/tcp.h>

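/*
 * Tuning constants: command queue and free list sizes, the threshold at
 * which free lists are refilled, and the bucket/latency parameters used
 * by the adaptive interrupt timer further below.
 */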
#define SGE_CMDQ_N                2
#define SGE_FREELQ_N              2
#define SGE_CMDQ0_E_N             512
#define SGE_CMDQ1_E_N             128
#define SGE_FREEL_SIZE            4096
#define SGE_JUMBO_FREEL_SIZE      512
#define SGE_FREEL_REFILL_THRESH   16
#define SGE_RESPQ_E_N             1024
#define SGE_INTR_BUCKETSIZE       100
#define SGE_INTR_LATBUCKETS       5
#define SGE_INTR_MAXBUCKETS       11
#define SGE_INTRTIMER0            1
#define SGE_INTRTIMER1            50
#define SGE_INTRTIMER_NRES        10000
#define SGE_RX_COPY_THRESHOLD     256
#define SGE_RX_SM_BUF_SIZE        1536

#define SGE_RESPQ_REPLENISH_THRES ((3 * SGE_RESPQ_E_N) / 4)

#define SGE_RX_OFFSET 2
#ifndef NET_IP_ALIGN
# define NET_IP_ALIGN SGE_RX_OFFSET
#endif
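/*
 * Except on T1B, the SGE itself inserts sge->rx_pkt_pad bytes in front of
 * each received frame so that the IP header lands 4-byte aligned;
 * NET_IP_ALIGN is defined here only for older kernels that lack it.
 */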

/*
 * Memory Mapped HW Command, Freelist and Response Queue Descriptors
 */
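/*
 * Bitfield layout within a word depends on the host's bit endianness, so
 * each descriptor is declared twice, once per bitfield order; both
 * variants describe the same wire format.
 */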
#if defined(__BIG_ENDIAN_BITFIELD)
struct cmdQ_e {
        u32 AddrLow;
        u32 GenerationBit : 1;
        u32 BufferLength : 31;
        u32 RespQueueSelector : 4;
        u32 ResponseTokens : 12;
        u32 CmdId : 8;
        u32 Reserved : 3;
        u32 TokenValid : 1;
        u32 Eop : 1;
        u32 Sop : 1;
        u32 DataValid : 1;
        u32 GenerationBit2 : 1;
        u32 AddrHigh;
};

struct freelQ_e {
        u32 AddrLow;
        u32 GenerationBit : 1;
        u32 BufferLength : 31;
        u32 Reserved : 31;
        u32 GenerationBit2 : 1;
        u32 AddrHigh;
};

struct respQ_e {
        u32 Qsleeping : 4;
        u32 Cmdq1CreditReturn : 5;
        u32 Cmdq1DmaComplete : 5;
        u32 Cmdq0CreditReturn : 5;
        u32 Cmdq0DmaComplete : 5;
        u32 FreelistQid : 2;
        u32 CreditValid : 1;
        u32 DataValid : 1;
        u32 Offload : 1;
        u32 Eop : 1;
        u32 Sop : 1;
        u32 GenerationBit : 1;
        u32 BufferLength;
};

#elif defined(__LITTLE_ENDIAN_BITFIELD)
struct cmdQ_e {
        u32 BufferLength : 31;
        u32 GenerationBit : 1;
        u32 AddrLow;
        u32 AddrHigh;
        u32 GenerationBit2 : 1;
        u32 DataValid : 1;
        u32 Sop : 1;
        u32 Eop : 1;
        u32 TokenValid : 1;
        u32 Reserved : 3;
        u32 CmdId : 8;
        u32 ResponseTokens : 12;
        u32 RespQueueSelector : 4;
};

struct freelQ_e {
        u32 BufferLength : 31;
        u32 GenerationBit : 1;
        u32 AddrLow;
        u32 AddrHigh;
        u32 GenerationBit2 : 1;
        u32 Reserved : 31;
};

struct respQ_e {
        u32 BufferLength;
        u32 GenerationBit : 1;
        u32 Sop : 1;
        u32 Eop : 1;
        u32 Offload : 1;
        u32 DataValid : 1;
        u32 CreditValid : 1;
        u32 FreelistQid : 2;
        u32 Cmdq0DmaComplete : 5;
        u32 Cmdq0CreditReturn : 5;
        u32 Cmdq1DmaComplete : 5;
        u32 Cmdq1CreditReturn : 5;
        u32 Qsleeping : 4;
};
#endif

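/*
 * Every HW descriptor carries the queue's generation bit twice, in its
 * first and last word, so a partially written descriptor can be detected.
 * Software flips its genbit each time a producer index wraps; a descriptor
 * is valid only while its generation bits match the queue's genbit.
 */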
/*
 * SW Context Command and Freelist Queue Descriptors
 */
struct cmdQ_ce {
        struct sk_buff *skb;
        DECLARE_PCI_UNMAP_ADDR(dma_addr);
        DECLARE_PCI_UNMAP_LEN(dma_len);
        unsigned int single;
};

struct freelQ_ce {
        struct sk_buff *skb;
        DECLARE_PCI_UNMAP_ADDR(dma_addr);
        DECLARE_PCI_UNMAP_LEN(dma_len);
};

/*
 * SW Command, Freelist and Response Queue
 */
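/*
 * cmdQ.credits, .asleep and .pio_pidx are atomic_t because the TX path
 * and the interrupt handler both update them without holding Qlock.
 */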
struct cmdQ {
        atomic_t asleep;            /* HW DMA Fetch status */
        atomic_t credits;           /* # available descriptors for TX */
        atomic_t pio_pidx;          /* Variable updated on Doorbell */
        u16 entries_n;              /* # descriptors for TX */
        u16 pidx;                   /* producer index (SW) */
        u16 cidx;                   /* consumer index (HW) */
        u8 genbit;                  /* current generation (=valid) bit */
        struct cmdQ_e *entries;     /* HW command descriptor Q */
        struct cmdQ_ce *centries;   /* SW command context descriptor Q */
        spinlock_t Qlock;           /* Lock to protect cmdQ enqueuing */
        dma_addr_t dma_addr;        /* DMA addr HW command descriptor Q */
};

struct freelQ {
        unsigned int credits;       /* # of available RX buffers */
        unsigned int entries_n;     /* free list capacity */
        u16 pidx;                   /* producer index (SW) */
        u16 cidx;                   /* consumer index (HW) */
        u16 rx_buffer_size;         /* Buffer size on this free list */
        u16 dma_offset;             /* DMA offset to align IP headers */
        u8 genbit;                  /* current generation (=valid) bit */
        struct freelQ_e *entries;   /* HW freelist descriptor Q */
        struct freelQ_ce *centries; /* SW freelist context descriptor Q */
        dma_addr_t dma_addr;        /* DMA addr HW freelist descriptor Q */
};

struct respQ {
        u16 credits;                /* # of available respQ descriptors */
        u16 credits_pend;           /* # of not yet returned descriptors */
        u16 entries_n;              /* # of response Q descriptors */
        u16 pidx;                   /* producer index (HW) */
        u16 cidx;                   /* consumer index (SW) */
        u8 genbit;                  /* current generation (=valid) bit */
        struct respQ_e *entries;    /* HW response descriptor Q */
        dma_addr_t dma_addr;        /* DMA addr HW response descriptor Q */
};

/*
 * Main SGE data structure
 *
 * Interrupts are handled by a single CPU and it is likely that on an MP
 * system the application is migrated to another CPU. In that scenario, we
 * try to separate the RX (in irq context) and TX state in order to decrease
 * memory contention.
 */
struct sge {
        struct adapter *adapter;        /* adapter backpointer */
        struct freelQ freelQ[SGE_FREELQ_N]; /* freelist Q(s) */
        struct respQ respQ;             /* response Q instantiation */
        unsigned int rx_pkt_pad;        /* RX padding for L2 packets */
        unsigned int jumbo_fl;          /* jumbo freelist Q index */
        u32 intrtimer[SGE_INTR_MAXBUCKETS]; /* interrupt latency timer values */
        u32 currIndex;                  /* current index into intrtimer[] */
        u32 intrtimer_nres;             /* no-resource interrupt timer value */
        u32 sge_control;                /* shadow content of sge control reg */
        struct sge_intr_counts intr_cnt;
        struct timer_list ptimer;
        struct sk_buff *pskb;
        u32 ptimeout;
        struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned; /* command Q(s) */
};

static unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
                              unsigned int qid);

/*
 * PIO to indicate that memory mapped Q contains valid descriptor(s).
 */
static inline void doorbell_pio(struct sge *sge, u32 val)
{
        wmb();
        t1_write_reg_4(sge->adapter, A_SG_DOORBELL, val);
}

/*
 * Disables the DMA engine.
 */
void t1_sge_stop(struct sge *sge)
{
        t1_write_reg_4(sge->adapter, A_SG_CONTROL, 0);
        t1_read_reg_4(sge->adapter, A_SG_CONTROL); /* flush write */
        if (is_T2(sge->adapter))
                del_timer_sync(&sge->ptimer);
}

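/*
 * T2 silicon workaround: if cmdQ0 has gone to sleep, periodically sample
 * the ESPI monitor and, when it indicates a stall, retransmit the cached
 * packet (sge->pskb), patching a Chelsio MAC address into it the first
 * time around, to kick the engine. Rearmed from sge->ptimer every
 * sge->ptimeout jiffies.
 */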
static u8 ch_mac_addr[ETH_ALEN] = {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};

static void t1_espi_workaround(void *data)
{
        struct adapter *adapter = (struct adapter *)data;
        struct sge *sge = adapter->sge;

        if (netif_running(adapter->port[0].dev) &&
            atomic_read(&sge->cmdQ[0].asleep)) {

                u32 seop = t1_espi_get_mon(adapter, 0x930, 0);

                if ((seop & 0xfff0fff) == 0xfff && sge->pskb) {
                        struct sk_buff *skb = sge->pskb;

                        if (!skb->cb[0]) {
                                memcpy(skb->data + sizeof(struct cpl_tx_pkt),
                                       ch_mac_addr, ETH_ALEN);
                                memcpy(skb->data + skb->len - 10,
                                       ch_mac_addr, ETH_ALEN);
                                skb->cb[0] = 0xff;
                        }
                        t1_sge_tx(skb, adapter, 0);
                }
        }
        mod_timer(&adapter->sge->ptimer, jiffies + sge->ptimeout);
}

/*
 * Enables the DMA engine.
 */
void t1_sge_start(struct sge *sge)
{
        t1_write_reg_4(sge->adapter, A_SG_CONTROL, sge->sge_control);
        t1_read_reg_4(sge->adapter, A_SG_CONTROL); /* flush write */
        if (is_T2(sge->adapter)) {
                init_timer(&sge->ptimer);
                sge->ptimer.function = (void *)&t1_espi_workaround;
                sge->ptimer.data = (unsigned long)sge->adapter;
                sge->ptimer.expires = jiffies + sge->ptimeout;
                add_timer(&sge->ptimer);
        }
}

/*
 * Creates a t1_sge structure and returns suggested resource parameters.
 */
struct sge * __devinit t1_sge_create(struct adapter *adapter,
                                     struct sge_params *p)
{
        struct sge *sge = kmalloc(sizeof(*sge), GFP_KERNEL);

        if (!sge)
                return NULL;
        memset(sge, 0, sizeof(*sge));

        if (is_T2(adapter))
                sge->ptimeout = 1;      /* finest allowed */

        sge->adapter = adapter;
        sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : SGE_RX_OFFSET;
        sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

        p->cmdQ_size[0] = SGE_CMDQ0_E_N;
        p->cmdQ_size[1] = SGE_CMDQ1_E_N;
        p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
        p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
        p->rx_coalesce_usecs = SGE_INTRTIMER1;
        p->last_rx_coalesce_raw = SGE_INTRTIMER1 *
                (board_info(sge->adapter)->clock_core / 1000000);
        p->default_rx_coalesce_usecs = SGE_INTRTIMER1;
        p->coalesce_enable = 0; /* Turn off adaptive algorithm by default */
        p->sample_interval_usecs = 0;
        return sge;
}

/*
 * Frees all RX buffers on the freelist Q. The caller must make sure that
 * the SGE is turned off before calling this function.
 */
static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *Q)
{
        unsigned int cidx = Q->cidx, credits = Q->credits;

        while (credits--) {
                struct freelQ_ce *ce = &Q->centries[cidx];

                pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
                                 pci_unmap_len(ce, dma_len),
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb(ce->skb);
                ce->skb = NULL;
                if (++cidx == Q->entries_n)
                        cidx = 0;
        }
}

/*
 * Free RX free list and response queue resources.
 */
static void free_rx_resources(struct sge *sge)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int size, i;

        if (sge->respQ.entries) {
                size = sizeof(struct respQ_e) * sge->respQ.entries_n;
                pci_free_consistent(pdev, size, sge->respQ.entries,
                                    sge->respQ.dma_addr);
        }

        for (i = 0; i < SGE_FREELQ_N; i++) {
                struct freelQ *Q = &sge->freelQ[i];

                if (Q->centries) {
                        free_freelQ_buffers(pdev, Q);
                        kfree(Q->centries);
                }
                if (Q->entries) {
                        size = sizeof(struct freelQ_e) * Q->entries_n;
                        pci_free_consistent(pdev, size, Q->entries,
                                            Q->dma_addr);
                }
        }
}

/*
 * Allocates basic RX resources, consisting of memory mapped freelist Qs and
 * a response Q.
 */
static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int size, i;

        for (i = 0; i < SGE_FREELQ_N; i++) {
                struct freelQ *Q = &sge->freelQ[i];

                Q->genbit = 1;
                Q->entries_n = p->freelQ_size[i];
                Q->dma_offset = SGE_RX_OFFSET - sge->rx_pkt_pad;
                size = sizeof(struct freelQ_e) * Q->entries_n;
                Q->entries = (struct freelQ_e *)
                        pci_alloc_consistent(pdev, size, &Q->dma_addr);
                if (!Q->entries)
                        goto err_no_mem;
                memset(Q->entries, 0, size);
                Q->centries = kcalloc(Q->entries_n, sizeof(struct freelQ_ce),
                                      GFP_KERNEL);
                if (!Q->centries)
                        goto err_no_mem;
        }

        /*
         * Calculate the buffer sizes for the two free lists. FL0 accommodates
         * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
         * including all the sk_buff overhead.
         *
         * Note: For T2 FL0 and FL1 are reversed.
         */
        sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
                sizeof(struct cpl_rx_data) +
                sge->freelQ[!sge->jumbo_fl].dma_offset;
        sge->freelQ[sge->jumbo_fl].rx_buffer_size = (16 * 1024) -
                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        sge->respQ.genbit = 1;
        sge->respQ.entries_n = SGE_RESPQ_E_N;
        sge->respQ.credits = SGE_RESPQ_E_N;
        size = sizeof(struct respQ_e) * sge->respQ.entries_n;
        sge->respQ.entries = (struct respQ_e *)
                pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
        if (!sge->respQ.entries)
                goto err_no_mem;
        memset(sge->respQ.entries, 0, size);
        return 0;

err_no_mem:
        free_rx_resources(sge);
        return -ENOMEM;
}

/*
 * Frees 'credits_pend' TX buffers and returns the credits to Q->credits.
 *
 * The adaptive-coalescing code needs the total size of the buffers freed;
 * it is accumulated into @totpayload, which is deliberately not initialized
 * here so callers can accumulate across several calls.
 */
static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *Q,
                              unsigned int credits_pend,
                              unsigned int *totpayload)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        struct sk_buff *skb;
        struct cmdQ_ce *ce, *cq = Q->centries;
        unsigned int entries_n = Q->entries_n, cidx = Q->cidx,
                     i = credits_pend;

        ce = &cq[cidx];
        while (i--) {
                if (ce->single)
                        pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
                                         pci_unmap_len(ce, dma_len),
                                         PCI_DMA_TODEVICE);
                else
                        pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
                                       pci_unmap_len(ce, dma_len),
                                       PCI_DMA_TODEVICE);
                if (totpayload)
                        *totpayload += pci_unmap_len(ce, dma_len);

                skb = ce->skb;
                if (skb)
                        dev_kfree_skb_irq(skb);

                ce++;
                if (++cidx == entries_n) {
                        cidx = 0;
                        ce = cq;
                }
        }

        Q->cidx = cidx;
        atomic_add(credits_pend, &Q->credits);
}

/*
 * Free TX resources.
 *
 * Assumes that SGE is stopped and all interrupts are disabled.
 */
static void free_tx_resources(struct sge *sge)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int size, i;

        for (i = 0; i < SGE_CMDQ_N; i++) {
                struct cmdQ *Q = &sge->cmdQ[i];

                if (Q->centries) {
                        unsigned int pending = Q->entries_n -
                                               atomic_read(&Q->credits);

                        if (pending)
                                free_cmdQ_buffers(sge, Q, pending, NULL);
                        kfree(Q->centries);
                }
                if (Q->entries) {
                        size = sizeof(struct cmdQ_e) * Q->entries_n;
                        pci_free_consistent(pdev, size, Q->entries,
                                            Q->dma_addr);
                }
        }
}

/*
 * Allocates basic TX resources, consisting of memory mapped command Qs.
 */
static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int size, i;

        for (i = 0; i < SGE_CMDQ_N; i++) {
                struct cmdQ *Q = &sge->cmdQ[i];

                Q->genbit = 1;
                Q->entries_n = p->cmdQ_size[i];
                atomic_set(&Q->credits, Q->entries_n);
                atomic_set(&Q->asleep, 1);
                spin_lock_init(&Q->Qlock);
                size = sizeof(struct cmdQ_e) * Q->entries_n;
                Q->entries = (struct cmdQ_e *)
                        pci_alloc_consistent(pdev, size, &Q->dma_addr);
                if (!Q->entries)
                        goto err_no_mem;
                memset(Q->entries, 0, size);
                Q->centries = kcalloc(Q->entries_n, sizeof(struct cmdQ_ce),
                                      GFP_KERNEL);
                if (!Q->centries)
                        goto err_no_mem;
        }

        return 0;

err_no_mem:
        free_tx_resources(sge);
        return -ENOMEM;
}

static inline void setup_ring_params(struct adapter *adapter, u64 addr,
                                     u32 size, int base_reg_lo,
                                     int base_reg_hi, int size_reg)
{
        t1_write_reg_4(adapter, base_reg_lo, (u32)addr);
        t1_write_reg_4(adapter, base_reg_hi, addr >> 32);
        t1_write_reg_4(adapter, size_reg, size);
}

/*
 * Enable/disable VLAN acceleration.
 */
void t1_set_vlan_accel(struct adapter *adapter, int on_off)
{
        struct sge *sge = adapter->sge;

        sge->sge_control &= ~F_VLAN_XTRACT;
        if (on_off)
                sge->sge_control |= F_VLAN_XTRACT;
        if (adapter->open_device_map) {
                t1_write_reg_4(adapter, A_SG_CONTROL, sge->sge_control);
                t1_read_reg_4(adapter, A_SG_CONTROL); /* flush */
        }
}

/*
 * Sets the interrupt latency timer when adaptive RX coalescing is turned
 * off. Does nothing when adaptive coalescing is turned on.
 *
 * This routine relies on the fact that the caller has already set
 * the adaptive policy in adapter->sge_params before calling it.
 */
int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
{
        if (!p->coalesce_enable) {
                u32 newTimer = p->rx_coalesce_usecs *
                        (board_info(sge->adapter)->clock_core / 1000000);

                t1_write_reg_4(sge->adapter, A_SG_INTRTIMER, newTimer);
        }
        return 0;
}

/*
 * Programs the various SGE registers. The engine is not yet enabled, but
 * sge->sge_control is set up and ready to go.
 */
static void configure_sge(struct sge *sge, struct sge_params *p)
{
        struct adapter *ap = sge->adapter;
        int i;

        t1_write_reg_4(ap, A_SG_CONTROL, 0);
        setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].entries_n,
                          A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
        setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].entries_n,
                          A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
        setup_ring_params(ap, sge->freelQ[0].dma_addr,
                          sge->freelQ[0].entries_n, A_SG_FL0BASELWR,
                          A_SG_FL0BASEUPR, A_SG_FL0SIZE);
        setup_ring_params(ap, sge->freelQ[1].dma_addr,
                          sge->freelQ[1].entries_n, A_SG_FL1BASELWR,
                          A_SG_FL1BASEUPR, A_SG_FL1SIZE);

        /* The threshold comparison uses <. */
        t1_write_reg_4(ap, A_SG_FLTHRESHOLD, SGE_RX_SM_BUF_SIZE + 1);

        setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.entries_n,
                          A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
        t1_write_reg_4(ap, A_SG_RSPQUEUECREDIT, (u32)sge->respQ.entries_n);

        sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
                F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
                V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
                V_RX_PKT_OFFSET(sge->rx_pkt_pad);

#if defined(__BIG_ENDIAN_BITFIELD)
        sge->sge_control |= F_ENABLE_BIG_ENDIAN;
#endif

        /*
         * Initialize the SGE interrupt timer array:
         *   intrtimer[0]      = SGE_INTRTIMER0 usec
         *   intrtimer[0<i<5]  = (SGE_INTRTIMER0 + i * 2) usec
         *   intrtimer[4<i<10] = ((i - 3) * 6) usec
         *   intrtimer[10]     = SGE_INTRTIMER1 usec
         */
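        /*
         * Worked example, assuming a hypothetical 200 MHz core clock:
         * intrtimer[0] initially holds the ticks-per-usec scale factor
         * (200), so intrtimer[1] becomes (1 + 2) * 200 ticks = 3 usec,
         * intrtimer[5] becomes (5 - 3) * 6 * 200 ticks = 12 usec, and
         * intrtimer[10] becomes 50 * 200 ticks = 50 usec. intrtimer[0]
         * itself is scaled by SGE_INTRTIMER0 last.
         */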
        sge->intrtimer[0] = board_info(sge->adapter)->clock_core / 1000000;
        for (i = 1; i < SGE_INTR_LATBUCKETS; ++i) {
                sge->intrtimer[i] = SGE_INTRTIMER0 + (2 * i);
                sge->intrtimer[i] *= sge->intrtimer[0];
        }
        for (i = SGE_INTR_LATBUCKETS; i < SGE_INTR_MAXBUCKETS - 1; ++i) {
                sge->intrtimer[i] = (i - 3) * 6;
                sge->intrtimer[i] *= sge->intrtimer[0];
        }
        sge->intrtimer[SGE_INTR_MAXBUCKETS - 1] =
                sge->intrtimer[0] * SGE_INTRTIMER1;
        /* Initialize resource timer */
        sge->intrtimer_nres = sge->intrtimer[0] * SGE_INTRTIMER_NRES;
        /* Finally finish initialization of intrtimer[0] */
        sge->intrtimer[0] *= SGE_INTRTIMER0;
        /* Initialize for a throughput oriented workload */
        sge->currIndex = SGE_INTR_MAXBUCKETS - 1;

        if (p->coalesce_enable)
                t1_write_reg_4(ap, A_SG_INTRTIMER,
                               sge->intrtimer[sge->currIndex]);
        else
                t1_sge_set_coalesce_params(sge, p);
}

/*
 * Return the payload capacity of the jumbo free-list buffers.
 */
static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
{
        return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
                sizeof(struct cpl_rx_data) - SGE_RX_OFFSET + sge->rx_pkt_pad;
}

/*
 * Allocates both RX and TX resources and configures the SGE. However,
 * the hardware is not enabled yet.
 */
int t1_sge_configure(struct sge *sge, struct sge_params *p)
{
        if (alloc_rx_resources(sge, p))
                return -ENOMEM;
        if (alloc_tx_resources(sge, p)) {
                free_rx_resources(sge);
                return -ENOMEM;
        }
        configure_sge(sge, p);

        /*
         * Now that we have sized the free lists calculate the payload
         * capacity of the large buffers. Other parts of the driver use
         * this to set the max offload coalescing size so that RX packets
         * do not overflow our large buffers.
         */
        p->large_buf_capacity = jumbo_payload_capacity(sge);
        return 0;
}

/*
 * Frees all SGE related resources and the sge structure itself.
 */
void t1_sge_destroy(struct sge *sge)
{
        if (sge->pskb)
                dev_kfree_skb(sge->pskb);
        free_tx_resources(sge);
        free_rx_resources(sge);
        kfree(sge);
}

/*
 * Allocates new RX buffers on the freelist Q (and tracks them on the
 * freelist context Q) until the Q is full or alloc_skb fails.
 *
 * It is possible that the generation bits already match, indicating that the
 * buffer is already valid and nothing needs to be done. This happens when we
 * copied a received buffer into a new sk_buff during the interrupt
 * processing.
 *
 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
 * we specify an RX_OFFSET in order to make sure that the IP header is 4B
 * aligned.
 */
static void refill_free_list(struct sge *sge, struct freelQ *Q)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        struct freelQ_ce *ce = &Q->centries[Q->pidx];
        struct freelQ_e *e = &Q->entries[Q->pidx];
        unsigned int dma_len = Q->rx_buffer_size - Q->dma_offset;

        while (Q->credits < Q->entries_n) {
                if (e->GenerationBit != Q->genbit) {
                        struct sk_buff *skb;
                        dma_addr_t mapping;

                        skb = alloc_skb(Q->rx_buffer_size, GFP_ATOMIC);
                        if (!skb)
                                break;
                        if (Q->dma_offset)
                                skb_reserve(skb, Q->dma_offset);
                        mapping = pci_map_single(pdev, skb->data, dma_len,
                                                 PCI_DMA_FROMDEVICE);
                        ce->skb = skb;
                        pci_unmap_addr_set(ce, dma_addr, mapping);
                        pci_unmap_len_set(ce, dma_len, dma_len);
                        e->AddrLow = (u32)mapping;
                        e->AddrHigh = (u64)mapping >> 32;
                        e->BufferLength = dma_len;
                        e->GenerationBit = e->GenerationBit2 = Q->genbit;
                }

                e++;
                ce++;
                if (++Q->pidx == Q->entries_n) {
                        Q->pidx = 0;
                        Q->genbit ^= 1;
                        ce = Q->centries;
                        e = Q->entries;
                }
                Q->credits++;
        }
}

/*
 * Calls refill_free_list for both freelist Qs. If we cannot fill at least
 * 1/4 of both Qs, we go into 'few interrupt mode' in order to give the
 * system time to free up resources.
 */
static void freelQs_empty(struct sge *sge)
{
        u32 irq_reg = t1_read_reg_4(sge->adapter, A_SG_INT_ENABLE);
        u32 irqholdoff_reg;

        refill_free_list(sge, &sge->freelQ[0]);
        refill_free_list(sge, &sge->freelQ[1]);

        if (sge->freelQ[0].credits > (sge->freelQ[0].entries_n >> 2) &&
            sge->freelQ[1].credits > (sge->freelQ[1].entries_n >> 2)) {
                irq_reg |= F_FL_EXHAUSTED;
                irqholdoff_reg = sge->intrtimer[sge->currIndex];
        } else {
                /* Clear the F_FL_EXHAUSTED interrupts for now */
                irq_reg &= ~F_FL_EXHAUSTED;
                irqholdoff_reg = sge->intrtimer_nres;
        }
        t1_write_reg_4(sge->adapter, A_SG_INTRTIMER, irqholdoff_reg);
        t1_write_reg_4(sge->adapter, A_SG_INT_ENABLE, irq_reg);

        /* We reenable the Qs to force a freelist GTS interrupt later */
        doorbell_pio(sge, F_FL0_ENABLE | F_FL1_ENABLE);
}

#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
                        F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)

/*
 * Disable SGE interrupts.
 */
void t1_sge_intr_disable(struct sge *sge)
{
        u32 val = t1_read_reg_4(sge->adapter, A_PL_ENABLE);

        t1_write_reg_4(sge->adapter, A_PL_ENABLE, val & ~SGE_PL_INTR_MASK);
        t1_write_reg_4(sge->adapter, A_SG_INT_ENABLE, 0);
}

/*
 * Enable SGE interrupts.
 */
void t1_sge_intr_enable(struct sge *sge)
{
        u32 en = SGE_INT_ENABLE;
        u32 val = t1_read_reg_4(sge->adapter, A_PL_ENABLE);

        if (sge->adapter->flags & TSO_CAPABLE)
                en &= ~F_PACKET_TOO_BIG;
        t1_write_reg_4(sge->adapter, A_SG_INT_ENABLE, en);
        t1_write_reg_4(sge->adapter, A_PL_ENABLE, val | SGE_PL_INTR_MASK);
}

/*
 * Clear SGE interrupts.
 */
void t1_sge_intr_clear(struct sge *sge)
{
        t1_write_reg_4(sge->adapter, A_PL_CAUSE, SGE_PL_INTR_MASK);
        t1_write_reg_4(sge->adapter, A_SG_INT_CAUSE, 0xffffffff);
}

/*
 * SGE 'Error' interrupt handler
 */
int t1_sge_intr_error_handler(struct sge *sge)
{
        struct adapter *adapter = sge->adapter;
        u32 cause = t1_read_reg_4(adapter, A_SG_INT_CAUSE);

        if (adapter->flags & TSO_CAPABLE)
                cause &= ~F_PACKET_TOO_BIG;
        if (cause & F_RESPQ_EXHAUSTED)
                sge->intr_cnt.respQ_empty++;
        if (cause & F_RESPQ_OVERFLOW) {
                sge->intr_cnt.respQ_overflow++;
                CH_ALERT("%s: SGE response queue overflow\n",
                         adapter->name);
        }
        if (cause & F_FL_EXHAUSTED) {
                sge->intr_cnt.freelistQ_empty++;
                freelQs_empty(sge);
        }
        if (cause & F_PACKET_TOO_BIG) {
                sge->intr_cnt.pkt_too_big++;
                CH_ALERT("%s: SGE max packet size exceeded\n",
                         adapter->name);
        }
        if (cause & F_PACKET_MISMATCH) {
                sge->intr_cnt.pkt_mismatch++;
                CH_ALERT("%s: SGE packet mismatch\n", adapter->name);
        }
        if (cause & SGE_INT_FATAL)
                t1_fatal_err(adapter);

        t1_write_reg_4(adapter, A_SG_INT_CAUSE, cause);
        return 0;
}

/*
 * The following code is copied from 2.6, where the skb_pull is doing the
 * right thing and only pulls ETH_HLEN.
 *
 * Determine the packet's protocol ID. The rule here is that we
 * assume 802.3 if the type field is short enough to be a length.
 * This is normal practice and works for any 'now in use' protocol.
 */
static unsigned short sge_eth_type_trans(struct sk_buff *skb,
                                         struct net_device *dev)
{
        struct ethhdr *eth;
        unsigned char *rawp;

        skb->mac.raw = skb->data;
        skb_pull(skb, ETH_HLEN);
        eth = (struct ethhdr *)skb->mac.raw;

        if (*eth->h_dest & 1) {
                if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
                        skb->pkt_type = PACKET_BROADCAST;
                else
                        skb->pkt_type = PACKET_MULTICAST;
        }

        /*
         * This ALLMULTI check should be redundant by 1.4
         * so don't forget to remove it.
         *
         * Seems, you forgot to remove it. All silly devices
         * seems to set IFF_PROMISC.
         */

        else if (1 /*dev->flags&IFF_PROMISC*/) {
                if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
                        skb->pkt_type = PACKET_OTHERHOST;
        }

        if (ntohs(eth->h_proto) >= 1536)
                return eth->h_proto;

        rawp = skb->data;

        /*
         * This is a magic hack to spot IPX packets. Older Novell breaks
         * the protocol design and runs IPX over 802.3 without an 802.2 LLC
         * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
         * won't work for fault tolerant netware but does for the rest.
         */
        if (*(unsigned short *)rawp == 0xFFFF)
                return htons(ETH_P_802_3);

        /*
         * Real 802.2 LLC
         */
        return htons(ETH_P_802_2);
}

/*
 * Prepare the received buffer and pass it up the stack. If the packet is
 * small enough and allocation doesn't fail, we copy it into a new sk_buff
 * and flip the HW descriptor's generation bits back so the original buffer
 * can be reused in place.
 */
static unsigned int t1_sge_rx(struct sge *sge, struct freelQ *Q,
                              unsigned int len, unsigned int offload)
{
        struct sk_buff *skb;
        struct adapter *adapter = sge->adapter;
        struct freelQ_ce *ce = &Q->centries[Q->cidx];

        if (len <= SGE_RX_COPY_THRESHOLD &&
            (skb = alloc_skb(len + NET_IP_ALIGN, GFP_ATOMIC))) {
                struct freelQ_e *e;
                char *src = ce->skb->data;

                pci_dma_sync_single_for_cpu(adapter->pdev,
                                            pci_unmap_addr(ce, dma_addr),
                                            pci_unmap_len(ce, dma_len),
                                            PCI_DMA_FROMDEVICE);
                if (!offload) {
                        skb_reserve(skb, NET_IP_ALIGN);
                        src += sge->rx_pkt_pad;
                }
                memcpy(skb->data, src, len);

                /* Reuse the entry. */
                e = &Q->entries[Q->cidx];
                e->GenerationBit ^= 1;
                e->GenerationBit2 ^= 1;
        } else {
                pci_unmap_single(adapter->pdev, pci_unmap_addr(ce, dma_addr),
                                 pci_unmap_len(ce, dma_len),
                                 PCI_DMA_FROMDEVICE);
                skb = ce->skb;
                if (!offload && sge->rx_pkt_pad)
                        __skb_pull(skb, sge->rx_pkt_pad);
        }

        skb_put(skb, len);

        if (unlikely(offload)) {
                printk(KERN_ERR
                       "%s: unexpected offloaded packet, cmd %u\n",
                       adapter->name, *skb->data);
                dev_kfree_skb_any(skb);
        } else {
                struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)skb->data;

                skb_pull(skb, sizeof(*p));
                skb->dev = adapter->port[p->iff].dev;
                skb->dev->last_rx = jiffies;
                skb->protocol = sge_eth_type_trans(skb, skb->dev);
                if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
                    skb->protocol == htons(ETH_P_IP) &&
                    (skb->data[9] == IPPROTO_TCP ||
                     skb->data[9] == IPPROTO_UDP))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb->ip_summed = CHECKSUM_NONE;
                if (adapter->vlan_grp && p->vlan_valid)
                        vlan_hwaccel_rx(skb, adapter->vlan_grp,
                                        ntohs(p->vlan));
                else
                        netif_rx(skb);
        }

        if (++Q->cidx == Q->entries_n)
                Q->cidx = 0;

        if (unlikely(--Q->credits < Q->entries_n - SGE_FREEL_REFILL_THRESH))
                refill_free_list(sge, Q);
        return 1;
}

/*
 * Adaptive interrupt timer logic to keep CPU utilization at manageable
 * levels. As the average packet size (APS) gets higher, the interrupt
 * latency setting gets longer: every SGE_INTR_BUCKETSIZE (100B) of payload
 * bumps the base value of SGE_INTRTIMER0 by 2 usec. At large payloads the
 * latency hits the ceiling value of SGE_INTRTIMER1, stored at index
 * SGE_INTR_MAXBUCKETS - 1 in sge->intrtimer[].
 *
 * sge->currIndex caches the last index to save unneeded PIOs.
 */
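/*
 * Example: an average payload of 250 bytes selects bucket 250 / 100 = 2,
 * i.e. an (SGE_INTRTIMER0 + 4) usec interrupt holdoff.
 */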
static inline void update_intr_timer(struct sge *sge, unsigned int avg_payload)
{
        unsigned int newIndex;

        newIndex = avg_payload / SGE_INTR_BUCKETSIZE;
        if (newIndex > SGE_INTR_MAXBUCKETS - 1)
                newIndex = SGE_INTR_MAXBUCKETS - 1;
        /* Skip the PIO when the index is unchanged. */
        if (newIndex != sge->currIndex) {
                t1_write_reg_4(sge->adapter, A_SG_INTRTIMER,
                               sge->intrtimer[newIndex]);
                sge->currIndex = newIndex;
                sge->adapter->params.sge.last_rx_coalesce_raw =
                        sge->intrtimer[newIndex];
        }
}

/*
 * Returns true if command queue q_num has enough available descriptors that
 * we can resume Tx operation after temporarily disabling its packet queue.
 */
static inline int enough_free_Tx_descs(struct sge *sge, int q_num)
{
        return atomic_read(&sge->cmdQ[q_num].credits) >
                (sge->cmdQ[q_num].entries_n >> 2);
}

/*
 * Main interrupt handler, optimized assuming that we took a 'DATA'
 * interrupt.
 *
 * 1. Clear the interrupt
 * 2. Loop while we find valid descriptors and process them; accumulate
 *    information that can be processed after the loop
 * 3. Tell the SGE at which index we stopped processing descriptors
 * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
 *    outstanding TX buffers waiting, replenish RX buffers, potentially
 *    reenable upper layers if they were turned off due to lack of TX
 *    resources which are available again.
 * 5. If we took an interrupt, but no valid respQ descriptors were found we
 *    let the slow_intr_handler run and do error handling.
 */
irqreturn_t t1_interrupt(int irq, void *cookie, struct pt_regs *regs)
{
        struct net_device *netdev;
        struct adapter *adapter = cookie;
        struct sge *sge = adapter->sge;
        struct respQ *Q = &sge->respQ;
        unsigned int credits = Q->credits, flags = 0, ret = 0;
        unsigned int tot_rxpayload = 0, tot_txpayload = 0, n_rx = 0, n_tx = 0;
        unsigned int credits_pend[SGE_CMDQ_N] = { 0, 0 };

        struct respQ_e *e = &Q->entries[Q->cidx];
        prefetch(e);

        t1_write_reg_4(adapter, A_PL_CAUSE, F_PL_INTR_SGE_DATA);

        while (e->GenerationBit == Q->genbit) {
                if (--credits < SGE_RESPQ_REPLENISH_THRES) {
                        u32 n = Q->entries_n - credits - 1;

                        t1_write_reg_4(adapter, A_SG_RSPQUEUECREDIT, n);
                        credits += n;
                }
                if (likely(e->DataValid)) {
                        if (!e->Sop || !e->Eop)
                                BUG();
                        t1_sge_rx(sge, &sge->freelQ[e->FreelistQid],
                                  e->BufferLength, e->Offload);
                        tot_rxpayload += e->BufferLength;
                        ++n_rx;
                }
                flags |= e->Qsleeping;
                credits_pend[0] += e->Cmdq0CreditReturn;
                credits_pend[1] += e->Cmdq1CreditReturn;

#ifdef CONFIG_SMP
                /*
                 * If enough cmdQ0 buffers have finished DMAing free them so
                 * anyone that may be waiting for their release can continue.
                 * We do this only on MP systems to allow other CPUs to
                 * proceed promptly. UP systems can wait for the
                 * free_cmdQ_buffers() calls after this loop as the sole CPU
                 * is currently busy in this loop.
                 */
                if (unlikely(credits_pend[0] > SGE_FREEL_REFILL_THRESH)) {
                        free_cmdQ_buffers(sge, &sge->cmdQ[0], credits_pend[0],
                                          &tot_txpayload);
                        n_tx += credits_pend[0];
                        credits_pend[0] = 0;
                }
#endif
                ret++;
                e++;
                if (unlikely(++Q->cidx == Q->entries_n)) {
                        Q->cidx = 0;
                        Q->genbit ^= 1;
                        e = Q->entries;
                }
        }

        Q->credits = credits;
        t1_write_reg_4(adapter, A_SG_SLEEPING, Q->cidx);

        if (credits_pend[0])
                free_cmdQ_buffers(sge, &sge->cmdQ[0], credits_pend[0],
                                  &tot_txpayload);
        if (credits_pend[1])
                free_cmdQ_buffers(sge, &sge->cmdQ[1], credits_pend[1],
                                  &tot_txpayload);

        /* Do any coalescing and interrupt latency timer adjustments */
        if (adapter->params.sge.coalesce_enable) {
                unsigned int avg_txpayload = 0, avg_rxpayload = 0;

                n_tx += credits_pend[0] + credits_pend[1];

                /*
                 * Choose the larger avg. payload size to increase
                 * throughput and reduce [CPU util., intr/s.]
                 *
                 * Throughput behavior favored in mixed-mode.
                 */
                if (n_tx)
                        avg_txpayload = tot_txpayload / n_tx;
                if (n_rx)
                        avg_rxpayload = tot_rxpayload / n_rx;

                if (n_tx && avg_txpayload > avg_rxpayload) {
                        update_intr_timer(sge, avg_txpayload);
                } else if (n_rx) {
                        update_intr_timer(sge, avg_rxpayload);
                }
        }

        if (flags & F_CMDQ0_ENABLE) {
                struct cmdQ *cmdQ = &sge->cmdQ[0];

                atomic_set(&cmdQ->asleep, 1);
                if (atomic_read(&cmdQ->pio_pidx) != cmdQ->pidx) {
                        doorbell_pio(sge, F_CMDQ0_ENABLE);
                        atomic_set(&cmdQ->pio_pidx, cmdQ->pidx);
                }
        }
        if (unlikely(flags & (F_FL0_ENABLE | F_FL1_ENABLE)))
                freelQs_empty(sge);

        netdev = adapter->port[0].dev;
        if (unlikely(netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
                     enough_free_Tx_descs(sge, 0) &&
                     enough_free_Tx_descs(sge, 1))) {
                netif_wake_queue(netdev);
        }
        if (unlikely(!ret))
                ret = t1_slow_intr_handler(adapter);

        return IRQ_RETVAL(ret != 0);
}

/*
 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
 *
 * The code figures out how many entries the sk_buff will require in the
 * cmdQ and updates the cmdQ data structure with the state once the enqueue
 * is complete. After that it doesn't access the global structure anymore,
 * but uses the corresponding fields on the stack. In conjunction with a
 * spinlock around that code, we can make the function reentrant without
 * holding the lock when we actually enqueue (which might be expensive,
 * especially on architectures with IO MMUs).
 */
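/*
 * When invoked from the T2 workaround timer (skb == sge->pskb), the nudge
 * packet is enqueued only if the queue is completely idle; otherwise the
 * function backs off without touching the queue.
 */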
static unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
                              unsigned int qid)
{
        struct sge *sge = adapter->sge;
        struct cmdQ *Q = &sge->cmdQ[qid];
        struct cmdQ_e *e;
        struct cmdQ_ce *ce;
        dma_addr_t mapping;
        unsigned int credits, pidx, genbit;

        unsigned int count = 1 + skb_shinfo(skb)->nr_frags;

        /*
         * Coming from the timer
         */
        if (skb == sge->pskb) {
                /*
                 * Quit if there is any cmdQ activity
                 */
                if (!spin_trylock(&Q->Qlock))
                        return 0;
                if (atomic_read(&Q->credits) != Q->entries_n) {
                        spin_unlock(&Q->Qlock);
                        return 0;
                }
        } else
                spin_lock(&Q->Qlock);

        genbit = Q->genbit;
        pidx = Q->pidx;
        credits = atomic_read(&Q->credits);

        credits -= count;
        atomic_sub(count, &Q->credits);
        Q->pidx += count;
        if (Q->pidx >= Q->entries_n) {
                Q->pidx -= Q->entries_n;
                Q->genbit ^= 1;
        }

        if (unlikely(credits < (MAX_SKB_FRAGS + 1))) {
                sge->intr_cnt.cmdQ_full[qid]++;
                netif_stop_queue(adapter->port[0].dev);
        }
        spin_unlock(&Q->Qlock);

        mapping = pci_map_single(adapter->pdev, skb->data,
                                 skb->len - skb->data_len, PCI_DMA_TODEVICE);
        ce = &Q->centries[pidx];
        ce->skb = NULL;
        pci_unmap_addr_set(ce, dma_addr, mapping);
        pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
        ce->single = 1;

        e = &Q->entries[pidx];
        e->Sop = 1;
        e->DataValid = 1;
        e->BufferLength = skb->len - skb->data_len;
        e->AddrHigh = (u64)mapping >> 32;
        e->AddrLow = (u32)mapping;

        if (--count > 0) {
                unsigned int i;

                e->Eop = 0;
                wmb();
                e->GenerationBit = e->GenerationBit2 = genbit;

                for (i = 0; i < count; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        ce++;
                        e++;
                        if (++pidx == Q->entries_n) {
                                pidx = 0;
                                genbit ^= 1;
                                ce = Q->centries;
                                e = Q->entries;
                        }

                        mapping = pci_map_page(adapter->pdev, frag->page,
                                               frag->page_offset,
                                               frag->size,
                                               PCI_DMA_TODEVICE);
                        ce->skb = NULL;
                        pci_unmap_addr_set(ce, dma_addr, mapping);
                        pci_unmap_len_set(ce, dma_len, frag->size);
                        ce->single = 0;

                        e->Sop = 0;
                        e->DataValid = 1;
                        e->BufferLength = frag->size;
                        e->AddrHigh = (u64)mapping >> 32;
                        e->AddrLow = (u32)mapping;

                        if (i < count - 1) {
                                e->Eop = 0;
                                wmb();
                                e->GenerationBit = e->GenerationBit2 = genbit;
                        }
                }
        }

        if (skb != sge->pskb)
                ce->skb = skb;
        e->Eop = 1;
        wmb();
        e->GenerationBit = e->GenerationBit2 = genbit;

        /*
         * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring
         * the doorbell if the Q is asleep. There is a natural race, where
         * the hardware is going to sleep just after we checked, however,
         * then the interrupt handler will detect the outstanding TX packet
         * and ring the doorbell for us.
         */
        if (qid) {
                doorbell_pio(sge, F_CMDQ1_ENABLE);
        } else if (atomic_read(&Q->asleep)) {
                atomic_set(&Q->asleep, 0);
                doorbell_pio(sge, F_CMDQ0_ENABLE);
                atomic_set(&Q->pio_pidx, Q->pidx);
        }
        return 0;
}

#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
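/* Packs the Ethernet framing type into the top two bits and the MSS into
 * the low 14 bits of the LSO header's eth_type_mss field. */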

/*
 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
 */
int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct cpl_tx_pkt *cpl;
        struct ethhdr *eth;
        size_t max_len;

        /*
         * We are using a non-standard hard_header_len and some kernel
         * components, such as pktgen, do not handle it right. Complain
         * when this happens but try to fix things up.
         */
        if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
                struct sk_buff *orig_skb = skb;

                if (net_ratelimit())
                        printk(KERN_ERR
                               "%s: Tx packet has inadequate headroom\n",
                               dev->name);
                skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
                dev_kfree_skb_any(orig_skb);
                if (!skb)
                        return -ENOMEM;
        }

        if (skb_shinfo(skb)->tso_size) {
                int eth_type;
                struct cpl_tx_pkt_lso *hdr;

                eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
                        CPL_ETH_II : CPL_ETH_II_VLAN;

                hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
                hdr->opcode = CPL_TX_PKT_LSO;
                hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
                hdr->ip_hdr_words = skb->nh.iph->ihl;
                hdr->tcp_hdr_words = skb->h.th->doff;
                hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
                                          skb_shinfo(skb)->tso_size));
                hdr->len = htonl(skb->len - sizeof(*hdr));
                cpl = (struct cpl_tx_pkt *)hdr;
        } else {
                /*
                 * An Ethernet packet must have at least space for
                 * the DIX Ethernet header and be no greater than
                 * the device set MTU. Otherwise trash the packet.
                 */
                if (skb->len < ETH_HLEN)
                        goto t1_start_xmit_fail2;
                eth = (struct ethhdr *)skb->data;
                if (eth->h_proto == htons(ETH_P_8021Q))
                        max_len = dev->mtu + VLAN_ETH_HLEN;
                else
                        max_len = dev->mtu + ETH_HLEN;
                if (skb->len > max_len)
                        goto t1_start_xmit_fail2;

                if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
                    skb->ip_summed == CHECKSUM_HW &&
                    skb->nh.iph->protocol == IPPROTO_UDP &&
                    skb_checksum_help(skb, 0))
                        goto t1_start_xmit_fail3;

                if (!adapter->sge->pskb) {
                        if (skb->protocol == htons(ETH_P_ARP) &&
                            skb->nh.arph->ar_op == htons(ARPOP_REQUEST))
                                adapter->sge->pskb = skb;
                }
                cpl = (struct cpl_tx_pkt *)skb_push(skb, sizeof(*cpl));
                cpl->opcode = CPL_TX_PKT;
                cpl->ip_csum_dis = 1;   /* SW calculates IP csum */
                cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_HW ? 0 : 1;
                /* the length field isn't used so don't bother setting it */
        }
        cpl->iff = dev->if_port;

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                cpl->vlan_valid = 1;
                cpl->vlan = htons(vlan_tx_tag_get(skb));
        } else
#endif
                cpl->vlan_valid = 0;

        dev->trans_start = jiffies;
        return t1_sge_tx(skb, adapter, 0);

t1_start_xmit_fail3:
        printk(KERN_INFO "%s: Unable to complete checksum\n", dev->name);
        goto t1_start_xmit_fail1;

t1_start_xmit_fail2:
        printk(KERN_INFO "%s: Invalid packet length %d, dropping\n",
               dev->name, skb->len);

t1_start_xmit_fail1:
        dev_kfree_skb_any(skb);
        return 0;
}

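/*
 * Set the period of the T2 workaround timer. @val is in milliseconds and
 * is converted to jiffies, clamped to a minimum of one jiffy.
 */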
void t1_sge_set_ptimeout(adapter_t *adapter, u32 val)
{
        struct sge *sge = adapter->sge;

        if (is_T2(adapter))
                sge->ptimeout = max((u32)((HZ * val) / 1000), (u32)1);
}

u32 t1_sge_get_ptimeout(adapter_t *adapter)
{
        struct sge *sge = adapter->sge;

        return (is_T2(adapter) ? ((sge->ptimeout * 1000) / HZ) : 0);
}