author	Divy Le Ray <divy@chelsio.com>	2007-01-18 22:04:14 -0500
committer	Jeff Garzik <jeff@garzik.org>	2007-02-05 16:58:46 -0500
commit	4d22de3e6cc4a09c369b504cd8bcde3385a974cd (patch)
tree	af13a2ee582105d961c79fc4e55fce0b5e043310 /drivers/net/cxgb3/sge.c
parent	0bf94faf64afaba6e7b49fd11541b59d2ba06d0e (diff)
Add support for the latest 1G/10G Chelsio adapter, T3.
This driver is required by the Chelsio T3 RDMA driver posted by Steve Wise.

Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/cxgb3/sge.c')
-rw-r--r--	drivers/net/cxgb3/sge.c	2702
1 file changed, 2702 insertions, 0 deletions
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
new file mode 100644
index 000000000000..6c77f4bab62f
--- /dev/null
+++ b/drivers/net/cxgb3/sge.c
@@ -0,0 +1,2702 @@
1/*
2 * This file is part of the Chelsio T3 Ethernet driver.
3 *
4 * Copyright (C) 2005-2006 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 */
11
12#include <linux/skbuff.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/if_vlan.h>
16#include <linux/ip.h>
17#include <linux/tcp.h>
18#include <linux/dma-mapping.h>
19#include "common.h"
20#include "regs.h"
21#include "sge_defs.h"
22#include "t3_cpl.h"
23#include "firmware_exports.h"
24
25#define USE_GTS 0
26
27#define SGE_RX_SM_BUF_SIZE 1536
28#define SGE_RX_COPY_THRES 256
29
30# define SGE_RX_DROP_THRES 16
31
32/*
33 * Period of the Tx buffer reclaim timer. This timer does not need to run
34 * frequently as Tx buffers are usually reclaimed by new Tx packets.
35 */
36#define TX_RECLAIM_PERIOD (HZ / 4)
37
38/* WR size in bytes */
39#define WR_LEN (WR_FLITS * 8)
40
41/*
42 * Types of Tx queues in each queue set. Order here matters, do not change.
43 */
44enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
45
46/* Values for sge_txq.flags */
47enum {
48 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
49 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
50};
51
52struct tx_desc {
53 u64 flit[TX_DESC_FLITS];
54};
55
56struct rx_desc {
57 __be32 addr_lo;
58 __be32 len_gen;
59 __be32 gen2;
60 __be32 addr_hi;
61};
62
63struct tx_sw_desc { /* SW state per Tx descriptor */
64 struct sk_buff *skb;
65};
66
67struct rx_sw_desc { /* SW state per Rx descriptor */
68 struct sk_buff *skb;
69 DECLARE_PCI_UNMAP_ADDR(dma_addr);
70};
71
72struct rsp_desc { /* response queue descriptor */
73 struct rss_header rss_hdr;
74 __be32 flags;
75 __be32 len_cq;
76 u8 imm_data[47];
77 u8 intr_gen;
78};
79
80struct unmap_info { /* packet unmapping info, overlays skb->cb */
81 int sflit; /* start flit of first SGL entry in Tx descriptor */
82 u16 fragidx; /* first page fragment in current Tx descriptor */
83 u16 addr_idx; /* buffer index of first SGL entry in descriptor */
84 u32 len; /* mapped length of skb main body */
85};
86
87/*
88 * Maps a number of flits to the number of Tx descriptors that can hold them.
89 * The formula is
90 *
91 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
92 *
93 * HW allows up to 4 descriptors to be combined into a WR.
94 */
95static u8 flit_desc_map[] = {
96 0,
97#if SGE_NUM_GENBITS == 1
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
101 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
102#elif SGE_NUM_GENBITS == 2
103 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
106 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
107#else
108# error "SGE_NUM_GENBITS must be 1 or 2"
109#endif
110};
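/*
 * Editorial sketch (not part of the patch): a standalone check that the
 * table above matches the formula desc = 1 + (flits - 2) / (WR_FLITS - 1).
 * It assumes WR_FLITS is 16 with SGE_NUM_GENBITS == 1 and 15 with
 * SGE_NUM_GENBITS == 2 (one flit reserved for the second generation bit),
 * which is what the table sizes imply.
 */
static inline int check_flit_desc_map(void)
{
	unsigned int flits, wr_flits = SGE_NUM_GENBITS == 1 ? 16 : 15;

	for (flits = 2; flits < ARRAY_SIZE(flit_desc_map); flits++)
		if (flit_desc_map[flits] != 1 + (flits - 2) / (wr_flits - 1))
			return -1;
	return 0;
}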
111
112static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
113{
114 return container_of(q, struct sge_qset, fl[qidx]);
115}
116
117static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
118{
119 return container_of(q, struct sge_qset, rspq);
120}
121
122static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
123{
124 return container_of(q, struct sge_qset, txq[qidx]);
125}
126
127/**
128 * refill_rspq - replenish an SGE response queue
129 * @adapter: the adapter
130 * @q: the response queue to replenish
131 * @credits: how many new responses to make available
132 *
133 * Replenishes a response queue by making the supplied number of responses
134 * available to HW.
135 */
136static inline void refill_rspq(struct adapter *adapter,
137 const struct sge_rspq *q, unsigned int credits)
138{
139 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
140 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
141}
142
143/**
144 * need_skb_unmap - does the platform need unmapping of sk_buffs?
145 *
146 * Returns true if the platform needs sk_buff unmapping. The result is
147 * a compile-time constant, so unnecessary unmapping code is optimized away.
148 */
149static inline int need_skb_unmap(void)
150{
151 /*
152 * This structure is used to tell if the platform needs buffer
153 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
154 */
155 struct dummy {
156 DECLARE_PCI_UNMAP_ADDR(addr);
157 };
158
159 return sizeof(struct dummy) != 0;
160}
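/*
 * Editorial sketch of the trick above (illustrative names, not driver code):
 * on platforms where DECLARE_PCI_UNMAP_ADDR() expands to nothing the dummy
 * struct ends up with no members, gcc gives it size 0, and the constant
 * "sizeof(...) != 0" lets the compiler drop every unmap path guarded by it.
 */
#define DEMO_DECLARE_UNMAP_ADDR(name)	/* empty: no unmap state needed */

static inline int demo_need_unmap(void)
{
	struct demo_dummy {
		DEMO_DECLARE_UNMAP_ADDR(addr)
	};

	return sizeof(struct demo_dummy) != 0;	/* compile-time 0 here */
}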
161
162/**
163 * unmap_skb - unmap a packet main body and its page fragments
164 * @skb: the packet
165 * @q: the Tx queue containing Tx descriptors for the packet
166 * @cidx: index of Tx descriptor
167 * @pdev: the PCI device
168 *
169 * Unmap the main body of an sk_buff and its page fragments, if any.
170 * Because of the fairly complicated structure of our SGLs and the desire
171 * to conserve space for metadata, we keep the information necessary to
172 * unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
173 * in the Tx descriptors (the physical addresses of the various data
174 * buffers). The send functions initialize the state in skb->cb so we
175 * can unmap the buffers held in the first Tx descriptor here, and we
176 * have enough information at this point to update the state for the next
177 * Tx descriptor.
178 */
179static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
180 unsigned int cidx, struct pci_dev *pdev)
181{
182 const struct sg_ent *sgp;
183 struct unmap_info *ui = (struct unmap_info *)skb->cb;
184 int nfrags, frag_idx, curflit, j = ui->addr_idx;
185
186 sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];
187
188 if (ui->len) {
189 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
190 PCI_DMA_TODEVICE);
191 ui->len = 0; /* so we know for next descriptor for this skb */
192 j = 1;
193 }
194
195 frag_idx = ui->fragidx;
196 curflit = ui->sflit + 1 + j;
197 nfrags = skb_shinfo(skb)->nr_frags;
198
199 while (frag_idx < nfrags && curflit < WR_FLITS) {
200 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
201 skb_shinfo(skb)->frags[frag_idx].size,
202 PCI_DMA_TODEVICE);
203 j ^= 1;
204 if (j == 0) {
205 sgp++;
206 curflit++;
207 }
208 curflit++;
209 frag_idx++;
210 }
211
212 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
213 ui->fragidx = frag_idx;
214 ui->addr_idx = j;
215 ui->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
216 }
217}
218
219/**
220 * free_tx_desc - reclaims Tx descriptors and their buffers
221 * @adapter: the adapter
222 * @q: the Tx queue to reclaim descriptors from
223 * @n: the number of descriptors to reclaim
224 *
225 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
226 * Tx buffers. Called with the Tx queue lock held.
227 */
228static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
229 unsigned int n)
230{
231 struct tx_sw_desc *d;
232 struct pci_dev *pdev = adapter->pdev;
233 unsigned int cidx = q->cidx;
234
235 d = &q->sdesc[cidx];
236 while (n--) {
237 if (d->skb) { /* an SGL is present */
238 if (need_skb_unmap())
239 unmap_skb(d->skb, q, cidx, pdev);
240 if (d->skb->priority == cidx)
241 kfree_skb(d->skb);
242 }
243 ++d;
244 if (++cidx == q->size) {
245 cidx = 0;
246 d = q->sdesc;
247 }
248 }
249 q->cidx = cidx;
250}
251
252/**
253 * reclaim_completed_tx - reclaims completed Tx descriptors
254 * @adapter: the adapter
255 * @q: the Tx queue to reclaim completed descriptors from
256 *
257 * Reclaims Tx descriptors that the SGE has indicated it has processed,
258 * and frees the associated buffers if possible. Called with the Tx
259 * queue's lock held.
260 */
261static inline void reclaim_completed_tx(struct adapter *adapter,
262 struct sge_txq *q)
263{
264 unsigned int reclaim = q->processed - q->cleaned;
265
266 if (reclaim) {
267 free_tx_desc(adapter, q, reclaim);
268 q->cleaned += reclaim;
269 q->in_use -= reclaim;
270 }
271}
272
273/**
274 * should_restart_tx - are there enough resources to restart a Tx queue?
275 * @q: the Tx queue
276 *
277 * Checks if there are enough descriptors to restart a suspended Tx queue.
278 */
279static inline int should_restart_tx(const struct sge_txq *q)
280{
281 unsigned int r = q->processed - q->cleaned;
282
283 return q->in_use - r < (q->size >> 1);
284}
285
286/**
287 * free_rx_bufs - free the Rx buffers on an SGE free list
288 * @pdev: the PCI device associated with the adapter
289 * @rxq: the SGE free list to clean up
290 *
291 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
292 * this queue should be stopped before calling this function.
293 */
294static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
295{
296 unsigned int cidx = q->cidx;
297
298 while (q->credits--) {
299 struct rx_sw_desc *d = &q->sdesc[cidx];
300
301 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
302 q->buf_size, PCI_DMA_FROMDEVICE);
303 kfree_skb(d->skb);
304 d->skb = NULL;
305 if (++cidx == q->size)
306 cidx = 0;
307 }
308}
309
310/**
311 * add_one_rx_buf - add a packet buffer to a free-buffer list
312 * @skb: the buffer to add
313 * @len: the buffer length
314 * @d: the HW Rx descriptor to write
315 * @sd: the SW Rx descriptor to write
316 * @gen: the generation bit value
317 * @pdev: the PCI device associated with the adapter
318 *
319 * Add a buffer of the given length to the supplied HW and SW Rx
320 * descriptors.
321 */
322static inline void add_one_rx_buf(struct sk_buff *skb, unsigned int len,
323 struct rx_desc *d, struct rx_sw_desc *sd,
324 unsigned int gen, struct pci_dev *pdev)
325{
326 dma_addr_t mapping;
327
328 sd->skb = skb;
329 mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
330 pci_unmap_addr_set(sd, dma_addr, mapping);
331
332 d->addr_lo = cpu_to_be32(mapping);
333 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
334 wmb();
335 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
336 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
337}
338
339/**
340 * refill_fl - refill an SGE free-buffer list
341 * @adapter: the adapter
342 * @q: the free-list to refill
343 * @n: the number of new buffers to allocate
344 * @gfp: the gfp flags for allocating new buffers
345 *
346 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
347 * allocated with the supplied gfp flags. The caller must ensure that
348 * @n does not exceed the queue's capacity.
349 */
350static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
351{
352 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
353 struct rx_desc *d = &q->desc[q->pidx];
354
355 while (n--) {
356 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
357
358 if (!skb)
359 break;
360
361 add_one_rx_buf(skb, q->buf_size, d, sd, q->gen, adap->pdev);
362 d++;
363 sd++;
364 if (++q->pidx == q->size) {
365 q->pidx = 0;
366 q->gen ^= 1;
367 sd = q->sdesc;
368 d = q->desc;
369 }
370 q->credits++;
371 }
372
373 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
374}
375
376static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
377{
378 refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
379}
380
381/**
382 * recycle_rx_buf - recycle a receive buffer
383 * @adapter: the adapter
384 * @q: the SGE free list
385 * @idx: index of buffer to recycle
386 *
387 * Recycles the specified buffer on the given free list by adding it at
388 * the next available slot on the list.
389 */
390static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
391 unsigned int idx)
392{
393 struct rx_desc *from = &q->desc[idx];
394 struct rx_desc *to = &q->desc[q->pidx];
395
396 q->sdesc[q->pidx] = q->sdesc[idx];
397 to->addr_lo = from->addr_lo; /* already big endian */
398 to->addr_hi = from->addr_hi; /* likewise */
399 wmb();
400 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
401 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
402 q->credits++;
403
404 if (++q->pidx == q->size) {
405 q->pidx = 0;
406 q->gen ^= 1;
407 }
408 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
409}
410
411/**
412 * alloc_ring - allocate resources for an SGE descriptor ring
413 * @pdev: the PCI device
414 * @nelem: the number of descriptors
415 * @elem_size: the size of each descriptor
416 * @sw_size: the size of the SW state associated with each ring element
417 * @phys: the physical address of the allocated ring
418 * @metadata: address of the array holding the SW state for the ring
419 *
420 * Allocates resources for an SGE descriptor ring, such as Tx queues,
421 * free buffer lists, or response queues. Each SGE ring requires
422 * space for its HW descriptors plus, optionally, space for the SW state
423 * associated with each HW entry (the metadata). The function returns
424 * three values: the virtual address for the HW ring (the return value
425 * of the function), the physical address of the HW ring, and the address
426 * of the SW ring.
427 */
428static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
429 size_t sw_size, dma_addr_t *phys, void *metadata)
430{
431 size_t len = nelem * elem_size;
432 void *s = NULL;
433 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
434
435 if (!p)
436 return NULL;
437 if (sw_size) {
438 s = kcalloc(nelem, sw_size, GFP_KERNEL);
439
440 if (!s) {
441 dma_free_coherent(&pdev->dev, len, p, *phys);
442 return NULL;
443 }
444 }
445 if (metadata)
446 *(void **)metadata = s;
447 memset(p, 0, len);
448 return p;
449}
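/*
 * Hedged usage sketch for alloc_ring() (editorial, not part of the patch):
 * roughly how a free-buffer list with per-entry SW state would be
 * allocated; the HW context setup happens separately elsewhere.
 */
static inline int demo_alloc_fl_ring(struct pci_dev *pdev, struct sge_fl *q)
{
	q->desc = alloc_ring(pdev, q->size, sizeof(struct rx_desc),
			     sizeof(struct rx_sw_desc), &q->phys_addr,
			     &q->sdesc);
	return q->desc ? 0 : -ENOMEM;
}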
450
451/**
452 * free_qset - free the resources of an SGE queue set
453 * @adapter: the adapter owning the queue set
454 * @q: the queue set
455 *
456 * Release the HW and SW resources associated with an SGE queue set, such
457 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
458 * queue set must be quiesced prior to calling this.
459 */
460void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
461{
462 int i;
463 struct pci_dev *pdev = adapter->pdev;
464
465 if (q->tx_reclaim_timer.function)
466 del_timer_sync(&q->tx_reclaim_timer);
467
468 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
469 if (q->fl[i].desc) {
470 spin_lock(&adapter->sge.reg_lock);
471 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
472 spin_unlock(&adapter->sge.reg_lock);
473 free_rx_bufs(pdev, &q->fl[i]);
474 kfree(q->fl[i].sdesc);
475 dma_free_coherent(&pdev->dev,
476 q->fl[i].size *
477 sizeof(struct rx_desc), q->fl[i].desc,
478 q->fl[i].phys_addr);
479 }
480
481 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
482 if (q->txq[i].desc) {
483 spin_lock(&adapter->sge.reg_lock);
484 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
485 spin_unlock(&adapter->sge.reg_lock);
486 if (q->txq[i].sdesc) {
487 free_tx_desc(adapter, &q->txq[i],
488 q->txq[i].in_use);
489 kfree(q->txq[i].sdesc);
490 }
491 dma_free_coherent(&pdev->dev,
492 q->txq[i].size *
493 sizeof(struct tx_desc),
494 q->txq[i].desc, q->txq[i].phys_addr);
495 __skb_queue_purge(&q->txq[i].sendq);
496 }
497
498 if (q->rspq.desc) {
499 spin_lock(&adapter->sge.reg_lock);
500 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
501 spin_unlock(&adapter->sge.reg_lock);
502 dma_free_coherent(&pdev->dev,
503 q->rspq.size * sizeof(struct rsp_desc),
504 q->rspq.desc, q->rspq.phys_addr);
505 }
506
507 if (q->netdev)
508 q->netdev->atalk_ptr = NULL;
509
510 memset(q, 0, sizeof(*q));
511}
512
513/**
514 * init_qset_cntxt - initialize an SGE queue set context info
515 * @qs: the queue set
516 * @id: the queue set id
517 *
518 * Initializes the TIDs and context ids for the queues of a queue set.
519 */
520static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
521{
522 qs->rspq.cntxt_id = id;
523 qs->fl[0].cntxt_id = 2 * id;
524 qs->fl[1].cntxt_id = 2 * id + 1;
525 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
526 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
527 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
528 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
529 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
530}
531
532/**
533 * sgl_len - calculates the size of an SGL of the given capacity
534 * @n: the number of SGL entries
535 *
536 * Calculates the number of flits needed for a scatter/gather list that
537 * can hold the given number of entries.
538 */
539static inline unsigned int sgl_len(unsigned int n)
540{
541 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
542 return (3 * n) / 2 + (n & 1);
543}
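/*
 * Editorial cross-check of sgl_len() (not used by the driver): each
 * struct sg_ent packs two address/length pairs into 3 flits (one flit of
 * two 32-bit lengths plus two flits of 64-bit addresses), and an odd
 * final entry needs 2 flits, which is exactly what the closed form gives.
 */
static inline unsigned int sgl_len_by_counting(unsigned int n)
{
	unsigned int flits = 0;

	for (; n >= 2; n -= 2)	/* full sg_ent: 2 entries in 3 flits */
		flits += 3;
	if (n)			/* odd last entry: length flit + address flit */
		flits += 2;
	return flits;		/* equals sgl_len(n) for all n */
}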
544
545/**
546 * flits_to_desc - returns the num of Tx descriptors for the given flits
547 * @n: the number of flits
548 *
549 * Calculates the number of Tx descriptors needed for the supplied number
550 * of flits.
551 */
552static inline unsigned int flits_to_desc(unsigned int n)
553{
554 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
555 return flit_desc_map[n];
556}
557
558/**
559 * get_packet - return the next ingress packet buffer from a free list
560 * @adap: the adapter that received the packet
561 * @fl: the SGE free list holding the packet
562 * @len: the packet length including any SGE padding
563 * @drop_thres: # of remaining buffers before we start dropping packets
564 *
565 * Get the next packet from a free list and complete setup of the
566 * sk_buff. If the packet is small we make a copy and recycle the
567 * original buffer, otherwise we use the original buffer itself. If a
568 * positive drop threshold is supplied packets are dropped and their
569 * buffers recycled if (a) the number of remaining buffers is under the
570 * threshold and the packet is too big to copy, or (b) the packet should
571 * be copied but there is no memory for the copy.
572 */
573static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
574 unsigned int len, unsigned int drop_thres)
575{
576 struct sk_buff *skb = NULL;
577 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
578
579 prefetch(sd->skb->data);
580
581 if (len <= SGE_RX_COPY_THRES) {
582 skb = alloc_skb(len, GFP_ATOMIC);
583 if (likely(skb != NULL)) {
584 __skb_put(skb, len);
585 pci_dma_sync_single_for_cpu(adap->pdev,
586 pci_unmap_addr(sd,
587 dma_addr),
588 len, PCI_DMA_FROMDEVICE);
589 memcpy(skb->data, sd->skb->data, len);
590 pci_dma_sync_single_for_device(adap->pdev,
591 pci_unmap_addr(sd,
592 dma_addr),
593 len, PCI_DMA_FROMDEVICE);
594 } else if (!drop_thres)
595 goto use_orig_buf;
596 recycle:
597 recycle_rx_buf(adap, fl, fl->cidx);
598 return skb;
599 }
600
601 if (unlikely(fl->credits < drop_thres))
602 goto recycle;
603
604 use_orig_buf:
605 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
606 fl->buf_size, PCI_DMA_FROMDEVICE);
607 skb = sd->skb;
608 skb_put(skb, len);
609 __refill_fl(adap, fl);
610 return skb;
611}
612
613/**
614 * get_imm_packet - return the next ingress packet buffer from a response
615 * @resp: the response descriptor containing the packet data
616 *
617 * Return a packet containing the immediate data of the given response.
618 */
619static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
620{
621 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
622
623 if (skb) {
624 __skb_put(skb, IMMED_PKT_SIZE);
625 memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE);
626 }
627 return skb;
628}
629
630/**
631 * calc_tx_descs - calculate the number of Tx descriptors for a packet
632 * @skb: the packet
633 *
634 * Returns the number of Tx descriptors needed for the given Ethernet
635 * packet. Ethernet packets require addition of WR and CPL headers.
636 */
637static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
638{
639 unsigned int flits;
640
641 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
642 return 1;
643
644 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
645 if (skb_shinfo(skb)->gso_size)
646 flits++;
647 return flits_to_desc(flits);
648}
649
650/**
651 * make_sgl - populate a scatter/gather list for a packet
652 * @skb: the packet
653 * @sgp: the SGL to populate
654 * @start: start address of skb main body data to include in the SGL
655 * @len: length of skb main body data to include in the SGL
656 * @pdev: the PCI device
657 *
658 * Generates a scatter/gather list for the buffers that make up a packet
659 * and returns the SGL size in 8-byte words. The caller must size the SGL
660 * appropriately.
661 */
662static inline unsigned int make_sgl(const struct sk_buff *skb,
663 struct sg_ent *sgp, unsigned char *start,
664 unsigned int len, struct pci_dev *pdev)
665{
666 dma_addr_t mapping;
667 unsigned int i, j = 0, nfrags;
668
669 if (len) {
670 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
671 sgp->len[0] = cpu_to_be32(len);
672 sgp->addr[0] = cpu_to_be64(mapping);
673 j = 1;
674 }
675
676 nfrags = skb_shinfo(skb)->nr_frags;
677 for (i = 0; i < nfrags; i++) {
678 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
679
680 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
681 frag->size, PCI_DMA_TODEVICE);
682 sgp->len[j] = cpu_to_be32(frag->size);
683 sgp->addr[j] = cpu_to_be64(mapping);
684 j ^= 1;
685 if (j == 0)
686 ++sgp;
687 }
688 if (j)
689 sgp->len[j] = 0;
690 return ((nfrags + (len != 0)) * 3) / 2 + j;
691}
692
693/**
694 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
695 * @adap: the adapter
696 * @q: the Tx queue
697 *
698 * Ring the doorbell if a Tx queue is asleep. There is a natural race
699 * where the HW may go to sleep just after we check, but in that case
700 * the interrupt handler will detect the outstanding TX packet
701 * and ring the doorbell for us.
702 *
703 * When GTS is disabled we unconditionally ring the doorbell.
704 */
705static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
706{
707#if USE_GTS
708 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
709 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
710 set_bit(TXQ_LAST_PKT_DB, &q->flags);
711 t3_write_reg(adap, A_SG_KDOORBELL,
712 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
713 }
714#else
715 wmb(); /* write descriptors before telling HW */
716 t3_write_reg(adap, A_SG_KDOORBELL,
717 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
718#endif
719}
720
721static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
722{
723#if SGE_NUM_GENBITS == 2
724 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
725#endif
726}
727
728/**
729 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
730 * @ndesc: number of Tx descriptors spanned by the SGL
731 * @skb: the packet corresponding to the WR
732 * @d: first Tx descriptor to be written
733 * @pidx: index of above descriptors
734 * @q: the SGE Tx queue
735 * @sgl: the SGL
736 * @flits: number of flits to the start of the SGL in the first descriptor
737 * @sgl_flits: the SGL size in flits
738 * @gen: the Tx descriptor generation
739 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
740 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
741 *
742 * Write a work request header and an associated SGL. If the SGL is
743 * small enough to fit into one Tx descriptor it has already been written
744 * and we just need to write the WR header. Otherwise we distribute the
745 * SGL across the number of descriptors it spans.
746 */
747static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
748 struct tx_desc *d, unsigned int pidx,
749 const struct sge_txq *q,
750 const struct sg_ent *sgl,
751 unsigned int flits, unsigned int sgl_flits,
752 unsigned int gen, unsigned int wr_hi,
753 unsigned int wr_lo)
754{
755 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
756 struct tx_sw_desc *sd = &q->sdesc[pidx];
757
758 sd->skb = skb;
759 if (need_skb_unmap()) {
760 struct unmap_info *ui = (struct unmap_info *)skb->cb;
761
762 ui->fragidx = 0;
763 ui->addr_idx = 0;
764 ui->sflit = flits;
765 }
766
767 if (likely(ndesc == 1)) {
768 skb->priority = pidx;
769 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
770 V_WR_SGLSFLT(flits)) | wr_hi;
771 wmb();
772 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
773 V_WR_GEN(gen)) | wr_lo;
774 wr_gen2(d, gen);
775 } else {
776 unsigned int ogen = gen;
777 const u64 *fp = (const u64 *)sgl;
778 struct work_request_hdr *wp = wrp;
779
780 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
781 V_WR_SGLSFLT(flits)) | wr_hi;
782
783 while (sgl_flits) {
784 unsigned int avail = WR_FLITS - flits;
785
786 if (avail > sgl_flits)
787 avail = sgl_flits;
788 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
789 sgl_flits -= avail;
790 ndesc--;
791 if (!sgl_flits)
792 break;
793
794 fp += avail;
795 d++;
796 sd++;
797 if (++pidx == q->size) {
798 pidx = 0;
799 gen ^= 1;
800 d = q->desc;
801 sd = q->sdesc;
802 }
803
804 sd->skb = skb;
805 wrp = (struct work_request_hdr *)d;
806 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
807 V_WR_SGLSFLT(1)) | wr_hi;
808 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
809 sgl_flits + 1)) |
810 V_WR_GEN(gen)) | wr_lo;
811 wr_gen2(d, gen);
812 flits = 1;
813 }
814 skb->priority = pidx;
815 wrp->wr_hi |= htonl(F_WR_EOP);
816 wmb();
817 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
818 wr_gen2((struct tx_desc *)wp, ogen);
819 WARN_ON(ndesc != 0);
820 }
821}
822
823/**
824 * write_tx_pkt_wr - write a TX_PKT work request
825 * @adap: the adapter
826 * @skb: the packet to send
827 * @pi: the egress interface
828 * @pidx: index of the first Tx descriptor to write
829 * @gen: the generation value to use
830 * @q: the Tx queue
831 * @ndesc: number of descriptors the packet will occupy
832 * @compl: the value of the COMPL bit to use
833 *
834 * Generate a TX_PKT work request to send the supplied packet.
835 */
836static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
837 const struct port_info *pi,
838 unsigned int pidx, unsigned int gen,
839 struct sge_txq *q, unsigned int ndesc,
840 unsigned int compl)
841{
842 unsigned int flits, sgl_flits, cntrl, tso_info;
843 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
844 struct tx_desc *d = &q->desc[pidx];
845 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
846
847 cpl->len = htonl(skb->len | 0x80000000);
848 cntrl = V_TXPKT_INTF(pi->port_id);
849
850 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
851 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
852
853 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
854 if (tso_info) {
855 int eth_type;
856 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
857
858 d->flit[2] = 0;
859 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
860 hdr->cntrl = htonl(cntrl);
861 eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
862 CPL_ETH_II : CPL_ETH_II_VLAN;
863 tso_info |= V_LSO_ETH_TYPE(eth_type) |
864 V_LSO_IPHDR_WORDS(skb->nh.iph->ihl) |
865 V_LSO_TCPHDR_WORDS(skb->h.th->doff);
866 hdr->lso_info = htonl(tso_info);
867 flits = 3;
868 } else {
869 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
870 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
871 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
872 cpl->cntrl = htonl(cntrl);
873
874 if (skb->len <= WR_LEN - sizeof(*cpl)) {
875 q->sdesc[pidx].skb = NULL;
876 if (!skb->data_len)
877 memcpy(&d->flit[2], skb->data, skb->len);
878 else
879 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
880
881 flits = (skb->len + 7) / 8 + 2;
882 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
883 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
884 | F_WR_SOP | F_WR_EOP | compl);
885 wmb();
886 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
887 V_WR_TID(q->token));
888 wr_gen2(d, gen);
889 kfree_skb(skb);
890 return;
891 }
892
893 flits = 2;
894 }
895
896 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
897 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
898 if (need_skb_unmap())
899 ((struct unmap_info *)skb->cb)->len = skb_headlen(skb);
900
901 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
902 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
903 htonl(V_WR_TID(q->token)));
904}
905
906/**
907 * t3_eth_xmit - add a packet to the Ethernet Tx queue
908 * @skb: the packet
909 * @dev: the egress net device
910 *
911 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
912 */
913int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
914{
915 unsigned int ndesc, pidx, credits, gen, compl;
916 const struct port_info *pi = netdev_priv(dev);
917 struct adapter *adap = dev->priv;
918 struct sge_qset *qs = dev2qset(dev);
919 struct sge_txq *q = &qs->txq[TXQ_ETH];
920
921 /*
922 * The chip min packet length is 9 octets but play safe and reject
923 * anything shorter than an Ethernet header.
924 */
925 if (unlikely(skb->len < ETH_HLEN)) {
926 dev_kfree_skb(skb);
927 return NETDEV_TX_OK;
928 }
929
930 spin_lock(&q->lock);
931 reclaim_completed_tx(adap, q);
932
933 credits = q->size - q->in_use;
934 ndesc = calc_tx_descs(skb);
935
936 if (unlikely(credits < ndesc)) {
937 if (!netif_queue_stopped(dev)) {
938 netif_stop_queue(dev);
939 set_bit(TXQ_ETH, &qs->txq_stopped);
940 q->stops++;
941 dev_err(&adap->pdev->dev,
942 "%s: Tx ring %u full while queue awake!\n",
943 dev->name, q->cntxt_id & 7);
944 }
945 spin_unlock(&q->lock);
946 return NETDEV_TX_BUSY;
947 }
948
949 q->in_use += ndesc;
950 if (unlikely(credits - ndesc < q->stop_thres)) {
951 q->stops++;
952 netif_stop_queue(dev);
953 set_bit(TXQ_ETH, &qs->txq_stopped);
954#if !USE_GTS
955 if (should_restart_tx(q) &&
956 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
957 q->restarts++;
958 netif_wake_queue(dev);
959 }
960#endif
961 }
962
963 gen = q->gen;
964 q->unacked += ndesc;
965 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
966 q->unacked &= 7;
967 pidx = q->pidx;
968 q->pidx += ndesc;
969 if (q->pidx >= q->size) {
970 q->pidx -= q->size;
971 q->gen ^= 1;
972 }
973
974 /* update port statistics */
975 if (skb->ip_summed == CHECKSUM_COMPLETE)
976 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
977 if (skb_shinfo(skb)->gso_size)
978 qs->port_stats[SGE_PSTAT_TSO]++;
979 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
980 qs->port_stats[SGE_PSTAT_VLANINS]++;
981
982 dev->trans_start = jiffies;
983 spin_unlock(&q->lock);
984
985 /*
986 * We do not use Tx completion interrupts to free DMAd Tx packets.
987 * This is good for performance but means that we rely on new Tx
988 * packets arriving to run the destructors of completed packets,
989 * which open up space in their sockets' send queues. Sometimes
990 * we do not get such new packets, causing Tx to stall. A single
991 * UDP transmitter is a good example of this situation. We have
992 * a clean up timer that periodically reclaims completed packets
993 * but it doesn't run often enough (nor do we want it to) to prevent
994 * lengthy stalls. A solution to this problem is to run the
995 * destructor early, after the packet is queued but before it's DMAd.
996 * A downside is that we lie to socket memory accounting, but the amount
997 * of extra memory is reasonable (limited by the number of Tx
998 * descriptors), the packets do actually get freed quickly by new
999 * packets almost always, and for protocols like TCP that wait for
1000 * acks to really free up the data the extra memory is even less.
1001 * On the positive side we run the destructors on the sending CPU
1002 * rather than on a potentially different completing CPU, usually a
1003 * good thing. We also run them without holding our Tx queue lock,
1004 * unlike what reclaim_completed_tx() would otherwise do.
1005 *
1006 * Run the destructor before telling the DMA engine about the packet
1007 * to make sure it doesn't complete and get freed prematurely.
1008 */
1009 if (likely(!skb_shared(skb)))
1010 skb_orphan(skb);
1011
1012 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1013 check_ring_tx_db(adap, q);
1014 return NETDEV_TX_OK;
1015}
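/*
 * Editorial note on the completion throttling in t3_eth_xmit() above (an
 * illustration, not driver code): "unacked" accumulates descriptors and the
 * COMPL bit is requested only when the count crosses 8, i.e. roughly one
 * completion request per 8 descriptors rather than one per packet.
 */
static inline unsigned int demo_want_completion(unsigned int *unacked,
						unsigned int ndesc)
{
	*unacked += ndesc;
	if (*unacked & 8) {		/* crossed a multiple of 8 */
		*unacked &= 7;
		return 1;
	}
	*unacked &= 7;
	return 0;
}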
1016
1017/**
1018 * write_imm - write a packet into a Tx descriptor as immediate data
1019 * @d: the Tx descriptor to write
1020 * @skb: the packet
1021 * @len: the length of packet data to write as immediate data
1022 * @gen: the generation bit value to write
1023 *
1024 * Writes a packet as immediate data into a Tx descriptor. The packet
1025 * contains a work request at its beginning. We must write the packet
1026 * carefully so the SGE doesn't read it accidentally before it's written in
1027 * its entirety.
1028 */
1029static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1030 unsigned int len, unsigned int gen)
1031{
1032 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1033 struct work_request_hdr *to = (struct work_request_hdr *)d;
1034
1035 memcpy(&to[1], &from[1], len - sizeof(*from));
1036 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1037 V_WR_BCNTLFLT(len & 7));
1038 wmb();
1039 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1040 V_WR_LEN((len + 7) / 8));
1041 wr_gen2(d, gen);
1042 kfree_skb(skb);
1043}
1044
1045/**
1046 * check_desc_avail - check descriptor availability on a send queue
1047 * @adap: the adapter
1048 * @q: the send queue
1049 * @skb: the packet needing the descriptors
1050 * @ndesc: the number of Tx descriptors needed
1051 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1052 *
1053 * Checks if the requested number of Tx descriptors is available on an
1054 * SGE send queue. If the queue is already suspended or not enough
1055 * descriptors are available the packet is queued for later transmission.
1056 * Must be called with the Tx queue locked.
1057 *
1058 * Returns 0 if enough descriptors are available, 1 if there aren't
1059 * enough descriptors and the packet has been queued, and 2 if the caller
1060 * needs to retry because there weren't enough descriptors at the
1061 * beginning of the call but some freed up in the mean time.
1062 */
1063static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1064 struct sk_buff *skb, unsigned int ndesc,
1065 unsigned int qid)
1066{
1067 if (unlikely(!skb_queue_empty(&q->sendq))) {
1068 addq_exit:__skb_queue_tail(&q->sendq, skb);
1069 return 1;
1070 }
1071 if (unlikely(q->size - q->in_use < ndesc)) {
1072 struct sge_qset *qs = txq_to_qset(q, qid);
1073
1074 set_bit(qid, &qs->txq_stopped);
1075 smp_mb__after_clear_bit();
1076
1077 if (should_restart_tx(q) &&
1078 test_and_clear_bit(qid, &qs->txq_stopped))
1079 return 2;
1080
1081 q->stops++;
1082 goto addq_exit;
1083 }
1084 return 0;
1085}
1086
1087/**
1088 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1089 * @q: the SGE control Tx queue
1090 *
1091 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1092 * that send only immediate data (presently just the control queues) and
1093 * thus do not have any sk_buffs to release.
1094 */
1095static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1096{
1097 unsigned int reclaim = q->processed - q->cleaned;
1098
1099 q->in_use -= reclaim;
1100 q->cleaned += reclaim;
1101}
1102
1103static inline int immediate(const struct sk_buff *skb)
1104{
1105 return skb->len <= WR_LEN && !skb->data_len;
1106}
1107
1108/**
1109 * ctrl_xmit - send a packet through an SGE control Tx queue
1110 * @adap: the adapter
1111 * @q: the control queue
1112 * @skb: the packet
1113 *
1114 * Send a packet through an SGE control Tx queue. Packets sent through
1115 * a control queue must fit entirely as immediate data in a single Tx
1116 * descriptor and have no page fragments.
1117 */
1118static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1119 struct sk_buff *skb)
1120{
1121 int ret;
1122 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1123
1124 if (unlikely(!immediate(skb))) {
1125 WARN_ON(1);
1126 dev_kfree_skb(skb);
1127 return NET_XMIT_SUCCESS;
1128 }
1129
1130 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1131 wrp->wr_lo = htonl(V_WR_TID(q->token));
1132
1133 spin_lock(&q->lock);
1134 again:reclaim_completed_tx_imm(q);
1135
1136 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1137 if (unlikely(ret)) {
1138 if (ret == 1) {
1139 spin_unlock(&q->lock);
1140 return NET_XMIT_CN;
1141 }
1142 goto again;
1143 }
1144
1145 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1146
1147 q->in_use++;
1148 if (++q->pidx >= q->size) {
1149 q->pidx = 0;
1150 q->gen ^= 1;
1151 }
1152 spin_unlock(&q->lock);
1153 wmb();
1154 t3_write_reg(adap, A_SG_KDOORBELL,
1155 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1156 return NET_XMIT_SUCCESS;
1157}
1158
1159/**
1160 * restart_ctrlq - restart a suspended control queue
1161 * @qs: the queue set containing the control queue
1162 *
1163 * Resumes transmission on a suspended Tx control queue.
1164 */
1165static void restart_ctrlq(unsigned long data)
1166{
1167 struct sk_buff *skb;
1168 struct sge_qset *qs = (struct sge_qset *)data;
1169 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1170 struct adapter *adap = qs->netdev->priv;
1171
1172 spin_lock(&q->lock);
1173 again:reclaim_completed_tx_imm(q);
1174
1175 while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) {
1176
1177 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1178
1179 if (++q->pidx >= q->size) {
1180 q->pidx = 0;
1181 q->gen ^= 1;
1182 }
1183 q->in_use++;
1184 }
1185
1186 if (!skb_queue_empty(&q->sendq)) {
1187 set_bit(TXQ_CTRL, &qs->txq_stopped);
1188 smp_mb__after_clear_bit();
1189
1190 if (should_restart_tx(q) &&
1191 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1192 goto again;
1193 q->stops++;
1194 }
1195
1196 spin_unlock(&q->lock);
1197 t3_write_reg(adap, A_SG_KDOORBELL,
1198 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1199}
1200
1201/**
1202 * write_ofld_wr - write an offload work request
1203 * @adap: the adapter
1204 * @skb: the packet to send
1205 * @q: the Tx queue
1206 * @pidx: index of the first Tx descriptor to write
1207 * @gen: the generation value to use
1208 * @ndesc: number of descriptors the packet will occupy
1209 *
1210 * Write an offload work request to send the supplied packet. The packet
1211 * data already carry the work request with most fields populated.
1212 */
1213static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1214 struct sge_txq *q, unsigned int pidx,
1215 unsigned int gen, unsigned int ndesc)
1216{
1217 unsigned int sgl_flits, flits;
1218 struct work_request_hdr *from;
1219 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1220 struct tx_desc *d = &q->desc[pidx];
1221
1222 if (immediate(skb)) {
1223 q->sdesc[pidx].skb = NULL;
1224 write_imm(d, skb, skb->len, gen);
1225 return;
1226 }
1227
1228 /* Only TX_DATA builds SGLs */
1229
1230 from = (struct work_request_hdr *)skb->data;
1231 memcpy(&d->flit[1], &from[1], skb->h.raw - skb->data - sizeof(*from));
1232
1233 flits = (skb->h.raw - skb->data) / 8;
1234 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1235 sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
1236 adap->pdev);
1237 if (need_skb_unmap())
1238 ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
1239
1240 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1241 gen, from->wr_hi, from->wr_lo);
1242}
1243
1244/**
1245 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1246 * @skb: the packet
1247 *
1248 * Returns the number of Tx descriptors needed for the given offload
1249 * packet. These packets are already fully constructed.
1250 */
1251static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1252{
1253 unsigned int flits, cnt = skb_shinfo(skb)->nr_frags;
1254
1255 if (skb->len <= WR_LEN && cnt == 0)
1256 return 1; /* packet fits as immediate data */
1257
1258 flits = (skb->h.raw - skb->data) / 8; /* headers */
1259 if (skb->tail != skb->h.raw)
1260 cnt++;
1261 return flits_to_desc(flits + sgl_len(cnt));
1262}
1263
1264/**
1265 * ofld_xmit - send a packet through an offload queue
1266 * @adap: the adapter
1267 * @q: the Tx offload queue
1268 * @skb: the packet
1269 *
1270 * Send an offload packet through an SGE offload queue.
1271 */
1272static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1273 struct sk_buff *skb)
1274{
1275 int ret;
1276 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1277
1278 spin_lock(&q->lock);
1279 again:reclaim_completed_tx(adap, q);
1280
1281 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1282 if (unlikely(ret)) {
1283 if (ret == 1) {
1284 skb->priority = ndesc; /* save for restart */
1285 spin_unlock(&q->lock);
1286 return NET_XMIT_CN;
1287 }
1288 goto again;
1289 }
1290
1291 gen = q->gen;
1292 q->in_use += ndesc;
1293 pidx = q->pidx;
1294 q->pidx += ndesc;
1295 if (q->pidx >= q->size) {
1296 q->pidx -= q->size;
1297 q->gen ^= 1;
1298 }
1299 spin_unlock(&q->lock);
1300
1301 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1302 check_ring_tx_db(adap, q);
1303 return NET_XMIT_SUCCESS;
1304}
1305
1306/**
1307 * restart_offloadq - restart a suspended offload queue
1308 * @qs: the queue set containing the offload queue
1309 *
1310 * Resumes transmission on a suspended Tx offload queue.
1311 */
1312static void restart_offloadq(unsigned long data)
1313{
1314 struct sk_buff *skb;
1315 struct sge_qset *qs = (struct sge_qset *)data;
1316 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1317 struct adapter *adap = qs->netdev->priv;
1318
1319 spin_lock(&q->lock);
1320 again:reclaim_completed_tx(adap, q);
1321
1322 while ((skb = skb_peek(&q->sendq)) != NULL) {
1323 unsigned int gen, pidx;
1324 unsigned int ndesc = skb->priority;
1325
1326 if (unlikely(q->size - q->in_use < ndesc)) {
1327 set_bit(TXQ_OFLD, &qs->txq_stopped);
1328 smp_mb__after_clear_bit();
1329
1330 if (should_restart_tx(q) &&
1331 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1332 goto again;
1333 q->stops++;
1334 break;
1335 }
1336
1337 gen = q->gen;
1338 q->in_use += ndesc;
1339 pidx = q->pidx;
1340 q->pidx += ndesc;
1341 if (q->pidx >= q->size) {
1342 q->pidx -= q->size;
1343 q->gen ^= 1;
1344 }
1345 __skb_unlink(skb, &q->sendq);
1346 spin_unlock(&q->lock);
1347
1348 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1349 spin_lock(&q->lock);
1350 }
1351 spin_unlock(&q->lock);
1352
1353#if USE_GTS
1354 set_bit(TXQ_RUNNING, &q->flags);
1355 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1356#endif
1357 t3_write_reg(adap, A_SG_KDOORBELL,
1358 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1359}
1360
1361/**
1362 * queue_set - return the queue set a packet should use
1363 * @skb: the packet
1364 *
1365 * Maps a packet to the SGE queue set it should use. The desired queue
1366 * set is carried in bits 1-3 in the packet's priority.
1367 */
1368static inline int queue_set(const struct sk_buff *skb)
1369{
1370 return skb->priority >> 1;
1371}
1372
1373/**
1374 * is_ctrl_pkt - return whether an offload packet is a control packet
1375 * @skb: the packet
1376 *
1377 * Determines whether an offload packet should use an OFLD or a CTRL
1378 * Tx queue. This is indicated by bit 0 in the packet's priority.
1379 */
1380static inline int is_ctrl_pkt(const struct sk_buff *skb)
1381{
1382 return skb->priority & 1;
1383}
1384
1385/**
1386 * t3_offload_tx - send an offload packet
1387 * @tdev: the offload device to send to
1388 * @skb: the packet
1389 *
1390 * Sends an offload packet. We use the packet priority to select the
1391 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1392 * should be sent as regular or control, bits 1-3 select the queue set.
1393 */
1394int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1395{
1396 struct adapter *adap = tdev2adap(tdev);
1397 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1398
1399 if (unlikely(is_ctrl_pkt(skb)))
1400 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1401
1402 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1403}
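/*
 * Editorial sketch of the priority encoding that queue_set() and
 * is_ctrl_pkt() above decode (an illustration, not an exported driver API):
 * the sender places the target queue set in bits 1-3 of skb->priority and
 * uses bit 0 to choose the CTRL queue over the OFLD queue.
 */
static inline void demo_set_ofld_priority(struct sk_buff *skb,
					  unsigned int qset, int is_ctrl)
{
	skb->priority = (qset << 1) | (is_ctrl ? 1 : 0);
}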
1404
1405/**
1406 * offload_enqueue - add an offload packet to an SGE offload receive queue
1407 * @q: the SGE response queue
1408 * @skb: the packet
1409 *
1410 * Add a new offload packet to an SGE response queue's offload packet
1411 * queue. If the packet is the first on the queue it schedules the RX
1412 * softirq to process the queue.
1413 */
1414static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1415{
1416 skb->next = skb->prev = NULL;
1417 if (q->rx_tail)
1418 q->rx_tail->next = skb;
1419 else {
1420 struct sge_qset *qs = rspq_to_qset(q);
1421
1422 if (__netif_rx_schedule_prep(qs->netdev))
1423 __netif_rx_schedule(qs->netdev);
1424 q->rx_head = skb;
1425 }
1426 q->rx_tail = skb;
1427}
1428
1429/**
1430 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1431 * @tdev: the offload device that will be receiving the packets
1432 * @q: the SGE response queue that assembled the bundle
1433 * @skbs: the partial bundle
1434 * @n: the number of packets in the bundle
1435 *
1436 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1437 */
1438static inline void deliver_partial_bundle(struct t3cdev *tdev,
1439 struct sge_rspq *q,
1440 struct sk_buff *skbs[], int n)
1441{
1442 if (n) {
1443 q->offload_bundles++;
1444 tdev->recv(tdev, skbs, n);
1445 }
1446}
1447
1448/**
1449 * ofld_poll - NAPI handler for offload packets in interrupt mode
1450 * @dev: the network device doing the polling
1451 * @budget: polling budget
1452 *
1453 * The NAPI handler for offload packets when a response queue is serviced
1454 * by the hard interrupt handler, i.e., when it's operating in non-polling
1455 * mode. Creates small packet batches and sends them through the offload
1456 * receive handler. Batches need to be of modest size as we do prefetches
1457 * on the packets in each.
1458 */
1459static int ofld_poll(struct net_device *dev, int *budget)
1460{
1461 struct adapter *adapter = dev->priv;
1462 struct sge_qset *qs = dev2qset(dev);
1463 struct sge_rspq *q = &qs->rspq;
1464 int work_done, limit = min(*budget, dev->quota), avail = limit;
1465
1466 while (avail) {
1467 struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
1468 int ngathered;
1469
1470 spin_lock_irq(&q->lock);
1471 head = q->rx_head;
1472 if (!head) {
1473 work_done = limit - avail;
1474 *budget -= work_done;
1475 dev->quota -= work_done;
1476 __netif_rx_complete(dev);
1477 spin_unlock_irq(&q->lock);
1478 return 0;
1479 }
1480
1481 tail = q->rx_tail;
1482 q->rx_head = q->rx_tail = NULL;
1483 spin_unlock_irq(&q->lock);
1484
1485 for (ngathered = 0; avail && head; avail--) {
1486 prefetch(head->data);
1487 skbs[ngathered] = head;
1488 head = head->next;
1489 skbs[ngathered]->next = NULL;
1490 if (++ngathered == RX_BUNDLE_SIZE) {
1491 q->offload_bundles++;
1492 adapter->tdev.recv(&adapter->tdev, skbs,
1493 ngathered);
1494 ngathered = 0;
1495 }
1496 }
1497 if (head) { /* splice remaining packets back onto Rx queue */
1498 spin_lock_irq(&q->lock);
1499 tail->next = q->rx_head;
1500 if (!q->rx_head)
1501 q->rx_tail = tail;
1502 q->rx_head = head;
1503 spin_unlock_irq(&q->lock);
1504 }
1505 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1506 }
1507 work_done = limit - avail;
1508 *budget -= work_done;
1509 dev->quota -= work_done;
1510 return 1;
1511}
1512
1513/**
1514 * rx_offload - process a received offload packet
1515 * @tdev: the offload device receiving the packet
1516 * @rq: the response queue that received the packet
1517 * @skb: the packet
1518 * @rx_gather: a gather list of packets if we are building a bundle
1519 * @gather_idx: index of the next available slot in the bundle
1520 *
1521 * Process an ingress offload packet and add it to the offload ingress
1522 * queue. Returns the index of the next available slot in the bundle.
1523 */
1524static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1525 struct sk_buff *skb, struct sk_buff *rx_gather[],
1526 unsigned int gather_idx)
1527{
1528 rq->offload_pkts++;
1529 skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data;
1530
1531 if (rq->polling) {
1532 rx_gather[gather_idx++] = skb;
1533 if (gather_idx == RX_BUNDLE_SIZE) {
1534 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1535 gather_idx = 0;
1536 rq->offload_bundles++;
1537 }
1538 } else
1539 offload_enqueue(rq, skb);
1540
1541 return gather_idx;
1542}
1543
1544/**
1545 * update_tx_completed - update the number of processed Tx descriptors
1546 * @qs: the queue set to update
1547 * @idx: which Tx queue within the set to update
1548 * @credits: number of new processed descriptors
1549 * @tx_completed: accumulates credits for the queues
1550 *
1551 * Updates the number of completed Tx descriptors for a queue set's Tx
1552 * queue. On UP systems we update the information immediately but on
1553 * MP we accumulate the credits locally and update the Tx queue when we
1554 * reach a threshold to avoid cache-line bouncing.
1555 */
1556static inline void update_tx_completed(struct sge_qset *qs, int idx,
1557 unsigned int credits,
1558 unsigned int tx_completed[])
1559{
1560#ifdef CONFIG_SMP
1561 tx_completed[idx] += credits;
1562 if (tx_completed[idx] > 32) {
1563 qs->txq[idx].processed += tx_completed[idx];
1564 tx_completed[idx] = 0;
1565 }
1566#else
1567 qs->txq[idx].processed += credits;
1568#endif
1569}
1570
1571/**
1572 * restart_tx - check whether to restart suspended Tx queues
1573 * @qs: the queue set to resume
1574 *
1575 * Restarts suspended Tx queues of an SGE queue set if they have enough
1576 * free resources to resume operation.
1577 */
1578static void restart_tx(struct sge_qset *qs)
1579{
1580 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1581 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1582 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1583 qs->txq[TXQ_ETH].restarts++;
1584 if (netif_running(qs->netdev))
1585 netif_wake_queue(qs->netdev);
1586 }
1587
1588 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1589 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1590 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1591 qs->txq[TXQ_OFLD].restarts++;
1592 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1593 }
1594 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1595 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1596 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1597 qs->txq[TXQ_CTRL].restarts++;
1598 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1599 }
1600}
1601
1602/**
1603 * rx_eth - process an ingress ethernet packet
1604 * @adap: the adapter
1605 * @rq: the response queue that received the packet
1606 * @skb: the packet
1607 * @pad: amount of padding at the start of the buffer
1608 *
1609 * Process an ingress ethernet packet and deliver it to the stack.
1610 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1611 * if it was immediate data in a response.
1612 */
1613static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1614 struct sk_buff *skb, int pad)
1615{
1616 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1617 struct port_info *pi;
1618
1619 rq->eth_pkts++;
1620 skb_pull(skb, sizeof(*p) + pad);
1621 skb->dev = adap->port[p->iff];
1622 skb->dev->last_rx = jiffies;
1623 skb->protocol = eth_type_trans(skb, skb->dev);
1624 pi = netdev_priv(skb->dev);
1625 if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
1626 !p->fragment) {
1627 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1628 skb->ip_summed = CHECKSUM_UNNECESSARY;
1629 } else
1630 skb->ip_summed = CHECKSUM_NONE;
1631
1632 if (unlikely(p->vlan_valid)) {
1633 struct vlan_group *grp = pi->vlan_grp;
1634
1635 rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
1636 if (likely(grp))
1637 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1638 rq->polling);
1639 else
1640 dev_kfree_skb_any(skb);
1641 } else if (rq->polling)
1642 netif_receive_skb(skb);
1643 else
1644 netif_rx(skb);
1645}
1646
1647/**
1648 * handle_rsp_cntrl_info - handles control information in a response
1649 * @qs: the queue set corresponding to the response
1650 * @flags: the response control flags
1651 * @tx_completed: accumulates completion credits for the Tx queues
1652 *
1653 * Handles the control information of an SGE response, such as GTS
1654 * indications and completion credits for the queue set's Tx queues.
1655 */
1656static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags,
1657 unsigned int tx_completed[])
1658{
1659 unsigned int credits;
1660
1661#if USE_GTS
1662 if (flags & F_RSPD_TXQ0_GTS)
1663 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
1664#endif
1665
1666 /* ETH credits are already coalesced, return them immediately. */
1667 credits = G_RSPD_TXQ0_CR(flags);
1668 if (credits)
1669 qs->txq[TXQ_ETH].processed += credits;
1670
1671# if USE_GTS
1672 if (flags & F_RSPD_TXQ1_GTS)
1673 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
1674# endif
1675 update_tx_completed(qs, TXQ_OFLD, G_RSPD_TXQ1_CR(flags), tx_completed);
1676 update_tx_completed(qs, TXQ_CTRL, G_RSPD_TXQ2_CR(flags), tx_completed);
1677}
1678
1679/**
1680 * flush_tx_completed - returns accumulated Tx completions to Tx queues
1681 * @qs: the queue set to update
1682 * @tx_completed: pending completion credits to return to Tx queues
1683 *
1684 * Updates the number of completed Tx descriptors for a queue set's Tx
1685 * queues with the credits pending in @tx_completed. This does something
1686 * only on MP systems as on UP systems we return the credits immediately.
1687 */
1688static inline void flush_tx_completed(struct sge_qset *qs,
1689 unsigned int tx_completed[])
1690{
1691#if defined(CONFIG_SMP)
1692 if (tx_completed[TXQ_OFLD])
1693 qs->txq[TXQ_OFLD].processed += tx_completed[TXQ_OFLD];
1694 if (tx_completed[TXQ_CTRL])
1695 qs->txq[TXQ_CTRL].processed += tx_completed[TXQ_CTRL];
1696#endif
1697}
1698
1699/**
1700 * check_ring_db - check if we need to ring any doorbells
1701 * @adapter: the adapter
1702 * @qs: the queue set whose Tx queues are to be examined
1703 * @sleeping: indicates which Tx queue sent GTS
1704 *
1705 * Checks if some of a queue set's Tx queues need to ring their doorbells
1706 * to resume transmission after idling while they still have unprocessed
1707 * descriptors.
1708 */
1709static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
1710 unsigned int sleeping)
1711{
1712 if (sleeping & F_RSPD_TXQ0_GTS) {
1713 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1714
1715 if (txq->cleaned + txq->in_use != txq->processed &&
1716 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1717 set_bit(TXQ_RUNNING, &txq->flags);
1718 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1719 V_EGRCNTX(txq->cntxt_id));
1720 }
1721 }
1722
1723 if (sleeping & F_RSPD_TXQ1_GTS) {
1724 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
1725
1726 if (txq->cleaned + txq->in_use != txq->processed &&
1727 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1728 set_bit(TXQ_RUNNING, &txq->flags);
1729 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1730 V_EGRCNTX(txq->cntxt_id));
1731 }
1732 }
1733}
1734
1735/**
1736 * is_new_response - check if a response is newly written
1737 * @r: the response descriptor
1738 * @q: the response queue
1739 *
1740 * Returns true if a response descriptor contains a yet unprocessed
1741 * response.
1742 */
1743static inline int is_new_response(const struct rsp_desc *r,
1744 const struct sge_rspq *q)
1745{
1746 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
1747}
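/*
 * Editorial illustration of the generation-bit scheme used throughout this
 * file (not driver code): the queue's gen bit flips on every wrap of the
 * ring, so an entry is new exactly when the generation it was written with
 * matches the queue's current generation, as is_new_response() checks.
 */
static inline int demo_entry_is_new(unsigned int entry_gen,
				    unsigned int queue_gen)
{
	return (entry_gen & 1) == (queue_gen & 1);
}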
1748
1749#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
1750#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
1751 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
1752 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
1753 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
1754
1755 /* How long to delay the next interrupt in case of memory shortage, in units of 0.1 us. */
1756#define NOMEM_INTR_DELAY 2500
1757
1758/**
1759 * process_responses - process responses from an SGE response queue
1760 * @adap: the adapter
1761 * @qs: the queue set to which the response queue belongs
1762 * @budget: how many responses can be processed in this round
1763 *
1764 * Process responses from an SGE response queue up to the supplied budget.
1765 * Responses include received packets as well as credits and other events
1766 * for the queues that belong to the response queue's queue set.
1767 * A negative budget is effectively unlimited.
1768 *
1769 * Additionally choose the interrupt holdoff time for the next interrupt
1770 * on this queue. If the system is under memory shortage use a fairly
1771 * long delay to help recovery.
1772 */
1773static int process_responses(struct adapter *adap, struct sge_qset *qs,
1774 int budget)
1775{
1776 struct sge_rspq *q = &qs->rspq;
1777 struct rsp_desc *r = &q->desc[q->cidx];
1778 int budget_left = budget;
1779 unsigned int sleeping = 0, tx_completed[3] = { 0, 0, 0 };
1780 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
1781 int ngathered = 0;
1782
1783 q->next_holdoff = q->holdoff_tmr;
1784
1785 while (likely(budget_left && is_new_response(r, q))) {
1786 int eth, ethpad = 0;
1787 struct sk_buff *skb = NULL;
1788 u32 len, flags = ntohl(r->flags);
1789 u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
1790
1791 eth = r->rss_hdr.opcode == CPL_RX_PKT;
1792
1793 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
1794 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
1795 if (!skb)
1796 goto no_mem;
1797
1798 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
1799 skb->data[0] = CPL_ASYNC_NOTIF;
1800 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
1801 q->async_notif++;
1802 } else if (flags & F_RSPD_IMM_DATA_VALID) {
1803 skb = get_imm_packet(r);
1804 if (unlikely(!skb)) {
1805 no_mem:
1806 q->next_holdoff = NOMEM_INTR_DELAY;
1807 q->nomem++;
1808 /* consume one credit since we tried */
1809 budget_left--;
1810 break;
1811 }
1812 q->imm_data++;
1813 } else if ((len = ntohl(r->len_cq)) != 0) {
1814 struct sge_fl *fl;
1815
1816 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
1817 fl->credits--;
1818 skb = get_packet(adap, fl, G_RSPD_LEN(len),
1819 eth ? SGE_RX_DROP_THRES : 0);
1820 if (!skb)
1821 q->rx_drops++;
1822 else if (r->rss_hdr.opcode == CPL_TRACE_PKT)
1823 __skb_pull(skb, 2);
1824 ethpad = 2;
1825 if (++fl->cidx == fl->size)
1826 fl->cidx = 0;
1827 } else
1828 q->pure_rsps++;
1829
1830 if (flags & RSPD_CTRL_MASK) {
1831 sleeping |= flags & RSPD_GTS_MASK;
1832 handle_rsp_cntrl_info(qs, flags, tx_completed);
1833 }
1834
1835 r++;
1836 if (unlikely(++q->cidx == q->size)) {
1837 q->cidx = 0;
1838 q->gen ^= 1;
1839 r = q->desc;
1840 }
1841 prefetch(r);
1842
1843 if (++q->credits >= (q->size / 4)) {
1844 refill_rspq(adap, q, q->credits);
1845 q->credits = 0;
1846 }
1847
1848 if (likely(skb != NULL)) {
1849 if (eth)
1850 rx_eth(adap, q, skb, ethpad);
1851 else {
1852 /* Preserve the RSS info in csum & priority */
1853 skb->csum = rss_hi;
1854 skb->priority = rss_lo;
1855 ngathered = rx_offload(&adap->tdev, q, skb,
1856 offload_skbs, ngathered);
1857 }
1858 }
1859
1860 --budget_left;
1861 }
1862
1863 flush_tx_completed(qs, tx_completed);
1864 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
1865 if (sleeping)
1866 check_ring_db(adap, qs, sleeping);
1867
1868 smp_mb(); /* commit Tx queue .processed updates */
1869 if (unlikely(qs->txq_stopped != 0))
1870 restart_tx(qs);
1871
1872 budget -= budget_left;
1873 return budget;
1874}
1875
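/*
 * A "pure" response carries no async notification, no immediate data and a
 * zero length/CQ field, i.e. it returns only credits and GTS state and
 * delivers no packet.
 */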
1876static inline int is_pure_response(const struct rsp_desc *r)
1877{
1878 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
1879
1880 return (n | r->len_cq) == 0;
1881}
1882
1883/**
1884 * napi_rx_handler - the NAPI handler for Rx processing
1885 * @dev: the net device
1886 * @budget: how many packets we can process in this round
 1887 *
 1888 *	Handler for new data events when using NAPI.  Returns 1 if further
 *	polling is needed, or 0 once all work is done and the queue's
 *	interrupts have been re-enabled via a GTS write.
1889 */
1890static int napi_rx_handler(struct net_device *dev, int *budget)
1891{
1892 struct adapter *adap = dev->priv;
1893 struct sge_qset *qs = dev2qset(dev);
1894 int effective_budget = min(*budget, dev->quota);
1895
1896 int work_done = process_responses(adap, qs, effective_budget);
1897 *budget -= work_done;
1898 dev->quota -= work_done;
1899
1900 if (work_done >= effective_budget)
1901 return 1;
1902
1903 netif_rx_complete(dev);
1904
1905 /*
1906 * Because we don't atomically flush the following write it is
1907 * possible that in very rare cases it can reach the device in a way
1908 * that races with a new response being written plus an error interrupt
1909 * causing the NAPI interrupt handler below to return unhandled status
 1910 * to the OS.  Protecting against this would require flushing the write
 1911 * and doing both the write and the flush with interrupts off, which is
 1912 * far too expensive to justify given the rarity of the race.
1913 *
1914 * The race cannot happen at all with MSI-X.
1915 */
1916 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
1917 V_NEWTIMER(qs->rspq.next_holdoff) |
1918 V_NEWINDEX(qs->rspq.cidx));
1919 return 0;
1920}
1921
1922/*
1923 * Returns true if the device is already scheduled for polling.
1924 */
1925static inline int napi_is_scheduled(struct net_device *dev)
1926{
1927 return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
1928}
1929
1930/**
1931 * process_pure_responses - process pure responses from a response queue
1932 * @adap: the adapter
1933 * @qs: the queue set owning the response queue
1934 * @r: the first pure response to process
1935 *
1936 * A simpler version of process_responses() that handles only pure (i.e.,
 1937 * non-data-carrying) responses.  Such responses are too lightweight to
1938 * justify calling a softirq under NAPI, so we handle them specially in
1939 * the interrupt handler. The function is called with a pointer to a
1940 * response, which the caller must ensure is a valid pure response.
1941 *
1942 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
1943 */
1944static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
1945 struct rsp_desc *r)
1946{
1947 struct sge_rspq *q = &qs->rspq;
1948 unsigned int sleeping = 0, tx_completed[3] = { 0, 0, 0 };
1949
1950 do {
1951 u32 flags = ntohl(r->flags);
1952
1953 r++;
1954 if (unlikely(++q->cidx == q->size)) {
1955 q->cidx = 0;
1956 q->gen ^= 1;
1957 r = q->desc;
1958 }
1959 prefetch(r);
1960
1961 if (flags & RSPD_CTRL_MASK) {
1962 sleeping |= flags & RSPD_GTS_MASK;
1963 handle_rsp_cntrl_info(qs, flags, tx_completed);
1964 }
1965
1966 q->pure_rsps++;
1967 if (++q->credits >= (q->size / 4)) {
1968 refill_rspq(adap, q, q->credits);
1969 q->credits = 0;
1970 }
1971 } while (is_new_response(r, q) && is_pure_response(r));
1972
1973 flush_tx_completed(qs, tx_completed);
1974
1975 if (sleeping)
1976 check_ring_db(adap, qs, sleeping);
1977
1978 smp_mb(); /* commit Tx queue .processed updates */
1979 if (unlikely(qs->txq_stopped != 0))
1980 restart_tx(qs);
1981
1982 return is_new_response(r, q);
1983}
1984
1985/**
1986 * handle_responses - decide what to do with new responses in NAPI mode
1987 * @adap: the adapter
1988 * @q: the response queue
1989 *
1990 * This is used by the NAPI interrupt handlers to decide what to do with
1991 * new SGE responses. If there are no new responses it returns -1. If
 1992 * there are new responses and they are pure (i.e., non-data-carrying)
1993 * it handles them straight in hard interrupt context as they are very
 1994 * cheap and don't deliver any packets.  Finally, if there are any
 1995 * data-carrying responses it schedules the NAPI handler.  Returns 1 if it
1996 * schedules NAPI, 0 if all new responses were pure.
1997 *
1998 * The caller must ascertain NAPI is not already running.
1999 */
2000static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2001{
2002 struct sge_qset *qs = rspq_to_qset(q);
2003 struct rsp_desc *r = &q->desc[q->cidx];
2004
2005 if (!is_new_response(r, q))
2006 return -1;
2007 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2008 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2009 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2010 return 0;
2011 }
2012 if (likely(__netif_rx_schedule_prep(qs->netdev)))
2013 __netif_rx_schedule(qs->netdev);
2014 return 1;
2015}
2016
2017/*
2018 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2019 * (i.e., response queue serviced in hard interrupt).
2020 */
2021irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2022{
2023 struct sge_qset *qs = cookie;
2024 struct adapter *adap = qs->netdev->priv;
2025 struct sge_rspq *q = &qs->rspq;
2026
2027 spin_lock(&q->lock);
2028 if (process_responses(adap, qs, -1) == 0)
2029 q->unhandled_irqs++;
2030 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2031 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2032 spin_unlock(&q->lock);
2033 return IRQ_HANDLED;
2034}
2035
2036/*
2037 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2038 * (i.e., response queue serviced by NAPI polling).
2039 */
2040irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2041{
2042 struct sge_qset *qs = cookie;
2043 struct adapter *adap = qs->netdev->priv;
2044 struct sge_rspq *q = &qs->rspq;
2045
2046 spin_lock(&q->lock);
2047 BUG_ON(napi_is_scheduled(qs->netdev));
2048
2049 if (handle_responses(adap, q) < 0)
2050 q->unhandled_irqs++;
2051 spin_unlock(&q->lock);
2052 return IRQ_HANDLED;
2053}
2054
2055/*
2056 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2057 * SGE response queues as well as error and other async events as they all use
2058 * the same MSI vector. We use one SGE response queue per port in this mode
2059 * and protect all response queues with queue 0's lock.
2060 */
2061static irqreturn_t t3_intr_msi(int irq, void *cookie)
2062{
2063 int new_packets = 0;
2064 struct adapter *adap = cookie;
2065 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2066
2067 spin_lock(&q->lock);
2068
2069 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2070 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2071 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2072 new_packets = 1;
2073 }
2074
2075 if (adap->params.nports == 2 &&
2076 process_responses(adap, &adap->sge.qs[1], -1)) {
2077 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2078
2079 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2080 V_NEWTIMER(q1->next_holdoff) |
2081 V_NEWINDEX(q1->cidx));
2082 new_packets = 1;
2083 }
2084
2085 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2086 q->unhandled_irqs++;
2087
2088 spin_unlock(&q->lock);
2089 return IRQ_HANDLED;
2090}
2091
2092static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
2093{
2094 if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) {
2095 if (likely(__netif_rx_schedule_prep(dev)))
2096 __netif_rx_schedule(dev);
2097 return 1;
2098 }
2099 return 0;
2100}
2101
2102/*
2103 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2104 * by NAPI polling). Handles data events from SGE response queues as well as
2105 * error and other async events as they all use the same MSI vector. We use
2106 * one SGE response queue per port in this mode and protect all response
2107 * queues with queue 0's lock.
2108 */
2109irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2110{
2111 int new_packets;
2112 struct adapter *adap = cookie;
2113 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2114
2115 spin_lock(&q->lock);
2116
2117 new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q);
2118 if (adap->params.nports == 2)
2119 new_packets += rspq_check_napi(adap->sge.qs[1].netdev,
2120 &adap->sge.qs[1].rspq);
2121 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2122 q->unhandled_irqs++;
2123
2124 spin_unlock(&q->lock);
2125 return IRQ_HANDLED;
2126}
2127
2128/*
2129 * A helper function that processes responses and issues GTS.
2130 */
2131static inline int process_responses_gts(struct adapter *adap,
2132 struct sge_rspq *rq)
2133{
2134 int work;
2135
2136 work = process_responses(adap, rspq_to_qset(rq), -1);
2137 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2138 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2139 return work;
2140}
2141
2142/*
2143 * The legacy INTx interrupt handler. This needs to handle data events from
2144 * SGE response queues as well as error and other async events as they all use
2145 * the same interrupt pin. We use one SGE response queue per port in this mode
2146 * and protect all response queues with queue 0's lock.
2147 */
2148static irqreturn_t t3_intr(int irq, void *cookie)
2149{
2150 int work_done, w0, w1;
2151 struct adapter *adap = cookie;
2152 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2153 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2154
2155 spin_lock(&q0->lock);
2156
2157 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2158 w1 = adap->params.nports == 2 &&
2159 is_new_response(&q1->desc[q1->cidx], q1);
2160
2161 if (likely(w0 | w1)) {
2162 t3_write_reg(adap, A_PL_CLI, 0);
2163 t3_read_reg(adap, A_PL_CLI); /* flush */
2164
2165 if (likely(w0))
2166 process_responses_gts(adap, q0);
2167
2168 if (w1)
2169 process_responses_gts(adap, q1);
2170
2171 work_done = w0 | w1;
2172 } else
2173 work_done = t3_slow_intr_handler(adap);
2174
2175 spin_unlock(&q0->lock);
2176 return IRQ_RETVAL(work_done != 0);
2177}
2178
2179/*
2180 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2181 * Handles data events from SGE response queues as well as error and other
2182 * async events as they all use the same interrupt pin. We use one SGE
2183 * response queue per port in this mode and protect all response queues with
2184 * queue 0's lock.
2185 */
2186static irqreturn_t t3b_intr(int irq, void *cookie)
2187{
2188 u32 map;
2189 struct adapter *adap = cookie;
2190 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2191
2192 t3_write_reg(adap, A_PL_CLI, 0);
2193 map = t3_read_reg(adap, A_SG_DATA_INTR);
2194
2195 if (unlikely(!map)) /* shared interrupt, most likely */
2196 return IRQ_NONE;
2197
2198 spin_lock(&q0->lock);
2199
2200 if (unlikely(map & F_ERRINTR))
2201 t3_slow_intr_handler(adap);
2202
2203 if (likely(map & 1))
2204 process_responses_gts(adap, q0);
2205
2206 if (map & 2)
2207 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2208
2209 spin_unlock(&q0->lock);
2210 return IRQ_HANDLED;
2211}
2212
2213/*
2214 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2215 * Handles data events from SGE response queues as well as error and other
2216 * async events as they all use the same interrupt pin. We use one SGE
2217 * response queue per port in this mode and protect all response queues with
2218 * queue 0's lock.
2219 */
2220static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2221{
2222 u32 map;
2223 struct net_device *dev;
2224 struct adapter *adap = cookie;
2225 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2226
2227 t3_write_reg(adap, A_PL_CLI, 0);
2228 map = t3_read_reg(adap, A_SG_DATA_INTR);
2229
2230 if (unlikely(!map)) /* shared interrupt, most likely */
2231 return IRQ_NONE;
2232
2233 spin_lock(&q0->lock);
2234
2235 if (unlikely(map & F_ERRINTR))
2236 t3_slow_intr_handler(adap);
2237
2238 if (likely(map & 1)) {
2239 dev = adap->sge.qs[0].netdev;
2240
2241 BUG_ON(napi_is_scheduled(dev));
2242 if (likely(__netif_rx_schedule_prep(dev)))
2243 __netif_rx_schedule(dev);
2244 }
2245 if (map & 2) {
2246 dev = adap->sge.qs[1].netdev;
2247
2248 BUG_ON(napi_is_scheduled(dev));
2249 if (likely(__netif_rx_schedule_prep(dev)))
2250 __netif_rx_schedule(dev);
2251 }
2252
2253 spin_unlock(&q0->lock);
2254 return IRQ_HANDLED;
2255}
2256
2257/**
2258 * t3_intr_handler - select the top-level interrupt handler
2259 * @adap: the adapter
2260 * @polling: whether using NAPI to service response queues
2261 *
2262 * Selects the top-level interrupt handler based on the type of interrupts
2263 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2264 * response queues.
2265 */
2266intr_handler_t t3_intr_handler(struct adapter *adap, int polling)
2267{
2268 if (adap->flags & USING_MSIX)
2269 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2270 if (adap->flags & USING_MSI)
2271 return polling ? t3_intr_msi_napi : t3_intr_msi;
2272 if (adap->params.rev > 0)
2273 return polling ? t3b_intr_napi : t3b_intr;
2274 return t3_intr;
2275}
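
/*
 * Illustrative use of the selected handler (a sketch only; the actual
 * request_irq calls live in the driver's main module and the names used
 * here are placeholders):
 *
 *	err = request_irq(adap->pdev->irq, t3_intr_handler(adap, polling),
 *			  irq_flags, irq_name, adap);
 *
 * The dev_id cookie must match what the handler expects: the adapter for
 * the MSI/INTx handlers, or a struct sge_qset for the per-queue MSI-X
 * handlers.
 */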
2276
2277/**
2278 * t3_sge_err_intr_handler - SGE async event interrupt handler
2279 * @adapter: the adapter
2280 *
2281 * Interrupt handler for SGE asynchronous (non-data) events.
2282 */
2283void t3_sge_err_intr_handler(struct adapter *adapter)
2284{
2285 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2286
2287 if (status & F_RSPQCREDITOVERFOW)
2288 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2289
2290 if (status & F_RSPQDISABLED) {
2291 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2292
2293 CH_ALERT(adapter,
2294 "packet delivered to disabled response queue "
2295 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2296 }
2297
2298 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2299 if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
2300 t3_fatal_err(adapter);
2301}
2302
2303/**
2304 * sge_timer_cb - perform periodic maintenance of an SGE qset
2305 * @data: the SGE queue set to maintain
2306 *
2307 * Runs periodically from a timer to perform maintenance of an SGE queue
2308 * set. It performs two tasks:
2309 *
2310 * a) Cleans up any completed Tx descriptors that may still be pending.
2311 * Normal descriptor cleanup happens when new packets are added to a Tx
2312 * queue so this timer is relatively infrequent and does any cleanup only
2313 * if the Tx queue has not seen any new packets in a while. We make a
 2314 *	best-effort attempt to reclaim descriptors: we don't wait around if we
 2315 *	cannot get a queue's lock, which most likely means someone else is
 2316 *	queueing new packets and will also handle the cleanup.  Since control
 2317 *	queues use immediate data exclusively we don't bother cleaning them
 2318 *	up here.
2319 *
2320 * b) Replenishes Rx queues that have run out due to memory shortage.
2321 * Normally new Rx buffers are added when existing ones are consumed but
2322 * when out of memory a queue can become empty. We try to add only a few
 2323 *	buffers here; the queue will be replenished fully as these new buffers
 2324 *	are used up, provided the memory shortage has subsided.
2325 */
2326static void sge_timer_cb(unsigned long data)
2327{
2328 spinlock_t *lock;
2329 struct sge_qset *qs = (struct sge_qset *)data;
2330 struct adapter *adap = qs->netdev->priv;
2331
2332 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2333 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2334 spin_unlock(&qs->txq[TXQ_ETH].lock);
2335 }
2336 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2337 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2338 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2339 }
2340 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
2341 &adap->sge.qs[0].rspq.lock;
2342 if (spin_trylock_irq(lock)) {
2343 if (!napi_is_scheduled(qs->netdev)) {
2344 if (qs->fl[0].credits < qs->fl[0].size)
2345 __refill_fl(adap, &qs->fl[0]);
2346 if (qs->fl[1].credits < qs->fl[1].size)
2347 __refill_fl(adap, &qs->fl[1]);
2348 }
2349 spin_unlock_irq(lock);
2350 }
2351 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2352}
2353
2354/**
2355 * t3_update_qset_coalesce - update coalescing settings for a queue set
2356 * @qs: the SGE queue set
2357 * @p: new queue set parameters
2358 *
2359 * Update the coalescing settings for an SGE queue set. Nothing is done
2360 * if the queue set is not initialized yet.
2361 */
2362void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2363{
2364 if (!qs->netdev)
2365 return;
2366
 2367	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* 0.1us units, can't be 0 */
2368 qs->rspq.polling = p->polling;
2369 qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll;
2370}
2371
2372/**
2373 * t3_sge_alloc_qset - initialize an SGE queue set
2374 * @adapter: the adapter
2375 * @id: the queue set id
2376 * @nports: how many Ethernet ports will be using this queue set
2377 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2378 * @p: configuration parameters for this queue set
2379 * @ntxq: number of Tx queues for the queue set
2380 * @netdev: net device associated with this queue set
2381 *
2382 * Allocate resources and initialize an SGE queue set. A queue set
2383 * comprises a response queue, two Rx free-buffer queues, and up to 3
2384 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2385 * queue, offload queue, and control queue.
2386 */
2387int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2388 int irq_vec_idx, const struct qset_params *p,
2389 int ntxq, struct net_device *netdev)
2390{
2391 int i, ret = -ENOMEM;
2392 struct sge_qset *q = &adapter->sge.qs[id];
2393
2394 init_qset_cntxt(q, id);
2395 init_timer(&q->tx_reclaim_timer);
2396 q->tx_reclaim_timer.data = (unsigned long)q;
2397 q->tx_reclaim_timer.function = sge_timer_cb;
2398
2399 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2400 sizeof(struct rx_desc),
2401 sizeof(struct rx_sw_desc),
2402 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2403 if (!q->fl[0].desc)
2404 goto err;
2405
2406 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2407 sizeof(struct rx_desc),
2408 sizeof(struct rx_sw_desc),
2409 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2410 if (!q->fl[1].desc)
2411 goto err;
2412
2413 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2414 sizeof(struct rsp_desc), 0,
2415 &q->rspq.phys_addr, NULL);
2416 if (!q->rspq.desc)
2417 goto err;
2418
2419 for (i = 0; i < ntxq; ++i) {
2420 /*
2421 * The control queue always uses immediate data so does not
2422 * need to keep track of any sk_buffs.
2423 */
2424 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2425
2426 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2427 sizeof(struct tx_desc), sz,
2428 &q->txq[i].phys_addr,
2429 &q->txq[i].sdesc);
2430 if (!q->txq[i].desc)
2431 goto err;
2432
2433 q->txq[i].gen = 1;
2434 q->txq[i].size = p->txq_size[i];
2435 spin_lock_init(&q->txq[i].lock);
2436 skb_queue_head_init(&q->txq[i].sendq);
2437 }
2438
2439 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2440 (unsigned long)q);
2441 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2442 (unsigned long)q);
2443
2444 q->fl[0].gen = q->fl[1].gen = 1;
2445 q->fl[0].size = p->fl_size;
2446 q->fl[1].size = p->jumbo_size;
2447
2448 q->rspq.gen = 1;
2449 q->rspq.size = p->rspq_size;
2450 spin_lock_init(&q->rspq.lock);
2451
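	/*
	 * Stop threshold for the Ethernet Tx queue: keep roughly enough free
	 * descriptors for one maximally fragmented packet (head plus
	 * MAX_SKB_FRAGS page fragments plus header flits) per port.
	 */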
2452 q->txq[TXQ_ETH].stop_thres = nports *
2453 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2454
2455 if (ntxq == 1) {
2456 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
2457 sizeof(struct cpl_rx_pkt);
2458 q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
2459 sizeof(struct cpl_rx_pkt);
2460 } else {
2461 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
2462 sizeof(struct cpl_rx_data);
2463 q->fl[1].buf_size = (16 * 1024) -
2464 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2465 }
2466
2467 spin_lock(&adapter->sge.reg_lock);
2468
2469 /* FL threshold comparison uses < */
2470 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2471 q->rspq.phys_addr, q->rspq.size,
2472 q->fl[0].buf_size, 1, 0);
2473 if (ret)
2474 goto err_unlock;
2475
2476 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2477 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2478 q->fl[i].phys_addr, q->fl[i].size,
2479 q->fl[i].buf_size, p->cong_thres, 1,
2480 0);
2481 if (ret)
2482 goto err_unlock;
2483 }
2484
2485 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2486 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2487 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2488 1, 0);
2489 if (ret)
2490 goto err_unlock;
2491
2492 if (ntxq > 1) {
2493 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2494 USE_GTS, SGE_CNTXT_OFLD, id,
2495 q->txq[TXQ_OFLD].phys_addr,
2496 q->txq[TXQ_OFLD].size, 0, 1, 0);
2497 if (ret)
2498 goto err_unlock;
2499 }
2500
2501 if (ntxq > 2) {
2502 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2503 SGE_CNTXT_CTRL, id,
2504 q->txq[TXQ_CTRL].phys_addr,
2505 q->txq[TXQ_CTRL].size,
2506 q->txq[TXQ_CTRL].token, 1, 0);
2507 if (ret)
2508 goto err_unlock;
2509 }
2510
2511 spin_unlock(&adapter->sge.reg_lock);
2512 q->netdev = netdev;
2513 t3_update_qset_coalesce(q, p);
2514
2515 /*
2516 * We use atalk_ptr as a backpointer to a qset. In case a device is
2517 * associated with multiple queue sets only the first one sets
2518 * atalk_ptr.
2519 */
2520 if (netdev->atalk_ptr == NULL)
2521 netdev->atalk_ptr = q;
2522
2523 refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
2524 refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
2525 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2526
2527 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2528 V_NEWTIMER(q->rspq.holdoff_tmr));
2529
2530 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2531 return 0;
2532
2533 err_unlock:
2534 spin_unlock(&adapter->sge.reg_lock);
2535 err:
2536 t3_free_qset(adapter, q);
2537 return ret;
2538}
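
/*
 * Sketch of a typical caller (illustrative only; the real setup lives in
 * the driver's main module and may differ in detail):
 *
 *	err = t3_sge_alloc_qset(adap, qset_idx, nports, irq_vec_idx,
 *				&params->qset[qset_idx], ntxq, netdev);
 *
 * where params points to a struct sge_params prepared by t3_sge_prep().
 * On failure the queue set's resources have already been released via
 * t3_free_qset(), so the caller only needs to propagate the error.
 */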
2539
2540/**
2541 * t3_free_sge_resources - free SGE resources
2542 * @adap: the adapter
2543 *
2544 * Frees resources used by the SGE queue sets.
2545 */
2546void t3_free_sge_resources(struct adapter *adap)
2547{
2548 int i;
2549
2550 for (i = 0; i < SGE_QSETS; ++i)
2551 t3_free_qset(adap, &adap->sge.qs[i]);
2552}
2553
2554/**
2555 * t3_sge_start - enable SGE
2556 * @adap: the adapter
2557 *
2558 * Enables the SGE for DMAs. This is the last step in starting packet
2559 * transfers.
2560 */
2561void t3_sge_start(struct adapter *adap)
2562{
2563 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2564}
2565
2566/**
2567 * t3_sge_stop - disable SGE operation
2568 * @adap: the adapter
2569 *
 2570 *	Disables the DMA engine.  This can be called in emergencies (e.g.,
 2571 *	from error interrupts) or from normal process context.  In the latter
 2572 *	case it also disables any pending queue restart tasklets.  Note that
 2573 *	if it is called in interrupt context it cannot disable the restart
 2574 *	tasklets as it cannot wait.  However, the tasklets will have no effect
 2575 *	since the doorbells are disabled, and the driver will call this again
 2576 *	later from process context, at which point the tasklets will be
 2577 *	stopped if they are still running.
2578 */
2579void t3_sge_stop(struct adapter *adap)
2580{
2581 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
2582 if (!in_interrupt()) {
2583 int i;
2584
2585 for (i = 0; i < SGE_QSETS; ++i) {
2586 struct sge_qset *qs = &adap->sge.qs[i];
2587
2588 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
2589 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
2590 }
2591 }
2592}
2593
2594/**
2595 * t3_sge_init - initialize SGE
2596 * @adap: the adapter
2597 * @p: the SGE parameters
2598 *
2599 * Performs SGE initialization needed every time after a chip reset.
 2600 *	We do not initialize any of the queue sets here; instead, the top-level
 2601 *	driver must request those individually.  We also do not enable DMA here;
 2602 *	that should be done after the queues have been set up.
2603 */
2604void t3_sge_init(struct adapter *adap, struct sge_params *p)
2605{
2606 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
2607
2608 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
2609 F_CQCRDTCTRL |
2610 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
2611 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
2612#if SGE_NUM_GENBITS == 1
2613 ctrl |= F_EGRGENCTRL;
2614#endif
2615 if (adap->params.rev > 0) {
2616 if (!(adap->flags & (USING_MSIX | USING_MSI)))
2617 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
2618 ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
2619 }
2620 t3_write_reg(adap, A_SG_CONTROL, ctrl);
2621 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
2622 V_LORCQDRBTHRSH(512));
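	/*
	 * The SG timer tick is set to core_ticks_per_usec / 10, i.e. one tick
	 * per 0.1us, which is consistent with the 0.1us units used for the
	 * interrupt holdoff values elsewhere in this file.
	 */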
2623 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
2624 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
2625 V_TIMEOUT(100 * core_ticks_per_usec(adap)));
2626 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
2627 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
2628 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
2629 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
2630 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
2631 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
2632}
2633
2634/**
2635 * t3_sge_prep - one-time SGE initialization
2636 * @adap: the associated adapter
2637 * @p: SGE parameters
2638 *
 2639 *	Performs one-time initialization of SGE SW state.  This includes
 2640 *	determining defaults for the various SGE parameters, which administrators
 2641 *	may change up until they are used to initialize the SGE.
2642 */
2643void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
2644{
2645 int i;
2646
2647 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
2648 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2649
2650 for (i = 0; i < SGE_QSETS; ++i) {
2651 struct qset_params *q = p->qset + i;
2652
2653 q->polling = adap->params.rev > 0;
2654 q->coalesce_usecs = 5;
2655 q->rspq_size = 1024;
2656 q->fl_size = 4096;
2657 q->jumbo_size = 512;
2658 q->txq_size[TXQ_ETH] = 1024;
2659 q->txq_size[TXQ_OFLD] = 1024;
2660 q->txq_size[TXQ_CTRL] = 256;
2661 q->cong_thres = 0;
2662 }
2663
2664 spin_lock_init(&adap->sge.reg_lock);
2665}
2666
2667/**
2668 * t3_get_desc - dump an SGE descriptor for debugging purposes
2669 * @qs: the queue set
 2670 * @qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
2671 * @idx: the descriptor index in the queue
2672 * @data: where to dump the descriptor contents
2673 *
2674 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
2675 * size of the descriptor.
2676 */
2677int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
2678 unsigned char *data)
2679{
2680 if (qnum >= 6)
2681 return -EINVAL;
2682
2683 if (qnum < 3) {
2684 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
2685 return -EINVAL;
2686 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
2687 return sizeof(struct tx_desc);
2688 }
2689
2690 if (qnum == 3) {
2691 if (!qs->rspq.desc || idx >= qs->rspq.size)
2692 return -EINVAL;
2693 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
2694 return sizeof(struct rsp_desc);
2695 }
2696
2697 qnum -= 4;
2698 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
2699 return -EINVAL;
2700 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
2701 return sizeof(struct rx_desc);
2702}