author     Divy Le Ray <divy@chelsio.com>    2007-05-31 00:10:47 -0400
committer  Jeff Garzik <jeff@garzik.org>     2007-07-08 22:16:39 -0400
commit     cf992af561cc3ba72d79582535e6262818b00548
tree       df96ec9bf963a78d460f4647a382ef9814c9f873  /drivers/net/cxgb3
parent     287aa83dffd1b39859f49d73b0d67f57106de5f1
cxgb3 - sge page management
Streamline sge page management.
Fix dma mappings when buffers are recycled.
Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/cxgb3')
 -rw-r--r--  drivers/net/cxgb3/adapter.h |  38
 -rw-r--r--  drivers/net/cxgb3/sge.c     | 423
 2 files changed, 232 insertions(+), 229 deletions(-)
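
For orientation before the diff: instead of the old USE_RX_PAGE/RX_PAGE_SIZE scheme, FL0 Rx buffers are now carved out of whole pages in FL0_PG_CHUNK_SIZE pieces tracked by the new struct fl_pg_chunk, with one page reference held per outstanding chunk. The following is a condensed, annotated paraphrase of the patch's alloc_pg_chunk() as it appears in the diff below; the comments are the editor's, not part of the patch:

	/* Annotated paraphrase of alloc_pg_chunk() from the diff below. */
	static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
	{
		if (!q->pg_chunk.page) {
			/* start a fresh page once the cached one is used up */
			q->pg_chunk.page = alloc_page(gfp);
			if (unlikely(!q->pg_chunk.page))
				return -ENOMEM;
			q->pg_chunk.va = page_address(q->pg_chunk.page);
			q->pg_chunk.offset = 0;
		}
		sd->pg_chunk = q->pg_chunk;	/* this Rx descriptor owns the chunk */

		q->pg_chunk.offset += q->buf_size;
		if (q->pg_chunk.offset == PAGE_SIZE)
			q->pg_chunk.page = NULL;	/* page fully carved up */
		else {
			q->pg_chunk.va += q->buf_size;
			get_page(q->pg_chunk.page);	/* one reference per chunk handed out */
		}
		return 0;
	}

Each chunk is later released with put_page() in free_rx_bufs() or handed to the stack as a page fragment in get_packet_pg(), so the page itself is freed only when its last chunk is done.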
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 80c3d8f268a7..ab72563b81ee 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -71,27 +71,29 @@ enum {				/* adapter flags */
 	QUEUES_BOUND = (1 << 3),
 };
 
+struct fl_pg_chunk {
+	struct page *page;
+	void *va;
+	unsigned int offset;
+};
+
 struct rx_desc;
 struct rx_sw_desc;
 
-struct sge_fl_page {
-	struct skb_frag_struct frag;
-	unsigned char *va;
-};
-
-struct sge_fl {			/* SGE per free-buffer list state */
-	unsigned int buf_size;	/* size of each Rx buffer */
-	unsigned int credits;	/* # of available Rx buffers */
-	unsigned int size;	/* capacity of free list */
-	unsigned int cidx;	/* consumer index */
-	unsigned int pidx;	/* producer index */
-	unsigned int gen;	/* free list generation */
-	unsigned int cntxt_id;	/* SGE context id for the free list */
-	struct sge_fl_page page;
-	struct rx_desc *desc;	/* address of HW Rx descriptor ring */
-	struct rx_sw_desc *sdesc;	/* address of SW Rx descriptor ring */
-	dma_addr_t phys_addr;	/* physical address of HW ring start */
-	unsigned long empty;	/* # of times queue ran out of buffers */
+struct sge_fl {			/* SGE per free-buffer list state */
+	unsigned int buf_size;	/* size of each Rx buffer */
+	unsigned int credits;	/* # of available Rx buffers */
+	unsigned int size;	/* capacity of free list */
+	unsigned int cidx;	/* consumer index */
+	unsigned int pidx;	/* producer index */
+	unsigned int gen;	/* free list generation */
+	struct fl_pg_chunk pg_chunk;/* page chunk cache */
+	unsigned int use_pages;	/* whether FL uses pages or sk_buffs */
+	struct rx_desc *desc;	/* address of HW Rx descriptor ring */
+	struct rx_sw_desc *sdesc;	/* address of SW Rx descriptor ring */
+	dma_addr_t phys_addr;	/* physical address of HW ring start */
+	unsigned int cntxt_id;	/* SGE context id for the free list */
+	unsigned long empty;	/* # of times queue ran out of buffers */
 	unsigned long alloc_failed; /* # of times buffer allocation failed */
 };
 
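A small but visible consequence in the sge.c hunks below: the per-descriptor union in struct rx_sw_desc loses its `t` tag and becomes anonymous, so accesses shorten from d->t.skb and d->t.page to d->skb and d->pg_chunk, and the new sge_fl.use_pages flag tells free_rx_bufs() and process_responses() which member is live. A minimal sketch of the resulting layout (the field comments are the editor's):

	struct rx_sw_desc {		/* SW state per Rx descriptor */
		union {			/* anonymous: members accessed as sd->skb / sd->pg_chunk */
			struct sk_buff *skb;		/* valid when the FL holds sk_buffs */
			struct fl_pg_chunk pg_chunk;	/* valid when the FL holds page chunks */
		};
		DECLARE_PCI_UNMAP_ADDR(dma_addr);	/* DMA address kept for unmap/sync */
	};
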
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index a60ec4d4707c..a2cfd68ac757 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -46,23 +46,16 @@
 
 #define SGE_RX_SM_BUF_SIZE 1536
 
-/*
- * If USE_RX_PAGE is defined, the small freelist populated with (partial)
- * pages instead of skbs. Pages are carved up into RX_PAGE_SIZE chunks (must
- * be a multiple of the host page size).
- */
-#define USE_RX_PAGE
-#define RX_PAGE_SIZE 2048
-
-/*
- * skb freelist packets are copied into a new skb (and the freelist one is
- * reused) if their len is <=
- */
 #define SGE_RX_COPY_THRES  256
+#define SGE_RX_PULL_LEN    128
 
 /*
- * Minimum number of freelist entries before we start dropping TUNNEL frames.
+ * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
+ * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs
+ * directly.
  */
+#define FL0_PG_CHUNK_SIZE  2048
+
 #define SGE_RX_DROP_THRES 16
 
 /*
@@ -100,12 +93,12 @@ struct tx_sw_desc {		/* SW state per Tx descriptor */
 	struct sk_buff *skb;
 };
 
 struct rx_sw_desc {		/* SW state per Rx descriptor */
 	union {
 		struct sk_buff *skb;
-		struct sge_fl_page page;
-	} t;
+		struct fl_pg_chunk pg_chunk;
+	};
 	DECLARE_PCI_UNMAP_ADDR(dma_addr);
 };
 
 struct rsp_desc {		/* response queue descriptor */
@@ -351,27 +344,26 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
 
 		pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
 				 q->buf_size, PCI_DMA_FROMDEVICE);
-
-		if (q->buf_size != RX_PAGE_SIZE) {
-			kfree_skb(d->t.skb);
-			d->t.skb = NULL;
+		if (q->use_pages) {
+			put_page(d->pg_chunk.page);
+			d->pg_chunk.page = NULL;
 		} else {
-			if (d->t.page.frag.page)
-				put_page(d->t.page.frag.page);
-			d->t.page.frag.page = NULL;
+			kfree_skb(d->skb);
+			d->skb = NULL;
 		}
 		if (++cidx == q->size)
 			cidx = 0;
 	}
 
-	if (q->page.frag.page)
-		put_page(q->page.frag.page);
-	q->page.frag.page = NULL;
+	if (q->pg_chunk.page) {
+		__free_page(q->pg_chunk.page);
+		q->pg_chunk.page = NULL;
+	}
 }
 
 /**
  *	add_one_rx_buf - add a packet buffer to a free-buffer list
- *	@va: va of the buffer to add
+ *	@va: buffer start VA
  *	@len: the buffer length
  *	@d: the HW Rx descriptor to write
  *	@sd: the SW Rx descriptor to write
@@ -381,7 +373,7 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
  *	Add a buffer of the given length to the supplied HW and SW Rx
  *	descriptors.
  */
-static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
+static inline void add_one_rx_buf(void *va, unsigned int len,
 				  struct rx_desc *d, struct rx_sw_desc *sd,
 				  unsigned int gen, struct pci_dev *pdev)
 {
@@ -397,6 +389,27 @@ static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
 	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
 }
 
+static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
+{
+	if (!q->pg_chunk.page) {
+		q->pg_chunk.page = alloc_page(gfp);
+		if (unlikely(!q->pg_chunk.page))
+			return -ENOMEM;
+		q->pg_chunk.va = page_address(q->pg_chunk.page);
+		q->pg_chunk.offset = 0;
+	}
+	sd->pg_chunk = q->pg_chunk;
+
+	q->pg_chunk.offset += q->buf_size;
+	if (q->pg_chunk.offset == PAGE_SIZE)
+		q->pg_chunk.page = NULL;
+	else {
+		q->pg_chunk.va += q->buf_size;
+		get_page(q->pg_chunk.page);
+	}
+	return 0;
+}
+
 /**
  *	refill_fl - refill an SGE free-buffer list
  *	@adapter: the adapter
@@ -410,49 +423,29 @@ static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
  */
 static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
 {
+	void *buf_start;
 	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
 	struct rx_desc *d = &q->desc[q->pidx];
-	struct sge_fl_page *p = &q->page;
 
 	while (n--) {
-		unsigned char *va;
-
-		if (unlikely(q->buf_size != RX_PAGE_SIZE)) {
-			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
-
-			if (!skb) {
-				q->alloc_failed++;
+		if (q->use_pages) {
+			if (unlikely(alloc_pg_chunk(q, sd, gfp))) {
+nomem:				q->alloc_failed++;
 				break;
 			}
-			va = skb->data;
-			sd->t.skb = skb;
+			buf_start = sd->pg_chunk.va;
 		} else {
-			if (!p->frag.page) {
-				p->frag.page = alloc_pages(gfp, 0);
-				if (unlikely(!p->frag.page)) {
-					q->alloc_failed++;
-					break;
-				} else {
-					p->frag.size = RX_PAGE_SIZE;
-					p->frag.page_offset = 0;
-					p->va = page_address(p->frag.page);
-				}
-			}
+			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
 
-			memcpy(&sd->t, p, sizeof(*p));
-			va = p->va;
+			if (!skb)
+				goto nomem;
 
-			p->frag.page_offset += RX_PAGE_SIZE;
-			BUG_ON(p->frag.page_offset > PAGE_SIZE);
-			p->va += RX_PAGE_SIZE;
-			if (p->frag.page_offset == PAGE_SIZE)
-				p->frag.page = NULL;
-			else
-				get_page(p->frag.page);
+			sd->skb = skb;
+			buf_start = skb->data;
 		}
 
-		add_one_rx_buf(va, q->buf_size, d, sd, q->gen, adap->pdev);
-
+		add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
+			       adap->pdev);
 		d++;
 		sd++;
 		if (++q->pidx == q->size) {
@@ -487,7 +480,7 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
 	struct rx_desc *from = &q->desc[idx];
 	struct rx_desc *to = &q->desc[q->pidx];
 
-	memcpy(&q->sdesc[q->pidx], &q->sdesc[idx], sizeof(struct rx_sw_desc));
+	q->sdesc[q->pidx] = q->sdesc[idx];
 	to->addr_lo = from->addr_lo;	/* already big endian */
 	to->addr_hi = from->addr_hi;	/* likewise */
 	wmb();
@@ -650,6 +643,132 @@ static inline unsigned int flits_to_desc(unsigned int n)
 }
 
 /**
+ *	get_packet - return the next ingress packet buffer from a free list
+ *	@adap: the adapter that received the packet
+ *	@fl: the SGE free list holding the packet
+ *	@len: the packet length including any SGE padding
+ *	@drop_thres: # of remaining buffers before we start dropping packets
+ *
+ *	Get the next packet from a free list and complete setup of the
+ *	sk_buff. If the packet is small we make a copy and recycle the
+ *	original buffer, otherwise we use the original buffer itself. If a
+ *	positive drop threshold is supplied packets are dropped and their
+ *	buffers recycled if (a) the number of remaining buffers is under the
+ *	threshold and the packet is too big to copy, or (b) the packet should
+ *	be copied but there is no memory for the copy.
+ */
+static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
+				  unsigned int len, unsigned int drop_thres)
+{
+	struct sk_buff *skb = NULL;
+	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+	prefetch(sd->skb->data);
+	fl->credits--;
+
+	if (len <= SGE_RX_COPY_THRES) {
+		skb = alloc_skb(len, GFP_ATOMIC);
+		if (likely(skb != NULL)) {
+			__skb_put(skb, len);
+			pci_dma_sync_single_for_cpu(adap->pdev,
+					    pci_unmap_addr(sd, dma_addr), len,
+					    PCI_DMA_FROMDEVICE);
+			memcpy(skb->data, sd->skb->data, len);
+			pci_dma_sync_single_for_device(adap->pdev,
+					    pci_unmap_addr(sd, dma_addr), len,
+					    PCI_DMA_FROMDEVICE);
+		} else if (!drop_thres)
+			goto use_orig_buf;
+recycle:
+		recycle_rx_buf(adap, fl, fl->cidx);
+		return skb;
+	}
+
+	if (unlikely(fl->credits < drop_thres))
+		goto recycle;
+
+use_orig_buf:
+	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+			 fl->buf_size, PCI_DMA_FROMDEVICE);
+	skb = sd->skb;
+	skb_put(skb, len);
+	__refill_fl(adap, fl);
+	return skb;
+}
+
+/**
+ *	get_packet_pg - return the next ingress packet buffer from a free list
+ *	@adap: the adapter that received the packet
+ *	@fl: the SGE free list holding the packet
+ *	@len: the packet length including any SGE padding
+ *	@drop_thres: # of remaining buffers before we start dropping packets
+ *
+ *	Get the next packet from a free list populated with page chunks.
+ *	If the packet is small we make a copy and recycle the original buffer,
+ *	otherwise we attach the original buffer as a page fragment to a fresh
+ *	sk_buff. If a positive drop threshold is supplied packets are dropped
+ *	and their buffers recycled if (a) the number of remaining buffers is
+ *	under the threshold and the packet is too big to copy, or (b) there's
+ *	no system memory.
+ *
+ *	Note: this function is similar to @get_packet but deals with Rx buffers
+ *	that are page chunks rather than sk_buffs.
+ */
+static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
+				     unsigned int len, unsigned int drop_thres)
+{
+	struct sk_buff *skb = NULL;
+	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+	if (len <= SGE_RX_COPY_THRES) {
+		skb = alloc_skb(len, GFP_ATOMIC);
+		if (likely(skb != NULL)) {
+			__skb_put(skb, len);
+			pci_dma_sync_single_for_cpu(adap->pdev,
+					    pci_unmap_addr(sd, dma_addr), len,
+					    PCI_DMA_FROMDEVICE);
+			memcpy(skb->data, sd->pg_chunk.va, len);
+			pci_dma_sync_single_for_device(adap->pdev,
+					    pci_unmap_addr(sd, dma_addr), len,
+					    PCI_DMA_FROMDEVICE);
+		} else if (!drop_thres)
+			return NULL;
+recycle:
+		fl->credits--;
+		recycle_rx_buf(adap, fl, fl->cidx);
+		return skb;
+	}
+
+	if (unlikely(fl->credits <= drop_thres))
+		goto recycle;
+
+	skb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
+	if (unlikely(!skb)) {
+		if (!drop_thres)
+			return NULL;
+		goto recycle;
+	}
+
+	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+			 fl->buf_size, PCI_DMA_FROMDEVICE);
+	__skb_put(skb, SGE_RX_PULL_LEN);
+	memcpy(skb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
+	skb_fill_page_desc(skb, 0, sd->pg_chunk.page,
+			   sd->pg_chunk.offset + SGE_RX_PULL_LEN,
+			   len - SGE_RX_PULL_LEN);
+	skb->len = len;
+	skb->data_len = len - SGE_RX_PULL_LEN;
+	skb->truesize += skb->data_len;
+
+	fl->credits--;
+	/*
+	 * We do not refill FLs here, we let the caller do it to overlap a
+	 * prefetch.
+	 */
+	return skb;
+}
+
+/**
  *	get_imm_packet - return the next ingress packet buffer from a response
  *	@resp: the response descriptor containing the packet data
  *
@@ -1715,85 +1834,6 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
 	netif_rx(skb);
 }
 
-#define SKB_DATA_SIZE 128
-
-static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p,
-			  unsigned int len)
-{
-	skb->len = len;
-	if (len <= SKB_DATA_SIZE) {
-		skb_copy_to_linear_data(skb, p->va, len);
-		skb->tail += len;
-		put_page(p->frag.page);
-	} else {
-		skb_copy_to_linear_data(skb, p->va, SKB_DATA_SIZE);
-		skb_shinfo(skb)->frags[0].page = p->frag.page;
-		skb_shinfo(skb)->frags[0].page_offset =
-		    p->frag.page_offset + SKB_DATA_SIZE;
-		skb_shinfo(skb)->frags[0].size = len - SKB_DATA_SIZE;
-		skb_shinfo(skb)->nr_frags = 1;
-		skb->data_len = len - SKB_DATA_SIZE;
-		skb->tail += SKB_DATA_SIZE;
-		skb->truesize += skb->data_len;
-	}
-}
-
-/**
- *	get_packet - return the next ingress packet buffer from a free list
- *	@adap: the adapter that received the packet
- *	@fl: the SGE free list holding the packet
- *	@len: the packet length including any SGE padding
- *	@drop_thres: # of remaining buffers before we start dropping packets
- *
- *	Get the next packet from a free list and complete setup of the
- *	sk_buff. If the packet is small we make a copy and recycle the
- *	original buffer, otherwise we use the original buffer itself. If a
- *	positive drop threshold is supplied packets are dropped and their
- *	buffers recycled if (a) the number of remaining buffers is under the
- *	threshold and the packet is too big to copy, or (b) the packet should
- *	be copied but there is no memory for the copy.
- */
-static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
-				  unsigned int len, unsigned int drop_thres)
-{
-	struct sk_buff *skb = NULL;
-	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
-
-	prefetch(sd->t.skb->data);
-
-	if (len <= SGE_RX_COPY_THRES) {
-		skb = alloc_skb(len, GFP_ATOMIC);
-		if (likely(skb != NULL)) {
-			struct rx_desc *d = &fl->desc[fl->cidx];
-			dma_addr_t mapping =
-			    (dma_addr_t)((u64) be32_to_cpu(d->addr_hi) << 32 |
-					 be32_to_cpu(d->addr_lo));
-
-			__skb_put(skb, len);
-			pci_dma_sync_single_for_cpu(adap->pdev, mapping, len,
-						    PCI_DMA_FROMDEVICE);
-			skb_copy_from_linear_data(sd->t.skb, skb->data, len);
-			pci_dma_sync_single_for_device(adap->pdev, mapping, len,
-						       PCI_DMA_FROMDEVICE);
-		} else if (!drop_thres)
-			goto use_orig_buf;
-recycle:
-		recycle_rx_buf(adap, fl, fl->cidx);
-		return skb;
-	}
-
-	if (unlikely(fl->credits < drop_thres))
-		goto recycle;
-
-use_orig_buf:
-	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
-			 fl->buf_size, PCI_DMA_FROMDEVICE);
-	skb = sd->t.skb;
-	skb_put(skb, len);
-	__refill_fl(adap, fl);
-	return skb;
-}
-
 /**
  *	handle_rsp_cntrl_info - handles control information in a response
  *	@qs: the queue set corresponding to the response
@@ -1935,7 +1975,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
 		} else if (flags & F_RSPD_IMM_DATA_VALID) {
 			skb = get_imm_packet(r);
 			if (unlikely(!skb)) {
-			no_mem:
+no_mem:
 				q->next_holdoff = NOMEM_INTR_DELAY;
 				q->nomem++;
 				/* consume one credit since we tried */
@@ -1945,53 +1985,29 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
 			q->imm_data++;
 			ethpad = 0;
 		} else if ((len = ntohl(r->len_cq)) != 0) {
-			struct sge_fl *fl =
-			    (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
+			struct sge_fl *fl;
 
-			if (fl->buf_size == RX_PAGE_SIZE) {
-				struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
-				struct sge_fl_page *p = &sd->t.page;
-
-				prefetch(p->va);
-				prefetch(p->va + L1_CACHE_BYTES);
+			fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
+			if (fl->use_pages) {
+				void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
 
+				prefetch(addr);
+#if L1_CACHE_BYTES < 128
+				prefetch(addr + L1_CACHE_BYTES);
+#endif
 				__refill_fl(adap, fl);
 
-				pci_unmap_single(adap->pdev,
-						 pci_unmap_addr(sd, dma_addr),
-						 fl->buf_size,
-						 PCI_DMA_FROMDEVICE);
-
-				if (eth) {
-					if (unlikely(fl->credits <
-						     SGE_RX_DROP_THRES))
-						goto eth_recycle;
-
-					skb = alloc_skb(SKB_DATA_SIZE,
-							GFP_ATOMIC);
-					if (unlikely(!skb)) {
-eth_recycle:
-						q->rx_drops++;
-						recycle_rx_buf(adap, fl,
-							       fl->cidx);
-						goto eth_done;
-					}
-				} else {
-					skb = alloc_skb(SKB_DATA_SIZE,
-							GFP_ATOMIC);
-					if (unlikely(!skb))
-						goto no_mem;
-				}
-
-				skb_data_init(skb, p, G_RSPD_LEN(len));
-eth_done:
-				fl->credits--;
-				q->eth_pkts++;
-			} else {
-				fl->credits--;
+				skb = get_packet_pg(adap, fl, G_RSPD_LEN(len),
+						 eth ? SGE_RX_DROP_THRES : 0);
+			} else
 				skb = get_packet(adap, fl, G_RSPD_LEN(len),
 						 eth ? SGE_RX_DROP_THRES : 0);
-			}
+			if (unlikely(!skb)) {
+				if (!eth)
+					goto no_mem;
+				q->rx_drops++;
+			} else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
+				__skb_pull(skb, 2);
 
 			if (++fl->cidx == fl->size)
 				fl->cidx = 0;
@@ -2016,20 +2032,15 @@ eth_done:
 			q->credits = 0;
 		}
 
-		if (skb) {
-			/* Preserve the RSS info in csum & priority */
-			skb->csum = rss_hi;
-			skb->priority = rss_lo;
-
+		if (likely(skb != NULL)) {
 			if (eth)
 				rx_eth(adap, q, skb, ethpad);
 			else {
-				if (unlikely(r->rss_hdr.opcode ==
-					     CPL_TRACE_PKT))
-					__skb_pull(skb, ethpad);
-
-				ngathered = rx_offload(&adap->tdev, q,
-						       skb, offload_skbs,
+				/* Preserve the RSS info in csum & priority */
+				skb->csum = rss_hi;
+				skb->priority = rss_lo;
+				ngathered = rx_offload(&adap->tdev, q, skb,
+						       offload_skbs,
 						       ngathered);
 			}
 		}
@@ -2635,25 +2646,15 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 	q->txq[TXQ_ETH].stop_thres = nports *
 	    flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
 
-	if (!is_offload(adapter)) {
-#ifdef USE_RX_PAGE
-		q->fl[0].buf_size = RX_PAGE_SIZE;
+#if FL0_PG_CHUNK_SIZE > 0
+	q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
 #else
-		q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
-		    sizeof(struct cpl_rx_pkt);
+	q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
 #endif
-		q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
-		    sizeof(struct cpl_rx_pkt);
-	} else {
-#ifdef USE_RX_PAGE
-		q->fl[0].buf_size = RX_PAGE_SIZE;
-#else
-		q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
-		    sizeof(struct cpl_rx_data);
-#endif
-		q->fl[1].buf_size = (16 * 1024) -
-		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-	}
+	q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
+	q->fl[1].buf_size = is_offload(adapter) ?
+	    (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+	    MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
 
 	spin_lock(&adapter->sge.reg_lock);
 
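
A closing note on the new configuration constant: per the comment added at the top of sge.c, FL0_PG_CHUNK_SIZE must be a divisor of PAGE_SIZE, and setting it to 0 makes FL0 fall back to sk_buff buffers (use_pages then evaluates to 0 in t3_sge_alloc_qset()). The patch itself does not enforce the divisor rule; a hypothetical compile-time guard, shown only to illustrate the constraint and not part of the patch, could look like:

	/* Hypothetical guard, not in the patch: reject chunk sizes that do not
	 * evenly divide the page, since alloc_pg_chunk() only drops the cached
	 * page when pg_chunk.offset reaches exactly PAGE_SIZE.
	 */
	#if FL0_PG_CHUNK_SIZE > 0 && (PAGE_SIZE % FL0_PG_CHUNK_SIZE) != 0
	#error "FL0_PG_CHUNK_SIZE must be a divisor of PAGE_SIZE"
	#endif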