author     Divy Le Ray <divy@chelsio.com>    2007-02-24 19:44:17 -0500
committer  Jeff Garzik <jeff@garzik.org>     2007-02-27 04:27:12 -0500
commit     e0994eb1d9ead09bb8f6483cf5cf6aa55ce0f3b9 (patch)
tree       c6d7b848bb965f31b6a68618078586dea5de07b2 /drivers
parent     bae73f44472921008f8d0982344c53ae231445a1 (diff)
cxgb3 - Feed Rx free list with pages
Populate Rx free list with pages.
Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
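The core of the change is in refill_fl(): instead of allocating one skb per free-list entry, the driver carves a page into RX_PAGE_SIZE chunks, hands one chunk to each Rx descriptor, and keeps the partially carved page in q->page. Each handed-out chunk holds a page reference, and the producer keeps its own reference until the last chunk is taken. Below is a minimal userspace sketch of just that carving/refcounting scheme; the kernel machinery (struct page, alloc_pages(), get_page()/put_page(), DMA mapping) is replaced by a refcounted heap buffer, a 4096-byte host page is assumed, and the harness around the patch's names is illustrative only.

/*
 * Userspace model of the refill_fl() page-carving scheme from this patch.
 * NOT kernel code: a refcounted heap buffer stands in for struct page /
 * alloc_pages() / get_page() / put_page(), and there is no DMA mapping.
 * Only the carving and refcounting logic mirrors the patch.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define MODEL_PAGE_SIZE 4096    /* host page size (assumed) */
#define RX_PAGE_SIZE    2048    /* chunk handed to each Rx descriptor */

struct model_page {             /* stand-in for the kernel's struct page */
        int refcount;
        unsigned char data[MODEL_PAGE_SIZE];
};

struct fl_chunk {               /* what one free-list entry ends up holding */
        struct model_page *page;
        unsigned int offset;
        unsigned char *va;
};

static struct model_page *cur;  /* partially carved page, like q->page */
static unsigned int cur_off;

static void model_put_page(struct model_page *p)
{
        if (--p->refcount == 0)
                free(p);
}

/* Hand out the next RX_PAGE_SIZE chunk, starting a fresh page when needed. */
static int refill_one(struct fl_chunk *c)
{
        if (!cur) {
                cur = calloc(1, sizeof(*cur));
                if (!cur)
                        return -1;      /* the patch bumps q->alloc_failed here */
                cur->refcount = 1;      /* reference from the allocation */
                cur_off = 0;
        }

        c->page = cur;
        c->offset = cur_off;
        c->va = cur->data + cur_off;

        cur_off += RX_PAGE_SIZE;
        assert(cur_off <= MODEL_PAGE_SIZE);     /* BUG_ON() in the patch */
        if (cur_off == MODEL_PAGE_SIZE)
                cur = NULL;             /* last chunk inherits our reference */
        else
                cur->refcount++;        /* get_page(): chunk and producer each hold one */
        return 0;
}

int main(void)
{
        struct fl_chunk a, b;

        if (refill_one(&a) || refill_one(&b))
                return 1;
        printf("chunk a at offset %u, chunk b at offset %u, same page: %d\n",
               a.offset, b.offset, a.page == b.page);

        model_put_page(a.page);         /* consumer done with chunk a */
        model_put_page(b.page);         /* ...and chunk b; page is freed here */
        return 0;
}

With RX_PAGE_SIZE = 2048 and 4 KB host pages, each page backs exactly two free-list entries, and the page is freed only after both consumers drop their references.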
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/cxgb3/adapter.h |   9
-rw-r--r--  drivers/net/cxgb3/sge.c     | 318
2 files changed, 235 insertions, 92 deletions
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 01b99b901434..80c3d8f268a7 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -74,6 +74,11 @@ enum { /* adapter flags */
 struct rx_desc;
 struct rx_sw_desc;
 
+struct sge_fl_page {
+        struct skb_frag_struct frag;
+        unsigned char *va;
+};
+
 struct sge_fl { /* SGE per free-buffer list state */
         unsigned int buf_size; /* size of each Rx buffer */
         unsigned int credits; /* # of available Rx buffers */
@@ -81,11 +86,13 @@ struct sge_fl { /* SGE per free-buffer list state */
         unsigned int cidx; /* consumer index */
         unsigned int pidx; /* producer index */
         unsigned int gen; /* free list generation */
+        unsigned int cntxt_id; /* SGE context id for the free list */
+        struct sge_fl_page page;
         struct rx_desc *desc; /* address of HW Rx descriptor ring */
         struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
         dma_addr_t phys_addr; /* physical address of HW ring start */
-        unsigned int cntxt_id; /* SGE context id for the free list */
         unsigned long empty; /* # of times queue ran out of buffers */
+        unsigned long alloc_failed; /* # of times buffer allocation failed */
 };
 
 /*
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 4ff0ab60c6c7..c23783432e51 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -45,9 +45,25 @@
 #define USE_GTS 0
 
 #define SGE_RX_SM_BUF_SIZE 1536
+
+/*
+ * If USE_RX_PAGE is defined, the small freelist populated with (partial)
+ * pages instead of skbs. Pages are carved up into RX_PAGE_SIZE chunks (must
+ * be a multiple of the host page size).
+ */
+#define USE_RX_PAGE
+#define RX_PAGE_SIZE 2048
+
+/*
+ * skb freelist packets are copied into a new skb (and the freelist one is
+ * reused) if their len is <=
+ */
 #define SGE_RX_COPY_THRES 256
 
-# define SGE_RX_DROP_THRES 16
+/*
+ * Minimum number of freelist entries before we start dropping TUNNEL frames.
+ */
+#define SGE_RX_DROP_THRES 16
 
 /*
  * Period of the Tx buffer reclaim timer. This timer does not need to run
@@ -85,7 +101,10 @@ struct tx_sw_desc { /* SW state per Tx descriptor */
 };
 
 struct rx_sw_desc { /* SW state per Rx descriptor */
-        struct sk_buff *skb;
+        union {
+                struct sk_buff *skb;
+                struct sge_fl_page page;
+        } t;
         DECLARE_PCI_UNMAP_ADDR(dma_addr);
 };
 
@@ -332,16 +351,27 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
 
                 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
                                  q->buf_size, PCI_DMA_FROMDEVICE);
-                kfree_skb(d->skb);
-                d->skb = NULL;
+
+                if (q->buf_size != RX_PAGE_SIZE) {
+                        kfree_skb(d->t.skb);
+                        d->t.skb = NULL;
+                } else {
+                        if (d->t.page.frag.page)
+                                put_page(d->t.page.frag.page);
+                        d->t.page.frag.page = NULL;
+                }
                 if (++cidx == q->size)
                         cidx = 0;
         }
+
+        if (q->page.frag.page)
+                put_page(q->page.frag.page);
+        q->page.frag.page = NULL;
 }
 
 /**
  * add_one_rx_buf - add a packet buffer to a free-buffer list
- * @skb: the buffer to add
+ * @va: va of the buffer to add
  * @len: the buffer length
  * @d: the HW Rx descriptor to write
  * @sd: the SW Rx descriptor to write
@@ -351,14 +381,13 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
  * Add a buffer of the given length to the supplied HW and SW Rx
  * descriptors.
  */
-static inline void add_one_rx_buf(struct sk_buff *skb, unsigned int len,
+static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
                                   struct rx_desc *d, struct rx_sw_desc *sd,
                                   unsigned int gen, struct pci_dev *pdev)
 {
         dma_addr_t mapping;
 
-        sd->skb = skb;
-        mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
+        mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
         pci_unmap_addr_set(sd, dma_addr, mapping);
 
         d->addr_lo = cpu_to_be32(mapping);
@@ -383,14 +412,47 @@ static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
 {
         struct rx_sw_desc *sd = &q->sdesc[q->pidx];
         struct rx_desc *d = &q->desc[q->pidx];
+        struct sge_fl_page *p = &q->page;
 
         while (n--) {
-                struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
+                unsigned char *va;
 
-                if (!skb)
-                        break;
+                if (unlikely(q->buf_size != RX_PAGE_SIZE)) {
+                        struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
+
+                        if (!skb) {
+                                q->alloc_failed++;
+                                break;
+                        }
+                        va = skb->data;
+                        sd->t.skb = skb;
+                } else {
+                        if (!p->frag.page) {
+                                p->frag.page = alloc_pages(gfp, 0);
+                                if (unlikely(!p->frag.page)) {
+                                        q->alloc_failed++;
+                                        break;
+                                } else {
+                                        p->frag.size = RX_PAGE_SIZE;
+                                        p->frag.page_offset = 0;
+                                        p->va = page_address(p->frag.page);
+                                }
+                        }
+
+                        memcpy(&sd->t, p, sizeof(*p));
+                        va = p->va;
+
+                        p->frag.page_offset += RX_PAGE_SIZE;
+                        BUG_ON(p->frag.page_offset > PAGE_SIZE);
+                        p->va += RX_PAGE_SIZE;
+                        if (p->frag.page_offset == PAGE_SIZE)
+                                p->frag.page = NULL;
+                        else
+                                get_page(p->frag.page);
+                }
+
+                add_one_rx_buf(va, q->buf_size, d, sd, q->gen, adap->pdev);
 
-                add_one_rx_buf(skb, q->buf_size, d, sd, q->gen, adap->pdev);
                 d++;
                 sd++;
                 if (++q->pidx == q->size) {
@@ -425,7 +487,7 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
         struct rx_desc *from = &q->desc[idx];
         struct rx_desc *to = &q->desc[q->pidx];
 
-        q->sdesc[q->pidx] = q->sdesc[idx];
+        memcpy(&q->sdesc[q->pidx], &q->sdesc[idx], sizeof(struct rx_sw_desc));
         to->addr_lo = from->addr_lo; /* already big endian */
         to->addr_hi = from->addr_hi; /* likewise */
         wmb();
@@ -458,7 +520,7 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
  * of the SW ring.
  */
 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
-                        size_t sw_size, dma_addr_t *phys, void *metadata)
+                        size_t sw_size, dma_addr_t * phys, void *metadata)
 {
         size_t len = nelem * elem_size;
         void *s = NULL;
@@ -588,61 +650,6 @@ static inline unsigned int flits_to_desc(unsigned int n)
 }
 
 /**
- * get_packet - return the next ingress packet buffer from a free list
- * @adap: the adapter that received the packet
- * @fl: the SGE free list holding the packet
- * @len: the packet length including any SGE padding
- * @drop_thres: # of remaining buffers before we start dropping packets
- *
- * Get the next packet from a free list and complete setup of the
- * sk_buff. If the packet is small we make a copy and recycle the
- * original buffer, otherwise we use the original buffer itself. If a
- * positive drop threshold is supplied packets are dropped and their
- * buffers recycled if (a) the number of remaining buffers is under the
- * threshold and the packet is too big to copy, or (b) the packet should
- * be copied but there is no memory for the copy.
- */
-static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
-                                  unsigned int len, unsigned int drop_thres)
-{
-        struct sk_buff *skb = NULL;
-        struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
-
-        prefetch(sd->skb->data);
-
-        if (len <= SGE_RX_COPY_THRES) {
-                skb = alloc_skb(len, GFP_ATOMIC);
-                if (likely(skb != NULL)) {
-                        __skb_put(skb, len);
-                        pci_dma_sync_single_for_cpu(adap->pdev,
-                                                    pci_unmap_addr(sd,
-                                                                   dma_addr),
-                                                    len, PCI_DMA_FROMDEVICE);
-                        memcpy(skb->data, sd->skb->data, len);
-                        pci_dma_sync_single_for_device(adap->pdev,
-                                                       pci_unmap_addr(sd,
-                                                                      dma_addr),
-                                                       len, PCI_DMA_FROMDEVICE);
-                } else if (!drop_thres)
-                        goto use_orig_buf;
-recycle:
-                recycle_rx_buf(adap, fl, fl->cidx);
-                return skb;
-        }
-
-        if (unlikely(fl->credits < drop_thres))
-                goto recycle;
-
-use_orig_buf:
-        pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
-                         fl->buf_size, PCI_DMA_FROMDEVICE);
-        skb = sd->skb;
-        skb_put(skb, len);
-        __refill_fl(adap, fl);
-        return skb;
-}
-
-/**
  * get_imm_packet - return the next ingress packet buffer from a response
  * @resp: the response descriptor containing the packet data
  *
@@ -1676,7 +1683,6 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
         struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
         struct port_info *pi;
 
-        rq->eth_pkts++;
         skb_pull(skb, sizeof(*p) + pad);
         skb->dev = adap->port[p->iff];
         skb->dev->last_rx = jiffies;
@@ -1704,6 +1710,85 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
         netif_rx(skb);
 }
 
+#define SKB_DATA_SIZE 128
+
+static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p,
+                          unsigned int len)
+{
+        skb->len = len;
+        if (len <= SKB_DATA_SIZE) {
+                memcpy(skb->data, p->va, len);
+                skb->tail += len;
+                put_page(p->frag.page);
+        } else {
+                memcpy(skb->data, p->va, SKB_DATA_SIZE);
+                skb_shinfo(skb)->frags[0].page = p->frag.page;
+                skb_shinfo(skb)->frags[0].page_offset =
+                    p->frag.page_offset + SKB_DATA_SIZE;
+                skb_shinfo(skb)->frags[0].size = len - SKB_DATA_SIZE;
+                skb_shinfo(skb)->nr_frags = 1;
+                skb->data_len = len - SKB_DATA_SIZE;
+                skb->tail += SKB_DATA_SIZE;
+                skb->truesize += skb->data_len;
+        }
+}
+
+/**
+ * get_packet - return the next ingress packet buffer from a free list
+ * @adap: the adapter that received the packet
+ * @fl: the SGE free list holding the packet
+ * @len: the packet length including any SGE padding
+ * @drop_thres: # of remaining buffers before we start dropping packets
+ *
+ * Get the next packet from a free list and complete setup of the
+ * sk_buff. If the packet is small we make a copy and recycle the
+ * original buffer, otherwise we use the original buffer itself. If a
+ * positive drop threshold is supplied packets are dropped and their
+ * buffers recycled if (a) the number of remaining buffers is under the
+ * threshold and the packet is too big to copy, or (b) the packet should
+ * be copied but there is no memory for the copy.
+ */
+static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
+                                  unsigned int len, unsigned int drop_thres)
+{
+        struct sk_buff *skb = NULL;
+        struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+        prefetch(sd->t.skb->data);
+
+        if (len <= SGE_RX_COPY_THRES) {
+                skb = alloc_skb(len, GFP_ATOMIC);
+                if (likely(skb != NULL)) {
+                        struct rx_desc *d = &fl->desc[fl->cidx];
+                        dma_addr_t mapping =
+                            (dma_addr_t)((u64) be32_to_cpu(d->addr_hi) << 32 |
+                                         be32_to_cpu(d->addr_lo));
+
+                        __skb_put(skb, len);
+                        pci_dma_sync_single_for_cpu(adap->pdev, mapping, len,
+                                                    PCI_DMA_FROMDEVICE);
+                        memcpy(skb->data, sd->t.skb->data, len);
+                        pci_dma_sync_single_for_device(adap->pdev, mapping, len,
+                                                       PCI_DMA_FROMDEVICE);
+                } else if (!drop_thres)
+                        goto use_orig_buf;
+recycle:
+                recycle_rx_buf(adap, fl, fl->cidx);
+                return skb;
+        }
+
+        if (unlikely(fl->credits < drop_thres))
+                goto recycle;
+
+use_orig_buf:
+        pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+                         fl->buf_size, PCI_DMA_FROMDEVICE);
+        skb = sd->t.skb;
+        skb_put(skb, len);
+        __refill_fl(adap, fl);
+        return skb;
+}
+
 /**
  * handle_rsp_cntrl_info - handles control information in a response
  * @qs: the queue set corresponding to the response
@@ -1826,7 +1911,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
         q->next_holdoff = q->holdoff_tmr;
 
         while (likely(budget_left && is_new_response(r, q))) {
-                int eth, ethpad = 0;
+                int eth, ethpad = 2;
                 struct sk_buff *skb = NULL;
                 u32 len, flags = ntohl(r->flags);
                 u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
@@ -1853,18 +1938,56 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
                                 break;
                         }
                         q->imm_data++;
+                        ethpad = 0;
                 } else if ((len = ntohl(r->len_cq)) != 0) {
-                        struct sge_fl *fl;
+                        struct sge_fl *fl =
+                            (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
+
+                        if (fl->buf_size == RX_PAGE_SIZE) {
+                                struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+                                struct sge_fl_page *p = &sd->t.page;
+
+                                prefetch(p->va);
+                                prefetch(p->va + L1_CACHE_BYTES);
+
+                                __refill_fl(adap, fl);
+
+                                pci_unmap_single(adap->pdev,
+                                                 pci_unmap_addr(sd, dma_addr),
+                                                 fl->buf_size,
+                                                 PCI_DMA_FROMDEVICE);
+
+                                if (eth) {
+                                        if (unlikely(fl->credits <
+                                                     SGE_RX_DROP_THRES))
+                                                goto eth_recycle;
+
+                                        skb = alloc_skb(SKB_DATA_SIZE,
+                                                        GFP_ATOMIC);
+                                        if (unlikely(!skb)) {
+eth_recycle:
+                                                q->rx_drops++;
+                                                recycle_rx_buf(adap, fl,
+                                                               fl->cidx);
+                                                goto eth_done;
+                                        }
+                                } else {
+                                        skb = alloc_skb(SKB_DATA_SIZE,
+                                                        GFP_ATOMIC);
+                                        if (unlikely(!skb))
+                                                goto no_mem;
+                                }
+
+                                skb_data_init(skb, p, G_RSPD_LEN(len));
+eth_done:
+                                fl->credits--;
+                                q->eth_pkts++;
+                        } else {
+                                fl->credits--;
+                                skb = get_packet(adap, fl, G_RSPD_LEN(len),
+                                                 eth ? SGE_RX_DROP_THRES : 0);
+                        }
 
-                        fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
-                        fl->credits--;
-                        skb = get_packet(adap, fl, G_RSPD_LEN(len),
-                                         eth ? SGE_RX_DROP_THRES : 0);
-                        if (!skb)
-                                q->rx_drops++;
-                        else if (r->rss_hdr.opcode == CPL_TRACE_PKT)
-                                __skb_pull(skb, 2);
-                        ethpad = 2;
                         if (++fl->cidx == fl->size)
                                 fl->cidx = 0;
                 } else
@@ -1888,18 +2011,23 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
                         q->credits = 0;
                 }
 
-                if (likely(skb != NULL)) {
+                if (skb) {
+                        /* Preserve the RSS info in csum & priority */
+                        skb->csum = rss_hi;
+                        skb->priority = rss_lo;
+
                         if (eth)
                                 rx_eth(adap, q, skb, ethpad);
                         else {
-                                /* Preserve the RSS info in csum & priority */
-                                skb->csum = rss_hi;
-                                skb->priority = rss_lo;
-                                ngathered = rx_offload(&adap->tdev, q, skb,
-                                                       offload_skbs, ngathered);
+                                if (unlikely(r->rss_hdr.opcode ==
+                                             CPL_TRACE_PKT))
+                                        __skb_pull(skb, ethpad);
+
+                                ngathered = rx_offload(&adap->tdev, q,
+                                                       skb, offload_skbs,
+                                                       ngathered);
                         }
                 }
-
                 --budget_left;
         }
 
@@ -2376,7 +2504,7 @@ static void sge_timer_cb(unsigned long data)
                         spin_unlock(&qs->txq[TXQ_OFLD].lock);
         }
         lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
-                &adap->sge.qs[0].rspq.lock;
+            &adap->sge.qs[0].rspq.lock;
         if (spin_trylock_irq(lock)) {
                 if (!napi_is_scheduled(qs->netdev)) {
                         u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
@@ -2392,7 +2520,7 @@ static void sge_timer_cb(unsigned long data)
                                 refill_rspq(adap, &qs->rspq, 1);
                                 qs->rspq.credits--;
                                 qs->rspq.restarted++;
-                                t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
+                                t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
                                              1 << qs->rspq.cntxt_id);
                         }
                 }
@@ -2504,13 +2632,21 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
                 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
 
         if (ntxq == 1) {
+#ifdef USE_RX_PAGE
+                q->fl[0].buf_size = RX_PAGE_SIZE;
+#else
                 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
                     sizeof(struct cpl_rx_pkt);
+#endif
                 q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
                     sizeof(struct cpl_rx_pkt);
         } else {
+#ifdef USE_RX_PAGE
+                q->fl[0].buf_size = RX_PAGE_SIZE;
+#else
                 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
                     sizeof(struct cpl_rx_data);
+#endif
                 q->fl[1].buf_size = (16 * 1024) -
                     SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
         }
@@ -2704,7 +2840,7 @@ void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
         q->polling = adap->params.rev > 0;
         q->coalesce_usecs = 5;
         q->rspq_size = 1024;
-        q->fl_size = 4096;
+        q->fl_size = 1024;
         q->jumbo_size = 512;
         q->txq_size[TXQ_ETH] = 1024;
         q->txq_size[TXQ_OFLD] = 1024;
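On the receive side, the new skb_data_init() builds the sk_buff for a page-backed buffer by copying only the first SKB_DATA_SIZE (128) bytes into the freshly allocated skb's linear area and attaching the rest of the chunk as a page fragment, so large payloads are not copied. A rough userspace model of that split follows; sk_buff and page internals are replaced by a plain struct, the names and the 128-byte threshold come from the patch, and everything else is illustrative.

/*
 * Userspace model of skb_data_init() from this patch. NOT kernel code:
 * a plain struct stands in for the sk_buff; only the copy-vs-fragment
 * split mirrors the patch.
 */
#include <stdio.h>
#include <string.h>

#define SKB_DATA_SIZE 128       /* linear bytes copied per packet (from the patch) */
#define RX_PAGE_SIZE  2048      /* size of one Rx page chunk (from the patch) */

struct model_skb {
        unsigned char linear[SKB_DATA_SIZE];    /* models skb->data */
        unsigned int linear_len;                /* bytes valid in linear[] */
        const unsigned char *frag;              /* models frags[0], if any */
        unsigned int frag_len;                  /* models skb->data_len */
};

/* Mirror of skb_data_init(): split an Rx buffer of 'len' bytes at 'va'. */
static void model_skb_data_init(struct model_skb *skb,
                                const unsigned char *va, unsigned int len)
{
        if (len <= SKB_DATA_SIZE) {
                memcpy(skb->linear, va, len);   /* small packet: copy it all */
                skb->linear_len = len;
                skb->frag = NULL;               /* patch drops the page ref here */
                skb->frag_len = 0;
        } else {
                memcpy(skb->linear, va, SKB_DATA_SIZE);
                skb->linear_len = SKB_DATA_SIZE;
                skb->frag = va + SKB_DATA_SIZE; /* tail stays in the page, zero copy */
                skb->frag_len = len - SKB_DATA_SIZE;
        }
}

int main(void)
{
        unsigned char chunk[RX_PAGE_SIZE] = { 0 };
        struct model_skb small, big;

        model_skb_data_init(&small, chunk, 60);   /* fully copied */
        model_skb_data_init(&big, chunk, 1500);   /* 128 copied, 1372 referenced */

        printf("small: linear %u, frag %u\n", small.linear_len, small.frag_len);
        printf("big:   linear %u, frag %u\n", big.linear_len, big.frag_len);
        return 0;
}

In the driver itself the fragment keeps the page reference taken in refill_fl(), so the page is released only when the stack frees the skb; the small-packet path copies everything and drops the reference immediately with put_page().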