author | Divy Le Ray <divy@chelsio.com> | 2008-05-21 21:56:21 -0400
---|---|---
committer | Jeff Garzik <jgarzik@redhat.com> | 2008-05-22 06:34:11 -0400
commit | 7385ecf339c504933a98581c2056d83b69b2a82b (patch) |
tree | 6d240a62e1002a72fb147f0623efc0f6c298375a /drivers/net/cxgb3 |
parent | b1fb1f280d0969f47d4ef19334120f5c34e36080 (diff) |
cxgb3 - Add page support to jumbo frame Rx queue
Add page support to Jumbo frame Rx queues.
Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/net/cxgb3')
-rw-r--r-- | drivers/net/cxgb3/adapter.h | 4
-rw-r--r-- | drivers/net/cxgb3/sge.c | 115
2 files changed, 84 insertions, 35 deletions
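
The heart of the sge.c change below is alloc_pg_chunk(): the driver now allocates one higher-order page group per free list and carves it into fixed-size Rx buffer chunks, starting a new allocation only when the current one is exhausted. The following userspace-only sketch illustrates that carving pattern; the sizes, the pg_chunk_sim names, and the malloc() stand-in for alloc_pages() are illustrative assumptions, not the driver's code.

```c
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins; the driver derives the real values from
 * PAGE_SIZE, FL1_PG_CHUNK_SIZE and FL1_PG_ORDER. */
#define SIM_PAGE_SIZE  4096u
#define SIM_ORDER      2u        /* 4 pages = 16 KB per allocation */
#define SIM_CHUNK_SIZE 4096u     /* 4 chunks carved from each allocation */

struct pg_chunk_sim {
        unsigned char *page;     /* base of the current large allocation */
        unsigned char *va;       /* start of the next unused chunk */
        size_t offset;           /* bytes already handed out */
};

/* Mirrors the shape of alloc_pg_chunk(): reuse the current allocation
 * until offset reaches (PAGE_SIZE << order), then start a new one. */
static unsigned char *sim_alloc_chunk(struct pg_chunk_sim *pc)
{
        size_t span = (size_t)SIM_PAGE_SIZE << SIM_ORDER;
        unsigned char *chunk;

        if (!pc->page) {
                pc->page = malloc(span); /* stand-in for alloc_pages(gfp, order) */
                if (!pc->page)
                        return NULL;
                pc->va = pc->page;
                pc->offset = 0;
        }

        chunk = pc->va;
        pc->offset += SIM_CHUNK_SIZE;
        if (pc->offset == span)
                pc->page = NULL;         /* fully carved; next call allocates again */
        else
                pc->va += SIM_CHUNK_SIZE;
        return chunk;
}

int main(void)
{
        struct pg_chunk_sim pc = { 0 };
        unsigned char *base = NULL;

        for (int i = 0; i < 3; i++) {
                unsigned char *c = sim_alloc_chunk(&pc);
                if (!c)
                        break;
                if (!base)
                        base = c;
                printf("chunk %d at offset %zu\n", i, (size_t)(c - base));
        }
        free(base); /* the driver relies on page refcounting (__GFP_COMP) instead */
        return 0;
}
```

In the driver the chunks additionally carry DMA mappings and page references (hence the new __GFP_COMP flag on the refill_fl() calls); the sketch only shows the offset bookkeeping.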
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index acebe431d068..263e4faf45e5 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -92,6 +92,7 @@ struct sge_fl {                 /* SGE per free-buffer list state */
         unsigned int gen;           /* free list generation */
         struct fl_pg_chunk pg_chunk;/* page chunk cache */
         unsigned int use_pages;     /* whether FL uses pages or sk_buffs */
+        unsigned int order;         /* order of page allocations */
         struct rx_desc *desc;       /* address of HW Rx descriptor ring */
         struct rx_sw_desc *sdesc;   /* address of SW Rx descriptor ring */
         dma_addr_t   phys_addr;     /* physical address of HW ring start */
@@ -116,12 +117,15 @@ struct sge_rspq {              /* state for an SGE response queue */
         unsigned int polling;       /* is the queue serviced through NAPI? */
         unsigned int holdoff_tmr;   /* interrupt holdoff timer in 100ns */
         unsigned int next_holdoff;  /* holdoff time for next interrupt */
+        unsigned int rx_recycle_buf;/* whether recycling occurred
+                                       within current sop-eop */
         struct rsp_desc *desc;      /* address of HW response ring */
         dma_addr_t   phys_addr;     /* physical address of the ring */
         unsigned int cntxt_id;      /* SGE context id for the response q */
         spinlock_t   lock;          /* guards response processing */
         struct sk_buff *rx_head;    /* offload packet receive queue head */
         struct sk_buff *rx_tail;    /* offload packet receive queue tail */
+        struct sk_buff *pg_skb;     /* used to build frag list in napi handler */
 
         unsigned long offload_pkts;
         unsigned long offload_bundles;
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 0741deb86ca6..3e91be55e19e 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -55,6 +55,9 @@
  * directly.
  */
 #define FL0_PG_CHUNK_SIZE  2048
+#define FL0_PG_ORDER 0
+#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
+#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
 
 #define SGE_RX_DROP_THRES 16
 
@@ -359,7 +362,7 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
         }
 
         if (q->pg_chunk.page) {
-                __free_page(q->pg_chunk.page);
+                __free_pages(q->pg_chunk.page, q->order);
                 q->pg_chunk.page = NULL;
         }
 }
@@ -396,10 +399,11 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
         return 0;
 }
 
-static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
+static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
+                          unsigned int order)
 {
         if (!q->pg_chunk.page) {
-                q->pg_chunk.page = alloc_page(gfp);
+                q->pg_chunk.page = alloc_pages(gfp, order);
                 if (unlikely(!q->pg_chunk.page))
                         return -ENOMEM;
                 q->pg_chunk.va = page_address(q->pg_chunk.page);
@@ -408,7 +412,7 @@ static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
         sd->pg_chunk = q->pg_chunk;
 
         q->pg_chunk.offset += q->buf_size;
-        if (q->pg_chunk.offset == PAGE_SIZE)
+        if (q->pg_chunk.offset == (PAGE_SIZE << order))
                 q->pg_chunk.page = NULL;
         else {
                 q->pg_chunk.va += q->buf_size;
@@ -439,7 +443,7 @@ static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
                 int err;
 
                 if (q->use_pages) {
-                        if (unlikely(alloc_pg_chunk(q, sd, gfp))) {
+                        if (unlikely(alloc_pg_chunk(q, sd, gfp, q->order))) {
 nomem:                          q->alloc_failed++;
                                 break;
                         }
@@ -484,7 +488,8 @@ nomem:                          q->alloc_failed++;
 
 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
 {
-        refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
+        refill_fl(adap, fl, min(16U, fl->size - fl->credits),
+                  GFP_ATOMIC | __GFP_COMP);
 }
 
 /**
@@ -759,19 +764,22 @@ use_orig_buf:
  *      that are page chunks rather than sk_buffs.
  */
 static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
-                                     unsigned int len, unsigned int drop_thres)
+                                     struct sge_rspq *q, unsigned int len,
+                                     unsigned int drop_thres)
 {
-        struct sk_buff *skb = NULL;
+        struct sk_buff *newskb, *skb;
         struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
 
-        if (len <= SGE_RX_COPY_THRES) {
-                skb = alloc_skb(len, GFP_ATOMIC);
-                if (likely(skb != NULL)) {
-                        __skb_put(skb, len);
+        newskb = skb = q->pg_skb;
+
+        if (!skb && (len <= SGE_RX_COPY_THRES)) {
+                newskb = alloc_skb(len, GFP_ATOMIC);
+                if (likely(newskb != NULL)) {
+                        __skb_put(newskb, len);
                         pci_dma_sync_single_for_cpu(adap->pdev,
                                             pci_unmap_addr(sd, dma_addr), len,
                                             PCI_DMA_FROMDEVICE);
-                        memcpy(skb->data, sd->pg_chunk.va, len);
+                        memcpy(newskb->data, sd->pg_chunk.va, len);
                         pci_dma_sync_single_for_device(adap->pdev,
                                             pci_unmap_addr(sd, dma_addr), len,
                                             PCI_DMA_FROMDEVICE);
@@ -780,14 +788,16 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
 recycle:
                 fl->credits--;
                 recycle_rx_buf(adap, fl, fl->cidx);
-                return skb;
+                q->rx_recycle_buf++;
+                return newskb;
         }
 
-        if (unlikely(fl->credits <= drop_thres))
+        if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
                 goto recycle;
 
-        skb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
-        if (unlikely(!skb)) {
+        if (!skb)
+                newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
+        if (unlikely(!newskb)) {
                 if (!drop_thres)
                         return NULL;
                 goto recycle;
@@ -795,21 +805,29 @@ recycle:
 
         pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
                          fl->buf_size, PCI_DMA_FROMDEVICE);
-        __skb_put(skb, SGE_RX_PULL_LEN);
-        memcpy(skb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
-        skb_fill_page_desc(skb, 0, sd->pg_chunk.page,
-                           sd->pg_chunk.offset + SGE_RX_PULL_LEN,
-                           len - SGE_RX_PULL_LEN);
-        skb->len = len;
-        skb->data_len = len - SGE_RX_PULL_LEN;
-        skb->truesize += skb->data_len;
+        if (!skb) {
+                __skb_put(newskb, SGE_RX_PULL_LEN);
+                memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
+                skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
+                                   sd->pg_chunk.offset + SGE_RX_PULL_LEN,
+                                   len - SGE_RX_PULL_LEN);
+                newskb->len = len;
+                newskb->data_len = len - SGE_RX_PULL_LEN;
+        } else {
+                skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
+                                   sd->pg_chunk.page,
+                                   sd->pg_chunk.offset, len);
+                newskb->len += len;
+                newskb->data_len += len;
+        }
+        newskb->truesize += newskb->data_len;
 
         fl->credits--;
         /*
          * We do not refill FLs here, we let the caller do it to overlap a
          * prefetch.
          */
-        return skb;
+        return newskb;
 }
 
 /**
@@ -1966,6 +1984,12 @@ static inline int is_new_response(const struct rsp_desc *r,
         return (r->intr_gen & F_RSPD_GEN2) == q->gen;
 }
 
+static inline void clear_rspq_bufstate(struct sge_rspq * const q)
+{
+        q->pg_skb = NULL;
+        q->rx_recycle_buf = 0;
+}
+
 #define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
                         V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
@@ -2003,10 +2027,11 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
         q->next_holdoff = q->holdoff_tmr;
 
         while (likely(budget_left && is_new_response(r, q))) {
-                int eth, ethpad = 2;
+                int packet_complete, eth, ethpad = 2;
                 struct sk_buff *skb = NULL;
                 u32 len, flags = ntohl(r->flags);
-                __be32 rss_hi = *(const __be32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
+                __be32 rss_hi = *(const __be32 *)r,
+                       rss_lo = r->rss_hdr.rss_hash_val;
 
                 eth = r->rss_hdr.opcode == CPL_RX_PKT;
 
@@ -2044,8 +2069,11 @@ no_mem:
 #endif
                         __refill_fl(adap, fl);
 
-                        skb = get_packet_pg(adap, fl, G_RSPD_LEN(len),
-                                            eth ? SGE_RX_DROP_THRES : 0);
+                        skb = get_packet_pg(adap, fl, q,
+                                            G_RSPD_LEN(len),
+                                            eth ?
+                                            SGE_RX_DROP_THRES : 0);
+                        q->pg_skb = skb;
                 } else
                         skb = get_packet(adap, fl, G_RSPD_LEN(len),
                                          eth ? SGE_RX_DROP_THRES : 0);
@@ -2079,7 +2107,11 @@ no_mem:
                         q->credits = 0;
                 }
 
-                if (likely(skb != NULL)) {
+                packet_complete = flags &
+                                  (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
+                                   F_RSPD_ASYNC_NOTIF);
+
+                if (skb != NULL && packet_complete) {
                         if (eth)
                                 rx_eth(adap, q, skb, ethpad);
                         else {
@@ -2091,6 +2123,9 @@ no_mem:
                                                          offload_skbs,
                                                          ngathered);
                         }
+
+                        if (flags & F_RSPD_EOP)
+                                clear_rspq_bufstate(q);
                 }
                 --budget_left;
         }
@@ -2706,10 +2741,18 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 #else
         q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
 #endif
-        q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
+#if FL1_PG_CHUNK_SIZE > 0
+        q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
+#else
         q->fl[1].buf_size = is_offload(adapter) ?
                 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
                 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
+#endif
+
+        q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
+        q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
+        q->fl[0].order = FL0_PG_ORDER;
+        q->fl[1].order = FL1_PG_ORDER;
 
         spin_lock_irq(&adapter->sge.reg_lock);
 
@@ -2760,7 +2803,8 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
         q->adap = adapter;
         q->netdev = dev;
         t3_update_qset_coalesce(q, p);
-        avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
+        avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
+                          GFP_KERNEL | __GFP_COMP);
         if (!avail) {
                 CH_ALERT(adapter, "free list queue 0 initialization failed\n");
                 goto err;
@@ -2769,7 +2813,8 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
                 CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
                         avail);
 
-        avail = refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
+        avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
+                          GFP_KERNEL | __GFP_COMP);
         if (avail < q->fl[1].size)
                 CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
                         avail);
@@ -2905,7 +2950,7 @@ void t3_sge_prep(struct adapter *adap, struct sge_params *p)
                 q->coalesce_usecs = 5;
                 q->rspq_size = 1024;
                 q->fl_size = 1024;
-                q->jumbo_size = 512;
+                q->jumbo_size = 512;
                 q->txq_size[TXQ_ETH] = 1024;
                 q->txq_size[TXQ_OFLD] = 1024;
                 q->txq_size[TXQ_CTRL] = 256;
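
For reference, the new FL1_PG_CHUNK_SIZE / FL1_PG_ORDER constants adapt the jumbo free list to the architecture's page size: with 4 KB pages the driver takes one 8 KB chunk from each order-1 (8 KB) allocation, while with 64 KB pages it carves four 16 KB chunks out of a single order-0 page. A small standalone sketch of that arithmetic (plain C, with the page size passed in by hand for illustration rather than taken from the kernel):

```c
#include <stdio.h>

/* Same expressions as the new FL1_PG_CHUNK_SIZE / FL1_PG_ORDER macros,
 * evaluated for a page size supplied at run time. */
static void show_fl1_sizing(unsigned long page_size)
{
        unsigned long chunk = page_size > 8192 ? 16384 : 8192;
        unsigned int order = page_size > 8192 ? 0 : 1;
        unsigned long span = page_size << order;   /* bytes per allocation */

        printf("PAGE_SIZE=%lu: chunk=%lu, order=%u, chunks per allocation=%lu\n",
               page_size, chunk, order, span / chunk);
}

int main(void)
{
        show_fl1_sizing(4096);   /* typical x86: 8 KB chunks from order-1 pages */
        show_fl1_sizing(65536);  /* 64 KB pages: 16 KB chunks from order-0 pages */
        return 0;
}
```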