path: root/drivers/net/tg3.c
author		Matt Carlson <mcarlson@broadcom.com>	2009-11-13 08:03:50 -0500
committer	David S. Miller <davem@davemloft.net>	2009-11-16 01:14:54 -0500
commit		b196c7e45f30cbcd38c83386bc8a04a21477f8d3 (patch)
tree		97bdf9375a9a2b7c400204587a85515bd90c8a37 /drivers/net/tg3.c
parent		66711e66639776685aeaad774488be1857abce26 (diff)
tg3: Add rx prod ring consolidation
This patch adds code to funnel each MSI-X vector's rx packet buffers into a
single set of producer rings which will then be submitted to the hardware.

Signed-off-by: Matt Carlson <mcarlson@broadcom.com>
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r--  drivers/net/tg3.c | 171
1 file changed, 161 insertions(+), 10 deletions(-)
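The mechanism in a nutshell: with RSS enabled, each rx MSI-X vector recycles its
packet buffers onto a private producer ring set (tnapi->prodring) instead of
posting to the chip directly; the vector that owns the hardware-visible rings
(tp->napi[1]) later copies those entries into tp->prodring[0] via
tg3_rx_prodring_xfer() and writes the producer-index mailboxes, with an
smp_wmb()/smp_rmb() pair ordering the buffer-entry writes against the
producer-index updates. The fragment below is a minimal userspace sketch of
that hand-off, not driver code: RING_SIZE, struct buf, struct prodring,
ring_produce() and ring_xfer() are made-up names, and C11 fences stand in for
the kernel barriers.

/*
 * Standalone sketch of the producer-ring consolidation idea.  All names here
 * are illustrative only (they do not exist in tg3.c).
 */
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 8			/* stand-in for TG3_RX_RING_SIZE */

struct buf { void *data; };		/* stand-in for struct ring_info */

struct prodring {
	struct buf bufs[RING_SIZE];
	unsigned int prod_idx;		/* advanced by the owning rx vector  */
	unsigned int cons_idx;		/* advanced by the consolidating one */
};

/* Per-vector side: queue a recycled buffer on this vector's private ring. */
static void ring_produce(struct prodring *r, struct buf b)
{
	r->bufs[r->prod_idx] = b;
	/* Entry must be visible before the index update (smp_wmb() in tg3). */
	atomic_thread_fence(memory_order_release);
	r->prod_idx = (r->prod_idx + 1) % RING_SIZE;
}

/* Consolidator side: drain spr into dpr, the one ring posted to hardware,
 * copying at most up to a wrap point per pass, the way
 * tg3_rx_prodring_xfer() does for the std and jumbo rings. */
static void ring_xfer(struct prodring *dpr, struct prodring *spr)
{
	while (1) {
		unsigned int src_prod = spr->prod_idx;
		unsigned int cnt;

		/* Pairs with the release fence above (smp_rmb() in tg3). */
		atomic_thread_fence(memory_order_acquire);

		if (spr->cons_idx == src_prod)
			break;

		if (spr->cons_idx < src_prod)
			cnt = src_prod - spr->cons_idx;
		else
			cnt = RING_SIZE - spr->cons_idx;   /* up to src wrap */

		if (cnt > RING_SIZE - dpr->prod_idx)       /* and dest wrap  */
			cnt = RING_SIZE - dpr->prod_idx;

		memcpy(&dpr->bufs[dpr->prod_idx], &spr->bufs[spr->cons_idx],
		       cnt * sizeof(struct buf));

		spr->cons_idx = (spr->cons_idx + cnt) % RING_SIZE;
		dpr->prod_idx = (dpr->prod_idx + cnt) % RING_SIZE;
	}
}

int main(void)
{
	static struct prodring hw_ring, vec_ring;	/* zero-initialised */
	int pkt[3];

	/* A non-hardware rx vector refills three buffers... */
	for (int i = 0; i < 3; i++)
		ring_produce(&vec_ring, (struct buf){ .data = &pkt[i] });

	/* ...and the vector owning the hardware ring folds them in before
	 * writing the producer index to the mailbox register. */
	ring_xfer(&hw_ring, &vec_ring);
	printf("hw ring producer index now %u\n", hw_ring.prod_idx);
	return 0;
}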
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 05fd42f8f4ed..3ff2d40e63dd 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4558,7 +4558,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 	u32 sw_idx = tnapi->rx_rcb_ptr;
 	u16 hw_idx;
 	int received;
-	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
+	struct tg3_rx_prodring_set *tpr = tnapi->prodring;
 
 	hw_idx = *(tnapi->rx_rcb_prod_idx);
 	/*
@@ -4581,13 +4581,13 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
 		if (opaque_key == RXD_OPAQUE_RING_STD) {
-			ri = &tpr->rx_std_buffers[desc_idx];
+			ri = &tp->prodring[0].rx_std_buffers[desc_idx];
 			dma_addr = pci_unmap_addr(ri, mapping);
 			skb = ri->skb;
 			post_ptr = &std_prod_idx;
 			rx_std_posted++;
 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
-			ri = &tpr->rx_jmb_buffers[desc_idx];
+			ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
 			dma_addr = pci_unmap_addr(ri, mapping);
 			skb = ri->skb;
 			post_ptr = &jmb_prod_idx;
@@ -4704,15 +4704,30 @@ next_pkt_nopost:
 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
 
 	/* Refill RX ring(s). */
-	if (work_mask & RXD_OPAQUE_RING_STD) {
-		tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
-		tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
-	}
-	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
-		tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
-		tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
+	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) || tnapi == &tp->napi[1]) {
+		if (work_mask & RXD_OPAQUE_RING_STD) {
+			tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
+			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+				     tpr->rx_std_prod_idx);
+		}
+		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
+			tpr->rx_jmb_prod_idx = jmb_prod_idx %
+					       TG3_RX_JUMBO_RING_SIZE;
+			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
+				     tpr->rx_jmb_prod_idx);
+		}
+		mmiowb();
+	} else if (work_mask) {
+		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
+		 * updated before the producer indices can be updated.
+		 */
+		smp_wmb();
+
+		tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
+		tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
+
+		napi_schedule(&tp->napi[1].napi);
 	}
-	mmiowb();
 
 	return received;
 }
@@ -4743,6 +4758,93 @@ static void tg3_poll_link(struct tg3 *tp)
 	}
 }
 
+static void tg3_rx_prodring_xfer(struct tg3 *tp,
+				 struct tg3_rx_prodring_set *dpr,
+				 struct tg3_rx_prodring_set *spr)
+{
+	u32 si, di, cpycnt, src_prod_idx;
+	int i;
+
+	while (1) {
+		src_prod_idx = spr->rx_std_prod_idx;
+
+		/* Make sure updates to the rx_std_buffers[] entries and the
+		 * standard producer index are seen in the correct order.
+		 */
+		smp_rmb();
+
+		if (spr->rx_std_cons_idx == src_prod_idx)
+			break;
+
+		if (spr->rx_std_cons_idx < src_prod_idx)
+			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
+		else
+			cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
+
+		cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
+
+		si = spr->rx_std_cons_idx;
+		di = dpr->rx_std_prod_idx;
+
+		memcpy(&dpr->rx_std_buffers[di],
+		       &spr->rx_std_buffers[si],
+		       cpycnt * sizeof(struct ring_info));
+
+		for (i = 0; i < cpycnt; i++, di++, si++) {
+			struct tg3_rx_buffer_desc *sbd, *dbd;
+			sbd = &spr->rx_std[si];
+			dbd = &dpr->rx_std[di];
+			dbd->addr_hi = sbd->addr_hi;
+			dbd->addr_lo = sbd->addr_lo;
+		}
+
+		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
+				       TG3_RX_RING_SIZE;
+		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
+				       TG3_RX_RING_SIZE;
+	}
+
+	while (1) {
+		src_prod_idx = spr->rx_jmb_prod_idx;
+
+		/* Make sure updates to the rx_jmb_buffers[] entries and
+		 * the jumbo producer index are seen in the correct order.
+		 */
+		smp_rmb();
+
+		if (spr->rx_jmb_cons_idx == src_prod_idx)
+			break;
+
+		if (spr->rx_jmb_cons_idx < src_prod_idx)
+			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
+		else
+			cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
+
+		cpycnt = min(cpycnt,
+			     TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
+
+		si = spr->rx_jmb_cons_idx;
+		di = dpr->rx_jmb_prod_idx;
+
+		memcpy(&dpr->rx_jmb_buffers[di],
+		       &spr->rx_jmb_buffers[si],
+		       cpycnt * sizeof(struct ring_info));
+
+		for (i = 0; i < cpycnt; i++, di++, si++) {
+			struct tg3_rx_buffer_desc *sbd, *dbd;
+			sbd = &spr->rx_jmb[si].std;
+			dbd = &dpr->rx_jmb[di].std;
+			dbd->addr_hi = sbd->addr_hi;
+			dbd->addr_lo = sbd->addr_lo;
+		}
+
+		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
+				       TG3_RX_JUMBO_RING_SIZE;
+		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
+				       TG3_RX_JUMBO_RING_SIZE;
+	}
+}
+
 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
 {
 	struct tg3 *tp = tnapi->tp;
@@ -4761,6 +4863,30 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
 		work_done += tg3_rx(tnapi, budget - work_done);
 
+	if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
+		int i;
+		u32 std_prod_idx = tp->prodring[0].rx_std_prod_idx;
+		u32 jmb_prod_idx = tp->prodring[0].rx_jmb_prod_idx;
+
+		for (i = 2; i < tp->irq_cnt; i++)
+			tg3_rx_prodring_xfer(tp, tnapi->prodring,
+					     tp->napi[i].prodring);
+
+		wmb();
+
+		if (std_prod_idx != tp->prodring[0].rx_std_prod_idx) {
+			u32 mbox = TG3_RX_STD_PROD_IDX_REG;
+			tw32_rx_mbox(mbox, tp->prodring[0].rx_std_prod_idx);
+		}
+
+		if (jmb_prod_idx != tp->prodring[0].rx_jmb_prod_idx) {
+			u32 mbox = TG3_RX_JMB_PROD_IDX_REG;
+			tw32_rx_mbox(mbox, tp->prodring[0].rx_jmb_prod_idx);
+		}
+
+		mmiowb();
+	}
+
 	return work_done;
 }
 
@@ -5715,8 +5841,23 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
 {
 	int i;
 
-	if (tpr != &tp->prodring[0])
+	if (tpr != &tp->prodring[0]) {
+		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
+		     i = (i + 1) % TG3_RX_RING_SIZE)
+			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+					tp->rx_pkt_map_sz);
+
+		if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
+			for (i = tpr->rx_jmb_cons_idx;
+			     i != tpr->rx_jmb_prod_idx;
+			     i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
+				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+						TG3_RX_JMB_MAP_SZ);
+			}
+		}
+
 		return;
+	}
 
 	for (i = 0; i < TG3_RX_RING_SIZE; i++)
 		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
@@ -5741,6 +5882,11 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 {
 	u32 i, rx_pkt_dma_sz;
 
+	tpr->rx_std_cons_idx = 0;
+	tpr->rx_std_prod_idx = 0;
+	tpr->rx_jmb_cons_idx = 0;
+	tpr->rx_jmb_prod_idx = 0;
+
 	if (tpr != &tp->prodring[0]) {
 		memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
 		if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
@@ -6062,6 +6208,11 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 			break;
 		}
 
+		if (tp->irq_cnt == 1)
+			tnapi->prodring = &tp->prodring[0];
+		else if (i)
+			tnapi->prodring = &tp->prodring[i - 1];
+
 		/*
 		 * If multivector RSS is enabled, vector 0 does not handle
 		 * rx or tx interrupts.  Don't allocate any resources for it.