about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorMatt Carlson <mcarlson@broadcom.com>2009-08-28 10:00:25 -0400
committerDavid S. Miller <davem@davemloft.net>2009-08-29 18:42:50 -0400
commit21f581a5366d48d86b9ae86043fc61d44e992c0c (patch)
treebfaa5a591d663ad5d61b9cdd219409ec4922f0dd /drivers
parentcf7a7298c4f47ab7546b933bb54ad3ea03a1daf6 (diff)
tg3: Create a new prodring_set structure
This patch migrates most of the rx producer ring variables to a new tg3_rx_prodring_set structure and modifies the code accordingly.

Signed-off-by: Matt Carlson <mcarlson@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/tg3.c163
-rw-r--r--drivers/net/tg3.h27
2 files changed, 100 insertions(+), 90 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 5d0a1e6e0517..053d4e85820d 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4352,24 +4352,25 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
4352 struct sk_buff *skb; 4352 struct sk_buff *skb;
4353 dma_addr_t mapping; 4353 dma_addr_t mapping;
4354 int skb_size, dest_idx; 4354 int skb_size, dest_idx;
4355 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4355 4356
4356 src_map = NULL; 4357 src_map = NULL;
4357 switch (opaque_key) { 4358 switch (opaque_key) {
4358 case RXD_OPAQUE_RING_STD: 4359 case RXD_OPAQUE_RING_STD:
4359 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; 4360 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4360 desc = &tp->rx_std[dest_idx]; 4361 desc = &tpr->rx_std[dest_idx];
4361 map = &tp->rx_std_buffers[dest_idx]; 4362 map = &tpr->rx_std_buffers[dest_idx];
4362 if (src_idx >= 0) 4363 if (src_idx >= 0)
4363 src_map = &tp->rx_std_buffers[src_idx]; 4364 src_map = &tpr->rx_std_buffers[src_idx];
4364 skb_size = tp->rx_pkt_map_sz; 4365 skb_size = tp->rx_pkt_map_sz;
4365 break; 4366 break;
4366 4367
4367 case RXD_OPAQUE_RING_JUMBO: 4368 case RXD_OPAQUE_RING_JUMBO:
4368 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; 4369 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4369 desc = &tp->rx_jumbo[dest_idx]; 4370 desc = &tpr->rx_jmb[dest_idx];
4370 map = &tp->rx_jumbo_buffers[dest_idx]; 4371 map = &tpr->rx_jmb_buffers[dest_idx];
4371 if (src_idx >= 0) 4372 if (src_idx >= 0)
4372 src_map = &tp->rx_jumbo_buffers[src_idx]; 4373 src_map = &tpr->rx_jmb_buffers[src_idx];
4373 skb_size = TG3_RX_JMB_MAP_SZ; 4374 skb_size = TG3_RX_JMB_MAP_SZ;
4374 break; 4375 break;
4375 4376
@@ -4414,22 +4415,23 @@ static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4414 struct tg3_rx_buffer_desc *src_desc, *dest_desc; 4415 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4415 struct ring_info *src_map, *dest_map; 4416 struct ring_info *src_map, *dest_map;
4416 int dest_idx; 4417 int dest_idx;
4418 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4417 4419
4418 switch (opaque_key) { 4420 switch (opaque_key) {
4419 case RXD_OPAQUE_RING_STD: 4421 case RXD_OPAQUE_RING_STD:
4420 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; 4422 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4421 dest_desc = &tp->rx_std[dest_idx]; 4423 dest_desc = &tpr->rx_std[dest_idx];
4422 dest_map = &tp->rx_std_buffers[dest_idx]; 4424 dest_map = &tpr->rx_std_buffers[dest_idx];
4423 src_desc = &tp->rx_std[src_idx]; 4425 src_desc = &tpr->rx_std[src_idx];
4424 src_map = &tp->rx_std_buffers[src_idx]; 4426 src_map = &tpr->rx_std_buffers[src_idx];
4425 break; 4427 break;
4426 4428
4427 case RXD_OPAQUE_RING_JUMBO: 4429 case RXD_OPAQUE_RING_JUMBO:
4428 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; 4430 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4429 dest_desc = &tp->rx_jumbo[dest_idx]; 4431 dest_desc = &tpr->rx_jmb[dest_idx];
4430 dest_map = &tp->rx_jumbo_buffers[dest_idx]; 4432 dest_map = &tpr->rx_jmb_buffers[dest_idx];
4431 src_desc = &tp->rx_jumbo[src_idx]; 4433 src_desc = &tpr->rx_jmb[src_idx];
4432 src_map = &tp->rx_jumbo_buffers[src_idx]; 4434 src_map = &tpr->rx_jmb_buffers[src_idx];
4433 break; 4435 break;
4434 4436
4435 default: 4437 default:
@@ -4482,6 +4484,7 @@ static int tg3_rx(struct tg3 *tp, int budget)
4482 u32 sw_idx = tp->rx_rcb_ptr; 4484 u32 sw_idx = tp->rx_rcb_ptr;
4483 u16 hw_idx; 4485 u16 hw_idx;
4484 int received; 4486 int received;
4487 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4485 4488
4486 hw_idx = tp->hw_status->idx[0].rx_producer; 4489 hw_idx = tp->hw_status->idx[0].rx_producer;
4487 /* 4490 /*
@@ -4501,20 +4504,18 @@ static int tg3_rx(struct tg3 *tp, int budget)
4501 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 4504 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4502 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 4505 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4503 if (opaque_key == RXD_OPAQUE_RING_STD) { 4506 if (opaque_key == RXD_OPAQUE_RING_STD) {
4504 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], 4507 struct ring_info *ri = &tpr->rx_std_buffers[desc_idx];
4505 mapping); 4508 dma_addr = pci_unmap_addr(ri, mapping);
4506 skb = tp->rx_std_buffers[desc_idx].skb; 4509 skb = ri->skb;
4507 post_ptr = &tp->rx_std_ptr; 4510 post_ptr = &tpr->rx_std_ptr;
4508 rx_std_posted++; 4511 rx_std_posted++;
4509 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 4512 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4510 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx], 4513 struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx];
4511 mapping); 4514 dma_addr = pci_unmap_addr(ri, mapping);
4512 skb = tp->rx_jumbo_buffers[desc_idx].skb; 4515 skb = ri->skb;
4513 post_ptr = &tp->rx_jumbo_ptr; 4516 post_ptr = &tpr->rx_jmb_ptr;
4514 } 4517 } else
4515 else {
4516 goto next_pkt_nopost; 4518 goto next_pkt_nopost;
4517 }
4518 4519
4519 work_mask |= opaque_key; 4520 work_mask |= opaque_key;
4520 4521
@@ -4627,12 +4628,12 @@ next_pkt_nopost:
4627 4628
4628 /* Refill RX ring(s). */ 4629 /* Refill RX ring(s). */
4629 if (work_mask & RXD_OPAQUE_RING_STD) { 4630 if (work_mask & RXD_OPAQUE_RING_STD) {
4630 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE; 4631 sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE;
4631 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, 4632 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4632 sw_idx); 4633 sw_idx);
4633 } 4634 }
4634 if (work_mask & RXD_OPAQUE_RING_JUMBO) { 4635 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4635 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE; 4636 sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE;
4636 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, 4637 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4637 sw_idx); 4638 sw_idx);
4638 } 4639 }
@@ -5517,13 +5518,14 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5517 return err; 5518 return err;
5518} 5519}
5519 5520
5520static void tg3_rx_prodring_free(struct tg3 *tp) 5521static void tg3_rx_prodring_free(struct tg3 *tp,
5522 struct tg3_rx_prodring_set *tpr)
5521{ 5523{
5522 struct ring_info *rxp; 5524 struct ring_info *rxp;
5523 int i; 5525 int i;
5524 5526
5525 for (i = 0; i < TG3_RX_RING_SIZE; i++) { 5527 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5526 rxp = &tp->rx_std_buffers[i]; 5528 rxp = &tpr->rx_std_buffers[i];
5527 5529
5528 if (rxp->skb == NULL) 5530 if (rxp->skb == NULL)
5529 continue; 5531 continue;
@@ -5538,7 +5540,7 @@ static void tg3_rx_prodring_free(struct tg3 *tp)
5538 5540
5539 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 5541 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5540 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { 5542 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5541 rxp = &tp->rx_jumbo_buffers[i]; 5543 rxp = &tpr->rx_jmb_buffers[i];
5542 5544
5543 if (rxp->skb == NULL) 5545 if (rxp->skb == NULL)
5544 continue; 5546 continue;
@@ -5560,12 +5562,13 @@ static void tg3_rx_prodring_free(struct tg3 *tp)
5560 * end up in the driver. tp->{tx,}lock are held and thus 5562 * end up in the driver. tp->{tx,}lock are held and thus
5561 * we may not sleep. 5563 * we may not sleep.
5562 */ 5564 */
5563static int tg3_rx_prodring_alloc(struct tg3 *tp) 5565static int tg3_rx_prodring_alloc(struct tg3 *tp,
5566 struct tg3_rx_prodring_set *tpr)
5564{ 5567{
5565 u32 i, rx_pkt_dma_sz; 5568 u32 i, rx_pkt_dma_sz;
5566 5569
5567 /* Zero out all descriptors. */ 5570 /* Zero out all descriptors. */
5568 memset(tp->rx_std, 0, TG3_RX_RING_BYTES); 5571 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
5569 5572
5570 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; 5573 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
5571 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) && 5574 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
@@ -5580,7 +5583,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp)
5580 for (i = 0; i < TG3_RX_RING_SIZE; i++) { 5583 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5581 struct tg3_rx_buffer_desc *rxd; 5584 struct tg3_rx_buffer_desc *rxd;
5582 5585
5583 rxd = &tp->rx_std[i]; 5586 rxd = &tpr->rx_std[i];
5584 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; 5587 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
5585 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); 5588 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5586 rxd->opaque = (RXD_OPAQUE_RING_STD | 5589 rxd->opaque = (RXD_OPAQUE_RING_STD |
@@ -5605,13 +5608,13 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp)
5605 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)) 5608 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
5606 goto done; 5609 goto done;
5607 5610
5608 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES); 5611 memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
5609 5612
5610 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { 5613 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5611 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { 5614 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5612 struct tg3_rx_buffer_desc *rxd; 5615 struct tg3_rx_buffer_desc *rxd;
5613 5616
5614 rxd = &tp->rx_jumbo[i]; 5617 rxd = &tpr->rx_jmb[i];
5615 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; 5618 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
5616 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | 5619 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5617 RXD_FLAG_JUMBO; 5620 RXD_FLAG_JUMBO;
@@ -5639,58 +5642,60 @@ done:
5639 return 0; 5642 return 0;
5640 5643
5641initfail: 5644initfail:
5642 tg3_rx_prodring_free(tp); 5645 tg3_rx_prodring_free(tp, tpr);
5643 return -ENOMEM; 5646 return -ENOMEM;
5644} 5647}
5645 5648
5646static void tg3_rx_prodring_fini(struct tg3 *tp) 5649static void tg3_rx_prodring_fini(struct tg3 *tp,
5650 struct tg3_rx_prodring_set *tpr)
5647{ 5651{
5648 kfree(tp->rx_std_buffers); 5652 kfree(tpr->rx_std_buffers);
5649 tp->rx_std_buffers = NULL; 5653 tpr->rx_std_buffers = NULL;
5650 kfree(tp->rx_jumbo_buffers); 5654 kfree(tpr->rx_jmb_buffers);
5651 tp->rx_jumbo_buffers = NULL; 5655 tpr->rx_jmb_buffers = NULL;
5652 if (tp->rx_std) { 5656 if (tpr->rx_std) {
5653 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES, 5657 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5654 tp->rx_std, tp->rx_std_mapping); 5658 tpr->rx_std, tpr->rx_std_mapping);
5655 tp->rx_std = NULL; 5659 tpr->rx_std = NULL;
5656 } 5660 }
5657 if (tp->rx_jumbo) { 5661 if (tpr->rx_jmb) {
5658 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES, 5662 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5659 tp->rx_jumbo, tp->rx_jumbo_mapping); 5663 tpr->rx_jmb, tpr->rx_jmb_mapping);
5660 tp->rx_jumbo = NULL; 5664 tpr->rx_jmb = NULL;
5661 } 5665 }
5662} 5666}
5663 5667
5664static int tg3_rx_prodring_init(struct tg3 *tp) 5668static int tg3_rx_prodring_init(struct tg3 *tp,
5669 struct tg3_rx_prodring_set *tpr)
5665{ 5670{
5666 tp->rx_std_buffers = kzalloc(sizeof(struct ring_info) * 5671 tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) *
5667 TG3_RX_RING_SIZE, GFP_KERNEL); 5672 TG3_RX_RING_SIZE, GFP_KERNEL);
5668 if (!tp->rx_std_buffers) 5673 if (!tpr->rx_std_buffers)
5669 return -ENOMEM; 5674 return -ENOMEM;
5670 5675
5671 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES, 5676 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5672 &tp->rx_std_mapping); 5677 &tpr->rx_std_mapping);
5673 if (!tp->rx_std) 5678 if (!tpr->rx_std)
5674 goto err_out; 5679 goto err_out;
5675 5680
5676 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 5681 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5677 tp->rx_jumbo_buffers = kzalloc(sizeof(struct ring_info) * 5682 tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) *
5678 TG3_RX_JUMBO_RING_SIZE, 5683 TG3_RX_JUMBO_RING_SIZE,
5679 GFP_KERNEL); 5684 GFP_KERNEL);
5680 if (!tp->rx_jumbo_buffers) 5685 if (!tpr->rx_jmb_buffers)
5681 goto err_out; 5686 goto err_out;
5682 5687
5683 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, 5688 tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
5684 TG3_RX_JUMBO_RING_BYTES, 5689 TG3_RX_JUMBO_RING_BYTES,
5685 &tp->rx_jumbo_mapping); 5690 &tpr->rx_jmb_mapping);
5686 if (!tp->rx_jumbo) 5691 if (!tpr->rx_jmb)
5687 goto err_out; 5692 goto err_out;
5688 } 5693 }
5689 5694
5690 return 0; 5695 return 0;
5691 5696
5692err_out: 5697err_out:
5693 tg3_rx_prodring_fini(tp); 5698 tg3_rx_prodring_fini(tp, tpr);
5694 return -ENOMEM; 5699 return -ENOMEM;
5695} 5700}
5696 5701
@@ -5726,7 +5731,7 @@ static void tg3_free_rings(struct tg3 *tp)
5726 dev_kfree_skb_any(skb); 5731 dev_kfree_skb_any(skb);
5727 } 5732 }
5728 5733
5729 tg3_rx_prodring_free(tp); 5734 tg3_rx_prodring_free(tp, &tp->prodring[0]);
5730} 5735}
5731 5736
5732/* Initialize tx/rx rings for packet processing. 5737/* Initialize tx/rx rings for packet processing.
@@ -5745,7 +5750,7 @@ static int tg3_init_rings(struct tg3 *tp)
5745 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); 5750 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5746 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES); 5751 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5747 5752
5748 return tg3_rx_prodring_alloc(tp); 5753 return tg3_rx_prodring_alloc(tp, &tp->prodring[0]);
5749} 5754}
5750 5755
5751/* 5756/*
@@ -5776,7 +5781,7 @@ static void tg3_free_consistent(struct tg3 *tp)
5776 tp->hw_stats, tp->stats_mapping); 5781 tp->hw_stats, tp->stats_mapping);
5777 tp->hw_stats = NULL; 5782 tp->hw_stats = NULL;
5778 } 5783 }
5779 tg3_rx_prodring_fini(tp); 5784 tg3_rx_prodring_fini(tp, &tp->prodring[0]);
5780} 5785}
5781 5786
5782/* 5787/*
@@ -5785,7 +5790,7 @@ static void tg3_free_consistent(struct tg3 *tp)
5785 */ 5790 */
5786static int tg3_alloc_consistent(struct tg3 *tp) 5791static int tg3_alloc_consistent(struct tg3 *tp)
5787{ 5792{
5788 if (tg3_rx_prodring_init(tp)) 5793 if (tg3_rx_prodring_init(tp, &tp->prodring[0]))
5789 return -ENOMEM; 5794 return -ENOMEM;
5790 5795
5791 tp->tx_buffers = kzalloc(sizeof(struct tx_ring_info) * 5796 tp->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
@@ -6794,6 +6799,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6794{ 6799{
6795 u32 val, rdmac_mode; 6800 u32 val, rdmac_mode;
6796 int i, err, limit; 6801 int i, err, limit;
6802 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
6797 6803
6798 tg3_disable_ints(tp); 6804 tg3_disable_ints(tp);
6799 6805
@@ -7022,9 +7028,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7022 * configurable. 7028 * configurable.
7023 */ 7029 */
7024 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 7030 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7025 ((u64) tp->rx_std_mapping >> 32)); 7031 ((u64) tpr->rx_std_mapping >> 32));
7026 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 7032 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7027 ((u64) tp->rx_std_mapping & 0xffffffff)); 7033 ((u64) tpr->rx_std_mapping & 0xffffffff));
7028 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 7034 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7029 NIC_SRAM_RX_BUFFER_DESC); 7035 NIC_SRAM_RX_BUFFER_DESC);
7030 7036
@@ -7043,9 +7049,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7043 7049
7044 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { 7050 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7045 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 7051 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7046 ((u64) tp->rx_jumbo_mapping >> 32)); 7052 ((u64) tpr->rx_jmb_mapping >> 32));
7047 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 7053 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7048 ((u64) tp->rx_jumbo_mapping & 0xffffffff)); 7054 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
7049 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 7055 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7050 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT); 7056 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7051 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 7057 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
@@ -7102,14 +7108,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7102 BDINFO_FLAGS_MAXLEN_SHIFT), 7108 BDINFO_FLAGS_MAXLEN_SHIFT),
7103 0); 7109 0);
7104 7110
7105 tp->rx_std_ptr = tp->rx_pending; 7111 tpr->rx_std_ptr = tp->rx_pending;
7106 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, 7112 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7107 tp->rx_std_ptr); 7113 tpr->rx_std_ptr);
7108 7114
7109 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? 7115 tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7110 tp->rx_jumbo_pending : 0; 7116 tp->rx_jumbo_pending : 0;
7111 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, 7117 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7112 tp->rx_jumbo_ptr); 7118 tpr->rx_jmb_ptr);
7113 7119
7114 /* Initialize MAC address and backoff seed. */ 7120 /* Initialize MAC address and backoff seed. */
7115 __tg3_set_mac_addr(tp, 0); 7121 __tg3_set_mac_addr(tp, 0);
@@ -9815,6 +9821,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9815 dma_addr_t map; 9821 dma_addr_t map;
9816 int num_pkts, tx_len, rx_len, i, err; 9822 int num_pkts, tx_len, rx_len, i, err;
9817 struct tg3_rx_buffer_desc *desc; 9823 struct tg3_rx_buffer_desc *desc;
9824 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
9818 9825
9819 if (loopback_mode == TG3_MAC_LOOPBACK) { 9826 if (loopback_mode == TG3_MAC_LOOPBACK) {
9820 /* HW errata - mac loopback fails in some cases on 5780. 9827 /* HW errata - mac loopback fails in some cases on 5780.
@@ -9949,9 +9956,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9949 if (rx_len != tx_len) 9956 if (rx_len != tx_len)
9950 goto out; 9957 goto out;
9951 9958
9952 rx_skb = tp->rx_std_buffers[desc_idx].skb; 9959 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
9953 9960
9954 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping); 9961 map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
9955 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE); 9962 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
9956 9963
9957 for (i = 14; i < tx_len; i++) { 9964 for (i = 14; i < tx_len; i++) {
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index d029b4b9b929..7a27cffb5ada 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2476,6 +2476,17 @@ struct tg3_ethtool_stats {
2476 u64 nic_tx_threshold_hit; 2476 u64 nic_tx_threshold_hit;
2477}; 2477};
2478 2478
2479struct tg3_rx_prodring_set {
2480 u32 rx_std_ptr;
2481 u32 rx_jmb_ptr;
2482 struct tg3_rx_buffer_desc *rx_std;
2483 struct tg3_rx_buffer_desc *rx_jmb;
2484 struct ring_info *rx_std_buffers;
2485 struct ring_info *rx_jmb_buffers;
2486 dma_addr_t rx_std_mapping;
2487 dma_addr_t rx_jmb_mapping;
2488};
2489
2479struct tg3 { 2490struct tg3 {
2480 /* begin "general, frequently-used members" cacheline section */ 2491 /* begin "general, frequently-used members" cacheline section */
2481 2492
@@ -2551,27 +2562,19 @@ struct tg3 {
2551 void (*write32_rx_mbox) (struct tg3 *, u32, 2562 void (*write32_rx_mbox) (struct tg3 *, u32,
2552 u32); 2563 u32);
2553 u32 rx_rcb_ptr; 2564 u32 rx_rcb_ptr;
2554 u32 rx_std_ptr;
2555 u32 rx_jumbo_ptr;
2556 u32 rx_pending; 2565 u32 rx_pending;
2557 u32 rx_jumbo_pending; 2566 u32 rx_jumbo_pending;
2567 u32 rx_std_max_post;
2568 u32 rx_pkt_map_sz;
2558#if TG3_VLAN_TAG_USED 2569#if TG3_VLAN_TAG_USED
2559 struct vlan_group *vlgrp; 2570 struct vlan_group *vlgrp;
2560#endif 2571#endif
2561 2572
2562 struct tg3_rx_buffer_desc *rx_std;
2563 struct ring_info *rx_std_buffers;
2564 dma_addr_t rx_std_mapping;
2565 u32 rx_std_max_post;
2566
2567 struct tg3_rx_buffer_desc *rx_jumbo;
2568 struct ring_info *rx_jumbo_buffers;
2569 dma_addr_t rx_jumbo_mapping;
2570
2571 struct tg3_rx_buffer_desc *rx_rcb; 2573 struct tg3_rx_buffer_desc *rx_rcb;
2572 dma_addr_t rx_rcb_mapping; 2574 dma_addr_t rx_rcb_mapping;
2573 2575
2574 u32 rx_pkt_map_sz; 2576 struct tg3_rx_prodring_set prodring[1];
2577
2575 2578
2576 /* begin "everything else" cacheline(s) section */ 2579 /* begin "everything else" cacheline(s) section */
2577 struct net_device_stats net_stats; 2580 struct net_device_stats net_stats;