-rw-r--r--	drivers/net/tg3.c	173
-rw-r--r--	drivers/net/tg3.h	6
2 files changed, 132 insertions(+), 47 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 6e6db955b4a9..302ea0b7b235 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4357,7 +4357,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
 	txq = netdev_get_tx_queue(tp->dev, index);
 
 	while (sw_idx != hw_idx) {
-		struct tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
+		struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
 		struct sk_buff *skb = ri->skb;
 		int i, tx_bug = 0;
 
@@ -4366,7 +4366,10 @@ static void tg3_tx(struct tg3_napi *tnapi)
 			return;
 		}
 
-		skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
+		pci_unmap_single(tp->pdev,
+				 pci_unmap_addr(ri, mapping),
+				 skb_headlen(skb),
+				 PCI_DMA_TODEVICE);
 
 		ri->skb = NULL;
 
@@ -4376,6 +4379,11 @@ static void tg3_tx(struct tg3_napi *tnapi)
 			ri = &tnapi->tx_buffers[sw_idx];
 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
 				tx_bug = 1;
+
+			pci_unmap_page(tp->pdev,
+				       pci_unmap_addr(ri, mapping),
+				       skb_shinfo(skb)->frags[i].size,
+				       PCI_DMA_TODEVICE);
 			sw_idx = NEXT_TX(sw_idx);
 		}
 
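The three hunks above convert the TX-completion path: instead of a single skb_dma_unmap() covering the whole skb, the head and each page fragment are now unmapped individually, using the DMA address stashed in each ring slot. A minimal sketch of the pattern, assuming a hypothetical ex_ring_info slot type and example_unmap_tx_skb() helper, with ring wrap-around handling omitted:

#include <linux/pci.h>
#include <linux/skbuff.h>

struct ex_ring_info {
	struct sk_buff *skb;
	DECLARE_PCI_UNMAP_ADDR(mapping)
};

/* The head was mapped with pci_map_single(), fragments with
 * pci_map_page(); each needs the matching unmap call. */
static void example_unmap_tx_skb(struct pci_dev *pdev,
				 struct ex_ring_info *ri,
				 struct sk_buff *skb)
{
	int i;

	/* Linear (head) portion of the skb. */
	pci_unmap_single(pdev, pci_unmap_addr(ri, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);

	/* Each fragment occupies the following ring slot. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		ri++;
		pci_unmap_page(pdev, pci_unmap_addr(ri, mapping),
			       skb_shinfo(skb)->frags[i].size,
			       PCI_DMA_TODEVICE);
	}
}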
@@ -5334,17 +5342,21 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 	} else {
 		/* New SKB is guaranteed to be linear. */
 		entry = *start;
-		ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
-		new_addr = skb_shinfo(new_skb)->dma_head;
+		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
+					  PCI_DMA_TODEVICE);
+		/* Make sure the mapping succeeded */
+		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
+			ret = -1;
+			dev_kfree_skb(new_skb);
+			new_skb = NULL;
 
 		/* Make sure new skb does not cross any 4G boundaries.
 		 * Drop the packet if it does.
 		 */
-		if (ret || ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
-			    tg3_4g_overflow_test(new_addr, new_skb->len))) {
-			if (!ret)
-				skb_dma_unmap(&tp->pdev->dev, new_skb,
-					      DMA_TO_DEVICE);
+		} else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
+			   tg3_4g_overflow_test(new_addr, new_skb->len)) {
+			pci_unmap_single(tp->pdev, new_addr, new_skb->len,
+					 PCI_DMA_TODEVICE);
 			ret = -1;
 			dev_kfree_skb(new_skb);
 			new_skb = NULL;
@@ -5358,15 +5370,28 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 	/* Now clean up the sw ring entries. */
 	i = 0;
 	while (entry != last_plus_one) {
+		int len;
+
 		if (i == 0)
-			tnapi->tx_buffers[entry].skb = new_skb;
+			len = skb_headlen(skb);
 		else
+			len = skb_shinfo(skb)->frags[i-1].size;
+
+		pci_unmap_single(tp->pdev,
+				 pci_unmap_addr(&tnapi->tx_buffers[entry],
+						mapping),
+				 len, PCI_DMA_TODEVICE);
+		if (i == 0) {
+			tnapi->tx_buffers[entry].skb = new_skb;
+			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+					   new_addr);
+		} else {
 			tnapi->tx_buffers[entry].skb = NULL;
+		}
 		entry = NEXT_TX(entry);
 		i++;
 	}
 
-	skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
 	dev_kfree_skb(skb);
 
 	return ret;
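A note on the two hwbug hunks above: skb_dma_map() used to report failure through its return value, so with a bare pci_map_single() the returned address itself must be validated with pci_dma_mapping_error() before it is handed to the hardware. A sketch of the idiom, with a hypothetical helper name:

#include <linux/pci.h>
#include <linux/skbuff.h>

/* Map the linear skb data and verify the address before use. */
static int example_map_linear(struct pci_dev *pdev, struct sk_buff *skb,
			      dma_addr_t *addr)
{
	*addr = pci_map_single(pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, *addr))
		return -1;	/* caller drops the packet */
	return 0;
}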
@@ -5403,10 +5428,11 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
 {
 	struct tg3 *tp = netdev_priv(dev);
 	u32 len, entry, base_flags, mss;
-	struct skb_shared_info *sp;
 	dma_addr_t mapping;
 	struct tg3_napi *tnapi;
 	struct netdev_queue *txq;
+	unsigned int i, last;
+
 
 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
@@ -5477,18 +5503,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
 			    (vlan_tx_tag_get(skb) << 16));
 #endif
 
-	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
+	len = skb_headlen(skb);
+
+	/* Queue skb data, a.k.a. the main skb fragment. */
+	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(tp->pdev, mapping)) {
 		dev_kfree_skb(skb);
 		goto out_unlock;
 	}
 
-	sp = skb_shinfo(skb);
-
-	mapping = sp->dma_head;
-
 	tnapi->tx_buffers[entry].skb = skb;
-
-	len = skb_headlen(skb);
+	pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
 	    !mss && skb->len > ETH_DATA_LEN)
@@ -5501,15 +5526,21 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
 
 	/* Now loop through additional data fragments, and queue them. */
 	if (skb_shinfo(skb)->nr_frags > 0) {
-		unsigned int i, last;
-
 		last = skb_shinfo(skb)->nr_frags - 1;
 		for (i = 0; i <= last; i++) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			len = frag->size;
-			mapping = sp->dma_maps[i];
+			mapping = pci_map_page(tp->pdev,
+					       frag->page,
+					       frag->page_offset,
+					       len, PCI_DMA_TODEVICE);
+			if (pci_dma_mapping_error(tp->pdev, mapping))
+				goto dma_error;
+
 			tnapi->tx_buffers[entry].skb = NULL;
+			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+					   mapping);
 
 			tg3_set_txd(tnapi, entry, mapping, len,
 				    base_flags, (i == last) | (mss << 1));
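Fragments are now mapped one page at a time. In this kernel generation a skb_frag_t carries an explicit page/offset/size triple, which lines up directly with pci_map_page()'s arguments; a sketch with a hypothetical helper:

#include <linux/pci.h>
#include <linux/skbuff.h>

/* Map one paged fragment for device-bound DMA. */
static dma_addr_t example_map_frag(struct pci_dev *pdev, skb_frag_t *frag)
{
	return pci_map_page(pdev, frag->page, frag->page_offset,
			    frag->size, PCI_DMA_TODEVICE);
}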
@@ -5532,6 +5563,27 @@ out_unlock:
 	mmiowb();
 
 	return NETDEV_TX_OK;
+
+dma_error:
+	last = i;
+	entry = tnapi->tx_prod;
+	tnapi->tx_buffers[entry].skb = NULL;
+	pci_unmap_single(tp->pdev,
+			 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
+			 skb_headlen(skb),
+			 PCI_DMA_TODEVICE);
+	for (i = 0; i <= last; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		entry = NEXT_TX(entry);
+
+		pci_unmap_page(tp->pdev,
+			       pci_unmap_addr(&tnapi->tx_buffers[entry],
+					      mapping),
+			       frag->size, PCI_DMA_TODEVICE);
+	}
+
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
 }
 
 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
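The new dma_error label unwinds a partially mapped skb: the head mapping and the fragments that mapped successfully are released in ring order, then the packet is dropped. Sketched below under the assumption that fragment 'failed' is the one that was rejected, with hypothetical names and NEXT_TX() wrap handling omitted:

#include <linux/pci.h>
#include <linux/skbuff.h>

struct ex_ring_info {
	struct sk_buff *skb;
	DECLARE_PCI_UNMAP_ADDR(mapping)
};

/* Release the head plus fragments 0..failed-1, then free the skb. */
static void example_unwind(struct pci_dev *pdev, struct ex_ring_info *ring,
			   u32 entry, struct sk_buff *skb, unsigned int failed)
{
	unsigned int i;

	ring[entry].skb = NULL;
	pci_unmap_single(pdev, pci_unmap_addr(&ring[entry], mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);

	for (i = 0; i < failed; i++) {
		entry++;	/* NEXT_TX(entry) in the driver itself */
		pci_unmap_page(pdev, pci_unmap_addr(&ring[entry], mapping),
			       skb_shinfo(skb)->frags[i].size,
			       PCI_DMA_TODEVICE);
	}
	dev_kfree_skb(skb);
}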
@@ -5579,11 +5631,12 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 {
 	struct tg3 *tp = netdev_priv(dev);
 	u32 len, entry, base_flags, mss;
-	struct skb_shared_info *sp;
 	int would_hit_hwbug;
 	dma_addr_t mapping;
 	struct tg3_napi *tnapi;
 	struct netdev_queue *txq;
+	unsigned int i, last;
+
 
 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
@@ -5678,21 +5731,19 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 	    !mss && skb->len > ETH_DATA_LEN)
 		base_flags |= TXD_FLAG_JMB_PKT;
 
-	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
+	len = skb_headlen(skb);
+
+	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(tp->pdev, mapping)) {
 		dev_kfree_skb(skb);
 		goto out_unlock;
 	}
 
-	sp = skb_shinfo(skb);
-
-	mapping = sp->dma_head;
-
 	tnapi->tx_buffers[entry].skb = skb;
+	pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
 
 	would_hit_hwbug = 0;
 
-	len = skb_headlen(skb);
-
 	if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
 		would_hit_hwbug = 1;
 
@@ -5714,16 +5765,21 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 
 	/* Now loop through additional data fragments, and queue them. */
 	if (skb_shinfo(skb)->nr_frags > 0) {
-		unsigned int i, last;
-
 		last = skb_shinfo(skb)->nr_frags - 1;
 		for (i = 0; i <= last; i++) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			len = frag->size;
-			mapping = sp->dma_maps[i];
+			mapping = pci_map_page(tp->pdev,
+					       frag->page,
+					       frag->page_offset,
+					       len, PCI_DMA_TODEVICE);
 
 			tnapi->tx_buffers[entry].skb = NULL;
+			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+					   mapping);
+			if (pci_dma_mapping_error(tp->pdev, mapping))
+				goto dma_error;
 
 			if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
 			    len <= 8)
@@ -5779,6 +5835,27 @@ out_unlock:
 	mmiowb();
 
 	return NETDEV_TX_OK;
+
+dma_error:
+	last = i;
+	entry = tnapi->tx_prod;
+	tnapi->tx_buffers[entry].skb = NULL;
+	pci_unmap_single(tp->pdev,
+			 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
+			 skb_headlen(skb),
+			 PCI_DMA_TODEVICE);
+	for (i = 0; i <= last; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		entry = NEXT_TX(entry);
+
+		pci_unmap_page(tp->pdev,
+			       pci_unmap_addr(&tnapi->tx_buffers[entry],
+					      mapping),
+			       frag->size, PCI_DMA_TODEVICE);
+	}
+
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
 }
 
 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
@@ -6046,8 +6123,9 @@ static void tg3_free_rings(struct tg3 *tp)
 			continue;
 
 		for (i = 0; i < TG3_TX_RING_SIZE; ) {
-			struct tx_ring_info *txp;
+			struct ring_info *txp;
 			struct sk_buff *skb;
+			unsigned int k;
 
 			txp = &tnapi->tx_buffers[i];
 			skb = txp->skb;
@@ -6057,11 +6135,22 @@ static void tg3_free_rings(struct tg3 *tp)
 				continue;
 			}
 
-			skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
-
+			pci_unmap_single(tp->pdev,
+					 pci_unmap_addr(txp, mapping),
+					 skb_headlen(skb),
+					 PCI_DMA_TODEVICE);
 			txp->skb = NULL;
 
-			i += skb_shinfo(skb)->nr_frags + 1;
+			i++;
+
+			for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
+				txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
+				pci_unmap_page(tp->pdev,
+					       pci_unmap_addr(txp, mapping),
+					       skb_shinfo(skb)->frags[k].size,
+					       PCI_DMA_TODEVICE);
+				i++;
+			}
 
 			dev_kfree_skb_any(skb);
 		}
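The rewritten free loop steps slot by slot and wraps the index with "i & (TG3_TX_RING_SIZE - 1)", which only works because the TX ring size is a power of two. A sketch of the idiom with a hypothetical ring size:

#define EX_RING_SIZE	512	/* must be a power of two */

/* Wrap a free-running index without a modulo operation. */
static inline unsigned int ex_ring_idx(unsigned int i)
{
	return i & (EX_RING_SIZE - 1);
}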
@@ -6231,7 +6320,7 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 
 		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
 
-		tnapi->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
+		tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
 					    TG3_TX_RING_SIZE, GFP_KERNEL);
 		if (!tnapi->tx_buffers)
 			goto err_out;
@@ -10637,7 +10726,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 	for (i = 14; i < tx_len; i++)
 		tx_data[i] = (u8) (i & 0xff);
 
-	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
+	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(tp->pdev, map)) {
 		dev_kfree_skb(skb);
 		return -EIO;
 	}
@@ -10651,8 +10741,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 
 	num_pkts = 0;
 
-	tg3_set_txd(tnapi, tnapi->tx_prod,
-		    skb_shinfo(skb)->dma_head, tx_len, 0, 1);
+	tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
 
 	tnapi->tx_prod++;
 	num_pkts++;
@@ -10676,7 +10765,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 			break;
 	}
 
-	skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
+	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
 	dev_kfree_skb(skb);
 
 	if (tx_idx != tnapi->tx_prod)
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 453a34fb72b9..89725231f7b9 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2441,10 +2441,6 @@ struct ring_info {
 	DECLARE_PCI_UNMAP_ADDR(mapping)
 };
 
-struct tx_ring_info {
-	struct sk_buff			*skb;
-};
-
 struct tg3_config_info {
 	u32				flags;
 };
@@ -2608,7 +2604,7 @@ struct tg3_napi {
 
 	struct tg3_rx_buffer_desc	*rx_rcb;
 	struct tg3_tx_buffer_desc	*tx_ring;
-	struct tx_ring_info		*tx_buffers;
+	struct ring_info		*tx_buffers;
 
 	dma_addr_t			status_mapping;
 	dma_addr_t			rx_rcb_mapping;
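Dropping struct tx_ring_info works because struct ring_info already pairs the skb pointer with DECLARE_PCI_UNMAP_ADDR(mapping), which expands to a real dma_addr_t field only on platforms that must keep the address around for unmapping. A sketch of the macro trio in use, with hypothetical names:

#include <linux/pci.h>
#include <linux/skbuff.h>

struct ex_ring_info {
	struct sk_buff *skb;
	DECLARE_PCI_UNMAP_ADDR(mapping)	/* may expand to nothing */
};

static void ex_save_mapping(struct ex_ring_info *ri, dma_addr_t addr)
{
	/* Compiles to a no-op where the platform keeps no unmap address. */
	pci_unmap_addr_set(ri, mapping, addr);
}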