about summary refs log tree commit diff stats
path: root/drivers/net/ethernet/realtek/8139cp.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/ethernet/realtek/8139cp.c')
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c  48
1 file changed, 45 insertions, 3 deletions
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index e6acb9fa5767..6f35f8404d68 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -478,7 +478,7 @@ rx_status_loop:
478 478
479 while (1) { 479 while (1) {
480 u32 status, len; 480 u32 status, len;
481 dma_addr_t mapping; 481 dma_addr_t mapping, new_mapping;
482 struct sk_buff *skb, *new_skb; 482 struct sk_buff *skb, *new_skb;
483 struct cp_desc *desc; 483 struct cp_desc *desc;
484 const unsigned buflen = cp->rx_buf_sz; 484 const unsigned buflen = cp->rx_buf_sz;
@@ -520,6 +520,13 @@ rx_status_loop:
520 goto rx_next; 520 goto rx_next;
521 } 521 }
522 522
523 new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
524 PCI_DMA_FROMDEVICE);
525 if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
526 dev->stats.rx_dropped++;
527 goto rx_next;
528 }
529
523 dma_unmap_single(&cp->pdev->dev, mapping, 530 dma_unmap_single(&cp->pdev->dev, mapping,
524 buflen, PCI_DMA_FROMDEVICE); 531 buflen, PCI_DMA_FROMDEVICE);
525 532
@@ -531,12 +538,11 @@ rx_status_loop:
531 538
532 skb_put(skb, len); 539 skb_put(skb, len);
533 540
534 mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
535 PCI_DMA_FROMDEVICE);
536 cp->rx_skb[rx_tail] = new_skb; 541 cp->rx_skb[rx_tail] = new_skb;
537 542
538 cp_rx_skb(cp, skb, desc); 543 cp_rx_skb(cp, skb, desc);
539 rx++; 544 rx++;
545 mapping = new_mapping;
540 546
541rx_next: 547rx_next:
542 cp->rx_ring[rx_tail].opts2 = 0; 548 cp->rx_ring[rx_tail].opts2 = 0;
@@ -716,6 +722,22 @@ static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
716 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; 722 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
717} 723}
718 724
725static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
726 int first, int entry_last)
727{
728 int frag, index;
729 struct cp_desc *txd;
730 skb_frag_t *this_frag;
731 for (frag = 0; frag+first < entry_last; frag++) {
732 index = first+frag;
733 cp->tx_skb[index] = NULL;
734 txd = &cp->tx_ring[index];
735 this_frag = &skb_shinfo(skb)->frags[frag];
736 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
737 skb_frag_size(this_frag), PCI_DMA_TODEVICE);
738 }
739}
740
719static netdev_tx_t cp_start_xmit (struct sk_buff *skb, 741static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
720 struct net_device *dev) 742 struct net_device *dev)
721{ 743{
@@ -749,6 +771,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
749 771
750 len = skb->len; 772 len = skb->len;
751 mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); 773 mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
774 if (dma_mapping_error(&cp->pdev->dev, mapping))
775 goto out_dma_error;
776
752 txd->opts2 = opts2; 777 txd->opts2 = opts2;
753 txd->addr = cpu_to_le64(mapping); 778 txd->addr = cpu_to_le64(mapping);
754 wmb(); 779 wmb();
@@ -786,6 +811,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
786 first_len = skb_headlen(skb); 811 first_len = skb_headlen(skb);
787 first_mapping = dma_map_single(&cp->pdev->dev, skb->data, 812 first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
788 first_len, PCI_DMA_TODEVICE); 813 first_len, PCI_DMA_TODEVICE);
814 if (dma_mapping_error(&cp->pdev->dev, first_mapping))
815 goto out_dma_error;
816
789 cp->tx_skb[entry] = skb; 817 cp->tx_skb[entry] = skb;
790 entry = NEXT_TX(entry); 818 entry = NEXT_TX(entry);
791 819
@@ -799,6 +827,11 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
799 mapping = dma_map_single(&cp->pdev->dev, 827 mapping = dma_map_single(&cp->pdev->dev,
800 skb_frag_address(this_frag), 828 skb_frag_address(this_frag),
801 len, PCI_DMA_TODEVICE); 829 len, PCI_DMA_TODEVICE);
830 if (dma_mapping_error(&cp->pdev->dev, mapping)) {
831 unwind_tx_frag_mapping(cp, skb, first_entry, entry);
832 goto out_dma_error;
833 }
834
802 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0; 835 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
803 836
804 ctrl = eor | len | DescOwn; 837 ctrl = eor | len | DescOwn;
@@ -859,11 +892,16 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
859 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) 892 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
860 netif_stop_queue(dev); 893 netif_stop_queue(dev);
861 894
895out_unlock:
862 spin_unlock_irqrestore(&cp->lock, intr_flags); 896 spin_unlock_irqrestore(&cp->lock, intr_flags);
863 897
864 cpw8(TxPoll, NormalTxPoll); 898 cpw8(TxPoll, NormalTxPoll);
865 899
866 return NETDEV_TX_OK; 900 return NETDEV_TX_OK;
901out_dma_error:
902 kfree_skb(skb);
903 cp->dev->stats.tx_dropped++;
904 goto out_unlock;
867} 905}
868 906
869/* Set or clear the multicast filter for this adaptor. 907/* Set or clear the multicast filter for this adaptor.
@@ -1054,6 +1092,10 @@ static int cp_refill_rx(struct cp_private *cp)
1054 1092
1055 mapping = dma_map_single(&cp->pdev->dev, skb->data, 1093 mapping = dma_map_single(&cp->pdev->dev, skb->data,
1056 cp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1094 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1095 if (dma_mapping_error(&cp->pdev->dev, mapping)) {
1096 kfree_skb(skb);
1097 goto err_out;
1098 }
1057 cp->rx_skb[i] = skb; 1099 cp->rx_skb[i] = skb;
1058 1100
1059 cp->rx_ring[i].opts2 = 0; 1101 cp->rx_ring[i].opts2 = 0;