author    Francois Romieu <romieu@fr.zoreil.com>    2005-05-12 19:31:31 -0400
committer Jeff Garzik <jgarzik@pobox.com>           2005-05-12 19:31:31 -0400
commit    5734418d4f3420352eae38c8fcec699bf09874c1
tree      3f9e087502565aeae22a6ac9a40cbc7ff67adbfc  /drivers/net/8139cp.c
parent    fcec34565827f2edb29d124498aa8f561455f15d
[PATCH] 8139cp: SG support fixes
- suspicious length in pci_unmap_single;
- wait for the last frag before freeing the relevant skb;
- no need to crash when facing some unexpected csum combination.
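For context, the common thread in the first two fixes is bookkeeping: record the length that was handed to pci_map_single() in each ring_info entry, reuse that same value for the matching pci_unmap_single(), and free the skb only once the descriptor carrying its last fragment has been handled (skb->len is the wrong unmap length for fragment descriptors). The sketch below shows that pattern in isolation; it is not code from 8139cp.c, and the map_tx_fragment()/unmap_tx_fragment() helpers are hypothetical names.

/* Illustrative sketch only -- assumes the driver's cp_private/ring_info
 * context; not verbatim 8139cp.c code. */
struct ring_info {
	struct sk_buff	*skb;
	dma_addr_t	mapping;
	u32		len;	/* length passed to pci_map_single() */
};

/* hypothetical helper: map one tx fragment and remember what was mapped */
static void map_tx_fragment(struct cp_private *cp, unsigned entry,
			    struct sk_buff *skb, void *data, u32 len)
{
	cp->tx_skb[entry].mapping = pci_map_single(cp->pdev, data, len,
						   PCI_DMA_TODEVICE);
	cp->tx_skb[entry].skb = skb;
	cp->tx_skb[entry].len = len;	/* reused at unmap time */
}

/* hypothetical helper: unmap with the recorded length; free the skb only
 * when this descriptor carried its last fragment */
static void unmap_tx_fragment(struct cp_private *cp, unsigned entry,
			      int last_frag)
{
	pci_unmap_single(cp->pdev, cp->tx_skb[entry].mapping,
			 cp->tx_skb[entry].len, PCI_DMA_TODEVICE);
	if (last_frag)
		dev_kfree_skb(cp->tx_skb[entry].skb);
}

In cp_clean_rings() the "last fragment" test reads LastFrag from the descriptor's opts1 field, which is why the memset() of the descriptor rings moves below the cleanup loop in the final hunk of this patch.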
Diffstat (limited to 'drivers/net/8139cp.c')
-rw-r--r--  drivers/net/8139cp.c  25
1 files changed, 13 insertions, 12 deletions
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index a7573dd92f26..212eb90dfcc7 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -315,7 +315,7 @@ struct cp_desc {
 struct ring_info {
 	struct sk_buff		*skb;
 	dma_addr_t		mapping;
-	unsigned		frag;
+	u32			len;
 };
 
 struct cp_dma_stats {
@@ -710,7 +710,7 @@ static void cp_tx (struct cp_private *cp)
 			BUG();
 
 		pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
-				 skb->len, PCI_DMA_TODEVICE);
+				 cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);
 
 		if (status & LastFrag) {
 			if (status & (TxError | TxFIFOUnder)) {
@@ -801,7 +801,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 			else if (ip->protocol == IPPROTO_UDP)
 				flags |= IPCS | UDPCS;
 			else
-				BUG();
+				WARN_ON(1);	/* we need a WARN() */
 		}
 
 		txd->opts1 = cpu_to_le32(flags);
@@ -809,7 +809,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 
 		cp->tx_skb[entry].skb = skb;
 		cp->tx_skb[entry].mapping = mapping;
-		cp->tx_skb[entry].frag = 0;
+		cp->tx_skb[entry].len = len;
 		entry = NEXT_TX(entry);
 	} else {
 		struct cp_desc *txd;
@@ -827,7 +827,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 					       first_len, PCI_DMA_TODEVICE);
 		cp->tx_skb[entry].skb = skb;
 		cp->tx_skb[entry].mapping = first_mapping;
-		cp->tx_skb[entry].frag = 1;
+		cp->tx_skb[entry].len = first_len;
 		entry = NEXT_TX(entry);
 
 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -870,7 +870,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 
 			cp->tx_skb[entry].skb = skb;
 			cp->tx_skb[entry].mapping = mapping;
-			cp->tx_skb[entry].frag = frag + 2;
+			cp->tx_skb[entry].len = len;
 			entry = NEXT_TX(entry);
 		}
 
@@ -1084,7 +1084,6 @@ static int cp_refill_rx (struct cp_private *cp)
 		cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
 			skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		cp->rx_skb[i].skb = skb;
-		cp->rx_skb[i].frag = 0;
 
 		cp->rx_ring[i].opts2 = 0;
 		cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping);
@@ -1136,9 +1135,6 @@ static void cp_clean_rings (struct cp_private *cp)
 {
 	unsigned i;
 
-	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
-	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
-
 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
 		if (cp->rx_skb[i].skb) {
 			pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping,
@@ -1150,13 +1146,18 @@ static void cp_clean_rings (struct cp_private *cp)
 	for (i = 0; i < CP_TX_RING_SIZE; i++) {
 		if (cp->tx_skb[i].skb) {
 			struct sk_buff *skb = cp->tx_skb[i].skb;
+
 			pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping,
-					 skb->len, PCI_DMA_TODEVICE);
-			dev_kfree_skb(skb);
+					 cp->tx_skb[i].len, PCI_DMA_TODEVICE);
+			if (le32_to_cpu(cp->tx_ring[i].opts1) & LastFrag)
+				dev_kfree_skb(skb);
 			cp->net_stats.tx_dropped++;
 		}
 	}
 
+	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
+	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
+
 	memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE);
 	memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE);
 }