 drivers/net/pcnet32.c | 302
 1 file changed, 141 insertions(+), 161 deletions(-)
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 16b9538370cc..bf72aa80ccb6 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1124,161 +1124,140 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
 	return 1;
 }
 
+/*
+ * process one receive descriptor entry
+ */
+
+static void pcnet32_rx_entry(struct net_device *dev,
+			     struct pcnet32_private *lp,
+			     struct pcnet32_rx_head *rxp,
+			     int entry)
+{
+	int status = (short)le16_to_cpu(rxp->status) >> 8;
+	int rx_in_place = 0;
+	struct sk_buff *skb;
+	short pkt_len;
+
+	if (status != 0x03) {	/* There was an error. */
+		/*
+		 * There is a tricky error noted by John Murphy,
+		 * <murf@perftech.com> to Russ Nelson: Even with full-sized
+		 * buffers it's possible for a jabber packet to use two
+		 * buffers, with only the last correctly noting the error.
+		 */
+		if (status & 0x01)	/* Only count a general error at the */
+			lp->stats.rx_errors++;	/* end of a packet. */
+		if (status & 0x20)
+			lp->stats.rx_frame_errors++;
+		if (status & 0x10)
+			lp->stats.rx_over_errors++;
+		if (status & 0x08)
+			lp->stats.rx_crc_errors++;
+		if (status & 0x04)
+			lp->stats.rx_fifo_errors++;
+		return;
+	}
+
+	pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4;
+
+	/* Discard oversize frames. */
+	if (unlikely(pkt_len > PKT_BUF_SZ - 2)) {
+		if (netif_msg_drv(lp))
+			printk(KERN_ERR "%s: Impossible packet size %d!\n",
+			       dev->name, pkt_len);
+		lp->stats.rx_errors++;
+		return;
+	}
+	if (pkt_len < 60) {
+		if (netif_msg_rx_err(lp))
+			printk(KERN_ERR "%s: Runt packet!\n", dev->name);
+		lp->stats.rx_errors++;
+		return;
+	}
+
+	if (pkt_len > rx_copybreak) {
+		struct sk_buff *newskb;
+
+		if ((newskb = dev_alloc_skb(PKT_BUF_SZ))) {
+			skb_reserve(newskb, 2);
+			skb = lp->rx_skbuff[entry];
+			pci_unmap_single(lp->pci_dev,
+					 lp->rx_dma_addr[entry],
+					 PKT_BUF_SZ - 2,
+					 PCI_DMA_FROMDEVICE);
+			skb_put(skb, pkt_len);
+			lp->rx_skbuff[entry] = newskb;
+			newskb->dev = dev;
+			lp->rx_dma_addr[entry] =
+			    pci_map_single(lp->pci_dev,
+					   newskb->data,
+					   PKT_BUF_SZ - 2,
+					   PCI_DMA_FROMDEVICE);
+			rxp->base = le32_to_cpu(lp->rx_dma_addr[entry]);
+			rx_in_place = 1;
+		} else
+			skb = NULL;
+	} else {
+		skb = dev_alloc_skb(pkt_len + 2);
+	}
+
+	if (skb == NULL) {
+		if (netif_msg_drv(lp))
+			printk(KERN_ERR
+			       "%s: Memory squeeze, dropping packet.\n",
+			       dev->name);
+		lp->stats.rx_dropped++;
+		return;
+	}
+	skb->dev = dev;
+	if (!rx_in_place) {
+		skb_reserve(skb, 2);	/* 16 byte align */
+		skb_put(skb, pkt_len);	/* Make room */
+		pci_dma_sync_single_for_cpu(lp->pci_dev,
+					    lp->rx_dma_addr[entry],
+					    PKT_BUF_SZ - 2,
+					    PCI_DMA_FROMDEVICE);
+		eth_copy_and_sum(skb,
+				 (unsigned char *)(lp->rx_skbuff[entry]->data),
+				 pkt_len, 0);
+		pci_dma_sync_single_for_device(lp->pci_dev,
+					       lp->rx_dma_addr[entry],
+					       PKT_BUF_SZ - 2,
+					       PCI_DMA_FROMDEVICE);
+	}
+	lp->stats.rx_bytes += skb->len;
+	skb->protocol = eth_type_trans(skb, dev);
+	netif_rx(skb);
+	dev->last_rx = jiffies;
+	lp->stats.rx_packets++;
+	return;
+}
+
 
-static int pcnet32_rx(struct net_device *dev)
+static void pcnet32_rx(struct net_device *dev)
 {
 	struct pcnet32_private *lp = dev->priv;
 	int entry = lp->cur_rx & lp->rx_mod_mask;
+	struct pcnet32_rx_head *rxp = &lp->rx_ring[entry];
+	int npackets = 0;
 	int boguscnt = lp->rx_ring_size / 2;
 
 	/* If we own the next entry, it's a new packet. Send it up. */
-	while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
-		int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8;
-
-		if (status != 0x03) {	/* There was an error. */
-			/*
-			 * There is a tricky error noted by John Murphy,
-			 * <murf@perftech.com> to Russ Nelson: Even with full-sized
-			 * buffers it's possible for a jabber packet to use two
-			 * buffers, with only the last correctly noting the error.
-			 */
-			if (status & 0x01)	/* Only count a general error at the */
-				lp->stats.rx_errors++;	/* end of a packet. */
-			if (status & 0x20)
-				lp->stats.rx_frame_errors++;
-			if (status & 0x10)
-				lp->stats.rx_over_errors++;
-			if (status & 0x08)
-				lp->stats.rx_crc_errors++;
-			if (status & 0x04)
-				lp->stats.rx_fifo_errors++;
-			lp->rx_ring[entry].status &= le16_to_cpu(0x03ff);
-		} else {
-			/* Malloc up new buffer, compatible with net-2e. */
-			short pkt_len =
-			    (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff)
-			    - 4;
-			struct sk_buff *skb;
-
-			/* Discard oversize frames. */
-			if (unlikely(pkt_len > PKT_BUF_SZ - 2)) {
-				if (netif_msg_drv(lp))
-					printk(KERN_ERR
-					       "%s: Impossible packet size %d!\n",
-					       dev->name, pkt_len);
-				lp->stats.rx_errors++;
-			} else if (pkt_len < 60) {
-				if (netif_msg_rx_err(lp))
-					printk(KERN_ERR "%s: Runt packet!\n",
-					       dev->name);
-				lp->stats.rx_errors++;
-			} else {
-				int rx_in_place = 0;
-
-				if (pkt_len > rx_copybreak) {
-					struct sk_buff *newskb;
-
-					if ((newskb = dev_alloc_skb(PKT_BUF_SZ))) {
-						skb_reserve(newskb, 2);
-						skb = lp->rx_skbuff[entry];
-						pci_unmap_single(lp->pci_dev,
-								 lp->rx_dma_addr[entry],
-								 PKT_BUF_SZ - 2,
-								 PCI_DMA_FROMDEVICE);
-						skb_put(skb, pkt_len);
-						lp->rx_skbuff[entry] = newskb;
-						newskb->dev = dev;
-						lp->rx_dma_addr[entry] =
-						    pci_map_single(lp->pci_dev,
-								   newskb->data,
-								   PKT_BUF_SZ - 2,
-								   PCI_DMA_FROMDEVICE);
-						lp->rx_ring[entry].base =
-						    le32_to_cpu(lp->rx_dma_addr[entry]);
-						rx_in_place = 1;
-					} else
-						skb = NULL;
-				} else {
-					skb = dev_alloc_skb(pkt_len + 2);
-				}
-
-				if (skb == NULL) {
-					int i;
-					if (netif_msg_drv(lp))
-						printk(KERN_ERR
-						       "%s: Memory squeeze, deferring packet.\n",
-						       dev->name);
-					for (i = 0; i < lp->rx_ring_size; i++)
-						if ((short)
-						    le16_to_cpu(lp->rx_ring[(entry + i)
-								& lp->rx_mod_mask].status) < 0)
-							break;
-
-					if (i > lp->rx_ring_size - 2) {
-						lp->stats.rx_dropped++;
-						lp->rx_ring[entry].status |=
-						    le16_to_cpu(0x8000);
-						wmb();	/* Make sure adapter sees owner change */
-						lp->cur_rx++;
-					}
-					break;
-				}
-				skb->dev = dev;
-				if (!rx_in_place) {
-					skb_reserve(skb, 2);	/* 16 byte align */
-					skb_put(skb, pkt_len);	/* Make room */
-					pci_dma_sync_single_for_cpu(lp->pci_dev,
-								    lp->rx_dma_addr[entry],
-								    PKT_BUF_SZ - 2,
-								    PCI_DMA_FROMDEVICE);
-					eth_copy_and_sum(skb,
-							 (unsigned char *)(lp->rx_skbuff[entry]->data),
-							 pkt_len, 0);
-					pci_dma_sync_single_for_device(lp->pci_dev,
-								       lp->rx_dma_addr[entry],
-								       PKT_BUF_SZ - 2,
-								       PCI_DMA_FROMDEVICE);
-				}
-				lp->stats.rx_bytes += skb->len;
-				skb->protocol = eth_type_trans(skb, dev);
-				netif_rx(skb);
-				dev->last_rx = jiffies;
-				lp->stats.rx_packets++;
-			}
-		}
+	while (boguscnt > npackets && (short)le16_to_cpu(rxp->status) >= 0) {
+		pcnet32_rx_entry(dev, lp, rxp, entry);
+		npackets += 1;
 		/*
-		 * The docs say that the buffer length isn't touched, but Andrew Boyd
-		 * of QNX reports that some revs of the 79C965 clear it.
+		 * The docs say that the buffer length isn't touched, but Andrew
+		 * Boyd of QNX reports that some revs of the 79C965 clear it.
 		 */
-		lp->rx_ring[entry].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
-		wmb();	/* Make sure owner changes after all others are visible */
-		lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
+		rxp->buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
+		wmb();	/* Make sure owner changes after others are visible */
+		rxp->status = le16_to_cpu(0x8000);
 		entry = (++lp->cur_rx) & lp->rx_mod_mask;
-		if (--boguscnt <= 0)
-			break;	/* don't stay in loop forever */
+		rxp = &lp->rx_ring[entry];
 	}
 
-	return 0;
+	return;
 }
 
 static int pcnet32_tx(struct net_device *dev, u16 csr0)
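
An aside on the rx_copybreak strategy that the new pcnet32_rx_entry() keeps
intact: frames larger than rx_copybreak are handed up in their original
full-size DMA buffer and the ring slot gets a freshly allocated replacement,
while smaller frames are copied into a right-sized skb so the big buffer can
be reposted immediately. The stand-alone user-space sketch below mirrors only
the shape of that decision; rx_slot, deliver() and rx_entry() are hypothetical
stand-ins for the skb and PCI DMA machinery, not driver symbols.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PKT_BUF_SZ 1544			/* full-size receive buffer */
static int rx_copybreak = 200;		/* copy frames at or below this size */

/* One receive ring slot holding a reusable full-size buffer, playing the
 * role of lp->rx_skbuff[entry]. */
struct rx_slot {
	unsigned char *buf;
};

/* Stand-in for netif_rx(): consume a received frame. */
static void deliver(unsigned char *data, int pkt_len)
{
	printf("delivered %d bytes (marker %u)\n", pkt_len, data[0]);
}

static void rx_entry(struct rx_slot *slot, int pkt_len)
{
	if (pkt_len > rx_copybreak) {
		/* Large frame: allocate a replacement buffer, swap it into
		 * the slot, and hand the old buffer up without copying. */
		unsigned char *newbuf = malloc(PKT_BUF_SZ);
		if (!newbuf) {
			/* No replacement available: drop the frame so the
			 * ring slot never loses its buffer (rx_dropped). */
			printf("memory squeeze, dropping packet\n");
			return;
		}
		unsigned char *full = slot->buf;
		slot->buf = newbuf;
		deliver(full, pkt_len);
		free(full);
		return;
	}
	/* Small frame: copy into a right-sized buffer; the full-size DMA
	 * buffer stays on the ring and is reposted as-is. */
	unsigned char *copy = malloc(pkt_len);
	if (!copy) {
		printf("memory squeeze, dropping packet\n");
		return;
	}
	memcpy(copy, slot->buf, pkt_len);
	deliver(copy, pkt_len);
	free(copy);
}

int main(void)
{
	struct rx_slot slot = { .buf = calloc(1, PKT_BUF_SZ) };

	slot.buf[0] = 1;
	rx_entry(&slot, 64);	/* small: copied, buffer kept */
	slot.buf[0] = 2;
	rx_entry(&slot, 1500);	/* large: buffer swapped out */
	free(slot.buf);
	return 0;
}

The trade-off is classic copybreak: small frames pay a memcpy but avoid
pinning a 1.5 KB buffer per tiny packet, while large frames avoid the copy at
the price of one allocation in the hot path.
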
@@ -1298,7 +1277,7 @@ static int pcnet32_tx(struct net_device *dev, u16 csr0)
 		lp->tx_ring[entry].base = 0;
 
 		if (status & 0x4000) {
-			/* There was an major error, log it. */
+			/* There was a major error, log it. */
 			int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
 			lp->stats.tx_errors++;
 			if (netif_msg_tx_err(lp))
@@ -1329,8 +1308,7 @@ static int pcnet32_tx(struct net_device *dev, u16 csr0)
 			if (!lp->dxsuflo) {	/* If controller doesn't recover ... */
 				/* Ackk! On FIFO errors the Tx unit is turned off! */
 				/* Remove this verbosity later! */
-				if (netif_msg_tx_err
-				    (lp))
+				if (netif_msg_tx_err(lp))
 					printk(KERN_ERR
 					       "%s: Tx FIFO error! CSR0=%4.4x\n",
 					       dev->name, csr0);
@@ -1350,16 +1328,14 @@ static int pcnet32_tx(struct net_device *dev, u16 csr0)
 					 lp->tx_dma_addr[entry],
 					 lp->tx_skbuff[entry]->len,
 					 PCI_DMA_TODEVICE);
-			dev_kfree_skb_irq(lp->tx_skbuff[entry]);
+			dev_kfree_skb_any(lp->tx_skbuff[entry]);
 			lp->tx_skbuff[entry] = NULL;
 			lp->tx_dma_addr[entry] = 0;
 		}
 		dirty_tx++;
 	}
 
-	delta =
-	    (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask +
-				       lp->tx_ring_size);
+	delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
 	if (delta > lp->tx_ring_size) {
 		if (netif_msg_drv(lp))
 			printk(KERN_ERR
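
The single-line delta computation above is worth unpacking: cur_tx and
dirty_tx are free-running counters, and tx_mod_mask is tx_ring_size - 1
(ring sizes are powers of two), so tx_mod_mask + tx_ring_size masks the
difference into a window of twice the ring size. That is what lets the
following `delta > lp->tx_ring_size` test catch an out-of-sync driver even
after the counters wrap. A minimal sketch, assuming a hypothetical 16-entry
ring:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Assumed 16-entry ring; the index mask is size - 1 because the
	 * size is a power of two. */
	unsigned int tx_ring_size = 16;
	unsigned int tx_mod_mask = tx_ring_size - 1;

	/* cur_tx and dirty_tx advance without bound; masking their
	 * difference with (mask + size) == 2*size - 1 keeps the result in
	 * [0, 2*size), so a value above tx_ring_size can only mean the two
	 * counters have come out of sync. */
	unsigned int cur_tx = 0x1003;	/* wrapped past the ring many times */
	unsigned int dirty_tx = 0x0ffe;

	unsigned int delta = (cur_tx - dirty_tx) & (tx_mod_mask + tx_ring_size);
	printf("descriptors still in flight: %u\n", delta);	/* prints 5 */
	assert(delta <= tx_ring_size);
	return 0;
}
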
@@ -2535,19 +2511,20 @@ pcnet32_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 
 	spin_lock(&lp->lock);
 
-	while ((csr0 = lp->a.read_csr(ioaddr, 0)) & 0x8f00 && --boguscnt >= 0) {
+	csr0 = lp->a.read_csr(ioaddr, CSR0);
+	while ((csr0 & 0x8f00) && --boguscnt >= 0) {
 		if (csr0 == 0xffff) {
 			break;	/* PCMCIA remove happened */
 		}
 		/* Acknowledge all of the current interrupt sources ASAP. */
-		lp->a.write_csr(ioaddr, 0, csr0 & ~0x004f);
+		lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
 
 		must_restart = 0;
 
 		if (netif_msg_intr(lp))
 			printk(KERN_DEBUG
 			       "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
-			       dev->name, csr0, lp->a.read_csr(ioaddr, 0));
+			       dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
 
 		if (csr0 & 0x0400)	/* Rx interrupt */
 			pcnet32_rx(dev);
@@ -2561,14 +2538,16 @@ pcnet32_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 			lp->stats.tx_errors++;	/* Tx babble. */
 		if (csr0 & 0x1000) {
 			/*
-			 * this happens when our receive ring is full. This shouldn't
-			 * be a problem as we will see normal rx interrupts for the frames
-			 * in the receive ring. But there are some PCI chipsets (I can
-			 * reproduce this on SP3G with Intel saturn chipset) which have
-			 * sometimes problems and will fill up the receive ring with
-			 * error descriptors. In this situation we don't get a rx
-			 * interrupt, but a missed frame interrupt sooner or later.
-			 * So we try to clean up our receive ring here.
+			 * This happens when our receive ring is full. This
+			 * shouldn't be a problem as we will see normal rx
+			 * interrupts for the frames in the receive ring. But
+			 * there are some PCI chipsets (I can reproduce this
+			 * on SP3G with Intel saturn chipset) which have
+			 * sometimes problems and will fill up the receive
+			 * ring with error descriptors. In this situation we
+			 * don't get a rx interrupt, but a missed frame
+			 * interrupt sooner or later. So we try to clean up
+			 * our receive ring here.
 			 */
 			pcnet32_rx(dev);
 			lp->stats.rx_errors++;	/* Missed a Rx frame. */
@@ -2588,6 +2567,7 @@ pcnet32_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 			pcnet32_restart(dev, CSR0_START);
 			netif_wake_queue(dev);
 		}
+		csr0 = lp->a.read_csr(ioaddr, CSR0);
 	}
 
 	/* Set interrupt enable. */
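
The restructured interrupt loop reads CSR0 once before the loop and once at
the bottom of each pass, instead of embedding the read in the while
condition; the value that is logged, acknowledged, and dispatched on is
therefore exactly the value that was tested. A stand-alone sketch of that
shape, with a stubbed read_csr() returning canned status words in place of
the real lp->a.read_csr():

#include <stdio.h>

#define CSR0 0

/* Canned CSR0 values standing in for the hardware register: two passes
 * with work pending, then all interrupt bits clear. */
static unsigned short pending[] = { 0x8600, 0x8200, 0x0000 };
static int cursor;

static unsigned short read_csr(int reg)
{
	(void)reg;
	return pending[cursor++];
}

int main(void)
{
	int boguscnt = 4;	/* bound the loop, as the driver does */
	unsigned short csr0;

	csr0 = read_csr(CSR0);
	while ((csr0 & 0x8f00) && --boguscnt >= 0) {
		/* Dispatch on the same value we just tested, instead of
		 * re-reading the register in the while condition and then
		 * acting on a stale copy. */
		if (csr0 & 0x0400)
			printf("rx work, csr0=%#4.4x\n", csr0);
		if (csr0 & 0x0200)
			printf("tx work, csr0=%#4.4x\n", csr0);
		csr0 = read_csr(CSR0);	/* re-sample at the bottom */
	}
	return 0;
}
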