author		Don Fry <brazilnut@us.ibm.com>	2006-09-13 13:16:21 -0400
committer	Jeff Garzik <jeff@garzik.org>	2006-09-13 13:23:52 -0400
commit		9691edd26cfae0484367a6b8e3d46f3a5179e663 (patch)
tree		86e57f62c87c37bac7a2f5036f2e6e6512ab5e3b /drivers/net/pcnet32.c
parent		b368a3fbe41c44e4c7eb628002bbd8891defa7e0 (diff)
[PATCH] pcnet32: move/create receive and transmit routines
Move the receive routine and create the transmit routine.  Tested ia32 and ppc64.

Signed-off-by: Don Fry <brazilnut@us.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
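As a quick orientation before the diff: the receive path is logically unchanged (pcnet32_rx() simply moves above its callers), while the new pcnet32_tx() lets the Tx-done branch of pcnet32_interrupt() collapse to a single call whose return value says whether the transmitter must be restarted. The sketch below shows that calling shape in a minimal, self-contained form; it is illustrative only, not pcnet32 code, and every name in it (fake_dev, fake_tx, fake_interrupt, and the 0x4000 / 0x0200 bits reused purely as stand-ins) is invented for the example.

/*
 * Illustrative sketch only -- not driver code.  It mirrors the shape of the
 * patch: the descriptor-reclaim loop lives in a helper, and the interrupt
 * handler just consumes its "must restart" return value.
 */
#include <stdio.h>

struct fake_dev {
	unsigned int cur_tx;	/* next descriptor the driver will fill */
	unsigned int dirty_tx;	/* oldest descriptor not yet reclaimed */
	int tx_status[8];	/* < 0 means the "hardware" still owns it */
};

/* Reclaim completed Tx descriptors; return nonzero if a restart is needed. */
static int fake_tx(struct fake_dev *dev, unsigned short csr0)
{
	int must_restart = 0;

	while (dev->dirty_tx != dev->cur_tx) {
		unsigned int entry = dev->dirty_tx & 7;	/* ring of 8 entries */

		if (dev->tx_status[entry] < 0)
			break;			/* still owned by the "hardware" */
		if (dev->tx_status[entry] & 0x4000)
			must_restart = 1;	/* a fatal Tx error was flagged */
		dev->tx_status[entry] = -1;	/* hand the slot back */
		dev->dirty_tx++;
	}
	(void)csr0;	/* kept only to mirror the two-argument signature */
	return must_restart;
}

/* The "interrupt handler" now just delegates, as pcnet32_interrupt() does. */
static void fake_interrupt(struct fake_dev *dev, unsigned short csr0)
{
	if (csr0 & 0x0200) {	/* Tx-done style bit */
		if (fake_tx(dev, csr0))
			printf("transmitter restart required\n");
	}
}

int main(void)
{
	struct fake_dev dev = {
		.cur_tx = 2, .dirty_tx = 0,
		.tx_status = { 0x4000, 0 }	/* entry 0 errored, entry 1 clean */
	};

	fake_interrupt(&dev, 0x0200);
	return 0;
}

Keeping the helper's only output a return value, rather than acting on the error inside it, matches what the diff below does with must_restart: the restart decision stays where it was, in pcnet32_interrupt().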
Diffstat (limited to 'drivers/net/pcnet32.c')
-rw-r--r--	drivers/net/pcnet32.c	515
1 file changed, 259 insertions, 256 deletions
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 468c8bacb0d8..16b9538370cc 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -299,7 +299,6 @@ static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
 static int pcnet32_open(struct net_device *);
 static int pcnet32_init_ring(struct net_device *);
 static int pcnet32_start_xmit(struct sk_buff *, struct net_device *);
-static int pcnet32_rx(struct net_device *);
 static void pcnet32_tx_timeout(struct net_device *dev);
 static irqreturn_t pcnet32_interrupt(int, void *, struct pt_regs *);
 static int pcnet32_close(struct net_device *);
@@ -1125,6 +1124,264 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
 	return 1;
 }
 
+
+static int pcnet32_rx(struct net_device *dev)
+{
+	struct pcnet32_private *lp = dev->priv;
+	int entry = lp->cur_rx & lp->rx_mod_mask;
+	int boguscnt = lp->rx_ring_size / 2;
+
+	/* If we own the next entry, it's a new packet. Send it up. */
+	while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
+		int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8;
+
+		if (status != 0x03) {	/* There was an error. */
+			/*
+			 * There is a tricky error noted by John Murphy,
+			 * <murf@perftech.com> to Russ Nelson: Even with full-sized
+			 * buffers it's possible for a jabber packet to use two
+			 * buffers, with only the last correctly noting the error.
+			 */
+			if (status & 0x01)	/* Only count a general error at the */
+				lp->stats.rx_errors++;	/* end of a packet. */
+			if (status & 0x20)
+				lp->stats.rx_frame_errors++;
+			if (status & 0x10)
+				lp->stats.rx_over_errors++;
+			if (status & 0x08)
+				lp->stats.rx_crc_errors++;
+			if (status & 0x04)
+				lp->stats.rx_fifo_errors++;
+			lp->rx_ring[entry].status &= le16_to_cpu(0x03ff);
+		} else {
+			/* Malloc up new buffer, compatible with net-2e. */
+			short pkt_len =
+				(le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff) - 4;
+			struct sk_buff *skb;
+
+			/* Discard oversize frames. */
+			if (unlikely(pkt_len > PKT_BUF_SZ - 2)) {
+				if (netif_msg_drv(lp))
+					printk(KERN_ERR
+						"%s: Impossible packet size %d!\n",
+						dev->name, pkt_len);
+				lp->stats.rx_errors++;
+			} else if (pkt_len < 60) {
+				if (netif_msg_rx_err(lp))
+					printk(KERN_ERR "%s: Runt packet!\n",
+						dev->name);
+				lp->stats.rx_errors++;
+			} else {
+				int rx_in_place = 0;
+
+				if (pkt_len > rx_copybreak) {
+					struct sk_buff *newskb;
+
+					if ((newskb = dev_alloc_skb(PKT_BUF_SZ))) {
+						skb_reserve(newskb, 2);
+						skb = lp->rx_skbuff[entry];
+						pci_unmap_single(lp->pci_dev,
+							lp->rx_dma_addr[entry],
+							PKT_BUF_SZ - 2,
+							PCI_DMA_FROMDEVICE);
+						skb_put(skb, pkt_len);
+						lp->rx_skbuff[entry] = newskb;
+						newskb->dev = dev;
+						lp->rx_dma_addr[entry] =
+							pci_map_single(lp->pci_dev,
+								newskb->data,
+								PKT_BUF_SZ - 2,
+								PCI_DMA_FROMDEVICE);
+						lp->rx_ring[entry].base =
+							le32_to_cpu(lp->rx_dma_addr[entry]);
+						rx_in_place = 1;
+					} else
+						skb = NULL;
+				} else {
+					skb = dev_alloc_skb(pkt_len + 2);
+				}
+
+				if (skb == NULL) {
+					int i;
+					if (netif_msg_drv(lp))
+						printk(KERN_ERR
+							"%s: Memory squeeze, deferring packet.\n",
+							dev->name);
+					for (i = 0; i < lp->rx_ring_size; i++)
+						if ((short)le16_to_cpu(lp->rx_ring[(entry + i) & lp->rx_mod_mask].status) < 0)
+							break;
+
+					if (i > lp->rx_ring_size - 2) {
+						lp->stats.rx_dropped++;
+						lp->rx_ring[entry].status |=
+							le16_to_cpu(0x8000);
+						wmb();	/* Make sure adapter sees owner change */
+						lp->cur_rx++;
+					}
+					break;
+				}
+				skb->dev = dev;
+				if (!rx_in_place) {
+					skb_reserve(skb, 2);	/* 16 byte align */
+					skb_put(skb, pkt_len);	/* Make room */
+					pci_dma_sync_single_for_cpu(lp->pci_dev,
+						lp->rx_dma_addr[entry],
+						PKT_BUF_SZ - 2,
+						PCI_DMA_FROMDEVICE);
+					eth_copy_and_sum(skb,
+						(unsigned char *)(lp->rx_skbuff[entry]->data),
+						pkt_len, 0);
+					pci_dma_sync_single_for_device(lp->pci_dev,
+						lp->rx_dma_addr[entry],
+						PKT_BUF_SZ - 2,
+						PCI_DMA_FROMDEVICE);
+				}
+				lp->stats.rx_bytes += skb->len;
+				skb->protocol = eth_type_trans(skb, dev);
+				netif_rx(skb);
+				dev->last_rx = jiffies;
+				lp->stats.rx_packets++;
+			}
+		}
+		/*
+		 * The docs say that the buffer length isn't touched, but Andrew Boyd
+		 * of QNX reports that some revs of the 79C965 clear it.
+		 */
+		lp->rx_ring[entry].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
+		wmb();	/* Make sure owner changes after all others are visible */
+		lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
+		entry = (++lp->cur_rx) & lp->rx_mod_mask;
+		if (--boguscnt <= 0)
+			break;	/* don't stay in loop forever */
+	}
+
+	return 0;
+}
+
+static int pcnet32_tx(struct net_device *dev, u16 csr0)
+{
+	struct pcnet32_private *lp = dev->priv;
+	unsigned int dirty_tx = lp->dirty_tx;
+	int delta;
+	int must_restart = 0;
+
+	while (dirty_tx != lp->cur_tx) {
+		int entry = dirty_tx & lp->tx_mod_mask;
+		int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
+
+		if (status < 0)
+			break;	/* It still hasn't been Txed */
+
+		lp->tx_ring[entry].base = 0;
+
+		if (status & 0x4000) {
+			/* There was an major error, log it. */
+			int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
+			lp->stats.tx_errors++;
+			if (netif_msg_tx_err(lp))
+				printk(KERN_ERR
+					"%s: Tx error status=%04x err_status=%08x\n",
+					dev->name, status, err_status);
+			if (err_status & 0x04000000)
+				lp->stats.tx_aborted_errors++;
+			if (err_status & 0x08000000)
+				lp->stats.tx_carrier_errors++;
+			if (err_status & 0x10000000)
+				lp->stats.tx_window_errors++;
+#ifndef DO_DXSUFLO
+			if (err_status & 0x40000000) {
+				lp->stats.tx_fifo_errors++;
+				/* Ackk! On FIFO errors the Tx unit is turned off! */
+				/* Remove this verbosity later! */
+				if (netif_msg_tx_err(lp))
+					printk(KERN_ERR
+						"%s: Tx FIFO error! CSR0=%4.4x\n",
+						dev->name, csr0);
+				must_restart = 1;
+			}
+#else
+			if (err_status & 0x40000000) {
+				lp->stats.tx_fifo_errors++;
+				if (!lp->dxsuflo) {	/* If controller doesn't recover ... */
+					/* Ackk! On FIFO errors the Tx unit is turned off! */
+					/* Remove this verbosity later! */
+					if (netif_msg_tx_err(lp))
+						printk(KERN_ERR
+							"%s: Tx FIFO error! CSR0=%4.4x\n",
+							dev->name, csr0);
+					must_restart = 1;
+				}
+			}
+#endif
+		} else {
+			if (status & 0x1800)
+				lp->stats.collisions++;
+			lp->stats.tx_packets++;
+		}
+
+		/* We must free the original skb */
+		if (lp->tx_skbuff[entry]) {
+			pci_unmap_single(lp->pci_dev,
+				lp->tx_dma_addr[entry],
+				lp->tx_skbuff[entry]->len,
+				PCI_DMA_TODEVICE);
+			dev_kfree_skb_irq(lp->tx_skbuff[entry]);
+			lp->tx_skbuff[entry] = NULL;
+			lp->tx_dma_addr[entry] = 0;
+		}
+		dirty_tx++;
+	}
+
+	delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
+	if (delta > lp->tx_ring_size) {
+		if (netif_msg_drv(lp))
+			printk(KERN_ERR
+				"%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+				dev->name, dirty_tx, lp->cur_tx, lp->tx_full);
+		dirty_tx += lp->tx_ring_size;
+		delta -= lp->tx_ring_size;
+	}
+
+	if (lp->tx_full &&
+	    netif_queue_stopped(dev) &&
+	    delta < lp->tx_ring_size - 2) {
+		/* The ring is no longer full, clear tbusy. */
+		lp->tx_full = 0;
+		netif_wake_queue(dev);
+	}
+	lp->dirty_tx = dirty_tx;
+
+	return must_restart;
+}
+
 #define PCNET32_REGS_PER_PHY 32
 #define PCNET32_MAX_PHYS 32
 static int pcnet32_get_regs_len(struct net_device *dev)
@@ -2296,105 +2553,7 @@ pcnet32_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 		pcnet32_rx(dev);
 
 		if (csr0 & 0x0200) {	/* Tx-done interrupt */
-			unsigned int dirty_tx = lp->dirty_tx;
-			int delta;
-
-			while (dirty_tx != lp->cur_tx) {
-				int entry = dirty_tx & lp->tx_mod_mask;
-				int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
-
-				if (status < 0)
-					break;	/* It still hasn't been Txed */
-
-				lp->tx_ring[entry].base = 0;
-
-				if (status & 0x4000) {
-					/* There was an major error, log it. */
-					int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
-					lp->stats.tx_errors++;
-					if (netif_msg_tx_err(lp))
-						printk(KERN_ERR
-							"%s: Tx error status=%04x err_status=%08x\n",
-							dev->name, status, err_status);
-					if (err_status & 0x04000000)
-						lp->stats.tx_aborted_errors++;
-					if (err_status & 0x08000000)
-						lp->stats.tx_carrier_errors++;
-					if (err_status & 0x10000000)
-						lp->stats.tx_window_errors++;
-#ifndef DO_DXSUFLO
-					if (err_status & 0x40000000) {
-						lp->stats.tx_fifo_errors++;
-						/* Ackk! On FIFO errors the Tx unit is turned off! */
-						/* Remove this verbosity later! */
-						if (netif_msg_tx_err(lp))
-							printk(KERN_ERR
-								"%s: Tx FIFO error! CSR0=%4.4x\n",
-								dev->name, csr0);
-						must_restart = 1;
-					}
-#else
-					if (err_status & 0x40000000) {
-						lp->stats.tx_fifo_errors++;
-						if (!lp->dxsuflo) {	/* If controller doesn't recover ... */
-							/* Ackk! On FIFO errors the Tx unit is turned off! */
-							/* Remove this verbosity later! */
-							if (netif_msg_tx_err(lp))
-								printk(KERN_ERR
-									"%s: Tx FIFO error! CSR0=%4.4x\n",
-									dev->name, csr0);
-							must_restart = 1;
-						}
-					}
-#endif
-				} else {
-					if (status & 0x1800)
-						lp->stats.collisions++;
-					lp->stats.tx_packets++;
-				}
-
-				/* We must free the original skb */
-				if (lp->tx_skbuff[entry]) {
-					pci_unmap_single(lp->pci_dev,
-						lp->tx_dma_addr[entry],
-						lp->tx_skbuff[entry]->len,
-						PCI_DMA_TODEVICE);
-					dev_kfree_skb_irq(lp->tx_skbuff[entry]);
-					lp->tx_skbuff[entry] = NULL;
-					lp->tx_dma_addr[entry] = 0;
-				}
-				dirty_tx++;
-			}
-
-			delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
-			if (delta > lp->tx_ring_size) {
-				if (netif_msg_drv(lp))
-					printk(KERN_ERR
-						"%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
-						dev->name, dirty_tx, lp->cur_tx, lp->tx_full);
-				dirty_tx += lp->tx_ring_size;
-				delta -= lp->tx_ring_size;
-			}
-
-			if (lp->tx_full &&
-			    netif_queue_stopped(dev) &&
-			    delta < lp->tx_ring_size - 2) {
-				/* The ring is no longer full, clear tbusy. */
-				lp->tx_full = 0;
-				netif_wake_queue(dev);
-			}
-			lp->dirty_tx = dirty_tx;
+			must_restart = pcnet32_tx(dev, csr0);
 		}
 
 		/* Log misc errors. */
@@ -2443,162 +2602,6 @@ pcnet32_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	return IRQ_HANDLED;
 }
 
-static int pcnet32_rx(struct net_device *dev)
-{
-	struct pcnet32_private *lp = dev->priv;
-	int entry = lp->cur_rx & lp->rx_mod_mask;
-	int boguscnt = lp->rx_ring_size / 2;
-
-	/* If we own the next entry, it's a new packet. Send it up. */
-	while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
-		int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8;
-
-		if (status != 0x03) {	/* There was an error. */
-			/*
-			 * There is a tricky error noted by John Murphy,
-			 * <murf@perftech.com> to Russ Nelson: Even with full-sized
-			 * buffers it's possible for a jabber packet to use two
-			 * buffers, with only the last correctly noting the error.
-			 */
-			if (status & 0x01)	/* Only count a general error at the */
-				lp->stats.rx_errors++;	/* end of a packet. */
-			if (status & 0x20)
-				lp->stats.rx_frame_errors++;
-			if (status & 0x10)
-				lp->stats.rx_over_errors++;
-			if (status & 0x08)
-				lp->stats.rx_crc_errors++;
-			if (status & 0x04)
-				lp->stats.rx_fifo_errors++;
-			lp->rx_ring[entry].status &= le16_to_cpu(0x03ff);
-		} else {
-			/* Malloc up new buffer, compatible with net-2e. */
-			short pkt_len =
-				(le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff) - 4;
-			struct sk_buff *skb;
-
-			/* Discard oversize frames. */
-			if (unlikely(pkt_len > PKT_BUF_SZ - 2)) {
-				if (netif_msg_drv(lp))
-					printk(KERN_ERR
-						"%s: Impossible packet size %d!\n",
-						dev->name, pkt_len);
-				lp->stats.rx_errors++;
-			} else if (pkt_len < 60) {
-				if (netif_msg_rx_err(lp))
-					printk(KERN_ERR "%s: Runt packet!\n",
-						dev->name);
-				lp->stats.rx_errors++;
-			} else {
-				int rx_in_place = 0;
-
-				if (pkt_len > rx_copybreak) {
-					struct sk_buff *newskb;
-
-					if ((newskb = dev_alloc_skb(PKT_BUF_SZ))) {
-						skb_reserve(newskb, 2);
-						skb = lp->rx_skbuff[entry];
-						pci_unmap_single(lp->pci_dev,
-							lp->rx_dma_addr[entry],
-							PKT_BUF_SZ - 2,
-							PCI_DMA_FROMDEVICE);
-						skb_put(skb, pkt_len);
-						lp->rx_skbuff[entry] = newskb;
-						newskb->dev = dev;
-						lp->rx_dma_addr[entry] =
-							pci_map_single(lp->pci_dev,
-								newskb->data,
-								PKT_BUF_SZ - 2,
-								PCI_DMA_FROMDEVICE);
-						lp->rx_ring[entry].base =
-							le32_to_cpu(lp->rx_dma_addr[entry]);
-						rx_in_place = 1;
-					} else
-						skb = NULL;
-				} else {
-					skb = dev_alloc_skb(pkt_len + 2);
-				}
-
-				if (skb == NULL) {
-					int i;
-					if (netif_msg_drv(lp))
-						printk(KERN_ERR
-							"%s: Memory squeeze, deferring packet.\n",
-							dev->name);
-					for (i = 0; i < lp->rx_ring_size; i++)
-						if ((short)le16_to_cpu(lp->rx_ring[(entry + i) & lp->rx_mod_mask].status) < 0)
-							break;
-
-					if (i > lp->rx_ring_size - 2) {
-						lp->stats.rx_dropped++;
-						lp->rx_ring[entry].status |=
-							le16_to_cpu(0x8000);
-						wmb();	/* Make sure adapter sees owner change */
-						lp->cur_rx++;
-					}
-					break;
-				}
-				skb->dev = dev;
-				if (!rx_in_place) {
-					skb_reserve(skb, 2);	/* 16 byte align */
-					skb_put(skb, pkt_len);	/* Make room */
-					pci_dma_sync_single_for_cpu(lp->pci_dev,
-						lp->rx_dma_addr[entry],
-						PKT_BUF_SZ - 2,
-						PCI_DMA_FROMDEVICE);
-					eth_copy_and_sum(skb,
-						(unsigned char *)(lp->rx_skbuff[entry]->data),
-						pkt_len, 0);
-					pci_dma_sync_single_for_device(lp->pci_dev,
-						lp->rx_dma_addr[entry],
-						PKT_BUF_SZ - 2,
-						PCI_DMA_FROMDEVICE);
-				}
-				lp->stats.rx_bytes += skb->len;
-				skb->protocol = eth_type_trans(skb, dev);
-				netif_rx(skb);
-				dev->last_rx = jiffies;
-				lp->stats.rx_packets++;
-			}
-		}
-		/*
-		 * The docs say that the buffer length isn't touched, but Andrew Boyd
-		 * of QNX reports that some revs of the 79C965 clear it.
-		 */
-		lp->rx_ring[entry].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
-		wmb();	/* Make sure owner changes after all others are visible */
-		lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
-		entry = (++lp->cur_rx) & lp->rx_mod_mask;
-		if (--boguscnt <= 0)
-			break;	/* don't stay in loop forever */
-	}
-
-	return 0;
-}
-
 static int pcnet32_close(struct net_device *dev)
 {
 	unsigned long ioaddr = dev->base_addr;