Diffstat (limited to 'drivers/net/pcnet32.c')
-rw-r--r--  drivers/net/pcnet32.c  278
1 file changed, 220 insertions(+), 58 deletions(-)
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 113b68099216..70fe81a89df9 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -22,8 +22,8 @@
  *************************************************************************/
 
 #define DRV_NAME "pcnet32"
-#define DRV_VERSION "1.30j"
-#define DRV_RELDATE "29.04.2005"
+#define DRV_VERSION "1.31a"
+#define DRV_RELDATE "12.Sep.2005"
 #define PFX DRV_NAME ": "
 
 static const char *version =
@@ -257,6 +257,9 @@ static int homepna[MAX_UNITS];
  * v1.30h 24 Jun 2004 Don Fry correctly select auto, speed, duplex in bcr32.
  * v1.30i 28 Jun 2004 Don Fry change to use module_param.
  * v1.30j 29 Apr 2005 Don Fry fix skb/map leak with loopback test.
+ * v1.31  02 Sep 2005 Hubert WS Lin <wslin@tw.ibm.com> added set_ringparam().
+ * v1.31a 12 Sep 2005 Hubert WS Lin <wslin@tw.ibm.com> set min ring size to 4
+ *        to allow loopback test to work unchanged.
  */
 
 
@@ -266,17 +269,17 @@ static int homepna[MAX_UNITS];
  * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4).
  */
 #ifndef PCNET32_LOG_TX_BUFFERS
 #define PCNET32_LOG_TX_BUFFERS 4
 #define PCNET32_LOG_RX_BUFFERS 5
+#define PCNET32_LOG_MAX_TX_BUFFERS 9 /* 2^9 == 512 */
+#define PCNET32_LOG_MAX_RX_BUFFERS 9
 #endif
 
 #define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS))
-#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
-#define TX_RING_LEN_BITS ((PCNET32_LOG_TX_BUFFERS) << 12)
+#define TX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_TX_BUFFERS))
 
 #define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS))
-#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
-#define RX_RING_LEN_BITS ((PCNET32_LOG_RX_BUFFERS) << 4)
+#define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS))
 
 #define PKT_BUF_SZ 1544
 
@@ -334,14 +337,14 @@ struct pcnet32_access {
 };
 
 /*
- * The first three fields of pcnet32_private are read by the ethernet device
- * so we allocate the structure should be allocated by pci_alloc_consistent().
+ * The first field of pcnet32_private is read by the ethernet device
+ * so the structure should be allocated using pci_alloc_consistent().
  */
 struct pcnet32_private {
-    /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
-    struct pcnet32_rx_head rx_ring[RX_RING_SIZE];
-    struct pcnet32_tx_head tx_ring[TX_RING_SIZE];
     struct pcnet32_init_block init_block;
+    /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
+    struct pcnet32_rx_head *rx_ring;
+    struct pcnet32_tx_head *tx_ring;
     dma_addr_t dma_addr;         /* DMA address of beginning of this
                                     object, returned by
                                     pci_alloc_consistent */
@@ -349,13 +352,21 @@ struct pcnet32_private {
                                     structure */
     const char *name;
     /* The saved address of a sent-in-place packet/buffer, for skfree(). */
-    struct sk_buff *tx_skbuff[TX_RING_SIZE];
-    struct sk_buff *rx_skbuff[RX_RING_SIZE];
-    dma_addr_t tx_dma_addr[TX_RING_SIZE];
-    dma_addr_t rx_dma_addr[RX_RING_SIZE];
+    struct sk_buff **tx_skbuff;
+    struct sk_buff **rx_skbuff;
+    dma_addr_t *tx_dma_addr;
+    dma_addr_t *rx_dma_addr;
     struct pcnet32_access a;
     spinlock_t lock;                   /* Guard lock */
     unsigned int cur_rx, cur_tx;       /* The next free ring entry */
+    unsigned int rx_ring_size;         /* current rx ring size */
+    unsigned int tx_ring_size;         /* current tx ring size */
+    unsigned int rx_mod_mask;          /* rx ring modular mask */
+    unsigned int tx_mod_mask;          /* tx ring modular mask */
+    unsigned short rx_len_bits;
+    unsigned short tx_len_bits;
+    dma_addr_t rx_ring_dma_addr;
+    dma_addr_t tx_ring_dma_addr;
     unsigned int dirty_rx, dirty_tx;   /* The ring entries to be free()ed. */
     struct net_device_stats stats;
     char tx_full;
@@ -397,6 +408,9 @@ static int pcnet32_get_regs_len(struct net_device *dev);
 static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
     void *ptr);
 static void pcnet32_purge_tx_ring(struct net_device *dev);
+static int pcnet32_alloc_ring(struct net_device *dev);
+static void pcnet32_free_ring(struct net_device *dev);
+
 
 enum pci_flags_bit {
     PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
@@ -613,10 +627,62 @@ static void pcnet32_get_ringparam(struct net_device *dev, struct ethtool_ringpar
 {
     struct pcnet32_private *lp = dev->priv;
 
-    ering->tx_max_pending = TX_RING_SIZE - 1;
-    ering->tx_pending = lp->cur_tx - lp->dirty_tx;
-    ering->rx_max_pending = RX_RING_SIZE - 1;
-    ering->rx_pending = lp->cur_rx & RX_RING_MOD_MASK;
+    ering->tx_max_pending = TX_MAX_RING_SIZE - 1;
+    ering->tx_pending = lp->tx_ring_size - 1;
+    ering->rx_max_pending = RX_MAX_RING_SIZE - 1;
+    ering->rx_pending = lp->rx_ring_size - 1;
+}
+
+static int pcnet32_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+    struct pcnet32_private *lp = dev->priv;
+    unsigned long flags;
+    int i;
+
+    if (ering->rx_mini_pending || ering->rx_jumbo_pending)
+        return -EINVAL;
+
+    if (netif_running(dev))
+        pcnet32_close(dev);
+
+    spin_lock_irqsave(&lp->lock, flags);
+    pcnet32_free_ring(dev);
+    lp->tx_ring_size = min(ering->tx_pending, (unsigned int) TX_MAX_RING_SIZE);
+    lp->rx_ring_size = min(ering->rx_pending, (unsigned int) RX_MAX_RING_SIZE);
+
+    /* set the minimum ring size to 4, to allow the loopback test to work
+     * unchanged.
+     */
+    for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
+        if (lp->tx_ring_size <= (1 << i))
+            break;
+    }
+    lp->tx_ring_size = (1 << i);
+    lp->tx_mod_mask = lp->tx_ring_size - 1;
+    lp->tx_len_bits = (i << 12);
+
+    for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
+        if (lp->rx_ring_size <= (1 << i))
+            break;
+    }
+    lp->rx_ring_size = (1 << i);
+    lp->rx_mod_mask = lp->rx_ring_size - 1;
+    lp->rx_len_bits = (i << 4);
+
+    if (pcnet32_alloc_ring(dev)) {
+        pcnet32_free_ring(dev);
+        return -ENOMEM;
+    }
+
+    spin_unlock_irqrestore(&lp->lock, flags);
+
+    if (pcnet32_debug & NETIF_MSG_DRV)
+        printk(KERN_INFO PFX "Ring Param Settings: RX: %d, TX: %d\n", lp->rx_ring_size, lp->tx_ring_size);
+
+    if (netif_running(dev))
+        pcnet32_open(dev);
+
+    return 0;
 }
 
 static void pcnet32_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -948,6 +1014,7 @@ static struct ethtool_ops pcnet32_ethtool_ops = {
     .nway_reset = pcnet32_nway_reset,
     .get_link = pcnet32_get_link,
     .get_ringparam = pcnet32_get_ringparam,
+    .set_ringparam = pcnet32_set_ringparam,
     .get_tx_csum = ethtool_op_get_tx_csum,
     .get_sg = ethtool_op_get_sg,
     .get_tso = ethtool_op_get_tso,
@@ -957,6 +1024,7 @@ static struct ethtool_ops pcnet32_ethtool_ops = {
     .phys_id = pcnet32_phys_id,
     .get_regs_len = pcnet32_get_regs_len,
     .get_regs = pcnet32_get_regs,
+    .get_perm_addr = ethtool_op_get_perm_addr,
 };
 
 /* only probes for non-PCI devices, the rest are handled by
@@ -1185,9 +1253,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
             memcpy(dev->dev_addr, promaddr, 6);
         }
     }
+    memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
     /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
-    if (!is_valid_ether_addr(dev->dev_addr))
+    if (!is_valid_ether_addr(dev->perm_addr))
         memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
 
     if (pcnet32_debug & NETIF_MSG_PROBE) {
@@ -1239,6 +1308,12 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
     dev->priv = lp;
     lp->name = chipname;
     lp->shared_irq = shared;
+    lp->tx_ring_size = TX_RING_SIZE;   /* default tx ring size */
+    lp->rx_ring_size = RX_RING_SIZE;   /* default rx ring size */
+    lp->tx_mod_mask = lp->tx_ring_size - 1;
+    lp->rx_mod_mask = lp->rx_ring_size - 1;
+    lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
+    lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
     lp->mii_if.full_duplex = fdx;
     lp->mii_if.phy_id_mask = 0x1f;
     lp->mii_if.reg_num_mask = 0x1f;
@@ -1265,21 +1340,23 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
     }
     lp->a = *a;
 
+    if (pcnet32_alloc_ring(dev)) {
+        ret = -ENOMEM;
+        goto err_free_ring;
+    }
     /* detect special T1/E1 WAN card by checking for MAC address */
     if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0
             && dev->dev_addr[2] == 0x75)
         lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
 
     lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */
-    lp->init_block.tlen_rlen = le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS);
+    lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
     for (i = 0; i < 6; i++)
         lp->init_block.phys_addr[i] = dev->dev_addr[i];
     lp->init_block.filter[0] = 0x00000000;
     lp->init_block.filter[1] = 0x00000000;
-    lp->init_block.rx_ring = (u32)le32_to_cpu(lp->dma_addr +
-            offsetof(struct pcnet32_private, rx_ring));
-    lp->init_block.tx_ring = (u32)le32_to_cpu(lp->dma_addr +
-            offsetof(struct pcnet32_private, tx_ring));
+    lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr);
+    lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr);
 
     /* switch pcnet32 to 32bit mode */
     a->write_bcr(ioaddr, 20, 2);
@@ -1310,7 +1387,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
         if (pcnet32_debug & NETIF_MSG_PROBE)
             printk(", failed to detect IRQ line.\n");
         ret = -ENODEV;
-        goto err_free_consistent;
+        goto err_free_ring;
     }
     if (pcnet32_debug & NETIF_MSG_PROBE)
         printk(", probed IRQ %d.\n", dev->irq);
@@ -1341,7 +1418,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 
     /* Fill in the generic fields of the device structure. */
     if (register_netdev(dev))
-        goto err_free_consistent;
+        goto err_free_ring;
 
     if (pdev) {
         pci_set_drvdata(pdev, dev);
@@ -1359,6 +1436,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 
     return 0;
 
+err_free_ring:
+    pcnet32_free_ring(dev);
 err_free_consistent:
     pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
 err_free_netdev:
@@ -1369,6 +1448,86 @@ err_release_region:
 }
 
 
+static int pcnet32_alloc_ring(struct net_device *dev)
+{
+    struct pcnet32_private *lp = dev->priv;
+
+    if ((lp->tx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
+            &lp->tx_ring_dma_addr)) == NULL) {
+        if (pcnet32_debug & NETIF_MSG_DRV)
+            printk(KERN_ERR PFX "Consistent memory allocation failed.\n");
+        return -ENOMEM;
+    }
+
+    if ((lp->rx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
+            &lp->rx_ring_dma_addr)) == NULL) {
+        if (pcnet32_debug & NETIF_MSG_DRV)
+            printk(KERN_ERR PFX "Consistent memory allocation failed.\n");
+        return -ENOMEM;
+    }
+
+    if (!(lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size, GFP_ATOMIC))) {
+        if (pcnet32_debug & NETIF_MSG_DRV)
+            printk(KERN_ERR PFX "Memory allocation failed.\n");
+        return -ENOMEM;
+    }
+    memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size);
+
+    if (!(lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size, GFP_ATOMIC))) {
+        if (pcnet32_debug & NETIF_MSG_DRV)
+            printk(KERN_ERR PFX "Memory allocation failed.\n");
+        return -ENOMEM;
+    }
+    memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size);
+
+    if (!(lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size, GFP_ATOMIC))) {
+        if (pcnet32_debug & NETIF_MSG_DRV)
+            printk(KERN_ERR PFX "Memory allocation failed.\n");
+        return -ENOMEM;
+    }
+    memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size);
+
+    if (!(lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size, GFP_ATOMIC))) {
+        if (pcnet32_debug & NETIF_MSG_DRV)
+            printk(KERN_ERR PFX "Memory allocation failed.\n");
+        return -ENOMEM;
+    }
+    memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size);
+
+    return 0;
+}
+
+
+static void pcnet32_free_ring(struct net_device *dev)
+{
+    struct pcnet32_private *lp = dev->priv;
+
+    kfree(lp->tx_skbuff);
+    lp->tx_skbuff = NULL;
+
+    kfree(lp->rx_skbuff);
+    lp->rx_skbuff = NULL;
+
+    kfree(lp->tx_dma_addr);
+    lp->tx_dma_addr = NULL;
+
+    kfree(lp->rx_dma_addr);
+    lp->rx_dma_addr = NULL;
+
+    if (lp->tx_ring) {
+        pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
+                lp->tx_ring, lp->tx_ring_dma_addr);
+        lp->tx_ring = NULL;
+    }
+
+    if (lp->rx_ring) {
+        pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
+                lp->rx_ring, lp->rx_ring_dma_addr);
+        lp->rx_ring = NULL;
+    }
+}
+
+
 static int
 pcnet32_open(struct net_device *dev)
 {
@@ -1400,8 +1559,8 @@ pcnet32_open(struct net_device *dev)
     if (netif_msg_ifup(lp))
         printk(KERN_DEBUG "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
                 dev->name, dev->irq,
-                (u32) (lp->dma_addr + offsetof(struct pcnet32_private, tx_ring)),
-                (u32) (lp->dma_addr + offsetof(struct pcnet32_private, rx_ring)),
+                (u32) (lp->tx_ring_dma_addr),
+                (u32) (lp->rx_ring_dma_addr),
                 (u32) (lp->dma_addr + offsetof(struct pcnet32_private, init_block)));
 
     /* set/reset autoselect bit */
@@ -1521,7 +1680,7 @@ pcnet32_open(struct net_device *dev)
 
 err_free_ring:
     /* free any allocated skbuffs */
-    for (i = 0; i < RX_RING_SIZE; i++) {
+    for (i = 0; i < lp->rx_ring_size; i++) {
         lp->rx_ring[i].status = 0;
         if (lp->rx_skbuff[i]) {
             pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2,
@@ -1531,6 +1690,9 @@ err_free_ring:
         lp->rx_skbuff[i] = NULL;
         lp->rx_dma_addr[i] = 0;
     }
+
+    pcnet32_free_ring(dev);
+
     /*
      * Switch back to 16bit mode to avoid problems with dumb
      * DOS packet driver after a warm reboot
@@ -1562,7 +1724,7 @@ pcnet32_purge_tx_ring(struct net_device *dev)
     struct pcnet32_private *lp = dev->priv;
     int i;
 
-    for (i = 0; i < TX_RING_SIZE; i++) {
+    for (i = 0; i < lp->tx_ring_size; i++) {
         lp->tx_ring[i].status = 0;  /* CPU owns buffer */
         wmb();  /* Make sure adapter sees owner change */
         if (lp->tx_skbuff[i]) {
@@ -1587,7 +1749,7 @@ pcnet32_init_ring(struct net_device *dev)
     lp->cur_rx = lp->cur_tx = 0;
     lp->dirty_rx = lp->dirty_tx = 0;
 
-    for (i = 0; i < RX_RING_SIZE; i++) {
+    for (i = 0; i < lp->rx_ring_size; i++) {
         struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
         if (rx_skbuff == NULL) {
             if (!(rx_skbuff = lp->rx_skbuff[i] = dev_alloc_skb (PKT_BUF_SZ))) {
@@ -1611,20 +1773,18 @@ pcnet32_init_ring(struct net_device *dev)
     }
     /* The Tx buffer address is filled in as needed, but we do need to clear
      * the upper ownership bit. */
-    for (i = 0; i < TX_RING_SIZE; i++) {
+    for (i = 0; i < lp->tx_ring_size; i++) {
         lp->tx_ring[i].status = 0;  /* CPU owns buffer */
         wmb();  /* Make sure adapter sees owner change */
         lp->tx_ring[i].base = 0;
         lp->tx_dma_addr[i] = 0;
     }
 
-    lp->init_block.tlen_rlen = le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS);
+    lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
     for (i = 0; i < 6; i++)
         lp->init_block.phys_addr[i] = dev->dev_addr[i];
-    lp->init_block.rx_ring = (u32)le32_to_cpu(lp->dma_addr +
-            offsetof(struct pcnet32_private, rx_ring));
-    lp->init_block.tx_ring = (u32)le32_to_cpu(lp->dma_addr +
-            offsetof(struct pcnet32_private, tx_ring));
+    lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr);
+    lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr);
     wmb();  /* Make sure all changes are visible */
     return 0;
 }
@@ -1682,13 +1842,13 @@ pcnet32_tx_timeout (struct net_device *dev)
         printk(KERN_DEBUG " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
                 lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
                 lp->cur_rx);
-        for (i = 0 ; i < RX_RING_SIZE; i++)
+        for (i = 0 ; i < lp->rx_ring_size; i++)
             printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
                     le32_to_cpu(lp->rx_ring[i].base),
                     (-le16_to_cpu(lp->rx_ring[i].buf_length)) & 0xffff,
                     le32_to_cpu(lp->rx_ring[i].msg_length),
                     le16_to_cpu(lp->rx_ring[i].status));
-        for (i = 0 ; i < TX_RING_SIZE; i++)
+        for (i = 0 ; i < lp->tx_ring_size; i++)
             printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
                     le32_to_cpu(lp->tx_ring[i].base),
                     (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
@@ -1729,7 +1889,7 @@ pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
     /* Fill in a Tx ring entry */
 
     /* Mask to ring buffer boundary. */
-    entry = lp->cur_tx & TX_RING_MOD_MASK;
+    entry = lp->cur_tx & lp->tx_mod_mask;
 
     /* Caution: the write order is important here, set the status
      * with the "ownership" bits last. */
@@ -1753,7 +1913,7 @@ pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
     dev->trans_start = jiffies;
 
-    if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base != 0) {
+    if (lp->tx_ring[(entry+1) & lp->tx_mod_mask].base != 0) {
         lp->tx_full = 1;
         netif_stop_queue(dev);
     }
@@ -1806,7 +1966,7 @@ pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
         int delta;
 
         while (dirty_tx != lp->cur_tx) {
-            int entry = dirty_tx & TX_RING_MOD_MASK;
+            int entry = dirty_tx & lp->tx_mod_mask;
             int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
 
             if (status < 0)
@@ -1864,18 +2024,18 @@ pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
             dirty_tx++;
         }
 
-        delta = (lp->cur_tx - dirty_tx) & (TX_RING_MOD_MASK + TX_RING_SIZE);
-        if (delta > TX_RING_SIZE) {
+        delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
+        if (delta > lp->tx_ring_size) {
             if (netif_msg_drv(lp))
                 printk(KERN_ERR "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
                         dev->name, dirty_tx, lp->cur_tx, lp->tx_full);
-            dirty_tx += TX_RING_SIZE;
-            delta -= TX_RING_SIZE;
+            dirty_tx += lp->tx_ring_size;
+            delta -= lp->tx_ring_size;
         }
 
         if (lp->tx_full &&
             netif_queue_stopped(dev) &&
-            delta < TX_RING_SIZE - 2) {
+            delta < lp->tx_ring_size - 2) {
             /* The ring is no longer full, clear tbusy. */
             lp->tx_full = 0;
             netif_wake_queue (dev);
@@ -1932,8 +2092,8 @@ static int
 pcnet32_rx(struct net_device *dev)
 {
     struct pcnet32_private *lp = dev->priv;
-    int entry = lp->cur_rx & RX_RING_MOD_MASK;
-    int boguscnt = RX_RING_SIZE / 2;
+    int entry = lp->cur_rx & lp->rx_mod_mask;
+    int boguscnt = lp->rx_ring_size / 2;
 
     /* If we own the next entry, it's a new packet. Send it up. */
     while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
@@ -1998,12 +2158,12 @@ pcnet32_rx(struct net_device *dev)
                 if (netif_msg_drv(lp))
                     printk(KERN_ERR "%s: Memory squeeze, deferring packet.\n",
                             dev->name);
-                for (i = 0; i < RX_RING_SIZE; i++)
+                for (i = 0; i < lp->rx_ring_size; i++)
                     if ((short)le16_to_cpu(lp->rx_ring[(entry+i)
-                            & RX_RING_MOD_MASK].status) < 0)
+                            & lp->rx_mod_mask].status) < 0)
                         break;
 
-                if (i > RX_RING_SIZE -2) {
+                if (i > lp->rx_ring_size -2) {
                     lp->stats.rx_dropped++;
                     lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
                     wmb();  /* Make sure adapter sees owner change */
@@ -2041,7 +2201,7 @@ pcnet32_rx(struct net_device *dev)
         lp->rx_ring[entry].buf_length = le16_to_cpu(2-PKT_BUF_SZ);
         wmb();  /* Make sure owner changes after all others are visible */
         lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
-        entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
+        entry = (++lp->cur_rx) & lp->rx_mod_mask;
         if (--boguscnt <= 0) break;  /* don't stay in loop forever */
     }
 
@@ -2084,7 +2244,7 @@ pcnet32_close(struct net_device *dev)
     spin_lock_irqsave(&lp->lock, flags);
 
     /* free all allocated skbuffs */
-    for (i = 0; i < RX_RING_SIZE; i++) {
+    for (i = 0; i < lp->rx_ring_size; i++) {
         lp->rx_ring[i].status = 0;
         wmb();  /* Make sure adapter sees owner change */
         if (lp->rx_skbuff[i]) {
@@ -2096,7 +2256,7 @@ pcnet32_close(struct net_device *dev)
         lp->rx_dma_addr[i] = 0;
     }
 
-    for (i = 0; i < TX_RING_SIZE; i++) {
+    for (i = 0; i < lp->tx_ring_size; i++) {
         lp->tx_ring[i].status = 0;  /* CPU owns buffer */
         wmb();  /* Make sure adapter sees owner change */
         if (lp->tx_skbuff[i]) {
@@ -2265,6 +2425,7 @@ static void __devexit pcnet32_remove_one(struct pci_dev *pdev)
     struct pcnet32_private *lp = dev->priv;
 
     unregister_netdev(dev);
+    pcnet32_free_ring(dev);
     release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
     pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
     free_netdev(dev);
@@ -2340,6 +2501,7 @@ static void __exit pcnet32_cleanup_module(void)
         struct pcnet32_private *lp = pcnet32_dev->priv;
         next_dev = lp->next;
         unregister_netdev(pcnet32_dev);
+        pcnet32_free_ring(pcnet32_dev);
         release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
         pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
         free_netdev(pcnet32_dev);