author	Hubert WS Lin <wslin@tw.ibm.com>	2005-09-14 14:39:25 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-09-16 02:43:23 -0400
commit	eabf04151682bc7b57c84fea58cf9e4e5a3cf2a9 (patch)
tree	82e41403e8b3f0dbf8f4a47bd90a5afc57142e36
parent	f89efd523b25cc1702e074dafdcac283da657002 (diff)
[PATCH] pcnet32: set_ringparam implementation
This patch implements set_ringparam(), one of the ethtool operations, which allows changing the tx/rx ring sizes via ethtool.

- Changed memory allocation of the tx/rx rings from static to dynamic
- Implemented set_ringparam()
- Tested on i386 and ppc64

Signed-off-by: Hubert WS Lin <wslin@tw.ibm.com>
Signed-off-by: Jay Vosburgh <fubar@us.ibm.com>
Cc: Jeff Garzik <jgarzik@pobox.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
-rw-r--r--	drivers/net/pcnet32.c	263
1 file changed, 209 insertions, 54 deletions
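Note (editorial, not part of the patch): the new operation is reached from user space through the standard SIOCETHTOOL ioctl. The minimal C sketch below is only an illustration of how the pcnet32_set_ringparam() path added by this commit would be exercised; it assumes an interface named "eth0" and sufficient privileges.

/* Hypothetical user-space example, not from the patch: query the ring
 * limits with ETHTOOL_GRINGPARAM, then request larger rings with
 * ETHTOOL_SRINGPARAM. The driver clamps the request to
 * TX/RX_MAX_RING_SIZE (512) and rounds it up to a power of two. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ringparam ering;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ering;

	/* Read the current and maximum ring sizes. */
	memset(&ering, 0, sizeof(ering));
	ering.cmd = ETHTOOL_GRINGPARAM;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;
	printf("rx %u/%u  tx %u/%u\n", ering.rx_pending, ering.rx_max_pending,
	       ering.tx_pending, ering.tx_max_pending);

	/* Ask for larger rings; mini/jumbo fields stay zero, as required
	 * by the new pcnet32_set_ringparam() check. */
	ering.cmd = ETHTOOL_SRINGPARAM;
	ering.rx_pending = 128;
	ering.tx_pending = 128;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	close(fd);
	return 0;
}

The equivalent request from the ethtool utility would be "ethtool -G eth0 rx 128 tx 128".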
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 6c3731b608d..7350c27ea2d 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -22,8 +22,8 @@
  *************************************************************************/
 
 #define DRV_NAME	"pcnet32"
-#define DRV_VERSION	"1.30j"
-#define DRV_RELDATE	"29.04.2005"
+#define DRV_VERSION	"1.31"
+#define DRV_RELDATE	"02.Sep.2005"
 #define PFX		DRV_NAME ": "
 
 static const char *version =
@@ -257,6 +257,7 @@ static int homepna[MAX_UNITS];
 * v1.30h  24 Jun 2004 Don Fry correctly select auto, speed, duplex in bcr32.
 * v1.30i  28 Jun 2004 Don Fry change to use module_param.
 * v1.30j  29 Apr 2005 Don Fry fix skb/map leak with loopback test.
+* v1.31   02 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> added set_ringparam().
 */
 
 
@@ -266,17 +267,17 @@ static int homepna[MAX_UNITS];
  * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4).
  */
 #ifndef PCNET32_LOG_TX_BUFFERS
 #define PCNET32_LOG_TX_BUFFERS 4
 #define PCNET32_LOG_RX_BUFFERS 5
+#define PCNET32_LOG_MAX_TX_BUFFERS 9	/* 2^9 == 512 */
+#define PCNET32_LOG_MAX_RX_BUFFERS 9
 #endif
 
 #define TX_RING_SIZE		(1 << (PCNET32_LOG_TX_BUFFERS))
-#define TX_RING_MOD_MASK	(TX_RING_SIZE - 1)
-#define TX_RING_LEN_BITS	((PCNET32_LOG_TX_BUFFERS) << 12)
+#define TX_MAX_RING_SIZE	(1 << (PCNET32_LOG_MAX_TX_BUFFERS))
 
 #define RX_RING_SIZE		(1 << (PCNET32_LOG_RX_BUFFERS))
-#define RX_RING_MOD_MASK	(RX_RING_SIZE - 1)
-#define RX_RING_LEN_BITS	((PCNET32_LOG_RX_BUFFERS) << 4)
+#define RX_MAX_RING_SIZE	(1 << (PCNET32_LOG_MAX_RX_BUFFERS))
 
 #define PKT_BUF_SZ		1544
 
@@ -339,8 +340,8 @@ struct pcnet32_access {
  */
 struct pcnet32_private {
 	/* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
-	struct pcnet32_rx_head	rx_ring[RX_RING_SIZE];
-	struct pcnet32_tx_head	tx_ring[TX_RING_SIZE];
+	struct pcnet32_rx_head	*rx_ring;
+	struct pcnet32_tx_head	*tx_ring;
 	struct pcnet32_init_block	init_block;
 	dma_addr_t		dma_addr;	/* DMA address of beginning of this
 						   object, returned by
@@ -349,13 +350,21 @@ struct pcnet32_private {
 					   structure */
 	const char		*name;
 	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
-	struct sk_buff	*tx_skbuff[TX_RING_SIZE];
-	struct sk_buff	*rx_skbuff[RX_RING_SIZE];
-	dma_addr_t	tx_dma_addr[TX_RING_SIZE];
-	dma_addr_t	rx_dma_addr[RX_RING_SIZE];
+	struct sk_buff	**tx_skbuff;
+	struct sk_buff	**rx_skbuff;
+	dma_addr_t	*tx_dma_addr;
+	dma_addr_t	*rx_dma_addr;
 	struct pcnet32_access	a;
 	spinlock_t		lock;		/* Guard lock */
 	unsigned int		cur_rx, cur_tx;	/* The next free ring entry */
+	unsigned int		rx_ring_size;	/* current rx ring size */
+	unsigned int		tx_ring_size;	/* current tx ring size */
+	unsigned int		rx_mod_mask;	/* rx ring modular mask */
+	unsigned int		tx_mod_mask;	/* tx ring modular mask */
+	unsigned short		rx_len_bits;
+	unsigned short		tx_len_bits;
+	dma_addr_t		rx_ring_dma_addr;
+	dma_addr_t		tx_ring_dma_addr;
 	unsigned int		dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
 	struct net_device_stats	stats;
 	char			tx_full;
@@ -397,6 +406,9 @@ static int pcnet32_get_regs_len(struct net_device *dev);
 static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 			     void *ptr);
 static void pcnet32_purge_tx_ring(struct net_device *dev);
+static int pcnet32_alloc_ring(struct net_device *dev);
+static void pcnet32_free_ring(struct net_device *dev);
+
 
 enum pci_flags_bit {
 	PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
@@ -613,10 +625,59 @@ static void pcnet32_get_ringparam(struct net_device *dev, struct ethtool_ringpar
 {
 	struct pcnet32_private *lp = dev->priv;
 
-	ering->tx_max_pending = TX_RING_SIZE - 1;
-	ering->tx_pending = lp->cur_tx - lp->dirty_tx;
-	ering->rx_max_pending = RX_RING_SIZE - 1;
-	ering->rx_pending = lp->cur_rx & RX_RING_MOD_MASK;
+	ering->tx_max_pending = TX_MAX_RING_SIZE - 1;
+	ering->tx_pending = lp->tx_ring_size - 1;
+	ering->rx_max_pending = RX_MAX_RING_SIZE - 1;
+	ering->rx_pending = lp->rx_ring_size - 1;
+}
+
+static int pcnet32_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+	struct pcnet32_private *lp = dev->priv;
+	unsigned long flags;
+	int i;
+
+	if (ering->rx_mini_pending || ering->rx_jumbo_pending)
+		return -EINVAL;
+
+	if (netif_running(dev))
+		pcnet32_close(dev);
+
+	spin_lock_irqsave(&lp->lock, flags);
+	pcnet32_free_ring(dev);
+	lp->tx_ring_size = min(ering->tx_pending, (unsigned int) TX_MAX_RING_SIZE);
+	lp->rx_ring_size = min(ering->rx_pending, (unsigned int) RX_MAX_RING_SIZE);
+
+	for (i = 0; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
+		if (lp->tx_ring_size <= (1 << i))
+			break;
+	}
+	lp->tx_ring_size = (1 << i);
+	lp->tx_mod_mask = lp->tx_ring_size - 1;
+	lp->tx_len_bits = (i << 12);
+
+	for (i = 0; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
+		if (lp->rx_ring_size <= (1 << i))
+			break;
+	}
+	lp->rx_ring_size = (1 << i);
+	lp->rx_mod_mask = lp->rx_ring_size - 1;
+	lp->rx_len_bits = (i << 4);
+
+	if (pcnet32_alloc_ring(dev)) {
+		pcnet32_free_ring(dev);
+		return -ENOMEM;
+	}
+
+	spin_unlock_irqrestore(&lp->lock, flags);
+
+	if (pcnet32_debug & NETIF_MSG_DRV)
+		printk(KERN_INFO PFX "Ring Param Settings: RX: %d, TX: %d\n", lp->rx_ring_size, lp->tx_ring_size);
+
+	if (netif_running(dev))
+		pcnet32_open(dev);
+
+	return 0;
 }
 
 static void pcnet32_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -948,6 +1009,7 @@ static struct ethtool_ops pcnet32_ethtool_ops = {
 	.nway_reset		= pcnet32_nway_reset,
 	.get_link		= pcnet32_get_link,
 	.get_ringparam		= pcnet32_get_ringparam,
+	.set_ringparam		= pcnet32_set_ringparam,
 	.get_tx_csum		= ethtool_op_get_tx_csum,
 	.get_sg			= ethtool_op_get_sg,
 	.get_tso		= ethtool_op_get_tso,
@@ -1241,6 +1303,12 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 	dev->priv = lp;
 	lp->name = chipname;
 	lp->shared_irq = shared;
+	lp->tx_ring_size = TX_RING_SIZE;	/* default tx ring size */
+	lp->rx_ring_size = RX_RING_SIZE;	/* default rx ring size */
+	lp->tx_mod_mask = lp->tx_ring_size - 1;
+	lp->rx_mod_mask = lp->rx_ring_size - 1;
+	lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
+	lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
 	lp->mii_if.full_duplex = fdx;
 	lp->mii_if.phy_id_mask = 0x1f;
 	lp->mii_if.reg_num_mask = 0x1f;
@@ -1267,21 +1335,23 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 	}
 	lp->a = *a;
 
+	if (pcnet32_alloc_ring(dev)) {
+		ret = -ENOMEM;
+		goto err_free_ring;
+	}
 	/* detect special T1/E1 WAN card by checking for MAC address */
 	if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0
 			&& dev->dev_addr[2] == 0x75)
 		lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
 
 	lp->init_block.mode = le16_to_cpu(0x0003);	/* Disable Rx and Tx. */
-	lp->init_block.tlen_rlen = le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS);
+	lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
 	for (i = 0; i < 6; i++)
 		lp->init_block.phys_addr[i] = dev->dev_addr[i];
 	lp->init_block.filter[0] = 0x00000000;
 	lp->init_block.filter[1] = 0x00000000;
-	lp->init_block.rx_ring = (u32)le32_to_cpu(lp->dma_addr +
-			offsetof(struct pcnet32_private, rx_ring));
-	lp->init_block.tx_ring = (u32)le32_to_cpu(lp->dma_addr +
-			offsetof(struct pcnet32_private, tx_ring));
+	lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr);
+	lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr);
 
 	/* switch pcnet32 to 32bit mode */
 	a->write_bcr(ioaddr, 20, 2);
@@ -1312,7 +1382,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 		if (pcnet32_debug & NETIF_MSG_PROBE)
 			printk(", failed to detect IRQ line.\n");
 		ret = -ENODEV;
-		goto err_free_consistent;
+		goto err_free_ring;
 	}
 	if (pcnet32_debug & NETIF_MSG_PROBE)
 		printk(", probed IRQ %d.\n", dev->irq);
@@ -1343,7 +1413,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 
 	/* Fill in the generic fields of the device structure. */
 	if (register_netdev(dev))
-		goto err_free_consistent;
+		goto err_free_ring;
 
 	if (pdev) {
 		pci_set_drvdata(pdev, dev);
@@ -1361,6 +1431,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 
 	return 0;
 
+err_free_ring:
+	pcnet32_free_ring(dev);
 err_free_consistent:
 	pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
 err_free_netdev:
@@ -1371,6 +1443,86 @@ err_release_region:
 }
 
 
+static int pcnet32_alloc_ring(struct net_device *dev)
+{
+	struct pcnet32_private *lp = dev->priv;
+
+	if ((lp->tx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
+			&lp->tx_ring_dma_addr)) == NULL) {
+		if (pcnet32_debug & NETIF_MSG_DRV)
+			printk(KERN_ERR PFX "Consistent memory allocation failed.\n");
+		return -ENOMEM;
+	}
+
+	if ((lp->rx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
+			&lp->rx_ring_dma_addr)) == NULL) {
+		if (pcnet32_debug & NETIF_MSG_DRV)
+			printk(KERN_ERR PFX "Consistent memory allocation failed.\n");
+		return -ENOMEM;
+	}
+
+	if (!(lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size, GFP_ATOMIC))) {
+		if (pcnet32_debug & NETIF_MSG_DRV)
+			printk(KERN_ERR PFX "Memory allocation failed.\n");
+		return -ENOMEM;
+	}
+	memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size);
+
+	if (!(lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size, GFP_ATOMIC))) {
+		if (pcnet32_debug & NETIF_MSG_DRV)
+			printk(KERN_ERR PFX "Memory allocation failed.\n");
+		return -ENOMEM;
+	}
+	memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size);
+
+	if (!(lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size, GFP_ATOMIC))) {
+		if (pcnet32_debug & NETIF_MSG_DRV)
+			printk(KERN_ERR PFX "Memory allocation failed.\n");
+		return -ENOMEM;
+	}
+	memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size);
+
+	if (!(lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size, GFP_ATOMIC))) {
+		if (pcnet32_debug & NETIF_MSG_DRV)
+			printk(KERN_ERR PFX "Memory allocation failed.\n");
+		return -ENOMEM;
+	}
+	memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size);
+
+	return 0;
+}
+
+
+static void pcnet32_free_ring(struct net_device *dev)
+{
+	struct pcnet32_private *lp = dev->priv;
+
+	kfree(lp->tx_skbuff);
+	lp->tx_skbuff = NULL;
+
+	kfree(lp->rx_skbuff);
+	lp->rx_skbuff = NULL;
+
+	kfree(lp->tx_dma_addr);
+	lp->tx_dma_addr = NULL;
+
+	kfree(lp->rx_dma_addr);
+	lp->rx_dma_addr = NULL;
+
+	if (lp->tx_ring) {
+		pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
+				lp->tx_ring, lp->tx_ring_dma_addr);
+		lp->tx_ring = NULL;
+	}
+
+	if (lp->rx_ring) {
+		pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
+				lp->rx_ring, lp->rx_ring_dma_addr);
+		lp->rx_ring = NULL;
+	}
+}
+
+
 static int
 pcnet32_open(struct net_device *dev)
 {
@@ -1402,8 +1554,8 @@ pcnet32_open(struct net_device *dev)
 	if (netif_msg_ifup(lp))
 		printk(KERN_DEBUG "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
 				dev->name, dev->irq,
-				(u32) (lp->dma_addr + offsetof(struct pcnet32_private, tx_ring)),
-				(u32) (lp->dma_addr + offsetof(struct pcnet32_private, rx_ring)),
+				(u32) (lp->tx_ring_dma_addr),
+				(u32) (lp->rx_ring_dma_addr),
 				(u32) (lp->dma_addr + offsetof(struct pcnet32_private, init_block)));
 
 	/* set/reset autoselect bit */
@@ -1523,7 +1675,7 @@ pcnet32_open(struct net_device *dev)
 
 err_free_ring:
 	/* free any allocated skbuffs */
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < lp->rx_ring_size; i++) {
 		lp->rx_ring[i].status = 0;
 		if (lp->rx_skbuff[i]) {
 			pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2,
@@ -1533,6 +1685,9 @@ err_free_ring:
 		lp->rx_skbuff[i] = NULL;
 		lp->rx_dma_addr[i] = 0;
 	}
+
+	pcnet32_free_ring(dev);
+
 	/*
 	 * Switch back to 16bit mode to avoid problems with dumb
 	 * DOS packet driver after a warm reboot
@@ -1564,7 +1719,7 @@ pcnet32_purge_tx_ring(struct net_device *dev)
 	struct pcnet32_private *lp = dev->priv;
 	int i;
 
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < lp->tx_ring_size; i++) {
 		lp->tx_ring[i].status = 0;	/* CPU owns buffer */
 		wmb();	/* Make sure adapter sees owner change */
 		if (lp->tx_skbuff[i]) {
@@ -1589,7 +1744,7 @@ pcnet32_init_ring(struct net_device *dev)
 	lp->cur_rx = lp->cur_tx = 0;
 	lp->dirty_rx = lp->dirty_tx = 0;
 
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < lp->rx_ring_size; i++) {
 		struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
 		if (rx_skbuff == NULL) {
 			if (!(rx_skbuff = lp->rx_skbuff[i] = dev_alloc_skb (PKT_BUF_SZ))) {
@@ -1613,20 +1768,18 @@ pcnet32_init_ring(struct net_device *dev)
 	}
 	/* The Tx buffer address is filled in as needed, but we do need to clear
 	 * the upper ownership bit. */
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < lp->tx_ring_size; i++) {
 		lp->tx_ring[i].status = 0;	/* CPU owns buffer */
 		wmb();	/* Make sure adapter sees owner change */
 		lp->tx_ring[i].base = 0;
 		lp->tx_dma_addr[i] = 0;
 	}
 
-	lp->init_block.tlen_rlen = le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS);
+	lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
 	for (i = 0; i < 6; i++)
 		lp->init_block.phys_addr[i] = dev->dev_addr[i];
-	lp->init_block.rx_ring = (u32)le32_to_cpu(lp->dma_addr +
-			offsetof(struct pcnet32_private, rx_ring));
-	lp->init_block.tx_ring = (u32)le32_to_cpu(lp->dma_addr +
-			offsetof(struct pcnet32_private, tx_ring));
+	lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr);
+	lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr);
 	wmb();	/* Make sure all changes are visible */
 	return 0;
 }
@@ -1684,13 +1837,13 @@ pcnet32_tx_timeout (struct net_device *dev)
 		printk(KERN_DEBUG " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
 			lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
 			lp->cur_rx);
-	for (i = 0 ; i < RX_RING_SIZE; i++)
+	for (i = 0 ; i < lp->rx_ring_size; i++)
 		printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
 			le32_to_cpu(lp->rx_ring[i].base),
 			(-le16_to_cpu(lp->rx_ring[i].buf_length)) & 0xffff,
 			le32_to_cpu(lp->rx_ring[i].msg_length),
 			le16_to_cpu(lp->rx_ring[i].status));
-	for (i = 0 ; i < TX_RING_SIZE; i++)
+	for (i = 0 ; i < lp->tx_ring_size; i++)
 		printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
 			le32_to_cpu(lp->tx_ring[i].base),
 			(-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
@@ -1731,7 +1884,7 @@ pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Fill in a Tx ring entry */
 
 	/* Mask to ring buffer boundary. */
-	entry = lp->cur_tx & TX_RING_MOD_MASK;
+	entry = lp->cur_tx & lp->tx_mod_mask;
 
 	/* Caution: the write order is important here, set the status
 	 * with the "ownership" bits last. */
@@ -1755,7 +1908,7 @@ pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	dev->trans_start = jiffies;
 
-	if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base != 0) {
+	if (lp->tx_ring[(entry+1) & lp->tx_mod_mask].base != 0) {
 		lp->tx_full = 1;
 		netif_stop_queue(dev);
 	}
@@ -1808,7 +1961,7 @@ pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
 		int delta;
 
 		while (dirty_tx != lp->cur_tx) {
-			int entry = dirty_tx & TX_RING_MOD_MASK;
+			int entry = dirty_tx & lp->tx_mod_mask;
 			int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
 
 			if (status < 0)
@@ -1866,18 +2019,18 @@ pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
 			dirty_tx++;
 		}
 
-		delta = (lp->cur_tx - dirty_tx) & (TX_RING_MOD_MASK + TX_RING_SIZE);
-		if (delta > TX_RING_SIZE) {
+		delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
+		if (delta > lp->tx_ring_size) {
 			if (netif_msg_drv(lp))
 				printk(KERN_ERR "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
 					dev->name, dirty_tx, lp->cur_tx, lp->tx_full);
-			dirty_tx += TX_RING_SIZE;
-			delta -= TX_RING_SIZE;
+			dirty_tx += lp->tx_ring_size;
+			delta -= lp->tx_ring_size;
 		}
 
 		if (lp->tx_full &&
 		    netif_queue_stopped(dev) &&
-		    delta < TX_RING_SIZE - 2) {
+		    delta < lp->tx_ring_size - 2) {
 			/* The ring is no longer full, clear tbusy. */
 			lp->tx_full = 0;
 			netif_wake_queue (dev);
@@ -1934,8 +2087,8 @@ static int
 pcnet32_rx(struct net_device *dev)
 {
 	struct pcnet32_private *lp = dev->priv;
-	int entry = lp->cur_rx & RX_RING_MOD_MASK;
-	int boguscnt = RX_RING_SIZE / 2;
+	int entry = lp->cur_rx & lp->rx_mod_mask;
+	int boguscnt = lp->rx_ring_size / 2;
 
 	/* If we own the next entry, it's a new packet. Send it up. */
 	while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
@@ -2000,12 +2153,12 @@ pcnet32_rx(struct net_device *dev)
 				if (netif_msg_drv(lp))
 					printk(KERN_ERR "%s: Memory squeeze, deferring packet.\n",
 						dev->name);
-				for (i = 0; i < RX_RING_SIZE; i++)
+				for (i = 0; i < lp->rx_ring_size; i++)
 					if ((short)le16_to_cpu(lp->rx_ring[(entry+i)
-						& RX_RING_MOD_MASK].status) < 0)
+						& lp->rx_mod_mask].status) < 0)
 						break;
 
-				if (i > RX_RING_SIZE -2) {
+				if (i > lp->rx_ring_size -2) {
 					lp->stats.rx_dropped++;
 					lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
 					wmb();	/* Make sure adapter sees owner change */
@@ -2043,7 +2196,7 @@ pcnet32_rx(struct net_device *dev)
 		lp->rx_ring[entry].buf_length = le16_to_cpu(2-PKT_BUF_SZ);
 		wmb();	/* Make sure owner changes after all others are visible */
 		lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
-		entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
+		entry = (++lp->cur_rx) & lp->rx_mod_mask;
 		if (--boguscnt <= 0) break;	/* don't stay in loop forever */
 	}
 
@@ -2086,7 +2239,7 @@ pcnet32_close(struct net_device *dev)
 	spin_lock_irqsave(&lp->lock, flags);
 
 	/* free all allocated skbuffs */
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < lp->rx_ring_size; i++) {
 		lp->rx_ring[i].status = 0;
 		wmb();	/* Make sure adapter sees owner change */
 		if (lp->rx_skbuff[i]) {
@@ -2098,7 +2251,7 @@ pcnet32_close(struct net_device *dev)
 		lp->rx_dma_addr[i] = 0;
 	}
 
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < lp->tx_ring_size; i++) {
 		lp->tx_ring[i].status = 0;	/* CPU owns buffer */
 		wmb();	/* Make sure adapter sees owner change */
 		if (lp->tx_skbuff[i]) {
@@ -2267,6 +2420,7 @@ static void __devexit pcnet32_remove_one(struct pci_dev *pdev)
 	struct pcnet32_private *lp = dev->priv;
 
 	unregister_netdev(dev);
+	pcnet32_free_ring(dev);
 	release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
 	pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
 	free_netdev(dev);
@@ -2342,6 +2496,7 @@ static void __exit pcnet32_cleanup_module(void)
 		struct pcnet32_private *lp = pcnet32_dev->priv;
 		next_dev = lp->next;
 		unregister_netdev(pcnet32_dev);
+		pcnet32_free_ring(pcnet32_dev);
 		release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
 		pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
 		free_netdev(pcnet32_dev);