Diffstat (limited to 'drivers/net/forcedeth.c')
 drivers/net/forcedeth.c | 231
 1 file changed, 138 insertions(+), 93 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 22aec6ed80f5..525624fc03b4 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -80,7 +80,7 @@
  *			   into nv_close, otherwise reenabling for wol can
  *			   cause DMA to kfree'd memory.
  *	0.31: 14 Nov 2004: ethtool support for getting/setting link
  *			   capabilities.
  *	0.32: 16 Apr 2005: RX_ERROR4 handling added.
  *	0.33: 16 May 2005: Support for MCP51 added.
  *	0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
@@ -89,14 +89,17 @@
  *	0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
  *	0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
  *			   per-packet flags.
  *	0.39: 18 Jul 2005: Add 64bit descriptor support.
  *	0.40: 19 Jul 2005: Add support for mac address change.
  *	0.41: 30 Jul 2005: Write back original MAC in nv_close instead
  *			   of nv_remove
  *	0.42: 06 Aug 2005: Fix lack of link speed initialization
  *			   in the second (and later) nv_open call
  *	0.43: 10 Aug 2005: Add support for tx checksum.
  *	0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
+ *	0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
+ *	0.46: 20 Oct 2005: Add irq optimization modes.
+ *	0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -108,7 +111,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION		"0.44"
+#define FORCEDETH_VERSION		"0.47"
 #define DRV_NAME			"forcedeth"
 
 #include <linux/module.h>
@@ -163,7 +166,8 @@ enum {
 #define NVREG_IRQ_LINK			0x0040
 #define NVREG_IRQ_TX_ERROR		0x0080
 #define NVREG_IRQ_TX1			0x0100
-#define NVREG_IRQMASK_WANTED		0x00df
+#define NVREG_IRQMASK_THROUGHPUT	0x00df
+#define NVREG_IRQMASK_CPU		0x0040
 
 #define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
 					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX_ERROR| \
@@ -177,7 +181,8 @@ enum {
  * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
  */
 	NvRegPollingInterval = 0x00c,
-#define NVREG_POLL_DEFAULT	970
+#define NVREG_POLL_DEFAULT_THROUGHPUT	970
+#define NVREG_POLL_DEFAULT_CPU	13
 	NvRegMisc1 = 0x080,
 #define NVREG_MISC1_HD		0x02
 #define NVREG_MISC1_FORCE	0x3b0f3c
@@ -538,6 +543,25 @@ struct fe_priv {
  */
 static int max_interrupt_work = 5;
 
+/*
+ * Optimization can be either throughput mode or cpu mode
+ *
+ * Throughput Mode: Every tx and rx packet will generate an interrupt.
+ * CPU Mode: Interrupts are controlled by a timer.
+ */
+#define NV_OPTIMIZATION_MODE_THROUGHPUT	0
+#define NV_OPTIMIZATION_MODE_CPU	1
+static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
+
+/*
+ * Poll interval for timer irq
+ *
+ * This interval determines how frequently an interrupt is generated.
+ * This value is determined by [(time_in_micro_secs * 100) / (2^10)]
+ * Min = 0, and Max = 65535
+ */
+static int poll_interval = -1;
+
 static inline struct fe_priv *get_nvpriv(struct net_device *dev)
 {
 	return netdev_priv(dev);
@@ -1328,67 +1352,71 @@ static void nv_rx_process(struct net_device *dev)
 			if (!(Flags & NV_RX_DESCRIPTORVALID))
 				goto next_pkt;
 
-			if (Flags & NV_RX_MISSEDFRAME) {
-				np->stats.rx_missed_errors++;
-				np->stats.rx_errors++;
-				goto next_pkt;
-			}
-			if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
-				np->stats.rx_errors++;
-				goto next_pkt;
-			}
-			if (Flags & NV_RX_CRCERR) {
-				np->stats.rx_crc_errors++;
-				np->stats.rx_errors++;
-				goto next_pkt;
-			}
-			if (Flags & NV_RX_OVERFLOW) {
-				np->stats.rx_over_errors++;
-				np->stats.rx_errors++;
-				goto next_pkt;
-			}
-			if (Flags & NV_RX_ERROR4) {
-				len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
-				if (len < 0) {
+			if (Flags & NV_RX_ERROR) {
+				if (Flags & NV_RX_MISSEDFRAME) {
+					np->stats.rx_missed_errors++;
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-			}
-			/* framing errors are soft errors. */
-			if (Flags & NV_RX_FRAMINGERR) {
-				if (Flags & NV_RX_SUBSTRACT1) {
-					len--;
+				if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
+					np->stats.rx_errors++;
+					goto next_pkt;
+				}
+				if (Flags & NV_RX_CRCERR) {
+					np->stats.rx_crc_errors++;
+					np->stats.rx_errors++;
+					goto next_pkt;
+				}
+				if (Flags & NV_RX_OVERFLOW) {
+					np->stats.rx_over_errors++;
+					np->stats.rx_errors++;
+					goto next_pkt;
+				}
+				if (Flags & NV_RX_ERROR4) {
+					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
+					if (len < 0) {
+						np->stats.rx_errors++;
+						goto next_pkt;
+					}
+				}
+				/* framing errors are soft errors. */
+				if (Flags & NV_RX_FRAMINGERR) {
+					if (Flags & NV_RX_SUBSTRACT1) {
+						len--;
+					}
 				}
 			}
 		} else {
 			if (!(Flags & NV_RX2_DESCRIPTORVALID))
 				goto next_pkt;
 
-			if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
-				np->stats.rx_errors++;
-				goto next_pkt;
-			}
-			if (Flags & NV_RX2_CRCERR) {
-				np->stats.rx_crc_errors++;
-				np->stats.rx_errors++;
-				goto next_pkt;
-			}
-			if (Flags & NV_RX2_OVERFLOW) {
-				np->stats.rx_over_errors++;
-				np->stats.rx_errors++;
-				goto next_pkt;
-			}
-			if (Flags & NV_RX2_ERROR4) {
-				len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
-				if (len < 0) {
+			if (Flags & NV_RX2_ERROR) {
+				if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-			}
-			/* framing errors are soft errors */
-			if (Flags & NV_RX2_FRAMINGERR) {
-				if (Flags & NV_RX2_SUBSTRACT1) {
-					len--;
+				if (Flags & NV_RX2_CRCERR) {
+					np->stats.rx_crc_errors++;
+					np->stats.rx_errors++;
+					goto next_pkt;
+				}
+				if (Flags & NV_RX2_OVERFLOW) {
+					np->stats.rx_over_errors++;
+					np->stats.rx_errors++;
+					goto next_pkt;
+				}
+				if (Flags & NV_RX2_ERROR4) {
+					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
+					if (len < 0) {
+						np->stats.rx_errors++;
+						goto next_pkt;
+					}
+				}
+				/* framing errors are soft errors */
+				if (Flags & NV_RX2_FRAMINGERR) {
+					if (Flags & NV_RX2_SUBSTRACT1) {
+						len--;
+					}
 				}
 			}
 			Flags &= NV_RX2_CHECKSUMMASK;
@@ -1612,6 +1640,17 @@ static void nv_set_multicast(struct net_device *dev)
 	spin_unlock_irq(&np->lock);
 }
 
+/**
+ * nv_update_linkspeed: Setup the MAC according to the link partner
+ * @dev: Network device to be configured
+ *
+ * The function queries the PHY and checks if there is a link partner.
+ * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
+ * set to 10 MBit HD.
+ *
+ * The function returns 0 if there is no link partner and 1 if there is
+ * a good link partner.
+ */
 static int nv_update_linkspeed(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
@@ -1751,13 +1790,11 @@ set_speed:
 static void nv_linkchange(struct net_device *dev)
 {
 	if (nv_update_linkspeed(dev)) {
-		if (netif_carrier_ok(dev)) {
-			nv_stop_rx(dev);
-		} else {
+		if (!netif_carrier_ok(dev)) {
 			netif_carrier_on(dev);
 			printk(KERN_INFO "%s: link up.\n", dev->name);
+			nv_start_rx(dev);
 		}
-		nv_start_rx(dev);
 	} else {
 		if (netif_carrier_ok(dev)) {
 			netif_carrier_off(dev);
@@ -1799,22 +1836,18 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 		if (!(events & np->irqmask))
 			break;
 
-		if (events & (NVREG_IRQ_TX1|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_ERROR|NVREG_IRQ_TX_ERR)) {
+		spin_lock(&np->lock);
+		nv_tx_done(dev);
+		spin_unlock(&np->lock);
+
+		nv_rx_process(dev);
+		if (nv_alloc_rx(dev)) {
 			spin_lock(&np->lock);
-			nv_tx_done(dev);
+			if (!np->in_shutdown)
+				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
 			spin_unlock(&np->lock);
 		}
 
-		if (events & (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF)) {
-			nv_rx_process(dev);
-			if (nv_alloc_rx(dev)) {
-				spin_lock(&np->lock);
-				if (!np->in_shutdown)
-					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-				spin_unlock(&np->lock);
-			}
-		}
-
 		if (events & NVREG_IRQ_LINK) {
 			spin_lock(&np->lock);
 			nv_link_irq(dev);
@@ -2216,7 +2249,14 @@ static int nv_open(struct net_device *dev)
 	writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
 	writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1);
 	writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2);
-	writel(NVREG_POLL_DEFAULT, base + NvRegPollingInterval);
+	if (poll_interval == -1) {
+		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
+			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
+		else
+			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
+	}
+	else
+		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
 	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
 	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
 		base + NvRegAdapterControl);
@@ -2501,7 +2541,11 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	} else {
 		np->tx_flags = NV_TX2_VALID;
 	}
-	np->irqmask = NVREG_IRQMASK_WANTED;
+	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
+		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
+	else
+		np->irqmask = NVREG_IRQMASK_CPU;
+
 	if (id->driver_data & DEV_NEED_TIMERIRQ)
 		np->irqmask |= NVREG_IRQ_TIMER;
 	if (id->driver_data & DEV_NEED_LINKTIMER) {
@@ -2514,16 +2558,17 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	}
 
 	/* find a suitable phy */
-	for (i = 1; i < 32; i++) {
+	for (i = 1; i <= 32; i++) {
 		int id1, id2;
+		int phyaddr = i & 0x1F;
 
 		spin_lock_irq(&np->lock);
-		id1 = mii_rw(dev, i, MII_PHYSID1, MII_READ);
+		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
 		spin_unlock_irq(&np->lock);
 		if (id1 < 0 || id1 == 0xffff)
 			continue;
 		spin_lock_irq(&np->lock);
-		id2 = mii_rw(dev, i, MII_PHYSID2, MII_READ);
+		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
 		spin_unlock_irq(&np->lock);
 		if (id2 < 0 || id2 == 0xffff)
 			continue;
@@ -2531,23 +2576,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
 		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
 		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
-			pci_name(pci_dev), id1, id2, i);
-		np->phyaddr = i;
+			pci_name(pci_dev), id1, id2, phyaddr);
+		np->phyaddr = phyaddr;
 		np->phy_oui = id1 | id2;
 		break;
 	}
-	if (i == 32) {
-		/* PHY in isolate mode? No phy attached and user wants to
-		 * test loopback? Very odd, but can be correct.
-		 */
+	if (i == 33) {
 		printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
 		       pci_name(pci_dev));
-	}
-
-	if (i != 32) {
-		/* reset it */
-		phy_init(dev);
+		goto out_freering;
 	}
+
+	/* reset it */
+	phy_init(dev);
 
 	/* set default link speed settings */
 	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
@@ -2689,6 +2730,10 @@ static void __exit exit_nic(void)
 
 module_param(max_interrupt_work, int, 0);
 MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
+module_param(optimization_mode, int, 0);
+MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
+module_param(poll_interval, int, 0);
+MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
 
 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
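
A note on the poll_interval encoding used by this patch: the new module parameter is written straight into NvRegPollingInterval by nv_open(), and the driver comments give the conversion as [(time_in_micro_secs * 100) / (2^10)], with NVREG_POLL_DEFAULT=97 corresponding to roughly 1 ms. The small stand-alone C sketch below only illustrates that arithmetic; the helper name and the printed examples are this note's own invention and are not part of the patch.

#include <stdio.h>

/* Illustrative helper (not from the driver): convert a desired timer-irq
 * interval in microseconds into the value written to NvRegPollingInterval,
 * following the comment's formula value = (time_in_micro_secs * 100) / (2^10),
 * clamped to the documented 0..65535 range. */
static unsigned int nv_usecs_to_poll_interval(unsigned long usecs)
{
	unsigned long val = (usecs * 100) / 1024;

	return val > 65535 ? 65535 : (unsigned int)val;
}

int main(void)
{
	/* 1000 us -> 97, matching the "NVREG_POLL_DEFAULT=97 ... 1 ms" comment;
	 * by the same formula, NVREG_POLL_DEFAULT_THROUGHPUT (970) is roughly
	 * 10 ms and NVREG_POLL_DEFAULT_CPU (13) is roughly 130 us. */
	printf("1 ms  -> %u\n", nv_usecs_to_poll_interval(1000));
	printf("10 ms -> %u\n", nv_usecs_to_poll_interval(10000));
	return 0;
}

When the driver is loaded with optimization_mode=1 and a non-negative poll_interval, that value is what nv_open() programs into NvRegPollingInterval, so interrupts are driven by the timer rather than by individual packets, as the added comment block describes.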