author	Ayaz Abdulla <aabdulla@nvidia.com>	2005-11-11 08:30:38 -0500
committer	Jeff Garzik <jgarzik@pobox.com>	2005-11-11 08:30:38 -0500
commit	a971c32488569b5443c48168756e8ccfb0862c50 (patch)
tree	4603c7507cf5a643fca7990ac660c62b75c6116e /drivers
parent	4ea7f299bba0e4331bdbec4e1c1b90463478180d (diff)
[netdrvr forcedeth] support for irq mitigation
This patch adds support for different interrupt mitigation modes in forcedeth. It includes changes based on Jeff's comments. For now, the modes are selected through module parameters, since ethtool does not yet support anything comparable.

Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
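Usage sketch (illustrative, not part of the commit message; both parameters are defined by this patch, and poll_interval=97 corresponds to roughly 1 ms per the in-driver comment):

	modprobe forcedeth optimization_mode=1 poll_interval=97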
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/forcedeth.c	171
1 file changed, 104 insertions(+), 67 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 6364adbd73a1..731d0d7b2f37 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -98,6 +98,7 @@
  * 0.43: 10 Aug 2005: Add support for tx checksum.
  * 0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
  * 0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
+ * 0.46: 20 Oct 2005: Add irq optimization modes.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -109,7 +110,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION		"0.45"
+#define FORCEDETH_VERSION		"0.46"
 #define DRV_NAME			"forcedeth"
 
 #include <linux/module.h>
@@ -164,7 +165,8 @@ enum {
 #define NVREG_IRQ_LINK			0x0040
 #define NVREG_IRQ_TX_ERROR		0x0080
 #define NVREG_IRQ_TX1			0x0100
-#define NVREG_IRQMASK_WANTED		0x00df
+#define NVREG_IRQMASK_THROUGHPUT	0x00df
+#define NVREG_IRQMASK_CPU		0x0040
 
 #define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
 					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX_ERROR| \
@@ -178,7 +180,8 @@ enum {
  * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
  */
 	NvRegPollingInterval = 0x00c,
-#define NVREG_POLL_DEFAULT	970
+#define NVREG_POLL_DEFAULT_THROUGHPUT	970
+#define NVREG_POLL_DEFAULT_CPU	13
 	NvRegMisc1 = 0x080,
 #define NVREG_MISC1_HD		0x02
 #define NVREG_MISC1_FORCE	0x3b0f3c
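A quick sanity check on these defaults, using the conversion documented later in this patch (register value = (time_in_micro_secs * 100) / 2^10): the throughput-mode value of 970 works out to 970 * 1024 / 100 ≈ 9933 us, about 10 ms, while the CPU-mode value of 13 works out to 13 * 1024 / 100 ≈ 133 us. The existing comment's example of 97 ≈ 1 ms is consistent with the same formula.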
@@ -539,6 +542,25 @@ struct fe_priv {
  */
 static int max_interrupt_work = 5;
 
+/*
+ * Optimization can be either throughput mode or cpu mode
+ *
+ * Throughput Mode: Every tx and rx packet will generate an interrupt.
+ * CPU Mode: Interrupts are controlled by a timer.
+ */
+#define NV_OPTIMIZATION_MODE_THROUGHPUT	0
+#define NV_OPTIMIZATION_MODE_CPU	1
+static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
+
+/*
+ * Poll interval for timer irq
+ *
+ * This interval determines how frequently an interrupt is generated.
+ * The value is computed as [(time_in_micro_secs * 100) / (2^10)].
+ * Min = 0, Max = 65535
+ */
+static int poll_interval = -1;
+
 static inline struct fe_priv *get_nvpriv(struct net_device *dev)
 {
 	return netdev_priv(dev);
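The conversion above is easy to invert incorrectly, so here is a minimal sketch of both directions; the helper names are hypothetical and not part of the patch:

	#include <linux/types.h>	/* u32 */

	/* Hypothetical helpers illustrating the comment above; 1024 is 2^10. */
	static inline u32 nv_poll_us_to_reg(u32 us)
	{
		return (us * 100) / 1024;	/* e.g. 1000 us -> 97, 9933 us -> 970 */
	}

	static inline u32 nv_poll_reg_to_us(u32 reg)
	{
		return (reg * 1024) / 100;	/* e.g. 13 -> 133 us (CPU-mode default) */
	}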
@@ -1329,67 +1351,71 @@ static void nv_rx_process(struct net_device *dev)
 			if (!(Flags & NV_RX_DESCRIPTORVALID))
 				goto next_pkt;
 
-			if (Flags & NV_RX_MISSEDFRAME) {
-				np->stats.rx_missed_errors++;
-				np->stats.rx_errors++;
-				goto next_pkt;
-			}
-			if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
-				np->stats.rx_errors++;
-				goto next_pkt;
-			}
-			if (Flags & NV_RX_CRCERR) {
-				np->stats.rx_crc_errors++;
-				np->stats.rx_errors++;
-				goto next_pkt;
-			}
-			if (Flags & NV_RX_OVERFLOW) {
-				np->stats.rx_over_errors++;
-				np->stats.rx_errors++;
-				goto next_pkt;
-			}
-			if (Flags & NV_RX_ERROR4) {
-				len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
-				if (len < 0) {
-					np->stats.rx_errors++;
-					goto next_pkt;
-				}
-			}
-			/* framing errors are soft errors. */
-			if (Flags & NV_RX_FRAMINGERR) {
-				if (Flags & NV_RX_SUBSTRACT1) {
-					len--;
-				}
-			}
+			if (Flags & NV_RX_ERROR) {
+				if (Flags & NV_RX_MISSEDFRAME) {
+					np->stats.rx_missed_errors++;
+					np->stats.rx_errors++;
+					goto next_pkt;
+				}
+				if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
+					np->stats.rx_errors++;
+					goto next_pkt;
+				}
+				if (Flags & NV_RX_CRCERR) {
+					np->stats.rx_crc_errors++;
+					np->stats.rx_errors++;
+					goto next_pkt;
+				}
+				if (Flags & NV_RX_OVERFLOW) {
+					np->stats.rx_over_errors++;
+					np->stats.rx_errors++;
+					goto next_pkt;
+				}
+				if (Flags & NV_RX_ERROR4) {
+					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
+					if (len < 0) {
+						np->stats.rx_errors++;
+						goto next_pkt;
+					}
+				}
+				/* framing errors are soft errors. */
+				if (Flags & NV_RX_FRAMINGERR) {
+					if (Flags & NV_RX_SUBSTRACT1) {
+						len--;
+					}
+				}
+			}
 		} else {
 			if (!(Flags & NV_RX2_DESCRIPTORVALID))
 				goto next_pkt;
 
-			if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
-				np->stats.rx_errors++;
-				goto next_pkt;
-			}
-			if (Flags & NV_RX2_CRCERR) {
-				np->stats.rx_crc_errors++;
-				np->stats.rx_errors++;
-				goto next_pkt;
-			}
-			if (Flags & NV_RX2_OVERFLOW) {
-				np->stats.rx_over_errors++;
-				np->stats.rx_errors++;
-				goto next_pkt;
-			}
-			if (Flags & NV_RX2_ERROR4) {
-				len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
-				if (len < 0) {
-					np->stats.rx_errors++;
-					goto next_pkt;
-				}
-			}
-			/* framing errors are soft errors */
-			if (Flags & NV_RX2_FRAMINGERR) {
-				if (Flags & NV_RX2_SUBSTRACT1) {
-					len--;
-				}
-			}
+			if (Flags & NV_RX2_ERROR) {
+				if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
+					np->stats.rx_errors++;
+					goto next_pkt;
+				}
+				if (Flags & NV_RX2_CRCERR) {
+					np->stats.rx_crc_errors++;
+					np->stats.rx_errors++;
+					goto next_pkt;
+				}
+				if (Flags & NV_RX2_OVERFLOW) {
+					np->stats.rx_over_errors++;
+					np->stats.rx_errors++;
+					goto next_pkt;
+				}
+				if (Flags & NV_RX2_ERROR4) {
+					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
+					if (len < 0) {
+						np->stats.rx_errors++;
+						goto next_pkt;
+					}
+				}
+				/* framing errors are soft errors */
+				if (Flags & NV_RX2_FRAMINGERR) {
+					if (Flags & NV_RX2_SUBSTRACT1) {
+						len--;
+					}
+				}
+			}
 		}
 		Flags &= NV_RX2_CHECKSUMMASK;
@@ -1809,22 +1835,18 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 		if (!(events & np->irqmask))
 			break;
 
-		if (events & (NVREG_IRQ_TX1|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_ERROR|NVREG_IRQ_TX_ERR)) {
-			spin_lock(&np->lock);
-			nv_tx_done(dev);
-			spin_unlock(&np->lock);
-		}
+		spin_lock(&np->lock);
+		nv_tx_done(dev);
+		spin_unlock(&np->lock);
 
-		if (events & (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF)) {
-			nv_rx_process(dev);
-			if (nv_alloc_rx(dev)) {
-				spin_lock(&np->lock);
-				if (!np->in_shutdown)
-					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-				spin_unlock(&np->lock);
-			}
-		}
+		nv_rx_process(dev);
+		if (nv_alloc_rx(dev)) {
+			spin_lock(&np->lock);
+			if (!np->in_shutdown)
+				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+			spin_unlock(&np->lock);
+		}
 
 		if (events & NVREG_IRQ_LINK) {
 			spin_lock(&np->lock);
 			nv_link_irq(dev);
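Worth spelling out why this hunk is needed (reasoning, not patch text): with NVREG_IRQMASK_CPU the per-packet tx/rx event bits are masked off, so the handler can no longer gate nv_tx_done() and nv_rx_process() on those bits; every interrupt that does arrive, including a bare timer interrupt, now performs a full tx/rx pass.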
@@ -2226,7 +2248,14 @@ static int nv_open(struct net_device *dev)
 	writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
 	writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1);
 	writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2);
-	writel(NVREG_POLL_DEFAULT, base + NvRegPollingInterval);
+	if (poll_interval == -1) {
+		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
+			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
+		else
+			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
+	}
+	else
+		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
 	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
 	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
 		base + NvRegAdapterControl);
@@ -2511,7 +2540,11 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	} else {
 		np->tx_flags = NV_TX2_VALID;
 	}
-	np->irqmask = NVREG_IRQMASK_WANTED;
+	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
+		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
+	else
+		np->irqmask = NVREG_IRQMASK_CPU;
+
 	if (id->driver_data & DEV_NEED_TIMERIRQ)
 		np->irqmask |= NVREG_IRQ_TIMER;
 	if (id->driver_data & DEV_NEED_LINKTIMER) {
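A reading aid for the mask choice (a sketch built only from the defines earlier in this diff, not code from the patch): NVREG_IRQMASK_CPU is 0x0040, the same value as NVREG_IRQ_LINK, so on a board flagged DEV_NEED_TIMERIRQ the effective CPU-mode mask reduces to link-change plus timer events:

	/* Illustrative only: effective CPU-mode mask on a DEV_NEED_TIMERIRQ board */
	np->irqmask = NVREG_IRQMASK_CPU | NVREG_IRQ_TIMER;	/* link + timer */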
@@ -2699,6 +2732,10 @@ static void __exit exit_nic(void)
 
 module_param(max_interrupt_work, int, 0);
 MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
+module_param(optimization_mode, int, 0);
+MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
+module_param(poll_interval, int, 0);
+MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
 
 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");