Diffstat (limited to 'drivers')
 drivers/net/Kconfig     |  16 ++
 drivers/net/forcedeth.c | 140 +++++++++++++++---
 2 files changed, 140 insertions(+), 16 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 3a0d80b28503..ecfbd1c2408c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1411,6 +1411,22 @@ config FORCEDETH
 	  <file:Documentation/networking/net-modules.txt>.  The module will be
 	  called forcedeth.
 
+config FORCEDETH_NAPI
+	bool "Use Rx and Tx Polling (NAPI) (EXPERIMENTAL)"
+	depends on FORCEDETH && EXPERIMENTAL
+	help
+	  NAPI is a new driver API designed to reduce CPU and interrupt load
+	  when the driver is receiving lots of packets from the card. It is
+	  still somewhat experimental and thus not yet enabled by default.
+
+	  If your estimated Rx load is 10kpps or more, or if the card will be
+	  deployed on potentially unfriendly networks (e.g. in a firewall),
+	  then say Y here.
+
+	  See <file:Documentation/networking/NAPI_HOWTO.txt> for more
+	  information.
+
+	  If in doubt, say N.
 
 config CS89x0
 	tristate "CS89x0 support"
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 26fc00a9ff83..a2aca92e8b2a 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -121,6 +121,11 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
+#ifdef CONFIG_FORCEDETH_NAPI
+#define DRIVERNAPI "-NAPI"
+#else
+#define DRIVERNAPI
+#endif
 #define FORCEDETH_VERSION	"0.57"
 #define DRV_NAME		"forcedeth"
 
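
The DRIVERNAPI macro makes the build flavour visible by expanding to "-NAPI" only when the option is compiled in. The hunk does not show where it is consumed; a minimal sketch, assuming the driver's banner is built by adjacent-literal concatenation (this printk is illustrative, not part of the patch):

	/* hypothetical banner; DRIVERNAPI concatenates to "" or "-NAPI" */
	printk(KERN_INFO "forcedeth: version " FORCEDETH_VERSION DRIVERNAPI " loaded\n");
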
@@ -1279,6 +1284,16 @@ static int nv_alloc_rx(struct net_device *dev)
 	return 0;
 }
 
+/* Called after 50ms if rx buffers are exhausted, to attempt a refill */
+#ifdef CONFIG_FORCEDETH_NAPI
+static void nv_do_rx_refill(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *) data;
+
+	/* Just reschedule NAPI rx processing */
+	netif_rx_schedule(dev);
+}
+#else
 static void nv_do_rx_refill(unsigned long data)
 {
 	struct net_device *dev = (struct net_device *) data;
@@ -1307,6 +1322,7 @@ static void nv_do_rx_refill(unsigned long data)
 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
 }
+#endif
 
 static void nv_init_rx(struct net_device *dev)
 {
@@ -1742,13 +1758,14 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
 	}
 }
 
-static void nv_rx_process(struct net_device *dev)
+static int nv_rx_process(struct net_device *dev, int limit)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u32 flags;
 	u32 vlanflags = 0;
+	int count;
 
-	for (;;) {
+	for (count = 0; count < limit; ++count) {
 		struct sk_buff *skb;
 		int len;
 		int i;
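
Giving nv_rx_process() a limit and a return count is the core of the conversion: the caller can now tell "ring drained" from "budget exhausted", which is exactly the test the poll handler makes later in this diff:

	/* how the count is consumed (see nv_napi_poll below) */
	pkts = nv_rx_process(dev, limit);
	if (pkts < limit)
		/* fewer packets than allowed: the ring is empty, rx is idle */
		netif_rx_complete(dev);
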
@@ -1882,17 +1899,27 @@ static void nv_rx_process(struct net_device *dev)
 		skb->protocol = eth_type_trans(skb, dev);
 		dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
 					dev->name, np->cur_rx, len, skb->protocol);
-		if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) {
-			vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
-		} else {
+#ifdef CONFIG_FORCEDETH_NAPI
+		if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
+			vlan_hwaccel_receive_skb(skb, np->vlangrp,
+						 vlanflags & NV_RX3_VLAN_TAG_MASK);
+		else
+			netif_receive_skb(skb);
+#else
+		if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
+			vlan_hwaccel_rx(skb, np->vlangrp,
+					vlanflags & NV_RX3_VLAN_TAG_MASK);
+		else
 			netif_rx(skb);
-		}
+#endif
 		dev->last_rx = jiffies;
 		np->stats.rx_packets++;
 		np->stats.rx_bytes += len;
next_pkt:
 		np->cur_rx++;
 	}
+
+	return count;
 }
 
 static void set_bufsize(struct net_device *dev)
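
Only the delivery call differs between the two builds: the NAPI branch runs in softirq context, where packets go up via netif_receive_skb() (or its VLAN-accelerated twin), while the classic branch keeps netif_rx(), which queues to the per-CPU backlog from hard-irq context. A hypothetical helper showing the same dispatch in one place:

	/* illustrative only; the patch open-codes this inside nv_rx_process() */
	static inline void nv_deliver(struct fe_priv *np, struct sk_buff *skb,
				      u32 vlanflags)
	{
		int tagged = np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT);

	#ifdef CONFIG_FORCEDETH_NAPI
		if (tagged)
			vlan_hwaccel_receive_skb(skb, np->vlangrp,
						 vlanflags & NV_RX3_VLAN_TAG_MASK);
		else
			netif_receive_skb(skb);	/* direct, softirq context */
	#else
		if (tagged)
			vlan_hwaccel_rx(skb, np->vlangrp,
					vlanflags & NV_RX3_VLAN_TAG_MASK);
		else
			netif_rx(skb);		/* via backlog, irq context */
	#endif
	}
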
@@ -2378,14 +2405,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 		nv_tx_done(dev);
 		spin_unlock(&np->lock);
 
-		nv_rx_process(dev);
-		if (nv_alloc_rx(dev)) {
-			spin_lock(&np->lock);
-			if (!np->in_shutdown)
-				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-			spin_unlock(&np->lock);
-		}
-
 		if (events & NVREG_IRQ_LINK) {
 			spin_lock(&np->lock);
 			nv_link_irq(dev);
@@ -2405,6 +2424,29 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
 						dev->name, events);
 		}
+#ifdef CONFIG_FORCEDETH_NAPI
+		if (events & NVREG_IRQ_RX_ALL) {
+			netif_rx_schedule(dev);
+
+			/* Disable further receive irqs */
+			spin_lock(&np->lock);
+			np->irqmask &= ~NVREG_IRQ_RX_ALL;
+
+			if (np->msi_flags & NV_MSI_X_ENABLED)
+				writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+			else
+				writel(np->irqmask, base + NvRegIrqMask);
+			spin_unlock(&np->lock);
+		}
+#else
+		nv_rx_process(dev, dev->weight);
+		if (nv_alloc_rx(dev)) {
+			spin_lock(&np->lock);
+			if (!np->in_shutdown)
+				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+			spin_unlock(&np->lock);
+		}
+#endif
 		if (i > max_interrupt_work) {
 			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
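
This is the canonical old-NAPI interrupt shape: acknowledge the rx event, schedule the poll, and mask further rx interrupts at the NIC so the handler does not re-fire while the poll drains the ring. Reduced to a sketch (RX_EVENTS and nv_mask_rx_irqs are hypothetical simplifications):

	/* irq-side half of the NAPI handshake */
	if (events & RX_EVENTS) {
		netif_rx_schedule(dev);		/* queue dev->poll on this CPU */
		nv_mask_rx_irqs(dev);		/* np->irqmask &= ~RX; write mask reg */
	}
	/* nv_napi_poll() undoes the mask once the ring is drained */
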
@@ -2476,6 +2518,63 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
 	return IRQ_RETVAL(i);
 }
 
+#ifdef CONFIG_FORCEDETH_NAPI
+static int nv_napi_poll(struct net_device *dev, int *budget)
+{
+	int pkts, limit = min(*budget, dev->quota);
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+
+	pkts = nv_rx_process(dev, limit);
+
+	if (nv_alloc_rx(dev)) {
+		spin_lock_irq(&np->lock);
+		if (!np->in_shutdown)
+			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+		spin_unlock_irq(&np->lock);
+	}
+
+	if (pkts < limit) {
+		/* all done, no more packets present */
+		netif_rx_complete(dev);
+
+		/* re-enable receive interrupts */
+		spin_lock_irq(&np->lock);
+		np->irqmask |= NVREG_IRQ_RX_ALL;
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+		else
+			writel(np->irqmask, base + NvRegIrqMask);
+		spin_unlock_irq(&np->lock);
+		return 0;
+	} else {
+		/* used up our quantum, so reschedule */
+		dev->quota -= pkts;
+		*budget -= pkts;
+		return 1;
+	}
+}
+#endif
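
nv_napi_poll() follows the 2.6.x dev->poll contract: consume at most min(*budget, dev->quota) packets; if the ring empties first, call netif_rx_complete() and return 0 so the device leaves the poll list; otherwise charge the work to both counters and return 1 to stay scheduled. The skeleton, independent of this driver (process_rx and reenable_rx_irqs are hypothetical):

	static int example_poll(struct net_device *dev, int *budget)
	{
		int limit = min(*budget, dev->quota);
		int done = process_rx(dev, limit);	/* hypothetical rx worker */

		if (done < limit) {
			netif_rx_complete(dev);		/* off the poll list */
			reenable_rx_irqs(dev);		/* hypothetical */
			return 0;
		}
		dev->quota -= done;
		*budget -= done;
		return 1;				/* poll me again */
	}
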
+
+#ifdef CONFIG_FORCEDETH_NAPI
+static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) data;
+	u8 __iomem *base = get_hwbase(dev);
+	u32 events;
+
+	events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
+	writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
+
+	if (events) {
+		netif_rx_schedule(dev);
+		/* disable receive interrupts on the nic */
+		writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+		pci_push(base);
+	}
+	return IRQ_HANDLED;
+}
+#else
 static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 {
 	struct net_device *dev = (struct net_device *) data;
@@ -2494,7 +2593,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 		if (!(events & np->irqmask))
 			break;
 
-		nv_rx_process(dev);
+		nv_rx_process(dev, dev->weight);
 		if (nv_alloc_rx(dev)) {
 			spin_lock_irq(&np->lock);
 			if (!np->in_shutdown)
@@ -2516,12 +2615,12 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 			spin_unlock_irq(&np->lock);
 			break;
 		}
-
 	}
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
 
 	return IRQ_RETVAL(i);
 }
+#endif
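
Note that netif_rx_schedule() may now be called from several places (the shared irq handler, the MSI-X rx handler, and the oom_kick timer via nv_do_rx_refill); that is safe because in this era it was a test-and-set wrapper, roughly:

	/* simplified view of the 2.6.x core helpers (not driver code) */
	if (netif_rx_schedule_prep(dev))	/* fails if already scheduled */
		__netif_rx_schedule(dev);	/* add dev to this CPU's poll list */
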
 
 static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 {
@@ -3755,6 +3854,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
 	if (test->flags & ETH_TEST_FL_OFFLINE) {
 		if (netif_running(dev)) {
 			netif_stop_queue(dev);
+			netif_poll_disable(dev);
 			netif_tx_lock_bh(dev);
 			spin_lock_irq(&np->lock);
 			nv_disable_hw_interrupts(dev, np->irqmask);
@@ -3813,6 +3913,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
 			nv_start_rx(dev);
 			nv_start_tx(dev);
 			netif_start_queue(dev);
+			netif_poll_enable(dev);
 			nv_enable_hw_interrupts(dev, np->irqmask);
 		}
 	}
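
The offline self-test resets the hardware under the driver's feet, so the poll handler must be parked first: netif_poll_disable() blocks until any in-flight ->poll finishes and prevents new scheduling until netif_poll_enable(). The bracketing used by the patch, as a sketch:

	/* quiesce for the offline test; the bring-up path mirrors it */
	netif_stop_queue(dev);		/* no new tx */
	netif_poll_disable(dev);	/* wait out any running ->poll */
	/* ... disruptive register/loopback tests ... */
	netif_start_queue(dev);
	netif_poll_enable(dev);		/* ->poll may be scheduled again */
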
@@ -4016,6 +4117,8 @@ static int nv_open(struct net_device *dev)
 	nv_start_rx(dev);
 	nv_start_tx(dev);
 	netif_start_queue(dev);
+	netif_poll_enable(dev);
+
 	if (ret) {
 		netif_carrier_on(dev);
 	} else {
@@ -4045,6 +4148,7 @@ static int nv_close(struct net_device *dev)
 	spin_lock_irq(&np->lock);
 	np->in_shutdown = 1;
 	spin_unlock_irq(&np->lock);
+	netif_poll_disable(dev);
 	synchronize_irq(dev->irq);
 
 	del_timer_sync(&np->oom_kick);
@@ -4260,6 +4364,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	dev->poll_controller = nv_poll_controller;
 #endif
+	dev->weight = 64;
+#ifdef CONFIG_FORCEDETH_NAPI
+	dev->poll = nv_napi_poll;
+#endif
 	SET_ETHTOOL_OPS(dev, &ops);
 	dev->tx_timeout = nv_tx_timeout;
 	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
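
dev->weight = 64 is set outside the #ifdef on purpose: under NAPI it is the per-poll quota refill, and the non-NAPI interrupt paths were changed above to use the same value as their per-pass bound, so one knob limits rx batching in both builds. The two call sites, side by side:

	/* the shared bound, as used by the hunks above */
	nv_rx_process(dev, dev->weight);			/* !NAPI: irq handlers */
	pkts = nv_rx_process(dev, min(*budget, dev->quota));	/* NAPI: nv_napi_poll() */
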