author     Francois Romieu <romieu@fr.zoreil.com>    2006-12-17 17:03:15 -0500
committer  Jeff Garzik <jeff@garzik.org>             2006-12-26 16:24:11 -0500
commit     d15e9c4d9a75702b30e00cdf95c71c88e3f3f51e
tree       e7ba1469eac6f732a7d2b9debc7713d8173dc0a2
parent     79f3d3996f06ee339c6f173e573826eccd3914ab
netpoll: drivers must not enable IRQ unconditionally in their NAPI handler
net/core/netpoll.c::netpoll_send_skb() calls the poll handler when it is
available. As netconsole can be used from almost any context, IRQs must not
be enabled blindly in the NAPI handler of a driver which supports netpoll.

Commit b57bd06655a028aba7b92e1c19c2093e7fcfb341 fixed the same issue in the
8139too.c driver.

Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
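For drivers with the same problem, the generic fix looks like the sketch
below. This is not the forcedeth code: my_poll(), struct my_priv,
my_rx_process() and the MY_IRQ_* names are made-up placeholders. The sketch
only illustrates the spin_lock_irqsave()/spin_unlock_irqrestore() pairing
that keeps an old-style (dev->poll) NAPI handler safe when netpoll invokes
it with interrupts already disabled.

/*
 * Minimal sketch, not a real driver: my_priv, my_rx_process() and the
 * MY_* register names are hypothetical. The point is that the handler
 * must restore the caller's IRQ state rather than unconditionally
 * re-enabling interrupts with spin_unlock_irq().
 */
#include <linux/netdevice.h>
#include <linux/spinlock.h>

static int my_poll(struct net_device *dev, int *budget)
{
	struct my_priv *np = netdev_priv(dev);
	int limit = min(*budget, dev->quota);
	int pkts = my_rx_process(dev, limit);	/* hypothetical RX loop */
	unsigned long flags;

	if (pkts < limit) {
		netif_rx_complete(dev);

		/*
		 * netpoll can call this handler with IRQs already off,
		 * so save and restore the IRQ state around the lock.
		 */
		spin_lock_irqsave(&np->lock, flags);
		np->irqmask |= MY_IRQ_RX;
		writel(np->irqmask, np->base + MY_IRQ_MASK_REG);
		spin_unlock_irqrestore(&np->lock, flags);
		return 0;		/* done, RX interrupts re-armed */
	}

	/* quota used up: account for it and ask to be polled again */
	*budget -= pkts;
	dev->quota -= pkts;
	return 1;
}

The only behavioural difference from the spin_lock_irq() version is that
the handler leaves the interrupt state exactly as it found it, which is
what the netpoll path requires.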
Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r--  drivers/net/forcedeth.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 439f41338291..820f8c798420 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -2576,14 +2576,15 @@ static int nv_napi_poll(struct net_device *dev, int *budget)
 	int pkts, limit = min(*budget, dev->quota);
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
+	unsigned long flags;
 
 	pkts = nv_rx_process(dev, limit);
 
 	if (nv_alloc_rx(dev)) {
-		spin_lock_irq(&np->lock);
+		spin_lock_irqsave(&np->lock, flags);
 		if (!np->in_shutdown)
 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-		spin_unlock_irq(&np->lock);
+		spin_unlock_irqrestore(&np->lock, flags);
 	}
 
 	if (pkts < limit) {
@@ -2591,13 +2592,15 @@ static int nv_napi_poll(struct net_device *dev, int *budget)
 		netif_rx_complete(dev);
 
 		/* re-enable receive interrupts */
-		spin_lock_irq(&np->lock);
+		spin_lock_irqsave(&np->lock, flags);
+
 		np->irqmask |= NVREG_IRQ_RX_ALL;
 		if (np->msi_flags & NV_MSI_X_ENABLED)
 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
 		else
 			writel(np->irqmask, base + NvRegIrqMask);
-		spin_unlock_irq(&np->lock);
+
+		spin_unlock_irqrestore(&np->lock, flags);
 		return 0;
 	} else {
 		/* used up our quantum, so reschedule */