author      Sergei Shtylyov <sshtylyov@ru.mvista.com>    2007-03-05 15:10:08 -0500
committer   Jeff Garzik <jeff@garzik.org>                2007-03-06 06:10:01 -0500
commit      6006f7f517b9a754e4c4628755c62872e322c68a (patch)
tree        9ab2f93536aeb08f7c2a01d2ce9954076defe8ce
parent      a816c7c712ff9f6770168b91facb9bfa9f0acd48 (diff)
natsemi: netpoll fixes
Fix two issues in this driver's netpoll path: one usual, with spin_unlock_irq()
enabling interrupts which nobody asked it to do (that has recently been fixed in
a number of drivers), and one unusual, with the poll_controller() method possibly
causing loss of interrupts, since the interrupt status register is cleared by a
mere read and the interrupt handler simply stores the value rather than
accumulating it.
Signed-off-by: Sergei Shtylyov <sshtylyov@ru.mvista.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
-rw-r--r--   drivers/net/natsemi.c   24
1 files changed, 19 insertions, 5 deletions
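
The locking half of the fix follows from how netpoll behaves: it can call into the
driver's transmit and receive paths with interrupts already disabled, so
spin_unlock_irq(), which unconditionally re-enables interrupts, would clobber the
caller's IRQ state. The irqsave/irqrestore pair preserves whatever state the caller
had. Below is a minimal sketch of that pattern using illustrative names (demo_lock,
demo_tx_path); it is not natsemi code.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* May run in process context or from netpoll with IRQs already off. */
static void demo_tx_path(void)
{
	unsigned long flags;

	/*
	 * spin_lock_irq()/spin_unlock_irq() would blindly re-enable
	 * interrupts on unlock, corrupting the state of a caller that
	 * entered with them disabled.  The save/restore variant hands
	 * back the IRQ state exactly as it was found.
	 */
	spin_lock_irqsave(&demo_lock, flags);
	/* ... touch the TX ring ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}
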
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 5c57433cb306..c6172a77a6d7 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -2024,6 +2024,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem * ioaddr = ns_ioaddr(dev);
 	unsigned entry;
+	unsigned long flags;
 
 	/* Note: Ordering is important here, set the field with the
 	   "ownership" bit last, and only then increment cur_tx. */
@@ -2037,7 +2038,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
 
 	np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);
 
-	spin_lock_irq(&np->lock);
+	spin_lock_irqsave(&np->lock, flags);
 
 	if (!np->hands_off) {
 		np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
@@ -2056,7 +2057,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
 		dev_kfree_skb_irq(skb);
 		np->stats.tx_dropped++;
 	}
-	spin_unlock_irq(&np->lock);
+	spin_unlock_irqrestore(&np->lock, flags);
 
 	dev->trans_start = jiffies;
 
@@ -2222,6 +2223,8 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
 			pkt_len = (desc_status & DescSizeMask) - 4;
 			if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
 				if (desc_status & DescMore) {
+					unsigned long flags;
+
 					if (netif_msg_rx_err(np))
 						printk(KERN_WARNING
 							"%s: Oversized(?) Ethernet "
@@ -2236,12 +2239,12 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
 					 * reset procedure documented in
 					 * AN-1287. */
 
-					spin_lock_irq(&np->lock);
+					spin_lock_irqsave(&np->lock, flags);
 					reset_rx(dev);
 					reinit_rx(dev);
 					writel(np->ring_dma, ioaddr + RxRingPtr);
 					check_link(dev);
-					spin_unlock_irq(&np->lock);
+					spin_unlock_irqrestore(&np->lock, flags);
 
 					/* We'll enable RX on exit from this
 					 * function. */
@@ -2396,8 +2399,19 @@ static struct net_device_stats *get_stats(struct net_device *dev)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void natsemi_poll_controller(struct net_device *dev)
 {
+	struct netdev_private *np = netdev_priv(dev);
+
 	disable_irq(dev->irq);
-	intr_handler(dev->irq, dev);
+
+	/*
+	 * A real interrupt might have already reached us at this point
+	 * but NAPI might still haven't called us back. As the interrupt
+	 * status register is cleared by reading, we should prevent an
+	 * interrupt loss in this case...
+	 */
+	if (!np->intr_status)
+		intr_handler(dev->irq, dev);
+
 	enable_irq(dev->irq);
 }
 #endif
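
The poll_controller() change guards against a different hazard: the chip's interrupt
status register is clear-on-read, and the interrupt handler stores the value it reads
into np->intr_status for the NAPI poll to consume rather than OR-ing new bits in. If
netpoll re-ran the handler while latched status was still unprocessed, the second read
would overwrite it and events would be lost, so the handler is re-invoked only when
nothing is pending. A rough sketch of the same guard with hypothetical names
(demo_priv, DEMO_ISR, demo_intr are illustrative, not the natsemi definitions):

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#define DEMO_ISR	0x10		/* assumed clear-on-read status register */

struct demo_priv {
	void __iomem *ioaddr;
	u32 intr_status;		/* latched for the NAPI poll to consume */
};

static irqreturn_t demo_intr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct demo_priv *dp = netdev_priv(dev);

	/*
	 * Reading the ISR clears it in hardware.  The value is stored,
	 * not accumulated, so a second read before the NAPI poll runs
	 * would overwrite and thus lose the pending events.
	 */
	dp->intr_status = readl(dp->ioaddr + DEMO_ISR);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void demo_poll_controller(struct net_device *dev)
{
	struct demo_priv *dp = netdev_priv(dev);

	disable_irq(dev->irq);
	/* Re-read the ISR only if nothing is already latched for NAPI. */
	if (!dp->intr_status)
		demo_intr(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
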