author	Arjan van de Ven <arjan@infradead.org>	2006-07-05 09:00:40 -0400
committer	Jeff Garzik <jeff@garzik.org>	2006-07-05 14:29:26 -0400
commit	bce305f4fe779f29d99d414685243f5da0803254
tree	67072c37319f3c6b091dec8094d342118fe3fd62
parent	20ed7c094dfe33b0e15e8c60f60012b9278631d3
[PATCH] 8139too deadlock fix
> stack backtrace:
>  [<f9099d31>] rtl8139_start_xmit+0xd9/0xff [8139too]
>  [<c11ad5ea>] netpoll_send_skb+0x98/0xea

This seems to be a real deadlock.

netpoll_send_skb takes the _xmit_lock, and then rtl8139_start_xmit, called
with that lock held, does

	spin_unlock_irq(&tp->lock);

which enables interrupts and softirqs. This is quite bad, because the xmit
lock is also taken in softirq context by the watchdog:

 [<c1200376>] _spin_lock+0x23/0x32
 [<c11af282>] dev_watchdog+0x14/0xb1
 [<c101dab2>] run_timer_softirq+0xf2/0x14a
 [<c101a691>] __do_softirq+0x55/0xb0
 [<c1004a8d>] do_softirq+0x58/0xbd

The watchdog would then deadlock, since spin_unlock_irq() has enabled
irqs/softirqs while the _xmit_lock is still held.

The patch below turns the lock/unlock into an irqsave/irqrestore pair so
that interrupts are not enabled unconditionally.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
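A minimal sketch (not part of the patch) of the locking semantics the fix relies
on: spin_unlock_irq() re-enables interrupts unconditionally, while
spin_unlock_irqrestore() only restores the interrupt state that was saved at lock
time. The names demo_lock and demo_start_xmit below are hypothetical stand-ins
for tp->lock and rtl8139_start_xmit().

	/* Hypothetical sketch, not the driver code: why the fix uses
	 * irqsave/irqrestore instead of spin_lock_irq/spin_unlock_irq. */
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);	/* stands in for tp->lock */

	static void demo_start_xmit(void)	/* stands in for rtl8139_start_xmit() */
	{
		unsigned long flags;

		/*
		 * spin_unlock_irq() would re-enable interrupts unconditionally.
		 * If the caller (netpoll, holding dev->_xmit_lock) had disabled
		 * them, the watchdog timer softirq could then run, spin on
		 * _xmit_lock and deadlock.  spin_unlock_irqrestore() instead
		 * restores exactly the interrupt state saved in 'flags'.
		 */
		spin_lock_irqsave(&demo_lock, flags);
		/* ... program the TX descriptor ... */
		spin_unlock_irqrestore(&demo_lock, flags);
	}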
 drivers/net/8139too.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index cd9718512d1c..e4f4eaff7679 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1709,6 +1709,7 @@ static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
 	void __iomem *ioaddr = tp->mmio_addr;
 	unsigned int entry;
 	unsigned int len = skb->len;
+	unsigned long flags;
 
 	/* Calculate the next Tx descriptor entry. */
 	entry = tp->cur_tx % NUM_TX_DESC;
@@ -1725,7 +1726,7 @@ static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
 		return 0;
 	}
 
-	spin_lock_irq(&tp->lock);
+	spin_lock_irqsave(&tp->lock, flags);
 	RTL_W32_F (TxStatus0 + (entry * sizeof (u32)),
 		   tp->tx_flag | max(len, (unsigned int)ETH_ZLEN));
 
@@ -1736,7 +1737,7 @@ static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
 
 	if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx)
 		netif_stop_queue (dev);
-	spin_unlock_irq(&tp->lock);
+	spin_unlock_irqrestore(&tp->lock, flags);
 
 	if (netif_msg_tx_queued(tp))
 		printk (KERN_DEBUG "%s: Queued Tx packet size %u to slot %d.\n",