author     Ingo Molnar <mingo@elte.hu>      2008-03-28 17:41:30 -0400
committer  Jeff Garzik <jeff@garzik.org>    2008-03-28 22:08:02 -0400
commit     bd6ca6375b9f18f40e814f391d9d1abaa916bc72 (patch)
tree       60c5e531a1aa135aba6d68b3ad02ee7e8a751720 /drivers/net
parent     c6cbcad1ec0dbb08b640d1ca166a42dcb4fb8faa (diff)
forcedeth: fix locking bug with netconsole
While using netconsole on forcedeth, lockdep noticed the following locking bug:

=================================
[ INFO: inconsistent lock state ]
2.6.24-rc6 #6
---------------------------------
inconsistent {softirq-on-W} -> {in-softirq-W} usage.
udevd/719 [HC0[0]:SC1[1]:HE1:SE0] takes:
 (_xmit_ETHER){-+..}, at: [<c043062e>] dev_watchdog+0x1c/0xb9
{softirq-on-W} state was registered at:
  [<c0147f67>] mark_held_locks+0x4e/0x66
  [<c014810e>] trace_hardirqs_on+0xfe/0x136
  [<c048ae63>] _spin_unlock_irq+0x22/0x42
  [<c02ec617>] nv_start_xmit_optimized+0x347/0x37a
  [<c042c80d>] netpoll_send_skb+0xa4/0x147
  [<c042d4a6>] netpoll_send_udp+0x238/0x242
  [<c02f44f6>] write_msg+0x6d/0x9b
  [<c012c129>] __call_console_drivers+0x4e/0x5a
  [<c012c18c>] _call_console_drivers+0x57/0x5b
  [<c012c2dd>] release_console_sem+0x11c/0x1b9
  [<c012caeb>] register_console+0x1eb/0x1f3
  [<c06ae673>] init_netconsole+0x119/0x15f
  [<c069149b>] kernel_init+0x147/0x294
  [<c01058cb>] kernel_thread_helper+0x7/0x10
  [<ffffffff>] 0xffffffff
irq event stamp: 950
hardirqs last enabled at (950): [<c048ae63>] _spin_unlock_irq+0x22/0x42
hardirqs last disabled at (949): [<c048aaf7>] _spin_lock_irq+0xc/0x38
softirqs last enabled at (0): [<c012a29c>] copy_process+0x375/0x126d
softirqs last disabled at (947): [<c0106d43>] do_softirq+0x61/0xc6

other info that might help us debug this:
no locks held by udevd/719.

stack backtrace:
Pid: 719, comm: udevd Not tainted 2.6.24-rc6 #6
 [<c0105c46>] show_trace_log_lvl+0x12/0x25
 [<c01063ec>] show_trace+0xd/0x10
 [<c010670c>] dump_stack+0x57/0x5f
 [<c0147505>] print_usage_bug+0x10a/0x117
 [<c0147c38>] mark_lock+0x121/0x402
 [<c01488b6>] __lock_acquire+0x3d1/0xb64
 [<c0149405>] lock_acquire+0x4e/0x6a
 [<c048a99b>] _spin_lock+0x23/0x32
 [<c043062e>] dev_watchdog+0x1c/0xb9
 [<c0133e4a>] run_timer_softirq+0x133/0x193
 [<c0130907>] __do_softirq+0x78/0xed
 [<c0106d43>] do_softirq+0x61/0xc6
 =======================
eth1: link down

The fix is to disable/restore irqs instead of disable/enable.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Ayaz Abdulla <aabdulla@nvidia.com>
Cc: Jeff Garzik <jeff@garzik.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
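The one-line summary of the fix is worth unpacking. Below is a minimal sketch of the two locking forms (illustrative only, not code from this patch; example_xmit is a hypothetical transmit handler, though struct fe_priv and np->lock are the real forcedeth names):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/spinlock.h>

	/*
	 * Illustrative sketch, not part of the patch: spin_lock_irq() /
	 * spin_unlock_irq() disable and then unconditionally RE-ENABLE local
	 * interrupts, which is only correct if the caller always runs with
	 * interrupts enabled.  netpoll (which netconsole uses) may invoke the
	 * transmit handler with interrupts already disabled, so the
	 * unconditional re-enable corrupts the caller's irq state -- the
	 * {softirq-on-W} -> {in-softirq-W} inconsistency lockdep reports when
	 * dev_watchdog() later takes the same lock class from a timer softirq.
	 */
	static int example_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct fe_priv *np = netdev_priv(dev);
		unsigned long flags;

		/*
		 * Buggy form (what the patch removes):
		 *	spin_lock_irq(&np->lock);
		 *	...
		 *	spin_unlock_irq(&np->lock);
		 */

		/* Fixed form: save the caller's irq state, restore exactly that. */
		spin_lock_irqsave(&np->lock, flags);
		/* ... queue the skb on the tx ring ... */
		spin_unlock_irqrestore(&np->lock, flags);

		return NETDEV_TX_OK;
	}

This also explains the registration stack in the splat: lockdep records the softirq-unsafe state at _spin_unlock_irq() called from nv_start_xmit_optimized() via netpoll_send_skb(), i.e. at the moment the unconditional re-enable happens.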
Diffstat (limited to 'drivers/net')
-rw-r--r--    drivers/net/forcedeth.c    18
1 files changed, 10 insertions, 8 deletions
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 6f7e3fde9e7c..980c2c229a71 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1854,6 +1854,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct ring_desc* start_tx;
 	struct ring_desc* prev_tx;
 	struct nv_skb_map* prev_tx_ctx;
+	unsigned long flags;
 
 	/* add fragments to entries count */
 	for (i = 0; i < fragments; i++) {
@@ -1863,10 +1864,10 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	empty_slots = nv_get_empty_tx_slots(np);
 	if (unlikely(empty_slots <= entries)) {
-		spin_lock_irq(&np->lock);
+		spin_lock_irqsave(&np->lock, flags);
 		netif_stop_queue(dev);
 		np->tx_stop = 1;
-		spin_unlock_irq(&np->lock);
+		spin_unlock_irqrestore(&np->lock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -1929,13 +1930,13 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
 			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
 
-	spin_lock_irq(&np->lock);
+	spin_lock_irqsave(&np->lock, flags);
 
 	/* set tx flags */
 	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
 	np->put_tx.orig = put_tx;
 
-	spin_unlock_irq(&np->lock);
+	spin_unlock_irqrestore(&np->lock, flags);
 
 	dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
 		dev->name, entries, tx_flags_extra);
@@ -1971,6 +1972,7 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
 	struct ring_desc_ex* prev_tx;
 	struct nv_skb_map* prev_tx_ctx;
 	struct nv_skb_map* start_tx_ctx;
+	unsigned long flags;
 
 	/* add fragments to entries count */
 	for (i = 0; i < fragments; i++) {
@@ -1980,10 +1982,10 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
 
 	empty_slots = nv_get_empty_tx_slots(np);
 	if (unlikely(empty_slots <= entries)) {
-		spin_lock_irq(&np->lock);
+		spin_lock_irqsave(&np->lock, flags);
 		netif_stop_queue(dev);
 		np->tx_stop = 1;
-		spin_unlock_irq(&np->lock);
+		spin_unlock_irqrestore(&np->lock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -2059,7 +2061,7 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
 		start_tx->txvlan = 0;
 	}
 
-	spin_lock_irq(&np->lock);
+	spin_lock_irqsave(&np->lock, flags);
 
 	if (np->tx_limit) {
 		/* Limit the number of outstanding tx. Setup all fragments, but
@@ -2085,7 +2087,7 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
 	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
 	np->put_tx.ex = put_tx;
 
-	spin_unlock_irq(&np->lock);
+	spin_unlock_irqrestore(&np->lock, flags);
 
 	dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
 		dev->name, entries, tx_flags_extra);
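A closing note on reproducing reports like the one in the changelog: they come from lockdep, which must be compiled into the kernel. Assuming a 2.6.24-era .config, the relevant option is:

	CONFIG_PROVE_LOCKING=y

(which selects CONFIG_LOCKDEP). With that set and netconsole loaded, the softirq-unsafe acquisition can be registered as early as register_console() during netconsole init, as the splat's registration stack shows, and a subsequent dev_watchdog() run from the timer softirq produces the inconsistency report.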