author     Chris Lalancette <clalance@redhat.com>  2007-01-16 16:41:44 -0500
committer  Jeff Garzik <jeff@garzik.org>           2007-01-18 11:47:22 -0500
commit     553af56775b3f23bf64f87090ab81a62bef2837b (patch)
tree       1b8e9c5feb2bdaa2f61adb3a6f87d96f4afd21db /drivers/net
parent     10764889c6355cbb335cf0578ce12427475d1a65 (diff)
8139cp: Don't blindly enable interrupts
Similar to this commit:
http://kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commitdiff;h=d15e9c4d9a75702b30e00cdf95c71c88e3f3f51e
It's not safe in cp_start_xmit() to blindly call spin_lock_irq() and then
spin_unlock_irq(), since it may very well be the case that cp_start_xmit()
was called with interrupts already disabled, and spin_unlock_irq() would
then re-enable them unconditionally (I came across this bug in the context
of netdump in Red Hat kernels, but the same issue holds, for example, in
netconsole). Therefore, replace all instances of spin_lock_irq() and
spin_unlock_irq() in cp_start_xmit() with spin_lock_irqsave() and
spin_unlock_irqrestore(), respectively. I tested this against a
fully-virtualized Xen guest using netdump, which happens to use the 8139cp
driver to talk to the emulated hardware. I don't have a real piece of
8139cp hardware to test on, so someone else will have to do that.
Signed-off-by: Chris Lalancette <clalance@redhat.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
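
[Editor's note: the locking pattern at issue can be shown in a minimal
standalone sketch; the struct and function names below are hypothetical
and are not part of the patch or the driver.]

#include <linux/spinlock.h>

/* Hypothetical example, not part of the patch. */
struct example_priv {
	spinlock_t lock;
};

static void example_locked_path(struct example_priv *priv)
{
	unsigned long flags;

	/*
	 * spin_unlock_irq() unconditionally re-enables interrupts, so the
	 * spin_lock_irq()/spin_unlock_irq() pair is only safe when the
	 * caller is known to run with interrupts enabled. The irqsave
	 * variants record the caller's interrupt state and restore it on
	 * unlock, which is what cp_start_xmit() needs, since paths such
	 * as netpoll/netconsole can call it with interrupts already
	 * disabled.
	 */
	spin_lock_irqsave(&priv->lock, flags);
	/* ... modify shared driver state under the lock ... */
	spin_unlock_irqrestore(&priv->lock, flags);
}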
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/8139cp.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index e2cb19b582a1..6f93a765e564 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -765,17 +765,18 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 	struct cp_private *cp = netdev_priv(dev);
 	unsigned entry;
 	u32 eor, flags;
+	unsigned long intr_flags;
 #if CP_VLAN_TAG_USED
 	u32 vlan_tag = 0;
 #endif
 	int mss = 0;
 
-	spin_lock_irq(&cp->lock);
+	spin_lock_irqsave(&cp->lock, intr_flags);
 
 	/* This is a hard error, log it. */
 	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
 		netif_stop_queue(dev);
-		spin_unlock_irq(&cp->lock);
+		spin_unlock_irqrestore(&cp->lock, intr_flags);
 		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
 		       dev->name);
 		return 1;
@@ -908,7 +909,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
 		netif_stop_queue(dev);
 
-	spin_unlock_irq(&cp->lock);
+	spin_unlock_irqrestore(&cp->lock, intr_flags);
 
 	cpw8(TxPoll, NormalTxPoll);
 	dev->trans_start = jiffies;