diff options
author | Komuro <komurojun-mbn@nifty.com> | 2007-11-10 21:04:36 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2008-01-28 18:04:14 -0500 |
commit | bd5a93462093305a9ea7abd888a16c8c83e4bdc2 (patch) | |
tree | 68120e1f2fbae30acae07a743f045ff19e0ab106 /drivers/net/pcmcia/axnet_cs.c | |
parent | 2eab17ab880ad8d570d27517e6c9d9fe74adc214 (diff) |
axnet_cs: use spin_lock_irqsave instead of spin_lock + disable_irq
Signed-off-by: Komuro <komurojun-mbn@nifty.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/pcmcia/axnet_cs.c')
-rw-r--r-- | drivers/net/pcmcia/axnet_cs.c | 16 |
1 file changed, 5 insertions, 11 deletions
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index 8d910a372f89..6d342f6c14f6 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c | |||
@@ -1040,15 +1040,13 @@ void ei_tx_timeout(struct net_device *dev) | |||
1040 | 1040 | ||
1041 | /* Ugly but a reset can be slow, yet must be protected */ | 1041 | /* Ugly but a reset can be slow, yet must be protected */ |
1042 | 1042 | ||
1043 | disable_irq_nosync(dev->irq); | 1043 | spin_lock_irqsave(&ei_local->page_lock, flags); |
1044 | spin_lock(&ei_local->page_lock); | ||
1045 | 1044 | ||
1046 | /* Try to restart the card. Perhaps the user has fixed something. */ | 1045 | /* Try to restart the card. Perhaps the user has fixed something. */ |
1047 | ei_reset_8390(dev); | 1046 | ei_reset_8390(dev); |
1048 | AX88190_init(dev, 1); | 1047 | AX88190_init(dev, 1); |
1049 | 1048 | ||
1050 | spin_unlock(&ei_local->page_lock); | 1049 | spin_unlock_irqrestore(&ei_local->page_lock, flags); |
1051 | enable_irq(dev->irq); | ||
1052 | netif_wake_queue(dev); | 1050 | netif_wake_queue(dev); |
1053 | } | 1051 | } |
1054 | 1052 | ||
@@ -1085,9 +1083,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1085 | * Slow phase with lock held. | 1083 | * Slow phase with lock held. |
1086 | */ | 1084 | */ |
1087 | 1085 | ||
1088 | disable_irq_nosync(dev->irq); | 1086 | spin_lock_irqsave(&ei_local->page_lock, flags); |
1089 | |||
1090 | spin_lock(&ei_local->page_lock); | ||
1091 | 1087 | ||
1092 | ei_local->irqlock = 1; | 1088 | ei_local->irqlock = 1; |
1093 | 1089 | ||
@@ -1125,8 +1121,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1125 | ei_local->irqlock = 0; | 1121 | ei_local->irqlock = 0; |
1126 | netif_stop_queue(dev); | 1122 | netif_stop_queue(dev); |
1127 | outb_p(ENISR_ALL, e8390_base + EN0_IMR); | 1123 | outb_p(ENISR_ALL, e8390_base + EN0_IMR); |
1128 | spin_unlock(&ei_local->page_lock); | 1124 | spin_unlock_irqrestore(&ei_local->page_lock, flags); |
1129 | enable_irq(dev->irq); | ||
1130 | ei_local->stat.tx_errors++; | 1125 | ei_local->stat.tx_errors++; |
1131 | return 1; | 1126 | return 1; |
1132 | } | 1127 | } |
@@ -1172,8 +1167,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1172 | ei_local->irqlock = 0; | 1167 | ei_local->irqlock = 0; |
1173 | outb_p(ENISR_ALL, e8390_base + EN0_IMR); | 1168 | outb_p(ENISR_ALL, e8390_base + EN0_IMR); |
1174 | 1169 | ||
1175 | spin_unlock(&ei_local->page_lock); | 1170 | spin_unlock_irqrestore(&ei_local->page_lock, flags); |
1176 | enable_irq(dev->irq); | ||
1177 | 1171 | ||
1178 | dev_kfree_skb (skb); | 1172 | dev_kfree_skb (skb); |
1179 | ei_local->stat.tx_bytes += send_length; | 1173 | ei_local->stat.tx_bytes += send_length; |