author     Dongdong Deng <dongdong.deng@windriver.com>  2009-08-13 15:12:31 -0400
committer  David S. Miller <davem@davemloft.net>        2009-08-14 19:41:16 -0400
commit     22580f894ac190c46beebb5c3172e450a2318f79 (patch)
tree       01f88df9a5bc27170ef7ca944ba40af578476d72
parent     0527a1a8440a20b3d0fd1d0c9e75a6f38a9d5315 (diff)
drivers/net: fixed drivers that support netpoll use ndo_start_xmit()
The NETPOLL API requires that interrupts remain disabled in
netpoll_send_skb().  Using the spin_lock_irq()/spin_unlock_irq() pair
inside the NETPOLL API callbacks re-enables interrupts and can lead to
kernel instability.

The solution is to use spin_lock_irqsave() and spin_unlock_irqrestore()
so that interrupts are not re-enabled while inside netpoll_send_skb().

Call trace:
netpoll_send_skb()
{
 -> local_irq_save(flags)
   ---> dev->ndo_start_xmit(skb, dev)
         ---> spin_lock_irq()
         ---> spin_unlock_irq()   ******* here interrupts would get re-enabled
       ...
 -> local_irq_restore(flags)
}

Signed-off-by: Dongdong Deng <dongdong.deng@windriver.com>
Signed-off-by: Jason Wessel <jason.wessel@windriver.com>
Acked-by: Bruce Ashfield <bruce.ashfield@windriver.com>
Acked-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
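For reference, the following is a minimal sketch of the irqsave/irqrestore locking pattern this patch applies to each affected ndo_start_xmit() handler. The driver name foo, the foo_private structure, and its lock field are hypothetical placeholders for illustration only, not code from any of the four drivers touched here.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Hypothetical per-device private data; only the lock matters here. */
struct foo_private {
	spinlock_t lock;
};

static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_private *fp = netdev_priv(dev);
	unsigned long flags;

	/* spin_lock_irqsave() records the caller's IRQ state in 'flags'
	 * instead of unconditionally re-enabling IRQs on unlock, so
	 * netpoll_send_skb(), which calls ndo_start_xmit() with IRQs
	 * disabled, keeps interrupts off across the whole call. */
	spin_lock_irqsave(&fp->lock, flags);

	/* ... place the skb on the hardware TX ring ... */

	spin_unlock_irqrestore(&fp->lock, flags);
	return NETDEV_TX_OK;
}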
-rw-r--r--  drivers/net/b44.c               |  5 +++--
-rw-r--r--  drivers/net/tulip/tulip_core.c  |  5 +++--
-rw-r--r--  drivers/net/ucc_geth.c          |  5 +++--
-rw-r--r--  drivers/net/via-rhine.c         |  5 +++--
4 files changed, 12 insertions(+), 8 deletions(-)
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 36d4d377ec2f..bafca672ea7d 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -952,9 +952,10 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int rc = NETDEV_TX_OK;
 	dma_addr_t mapping;
 	u32 len, entry, ctrl;
+	unsigned long flags;
 
 	len = skb->len;
-	spin_lock_irq(&bp->lock);
+	spin_lock_irqsave(&bp->lock, flags);
 
 	/* This is a hard error, log it. */
 	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
@@ -1027,7 +1028,7 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dev->trans_start = jiffies;
 
 out_unlock:
-	spin_unlock_irq(&bp->lock);
+	spin_unlock_irqrestore(&bp->lock, flags);
 
 	return rc;
 
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 99a63649f4fc..4cf9a6588751 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -652,8 +652,9 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int entry;
 	u32 flag;
 	dma_addr_t mapping;
+	unsigned long flags;
 
-	spin_lock_irq(&tp->lock);
+	spin_lock_irqsave(&tp->lock, flags);
 
 	/* Calculate the next Tx descriptor entry. */
 	entry = tp->cur_tx % TX_RING_SIZE;
@@ -688,7 +689,7 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Trigger an immediate transmit demand. */
 	iowrite32(0, tp->base_addr + CSR1);
 
-	spin_unlock_irq(&tp->lock);
+	spin_unlock_irqrestore(&tp->lock, flags);
 
 	dev->trans_start = jiffies;
 
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 3b957e6412ee..8a7b8c7bd781 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3111,10 +3111,11 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	u8 __iomem *bd;			/* BD pointer */
 	u32 bd_status;
 	u8 txQ = 0;
+	unsigned long flags;
 
 	ugeth_vdbg("%s: IN", __func__);
 
-	spin_lock_irq(&ugeth->lock);
+	spin_lock_irqsave(&ugeth->lock, flags);
 
 	dev->stats.tx_bytes += skb->len;
 
@@ -3171,7 +3172,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	uccf = ugeth->uccf;
 	out_be16(uccf->p_utodr, UCC_FAST_TOD);
 #endif
-	spin_unlock_irq(&ugeth->lock);
+	spin_unlock_irqrestore(&ugeth->lock, flags);
 
 	return 0;
 }
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 88c30a58b4bd..934f7671650a 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1218,6 +1218,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
 	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
 	unsigned entry;
+	unsigned long flags;
 
 	/* Caution: the write order is important here, set the field
 	   with the "ownership" bits last. */
@@ -1261,7 +1262,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
 
 	/* lock eth irq */
-	spin_lock_irq(&rp->lock);
+	spin_lock_irqsave(&rp->lock, flags);
 	wmb();
 	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
 	wmb();
@@ -1280,7 +1281,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
 
 	dev->trans_start = jiffies;
 
-	spin_unlock_irq(&rp->lock);
+	spin_unlock_irqrestore(&rp->lock, flags);
 
 	if (debug > 4) {
 		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",