about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorGovindarajulu Varadarajan <_govind@gmx.com>2014-11-22 14:52:52 -0500
committerDavid S. Miller <davem@davemloft.net>2014-11-23 14:31:25 -0500
commit78e2045d3d562e43562e3cbf0e96c26e38a718e9 (patch)
tree0358eacd0cecc7116465d68095d64c4860fb7337
parentdb40b3f55a2297eff15f24a4d3d52f70783a6530 (diff)
enic: use spin_lock(wq_lock) instead of spin_lock_irqsave(wq_lock)
All the access to wq has been moved out of hardirq context. We no longer need to use spin_lock_irqsave.

Signed-off-by: Govindarajulu Varadarajan <_govind@gmx.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index b42a48097dbd..46647407d585 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -529,7 +529,6 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
529{ 529{
530 struct enic *enic = netdev_priv(netdev); 530 struct enic *enic = netdev_priv(netdev);
531 struct vnic_wq *wq; 531 struct vnic_wq *wq;
532 unsigned long flags;
533 unsigned int txq_map; 532 unsigned int txq_map;
534 struct netdev_queue *txq; 533 struct netdev_queue *txq;
535 534
@@ -554,14 +553,14 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
554 return NETDEV_TX_OK; 553 return NETDEV_TX_OK;
555 } 554 }
556 555
557 spin_lock_irqsave(&enic->wq_lock[txq_map], flags); 556 spin_lock(&enic->wq_lock[txq_map]);
558 557
559 if (vnic_wq_desc_avail(wq) < 558 if (vnic_wq_desc_avail(wq) <
560 skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) { 559 skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
561 netif_tx_stop_queue(txq); 560 netif_tx_stop_queue(txq);
562 /* This is a hard error, log it */ 561 /* This is a hard error, log it */
563 netdev_err(netdev, "BUG! Tx ring full when queue awake!\n"); 562 netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
564 spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags); 563 spin_unlock(&enic->wq_lock[txq_map]);
565 return NETDEV_TX_BUSY; 564 return NETDEV_TX_BUSY;
566 } 565 }
567 566
@@ -572,7 +571,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
572 if (!skb->xmit_more || netif_xmit_stopped(txq)) 571 if (!skb->xmit_more || netif_xmit_stopped(txq))
573 vnic_wq_doorbell(wq); 572 vnic_wq_doorbell(wq);
574 573
575 spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags); 574 spin_unlock(&enic->wq_lock[txq_map]);
576 575
577 return NETDEV_TX_OK; 576 return NETDEV_TX_OK;
578} 577}