author     Roland Dreier <rdreier@cisco.com>    2008-03-20 16:30:48 -0400
committer  Jeff Garzik <jeff@garzik.org>        2008-03-25 23:42:05 -0400
commit     b1186dee3e785679876f6b629609ec080842edda (patch)
tree       3017f34ea0ffef3728829b4b952ffefae95fbbf2 /drivers/net/cxgb3/sge.c
parent     dc01c447123b489af7b4d0c58a15abcec36a40e6 (diff)
cxgb3: Fix lockdep problems with sge.reg_lock
Using iWARP with a Chelsio T3 NIC generates the following lockdep warning:

    =================================
    [ INFO: inconsistent lock state ]
    2.6.25-rc6 #50
    ---------------------------------
    inconsistent {softirq-on-W} -> {in-softirq-W} usage.
    swapper/0 [HC0[0]:SC1[1]:HE0:SE0] takes:
     (&adap->sge.reg_lock){-+..}, at: [<ffffffff880e5ee2>] cxgb_offload_ctl+0x3af/0x507 [cxgb3]

The problem is that reg_lock is used with plain spin_lock() in drivers/net/cxgb3/sge.c but is used with spin_lock_irqsave() in drivers/net/cxgb3/cxgb3_offload.c.  This is technically a false positive, since the uses in sge.c are only in the initialization and cleanup paths and cannot overlap with any use in interrupt context.

The best fix is probably just to use spin_lock_irq() with reg_lock in sge.c.  Even though it's not strictly required for correctness, it avoids triggering lockdep and the extra overhead of disabling interrupts is not important at all in the initialization and cleanup slow paths.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
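To make the inconsistency concrete, the pattern lockdep is objecting to boils down to something like the sketch below. This is illustrative code only, not taken from the driver; demo_lock and the demo_*() functions are made-up names standing in for sge.reg_lock, the slow paths in sge.c, and the offload paths that can run in softirq context.

/* Illustrative sketch only -- not code from the cxgb3 driver. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Slow path, process context only (stands in for t3_free_qset()). */
static void demo_setup_path(void)
{
        /*
         * Plain spin_lock() leaves interrupts and softirqs enabled, so
         * lockdep records this acquisition as "softirq-on-W".
         */
        spin_lock(&demo_lock);
        /* ... program hardware contexts ... */
        spin_unlock(&demo_lock);
}

/* Path reachable from softirq context (stands in for the offload side). */
static void demo_softirq_path(void)
{
        unsigned long flags;

        /*
         * The same lock is also taken "in-softirq-W".  Combined with the
         * acquisition above, lockdep warns: a softirq could interrupt
         * demo_setup_path() while it holds the lock and then deadlock
         * trying to take it again on the same CPU.
         */
        spin_lock_irqsave(&demo_lock, flags);
        /* ... */
        spin_unlock_irqrestore(&demo_lock, flags);
}

/* The fix applied below: disable interrupts in the slow path as well. */
static void demo_setup_path_fixed(void)
{
        spin_lock_irq(&demo_lock);
        /* ... */
        spin_unlock_irq(&demo_lock);
}

The diff that follows applies that last variant to every reg_lock user in sge.c.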
Diffstat (limited to 'drivers/net/cxgb3/sge.c')
 drivers/net/cxgb3/sge.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index db586870c5f4..98a6bbd11d4c 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -557,9 +557,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
 
         for (i = 0; i < SGE_RXQ_PER_SET; ++i)
                 if (q->fl[i].desc) {
-                        spin_lock(&adapter->sge.reg_lock);
+                        spin_lock_irq(&adapter->sge.reg_lock);
                         t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
-                        spin_unlock(&adapter->sge.reg_lock);
+                        spin_unlock_irq(&adapter->sge.reg_lock);
                         free_rx_bufs(pdev, &q->fl[i]);
                         kfree(q->fl[i].sdesc);
                         dma_free_coherent(&pdev->dev,
@@ -570,9 +570,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
 
         for (i = 0; i < SGE_TXQ_PER_SET; ++i)
                 if (q->txq[i].desc) {
-                        spin_lock(&adapter->sge.reg_lock);
+                        spin_lock_irq(&adapter->sge.reg_lock);
                         t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
-                        spin_unlock(&adapter->sge.reg_lock);
+                        spin_unlock_irq(&adapter->sge.reg_lock);
                         if (q->txq[i].sdesc) {
                                 free_tx_desc(adapter, &q->txq[i],
                                              q->txq[i].in_use);
@@ -586,9 +586,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
                 }
 
         if (q->rspq.desc) {
-                spin_lock(&adapter->sge.reg_lock);
+                spin_lock_irq(&adapter->sge.reg_lock);
                 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
-                spin_unlock(&adapter->sge.reg_lock);
+                spin_unlock_irq(&adapter->sge.reg_lock);
                 dma_free_coherent(&pdev->dev,
                                   q->rspq.size * sizeof(struct rsp_desc),
                                   q->rspq.desc, q->rspq.phys_addr);
@@ -2667,7 +2667,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
                 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
                 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
 
-        spin_lock(&adapter->sge.reg_lock);
+        spin_lock_irq(&adapter->sge.reg_lock);
 
         /* FL threshold comparison uses < */
         ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
@@ -2711,7 +2711,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
                 goto err_unlock;
         }
 
-        spin_unlock(&adapter->sge.reg_lock);
+        spin_unlock_irq(&adapter->sge.reg_lock);
 
         q->adap = adapter;
         q->netdev = dev;
@@ -2728,7 +2728,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
         return 0;
 
 err_unlock:
-        spin_unlock(&adapter->sge.reg_lock);
+        spin_unlock_irq(&adapter->sge.reg_lock);
 err:
         t3_free_qset(adapter, q);
         return ret;
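As a side note, the warning quoted in the commit message only shows up on kernels built with lockdep enabled; a .config fragment along the following lines (illustrative, exact option set depends on the kernel version) is needed to reproduce it:

# Kernel hacking options required for lockdep reports like the one above
CONFIG_DEBUG_KERNEL=y
CONFIG_PROVE_LOCKING=y
# PROVE_LOCKING pulls in LOCKDEP and DEBUG_LOCK_ALLOC via Kconfig selects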