author     Ralph Campbell <ralph.campbell@qlogic.com>	2007-04-27 14:11:11 -0400
committer  Roland Dreier <rolandd@cisco.com>	2007-04-30 20:30:27 -0400
commit     35ff032e65ab5cc03bbba46cefece7376c7c562f
tree       76bfb852b0ccf0b095b39b4a4226da087d51dbdf
parent     b9099ff63c75216d6ca10bce5a1abcd9293c27e6
IB/ipath: Don't call spin_lock_irq() from interrupt context
This patch fixes the problem reported by Bernd Schubert <bs@q-leap.de>
with kernel debug options enabled:

    BUG: at kernel/lockdep.c:1860 trace_hardirqs_on()

This was caused by using spin_lock_irq()/spin_unlock_irq() from
interrupt context. Fix all the places that might be called from
interrupts to use spin_lock_irqsave()/spin_unlock_irqrestore().

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
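For background, the distinction the patch relies on, as a minimal standalone
sketch (the lock and data names below are hypothetical, not from the ipath
driver): spin_unlock_irq() unconditionally re-enables local interrupts, so
using the _irq pair inside a handler that entered with interrupts already
disabled turns them back on and trips the lockdep trace_hardirqs_on() check
quoted above. spin_lock_irqsave() instead saves the current interrupt state
into `flags`, and spin_unlock_irqrestore() puts exactly that state back,
which makes the pair safe from any context.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */
static int example_state;		/* hypothetical shared data */

/* Safe from process, softirq, or hardirq context: the interrupt
 * state on entry is saved in `flags` and restored on unlock. */
static void example_update_any_ctx(int v)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	example_state = v;
	spin_unlock_irqrestore(&example_lock, flags);
}

/* Only correct in process context: spin_unlock_irq() always
 * re-enables interrupts, even if they were off on entry. */
static void example_update_process_ctx(int v)
{
	spin_lock_irq(&example_lock);
	example_state = v;
	spin_unlock_irq(&example_lock);
}

This is why send_rc_ack(), ipath_rc_rcv_error(), and ipath_rc_error(), all
reachable from the receive interrupt path, switch to the _irqsave variants
in the diff below.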
Diffstat (limited to 'drivers/infiniband')
 drivers/infiniband/hw/ipath/ipath_rc.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index b4b88d0b53f5..e3e533276356 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -587,6 +587,7 @@ static void send_rc_ack(struct ipath_qp *qp)
 	u32 hwords;
 	struct ipath_ib_header hdr;
 	struct ipath_other_headers *ohdr;
+	unsigned long flags;
 
 	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
 	if (qp->r_head_ack_queue != qp->s_tail_ack_queue)
@@ -640,11 +641,11 @@ static void send_rc_ack(struct ipath_qp *qp)
 	dev->n_rc_qacks++;
 
 queue_ack:
-	spin_lock_irq(&qp->s_lock);
+	spin_lock_irqsave(&qp->s_lock, flags);
 	qp->s_flags |= IPATH_S_ACK_PENDING;
 	qp->s_nak_state = qp->r_nak_state;
 	qp->s_ack_psn = qp->r_ack_psn;
-	spin_unlock_irq(&qp->s_lock);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 
 	/* Call ipath_do_rc_send() in another thread. */
 	tasklet_hi_schedule(&qp->s_task);
@@ -1294,6 +1295,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
 	struct ipath_ack_entry *e;
 	u8 i, prev;
 	int old_req;
+	unsigned long flags;
 
 	if (diff > 0) {
 		/*
@@ -1327,7 +1329,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
 	psn &= IPATH_PSN_MASK;
 	e = NULL;
 	old_req = 1;
-	spin_lock_irq(&qp->s_lock);
+	spin_lock_irqsave(&qp->s_lock, flags);
 	for (i = qp->r_head_ack_queue; ; i = prev) {
 		if (i == qp->s_tail_ack_queue)
 			old_req = 0;
@@ -1425,7 +1427,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
 		 * after all the previous RDMA reads and atomics.
 		 */
 		if (i == qp->r_head_ack_queue) {
-			spin_unlock_irq(&qp->s_lock);
+			spin_unlock_irqrestore(&qp->s_lock, flags);
 			qp->r_nak_state = 0;
 			qp->r_ack_psn = qp->r_psn - 1;
 			goto send_ack;
@@ -1443,7 +1445,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
 	tasklet_hi_schedule(&qp->s_task);
 
 unlock_done:
-	spin_unlock_irq(&qp->s_lock);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 done:
 	return 1;
 
@@ -1453,10 +1455,12 @@ send_ack:
 
 static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
 {
-	spin_lock_irq(&qp->s_lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&qp->s_lock, flags);
 	qp->state = IB_QPS_ERR;
 	ipath_error_qp(qp, err);
-	spin_unlock_irq(&qp->s_lock);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 }
 
 /**