author	Mike Marciniszyn <mike.marciniszyn@intel.com>	2016-09-06 07:37:41 -0400
committer	Doug Ledford <dledford@redhat.com>	2016-10-02 08:42:10 -0400
commit	68e78b3d78ac69412c468460606cc767a743acab (patch)
tree	5d54d79342d90dd20ae2b0617a85a74c148fac39 /drivers/infiniband
parent	222f7a9aac26ae6bdeb3d4d29bad010ba34c31d3 (diff)
IB/rdmavt, IB/hfi1: Add lockdep asserts for lock debug
This patch adds lockdep asserts in key code paths to ensure lock correctness.

Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
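For readers less familiar with the helper: lockdep_assert_held() issues a kernel WARN when the named lock is not held by the current context, and it compiles to (almost) nothing when CONFIG_LOCKDEP is not set, so the asserts added here cost nothing in production builds. Below is a minimal sketch of the pattern this patch applies throughout the driver; the names (example_qp, example_schedule_send, example_caller) are illustrative stand-ins, not the hfi1/rdmavt code itself.

	/*
	 * Minimal sketch of the locking-contract pattern (illustrative
	 * names only, not the driver code).
	 */
	#include <linux/lockdep.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct example_qp {
		spinlock_t s_lock;
		u32 s_flags;
	};

	/* Contract: the caller must already hold qp->s_lock. */
	static void example_schedule_send(struct example_qp *qp)
	{
		lockdep_assert_held(&qp->s_lock);	/* WARNs if the contract is broken */
		qp->s_flags |= 0x1;
	}

	static void example_caller(struct example_qp *qp)
	{
		unsigned long flags;

		spin_lock_irqsave(&qp->s_lock, flags);
		example_schedule_send(qp);		/* assert is satisfied here */
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}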
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/hfi1/qp.c	7
-rw-r--r--	drivers/infiniband/hw/hfi1/rc.c	17
-rw-r--r--	drivers/infiniband/sw/rdmavt/qp.c	8
3 files changed, 30 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 53ad10ce4f50..9fc75e7e8781 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -449,13 +449,14 @@ static void qp_pio_drain(struct rvt_qp *qp)
  */
 void hfi1_schedule_send(struct rvt_qp *qp)
 {
+	lockdep_assert_held(&qp->s_lock);
 	if (hfi1_send_ok(qp))
 		_hfi1_schedule_send(qp);
 }
 
 /**
- * hfi1_get_credit - flush the send work queue of a QP
- * @qp: the qp who's send work queue to flush
+ * hfi1_get_credit - handle credit in aeth
+ * @qp: the qp
  * @aeth: the Acknowledge Extended Transport Header
  *
  * The QP s_lock should be held.
@@ -464,6 +465,7 @@ void hfi1_get_credit(struct rvt_qp *qp, u32 aeth)
 {
 	u32 credit = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;
 
+	lockdep_assert_held(&qp->s_lock);
 	/*
 	 * If the credit is invalid, we can send
 	 * as many packets as we like. Otherwise, we have to
@@ -853,6 +855,7 @@ unsigned free_all_qps(struct rvt_dev_info *rdi)
 
 void flush_qp_waiters(struct rvt_qp *qp)
 {
+	lockdep_assert_held(&qp->s_lock);
 	flush_iowait(qp);
 	hfi1_stop_rc_timers(qp);
 }
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 9f7900f15627..db6396752a97 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -68,6 +68,7 @@ static inline void hfi1_add_retry_timer(struct rvt_qp *qp)
 	struct ib_qp *ibqp = &qp->ibqp;
 	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
 
+	lockdep_assert_held(&qp->s_lock);
 	qp->s_flags |= RVT_S_TIMER;
 	/* 4.096 usec. * (1 << qp->timeout) */
 	qp->s_timer.expires = jiffies + qp->timeout_jiffies +
@@ -86,6 +87,7 @@ void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to)
 {
 	struct hfi1_qp_priv *priv = qp->priv;
 
+	lockdep_assert_held(&qp->s_lock);
 	qp->s_flags |= RVT_S_WAIT_RNR;
 	qp->s_timer.expires = jiffies + usecs_to_jiffies(to);
 	add_timer(&priv->s_rnr_timer);
@@ -103,6 +105,7 @@ static inline void hfi1_mod_retry_timer(struct rvt_qp *qp)
 	struct ib_qp *ibqp = &qp->ibqp;
 	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
 
+	lockdep_assert_held(&qp->s_lock);
 	qp->s_flags |= RVT_S_TIMER;
 	/* 4.096 usec. * (1 << qp->timeout) */
 	mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies +
@@ -120,6 +123,7 @@ static inline int hfi1_stop_retry_timer(struct rvt_qp *qp)
 {
 	int rval = 0;
 
+	lockdep_assert_held(&qp->s_lock);
 	/* Remove QP from retry */
 	if (qp->s_flags & RVT_S_TIMER) {
 		qp->s_flags &= ~RVT_S_TIMER;
@@ -138,6 +142,7 @@ void hfi1_stop_rc_timers(struct rvt_qp *qp)
 {
 	struct hfi1_qp_priv *priv = qp->priv;
 
+	lockdep_assert_held(&qp->s_lock);
 	/* Remove QP from all timers */
 	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
 		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
@@ -158,6 +163,7 @@ static inline int hfi1_stop_rnr_timer(struct rvt_qp *qp)
 	int rval = 0;
 	struct hfi1_qp_priv *priv = qp->priv;
 
+	lockdep_assert_held(&qp->s_lock);
 	/* Remove QP from rnr timer */
 	if (qp->s_flags & RVT_S_WAIT_RNR) {
 		qp->s_flags &= ~RVT_S_WAIT_RNR;
@@ -228,6 +234,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
 	u32 pmtu = qp->pmtu;
 	struct hfi1_qp_priv *priv = qp->priv;
 
+	lockdep_assert_held(&qp->s_lock);
 	/* Don't send an ACK if we aren't supposed to. */
 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
 		goto bail;
@@ -400,6 +407,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 	int middle = 0;
 	int delta;
 
+	lockdep_assert_held(&qp->s_lock);
 	ps->s_txreq = get_txreq(ps->dev, qp);
 	if (IS_ERR(ps->s_txreq))
 		goto bail_no_tx;
@@ -958,6 +966,7 @@ static void reset_psn(struct rvt_qp *qp, u32 psn)
 	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
 	u32 opcode;
 
+	lockdep_assert_held(&qp->s_lock);
 	qp->s_cur = n;
 
 	/*
@@ -1043,6 +1052,8 @@ static void restart_rc(struct rvt_qp *qp, u32 psn, int wait)
 	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
 	struct hfi1_ibport *ibp;
 
+	lockdep_assert_held(&qp->r_lock);
+	lockdep_assert_held(&qp->s_lock);
 	if (qp->s_retry == 0) {
 		if (qp->s_mig_state == IB_MIG_ARMED) {
 			hfi1_migrate_qp(qp);
@@ -1119,6 +1130,7 @@ static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
 	struct rvt_swqe *wqe;
 	u32 n = qp->s_last;
 
+	lockdep_assert_held(&qp->s_lock);
 	/* Find the work request corresponding to the given PSN. */
 	for (;;) {
 		wqe = rvt_get_swqe_ptr(qp, n);
@@ -1148,6 +1160,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
 	u32 opcode;
 	u32 psn;
 
+	lockdep_assert_held(&qp->s_lock);
 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
 		return;
 
@@ -1239,6 +1252,7 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 	struct ib_wc wc;
 	unsigned i;
 
+	lockdep_assert_held(&qp->s_lock);
 	/*
 	 * Don't decrement refcount and don't generate a
 	 * completion if the SWQE is being resent until the send
@@ -1338,6 +1352,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 	int diff;
 	unsigned long to;
 
+	lockdep_assert_held(&qp->s_lock);
 	/*
 	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
 	 * requests and implicitly NAK RDMA read and atomic requests issued
@@ -1553,6 +1568,7 @@ static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
 {
 	struct rvt_swqe *wqe;
 
+	lockdep_assert_held(&qp->s_lock);
 	/* Remove QP from retry timer */
 	hfi1_stop_rc_timers(qp);
 
@@ -2136,6 +2152,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
 	int copy_last = 0;
 	u32 rkey;
 
+	lockdep_assert_held(&qp->r_lock);
 	bth0 = be32_to_cpu(ohdr->bth[0]);
 	if (hfi1_ruc_check_hdr(ibp, hdr, rcv_flags & HFI1_HAS_GRH, qp, bth0))
 		return;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 673e2f674215..7fe0d7e3ba46 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -562,6 +562,9 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
 	__must_hold(&qp->s_hlock)
 	__must_hold(&qp->r_lock)
 {
+	lockdep_assert_held(&qp->r_lock);
+	lockdep_assert_held(&qp->s_hlock);
+	lockdep_assert_held(&qp->s_lock);
 	if (qp->state != IB_QPS_RESET) {
 		qp->state = IB_QPS_RESET;
 
@@ -595,6 +598,9 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
 		rdi->driver_f.notify_qp_reset(qp);
 	}
 	rvt_init_qp(rdi, qp, type);
+	lockdep_assert_held(&qp->r_lock);
+	lockdep_assert_held(&qp->s_hlock);
+	lockdep_assert_held(&qp->s_lock);
 }
 
 /**
@@ -917,6 +923,8 @@ int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
 	int ret = 0;
 	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
 
+	lockdep_assert_held(&qp->r_lock);
+	lockdep_assert_held(&qp->s_lock);
 	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
 		goto bail;
 
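One detail worth noting in the rvt_reset_qp() hunks above: the function already carried __must_hold() annotations, which sparse checks statically, while the new lockdep_assert_held() calls enforce the same locking contract at run time when lockdep is enabled. A sketch of how the two are typically combined, reusing the illustrative example_qp struct from the sketch after the commit message (example_reset is likewise a made-up name):

	/* Sketch only: static annotation plus runtime assert on the same lock. */
	static void example_reset(struct example_qp *qp)
		__must_hold(&qp->s_lock)		/* checked by sparse at build time */
	{
		lockdep_assert_held(&qp->s_lock);	/* checked by lockdep at run time */
		qp->s_flags = 0;
	}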