-rw-r--r--	drivers/infiniband/hw/qib/qib_qp.c	2
-rw-r--r--	drivers/infiniband/hw/qib/qib_rc.c	47
-rw-r--r--	drivers/infiniband/hw/qib/qib_sdma.c	2
-rw-r--r--	drivers/infiniband/hw/qib/qib_uc.c	6
-rw-r--r--	drivers/infiniband/hw/qib/qib_ud.c	17
-rw-r--r--	drivers/infiniband/hw/qib/qib_verbs.c	7
6 files changed, 26 insertions, 55 deletions
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index e0f65e39076b..6c39851d2ded 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -450,7 +450,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
  *
  * Flushes both send and receive work queues.
  * Returns true if last WQE event should be generated.
- * The QP s_lock should be held and interrupts disabled.
+ * The QP r_lock and s_lock should be held and interrupts disabled.
  * If we are already in error state, just return.
  */
 int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
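The comment change above is the heart of the patch: qib_error_qp() now requires the QP's r_lock in addition to s_lock, with interrupts disabled. A minimal sketch of that caller contract follows; the surrounding code is illustrative, not taken from this patch, and assumes the ordering the rest of the series uses (r_lock outer, s_lock nested inside):

        int lastwqe = 0;
        unsigned long flags;

        spin_lock_irqsave(&qp->r_lock, flags); /* outer lock, irqs off */
        spin_lock(&qp->s_lock);                /* s_lock nests inside r_lock */
        if (qib_error_qp(qp, IB_WC_WR_FLUSH_ERR))
                lastwqe = 1;                   /* caller raises the last WQE event later */
        spin_unlock(&qp->s_lock);
        spin_unlock_irqrestore(&qp->r_lock, flags);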
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 40c0a373719c..a0931119bd78 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -868,7 +868,7 @@ done:
 
 /*
  * Back up requester to resend the last un-ACKed request.
- * The QP s_lock should be held and interrupts disabled.
+ * The QP r_lock and s_lock should be held and interrupts disabled.
  */
 static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
 {
@@ -911,7 +911,8 @@ static void rc_timeout(unsigned long arg)
 	struct qib_ibport *ibp;
 	unsigned long flags;
 
-	spin_lock_irqsave(&qp->s_lock, flags);
+	spin_lock_irqsave(&qp->r_lock, flags);
+	spin_lock(&qp->s_lock);
 	if (qp->s_flags & QIB_S_TIMER) {
 		ibp = to_iport(qp->ibqp.device, qp->port_num);
 		ibp->n_rc_timeouts++;
@@ -920,7 +921,8 @@ static void rc_timeout(unsigned long arg)
 		qib_restart_rc(qp, qp->s_last_psn + 1, 1);
 		qib_schedule_send(qp);
 	}
-	spin_unlock_irqrestore(&qp->s_lock, flags);
+	spin_unlock(&qp->s_lock);
+	spin_unlock_irqrestore(&qp->r_lock, flags);
 }
 
 /*
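The two rc_timeout() hunks together produce the nesting shown condensed below. qib_restart_rc() can end up in qib_error_qp() when retries are exhausted, which is presumably why the timer handler must now take r_lock first (elisions marked; everything else is from the hunks above):

static void rc_timeout(unsigned long arg)
{
        struct qib_qp *qp = (struct qib_qp *)arg;
        struct qib_ibport *ibp;
        unsigned long flags;

        spin_lock_irqsave(&qp->r_lock, flags); /* new: outer lock */
        spin_lock(&qp->s_lock);                /* as before, but now nested */
        if (qp->s_flags & QIB_S_TIMER) {
                /* ... count the timeout, then restart and reschedule ... */
                qib_restart_rc(qp, qp->s_last_psn + 1, 1);
                qib_schedule_send(qp);
        }
        spin_unlock(&qp->s_lock);
        spin_unlock_irqrestore(&qp->r_lock, flags);
}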
@@ -1414,10 +1416,6 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
 
 	spin_lock_irqsave(&qp->s_lock, flags);
 
-	/* Double check we can process this now that we hold the s_lock. */
-	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-		goto ack_done;
-
 	/* Ignore invalid responses. */
 	if (qib_cmp24(psn, qp->s_next_psn) >= 0)
 		goto ack_done;
@@ -1661,9 +1659,6 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
 	ibp->n_rc_dupreq++;
 
 	spin_lock_irqsave(&qp->s_lock, flags);
-	/* Double check we can process this now that we hold the s_lock. */
-	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-		goto unlock_done;
 
 	for (i = qp->r_head_ack_queue; ; i = prev) {
 		if (i == qp->s_tail_ack_queue)
@@ -1878,9 +1873,6 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
 	psn = be32_to_cpu(ohdr->bth[2]);
 	opcode >>= 24;
 
-	/* Prevent simultaneous processing after APM on different CPUs */
-	spin_lock(&qp->r_lock);
-
 	/*
 	 * Process responses (ACKs) before anything else. Note that the
 	 * packet sequence number will be for something in the send work
@@ -1891,14 +1883,14 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
 	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
 		qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
 				hdrsize, pmtu, rcd);
-		goto runlock;
+		return;
 	}
 
 	/* Compute 24 bits worth of difference. */
 	diff = qib_cmp24(psn, qp->r_psn);
 	if (unlikely(diff)) {
 		if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
-			goto runlock;
+			return;
 		goto send_ack;
 	}
 
@@ -2090,9 +2082,6 @@ send_last:
 		if (next > QIB_MAX_RDMA_ATOMIC)
 			next = 0;
 		spin_lock_irqsave(&qp->s_lock, flags);
-		/* Double check we can process this while holding the s_lock. */
-		if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-			goto srunlock;
 		if (unlikely(next == qp->s_tail_ack_queue)) {
 			if (!qp->s_ack_queue[next].sent)
 				goto nack_inv_unlck;
@@ -2146,7 +2135,7 @@ send_last:
 		qp->s_flags |= QIB_S_RESP_PENDING;
 		qib_schedule_send(qp);
 
-		goto srunlock;
+		goto sunlock;
 	}
 
 	case OP(COMPARE_SWAP):
@@ -2165,9 +2154,6 @@ send_last:
 		if (next > QIB_MAX_RDMA_ATOMIC)
 			next = 0;
 		spin_lock_irqsave(&qp->s_lock, flags);
-		/* Double check we can process this while holding the s_lock. */
-		if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-			goto srunlock;
 		if (unlikely(next == qp->s_tail_ack_queue)) {
 			if (!qp->s_ack_queue[next].sent)
 				goto nack_inv_unlck;
@@ -2213,7 +2199,7 @@ send_last:
 		qp->s_flags |= QIB_S_RESP_PENDING;
 		qib_schedule_send(qp);
 
-		goto srunlock;
+		goto sunlock;
 	}
 
 	default:
@@ -2227,7 +2213,7 @@ send_last:
 	/* Send an ACK if requested or required. */
 	if (psn & (1 << 31))
 		goto send_ack;
-	goto runlock;
+	return;
 
 rnr_nak:
 	qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
@@ -2238,7 +2224,7 @@ rnr_nak:
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
-	goto runlock;
+	return;
 
 nack_op_err:
 	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
@@ -2250,7 +2236,7 @@ nack_op_err:
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
-	goto runlock;
+	return;
 
 nack_inv_unlck:
 	spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -2264,7 +2250,7 @@ nack_inv:
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
-	goto runlock;
+	return;
 
 nack_acc_unlck:
 	spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -2274,13 +2260,6 @@ nack_acc:
 	qp->r_ack_psn = qp->r_psn;
 send_ack:
 	qib_send_rc_ack(qp);
-runlock:
-	spin_unlock(&qp->r_lock);
-	return;
-
-srunlock:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-	spin_unlock(&qp->r_lock);
 	return;
 
 sunlock:
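With r_lock held by the caller for the whole of qib_rc_rcv() (see the qib_verbs.c hunks below), the receive state can no longer change between the caller's check and these s_lock critical sections, so the repeated "double check" of QIB_PROCESS_RECV_OK is redundant and the r_lock-dropping exit labels collapse. Condensed, the tail of the function now reads (a sketch of the result, not a hunk):

send_ack:
        qib_send_rc_ack(qp);
        return;                 /* r_lock is dropped by the caller */

sunlock:                        /* reached with only s_lock still held */
        spin_unlock_irqrestore(&qp->s_lock, flags);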
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index b8456881f7f6..cad44491320b 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -656,6 +656,7 @@ unmap:
 	}
 	qp = tx->qp;
 	qib_put_txreq(tx);
+	spin_lock(&qp->r_lock);
 	spin_lock(&qp->s_lock);
 	if (qp->ibqp.qp_type == IB_QPT_RC) {
 		/* XXX what about error sending RDMA read responses? */
@@ -664,6 +665,7 @@ unmap:
 	} else if (qp->s_wqe)
 		qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
 	spin_unlock(&qp->s_lock);
+	spin_unlock(&qp->r_lock);
 	/* return zero to process the next send work request */
 	goto unlock;
 
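The sdma unmap error path adds the same pairing with plain spin_lock() rather than spin_lock_irqsave(), presumably because this completion path already runs with interrupts disabled (an inference; the patch does not say). The invariant all the touched call sites now share, as an illustrative sketch:

        /* Lock-ordering invariant after this patch (illustrative only):
         * qp->r_lock is always taken before qp->s_lock, never the reverse,
         * so rc_timeout(), this sdma error path and qib_qp_rcv() cannot
         * deadlock against one another. */
        spin_lock(&qp->r_lock);         /* outer */
        spin_lock(&qp->s_lock);         /* inner */
        /* ... qib_error_qp() / qib_send_complete() run here ... */
        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->r_lock);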
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 6c7fe78cca64..b9c8b6346c1b 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -272,9 +272,6 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 	opcode >>= 24;
 	memset(&wc, 0, sizeof wc);
 
-	/* Prevent simultaneous processing after APM on different CPUs */
-	spin_lock(&qp->r_lock);
-
 	/* Compare the PSN verses the expected PSN. */
 	if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
 		/*
@@ -534,7 +531,6 @@ rdma_last:
 	}
 	qp->r_psn++;
 	qp->r_state = opcode;
-	spin_unlock(&qp->r_lock);
 	return;
 
 rewind:
@@ -542,12 +538,10 @@ rewind:
 	qp->r_sge.num_sge = 0;
 drop:
 	ibp->n_pkt_drops++;
-	spin_unlock(&qp->r_lock);
 	return;
 
 op_err:
 	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
-	spin_unlock(&qp->r_lock);
 	return;
 
 sunlock:
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index c838cda73347..e1b3da2a1f85 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -535,13 +535,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 	wc.byte_len = tlen + sizeof(struct ib_grh);
 
 	/*
-	 * We need to serialize getting a receive work queue entry and
-	 * generating a completion for it against QPs sending to this QP
-	 * locally.
-	 */
-	spin_lock(&qp->r_lock);
-
-	/*
 	 * Get the next work request entry to find where to put the data.
 	 */
@@ -552,19 +545,19 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 		ret = qib_get_rwqe(qp, 0);
 		if (ret < 0) {
 			qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
-			goto bail_unlock;
+			return;
 		}
 		if (!ret) {
 			if (qp->ibqp.qp_num == 0)
 				ibp->n_vl15_dropped++;
-			goto bail_unlock;
+			return;
 		}
 	}
 	/* Silently drop packets which are too big. */
 	if (unlikely(wc.byte_len > qp->r_len)) {
 		qp->r_flags |= QIB_R_REUSE_SGE;
 		ibp->n_pkt_drops++;
-		goto bail_unlock;
+		return;
 	}
 	if (has_grh) {
 		qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
@@ -579,7 +572,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 		qp->r_sge.sge = *qp->r_sge.sg_list++;
 	}
 	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
-		goto bail_unlock;
+		return;
 	wc.wr_id = qp->r_wr_id;
 	wc.status = IB_WC_SUCCESS;
 	wc.opcode = IB_WC_RECV;
@@ -601,7 +594,5 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 		     (ohdr->bth[0] &
 		      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
-bail_unlock:
-	spin_unlock(&qp->r_lock);
 bail:;
 }
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index cda8f4173d23..9fab40488850 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -550,10 +550,12 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
 {
 	struct qib_ibport *ibp = &rcd->ppd->ibport_data;
 
+	spin_lock(&qp->r_lock);
+
 	/* Check for valid receive state. */
 	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
 		ibp->n_pkt_drops++;
-		return;
+		goto unlock;
 	}
 
 	switch (qp->ibqp.qp_type) {
@@ -577,6 +579,9 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
 	default:
 		break;
 	}
+
+unlock:
+	spin_unlock(&qp->r_lock);
 }
 
 /**
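The qib_verbs.c hunks are the other half of the design: qib_qp_rcv() becomes the single place the receive path takes r_lock, so qib_rc_rcv(), qib_uc_rcv() and qib_ud_rcv() are all entered with it held and with the QP state already validated. A condensed view of the patched dispatcher; the parameter list past the first two arguments and the elided switch arms are assumed from the qib_rc_rcv() prototype visible above, not quoted from the patch:

static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
                       int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
        struct qib_ibport *ibp = &rcd->ppd->ibport_data;

        spin_lock(&qp->r_lock);         /* serializes the whole receive path */

        /* Check for valid receive state. */
        if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
                ibp->n_pkt_drops++;
                goto unlock;
        }

        switch (qp->ibqp.qp_type) {
        case IB_QPT_RC:
                qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
                break;
        /* ... UC and UD arms elided; same shape ... */
        default:
                break;
        }

unlock:
        spin_unlock(&qp->r_lock);
}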
