Diffstat (limited to 'drivers/infiniband/hw/qib/qib_ud.c')
-rw-r--r--	drivers/infiniband/hw/qib/qib_ud.c	17
1 files changed, 4 insertions, 13 deletions
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index c838cda73347..e1b3da2a1f85 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -535,13 +535,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 	wc.byte_len = tlen + sizeof(struct ib_grh);
 
 	/*
-	 * We need to serialize getting a receive work queue entry and
-	 * generating a completion for it against QPs sending to this QP
-	 * locally.
-	 */
-	spin_lock(&qp->r_lock);
-
-	/*
 	 * Get the next work request entry to find where to put the data.
 	 */
 	if (qp->r_flags & QIB_R_REUSE_SGE)
@@ -552,19 +545,19 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 		ret = qib_get_rwqe(qp, 0);
 		if (ret < 0) {
 			qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
-			goto bail_unlock;
+			return;
 		}
 		if (!ret) {
 			if (qp->ibqp.qp_num == 0)
 				ibp->n_vl15_dropped++;
-			goto bail_unlock;
+			return;
 		}
 	}
 	/* Silently drop packets which are too big. */
 	if (unlikely(wc.byte_len > qp->r_len)) {
 		qp->r_flags |= QIB_R_REUSE_SGE;
 		ibp->n_pkt_drops++;
-		goto bail_unlock;
+		return;
 	}
 	if (has_grh) {
 		qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
@@ -579,7 +572,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 		qp->r_sge.sge = *qp->r_sge.sg_list++;
 	}
 	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
-		goto bail_unlock;
+		return;
 	wc.wr_id = qp->r_wr_id;
 	wc.status = IB_WC_SUCCESS;
 	wc.opcode = IB_WC_RECV;
@@ -601,7 +594,5 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 		     (ohdr->bth[0] &
 		      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
-bail_unlock:
-	spin_unlock(&qp->r_lock);
 bail:;
 }
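
The pattern in every hunk is the same: once the spin_lock(&qp->r_lock)/spin_unlock(&qp->r_lock) pair is removed from qib_ud_rcv(), the early-exit paths no longer have a lock to drop, so each goto bail_unlock collapses to a plain return and the bail_unlock label disappears. Below is a minimal, self-contained sketch of that refactoring pattern (illustrative userspace C with pthreads, not the driver code; the function and variable names are made up for the example), under the assumption that the caller takes over whatever serialization the removed comment described.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;

/* Before: the receive routine takes the lock itself, so every early
 * exit has to funnel through a label that releases it. */
static void rcv_locks_itself(int len, int max_len)
{
	pthread_mutex_lock(&rq_lock);
	if (len < 0)
		goto bail_unlock;	/* error path */
	if (len > max_len)
		goto bail_unlock;	/* silently drop oversized packet */
	printf("delivered %d bytes\n", len);
bail_unlock:
	pthread_mutex_unlock(&rq_lock);
}

/* After: serialization is assumed to be the caller's job, so the
 * error paths simply return and the cleanup label goes away. */
static void rcv_caller_locks(int len, int max_len)
{
	if (len < 0)
		return;
	if (len > max_len)
		return;
	printf("delivered %d bytes\n", len);
}

int main(void)
{
	rcv_locks_itself(64, 256);

	pthread_mutex_lock(&rq_lock);	/* caller now holds the lock */
	rcv_caller_locks(64, 256);
	pthread_mutex_unlock(&rq_lock);
	return 0;
}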