author	Bryan O'Sullivan <bos@pathscale.com>	2006-07-01 07:35:51 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-07-01 12:55:58 -0400
commit	6700efdfc06d2dc9ef77988a00182c2ede0f1be0 (patch)
tree	a428669e9bb9b88c1dbe4192378bec454cdadfed
parent	7bbb15ea8543e2e49476a27b507be3b02828a124 (diff)
[PATCH] IB/ipath: fix shared receive queues for RC
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com>
Cc: "Michael S. Tsirkin" <mst@mellanox.co.il>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
 drivers/infiniband/hw/ipath/ipath_rc.c | 243
 1 file changed, 101 insertions(+), 142 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 720cb3ae1fc3..bd2c405c4bf0 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -257,7 +257,7 @@ int ipath_make_rc_req(struct ipath_qp *qp,
 			break;
 
 		case IB_WR_RDMA_WRITE:
-			if (newreq)
+			if (newreq && qp->s_lsn != (u32) -1)
 				qp->s_lsn++;
 			/* FALLTHROUGH */
 		case IB_WR_RDMA_WRITE_WITH_IMM:
@@ -283,8 +283,7 @@ int ipath_make_rc_req(struct ipath_qp *qp,
 			else {
 				qp->s_state =
 					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
-				/* Immediate data comes
-				 * after RETH */
+				/* Immediate data comes after RETH */
 				ohdr->u.rc.imm_data = wqe->wr.imm_data;
 				hwords += 1;
 				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -304,7 +303,8 @@ int ipath_make_rc_req(struct ipath_qp *qp,
 			qp->s_state = OP(RDMA_READ_REQUEST);
 			hwords += sizeof(ohdr->u.rc.reth) / 4;
 			if (newreq) {
-				qp->s_lsn++;
+				if (qp->s_lsn != (u32) -1)
+					qp->s_lsn++;
 				/*
 				 * Adjust s_next_psn to count the
 				 * expected number of responses.
@@ -335,7 +335,8 @@ int ipath_make_rc_req(struct ipath_qp *qp,
 					wqe->wr.wr.atomic.compare_add);
 			hwords += sizeof(struct ib_atomic_eth) / 4;
 			if (newreq) {
-				qp->s_lsn++;
+				if (qp->s_lsn != (u32) -1)
+					qp->s_lsn++;
 				wqe->lpsn = wqe->psn;
 			}
 			if (++qp->s_cur == qp->s_size)
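The same guard recurs in the three hunks above: advance qp->s_lsn (the limit
sequence number used for end-to-end credit accounting) only when it holds a
valid value. When the responder feeds a shared receive queue there is no
per-QP credit flow, and s_lsn evidently stays pinned at (u32) -1 as a "no
limit" sentinel, so it must not be incremented. A minimal user-space sketch
of the pattern, with a hypothetical qp_state standing in for struct ipath_qp:

#include <stdint.h>
#include <stdio.h>

struct qp_state {
	uint32_t s_lsn;	/* limit sequence number; (u32) -1 => no credit flow */
};

/* Mirror of the guarded increment this patch adds: skip the advance
 * when the sentinel says credits are not in use (the SRQ case). */
static void advance_lsn(struct qp_state *qp)
{
	if (qp->s_lsn != (uint32_t) -1)
		qp->s_lsn++;
}

int main(void)
{
	struct qp_state credit_qp = { .s_lsn = 5 };
	struct qp_state srq_qp = { .s_lsn = (uint32_t) -1 };

	advance_lsn(&credit_qp);	/* 5 -> 6 */
	advance_lsn(&srq_qp);		/* sentinel left untouched */

	printf("%u 0x%x\n", credit_qp.s_lsn, srq_qp.s_lsn);
	return 0;
}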
@@ -553,6 +554,88 @@ static void send_rc_ack(struct ipath_qp *qp)
 }
 
 /**
+ * reset_psn - reset the QP state to send starting from PSN
+ * @qp: the QP
+ * @psn: the packet sequence number to restart at
+ *
+ * This is called from ipath_rc_rcv() to process an incoming RC ACK
+ * for the given QP.
+ * Called at interrupt level with the QP s_lock held.
+ */
+static void reset_psn(struct ipath_qp *qp, u32 psn)
+{
+	u32 n = qp->s_last;
+	struct ipath_swqe *wqe = get_swqe_ptr(qp, n);
+	u32 opcode;
+
+	qp->s_cur = n;
+
+	/*
+	 * If we are starting the request from the beginning,
+	 * let the normal send code handle initialization.
+	 */
+	if (ipath_cmp24(psn, wqe->psn) <= 0) {
+		qp->s_state = OP(SEND_LAST);
+		goto done;
+	}
+
+	/* Find the work request opcode corresponding to the given PSN. */
+	opcode = wqe->wr.opcode;
+	for (;;) {
+		int diff;
+
+		if (++n == qp->s_size)
+			n = 0;
+		if (n == qp->s_tail)
+			break;
+		wqe = get_swqe_ptr(qp, n);
+		diff = ipath_cmp24(psn, wqe->psn);
+		if (diff < 0)
+			break;
+		qp->s_cur = n;
+		/*
+		 * If we are starting the request from the beginning,
+		 * let the normal send code handle initialization.
+		 */
+		if (diff == 0) {
+			qp->s_state = OP(SEND_LAST);
+			goto done;
+		}
+		opcode = wqe->wr.opcode;
+	}
+
+	/*
+	 * Set the state to restart in the middle of a request.
+	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
+	 * See ipath_do_rc_send().
+	 */
+	switch (opcode) {
+	case IB_WR_SEND:
+	case IB_WR_SEND_WITH_IMM:
+		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
+		break;
+
+	case IB_WR_RDMA_WRITE:
+	case IB_WR_RDMA_WRITE_WITH_IMM:
+		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
+		break;
+
+	case IB_WR_RDMA_READ:
+		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
+		break;
+
+	default:
+		/*
+		 * This case shouldn't happen since it's only
+		 * one PSN per request.
+		 */
+		qp->s_state = OP(SEND_LAST);
+	}
+done:
+	qp->s_psn = psn;
+}
+
+/**
  * ipath_restart_rc - back up requester to resend the last un-ACKed request
  * @qp: the QP to restart
  * @psn: packet sequence number for the request
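The new reset_psn() treats the send work queue as a ring and uses 24-bit PSN
comparison to locate the request that contains the restart PSN. A
self-contained sketch of that scan (ring size and slot contents are
illustrative; the driver's ipath_cmp24() achieves the same wraparound
compare with sign-extending shifts):

#include <stdint.h>
#include <stdio.h>

#define QSIZE 8	/* hypothetical ring size; qp->s_size in the driver */

struct swqe { uint32_t psn; };	/* first PSN of each pending request */

/* Portable 24-bit PSN comparison: negative, zero, or positive as
 * a precedes, equals, or follows b modulo 2^24. */
static int cmp24(uint32_t a, uint32_t b)
{
	int32_t d = (int32_t)((a - b) & 0xffffff);
	return (d & 0x800000) ? d - 0x1000000 : d;
}

/* Sketch of the loop in reset_psn(): starting at s_last, walk the ring
 * until the next request's first PSN is beyond the target, leaving
 * 'cur' at the request that contains it. */
static uint32_t find_request(const struct swqe *q, uint32_t s_last,
			     uint32_t s_tail, uint32_t psn)
{
	uint32_t n = s_last, cur = s_last;

	for (;;) {
		if (++n == QSIZE)
			n = 0;
		if (n == s_tail)
			break;
		if (cmp24(psn, q[n].psn) < 0)
			break;
		cur = n;
	}
	return cur;
}

int main(void)
{
	/* three pending requests with first PSNs 10, 13, and 20 */
	struct swqe q[QSIZE] = { [2] = {10}, [3] = {13}, [4] = {20} };

	/* PSN 15 falls inside the request starting at PSN 13 (slot 3) */
	printf("slot %u\n", find_request(q, 2, 5, 15));
	return 0;
}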
@@ -564,7 +647,6 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
 {
 	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
 	struct ipath_ibdev *dev;
-	u32 n;
 
 	/*
 	 * If there are no requests pending, we are done.
@@ -606,62 +688,7 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
 	else
 		dev->n_rc_resends += (int)qp->s_psn - (int)psn;
 
-	/*
-	 * If we are starting the request from the beginning, let the normal
-	 * send code handle initialization.
-	 */
-	qp->s_cur = qp->s_last;
-	if (ipath_cmp24(psn, wqe->psn) <= 0) {
-		qp->s_state = OP(SEND_LAST);
-		qp->s_psn = wqe->psn;
-	} else {
-		n = qp->s_cur;
-		for (;;) {
-			if (++n == qp->s_size)
-				n = 0;
-			if (n == qp->s_tail) {
-				if (ipath_cmp24(psn, qp->s_next_psn) >= 0) {
-					qp->s_cur = n;
-					wqe = get_swqe_ptr(qp, n);
-				}
-				break;
-			}
-			wqe = get_swqe_ptr(qp, n);
-			if (ipath_cmp24(psn, wqe->psn) < 0)
-				break;
-			qp->s_cur = n;
-		}
-		qp->s_psn = psn;
-
-		/*
-		 * Reset the state to restart in the middle of a request.
-		 * Don't change the s_sge, s_cur_sge, or s_cur_size.
-		 * See ipath_do_rc_send().
-		 */
-		switch (wqe->wr.opcode) {
-		case IB_WR_SEND:
-		case IB_WR_SEND_WITH_IMM:
-			qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
-			break;
-
-		case IB_WR_RDMA_WRITE:
-		case IB_WR_RDMA_WRITE_WITH_IMM:
-			qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
-			break;
-
-		case IB_WR_RDMA_READ:
-			qp->s_state =
-				OP(RDMA_READ_RESPONSE_MIDDLE);
-			break;
-
-		default:
-			/*
-			 * This case shouldn't happen since its only
-			 * one PSN per req.
-			 */
-			qp->s_state = OP(SEND_LAST);
-		}
-	}
+	reset_psn(qp, psn);
 
 done:
 	tasklet_hi_schedule(&qp->s_task);
@@ -671,74 +698,12 @@ bail:
 }
 
 /**
- * reset_psn - reset the QP state to send starting from PSN
- * @qp: the QP
- * @psn: the packet sequence number to restart at
- *
- * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK
- * for the given QP.
- * Called at interrupt level with the QP s_lock held.
- */
-static void reset_psn(struct ipath_qp *qp, u32 psn)
-{
-	struct ipath_swqe *wqe;
-	u32 n;
-
-	n = qp->s_cur;
-	wqe = get_swqe_ptr(qp, n);
-	for (;;) {
-		if (++n == qp->s_size)
-			n = 0;
-		if (n == qp->s_tail) {
-			if (ipath_cmp24(psn, qp->s_next_psn) >= 0) {
-				qp->s_cur = n;
-				wqe = get_swqe_ptr(qp, n);
-			}
-			break;
-		}
-		wqe = get_swqe_ptr(qp, n);
-		if (ipath_cmp24(psn, wqe->psn) < 0)
-			break;
-		qp->s_cur = n;
-	}
-	qp->s_psn = psn;
-
-	/*
-	 * Set the state to restart in the middle of a
-	 * request. Don't change the s_sge, s_cur_sge, or
-	 * s_cur_size. See ipath_do_rc_send().
-	 */
-	switch (wqe->wr.opcode) {
-	case IB_WR_SEND:
-	case IB_WR_SEND_WITH_IMM:
-		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
-		break;
-
-	case IB_WR_RDMA_WRITE:
-	case IB_WR_RDMA_WRITE_WITH_IMM:
-		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
-		break;
-
-	case IB_WR_RDMA_READ:
-		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
-		break;
-
-	default:
-		/*
-		 * This case shouldn't happen since its only
-		 * one PSN per req.
-		 */
-		qp->s_state = OP(SEND_LAST);
-	}
-}
-
-/**
  * do_rc_ack - process an incoming RC ACK
  * @qp: the QP the ACK came in on
  * @psn: the packet sequence number of the ACK
  * @opcode: the opcode of the request that resulted in the ACK
  *
- * This is called from ipath_rc_rcv() to process an incoming RC ACK
+ * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK
  * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
@@ -877,22 +842,12 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
 		if (qp->s_last == qp->s_tail)
 			goto bail;
 
-		/* The last valid PSN seen is the previous request's. */
-		qp->s_last_psn = wqe->psn - 1;
+		/* The last valid PSN is the previous PSN. */
+		qp->s_last_psn = psn - 1;
 
 		dev->n_rc_resends += (int)qp->s_psn - (int)psn;
 
-		/*
-		 * If we are starting the request from the beginning, let
-		 * the normal send code handle initialization.
-		 */
-		qp->s_cur = qp->s_last;
-		wqe = get_swqe_ptr(qp, qp->s_cur);
-		if (ipath_cmp24(psn, wqe->psn) <= 0) {
-			qp->s_state = OP(SEND_LAST);
-			qp->s_psn = wqe->psn;
-		} else
-			reset_psn(qp, psn);
+		reset_psn(qp, psn);
 
 		qp->s_rnr_timeout =
 			ib_ipath_rnr_table[(aeth >> IPS_AETH_CREDIT_SHIFT) &
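For context on the RNR path above: the ACK extended transport header (AETH)
packs a 24-bit MSN plus an 8-bit syndrome, and for an RNR NAK the low five
syndrome bits select an entry in the driver's backoff table. A rough sketch
of the field extraction, assuming the usual IBTA AETH layout (the shift of
24 and mask of 0x1F mirror what IPS_AETH_CREDIT_SHIFT and friends appear to
be in this driver's headers):

#include <stdint.h>
#include <stdio.h>

/* Assumed values standing in for the IPS_AETH_* constants. */
#define AETH_SYNDROME_SHIFT	24
#define AETH_TIMER_MASK		0x1F
#define AETH_MSN_MASK		0xFFFFFF

int main(void)
{
	/* hypothetical AETH: RNR NAK syndrome with timer code 14, MSN 42 */
	uint32_t aeth = (UINT32_C(0x6E) << AETH_SYNDROME_SHIFT) | 42;

	uint32_t timer_code = (aeth >> AETH_SYNDROME_SHIFT) & AETH_TIMER_MASK;
	uint32_t msn = aeth & AETH_MSN_MASK;

	/* timer_code is what would index ib_ipath_rnr_table[] above */
	printf("timer code %u, MSN %u\n", timer_code, msn);
	return 0;
}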
@@ -1070,9 +1025,10 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 			  &dev->pending[dev->pending_index]);
 		spin_unlock(&dev->pending_lock);
 		/*
-		 * Update the RDMA receive state but do the copy w/o holding the
-		 * locks and blocking interrupts.  XXX Yet another place that
-		 * affects relaxed RDMA order since we don't want s_sge modified.
+		 * Update the RDMA receive state but do the copy w/o
+		 * holding the locks and blocking interrupts.
+		 * XXX Yet another place that affects relaxed RDMA order
+		 * since we don't want s_sge modified.
 		 */
 		qp->s_len -= pmtu;
 		qp->s_last_psn = psn;
@@ -1119,9 +1075,12 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 		if (do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST))) {
 			/*
 			 * Change the state so we continue
-			 * processing new requests.
+			 * processing new requests and wake up the
+			 * tasklet if there are posted sends.
 			 */
 			qp->s_state = OP(SEND_LAST);
+			if (qp->s_tail != qp->s_head)
+				tasklet_hi_schedule(&qp->s_task);
 		}
 		goto ack_done;
 	}
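The addition in this last hunk deserves a gloss: while an RDMA read is
outstanding the send engine sits idle, and new work requests may be posted
in the meantime. When the final read response arrives, the tasklet is
re-armed only if the work-queue ring is non-empty. A toy model of that
head/tail check (names follow struct ipath_qp; the ring itself is elided):

#include <stdint.h>
#include <stdio.h>

/* s_head is where ib_post_send() adds entries; s_tail is the next
 * entry the send engine will process. Equal indices mean empty. */
struct send_queue {
	uint32_t s_head;
	uint32_t s_tail;
};

static int sends_pending(const struct send_queue *sq)
{
	/* in the patch: if (qp->s_tail != qp->s_head)
	 *                       tasklet_hi_schedule(&qp->s_task); */
	return sq->s_head != sq->s_tail;
}

int main(void)
{
	struct send_queue idle = { .s_head = 3, .s_tail = 3 };
	struct send_queue busy = { .s_head = 5, .s_tail = 3 };

	printf("idle: %d, busy: %d\n",
	       sends_pending(&idle), sends_pending(&busy));
	return 0;
}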