diff options
author | Ralph Campbell <ralph.campbell@qlogic.com> | 2007-12-14 22:22:34 -0500 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2008-01-25 17:15:34 -0500 |
commit | cc65edcf0c174eff4367cfbc594a2f33c0d477fa (patch) | |
tree | 0422b9197852617730b9b0d51964726dd5518ec3 | |
parent | e57d62a14775c9d37195debe837431c75168ef69 (diff) |
IB/ipath: Fix RNR NAK handling
This patch fixes a couple of minor problems with RNR NAK handling:
- The insertion sort was causing extra delay when inserting ahead
vs. behind an existing entry on the list.
- A resend of the first packet of a message which is still not ready
needs another RNR NAK (i.e., it was suppressed when it shouldn't have been).
- Also, the resend tasklet doesn't need to be woken up unless the
ACK/NAK actually indicates progress has been made.
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_rc.c | 18 | ||||
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_ruc.c | 6 |
2 files changed, 12 insertions, 12 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c index 120a61b03bc4..459e46e2c016 100644 --- a/drivers/infiniband/hw/ipath/ipath_rc.c +++ b/drivers/infiniband/hw/ipath/ipath_rc.c | |||
@@ -647,6 +647,7 @@ static void send_rc_ack(struct ipath_qp *qp) | |||
647 | 647 | ||
648 | queue_ack: | 648 | queue_ack: |
649 | spin_lock_irqsave(&qp->s_lock, flags); | 649 | spin_lock_irqsave(&qp->s_lock, flags); |
650 | dev->n_rc_qacks++; | ||
650 | qp->s_flags |= IPATH_S_ACK_PENDING; | 651 | qp->s_flags |= IPATH_S_ACK_PENDING; |
651 | qp->s_nak_state = qp->r_nak_state; | 652 | qp->s_nak_state = qp->r_nak_state; |
652 | qp->s_ack_psn = qp->r_ack_psn; | 653 | qp->s_ack_psn = qp->r_ack_psn; |
@@ -798,11 +799,13 @@ bail: | |||
798 | 799 | ||
799 | static inline void update_last_psn(struct ipath_qp *qp, u32 psn) | 800 | static inline void update_last_psn(struct ipath_qp *qp, u32 psn) |
800 | { | 801 | { |
801 | if (qp->s_wait_credit) { | 802 | if (qp->s_last_psn != psn) { |
802 | qp->s_wait_credit = 0; | 803 | qp->s_last_psn = psn; |
803 | tasklet_hi_schedule(&qp->s_task); | 804 | if (qp->s_wait_credit) { |
805 | qp->s_wait_credit = 0; | ||
806 | tasklet_hi_schedule(&qp->s_task); | ||
807 | } | ||
804 | } | 808 | } |
805 | qp->s_last_psn = psn; | ||
806 | } | 809 | } |
807 | 810 | ||
808 | /** | 811 | /** |
@@ -1653,13 +1656,6 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
1653 | case OP(SEND_FIRST): | 1656 | case OP(SEND_FIRST): |
1654 | if (!ipath_get_rwqe(qp, 0)) { | 1657 | if (!ipath_get_rwqe(qp, 0)) { |
1655 | rnr_nak: | 1658 | rnr_nak: |
1656 | /* | ||
1657 | * A RNR NAK will ACK earlier sends and RDMA writes. | ||
1658 | * Don't queue the NAK if a RDMA read or atomic | ||
1659 | * is pending though. | ||
1660 | */ | ||
1661 | if (qp->r_nak_state) | ||
1662 | goto done; | ||
1663 | qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer; | 1659 | qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer; |
1664 | qp->r_ack_psn = qp->r_psn; | 1660 | qp->r_ack_psn = qp->r_psn; |
1665 | goto send_ack; | 1661 | goto send_ack; |
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c index 1b4f7e113b21..a59bdbd0ed87 100644 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c | |||
@@ -98,11 +98,15 @@ void ipath_insert_rnr_queue(struct ipath_qp *qp) | |||
98 | while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) { | 98 | while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) { |
99 | qp->s_rnr_timeout -= nqp->s_rnr_timeout; | 99 | qp->s_rnr_timeout -= nqp->s_rnr_timeout; |
100 | l = l->next; | 100 | l = l->next; |
101 | if (l->next == &dev->rnrwait) | 101 | if (l->next == &dev->rnrwait) { |
102 | nqp = NULL; | ||
102 | break; | 103 | break; |
104 | } | ||
103 | nqp = list_entry(l->next, struct ipath_qp, | 105 | nqp = list_entry(l->next, struct ipath_qp, |
104 | timerwait); | 106 | timerwait); |
105 | } | 107 | } |
108 | if (nqp) | ||
109 | nqp->s_rnr_timeout -= qp->s_rnr_timeout; | ||
106 | list_add(&qp->timerwait, l); | 110 | list_add(&qp->timerwait, l); |
107 | } | 111 | } |
108 | spin_unlock_irqrestore(&dev->pending_lock, flags); | 112 | spin_unlock_irqrestore(&dev->pending_lock, flags); |