author    Ralph Campbell <ralph.campbell@qlogic.com>  2007-05-03 15:43:03 -0400
committer Roland Dreier <rolandd@cisco.com>  2007-05-07 00:18:11 -0400
commit    154257f3626ea6dd96781fac0896c3f27fe2b0a1 (patch)
tree      ab91781594c064b9b7af781f8271000298ddf32b /drivers
parent    6ed89b9574776d4178f1ad754d20e4f1e5a4b6c8 (diff)
IB/ipath: Fix a race condition when generating ACKs
Fix a problem where simple ACKs can be sent ahead of RDMA read responses, thus implicitly NAKing the RDMA read.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Robert Walsh <robert.walsh@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
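The race is easier to see in isolation. Below is a minimal, self-contained C sketch, not the ipath driver code: struct qp_sketch, ack_path_clear_old/new, and response_sent are hypothetical names, and only the head/tail/state fields mirror the real QP. It models the ack-queue bookkeeping this patch changes: the old code advanced the tail index when a response was merely constructed, so the simple-ACK path's head == tail test could pass while the response was still unsent; the fix defers the increment until the send completes and makes the ACK path check the send state as well.

/* Sketch of the ack-queue race fixed by this patch (hypothetical names). */
#include <stdbool.h>
#include <stdio.h>

#define MAX_RDMA_ATOMIC 4	/* the ack queue has MAX_RDMA_ATOMIC + 1 slots */

enum ack_state { ACKNOWLEDGE, RDMA_READ_RESPONSE_LAST };

struct qp_sketch {
	unsigned r_head_ack_queue;	/* producer index: incoming requests */
	unsigned s_tail_ack_queue;	/* consumer index: responses to send */
	enum ack_state s_ack_state;	/* response currently going out */
	bool s_ack_pending;		/* a delayed simple ACK is queued */
};

/* Pre-fix test on the immediate-ACK path: an empty queue meant "safe". */
static bool ack_path_clear_old(const struct qp_sketch *qp)
{
	return qp->r_head_ack_queue == qp->s_tail_ack_queue;
}

/* Post-fix test: also require that no response is still mid-flight. */
static bool ack_path_clear_new(const struct qp_sketch *qp)
{
	return qp->r_head_ack_queue == qp->s_tail_ack_queue &&
	       !qp->s_ack_pending &&
	       qp->s_ack_state == ACKNOWLEDGE;
}

/* Post-fix: advance the tail only once the last packet has been sent. */
static void response_sent(struct qp_sketch *qp)
{
	if (++qp->s_tail_ack_queue > MAX_RDMA_ATOMIC)
		qp->s_tail_ack_queue = 0;	/* wrap the ring index */
	qp->s_ack_state = ACKNOWLEDGE;
}

int main(void)
{
	struct qp_sketch qp = { .r_head_ack_queue = 1 };

	/* Old ordering: the tail was bumped when the RDMA read response
	 * was constructed, so head == tail while it was still unsent. */
	qp.s_tail_ack_queue = 1;
	qp.s_ack_state = RDMA_READ_RESPONSE_LAST;
	printf("old check lets ACK overtake response: %d\n",
	       ack_path_clear_old(&qp));	/* prints 1: the race */

	/* New ordering: leave the tail alone until the send completes and
	 * make the immediate-ACK path look at the send state too. */
	qp.s_tail_ack_queue = 0;
	printf("new check holds ACK back:             %d\n",
	       ack_path_clear_new(&qp));	/* prints 0 */

	response_sent(&qp);
	printf("after the response is sent:           %d\n",
	       ack_path_clear_new(&qp));	/* prints 1 */
	return 0;
}

Compiled and run as-is, the sketch prints 1/0/1: under the old test a bare ACK could go out ahead of the queued read response, while the new test holds it back until the response is actually on the wire. The ack_path_clear_new() predicate mirrors the new send_rc_ack() condition in the last hunk below, which now also defers when IPATH_S_ACK_PENDING is set or s_ack_state is not ACKNOWLEDGE.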
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c | 28
1 file changed, 15 insertions, 13 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index f4d729d5dd1a..1915771fd038 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -98,13 +98,21 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
 	case OP(RDMA_READ_RESPONSE_LAST):
 	case OP(RDMA_READ_RESPONSE_ONLY):
 	case OP(ATOMIC_ACKNOWLEDGE):
-		qp->s_ack_state = OP(ACKNOWLEDGE);
+		/*
+		 * We can increment the tail pointer now that the last
+		 * response has been sent instead of only being
+		 * constructed.
+		 */
+		if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
+			qp->s_tail_ack_queue = 0;
 		/* FALLTHROUGH */
+	case OP(SEND_ONLY):
 	case OP(ACKNOWLEDGE):
 		/* Check for no next entry in the queue. */
 		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
 			if (qp->s_flags & IPATH_S_ACK_PENDING)
 				goto normal;
+			qp->s_ack_state = OP(ACKNOWLEDGE);
 			goto bail;
 		}
 
@@ -117,12 +125,8 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
 			if (len > pmtu) {
 				len = pmtu;
 				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
-			} else {
+			} else
 				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
-				if (++qp->s_tail_ack_queue >
-				    IPATH_MAX_RDMA_ATOMIC)
-					qp->s_tail_ack_queue = 0;
-			}
 			ohdr->u.aeth = ipath_compute_aeth(qp);
 			hwords++;
 			qp->s_ack_rdma_psn = e->psn;
@@ -139,8 +143,6 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
 				cpu_to_be32(e->atomic_data);
 			hwords += sizeof(ohdr->u.at) / sizeof(u32);
 			bth2 = e->psn;
-			if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
-				qp->s_tail_ack_queue = 0;
 		}
 		bth0 = qp->s_ack_state << 24;
 		break;
@@ -156,8 +158,6 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
 			ohdr->u.aeth = ipath_compute_aeth(qp);
 			hwords++;
 			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
-			if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
-				qp->s_tail_ack_queue = 0;
 		}
 		bth0 = qp->s_ack_state << 24;
 		bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
@@ -171,7 +171,7 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
 		 * the ACK before setting s_ack_state to ACKNOWLEDGE
 		 * (see above).
 		 */
-		qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
+		qp->s_ack_state = OP(SEND_ONLY);
 		qp->s_flags &= ~IPATH_S_ACK_PENDING;
 		qp->s_cur_sge = NULL;
 		if (qp->s_nak_state)
@@ -223,7 +223,7 @@ int ipath_make_rc_req(struct ipath_qp *qp,
 	/* Sending responses has higher priority over sending requests. */
 	if ((qp->r_head_ack_queue != qp->s_tail_ack_queue ||
 	     (qp->s_flags & IPATH_S_ACK_PENDING) ||
-	     qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE) &&
+	     qp->s_ack_state != OP(ACKNOWLEDGE)) &&
 	    ipath_make_rc_ack(qp, ohdr, pmtu, bth0p, bth2p))
 		goto done;
 
@@ -585,7 +585,9 @@ static void send_rc_ack(struct ipath_qp *qp)
 	unsigned long flags;
 
 	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
-	if (qp->r_head_ack_queue != qp->s_tail_ack_queue)
+	if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
+	    (qp->s_flags & IPATH_S_ACK_PENDING) ||
+	    qp->s_ack_state != OP(ACKNOWLEDGE))
 		goto queue_ack;
 
 	/* Construct the header. */