diff options
author | Ralph Campbell <ralph.campbell@qlogic.com> | 2008-08-15 14:23:47 -0400 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2008-08-15 14:23:47 -0400 |
commit | 7ec01ff950c455aa1f1ccfaf347eb1aa9ec160d5 (patch) | |
tree | 3cde8a28f0c1418bc520b705c35d7c4b5de9ef10 /drivers | |
parent | ffaa5b984a9322bbd5d9a7f0814ca2ce70feebe5 (diff) |
IB/ipath: Fix lost UD send work request
If a UD QP has some work requests queued to be sent by the DMA engine
followed by a local loopback work request, we have to wait for the
previous work requests to finish, or the completion for the local
loopback work request would be generated out of order. The problem
was that the work request queue pointer was already updated, so that
the request would not be processed when the DMA queue drained.
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_ud.c | 8 |
1 file changed, 6 insertions, 2 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c index 36aa242c487c..729446f56aab 100644 --- a/drivers/infiniband/hw/ipath/ipath_ud.c +++ b/drivers/infiniband/hw/ipath/ipath_ud.c | |||
@@ -267,6 +267,7 @@ int ipath_make_ud_req(struct ipath_qp *qp) | |||
267 | u16 lrh0; | 267 | u16 lrh0; |
268 | u16 lid; | 268 | u16 lid; |
269 | int ret = 0; | 269 | int ret = 0; |
270 | int next_cur; | ||
270 | 271 | ||
271 | spin_lock_irqsave(&qp->s_lock, flags); | 272 | spin_lock_irqsave(&qp->s_lock, flags); |
272 | 273 | ||
@@ -290,8 +291,9 @@ int ipath_make_ud_req(struct ipath_qp *qp) | |||
290 | goto bail; | 291 | goto bail; |
291 | 292 | ||
292 | wqe = get_swqe_ptr(qp, qp->s_cur); | 293 | wqe = get_swqe_ptr(qp, qp->s_cur); |
293 | if (++qp->s_cur >= qp->s_size) | 294 | next_cur = qp->s_cur + 1; |
294 | qp->s_cur = 0; | 295 | if (next_cur >= qp->s_size) |
296 | next_cur = 0; | ||
295 | 297 | ||
296 | /* Construct the header. */ | 298 | /* Construct the header. */ |
297 | ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr; | 299 | ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr; |
@@ -315,6 +317,7 @@ int ipath_make_ud_req(struct ipath_qp *qp) | |||
315 | qp->s_flags |= IPATH_S_WAIT_DMA; | 317 | qp->s_flags |= IPATH_S_WAIT_DMA; |
316 | goto bail; | 318 | goto bail; |
317 | } | 319 | } |
320 | qp->s_cur = next_cur; | ||
318 | spin_unlock_irqrestore(&qp->s_lock, flags); | 321 | spin_unlock_irqrestore(&qp->s_lock, flags); |
319 | ipath_ud_loopback(qp, wqe); | 322 | ipath_ud_loopback(qp, wqe); |
320 | spin_lock_irqsave(&qp->s_lock, flags); | 323 | spin_lock_irqsave(&qp->s_lock, flags); |
@@ -323,6 +326,7 @@ int ipath_make_ud_req(struct ipath_qp *qp) | |||
323 | } | 326 | } |
324 | } | 327 | } |
325 | 328 | ||
329 | qp->s_cur = next_cur; | ||
326 | extra_bytes = -wqe->length & 3; | 330 | extra_bytes = -wqe->length & 3; |
327 | nwords = (wqe->length + extra_bytes) >> 2; | 331 | nwords = (wqe->length + extra_bytes) >> 2; |
328 | 332 | ||