author		Frank Zago <frank@zago.net>	2009-12-16 02:39:10 -0500
committer	Roland Dreier <rolandd@cisco.com>	2009-12-16 02:39:10 -0500
commit		48617f862f9e58ca2a609fea6a76733aff55d672 (patch)
tree		2e0c75f1f58756d636cf1da020873b8f38e55240 /drivers/infiniband/hw
parent		3939b20f90767655cbe5878b2ab663e64d690a88 (diff)
RDMA/cxgb3: Fix error paths in post_send and post_recv
Always set bad_wr when an immediate error is detected. Return ENOMEM
instead of EINVAL when the queue is full, to match other drivers.
Signed-off-by: Frank Zago <fzago@systemfabricworks.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch_qp.c	32
1 file changed, 20 insertions, 12 deletions
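The shape of the fix is the same in both iwch_post_send() and iwch_post_receive(): early failures no longer return directly, they set err and jump to a common out: label that stores the failing work request in *bad_wr. The standalone sketch below illustrates only that pattern; struct wr, queue_full() and post_one() are hypothetical stand-ins, not driver code.

/*
 * Standalone sketch (not driver code) of the error-path shape the patch
 * adopts: every immediate failure sets err and jumps to a single "out"
 * label, which records the work request that could not be posted in
 * *bad_wr.  struct wr, queue_full() and post_one() are hypothetical
 * stand-ins for the ib_send_wr/ib_recv_wr handling in iwch_qp.c.
 */
#include <errno.h>
#include <stddef.h>

struct wr { struct wr *next; };

static int queue_full(void) { return 0; }		/* assume space is available */
static int post_one(struct wr *wr) { (void)wr; return 0; }

static int example_post(struct wr *wr, struct wr **bad_wr)
{
	int err = 0;

	if (queue_full()) {
		err = -ENOMEM;		/* full queue: ENOMEM, not EINVAL */
		goto out;
	}
	while (wr) {
		err = post_one(wr);
		if (err)
			break;		/* wr still points at the failing WR */
		wr = wr->next;
	}
out:
	if (err)
		*bad_wr = wr;		/* always tell the caller which WR failed */
	return err;
}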
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 1cecf98829ac..3eb8cecf81d7 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -365,18 +365,19 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	spin_lock_irqsave(&qhp->lock, flag);
 	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
-		return -EINVAL;
+		err = -EINVAL;
+		goto out;
 	}
 	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
 		  qhp->wq.sq_size_log2);
 	if (num_wrs <= 0) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
-		return -ENOMEM;
+		err = -ENOMEM;
+		goto out;
 	}
 	while (wr) {
 		if (num_wrs == 0) {
 			err = -ENOMEM;
-			*bad_wr = wr;
 			break;
 		}
 		idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
@@ -428,10 +429,8 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			     wr->opcode);
 			err = -EINVAL;
 		}
-		if (err) {
-			*bad_wr = wr;
+		if (err)
 			break;
-		}
 		wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
 		sqp->wr_id = wr->wr_id;
 		sqp->opcode = wr2opcode(t3_wr_opcode);
@@ -454,6 +453,10 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	}
 	spin_unlock_irqrestore(&qhp->lock, flag);
 	ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
+
+out:
+	if (err)
+		*bad_wr = wr;
 	return err;
 }
 
@@ -471,18 +474,19 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 	spin_lock_irqsave(&qhp->lock, flag);
 	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
-		return -EINVAL;
+		err = -EINVAL;
+		goto out;
 	}
 	num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
 			    qhp->wq.rq_size_log2) - 1;
 	if (!wr) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
-		return -EINVAL;
+		err = -ENOMEM;
+		goto out;
 	}
 	while (wr) {
 		if (wr->num_sge > T3_MAX_SGE) {
 			err = -EINVAL;
-			*bad_wr = wr;
 			break;
 		}
 		idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
@@ -494,10 +498,10 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 			err = build_zero_stag_recv(qhp, wqe, wr);
 		else
 			err = -ENOMEM;
-		if (err) {
-			*bad_wr = wr;
+
+		if (err)
 			break;
-		}
+
 		build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
 			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
 			       0, sizeof(struct t3_receive_wr) >> 3, T3_SOPEOP);
@@ -511,6 +515,10 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 	}
 	spin_unlock_irqrestore(&qhp->lock, flag);
 	ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
+
+out:
+	if (err)
+		*bad_wr = wr;
 	return err;
 }
 