author	Frank Zago <frank@zago.net>	2009-12-09 18:07:25 -0500
committer	Roland Dreier <rolandd@cisco.com>	2009-12-09 18:07:25 -0500
commit	e147de03610fab7781c09aaed078a932e549ed4a (patch)
tree	66674d008aa505e5e2dadc0a6260df30ef5ef768 /drivers/infiniband/hw
parent	9420269428b3dc80c98e52beac60a3976fbef7d2 (diff)
IB/ehca: Fix error paths in post_send and post_recv

Always set bad_wr when an immediate error is detected.  Do not
report success if an error occurred.

Signed-off-by: Frank Zago <fzago@systemfabricworks.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
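For context on why this matters to consumers: the ib_post_send() verb is
expected to hand back, via *bad_send_wr, the first work request it could
not queue, so the caller knows exactly how much of its WR list was
accepted.  A minimal caller-side sketch of that contract follows; the
wr_list variable and the requeue_later() helper are hypothetical, for
illustration only, and are not part of this patch:

	struct ib_send_wr *bad_wr;
	int err;

	err = ib_post_send(qp, wr_list, &bad_wr);
	if (err) {
		/*
		 * WRs before bad_wr were queued; bad_wr and everything
		 * after it were not, and may be reposted later.
		 */
		requeue_later(bad_wr);	/* hypothetical helper */
	}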
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_reqs.c | 67
1 file changed, 31 insertions(+), 36 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 8fd88cd828f..e3ec7fdd67b 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -400,7 +400,6 @@ static inline void map_ib_wc_status(u32 cqe_status,
 
 static inline int post_one_send(struct ehca_qp *my_qp,
 				struct ib_send_wr *cur_send_wr,
-				struct ib_send_wr **bad_send_wr,
 				int hidden)
 {
 	struct ehca_wqe *wqe_p;
@@ -412,8 +411,6 @@ static inline int post_one_send(struct ehca_qp *my_qp,
 	wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
 	if (unlikely(!wqe_p)) {
 		/* too many posted work requests: queue overflow */
-		if (bad_send_wr)
-			*bad_send_wr = cur_send_wr;
 		ehca_err(my_qp->ib_qp.device, "Too many posted WQEs "
 			 "qp_num=%x", my_qp->ib_qp.qp_num);
 		return -ENOMEM;
@@ -433,8 +430,6 @@ static inline int post_one_send(struct ehca_qp *my_qp,
 	 */
 	if (unlikely(ret)) {
 		my_qp->ipz_squeue.current_q_offset = start_offset;
-		if (bad_send_wr)
-			*bad_send_wr = cur_send_wr;
 		ehca_err(my_qp->ib_qp.device, "Could not write WQE "
 			 "qp_num=%x", my_qp->ib_qp.qp_num);
 		return -EINVAL;
@@ -448,7 +443,6 @@ int ehca_post_send(struct ib_qp *qp,
 		   struct ib_send_wr **bad_send_wr)
 {
 	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
-	struct ib_send_wr *cur_send_wr;
 	int wqe_cnt = 0;
 	int ret = 0;
 	unsigned long flags;
@@ -457,7 +451,8 @@ int ehca_post_send(struct ib_qp *qp,
 	if (unlikely(my_qp->state < IB_QPS_RTS)) {
 		ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
 			 my_qp->state, qp->qp_num);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	/* LOCK the QUEUE */
@@ -476,24 +471,21 @@ int ehca_post_send(struct ib_qp *qp,
 		struct ib_send_wr circ_wr;
 		memset(&circ_wr, 0, sizeof(circ_wr));
 		circ_wr.opcode = IB_WR_RDMA_READ;
-		post_one_send(my_qp, &circ_wr, NULL, 1); /* ignore retcode */
+		post_one_send(my_qp, &circ_wr, 1); /* ignore retcode */
 		wqe_cnt++;
 		ehca_dbg(qp->device, "posted circ wr qp_num=%x", qp->qp_num);
 		my_qp->message_count = my_qp->packet_count = 0;
 	}
 
 	/* loop processes list of send reqs */
-	for (cur_send_wr = send_wr; cur_send_wr != NULL;
-	     cur_send_wr = cur_send_wr->next) {
-		ret = post_one_send(my_qp, cur_send_wr, bad_send_wr, 0);
+	while (send_wr) {
+		ret = post_one_send(my_qp, send_wr, 0);
 		if (unlikely(ret)) {
-			/* if one or more WQEs were successful, don't fail */
-			if (wqe_cnt)
-				ret = 0;
 			goto post_send_exit0;
 		}
 		wqe_cnt++;
-	} /* eof for cur_send_wr */
+		send_wr = send_wr->next;
+	}
 
 post_send_exit0:
 	iosync(); /* serialize GAL register access */
@@ -503,6 +495,10 @@ post_send_exit0:
 		 my_qp, qp->qp_num, wqe_cnt, ret);
 	my_qp->message_count += wqe_cnt;
 	spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
+
+out:
+	if (ret)
+		*bad_send_wr = send_wr;
 	return ret;
 }
 
@@ -511,7 +507,6 @@ static int internal_post_recv(struct ehca_qp *my_qp,
 			      struct ib_recv_wr *recv_wr,
 			      struct ib_recv_wr **bad_recv_wr)
 {
-	struct ib_recv_wr *cur_recv_wr;
 	struct ehca_wqe *wqe_p;
 	int wqe_cnt = 0;
 	int ret = 0;
@@ -522,27 +517,23 @@ static int internal_post_recv(struct ehca_qp *my_qp,
 	if (unlikely(!HAS_RQ(my_qp))) {
 		ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d",
 			 my_qp, my_qp->real_qp_num, my_qp->ext_type);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto out;
 	}
 
 	/* LOCK the QUEUE */
 	spin_lock_irqsave(&my_qp->spinlock_r, flags);
 
-	/* loop processes list of send reqs */
-	for (cur_recv_wr = recv_wr; cur_recv_wr != NULL;
-	     cur_recv_wr = cur_recv_wr->next) {
+	/* loop processes list of recv reqs */
+	while (recv_wr) {
 		u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
 		/* get pointer next to free WQE */
 		wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
 		if (unlikely(!wqe_p)) {
 			/* too many posted work requests: queue overflow */
-			if (bad_recv_wr)
-				*bad_recv_wr = cur_recv_wr;
-			if (wqe_cnt == 0) {
-				ret = -ENOMEM;
-				ehca_err(dev, "Too many posted WQEs "
-					 "qp_num=%x", my_qp->real_qp_num);
-			}
+			ret = -ENOMEM;
+			ehca_err(dev, "Too many posted WQEs "
+				 "qp_num=%x", my_qp->real_qp_num);
 			goto post_recv_exit0;
 		}
 		/*
@@ -552,7 +543,7 @@ static int internal_post_recv(struct ehca_qp *my_qp,
 		rq_map_idx = start_offset / my_qp->ipz_rqueue.qe_size;
 
 		/* write a RECV WQE into the QUEUE */
-		ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr,
+		ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, recv_wr,
 				      rq_map_idx);
 		/*
 		 * if something failed,
@@ -560,22 +551,20 @@ static int internal_post_recv(struct ehca_qp *my_qp,
 		 */
 		if (unlikely(ret)) {
 			my_qp->ipz_rqueue.current_q_offset = start_offset;
-			*bad_recv_wr = cur_recv_wr;
-			if (wqe_cnt == 0) {
-				ret = -EINVAL;
-				ehca_err(dev, "Could not write WQE "
-					 "qp_num=%x", my_qp->real_qp_num);
-			}
+			ret = -EINVAL;
+			ehca_err(dev, "Could not write WQE "
+				 "qp_num=%x", my_qp->real_qp_num);
 			goto post_recv_exit0;
 		}
 
 		qmap_entry = &my_qp->rq_map.map[rq_map_idx];
-		qmap_entry->app_wr_id = get_app_wr_id(cur_recv_wr->wr_id);
+		qmap_entry->app_wr_id = get_app_wr_id(recv_wr->wr_id);
 		qmap_entry->reported = 0;
 		qmap_entry->cqe_req = 1;
 
 		wqe_cnt++;
-	} /* eof for cur_recv_wr */
+		recv_wr = recv_wr->next;
+	} /* eof for recv_wr */
 
 post_recv_exit0:
 	iosync(); /* serialize GAL register access */
@@ -584,6 +573,11 @@ post_recv_exit0:
 	ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
 		 my_qp, my_qp->real_qp_num, wqe_cnt, ret);
 	spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
+
+out:
+	if (ret)
+		*bad_recv_wr = recv_wr;
+
 	return ret;
 }
 
@@ -597,6 +591,7 @@ int ehca_post_recv(struct ib_qp *qp,
 	if (unlikely(my_qp->state == IB_QPS_RESET)) {
 		ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
 			 my_qp->state, qp->qp_num);
+		*bad_recv_wr = recv_wr;
 		return -EINVAL;
 	}
 
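After the patch, both posting paths share one error-path shape: a failure
inside the locked region jumps to the existing exit label, the lock is
dropped, and a common out: label sets the bad-WR pointer if and only if an
error is being returned.  Condensed from the patched ehca_post_send()
above (a sketch of the resulting control flow, not a verbatim copy):

	while (send_wr) {
		ret = post_one_send(my_qp, send_wr, 0);
		if (unlikely(ret)) {
			/* send_wr still points at the WR that failed */
			goto post_send_exit0;
		}
		wqe_cnt++;
		send_wr = send_wr->next;
	}

post_send_exit0:
	/* ... iosync, debug stats, spin_unlock_irqrestore ... */
out:
	if (ret)
		*bad_send_wr = send_wr;	/* set on every failure, never on success */
	return ret;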