author		Leon Romanovsky <leonro@mellanox.com>		2016-08-28 03:58:38 -0400
committer	Doug Ledford <dledford@redhat.com>		2016-09-02 14:11:40 -0400
commit		dbdf7d4e7f911f79ceb08365a756bbf6eecac81c (patch)
tree		d646ac271b2e3c23608b6c45d2f63567d76e1a94
parent		d9f88e5ab9a73058ebdde589219c0d37da250f06 (diff)
IB/mlx5: Don't return errors from poll_cq
Remove the error returns from the mlx5 poll_cq function. By the Mellanox
HCA architecture and the corresponding driver design, polling a CQ in
the kernel never fails, so the error paths are dead code.
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
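
With this change, mlx5_ib_poll_cq() always returns the number of completions
written to the wc array and never a negative errno. A minimal sketch of a
kernel-side consumer, assuming a hypothetical drain_cq() helper and batch
size that are illustrative only and not part of this patch:

	#include <linux/kernel.h>
	#include <rdma/ib_verbs.h>

	/* Hypothetical helper: drain a CQ in small batches. */
	static void drain_cq(struct ib_cq *cq)
	{
		struct ib_wc wc[16];
		int n, i;

		/* ib_poll_cq() returns how many entries were written to
		 * wc. After this patch the mlx5 provider never returns a
		 * negative error, so the loop simply terminates when the
		 * CQ is empty (n == 0).
		 */
		while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0) {
			for (i = 0; i < n; i++) {
				/* inspect wc[i].status and wc[i].opcode */
			}
		}
	}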
-rw-r--r--	drivers/infiniband/hw/mlx5/cq.c	22
1 file changed, 2 insertions(+), 20 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 308a358e5b46..e4fac9292e4a 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -553,12 +553,6 @@ repoll:
 		 * from the table.
 		 */
 		mqp = __mlx5_qp_lookup(dev->mdev, qpn);
-		if (unlikely(!mqp)) {
-			mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
-				     cq->mcq.cqn, qpn);
-			return -EINVAL;
-		}
-
 		*cur_qp = to_mibqp(mqp);
 	}
 
@@ -619,13 +613,6 @@ repoll:
 		read_lock(&dev->mdev->priv.mkey_table.lock);
 		mmkey = __mlx5_mr_lookup(dev->mdev,
					 mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
-		if (unlikely(!mmkey)) {
-			read_unlock(&dev->mdev->priv.mkey_table.lock);
-			mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
-				     cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
-			return -EINVAL;
-		}
-
 		mr = to_mibmr(mmkey);
 		get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
 		mr->sig->sig_err_exists = true;
@@ -676,7 +663,6 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 	unsigned long flags;
 	int soft_polled = 0;
 	int npolled;
-	int err = 0;
 
 	spin_lock_irqsave(&cq->lock, flags);
 	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -688,8 +674,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 	soft_polled = poll_soft_wc(cq, num_entries, wc);
 
 	for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
-		err = mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled);
-		if (err)
+		if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
 			break;
 	}
 
@@ -698,10 +683,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 out:
 	spin_unlock_irqrestore(&cq->lock, flags);
 
-	if (err == 0 || err == -EAGAIN)
-		return soft_polled + npolled;
-	else
-		return err;
+	return soft_polled + npolled;
 }
 
 int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)