author     Shiraz Saleem <shiraz.saleem@intel.com>	2016-07-12 12:48:39 -0400
committer  Doug Ledford <dledford@redhat.com>	2016-08-02 14:17:38 -0400
commit     fe5d6e625df0f7cc132f7f6bf23414e29d361289 (patch)
tree       b61b9e0e7dbdde70ff66ad1f121dde08d39f39f7
parent     fb92d8fb1b9c2de7d7d50c199e6d3020544702e8 (diff)
i40iw: Fix return codes
Fix incorrect usage of ENOSYS and other return codes.
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
 drivers/infiniband/hw/i40iw/i40iw_verbs.c | 47
 1 file changed, 31 insertions(+), 16 deletions(-)
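
The change applies one consistent mapping in i40iw_post_send() and i40iw_post_recv(): when the low-level queue operation reports I40IW_ERR_QP_TOOMANY_WRS_POSTED (the work queue is full) the verb returns -ENOMEM, and any other failure returns -EINVAL. A minimal sketch of that mapping is shown below; the helper i40iw_status_to_errno() is hypothetical and only illustrative, since the patch open-codes the same check at each call site.

	/* Hypothetical helper (not part of this patch) showing the errno
	 * mapping the diff below open-codes in each switch case.
	 */
	static int i40iw_status_to_errno(enum i40iw_status_code ret)
	{
		if (!ret)
			return 0;
		if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
			return -ENOMEM;	/* no room left on the work queue */
		return -EINVAL;		/* malformed or unsupported work request */
	}

With such a helper, each "if (ret) { ... }" block added below would reduce to "err = i40iw_status_to_errno(ret);".
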
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 39096a246863..86c387a2cf4f 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -528,7 +528,7 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
 	status = i40iw_get_wqe_shift(rq_size, ukinfo->max_rq_frag_cnt, 0, &rqshift);
 
 	if (status)
-		return -ENOSYS;
+		return -ENOMEM;
 
 	sqdepth = sq_size << sqshift;
 	rqdepth = rq_size << rqshift;
@@ -670,7 +670,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
 	iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
 
 	if (init_attr->qp_type != IB_QPT_RC) {
-		err_code = -ENOSYS;
+		err_code = -EINVAL;
 		goto error;
 	}
 	if (iwdev->push_mode)
@@ -2075,8 +2075,12 @@ static int i40iw_post_send(struct ib_qp *ibqp,
 				ret = ukqp->ops.iw_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
 			}
 
-			if (ret)
-				err = -EIO;
+			if (ret) {
+				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
+					err = -ENOMEM;
+				else
+					err = -EINVAL;
+			}
 			break;
 		case IB_WR_RDMA_WRITE:
 			info.op_type = I40IW_OP_TYPE_RDMA_WRITE;
@@ -2097,8 +2101,12 @@ static int i40iw_post_send(struct ib_qp *ibqp,
 				ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
 			}
 
-			if (ret)
-				err = -EIO;
+			if (ret) {
+				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
+					err = -ENOMEM;
+				else
+					err = -EINVAL;
+			}
 			break;
 		case IB_WR_RDMA_READ_WITH_INV:
 			inv_stag = true;
@@ -2116,15 +2124,19 @@ static int i40iw_post_send(struct ib_qp *ibqp,
 			info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
 			info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
 			ret = ukqp->ops.iw_rdma_read(ukqp, &info, inv_stag, false);
-			if (ret)
-				err = -EIO;
+			if (ret) {
+				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
+					err = -ENOMEM;
+				else
+					err = -EINVAL;
+			}
 			break;
 		case IB_WR_LOCAL_INV:
 			info.op_type = I40IW_OP_TYPE_INV_STAG;
 			info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
 			ret = ukqp->ops.iw_stag_local_invalidate(ukqp, &info, true);
 			if (ret)
-				err = -EIO;
+				err = -ENOMEM;
 			break;
 		case IB_WR_REG_MR:
 		{
@@ -2153,7 +2165,7 @@ static int i40iw_post_send(struct ib_qp *ibqp,
 
 			ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
 			if (ret)
-				err = -EIO;
+				err = -ENOMEM;
 			break;
 		}
 		default:
@@ -2193,6 +2205,7 @@ static int i40iw_post_recv(struct ib_qp *ibqp,
 	struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT];
 	enum i40iw_status_code ret = 0;
 	unsigned long flags;
+	int err = 0;
 
 	iwqp = (struct i40iw_qp *)ibqp;
 	ukqp = &iwqp->sc_qp.qp_uk;
@@ -2207,6 +2220,10 @@ static int i40iw_post_recv(struct ib_qp *ibqp,
 		ret = ukqp->ops.iw_post_receive(ukqp, &post_recv);
 		if (ret) {
 			i40iw_pr_err(" post_recv err %d\n", ret);
+			if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
+				err = -ENOMEM;
+			else
+				err = -EINVAL;
 			*bad_wr = ib_wr;
 			goto out;
 		}
@@ -2214,9 +2231,7 @@ static int i40iw_post_recv(struct ib_qp *ibqp,
 	}
  out:
 	spin_unlock_irqrestore(&iwqp->lock, flags);
-	if (ret)
-		return -ENOSYS;
-	return 0;
+	return err;
 }
 
 /**
@@ -2513,7 +2528,7 @@ static int i40iw_modify_port(struct ib_device *ibdev,
 			     int port_modify_mask,
 			     struct ib_port_modify *props)
 {
-	return 0;
+	return -ENOSYS;
 }
 
 /**
@@ -2709,7 +2724,7 @@ int i40iw_register_rdma_device(struct i40iw_device *iwdev)
 
 	iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
 	if (!iwdev->iwibdev)
-		return -ENOSYS;
+		return -ENOMEM;
 	iwibdev = iwdev->iwibdev;
 
 	ret = ib_register_device(&iwibdev->ibdev, NULL);
@@ -2734,5 +2749,5 @@ error:
 	kfree(iwdev->iwibdev->ibdev.iwcm);
 	iwdev->iwibdev->ibdev.iwcm = NULL;
 	ib_dealloc_device(&iwdev->iwibdev->ibdev);
-	return -ENOSYS;
+	return ret;
 }