author     David S. Miller <davem@davemloft.net>  2013-10-23 16:28:39 -0400
committer  David S. Miller <davem@davemloft.net>  2013-10-23 16:49:34 -0400
commit     c3fa32b9764dc45dcf8a2231b1c110abc4a63e0b (patch)
tree       6cf2896a77b65bec64284681e1c3851eb3263e09 /drivers/infiniband/hw/mlx5/qp.c
parent     34d92d5315b64a3e5292b7e9511c1bb617227fb6 (diff)
parent     320437af954cbe66478f1f5e8b34cb5a8d072191 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/usb/qmi_wwan.c
include/net/dst.h
Trivial merge conflicts; both were overlapping changes.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/infiniband/hw/mlx5/qp.c')
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 80
1 file changed, 30 insertions(+), 50 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 045f8cdbd303..5659ea880741 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -203,7 +203,7 @@ static int sq_overhead(enum ib_qp_type qp_type)
 
 	switch (qp_type) {
 	case IB_QPT_XRC_INI:
-		size = sizeof(struct mlx5_wqe_xrc_seg);
+		size += sizeof(struct mlx5_wqe_xrc_seg);
 		/* fall through */
 	case IB_QPT_RC:
 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
@@ -211,20 +211,23 @@ static int sq_overhead(enum ib_qp_type qp_type)
 			sizeof(struct mlx5_wqe_raddr_seg);
 		break;
 
+	case IB_QPT_XRC_TGT:
+		return 0;
+
 	case IB_QPT_UC:
-		size = sizeof(struct mlx5_wqe_ctrl_seg) +
+		size += sizeof(struct mlx5_wqe_ctrl_seg) +
 			sizeof(struct mlx5_wqe_raddr_seg);
 		break;
 
 	case IB_QPT_UD:
 	case IB_QPT_SMI:
 	case IB_QPT_GSI:
-		size = sizeof(struct mlx5_wqe_ctrl_seg) +
+		size += sizeof(struct mlx5_wqe_ctrl_seg) +
 			sizeof(struct mlx5_wqe_datagram_seg);
 		break;
 
 	case MLX5_IB_QPT_REG_UMR:
-		size = sizeof(struct mlx5_wqe_ctrl_seg) +
+		size += sizeof(struct mlx5_wqe_ctrl_seg) +
 			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
 			sizeof(struct mlx5_mkey_seg);
 		break;
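The switch above now accumulates into size with += in every arm. That keeps the IB_QPT_XRC_INI arm, which falls through into IB_QPT_RC, adding its XRC segment on top of the RC segments, and the new IB_QPT_XRC_TGT arm returns 0 because a target-side XRC QP has no send queue of its own. A minimal standalone sketch of the pattern; the segment sizes are illustrative placeholders, not the real mlx5 structures:

/* sketch of the fall-through accumulation pattern in sq_overhead() */
#include <stdio.h>

enum qp_type { QPT_RC, QPT_UC, QPT_XRC_INI, QPT_XRC_TGT };

#define XRC_SEG_SIZE    16	/* placeholder sizes, not mlx5's */
#define CTRL_SEG_SIZE   16
#define ATOMIC_SEG_SIZE 16
#define RADDR_SEG_SIZE  16

static int sq_overhead(enum qp_type t)
{
	int size = 0;

	switch (t) {
	case QPT_XRC_TGT:
		return 0;	/* target-side XRC QPs own no send queue */

	case QPT_XRC_INI:
		size += XRC_SEG_SIZE;
		/* fall through: an XRC initiator also needs every RC
		 * segment, which is why "size +=" (not "size =") matters */
	case QPT_RC:
		size += CTRL_SEG_SIZE + ATOMIC_SEG_SIZE + RADDR_SEG_SIZE;
		break;

	case QPT_UC:
		size += CTRL_SEG_SIZE + RADDR_SEG_SIZE;
		break;
	}
	return size;
}

int main(void)
{
	/* XRC_INI is strictly larger than RC: the fall-through adds up */
	printf("RC: %d, XRC_INI: %d\n",
	       sq_overhead(QPT_RC), sq_overhead(QPT_XRC_INI));
	return 0;
}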
@@ -270,7 +273,8 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 		return wqe_size;
 
 	if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
-		mlx5_ib_dbg(dev, "\n");
+		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
+			    wqe_size, dev->mdev.caps.max_sq_desc_sz);
 		return -EINVAL;
 	}
 
@@ -280,9 +284,15 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 
 	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
 	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
+	if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
+		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
+			    qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
+		return -ENOMEM;
+	}
 	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
 	qp->sq.max_gs = attr->cap.max_send_sge;
-	qp->sq.max_post = 1 << ilog2(wq_size / wqe_size);
+	qp->sq.max_post = wq_size / wqe_size;
+	attr->cap.max_send_wr = qp->sq.max_post;
 
 	return wq_size;
 }
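For the sizing arithmetic above: the work-queue byte size is max_send_wr * wqe_size rounded up to a power of two, the WQE count is that size in 64-byte basic blocks (MLX5_SEND_WQE_BB) and is now checked against the device cap, and max_post becomes a plain division instead of the old power-of-two truncation, with the real capacity reported back through attr->cap.max_send_wr. A user-space sketch of the math; the cap values are made up for illustration:

/* sketch of the send-queue sizing math in calc_sq_size() */
#include <stdio.h>

#define MLX5_SEND_WQE_BB 64	/* basic block size in the mlx5 driver */

/* round up to the next power of two (32-bit, v != 0) */
static unsigned int roundup_pow_of_two(unsigned int v)
{
	v--;
	v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16;
	return v + 1;
}

int main(void)
{
	unsigned int max_send_wr = 100;	/* requested work requests */
	unsigned int wqe_size = 96;	/* bytes per WQE */
	unsigned int max_wqes = 16384;	/* hypothetical device cap */

	unsigned int wq_size = roundup_pow_of_two(max_send_wr * wqe_size);
	unsigned int wqe_cnt = wq_size / MLX5_SEND_WQE_BB;

	if (wqe_cnt > max_wqes) {
		fprintf(stderr, "wqe count(%u) exceeds limits(%u)\n",
			wqe_cnt, max_wqes);
		return 1;
	}

	/*
	 * The rounded-up queue may hold more WQEs than requested; the real
	 * count (170 here, not the old 1 << ilog2() truncation to 128) is
	 * what gets reported back via attr->cap.max_send_wr.
	 */
	unsigned int max_post = wq_size / wqe_size;
	printf("wq_size=%u wqe_cnt=%u max_post=%u\n",
	       wq_size, wqe_cnt, max_post);
	return 0;
}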
@@ -1280,6 +1290,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
 					  MLX5_QP_OPTPAR_Q_KEY,
 			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
 					   MLX5_QP_OPTPAR_Q_KEY,
+			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+					   MLX5_QP_OPTPAR_RRE |
+					   MLX5_QP_OPTPAR_RAE |
+					   MLX5_QP_OPTPAR_RWE |
+					   MLX5_QP_OPTPAR_PKEY_INDEX,
 		},
 	},
 	[MLX5_QP_STATE_RTR] = {
@@ -1314,6 +1329,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
 		[MLX5_QP_STATE_RTS] = {
 			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
 			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
+			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
+			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
+					  MLX5_QP_OPTPAR_RWE |
+					  MLX5_QP_OPTPAR_RAE |
+					  MLX5_QP_OPTPAR_RRE,
 		},
 	},
 };
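opt_mask above is a three-dimensional table of optional-parameter bitmasks indexed by source state, destination state, and transport type; the additions fill in the XRC, UC, and RC entries for these transitions. A small sketch of the designated-initializer pattern it relies on, where unlisted entries default to zero; the enum and flag names here are illustrative, not the driver's:

/* sketch of a [from_state][to_state][transport] bitmask lookup table */
#include <stdio.h>

enum state { ST_INIT, ST_RTR, ST_RTS, NUM_STATE };
enum transport { TR_RC, TR_UC, TR_XRC, NUM_TRANSPORT };

#define OPT_RRE (1 << 0)	/* remote read enable */
#define OPT_RWE (1 << 1)	/* remote write enable */
#define OPT_RAE (1 << 2)	/* remote atomic enable */

static const unsigned int opt_mask[NUM_STATE][NUM_STATE][NUM_TRANSPORT] = {
	[ST_RTR] = {
		[ST_RTS] = {
			[TR_RC] = OPT_RRE | OPT_RWE | OPT_RAE,
			[TR_UC] = OPT_RWE,
			/* [TR_XRC] unlisted => 0: no optional params allowed */
		},
	},
};

int main(void)
{
	unsigned int m = opt_mask[ST_RTR][ST_RTS][TR_RC];

	printf("RTR->RTS RC mask: %#x (RWE set: %s)\n",
	       m, (m & OPT_RWE) ? "yes" : "no");
	return 0;
}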
@@ -1651,29 +1671,6 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
 	rseg->reserved = 0;
 }
 
-static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
-{
-	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
-	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-	} else {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-		aseg->compare = 0;
-	}
-}
-
-static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg,
-				  struct ib_send_wr *wr)
-{
-	aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-	aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
-	aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
-	aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-}
-
 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
 			     struct ib_send_wr *wr)
 {
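The two deleted helpers packed atomic operands into on-wire WQE segments in big-endian byte order via cpu_to_be64(). A minimal user-space sketch of that byte-order pattern using htobe64(), the user-space analogue; the struct is illustrative, not the real mlx5_wqe_atomic_seg:

/* sketch of big-endian packing of atomic operands into a WQE segment */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct atomic_seg {
	uint64_t swap_add;	/* big-endian on the wire */
	uint64_t compare;	/* big-endian on the wire */
};

static void set_cmp_swap_seg(struct atomic_seg *aseg,
			     uint64_t swap, uint64_t compare)
{
	/* the HCA consumes these fields big-endian regardless of host order */
	aseg->swap_add = htobe64(swap);
	aseg->compare = htobe64(compare);
}

int main(void)
{
	struct atomic_seg seg;

	set_cmp_swap_seg(&seg, 0x1122334455667788ULL, 0xAABBCCDDEEFF0011ULL);
	/* prints 0x11 on any host: the most significant byte comes first */
	printf("first byte on wire: 0x%02x\n", ((unsigned char *)&seg)[0]);
	return 0;
}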
@@ -2063,28 +2060,11 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 		case IB_WR_ATOMIC_CMP_AND_SWP:
 		case IB_WR_ATOMIC_FETCH_AND_ADD:
-			set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-				      wr->wr.atomic.rkey);
-			seg += sizeof(struct mlx5_wqe_raddr_seg);
-
-			set_atomic_seg(seg, wr);
-			seg += sizeof(struct mlx5_wqe_atomic_seg);
-
-			size += (sizeof(struct mlx5_wqe_raddr_seg) +
-				 sizeof(struct mlx5_wqe_atomic_seg)) / 16;
-			break;
-
 		case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
-			set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-				      wr->wr.atomic.rkey);
-			seg += sizeof(struct mlx5_wqe_raddr_seg);
-
-			set_masked_atomic_seg(seg, wr);
-			seg += sizeof(struct mlx5_wqe_masked_atomic_seg);
-
-			size += (sizeof(struct mlx5_wqe_raddr_seg) +
-				 sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16;
-			break;
+			mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
+			err = -ENOSYS;
+			*bad_wr = wr;
+			goto out;
 
 		case IB_WR_LOCAL_INV:
 			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
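With this change the driver rejects every atomic opcode up front: it warns, returns -ENOSYS, and points *bad_wr at the offending work request so the caller knows nothing from that WR onward was posted. A hedged sketch of how a user-space consumer would observe the analogous failure through libibverbs; it assumes an already-created QP, and ibv_post_send() is the real verbs entry point:

/* sketch: handling a post-send failure and the bad_wr out-parameter */
#include <stdio.h>
#include <infiniband/verbs.h>

static int post_one_wr(struct ibv_qp *qp, struct ibv_send_wr *wr)
{
	struct ibv_send_wr *bad_wr = NULL;
	int err = ibv_post_send(qp, wr, &bad_wr);

	if (err) {
		/* everything from bad_wr onward was not posted; an
		 * unsupported atomic WR fails immediately, as above */
		fprintf(stderr, "post_send failed (%d) at wr_id %llu\n",
			err, bad_wr ? (unsigned long long)bad_wr->wr_id : 0ULL);
	}
	return err;
}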