author     Eli Cohen <eli@mellanox.com>            2013-09-11 09:35:30 -0400
committer  Roland Dreier <roland@purestorage.com>  2013-10-10 12:23:58 -0400
commit     81bea28ffdaad0bd3bf745ec399edb0387f4799d (patch)
tree       d44048fcea14e76520f421a678e3bc35957aaf13 /drivers
parent     2f6daec14d02deb84e7896a93196d78fbe9956a2 (diff)
IB/mlx5: Disable atomic operations
Currently, atomic operations do not work properly. Disable them for the
time being.
Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c |  5
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c   | 48
2 files changed, 6 insertions, 47 deletions
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e59761ed7d00..b267c65261c0 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -301,9 +301,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	props->max_srq_sge = max_rq_sg - 1;
 	props->max_fast_reg_page_list_len = (unsigned int)-1;
 	props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay;
-	props->atomic_cap = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ?
-		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
-	props->masked_atomic_cap = IB_ATOMIC_HCA;
+	props->atomic_cap = IB_ATOMIC_NONE;
+	props->masked_atomic_cap = IB_ATOMIC_NONE;
 	props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
 	props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg;
 	props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
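With this change, mlx5_ib_query_device() advertises IB_ATOMIC_NONE for both the
plain and masked atomic capabilities, so consumers are expected to check the
reported capability rather than assume atomics work. The following is a minimal
sketch, not part of this patch, of how a kernel ULP could do that check; the
helper name is hypothetical, while ib_query_device() and struct ib_device_attr
are the standard verbs API of this kernel era.

#include <rdma/ib_verbs.h>

/* Hypothetical helper: decide whether to use atomic work requests on ibdev. */
static bool ulp_can_use_atomics(struct ib_device *ibdev)
{
	struct ib_device_attr attr;
	int err;

	err = ib_query_device(ibdev, &attr);
	if (err)
		return false;

	/*
	 * After this patch, mlx5 reports IB_ATOMIC_NONE here, so callers
	 * should take a non-atomic fallback path instead of posting
	 * IB_WR_ATOMIC_* work requests that would now be rejected.
	 */
	return attr.atomic_cap != IB_ATOMIC_NONE;
}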
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 05d53f184744..10b077db42e8 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1661,29 +1661,6 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
 	rseg->reserved = 0;
 }
 
-static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
-{
-	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
-	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-	} else {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-		aseg->compare = 0;
-	}
-}
-
-static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg,
-				  struct ib_send_wr *wr)
-{
-	aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-	aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
-	aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
-	aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-}
-
 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
 			     struct ib_send_wr *wr)
 {
@@ -2073,28 +2050,11 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 		case IB_WR_ATOMIC_CMP_AND_SWP:
 		case IB_WR_ATOMIC_FETCH_AND_ADD:
-			set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-				      wr->wr.atomic.rkey);
-			seg += sizeof(struct mlx5_wqe_raddr_seg);
-
-			set_atomic_seg(seg, wr);
-			seg += sizeof(struct mlx5_wqe_atomic_seg);
-
-			size += (sizeof(struct mlx5_wqe_raddr_seg) +
-				 sizeof(struct mlx5_wqe_atomic_seg)) / 16;
-			break;
-
 		case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
-			set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-				      wr->wr.atomic.rkey);
-			seg += sizeof(struct mlx5_wqe_raddr_seg);
-
-			set_masked_atomic_seg(seg, wr);
-			seg += sizeof(struct mlx5_wqe_masked_atomic_seg);
-
-			size += (sizeof(struct mlx5_wqe_raddr_seg) +
-				 sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16;
-			break;
+			mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
+			err = -ENOSYS;
+			*bad_wr = wr;
+			goto out;
 
 		case IB_WR_LOCAL_INV:
 			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
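After this patch, all atomic opcodes fall through to the new error path in
mlx5_ib_post_send(): the driver warns, sets *bad_wr to the offending work
request, and returns -ENOSYS, so the failure surfaces at post time instead of
as silently incorrect atomic results. The following is a minimal sketch, not
part of this patch, of how a hypothetical caller would observe that behaviour,
using the ib_send_wr layout of this kernel era; field values are placeholders.

#include <rdma/ib_verbs.h>

/* Hypothetical caller: post a fetch-and-add and report the rejection. */
static int post_fetch_add(struct ib_qp *qp, struct ib_sge *sge,
			  u64 remote_addr, u32 rkey)
{
	struct ib_send_wr wr = { 0 }, *bad_wr = NULL;
	int err;

	wr.opcode = IB_WR_ATOMIC_FETCH_AND_ADD;
	wr.sg_list = sge;
	wr.num_sge = 1;
	wr.send_flags = IB_SEND_SIGNALED;
	wr.wr.atomic.remote_addr = remote_addr;
	wr.wr.atomic.rkey = rkey;
	wr.wr.atomic.compare_add = 1;	/* value to add remotely */

	err = ib_post_send(qp, &wr, &bad_wr);
	if (err)
		/* On mlx5 after this patch: err == -ENOSYS, bad_wr == &wr. */
		pr_warn("atomic WR rejected: %d\n", err);

	return err;
}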