-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h            23
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c                 14
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c                41
-rw-r--r--  drivers/infiniband/hw/mlx5/user.h               44
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c   4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/qp.c     7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/srq.c    2
7 files changed, 114 insertions(+), 21 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index d4b227126265..a6a57de278b4 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -57,6 +57,8 @@ pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
 
 #define field_avail(type, fld, sz) (offsetof(type, fld) + \
 		sizeof(((type *)0)->fld) <= (sz))
+#define MLX5_IB_DEFAULT_UIDX 0xffffff
+#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)
 
 enum {
 	MLX5_IB_MMAP_CMD_SHIFT	= 8,
@@ -94,6 +96,11 @@ enum {
 	MLX5_CROSS_CHANNEL_UUAR = 0,
 };
 
+enum {
+	MLX5_CQE_VERSION_V0,
+	MLX5_CQE_VERSION_V1,
+};
+
 struct mlx5_ib_ucontext {
 	struct ib_ucontext	ibucontext;
 	struct list_head	db_page_list;
@@ -102,6 +109,7 @@ struct mlx5_ib_ucontext {
 	 */
 	struct mutex		db_page_mutex;
 	struct mlx5_uuar_info	uuari;
+	u8			cqe_version;
 };
 
 static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
@@ -694,4 +702,19 @@ static inline u32 check_cq_create_flags(u32 flags)
 	 */
 	return (flags & ~IB_CQ_FLAGS_IGNORE_OVERRUN);
 }
+
+static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
+				     u32 *user_index)
+{
+	if (cqe_version) {
+		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
+		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
+			return -EINVAL;
+		*user_index = cmd_uidx;
+	} else {
+		*user_index = MLX5_IB_DEFAULT_UIDX;
+	}
+
+	return 0;
+}
 #endif /* MLX5_IB_H */
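
The verify_assign_uidx() helper added above encodes the whole user-index policy: with CQE version 1 the caller must supply a valid index that is not the reserved default and that fits in the 24-bit field, while with CQE version 0 the index always falls back to 0xffffff. A minimal stand-alone sketch of the same rules, with MLX5_IB_DEFAULT_UIDX and the 24-bit __mlx5_mask(qpc, user_index) written out as assumed constants:

/*
 * Stand-alone illustration of the rules verify_assign_uidx() enforces.
 * The constants mirror MLX5_IB_DEFAULT_UIDX and the 24-bit
 * __mlx5_mask(qpc, user_index); this is a sketch, not kernel code.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define DEFAULT_UIDX		0xffffff	/* "no user index" marker */
#define ASSIGNED_UIDX_MASK	0xffffff	/* user_index is a 24-bit field */

static int check_uidx(uint8_t cqe_version, uint32_t cmd_uidx, uint32_t *user_index)
{
	if (cqe_version) {
		/* CQE v1: index must be user-assigned and fit in 24 bits */
		if (cmd_uidx == DEFAULT_UIDX || (cmd_uidx & ~ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		/* CQE v0: always fall back to the default index */
		*user_index = DEFAULT_UIDX;
	}
	return 0;
}

int main(void)
{
	uint32_t idx;

	printf("%d\n", check_uidx(1, 0x001234, &idx));	/* 0, idx = 0x1234   */
	printf("%d\n", check_uidx(1, 0xffffff, &idx));	/* -EINVAL (default) */
	printf("%d\n", check_uidx(1, 0x1000000, &idx));	/* -EINVAL (25 bits) */
	printf("%d\n", check_uidx(0, 0x001234, &idx));	/* 0, idx = 0xffffff */
	return 0;
}
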
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 1ea049ed87da..5c92d087b9f0 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -33,6 +33,7 @@
 #include <linux/module.h>
 #include <rdma/ib_umem.h>
 #include <rdma/ib_cache.h>
+#include <rdma/ib_user_verbs.h>
 #include "mlx5_ib.h"
 #include "user.h"
 
@@ -870,6 +871,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	struct mlx5_ib_create_qp ucmd;
 	int inlen = sizeof(*in);
 	int err;
+	u32 uidx = MLX5_IB_DEFAULT_UIDX;
+	void *qpc;
 
 	mlx5_ib_odp_create_qp(qp);
 
@@ -910,6 +913,11 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 			return -EFAULT;
 		}
 
+		err = get_qp_user_index(to_mucontext(pd->uobject->context),
+					&ucmd, udata->inlen, &uidx);
+		if (err)
+			return err;
+
 		qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
 		qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
 	} else {
@@ -1046,6 +1054,12 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 
 	in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);
 
+	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
+		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+		/* 0xffffff means we ask to work with cqe version 0 */
+		MLX5_SET(qpc, qpc, user_index, uidx);
+	}
+
 	err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen);
 	if (err) {
 		mlx5_ib_dbg(dev, "create qp failed\n");
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index e008505e96e9..4659256cd95e 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -78,28 +78,41 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
 			   struct ib_udata *udata, int buf_size, int *inlen)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
-	struct mlx5_ib_create_srq ucmd;
+	struct mlx5_ib_create_srq ucmd = {};
 	size_t ucmdlen;
+	void *xsrqc;
 	int err;
 	int npages;
 	int page_shift;
 	int ncont;
 	u32 offset;
+	u32 uidx = MLX5_IB_DEFAULT_UIDX;
+	int drv_data = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
 
-	ucmdlen =
-		(udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
-		 sizeof(ucmd)) ? (sizeof(ucmd) -
-				  sizeof(ucmd.reserved)) : sizeof(ucmd);
+	if (drv_data < 0)
+		return -EINVAL;
+
+	ucmdlen = (drv_data < sizeof(ucmd)) ?
+		  drv_data : sizeof(ucmd);
 
 	if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
 		mlx5_ib_dbg(dev, "failed copy udata\n");
 		return -EFAULT;
 	}
 
-	if (ucmdlen == sizeof(ucmd) &&
-	    ucmd.reserved != 0)
+	if (ucmd.reserved0 || ucmd.reserved1)
 		return -EINVAL;
 
+	if (drv_data > sizeof(ucmd) &&
+	    !ib_is_udata_cleared(udata, sizeof(ucmd),
+				 drv_data - sizeof(ucmd)))
+		return -EINVAL;
+
+	err = get_srq_user_index(to_mucontext(pd->uobject->context),
+				 &ucmd, udata->inlen, &uidx);
+	if (err)
+		return err;
+
 	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
 
 	srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
@@ -138,6 +151,12 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
 	(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
 	(*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
 
+	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
+		xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
+				     xrc_srq_context_entry);
+		MLX5_SET(xrc_srqc, xsrqc, user_index, uidx);
+	}
+
 	return 0;
 
 err_in:
@@ -158,6 +177,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
 	struct mlx5_wqe_srq_next_seg *next;
 	int page_shift;
 	int npages;
+	void *xsrqc;
 
 	err = mlx5_db_alloc(dev->mdev, &srq->db);
 	if (err) {
@@ -204,6 +224,13 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
 
 	(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
 
+	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
+		xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
+				     xrc_srq_context_entry);
+		/* 0xffffff means we ask to work with cqe version 0 */
+		MLX5_SET(xrc_srqc, xsrqc, user_index, MLX5_IB_DEFAULT_UIDX);
+	}
+
 	return 0;
 
 err_in:
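
create_srq_user() now computes the driver-private command length (drv_data) itself and accepts both shorter commands from older user space and longer commands from newer user space, as long as the bytes the kernel does not understand are zero. A plain user-space sketch of that policy, with a stand-in struct for mlx5_ib_create_srq and memcpy/a byte loop in place of ib_copy_from_udata() and ib_is_udata_cleared():

/*
 * Sketch of the compat policy applied by create_srq_user(): copy at most
 * sizeof(known command), and reject a longer command unless its tail is
 * all zeros. The struct is a stand-in for mlx5_ib_create_srq; the helpers
 * it imitates are ib_copy_from_udata() and ib_is_udata_cleared().
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>

struct known_cmd {
	uint64_t buf_addr;
	uint64_t db_addr;
	uint32_t flags;
	uint32_t reserved0;
	uint32_t uidx;
	uint32_t reserved1;
};

static int parse_cmd(const unsigned char *udata, size_t drv_data,
		     struct known_cmd *cmd)
{
	size_t ucmdlen = drv_data < sizeof(*cmd) ? drv_data : sizeof(*cmd);
	size_t i;

	memset(cmd, 0, sizeof(*cmd));		/* ucmd = {} in the driver  */
	memcpy(cmd, udata, ucmdlen);		/* ib_copy_from_udata()     */

	/* A longer (newer) command is fine only if the unknown tail is clear */
	for (i = sizeof(*cmd); i < drv_data; i++)
		if (udata[i])
			return -EINVAL;	/* ib_is_udata_cleared() failed */

	return 0;
}
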
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
index e22a35f03238..5bf0935f0cb5 100644
--- a/drivers/infiniband/hw/mlx5/user.h
+++ b/drivers/infiniband/hw/mlx5/user.h
@@ -35,6 +35,8 @@
 
 #include <linux/types.h>
 
+#include "mlx5_ib.h"
+
 enum {
 	MLX5_QP_FLAG_SIGNATURE		= 1 << 0,
 	MLX5_QP_FLAG_SCATTER_CQE	= 1 << 1,
@@ -118,7 +120,9 @@ struct mlx5_ib_create_srq {
 	__u64	buf_addr;
 	__u64	db_addr;
 	__u32	flags;
-	__u32	reserved; /* explicit padding (optional on i386) */
+	__u32	reserved0; /* explicit padding (optional on i386) */
+	__u32	uidx;
+	__u32	reserved1;
 };
 
 struct mlx5_ib_create_srq_resp {
@@ -133,9 +137,47 @@ struct mlx5_ib_create_qp {
 	__u32	rq_wqe_count;
 	__u32	rq_wqe_shift;
 	__u32	flags;
+	__u32	uidx;
+	__u32	reserved0;
 };
 
 struct mlx5_ib_create_qp_resp {
 	__u32	uuar_index;
 };
+
+static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
+				    struct mlx5_ib_create_qp *ucmd,
+				    int inlen,
+				    u32 *user_index)
+{
+	u8 cqe_version = ucontext->cqe_version;
+
+	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
+	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
+		return 0;
+
+	if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
+	       !!cqe_version))
+		return -EINVAL;
+
+	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
+}
+
+static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
+				     struct mlx5_ib_create_srq *ucmd,
+				     int inlen,
+				     u32 *user_index)
+{
+	u8 cqe_version = ucontext->cqe_version;
+
+	if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
+	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
+		return 0;
+
+	if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
+	       !!cqe_version))
+		return -EINVAL;
+
+	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
+}
 #endif /* MLX5_IB_USER_H */
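
get_qp_user_index() and get_srq_user_index() tie the pieces together: field_avail() reports whether the user command was long enough to carry the new uidx field, and that must agree with the CQE version the context negotiated, with one grandfathered case for a v0 context whose longer command carries the default index. A condensed sketch of that acceptance matrix, where has_uidx stands in for field_avail():

/*
 * Condensed sketch of the acceptance matrix in get_qp_user_index() /
 * get_srq_user_index(). "has_uidx" stands in for
 * field_avail(struct ..., uidx, inlen); DEFAULT_UIDX mirrors
 * MLX5_IB_DEFAULT_UIDX. Illustrative only.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#define DEFAULT_UIDX 0xffffff

static int get_user_index(uint8_t cqe_version, bool has_uidx,
			  uint32_t cmd_uidx, uint32_t *user_index)
{
	/* v0 context, new-style command explicitly asking for the default:
	 * accepted; *user_index keeps its caller-initialized default. */
	if (has_uidx && !cqe_version && cmd_uidx == DEFAULT_UIDX)
		return 0;

	/* Otherwise the command layout must match the negotiated version:
	 * a v1 context requires uidx, a v0 context must not pass one. */
	if (has_uidx != (cqe_version != 0))
		return -EINVAL;

	/* Same range/default rules as verify_assign_uidx() */
	if (cqe_version) {
		/* 0xffffff doubles as the 24-bit field mask here */
		if (cmd_uidx == DEFAULT_UIDX || (cmd_uidx & ~DEFAULT_UIDX))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = DEFAULT_UIDX;
	}
	return 0;
}
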
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 8b69fbfa2788..fa1ca28e8b5a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -696,7 +696,6 @@ clean:
 	return err;
 }
 
-#ifdef CONFIG_MLX5_CORE_EN
 static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
 {
 	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
@@ -749,7 +748,6 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
 
 	return -ENOTSUPP;
 }
-#endif
 
 static int map_bf_area(struct mlx5_core_dev *dev)
 {
@@ -995,13 +993,11 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 		goto err_pagealloc_cleanup;
 	}
 
-#ifdef CONFIG_MLX5_CORE_EN
 	err = mlx5_core_set_issi(dev);
 	if (err) {
 		dev_err(&pdev->dev, "failed to set issi\n");
 		goto err_disable_hca;
 	}
-#endif
 
 	err = mlx5_satisfy_startup_pages(dev, 1);
 	if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 30e2ba3f5f16..803a1f268c0f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -187,17 +187,10 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
 	struct mlx5_destroy_qp_mbox_in din;
 	struct mlx5_destroy_qp_mbox_out dout;
 	int err;
-	void *qpc;
 
 	memset(&out, 0, sizeof(out));
 	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
 
-	if (dev->issi) {
-		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
-		/* 0xffffff means we ask to work with cqe version 0 */
-		MLX5_SET(qpc, qpc, user_index, 0xffffff);
-	}
-
 	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
 	if (err) {
 		mlx5_core_warn(dev, "ret %d\n", err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index ffada801976b..ec5f901cfcfe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -241,8 +241,6 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
 
 	memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc));
 	memcpy(pas, in->pas, pas_size);
-	/* 0xffffff means we ask to work with cqe version 0 */
-	MLX5_SET(xrc_srqc, xrc_srqc, user_index, 0xffffff);
 	MLX5_SET(create_xrc_srq_in, create_in, opcode,
 		 MLX5_CMD_OP_CREATE_XRC_SRQ);
 