author    Saeed Mahameed <saeedm@mellanox.com>  2016-07-15 19:33:22 -0400
committer Leon Romanovsky <leon@kernel.org>     2016-08-14 07:39:15 -0400
commit    278277866334e515141dde7c8ac143e15c0a767f (patch)
tree      4b945038ae9d992868e08bdd8c717adb36e1e1fb
parent    73b626c182dff06867ceba996a819e8372c9b2ce (diff)
{net,IB}/mlx5: CQ commands via mlx5 ifc
Remove the old, manually defined layout of the CQ command mailboxes and use the canonical mlx5_ifc structures and defines instead.

Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c                   | 110
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c      | 113
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/debugfs.c |  18
-rw-r--r--  include/linux/mlx5/cq.h                           |   6
-rw-r--r--  include/linux/mlx5/device.h                       |  76
5 files changed, 122 insertions(+), 201 deletions(-)
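
For readers unfamiliar with the convention this patch adopts: mlx5_ifc replaces hand-written mailbox structs with layouts derived from the hardware PRM. Commands are built in plain u32 buffers sized with MLX5_ST_SZ_DW()/MLX5_ST_SZ_BYTES(), fields are written with MLX5_SET()/MLX5_SET64() and read with MLX5_GET(), and nested blocks such as the CQ context are located with MLX5_ADDR_OF(). A minimal sketch of the pattern, modelled on the moderation helper in the diff below (the function name example_set_cq_moderation is illustrative only, not part of the patch):

/* Illustrative sketch (not part of this patch): build a MODIFY_CQ
 * command the mlx5_ifc way, mirroring mlx5_core_modify_cq_moderation().
 */
static int example_set_cq_moderation(struct mlx5_core_dev *dev,
				     struct mlx5_core_cq *cq,
				     u16 period, u16 max_count)
{
	/* Stack buffer sized from the canonical layout, not a C struct. */
	u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
	void *cqc;

	MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
	/* Tell firmware which context fields are being modified. */
	MLX5_SET(modify_cq_in, in,
		 modify_field_select_resize_field_select.modify_field_select.modify_field_select,
		 MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);

	/* MLX5_ADDR_OF() locates the embedded cq_context block; MLX5_SET()
	 * handles byte order and bit offsets within it.
	 */
	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
	MLX5_SET(cqc, cqc, cq_period, period);
	MLX5_SET(cqc, cqc, cq_max_count, max_count);

	/* mlx5_core_modify_cq() stamps the opcode and executes the command. */
	return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
}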
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 308a358e5b46..35a9f718e669 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -747,14 +747,16 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
 
 static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 			  struct ib_ucontext *context, struct mlx5_ib_cq *cq,
-			  int entries, struct mlx5_create_cq_mbox_in **cqb,
+			  int entries, u32 **cqb,
 			  int *cqe_size, int *index, int *inlen)
 {
 	struct mlx5_ib_create_cq ucmd;
 	size_t ucmdlen;
 	int page_shift;
+	__be64 *pas;
 	int npages;
 	int ncont;
+	void *cqc;
 	int err;
 
 	ucmdlen =
@@ -792,14 +794,20 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 	mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
 		    ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
 
-	*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * ncont;
+	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
 	*cqb = mlx5_vzalloc(*inlen);
 	if (!*cqb) {
 		err = -ENOMEM;
 		goto err_db;
 	}
-	mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0);
-	(*cqb)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+
+	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
+	mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);
+
+	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
+	MLX5_SET(cqc, cqc, log_page_size,
+		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
 
 	*index = to_mucontext(context)->uuari.uars[0].index;
 
@@ -834,9 +842,10 @@ static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
 
 static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 			    int entries, int cqe_size,
-			    struct mlx5_create_cq_mbox_in **cqb,
-			    int *index, int *inlen)
+			    u32 **cqb, int *index, int *inlen)
 {
+	__be64 *pas;
+	void *cqc;
 	int err;
 
 	err = mlx5_db_alloc(dev->mdev, &cq->db);
@@ -853,15 +862,21 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 
 	init_cq_buf(cq, &cq->buf);
 
-	*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages;
+	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
 	*cqb = mlx5_vzalloc(*inlen);
 	if (!*cqb) {
 		err = -ENOMEM;
 		goto err_buf;
 	}
-	mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);
 
-	(*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
+	mlx5_fill_page_array(&cq->buf.buf, pas);
+
+	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
+	MLX5_SET(cqc, cqc, log_page_size,
+		 cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+
 	*index = dev->mdev->priv.uuari.uars[0].index;
 
 	return 0;
@@ -895,11 +910,12 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 {
 	int entries = attr->cqe;
 	int vector = attr->comp_vector;
-	struct mlx5_create_cq_mbox_in *cqb = NULL;
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 	struct mlx5_ib_cq *cq;
 	int uninitialized_var(index);
 	int uninitialized_var(inlen);
+	u32 *cqb = NULL;
+	void *cqc;
 	int cqe_size;
 	unsigned int irqn;
 	int eqn;
@@ -945,19 +961,20 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
 	}
 
-	cq->cqe_size = cqe_size;
-	cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
-
-	if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
-		cqb->ctx.cqe_sz_flags |= (1 << 1);
-
-	cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index);
 	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
 	if (err)
 		goto err_cqb;
 
-	cqb->ctx.c_eqn = cpu_to_be16(eqn);
-	cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);
+	cq->cqe_size = cqe_size;
+
+	cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
+	MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
+	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
+	MLX5_SET(cqc, cqc, uar_page, index);
+	MLX5_SET(cqc, cqc, c_eqn, eqn);
+	MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
+	if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
+		MLX5_SET(cqc, cqc, oi, 1);
 
 	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
 	if (err)
@@ -1088,27 +1105,15 @@ void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
 
 int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 {
-	struct mlx5_modify_cq_mbox_in *in;
 	struct mlx5_ib_dev *dev = to_mdev(cq->device);
 	struct mlx5_ib_cq *mcq = to_mcq(cq);
 	int err;
-	u32 fsel;
 
 	if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
 		return -ENOSYS;
 
-	in = kzalloc(sizeof(*in), GFP_KERNEL);
-	if (!in)
-		return -ENOMEM;
-
-	in->cqn = cpu_to_be32(mcq->mcq.cqn);
-	fsel = (MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
-	in->ctx.cq_period = cpu_to_be16(cq_period);
-	in->ctx.cq_max_count = cpu_to_be16(cq_count);
-	in->field_select = cpu_to_be32(fsel);
-	err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
-	kfree(in);
-
+	err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
+					     cq_period, cq_count);
 	if (err)
 		mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);
 
@@ -1241,9 +1246,11 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
 	struct mlx5_ib_cq *cq = to_mcq(ibcq);
-	struct mlx5_modify_cq_mbox_in *in;
+	void *cqc;
+	u32 *in;
 	int err;
 	int npas;
+	__be64 *pas;
 	int page_shift;
 	int inlen;
 	int uninitialized_var(cqe_size);
@@ -1285,28 +1292,37 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	if (err)
 		goto ex;
 
-	inlen = sizeof(*in) + npas * sizeof(in->pas[0]);
+	inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
+		MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;
+
 	in = mlx5_vzalloc(inlen);
 	if (!in) {
 		err = -ENOMEM;
 		goto ex_resize;
 	}
 
+	pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
 	if (udata)
 		mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
-				     in->pas, 0);
+				     pas, 0);
 	else
-		mlx5_fill_page_array(&cq->resize_buf->buf, in->pas);
+		mlx5_fill_page_array(&cq->resize_buf->buf, pas);
 
-	in->field_select = cpu_to_be32(MLX5_MODIFY_CQ_MASK_LOG_SIZE |
-				       MLX5_MODIFY_CQ_MASK_PG_OFFSET |
-				       MLX5_MODIFY_CQ_MASK_PG_SIZE);
-	in->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
-	in->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
-	in->ctx.page_offset = 0;
-	in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24);
-	in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
-	in->cqn = cpu_to_be32(cq->mcq.cqn);
+	MLX5_SET(modify_cq_in, in,
+		 modify_field_select_resize_field_select.resize_field_select.resize_field_select,
+		 MLX5_MODIFY_CQ_MASK_LOG_SIZE |
+		 MLX5_MODIFY_CQ_MASK_PG_OFFSET |
+		 MLX5_MODIFY_CQ_MASK_PG_SIZE);
+
+	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
+
+	MLX5_SET(cqc, cqc, log_page_size,
+		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+	MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
+	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
+
+	MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
+	MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);
 
 	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
 	if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 873a631ad155..cf02d8a27874 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -134,33 +134,30 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
 		complete(&cq->free);
 }
 
-
 int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-			struct mlx5_create_cq_mbox_in *in, int inlen)
+			u32 *in, int inlen)
 {
-	int err;
 	struct mlx5_cq_table *table = &dev->priv.cq_table;
-	struct mlx5_create_cq_mbox_out out;
-	struct mlx5_destroy_cq_mbox_in din;
-	struct mlx5_destroy_cq_mbox_out dout;
+	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+	u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
+	u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
 	int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
 			   c_eqn);
 	struct mlx5_eq *eq;
+	int err;
 
 	eq = mlx5_eqn2eq(dev, eqn);
 	if (IS_ERR(eq))
 		return PTR_ERR(eq);
 
-	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ);
-	memset(&out, 0, sizeof(out));
-	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+	memset(out, 0, sizeof(out));
+	MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+	err = err ? : mlx5_cmd_status_to_err_v2(out);
 	if (err)
 		return err;
 
-	if (out.hdr.status)
-		return mlx5_cmd_status_to_err(&out.hdr);
-
-	cq->cqn = be32_to_cpu(out.cqn) & 0xffffff;
+	cq->cqn = MLX5_GET(create_cq_out, out, cqn);
 	cq->cons_index = 0;
 	cq->arm_sn = 0;
 	atomic_set(&cq->refcount, 1);
@@ -186,19 +183,21 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 	return 0;
 
 err_cmd:
-	memset(&din, 0, sizeof(din));
-	memset(&dout, 0, sizeof(dout));
-	din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
-	mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
-	return err;
+	memset(din, 0, sizeof(din));
+	memset(dout, 0, sizeof(dout));
+	MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
+	MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
+	err = mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
+	return err ? : mlx5_cmd_status_to_err_v2(out);
+
 }
 EXPORT_SYMBOL(mlx5_core_create_cq);
 
 int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
 {
 	struct mlx5_cq_table *table = &dev->priv.cq_table;
-	struct mlx5_destroy_cq_mbox_in in;
-	struct mlx5_destroy_cq_mbox_out out;
+	u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
 	struct mlx5_core_cq *tmp;
 	int err;
 
@@ -214,17 +213,13 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
 		return -EINVAL;
 	}
 
-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
-	in.cqn = cpu_to_be32(cq->cqn);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+	MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
+	MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	err = err ? : mlx5_cmd_status_to_err_v2(out);
 	if (err)
 		return err;
 
-	if (out.hdr.status)
-		return mlx5_cmd_status_to_err(&out.hdr);
-
 	synchronize_irq(cq->irqn);
 
 	mlx5_debug_cq_remove(dev, cq);
@@ -237,44 +232,28 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
 EXPORT_SYMBOL(mlx5_core_destroy_cq);
 
 int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-		       struct mlx5_query_cq_mbox_out *out)
+		       u32 *out, int outlen)
 {
-	struct mlx5_query_cq_mbox_in in;
+	u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0};
 	int err;
 
-	memset(&in, 0, sizeof(in));
-	memset(out, 0, sizeof(*out));
-
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ);
-	in.cqn = cpu_to_be32(cq->cqn);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
-	if (err)
-		return err;
-
-	if (out->hdr.status)
-		return mlx5_cmd_status_to_err(&out->hdr);
+	MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ);
+	MLX5_SET(query_cq_in, in, cqn, cq->cqn);
 
-	return err;
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+	return err ? : mlx5_cmd_status_to_err_v2(out);
 }
 EXPORT_SYMBOL(mlx5_core_query_cq);
 
-
 int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-			struct mlx5_modify_cq_mbox_in *in, int in_sz)
+			u32 *in, int inlen)
 {
-	struct mlx5_modify_cq_mbox_out out;
+	u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};
 	int err;
 
-	memset(&out, 0, sizeof(out));
-	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ);
-	err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out));
-	if (err)
-		return err;
-
-	if (out.hdr.status)
-		return mlx5_cmd_status_to_err(&out.hdr);
-
-	return 0;
+	MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+	return err ? : mlx5_cmd_status_to_err_v2(out);
 }
 EXPORT_SYMBOL(mlx5_core_modify_cq);
 
@@ -283,18 +262,20 @@ int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
 				   u16 cq_period,
 				   u16 cq_max_count)
 {
-	struct mlx5_modify_cq_mbox_in in;
-
-	memset(&in, 0, sizeof(in));
-
-	in.cqn = cpu_to_be32(cq->cqn);
-	in.ctx.cq_period = cpu_to_be16(cq_period);
-	in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
-	in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
-				      MLX5_CQ_MODIFY_COUNT);
-
-	return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
+	u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
+	void *cqc;
+
+	MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
+	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
+	MLX5_SET(cqc, cqc, cq_period, cq_period);
+	MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
+	MLX5_SET(modify_cq_in, in,
+		 modify_field_select_resize_field_select.modify_field_select.modify_field_select,
+		 MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
+
+	return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
 }
+EXPORT_SYMBOL(mlx5_core_modify_cq_moderation);
 
 int mlx5_init_cq_table(struct mlx5_core_dev *dev)
 {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 58e5518ebb27..b7484e4128c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -395,37 +395,37 @@ out:
 static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 			 int index)
 {
-	struct mlx5_query_cq_mbox_out *out;
-	struct mlx5_cq_context *ctx;
+	int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
 	u64 param = 0;
+	void *ctx;
+	u32 *out;
 	int err;
 
-	out = kzalloc(sizeof(*out), GFP_KERNEL);
+	out = mlx5_vzalloc(outlen);
 	if (!out)
 		return param;
 
-	ctx = &out->ctx;
-
-	err = mlx5_core_query_cq(dev, cq, out);
+	err = mlx5_core_query_cq(dev, cq, out, outlen);
 	if (err) {
 		mlx5_core_warn(dev, "failed to query cq\n");
 		goto out;
 	}
+	ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);
 
 	switch (index) {
 	case CQ_PID:
 		param = cq->pid;
 		break;
 	case CQ_NUM_CQES:
-		param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f);
+		param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
 		break;
 	case CQ_LOG_PG_SZ:
-		param = (ctx->log_pg_sz & 0x1f) + 12;
+		param = MLX5_GET(cqc, ctx, log_page_size);
 		break;
 	}
 
 out:
-	kfree(out);
+	kvfree(out);
 	return param;
 }
 
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 2566f6d6444f..7c3c0d3aca37 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -170,12 +170,12 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
 int mlx5_init_cq_table(struct mlx5_core_dev *dev);
 void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
 int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-			struct mlx5_create_cq_mbox_in *in, int inlen);
+			u32 *in, int inlen);
 int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
 int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-		       struct mlx5_query_cq_mbox_out *out);
+		       u32 *out, int outlen);
 int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-			struct mlx5_modify_cq_mbox_in *in, int in_sz);
+			u32 *in, int inlen);
 int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
 				   struct mlx5_core_cq *cq, u16 cq_period,
 				   u16 cq_max_count);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index c84e0ba5b261..5a1c1606bdbd 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -899,82 +899,6 @@ struct mlx5_arm_srq_mbox_out {
 	u8			rsvd[8];
 };
 
-struct mlx5_cq_context {
-	u8			status;
-	u8			cqe_sz_flags;
-	u8			st;
-	u8			rsvd3;
-	u8			rsvd4[6];
-	__be16			page_offset;
-	__be32			log_sz_usr_page;
-	__be16			cq_period;
-	__be16			cq_max_count;
-	__be16			rsvd20;
-	__be16			c_eqn;
-	u8			log_pg_sz;
-	u8			rsvd25[7];
-	__be32			last_notified_index;
-	__be32			solicit_producer_index;
-	__be32			consumer_counter;
-	__be32			producer_counter;
-	u8			rsvd48[8];
-	__be64			db_record_addr;
-};
-
-struct mlx5_create_cq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			input_cqn;
-	u8			rsvdx[4];
-	struct mlx5_cq_context	ctx;
-	u8			rsvd6[192];
-	__be64			pas[0];
-};
-
-struct mlx5_create_cq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	__be32			cqn;
-	u8			rsvd0[4];
-};
-
-struct mlx5_destroy_cq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			cqn;
-	u8			rsvd0[4];
-};
-
-struct mlx5_destroy_cq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd0[8];
-};
-
-struct mlx5_query_cq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			cqn;
-	u8			rsvd0[4];
-};
-
-struct mlx5_query_cq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd0[8];
-	struct mlx5_cq_context	ctx;
-	u8			rsvd6[16];
-	__be64			pas[0];
-};
-
-struct mlx5_modify_cq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			cqn;
-	__be32			field_select;
-	struct mlx5_cq_context	ctx;
-	u8			rsvd[192];
-	__be64			pas[0];
-};
-
-struct mlx5_modify_cq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
 struct mlx5_enable_hca_mbox_in {
 	struct mlx5_inbox_hdr	hdr;
 	u8			rsvd[8];