about summary refs log tree commit diff stats
path: root/drivers/net/ethernet/mellanox
diff options
context:
space:
mode:
authorEli Cohen <eli@mellanox.com>2014-10-02 05:19:44 -0400
committerDavid S. Miller <davem@davemloft.net>2014-10-03 18:42:32 -0400
commitb775516b042f9e35f856bd2914afefd9d23021d7 (patch)
tree691a622abe8e714f8ec35291e3603aa16fc09444 /drivers/net/ethernet/mellanox
parentd29b796adada8780db3512c4a34b339f9aeef1ae (diff)
net/mlx5_core: use set/get macros in device caps
Transform device-capabilities-related commands to use set/get macros to manipulate command mailboxes.

Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/mellanox')
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c150
2 files changed, 91 insertions(+), 76 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 3ecef1310bae..368c6c5ea014 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1537,3 +1537,20 @@ int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
1537 1537
1538 return cmd_status_to_err(hdr->status); 1538 return cmd_status_to_err(hdr->status);
1539} 1539}
1540
1541int mlx5_cmd_status_to_err_v2(void *ptr)
1542{
1543 u32 syndrome;
1544 u8 status;
1545
1546 status = be32_to_cpu(*(__be32 *)ptr) >> 24;
1547 if (!status)
1548 return 0;
1549
1550 syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));
1551
1552 pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
1553 cmd_status_str(status), status, syndrome);
1554
1555 return cmd_status_to_err(status);
1556}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d9f74618befa..b9e3259e415f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -43,6 +43,7 @@
43#include <linux/mlx5/qp.h> 43#include <linux/mlx5/qp.h>
44#include <linux/mlx5/srq.h> 44#include <linux/mlx5/srq.h>
45#include <linux/debugfs.h> 45#include <linux/debugfs.h>
46#include <linux/mlx5/mlx5_ifc.h>
46#include "mlx5_core.h" 47#include "mlx5_core.h"
47 48
48#define DRIVER_NAME "mlx5_core" 49#define DRIVER_NAME "mlx5_core"
@@ -277,18 +278,20 @@ static u16 to_fw_pkey_sz(u32 size)
277 278
278/* selectively copy writable fields clearing any reserved area 279/* selectively copy writable fields clearing any reserved area
279 */ 280 */
280static void copy_rw_fields(struct mlx5_hca_cap *to, struct mlx5_general_caps *from) 281static void copy_rw_fields(void *to, struct mlx5_caps *from)
281{ 282{
283 __be64 *flags_off = (__be64 *)MLX5_ADDR_OF(cmd_hca_cap, to, reserved_22);
282 u64 v64; 284 u64 v64;
283 285
284 to->log_max_qp = from->log_max_qp & 0x1f; 286 MLX5_SET(cmd_hca_cap, to, log_max_qp, from->gen.log_max_qp);
285 to->log_max_ra_req_dc = from->log_max_ra_req_dc & 0x3f; 287 MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp);
286 to->log_max_ra_res_dc = from->log_max_ra_res_dc & 0x3f; 288 MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp);
287 to->log_max_ra_req_qp = from->log_max_ra_req_qp & 0x3f; 289 MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size);
288 to->log_max_ra_res_qp = from->log_max_ra_res_qp & 0x3f; 290 MLX5_SET(cmd_hca_cap, to, log_max_ra_req_dc, from->gen.log_max_ra_req_dc);
289 to->pkey_table_size = cpu_to_be16(to_fw_pkey_sz(from->pkey_table_size)); 291 MLX5_SET(cmd_hca_cap, to, log_max_ra_res_dc, from->gen.log_max_ra_res_dc);
290 v64 = from->flags & MLX5_CAP_BITS_RW_MASK; 292 MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size));
291 to->flags = cpu_to_be64(v64); 293 v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK;
294 *flags_off = cpu_to_be64(v64);
292} 295}
293 296
294static u16 get_pkey_table_size(int pkey) 297static u16 get_pkey_table_size(int pkey)
@@ -299,55 +302,47 @@ static u16 get_pkey_table_size(int pkey)
299 return MLX5_MIN_PKEY_TABLE_SIZE << pkey; 302 return MLX5_MIN_PKEY_TABLE_SIZE << pkey;
300} 303}
301 304
302static void fw2drv_caps(struct mlx5_caps *caps, 305static void fw2drv_caps(struct mlx5_caps *caps, void *out)
303 struct mlx5_cmd_query_hca_cap_mbox_out *out)
304{ 306{
305 struct mlx5_general_caps *gen = &caps->gen; 307 struct mlx5_general_caps *gen = &caps->gen;
306 u16 t16;
307
308 gen->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
309 gen->max_wqes = 1 << out->hca_cap.log_max_qp_sz;
310 gen->log_max_qp = out->hca_cap.log_max_qp & 0x1f;
311 gen->log_max_strq = out->hca_cap.log_max_strq_sz;
312 gen->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
313 gen->max_cqes = 1 << out->hca_cap.log_max_cq_sz;
314 gen->log_max_cq = out->hca_cap.log_max_cq & 0x1f;
315 gen->max_eqes = out->hca_cap.log_max_eq_sz;
316 gen->log_max_mkey = out->hca_cap.log_max_mkey & 0x3f;
317 gen->log_max_eq = out->hca_cap.log_max_eq & 0xf;
318 gen->max_indirection = out->hca_cap.max_indirection;
319 gen->log_max_mrw_sz = out->hca_cap.log_max_mrw_sz;
320 gen->log_max_bsf_list_size = 0;
321 gen->log_max_klm_list_size = 0;
322 gen->log_max_ra_req_dc = out->hca_cap.log_max_ra_req_dc;
323 gen->log_max_ra_res_dc = out->hca_cap.log_max_ra_res_dc;
324 gen->log_max_ra_req_qp = out->hca_cap.log_max_ra_req_qp;
325 gen->log_max_ra_res_qp = out->hca_cap.log_max_ra_res_qp;
326 gen->max_qp_counters = be16_to_cpu(out->hca_cap.max_qp_count);
327 gen->pkey_table_size = get_pkey_table_size(be16_to_cpu(out->hca_cap.pkey_table_size));
328 gen->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
329 gen->num_ports = out->hca_cap.num_ports & 0xf;
330 gen->log_max_msg = out->hca_cap.log_max_msg & 0x1f;
331 gen->stat_rate_support = be16_to_cpu(out->hca_cap.stat_rate_support);
332 gen->flags = be64_to_cpu(out->hca_cap.flags);
333 pr_debug("flags = 0x%llx\n", gen->flags);
334 gen->uar_sz = out->hca_cap.uar_sz;
335 gen->min_log_pg_sz = out->hca_cap.log_pg_sz;
336 308
337 t16 = be16_to_cpu(out->hca_cap.bf_log_bf_reg_size); 309 gen->max_srq_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_srq_sz);
338 if (t16 & 0x8000) { 310 gen->max_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_qp_sz);
339 gen->bf_reg_size = 1 << (t16 & 0x1f); 311 gen->log_max_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_qp);
340 gen->bf_regs_per_page = MLX5_BF_REGS_PER_PAGE; 312 gen->log_max_strq = MLX5_GET_PR(cmd_hca_cap, out, log_max_strq_sz);
341 } else { 313 gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srqs);
342 gen->bf_reg_size = 0; 314 gen->max_cqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_cq_sz);
343 gen->bf_regs_per_page = 0; 315 gen->log_max_cq = MLX5_GET_PR(cmd_hca_cap, out, log_max_cq);
344 } 316 gen->max_eqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_eq_sz);
345 gen->max_sq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_sq); 317 gen->log_max_mkey = MLX5_GET_PR(cmd_hca_cap, out, log_max_mkey);
346 gen->max_rq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_rq); 318 gen->log_max_eq = MLX5_GET_PR(cmd_hca_cap, out, log_max_eq);
347 gen->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff; 319 gen->max_indirection = MLX5_GET_PR(cmd_hca_cap, out, max_indirection);
348 gen->log_max_pd = out->hca_cap.log_max_pd & 0x1f; 320 gen->log_max_mrw_sz = MLX5_GET_PR(cmd_hca_cap, out, log_max_mrw_sz);
349 gen->log_max_xrcd = out->hca_cap.log_max_xrcd; 321 gen->log_max_bsf_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_bsf_list_size);
350 gen->log_uar_page_sz = be16_to_cpu(out->hca_cap.log_uar_page_sz); 322 gen->log_max_klm_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_klm_list_size);
323 gen->log_max_ra_req_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_dc);
324 gen->log_max_ra_res_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_dc);
325 gen->log_max_ra_req_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_qp);
326 gen->log_max_ra_res_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_qp);
327 gen->max_qp_counters = MLX5_GET_PR(cmd_hca_cap, out, max_qp_cnt);
328 gen->pkey_table_size = get_pkey_table_size(MLX5_GET_PR(cmd_hca_cap, out, pkey_table_size));
329 gen->local_ca_ack_delay = MLX5_GET_PR(cmd_hca_cap, out, local_ca_ack_delay);
330 gen->num_ports = MLX5_GET_PR(cmd_hca_cap, out, num_ports);
331 gen->log_max_msg = MLX5_GET_PR(cmd_hca_cap, out, log_max_msg);
332 gen->stat_rate_support = MLX5_GET_PR(cmd_hca_cap, out, stat_rate_support);
333 gen->flags = be64_to_cpu(*(__be64 *)MLX5_ADDR_OF(cmd_hca_cap, out, reserved_22));
334 pr_debug("flags = 0x%llx\n", gen->flags);
335 gen->uar_sz = MLX5_GET_PR(cmd_hca_cap, out, uar_sz);
336 gen->min_log_pg_sz = MLX5_GET_PR(cmd_hca_cap, out, log_pg_sz);
337 gen->bf_reg_size = MLX5_GET_PR(cmd_hca_cap, out, bf);
338 gen->bf_reg_size = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_bf_reg_size);
339 gen->max_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq);
340 gen->max_rq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_rq);
341 gen->max_dc_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq_dc);
342 gen->max_qp_mcg = MLX5_GET_PR(cmd_hca_cap, out, max_qp_mcg);
343 gen->log_max_pd = MLX5_GET_PR(cmd_hca_cap, out, log_max_pd);
344 gen->log_max_xrcd = MLX5_GET_PR(cmd_hca_cap, out, log_max_xrcd);
345 gen->log_uar_page_sz = MLX5_GET_PR(cmd_hca_cap, out, log_uar_page_sz);
351} 346}
352 347
353static const char *caps_opmod_str(u16 opmod) 348static const char *caps_opmod_str(u16 opmod)
@@ -365,59 +360,61 @@ static const char *caps_opmod_str(u16 opmod)
365int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps, 360int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
366 u16 opmod) 361 u16 opmod)
367{ 362{
368 struct mlx5_cmd_query_hca_cap_mbox_out *out; 363 u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
369 struct mlx5_cmd_query_hca_cap_mbox_in in; 364 int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
365 void *out;
370 int err; 366 int err;
371 367
372 memset(&in, 0, sizeof(in)); 368 memset(in, 0, sizeof(in));
373 out = kzalloc(sizeof(*out), GFP_KERNEL); 369 out = kzalloc(out_sz, GFP_KERNEL);
374 if (!out) 370 if (!out)
375 return -ENOMEM; 371 return -ENOMEM;
372 MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
373 MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
374 err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
375 if (err)
376 goto query_ex;
376 377
377 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP); 378 err = mlx5_cmd_status_to_err_v2(out);
378 in.hdr.opmod = cpu_to_be16(opmod);
379 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
380
381 err = mlx5_cmd_status_to_err(&out->hdr);
382 if (err) { 379 if (err) {
383 mlx5_core_warn(dev, "query max hca cap failed, %d\n", err); 380 mlx5_core_warn(dev, "query max hca cap failed, %d\n", err);
384 goto query_ex; 381 goto query_ex;
385 } 382 }
386 mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod)); 383 mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod));
387 fw2drv_caps(caps, out); 384 fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct));
388 385
389query_ex: 386query_ex:
390 kfree(out); 387 kfree(out);
391 return err; 388 return err;
392} 389}
393 390
394static int set_caps(struct mlx5_core_dev *dev, 391static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
395 struct mlx5_cmd_set_hca_cap_mbox_in *in)
396{ 392{
397 struct mlx5_cmd_set_hca_cap_mbox_out out; 393 u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
398 int err; 394 int err;
399 395
400 memset(&out, 0, sizeof(out)); 396 memset(out, 0, sizeof(out));
401 397
402 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP); 398 MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
403 err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)); 399 err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
404 if (err) 400 if (err)
405 return err; 401 return err;
406 402
407 err = mlx5_cmd_status_to_err(&out.hdr); 403 err = mlx5_cmd_status_to_err_v2(out);
408 404
409 return err; 405 return err;
410} 406}
411 407
412static int handle_hca_cap(struct mlx5_core_dev *dev) 408static int handle_hca_cap(struct mlx5_core_dev *dev)
413{ 409{
414 struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL; 410 void *set_ctx = NULL;
415 struct mlx5_profile *prof = dev->profile; 411 struct mlx5_profile *prof = dev->profile;
416 struct mlx5_caps *cur_caps = NULL; 412 struct mlx5_caps *cur_caps = NULL;
417 struct mlx5_caps *max_caps = NULL; 413 struct mlx5_caps *max_caps = NULL;
418 int err = -ENOMEM; 414 int err = -ENOMEM;
415 int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
419 416
420 set_ctx = kzalloc(sizeof(*set_ctx), GFP_KERNEL); 417 set_ctx = kzalloc(set_sz, GFP_KERNEL);
421 if (!set_ctx) 418 if (!set_ctx)
422 goto query_ex; 419 goto query_ex;
423 420
@@ -446,8 +443,9 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
446 /* disable checksum */ 443 /* disable checksum */
447 cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM; 444 cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
448 445
449 copy_rw_fields(&set_ctx->hca_cap, &cur_caps->gen); 446 copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, hca_capability_struct),
450 err = set_caps(dev, set_ctx); 447 cur_caps);
448 err = set_caps(dev, set_ctx, set_sz);
451 449
452query_ex: 450query_ex:
453 kfree(cur_caps); 451 kfree(cur_caps);