author	Artemy Kovalyov <artemyko@mellanox.com>	2017-08-17 08:52:10 -0400
committer	Doug Ledford <dledford@redhat.com>	2017-08-29 08:30:20 -0400
commit	5b3ec3fcb6bbe081279c73fb574af8c72f14cea0
tree	d822f048db7c845ea79dab7b9ef1b7874ace3955
parent	eb761894351d0372248f2636c213d7b822e8775f
net/mlx5: Add XRQ support
Add support for the new XRQ (eXtended shared Receive Queue) hardware object. It supports SRQ semantics with the addition of extended receive buffer topologies and offloads. Currently the tag matching topology and rendezvous offload are supported.

Signed-off-by: Artemy Kovalyov <artemyko@mellanox.com>
Reviewed-by: Yossi Itigin <yosefe@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
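As a hedged illustration (not part of this patch), the sketch below shows how a consumer such as the mlx5 IB driver might request a tag-matching XRQ with the rendezvous offload through the updated mlx5_core_create_srq() path. The helper name, the cqn argument and the list-size value are illustrative assumptions; work-queue geometry, the PAS array and the doorbell record are assumed to be set up elsewhere.

/* Illustrative sketch only, not part of this patch: request a tag-matching
 * XRQ with the rendezvous offload. in->type == IB_SRQT_TM makes
 * mlx5_core_create_srq() pick MLX5_RES_XRQ and call create_xrq_cmd(). */
static int example_create_tm_xrq(struct mlx5_core_dev *dev,
				 struct mlx5_core_srq *srq, u32 cqn)
{
	struct mlx5_srq_attr attr = {};

	attr.type = IB_SRQT_TM;			/* routed to create_xrq_cmd() */
	attr.flags |= MLX5_SRQ_FLAG_RNDV;	/* enable rendezvous offload */
	attr.tm_log_list_size = 4;		/* log2 matching-list size (illustrative) */
	attr.cqn = cqn;				/* CQ that receives XRQ completions */
	/* WQ geometry, attr.pas and attr.db_record would be filled in here. */

	return mlx5_core_create_srq(dev, srq, &attr);
}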
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/srq.c	| 150
-rw-r--r--	include/linux/mlx5/driver.h	|   1
-rw-r--r--	include/linux/mlx5/srq.h	|   5
3 files changed, 146 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index f774de6f5fcb..7673da04efa4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -435,16 +435,128 @@ out:
 	return err;
 }
 
+static int create_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+			  struct mlx5_srq_attr *in)
+{
+	u32 create_out[MLX5_ST_SZ_DW(create_xrq_out)] = {0};
+	void *create_in;
+	void *xrqc;
+	void *wq;
+	int pas_size;
+	int inlen;
+	int err;
+
+	pas_size = get_pas_size(in);
+	inlen = MLX5_ST_SZ_BYTES(create_xrq_in) + pas_size;
+	create_in = kvzalloc(inlen, GFP_KERNEL);
+	if (!create_in)
+		return -ENOMEM;
+
+	xrqc = MLX5_ADDR_OF(create_xrq_in, create_in, xrq_context);
+	wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
+
+	set_wq(wq, in);
+	memcpy(MLX5_ADDR_OF(xrqc, xrqc, wq.pas), in->pas, pas_size);
+
+	if (in->type == IB_SRQT_TM) {
+		MLX5_SET(xrqc, xrqc, topology, MLX5_XRQC_TOPOLOGY_TAG_MATCHING);
+		if (in->flags & MLX5_SRQ_FLAG_RNDV)
+			MLX5_SET(xrqc, xrqc, offload, MLX5_XRQC_OFFLOAD_RNDV);
+		MLX5_SET(xrqc, xrqc,
+			 tag_matching_topology_context.log_matching_list_sz,
+			 in->tm_log_list_size);
+	}
+	MLX5_SET(xrqc, xrqc, user_index, in->user_index);
+	MLX5_SET(xrqc, xrqc, cqn, in->cqn);
+	MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ);
+	err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
+			    sizeof(create_out));
+	kvfree(create_in);
+	if (!err)
+		srq->srqn = MLX5_GET(create_xrq_out, create_out, xrqn);
+
+	return err;
+}
+
+static int destroy_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_xrq_out)] = {0};
+
+	MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
+	MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int arm_xrq_cmd(struct mlx5_core_dev *dev,
+		       struct mlx5_core_srq *srq,
+		       u16 lwm)
+{
+	u32 out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
+
+	MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
+	MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
+	MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
+	MLX5_SET(arm_rq_in, in, lwm, lwm);
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int query_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+			 struct mlx5_srq_attr *out)
+{
+	u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {0};
+	u32 *xrq_out;
+	int outlen = MLX5_ST_SZ_BYTES(query_xrq_out);
+	void *xrqc;
+	int err;
+
+	xrq_out = kvzalloc(outlen, GFP_KERNEL);
+	if (!xrq_out)
+		return -ENOMEM;
+
+	MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ);
+	MLX5_SET(query_xrq_in, in, xrqn, srq->srqn);
+
+	err = mlx5_cmd_exec(dev, in, sizeof(in), xrq_out, outlen);
+	if (err)
+		goto out;
+
+	xrqc = MLX5_ADDR_OF(query_xrq_out, xrq_out, xrq_context);
+	get_wq(MLX5_ADDR_OF(xrqc, xrqc, wq), out);
+	if (MLX5_GET(xrqc, xrqc, state) != MLX5_XRQC_STATE_GOOD)
+		out->flags |= MLX5_SRQ_FLAG_ERR;
+	out->tm_next_tag =
+		MLX5_GET(xrqc, xrqc,
+			 tag_matching_topology_context.append_next_index);
+	out->tm_hw_phase_cnt =
+		MLX5_GET(xrqc, xrqc,
+			 tag_matching_topology_context.hw_phase_cnt);
+	out->tm_sw_phase_cnt =
+		MLX5_GET(xrqc, xrqc,
+			 tag_matching_topology_context.sw_phase_cnt);
+
+out:
+	kvfree(xrq_out);
+	return err;
+}
+
 static int create_srq_split(struct mlx5_core_dev *dev,
 			    struct mlx5_core_srq *srq,
 			    struct mlx5_srq_attr *in)
 {
 	if (!dev->issi)
 		return create_srq_cmd(dev, srq, in);
-	else if (srq->common.res == MLX5_RES_XSRQ)
+	switch (srq->common.res) {
+	case MLX5_RES_XSRQ:
 		return create_xrc_srq_cmd(dev, srq, in);
-	else
+	case MLX5_RES_XRQ:
+		return create_xrq_cmd(dev, srq, in);
+	default:
 		return create_rmp_cmd(dev, srq, in);
+	}
 }
 
 static int destroy_srq_split(struct mlx5_core_dev *dev,
@@ -452,10 +564,14 @@ static int destroy_srq_split(struct mlx5_core_dev *dev,
 {
 	if (!dev->issi)
 		return destroy_srq_cmd(dev, srq);
-	else if (srq->common.res == MLX5_RES_XSRQ)
+	switch (srq->common.res) {
+	case MLX5_RES_XSRQ:
 		return destroy_xrc_srq_cmd(dev, srq);
-	else
+	case MLX5_RES_XRQ:
+		return destroy_xrq_cmd(dev, srq);
+	default:
 		return destroy_rmp_cmd(dev, srq);
+	}
 }
 
 int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
@@ -464,10 +580,16 @@ int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
 	int err;
 	struct mlx5_srq_table *table = &dev->priv.srq_table;
 
-	if (in->type == IB_SRQT_XRC)
+	switch (in->type) {
+	case IB_SRQT_XRC:
 		srq->common.res = MLX5_RES_XSRQ;
-	else
+		break;
+	case IB_SRQT_TM:
+		srq->common.res = MLX5_RES_XRQ;
+		break;
+	default:
 		srq->common.res = MLX5_RES_SRQ;
+	}
 
 	err = create_srq_split(dev, srq, in);
 	if (err)
@@ -528,10 +650,14 @@ int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
 {
 	if (!dev->issi)
 		return query_srq_cmd(dev, srq, out);
-	else if (srq->common.res == MLX5_RES_XSRQ)
+	switch (srq->common.res) {
+	case MLX5_RES_XSRQ:
 		return query_xrc_srq_cmd(dev, srq, out);
-	else
+	case MLX5_RES_XRQ:
+		return query_xrq_cmd(dev, srq, out);
+	default:
 		return query_rmp_cmd(dev, srq, out);
+	}
 }
 EXPORT_SYMBOL(mlx5_core_query_srq);
 
@@ -540,10 +666,14 @@ int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
 {
 	if (!dev->issi)
 		return arm_srq_cmd(dev, srq, lwm, is_srq);
-	else if (srq->common.res == MLX5_RES_XSRQ)
+	switch (srq->common.res) {
+	case MLX5_RES_XSRQ:
 		return arm_xrc_srq_cmd(dev, srq, lwm);
-	else
+	case MLX5_RES_XRQ:
+		return arm_xrq_cmd(dev, srq, lwm);
+	default:
 		return arm_rmp_cmd(dev, srq, lwm);
+	}
 }
 EXPORT_SYMBOL(mlx5_core_arm_srq);
 
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 99d88624ad07..c33e6f7a1afb 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -418,6 +418,7 @@ enum mlx5_res_type {
 	MLX5_RES_SQ	= MLX5_EVENT_QUEUE_TYPE_SQ,
 	MLX5_RES_SRQ	= 3,
 	MLX5_RES_XSRQ	= 4,
+	MLX5_RES_XRQ	= 5,
 };
 
 struct mlx5_core_rsc_common {
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
index 1cde0fd53f90..24ff23e27c8a 100644
--- a/include/linux/mlx5/srq.h
+++ b/include/linux/mlx5/srq.h
@@ -38,6 +38,7 @@
 enum {
 	MLX5_SRQ_FLAG_ERR	= (1 << 0),
 	MLX5_SRQ_FLAG_WQ_SIG	= (1 << 1),
+	MLX5_SRQ_FLAG_RNDV	= (1 << 2),
 };
 
 struct mlx5_srq_attr {
@@ -56,6 +57,10 @@ struct mlx5_srq_attr {
 	u32 user_index;
 	u64 db_record;
 	__be64 *pas;
+	u32 tm_log_list_size;
+	u32 tm_next_tag;
+	u32 tm_hw_phase_cnt;
+	u32 tm_sw_phase_cnt;
 };
 
 struct mlx5_core_dev;
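
A similarly hedged sketch (not part of the patch) of reading back the new tag-matching fields added to struct mlx5_srq_attr above; the helper name and the pr_debug() output are illustrative only.

/* Illustrative sketch only: query an XRQ and dump its tag-matching state.
 * For MLX5_RES_XRQ resources mlx5_core_query_srq() routes to query_xrq_cmd(). */
static void example_dump_tm_state(struct mlx5_core_dev *dev,
				  struct mlx5_core_srq *srq)
{
	struct mlx5_srq_attr out = {};

	if (mlx5_core_query_srq(dev, srq, &out))
		return;

	pr_debug("xrq 0x%x: next_tag %u hw_phase %u sw_phase %u%s\n",
		 srq->srqn, out.tm_next_tag, out.tm_hw_phase_cnt,
		 out.tm_sw_phase_cnt,
		 (out.flags & MLX5_SRQ_FLAG_ERR) ? " (error)" : "");
}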