about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	majd@mellanox.com <majd@mellanox.com>	2016-01-14 12:13:05 -0500
committer	Doug Ledford <dledford@redhat.com>	2016-01-21 12:01:09 -0500
commit	6d2f89df04b796e7dcc4f9f8dc0d8f04ad7f144b (patch)
tree	4e46f16bddb50a93de0328b4cf8c82ad0864ac29
parent	0fb2ed66a14c8c34096d6a8cff5112356c5e9ea2 (diff)
IB/mlx5: Add Raw Packet QP query functionality
Since Raw Packet QP is composed of RQ and SQ, the IB QP's state is derived
from the sub-objects. Therefore we need to query each one of the
sub-objects, and decide on the IB QP's state.

Signed-off-by: Majd Dibbiny <majd@mellanox.com>
Reviewed-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--	drivers/infiniband/hw/mlx5/qp.c	| 191
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/transobj.c	| 24
-rw-r--r--	include/linux/mlx5/qp.h	| 11
-rw-r--r--	include/linux/mlx5/transobj.h	| 2
4 files changed, 205 insertions(+), 23 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 89f05bff0485..290e97bc065c 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3438,40 +3438,153 @@ static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_at
3438 } 3438 }
3439} 3439}
3440 3440
3441int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, 3441static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev,
3442 struct ib_qp_init_attr *qp_init_attr) 3442 struct mlx5_ib_sq *sq,
3443 u8 *sq_state)
3444{
3445 void *out;
3446 void *sqc;
3447 int inlen;
3448 int err;
3449
3450 inlen = MLX5_ST_SZ_BYTES(query_sq_out);
3451 out = mlx5_vzalloc(inlen);
3452 if (!out)
3453 return -ENOMEM;
3454
3455 err = mlx5_core_query_sq(dev->mdev, sq->base.mqp.qpn, out);
3456 if (err)
3457 goto out;
3458
3459 sqc = MLX5_ADDR_OF(query_sq_out, out, sq_context);
3460 *sq_state = MLX5_GET(sqc, sqc, state);
3461 sq->state = *sq_state;
3462
3463out:
3464 kvfree(out);
3465 return err;
3466}
3467
3468static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev,
3469 struct mlx5_ib_rq *rq,
3470 u8 *rq_state)
3471{
3472 void *out;
3473 void *rqc;
3474 int inlen;
3475 int err;
3476
3477 inlen = MLX5_ST_SZ_BYTES(query_rq_out);
3478 out = mlx5_vzalloc(inlen);
3479 if (!out)
3480 return -ENOMEM;
3481
3482 err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out);
3483 if (err)
3484 goto out;
3485
3486 rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context);
3487 *rq_state = MLX5_GET(rqc, rqc, state);
3488 rq->state = *rq_state;
3489
3490out:
3491 kvfree(out);
3492 return err;
3493}
3494
3495static int sqrq_state_to_qp_state(u8 sq_state, u8 rq_state,
3496 struct mlx5_ib_qp *qp, u8 *qp_state)
3497{
3498 static const u8 sqrq_trans[MLX5_RQ_NUM_STATE][MLX5_SQ_NUM_STATE] = {
3499 [MLX5_RQC_STATE_RST] = {
3500 [MLX5_SQC_STATE_RST] = IB_QPS_RESET,
3501 [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE_BAD,
3502 [MLX5_SQC_STATE_ERR] = MLX5_QP_STATE_BAD,
3503 [MLX5_SQ_STATE_NA] = IB_QPS_RESET,
3504 },
3505 [MLX5_RQC_STATE_RDY] = {
3506 [MLX5_SQC_STATE_RST] = MLX5_QP_STATE_BAD,
3507 [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE,
3508 [MLX5_SQC_STATE_ERR] = IB_QPS_SQE,
3509 [MLX5_SQ_STATE_NA] = MLX5_QP_STATE,
3510 },
3511 [MLX5_RQC_STATE_ERR] = {
3512 [MLX5_SQC_STATE_RST] = MLX5_QP_STATE_BAD,
3513 [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE_BAD,
3514 [MLX5_SQC_STATE_ERR] = IB_QPS_ERR,
3515 [MLX5_SQ_STATE_NA] = IB_QPS_ERR,
3516 },
3517 [MLX5_RQ_STATE_NA] = {
3518 [MLX5_SQC_STATE_RST] = IB_QPS_RESET,
3519 [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE,
3520 [MLX5_SQC_STATE_ERR] = MLX5_QP_STATE,
3521 [MLX5_SQ_STATE_NA] = MLX5_QP_STATE_BAD,
3522 },
3523 };
3524
3525 *qp_state = sqrq_trans[rq_state][sq_state];
3526
3527 if (*qp_state == MLX5_QP_STATE_BAD) {
3528 WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x",
3529 qp->raw_packet_qp.sq.base.mqp.qpn, sq_state,
3530 qp->raw_packet_qp.rq.base.mqp.qpn, rq_state);
3531 return -EINVAL;
3532 }
3533
3534 if (*qp_state == MLX5_QP_STATE)
3535 *qp_state = qp->state;
3536
3537 return 0;
3538}
3539
3540static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev,
3541 struct mlx5_ib_qp *qp,
3542 u8 *raw_packet_qp_state)
3543{
3544 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
3545 struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
3546 struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
3547 int err;
3548 u8 sq_state = MLX5_SQ_STATE_NA;
3549 u8 rq_state = MLX5_RQ_STATE_NA;
3550
3551 if (qp->sq.wqe_cnt) {
3552 err = query_raw_packet_qp_sq_state(dev, sq, &sq_state);
3553 if (err)
3554 return err;
3555 }
3556
3557 if (qp->rq.wqe_cnt) {
3558 err = query_raw_packet_qp_rq_state(dev, rq, &rq_state);
3559 if (err)
3560 return err;
3561 }
3562
3563 return sqrq_state_to_qp_state(sq_state, rq_state, qp,
3564 raw_packet_qp_state);
3565}
3566
3567static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
3568 struct ib_qp_attr *qp_attr)
3443{ 3569{
3444 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
3445 struct mlx5_ib_qp *qp = to_mqp(ibqp);
3446 struct mlx5_query_qp_mbox_out *outb; 3570 struct mlx5_query_qp_mbox_out *outb;
3447 struct mlx5_qp_context *context; 3571 struct mlx5_qp_context *context;
3448 int mlx5_state; 3572 int mlx5_state;
3449 int err = 0; 3573 int err = 0;
3450 3574
3451#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
3452 /*
3453 * Wait for any outstanding page faults, in case the user frees memory
3454 * based upon this query's result.
3455 */
3456 flush_workqueue(mlx5_ib_page_fault_wq);
3457#endif
3458
3459 mutex_lock(&qp->mutex);
3460 outb = kzalloc(sizeof(*outb), GFP_KERNEL); 3575 outb = kzalloc(sizeof(*outb), GFP_KERNEL);
3461 if (!outb) { 3576 if (!outb)
3462 err = -ENOMEM; 3577 return -ENOMEM;
3463 goto out; 3578
3464 }
3465 context = &outb->ctx; 3579 context = &outb->ctx;
3466 err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb, 3580 err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb,
3467 sizeof(*outb)); 3581 sizeof(*outb));
3468 if (err) 3582 if (err)
3469 goto out_free; 3583 goto out;
3470 3584
3471 mlx5_state = be32_to_cpu(context->flags) >> 28; 3585 mlx5_state = be32_to_cpu(context->flags) >> 28;
3472 3586
3473 qp->state = to_ib_qp_state(mlx5_state); 3587 qp->state = to_ib_qp_state(mlx5_state);
3474 qp_attr->qp_state = qp->state;
3475 qp_attr->path_mtu = context->mtu_msgmax >> 5; 3588 qp_attr->path_mtu = context->mtu_msgmax >> 5;
3476 qp_attr->path_mig_state = 3589 qp_attr->path_mig_state =
3477 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3); 3590 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
@@ -3505,6 +3618,43 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
3505 qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7; 3618 qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
3506 qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7; 3619 qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7;
3507 qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3; 3620 qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3;
3621
3622out:
3623 kfree(outb);
3624 return err;
3625}
3626
3627int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3628 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
3629{
3630 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
3631 struct mlx5_ib_qp *qp = to_mqp(ibqp);
3632 int err = 0;
3633 u8 raw_packet_qp_state;
3634
3635#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
3636 /*
3637 * Wait for any outstanding page faults, in case the user frees memory
3638 * based upon this query's result.
3639 */
3640 flush_workqueue(mlx5_ib_page_fault_wq);
3641#endif
3642
3643 mutex_lock(&qp->mutex);
3644
3645 if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
3646 err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
3647 if (err)
3648 goto out;
3649 qp->state = raw_packet_qp_state;
3650 qp_attr->port_num = 1;
3651 } else {
3652 err = query_qp_attr(dev, qp, qp_attr);
3653 if (err)
3654 goto out;
3655 }
3656
3657 qp_attr->qp_state = qp->state;
3508 qp_attr->cur_qp_state = qp_attr->qp_state; 3658 qp_attr->cur_qp_state = qp_attr->qp_state;
3509 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; 3659 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
3510 qp_attr->cap.max_recv_sge = qp->rq.max_gs; 3660 qp_attr->cap.max_recv_sge = qp->rq.max_gs;
@@ -3538,9 +3688,6 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
3538 qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ? 3688 qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
3539 IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; 3689 IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
3540 3690
3541out_free:
3542 kfree(outb);
3543
3544out: 3691out:
3545 mutex_unlock(&qp->mutex); 3692 mutex_unlock(&qp->mutex);
3546 return err; 3693 return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 91ea2780e412..460d9ff0222f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -111,6 +111,18 @@ void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
111 mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); 111 mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
112} 112}
113 113
114int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out)
115{
116 u32 in[MLX5_ST_SZ_DW(query_rq_in)] = {0};
117 int outlen = MLX5_ST_SZ_BYTES(query_rq_out);
118
119 MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ);
120 MLX5_SET(query_rq_in, in, rqn, rqn);
121
122 return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
123}
124EXPORT_SYMBOL(mlx5_core_query_rq);
125
114int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn) 126int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
115{ 127{
116 u32 out[MLX5_ST_SZ_DW(create_sq_out)]; 128 u32 out[MLX5_ST_SZ_DW(create_sq_out)];
@@ -151,6 +163,18 @@ void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
151 mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); 163 mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
152} 164}
153 165
166int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out)
167{
168 u32 in[MLX5_ST_SZ_DW(query_sq_in)] = {0};
169 int outlen = MLX5_ST_SZ_BYTES(query_sq_out);
170
171 MLX5_SET(query_sq_in, in, opcode, MLX5_CMD_OP_QUERY_SQ);
172 MLX5_SET(query_sq_in, in, sqn, sqn);
173
174 return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
175}
176EXPORT_SYMBOL(mlx5_core_query_sq);
177
154int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, 178int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
155 u32 *tirn) 179 u32 *tirn)
156{ 180{
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 431176ec70e2..f033c7a1490c 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -85,7 +85,16 @@ enum mlx5_qp_state {
85 MLX5_QP_STATE_ERR = 6, 85 MLX5_QP_STATE_ERR = 6,
86 MLX5_QP_STATE_SQ_DRAINING = 7, 86 MLX5_QP_STATE_SQ_DRAINING = 7,
87 MLX5_QP_STATE_SUSPENDED = 9, 87 MLX5_QP_STATE_SUSPENDED = 9,
88 MLX5_QP_NUM_STATE 88 MLX5_QP_NUM_STATE,
89 MLX5_QP_STATE,
90 MLX5_QP_STATE_BAD,
91};
92
93enum {
94 MLX5_SQ_STATE_NA = MLX5_SQC_STATE_ERR + 1,
95 MLX5_SQ_NUM_STATE = MLX5_SQ_STATE_NA + 1,
96 MLX5_RQ_STATE_NA = MLX5_RQC_STATE_ERR + 1,
97 MLX5_RQ_NUM_STATE = MLX5_RQ_STATE_NA + 1,
89}; 98};
90 99
91enum { 100enum {
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 376229f09499..d259e4c423dd 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -41,10 +41,12 @@ int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen,
41 u32 *rqn); 41 u32 *rqn);
42int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen); 42int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
43void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn); 43void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
44int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out);
44int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, 45int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
45 u32 *sqn); 46 u32 *sqn);
46int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen); 47int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
47void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn); 48void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
49int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out);
48int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, 50int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
49 u32 *tirn); 51 u32 *tirn);
50int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in, 52int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,