author      Bodong Wang <bodong@mellanox.com>        2016-12-01 06:43:16 -0500
committer   Doug Ledford <dledford@redhat.com>       2016-12-13 13:39:51 -0500
commit      7d29f349a4b9dcf5bc9dcc05630d6a7f6b6b3ccd
tree        9c180abecd253ecbb93f16abe19a89dc64935356
parent      189aba99e70030cfb56bd8f199bc5b077a1bc6ff
IB/mlx5: Properly adjust rate limit on QP state transitions
- Add the MODIFY_QP_EX command to extend modify_qp.
- The rate limit is updated on the following state transitions: RTR2RTS and
  RTS2RTS. The limit is removed when the SQ enters the RST or ERR state.
Signed-off-by: Bodong Wang <bodong@mellanox.com>
Reviewed-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
 drivers/infiniband/hw/mlx5/main.c    |  3
 drivers/infiniband/hw/mlx5/mlx5_ib.h |  1
 drivers/infiniband/hw/mlx5/qp.c      | 74
 3 files changed, 69 insertions(+), 9 deletions(-)
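For context, the knob being plumbed through here is the rate_limit field that the companion IB/core patch in this series adds to struct ib_qp_attr, together with the IB_QP_RATE_LIMIT attribute mask bit. Below is a minimal sketch of how a kernel verbs consumer might request packet pacing on a raw packet QP once both patches are applied; the helper name is made up for illustration, and the rate unit is assumed to be Kbit/sec.

#include <rdma/ib_verbs.h>

/*
 * Illustrative only (not part of this patch): ask for packet pacing on an
 * existing raw packet QP.  Assumes the companion IB/core patch that adds
 * IB_QP_RATE_LIMIT and ib_qp_attr.rate_limit is applied.
 */
static int example_set_qp_rate_limit(struct ib_qp *qp, u32 kbps)
{
	struct ib_qp_attr attr = {};

	/* Honored on RTR2RTS/RTS2RTS; the limit is dropped again on RST/ERR. */
	attr.qp_state   = IB_QPS_RTS;
	attr.rate_limit = kbps;		/* assumed Kbit/sec; 0 = unlimited */

	return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_RATE_LIMIT);
}

Userspace reaches the same path through the extended MODIFY_QP uverbs command that the main.c hunk below exposes.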
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 6c194000903d..cda541ced141 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3105,7 +3105,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	dev->ib_dev.uverbs_ex_cmd_mask =
 		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
-		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
+		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
+		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP);
 
 	dev->ib_dev.query_device = mlx5_ib_query_device;
 	dev->ib_dev.query_port = mlx5_ib_query_port;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index df3d6af3f683..ab8961cc8bca 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -389,6 +389,7 @@ struct mlx5_ib_qp {
 	struct list_head qps_list;
 	struct list_head cq_recv_list;
 	struct list_head cq_send_list;
+	u32 rate_limit;
 };
 
 struct mlx5_ib_cq_buf {
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index aa27688f5ae9..a69524fb6032 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -78,12 +78,14 @@ struct mlx5_wqe_eth_pad {
 
 enum raw_qp_set_mask_map {
 	MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID = 1UL << 0,
+	MLX5_RAW_QP_RATE_LIMIT = 1UL << 1,
 };
 
 struct mlx5_modify_raw_qp_param {
 	u16 operation;
 
 	u32 set_mask; /* raw_qp_set_mask_map */
+	u32 rate_limit;
 	u8 rq_q_ctr_id;
 };
 
@@ -2470,8 +2472,14 @@ out:
 }
 
 static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
-				   struct mlx5_ib_sq *sq, int new_state)
+				   struct mlx5_ib_sq *sq,
+				   int new_state,
+				   const struct mlx5_modify_raw_qp_param *raw_qp_param)
 {
+	struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
+	u32 old_rate = ibqp->rate_limit;
+	u32 new_rate = old_rate;
+	u16 rl_index = 0;
 	void *in;
 	void *sqc;
 	int inlen;
@@ -2487,10 +2495,44 @@ static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
 	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
 	MLX5_SET(sqc, sqc, state, new_state);
 
+	if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
+		if (new_state != MLX5_SQC_STATE_RDY)
+			pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
+				__func__);
+		else
+			new_rate = raw_qp_param->rate_limit;
+	}
+
+	if (old_rate != new_rate) {
+		if (new_rate) {
+			err = mlx5_rl_add_rate(dev, new_rate, &rl_index);
+			if (err) {
+				pr_err("Failed configuring rate %u: %d\n",
+				       new_rate, err);
+				goto out;
+			}
+		}
+
+		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
+		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
+	}
+
 	err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
-	if (err)
+	if (err) {
+		/* Remove new rate from table if failed */
+		if (new_rate &&
+		    old_rate != new_rate)
+			mlx5_rl_remove_rate(dev, new_rate);
 		goto out;
+	}
 
+	/* Only remove the old rate after new rate was set */
+	if ((old_rate &&
+	    (old_rate != new_rate)) ||
+	    (new_state != MLX5_SQC_STATE_RDY))
+		mlx5_rl_remove_rate(dev, old_rate);
+
+	ibqp->rate_limit = new_rate;
 	sq->state = new_state;
 
 out:
@@ -2505,6 +2547,8 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
 	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
+	int modify_rq = !!qp->rq.wqe_cnt;
+	int modify_sq = !!qp->sq.wqe_cnt;
 	int rq_state;
 	int sq_state;
 	int err;
@@ -2522,10 +2566,18 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 		rq_state = MLX5_RQC_STATE_RST;
 		sq_state = MLX5_SQC_STATE_RST;
 		break;
-	case MLX5_CMD_OP_INIT2INIT_QP:
-	case MLX5_CMD_OP_INIT2RTR_QP:
 	case MLX5_CMD_OP_RTR2RTS_QP:
 	case MLX5_CMD_OP_RTS2RTS_QP:
+		if (raw_qp_param->set_mask ==
+		    MLX5_RAW_QP_RATE_LIMIT) {
+			modify_rq = 0;
+			sq_state = sq->state;
+		} else {
+			return raw_qp_param->set_mask ? -EINVAL : 0;
+		}
+		break;
+	case MLX5_CMD_OP_INIT2INIT_QP:
+	case MLX5_CMD_OP_INIT2RTR_QP:
 		if (raw_qp_param->set_mask)
 			return -EINVAL;
 		else
@@ -2535,13 +2587,13 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 		return -EINVAL;
 	}
 
-	if (qp->rq.wqe_cnt) {
+	if (modify_rq) {
 		err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
 		if (err)
 			return err;
 	}
 
-	if (qp->sq.wqe_cnt) {
+	if (modify_sq) {
 		if (tx_affinity) {
 			err = modify_raw_packet_tx_affinity(dev->mdev, sq,
 							    tx_affinity);
@@ -2549,7 +2601,7 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 				return err;
 		}
 
-		return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state);
+		return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, raw_qp_param);
 	}
 
 	return 0;
@@ -2804,6 +2856,12 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 			raw_qp_param.rq_q_ctr_id = mibport->q_cnt_id;
 			raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
 		}
+
+		if (attr_mask & IB_QP_RATE_LIMIT) {
+			raw_qp_param.rate_limit = attr->rate_limit;
+			raw_qp_param.set_mask |= MLX5_RAW_QP_RATE_LIMIT;
+		}
+
 		err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
 	} else {
 		err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
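The subtle part of the qp.c change is the ordering in modify_raw_packet_qp_sq(): the new rate is reserved in the device's rate-limit table before the SQ is modified, and the old entry is released only after the modify succeeds (or whenever the SQ leaves RDY), so the SQ never references a rate-limit index that has already been freed. Below is a condensed, hypothetical restatement of that pattern, using the mlx5_rl_add_rate()/mlx5_rl_remove_rate() helpers the patch already calls; the wrapper and its arguments are invented for the sketch and are not part of the patch.

/*
 * Condensed restatement of the ordering in modify_raw_packet_qp_sq()
 * (illustration only; would live next to the code it restates in
 * drivers/infiniband/hw/mlx5/qp.c): reserve the new rate, program its
 * index into the SQ context, modify the SQ, and only then release the
 * old table entry.
 */
static int sq_switch_rate(struct mlx5_core_dev *dev, struct mlx5_ib_qp *ibqp,
			  u32 sqn, void *in, int inlen,
			  int new_state, u32 new_rate)
{
	u32 old_rate = ibqp->rate_limit;
	u16 rl_index = 0;
	int err;

	if (new_rate != old_rate) {
		void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

		if (new_rate) {
			/* Refcounted entry in the device rate-limit table. */
			err = mlx5_rl_add_rate(dev, new_rate, &rl_index);
			if (err)
				return err;
		}
		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
	}

	err = mlx5_core_modify_sq(dev, sqn, in, inlen);
	if (err) {
		/* Undo the reservation; the SQ never switched over. */
		if (new_rate && new_rate != old_rate)
			mlx5_rl_remove_rate(dev, new_rate);
		return err;
	}

	/* Drop the old entry only once the SQ has stopped using it. */
	if ((old_rate && old_rate != new_rate) || new_state != MLX5_SQC_STATE_RDY)
		mlx5_rl_remove_rate(dev, old_rate);

	ibqp->rate_limit = new_rate;
	return 0;
}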