about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorSaeed Mahameed <saeedm@mellanox.com>2016-08-28 18:13:43 -0400
committerDavid S. Miller <davem@davemloft.net>2016-08-28 23:24:15 -0400
commitf2fde18c52a7367a8f6cf6855e2a7174e601c8ee (patch)
treef31bf0d641f5759573ad42b13e24c424a0a4faed
parentfe4c988bdd1cc60402a4e3ca3976a686ea991b5a (diff)
net/mlx5e: Don't wait for RQ completions on close
This will significantly reduce receive queue flush time on interface down. Instead of asking the firmware to flush the RQ (Receive Queue) via asynchronous completions when moved to error, we handle RQ flush manually (mlx5e_free_rx_descs) same as we did when RQ flush got timed out. This will reduce RQs flush time and speedup interface down procedure (ifconfig down) from 6 sec to 0.3 sec on a 48 cores system. Moved mlx5e_free_rx_descs en_main.c where it is needed, to keep en_rx.c free form non critical data path code for better code locality. Fixes: 6cd392a082de ('net/mlx5e: Handle RQ flush in error cases') Signed-off-by: Saeed Mahameed <saeedm@mellanox.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c23
3 files changed, 22 insertions, 42 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index d63a1b8f9c13..26a7ec7073f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -223,9 +223,8 @@ struct mlx5e_tstamp {
223}; 223};
224 224
225enum { 225enum {
226 MLX5E_RQ_STATE_POST_WQES_ENABLE, 226 MLX5E_RQ_STATE_FLUSH,
227 MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, 227 MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
228 MLX5E_RQ_STATE_FLUSH_TIMEOUT,
229 MLX5E_RQ_STATE_AM, 228 MLX5E_RQ_STATE_AM,
230}; 229};
231 230
@@ -703,7 +702,6 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget);
703bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); 702bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
704int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); 703int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
705void mlx5e_free_tx_descs(struct mlx5e_sq *sq); 704void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
706void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
707 705
708void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); 706void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
709void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); 707void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 65360b1f6ee3..2463eba75125 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -431,7 +431,6 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
431 431
432 MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn); 432 MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
433 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); 433 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
434 MLX5_SET(rqc, rqc, flush_in_error_en, 1);
435 MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable); 434 MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable);
436 MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift - 435 MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
437 MLX5_ADAPTER_PAGE_SHIFT); 436 MLX5_ADAPTER_PAGE_SHIFT);
@@ -528,6 +527,23 @@ static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
528 return -ETIMEDOUT; 527 return -ETIMEDOUT;
529} 528}
530 529
530static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
531{
532 struct mlx5_wq_ll *wq = &rq->wq;
533 struct mlx5e_rx_wqe *wqe;
534 __be16 wqe_ix_be;
535 u16 wqe_ix;
536
537 while (!mlx5_wq_ll_is_empty(wq)) {
538 wqe_ix_be = *wq->tail_next;
539 wqe_ix = be16_to_cpu(wqe_ix_be);
540 wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
541 rq->dealloc_wqe(rq, wqe_ix);
542 mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
543 &wqe->next.next_wqe_index);
544 }
545}
546
531static int mlx5e_open_rq(struct mlx5e_channel *c, 547static int mlx5e_open_rq(struct mlx5e_channel *c,
532 struct mlx5e_rq_param *param, 548 struct mlx5e_rq_param *param,
533 struct mlx5e_rq *rq) 549 struct mlx5e_rq *rq)
@@ -551,8 +567,6 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
551 if (param->am_enabled) 567 if (param->am_enabled)
552 set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); 568 set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
553 569
554 set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
555
556 sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP; 570 sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
557 sq->ico_wqe_info[pi].num_wqebbs = 1; 571 sq->ico_wqe_info[pi].num_wqebbs = 1;
558 mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */ 572 mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
@@ -569,23 +583,8 @@ err_destroy_rq:
569 583
570static void mlx5e_close_rq(struct mlx5e_rq *rq) 584static void mlx5e_close_rq(struct mlx5e_rq *rq)
571{ 585{
572 int tout = 0; 586 set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state);
573 int err;
574
575 clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
576 napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */ 587 napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
577
578 err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
579 while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
580 tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
581 msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
582
583 if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
584 set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);
585
586 /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
587 napi_synchronize(&rq->channel->napi);
588
589 cancel_work_sync(&rq->am.work); 588 cancel_work_sync(&rq->am.work);
590 589
591 mlx5e_disable_rq(rq); 590 mlx5e_disable_rq(rq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index bdc9e33a06e4..fee1e47769a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -595,26 +595,9 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
595 wi->free_wqe(rq, wi); 595 wi->free_wqe(rq, wi);
596} 596}
597 597
598void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
599{
600 struct mlx5_wq_ll *wq = &rq->wq;
601 struct mlx5e_rx_wqe *wqe;
602 __be16 wqe_ix_be;
603 u16 wqe_ix;
604
605 while (!mlx5_wq_ll_is_empty(wq)) {
606 wqe_ix_be = *wq->tail_next;
607 wqe_ix = be16_to_cpu(wqe_ix_be);
608 wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
609 rq->dealloc_wqe(rq, wqe_ix);
610 mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
611 &wqe->next.next_wqe_index);
612 }
613}
614
615#define RQ_CANNOT_POST(rq) \ 598#define RQ_CANNOT_POST(rq) \
616 (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \ 599 (test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state) || \
617 test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state)) 600 test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
618 601
619bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) 602bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
620{ 603{
@@ -916,7 +899,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
916 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); 899 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
917 int work_done = 0; 900 int work_done = 0;
918 901
919 if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state))) 902 if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state)))
920 return 0; 903 return 0;
921 904
922 if (cq->decmprs_left) 905 if (cq->decmprs_left)