diff options
author | Tariq Toukan <tariqt@mellanox.com> | 2018-01-04 06:09:15 -0500 |
---|---|---|
committer | Saeed Mahameed <saeedm@mellanox.com> | 2018-03-30 19:55:07 -0400 |
commit | ab966d7e4ff988a48b3ad72e7abf903aa840afd1 (patch) | |
tree | 0dc36aa0c86c03b0588db5df2e0a4e789c9ad529 | |
parent | b8a98a4cf3221d8140969e3f5bde09206a6cb623 (diff) |
net/mlx5e: RX, Recycle buffer of UMR WQEs
Upon a new UMR post, check if the WQE buffer contains
a previous UMR WQE. If so, modify the dynamic fields
instead of a whole WQE overwrite. This saves a memcpy.
In the current setting, after 2 WQ cycles (12 UMR posts),
this will always be the case.
No performance degradation was observed.
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 11 |
1 files changed, 9 insertions, 2 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 682f9ff9da34..176645762e49 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | |||
@@ -365,6 +365,11 @@ static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq) | |||
365 | mlx5_wq_ll_update_db_record(wq); | 365 | mlx5_wq_ll_update_db_record(wq); |
366 | } | 366 | } |
367 | 367 | ||
368 | static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq) | ||
369 | { | ||
370 | return sq->pc >> MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; | ||
371 | } | ||
372 | |||
368 | static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) | 373 | static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) |
369 | { | 374 | { |
370 | struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; | 375 | struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; |
@@ -372,7 +377,6 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) | |||
372 | struct mlx5e_icosq *sq = &rq->channel->icosq; | 377 | struct mlx5e_icosq *sq = &rq->channel->icosq; |
373 | struct mlx5_wq_cyc *wq = &sq->wq; | 378 | struct mlx5_wq_cyc *wq = &sq->wq; |
374 | struct mlx5e_umr_wqe *umr_wqe; | 379 | struct mlx5e_umr_wqe *umr_wqe; |
375 | int cpy = offsetof(struct mlx5e_umr_wqe, inline_mtts); | ||
376 | u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1); | 380 | u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1); |
377 | int err; | 381 | int err; |
378 | u16 pi; | 382 | u16 pi; |
@@ -385,7 +389,10 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) | |||
385 | } | 389 | } |
386 | 390 | ||
387 | umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi); | 391 | umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi); |
388 | memcpy(umr_wqe, &rq->mpwqe.umr_wqe, cpy); | 392 | if (unlikely(mlx5e_icosq_wrap_cnt(sq) < 2)) |
393 | memcpy(umr_wqe, &rq->mpwqe.umr_wqe, | ||
394 | offsetof(struct mlx5e_umr_wqe, inline_mtts)); | ||
395 | |||
389 | for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) { | 396 | for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) { |
390 | err = mlx5e_page_alloc_mapped(rq, dma_info); | 397 | err = mlx5e_page_alloc_mapped(rq, dma_info); |
391 | if (unlikely(err)) | 398 | if (unlikely(err)) |