author		Tariq Toukan <tariqt@mellanox.com>		2017-07-18 05:07:06 -0400
committer	Saeed Mahameed <saeedm@mellanox.com>	2018-03-30 19:16:17 -0400
commit		18187fb2c3c138630f6b7c7b7ba7ab41ccd95129 (patch)
tree		4b9fd2b665e6ae60c83db1c97681d98d55ba3a68
parent		73281b78a37a1a3f392fd5b6116d04e597484529 (diff)
net/mlx5e: Code movements in RX UMR WQE post
Gathers the whole process of posting a UMR WQE into one function,
in preparation for a downstream patch that inlines the WQE data.

No functional change here.
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_rx.c	| 107
1 file changed, 45 insertions(+), 62 deletions(-)
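Two mechanisms in the moved code are worth calling out: the "fill sq edge with nops" loop that the patch moves into mlx5e_alloc_rx_mpwqe() ahead of writing the UMR WQE, and the dma_wmb() that mlx5e_post_rx_mpwqe() issues before the doorbell record update (a sketch of the latter follows the diff). For the former, here is a minimal, self-contained userspace sketch of the edge-padding idea, assuming a power-of-two ring with a free-running producer counter. The names pc, sz_m1 and edge mirror the driver's fields, but struct ring, post_nop(), post_umr(), RING_SIZE and WQE_SLOTS are hypothetical stand-ins, not mlx5 code.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 16                    /* ring entries, power of two */
#define WQE_SLOTS 4                     /* slots one multi-slot WQE occupies */

struct ring {
        uint16_t pc;                    /* free-running producer counter */
        uint16_t sz_m1;                 /* RING_SIZE - 1, used as index mask */
        uint16_t edge;                  /* last index a multi-slot WQE may start at */
        const char *slot[RING_SIZE];    /* stand-in for WQE memory */
};

static void post_nop(struct ring *r, uint16_t pi)
{
        r->slot[pi] = "NOP";            /* a NOP consumes exactly one slot */
        r->pc++;
}

static void post_umr(struct ring *r)
{
        uint16_t pi;

        /* Pad with NOPs until the multi-slot WQE fits without wrapping,
         * mirroring the while loop the patch moves into
         * mlx5e_alloc_rx_mpwqe() (see the diff below).
         */
        while ((pi = (r->pc & r->sz_m1)) > r->edge)
                post_nop(r, pi);

        r->slot[pi] = "UMR";            /* contiguous: never crosses the ring end */
        r->pc += WQE_SLOTS;
}

int main(void)
{
        struct ring r = { .pc = 14, .sz_m1 = RING_SIZE - 1,
                          .edge = RING_SIZE - WQE_SLOTS };

        post_umr(&r);                   /* pads slots 14 and 15, posts at slot 0 */
        printf("UMR landed at slot %u, pc is now %u\n",
               (unsigned)((r.pc - WQE_SLOTS) & r.sz_m1), (unsigned)r.pc);
        return 0;
}

The padding trades a few wasted slots at the end of the ring for the guarantee that a multi-slot WQE is always contiguous in memory, so the device never has to reassemble a descriptor that wraps around the ring boundary.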
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index c0d528f2131b..8aa94d3cff59 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -347,39 +347,44 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev,
         }
 }
 
-static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
+void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
 {
-        struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
-        struct mlx5e_icosq *sq = &rq->channel->icosq;
-        struct mlx5_wq_cyc *wq = &sq->wq;
-        struct mlx5e_umr_wqe *wqe;
-        u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB);
-        u16 pi;
+        int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
+        struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
+        int i;
 
-        /* fill sq edge with nops to avoid wqe wrap around */
-        while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
-                sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
-                mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+        for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
+                page_ref_sub(dma_info->page, pg_strides - wi->skbs_frags[i]);
+                mlx5e_page_release(rq, dma_info, true);
         }
+}
 
-        wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-        memcpy(wqe, &wi->umr.wqe, sizeof(*wqe));
-        wqe->ctrl.opmod_idx_opcode =
-                cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
-                            MLX5_OPCODE_UMR);
+static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
+{
+        struct mlx5_wq_ll *wq = &rq->wq;
+        struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
 
-        sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
-        sq->pc += num_wqebbs;
-        mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
+        rq->mpwqe.umr_in_progress = false;
+
+        mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
+
+        /* ensure wqes are visible to device before updating doorbell record */
+        dma_wmb();
+
+        mlx5_wq_ll_update_db_record(wq);
 }
 
-static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
-                                    u16 ix)
+static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 {
         struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
         int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
         struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
+        struct mlx5e_icosq *sq = &rq->channel->icosq;
+        struct mlx5_wq_cyc *wq = &sq->wq;
+        struct mlx5e_umr_wqe *wqe;
+        u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB);
         int err;
+        u16 pi;
         int i;
 
         for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
@@ -393,6 +398,24 @@ static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
         memset(wi->skbs_frags, 0, sizeof(*wi->skbs_frags) * MLX5_MPWRQ_PAGES_PER_WQE);
         wi->consumed_strides = 0;
 
+        rq->mpwqe.umr_in_progress = true;
+
+        /* fill sq edge with nops to avoid wqe wrap around */
+        while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
+                sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
+                mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+        }
+
+        wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+        memcpy(wqe, &wi->umr.wqe, sizeof(*wqe));
+        wqe->ctrl.opmod_idx_opcode =
+                cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+                            MLX5_OPCODE_UMR);
+
+        sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
+        sq->pc += num_wqebbs;
+        mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
+
         return 0;
 
 err_unmap:
@@ -401,51 +424,11 @@ err_unmap:
                 page_ref_sub(dma_info->page, pg_strides);
                 mlx5e_page_release(rq, dma_info, true);
         }
+        rq->stats.buff_alloc_err++;
 
         return err;
 }
 
-void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
-{
-        int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
-        struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
-        int i;
-
-        for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
-                page_ref_sub(dma_info->page, pg_strides - wi->skbs_frags[i]);
-                mlx5e_page_release(rq, dma_info, true);
-        }
-}
-
-static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
-{
-        struct mlx5_wq_ll *wq = &rq->wq;
-        struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
-
-        rq->mpwqe.umr_in_progress = false;
-
-        mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
-
-        /* ensure wqes are visible to device before updating doorbell record */
-        dma_wmb();
-
-        mlx5_wq_ll_update_db_record(wq);
-}
-
-static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
-{
-        int err;
-
-        err = mlx5e_alloc_rx_umr_mpwqe(rq, ix);
-        if (unlikely(err)) {
-                rq->stats.buff_alloc_err++;
-                return err;
-        }
-        rq->mpwqe.umr_in_progress = true;
-        mlx5e_post_umr_wqe(rq, ix);
-        return 0;
-}
-
 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 {
         struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
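The other mechanism, now grouped into mlx5e_post_rx_mpwqe() above, is the ordering rule around the doorbell: the WQE contents must be visible to the device before the doorbell record that advertises them, which the driver enforces with dma_wmb(). Below is a self-contained userspace sketch of the same publish pattern; atomic_thread_fence(memory_order_release) stands in for the kernel's dma_wmb(), and fake_queue, publish_wqe() and db_record are hypothetical names, not mlx5 API.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct fake_queue {
        uint32_t wqe[64];               /* stand-in descriptor ring */
        _Atomic uint32_t db_record;     /* stand-in doorbell record */
};

static void publish_wqe(struct fake_queue *q, uint32_t head, uint32_t desc)
{
        q->wqe[head & 63] = desc;       /* 1. write the descriptor */

        /* 2. order the descriptor write before the doorbell write;
         * the driver uses dma_wmb() at this point in
         * mlx5e_post_rx_mpwqe() above.
         */
        atomic_thread_fence(memory_order_release);

        /* 3. publish: the consumer polls db_record to find new work */
        atomic_store_explicit(&q->db_record, head + 1, memory_order_relaxed);
}

int main(void)
{
        static struct fake_queue q;

        publish_wqe(&q, 0, 0xabcd);
        printf("doorbell record: %u\n", (unsigned)atomic_load(&q.db_record));
        return 0;
}

In the real driver the consumer is the NIC reading the doorbell record from DMA-coherent memory over PCIe, so the barrier must order writes as observed by the device rather than by another CPU, which is exactly what dma_wmb() provides.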