about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    Inbar Karmy <inbark@mellanox.com>    2017-10-15 10:30:59 -0400
committer Saeed Mahameed <saeedm@mellanox.com> 2017-11-10 01:39:21 -0500
commit    2e50b2619538ea0224c037f6fa746023089e0654 (patch)
tree      17bd1896764b82285889c2c0efca9df0dc24efad
parent    2a8d6065e7b90ad9d5540650944d802b0f86bdfe (diff)
net/mlx5e: Set page to null in case dma mapping fails
Currently, when dma mapping fails, put_page is called, but the page is
not set to null. Later, in the page_reuse treatment in
mlx5e_free_rx_descs(), mlx5e_page_release() is called for the second
time, improperly doing dma_unmap (for a non-mapped address) and an
extra put_page. Prevent this by nullifying the page pointer when
dma_map fails.

Fixes: accd58833237 ("net/mlx5e: Introduce RX Page-Reuse")
Signed-off-by: Inbar Karmy <inbark@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Cc: kernel-team@fb.com
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c  12
1 file changed, 5 insertions, 7 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 15a1687483cc..91b1b0938931 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -215,22 +215,20 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
 static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
 					  struct mlx5e_dma_info *dma_info)
 {
-	struct page *page;
-
 	if (mlx5e_rx_cache_get(rq, dma_info))
 		return 0;
 
-	page = dev_alloc_pages(rq->buff.page_order);
-	if (unlikely(!page))
+	dma_info->page = dev_alloc_pages(rq->buff.page_order);
+	if (unlikely(!dma_info->page))
 		return -ENOMEM;
 
-	dma_info->addr = dma_map_page(rq->pdev, page, 0,
+	dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
 				      RQ_PAGE_SIZE(rq), rq->buff.map_dir);
 	if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
-		put_page(page);
+		put_page(dma_info->page);
+		dma_info->page = NULL;
 		return -ENOMEM;
 	}
-	dma_info->page = page;
 
 	return 0;
 }