aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSaeed Mahameed <saeedm@mellanox.com>2018-07-15 06:54:39 -0400
committerDavid S. Miller <davem@davemloft.net>2018-07-16 17:05:25 -0400
commit432e629e56432064761be63bcd5e263c0920430d (patch)
tree240179174a971852df78f7c74080ab9233b4b1e2
parentc133459765fae249ba482f62e12f987aec4376f0 (diff)
net/mlx4_en: Don't reuse RX page when XDP is set
When a new rx packet arrives, the rx path will decide whether to reuse the remainder of the page or not according to one of the below conditions: 1. frag_info->frag_stride == PAGE_SIZE / 2 2. frags->page_offset + frag_info->frag_size > PAGE_SIZE; The first condition is not met when XDP is set. For XDP, page_offset is always set to priv->rx_headroom which is XDP_PACKET_HEADROOM and frag_info->frag_size is around mtu size + some padding; the 2nd release condition will not hold either, since XDP_PACKET_HEADROOM + 1536 < PAGE_SIZE. As a result the page will not be released and will be _wrongly_ reused for the next free rx descriptor. In XDP there is an assumption of one page per packet; reuse can break that assumption and might cause packet data corruptions. Fix this by adding an extra condition (!priv->rx_headroom) to the 2nd case to avoid page reuse when XDP is set, since rx_headroom is set to 0 for non XDP setup and set to XDP_PACKET_HEADROOM for XDP setup. No additional cache line is required for the new condition. Fixes: 34db548bfb95 ("mlx4: add page recycling in receive path") Signed-off-by: Saeed Mahameed <saeedm@mellanox.com> Signed-off-by: Tariq Toukan <tariqt@mellanox.com> Suggested-by: Martin KaFai Lau <kafai@fb.com> CC: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c8
1 file changed, 6 insertions, 2 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9f54ccbddea7..3360f7b9ee73 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -474,10 +474,10 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
474{ 474{
475 const struct mlx4_en_frag_info *frag_info = priv->frag_info; 475 const struct mlx4_en_frag_info *frag_info = priv->frag_info;
476 unsigned int truesize = 0; 476 unsigned int truesize = 0;
477 bool release = true;
477 int nr, frag_size; 478 int nr, frag_size;
478 struct page *page; 479 struct page *page;
479 dma_addr_t dma; 480 dma_addr_t dma;
480 bool release;
481 481
482 /* Collect used fragments while replacing them in the HW descriptors */ 482 /* Collect used fragments while replacing them in the HW descriptors */
483 for (nr = 0;; frags++) { 483 for (nr = 0;; frags++) {
@@ -500,7 +500,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
500 release = page_count(page) != 1 || 500 release = page_count(page) != 1 ||
501 page_is_pfmemalloc(page) || 501 page_is_pfmemalloc(page) ||
502 page_to_nid(page) != numa_mem_id(); 502 page_to_nid(page) != numa_mem_id();
503 } else { 503 } else if (!priv->rx_headroom) {
504 /* rx_headroom for non XDP setup is always 0.
505 * When XDP is set, the above condition will
506 * guarantee page is always released.
507 */
504 u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES); 508 u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);
505 509
506 frags->page_offset += sz_align; 510 frags->page_offset += sz_align;