 include/net/page_pool.h |  5 +++--
 include/net/xdp.h       |  1 +
 kernel/bpf/cpumap.c     |  2 +-
 kernel/bpf/devmap.c     |  2 +-
 net/core/xdp.c          | 20 ++++++++++++++++----
 5 files changed, 22 insertions(+), 8 deletions(-)
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index c79087153148..694d055e01ef 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -115,13 +115,14 @@ void page_pool_destroy(struct page_pool *pool);
 void __page_pool_put_page(struct page_pool *pool,
 			  struct page *page, bool allow_direct);
 
-static inline void page_pool_put_page(struct page_pool *pool, struct page *page)
+static inline void page_pool_put_page(struct page_pool *pool,
+				      struct page *page, bool allow_direct)
 {
 	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
 	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
 	 */
 #ifdef CONFIG_PAGE_POOL
-	__page_pool_put_page(pool, page, false);
+	__page_pool_put_page(pool, page, allow_direct);
 #endif
 }
 /* Very limited use-cases allow recycle direct */
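
The page_pool.h hunk above hands the allow_direct choice over to the caller of page_pool_put_page(). As a hedged sketch of how a caller is expected to use the new parameter (the mydrv_* names and struct are illustrative assumptions, not part of this patch):

#include <net/page_pool.h>

struct mydrv_rx_ring {			/* hypothetical driver state */
	struct page_pool *page_pool;
};

/* Drop path running inside this ring's own NAPI poll handler; that
 * ownership is what makes allow_direct=true legal here.
 */
static void mydrv_rx_drop(struct mydrv_rx_ring *ring, struct page *page)
{
	page_pool_put_page(ring->page_pool, page, true);
}

/* Same pool, but called from process context or another CPU: direct
 * recycling would race with the NAPI owner, so pass false.
 */
static void mydrv_deferred_drop(struct mydrv_rx_ring *ring, struct page *page)
{
	page_pool_put_page(ring->page_pool, page, false);
}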
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 0b689cf561c7..7ad779237ae8 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -104,6 +104,7 @@ struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
 }
 
 void xdp_return_frame(struct xdp_frame *xdpf);
+void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
 void xdp_return_buff(struct xdp_buff *xdp);
 
 int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index c95b04ec103e..e0918d180f08 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -578,7 +578,7 @@ static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu, | |||
578 | err = __ptr_ring_produce(q, xdpf); | 578 | err = __ptr_ring_produce(q, xdpf); |
579 | if (err) { | 579 | if (err) { |
580 | drops++; | 580 | drops++; |
581 | xdp_return_frame(xdpf); | 581 | xdp_return_frame_rx_napi(xdpf); |
582 | } | 582 | } |
583 | processed++; | 583 | processed++; |
584 | } | 584 | } |
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index ff2f3bf59f2f..a9cd5c93dd2b 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -239,7 +239,7 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj,
 		err = dev->netdev_ops->ndo_xdp_xmit(dev, xdpf);
 		if (err) {
 			drops++;
-			xdp_return_frame(xdpf);
+			xdp_return_frame_rx_napi(xdpf);
 		} else {
 			sent++;
 		}
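
Both converted call sites, cpumap's bq_flush_to_queue() above and devmap's bq_xmit_all() here, are driven from the map flush at the end of the RX NAPI poll that queued the frames, so the NAPI protection still holds when the drop happens. A minimal sketch of the contract a caller must satisfy (the helper and its in_napi flag are illustrative, not a kernel API):

/* Hypothetical drop helper: pick the return variant by context. */
static void drop_frame(struct xdp_frame *xdpf, bool in_napi)
{
	if (in_napi)
		/* Still under the RX NAPI protection that produced the
		 * frame: the page may be recycled directly.
		 */
		xdp_return_frame_rx_napi(xdpf);
	else
		/* Any other context (kthread, timer, another CPU):
		 * only the indirect return is safe.
		 */
		xdp_return_frame(xdpf);
}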
diff --git a/net/core/xdp.c b/net/core/xdp.c
index bf6758f74339..cb8c4e061a5a 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -308,7 +308,13 @@ err:
 }
 EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
 
-static void xdp_return(void *data, struct xdp_mem_info *mem)
+/* XDP RX runs under NAPI protection, and in different delivery error
+ * scenarios (e.g. queue full), it is possible to return the xdp_frame
+ * while still leveraging this protection. The @napi_direct boolean
+ * is used for those call sites, allowing for faster recycling of
+ * xdp_frames/pages in those cases.
+ */
+static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
 {
 	struct xdp_mem_allocator *xa;
 	struct page *page;
@@ -320,7 +326,7 @@ static void xdp_return(void *data, struct xdp_mem_info *mem)
 		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
 		page = virt_to_head_page(data);
 		if (xa)
-			page_pool_put_page(xa->page_pool, page);
+			page_pool_put_page(xa->page_pool, page, napi_direct);
 		else
 			put_page(page);
 		rcu_read_unlock();
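
For MEM_TYPE_PAGE_POOL the flag is simply forwarded into the pool. Paraphrased, the recycle decision behind __page_pool_put_page() then looks roughly like the sketch below; the two recycle_* helpers are placeholders standing in for the real net/core/page_pool.c internals:

/* Rough paraphrase, not the actual page_pool implementation. */
static void pool_put_sketch(struct page_pool *pool,
			    struct page *page, bool allow_direct)
{
	if (allow_direct && in_serving_softirq()) {
		/* Caller vouched for NAPI/softirq context: try the
		 * lockless per-pool cache first (no atomics).
		 */
		if (recycle_into_napi_cache(pool, page))	/* placeholder */
			return;
	}
	/* Shared slow path: producer/consumer ring, falling back to
	 * the page allocator when the ring is full.
	 */
	recycle_into_ring_or_free(pool, page);			/* placeholder */
}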
@@ -340,12 +346,18 @@ static void xdp_return(void *data, struct xdp_mem_info *mem)
 
 void xdp_return_frame(struct xdp_frame *xdpf)
 {
-	xdp_return(xdpf->data, &xdpf->mem);
+	__xdp_return(xdpf->data, &xdpf->mem, false);
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame);
 
+void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
+{
+	__xdp_return(xdpf->data, &xdpf->mem, true);
+}
+EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
+
 void xdp_return_buff(struct xdp_buff *xdp)
 {
-	xdp_return(xdp->data, &xdp->rxq->mem);
+	__xdp_return(xdp->data, &xdp->rxq->mem, true);
 }
 EXPORT_SYMBOL_GPL(xdp_return_buff);
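
Note the asymmetry in the final hunk: xdp_return_buff() now always passes napi_direct=true, while xdp_return_frame() keeps false. An xdp_buff only exists while its packet is being processed inside the RX NAPI poll, so the direct path is always safe for it; an xdp_frame can outlive that context, e.g. sit on a cpumap ptr_ring and be freed later by the remote CPU's kthread, where direct recycling would race. A hypothetical driver-side view (the function name is illustrative):

/* Hypothetical XDP_DROP handling inside a driver's poll loop. */
static void mydrv_xdp_drop(struct xdp_buff *xdp)
{
	/* Always inside the packet's own RX NAPI processing here,
	 * so xdp_return_buff() may unconditionally use the
	 * direct-recycle fast path.
	 */
	xdp_return_buff(xdp);
}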