diff options
author | Björn Töpel <bjorn.topel@intel.com> | 2018-06-04 08:05:54 -0400 |
---|---|---|
committer | Daniel Borkmann <daniel@iogearbox.net> | 2018-06-05 09:46:26 -0400 |
commit | 02b55e5657c3a569fc681ba851e464cfa6b90d4f (patch) | |
tree | 6d7665f8ddb9640c9aade3e3cbf3cb5b1e099c60 | |
parent | 74515c5750f30244a901c3c0c82a2fe534b3c9c5 (diff) |
xdp: add MEM_TYPE_ZERO_COPY
Here, a new type of allocator support is added to the XDP return
API. A zero-copy allocated xdp_buff cannot be converted to an
xdp_frame. Instead, the buff has to be copied. This is not supported
at all in this commit.
Also, an opaque "handle" is added to xdp_buff. This can be used as a
context for the zero-copy allocator implementation.
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
-rw-r--r-- | include/net/xdp.h | 10 | ||||
-rw-r--r-- | net/core/xdp.c | 19 |
2 files changed, 24 insertions, 5 deletions
diff --git a/include/net/xdp.h b/include/net/xdp.h index a3b71a4dd71d..2deea7166a34 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h | |||
@@ -37,6 +37,7 @@ enum xdp_mem_type { | |||
37 | MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */ | 37 | MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */ |
38 | MEM_TYPE_PAGE_ORDER0, /* Orig XDP full page model */ | 38 | MEM_TYPE_PAGE_ORDER0, /* Orig XDP full page model */ |
39 | MEM_TYPE_PAGE_POOL, | 39 | MEM_TYPE_PAGE_POOL, |
40 | MEM_TYPE_ZERO_COPY, | ||
40 | MEM_TYPE_MAX, | 41 | MEM_TYPE_MAX, |
41 | }; | 42 | }; |
42 | 43 | ||
@@ -51,6 +52,10 @@ struct xdp_mem_info { | |||
51 | 52 | ||
52 | struct page_pool; | 53 | struct page_pool; |
53 | 54 | ||
55 | struct zero_copy_allocator { | ||
56 | void (*free)(struct zero_copy_allocator *zca, unsigned long handle); | ||
57 | }; | ||
58 | |||
54 | struct xdp_rxq_info { | 59 | struct xdp_rxq_info { |
55 | struct net_device *dev; | 60 | struct net_device *dev; |
56 | u32 queue_index; | 61 | u32 queue_index; |
@@ -63,6 +68,7 @@ struct xdp_buff { | |||
63 | void *data_end; | 68 | void *data_end; |
64 | void *data_meta; | 69 | void *data_meta; |
65 | void *data_hard_start; | 70 | void *data_hard_start; |
71 | unsigned long handle; | ||
66 | struct xdp_rxq_info *rxq; | 72 | struct xdp_rxq_info *rxq; |
67 | }; | 73 | }; |
68 | 74 | ||
@@ -86,6 +92,10 @@ struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp) | |||
86 | int metasize; | 92 | int metasize; |
87 | int headroom; | 93 | int headroom; |
88 | 94 | ||
95 | /* TODO: implement clone, copy, use "native" MEM_TYPE */ | ||
96 | if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) | ||
97 | return NULL; | ||
98 | |||
89 | /* Assure headroom is available for storing info */ | 99 | /* Assure headroom is available for storing info */ |
90 | headroom = xdp->data - xdp->data_hard_start; | 100 | headroom = xdp->data - xdp->data_hard_start; |
91 | metasize = xdp->data - xdp->data_meta; | 101 | metasize = xdp->data - xdp->data_meta; |
diff --git a/net/core/xdp.c b/net/core/xdp.c index cb8c4e061a5a..9d1f22072d5d 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c | |||
@@ -31,6 +31,7 @@ struct xdp_mem_allocator { | |||
31 | union { | 31 | union { |
32 | void *allocator; | 32 | void *allocator; |
33 | struct page_pool *page_pool; | 33 | struct page_pool *page_pool; |
34 | struct zero_copy_allocator *zc_alloc; | ||
34 | }; | 35 | }; |
35 | struct rhash_head node; | 36 | struct rhash_head node; |
36 | struct rcu_head rcu; | 37 | struct rcu_head rcu; |
@@ -261,7 +262,7 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq, | |||
261 | xdp_rxq->mem.type = type; | 262 | xdp_rxq->mem.type = type; |
262 | 263 | ||
263 | if (!allocator) { | 264 | if (!allocator) { |
264 | if (type == MEM_TYPE_PAGE_POOL) | 265 | if (type == MEM_TYPE_PAGE_POOL || type == MEM_TYPE_ZERO_COPY) |
265 | return -EINVAL; /* Setup time check page_pool req */ | 266 | return -EINVAL; /* Setup time check page_pool req */ |
266 | return 0; | 267 | return 0; |
267 | } | 268 | } |
@@ -314,7 +315,8 @@ EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model); | |||
314 | * is used for those calls sites. Thus, allowing for faster recycling | 315 | * is used for those calls sites. Thus, allowing for faster recycling |
315 | * of xdp_frames/pages in those cases. | 316 | * of xdp_frames/pages in those cases. |
316 | */ | 317 | */ |
317 | static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct) | 318 | static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct, |
319 | unsigned long handle) | ||
318 | { | 320 | { |
319 | struct xdp_mem_allocator *xa; | 321 | struct xdp_mem_allocator *xa; |
320 | struct page *page; | 322 | struct page *page; |
@@ -338,6 +340,13 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct) | |||
338 | page = virt_to_page(data); /* Assumes order0 page*/ | 340 | page = virt_to_page(data); /* Assumes order0 page*/ |
339 | put_page(page); | 341 | put_page(page); |
340 | break; | 342 | break; |
343 | case MEM_TYPE_ZERO_COPY: | ||
344 | /* NB! Only valid from an xdp_buff! */ | ||
345 | rcu_read_lock(); | ||
346 | /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */ | ||
347 | xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); | ||
348 | xa->zc_alloc->free(xa->zc_alloc, handle); | ||
349 | rcu_read_unlock(); | ||
341 | default: | 350 | default: |
342 | /* Not possible, checked in xdp_rxq_info_reg_mem_model() */ | 351 | /* Not possible, checked in xdp_rxq_info_reg_mem_model() */ |
343 | break; | 352 | break; |
@@ -346,18 +355,18 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct) | |||
346 | 355 | ||
347 | void xdp_return_frame(struct xdp_frame *xdpf) | 356 | void xdp_return_frame(struct xdp_frame *xdpf) |
348 | { | 357 | { |
349 | __xdp_return(xdpf->data, &xdpf->mem, false); | 358 | __xdp_return(xdpf->data, &xdpf->mem, false, 0); |
350 | } | 359 | } |
351 | EXPORT_SYMBOL_GPL(xdp_return_frame); | 360 | EXPORT_SYMBOL_GPL(xdp_return_frame); |
352 | 361 | ||
353 | void xdp_return_frame_rx_napi(struct xdp_frame *xdpf) | 362 | void xdp_return_frame_rx_napi(struct xdp_frame *xdpf) |
354 | { | 363 | { |
355 | __xdp_return(xdpf->data, &xdpf->mem, true); | 364 | __xdp_return(xdpf->data, &xdpf->mem, true, 0); |
356 | } | 365 | } |
357 | EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi); | 366 | EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi); |
358 | 367 | ||
359 | void xdp_return_buff(struct xdp_buff *xdp) | 368 | void xdp_return_buff(struct xdp_buff *xdp) |
360 | { | 369 | { |
361 | __xdp_return(xdp->data, &xdp->rxq->mem, true); | 370 | __xdp_return(xdp->data, &xdp->rxq->mem, true, xdp->handle); |
362 | } | 371 | } |
363 | EXPORT_SYMBOL_GPL(xdp_return_buff); | 372 | EXPORT_SYMBOL_GPL(xdp_return_buff); |