author     Jesper Dangaard Brouer <brouer@redhat.com>    2018-05-24 10:45:57 -0400
committer  Alexei Starovoitov <ast@kernel.org>           2018-05-24 21:36:15 -0400
commit     38edddb81172e8b8decb057c0cd23271583a5fa0
tree       444ebbb5e686501073401c5836712b819ef03e10  /kernel/bpf/devmap.c
parent     5d053f9da4311a86bc58be8588bb5660fb3f0724
xdp: add tracepoint for devmap like cpumap have
Notice how this allows us to get XDP statistics without affecting XDP
performance, as the tracepoint is no longer activated on a per-packet basis.
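To make the performance claim concrete, here is a minimal userspace sketch (an illustration, not kernel code; BULK_SIZE, NFRAMES and emit_trace() are invented names standing in for DEV_MAP_BULK_SIZE and trace_xdp_devmap_xmit()) of why firing one event per bulk flush instead of one per packet cuts the number of tracepoint invocations by up to the bulk size:

```c
#include <stdio.h>

#define BULK_SIZE 16        /* mirrors DEV_MAP_BULK_SIZE in the patch */
#define NFRAMES   1000000   /* frames "redirected" in this simulation */

static unsigned long trace_calls;

/* Stand-in for trace_xdp_devmap_xmit(): one call per bulk flush. */
static void emit_trace(int sent, int drops)
{
	trace_calls++;
	(void)sent;
	(void)drops;
}

int main(void)
{
	int count = 0, sent = 0, drops = 0;

	for (long i = 0; i < NFRAMES; i++) {
		/* a per-packet scheme would trace here, once per frame */
		sent++;
		if (++count == BULK_SIZE) {      /* bulk full: flush */
			emit_trace(sent, drops); /* one event per flush */
			count = sent = drops = 0;
		}
	}
	if (count)                               /* final partial flush */
		emit_trace(sent, drops);

	printf("per-packet events: %d, per-flush events: %lu\n",
	       NFRAMES, trace_calls);
	return 0;
}
```

With these numbers the sketch reports 1000000 per-packet events against 62500 per-flush events, the ~16x reduction the bulk size allows.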
V5: Spotted by John Fastabend.
Fixed 'sent' also counting 'drops' in this patch; a later patch corrected
this, but it was still a mistake in this intermediate step.
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'kernel/bpf/devmap.c')
-rw-r--r--  kernel/bpf/devmap.c | 27 +++++++++++++++++++++----
1 file changed, 23 insertions(+), 4 deletions(-)
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 15293b9dfb77..ff2f3bf59f2f 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -58,6 +58,7 @@
 #define DEV_MAP_BULK_SIZE 16
 struct xdp_bulk_queue {
 	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
+	struct net_device *dev_rx;
 	unsigned int count;
 };
 
@@ -219,6 +220,7 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj,
 			struct xdp_bulk_queue *bq)
 {
 	struct net_device *dev = obj->dev;
+	int sent = 0, drops = 0;
 	int i;
 
 	if (unlikely(!bq->count))
@@ -235,11 +237,18 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj,
 		int err;
 
 		err = dev->netdev_ops->ndo_xdp_xmit(dev, xdpf);
-		if (err)
+		if (err) {
+			drops++;
 			xdp_return_frame(xdpf);
+		} else {
+			sent++;
+		}
 	}
 	bq->count = 0;
 
+	trace_xdp_devmap_xmit(&obj->dtab->map, obj->bit,
+			      sent, drops, bq->dev_rx, dev);
+	bq->dev_rx = NULL;
 	return 0;
 }
 
@@ -296,18 +305,28 @@ struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
 /* Runs under RCU-read-side, plus in softirq under NAPI protection.
  * Thus, safe percpu variable access.
  */
-static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
+static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
+		      struct net_device *dev_rx)
+
 {
 	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
 
 	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
 		bq_xmit_all(obj, bq);
 
+	/* Ingress dev_rx will be the same for all xdp_frame's in
+	 * bulk_queue, because bq stored per-CPU and must be flushed
+	 * from net_device drivers NAPI func end.
+	 */
+	if (!bq->dev_rx)
+		bq->dev_rx = dev_rx;
+
 	bq->q[bq->count++] = xdpf;
 	return 0;
 }
 
-int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp)
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
+		    struct net_device *dev_rx)
 {
 	struct net_device *dev = dst->dev;
 	struct xdp_frame *xdpf;
@@ -319,7 +338,7 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp)
 	if (unlikely(!xdpf))
 		return -EOVERFLOW;
 
-	return bq_enqueue(dst, xdpf);
+	return bq_enqueue(dst, xdpf, dev_rx);
 }
 
 static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
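The comment added in bq_enqueue() above leans on two properties: the bulk queue is per-CPU, and it is flushed before the driver's NAPI poll returns, so every frame in one bulk shares the same ingress device and recording dev_rx once per bulk is enough. Below is a hedged, userspace-only model of that bookkeeping (struct bulk_queue, the device-name strings, and the every-second-frame drop pattern are invented for illustration; this is not the kernel code path). It captures dev_rx on the first enqueue, prints one aggregated sent/drops line per flush in place of trace_xdp_devmap_xmit(), and clears dev_rx so the next poll starts fresh:

```c
#include <stdio.h>
#include <stddef.h>

#define DEV_MAP_BULK_SIZE 16

struct frame { int id; };

struct bulk_queue {
	struct frame *q[DEV_MAP_BULK_SIZE];
	const char *dev_rx;		/* ingress device, set on first enqueue */
	unsigned int count;
};

/* Flush the bulk: aggregate counters, emit one "tracepoint", reset dev_rx. */
static void bq_xmit_all(struct bulk_queue *bq, const char *dev_tx)
{
	int sent = 0, drops = 0;

	if (!bq->count)
		return;
	for (unsigned int i = 0; i < bq->count; i++) {
		if (bq->q[i]->id % 2)	/* pretend every second frame fails */
			drops++;
		else
			sent++;
	}
	bq->count = 0;
	printf("xmit: from=%s to=%s sent=%d drops=%d\n",
	       bq->dev_rx, dev_tx, sent, drops);
	bq->dev_rx = NULL;		/* next bulk records its own ingress dev */
}

static void bq_enqueue(struct bulk_queue *bq, struct frame *f,
		       const char *dev_rx, const char *dev_tx)
{
	if (bq->count == DEV_MAP_BULK_SIZE)
		bq_xmit_all(bq, dev_tx);
	if (!bq->dev_rx)		/* same ingress dev for the whole bulk */
		bq->dev_rx = dev_rx;
	bq->q[bq->count++] = f;
}

int main(void)
{
	struct bulk_queue bq = { .dev_rx = NULL, .count = 0 };
	struct frame frames[40];

	/* one simulated NAPI poll on ingress device "eth0", egress "eth1" */
	for (int i = 0; i < 40; i++) {
		frames[i].id = i;
		bq_enqueue(&bq, &frames[i], "eth0", "eth1");
	}
	bq_xmit_all(&bq, "eth1");	/* flush at "NAPI end" */
	return 0;
}
```

Running it prints three aggregated lines (two full bulks of 16 and a final partial bulk of 8) rather than 40 per-frame events, mirroring what the new trace_xdp_devmap_xmit() call reports per flush.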