author		Jesper Dangaard Brouer <brouer@redhat.com>	2018-05-31 05:00:23 -0400
committer	Alexei Starovoitov <ast@kernel.org>		2018-06-03 11:11:35 -0400
commit		c1ece6b245bd12a57124da78abafbf8a511394d6 (patch)
tree		98d1d3475a3b6ca67e1f6eaa65fb4774fa1950cd
parent		1e67575a5840908e33502b210a22509fe5d6ca53 (diff)
bpf/xdp: devmap can avoid calling ndo_xdp_flush
The XDP_REDIRECT map type devmap can avoid using ndo_xdp_flush by
instead instructing ndo_xdp_xmit to flush, via the XDP_XMIT_FLUSH
flag, at the appropriate call sites.
Note that after this patch it is possible to remove ndo_xdp_flush
completely, as this was its last user. That cleanup is left for later
patches, to keep the driver changes separate.
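
To illustrate the scheme, here is a minimal userspace sketch (not
kernel code; every identifier below is an invented stand-in for the
devmap internals): mid-batch drains pass flags=0, and only the final
drain pays for the flush, which previously cost a second indirect
call through ndo_xdp_flush.

  /* Standalone model of "bulk xmit with deferred flush via flag". */
  #include <stdio.h>

  #define XDP_XMIT_FLUSH  (1U << 0)  /* same intent as the kernel flag */
  #define BULK_SIZE       16         /* stand-in for DEV_MAP_BULK_SIZE */

  struct bulk_queue {
  	int frames[BULK_SIZE];
  	unsigned int count;
  };

  /* Stand-in for ndo_xdp_xmit(): send queued frames, and only ring
   * the HW doorbell when the caller set XDP_XMIT_FLUSH. */
  static int fake_xdp_xmit(struct bulk_queue *bq, unsigned int flags)
  {
  	unsigned int sent = bq->count;

  	bq->count = 0;
  	printf("xmit %u frames%s\n", sent,
  	       (flags & XDP_XMIT_FLUSH) ? ", then flush" : "");
  	return (int)sent;
  }

  /* Stand-in for bq_enqueue(): mid-batch drains pass flags=0, so no
   * doorbell is rung until the flush point. */
  static void enqueue(struct bulk_queue *bq, int frame)
  {
  	if (bq->count == BULK_SIZE)
  		fake_xdp_xmit(bq, 0);
  	bq->frames[bq->count++] = frame;
  }

  int main(void)
  {
  	struct bulk_queue bq = { .count = 0 };

  	for (int i = 0; i < 40; i++)
  		enqueue(&bq, i);

  	/* Flush point (cf. __dev_map_flush): drain and flush in one
  	 * call, saving the extra ndo_xdp_flush() indirect call. */
  	fake_xdp_xmit(&bq, XDP_XMIT_FLUSH);
  	return 0;
  }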
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-rw-r--r--	kernel/bpf/devmap.c	19
1 file changed, 6 insertions(+), 13 deletions(-)
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 037e234056f7..a7cc7b3494a9 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -217,7 +217,7 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
 }
 
 static int bq_xmit_all(struct bpf_dtab_netdev *obj,
-		       struct xdp_bulk_queue *bq)
+		       struct xdp_bulk_queue *bq, u32 flags)
 {
 	struct net_device *dev = obj->dev;
 	int sent = 0, drops = 0, err = 0;
@@ -232,7 +232,7 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj,
 		prefetch(xdpf);
 	}
 
-	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, 0);
+	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
 	if (sent < 0) {
 		err = sent;
 		sent = 0;
@@ -276,7 +276,6 @@ void __dev_map_flush(struct bpf_map *map)
 	for_each_set_bit(bit, bitmap, map->max_entries) {
 		struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
 		struct xdp_bulk_queue *bq;
-		struct net_device *netdev;
 
 		/* This is possible if the dev entry is removed by user space
 		 * between xdp redirect and flush op.
@@ -287,10 +286,7 @@ void __dev_map_flush(struct bpf_map *map)
 		__clear_bit(bit, bitmap);
 
 		bq = this_cpu_ptr(dev->bulkq);
-		bq_xmit_all(dev, bq);
-		netdev = dev->dev;
-		if (likely(netdev->netdev_ops->ndo_xdp_flush))
-			netdev->netdev_ops->ndo_xdp_flush(netdev);
+		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
 	}
 }
 
@@ -320,7 +316,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
 	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
 
 	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
-		bq_xmit_all(obj, bq);
+		bq_xmit_all(obj, bq, 0);
 
 	/* Ingress dev_rx will be the same for all xdp_frame's in
 	 * bulk_queue, because bq stored per-CPU and must be flushed
@@ -359,8 +355,7 @@ static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
 
 static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 {
-	if (dev->dev->netdev_ops->ndo_xdp_flush) {
-		struct net_device *fl = dev->dev;
+	if (dev->dev->netdev_ops->ndo_xdp_xmit) {
 		struct xdp_bulk_queue *bq;
 		unsigned long *bitmap;
 
@@ -371,9 +366,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 		__clear_bit(dev->bit, bitmap);
 
 		bq = per_cpu_ptr(dev->bulkq, cpu);
-		bq_xmit_all(dev, bq);
-
-		fl->netdev_ops->ndo_xdp_flush(dev->dev);
+		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
 		}
 	}
 }
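
For completeness, this is the driver-side shape the flag enables: once
ndo_xdp_xmit honors XDP_XMIT_FLUSH, the old ndo_xdp_flush body folds
into its tail. The sketch below is for a hypothetical driver, not code
from this series; the drv_* types and helpers are invented names.

  /* Hypothetical driver: drv_ring, drv_tx_ring(), drv_queue_frame()
   * and drv_kick_doorbell() are illustrative, not a real driver's API. */
  static int drv_ndo_xdp_xmit(struct net_device *dev, int n,
  			    struct xdp_frame **frames, u32 flags)
  {
  	struct drv_ring *ring = drv_tx_ring(dev);
  	int i, drops = 0;

  	if (unlikely(flags & ~XDP_XMIT_FLUSH))
  		return -EINVAL;

  	for (i = 0; i < n; i++) {
  		if (drv_queue_frame(ring, frames[i])) {
  			xdp_return_frame_rx_napi(frames[i]);
  			drops++;
  		}
  	}

  	/* What used to be the ndo_xdp_flush() body: only ring the HW
  	 * doorbell when the caller asked for a flush. */
  	if (flags & XDP_XMIT_FLUSH)
  		drv_kick_doorbell(ring);

  	return n - drops;
  }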