author		Jesper Dangaard Brouer <brouer@redhat.com>	2018-08-08 17:00:45 -0400
committer	Daniel Borkmann <daniel@iogearbox.net>	2018-08-09 15:50:44 -0400
commit		1bf9116d0866a649104a5dfa008c302ad54d1e02 (patch)
tree		e758a1cbc47dcaff5e9c13ee2b336dd48994c881 /kernel/bpf/devmap.c
parent		37d7ff25957e05860e068a986209fee128db574a (diff)
xdp: fix bug in devmap teardown code path
Like the cpumap teardown, the devmap teardown code also flushes remaining
xdp_frames via bq_xmit_all() when a map entry is removed. That code could
end up calling xdp_return_frame_rx_napi() from the wrong context, in case
ndo_xdp_xmit() fails.

Fixes: 389ab7f01af9 ("xdp: introduce xdp_return_frame_rx_napi")
Fixes: 735fc4054b3a ("xdp: change ndo_xdp_xmit API to support bulking")
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Diffstat (limited to 'kernel/bpf/devmap.c')
 kernel/bpf/devmap.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)
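The essence of the fix, visible in the diff below, is choosing the frame-return
helper based on calling context. As a minimal standalone sketch of that pattern
(bq_return_frames() is a hypothetical illustration, not a function in this
patch; xdp_return_frame() and xdp_return_frame_rx_napi() are the real helpers):

/* Hypothetical helper sketching the pattern this patch applies:
 * xdp_return_frame_rx_napi() is only safe while running under NAPI
 * protection on the RX core; callers outside that context (e.g.
 * map-entry teardown) must use plain xdp_return_frame().
 */
static void bq_return_frames(struct xdp_bulk_queue *bq, bool in_napi_ctx)
{
	int i;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		if (likely(in_napi_ctx))
			xdp_return_frame_rx_napi(xdpf); /* NAPI bulk-free fast path */
		else
			xdp_return_frame(xdpf);		/* safe in any context */
	}
	bq->count = 0;
}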
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index d361fc1e3bf3..750d45edae79 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -217,7 +217,8 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
 }
 
 static int bq_xmit_all(struct bpf_dtab_netdev *obj,
-		       struct xdp_bulk_queue *bq, u32 flags)
+		       struct xdp_bulk_queue *bq, u32 flags,
+		       bool in_napi_ctx)
 {
 	struct net_device *dev = obj->dev;
 	int sent = 0, drops = 0, err = 0;
@@ -254,7 +255,10 @@ error:
 		struct xdp_frame *xdpf = bq->q[i];
 
 		/* RX path under NAPI protection, can return frames faster */
-		xdp_return_frame_rx_napi(xdpf);
+		if (likely(in_napi_ctx))
+			xdp_return_frame_rx_napi(xdpf);
+		else
+			xdp_return_frame(xdpf);
 		drops++;
 	}
 	goto out;
@@ -286,7 +290,7 @@ void __dev_map_flush(struct bpf_map *map)
 		__clear_bit(bit, bitmap);
 
 		bq = this_cpu_ptr(dev->bulkq);
-		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
 	}
 }
 
@@ -316,7 +320,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
 	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
 
 	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
-		bq_xmit_all(obj, bq, 0);
+		bq_xmit_all(obj, bq, 0, true);
 
 	/* Ingress dev_rx will be the same for all xdp_frame's in
 	 * bulk_queue, because bq stored per-CPU and must be flushed
@@ -385,7 +389,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 			__clear_bit(dev->bit, bitmap);
 
 			bq = per_cpu_ptr(dev->bulkq, cpu);
-			bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+			bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
 		}
 	}
 }
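A note on the design choice (my reading of the patch, not stated in the commit
message): bq_xmit_all() does not attempt to detect its execution context itself.
Instead, each caller, which knows whether it runs from the NAPI RX path
(__dev_map_flush(), bq_enqueue()) or from teardown (dev_map_flush_old()),
passes that information down explicitly as the new in_napi_ctx argument. This
matches the earlier cpumap teardown fix that the commit message references.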