diff options
Diffstat (limited to 'kernel/bpf/devmap.c')
-rw-r--r-- | kernel/bpf/devmap.c | 9 |
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 15dbc15c5b0c..cd8297b3bdb9 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c | |||
@@ -178,6 +178,7 @@ static void dev_map_free(struct bpf_map *map) | |||
178 | if (!dev) | 178 | if (!dev) |
179 | continue; | 179 | continue; |
180 | 180 | ||
181 | free_percpu(dev->bulkq); | ||
181 | dev_put(dev->dev); | 182 | dev_put(dev->dev); |
182 | kfree(dev); | 183 | kfree(dev); |
183 | } | 184 | } |
@@ -273,6 +274,7 @@ void __dev_map_flush(struct bpf_map *map) | |||
273 | unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed); | 274 | unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed); |
274 | u32 bit; | 275 | u32 bit; |
275 | 276 | ||
277 | rcu_read_lock(); | ||
276 | for_each_set_bit(bit, bitmap, map->max_entries) { | 278 | for_each_set_bit(bit, bitmap, map->max_entries) { |
277 | struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]); | 279 | struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]); |
278 | struct xdp_bulk_queue *bq; | 280 | struct xdp_bulk_queue *bq; |
@@ -283,11 +285,12 @@ void __dev_map_flush(struct bpf_map *map) | |||
283 | if (unlikely(!dev)) | 285 | if (unlikely(!dev)) |
284 | continue; | 286 | continue; |
285 | 287 | ||
286 | __clear_bit(bit, bitmap); | ||
287 | |||
288 | bq = this_cpu_ptr(dev->bulkq); | 288 | bq = this_cpu_ptr(dev->bulkq); |
289 | bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true); | 289 | bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true); |
290 | |||
291 | __clear_bit(bit, bitmap); | ||
290 | } | 292 | } |
293 | rcu_read_unlock(); | ||
291 | } | 294 | } |
292 | 295 | ||
293 | /* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or | 296 | /* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or |
@@ -380,6 +383,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev) | |||
380 | 383 | ||
381 | int cpu; | 384 | int cpu; |
382 | 385 | ||
386 | rcu_read_lock(); | ||
383 | for_each_online_cpu(cpu) { | 387 | for_each_online_cpu(cpu) { |
384 | bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu); | 388 | bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu); |
385 | __clear_bit(dev->bit, bitmap); | 389 | __clear_bit(dev->bit, bitmap); |
@@ -387,6 +391,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev) | |||
387 | bq = per_cpu_ptr(dev->bulkq, cpu); | 391 | bq = per_cpu_ptr(dev->bulkq, cpu); |
388 | bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false); | 392 | bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false); |
389 | } | 393 | } |
394 | rcu_read_unlock(); | ||
390 | } | 395 | } |
391 | } | 396 | } |
392 | 397 | ||