author     Jens Axboe <axboe@kernel.dk>  2019-07-01 10:16:08 -0400
committer  Jens Axboe <axboe@kernel.dk>  2019-07-01 10:16:08 -0400
commit     5be1f9d82fa73c199ebeee2866dbac83e419c897 (patch)
tree       aed1aec34f40b5e0f36dceea8b58d9cfdb41d233 /kernel/bpf/devmap.c
parent     ff91064ea37c8323eba31cc3d2e22464f107b50d (diff)
parent     4b972a01a7da614b4796475f933094751a295a2f (diff)
Merge tag 'v5.2-rc6' into for-5.3/block
Merge 5.2-rc6 into for-5.3/block, so we get the same page merge leak
fix. Otherwise we end up having conflicts with future patches between
for-5.3/block and master that touch this area. In particular, it makes
the bio_full() fix hard to backport to stable.
* tag 'v5.2-rc6': (482 commits)
Linux 5.2-rc6
Revert "iommu/vt-d: Fix lock inversion between iommu->lock and device_domain_lock"
Bluetooth: Fix regression with minimum encryption key size alignment
tcp: refine memory limit test in tcp_fragment()
x86/vdso: Prevent segfaults due to hoisted vclock reads
SUNRPC: Fix a credential refcount leak
Revert "SUNRPC: Declare RPC timers as TIMER_DEFERRABLE"
net :sunrpc :clnt :Fix xps refcount imbalance on the error path
NFS4: Only set creation opendata if O_CREAT
ARM: 8867/1: vdso: pass --be8 to linker if necessary
KVM: nVMX: reorganize initial steps of vmx_set_nested_state
KVM: PPC: Book3S HV: Invalidate ERAT when flushing guest TLB entries
habanalabs: use u64_to_user_ptr() for reading user pointers
nfsd: replace Jeff by Chuck as nfsd co-maintainer
inet: clear num_timeout reqsk_alloc()
PCI/P2PDMA: Ignore root complex whitelist when an IOMMU is present
net: mvpp2: debugfs: Add pmap to fs dump
ipv6: Default fib6_type to RTN_UNICAST when not set
net: hns3: Fix inconsistent indenting
net/af_iucv: always register net_device notifier
...
Diffstat (limited to 'kernel/bpf/devmap.c')
-rw-r--r--  kernel/bpf/devmap.c  9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 15dbc15c5b0c..cd8297b3bdb9 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -178,6 +178,7 @@ static void dev_map_free(struct bpf_map *map)
 		if (!dev)
 			continue;
 
+		free_percpu(dev->bulkq);
 		dev_put(dev->dev);
 		kfree(dev);
 	}
@@ -273,6 +274,7 @@ void __dev_map_flush(struct bpf_map *map)
 	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
 	u32 bit;
 
+	rcu_read_lock();
 	for_each_set_bit(bit, bitmap, map->max_entries) {
 		struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
 		struct xdp_bulk_queue *bq;
@@ -283,11 +285,12 @@ void __dev_map_flush(struct bpf_map *map)
 		if (unlikely(!dev))
 			continue;
 
-		__clear_bit(bit, bitmap);
-
 		bq = this_cpu_ptr(dev->bulkq);
 		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
+
+		__clear_bit(bit, bitmap);
 	}
+	rcu_read_unlock();
 }
 
 /* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
@@ -380,6 +383,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 
 		int cpu;
 
+		rcu_read_lock();
 		for_each_online_cpu(cpu) {
 			bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu);
 			__clear_bit(dev->bit, bitmap);
@@ -387,6 +391,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 		bq = per_cpu_ptr(dev->bulkq, cpu);
 		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
 	}
+	rcu_read_unlock();
 }
 }
 
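For reference, the sketch below pieces together how __dev_map_flush() reads once the hunks above are applied, using only the context and added lines of the diff. The initial dtab declaration is not visible in the hunks and is assumed here to come from container_of() on the embedded map, so treat that line as illustrative rather than quoted from the file.

void __dev_map_flush(struct bpf_map *map)
{
	/* Assumed: this declaration is not shown in the hunk context above. */
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
	u32 bit;

	/* Added by this change: the whole flush walk now runs under the RCU
	 * read-side lock.
	 */
	rcu_read_lock();
	for_each_set_bit(bit, bitmap, map->max_entries) {
		struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
		struct xdp_bulk_queue *bq;

		if (unlikely(!dev))
			continue;

		bq = this_cpu_ptr(dev->bulkq);
		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);

		/* Moved below bq_xmit_all(): the flush bit is now cleared only
		 * after the per-CPU bulk queue has actually been drained.
		 */
		__clear_bit(bit, bitmap);
	}
	rcu_read_unlock();
}

The ordering change is the visible point: the flush bit is cleared only after bq_xmit_all() has drained the per-CPU bulk queue, and the walk is bracketed by rcu_read_lock()/rcu_read_unlock() (dev_map_flush_old gains the same bracketing); together with the new free_percpu(dev->bulkq) call in dev_map_free(), the per-CPU bulk queues are released rather than leaked when the map is torn down.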