about summary refs log tree commit diff stats
path: root/kernel/bpf/devmap.c
diff options
context:
space:
mode:
author: David S. Miller <davem@davemloft.net> 2019-10-26 21:30:55 -0400
committer: David S. Miller <davem@davemloft.net> 2019-10-26 21:30:55 -0400
commit: 1a51a47491a5a23f0625b03ad6dc84cf39bf6a82 (patch)
tree: d2089bedb4a4ff10a9f03c197a6e78966dfbb7a4 /kernel/bpf/devmap.c
parent: 45f338069941f799ecc22e5a51b423da0b32459d (diff)
parent: 2afd23f78f39da84937006ecd24aa664a4ab052b (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says: ==================== pull-request: bpf 2019-10-27 The following pull-request contains BPF updates for your *net* tree. We've added 7 non-merge commits during the last 11 day(s) which contain a total of 7 files changed, 66 insertions(+), 16 deletions(-). The main changes are: 1) Fix two use-after-free bugs in relation to RCU in jited symbol exposure to kallsyms, from Daniel Borkmann. 2) Fix NULL pointer dereference in AF_XDP rx-only sockets, from Magnus Karlsson. 3) Fix hang in netdev unregister for hash based devmap as well as another overflow bug on 32 bit archs in memlock cost calculation, from Toke Høiland-Jørgensen. 4) Fix wrong memory access in LWT BPF programs on reroute due to invalid dst. Also fix BPF selftests to use more compatible nc options, from Jiri Benc. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel/bpf/devmap.c')
-rw-r--r-- kernel/bpf/devmap.c | 33
1 file changed, 32 insertions(+), 1 deletion(-)
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index d27f3b60ff6d..3867864cdc2f 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -128,7 +128,7 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
128 128
129 if (!dtab->n_buckets) /* Overflow check */ 129 if (!dtab->n_buckets) /* Overflow check */
130 return -EINVAL; 130 return -EINVAL;
131 cost += sizeof(struct hlist_head) * dtab->n_buckets; 131 cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
132 } 132 }
133 133
134 /* if map size is larger than memlock limit, reject it */ 134 /* if map size is larger than memlock limit, reject it */
@@ -719,6 +719,32 @@ const struct bpf_map_ops dev_map_hash_ops = {
719 .map_check_btf = map_check_no_btf, 719 .map_check_btf = map_check_no_btf,
720}; 720};
721 721
722static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
723 struct net_device *netdev)
724{
725 unsigned long flags;
726 u32 i;
727
728 spin_lock_irqsave(&dtab->index_lock, flags);
729 for (i = 0; i < dtab->n_buckets; i++) {
730 struct bpf_dtab_netdev *dev;
731 struct hlist_head *head;
732 struct hlist_node *next;
733
734 head = dev_map_index_hash(dtab, i);
735
736 hlist_for_each_entry_safe(dev, next, head, index_hlist) {
737 if (netdev != dev->dev)
738 continue;
739
740 dtab->items--;
741 hlist_del_rcu(&dev->index_hlist);
742 call_rcu(&dev->rcu, __dev_map_entry_free);
743 }
744 }
745 spin_unlock_irqrestore(&dtab->index_lock, flags);
746}
747
722static int dev_map_notification(struct notifier_block *notifier, 748static int dev_map_notification(struct notifier_block *notifier,
723 ulong event, void *ptr) 749 ulong event, void *ptr)
724{ 750{
@@ -735,6 +761,11 @@ static int dev_map_notification(struct notifier_block *notifier,
735 */ 761 */
736 rcu_read_lock(); 762 rcu_read_lock();
737 list_for_each_entry_rcu(dtab, &dev_map_list, list) { 763 list_for_each_entry_rcu(dtab, &dev_map_list, list) {
764 if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
765 dev_map_hash_remove_netdev(dtab, netdev);
766 continue;
767 }
768
738 for (i = 0; i < dtab->map.max_entries; i++) { 769 for (i = 0; i < dtab->map.max_entries; i++) {
739 struct bpf_dtab_netdev *dev, *odev; 770 struct bpf_dtab_netdev *dev, *odev;
740 771