Diffstat (limited to 'kernel/bpf')
-rw-r--r--  kernel/bpf/btf.c              3
-rw-r--r--  kernel/bpf/cgroup.c           2
-rw-r--r--  kernel/bpf/hashtab.c          4
-rw-r--r--  kernel/bpf/percpu_freelist.c  41
-rw-r--r--  kernel/bpf/percpu_freelist.h  4
-rw-r--r--  kernel/bpf/syscall.c          12
6 files changed, 48 insertions, 18 deletions
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 7019c1f05cab..bd3921b1514b 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -1530,7 +1530,8 @@ static int btf_modifier_resolve(struct btf_verifier_env *env,
 
 	/* "typedef void new_void", "const void"...etc */
 	if (!btf_type_is_void(next_type) &&
-	    !btf_type_is_fwd(next_type)) {
+	    !btf_type_is_fwd(next_type) &&
+	    !btf_type_is_func_proto(next_type)) {
 		btf_verifier_log_type(env, v->t, "Invalid type_id");
 		return -EINVAL;
 	}
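The added condition means btf_modifier_resolve() no longer rejects a modifier (typedef, const, volatile) whose referenced type is a function prototype. A typedef of a C function type is what produces such a TYPEDEF -> FUNC_PROTO chain in BTF; the sketch below is purely illustrative and every name in it is invented.

/* Illustration only: a typedef whose target is a function type.  In BTF this
 * is a TYPEDEF (a modifier) pointing at a FUNC_PROTO, the chain that the
 * relaxed check above now accepts.  All identifiers are hypothetical.
 */
typedef int pkt_handler_t(void *ctx, int len);	/* TYPEDEF -> FUNC_PROTO */

static int drop_short(void *ctx, int len)
{
	(void)ctx;
	return len < 64 ? -1 : 0;
}

static int run(pkt_handler_t *h, void *ctx, int len)
{
	return h(ctx, len);			/* call through the typedef'd type */
}

int main(void)
{
	return run(drop_short, (void *)0, 128);	/* returns 0 */
}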
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index d78cfec5807d..4e807973aa80 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -573,7 +573,7 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
 	bpf_compute_and_save_data_end(skb, &saved_data_end);
 
 	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
-				 bpf_prog_run_save_cb);
+				 __bpf_prog_run_save_cb);
 	bpf_restore_data_end(skb, saved_data_end);
 	__skb_pull(skb, offset);
 	skb->sk = save_sk;
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 937776531998..fed15cf94dca 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -686,7 +686,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 	}
 
 	if (htab_is_prealloc(htab)) {
-		pcpu_freelist_push(&htab->freelist, &l->fnode);
+		__pcpu_freelist_push(&htab->freelist, &l->fnode);
 	} else {
 		atomic_dec(&htab->count);
 		l->htab = htab;
@@ -739,7 +739,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 		} else {
 			struct pcpu_freelist_node *l;
 
-			l = pcpu_freelist_pop(&htab->freelist);
+			l = __pcpu_freelist_pop(&htab->freelist);
 			if (!l)
 				return ERR_PTR(-E2BIG);
 			l_new = container_of(l, struct htab_elem, fnode);
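These call sites switch to the lock-only freelist variants; per the contract added in percpu_freelist.h below ("caller must disable irqs"), that presumably works because these hashtab paths already run with interrupts off, although that context is not visible in the hunk itself. The pop path also shows the embedded-node pattern: the element is recovered from the freelist node with container_of(). A self-contained userspace sketch of that pattern, with a simplified stand-in for the kernel macro and invented struct names:

/* Illustration only (userspace C, not kernel code): recovering the struct
 * that embeds a freelist node, as "container_of(l, struct htab_elem, fnode)"
 * does in the hunk above.  container_of() here is a minimal stand-in for the
 * kernel macro; struct names are invented.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct freelist_node {
	struct freelist_node *next;
};

struct elem {
	int key;
	struct freelist_node fnode;	/* embedded node, like htab_elem::fnode */
};

int main(void)
{
	struct elem e = { .key = 42 };
	struct freelist_node *n = &e.fnode;	/* what a freelist pop hands back */

	/* Walk back from the embedded member to its containing element. */
	struct elem *back = container_of(n, struct elem, fnode);

	printf("key = %d\n", back->key);	/* prints: key = 42 */
	return back == &e ? 0 : 1;
}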
diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
index 673fa6fe2d73..0c1b4ba9e90e 100644
--- a/kernel/bpf/percpu_freelist.c
+++ b/kernel/bpf/percpu_freelist.c
@@ -28,8 +28,8 @@ void pcpu_freelist_destroy(struct pcpu_freelist *s)
 	free_percpu(s->freelist);
 }
 
-static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
+static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
 					struct pcpu_freelist_node *node)
 {
 	raw_spin_lock(&head->lock);
 	node->next = head->first;
@@ -37,12 +37,22 @@ static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
 	raw_spin_unlock(&head->lock);
 }
 
-void pcpu_freelist_push(struct pcpu_freelist *s,
+void __pcpu_freelist_push(struct pcpu_freelist *s,
 			struct pcpu_freelist_node *node)
 {
 	struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
 
-	__pcpu_freelist_push(head, node);
+	___pcpu_freelist_push(head, node);
+}
+
+void pcpu_freelist_push(struct pcpu_freelist *s,
+			struct pcpu_freelist_node *node)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__pcpu_freelist_push(s, node);
+	local_irq_restore(flags);
 }
 
 void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
@@ -63,7 +73,7 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
 	for_each_possible_cpu(cpu) {
 again:
 		head = per_cpu_ptr(s->freelist, cpu);
-		__pcpu_freelist_push(head, buf);
+		___pcpu_freelist_push(head, buf);
 		i++;
 		buf += elem_size;
 		if (i == nr_elems)
@@ -74,14 +84,12 @@ again:
 	local_irq_restore(flags);
 }
 
-struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
+struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
 {
 	struct pcpu_freelist_head *head;
 	struct pcpu_freelist_node *node;
-	unsigned long flags;
 	int orig_cpu, cpu;
 
-	local_irq_save(flags);
 	orig_cpu = cpu = raw_smp_processor_id();
 	while (1) {
 		head = per_cpu_ptr(s->freelist, cpu);
@@ -89,16 +97,25 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
 		node = head->first;
 		if (node) {
 			head->first = node->next;
-			raw_spin_unlock_irqrestore(&head->lock, flags);
+			raw_spin_unlock(&head->lock);
 			return node;
 		}
 		raw_spin_unlock(&head->lock);
 		cpu = cpumask_next(cpu, cpu_possible_mask);
 		if (cpu >= nr_cpu_ids)
 			cpu = 0;
-		if (cpu == orig_cpu) {
-			local_irq_restore(flags);
+		if (cpu == orig_cpu)
 			return NULL;
-		}
 	}
 }
+
+struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
+{
+	struct pcpu_freelist_node *ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = __pcpu_freelist_pop(s);
+	local_irq_restore(flags);
+	return ret;
+}
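The net effect in this file is a tiered API: ___pcpu_freelist_push() only takes one head's raw spinlock, __pcpu_freelist_push()/__pcpu_freelist_pop() leave interrupt state alone (the header below documents that the caller must disable irqs), and the plain-named pcpu_freelist_push()/pop() wrap them in local_irq_save()/local_irq_restore(). Below is a rough userspace analogy of that split; the thread-local flag standing in for "interrupts disabled" and all names are invented for illustration, this is not kernel code.

/* Userspace analogy: a bare helper that requires the caller to already be in
 * the "irqs off" section, plus a wrapper that establishes that section
 * itself, mirroring __pcpu_freelist_push() vs pcpu_freelist_push() above.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; };

static pthread_mutex_t head_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;
static _Thread_local int irqs_disabled;	/* stand-in for local_irq_save() state */

/* Bare variant: takes the lock only; caller must have irqs_disabled set. */
static void __push(struct node *n)
{
	assert(irqs_disabled);
	pthread_mutex_lock(&head_lock);
	n->next = head;
	head = n;
	pthread_mutex_unlock(&head_lock);
}

/* Full-service wrapper: establishes the context, then delegates. */
static void push(struct node *n)
{
	irqs_disabled = 1;	/* local_irq_save(flags); */
	__push(n);
	irqs_disabled = 0;	/* local_irq_restore(flags); */
}

int main(void)
{
	static struct node a, b;

	push(&a);		/* ordinary caller */

	irqs_disabled = 1;	/* caller already in an irq-off section ... */
	__push(&b);		/* ... calls the bare variant, no re-save */
	irqs_disabled = 0;

	printf("top of list: %s\n", head == &b ? "b" : "a");
	return 0;
}

The hashtab.c hunks above use the bare variants in the second way, presumably because those call sites already run with interrupts disabled; the diff itself only shows the rename, not that surrounding context.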
diff --git a/kernel/bpf/percpu_freelist.h b/kernel/bpf/percpu_freelist.h
index 3049aae8ea1e..c3960118e617 100644
--- a/kernel/bpf/percpu_freelist.h
+++ b/kernel/bpf/percpu_freelist.h
@@ -22,8 +22,12 @@ struct pcpu_freelist_node {
 	struct pcpu_freelist_node *next;
 };
 
+/* pcpu_freelist_* do spin_lock_irqsave. */
 void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
 struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *);
+/* __pcpu_freelist_* do spin_lock only. caller must disable irqs. */
+void __pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
+struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *);
 void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
 			    u32 nr_elems);
 int pcpu_freelist_init(struct pcpu_freelist *);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 0834958f1dc4..ec7c552af76b 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -740,8 +740,13 @@ static int map_lookup_elem(union bpf_attr *attr)
 
 	if (bpf_map_is_dev_bound(map)) {
 		err = bpf_map_offload_lookup_elem(map, key, value);
-	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
-		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
+		goto done;
+	}
+
+	preempt_disable();
+	this_cpu_inc(bpf_prog_active);
+	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 		err = bpf_percpu_hash_copy(map, key, value);
 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 		err = bpf_percpu_array_copy(map, key, value);
@@ -777,7 +782,10 @@ static int map_lookup_elem(union bpf_attr *attr)
 		}
 		rcu_read_unlock();
 	}
+	this_cpu_dec(bpf_prog_active);
+	preempt_enable();
 
+done:
 	if (err)
 		goto free_value;
 
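These two hunks restructure map_lookup_elem() so the device-offload path jumps straight to the common exit via "goto done", while every other lookup runs between preempt_disable()/this_cpu_inc(bpf_prog_active) and the matching dec/enable pair. A stripped-down model of just that control flow, in plain C with invented helpers and a thread-local counter standing in for the per-cpu variable:

/* Control-flow sketch only: an early path that bypasses the bracketed
 * section with "goto done", and a common path that bumps an "active"
 * counter around the lookup, as the hunks above do with
 * preempt_disable()/this_cpu_inc(bpf_prog_active).  All helpers are
 * hypothetical.
 */
#include <stdio.h>

static _Thread_local int prog_active;	/* stand-in for bpf_prog_active */

static int offload_lookup(int key, int *value) { *value = key * 2; return 0; }
static int percpu_lookup(int key, int *value)  { *value = key + 1; return 0; }

static int map_lookup(int dev_bound, int key, int *value)
{
	int err;

	if (dev_bound) {
		err = offload_lookup(key, value);
		goto done;		/* skip the bracketed section */
	}

	prog_active++;			/* this_cpu_inc(bpf_prog_active); */
	err = percpu_lookup(key, value);
	prog_active--;			/* this_cpu_dec(bpf_prog_active); */

done:
	return err;
}

int main(void)
{
	int v;

	if (map_lookup(0, 3, &v) == 0)
		printf("regular lookup: %d (active back to %d)\n", v, prog_active);
	if (map_lookup(1, 3, &v) == 0)
		printf("offload lookup: %d\n", v);
	return 0;
}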
