author     David S. Miller <davem@davemloft.net>  2018-01-16 22:42:14 -0500
committer  David S. Miller <davem@davemloft.net>  2018-01-16 22:42:14 -0500
commit     7018d1b3f20fb4308ed9bc577160cb8ffb79b62a (patch)
tree       b61a17c694d3cdc3490b190c35104b936bcc6638 /kernel/bpf/syscall.c
parent     e7e70fa6784b48a811fdd4253c41fc7195300570 (diff)
parent     e8a9d9683c8a62f917c19e57f1618363fb9ed04e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2018-01-17

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) Add initial BPF map offloading for the nfp driver. So far only
   programs could be offloaded, without being able to access maps.
   Offloaded programs are right now only allowed to perform map
   lookups, and the control path is responsible for populating the
   maps. BPF core infrastructure along with the nfp implementation
   is provided, from Jakub.

2) Various follow-ups to Josef's BPF error injections. More
   specifically, this includes: properly checking whether the error
   injectable event is on function entry or not, removing the per-CPU
   bpf_kprobe_override and instead comparing the instruction pointer
   with the original one, separating error-injection from kprobes
   since it's not limited to them, adding injectable error types in
   order to specify the expected type of failure, and last but not
   least, supporting the kernel's fault injection framework, all from
   Masami.

3) Various misc improvements and cleanups to the libbpf Makefile:
   fix permissions when installing BPF header files, remove unused
   variables and functions, and also install the libbpf.h header,
   from Jesper.

4) When offloading to the nfp JIT and a BPF insn is unsupported by
   the JIT, reject it right at verification time. Also fix libbpf's
   ELF section name matching by properly treating the program type
   as a prefix. Both from Quentin.

5) Add -DPACKAGE to bpftool when including bfd.h for the
   disassembler. This is needed, for example, when building libbfd
   from source, as bpftool doesn't supply a config.h for bfd.h.
   Fix from Jiong.

6) Simplify xdp_convert_ctx_access() since it doesn't need to set
   the target size during verification, from Jesper.

7) Let bpftool properly recognize BPF_PROG_TYPE_CGROUP_DEVICE
   program types, from Roman.

8) Make various functions in the BPF cpumap static, as they were
   not declared static before, from Wei.

9) Fix a double semicolon in BPF samples, from Luis.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
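For illustration (a sketch, not part of this commit): with the map_ifindex attribute merged here, the control path requests a device-bound map simply by filling that field at BPF_MAP_CREATE time; find_and_alloc_map() in the diff below then routes allocation to bpf_map_offload_ops. This assumes a v4.16+ uapi header that has map_ifindex and an offload-capable nfp netdev; the map parameters are arbitrary placeholders.

  /* Sketch: create a device-bound hash map by setting map_ifindex.
   * Requires uapi headers with the map_ifindex field and an
   * offload-capable device behind the given netdev name.
   */
  #include <linux/bpf.h>
  #include <net/if.h>
  #include <string.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static int create_offloaded_map(const char *netdev)
  {
          union bpf_attr attr;

          memset(&attr, 0, sizeof(attr));
          attr.map_type    = BPF_MAP_TYPE_HASH;
          attr.key_size    = 4;
          attr.value_size  = 8;
          attr.max_entries = 256;
          /* Non-zero ifindex routes alloc to bpf_map_offload_ops. */
          attr.map_ifindex = if_nametoindex(netdev);

          return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
  }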
Diffstat (limited to 'kernel/bpf/syscall.c')
-rw-r--r--  kernel/bpf/syscall.c | 71
1 file changed, 61 insertions(+), 10 deletions(-)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 2bac0dc8baba..c691b9e972e3 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -94,18 +94,34 @@ static int check_uarg_tail_zero(void __user *uaddr,
 	return 0;
 }
 
+const struct bpf_map_ops bpf_map_offload_ops = {
+	.map_alloc = bpf_map_offload_map_alloc,
+	.map_free = bpf_map_offload_map_free,
+};
+
 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 {
+	const struct bpf_map_ops *ops;
 	struct bpf_map *map;
+	int err;
 
-	if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
-	    !bpf_map_types[attr->map_type])
+	if (attr->map_type >= ARRAY_SIZE(bpf_map_types))
+		return ERR_PTR(-EINVAL);
+	ops = bpf_map_types[attr->map_type];
+	if (!ops)
 		return ERR_PTR(-EINVAL);
 
-	map = bpf_map_types[attr->map_type]->map_alloc(attr);
+	if (ops->map_alloc_check) {
+		err = ops->map_alloc_check(attr);
+		if (err)
+			return ERR_PTR(err);
+	}
+	if (attr->map_ifindex)
+		ops = &bpf_map_offload_ops;
+	map = ops->map_alloc(attr);
 	if (IS_ERR(map))
 		return map;
-	map->ops = bpf_map_types[attr->map_type];
+	map->ops = ops;
 	map->map_type = attr->map_type;
 	return map;
 }
@@ -134,6 +150,16 @@ void bpf_map_area_free(void *area)
 	kvfree(area);
 }
 
+void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
+{
+	map->map_type = attr->map_type;
+	map->key_size = attr->key_size;
+	map->value_size = attr->value_size;
+	map->max_entries = attr->max_entries;
+	map->map_flags = attr->map_flags;
+	map->numa_node = bpf_map_attr_numa_node(attr);
+}
+
 int bpf_map_precharge_memlock(u32 pages)
 {
 	struct user_struct *user = get_current_user();
@@ -189,16 +215,25 @@ static int bpf_map_alloc_id(struct bpf_map *map)
 	return id > 0 ? 0 : id;
 }
 
-static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
+void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
 {
 	unsigned long flags;
 
+	/* Offloaded maps are removed from the IDR store when their device
+	 * disappears - even if someone holds an fd to them they are unusable,
+	 * the memory is gone, all ops will fail; they are simply waiting for
+	 * refcnt to drop to be freed.
+	 */
+	if (!map->id)
+		return;
+
 	if (do_idr_lock)
 		spin_lock_irqsave(&map_idr_lock, flags);
 	else
 		__acquire(&map_idr_lock);
 
 	idr_remove(&map_idr, map->id);
+	map->id = 0;
 
 	if (do_idr_lock)
 		spin_unlock_irqrestore(&map_idr_lock, flags);
@@ -378,7 +413,7 @@ static int bpf_obj_name_cpy(char *dst, const char *src)
 	return 0;
 }
 
-#define BPF_MAP_CREATE_LAST_FIELD map_name
+#define BPF_MAP_CREATE_LAST_FIELD map_ifindex
 /* called via syscall */
 static int map_create(union bpf_attr *attr)
 {
@@ -566,8 +601,10 @@ static int map_lookup_elem(union bpf_attr *attr)
 	if (!value)
 		goto free_key;
 
-	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
-	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
+	if (bpf_map_is_dev_bound(map)) {
+		err = bpf_map_offload_lookup_elem(map, key, value);
+	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 		err = bpf_percpu_hash_copy(map, key, value);
 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 		err = bpf_percpu_array_copy(map, key, value);
@@ -654,7 +691,10 @@ static int map_update_elem(union bpf_attr *attr)
 		goto free_value;
 
 	/* Need to create a kthread, thus must support schedule */
-	if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
+	if (bpf_map_is_dev_bound(map)) {
+		err = bpf_map_offload_update_elem(map, key, value, attr->flags);
+		goto out;
+	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
 		err = map->ops->map_update_elem(map, key, value, attr->flags);
 		goto out;
 	}
@@ -731,6 +771,11 @@ static int map_delete_elem(union bpf_attr *attr)
 		goto err_put;
 	}
 
+	if (bpf_map_is_dev_bound(map)) {
+		err = bpf_map_offload_delete_elem(map, key);
+		goto out;
+	}
+
 	preempt_disable();
 	__this_cpu_inc(bpf_prog_active);
 	rcu_read_lock();
@@ -738,7 +783,7 @@ static int map_delete_elem(union bpf_attr *attr)
 	rcu_read_unlock();
 	__this_cpu_dec(bpf_prog_active);
 	preempt_enable();
-
+out:
 	if (!err)
 		trace_bpf_map_delete_elem(map, ufd, key);
 	kfree(key);
@@ -788,9 +833,15 @@ static int map_get_next_key(union bpf_attr *attr)
 	if (!next_key)
 		goto free_key;
 
+	if (bpf_map_is_dev_bound(map)) {
+		err = bpf_map_offload_get_next_key(map, key, next_key);
+		goto out;
+	}
+
 	rcu_read_lock();
 	err = map->ops->map_get_next_key(map, key, next_key);
 	rcu_read_unlock();
+out:
 	if (err)
 		goto free_next_key;
 
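Follow-up usage note (a sketch, not from this commit): on a map fd created with map_ifindex set, the ordinary BPF_MAP_UPDATE_ELEM and BPF_MAP_LOOKUP_ELEM commands are what land in the bpf_map_is_dev_bound() branches added above, dispatching to bpf_map_offload_update_elem() and bpf_map_offload_lookup_elem() instead of the RCU-protected in-kernel path. Wrapper names below are hypothetical; key/value shapes must match whatever the map was created with.

  /* Sketch: control-path update + lookup on an offloaded map fd.
   * On a dev-bound map these syscalls hit the offload helpers
   * shown in the diff above.
   */
  #include <linux/bpf.h>
  #include <stdint.h>
  #include <string.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static uint64_t ptr_to_u64(const void *p)
  {
          return (uint64_t)(unsigned long)p;
  }

  static int map_update(int fd, const void *key, const void *value)
  {
          union bpf_attr attr;

          memset(&attr, 0, sizeof(attr));
          attr.map_fd = fd;
          attr.key    = ptr_to_u64(key);
          attr.value  = ptr_to_u64(value);
          attr.flags  = BPF_ANY;	/* create or update the element */

          return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
  }

  static int map_lookup(int fd, const void *key, void *value)
  {
          union bpf_attr attr;

          memset(&attr, 0, sizeof(attr));
          attr.map_fd = fd;
          attr.key    = ptr_to_u64(key);
          attr.value  = ptr_to_u64(value);

          return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
  }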