author	Alexei Starovoitov <ast@fb.com>	2016-03-08 00:57:13 -0500
committer	David S. Miller <davem@davemloft.net>	2016-03-08 15:28:30 -0500
commit	b121d1e74d1f24654bdc3165d3db1ca149501356 (patch)
tree	aa0326edc95e2152a2277386b5363beb7768f7dc
parent	8aba8b83128a04197991518e241aafd3323b705d (diff)
bpf: prevent kprobe+bpf deadlocks
If a kprobe is placed within the update or delete hash map helpers, which hold the bucket spin lock, and the triggered bpf program tries to grab the spin lock for the same bucket on the same cpu, it will deadlock. Fix this by extending the existing recursion prevention mechanism.

Note that map_lookup and the other tracing helpers don't have this problem, since they hold no locks and don't modify global data. bpf_trace_printk has its own recursion check and is fine as well.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
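For context, the "existing recursion prevention mechanism" is the per-cpu bpf_prog_active counter that trace_call_bpf() bumps before running a program. This patch moves the counter out of kernel/trace/bpf_trace.c so the map update/delete syscall paths can increment it too; a kprobe firing while the bucket lock is held then finds the counter already non-zero and the attached program is skipped instead of deadlocking. A condensed sketch of that guard (in_nmi() handling and other details omitted; illustrative, not verbatim kernel source):

/* Condensed sketch of the guard in kernel/trace/bpf_trace.c of this era;
 * not a verbatim copy.
 */
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	unsigned int ret;

	preempt_disable();
	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/* a bpf program is already active on this cpu -- after
		 * this patch that includes the map update/delete syscall
		 * paths -- so skip the program rather than risk grabbing
		 * a bucket spin lock that is already held
		 */
		ret = 0;
		goto out;
	}

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();
out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}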
-rw-r--r--	include/linux/bpf.h	3
-rw-r--r--	kernel/bpf/syscall.c	13
-rw-r--r--	kernel/trace/bpf_trace.c	2
3 files changed, 16 insertions(+), 2 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 51e498e5470e..4b070827200d 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -10,6 +10,7 @@
 #include <uapi/linux/bpf.h>
 #include <linux/workqueue.h>
 #include <linux/file.h>
+#include <linux/percpu.h>
 
 struct bpf_map;
 
@@ -163,6 +164,8 @@ bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp)
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
 
 #ifdef CONFIG_BPF_SYSCALL
+DECLARE_PER_CPU(int, bpf_prog_active);
+
 void bpf_register_prog_type(struct bpf_prog_type_list *tl);
 void bpf_register_map_type(struct bpf_map_type_list *tl);
 
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index c95a753c2007..dc99f6a000f5 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -18,6 +18,8 @@
 #include <linux/filter.h>
 #include <linux/version.h>
 
+DEFINE_PER_CPU(int, bpf_prog_active);
+
 int sysctl_unprivileged_bpf_disabled __read_mostly;
 
 static LIST_HEAD(bpf_map_types);
@@ -347,6 +349,11 @@ static int map_update_elem(union bpf_attr *attr)
 	if (copy_from_user(value, uvalue, value_size) != 0)
 		goto free_value;
 
+	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
+	 * inside bpf map update or delete otherwise deadlocks are possible
+	 */
+	preempt_disable();
+	__this_cpu_inc(bpf_prog_active);
 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
 		err = bpf_percpu_hash_update(map, key, value, attr->flags);
 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
@@ -356,6 +363,8 @@ static int map_update_elem(union bpf_attr *attr)
 		err = map->ops->map_update_elem(map, key, value, attr->flags);
 		rcu_read_unlock();
 	}
+	__this_cpu_dec(bpf_prog_active);
+	preempt_enable();
 
 free_value:
 	kfree(value);
@@ -394,9 +403,13 @@ static int map_delete_elem(union bpf_attr *attr)
 	if (copy_from_user(key, ukey, map->key_size) != 0)
 		goto free_key;
 
+	preempt_disable();
+	__this_cpu_inc(bpf_prog_active);
 	rcu_read_lock();
 	err = map->ops->map_delete_elem(map, key);
 	rcu_read_unlock();
+	__this_cpu_dec(bpf_prog_active);
+	preempt_enable();
 
 free_key:
 	kfree(key);
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 4b8caa392b86..3e4ffb3ace5f 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -13,8 +13,6 @@
 #include <linux/ctype.h>
 #include "trace.h"
 
-static DEFINE_PER_CPU(int, bpf_prog_active);
-
 /**
  * trace_call_bpf - invoke BPF program
  * @prog: BPF program