author	Stanislav Fomichev <sdf@google.com>	2019-02-12 18:42:38 -0500
committer	Daniel Borkmann <daniel@iogearbox.net>	2019-02-18 18:17:03 -0500
commit	df1a2cb7c74b3d3abc8d8c2d690f82c8ebc3490a (patch)
tree	21336c0eca9c46abc9ad23bbd9813663dd120522 /net/bpf
parent	21d2cb491b9e10bfdf10424673b43cd9eddc2da1 (diff)
bpf/test_run: fix unkillable BPF_PROG_TEST_RUN
Syzbot found out that running BPF_PROG_TEST_RUN with repeat=0xffffffff makes the process unkillable. The problem is that when CONFIG_PREEMPT is enabled, we never see need_resched() return true. This is due to the fact that preempt_enable() (which we do in bpf_test_run_one on each iteration) now handles resched if it's needed.

Let's disable preemption for the whole run, not per test. In this case we can properly see whether resched is needed. Let's also properly return -EINTR to userspace in case of a signal interrupt.

See recent discussion:
http://lore.kernel.org/netdev/CAH3MdRWHr4N8jei8jxDppXjmw-Nw=puNDLbu1dQOFQHxfU2onA@mail.gmail.com

I'll follow up with the same fix for bpf_prog_test_run_flow_dissector in bpf-next.

Reported-by: syzbot <syzkaller@googlegroups.com>
Signed-off-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
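For context, below is a minimal userspace sketch of how BPF_PROG_TEST_RUN is driven with the problematic repeat count through the raw bpf(2) syscall. This is not the syzbot reproducer; the helper name prog_test_run_huge_repeat() is made up for illustration, and loading the program that prog_fd refers to is not shown.

/* Hypothetical sketch: drive BPF_PROG_TEST_RUN with repeat=0xffffffff
 * through the raw bpf(2) syscall.  prog_fd must refer to an already
 * loaded program (loading is not shown).  Before this patch, once the
 * kernel entered its test loop on a CONFIG_PREEMPT kernel, a pending
 * signal was only noticed if need_resched() fired, so the calling task
 * could become unkillable.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static long prog_test_run_huge_repeat(int prog_fd, void *data, __u32 size,
				      __u32 *retval, __u32 *duration)
{
	union bpf_attr attr;
	long err;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = (__u64)(unsigned long)data;
	attr.test.data_size_in = size;
	attr.test.repeat = 0xffffffff;	/* the problematic repeat count */

	/* With this fix, a pending signal makes the kernel stop iterating
	 * and the syscall fails with errno == EINTR instead of spinning
	 * through all ~4 billion iterations.
	 */
	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	if (!err && retval)
		*retval = attr.test.retval;
	if (!err && duration)
		*duration = attr.test.duration;
	return err;
}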
Diffstat (limited to 'net/bpf')
-rw-r--r--	net/bpf/test_run.c	45
1 file changed, 24 insertions(+), 21 deletions(-)
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index fa2644d276ef..e31e1b20f7f4 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -13,27 +13,13 @@
 #include <net/sock.h>
 #include <net/tcp.h>
 
-static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
-		struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
-{
-	u32 ret;
-
-	preempt_disable();
-	rcu_read_lock();
-	bpf_cgroup_storage_set(storage);
-	ret = BPF_PROG_RUN(prog, ctx);
-	rcu_read_unlock();
-	preempt_enable();
-
-	return ret;
-}
-
-static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
-			u32 *time)
+static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
+			u32 *retval, u32 *time)
 {
 	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
 	enum bpf_cgroup_storage_type stype;
 	u64 time_start, time_spent = 0;
+	int ret = 0;
 	u32 i;
 
 	for_each_cgroup_storage_type(stype) {
@@ -48,25 +34,42 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
 
 	if (!repeat)
 		repeat = 1;
+
+	rcu_read_lock();
+	preempt_disable();
 	time_start = ktime_get_ns();
 	for (i = 0; i < repeat; i++) {
-		*ret = bpf_test_run_one(prog, ctx, storage);
+		bpf_cgroup_storage_set(storage);
+		*retval = BPF_PROG_RUN(prog, ctx);
+
+		if (signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+
 		if (need_resched()) {
-			if (signal_pending(current))
-				break;
 			time_spent += ktime_get_ns() - time_start;
+			preempt_enable();
+			rcu_read_unlock();
+
 			cond_resched();
+
+			rcu_read_lock();
+			preempt_disable();
 			time_start = ktime_get_ns();
 		}
 	}
 	time_spent += ktime_get_ns() - time_start;
+	preempt_enable();
+	rcu_read_unlock();
+
 	do_div(time_spent, repeat);
 	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
 
 	for_each_cgroup_storage_type(stype)
 		bpf_cgroup_storage_free(storage[stype]);
 
-	return 0;
+	return ret;
 }
 
 static int bpf_test_finish(const union bpf_attr *kattr,
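As a usage note, the following hedged sketch shows what the new -EINTR behaviour enables on the caller side. It builds on the hypothetical prog_test_run_huge_repeat() helper sketched above; run_and_report() and the SIGINT handling are illustrative only.

/* Hypothetical caller-side view of the change: a test harness can
 * install an ordinary SIGINT handler and rely on BPF_PROG_TEST_RUN
 * returning -1 with errno == EINTR once the signal is pending, instead
 * of blocking unkillably inside the kernel.
 */
#include <errno.h>
#include <linux/types.h>
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t got_sigint;

static void on_sigint(int sig)
{
	(void)sig;
	got_sigint = 1;
}

static void run_and_report(int prog_fd, void *pkt, __u32 pkt_len)
{
	__u32 retval = 0, duration = 0;

	signal(SIGINT, on_sigint);

	if (prog_test_run_huge_repeat(prog_fd, pkt, pkt_len,
				      &retval, &duration) < 0) {
		if (errno == EINTR && got_sigint)
			fprintf(stderr, "test run interrupted by Ctrl-C\n");
		else
			perror("BPF_PROG_TEST_RUN");
		return;
	}
	printf("retval=%u avg duration=%uns\n", retval, duration);
}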