diff options
author | Daniel Borkmann <daniel@iogearbox.net> | 2015-03-12 15:03:12 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-03-12 18:33:15 -0400 |
commit | 54720df130b3e6356391ed4f8a1a024318bcae23 (patch) | |
tree | bb15547c48f3937e6f36ffd89f0d98494c490c7e /net | |
parent | 06741d055bb1a630f793e36a486a209698ea672b (diff) |
cls_bpf: do eBPF invocation under non-bh RCU lock variant for maps
Currently, it is possible in cls_bpf to access eBPF maps only under
rcu_read_lock_bh() variants: while on ingress side, that is, handle_ing(),
the classifier would be called from __netif_receive_skb_core() under
rcu_read_lock(); on egress side, however, it's rcu_read_lock_bh() via
__dev_queue_xmit().
This rcu/rcu_bh mix doesn't work together with eBPF maps as they require
solely to be called under rcu_read_lock(). eBPF maps could also be shared
among various other eBPF programs (possibly even with other eBPF program
types, f.e. tracing) and user space processes, so any context is assumed.
Therefore, a possible fix for cls_bpf is to wrap/nest eBPF program
invocation under non-bh RCU lock variant.
Fixes: e2e9b6541dd4 ("cls_bpf: add initial eBPF support for programmable classifiers")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r-- | net/sched/cls_bpf.c | 9 |
1 files changed, 6 insertions, 3 deletions
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 243c9f225a73..5c4171c5d2bd 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c | |||
@@ -64,8 +64,10 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, | |||
64 | { | 64 | { |
65 | struct cls_bpf_head *head = rcu_dereference_bh(tp->root); | 65 | struct cls_bpf_head *head = rcu_dereference_bh(tp->root); |
66 | struct cls_bpf_prog *prog; | 66 | struct cls_bpf_prog *prog; |
67 | int ret; | 67 | int ret = -1; |
68 | 68 | ||
69 | /* Needed here for accessing maps. */ | ||
70 | rcu_read_lock(); | ||
69 | list_for_each_entry_rcu(prog, &head->plist, link) { | 71 | list_for_each_entry_rcu(prog, &head->plist, link) { |
70 | int filter_res = BPF_PROG_RUN(prog->filter, skb); | 72 | int filter_res = BPF_PROG_RUN(prog->filter, skb); |
71 | 73 | ||
@@ -80,10 +82,11 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, | |||
80 | if (ret < 0) | 82 | if (ret < 0) |
81 | continue; | 83 | continue; |
82 | 84 | ||
83 | return ret; | 85 | break; |
84 | } | 86 | } |
87 | rcu_read_unlock(); | ||
85 | 88 | ||
86 | return -1; | 89 | return ret; |
87 | } | 90 | } |
88 | 91 | ||
89 | static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog) | 92 | static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog) |