author      John Fastabend <john.fastabend@gmail.com>   2014-10-06 00:28:52 -0400
committer   David S. Miller <davem@davemloft.net>        2014-10-06 18:02:33 -0400
commit      18cdb37ebf4c986d9502405cbd16b0ac29770c25
tree        2bf659bf5d527447c11845ca06d15d1b69b9ab31   /net/sched/cls_bpf.c
parent      13990f8156862fe945a1a226850a6550c8988a33
net: sched: do not use tcf_proto 'tp' argument from call_rcu
Using the tcf_proto pointer 'tp' from inside a classifier's call_rcu
callback is not valid because it may already have been cleaned up by
another call_rcu occurring on another CPU.
'tp' is currently being used by tcf_unbind_filter(), so in this patch we
move the tcf_unbind_filter() calls outside of the call_rcu() context.
This is safe to do because any running scheduler will either read the
valid class field or see it zeroed.
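As a rough illustration of the hazard and of the pattern applied here, the
sketch below contrasts unbinding from inside the RCU callback with unbinding
before queueing it. The names struct my_filter, filter_delete(),
filter_rcu_free() and filter_rcu_free_unsafe() are purely illustrative;
only tcf_unbind_filter(), call_rcu(), container_of() and kfree() are real
kernel APIs.

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>

struct my_filter {                      /* hypothetical filter object */
	struct rcu_head rcu;
	struct tcf_result res;
	struct tcf_proto *tp;
};

/* Before: unbinding from inside the RCU callback dereferences 'tp' after
 * the grace period, when it may already have been freed by a call_rcu()
 * queued for the tcf_proto on another CPU. */
static void filter_rcu_free_unsafe(struct rcu_head *head)
{
	struct my_filter *f = container_of(head, struct my_filter, rcu);

	tcf_unbind_filter(f->tp, &f->res);	/* use-after-free hazard */
	kfree(f);
}

/* After: unbind while the caller still holds a valid 'tp', and leave only
 * the freeing to the RCU callback. */
static void filter_rcu_free(struct rcu_head *head)
{
	kfree(container_of(head, struct my_filter, rcu));
}

static void filter_delete(struct tcf_proto *tp, struct my_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, filter_rcu_free);
}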
And all schedulers today, when the class is 0, do a lookup using the
same call used by tcf_exts_bind(). So even if a running classifier hits
the zeroed class pointer, it will do a lookup and arrive at the same
result. This is particularly fragile at the moment because the only way
to verify it is to audit the schedulers' call sites.
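For context, a sketch of that scheduler-side fallback is below. It is
loosely modeled on classful qdiscs such as HTB; my_class, my_class_lookup()
and my_classify() are illustrative names, not real API, while tc_classify()
and struct tcf_result are the actual kernel interfaces.

#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

struct my_class;					/* illustrative only */
struct my_class *my_class_lookup(struct Qdisc *sch, u32 classid);

static struct my_class *my_classify(struct sk_buff *skb, struct Qdisc *sch,
				    struct tcf_proto *filter_list)
{
	struct tcf_result res;
	struct my_class *cl;

	if (tc_classify(skb, filter_list, &res) < 0)
		return NULL;

	cl = (struct my_class *)res.class;
	if (!cl) {
		/* The result carried no bound class (e.g. the filter was
		 * just unbound and the class field zeroed); fall back to a
		 * lookup by classid, the same lookup the bind path does,
		 * so the outcome is unchanged. */
		cl = my_class_lookup(sch, res.classid);
	}
	return cl;
}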
Reported-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Acked-by: Cong Wang <cwang@twopensource.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/cls_bpf.c')
-rw-r--r--   net/sched/cls_bpf.c   |   4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 4318d067b0a0..eed49d1d0878 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -92,7 +92,6 @@ static int cls_bpf_init(struct tcf_proto *tp)
 
 static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
 {
-	tcf_unbind_filter(tp, &prog->res);
 	tcf_exts_destroy(&prog->exts);
 
 	bpf_prog_destroy(prog->filter);
@@ -116,6 +115,7 @@ static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
 	list_for_each_entry(prog, &head->plist, link) {
 		if (prog == todel) {
 			list_del_rcu(&prog->link);
+			tcf_unbind_filter(tp, &prog->res);
 			call_rcu(&prog->rcu, __cls_bpf_delete_prog);
 			return 0;
 		}
@@ -131,6 +131,7 @@ static void cls_bpf_destroy(struct tcf_proto *tp)
 
 	list_for_each_entry_safe(prog, tmp, &head->plist, link) {
 		list_del_rcu(&prog->link);
+		tcf_unbind_filter(tp, &prog->res);
 		call_rcu(&prog->rcu, __cls_bpf_delete_prog);
 	}
 
@@ -282,6 +283,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 
 	if (oldprog) {
 		list_replace_rcu(&prog->link, &oldprog->link);
+		tcf_unbind_filter(tp, &oldprog->res);
 		call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
 	} else {
 		list_add_rcu(&prog->link, &head->plist);