diff options
author | John Fastabend <john.fastabend@gmail.com> | 2014-09-12 23:10:24 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-09-13 12:30:26 -0400 |
commit | 1f947bf151e90ec0baad294881607ebf321a2863 (patch) | |
tree | fdc1f051d80db9c09511b0b497e16ec8f5357e3b /net/sched | |
parent | b929d86d25352496c528fcd74fdcabe3f6a4994a (diff) |
net: sched: rcu'ify cls_bpf
This patch makes the cls_bpf classifier RCU safe. The tcf_lock
was being used to protect a list of cls_bpf_prog structures; now this
list is RCU safe and updates occur with rcu_replace.
Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r-- | net/sched/cls_bpf.c | 94 |
1 file changed, 47 insertions, 47 deletions
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 0e30d58149da..6a7386e6e5a8 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c | |||
@@ -27,6 +27,7 @@ MODULE_DESCRIPTION("TC BPF based classifier"); | |||
27 | struct cls_bpf_head { | 27 | struct cls_bpf_head { |
28 | struct list_head plist; | 28 | struct list_head plist; |
29 | u32 hgen; | 29 | u32 hgen; |
30 | struct rcu_head rcu; | ||
30 | }; | 31 | }; |
31 | 32 | ||
32 | struct cls_bpf_prog { | 33 | struct cls_bpf_prog { |
@@ -37,6 +38,8 @@ struct cls_bpf_prog { | |||
37 | struct list_head link; | 38 | struct list_head link; |
38 | u32 handle; | 39 | u32 handle; |
39 | u16 bpf_len; | 40 | u16 bpf_len; |
41 | struct tcf_proto *tp; | ||
42 | struct rcu_head rcu; | ||
40 | }; | 43 | }; |
41 | 44 | ||
42 | static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = { | 45 | static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = { |
@@ -49,11 +52,11 @@ static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = { | |||
49 | static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, | 52 | static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, |
50 | struct tcf_result *res) | 53 | struct tcf_result *res) |
51 | { | 54 | { |
52 | struct cls_bpf_head *head = tp->root; | 55 | struct cls_bpf_head *head = rcu_dereference(tp->root); |
53 | struct cls_bpf_prog *prog; | 56 | struct cls_bpf_prog *prog; |
54 | int ret; | 57 | int ret; |
55 | 58 | ||
56 | list_for_each_entry(prog, &head->plist, link) { | 59 | list_for_each_entry_rcu(prog, &head->plist, link) { |
57 | int filter_res = BPF_PROG_RUN(prog->filter, skb); | 60 | int filter_res = BPF_PROG_RUN(prog->filter, skb); |
58 | 61 | ||
59 | if (filter_res == 0) | 62 | if (filter_res == 0) |
@@ -81,8 +84,8 @@ static int cls_bpf_init(struct tcf_proto *tp) | |||
81 | if (head == NULL) | 84 | if (head == NULL) |
82 | return -ENOBUFS; | 85 | return -ENOBUFS; |
83 | 86 | ||
84 | INIT_LIST_HEAD(&head->plist); | 87 | INIT_LIST_HEAD_RCU(&head->plist); |
85 | tp->root = head; | 88 | rcu_assign_pointer(tp->root, head); |
86 | 89 | ||
87 | return 0; | 90 | return 0; |
88 | } | 91 | } |
@@ -98,18 +101,22 @@ static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog) | |||
98 | kfree(prog); | 101 | kfree(prog); |
99 | } | 102 | } |
100 | 103 | ||
104 | static void __cls_bpf_delete_prog(struct rcu_head *rcu) | ||
105 | { | ||
106 | struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu); | ||
107 | |||
108 | cls_bpf_delete_prog(prog->tp, prog); | ||
109 | } | ||
110 | |||
101 | static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg) | 111 | static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg) |
102 | { | 112 | { |
103 | struct cls_bpf_head *head = tp->root; | 113 | struct cls_bpf_head *head = rtnl_dereference(tp->root); |
104 | struct cls_bpf_prog *prog, *todel = (struct cls_bpf_prog *) arg; | 114 | struct cls_bpf_prog *prog, *todel = (struct cls_bpf_prog *) arg; |
105 | 115 | ||
106 | list_for_each_entry(prog, &head->plist, link) { | 116 | list_for_each_entry(prog, &head->plist, link) { |
107 | if (prog == todel) { | 117 | if (prog == todel) { |
108 | tcf_tree_lock(tp); | 118 | list_del_rcu(&prog->link); |
109 | list_del(&prog->link); | 119 | call_rcu(&prog->rcu, __cls_bpf_delete_prog); |
110 | tcf_tree_unlock(tp); | ||
111 | |||
112 | cls_bpf_delete_prog(tp, prog); | ||
113 | return 0; | 120 | return 0; |
114 | } | 121 | } |
115 | } | 122 | } |
@@ -119,27 +126,28 @@ static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg) | |||
119 | 126 | ||
120 | static void cls_bpf_destroy(struct tcf_proto *tp) | 127 | static void cls_bpf_destroy(struct tcf_proto *tp) |
121 | { | 128 | { |
122 | struct cls_bpf_head *head = tp->root; | 129 | struct cls_bpf_head *head = rtnl_dereference(tp->root); |
123 | struct cls_bpf_prog *prog, *tmp; | 130 | struct cls_bpf_prog *prog, *tmp; |
124 | 131 | ||
125 | list_for_each_entry_safe(prog, tmp, &head->plist, link) { | 132 | list_for_each_entry_safe(prog, tmp, &head->plist, link) { |
126 | list_del(&prog->link); | 133 | list_del_rcu(&prog->link); |
127 | cls_bpf_delete_prog(tp, prog); | 134 | call_rcu(&prog->rcu, __cls_bpf_delete_prog); |
128 | } | 135 | } |
129 | 136 | ||
130 | kfree(head); | 137 | RCU_INIT_POINTER(tp->root, NULL); |
138 | kfree_rcu(head, rcu); | ||
131 | } | 139 | } |
132 | 140 | ||
133 | static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle) | 141 | static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle) |
134 | { | 142 | { |
135 | struct cls_bpf_head *head = tp->root; | 143 | struct cls_bpf_head *head = rtnl_dereference(tp->root); |
136 | struct cls_bpf_prog *prog; | 144 | struct cls_bpf_prog *prog; |
137 | unsigned long ret = 0UL; | 145 | unsigned long ret = 0UL; |
138 | 146 | ||
139 | if (head == NULL) | 147 | if (head == NULL) |
140 | return 0UL; | 148 | return 0UL; |
141 | 149 | ||
142 | list_for_each_entry(prog, &head->plist, link) { | 150 | list_for_each_entry_rcu(prog, &head->plist, link) { |
143 | if (prog->handle == handle) { | 151 | if (prog->handle == handle) { |
144 | ret = (unsigned long) prog; | 152 | ret = (unsigned long) prog; |
145 | break; | 153 | break; |
@@ -158,10 +166,10 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, | |||
158 | unsigned long base, struct nlattr **tb, | 166 | unsigned long base, struct nlattr **tb, |
159 | struct nlattr *est, bool ovr) | 167 | struct nlattr *est, bool ovr) |
160 | { | 168 | { |
161 | struct sock_filter *bpf_ops, *bpf_old; | 169 | struct sock_filter *bpf_ops; |
162 | struct tcf_exts exts; | 170 | struct tcf_exts exts; |
163 | struct sock_fprog_kern tmp; | 171 | struct sock_fprog_kern tmp; |
164 | struct bpf_prog *fp, *fp_old; | 172 | struct bpf_prog *fp; |
165 | u16 bpf_size, bpf_len; | 173 | u16 bpf_size, bpf_len; |
166 | u32 classid; | 174 | u32 classid; |
167 | int ret; | 175 | int ret; |
@@ -197,26 +205,15 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, | |||
197 | if (ret) | 205 | if (ret) |
198 | goto errout_free; | 206 | goto errout_free; |
199 | 207 | ||
200 | tcf_tree_lock(tp); | ||
201 | fp_old = prog->filter; | ||
202 | bpf_old = prog->bpf_ops; | ||
203 | |||
204 | prog->bpf_len = bpf_len; | 208 | prog->bpf_len = bpf_len; |
205 | prog->bpf_ops = bpf_ops; | 209 | prog->bpf_ops = bpf_ops; |
206 | prog->filter = fp; | 210 | prog->filter = fp; |
207 | prog->res.classid = classid; | 211 | prog->res.classid = classid; |
208 | tcf_tree_unlock(tp); | ||
209 | 212 | ||
210 | tcf_bind_filter(tp, &prog->res, base); | 213 | tcf_bind_filter(tp, &prog->res, base); |
211 | tcf_exts_change(tp, &prog->exts, &exts); | 214 | tcf_exts_change(tp, &prog->exts, &exts); |
212 | 215 | ||
213 | if (fp_old) | ||
214 | bpf_prog_destroy(fp_old); | ||
215 | if (bpf_old) | ||
216 | kfree(bpf_old); | ||
217 | |||
218 | return 0; | 216 | return 0; |
219 | |||
220 | errout_free: | 217 | errout_free: |
221 | kfree(bpf_ops); | 218 | kfree(bpf_ops); |
222 | errout: | 219 | errout: |
@@ -244,9 +241,10 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, | |||
244 | u32 handle, struct nlattr **tca, | 241 | u32 handle, struct nlattr **tca, |
245 | unsigned long *arg, bool ovr) | 242 | unsigned long *arg, bool ovr) |
246 | { | 243 | { |
247 | struct cls_bpf_head *head = tp->root; | 244 | struct cls_bpf_head *head = rtnl_dereference(tp->root); |
248 | struct cls_bpf_prog *prog = (struct cls_bpf_prog *) *arg; | 245 | struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg; |
249 | struct nlattr *tb[TCA_BPF_MAX + 1]; | 246 | struct nlattr *tb[TCA_BPF_MAX + 1]; |
247 | struct cls_bpf_prog *prog; | ||
250 | int ret; | 248 | int ret; |
251 | 249 | ||
252 | if (tca[TCA_OPTIONS] == NULL) | 250 | if (tca[TCA_OPTIONS] == NULL) |
@@ -256,18 +254,19 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, | |||
256 | if (ret < 0) | 254 | if (ret < 0) |
257 | return ret; | 255 | return ret; |
258 | 256 | ||
259 | if (prog != NULL) { | ||
260 | if (handle && prog->handle != handle) | ||
261 | return -EINVAL; | ||
262 | return cls_bpf_modify_existing(net, tp, prog, base, tb, | ||
263 | tca[TCA_RATE], ovr); | ||
264 | } | ||
265 | |||
266 | prog = kzalloc(sizeof(*prog), GFP_KERNEL); | 257 | prog = kzalloc(sizeof(*prog), GFP_KERNEL); |
267 | if (prog == NULL) | 258 | if (!prog) |
268 | return -ENOBUFS; | 259 | return -ENOBUFS; |
269 | 260 | ||
270 | tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE); | 261 | tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE); |
262 | |||
263 | if (oldprog) { | ||
264 | if (handle && oldprog->handle != handle) { | ||
265 | ret = -EINVAL; | ||
266 | goto errout; | ||
267 | } | ||
268 | } | ||
269 | |||
271 | if (handle == 0) | 270 | if (handle == 0) |
272 | prog->handle = cls_bpf_grab_new_handle(tp, head); | 271 | prog->handle = cls_bpf_grab_new_handle(tp, head); |
273 | else | 272 | else |
@@ -281,16 +280,17 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, | |||
281 | if (ret < 0) | 280 | if (ret < 0) |
282 | goto errout; | 281 | goto errout; |
283 | 282 | ||
284 | tcf_tree_lock(tp); | 283 | if (oldprog) { |
285 | list_add(&prog->link, &head->plist); | 284 | list_replace_rcu(&prog->link, &oldprog->link); |
286 | tcf_tree_unlock(tp); | 285 | call_rcu(&oldprog->rcu, __cls_bpf_delete_prog); |
286 | } else { | ||
287 | list_add_rcu(&prog->link, &head->plist); | ||
288 | } | ||
287 | 289 | ||
288 | *arg = (unsigned long) prog; | 290 | *arg = (unsigned long) prog; |
289 | |||
290 | return 0; | 291 | return 0; |
291 | errout: | 292 | errout: |
292 | if (*arg == 0UL && prog) | 293 | kfree(prog); |
293 | kfree(prog); | ||
294 | 294 | ||
295 | return ret; | 295 | return ret; |
296 | } | 296 | } |
@@ -339,10 +339,10 @@ nla_put_failure: | |||
339 | 339 | ||
340 | static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg) | 340 | static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg) |
341 | { | 341 | { |
342 | struct cls_bpf_head *head = tp->root; | 342 | struct cls_bpf_head *head = rtnl_dereference(tp->root); |
343 | struct cls_bpf_prog *prog; | 343 | struct cls_bpf_prog *prog; |
344 | 344 | ||
345 | list_for_each_entry(prog, &head->plist, link) { | 345 | list_for_each_entry_rcu(prog, &head->plist, link) { |
346 | if (arg->count < arg->skip) | 346 | if (arg->count < arg->skip) |
347 | goto skip; | 347 | goto skip; |
348 | if (arg->fn(tp, (unsigned long) prog, arg) < 0) { | 348 | if (arg->fn(tp, (unsigned long) prog, arg) < 0) { |