author		John Fastabend <john.fastabend@gmail.com>	2014-09-12 23:08:20 -0400
committer	David S. Miller <davem@davemloft.net>		2014-09-13 12:30:26 -0400
commit		331b72922c5f58d48fd5500acadc91777cc31970 (patch)
tree		03f16a1e47e4b4c01dca29868a9b13d86634aa82 /net/sched
parent		1109c00547fc66df45b9ff923544be4c1e1bec13 (diff)
net: sched: RCU cls_tcindex
Make cls_tcindex RCU safe.
This patch adds a new RCU routine, rcu_dereference_bh_rtnl(), to check
that the caller holds either the RCU read lock or RTNL. This is needed
because tcindex_lookup() is called in both contexts.
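For reference, the helper is expected to be a one-line wrapper in
include/linux/rtnetlink.h, built from the existing rcu_dereference_bh_check()
and lockdep_rtnl_is_held() primitives. Its definition is not shown on this
page, so the following is a sketch under that assumption:

	/* Assumed form: the pointer may be dereferenced under
	 * rcu_read_lock_bh() or with RTNL held.
	 */
	#define rcu_dereference_bh_rtnl(p)				\
		rcu_dereference_bh_check(p, lockdep_rtnl_is_held())

This lets tcindex_lookup() be shared by the softirq classify path and the
RTNL-protected control path without triggering lockdep warnings.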
Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
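The patch follows the usual RCU copy/update discipline visible in the diff
below: readers traverse tp->root and the filter chains under RCU, while
writers (holding RTNL) build a new tcindex_data, publish it with
rcu_assign_pointer(), and reclaim the old structure with call_rcu() after a
grace period. A minimal, self-contained sketch of that pattern, with
illustrative names (cfg, active_cfg, etc.) that are not part of this patch:

	#include <linux/rcupdate.h>
	#include <linux/rtnetlink.h>
	#include <linux/slab.h>

	struct cfg {
		int val;
		struct rcu_head rcu;
	};

	static struct cfg __rcu *active_cfg;

	static void cfg_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct cfg, rcu));
	}

	/* Writer: caller holds RTNL, mirroring tcindex_set_parms() */
	static int cfg_update(int new_val)
	{
		struct cfg *ocfg, *ncfg;

		ncfg = kzalloc(sizeof(*ncfg), GFP_KERNEL);
		if (!ncfg)
			return -ENOMEM;

		ocfg = rtnl_dereference(active_cfg);
		if (ocfg)
			*ncfg = *ocfg;		/* copy old attributes ... */
		ncfg->val = new_val;		/* ... then apply the change */

		rcu_assign_pointer(active_cfg, ncfg);	/* publish */
		if (ocfg)
			call_rcu(&ocfg->rcu, cfg_free_rcu); /* free after grace period */
		return 0;
	}

	/* Reader: BH context, mirroring tcindex_classify() */
	static int cfg_read(void)
	{
		struct cfg *c;
		int val = 0;

		rcu_read_lock_bh();
		c = rcu_dereference_bh(active_cfg);
		if (c)
			val = c->val;
		rcu_read_unlock_bh();
		return val;
	}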
Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/cls_tcindex.c	248
1 file changed, 154 insertions, 94 deletions
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 3e9f76413b3b..a9f4279fbd69 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -32,19 +32,21 @@ struct tcindex_filter_result {
 struct tcindex_filter {
 	u16 key;
 	struct tcindex_filter_result result;
-	struct tcindex_filter *next;
+	struct tcindex_filter __rcu *next;
+	struct rcu_head rcu;
 };
 
 
 struct tcindex_data {
 	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
-	struct tcindex_filter **h;	/* imperfect hash; only used if !perfect;
-					   NULL if unused */
+	struct tcindex_filter __rcu **h; /* imperfect hash; */
+	struct tcf_proto *tp;
 	u16 mask;		/* AND key with mask */
-	int shift;		/* shift ANDed key to the right */
-	int hash;		/* hash table size; 0 if undefined */
-	int alloc_hash;		/* allocated size */
-	int fall_through;	/* 0: only classify if explicit match */
+	u32 shift;		/* shift ANDed key to the right */
+	u32 hash;		/* hash table size; 0 if undefined */
+	u32 alloc_hash;		/* allocated size */
+	u32 fall_through;	/* 0: only classify if explicit match */
+	struct rcu_head rcu;
 };
 
 static inline int
@@ -56,13 +58,18 @@ tcindex_filter_is_set(struct tcindex_filter_result *r)
 static struct tcindex_filter_result *
 tcindex_lookup(struct tcindex_data *p, u16 key)
 {
-	struct tcindex_filter *f;
+	if (p->perfect) {
+		struct tcindex_filter_result *f = p->perfect + key;
+
+		return tcindex_filter_is_set(f) ? f : NULL;
+	} else if (p->h) {
+		struct tcindex_filter __rcu **fp;
+		struct tcindex_filter *f;
 
-	if (p->perfect)
-		return tcindex_filter_is_set(p->perfect + key) ?
-			p->perfect + key : NULL;
-	else if (p->h) {
-		for (f = p->h[key % p->hash]; f; f = f->next)
+		fp = &p->h[key % p->hash];
+		for (f = rcu_dereference_bh_rtnl(*fp);
+		     f;
+		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
 			if (f->key == key)
 				return &f->result;
 	}
@@ -74,7 +81,7 @@ tcindex_lookup(struct tcindex_data *p, u16 key)
 static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 			    struct tcf_result *res)
 {
-	struct tcindex_data *p = tp->root;
+	struct tcindex_data *p = rcu_dereference(tp->root);
 	struct tcindex_filter_result *f;
 	int key = (skb->tc_index & p->mask) >> p->shift;
 
@@ -99,7 +106,7 @@ static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 
 static unsigned long tcindex_get(struct tcf_proto *tp, u32 handle)
 {
-	struct tcindex_data *p = tp->root;
+	struct tcindex_data *p = rtnl_dereference(tp->root);
 	struct tcindex_filter_result *r;
 
 	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
@@ -129,49 +136,59 @@ static int tcindex_init(struct tcf_proto *tp)
 	p->hash = DEFAULT_HASH_SIZE;
 	p->fall_through = 1;
 
-	tp->root = p;
+	rcu_assign_pointer(tp->root, p);
 	return 0;
 }
 
-
 static int
-__tcindex_delete(struct tcf_proto *tp, unsigned long arg, int lock)
+tcindex_delete(struct tcf_proto *tp, unsigned long arg)
 {
-	struct tcindex_data *p = tp->root;
+	struct tcindex_data *p = rtnl_dereference(tp->root);
 	struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg;
+	struct tcindex_filter __rcu **walk;
 	struct tcindex_filter *f = NULL;
 
-	pr_debug("tcindex_delete(tp %p,arg 0x%lx),p %p,f %p\n", tp, arg, p, f);
+	pr_debug("tcindex_delete(tp %p,arg 0x%lx),p %p\n", tp, arg, p);
 	if (p->perfect) {
 		if (!r->res.class)
 			return -ENOENT;
 	} else {
 		int i;
-		struct tcindex_filter **walk = NULL;
 
-		for (i = 0; i < p->hash; i++)
-			for (walk = p->h+i; *walk; walk = &(*walk)->next)
-				if (&(*walk)->result == r)
+		for (i = 0; i < p->hash; i++) {
+			walk = p->h + i;
+			for (f = rtnl_dereference(*walk); f;
+			     walk = &f->next, f = rtnl_dereference(*walk)) {
+				if (&f->result == r)
 					goto found;
+			}
+		}
 		return -ENOENT;
 
 found:
-		f = *walk;
-		if (lock)
-			tcf_tree_lock(tp);
-		*walk = f->next;
-		if (lock)
-			tcf_tree_unlock(tp);
+		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
 	}
 	tcf_unbind_filter(tp, &r->res);
 	tcf_exts_destroy(tp, &r->exts);
-	kfree(f);
+	if (f)
+		kfree_rcu(f, rcu);
 	return 0;
 }
 
-static int tcindex_delete(struct tcf_proto *tp, unsigned long arg)
+static int tcindex_destroy_element(struct tcf_proto *tp,
+				   unsigned long arg,
+				   struct tcf_walker *walker)
 {
-	return __tcindex_delete(tp, arg, 1);
+	return tcindex_delete(tp, arg);
+}
+
+static void __tcindex_destroy(struct rcu_head *head)
+{
+	struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
+
+	kfree(p->perfect);
+	kfree(p->h);
+	kfree(p);
 }
 
 static inline int
@@ -194,6 +211,14 @@ static void tcindex_filter_result_init(struct tcindex_filter_result *r)
 	tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
 }
 
+static void __tcindex_partial_destroy(struct rcu_head *head)
+{
+	struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
+
+	kfree(p->perfect);
+	kfree(p);
+}
+
 static int
 tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 		  u32 handle, struct tcindex_data *p,
@@ -203,7 +228,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 	int err, balloc = 0;
 	struct tcindex_filter_result new_filter_result, *old_r = r;
 	struct tcindex_filter_result cr;
-	struct tcindex_data cp;
+	struct tcindex_data *cp, *oldp;
 	struct tcindex_filter *f = NULL; /* make gcc behave */
 	struct tcf_exts e;
 
@@ -212,84 +237,118 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 	if (err < 0)
 		return err;
 
-	memcpy(&cp, p, sizeof(cp));
-	tcindex_filter_result_init(&new_filter_result);
+	/* tcindex_data attributes must look atomic to classifier/lookup so
+	 * allocate new tcindex data and RCU assign it onto root. Keeping
+	 * perfect hash and hash pointers from old data.
+	 */
+	cp = kzalloc(sizeof(cp), GFP_KERNEL);
+	if (!cp)
+		return -ENOMEM;
+
+	cp->mask = p->mask;
+	cp->shift = p->shift;
+	cp->hash = p->hash;
+	cp->alloc_hash = p->alloc_hash;
+	cp->fall_through = p->fall_through;
+	cp->tp = tp;
+
+	if (p->perfect) {
+		cp->perfect = kmemdup(p->perfect,
+				      sizeof(*r) * cp->hash, GFP_KERNEL);
+		if (!cp->perfect)
+			goto errout;
+	}
+	cp->h = p->h;
+
+	memset(&new_filter_result, 0, sizeof(new_filter_result));
+	tcf_exts_init(&new_filter_result.exts,
+		      TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
 
 	tcindex_filter_result_init(&cr);
 	if (old_r)
 		cr.res = r->res;
 
 	if (tb[TCA_TCINDEX_HASH])
-		cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
+		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
 
 	if (tb[TCA_TCINDEX_MASK])
-		cp.mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
+		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
 
 	if (tb[TCA_TCINDEX_SHIFT])
-		cp.shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
+		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
 
 	err = -EBUSY;
+
 	/* Hash already allocated, make sure that we still meet the
 	 * requirements for the allocated hash.
 	 */
-	if (cp.perfect) {
-		if (!valid_perfect_hash(&cp) ||
-		    cp.hash > cp.alloc_hash)
+	if (cp->perfect) {
+		if (!valid_perfect_hash(cp) ||
+		    cp->hash > cp->alloc_hash)
 			goto errout;
-	} else if (cp.h && cp.hash != cp.alloc_hash)
+	} else if (cp->h && cp->hash != cp->alloc_hash) {
 		goto errout;
+	}
 
 	err = -EINVAL;
 	if (tb[TCA_TCINDEX_FALL_THROUGH])
-		cp.fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);
+		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);
 
-	if (!cp.hash) {
+	if (!cp->hash) {
 		/* Hash not specified, use perfect hash if the upper limit
 		 * of the hashing index is below the threshold.
 		 */
-		if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD)
-			cp.hash = (cp.mask >> cp.shift) + 1;
+		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
+			cp->hash = (cp->mask >> cp->shift) + 1;
 		else
-			cp.hash = DEFAULT_HASH_SIZE;
+			cp->hash = DEFAULT_HASH_SIZE;
 	}
 
-	if (!cp.perfect && !cp.h)
-		cp.alloc_hash = cp.hash;
+	if (!cp->perfect && cp->h)
+		cp->alloc_hash = cp->hash;
 
 	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
 	 * but then, we'd fail handles that may become valid after some future
 	 * mask change. While this is extremely unlikely to ever matter,
 	 * the check below is safer (and also more backwards-compatible).
 	 */
-	if (cp.perfect || valid_perfect_hash(&cp))
-		if (handle >= cp.alloc_hash)
+	if (cp->perfect || valid_perfect_hash(cp))
+		if (handle >= cp->alloc_hash)
 			goto errout;
 
 
 	err = -ENOMEM;
-	if (!cp.perfect && !cp.h) {
-		if (valid_perfect_hash(&cp)) {
+	if (!cp->perfect && !cp->h) {
+		if (valid_perfect_hash(cp)) {
 			int i;
 
-			cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
-			if (!cp.perfect)
+			cp->perfect = kcalloc(cp->hash, sizeof(*r), GFP_KERNEL);
+			if (!cp->perfect)
 				goto errout;
-			for (i = 0; i < cp.hash; i++)
-				tcf_exts_init(&cp.perfect[i].exts, TCA_TCINDEX_ACT,
+			for (i = 0; i < cp->hash; i++)
+				tcf_exts_init(&cp->perfect[i].exts,
+					      TCA_TCINDEX_ACT,
 					      TCA_TCINDEX_POLICE);
 			balloc = 1;
 		} else {
-			cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
-			if (!cp.h)
+			struct tcindex_filter __rcu **hash;
+
+			hash = kcalloc(cp->hash,
+				       sizeof(struct tcindex_filter *),
+				       GFP_KERNEL);
+
+			if (!hash)
 				goto errout;
+
+			cp->h = hash;
 			balloc = 2;
 		}
 	}
 
-	if (cp.perfect)
-		r = cp.perfect + handle;
+	if (cp->perfect)
+		r = cp->perfect + handle;
 	else
-		r = tcindex_lookup(&cp, handle) ? : &new_filter_result;
+		r = tcindex_lookup(cp, handle) ? : &new_filter_result;
 
 	if (r == &new_filter_result) {
 		f = kzalloc(sizeof(*f), GFP_KERNEL);
@@ -307,33 +366,41 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 	else
 		tcf_exts_change(tp, &cr.exts, &e);
 
-	tcf_tree_lock(tp);
 	if (old_r && old_r != r)
 		tcindex_filter_result_init(old_r);
 
-	memcpy(p, &cp, sizeof(cp));
+	oldp = p;
 	r->res = cr.res;
+	rcu_assign_pointer(tp->root, cp);
 
 	if (r == &new_filter_result) {
-		struct tcindex_filter **fp;
+		struct tcindex_filter *nfp;
+		struct tcindex_filter __rcu **fp;
 
 		f->key = handle;
 		f->result = new_filter_result;
 		f->next = NULL;
-		for (fp = p->h+(handle % p->hash); *fp; fp = &(*fp)->next)
-			/* nothing */;
-		*fp = f;
+
+		fp = p->h + (handle % p->hash);
+		for (nfp = rtnl_dereference(*fp);
+		     nfp;
+		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
+			; /* nothing */
+
+		rcu_assign_pointer(*fp, f);
 	}
-	tcf_tree_unlock(tp);
 
+	if (oldp)
+		call_rcu(&oldp->rcu, __tcindex_partial_destroy);
 	return 0;
 
 errout_alloc:
 	if (balloc == 1)
-		kfree(cp.perfect);
+		kfree(cp->perfect);
 	else if (balloc == 2)
-		kfree(cp.h);
+		kfree(cp->h);
 errout:
+	kfree(cp);
 	tcf_exts_destroy(tp, &e);
 	return err;
 }
@@ -345,7 +412,7 @@ tcindex_change(struct net *net, struct sk_buff *in_skb,
 {
 	struct nlattr *opt = tca[TCA_OPTIONS];
 	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
-	struct tcindex_data *p = tp->root;
+	struct tcindex_data *p = rtnl_dereference(tp->root);
 	struct tcindex_filter_result *r = (struct tcindex_filter_result *) *arg;
 	int err;
 
@@ -364,10 +431,9 @@ tcindex_change(struct net *net, struct sk_buff *in_skb,
 				 tca[TCA_RATE], ovr);
 }
 
-
 static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
 {
-	struct tcindex_data *p = tp->root;
+	struct tcindex_data *p = rtnl_dereference(tp->root);
 	struct tcindex_filter *f, *next;
 	int i;
 
@@ -390,8 +456,8 @@ static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
 	if (!p->h)
 		return;
 	for (i = 0; i < p->hash; i++) {
-		for (f = p->h[i]; f; f = next) {
-			next = f->next;
+		for (f = rtnl_dereference(p->h[i]); f; f = next) {
+			next = rtnl_dereference(f->next);
 			if (walker->count >= walker->skip) {
 				if (walker->fn(tp, (unsigned long) &f->result,
 					       walker) < 0) {
@@ -404,17 +470,9 @@ static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
 	}
 }
 
-
-static int tcindex_destroy_element(struct tcf_proto *tp,
-				   unsigned long arg, struct tcf_walker *walker)
-{
-	return __tcindex_delete(tp, arg, 0);
-}
-
-
 static void tcindex_destroy(struct tcf_proto *tp)
 {
-	struct tcindex_data *p = tp->root;
+	struct tcindex_data *p = rtnl_dereference(tp->root);
 	struct tcf_walker walker;
 
 	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
@@ -422,17 +480,16 @@ static void tcindex_destroy(struct tcf_proto *tp)
 	walker.skip = 0;
 	walker.fn = tcindex_destroy_element;
 	tcindex_walk(tp, &walker);
-	kfree(p->perfect);
-	kfree(p->h);
-	kfree(p);
-	tp->root = NULL;
+
+	RCU_INIT_POINTER(tp->root, NULL);
+	call_rcu(&p->rcu, __tcindex_destroy);
 }
 
 
 static int tcindex_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
 			struct sk_buff *skb, struct tcmsg *t)
 {
-	struct tcindex_data *p = tp->root;
+	struct tcindex_data *p = rtnl_dereference(tp->root);
 	struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh;
 	unsigned char *b = skb_tail_pointer(skb);
 	struct nlattr *nest;
@@ -455,15 +512,18 @@ static int tcindex_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
 		nla_nest_end(skb, nest);
 	} else {
 		if (p->perfect) {
-			t->tcm_handle = r-p->perfect;
+			t->tcm_handle = r - p->perfect;
 		} else {
 			struct tcindex_filter *f;
+			struct tcindex_filter __rcu **fp;
 			int i;
 
 			t->tcm_handle = 0;
 			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
-				for (f = p->h[i]; !t->tcm_handle && f;
-				     f = f->next) {
+				fp = &p->h[i];
+				for (f = rtnl_dereference(*fp);
+				     !t->tcm_handle && f;
+				     fp = &f->next, f = rtnl_dereference(*fp)) {
 					if (&f->result == r)
 						t->tcm_handle = f->key;
 				}