author		John Fastabend <john.fastabend@gmail.com>	2014-09-12 23:06:55 -0400
committer	David S. Miller <davem@davemloft.net>	2014-09-13 12:30:26 -0400
commit		70da9f0bf999627e50950f6845bd3819ff811085 (patch)
tree		486169e1700460af247859f3ec106e57c6fb137e /net/sched/cls_flow.c
parent		952313bd62589cae216a579bb7ebc76f8e290817 (diff)

net: sched: cls_flow use RCU

Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/cls_flow.c')
-rw-r--r--	net/sched/cls_flow.c	145
1 file changed, 84 insertions(+), 61 deletions(-)
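The conversion follows the usual RCU recipe for classifier chains: the packet path walks the filter list locklessly under the softirq RCU read side, while control-path writers (already serialized by RTNL) never modify a visible filter in place; they build a complete replacement, publish it with a single RCU list operation, and free the old object only after a grace period. A minimal sketch of that updater pattern, using hypothetical demo_* names rather than anything from this file:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical stand-ins for flow_head/flow_filter. */
struct demo_filter {
	struct list_head	list;
	u32			handle;
	struct rcu_head		rcu;
};

static void demo_free_rcu(struct rcu_head *head)
{
	/* Recover the filter from its embedded rcu_head and free it. */
	kfree(container_of(head, struct demo_filter, rcu));
}

/* Updater, called with RTNL held: build fnew completely, then publish
 * it. Readers see either the whole old filter or the whole new one,
 * never a half-updated object. */
static void demo_replace(struct demo_filter *fold, struct demo_filter *fnew)
{
	list_replace_rcu(&fold->list, &fnew->list);
	call_rcu(&fold->rcu, demo_free_rcu);
}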
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 35be16f7c192..95736fa479f3 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -34,12 +34,14 @@
 
 struct flow_head {
 	struct list_head	filters;
+	struct rcu_head		rcu;
 };
 
 struct flow_filter {
 	struct list_head	list;
 	struct tcf_exts		exts;
 	struct tcf_ematch_tree	ematches;
+	struct tcf_proto	*tp;
 	struct timer_list	perturb_timer;
 	u32			perturb_period;
 	u32			handle;
@@ -54,6 +56,7 @@ struct flow_filter {
 	u32			divisor;
 	u32			baseclass;
 	u32			hashrnd;
+	struct rcu_head		rcu;
 };
 
 static inline u32 addr_fold(void *addr)
@@ -276,14 +279,14 @@ static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
 static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
 {
-	struct flow_head *head = tp->root;
+	struct flow_head *head = rcu_dereference_bh(tp->root);
 	struct flow_filter *f;
 	u32 keymask;
 	u32 classid;
 	unsigned int n, key;
 	int r;
 
-	list_for_each_entry(f, &head->filters, list) {
+	list_for_each_entry_rcu(f, &head->filters, list) {
 		u32 keys[FLOW_KEY_MAX + 1];
 		struct flow_keys flow_keys;
 
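The classify path runs under rcu_read_lock_bh() taken by the qdisc layer, hence rcu_dereference_bh() for tp->root and list_for_each_entry_rcu() for the walk; no tcf_tree_lock() is needed on the fast path. A minimal read-side sketch, reusing the hypothetical demo_filter from the sketch above:

/* Read side: caller already holds rcu_read_lock_bh(), as the qdisc
 * layer does before invoking a classifier. */
static struct demo_filter *demo_lookup(struct list_head *filters, u32 handle)
{
	struct demo_filter *f;

	list_for_each_entry_rcu(f, filters, list)
		if (f->handle == handle)
			return f;	/* valid only until the read side ends */
	return NULL;
}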
@@ -346,13 +349,23 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
 	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
 };
 
+static void flow_destroy_filter(struct rcu_head *head)
+{
+	struct flow_filter *f = container_of(head, struct flow_filter, rcu);
+
+	del_timer_sync(&f->perturb_timer);
+	tcf_exts_destroy(f->tp, &f->exts);
+	tcf_em_tree_destroy(f->tp, &f->ematches);
+	kfree(f);
+}
+
 static int flow_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       unsigned long *arg, bool ovr)
 {
-	struct flow_head *head = tp->root;
-	struct flow_filter *f;
+	struct flow_head *head = rtnl_dereference(tp->root);
+	struct flow_filter *fold, *fnew;
 	struct nlattr *opt = tca[TCA_OPTIONS];
 	struct nlattr *tb[TCA_FLOW_MAX + 1];
 	struct tcf_exts e;
@@ -401,20 +414,42 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 	if (err < 0)
 		goto err1;
 
-	f = (struct flow_filter *)*arg;
-	if (f != NULL) {
+	err = -ENOBUFS;
+	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
+	if (!fnew)
+		goto err2;
+
+	fold = (struct flow_filter *)*arg;
+	if (fold) {
 		err = -EINVAL;
-		if (f->handle != handle && handle)
+		if (fold->handle != handle && handle)
 			goto err2;
 
-		mode = f->mode;
+		/* Copy fold into fnew */
+		fnew->handle = fold->handle;
+		fnew->keymask = fold->keymask;
+		fnew->tp = fold->tp;
+
+		fnew->handle = fold->handle;
+		fnew->nkeys = fold->nkeys;
+		fnew->keymask = fold->keymask;
+		fnew->mode = fold->mode;
+		fnew->mask = fold->mask;
+		fnew->xor = fold->xor;
+		fnew->rshift = fold->rshift;
+		fnew->addend = fold->addend;
+		fnew->divisor = fold->divisor;
+		fnew->baseclass = fold->baseclass;
+		fnew->hashrnd = fold->hashrnd;
+
+		mode = fold->mode;
 		if (tb[TCA_FLOW_MODE])
 			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
 		if (mode != FLOW_MODE_HASH && nkeys > 1)
 			goto err2;
 
 		if (mode == FLOW_MODE_HASH)
-			perturb_period = f->perturb_period;
+			perturb_period = fold->perturb_period;
 		if (tb[TCA_FLOW_PERTURB]) {
 			if (mode != FLOW_MODE_HASH)
 				goto err2;
@@ -444,83 +479,70 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 		if (TC_H_MIN(baseclass) == 0)
 			baseclass = TC_H_MAKE(baseclass, 1);
 
-		err = -ENOBUFS;
-		f = kzalloc(sizeof(*f), GFP_KERNEL);
-		if (f == NULL)
-			goto err2;
-
-		f->handle = handle;
-		f->mask = ~0U;
-		tcf_exts_init(&f->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
-
-		get_random_bytes(&f->hashrnd, 4);
-		f->perturb_timer.function = flow_perturbation;
-		f->perturb_timer.data = (unsigned long)f;
-		init_timer_deferrable(&f->perturb_timer);
+		fnew->handle = handle;
+		fnew->mask = ~0U;
+		fnew->tp = tp;
+		get_random_bytes(&fnew->hashrnd, 4);
+		tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
 	}
 
-	tcf_exts_change(tp, &f->exts, &e);
-	tcf_em_tree_change(tp, &f->ematches, &t);
+	fnew->perturb_timer.function = flow_perturbation;
+	fnew->perturb_timer.data = (unsigned long)fnew;
+	init_timer_deferrable(&fnew->perturb_timer);
 
-	tcf_tree_lock(tp);
+	tcf_exts_change(tp, &fnew->exts, &e);
+	tcf_em_tree_change(tp, &fnew->ematches, &t);
 
 	if (tb[TCA_FLOW_KEYS]) {
-		f->keymask = keymask;
-		f->nkeys = nkeys;
+		fnew->keymask = keymask;
+		fnew->nkeys = nkeys;
 	}
 
-	f->mode = mode;
+	fnew->mode = mode;
 
 	if (tb[TCA_FLOW_MASK])
-		f->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
+		fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
 	if (tb[TCA_FLOW_XOR])
-		f->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
+		fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
 	if (tb[TCA_FLOW_RSHIFT])
-		f->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
+		fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
 	if (tb[TCA_FLOW_ADDEND])
-		f->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);
+		fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);
 
 	if (tb[TCA_FLOW_DIVISOR])
-		f->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
+		fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
 	if (baseclass)
-		f->baseclass = baseclass;
+		fnew->baseclass = baseclass;
 
-	f->perturb_period = perturb_period;
-	del_timer(&f->perturb_timer);
+	fnew->perturb_period = perturb_period;
 	if (perturb_period)
-		mod_timer(&f->perturb_timer, jiffies + perturb_period);
+		mod_timer(&fnew->perturb_timer, jiffies + perturb_period);
 
 	if (*arg == 0)
-		list_add_tail(&f->list, &head->filters);
+		list_add_tail_rcu(&fnew->list, &head->filters);
+	else
+		list_replace_rcu(&fnew->list, &fold->list);
 
-	tcf_tree_unlock(tp);
+	*arg = (unsigned long)fnew;
 
-	*arg = (unsigned long)f;
+	if (fold)
+		call_rcu(&fold->rcu, flow_destroy_filter);
 	return 0;
 
 err2:
 	tcf_em_tree_destroy(tp, &t);
+	kfree(fnew);
 err1:
 	tcf_exts_destroy(tp, &e);
 	return err;
 }
 
-static void flow_destroy_filter(struct tcf_proto *tp, struct flow_filter *f)
-{
-	del_timer_sync(&f->perturb_timer);
-	tcf_exts_destroy(tp, &f->exts);
-	tcf_em_tree_destroy(tp, &f->ematches);
-	kfree(f);
-}
-
 static int flow_delete(struct tcf_proto *tp, unsigned long arg)
 {
 	struct flow_filter *f = (struct flow_filter *)arg;
 
-	tcf_tree_lock(tp);
-	list_del(&f->list);
-	tcf_tree_unlock(tp);
-	flow_destroy_filter(tp, f);
+	list_del_rcu(&f->list);
+	call_rcu(&f->rcu, flow_destroy_filter);
 	return 0;
 }
 
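Delete and replace no longer take tcf_tree_lock(): RTNL serializes writers, list_del_rcu() leaves the victim's forward pointer intact so in-flight readers can keep walking, and call_rcu() defers flow_destroy_filter() until those readers are gone. One caveat worth flagging: list_replace_rcu() is declared as list_replace_rcu(old, new), so the replace branch above, list_replace_rcu(&fnew->list, &fold->list), passes the arguments in the opposite order from the apparent intent; substituting fnew for fold in the list would read list_replace_rcu(&fold->list, &fnew->list). A minimal sketch of the deletion idiom, again with the hypothetical demo_* names:

/* Updater, RTNL held: unlink now, free after all readers drain. */
static void demo_delete(struct demo_filter *f)
{
	list_del_rcu(&f->list);
	call_rcu(&f->rcu, demo_free_rcu);
}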
@@ -532,28 +554,29 @@ static int flow_init(struct tcf_proto *tp)
 	if (head == NULL)
 		return -ENOBUFS;
 	INIT_LIST_HEAD(&head->filters);
-	tp->root = head;
+	rcu_assign_pointer(tp->root, head);
 	return 0;
 }
 
 static void flow_destroy(struct tcf_proto *tp)
 {
-	struct flow_head *head = tp->root;
+	struct flow_head *head = rtnl_dereference(tp->root);
 	struct flow_filter *f, *next;
 
 	list_for_each_entry_safe(f, next, &head->filters, list) {
-		list_del(&f->list);
-		flow_destroy_filter(tp, f);
+		list_del_rcu(&f->list);
+		call_rcu(&f->rcu, flow_destroy_filter);
 	}
-	kfree(head);
+	RCU_INIT_POINTER(tp->root, NULL);
+	kfree_rcu(head, rcu);
 }
 
 static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
 {
-	struct flow_head *head = tp->root;
+	struct flow_head *head = rtnl_dereference(tp->root);
 	struct flow_filter *f;
 
-	list_for_each_entry(f, &head->filters, list)
+	list_for_each_entry_rcu(f, &head->filters, list)
 		if (f->handle == handle)
 			return (unsigned long)f;
 	return 0;
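flow_destroy() unpublishes tp->root before disposing of it, and because the list head needs no cleanup beyond kfree(), it uses kfree_rcu() rather than a call_rcu() callback; the second argument names the embedded rcu_head member. The same idiom in sketch form (demo_head is hypothetical):

struct demo_head {
	struct list_head	filters;
	struct rcu_head		rcu;
};

/* Updater, RTNL held: unpublish the head, then let RCU reclaim it. */
static void demo_destroy_head(struct demo_head __rcu **proot)
{
	struct demo_head *head = rtnl_dereference(*proot);

	RCU_INIT_POINTER(*proot, NULL);	/* readers now see NULL */
	kfree_rcu(head, rcu);		/* plain kfree() after a grace period */
}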
@@ -626,10 +649,10 @@ nla_put_failure:
 
 static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 {
-	struct flow_head *head = tp->root;
+	struct flow_head *head = rtnl_dereference(tp->root);
 	struct flow_filter *f;
 
-	list_for_each_entry(f, &head->filters, list) {
+	list_for_each_entry_rcu(f, &head->filters, list) {
 		if (arg->count < arg->skip)
 			goto skip;
 		if (arg->fn(tp, (unsigned long)f, arg) < 0) {