about summary refs log tree commit diff stats
path: root/net/sched
diff options
context:
space:
mode:
authorJohn Fastabend <john.fastabend@gmail.com>2014-09-12 23:08:47 -0400
committerDavid S. Miller <davem@davemloft.net>2014-09-13 12:30:26 -0400
commit459d5f626da75573e985a7197b0919c3b143146c (patch)
treec9fda52cd39cf539718d2ae7f8bd2c5726e3ecdf /net/sched
parent331b72922c5f58d48fd5500acadc91777cc31970 (diff)
net: sched: make cls_u32 per cpu
This uses per-CPU counters in cls_u32 in preparation for converting over to RCU.

Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r--net/sched/cls_u32.c75
1 file changed, 59 insertions, 16 deletions
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 70c0be8d0121..f3227d73a7ae 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -55,10 +55,12 @@ struct tc_u_knode {
55 struct tcf_result res; 55 struct tcf_result res;
56 struct tc_u_hnode *ht_down; 56 struct tc_u_hnode *ht_down;
57#ifdef CONFIG_CLS_U32_PERF 57#ifdef CONFIG_CLS_U32_PERF
58 struct tc_u32_pcnt *pf; 58 struct tc_u32_pcnt __percpu *pf;
59#endif 59#endif
60#ifdef CONFIG_CLS_U32_MARK 60#ifdef CONFIG_CLS_U32_MARK
61 struct tc_u32_mark mark; 61 u32 val;
62 u32 mask;
63 u32 __percpu *pcpu_success;
62#endif 64#endif
63 struct tc_u32_sel sel; 65 struct tc_u32_sel sel;
64}; 66};
@@ -115,16 +117,16 @@ next_knode:
115 struct tc_u32_key *key = n->sel.keys; 117 struct tc_u32_key *key = n->sel.keys;
116 118
117#ifdef CONFIG_CLS_U32_PERF 119#ifdef CONFIG_CLS_U32_PERF
118 n->pf->rcnt += 1; 120 __this_cpu_inc(n->pf->rcnt);
119 j = 0; 121 j = 0;
120#endif 122#endif
121 123
122#ifdef CONFIG_CLS_U32_MARK 124#ifdef CONFIG_CLS_U32_MARK
123 if ((skb->mark & n->mark.mask) != n->mark.val) { 125 if ((skb->mark & n->mask) != n->val) {
124 n = n->next; 126 n = n->next;
125 goto next_knode; 127 goto next_knode;
126 } else { 128 } else {
127 n->mark.success++; 129 __this_cpu_inc(*n->pcpu_success);
128 } 130 }
129#endif 131#endif
130 132
@@ -143,7 +145,7 @@ next_knode:
143 goto next_knode; 145 goto next_knode;
144 } 146 }
145#ifdef CONFIG_CLS_U32_PERF 147#ifdef CONFIG_CLS_U32_PERF
146 n->pf->kcnts[j] += 1; 148 __this_cpu_inc(n->pf->kcnts[j]);
147 j++; 149 j++;
148#endif 150#endif
149 } 151 }
@@ -159,7 +161,7 @@ check_terminal:
159 } 161 }
160#endif 162#endif
161#ifdef CONFIG_CLS_U32_PERF 163#ifdef CONFIG_CLS_U32_PERF
162 n->pf->rhit += 1; 164 __this_cpu_inc(n->pf->rhit);
163#endif 165#endif
164 r = tcf_exts_exec(skb, &n->exts, res); 166 r = tcf_exts_exec(skb, &n->exts, res);
165 if (r < 0) { 167 if (r < 0) {
@@ -342,7 +344,7 @@ static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
342 if (n->ht_down) 344 if (n->ht_down)
343 n->ht_down->refcnt--; 345 n->ht_down->refcnt--;
344#ifdef CONFIG_CLS_U32_PERF 346#ifdef CONFIG_CLS_U32_PERF
345 kfree(n->pf); 347 free_percpu(n->pf);
346#endif 348#endif
347 kfree(n); 349 kfree(n);
348 return 0; 350 return 0;
@@ -564,6 +566,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
564 struct nlattr *tb[TCA_U32_MAX + 1]; 566 struct nlattr *tb[TCA_U32_MAX + 1];
565 u32 htid; 567 u32 htid;
566 int err; 568 int err;
569#ifdef CONFIG_CLS_U32_PERF
570 size_t size;
571#endif
567 572
568 if (opt == NULL) 573 if (opt == NULL)
569 return handle ? -EINVAL : 0; 574 return handle ? -EINVAL : 0;
@@ -642,8 +647,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
642 return -ENOBUFS; 647 return -ENOBUFS;
643 648
644#ifdef CONFIG_CLS_U32_PERF 649#ifdef CONFIG_CLS_U32_PERF
645 n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL); 650 size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64);
646 if (n->pf == NULL) { 651 n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt));
652 if (!n->pf) {
647 kfree(n); 653 kfree(n);
648 return -ENOBUFS; 654 return -ENOBUFS;
649 } 655 }
@@ -656,12 +662,14 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
656 tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE); 662 tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
657 663
658#ifdef CONFIG_CLS_U32_MARK 664#ifdef CONFIG_CLS_U32_MARK
665 n->pcpu_success = alloc_percpu(u32);
666
659 if (tb[TCA_U32_MARK]) { 667 if (tb[TCA_U32_MARK]) {
660 struct tc_u32_mark *mark; 668 struct tc_u32_mark *mark;
661 669
662 mark = nla_data(tb[TCA_U32_MARK]); 670 mark = nla_data(tb[TCA_U32_MARK]);
663 memcpy(&n->mark, mark, sizeof(struct tc_u32_mark)); 671 n->val = mark->val;
664 n->mark.success = 0; 672 n->mask = mark->mask;
665 } 673 }
666#endif 674#endif
667 675
@@ -745,6 +753,11 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
745 if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor)) 753 if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
746 goto nla_put_failure; 754 goto nla_put_failure;
747 } else { 755 } else {
756#ifdef CONFIG_CLS_U32_PERF
757 struct tc_u32_pcnt *gpf;
758#endif
759 int cpu;
760
748 if (nla_put(skb, TCA_U32_SEL, 761 if (nla_put(skb, TCA_U32_SEL,
749 sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key), 762 sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
750 &n->sel)) 763 &n->sel))
@@ -762,9 +775,20 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
762 goto nla_put_failure; 775 goto nla_put_failure;
763 776
764#ifdef CONFIG_CLS_U32_MARK 777#ifdef CONFIG_CLS_U32_MARK
765 if ((n->mark.val || n->mark.mask) && 778 if ((n->val || n->mask)) {
766 nla_put(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark)) 779 struct tc_u32_mark mark = {.val = n->val,
767 goto nla_put_failure; 780 .mask = n->mask,
781 .success = 0};
782
783 for_each_possible_cpu(cpu) {
784 __u32 cnt = *per_cpu_ptr(n->pcpu_success, cpu);
785
786 mark.success += cnt;
787 }
788
789 if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
790 goto nla_put_failure;
791 }
768#endif 792#endif
769 793
770 if (tcf_exts_dump(skb, &n->exts) < 0) 794 if (tcf_exts_dump(skb, &n->exts) < 0)
@@ -779,10 +803,29 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
779 } 803 }
780#endif 804#endif
781#ifdef CONFIG_CLS_U32_PERF 805#ifdef CONFIG_CLS_U32_PERF
806 gpf = kzalloc(sizeof(struct tc_u32_pcnt) +
807 n->sel.nkeys * sizeof(u64),
808 GFP_KERNEL);
809 if (!gpf)
810 goto nla_put_failure;
811
812 for_each_possible_cpu(cpu) {
813 int i;
814 struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);
815
816 gpf->rcnt += pf->rcnt;
817 gpf->rhit += pf->rhit;
818 for (i = 0; i < n->sel.nkeys; i++)
819 gpf->kcnts[i] += pf->kcnts[i];
820 }
821
782 if (nla_put(skb, TCA_U32_PCNT, 822 if (nla_put(skb, TCA_U32_PCNT,
783 sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64), 823 sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
784 n->pf)) 824 gpf)) {
825 kfree(gpf);
785 goto nla_put_failure; 826 goto nla_put_failure;
827 }
828 kfree(gpf);
786#endif 829#endif
787 } 830 }
788 831