about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
authorDaniel Borkmann <daniel@iogearbox.net>2016-06-06 16:50:39 -0400
committerDavid S. Miller <davem@davemloft.net>2016-06-07 19:59:53 -0400
commit92c075dbdeed02bdf293cb0f513bad70aa714b8d (patch)
treef15e2f03239246b5927ac2975feb7ee1e55a1ff7 /net
parenta03e6fe569713fb3ff0714f8fd7c8785c0ca9e22 (diff)
net: sched: fix tc_should_offload for specific clsact classes
When offloading classifiers such as u32 or flower to hardware, and the qdisc is clsact (TC_H_CLSACT), then we need to differentiate its classes, since not all of them handle ingress, therefore we must leave those in software path. Add a .tcf_cl_offload() callback, so we can generically handle them, tested on ixgbe. Fixes: 10cbc6843446 ("net/sched: cls_flower: Hardware offloaded filters statistics support") Fixes: 5b33f48842fa ("net/flower: Introduce hardware offload support") Fixes: a1b7c5fd7fe9 ("net: sched: add cls_u32 offload hooks for netdevs") Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Acked-by: John Fastabend <john.r.fastabend@intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/sched/cls_flower.c6
-rw-r--r--net/sched/cls_u32.c8
-rw-r--r--net/sched/sch_ingress.c12
3 files changed, 19 insertions, 7 deletions
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 730aacafc22d..b3b7978f4182 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -171,7 +171,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, unsigned long cookie)
171 struct tc_cls_flower_offload offload = {0}; 171 struct tc_cls_flower_offload offload = {0};
172 struct tc_to_netdev tc; 172 struct tc_to_netdev tc;
173 173
174 if (!tc_should_offload(dev, 0)) 174 if (!tc_should_offload(dev, tp, 0))
175 return; 175 return;
176 176
177 offload.command = TC_CLSFLOWER_DESTROY; 177 offload.command = TC_CLSFLOWER_DESTROY;
@@ -194,7 +194,7 @@ static void fl_hw_replace_filter(struct tcf_proto *tp,
194 struct tc_cls_flower_offload offload = {0}; 194 struct tc_cls_flower_offload offload = {0};
195 struct tc_to_netdev tc; 195 struct tc_to_netdev tc;
196 196
197 if (!tc_should_offload(dev, flags)) 197 if (!tc_should_offload(dev, tp, flags))
198 return; 198 return;
199 199
200 offload.command = TC_CLSFLOWER_REPLACE; 200 offload.command = TC_CLSFLOWER_REPLACE;
@@ -216,7 +216,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
216 struct tc_cls_flower_offload offload = {0}; 216 struct tc_cls_flower_offload offload = {0};
217 struct tc_to_netdev tc; 217 struct tc_to_netdev tc;
218 218
219 if (!tc_should_offload(dev, 0)) 219 if (!tc_should_offload(dev, tp, 0))
220 return; 220 return;
221 221
222 offload.command = TC_CLSFLOWER_STATS; 222 offload.command = TC_CLSFLOWER_STATS;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index fe05449537a3..27b99fd774d7 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -440,7 +440,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
440 offload.type = TC_SETUP_CLSU32; 440 offload.type = TC_SETUP_CLSU32;
441 offload.cls_u32 = &u32_offload; 441 offload.cls_u32 = &u32_offload;
442 442
443 if (tc_should_offload(dev, 0)) { 443 if (tc_should_offload(dev, tp, 0)) {
444 offload.cls_u32->command = TC_CLSU32_DELETE_KNODE; 444 offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
445 offload.cls_u32->knode.handle = handle; 445 offload.cls_u32->knode.handle = handle;
446 dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, 446 dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
@@ -457,7 +457,7 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp,
457 struct tc_to_netdev offload; 457 struct tc_to_netdev offload;
458 int err; 458 int err;
459 459
460 if (!tc_should_offload(dev, flags)) 460 if (!tc_should_offload(dev, tp, flags))
461 return tc_skip_sw(flags) ? -EINVAL : 0; 461 return tc_skip_sw(flags) ? -EINVAL : 0;
462 462
463 offload.type = TC_SETUP_CLSU32; 463 offload.type = TC_SETUP_CLSU32;
@@ -485,7 +485,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
485 offload.type = TC_SETUP_CLSU32; 485 offload.type = TC_SETUP_CLSU32;
486 offload.cls_u32 = &u32_offload; 486 offload.cls_u32 = &u32_offload;
487 487
488 if (tc_should_offload(dev, 0)) { 488 if (tc_should_offload(dev, tp, 0)) {
489 offload.cls_u32->command = TC_CLSU32_DELETE_HNODE; 489 offload.cls_u32->command = TC_CLSU32_DELETE_HNODE;
490 offload.cls_u32->hnode.divisor = h->divisor; 490 offload.cls_u32->hnode.divisor = h->divisor;
491 offload.cls_u32->hnode.handle = h->handle; 491 offload.cls_u32->hnode.handle = h->handle;
@@ -508,7 +508,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp,
508 offload.type = TC_SETUP_CLSU32; 508 offload.type = TC_SETUP_CLSU32;
509 offload.cls_u32 = &u32_offload; 509 offload.cls_u32 = &u32_offload;
510 510
511 if (tc_should_offload(dev, flags)) { 511 if (tc_should_offload(dev, tp, flags)) {
512 offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE; 512 offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
513 offload.cls_u32->knode.handle = n->handle; 513 offload.cls_u32->knode.handle = n->handle;
514 offload.cls_u32->knode.fshift = n->fshift; 514 offload.cls_u32->knode.fshift = n->fshift;
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 10adbc617905..8fe6999b642a 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -27,6 +27,11 @@ static unsigned long ingress_get(struct Qdisc *sch, u32 classid)
27 return TC_H_MIN(classid) + 1; 27 return TC_H_MIN(classid) + 1;
28} 28}
29 29
30static bool ingress_cl_offload(u32 classid)
31{
32 return true;
33}
34
30static unsigned long ingress_bind_filter(struct Qdisc *sch, 35static unsigned long ingress_bind_filter(struct Qdisc *sch,
31 unsigned long parent, u32 classid) 36 unsigned long parent, u32 classid)
32{ 37{
@@ -86,6 +91,7 @@ static const struct Qdisc_class_ops ingress_class_ops = {
86 .put = ingress_put, 91 .put = ingress_put,
87 .walk = ingress_walk, 92 .walk = ingress_walk,
88 .tcf_chain = ingress_find_tcf, 93 .tcf_chain = ingress_find_tcf,
94 .tcf_cl_offload = ingress_cl_offload,
89 .bind_tcf = ingress_bind_filter, 95 .bind_tcf = ingress_bind_filter,
90 .unbind_tcf = ingress_put, 96 .unbind_tcf = ingress_put,
91}; 97};
@@ -110,6 +116,11 @@ static unsigned long clsact_get(struct Qdisc *sch, u32 classid)
110 } 116 }
111} 117}
112 118
119static bool clsact_cl_offload(u32 classid)
120{
121 return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS);
122}
123
113static unsigned long clsact_bind_filter(struct Qdisc *sch, 124static unsigned long clsact_bind_filter(struct Qdisc *sch,
114 unsigned long parent, u32 classid) 125 unsigned long parent, u32 classid)
115{ 126{
@@ -158,6 +169,7 @@ static const struct Qdisc_class_ops clsact_class_ops = {
158 .put = ingress_put, 169 .put = ingress_put,
159 .walk = ingress_walk, 170 .walk = ingress_walk,
160 .tcf_chain = clsact_find_tcf, 171 .tcf_chain = clsact_find_tcf,
172 .tcf_cl_offload = clsact_cl_offload,
161 .bind_tcf = clsact_bind_filter, 173 .bind_tcf = clsact_bind_filter,
162 .unbind_tcf = ingress_put, 174 .unbind_tcf = ingress_put,
163}; 175};