diff options
author | Daniel Borkmann <daniel@iogearbox.net> | 2016-06-06 16:50:39 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2016-06-07 19:59:53 -0400 |
commit | 92c075dbdeed02bdf293cb0f513bad70aa714b8d (patch) | |
tree | f15e2f03239246b5927ac2975feb7ee1e55a1ff7 | |
parent | a03e6fe569713fb3ff0714f8fd7c8785c0ca9e22 (diff) |
net: sched: fix tc_should_offload for specific clsact classes
When offloading classifiers such as u32 or flower to hardware, and the
qdisc is clsact (TC_H_CLSACT), then we need to differentiate its classes,
since not all of them handle ingress; therefore we must leave those in
the software path. Add a .tcf_cl_offload() callback, so we can generically
handle them, tested on ixgbe.
Fixes: 10cbc6843446 ("net/sched: cls_flower: Hardware offloaded filters statistics support")
Fixes: 5b33f48842fa ("net/flower: Introduce hardware offload support")
Fixes: a1b7c5fd7fe9 ("net: sched: add cls_u32 offload hooks for netdevs")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | include/net/pkt_cls.h | 10 | ||||
-rw-r--r-- | include/net/sch_generic.h | 1 | ||||
-rw-r--r-- | net/sched/cls_flower.c | 6 | ||||
-rw-r--r-- | net/sched/cls_u32.c | 8 | ||||
-rw-r--r-- | net/sched/sch_ingress.c | 12 |
5 files changed, 27 insertions, 10 deletions
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 0f7efa88f210..3722dda0199d 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h | |||
@@ -392,16 +392,20 @@ struct tc_cls_u32_offload { | |||
392 | }; | 392 | }; |
393 | }; | 393 | }; |
394 | 394 | ||
395 | static inline bool tc_should_offload(struct net_device *dev, u32 flags) | 395 | static inline bool tc_should_offload(const struct net_device *dev, |
396 | const struct tcf_proto *tp, u32 flags) | ||
396 | { | 397 | { |
398 | const struct Qdisc *sch = tp->q; | ||
399 | const struct Qdisc_class_ops *cops = sch->ops->cl_ops; | ||
400 | |||
397 | if (!(dev->features & NETIF_F_HW_TC)) | 401 | if (!(dev->features & NETIF_F_HW_TC)) |
398 | return false; | 402 | return false; |
399 | |||
400 | if (flags & TCA_CLS_FLAGS_SKIP_HW) | 403 | if (flags & TCA_CLS_FLAGS_SKIP_HW) |
401 | return false; | 404 | return false; |
402 | |||
403 | if (!dev->netdev_ops->ndo_setup_tc) | 405 | if (!dev->netdev_ops->ndo_setup_tc) |
404 | return false; | 406 | return false; |
407 | if (cops && cops->tcf_cl_offload) | ||
408 | return cops->tcf_cl_offload(tp->classid); | ||
405 | 409 | ||
406 | return true; | 410 | return true; |
407 | } | 411 | } |
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 6803af17dfcf..62d553184e91 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
@@ -168,6 +168,7 @@ struct Qdisc_class_ops { | |||
168 | 168 | ||
169 | /* Filter manipulation */ | 169 | /* Filter manipulation */ |
170 | struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long); | 170 | struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long); |
171 | bool (*tcf_cl_offload)(u32 classid); | ||
171 | unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, | 172 | unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, |
172 | u32 classid); | 173 | u32 classid); |
173 | void (*unbind_tcf)(struct Qdisc *, unsigned long); | 174 | void (*unbind_tcf)(struct Qdisc *, unsigned long); |
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 730aacafc22d..b3b7978f4182 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c | |||
@@ -171,7 +171,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, unsigned long cookie) | |||
171 | struct tc_cls_flower_offload offload = {0}; | 171 | struct tc_cls_flower_offload offload = {0}; |
172 | struct tc_to_netdev tc; | 172 | struct tc_to_netdev tc; |
173 | 173 | ||
174 | if (!tc_should_offload(dev, 0)) | 174 | if (!tc_should_offload(dev, tp, 0)) |
175 | return; | 175 | return; |
176 | 176 | ||
177 | offload.command = TC_CLSFLOWER_DESTROY; | 177 | offload.command = TC_CLSFLOWER_DESTROY; |
@@ -194,7 +194,7 @@ static void fl_hw_replace_filter(struct tcf_proto *tp, | |||
194 | struct tc_cls_flower_offload offload = {0}; | 194 | struct tc_cls_flower_offload offload = {0}; |
195 | struct tc_to_netdev tc; | 195 | struct tc_to_netdev tc; |
196 | 196 | ||
197 | if (!tc_should_offload(dev, flags)) | 197 | if (!tc_should_offload(dev, tp, flags)) |
198 | return; | 198 | return; |
199 | 199 | ||
200 | offload.command = TC_CLSFLOWER_REPLACE; | 200 | offload.command = TC_CLSFLOWER_REPLACE; |
@@ -216,7 +216,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) | |||
216 | struct tc_cls_flower_offload offload = {0}; | 216 | struct tc_cls_flower_offload offload = {0}; |
217 | struct tc_to_netdev tc; | 217 | struct tc_to_netdev tc; |
218 | 218 | ||
219 | if (!tc_should_offload(dev, 0)) | 219 | if (!tc_should_offload(dev, tp, 0)) |
220 | return; | 220 | return; |
221 | 221 | ||
222 | offload.command = TC_CLSFLOWER_STATS; | 222 | offload.command = TC_CLSFLOWER_STATS; |
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index fe05449537a3..27b99fd774d7 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -440,7 +440,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle) | |||
440 | offload.type = TC_SETUP_CLSU32; | 440 | offload.type = TC_SETUP_CLSU32; |
441 | offload.cls_u32 = &u32_offload; | 441 | offload.cls_u32 = &u32_offload; |
442 | 442 | ||
443 | if (tc_should_offload(dev, 0)) { | 443 | if (tc_should_offload(dev, tp, 0)) { |
444 | offload.cls_u32->command = TC_CLSU32_DELETE_KNODE; | 444 | offload.cls_u32->command = TC_CLSU32_DELETE_KNODE; |
445 | offload.cls_u32->knode.handle = handle; | 445 | offload.cls_u32->knode.handle = handle; |
446 | dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, | 446 | dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, |
@@ -457,7 +457,7 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, | |||
457 | struct tc_to_netdev offload; | 457 | struct tc_to_netdev offload; |
458 | int err; | 458 | int err; |
459 | 459 | ||
460 | if (!tc_should_offload(dev, flags)) | 460 | if (!tc_should_offload(dev, tp, flags)) |
461 | return tc_skip_sw(flags) ? -EINVAL : 0; | 461 | return tc_skip_sw(flags) ? -EINVAL : 0; |
462 | 462 | ||
463 | offload.type = TC_SETUP_CLSU32; | 463 | offload.type = TC_SETUP_CLSU32; |
@@ -485,7 +485,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h) | |||
485 | offload.type = TC_SETUP_CLSU32; | 485 | offload.type = TC_SETUP_CLSU32; |
486 | offload.cls_u32 = &u32_offload; | 486 | offload.cls_u32 = &u32_offload; |
487 | 487 | ||
488 | if (tc_should_offload(dev, 0)) { | 488 | if (tc_should_offload(dev, tp, 0)) { |
489 | offload.cls_u32->command = TC_CLSU32_DELETE_HNODE; | 489 | offload.cls_u32->command = TC_CLSU32_DELETE_HNODE; |
490 | offload.cls_u32->hnode.divisor = h->divisor; | 490 | offload.cls_u32->hnode.divisor = h->divisor; |
491 | offload.cls_u32->hnode.handle = h->handle; | 491 | offload.cls_u32->hnode.handle = h->handle; |
@@ -508,7 +508,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, | |||
508 | offload.type = TC_SETUP_CLSU32; | 508 | offload.type = TC_SETUP_CLSU32; |
509 | offload.cls_u32 = &u32_offload; | 509 | offload.cls_u32 = &u32_offload; |
510 | 510 | ||
511 | if (tc_should_offload(dev, flags)) { | 511 | if (tc_should_offload(dev, tp, flags)) { |
512 | offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE; | 512 | offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE; |
513 | offload.cls_u32->knode.handle = n->handle; | 513 | offload.cls_u32->knode.handle = n->handle; |
514 | offload.cls_u32->knode.fshift = n->fshift; | 514 | offload.cls_u32->knode.fshift = n->fshift; |
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index 10adbc617905..8fe6999b642a 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c | |||
@@ -27,6 +27,11 @@ static unsigned long ingress_get(struct Qdisc *sch, u32 classid) | |||
27 | return TC_H_MIN(classid) + 1; | 27 | return TC_H_MIN(classid) + 1; |
28 | } | 28 | } |
29 | 29 | ||
30 | static bool ingress_cl_offload(u32 classid) | ||
31 | { | ||
32 | return true; | ||
33 | } | ||
34 | |||
30 | static unsigned long ingress_bind_filter(struct Qdisc *sch, | 35 | static unsigned long ingress_bind_filter(struct Qdisc *sch, |
31 | unsigned long parent, u32 classid) | 36 | unsigned long parent, u32 classid) |
32 | { | 37 | { |
@@ -86,6 +91,7 @@ static const struct Qdisc_class_ops ingress_class_ops = { | |||
86 | .put = ingress_put, | 91 | .put = ingress_put, |
87 | .walk = ingress_walk, | 92 | .walk = ingress_walk, |
88 | .tcf_chain = ingress_find_tcf, | 93 | .tcf_chain = ingress_find_tcf, |
94 | .tcf_cl_offload = ingress_cl_offload, | ||
89 | .bind_tcf = ingress_bind_filter, | 95 | .bind_tcf = ingress_bind_filter, |
90 | .unbind_tcf = ingress_put, | 96 | .unbind_tcf = ingress_put, |
91 | }; | 97 | }; |
@@ -110,6 +116,11 @@ static unsigned long clsact_get(struct Qdisc *sch, u32 classid) | |||
110 | } | 116 | } |
111 | } | 117 | } |
112 | 118 | ||
119 | static bool clsact_cl_offload(u32 classid) | ||
120 | { | ||
121 | return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS); | ||
122 | } | ||
123 | |||
113 | static unsigned long clsact_bind_filter(struct Qdisc *sch, | 124 | static unsigned long clsact_bind_filter(struct Qdisc *sch, |
114 | unsigned long parent, u32 classid) | 125 | unsigned long parent, u32 classid) |
115 | { | 126 | { |
@@ -158,6 +169,7 @@ static const struct Qdisc_class_ops clsact_class_ops = { | |||
158 | .put = ingress_put, | 169 | .put = ingress_put, |
159 | .walk = ingress_walk, | 170 | .walk = ingress_walk, |
160 | .tcf_chain = clsact_find_tcf, | 171 | .tcf_chain = clsact_find_tcf, |
172 | .tcf_cl_offload = clsact_cl_offload, | ||
161 | .bind_tcf = clsact_bind_filter, | 173 | .bind_tcf = clsact_bind_filter, |
162 | .unbind_tcf = ingress_put, | 174 | .unbind_tcf = ingress_put, |
163 | }; | 175 | }; |