diff options
author | Jiri Pirko <jiri@mellanox.com> | 2017-10-19 09:50:46 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2017-10-20 22:04:08 -0400 |
commit | 8d26d5636dff9fca30816579910aaa9a55b4d96d (patch) | |
tree | 3c82ff1aa167e07b81296a2d41cf6cfa3d2f7edd /net | |
parent | 6b3eb752b4b9481868b3393f06a236a1aedfa43f (diff) |
net: sched: avoid ndo_setup_tc calls for TC_SETUP_CLS*
All drivers are converted to use block callbacks for TC_SETUP_CLS*.
So it is now safe to remove the calls to ndo_setup_tc from cls_*.
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r-- | net/dsa/slave.c | 2 | ||||
-rw-r--r-- | net/sched/cls_bpf.c | 14 | ||||
-rw-r--r-- | net/sched/cls_flower.c | 20 | ||||
-rw-r--r-- | net/sched/cls_matchall.c | 16 | ||||
-rw-r--r-- | net/sched/cls_u32.c | 31 |
5 files changed, 0 insertions, 83 deletions
diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 80142918d5d1..d0ae7010ea45 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c | |||
@@ -846,8 +846,6 @@ static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type, | |||
846 | void *type_data) | 846 | void *type_data) |
847 | { | 847 | { |
848 | switch (type) { | 848 | switch (type) { |
849 | case TC_SETUP_CLSMATCHALL: | ||
850 | return 0; /* will be removed after conversion from ndo */ | ||
851 | case TC_SETUP_BLOCK: | 849 | case TC_SETUP_BLOCK: |
852 | return dsa_slave_setup_tc_block(dev, type_data); | 850 | return dsa_slave_setup_tc_block(dev, type_data); |
853 | default: | 851 | default: |
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index e379fdf928bd..0f8b51061c39 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c | |||
@@ -148,7 +148,6 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog, | |||
148 | enum tc_clsbpf_command cmd) | 148 | enum tc_clsbpf_command cmd) |
149 | { | 149 | { |
150 | bool addorrep = cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE; | 150 | bool addorrep = cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE; |
151 | struct net_device *dev = tp->q->dev_queue->dev; | ||
152 | struct tcf_block *block = tp->chain->block; | 151 | struct tcf_block *block = tp->chain->block; |
153 | bool skip_sw = tc_skip_sw(prog->gen_flags); | 152 | bool skip_sw = tc_skip_sw(prog->gen_flags); |
154 | struct tc_cls_bpf_offload cls_bpf = {}; | 153 | struct tc_cls_bpf_offload cls_bpf = {}; |
@@ -162,19 +161,6 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog, | |||
162 | cls_bpf.exts_integrated = prog->exts_integrated; | 161 | cls_bpf.exts_integrated = prog->exts_integrated; |
163 | cls_bpf.gen_flags = prog->gen_flags; | 162 | cls_bpf.gen_flags = prog->gen_flags; |
164 | 163 | ||
165 | if (tc_can_offload(dev)) { | ||
166 | err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSBPF, | ||
167 | &cls_bpf); | ||
168 | if (addorrep) { | ||
169 | if (err) { | ||
170 | if (skip_sw) | ||
171 | return err; | ||
172 | } else { | ||
173 | prog->gen_flags |= TCA_CLS_FLAGS_IN_HW; | ||
174 | } | ||
175 | } | ||
176 | } | ||
177 | |||
178 | err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw); | 164 | err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw); |
179 | if (addorrep) { | 165 | if (addorrep) { |
180 | if (err < 0) { | 166 | if (err < 0) { |
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 76b4e0a1c92f..16f58abaa697 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c | |||
@@ -200,16 +200,12 @@ static void fl_destroy_filter(struct rcu_head *head) | |||
200 | static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f) | 200 | static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f) |
201 | { | 201 | { |
202 | struct tc_cls_flower_offload cls_flower = {}; | 202 | struct tc_cls_flower_offload cls_flower = {}; |
203 | struct net_device *dev = tp->q->dev_queue->dev; | ||
204 | struct tcf_block *block = tp->chain->block; | 203 | struct tcf_block *block = tp->chain->block; |
205 | 204 | ||
206 | tc_cls_common_offload_init(&cls_flower.common, tp); | 205 | tc_cls_common_offload_init(&cls_flower.common, tp); |
207 | cls_flower.command = TC_CLSFLOWER_DESTROY; | 206 | cls_flower.command = TC_CLSFLOWER_DESTROY; |
208 | cls_flower.cookie = (unsigned long) f; | 207 | cls_flower.cookie = (unsigned long) f; |
209 | 208 | ||
210 | if (tc_can_offload(dev)) | ||
211 | dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, | ||
212 | &cls_flower); | ||
213 | tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER, | 209 | tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER, |
214 | &cls_flower, false); | 210 | &cls_flower, false); |
215 | } | 211 | } |
@@ -219,7 +215,6 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, | |||
219 | struct fl_flow_key *mask, | 215 | struct fl_flow_key *mask, |
220 | struct cls_fl_filter *f) | 216 | struct cls_fl_filter *f) |
221 | { | 217 | { |
222 | struct net_device *dev = tp->q->dev_queue->dev; | ||
223 | struct tc_cls_flower_offload cls_flower = {}; | 218 | struct tc_cls_flower_offload cls_flower = {}; |
224 | struct tcf_block *block = tp->chain->block; | 219 | struct tcf_block *block = tp->chain->block; |
225 | bool skip_sw = tc_skip_sw(f->flags); | 220 | bool skip_sw = tc_skip_sw(f->flags); |
@@ -233,17 +228,6 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, | |||
233 | cls_flower.key = &f->mkey; | 228 | cls_flower.key = &f->mkey; |
234 | cls_flower.exts = &f->exts; | 229 | cls_flower.exts = &f->exts; |
235 | 230 | ||
236 | if (tc_can_offload(dev)) { | ||
237 | err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, | ||
238 | &cls_flower); | ||
239 | if (err) { | ||
240 | if (skip_sw) | ||
241 | return err; | ||
242 | } else { | ||
243 | f->flags |= TCA_CLS_FLAGS_IN_HW; | ||
244 | } | ||
245 | } | ||
246 | |||
247 | err = tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER, | 231 | err = tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER, |
248 | &cls_flower, skip_sw); | 232 | &cls_flower, skip_sw); |
249 | if (err < 0) { | 233 | if (err < 0) { |
@@ -262,7 +246,6 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, | |||
262 | static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) | 246 | static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) |
263 | { | 247 | { |
264 | struct tc_cls_flower_offload cls_flower = {}; | 248 | struct tc_cls_flower_offload cls_flower = {}; |
265 | struct net_device *dev = tp->q->dev_queue->dev; | ||
266 | struct tcf_block *block = tp->chain->block; | 249 | struct tcf_block *block = tp->chain->block; |
267 | 250 | ||
268 | tc_cls_common_offload_init(&cls_flower.common, tp); | 251 | tc_cls_common_offload_init(&cls_flower.common, tp); |
@@ -270,9 +253,6 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) | |||
270 | cls_flower.cookie = (unsigned long) f; | 253 | cls_flower.cookie = (unsigned long) f; |
271 | cls_flower.exts = &f->exts; | 254 | cls_flower.exts = &f->exts; |
272 | 255 | ||
273 | if (tc_can_offload(dev)) | ||
274 | dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, | ||
275 | &cls_flower); | ||
276 | tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER, | 256 | tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER, |
277 | &cls_flower, false); | 257 | &cls_flower, false); |
278 | } | 258 | } |
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 5278534c7e87..70e78d74f6d3 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c | |||
@@ -54,7 +54,6 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp, | |||
54 | struct cls_mall_head *head, | 54 | struct cls_mall_head *head, |
55 | unsigned long cookie) | 55 | unsigned long cookie) |
56 | { | 56 | { |
57 | struct net_device *dev = tp->q->dev_queue->dev; | ||
58 | struct tc_cls_matchall_offload cls_mall = {}; | 57 | struct tc_cls_matchall_offload cls_mall = {}; |
59 | struct tcf_block *block = tp->chain->block; | 58 | struct tcf_block *block = tp->chain->block; |
60 | 59 | ||
@@ -62,9 +61,6 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp, | |||
62 | cls_mall.command = TC_CLSMATCHALL_DESTROY; | 61 | cls_mall.command = TC_CLSMATCHALL_DESTROY; |
63 | cls_mall.cookie = cookie; | 62 | cls_mall.cookie = cookie; |
64 | 63 | ||
65 | if (tc_can_offload(dev)) | ||
66 | dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL, | ||
67 | &cls_mall); | ||
68 | tc_setup_cb_call(block, NULL, TC_SETUP_CLSMATCHALL, &cls_mall, false); | 64 | tc_setup_cb_call(block, NULL, TC_SETUP_CLSMATCHALL, &cls_mall, false); |
69 | } | 65 | } |
70 | 66 | ||
@@ -72,7 +68,6 @@ static int mall_replace_hw_filter(struct tcf_proto *tp, | |||
72 | struct cls_mall_head *head, | 68 | struct cls_mall_head *head, |
73 | unsigned long cookie) | 69 | unsigned long cookie) |
74 | { | 70 | { |
75 | struct net_device *dev = tp->q->dev_queue->dev; | ||
76 | struct tc_cls_matchall_offload cls_mall = {}; | 71 | struct tc_cls_matchall_offload cls_mall = {}; |
77 | struct tcf_block *block = tp->chain->block; | 72 | struct tcf_block *block = tp->chain->block; |
78 | bool skip_sw = tc_skip_sw(head->flags); | 73 | bool skip_sw = tc_skip_sw(head->flags); |
@@ -83,17 +78,6 @@ static int mall_replace_hw_filter(struct tcf_proto *tp, | |||
83 | cls_mall.exts = &head->exts; | 78 | cls_mall.exts = &head->exts; |
84 | cls_mall.cookie = cookie; | 79 | cls_mall.cookie = cookie; |
85 | 80 | ||
86 | if (tc_can_offload(dev)) { | ||
87 | err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL, | ||
88 | &cls_mall); | ||
89 | if (err) { | ||
90 | if (skip_sw) | ||
91 | return err; | ||
92 | } else { | ||
93 | head->flags |= TCA_CLS_FLAGS_IN_HW; | ||
94 | } | ||
95 | } | ||
96 | |||
97 | err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSMATCHALL, | 81 | err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSMATCHALL, |
98 | &cls_mall, skip_sw); | 82 | &cls_mall, skip_sw); |
99 | if (err < 0) { | 83 | if (err < 0) { |
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index d53da7968eda..9ff17159fb61 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -464,7 +464,6 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key) | |||
464 | 464 | ||
465 | static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h) | 465 | static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h) |
466 | { | 466 | { |
467 | struct net_device *dev = tp->q->dev_queue->dev; | ||
468 | struct tcf_block *block = tp->chain->block; | 467 | struct tcf_block *block = tp->chain->block; |
469 | struct tc_cls_u32_offload cls_u32 = {}; | 468 | struct tc_cls_u32_offload cls_u32 = {}; |
470 | 469 | ||
@@ -474,15 +473,12 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h) | |||
474 | cls_u32.hnode.handle = h->handle; | 473 | cls_u32.hnode.handle = h->handle; |
475 | cls_u32.hnode.prio = h->prio; | 474 | cls_u32.hnode.prio = h->prio; |
476 | 475 | ||
477 | if (tc_can_offload(dev)) | ||
478 | dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32); | ||
479 | tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false); | 476 | tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false); |
480 | } | 477 | } |
481 | 478 | ||
482 | static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h, | 479 | static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h, |
483 | u32 flags) | 480 | u32 flags) |
484 | { | 481 | { |
485 | struct net_device *dev = tp->q->dev_queue->dev; | ||
486 | struct tcf_block *block = tp->chain->block; | 482 | struct tcf_block *block = tp->chain->block; |
487 | struct tc_cls_u32_offload cls_u32 = {}; | 483 | struct tc_cls_u32_offload cls_u32 = {}; |
488 | bool skip_sw = tc_skip_sw(flags); | 484 | bool skip_sw = tc_skip_sw(flags); |
@@ -495,17 +491,6 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h, | |||
495 | cls_u32.hnode.handle = h->handle; | 491 | cls_u32.hnode.handle = h->handle; |
496 | cls_u32.hnode.prio = h->prio; | 492 | cls_u32.hnode.prio = h->prio; |
497 | 493 | ||
498 | if (tc_can_offload(dev)) { | ||
499 | err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, | ||
500 | &cls_u32); | ||
501 | if (err) { | ||
502 | if (skip_sw) | ||
503 | return err; | ||
504 | } else { | ||
505 | offloaded = true; | ||
506 | } | ||
507 | } | ||
508 | |||
509 | err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw); | 494 | err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw); |
510 | if (err < 0) { | 495 | if (err < 0) { |
511 | u32_clear_hw_hnode(tp, h); | 496 | u32_clear_hw_hnode(tp, h); |
@@ -522,7 +507,6 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h, | |||
522 | 507 | ||
523 | static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle) | 508 | static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle) |
524 | { | 509 | { |
525 | struct net_device *dev = tp->q->dev_queue->dev; | ||
526 | struct tcf_block *block = tp->chain->block; | 510 | struct tcf_block *block = tp->chain->block; |
527 | struct tc_cls_u32_offload cls_u32 = {}; | 511 | struct tc_cls_u32_offload cls_u32 = {}; |
528 | 512 | ||
@@ -530,15 +514,12 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle) | |||
530 | cls_u32.command = TC_CLSU32_DELETE_KNODE; | 514 | cls_u32.command = TC_CLSU32_DELETE_KNODE; |
531 | cls_u32.knode.handle = handle; | 515 | cls_u32.knode.handle = handle; |
532 | 516 | ||
533 | if (tc_can_offload(dev)) | ||
534 | dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32); | ||
535 | tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false); | 517 | tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false); |
536 | } | 518 | } |
537 | 519 | ||
538 | static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n, | 520 | static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n, |
539 | u32 flags) | 521 | u32 flags) |
540 | { | 522 | { |
541 | struct net_device *dev = tp->q->dev_queue->dev; | ||
542 | struct tcf_block *block = tp->chain->block; | 523 | struct tcf_block *block = tp->chain->block; |
543 | struct tc_cls_u32_offload cls_u32 = {}; | 524 | struct tc_cls_u32_offload cls_u32 = {}; |
544 | bool skip_sw = tc_skip_sw(flags); | 525 | bool skip_sw = tc_skip_sw(flags); |
@@ -560,18 +541,6 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n, | |||
560 | if (n->ht_down) | 541 | if (n->ht_down) |
561 | cls_u32.knode.link_handle = n->ht_down->handle; | 542 | cls_u32.knode.link_handle = n->ht_down->handle; |
562 | 543 | ||
563 | |||
564 | if (tc_can_offload(dev)) { | ||
565 | err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, | ||
566 | &cls_u32); | ||
567 | if (err) { | ||
568 | if (skip_sw) | ||
569 | return err; | ||
570 | } else { | ||
571 | n->flags |= TCA_CLS_FLAGS_IN_HW; | ||
572 | } | ||
573 | } | ||
574 | |||
575 | err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw); | 544 | err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw); |
576 | if (err < 0) { | 545 | if (err < 0) { |
577 | u32_remove_hw_knode(tp, n->handle); | 546 | u32_remove_hw_knode(tp, n->handle); |