aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPieter Jansen van Vuuren <pieter.jansenvanvuuren@netronome.com>2019-05-06 20:24:21 -0400
committerDavid S. Miller <davem@davemloft.net>2019-05-07 15:23:40 -0400
commitd6787147e15dffa7b7f3116a5bc3cbe0670bd74f (patch)
tree6c87638d50d56add45a94827c7abadd4d94a1728
parent2e7ae67b5fbbe596cb3e06410970945f975a241a (diff)
net/sched: remove block pointer from common offload structure
Based on feedback from Jiri, avoid carrying a pointer to the tcf_block structure in the tc_cls_common_offload structure. Instead, store a flag in driver private data which indicates if offloads apply to a shared block at block binding time.

Suggested-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: Pieter Jansen van Vuuren <pieter.jansenvanvuuren@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/qos_conf.c4
-rw-r--r--include/net/pkt_cls.h3
-rw-r--r--net/sched/cls_bpf.c8
-rw-r--r--net/sched/cls_flower.c11
-rw-r--r--net/sched/cls_matchall.c12
-rw-r--r--net/sched/cls_u32.c17
8 files changed, 25 insertions, 36 deletions
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 6a6be7285105..40957a8dbfe6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -215,6 +215,7 @@ struct nfp_fl_qos {
215 * @lag_port_flags: Extended port flags to record lag state of repr 215 * @lag_port_flags: Extended port flags to record lag state of repr
216 * @mac_offloaded: Flag indicating a MAC address is offloaded for repr 216 * @mac_offloaded: Flag indicating a MAC address is offloaded for repr
217 * @offloaded_mac_addr: MAC address that has been offloaded for repr 217 * @offloaded_mac_addr: MAC address that has been offloaded for repr
218 * @block_shared: Flag indicating if offload applies to shared blocks
218 * @mac_list: List entry of reprs that share the same offloaded MAC 219 * @mac_list: List entry of reprs that share the same offloaded MAC
219 * @qos_table: Stored info on filters implementing qos 220 * @qos_table: Stored info on filters implementing qos
220 */ 221 */
@@ -223,6 +224,7 @@ struct nfp_flower_repr_priv {
223 unsigned long lag_port_flags; 224 unsigned long lag_port_flags;
224 bool mac_offloaded; 225 bool mac_offloaded;
225 u8 offloaded_mac_addr[ETH_ALEN]; 226 u8 offloaded_mac_addr[ETH_ALEN];
227 bool block_shared;
226 struct list_head mac_list; 228 struct list_head mac_list;
227 struct nfp_fl_qos qos_table; 229 struct nfp_fl_qos qos_table;
228}; 230};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 9c6bcc6e9d68..1fbfeb43c538 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1197,10 +1197,14 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
1197 struct tc_block_offload *f) 1197 struct tc_block_offload *f)
1198{ 1198{
1199 struct nfp_repr *repr = netdev_priv(netdev); 1199 struct nfp_repr *repr = netdev_priv(netdev);
1200 struct nfp_flower_repr_priv *repr_priv;
1200 1201
1201 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 1202 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1202 return -EOPNOTSUPP; 1203 return -EOPNOTSUPP;
1203 1204
1205 repr_priv = repr->app_priv;
1206 repr_priv->block_shared = tcf_block_shared(f->block);
1207
1204 switch (f->command) { 1208 switch (f->command) {
1205 case TC_BLOCK_BIND: 1209 case TC_BLOCK_BIND:
1206 return tcf_block_cb_register(f->block, 1210 return tcf_block_cb_register(f->block,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
index 1b2ee18d7ff9..86e968cd5ffd 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -76,8 +76,9 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
76 return -EOPNOTSUPP; 76 return -EOPNOTSUPP;
77 } 77 }
78 repr = netdev_priv(netdev); 78 repr = netdev_priv(netdev);
79 repr_priv = repr->app_priv;
79 80
80 if (tcf_block_shared(flow->common.block)) { 81 if (repr_priv->block_shared) {
81 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on shared blocks"); 82 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on shared blocks");
82 return -EOPNOTSUPP; 83 return -EOPNOTSUPP;
83 } 84 }
@@ -123,7 +124,6 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
123 config->cir = cpu_to_be32(rate); 124 config->cir = cpu_to_be32(rate);
124 nfp_ctrl_tx(repr->app->ctrl, skb); 125 nfp_ctrl_tx(repr->app->ctrl, skb);
125 126
126 repr_priv = repr->app_priv;
127 repr_priv->qos_table.netdev_port_id = netdev_port_id; 127 repr_priv->qos_table.netdev_port_id = netdev_port_id;
128 fl_priv->qos_rate_limiters++; 128 fl_priv->qos_rate_limiters++;
129 if (fl_priv->qos_rate_limiters == 1) 129 if (fl_priv->qos_rate_limiters == 1)
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index eed98f8fcb5e..514e3c80ecc1 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -629,7 +629,6 @@ struct tc_cls_common_offload {
629 u32 chain_index; 629 u32 chain_index;
630 __be16 protocol; 630 __be16 protocol;
631 u32 prio; 631 u32 prio;
632 struct tcf_block *block;
633 struct netlink_ext_ack *extack; 632 struct netlink_ext_ack *extack;
634}; 633};
635 634
@@ -731,13 +730,11 @@ static inline bool tc_in_hw(u32 flags)
731static inline void 730static inline void
732tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common, 731tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
733 const struct tcf_proto *tp, u32 flags, 732 const struct tcf_proto *tp, u32 flags,
734 struct tcf_block *block,
735 struct netlink_ext_ack *extack) 733 struct netlink_ext_ack *extack)
736{ 734{
737 cls_common->chain_index = tp->chain->index; 735 cls_common->chain_index = tp->chain->index;
738 cls_common->protocol = tp->protocol; 736 cls_common->protocol = tp->protocol;
739 cls_common->prio = tp->prio; 737 cls_common->prio = tp->prio;
740 cls_common->block = block;
741 if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE) 738 if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
742 cls_common->extack = extack; 739 cls_common->extack = extack;
743} 740}
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index ce7ff286ccb8..27365ed3fe0b 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -157,8 +157,7 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
157 skip_sw = prog && tc_skip_sw(prog->gen_flags); 157 skip_sw = prog && tc_skip_sw(prog->gen_flags);
158 obj = prog ?: oldprog; 158 obj = prog ?: oldprog;
159 159
160 tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, block, 160 tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, extack);
161 extack);
162 cls_bpf.command = TC_CLSBPF_OFFLOAD; 161 cls_bpf.command = TC_CLSBPF_OFFLOAD;
163 cls_bpf.exts = &obj->exts; 162 cls_bpf.exts = &obj->exts;
164 cls_bpf.prog = prog ? prog->filter : NULL; 163 cls_bpf.prog = prog ? prog->filter : NULL;
@@ -227,8 +226,7 @@ static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
227 struct tcf_block *block = tp->chain->block; 226 struct tcf_block *block = tp->chain->block;
228 struct tc_cls_bpf_offload cls_bpf = {}; 227 struct tc_cls_bpf_offload cls_bpf = {};
229 228
230 tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, block, 229 tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
231 NULL);
232 cls_bpf.command = TC_CLSBPF_STATS; 230 cls_bpf.command = TC_CLSBPF_STATS;
233 cls_bpf.exts = &prog->exts; 231 cls_bpf.exts = &prog->exts;
234 cls_bpf.prog = prog->filter; 232 cls_bpf.prog = prog->filter;
@@ -670,7 +668,7 @@ static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
670 continue; 668 continue;
671 669
672 tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, 670 tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
673 block, extack); 671 extack);
674 cls_bpf.command = TC_CLSBPF_OFFLOAD; 672 cls_bpf.command = TC_CLSBPF_OFFLOAD;
675 cls_bpf.exts = &prog->exts; 673 cls_bpf.exts = &prog->exts;
676 cls_bpf.prog = add ? prog->filter : NULL; 674 cls_bpf.prog = add ? prog->filter : NULL;
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 3cb372b0e933..f6685fc53119 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -389,8 +389,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
389 if (!rtnl_held) 389 if (!rtnl_held)
390 rtnl_lock(); 390 rtnl_lock();
391 391
392 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, block, 392 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
393 extack);
394 cls_flower.command = TC_CLSFLOWER_DESTROY; 393 cls_flower.command = TC_CLSFLOWER_DESTROY;
395 cls_flower.cookie = (unsigned long) f; 394 cls_flower.cookie = (unsigned long) f;
396 395
@@ -423,8 +422,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
423 goto errout; 422 goto errout;
424 } 423 }
425 424
426 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, block, 425 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
427 extack);
428 cls_flower.command = TC_CLSFLOWER_REPLACE; 426 cls_flower.command = TC_CLSFLOWER_REPLACE;
429 cls_flower.cookie = (unsigned long) f; 427 cls_flower.cookie = (unsigned long) f;
430 cls_flower.rule->match.dissector = &f->mask->dissector; 428 cls_flower.rule->match.dissector = &f->mask->dissector;
@@ -480,8 +478,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
480 if (!rtnl_held) 478 if (!rtnl_held)
481 rtnl_lock(); 479 rtnl_lock();
482 480
483 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, block, 481 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
484 NULL);
485 cls_flower.command = TC_CLSFLOWER_STATS; 482 cls_flower.command = TC_CLSFLOWER_STATS;
486 cls_flower.cookie = (unsigned long) f; 483 cls_flower.cookie = (unsigned long) f;
487 cls_flower.classid = f->res.classid; 484 cls_flower.classid = f->res.classid;
@@ -1760,7 +1757,7 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
1760 } 1757 }
1761 1758
1762 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, 1759 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
1763 block, extack); 1760 extack);
1764 cls_flower.command = add ? 1761 cls_flower.command = add ?
1765 TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY; 1762 TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
1766 cls_flower.cookie = (unsigned long)f; 1763 cls_flower.cookie = (unsigned long)f;
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 820938fa09ed..da916f39b719 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -71,8 +71,7 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp,
71 struct tc_cls_matchall_offload cls_mall = {}; 71 struct tc_cls_matchall_offload cls_mall = {};
72 struct tcf_block *block = tp->chain->block; 72 struct tcf_block *block = tp->chain->block;
73 73
74 tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, block, 74 tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
75 extack);
76 cls_mall.command = TC_CLSMATCHALL_DESTROY; 75 cls_mall.command = TC_CLSMATCHALL_DESTROY;
77 cls_mall.cookie = cookie; 76 cls_mall.cookie = cookie;
78 77
@@ -94,8 +93,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
94 if (!cls_mall.rule) 93 if (!cls_mall.rule)
95 return -ENOMEM; 94 return -ENOMEM;
96 95
97 tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, block, 96 tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
98 extack);
99 cls_mall.command = TC_CLSMATCHALL_REPLACE; 97 cls_mall.command = TC_CLSMATCHALL_REPLACE;
100 cls_mall.cookie = cookie; 98 cls_mall.cookie = cookie;
101 99
@@ -295,8 +293,7 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
295 if (!cls_mall.rule) 293 if (!cls_mall.rule)
296 return -ENOMEM; 294 return -ENOMEM;
297 295
298 tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, block, 296 tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
299 extack);
300 cls_mall.command = add ? 297 cls_mall.command = add ?
301 TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY; 298 TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
302 cls_mall.cookie = (unsigned long)head; 299 cls_mall.cookie = (unsigned long)head;
@@ -331,8 +328,7 @@ static void mall_stats_hw_filter(struct tcf_proto *tp,
331 struct tc_cls_matchall_offload cls_mall = {}; 328 struct tc_cls_matchall_offload cls_mall = {};
332 struct tcf_block *block = tp->chain->block; 329 struct tcf_block *block = tp->chain->block;
333 330
334 tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, block, 331 tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, NULL);
335 NULL);
336 cls_mall.command = TC_CLSMATCHALL_STATS; 332 cls_mall.command = TC_CLSMATCHALL_STATS;
337 cls_mall.cookie = cookie; 333 cls_mall.cookie = cookie;
338 334
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 2feed0ffa269..4b8710a266cc 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -485,8 +485,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
485 struct tcf_block *block = tp->chain->block; 485 struct tcf_block *block = tp->chain->block;
486 struct tc_cls_u32_offload cls_u32 = {}; 486 struct tc_cls_u32_offload cls_u32 = {};
487 487
488 tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, block, 488 tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
489 extack);
490 cls_u32.command = TC_CLSU32_DELETE_HNODE; 489 cls_u32.command = TC_CLSU32_DELETE_HNODE;
491 cls_u32.hnode.divisor = h->divisor; 490 cls_u32.hnode.divisor = h->divisor;
492 cls_u32.hnode.handle = h->handle; 491 cls_u32.hnode.handle = h->handle;
@@ -504,7 +503,7 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
504 bool offloaded = false; 503 bool offloaded = false;
505 int err; 504 int err;
506 505
507 tc_cls_common_offload_init(&cls_u32.common, tp, flags, block, extack); 506 tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
508 cls_u32.command = TC_CLSU32_NEW_HNODE; 507 cls_u32.command = TC_CLSU32_NEW_HNODE;
509 cls_u32.hnode.divisor = h->divisor; 508 cls_u32.hnode.divisor = h->divisor;
510 cls_u32.hnode.handle = h->handle; 509 cls_u32.hnode.handle = h->handle;
@@ -530,8 +529,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
530 struct tcf_block *block = tp->chain->block; 529 struct tcf_block *block = tp->chain->block;
531 struct tc_cls_u32_offload cls_u32 = {}; 530 struct tc_cls_u32_offload cls_u32 = {};
532 531
533 tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, block, 532 tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
534 extack);
535 cls_u32.command = TC_CLSU32_DELETE_KNODE; 533 cls_u32.command = TC_CLSU32_DELETE_KNODE;
536 cls_u32.knode.handle = n->handle; 534 cls_u32.knode.handle = n->handle;
537 535
@@ -548,7 +546,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
548 bool skip_sw = tc_skip_sw(flags); 546 bool skip_sw = tc_skip_sw(flags);
549 int err; 547 int err;
550 548
551 tc_cls_common_offload_init(&cls_u32.common, tp, flags, block, extack); 549 tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
552 cls_u32.command = TC_CLSU32_REPLACE_KNODE; 550 cls_u32.command = TC_CLSU32_REPLACE_KNODE;
553 cls_u32.knode.handle = n->handle; 551 cls_u32.knode.handle = n->handle;
554 cls_u32.knode.fshift = n->fshift; 552 cls_u32.knode.fshift = n->fshift;
@@ -1172,12 +1170,10 @@ static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
1172 bool add, tc_setup_cb_t *cb, void *cb_priv, 1170 bool add, tc_setup_cb_t *cb, void *cb_priv,
1173 struct netlink_ext_ack *extack) 1171 struct netlink_ext_ack *extack)
1174{ 1172{
1175 struct tcf_block *block = tp->chain->block;
1176 struct tc_cls_u32_offload cls_u32 = {}; 1173 struct tc_cls_u32_offload cls_u32 = {};
1177 int err; 1174 int err;
1178 1175
1179 tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, block, 1176 tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
1180 extack);
1181 cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE; 1177 cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
1182 cls_u32.hnode.divisor = ht->divisor; 1178 cls_u32.hnode.divisor = ht->divisor;
1183 cls_u32.hnode.handle = ht->handle; 1179 cls_u32.hnode.handle = ht->handle;
@@ -1199,8 +1195,7 @@ static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
1199 struct tc_cls_u32_offload cls_u32 = {}; 1195 struct tc_cls_u32_offload cls_u32 = {};
1200 int err; 1196 int err;
1201 1197
1202 tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, block, 1198 tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
1203 extack);
1204 cls_u32.command = add ? 1199 cls_u32.command = add ?
1205 TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE; 1200 TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
1206 cls_u32.knode.handle = n->handle; 1201 cls_u32.knode.handle = n->handle;