author     Pieter Jansen van Vuuren <pieter.jansenvanvuuren@netronome.com>    2019-05-04 07:46:18 -0400
committer  David S. Miller <davem@davemloft.net>    2019-05-06 00:49:23 -0400
commit     ab79af32b0a5606324ce04c0f04a0d2f90b94464
tree       bab85b73c9c428ffcd7239a9706d00c383c20bbf
parent     f00cbf1968145afbae385a867a66c69845e30711
mlxsw: use intermediate representation for matchall offload
Updates the Mellanox spectrum driver to use the newer intermediate representation for flow actions in matchall offloads.

Signed-off-by: Pieter Jansen van Vuuren <pieter.jansenvanvuuren@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
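[Editor's note] For orientation, the sketch below shows the shape of a matchall handler written against this intermediate representation: the single flow_action_entry is pulled out of the offload request and dispatched on its FLOW_ACTION_* id. It is illustrative only; struct my_port and the my_offload_*() hooks are hypothetical stand-ins for driver plumbing, while the types, fields and the flow_offload_has_one_action() helper come from include/net/flow_offload.h as touched by this patch.

/*
 * Illustrative sketch only: consuming the flow action intermediate
 * representation in a matchall offload handler.  struct my_port and the
 * my_offload_*() prototypes are hypothetical driver hooks; the
 * FLOW_ACTION_* ids and flow_action_entry fields come from
 * include/net/flow_offload.h.
 */
#include <linux/netdevice.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>

struct my_port;                 /* hypothetical driver port context */
struct psample_group;

/* Hypothetical hardware programming hooks. */
int my_offload_mirror(struct my_port *port, struct net_device *to_dev,
                      bool ingress);
int my_offload_sample(struct my_port *port, struct psample_group *group,
                      u32 rate, bool truncate, u32 trunc_size);

int my_port_add_matchall(struct my_port *port,
                         struct tc_cls_matchall_offload *f,
                         bool ingress)
{
        struct flow_action_entry *act;

        /* Only single-action rules are offloaded in this sketch. */
        if (!flow_offload_has_one_action(&f->rule->action))
                return -EOPNOTSUPP;

        act = &f->rule->action.entries[0];

        switch (act->id) {
        case FLOW_ACTION_MIRRED:
                /* The mirror target netdevice travels in act->dev. */
                return my_offload_mirror(port, act->dev, ingress);
        case FLOW_ACTION_SAMPLE:
                /* Sampling parameters are plain fields of the entry. */
                return my_offload_sample(port, act->sample.psample_group,
                                         act->sample.rate,
                                         act->sample.truncate,
                                         act->sample.trunc_size);
        default:
                return -EOPNOTSUPP;
        }
}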
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 38 +++++++++++++++++++-------------------
-rw-r--r--  include/net/flow_offload.h                     | 11 +++++++++++
2 files changed, 30 insertions(+), 19 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index a6c6d5ee9ead..f594c6a913ec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1269,21 +1269,19 @@ mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
 static int
 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
                                       struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
-                                      const struct tc_action *a,
+                                      const struct flow_action_entry *act,
                                       bool ingress)
 {
         enum mlxsw_sp_span_type span_type;
-        struct net_device *to_dev;
 
-        to_dev = tcf_mirred_dev(a);
-        if (!to_dev) {
+        if (!act->dev) {
                 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
                 return -EINVAL;
         }
 
         mirror->ingress = ingress;
         span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
-        return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev, span_type,
+        return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type,
                                         true, &mirror->span_id);
 }
 
@@ -1302,7 +1300,7 @@ mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
 static int
 mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
                                       struct tc_cls_matchall_offload *cls,
-                                      const struct tc_action *a,
+                                      const struct flow_action_entry *act,
                                       bool ingress)
 {
         int err;
@@ -1313,18 +1311,18 @@ mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
                 netdev_err(mlxsw_sp_port->dev, "sample already active\n");
                 return -EEXIST;
         }
-        if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
+        if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
                 netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
                 return -EOPNOTSUPP;
         }
 
         rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
-                           tcf_sample_psample_group(a));
-        mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
-        mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
-        mlxsw_sp_port->sample->rate = tcf_sample_rate(a);
+                           act->sample.psample_group);
+        mlxsw_sp_port->sample->truncate = act->sample.truncate;
+        mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
+        mlxsw_sp_port->sample->rate = act->sample.rate;
 
-        err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
+        err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
         if (err)
                 goto err_port_sample_set;
         return 0;
@@ -1350,10 +1348,10 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 {
         struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
         __be16 protocol = f->common.protocol;
-        const struct tc_action *a;
+        struct flow_action_entry *act;
         int err;
 
-        if (!tcf_exts_has_one_action(f->exts)) {
+        if (!flow_offload_has_one_action(&f->rule->action)) {
                 netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
                 return -EOPNOTSUPP;
         }
@@ -1363,19 +1361,21 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
                 return -ENOMEM;
         mall_tc_entry->cookie = f->cookie;
 
-        a = tcf_exts_first_action(f->exts);
+        act = &f->rule->action.entries[0];
 
-        if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
+        if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
                 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
 
                 mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
                 mirror = &mall_tc_entry->mirror;
                 err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
-                                                            mirror, a, ingress);
-        } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
+                                                            mirror, act,
+                                                            ingress);
+        } else if (act->id == FLOW_ACTION_SAMPLE &&
+                   protocol == htons(ETH_P_ALL)) {
                 mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
                 err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
-                                                            a, ingress);
+                                                            act, ingress);
         } else {
                 err = -EOPNOTSUPP;
         }
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 9a6c89b2c2bb..3bf67dd64be5 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -177,6 +177,17 @@ static inline bool flow_action_has_entries(const struct flow_action *action)
         return action->num_entries;
 }
 
+/**
+ * flow_offload_has_one_action() - check if exactly one action is present
+ * @action: tc filter flow offload action
+ *
+ * Returns true if exactly one action is present.
+ */
+static inline bool flow_offload_has_one_action(const struct flow_action *action)
+{
+        return action->num_entries == 1;
+}
+
 #define flow_action_for_each(__i, __act, __actions) \
         for (__i = 0, __act = &(__actions)->entries[0]; __i < (__actions)->num_entries; __act = &(__actions)->entries[++__i])
 
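[Editor's note] Drivers that offload more than one action per rule would walk the same structure with the flow_action_for_each() macro shown in the context above rather than insisting on a single entry. The following minimal capability check is a hedged, hypothetical sketch of that pattern; my_all_actions_supported() is not part of this patch.

/*
 * Illustrative sketch only: iterating a flow_action with the existing
 * flow_action_for_each() macro, as an alternative to requiring exactly
 * one entry via flow_offload_has_one_action().
 */
#include <linux/types.h>
#include <net/flow_offload.h>

static bool my_all_actions_supported(const struct flow_action *action)
{
        const struct flow_action_entry *act;
        int i;

        flow_action_for_each(i, act, action) {
                switch (act->id) {
                case FLOW_ACTION_MIRRED:
                case FLOW_ACTION_SAMPLE:
                        continue;       /* handled elsewhere in this sketch */
                default:
                        return false;   /* anything else is not offloadable */
                }
        }
        return true;
}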