aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorOz Shlomo <ozsh@mellanox.com>2018-12-02 07:43:27 -0500
committerSaeed Mahameed <saeedm@mellanox.com>2018-12-10 18:53:04 -0500
commit101f4de9dd521c6d06dfdacaa35e506a8db8494b (patch)
tree8ec5d72238be41895c31a92cc97cf84d14e176c0
parent54c177ca9c6efe5df516eefb886761b89a82eaf0 (diff)
net/mlx5e: Move TC tunnel offloading code to separate source file
Move tunnel offloading related code to a separate source file for better code maintainability. Code refactoring with no functional change. Signed-off-by: Oz Shlomo <ozsh@mellanox.com> Reviewed-by: Eli Britstein <elibr@mellanox.com> Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c496
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c500
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h2
6 files changed, 548 insertions, 496 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index d499b3d00348..40764d87413a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -30,7 +30,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
30mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o 30mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
31mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o 31mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
32mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o 32mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
33mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o 33mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o
34 34
35# 35#
36# Core extra 36# Core extra
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
new file mode 100644
index 000000000000..eaa43477e0ea
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -0,0 +1,496 @@
1/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2/* Copyright (c) 2018 Mellanox Technologies. */
3
4#include <net/vxlan.h>
5#include "lib/vxlan.h"
6#include "en/tc_tun.h"
7
/* Resolve the egress device and neighbour entry for an IPv4 tunnel
 * destination described by @fl4.
 *
 * @priv:       mlx5e private data of the ingress representor
 * @mirred_dev: net device the tc mirred action points at; its netns is
 *              used for the route lookup
 * @out_dev:    filled with the resolved egress device; falls back to the
 *              uplink representor when the route egress is not on the
 *              same HW e-switch
 * @fl4:        IPv4 flow key for the route lookup
 * @out_n:      filled with a referenced neighbour entry on success;
 *              caller must drop it with neigh_release()
 * @out_ttl:    if zero on entry, filled with the route's hop limit
 *
 * Returns 0 on success or a negative errno. Compiled without
 * CONFIG_INET this is a stub returning -EOPNOTSUPP.
 */
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   u8 *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct rtable *rt;
	struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = uplink_rpriv->netdev;
	else
		*out_dev = rt->dst.dev;

	/* honor a caller-provided TTL; derive it from the route only when unset */
	if (!(*out_ttl))
		*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}
47
48static const char *mlx5e_netdev_kind(struct net_device *dev)
49{
50 if (dev->rtnl_link_ops)
51 return dev->rtnl_link_ops->kind;
52 else
53 return "";
54}
55
/* Resolve the egress device and neighbour entry for an IPv6 tunnel
 * destination described by @fl6 — IPv6 counterpart of
 * mlx5e_route_lookup_ipv4().
 *
 * On success *@out_dev is the egress device (uplink representor when
 * the route egress is not on the same HW e-switch), *@out_n holds a
 * referenced neighbour (caller releases with neigh_release()) and
 * *@out_ttl, when zero on entry, is set from the route's hop limit.
 *
 * Returns 0 or a negative errno; a stub returning -EOPNOTSUPP when
 * CONFIG_INET or CONFIG_IPV6 is disabled.
 */
static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi6 *fl6,
				   struct neighbour **out_n,
				   u8 *out_ttl)
{
	struct neighbour *n = NULL;
	struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	/* lookup via the ipv6 stub so this builds when IPv6 is modular */
	ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
					 fl6);
	if (ret < 0)
		return ret;

	if (!(*out_ttl))
		*out_ttl = ip6_dst_hoplimit(dst);

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
		*out_dev = uplink_rpriv->netdev;
	else
		*out_dev = dst->dev;
#else
	return -EOPNOTSUPP;
#endif

	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}
97
98static int mlx5e_gen_vxlan_header(char buf[], struct ip_tunnel_key *tun_key)
99{
100 __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
101 struct udphdr *udp = (struct udphdr *)(buf);
102 struct vxlanhdr *vxh = (struct vxlanhdr *)
103 ((char *)udp + sizeof(struct udphdr));
104
105 udp->dest = tun_key->tp_dst;
106 vxh->vx_flags = VXLAN_HF_VNI;
107 vxh->vx_vni = vxlan_vni_field(tun_id);
108
109 return 0;
110}
111
112static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto,
113 struct mlx5e_encap_entry *e)
114{
115 int err = 0;
116 struct ip_tunnel_key *key = &e->tun_info.key;
117
118 if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
119 *ip_proto = IPPROTO_UDP;
120 err = mlx5e_gen_vxlan_header(buf, key);
121 } else {
122 pr_warn("mlx5: Cannot generate tunnel header for tunnel type (%d)\n"
123 , e->tunnel_type);
124 err = -EOPNOTSUPP;
125 }
126
127 return err;
128}
129
/* Build the complete IPv4 encap header (ETH + IPv4 + tunnel header) for
 * encap entry @e and register it with HW via packet reformat.
 *
 * @priv:       mlx5e private data of the ingress representor
 * @mirred_dev: egress device from the tc mirred action
 * @e:          encap entry to fill (m_neigh, out_dev, encap header, id)
 *
 * Returns 0 and marks the entry MLX5_ENCAP_ENTRY_VALID on success.
 * Returns -EAGAIN when the neighbour is not yet valid: the encap header
 * is kept in @e (not freed) so the offload can be completed from the
 * neigh update path, and a neigh probe is triggered.
 * Any other negative errno indicates failure; resources are unwound via
 * the goto cleanup labels below.
 */
int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
				    struct net_device *mirred_dev,
				    struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv4_encap_size = ETH_HLEN +
			      sizeof(struct iphdr) +
			      e->tunnel_hlen;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;
	struct ethhdr *eth;
	u8 nud_state, ttl;
	struct iphdr *ip;
	int err;

	/* refuse headers larger than the device can reformat */
	if (max_encap_size < ipv4_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv4_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	/* add the IP fields */
	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;
	fl4.saddr = tun_key->u.ipv4.src;
	ttl = tun_key->ttl;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
				      &fl4, &n, &ttl);
	if (err)
		goto free_encap;

	/* used by mlx5e_detach_encap to lookup a neigh hash table
	 * entry in the neigh hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * the neigh validity state. So if we'll get a notification, in case the
	 * neigh changes it's validity state, we would find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto free_encap;

	/* snapshot state and MAC under the neigh lock */
	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	/* add ethernet header */
	eth = (struct ethhdr *)encap_header;
	ether_addr_copy(eth->h_dest, e->h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	/* add ip header */
	ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	ip->tos = tun_key->tos;
	ip->version = 0x4;
	ip->ihl = 0x5;
	ip->ttl = ttl;
	ip->daddr = fl4.daddr;
	ip->saddr = fl4.saddr;

	/* add tunneling protocol header */
	err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr),
					 &ip->protocol, e);
	if (err)
		goto destroy_neigh_entry;

	e->encap_size = ipv4_encap_size;
	e->encap_header = encap_header;

	/* neighbour not resolved yet: keep the header for the neigh
	 * update handler and kick off resolution
	 */
	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_packet_reformat_alloc(priv->mdev,
					 e->reformat_type,
					 ipv4_encap_size, encap_header,
					 MLX5_FLOW_NAMESPACE_FDB,
					 &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
	/* NOTE(review): if we get here after e->encap_header was assigned
	 * (reformat alloc failure), the pointer is left dangling after the
	 * kfree() — confirm callers discard the entry on error.
	 */
	kfree(encap_header);
out:
	if (n)
		neigh_release(n);
	return err;
}
243
/* Build the complete IPv6 encap header (ETH + IPv6 + tunnel header) for
 * encap entry @e and register it with HW via packet reformat — IPv6
 * counterpart of mlx5e_tc_tun_create_header_ipv4().
 *
 * Returns 0 and marks the entry MLX5_ENCAP_ENTRY_VALID on success;
 * -EAGAIN when the neighbour is not yet valid (encap header is kept in
 * @e and a neigh probe is triggered); other negative errno on failure.
 */
int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
				    struct net_device *mirred_dev,
				    struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv6_encap_size = ETH_HLEN +
			      sizeof(struct ipv6hdr) +
			      e->tunnel_hlen;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	struct ipv6hdr *ip6h;
	char *encap_header;
	struct ethhdr *eth;
	u8 nud_state, ttl;
	int err;

	/* refuse headers larger than the device can reformat */
	if (max_encap_size < ipv6_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv6_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	ttl = tun_key->ttl;

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto free_encap;

	/* used by mlx5e_detach_encap to lookup a neigh hash table
	 * entry in the neigh hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * the neigh validity state. So if we'll get a notification, in case the
	 * neigh changes it's validity state, we would find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto free_encap;

	/* snapshot state and MAC under the neigh lock */
	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	/* add ethernet header */
	eth = (struct ethhdr *)encap_header;
	ether_addr_copy(eth->h_dest, e->h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	/* add ip header */
	ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	ip6_flow_hdr(ip6h, tun_key->tos, 0);
	/* the HW fills up ipv6 payload len */
	ip6h->hop_limit = ttl;
	ip6h->daddr = fl6.daddr;
	ip6h->saddr = fl6.saddr;

	/* add tunneling protocol header */
	err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr),
					 &ip6h->nexthdr, e);
	if (err)
		goto destroy_neigh_entry;

	e->encap_size = ipv6_encap_size;
	e->encap_header = encap_header;

	/* neighbour not resolved yet: keep the header for the neigh
	 * update handler and kick off resolution
	 */
	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_packet_reformat_alloc(priv->mdev,
					 e->reformat_type,
					 ipv6_encap_size, encap_header,
					 MLX5_FLOW_NAMESPACE_FDB,
					 &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
	/* NOTE(review): same dangling e->encap_header concern as the IPv4
	 * path when arriving here after the pointer was assigned.
	 */
	kfree(encap_header);
out:
	if (n)
		neigh_release(n);
	return err;
}
356
357int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev)
358{
359 if (netif_is_vxlan(tunnel_dev))
360 return MLX5E_TC_TUNNEL_TYPE_VXLAN;
361 else
362 return MLX5E_TC_TUNNEL_TYPE_UNKNOWN;
363}
364
365bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
366 struct net_device *netdev)
367{
368 int tunnel_type = mlx5e_tc_tun_get_type(netdev);
369
370 if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN &&
371 MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
372 return true;
373 else
374 return false;
375}
376
377int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
378 struct mlx5e_priv *priv,
379 struct mlx5e_encap_entry *e,
380 struct netlink_ext_ack *extack)
381{
382 e->tunnel_type = mlx5e_tc_tun_get_type(tunnel_dev);
383
384 if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
385 int dst_port = be16_to_cpu(e->tun_info.key.tp_dst);
386
387 if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) {
388 NL_SET_ERR_MSG_MOD(extack,
389 "vxlan udp dport was not registered with the HW");
390 netdev_warn(priv->netdev,
391 "%d isn't an offloaded vxlan udp dport\n",
392 dst_port);
393 return -EOPNOTSUPP;
394 }
395 e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
396 e->tunnel_hlen = VXLAN_HLEN;
397 } else {
398 e->reformat_type = -1;
399 e->tunnel_hlen = -1;
400 return -EOPNOTSUPP;
401 }
402 return 0;
403}
404
/* Translate a flower VXLAN decap match into mlx5 flow-spec match bits.
 *
 * Requires an exact match on the encap UDP destination port, and that
 * port must be registered with the HW as a VXLAN port. On success the
 * UDP proto/ports (and VNI, if the filter matches on the tunnel key id)
 * are written into @headers_c/@headers_v and the misc parameters of
 * @spec. Returns 0 or -EOPNOTSUPP with an extack message.
 */
static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
				    struct mlx5_flow_spec *spec,
				    struct tc_cls_flower_offload *f,
				    void *headers_c,
				    void *headers_v)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_dissector_key_ports *key =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_PORTS,
					  f->key);
	struct flow_dissector_key_ports *mask =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_PORTS,
					  f->mask);
	void *misc_c = MLX5_ADDR_OF(fte_match_param,
				    spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param,
				    spec->match_value,
				    misc_parameters);

	/* Full udp dst port must be given */
	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
	    memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "VXLAN decap filter must include enc_dst_port condition");
		netdev_warn(priv->netdev,
			    "VXLAN decap filter must include enc_dst_port condition\n");
		return -EOPNOTSUPP;
	}

	/* udp dst port must be known as a VXLAN port */
	if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Matched UDP port is not registered as a VXLAN port");
		netdev_warn(priv->netdev,
			    "UDP port %d is not registered as a VXLAN port\n",
			    be16_to_cpu(key->dst));
		return -EOPNOTSUPP;
	}

	/* dst UDP port is valid here */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, ntohs(mask->dst));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, ntohs(key->dst));

	MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, ntohs(mask->src));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, ntohs(key->src));

	/* match on VNI */
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
	return 0;
}
474
475int mlx5e_tc_tun_parse(struct net_device *filter_dev,
476 struct mlx5e_priv *priv,
477 struct mlx5_flow_spec *spec,
478 struct tc_cls_flower_offload *f,
479 void *headers_c,
480 void *headers_v)
481{
482 int tunnel_type;
483 int err = 0;
484
485 tunnel_type = mlx5e_tc_tun_get_type(filter_dev);
486 if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
487 err = mlx5e_tc_tun_parse_vxlan(priv, spec, f,
488 headers_c, headers_v);
489 } else {
490 netdev_warn(priv->netdev,
491 "decapsulation offload is not supported for %s net device (%d)\n",
492 mlx5e_netdev_kind(filter_dev), tunnel_type);
493 return -EOPNOTSUPP;
494 }
495 return err;
496}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
new file mode 100644
index 000000000000..ad4fc93b17a1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -0,0 +1,43 @@
1/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2/* Copyright (c) 2018 Mellanox Technologies. */
3
4#ifndef __MLX5_EN_TC_TUNNEL_H__
5#define __MLX5_EN_TC_TUNNEL_H__
6
7#include <linux/netdevice.h>
8#include <linux/mlx5/fs.h>
9#include <net/pkt_cls.h>
10#include <linux/netlink.h>
11#include "en.h"
12#include "en_rep.h"
13
/* Tunnel types recognized by the TC tunnel offload code; VXLAN is
 * currently the only type with encap/decap offload support.
 */
enum {
	MLX5E_TC_TUNNEL_TYPE_UNKNOWN,
	MLX5E_TC_TUNNEL_TYPE_VXLAN
};
18
19int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
20 struct mlx5e_priv *priv,
21 struct mlx5e_encap_entry *e,
22 struct netlink_ext_ack *extack);
23
24int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
25 struct net_device *mirred_dev,
26 struct mlx5e_encap_entry *e);
27
28int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
29 struct net_device *mirred_dev,
30 struct mlx5e_encap_entry *e);
31
32int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev);
33bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
34 struct net_device *netdev);
35
36int mlx5e_tc_tun_parse(struct net_device *filter_dev,
37 struct mlx5e_priv *priv,
38 struct mlx5_flow_spec *spec,
39 struct tc_cls_flower_offload *f,
40 void *headers_c,
41 void *headers_v);
42
43#endif //__MLX5_EN_TC_TUNNEL_H__
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 5cac4de435c9..85e51bd4147f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -42,6 +42,7 @@
42#include "en.h" 42#include "en.h"
43#include "en_rep.h" 43#include "en_rep.h"
44#include "en_tc.h" 44#include "en_tc.h"
45#include "en/tc_tun.h"
45#include "fs_core.h" 46#include "fs_core.h"
46 47
47#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \ 48#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 7d7f490d8ff1..abc200947e84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -44,15 +44,14 @@
44#include <net/tc_act/tc_tunnel_key.h> 44#include <net/tc_act/tc_tunnel_key.h>
45#include <net/tc_act/tc_pedit.h> 45#include <net/tc_act/tc_pedit.h>
46#include <net/tc_act/tc_csum.h> 46#include <net/tc_act/tc_csum.h>
47#include <net/vxlan.h>
48#include <net/arp.h> 47#include <net/arp.h>
49#include "en.h" 48#include "en.h"
50#include "en_rep.h" 49#include "en_rep.h"
51#include "en_tc.h" 50#include "en_tc.h"
52#include "eswitch.h" 51#include "eswitch.h"
53#include "lib/vxlan.h"
54#include "fs_core.h" 52#include "fs_core.h"
55#include "en/port.h" 53#include "en/port.h"
54#include "en/tc_tun.h"
56 55
57struct mlx5_nic_flow_attr { 56struct mlx5_nic_flow_attr {
58 u32 action; 57 u32 action;
@@ -79,13 +78,6 @@ enum {
79 78
80#define MLX5E_TC_MAX_SPLITS 1 79#define MLX5E_TC_MAX_SPLITS 1
81 80
82enum {
83 MLX5E_TC_TUNNEL_TYPE_UNKNOWN,
84 MLX5E_TC_TUNNEL_TYPE_VXLAN
85};
86
87static int mlx5e_get_tunnel_type(struct net_device *tunnel_dev);
88
89struct mlx5e_tc_flow { 81struct mlx5e_tc_flow {
90 struct rhash_head node; 82 struct rhash_head node;
91 struct mlx5e_priv *priv; 83 struct mlx5e_priv *priv;
@@ -684,14 +676,6 @@ static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
684 } 676 }
685} 677}
686 678
687static const char *mlx5e_netdev_kind(struct net_device *dev)
688{
689 if (dev->rtnl_link_ops)
690 return dev->rtnl_link_ops->kind;
691 else
692 return "";
693}
694
695static int 679static int
696mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, 680mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
697 struct mlx5e_tc_flow_parse_attr *parse_attr, 681 struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -1209,75 +1193,6 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
1209 mlx5e_tc_del_nic_flow(priv, flow); 1193 mlx5e_tc_del_nic_flow(priv, flow);
1210} 1194}
1211 1195
1212static int parse_tunnel_vxlan_attr(struct mlx5e_priv *priv,
1213 struct mlx5_flow_spec *spec,
1214 struct tc_cls_flower_offload *f,
1215 void *headers_c,
1216 void *headers_v)
1217{
1218 struct netlink_ext_ack *extack = f->common.extack;
1219 struct flow_dissector_key_ports *key =
1220 skb_flow_dissector_target(f->dissector,
1221 FLOW_DISSECTOR_KEY_ENC_PORTS,
1222 f->key);
1223 struct flow_dissector_key_ports *mask =
1224 skb_flow_dissector_target(f->dissector,
1225 FLOW_DISSECTOR_KEY_ENC_PORTS,
1226 f->mask);
1227 void *misc_c = MLX5_ADDR_OF(fte_match_param,
1228 spec->match_criteria,
1229 misc_parameters);
1230 void *misc_v = MLX5_ADDR_OF(fte_match_param,
1231 spec->match_value,
1232 misc_parameters);
1233
1234 /* Full udp dst port must be given */
1235 if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
1236 memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) {
1237 NL_SET_ERR_MSG_MOD(extack,
1238 "VXLAN decap filter must include enc_dst_port condition");
1239 netdev_warn(priv->netdev,
1240 "VXLAN decap filter must include enc_dst_port condition\n");
1241 return -EOPNOTSUPP;
1242 }
1243
1244 /* udp dst port must be knonwn as a VXLAN port */
1245 if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst))) {
1246 NL_SET_ERR_MSG_MOD(extack,
1247 "Matched UDP port is not registered as a VXLAN port");
1248 netdev_warn(priv->netdev,
1249 "UDP port %d is not registered as a VXLAN port\n",
1250 be16_to_cpu(key->dst));
1251 return -EOPNOTSUPP;
1252 }
1253
1254 /* dst UDP port is valid here */
1255 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
1256 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
1257
1258 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, ntohs(mask->dst));
1259 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, ntohs(key->dst));
1260
1261 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, ntohs(mask->src));
1262 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, ntohs(key->src));
1263
1264 /* match on VNI */
1265 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
1266 struct flow_dissector_key_keyid *key =
1267 skb_flow_dissector_target(f->dissector,
1268 FLOW_DISSECTOR_KEY_ENC_KEYID,
1269 f->key);
1270 struct flow_dissector_key_keyid *mask =
1271 skb_flow_dissector_target(f->dissector,
1272 FLOW_DISSECTOR_KEY_ENC_KEYID,
1273 f->mask);
1274 MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
1275 be32_to_cpu(mask->keyid));
1276 MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
1277 be32_to_cpu(key->keyid));
1278 }
1279 return 0;
1280}
1281 1196
1282static int parse_tunnel_attr(struct mlx5e_priv *priv, 1197static int parse_tunnel_attr(struct mlx5e_priv *priv,
1283 struct mlx5_flow_spec *spec, 1198 struct mlx5_flow_spec *spec,
@@ -1294,29 +1209,14 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
1294 skb_flow_dissector_target(f->dissector, 1209 skb_flow_dissector_target(f->dissector,
1295 FLOW_DISSECTOR_KEY_ENC_CONTROL, 1210 FLOW_DISSECTOR_KEY_ENC_CONTROL,
1296 f->key); 1211 f->key);
1297 int tunnel_type;
1298 int err = 0; 1212 int err = 0;
1299 1213
1300 tunnel_type = mlx5e_get_tunnel_type(filter_dev); 1214 err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
1301 if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { 1215 headers_c, headers_v);
1302 err = parse_tunnel_vxlan_attr(priv, spec, f,
1303 headers_c, headers_v);
1304 } else {
1305 NL_SET_ERR_MSG_MOD(extack,
1306 "decapsulation offload is not supported");
1307 netdev_warn(priv->netdev,
1308 "decapsulation offload is not supported for %s net device (%d)\n",
1309 mlx5e_netdev_kind(filter_dev), tunnel_type);
1310 return -EOPNOTSUPP;
1311 }
1312
1313 if (err) { 1216 if (err) {
1314 NL_SET_ERR_MSG_MOD(extack, 1217 NL_SET_ERR_MSG_MOD(extack,
1315 "failed to parse tunnel attributes"); 1218 "failed to parse tunnel attributes");
1316 netdev_warn(priv->netdev, 1219 return err;
1317 "failed to parse %s tunnel attributes (%d)\n",
1318 mlx5e_netdev_kind(filter_dev), tunnel_type);
1319 return -EOPNOTSUPP;
1320 } 1220 }
1321 1221
1322 if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 1222 if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -2359,45 +2259,6 @@ static inline int hash_encap_info(struct ip_tunnel_key *key)
2359 return jhash(key, sizeof(*key), 0); 2259 return jhash(key, sizeof(*key), 0);
2360} 2260}
2361 2261
2362static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
2363 struct net_device *mirred_dev,
2364 struct net_device **out_dev,
2365 struct flowi4 *fl4,
2366 struct neighbour **out_n,
2367 u8 *out_ttl)
2368{
2369 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2370 struct mlx5e_rep_priv *uplink_rpriv;
2371 struct rtable *rt;
2372 struct neighbour *n = NULL;
2373
2374#if IS_ENABLED(CONFIG_INET)
2375 int ret;
2376
2377 rt = ip_route_output_key(dev_net(mirred_dev), fl4);
2378 ret = PTR_ERR_OR_ZERO(rt);
2379 if (ret)
2380 return ret;
2381#else
2382 return -EOPNOTSUPP;
2383#endif
2384 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
2385 /* if the egress device isn't on the same HW e-switch, we use the uplink */
2386 if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
2387 *out_dev = uplink_rpriv->netdev;
2388 else
2389 *out_dev = rt->dst.dev;
2390
2391 if (!(*out_ttl))
2392 *out_ttl = ip4_dst_hoplimit(&rt->dst);
2393 n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
2394 ip_rt_put(rt);
2395 if (!n)
2396 return -ENOMEM;
2397
2398 *out_n = n;
2399 return 0;
2400}
2401 2262
2402static bool is_merged_eswitch_dev(struct mlx5e_priv *priv, 2263static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
2403 struct net_device *peer_netdev) 2264 struct net_device *peer_netdev)
@@ -2413,354 +2274,7 @@ static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
2413 (peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS)); 2274 (peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS));
2414} 2275}
2415 2276
2416static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
2417 struct net_device *mirred_dev,
2418 struct net_device **out_dev,
2419 struct flowi6 *fl6,
2420 struct neighbour **out_n,
2421 u8 *out_ttl)
2422{
2423 struct neighbour *n = NULL;
2424 struct dst_entry *dst;
2425
2426#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
2427 struct mlx5e_rep_priv *uplink_rpriv;
2428 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2429 int ret;
2430
2431 ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
2432 fl6);
2433 if (ret < 0)
2434 return ret;
2435
2436 if (!(*out_ttl))
2437 *out_ttl = ip6_dst_hoplimit(dst);
2438
2439 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
2440 /* if the egress device isn't on the same HW e-switch, we use the uplink */
2441 if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
2442 *out_dev = uplink_rpriv->netdev;
2443 else
2444 *out_dev = dst->dev;
2445#else
2446 return -EOPNOTSUPP;
2447#endif
2448
2449 n = dst_neigh_lookup(dst, &fl6->daddr);
2450 dst_release(dst);
2451 if (!n)
2452 return -ENOMEM;
2453
2454 *out_n = n;
2455 return 0;
2456}
2457
2458static int mlx5e_gen_vxlan_header(char buf[], struct ip_tunnel_key *tun_key)
2459{
2460 __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
2461 struct udphdr *udp = (struct udphdr *)(buf);
2462 struct vxlanhdr *vxh = (struct vxlanhdr *)
2463 ((char *)udp + sizeof(struct udphdr));
2464
2465 udp->dest = tun_key->tp_dst;
2466 vxh->vx_flags = VXLAN_HF_VNI;
2467 vxh->vx_vni = vxlan_vni_field(tun_id);
2468
2469 return 0;
2470}
2471
2472static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto,
2473 struct mlx5e_encap_entry *e)
2474{
2475 int err = 0;
2476 struct ip_tunnel_key *key = &e->tun_info.key;
2477
2478 if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
2479 *ip_proto = IPPROTO_UDP;
2480 err = mlx5e_gen_vxlan_header(buf, key);
2481 } else {
2482 pr_warn("mlx5: Cannot generate tunnel header for tunnel type (%d)\n"
2483 , e->tunnel_type);
2484 err = -EOPNOTSUPP;
2485 }
2486
2487 return err;
2488}
2489
2490static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
2491 struct net_device *mirred_dev,
2492 struct mlx5e_encap_entry *e)
2493{
2494 int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
2495 int ipv4_encap_size = ETH_HLEN +
2496 sizeof(struct iphdr) +
2497 e->tunnel_hlen;
2498 struct ip_tunnel_key *tun_key = &e->tun_info.key;
2499 struct net_device *out_dev;
2500 struct neighbour *n = NULL;
2501 struct flowi4 fl4 = {};
2502 char *encap_header;
2503 struct ethhdr *eth;
2504 u8 nud_state, ttl;
2505 struct iphdr *ip;
2506 int err;
2507
2508 if (max_encap_size < ipv4_encap_size) {
2509 mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
2510 ipv4_encap_size, max_encap_size);
2511 return -EOPNOTSUPP;
2512 }
2513
2514 encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
2515 if (!encap_header)
2516 return -ENOMEM;
2517
2518 /* add the IP fields */
2519 fl4.flowi4_tos = tun_key->tos;
2520 fl4.daddr = tun_key->u.ipv4.dst;
2521 fl4.saddr = tun_key->u.ipv4.src;
2522 ttl = tun_key->ttl;
2523
2524 err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
2525 &fl4, &n, &ttl);
2526 if (err)
2527 goto free_encap;
2528
2529 /* used by mlx5e_detach_encap to lookup a neigh hash table
2530 * entry in the neigh hash table when a user deletes a rule
2531 */
2532 e->m_neigh.dev = n->dev;
2533 e->m_neigh.family = n->ops->family;
2534 memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
2535 e->out_dev = out_dev;
2536
2537 /* It's important to add the neigh to the hash table before checking
2538 * the neigh validity state. So if we'll get a notification, in case the
2539 * neigh changes it's validity state, we would find the relevant neigh
2540 * in the hash.
2541 */
2542 err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
2543 if (err)
2544 goto free_encap;
2545
2546 read_lock_bh(&n->lock);
2547 nud_state = n->nud_state;
2548 ether_addr_copy(e->h_dest, n->ha);
2549 read_unlock_bh(&n->lock);
2550
2551 /* add ethernet header */
2552 eth = (struct ethhdr *)encap_header;
2553 ether_addr_copy(eth->h_dest, e->h_dest);
2554 ether_addr_copy(eth->h_source, out_dev->dev_addr);
2555 eth->h_proto = htons(ETH_P_IP);
2556
2557 /* add ip header */
2558 ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
2559 ip->tos = tun_key->tos;
2560 ip->version = 0x4;
2561 ip->ihl = 0x5;
2562 ip->ttl = ttl;
2563 ip->daddr = fl4.daddr;
2564 ip->saddr = fl4.saddr;
2565
2566 /* add tunneling protocol header */
2567 err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr),
2568 &ip->protocol, e);
2569 if (err)
2570 goto destroy_neigh_entry;
2571
2572 e->encap_size = ipv4_encap_size;
2573 e->encap_header = encap_header;
2574
2575 if (!(nud_state & NUD_VALID)) {
2576 neigh_event_send(n, NULL);
2577 err = -EAGAIN;
2578 goto out;
2579 }
2580
2581 err = mlx5_packet_reformat_alloc(priv->mdev,
2582 e->reformat_type,
2583 ipv4_encap_size, encap_header,
2584 MLX5_FLOW_NAMESPACE_FDB,
2585 &e->encap_id);
2586 if (err)
2587 goto destroy_neigh_entry;
2588
2589 e->flags |= MLX5_ENCAP_ENTRY_VALID;
2590 mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
2591 neigh_release(n);
2592 return err;
2593
2594destroy_neigh_entry:
2595 mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
2596free_encap:
2597 kfree(encap_header);
2598out:
2599 if (n)
2600 neigh_release(n);
2601 return err;
2602}
2603
/* Build the IPv6 encapsulation header (ETH + IPv6 + tunnel proto) for an
 * encap entry and, if the neighbour is already resolved, allocate the HW
 * packet-reformat context for it.
 *
 * Returns 0 on success, -EAGAIN if neighbour resolution was kicked off and
 * the offload will be completed by the neigh update handler, or a negative
 * errno on failure.
 */
static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv6_encap_size = ETH_HLEN +
			      sizeof(struct ipv6hdr) +
			      e->tunnel_hlen;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	struct ipv6hdr *ip6h;
	char *encap_header;
	struct ethhdr *eth;
	u8 nud_state, ttl;
	int err;

	/* Bail out early if the HW cannot insert a header this large. */
	if (max_encap_size < ipv6_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv6_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	ttl = tun_key->ttl;

	/* Route lookup key comes from the rule's tunnel metadata. */
	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto free_encap;

	/* used by mlx5e_detach_encap to lookup a neigh hash table
	 * entry in the neigh hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * the neigh validity state. So if we'll get a notification, in case the
	 * neigh changes its validity state, we would find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto free_encap;

	/* Snapshot the neighbour state and MAC address under its lock. */
	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	/* add ethernet header */
	eth = (struct ethhdr *)encap_header;
	ether_addr_copy(eth->h_dest, e->h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	/* add ip header */
	ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	ip6_flow_hdr(ip6h, tun_key->tos, 0);
	/* the HW fills up ipv6 payload len */
	ip6h->hop_limit = ttl;
	ip6h->daddr = fl6.daddr;
	ip6h->saddr = fl6.saddr;

	/* add tunneling protocol header */
	err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr),
					 &ip6h->nexthdr, e);
	if (err)
		goto destroy_neigh_entry;

	e->encap_size = ipv6_encap_size;
	e->encap_header = encap_header;

	/* Neighbour not resolved yet: trigger resolution and return -EAGAIN.
	 * encap_header is intentionally kept alive (stored in the entry) so
	 * the neigh update handler can finish the offload later.
	 */
	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_packet_reformat_alloc(priv->mdev,
					 e->reformat_type,
					 ipv6_encap_size, encap_header,
					 MLX5_FLOW_NAMESPACE_FDB,
					 &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
	kfree(encap_header);
out:
	/* n is NULL if the route lookup itself failed */
	if (n)
		neigh_release(n);
	return err;
}
2716
2717static int mlx5e_get_tunnel_type(struct net_device *tunnel_dev)
2718{
2719 if (netif_is_vxlan(tunnel_dev))
2720 return MLX5E_TC_TUNNEL_TYPE_VXLAN;
2721 else
2722 return MLX5E_TC_TUNNEL_TYPE_UNKNOWN;
2723}
2724
2725bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
2726 struct net_device *netdev)
2727{
2728 int tunnel_type = mlx5e_get_tunnel_type(netdev);
2729
2730 if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN &&
2731 MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
2732 return true;
2733 else
2734 return false;
2735}
2736 2277
2737static int mlx5e_init_tunnel_attr(struct net_device *tunnel_dev,
2738 struct mlx5e_priv *priv,
2739 struct mlx5e_encap_entry *e,
2740 struct netlink_ext_ack *extack)
2741{
2742 e->tunnel_type = mlx5e_get_tunnel_type(tunnel_dev);
2743
2744 if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
2745 int dst_port = be16_to_cpu(e->tun_info.key.tp_dst);
2746
2747 if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) {
2748 NL_SET_ERR_MSG_MOD(extack,
2749 "vxlan udp dport was not registered with the HW");
2750 netdev_warn(priv->netdev,
2751 "%d isn't an offloaded vxlan udp dport\n",
2752 dst_port);
2753 return -EOPNOTSUPP;
2754 }
2755 e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
2756 e->tunnel_hlen = VXLAN_HLEN;
2757 } else {
2758 e->reformat_type = -1;
2759 e->tunnel_hlen = -1;
2760 return -EOPNOTSUPP;
2761 }
2762 return 0;
2763}
2764 2278
2765static int mlx5e_attach_encap(struct mlx5e_priv *priv, 2279static int mlx5e_attach_encap(struct mlx5e_priv *priv,
2766 struct ip_tunnel_info *tun_info, 2280 struct ip_tunnel_info *tun_info,
@@ -2797,16 +2311,16 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
2797 return -ENOMEM; 2311 return -ENOMEM;
2798 2312
2799 e->tun_info = *tun_info; 2313 e->tun_info = *tun_info;
2800 err = mlx5e_init_tunnel_attr(mirred_dev, priv, e, extack); 2314 err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
2801 if (err) 2315 if (err)
2802 goto out_err; 2316 goto out_err;
2803 2317
2804 INIT_LIST_HEAD(&e->flows); 2318 INIT_LIST_HEAD(&e->flows);
2805 2319
2806 if (family == AF_INET) 2320 if (family == AF_INET)
2807 err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e); 2321 err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
2808 else if (family == AF_INET6) 2322 else if (family == AF_INET6)
2809 err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e); 2323 err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
2810 2324
2811 if (err && err != -EAGAIN) 2325 if (err && err != -EAGAIN)
2812 goto out_err; 2326 goto out_err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 004e679a4f53..a15c08a35054 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -70,8 +70,6 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
70 70
71int mlx5e_tc_num_filters(struct mlx5e_priv *priv); 71int mlx5e_tc_num_filters(struct mlx5e_priv *priv);
72 72
73bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
74 struct net_device *netdev);
75 73
76#else /* CONFIG_MLX5_ESWITCH */ 74#else /* CONFIG_MLX5_ESWITCH */
77static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; } 75static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }