aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--MAINTAINERS10
-rw-r--r--drivers/infiniband/hw/mlx5/main.c119
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h138
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c461
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h140
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c378
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c133
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c174
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c1042
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h96
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c376
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c164
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h204
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c154
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h27
-rw-r--r--include/linux/mlx5/device.h3
-rw-r--r--include/linux/mlx5/driver.h19
-rw-r--r--include/linux/mlx5/mlx5_ifc.h14
-rw-r--r--include/linux/mlx5/mlx5_ifc_fpga.h288
-rw-r--r--include/linux/mlx5/qp.h14
39 files changed, 4518 insertions, 127 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 71a74555afdf..c324460d5042 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8327,6 +8327,16 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
8327F: drivers/net/ethernet/mellanox/mlx5/core/fpga/* 8327F: drivers/net/ethernet/mellanox/mlx5/core/fpga/*
8328F: include/linux/mlx5/mlx5_ifc_fpga.h 8328F: include/linux/mlx5/mlx5_ifc_fpga.h
8329 8329
8330MELLANOX ETHERNET INNOVA IPSEC DRIVER
8331M: Ilan Tayari <ilant@mellanox.com>
8332R: Boris Pismenny <borisp@mellanox.com>
8333L: netdev@vger.kernel.org
8334S: Supported
8335W: http://www.mellanox.com
8336Q: http://patchwork.ozlabs.org/project/netdev/list/
8337F: drivers/net/ethernet/mellanox/mlx5/core/en_ipsec/*
8338F: drivers/net/ethernet/mellanox/mlx5/core/ipsec*
8339
8330MELLANOX ETHERNET SWITCH DRIVERS 8340MELLANOX ETHERNET SWITCH DRIVERS
8331M: Jiri Pirko <jiri@mellanox.com> 8341M: Jiri Pirko <jiri@mellanox.com>
8332M: Ido Schimmel <idosch@mellanox.com> 8342M: Ido Schimmel <idosch@mellanox.com>
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 9f7e18612322..dc2f59e33971 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -223,8 +223,8 @@ static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
223 return 0; 223 return 0;
224} 224}
225 225
226static void mlx5_query_port_roce(struct ib_device *device, u8 port_num, 226static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
227 struct ib_port_attr *props) 227 struct ib_port_attr *props)
228{ 228{
229 struct mlx5_ib_dev *dev = to_mdev(device); 229 struct mlx5_ib_dev *dev = to_mdev(device);
230 struct mlx5_core_dev *mdev = dev->mdev; 230 struct mlx5_core_dev *mdev = dev->mdev;
@@ -232,12 +232,14 @@ static void mlx5_query_port_roce(struct ib_device *device, u8 port_num,
232 enum ib_mtu ndev_ib_mtu; 232 enum ib_mtu ndev_ib_mtu;
233 u16 qkey_viol_cntr; 233 u16 qkey_viol_cntr;
234 u32 eth_prot_oper; 234 u32 eth_prot_oper;
235 int err;
235 236
236 /* Possible bad flows are checked before filling out props so in case 237 /* Possible bad flows are checked before filling out props so in case
237 * of an error it will still be zeroed out. 238 * of an error it will still be zeroed out.
238 */ 239 */
239 if (mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num)) 240 err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num);
240 return; 241 if (err)
242 return err;
241 243
242 translate_eth_proto_oper(eth_prot_oper, &props->active_speed, 244 translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
243 &props->active_width); 245 &props->active_width);
@@ -258,7 +260,7 @@ static void mlx5_query_port_roce(struct ib_device *device, u8 port_num,
258 260
259 ndev = mlx5_ib_get_netdev(device, port_num); 261 ndev = mlx5_ib_get_netdev(device, port_num);
260 if (!ndev) 262 if (!ndev)
261 return; 263 return 0;
262 264
263 if (mlx5_lag_is_active(dev->mdev)) { 265 if (mlx5_lag_is_active(dev->mdev)) {
264 rcu_read_lock(); 266 rcu_read_lock();
@@ -281,75 +283,49 @@ static void mlx5_query_port_roce(struct ib_device *device, u8 port_num,
281 dev_put(ndev); 283 dev_put(ndev);
282 284
283 props->active_mtu = min(props->max_mtu, ndev_ib_mtu); 285 props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
286 return 0;
284} 287}
285 288
286static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid, 289static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
287 const struct ib_gid_attr *attr, 290 unsigned int index, const union ib_gid *gid,
288 void *mlx5_addr) 291 const struct ib_gid_attr *attr)
289{ 292{
290#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v) 293 enum ib_gid_type gid_type = IB_GID_TYPE_IB;
291 char *mlx5_addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr, 294 u8 roce_version = 0;
292 source_l3_address); 295 u8 roce_l3_type = 0;
293 void *mlx5_addr_mac = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr, 296 bool vlan = false;
294 source_mac_47_32); 297 u8 mac[ETH_ALEN];
295 298 u16 vlan_id = 0;
296 if (!gid)
297 return;
298 299
299 ether_addr_copy(mlx5_addr_mac, attr->ndev->dev_addr); 300 if (gid) {
301 gid_type = attr->gid_type;
302 ether_addr_copy(mac, attr->ndev->dev_addr);
300 303
301 if (is_vlan_dev(attr->ndev)) { 304 if (is_vlan_dev(attr->ndev)) {
302 MLX5_SET_RA(mlx5_addr, vlan_valid, 1); 305 vlan = true;
303 MLX5_SET_RA(mlx5_addr, vlan_id, vlan_dev_vlan_id(attr->ndev)); 306 vlan_id = vlan_dev_vlan_id(attr->ndev);
307 }
304 } 308 }
305 309
306 switch (attr->gid_type) { 310 switch (gid_type) {
307 case IB_GID_TYPE_IB: 311 case IB_GID_TYPE_IB:
308 MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1); 312 roce_version = MLX5_ROCE_VERSION_1;
309 break; 313 break;
310 case IB_GID_TYPE_ROCE_UDP_ENCAP: 314 case IB_GID_TYPE_ROCE_UDP_ENCAP:
311 MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2); 315 roce_version = MLX5_ROCE_VERSION_2;
316 if (ipv6_addr_v4mapped((void *)gid))
317 roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
318 else
319 roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
312 break; 320 break;
313 321
314 default: 322 default:
315 WARN_ON(true); 323 mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
316 } 324 }
317 325
318 if (attr->gid_type != IB_GID_TYPE_IB) { 326 return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
319 if (ipv6_addr_v4mapped((void *)gid)) 327 roce_l3_type, gid->raw, mac, vlan,
320 MLX5_SET_RA(mlx5_addr, roce_l3_type, 328 vlan_id);
321 MLX5_ROCE_L3_TYPE_IPV4);
322 else
323 MLX5_SET_RA(mlx5_addr, roce_l3_type,
324 MLX5_ROCE_L3_TYPE_IPV6);
325 }
326
327 if ((attr->gid_type == IB_GID_TYPE_IB) ||
328 !ipv6_addr_v4mapped((void *)gid))
329 memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
330 else
331 memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4);
332}
333
334static int set_roce_addr(struct ib_device *device, u8 port_num,
335 unsigned int index,
336 const union ib_gid *gid,
337 const struct ib_gid_attr *attr)
338{
339 struct mlx5_ib_dev *dev = to_mdev(device);
340 u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0};
341 u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
342 void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
343 enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);
344
345 if (ll != IB_LINK_LAYER_ETHERNET)
346 return -EINVAL;
347
348 ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);
349
350 MLX5_SET(set_roce_address_in, in, roce_address_index, index);
351 MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
352 return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
353} 329}
354 330
355static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num, 331static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
@@ -357,13 +333,13 @@ static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
357 const struct ib_gid_attr *attr, 333 const struct ib_gid_attr *attr,
358 __always_unused void **context) 334 __always_unused void **context)
359{ 335{
360 return set_roce_addr(device, port_num, index, gid, attr); 336 return set_roce_addr(to_mdev(device), port_num, index, gid, attr);
361} 337}
362 338
363static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num, 339static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num,
364 unsigned int index, __always_unused void **context) 340 unsigned int index, __always_unused void **context)
365{ 341{
366 return set_roce_addr(device, port_num, index, NULL, NULL); 342 return set_roce_addr(to_mdev(device), port_num, index, NULL, NULL);
367} 343}
368 344
369__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num, 345__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
@@ -978,20 +954,31 @@ out:
978int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, 954int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
979 struct ib_port_attr *props) 955 struct ib_port_attr *props)
980{ 956{
957 unsigned int count;
958 int ret;
959
981 switch (mlx5_get_vport_access_method(ibdev)) { 960 switch (mlx5_get_vport_access_method(ibdev)) {
982 case MLX5_VPORT_ACCESS_METHOD_MAD: 961 case MLX5_VPORT_ACCESS_METHOD_MAD:
983 return mlx5_query_mad_ifc_port(ibdev, port, props); 962 ret = mlx5_query_mad_ifc_port(ibdev, port, props);
963 break;
984 964
985 case MLX5_VPORT_ACCESS_METHOD_HCA: 965 case MLX5_VPORT_ACCESS_METHOD_HCA:
986 return mlx5_query_hca_port(ibdev, port, props); 966 ret = mlx5_query_hca_port(ibdev, port, props);
967 break;
987 968
988 case MLX5_VPORT_ACCESS_METHOD_NIC: 969 case MLX5_VPORT_ACCESS_METHOD_NIC:
989 mlx5_query_port_roce(ibdev, port, props); 970 ret = mlx5_query_port_roce(ibdev, port, props);
990 return 0; 971 break;
991 972
992 default: 973 default:
993 return -EINVAL; 974 ret = -EINVAL;
975 }
976
977 if (!ret && props) {
978 count = mlx5_core_reserved_gids_count(to_mdev(ibdev)->mdev);
979 props->gid_tbl_len -= count;
994 } 980 }
981 return ret;
995} 982}
996 983
997static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index, 984static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index cf1ef48bfd8d..5aee05992f27 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -11,9 +11,13 @@ config MLX5_CORE
11 Core driver for low level functionality of the ConnectX-4 and 11 Core driver for low level functionality of the ConnectX-4 and
12 Connect-IB cards by Mellanox Technologies. 12 Connect-IB cards by Mellanox Technologies.
13 13
14config MLX5_ACCEL
15 bool
16
14config MLX5_FPGA 17config MLX5_FPGA
15 bool "Mellanox Technologies Innova support" 18 bool "Mellanox Technologies Innova support"
16 depends on MLX5_CORE 19 depends on MLX5_CORE
20 select MLX5_ACCEL
17 ---help--- 21 ---help---
18 Build support for the Innova family of network cards by Mellanox 22 Build support for the Innova family of network cards by Mellanox
19 Technologies. Innova network cards are comprised of a ConnectX chip 23 Technologies. Innova network cards are comprised of a ConnectX chip
@@ -48,3 +52,15 @@ config MLX5_CORE_IPOIB
48 default n 52 default n
49 ---help--- 53 ---help---
50 MLX5 IPoIB offloads & acceleration support. 54 MLX5 IPoIB offloads & acceleration support.
55
56config MLX5_EN_IPSEC
57 bool "IPSec XFRM cryptography-offload accelaration"
58 depends on MLX5_ACCEL
59 depends on MLX5_CORE_EN
60 depends on XFRM_OFFLOAD
61 depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
62 default n
63 ---help---
64 Build support for IPsec cryptography-offload accelaration in the NIC.
65 Note: Support for hardware with this capability needs to be selected
66 for this option to become available.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 5ad093a21a6e..ca367445f864 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -4,9 +4,12 @@ subdir-ccflags-y += -I$(src)
4mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ 4mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
5 health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ 5 health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
6 mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ 6 mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
7 fs_counters.o rl.o lag.o dev.o 7 fs_counters.o rl.o lag.o dev.o lib/gid.o
8 8
9mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o 9mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o
10
11mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
12 fpga/ipsec.o
10 13
11mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \ 14mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
12 en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \ 15 en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \
@@ -16,3 +19,6 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
16mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o 19mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
17 20
18mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o 21mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o
22
23mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
24 en_accel/ipsec_stats.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
new file mode 100644
index 000000000000..53e69edaedde
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
@@ -0,0 +1,78 @@
1/*
2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/mlx5/device.h>
35
36#include "accel/ipsec.h"
37#include "mlx5_core.h"
38#include "fpga/ipsec.h"
39
40void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
41 struct mlx5_accel_ipsec_sa *cmd)
42{
43 if (!MLX5_IPSEC_DEV(mdev))
44 return ERR_PTR(-EOPNOTSUPP);
45
46 return mlx5_fpga_ipsec_sa_cmd_exec(mdev, cmd);
47}
48
49int mlx5_accel_ipsec_sa_cmd_wait(void *ctx)
50{
51 return mlx5_fpga_ipsec_sa_cmd_wait(ctx);
52}
53
54u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev)
55{
56 return mlx5_fpga_ipsec_device_caps(mdev);
57}
58
59unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev)
60{
61 return mlx5_fpga_ipsec_counters_count(mdev);
62}
63
64int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
65 unsigned int count)
66{
67 return mlx5_fpga_ipsec_counters_read(mdev, counters, count);
68}
69
70int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
71{
72 return mlx5_fpga_ipsec_init(mdev);
73}
74
75void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
76{
77 mlx5_fpga_ipsec_cleanup(mdev);
78}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
new file mode 100644
index 000000000000..d6e20fea9554
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
@@ -0,0 +1,138 @@
1/*
2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef __MLX5_ACCEL_IPSEC_H__
35#define __MLX5_ACCEL_IPSEC_H__
36
37#ifdef CONFIG_MLX5_ACCEL
38
39#include <linux/mlx5/driver.h>
40
41enum {
42 MLX5_ACCEL_IPSEC_DEVICE = BIT(1),
43 MLX5_ACCEL_IPSEC_IPV6 = BIT(2),
44 MLX5_ACCEL_IPSEC_ESP = BIT(3),
45 MLX5_ACCEL_IPSEC_LSO = BIT(4),
46};
47
48#define MLX5_IPSEC_SADB_IP_AH BIT(7)
49#define MLX5_IPSEC_SADB_IP_ESP BIT(6)
50#define MLX5_IPSEC_SADB_SA_VALID BIT(5)
51#define MLX5_IPSEC_SADB_SPI_EN BIT(4)
52#define MLX5_IPSEC_SADB_DIR_SX BIT(3)
53#define MLX5_IPSEC_SADB_IPV6 BIT(2)
54
55enum {
56 MLX5_IPSEC_CMD_ADD_SA = 0,
57 MLX5_IPSEC_CMD_DEL_SA = 1,
58};
59
60enum mlx5_accel_ipsec_enc_mode {
61 MLX5_IPSEC_SADB_MODE_NONE = 0,
62 MLX5_IPSEC_SADB_MODE_AES_GCM_128_AUTH_128 = 1,
63 MLX5_IPSEC_SADB_MODE_AES_GCM_256_AUTH_128 = 3,
64};
65
66#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
67 MLX5_ACCEL_IPSEC_DEVICE)
68
69struct mlx5_accel_ipsec_sa {
70 __be32 cmd;
71 u8 key_enc[32];
72 u8 key_auth[32];
73 __be32 sip[4];
74 __be32 dip[4];
75 union {
76 struct {
77 __be32 reserved;
78 u8 salt_iv[8];
79 __be32 salt;
80 } __packed gcm;
81 struct {
82 u8 salt[16];
83 } __packed cbc;
84 };
85 __be32 spi;
86 __be32 sw_sa_handle;
87 __be16 tfclen;
88 u8 enc_mode;
89 u8 sip_masklen;
90 u8 dip_masklen;
91 u8 flags;
92 u8 reserved[2];
93} __packed;
94
95/**
96 * mlx5_accel_ipsec_sa_cmd_exec - Execute an IPSec SADB command
97 * @mdev: mlx5 device
98 * @cmd: command to execute
99 * May be called from atomic context. Returns context pointer, or error
100 * Caller must eventually call mlx5_accel_ipsec_sa_cmd_wait from non-atomic
101 * context, to cleanup the context pointer
102 */
103void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
104 struct mlx5_accel_ipsec_sa *cmd);
105
106/**
107 * mlx5_accel_ipsec_sa_cmd_wait - Wait for command execution completion
108 * @context: Context pointer returned from call to mlx5_accel_ipsec_sa_cmd_exec
109 * Sleeps (killable) until command execution is complete.
110 * Returns the command result, or -EINTR if killed
111 */
112int mlx5_accel_ipsec_sa_cmd_wait(void *context);
113
114u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
115
116unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev);
117int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
118 unsigned int count);
119
120int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
121void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
122
123#else
124
125#define MLX5_IPSEC_DEV(mdev) false
126
127static inline int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
128{
129 return 0;
130}
131
132static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
133{
134}
135
136#endif
137
138#endif /* __MLX5_ACCEL_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 4d5bd01f1ebb..f5a2c605749f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -307,6 +307,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
307 case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT: 307 case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
308 case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER: 308 case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
309 case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT: 309 case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
310 case MLX5_CMD_OP_FPGA_DESTROY_QP:
310 return MLX5_CMD_STAT_OK; 311 return MLX5_CMD_STAT_OK;
311 312
312 case MLX5_CMD_OP_QUERY_HCA_CAP: 313 case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -419,6 +420,10 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
419 case MLX5_CMD_OP_QUERY_FLOW_COUNTER: 420 case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
420 case MLX5_CMD_OP_ALLOC_ENCAP_HEADER: 421 case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
421 case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT: 422 case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
423 case MLX5_CMD_OP_FPGA_CREATE_QP:
424 case MLX5_CMD_OP_FPGA_MODIFY_QP:
425 case MLX5_CMD_OP_FPGA_QUERY_QP:
426 case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
422 *status = MLX5_DRIVER_STATUS_ABORTED; 427 *status = MLX5_DRIVER_STATUS_ABORTED;
423 *synd = MLX5_DRIVER_SYND; 428 *synd = MLX5_DRIVER_SYND;
424 return -EIO; 429 return -EIO;
@@ -585,6 +590,11 @@ const char *mlx5_command_str(int command)
585 MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER); 590 MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
586 MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT); 591 MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
587 MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT); 592 MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
593 MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP);
594 MLX5_COMMAND_STR_CASE(FPGA_MODIFY_QP);
595 MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP);
596 MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS);
597 MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP);
588 default: return "unknown command opcode"; 598 default: return "unknown command opcode";
589 } 599 }
590} 600}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index eef0a50e2388..e1b7ddfecd01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -328,6 +328,7 @@ struct mlx5e_sq_dma {
328 328
329enum { 329enum {
330 MLX5E_SQ_STATE_ENABLED, 330 MLX5E_SQ_STATE_ENABLED,
331 MLX5E_SQ_STATE_IPSEC,
331}; 332};
332 333
333struct mlx5e_sq_wqe_info { 334struct mlx5e_sq_wqe_info {
@@ -784,6 +785,9 @@ struct mlx5e_priv {
784 785
785 const struct mlx5e_profile *profile; 786 const struct mlx5e_profile *profile;
786 void *ppriv; 787 void *ppriv;
788#ifdef CONFIG_MLX5_EN_IPSEC
789 struct mlx5e_ipsec *ipsec;
790#endif
787}; 791};
788 792
789struct mlx5e_profile { 793struct mlx5e_profile {
@@ -833,7 +837,6 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
833void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix); 837void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
834void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq); 838void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq);
835void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi); 839void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
836struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
837 840
838void mlx5e_rx_am(struct mlx5e_rq *rq); 841void mlx5e_rx_am(struct mlx5e_rq *rq);
839void mlx5e_rx_am_work(struct work_struct *work); 842void mlx5e_rx_am_work(struct work_struct *work);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
new file mode 100644
index 000000000000..bac5103efad3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -0,0 +1,461 @@
1/*
2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <crypto/internal/geniv.h>
35#include <crypto/aead.h>
36#include <linux/inetdevice.h>
37#include <linux/netdevice.h>
38#include <linux/module.h>
39
40#include "en.h"
41#include "accel/ipsec.h"
42#include "en_accel/ipsec.h"
43#include "en_accel/ipsec_rxtx.h"
44
45struct mlx5e_ipsec_sa_entry {
46 struct hlist_node hlist; /* Item in SADB_RX hashtable */
47 unsigned int handle; /* Handle in SADB_RX */
48 struct xfrm_state *x;
49 struct mlx5e_ipsec *ipsec;
50 void *context;
51};
52
53struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
54 unsigned int handle)
55{
56 struct mlx5e_ipsec_sa_entry *sa_entry;
57 struct xfrm_state *ret = NULL;
58
59 rcu_read_lock();
60 hash_for_each_possible_rcu(ipsec->sadb_rx, sa_entry, hlist, handle)
61 if (sa_entry->handle == handle) {
62 ret = sa_entry->x;
63 xfrm_state_hold(ret);
64 break;
65 }
66 rcu_read_unlock();
67
68 return ret;
69}
70
71static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
72{
73 struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
74 unsigned long flags;
75 int ret;
76
77 spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
78 ret = ida_simple_get(&ipsec->halloc, 1, 0, GFP_KERNEL);
79 if (ret < 0)
80 goto out;
81
82 sa_entry->handle = ret;
83 hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle);
84 ret = 0;
85
86out:
87 spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
88 return ret;
89}
90
91static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
92{
93 struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
94 unsigned long flags;
95
96 spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
97 hash_del_rcu(&sa_entry->hlist);
98 spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
99}
100
101static void mlx5e_ipsec_sadb_rx_free(struct mlx5e_ipsec_sa_entry *sa_entry)
102{
103 struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
104 unsigned long flags;
105
106 /* Wait for the hash_del_rcu call in sadb_rx_del to affect data path */
107 synchronize_rcu();
108 spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
109 ida_simple_remove(&ipsec->halloc, sa_entry->handle);
110 spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
111}
112
113static enum mlx5_accel_ipsec_enc_mode mlx5e_ipsec_enc_mode(struct xfrm_state *x)
114{
115 unsigned int key_len = (x->aead->alg_key_len + 7) / 8 - 4;
116
117 switch (key_len) {
118 case 16:
119 return MLX5_IPSEC_SADB_MODE_AES_GCM_128_AUTH_128;
120 case 32:
121 return MLX5_IPSEC_SADB_MODE_AES_GCM_256_AUTH_128;
122 default:
123 netdev_warn(x->xso.dev, "Bad key len: %d for alg %s\n",
124 key_len, x->aead->alg_name);
125 return -1;
126 }
127}
128
/* Translate an xfrm state into the device SA command layout.
 *
 * @op:       MLX5_IPSEC_CMD_ADD_SA or MLX5_IPSEC_CMD_DEL_SA
 * @sa_entry: driver SA entry wrapping the xfrm state
 * @hw_sa:    output command buffer, fully (re)initialized here
 *
 * Key material is only filled in for ADD; DEL identifies the SA by
 * addresses/SPI/handle alone. Multi-byte fields are converted to
 * big-endian as required by the device interface.
 */
static void mlx5e_ipsec_build_hw_sa(u32 op, struct mlx5e_ipsec_sa_entry *sa_entry,
				    struct mlx5_accel_ipsec_sa *hw_sa)
{
	struct xfrm_state *x = sa_entry->x;
	struct aead_geniv_ctx *geniv_ctx;
	unsigned int crypto_data_len;
	struct crypto_aead *aead;
	unsigned int key_len;
	int ivsize;

	memset(hw_sa, 0, sizeof(*hw_sa));

	if (op == MLX5_IPSEC_CMD_ADD_SA) {
		/* alg_key_len is in bits; the final 4 bytes of the key blob
		 * are the GCM salt, not part of the AES key (RFC 4106 layout).
		 */
		crypto_data_len = (x->aead->alg_key_len + 7) / 8;
		key_len = crypto_data_len - 4; /* 4 bytes salt at end */
		aead = x->data;
		geniv_ctx = crypto_aead_ctx(aead);
		ivsize = crypto_aead_ivsize(aead);

		memcpy(&hw_sa->key_enc, x->aead->alg_key, key_len);
		/* Duplicate 128 bit key twice according to HW layout */
		if (key_len == 16)
			memcpy(&hw_sa->key_enc[16], x->aead->alg_key, key_len);
		memcpy(&hw_sa->gcm.salt_iv, geniv_ctx->salt, ivsize);
		hw_sa->gcm.salt = *((__be32 *)(x->aead->alg_key + key_len));
	}

	hw_sa->cmd = htonl(op);
	hw_sa->flags |= MLX5_IPSEC_SADB_SA_VALID | MLX5_IPSEC_SADB_SPI_EN;
	if (x->props.family == AF_INET) {
		/* IPv4 addresses occupy the last word of the 128-bit field */
		hw_sa->sip[3] = x->props.saddr.a4;
		hw_sa->dip[3] = x->id.daddr.a4;
		hw_sa->sip_masklen = 32;
		hw_sa->dip_masklen = 32;
	} else {
		memcpy(hw_sa->sip, x->props.saddr.a6, sizeof(hw_sa->sip));
		memcpy(hw_sa->dip, x->id.daddr.a6, sizeof(hw_sa->dip));
		hw_sa->sip_masklen = 128;
		hw_sa->dip_masklen = 128;
		hw_sa->flags |= MLX5_IPSEC_SADB_IPV6;
	}
	hw_sa->spi = x->id.spi; /* already big-endian in the xfrm state */
	hw_sa->sw_sa_handle = htonl(sa_entry->handle);
	switch (x->id.proto) {
	case IPPROTO_ESP:
		hw_sa->flags |= MLX5_IPSEC_SADB_IP_ESP;
		break;
	case IPPROTO_AH:
		hw_sa->flags |= MLX5_IPSEC_SADB_IP_AH;
		break;
	default:
		break;
	}
	hw_sa->enc_mode = mlx5e_ipsec_enc_mode(x);
	/* Egress (TX) SA unless the state was configured for inbound offload */
	if (!(x->xso.flags & XFRM_OFFLOAD_INBOUND))
		hw_sa->flags |= MLX5_IPSEC_SADB_DIR_SX;
}
186
187static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
188{
189 struct net_device *netdev = x->xso.dev;
190 struct mlx5e_priv *priv;
191
192 priv = netdev_priv(netdev);
193
194 if (x->props.aalgo != SADB_AALG_NONE) {
195 netdev_info(netdev, "Cannot offload authenticated xfrm states\n");
196 return -EINVAL;
197 }
198 if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
199 netdev_info(netdev, "Only AES-GCM-ICV16 xfrm state may be offloaded\n");
200 return -EINVAL;
201 }
202 if (x->props.calgo != SADB_X_CALG_NONE) {
203 netdev_info(netdev, "Cannot offload compressed xfrm states\n");
204 return -EINVAL;
205 }
206 if (x->props.flags & XFRM_STATE_ESN) {
207 netdev_info(netdev, "Cannot offload ESN xfrm states\n");
208 return -EINVAL;
209 }
210 if (x->props.family != AF_INET &&
211 x->props.family != AF_INET6) {
212 netdev_info(netdev, "Only IPv4/6 xfrm states may be offloaded\n");
213 return -EINVAL;
214 }
215 if (x->props.mode != XFRM_MODE_TRANSPORT &&
216 x->props.mode != XFRM_MODE_TUNNEL) {
217 dev_info(&netdev->dev, "Only transport and tunnel xfrm states may be offloaded\n");
218 return -EINVAL;
219 }
220 if (x->id.proto != IPPROTO_ESP) {
221 netdev_info(netdev, "Only ESP xfrm state may be offloaded\n");
222 return -EINVAL;
223 }
224 if (x->encap) {
225 netdev_info(netdev, "Encapsulated xfrm state may not be offloaded\n");
226 return -EINVAL;
227 }
228 if (!x->aead) {
229 netdev_info(netdev, "Cannot offload xfrm states without aead\n");
230 return -EINVAL;
231 }
232 if (x->aead->alg_icv_len != 128) {
233 netdev_info(netdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
234 return -EINVAL;
235 }
236 if ((x->aead->alg_key_len != 128 + 32) &&
237 (x->aead->alg_key_len != 256 + 32)) {
238 netdev_info(netdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
239 return -EINVAL;
240 }
241 if (x->tfcpad) {
242 netdev_info(netdev, "Cannot offload xfrm states with tfc padding\n");
243 return -EINVAL;
244 }
245 if (!x->geniv) {
246 netdev_info(netdev, "Cannot offload xfrm states without geniv\n");
247 return -EINVAL;
248 }
249 if (strcmp(x->geniv, "seqiv")) {
250 netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
251 return -EINVAL;
252 }
253 if (x->props.family == AF_INET6 &&
254 !(mlx5_accel_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_IPV6)) {
255 netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n");
256 return -EINVAL;
257 }
258 return 0;
259}
260
/* xdo_dev_state_add callback: offload a new SA to the accelerator.
 *
 * For inbound SAs the entry is inserted into the RX SADB *before* the
 * device command completes, so packets the HW already decrypted can be
 * matched to their state. On success the driver entry pointer is stored
 * in x->xso.offload_handle for the delete/free callbacks.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx5e_xfrm_add_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
	struct net_device *netdev = x->xso.dev;
	struct mlx5_accel_ipsec_sa hw_sa;
	struct mlx5e_priv *priv;
	void *context;
	int err;

	priv = netdev_priv(netdev);

	err = mlx5e_xfrm_validate_state(x);
	if (err)
		return err;

	sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
	if (!sa_entry) {
		err = -ENOMEM;
		goto out;
	}

	sa_entry->x = x;
	sa_entry->ipsec = priv->ipsec;

	/* Add the SA to handle processed incoming packets before the add SA
	 * completion was received
	 */
	if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
		err = mlx5e_ipsec_sadb_rx_add(sa_entry);
		if (err) {
			netdev_info(netdev, "Failed adding to SADB_RX: %d\n", err);
			goto err_entry;
		}
	}

	/* Issue the ADD_SA command and wait for its completion */
	mlx5e_ipsec_build_hw_sa(MLX5_IPSEC_CMD_ADD_SA, sa_entry, &hw_sa);
	context = mlx5_accel_ipsec_sa_cmd_exec(sa_entry->ipsec->en_priv->mdev, &hw_sa);
	if (IS_ERR(context)) {
		err = PTR_ERR(context);
		goto err_sadb_rx;
	}

	err = mlx5_accel_ipsec_sa_cmd_wait(context);
	if (err)
		goto err_sadb_rx;

	x->xso.offload_handle = (unsigned long)sa_entry;
	goto out;

err_sadb_rx:
	/* Undo the early SADB insertion done above */
	if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
		mlx5e_ipsec_sadb_rx_del(sa_entry);
		mlx5e_ipsec_sadb_rx_free(sa_entry);
	}
err_entry:
	kfree(sa_entry);
out:
	return err;
}
320
/* xdo_dev_state_delete callback: issue the asynchronous DEL_SA command.
 *
 * The command context is stashed in the entry; mlx5e_xfrm_free_state()
 * later waits for its completion and frees the entry.
 *
 * NOTE(review): if mlx5_accel_ipsec_sa_cmd_exec() fails here,
 * sa_entry->context stays NULL and free_state will wait on a NULL
 * context — confirm mlx5_accel_ipsec_sa_cmd_wait() tolerates that.
 */
static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct mlx5_accel_ipsec_sa hw_sa;
	void *context;

	if (!x->xso.offload_handle)
		return;

	sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
	WARN_ON(sa_entry->x != x);

	/* Stop matching newly decrypted RX packets to this state */
	if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
		mlx5e_ipsec_sadb_rx_del(sa_entry);

	mlx5e_ipsec_build_hw_sa(MLX5_IPSEC_CMD_DEL_SA, sa_entry, &hw_sa);
	context = mlx5_accel_ipsec_sa_cmd_exec(sa_entry->ipsec->en_priv->mdev, &hw_sa);
	if (IS_ERR(context))
		return;

	sa_entry->context = context;
}
343
/* xdo_dev_state_free callback: reap the DEL_SA command started by
 * mlx5e_xfrm_del_state() and release the driver SA entry.
 *
 * If the device command failed, the entry is deliberately leaked rather
 * than freed while the HW may still reference it (see comment below).
 */
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry;
	int res;

	if (!x->xso.offload_handle)
		return;

	sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
	WARN_ON(sa_entry->x != x);

	/* Wait for the DEL_SA command issued in del_state to complete */
	res = mlx5_accel_ipsec_sa_cmd_wait(sa_entry->context);
	sa_entry->context = NULL;
	if (res) {
		/* Leftover object will leak */
		return;
	}

	if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
		mlx5e_ipsec_sadb_rx_free(sa_entry);

	kfree(sa_entry);
}
367
368int mlx5e_ipsec_init(struct mlx5e_priv *priv)
369{
370 struct mlx5e_ipsec *ipsec = NULL;
371
372 if (!MLX5_IPSEC_DEV(priv->mdev)) {
373 netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
374 return 0;
375 }
376
377 ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
378 if (!ipsec)
379 return -ENOMEM;
380
381 hash_init(ipsec->sadb_rx);
382 spin_lock_init(&ipsec->sadb_rx_lock);
383 ida_init(&ipsec->halloc);
384 ipsec->en_priv = priv;
385 ipsec->en_priv->ipsec = ipsec;
386 netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
387 return 0;
388}
389
390void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
391{
392 struct mlx5e_ipsec *ipsec = priv->ipsec;
393
394 if (!ipsec)
395 return;
396
397 ida_destroy(&ipsec->halloc);
398 kfree(ipsec);
399 priv->ipsec = NULL;
400}
401
402static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
403{
404 if (x->props.family == AF_INET) {
405 /* Offload with IPv4 options is not supported yet */
406 if (ip_hdr(skb)->ihl > 5)
407 return false;
408 } else {
409 /* Offload with IPv6 extension headers is not support yet */
410 if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
411 return false;
412 }
413
414 return true;
415}
416
/* xfrm device-offload callbacks registered on the netdevice */
static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
	.xdo_dev_state_add	= mlx5e_xfrm_add_state,
	.xdo_dev_state_delete	= mlx5e_xfrm_del_state,
	.xdo_dev_state_free	= mlx5e_xfrm_free_state,
	.xdo_dev_offload_ok	= mlx5e_ipsec_offload_ok,
};
423
/* Advertise IPSec offload features on the netdevice, layered by
 * capability: ESP offload requires SWP (software parser); checksum and
 * LSO for ESP each require their own additional capabilities, so each
 * missing capability stops the feature escalation early.
 */
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct net_device *netdev = priv->netdev;

	if (!priv->ipsec)
		return;

	if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_ESP) ||
	    !MLX5_CAP_ETH(mdev, swp)) {
		mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
		return;
	}

	mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
	netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
	netdev->features |= NETIF_F_HW_ESP;
	netdev->hw_enc_features |= NETIF_F_HW_ESP;

	if (!MLX5_CAP_ETH(mdev, swp_csum)) {
		mlx5_core_dbg(mdev, "mlx5e: SWP checksum not supported\n");
		return;
	}

	netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
	netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;

	if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_LSO) ||
	    !MLX5_CAP_ETH(mdev, swp_lso)) {
		mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
		return;
	}

	mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
	netdev->features |= NETIF_F_GSO_ESP;
	netdev->hw_features |= NETIF_F_GSO_ESP;
	netdev->hw_enc_features |= NETIF_F_GSO_ESP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
new file mode 100644
index 000000000000..56e00baf16cc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -0,0 +1,140 @@
1/*
2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef __MLX5E_IPSEC_H__
35#define __MLX5E_IPSEC_H__
36
37#ifdef CONFIG_MLX5_EN_IPSEC
38
39#include <linux/mlx5/device.h>
40#include <net/xfrm.h>
41#include <linux/idr.h>
42
43#define MLX5E_IPSEC_SADB_RX_BITS 10
44#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
45#define MLX5E_METADATA_ETHER_LEN 8
46
47struct mlx5e_priv;
48
/* Driver-side (software) drop counters, incremented on the datapath;
 * atomic64 because RX/TX paths update them concurrently.
 */
struct mlx5e_ipsec_sw_stats {
	atomic64_t ipsec_rx_drop_sp_alloc;	/* secpath allocation failed */
	atomic64_t ipsec_rx_drop_sadb_miss;	/* no SA for RX handle */
	atomic64_t ipsec_rx_drop_syndrome;	/* unknown RX metadata syndrome */
	atomic64_t ipsec_tx_drop_bundle;	/* more than one SA on TX skb */
	atomic64_t ipsec_tx_drop_no_state;	/* no xfrm state on TX skb */
	atomic64_t ipsec_tx_drop_not_ip;	/* non-IP or no offload handle */
	atomic64_t ipsec_tx_drop_trailer;	/* ESP trailer removal failed */
	atomic64_t ipsec_tx_drop_metadata;	/* metadata insertion failed */
};
59
/* Hardware counters read from the accelerator; field order must match
 * mlx5e_ipsec_hw_stats_desc (they are filled by a bulk counter read).
 */
struct mlx5e_ipsec_stats {
	u64 ipsec_dec_in_packets;
	u64 ipsec_dec_out_packets;
	u64 ipsec_dec_bypass_packets;
	u64 ipsec_enc_in_packets;
	u64 ipsec_enc_out_packets;
	u64 ipsec_enc_bypass_packets;
	u64 ipsec_dec_drop_packets;
	u64 ipsec_dec_auth_fail_packets;
	u64 ipsec_enc_drop_packets;
	u64 ipsec_add_sa_success;
	u64 ipsec_add_sa_fail;
	u64 ipsec_del_sa_success;
	u64 ipsec_del_sa_fail;
	u64 ipsec_cmd_drop;
};
76
/* Per-netdevice IPSec offload context, allocated by mlx5e_ipsec_init() */
struct mlx5e_ipsec {
	struct mlx5e_priv *en_priv;		/* owning netdevice private */
	DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);	/* RX SA lookup by handle */
	spinlock_t sadb_rx_lock; /* Protects sadb_rx and halloc */
	struct ida halloc;			/* RX SA handle allocator */
	struct mlx5e_ipsec_sw_stats sw_stats;
	struct mlx5e_ipsec_stats stats;
};
85
86void mlx5e_ipsec_build_inverse_table(void);
87int mlx5e_ipsec_init(struct mlx5e_priv *priv);
88void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
89void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
90
91int mlx5e_ipsec_get_count(struct mlx5e_priv *priv);
92int mlx5e_ipsec_get_strings(struct mlx5e_priv *priv, uint8_t *data);
93void mlx5e_ipsec_update_stats(struct mlx5e_priv *priv);
94int mlx5e_ipsec_get_stats(struct mlx5e_priv *priv, u64 *data);
95
96struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev,
97 unsigned int handle);
98
99#else
100
/* CONFIG_MLX5_EN_IPSEC disabled: no-op stubs so the generic mlx5e code
 * can call these unconditionally without #ifdefs at every call site.
 */
static inline void mlx5e_ipsec_build_inverse_table(void)
{
}

static inline int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
	return 0;
}

static inline void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
{
}

static inline void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
}

static inline int mlx5e_ipsec_get_count(struct mlx5e_priv *priv)
{
	return 0;
}

static inline int mlx5e_ipsec_get_strings(struct mlx5e_priv *priv,
					  uint8_t *data)
{
	return 0;
}

static inline void mlx5e_ipsec_update_stats(struct mlx5e_priv *priv)
{
}

static inline int mlx5e_ipsec_get_stats(struct mlx5e_priv *priv, u64 *data)
{
	return 0;
}
137
138#endif
139
140#endif /* __MLX5E_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
new file mode 100644
index 000000000000..4a78aefdf157
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -0,0 +1,378 @@
1/*
2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <crypto/aead.h>
35#include <net/xfrm.h>
36#include <net/esp.h>
37
38#include "en_accel/ipsec_rxtx.h"
39#include "en_accel/ipsec.h"
40#include "en.h"
41
/* RX syndromes reported by the accelerator in the inline metadata */
enum {
	MLX5E_IPSEC_RX_SYNDROME_DECRYPTED = 0x11,
	MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED = 0x12,
};

/* RX half of the metadata payload: identifies the SA that decrypted
 * the packet. Wire format — keep packed, do not reorder fields.
 */
struct mlx5e_ipsec_rx_metadata {
	unsigned char   reserved;
	__be32		sa_handle;
} __packed;

/* TX syndromes the driver writes into the metadata for the accelerator */
enum {
	MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
	MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9,
};

/* TX half of the metadata payload. Wire format — keep packed. */
struct mlx5e_ipsec_tx_metadata {
	__be16 mss_inv;         /* 1/MSS in 16bit fixed point, only for LSO */
	__be16 seq;             /* LSBs of the first TCP seq, only for LSO */
	u8     esp_next_proto;  /* Next protocol of ESP */
} __packed;

/* Inline metadata header exchanged with the accelerator; carried as a
 * pseudo-ethertype (MLX5E_METADATA_ETHER_TYPE) right after the MAC
 * addresses. Wire format — keep packed.
 */
struct mlx5e_ipsec_metadata {
	unsigned char syndrome;
	union {
		unsigned char raw[5];
		/* from FPGA to host, on successful decrypt */
		struct mlx5e_ipsec_rx_metadata rx;
		/* from host to FPGA */
		struct mlx5e_ipsec_tx_metadata tx;
	} __packed content;
	/* packet type ID field	*/
	__be16 ethertype;
} __packed;
75
#define MAX_LSO_MSS 2048

/* Pre-calculated (Q0.16) fixed-point inverse 1/x function */
static __be16 mlx5e_ipsec_inverse_table[MAX_LSO_MSS];

/* Look up 1/gso_size in Q0.16 for the TX LSO metadata.
 * NOTE(review): gso_size is not range-checked here; a gso_size >=
 * MAX_LSO_MSS (2048) would read out of bounds — confirm the netdev GSO
 * limits configured elsewhere guarantee gso_size < MAX_LSO_MSS.
 */
static inline __be16 mlx5e_ipsec_mss_inv(struct sk_buff *skb)
{
	return mlx5e_ipsec_inverse_table[skb_shinfo(skb)->gso_size];
}
85
/* Insert the inline metadata header between the MAC addresses and the
 * original ethertype: push sizeof(metadata) bytes, move the two MAC
 * addresses back to the new start of frame, and claim the created gap
 * with the metadata pseudo-ethertype. The original ethertype ends up as
 * the last field of the metadata struct (mdata->ethertype).
 *
 * Returns a pointer to the (content-zeroed) metadata on success, or
 * ERR_PTR(-ENOMEM) if headroom could not be made.
 */
static struct mlx5e_ipsec_metadata *mlx5e_ipsec_add_metadata(struct sk_buff *skb)
{
	struct mlx5e_ipsec_metadata *mdata;
	struct ethhdr *eth;

	if (unlikely(skb_cow_head(skb, sizeof(*mdata))))
		return ERR_PTR(-ENOMEM);

	eth = (struct ethhdr *)skb_push(skb, sizeof(*mdata));
	skb->mac_header -= sizeof(*mdata);
	mdata = (struct mlx5e_ipsec_metadata *)(eth + 1);

	/* Move dst+src MAC to the new frame start; the old ethertype is
	 * now mdata->ethertype.
	 */
	memmove(skb->data, skb->data + sizeof(*mdata),
		2 * ETH_ALEN);

	eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);

	memset(mdata->content.raw, 0, sizeof(mdata->content.raw));
	return mdata;
}
106
107static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
108{
109 unsigned int alen = crypto_aead_authsize(x->data);
110 struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
111 struct iphdr *ipv4hdr = ip_hdr(skb);
112 unsigned int trailer_len;
113 u8 plen;
114 int ret;
115
116 ret = skb_copy_bits(skb, skb->len - alen - 2, &plen, 1);
117 if (unlikely(ret))
118 return ret;
119
120 trailer_len = alen + plen + 2;
121
122 pskb_trim(skb, skb->len - trailer_len);
123 if (skb->protocol == htons(ETH_P_IP)) {
124 ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
125 ip_send_check(ipv4hdr);
126 } else {
127 ipv6hdr->payload_len = htons(ntohs(ipv6hdr->payload_len) -
128 trailer_len);
129 }
130 return 0;
131}
132
/* Program the software-parser (SWP) offsets in the send WQE eth segment
 * so the HW can locate the headers of the ESP packet it must process.
 *
 * @mode: XFRM_MODE_TUNNEL or XFRM_MODE_TRANSPORT
 * @xo:   offload info; xo->proto is the post-ESP next protocol
 */
static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
				struct mlx5_wqe_eth_seg *eseg, u8 mode,
				struct xfrm_offload *xo)
{
	u8 proto;

	/* Tunnel Mode:
	 * SWP:      OutL3       InL3  InL4
	 * Pkt: MAC  IP     ESP  IP    L4
	 *
	 * Transport Mode:
	 * SWP:      OutL3       InL4
	 *           InL3
	 * Pkt: MAC  IP     ESP  L4
	 *
	 * Offsets are in 2-byte words, counting from start of frame
	 */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (skb->protocol == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;

	if (mode == XFRM_MODE_TUNNEL) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (xo->proto == IPPROTO_IPV6) {
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
			proto = inner_ipv6_hdr(skb)->nexthdr;
		} else {
			proto = inner_ip_hdr(skb)->protocol;
		}
	} else {
		/* Transport: inner L3 is the same header as outer L3 */
		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
		if (skb->protocol == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
		proto = xo->proto;
	}
	switch (proto) {
	case IPPROTO_UDP:
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		/* Fall through */
	case IPPROTO_TCP:
		/* UDP and TCP share the inner-L4 offset field */
		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
		break;
	}
}
177
178static void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_offload *xo)
179{
180 int iv_offset;
181 __be64 seqno;
182
183 /* Place the SN in the IV field */
184 seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
185 iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
186 skb_store_bits(skb, iv_offset, &seqno, 8);
187}
188
/* Fill the TX inline metadata: syndrome, next protocol, and for GSO
 * packets the Q0.16 inverse MSS and the low 16 bits of the first TCP
 * sequence number, which the accelerator needs to derive per-segment
 * ESP sequence numbers and IVs.
 */
static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
				     struct mlx5e_ipsec_metadata *mdata,
				     struct xfrm_offload *xo)
{
	struct ip_esp_hdr *esph;
	struct tcphdr *tcph;

	if (skb_is_gso(skb)) {
		/* Add LSO metadata indication */
		esph = ip_esp_hdr(skb);
		tcph = inner_tcp_hdr(skb);
		netdev_dbg(skb->dev, "   Offloading GSO packet outer L3 %u; L4 %u; Inner L3 %u; L4 %u\n",
			   skb->network_header,
			   skb->transport_header,
			   skb->inner_network_header,
			   skb->inner_transport_header);
		netdev_dbg(skb->dev, "   Offloading GSO packet of len %u; mss %u; TCP sp %u dp %u seq 0x%x ESP seq 0x%x\n",
			   skb->len, skb_shinfo(skb)->gso_size,
			   ntohs(tcph->source), ntohs(tcph->dest),
			   ntohl(tcph->seq), ntohl(esph->seq_no));
		mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP;
		mdata->content.tx.mss_inv = mlx5e_ipsec_mss_inv(skb);
		mdata->content.tx.seq = htons(ntohl(tcph->seq) & 0xFFFF);
	} else {
		mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD;
	}
	mdata->content.tx.esp_next_proto = xo->proto;

	netdev_dbg(skb->dev, "   TX metadata syndrome %u proto %u mss_inv %04x seq %04x\n",
		   mdata->syndrome, mdata->content.tx.esp_next_proto,
		   ntohs(mdata->content.tx.mss_inv),
		   ntohs(mdata->content.tx.seq));
}
222
223struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
224 struct mlx5e_tx_wqe *wqe,
225 struct sk_buff *skb)
226{
227 struct mlx5e_priv *priv = netdev_priv(netdev);
228 struct xfrm_offload *xo = xfrm_offload(skb);
229 struct mlx5e_ipsec_metadata *mdata;
230 struct xfrm_state *x;
231
232 if (!xo)
233 return skb;
234
235 if (unlikely(skb->sp->len != 1)) {
236 atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_bundle);
237 goto drop;
238 }
239
240 x = xfrm_input_state(skb);
241 if (unlikely(!x)) {
242 atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_no_state);
243 goto drop;
244 }
245
246 if (unlikely(!x->xso.offload_handle ||
247 (skb->protocol != htons(ETH_P_IP) &&
248 skb->protocol != htons(ETH_P_IPV6)))) {
249 atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_not_ip);
250 goto drop;
251 }
252
253 if (!skb_is_gso(skb))
254 if (unlikely(mlx5e_ipsec_remove_trailer(skb, x))) {
255 atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_trailer);
256 goto drop;
257 }
258 mdata = mlx5e_ipsec_add_metadata(skb);
259 if (unlikely(IS_ERR(mdata))) {
260 atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
261 goto drop;
262 }
263 mlx5e_ipsec_set_swp(skb, &wqe->eth, x->props.mode, xo);
264 mlx5e_ipsec_set_iv(skb, xo);
265 mlx5e_ipsec_set_metadata(skb, mdata, xo);
266
267 return skb;
268
269drop:
270 kfree_skb(skb);
271 return NULL;
272}
273
/* Build the secpath for an RX skb that the HW already processed: look
 * up the SA by the handle carried in the inline metadata, attach it to
 * a (duplicated) secpath, and translate the HW syndrome into the xfrm
 * CRYPTO_* status.
 *
 * Returns the xfrm state on success, NULL on any failure (each failure
 * bumps its dedicated sw_stats counter; caller drops the skb).
 * NOTE(review): on an unknown syndrome the state has already been added
 * to the secpath — confirm the subsequent kfree_skb() releases that
 * reference via secpath teardown.
 */
static inline struct xfrm_state *
mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
		     struct mlx5e_ipsec_metadata *mdata)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct xfrm_offload *xo;
	struct xfrm_state *xs;
	u32 sa_handle;

	skb->sp = secpath_dup(skb->sp);
	if (unlikely(!skb->sp)) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
		return NULL;
	}

	sa_handle = be32_to_cpu(mdata->content.rx.sa_handle);
	xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
	if (unlikely(!xs)) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
		return NULL;
	}

	skb->sp->xvec[skb->sp->len++] = xs;
	skb->sp->olen++;

	/* Crypto was done in HW; report its outcome to the xfrm stack */
	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	switch (mdata->syndrome) {
	case MLX5E_IPSEC_RX_SYNDROME_DECRYPTED:
		xo->status = CRYPTO_SUCCESS;
		break;
	case MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED:
		xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
		break;
	default:
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
		return NULL;
	}
	return xs;
}
314
/* RX datapath hook: detect the inline metadata pseudo-ethertype that
 * the accelerator inserts after the MAC addresses, build the secpath
 * from it, and strip the metadata so the stack sees a normal frame.
 *
 * Returns the skb (untouched if it carried no metadata), or NULL if the
 * metadata was present but the SA lookup/secpath build failed (skb is
 * freed in that case).
 */
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
					  struct sk_buff *skb)
{
	struct mlx5e_ipsec_metadata *mdata;
	struct ethhdr *old_eth;
	struct ethhdr *new_eth;
	struct xfrm_state *xs;
	__be16 *ethtype;

	/* Detect inline metadata */
	if (skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN)
		return skb;
	ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
	if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
		return skb;

	/* Use the metadata */
	mdata = (struct mlx5e_ipsec_metadata *)(skb->data + ETH_HLEN);
	xs = mlx5e_ipsec_build_sp(netdev, skb, mdata);
	if (unlikely(!xs)) {
		kfree_skb(skb);
		return NULL;
	}

	/* Remove the metadata from the buffer: shift the MAC addresses
	 * forward over it (inverse of mlx5e_ipsec_add_metadata()).
	 */
	old_eth = (struct ethhdr *)skb->data;
	new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
	memmove(new_eth, old_eth, 2 * ETH_ALEN);
	/* Ethertype is already in its new place */
	skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);

	return skb;
}
348
349bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
350 netdev_features_t features)
351{
352 struct xfrm_state *x;
353
354 if (skb->sp && skb->sp->len) {
355 x = skb->sp->xvec[0];
356 if (x && x->xso.offload_handle)
357 return true;
358 }
359 return false;
360}
361
362void mlx5e_ipsec_build_inverse_table(void)
363{
364 u16 mss_inv;
365 u32 mss;
366
367 /* Calculate 1/x inverse table for use in GSO data path.
368 * Using this table, we provide the IPSec accelerator with the value of
369 * 1/gso_size so that it can infer the position of each segment inside
370 * the GSO, and increment the ESP sequence number, and generate the IV.
371 * The HW needs this value in Q0.16 fixed-point number format
372 */
373 mlx5e_ipsec_inverse_table[1] = htons(0xFFFF);
374 for (mss = 2; mss < MAX_LSO_MSS; mss++) {
375 mss_inv = ((1ULL << 32) / mss) >> 16;
376 mlx5e_ipsec_inverse_table[mss] = htons(mss_inv);
377 }
378}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
new file mode 100644
index 000000000000..e37ae2598dbb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -0,0 +1,55 @@
1/*
2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef __MLX5E_IPSEC_RXTX_H__
35#define __MLX5E_IPSEC_RXTX_H__
36
37#ifdef CONFIG_MLX5_EN_IPSEC
38
39#include <linux/skbuff.h>
40#include "en.h"
41
42struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
43 struct sk_buff *skb);
44void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
45
46void mlx5e_ipsec_inverse_table_init(void);
47bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
48 netdev_features_t features);
49struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
50 struct mlx5e_tx_wqe *wqe,
51 struct sk_buff *skb);
52
53#endif /* CONFIG_MLX5_EN_IPSEC */
54
55#endif /* __MLX5E_IPSEC_RXTX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
new file mode 100644
index 000000000000..6fea59223dc4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -0,0 +1,133 @@
1/*
2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/ethtool.h>
35#include <net/sock.h>
36
37#include "en.h"
38#include "accel/ipsec.h"
39#include "fpga/sdk.h"
40#include "en_accel/ipsec.h"
41
/* Ethtool descriptors for the HW counters; order must match the field
 * order of struct mlx5e_ipsec_stats, which is filled by a bulk read of
 * NUM_IPSEC_HW_COUNTERS values.
 */
static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_in_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_out_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_bypass_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_in_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_out_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_bypass_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_drop_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_auth_fail_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_drop_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_add_sa_success) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_add_sa_fail) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_del_sa_success) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_del_sa_fail) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_cmd_drop) },
};

/* Ethtool descriptors for the driver-side (software) drop counters */
static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sadb_miss) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_syndrome) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_bundle) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_no_state) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_not_ip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_trailer) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_metadata) },
};

/* Read one atomic64 SW counter by its descriptor offset */
#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
	atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))

#define NUM_IPSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_hw_stats_desc)
#define NUM_IPSEC_SW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_sw_stats_desc)

#define NUM_IPSEC_COUNTERS (NUM_IPSEC_HW_COUNTERS + NUM_IPSEC_SW_COUNTERS)
77
78int mlx5e_ipsec_get_count(struct mlx5e_priv *priv)
79{
80 if (!priv->ipsec)
81 return 0;
82
83 return NUM_IPSEC_COUNTERS;
84}
85
86int mlx5e_ipsec_get_strings(struct mlx5e_priv *priv, uint8_t *data)
87{
88 unsigned int i, idx = 0;
89
90 if (!priv->ipsec)
91 return 0;
92
93 for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
94 strcpy(data + (idx++) * ETH_GSTRING_LEN,
95 mlx5e_ipsec_hw_stats_desc[i].format);
96
97 for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
98 strcpy(data + (idx++) * ETH_GSTRING_LEN,
99 mlx5e_ipsec_sw_stats_desc[i].format);
100
101 return NUM_IPSEC_COUNTERS;
102}
103
/* Refresh the cached HW counters with a bulk read from the accelerator.
 * On read failure the cache is zeroed so stale values are never shown.
 */
void mlx5e_ipsec_update_stats(struct mlx5e_priv *priv)
{
	int ret;

	if (!priv->ipsec)
		return;

	/* Bulk read fills struct mlx5e_ipsec_stats field-by-field */
	ret = mlx5_accel_ipsec_counters_read(priv->mdev, (u64 *)&priv->ipsec->stats,
					     NUM_IPSEC_HW_COUNTERS);
	if (ret)
		memset(&priv->ipsec->stats, 0, sizeof(priv->ipsec->stats));
}
116
117int mlx5e_ipsec_get_stats(struct mlx5e_priv *priv, u64 *data)
118{
119 int i, idx = 0;
120
121 if (!priv->ipsec)
122 return 0;
123
124 for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
125 data[idx++] = MLX5E_READ_CTR64_CPU(&priv->ipsec->stats,
126 mlx5e_ipsec_hw_stats_desc, i);
127
128 for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
129 data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->sw_stats,
130 mlx5e_ipsec_sw_stats_desc, i);
131
132 return NUM_IPSEC_COUNTERS;
133}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 16b1e96a7050..917fade5f5d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -31,6 +31,7 @@
31 */ 31 */
32 32
33#include "en.h" 33#include "en.h"
34#include "en_accel/ipsec.h"
34 35
35void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv, 36void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
36 struct ethtool_drvinfo *drvinfo) 37 struct ethtool_drvinfo *drvinfo)
@@ -186,7 +187,8 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
186 MLX5E_NUM_SQ_STATS(priv) + 187 MLX5E_NUM_SQ_STATS(priv) +
187 MLX5E_NUM_PFC_COUNTERS(priv) + 188 MLX5E_NUM_PFC_COUNTERS(priv) +
188 ARRAY_SIZE(mlx5e_pme_status_desc) + 189 ARRAY_SIZE(mlx5e_pme_status_desc) +
189 ARRAY_SIZE(mlx5e_pme_error_desc); 190 ARRAY_SIZE(mlx5e_pme_error_desc) +
191 mlx5e_ipsec_get_count(priv);
190 192
191 case ETH_SS_PRIV_FLAGS: 193 case ETH_SS_PRIV_FLAGS:
192 return ARRAY_SIZE(mlx5e_priv_flags); 194 return ARRAY_SIZE(mlx5e_priv_flags);
@@ -275,6 +277,9 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
275 for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++) 277 for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++)
276 strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format); 278 strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
277 279
280 /* IPSec counters */
281 idx += mlx5e_ipsec_get_strings(priv, data + idx * ETH_GSTRING_LEN);
282
278 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) 283 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
279 return; 284 return;
280 285
@@ -403,6 +408,9 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
403 data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters, 408 data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
404 mlx5e_pme_error_desc, i); 409 mlx5e_pme_error_desc, i);
405 410
411 /* IPSec counters */
412 idx += mlx5e_ipsec_get_stats(priv, data + idx);
413
406 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) 414 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
407 return; 415 return;
408 416
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 9f99f624004f..a09b11f467a4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -39,6 +39,9 @@
39#include "en.h" 39#include "en.h"
40#include "en_tc.h" 40#include "en_tc.h"
41#include "en_rep.h" 41#include "en_rep.h"
42#include "en_accel/ipsec.h"
43#include "en_accel/ipsec_rxtx.h"
44#include "accel/ipsec.h"
42#include "vxlan.h" 45#include "vxlan.h"
43 46
44struct mlx5e_rq_param { 47struct mlx5e_rq_param {
@@ -115,7 +118,7 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
115static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params) 118static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
116{ 119{
117 u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) && 120 u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
118 !params->xdp_prog ? 121 !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
119 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ : 122 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
120 MLX5_WQ_TYPE_LINKED_LIST; 123 MLX5_WQ_TYPE_LINKED_LIST;
121 mlx5e_set_rq_type_params(mdev, params, rq_type); 124 mlx5e_set_rq_type_params(mdev, params, rq_type);
@@ -328,8 +331,10 @@ static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
328 331
329void mlx5e_update_stats(struct mlx5e_priv *priv, bool full) 332void mlx5e_update_stats(struct mlx5e_priv *priv, bool full)
330{ 333{
331 if (full) 334 if (full) {
332 mlx5e_update_pcie_counters(priv); 335 mlx5e_update_pcie_counters(priv);
336 mlx5e_ipsec_update_stats(priv);
337 }
333 mlx5e_update_pport_counters(priv, full); 338 mlx5e_update_pport_counters(priv, full);
334 mlx5e_update_vport_counters(priv); 339 mlx5e_update_vport_counters(priv);
335 mlx5e_update_q_counter(priv); 340 mlx5e_update_q_counter(priv);
@@ -592,6 +597,13 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
592 rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; 597 rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
593 598
594 rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe; 599 rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
600#ifdef CONFIG_MLX5_EN_IPSEC
601 if (MLX5_IPSEC_DEV(mdev)) {
602 err = -EINVAL;
603 netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
604 goto err_rq_wq_destroy;
605 }
606#endif
595 if (!rq->handle_rx_cqe) { 607 if (!rq->handle_rx_cqe) {
596 err = -EINVAL; 608 err = -EINVAL;
597 netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err); 609 netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
@@ -624,7 +636,12 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
624 rq->alloc_wqe = mlx5e_alloc_rx_wqe; 636 rq->alloc_wqe = mlx5e_alloc_rx_wqe;
625 rq->dealloc_wqe = mlx5e_dealloc_rx_wqe; 637 rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
626 638
627 rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe; 639#ifdef CONFIG_MLX5_EN_IPSEC
640 if (c->priv->ipsec)
641 rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
642 else
643#endif
644 rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
628 if (!rq->handle_rx_cqe) { 645 if (!rq->handle_rx_cqe) {
629 kfree(rq->wqe.frag_info); 646 kfree(rq->wqe.frag_info);
630 err = -EINVAL; 647 err = -EINVAL;
@@ -635,6 +652,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
635 rq->buff.wqe_sz = params->lro_en ? 652 rq->buff.wqe_sz = params->lro_en ?
636 params->lro_wqe_sz : 653 params->lro_wqe_sz :
637 MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu); 654 MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu);
655#ifdef CONFIG_MLX5_EN_IPSEC
656 if (MLX5_IPSEC_DEV(mdev))
657 rq->buff.wqe_sz += MLX5E_METADATA_ETHER_LEN;
658#endif
638 rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en; 659 rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en;
639 byte_count = rq->buff.wqe_sz; 660 byte_count = rq->buff.wqe_sz;
640 661
@@ -1095,6 +1116,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
1095 sq->uar_map = mdev->mlx5e_res.bfreg.map; 1116 sq->uar_map = mdev->mlx5e_res.bfreg.map;
1096 sq->max_inline = params->tx_max_inline; 1117 sq->max_inline = params->tx_max_inline;
1097 sq->min_inline_mode = params->tx_min_inline_mode; 1118 sq->min_inline_mode = params->tx_min_inline_mode;
1119 if (MLX5_IPSEC_DEV(c->priv->mdev))
1120 set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
1098 1121
1099 param->wq.db_numa_node = cpu_to_node(c->cpu); 1122 param->wq.db_numa_node = cpu_to_node(c->cpu);
1100 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); 1123 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
@@ -1914,6 +1937,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
1914 1937
1915 mlx5e_build_sq_param_common(priv, param); 1938 mlx5e_build_sq_param_common(priv, param);
1916 MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); 1939 MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
1940 MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev));
1917} 1941}
1918 1942
1919static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, 1943static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -3508,6 +3532,11 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
3508 features = vlan_features_check(skb, features); 3532 features = vlan_features_check(skb, features);
3509 features = vxlan_features_check(skb, features); 3533 features = vxlan_features_check(skb, features);
3510 3534
3535#ifdef CONFIG_MLX5_EN_IPSEC
3536 if (mlx5e_ipsec_feature_check(skb, netdev, features))
3537 return features;
3538#endif
3539
3511 /* Validate if the tunneled packet is being offloaded by HW */ 3540 /* Validate if the tunneled packet is being offloaded by HW */
3512 if (skb->encapsulation && 3541 if (skb->encapsulation &&
3513 (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK)) 3542 (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
@@ -3555,6 +3584,12 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
3555 goto unlock; 3584 goto unlock;
3556 } 3585 }
3557 3586
3587 if ((netdev->features & NETIF_F_HW_ESP) && prog) {
3588 netdev_warn(netdev, "can't set XDP with IPSec offload\n");
3589 err = -EINVAL;
3590 goto unlock;
3591 }
3592
3558 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); 3593 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
3559 /* no need for full reset when exchanging programs */ 3594 /* no need for full reset when exchanging programs */
3560 reset = (!priv->channels.params.xdp_prog || !prog); 3595 reset = (!priv->channels.params.xdp_prog || !prog);
@@ -4046,6 +4081,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
4046 if (MLX5_CAP_GEN(mdev, vport_group_manager)) 4081 if (MLX5_CAP_GEN(mdev, vport_group_manager))
4047 netdev->switchdev_ops = &mlx5e_switchdev_ops; 4082 netdev->switchdev_ops = &mlx5e_switchdev_ops;
4048#endif 4083#endif
4084
4085 mlx5e_ipsec_build_netdev(priv);
4049} 4086}
4050 4087
4051static void mlx5e_create_q_counter(struct mlx5e_priv *priv) 4088static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
@@ -4074,14 +4111,19 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
4074 void *ppriv) 4111 void *ppriv)
4075{ 4112{
4076 struct mlx5e_priv *priv = netdev_priv(netdev); 4113 struct mlx5e_priv *priv = netdev_priv(netdev);
4114 int err;
4077 4115
4078 mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv); 4116 mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
4117 err = mlx5e_ipsec_init(priv);
4118 if (err)
4119 mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
4079 mlx5e_build_nic_netdev(netdev); 4120 mlx5e_build_nic_netdev(netdev);
4080 mlx5e_vxlan_init(priv); 4121 mlx5e_vxlan_init(priv);
4081} 4122}
4082 4123
4083static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) 4124static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
4084{ 4125{
4126 mlx5e_ipsec_cleanup(priv);
4085 mlx5e_vxlan_cleanup(priv); 4127 mlx5e_vxlan_cleanup(priv);
4086 4128
4087 if (priv->channels.params.xdp_prog) 4129 if (priv->channels.params.xdp_prog)
@@ -4473,6 +4515,7 @@ static struct mlx5_interface mlx5e_interface = {
4473 4515
4474void mlx5e_init(void) 4516void mlx5e_init(void)
4475{ 4517{
4518 mlx5e_ipsec_build_inverse_table();
4476 mlx5e_build_ptys2ethtool_map(); 4519 mlx5e_build_ptys2ethtool_map();
4477 mlx5_register_interface(&mlx5e_interface); 4520 mlx5_register_interface(&mlx5e_interface);
4478} 4521}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 5f3c138c948d..325b2c8c1c6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -41,6 +41,7 @@
41#include "eswitch.h" 41#include "eswitch.h"
42#include "en_rep.h" 42#include "en_rep.h"
43#include "ipoib/ipoib.h" 43#include "ipoib/ipoib.h"
44#include "en_accel/ipsec_rxtx.h"
44 45
45static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp) 46static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
46{ 47{
@@ -996,7 +997,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
996 work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget); 997 work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
997 998
998 for (; work_done < budget; work_done++) { 999 for (; work_done < budget; work_done++) {
999 struct mlx5_cqe64 *cqe = mlx5e_get_cqe(cq); 1000 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_cqe(&cq->wq);
1000 1001
1001 if (!cqe) 1002 if (!cqe)
1002 break; 1003 break;
@@ -1050,7 +1051,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
1050 u16 wqe_counter; 1051 u16 wqe_counter;
1051 bool last_wqe; 1052 bool last_wqe;
1052 1053
1053 cqe = mlx5e_get_cqe(cq); 1054 cqe = mlx5_cqwq_get_cqe(&cq->wq);
1054 if (!cqe) 1055 if (!cqe)
1055 break; 1056 break;
1056 1057
@@ -1183,3 +1184,43 @@ wq_free_wqe:
1183} 1184}
1184 1185
1185#endif /* CONFIG_MLX5_CORE_IPOIB */ 1186#endif /* CONFIG_MLX5_CORE_IPOIB */
1187
1188#ifdef CONFIG_MLX5_EN_IPSEC
1189
1190void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1191{
1192 struct mlx5e_wqe_frag_info *wi;
1193 struct mlx5e_rx_wqe *wqe;
1194 __be16 wqe_counter_be;
1195 struct sk_buff *skb;
1196 u16 wqe_counter;
1197 u32 cqe_bcnt;
1198
1199 wqe_counter_be = cqe->wqe_counter;
1200 wqe_counter = be16_to_cpu(wqe_counter_be);
1201 wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
1202 wi = &rq->wqe.frag_info[wqe_counter];
1203 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1204
1205 skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
1206 if (unlikely(!skb)) {
1207 /* a DROP, save the page-reuse checks */
1208 mlx5e_free_rx_wqe(rq, wi);
1209 goto wq_ll_pop;
1210 }
1211 skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb);
1212 if (unlikely(!skb)) {
1213 mlx5e_free_rx_wqe(rq, wi);
1214 goto wq_ll_pop;
1215 }
1216
1217 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1218 napi_gro_receive(rq->cq.napi, skb);
1219
1220 mlx5e_free_rx_wqe_reuse(rq, wi);
1221wq_ll_pop:
1222 mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
1223 &wqe->next.next_wqe_index);
1224}
1225
1226#endif /* CONFIG_MLX5_EN_IPSEC */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 0433d69429f3..aaa0f4ebba9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -34,6 +34,7 @@
34#include <linux/if_vlan.h> 34#include <linux/if_vlan.h>
35#include "en.h" 35#include "en.h"
36#include "ipoib/ipoib.h" 36#include "ipoib/ipoib.h"
37#include "en_accel/ipsec_rxtx.h"
37 38
38#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS 39#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
39#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\ 40#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
@@ -299,12 +300,9 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
299 } 300 }
300} 301}
301 302
302static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb) 303static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
304 struct mlx5e_tx_wqe *wqe, u16 pi)
303{ 305{
304 struct mlx5_wq_cyc *wq = &sq->wq;
305
306 u16 pi = sq->pc & wq->sz_m1;
307 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
308 struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; 306 struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
309 307
310 struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; 308 struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
@@ -319,8 +317,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb)
319 u16 ds_cnt; 317 u16 ds_cnt;
320 u16 ihs; 318 u16 ihs;
321 319
322 memset(wqe, 0, sizeof(*wqe));
323
324 mlx5e_txwqe_build_eseg_csum(sq, skb, eseg); 320 mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
325 321
326 if (skb_is_gso(skb)) { 322 if (skb_is_gso(skb)) {
@@ -375,8 +371,21 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
375{ 371{
376 struct mlx5e_priv *priv = netdev_priv(dev); 372 struct mlx5e_priv *priv = netdev_priv(dev);
377 struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)]; 373 struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)];
374 struct mlx5_wq_cyc *wq = &sq->wq;
375 u16 pi = sq->pc & wq->sz_m1;
376 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
377
378 memset(wqe, 0, sizeof(*wqe));
379
380#ifdef CONFIG_MLX5_EN_IPSEC
381 if (sq->state & BIT(MLX5E_SQ_STATE_IPSEC)) {
382 skb = mlx5e_ipsec_handle_tx_skb(dev, wqe, skb);
383 if (unlikely(!skb))
384 return NETDEV_TX_OK;
385 }
386#endif
378 387
379 return mlx5e_sq_xmit(sq, skb); 388 return mlx5e_sq_xmit(sq, skb, wqe, pi);
380} 389}
381 390
382bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) 391bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
@@ -409,7 +418,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
409 u16 wqe_counter; 418 u16 wqe_counter;
410 bool last_wqe; 419 bool last_wqe;
411 420
412 cqe = mlx5e_get_cqe(cq); 421 cqe = mlx5_cqwq_get_cqe(&cq->wq);
413 if (!cqe) 422 if (!cqe)
414 break; 423 break;
415 424
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 5ca6714e3e02..92db28a9ed43 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -32,23 +32,6 @@
32 32
33#include "en.h" 33#include "en.h"
34 34
35struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
36{
37 struct mlx5_cqwq *wq = &cq->wq;
38 u32 ci = mlx5_cqwq_get_ci(wq);
39 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
40 u8 cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
41 u8 sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;
42
43 if (cqe_ownership_bit != sw_ownership_val)
44 return NULL;
45
46 /* ensure cqe content is read after cqe ownership bit */
47 dma_rmb();
48
49 return cqe;
50}
51
52static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq, 35static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
53 struct mlx5e_icosq *sq, 36 struct mlx5e_icosq *sq,
54 struct mlx5_cqe64 *cqe, 37 struct mlx5_cqe64 *cqe,
@@ -89,7 +72,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
89 if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) 72 if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
90 return; 73 return;
91 74
92 cqe = mlx5e_get_cqe(cq); 75 cqe = mlx5_cqwq_get_cqe(&cq->wq);
93 if (likely(!cqe)) 76 if (likely(!cqe))
94 return; 77 return;
95 78
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
index 99cba644b4fc..5cb855fd618f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
@@ -33,10 +33,44 @@
33#include <linux/etherdevice.h> 33#include <linux/etherdevice.h>
34#include <linux/mlx5/cmd.h> 34#include <linux/mlx5/cmd.h>
35#include <linux/mlx5/driver.h> 35#include <linux/mlx5/driver.h>
36#include <linux/mlx5/device.h>
36 37
37#include "mlx5_core.h" 38#include "mlx5_core.h"
38#include "fpga/cmd.h" 39#include "fpga/cmd.h"
39 40
41#define MLX5_FPGA_ACCESS_REG_SZ (MLX5_ST_SZ_DW(fpga_access_reg) + \
42 MLX5_FPGA_ACCESS_REG_SIZE_MAX)
43
44int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr,
45 void *buf, bool write)
46{
47 u32 in[MLX5_FPGA_ACCESS_REG_SZ] = {0};
48 u32 out[MLX5_FPGA_ACCESS_REG_SZ];
49 int err;
50
51 if (size & 3)
52 return -EINVAL;
53 if (addr & 3)
54 return -EINVAL;
55 if (size > MLX5_FPGA_ACCESS_REG_SIZE_MAX)
56 return -EINVAL;
57
58 MLX5_SET(fpga_access_reg, in, size, size);
59 MLX5_SET64(fpga_access_reg, in, address, addr);
60 if (write)
61 memcpy(MLX5_ADDR_OF(fpga_access_reg, in, data), buf, size);
62
63 err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
64 MLX5_REG_FPGA_ACCESS_REG, 0, write);
65 if (err)
66 return err;
67
68 if (!write)
69 memcpy(buf, MLX5_ADDR_OF(fpga_access_reg, out, data), size);
70
71 return 0;
72}
73
40int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps) 74int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps)
41{ 75{
42 u32 in[MLX5_ST_SZ_DW(fpga_cap)] = {0}; 76 u32 in[MLX5_ST_SZ_DW(fpga_cap)] = {0};
@@ -46,6 +80,49 @@ int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps)
46 MLX5_REG_FPGA_CAP, 0, 0); 80 MLX5_REG_FPGA_CAP, 0, 0);
47} 81}
48 82
83int mlx5_fpga_ctrl_op(struct mlx5_core_dev *dev, u8 op)
84{
85 u32 in[MLX5_ST_SZ_DW(fpga_ctrl)] = {0};
86 u32 out[MLX5_ST_SZ_DW(fpga_ctrl)];
87
88 MLX5_SET(fpga_ctrl, in, operation, op);
89
90 return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
91 MLX5_REG_FPGA_CTRL, 0, true);
92}
93
94int mlx5_fpga_sbu_caps(struct mlx5_core_dev *dev, void *caps, int size)
95{
96 unsigned int cap_size = MLX5_CAP_FPGA(dev, sandbox_extended_caps_len);
97 u64 addr = MLX5_CAP64_FPGA(dev, sandbox_extended_caps_addr);
98 unsigned int read;
99 int ret = 0;
100
101 if (cap_size > size) {
102 mlx5_core_warn(dev, "Not enough buffer %u for FPGA SBU caps %u",
103 size, cap_size);
104 return -EINVAL;
105 }
106
107 while (cap_size > 0) {
108 read = min_t(unsigned int, cap_size,
109 MLX5_FPGA_ACCESS_REG_SIZE_MAX);
110
111 ret = mlx5_fpga_access_reg(dev, read, addr, caps, false);
112 if (ret) {
113 mlx5_core_warn(dev, "Error reading FPGA SBU caps %u bytes at address 0x%llx: %d",
114 read, addr, ret);
115 return ret;
116 }
117
118 cap_size -= read;
119 addr += read;
120 caps += read;
121 }
122
123 return ret;
124}
125
49int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query) 126int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query)
50{ 127{
51 u32 in[MLX5_ST_SZ_DW(fpga_ctrl)] = {0}; 128 u32 in[MLX5_ST_SZ_DW(fpga_ctrl)] = {0};
@@ -62,3 +139,100 @@ int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query)
62 query->oper_image = MLX5_GET(fpga_ctrl, out, flash_select_oper); 139 query->oper_image = MLX5_GET(fpga_ctrl, out, flash_select_oper);
63 return 0; 140 return 0;
64} 141}
142
143int mlx5_fpga_create_qp(struct mlx5_core_dev *dev, void *fpga_qpc,
144 u32 *fpga_qpn)
145{
146 u32 in[MLX5_ST_SZ_DW(fpga_create_qp_in)] = {0};
147 u32 out[MLX5_ST_SZ_DW(fpga_create_qp_out)];
148 int ret;
149
150 MLX5_SET(fpga_create_qp_in, in, opcode, MLX5_CMD_OP_FPGA_CREATE_QP);
151 memcpy(MLX5_ADDR_OF(fpga_create_qp_in, in, fpga_qpc), fpga_qpc,
152 MLX5_FLD_SZ_BYTES(fpga_create_qp_in, fpga_qpc));
153
154 ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
155 if (ret)
156 return ret;
157
158 memcpy(fpga_qpc, MLX5_ADDR_OF(fpga_create_qp_out, out, fpga_qpc),
159 MLX5_FLD_SZ_BYTES(fpga_create_qp_out, fpga_qpc));
160 *fpga_qpn = MLX5_GET(fpga_create_qp_out, out, fpga_qpn);
161 return ret;
162}
163
164int mlx5_fpga_modify_qp(struct mlx5_core_dev *dev, u32 fpga_qpn,
165 enum mlx5_fpga_qpc_field_select fields,
166 void *fpga_qpc)
167{
168 u32 in[MLX5_ST_SZ_DW(fpga_modify_qp_in)] = {0};
169 u32 out[MLX5_ST_SZ_DW(fpga_modify_qp_out)];
170
171 MLX5_SET(fpga_modify_qp_in, in, opcode, MLX5_CMD_OP_FPGA_MODIFY_QP);
172 MLX5_SET(fpga_modify_qp_in, in, field_select, fields);
173 MLX5_SET(fpga_modify_qp_in, in, fpga_qpn, fpga_qpn);
174 memcpy(MLX5_ADDR_OF(fpga_modify_qp_in, in, fpga_qpc), fpga_qpc,
175 MLX5_FLD_SZ_BYTES(fpga_modify_qp_in, fpga_qpc));
176
177 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
178}
179
180int mlx5_fpga_query_qp(struct mlx5_core_dev *dev,
181 u32 fpga_qpn, void *fpga_qpc)
182{
183 u32 in[MLX5_ST_SZ_DW(fpga_query_qp_in)] = {0};
184 u32 out[MLX5_ST_SZ_DW(fpga_query_qp_out)];
185 int ret;
186
187 MLX5_SET(fpga_query_qp_in, in, opcode, MLX5_CMD_OP_FPGA_QUERY_QP);
188 MLX5_SET(fpga_query_qp_in, in, fpga_qpn, fpga_qpn);
189
190 ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
191 if (ret)
192 return ret;
193
194 memcpy(fpga_qpc, MLX5_ADDR_OF(fpga_query_qp_out, in, fpga_qpc),
195 MLX5_FLD_SZ_BYTES(fpga_query_qp_out, fpga_qpc));
196 return ret;
197}
198
199int mlx5_fpga_destroy_qp(struct mlx5_core_dev *dev, u32 fpga_qpn)
200{
201 u32 in[MLX5_ST_SZ_DW(fpga_destroy_qp_in)] = {0};
202 u32 out[MLX5_ST_SZ_DW(fpga_destroy_qp_out)];
203
204 MLX5_SET(fpga_destroy_qp_in, in, opcode, MLX5_CMD_OP_FPGA_DESTROY_QP);
205 MLX5_SET(fpga_destroy_qp_in, in, fpga_qpn, fpga_qpn);
206
207 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
208}
209
210int mlx5_fpga_query_qp_counters(struct mlx5_core_dev *dev, u32 fpga_qpn,
211 bool clear, struct mlx5_fpga_qp_counters *data)
212{
213 u32 in[MLX5_ST_SZ_DW(fpga_query_qp_counters_in)] = {0};
214 u32 out[MLX5_ST_SZ_DW(fpga_query_qp_counters_out)];
215 int ret;
216
217 MLX5_SET(fpga_query_qp_counters_in, in, opcode,
218 MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS);
219 MLX5_SET(fpga_query_qp_counters_in, in, clear, clear);
220 MLX5_SET(fpga_query_qp_counters_in, in, fpga_qpn, fpga_qpn);
221
222 ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
223 if (ret)
224 return ret;
225
226 data->rx_ack_packets = MLX5_GET64(fpga_query_qp_counters_out, out,
227 rx_ack_packets);
228 data->rx_send_packets = MLX5_GET64(fpga_query_qp_counters_out, out,
229 rx_send_packets);
230 data->tx_ack_packets = MLX5_GET64(fpga_query_qp_counters_out, out,
231 tx_ack_packets);
232 data->tx_send_packets = MLX5_GET64(fpga_query_qp_counters_out, out,
233 tx_send_packets);
234 data->rx_total_drop = MLX5_GET64(fpga_query_qp_counters_out, out,
235 rx_total_drop);
236
237 return ret;
238}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
index a74396a61bc3..94bdfd47c3f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
@@ -53,7 +53,32 @@ struct mlx5_fpga_query {
53 enum mlx5_fpga_status status; 53 enum mlx5_fpga_status status;
54}; 54};
55 55
56enum mlx5_fpga_qpc_field_select {
57 MLX5_FPGA_QPC_STATE = BIT(0),
58};
59
60struct mlx5_fpga_qp_counters {
61 u64 rx_ack_packets;
62 u64 rx_send_packets;
63 u64 tx_ack_packets;
64 u64 tx_send_packets;
65 u64 rx_total_drop;
66};
67
56int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps); 68int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps);
57int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query); 69int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query);
70int mlx5_fpga_ctrl_op(struct mlx5_core_dev *dev, u8 op);
71int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr,
72 void *buf, bool write);
73int mlx5_fpga_sbu_caps(struct mlx5_core_dev *dev, void *caps, int size);
74
75int mlx5_fpga_create_qp(struct mlx5_core_dev *dev, void *fpga_qpc,
76 u32 *fpga_qpn);
77int mlx5_fpga_modify_qp(struct mlx5_core_dev *dev, u32 fpga_qpn,
78 enum mlx5_fpga_qpc_field_select fields, void *fpga_qpc);
79int mlx5_fpga_query_qp(struct mlx5_core_dev *dev, u32 fpga_qpn, void *fpga_qpc);
80int mlx5_fpga_query_qp_counters(struct mlx5_core_dev *dev, u32 fpga_qpn,
81 bool clear, struct mlx5_fpga_qp_counters *data);
82int mlx5_fpga_destroy_qp(struct mlx5_core_dev *dev, u32 fpga_qpn);
58 83
59#endif /* __MLX5_FPGA_H__ */ 84#endif /* __MLX5_FPGA_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
new file mode 100644
index 000000000000..c4392f741c5f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -0,0 +1,1042 @@
1/*
2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <net/addrconf.h>
35#include <linux/etherdevice.h>
36#include <linux/mlx5/vport.h>
37
38#include "mlx5_core.h"
39#include "lib/mlx5.h"
40#include "fpga/conn.h"
41
42#define MLX5_FPGA_PKEY 0xFFFF
43#define MLX5_FPGA_PKEY_INDEX 0 /* RoCE PKEY 0xFFFF is always at index 0 */
44#define MLX5_FPGA_RECV_SIZE 2048
45#define MLX5_FPGA_PORT_NUM 1
46#define MLX5_FPGA_CQ_BUDGET 64
47
/* DMA-map up to two scatter entries of @buf for use on @conn's QP.
 * sg[0] is skipped silently when it carries no data; sg[1] is optional.
 * Mapping is all-or-nothing: a failure on sg[1] unmaps sg[0] again.
 * Returns 0 on success or -ENOMEM on a DMA mapping error.
 */
static int mlx5_fpga_conn_map_buf(struct mlx5_fpga_conn *conn,
				  struct mlx5_fpga_dma_buf *buf)
{
	struct device *dma_device;
	int err = 0;

	if (unlikely(!buf->sg[0].data))
		goto out;

	dma_device = &conn->fdev->mdev->pdev->dev;
	buf->sg[0].dma_addr = dma_map_single(dma_device, buf->sg[0].data,
					     buf->sg[0].size, buf->dma_dir);
	err = dma_mapping_error(dma_device, buf->sg[0].dma_addr);
	if (unlikely(err)) {
		mlx5_fpga_warn(conn->fdev, "DMA error on sg 0: %d\n", err);
		err = -ENOMEM;
		goto out;
	}

	if (!buf->sg[1].data)
		goto out;

	buf->sg[1].dma_addr = dma_map_single(dma_device, buf->sg[1].data,
					     buf->sg[1].size, buf->dma_dir);
	err = dma_mapping_error(dma_device, buf->sg[1].dma_addr);
	if (unlikely(err)) {
		mlx5_fpga_warn(conn->fdev, "DMA error on sg 1: %d\n", err);
		/* Roll back sg[0] so the caller never sees a half-mapped buf */
		dma_unmap_single(dma_device, buf->sg[0].dma_addr,
				 buf->sg[0].size, buf->dma_dir);
		err = -ENOMEM;
	}

out:
	return err;
}
83
/* Undo mlx5_fpga_conn_map_buf(): unmap whichever sg entries carry data,
 * in reverse order of mapping.
 */
static void mlx5_fpga_conn_unmap_buf(struct mlx5_fpga_conn *conn,
				     struct mlx5_fpga_dma_buf *buf)
{
	struct device *dma_device;

	dma_device = &conn->fdev->mdev->pdev->dev;
	if (buf->sg[1].data)
		dma_unmap_single(dma_device, buf->sg[1].dma_addr,
				 buf->sg[1].size, buf->dma_dir);

	if (likely(buf->sg[0].data))
		dma_unmap_single(dma_device, buf->sg[0].dma_addr,
				 buf->sg[0].size, buf->dma_dir);
}
98
/* Post @buf (sg[0] only) as a receive WQE on @conn's RQ and ring the
 * doorbell record. Returns -EBUSY when the RQ is full (the buffer is
 * unmapped again in that case) or a mapping error from map_buf.
 * Caller context: producer counter (rq.pc) is not locked; callers must
 * serialize posting themselves.
 */
static int mlx5_fpga_conn_post_recv(struct mlx5_fpga_conn *conn,
				    struct mlx5_fpga_dma_buf *buf)
{
	struct mlx5_wqe_data_seg *data;
	unsigned int ix;
	int err = 0;

	err = mlx5_fpga_conn_map_buf(conn, buf);
	if (unlikely(err))
		goto out;

	/* RQ full: pc - cc equals the number of outstanding WQEs */
	if (unlikely(conn->qp.rq.pc - conn->qp.rq.cc >= conn->qp.rq.size)) {
		mlx5_fpga_conn_unmap_buf(conn, buf);
		return -EBUSY;
	}

	/* rq.size is a power of two, so masking gives the ring index */
	ix = conn->qp.rq.pc & (conn->qp.rq.size - 1);
	data = mlx5_wq_cyc_get_wqe(&conn->qp.wq.rq, ix);
	data->byte_count = cpu_to_be32(buf->sg[0].size);
	data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key);
	data->addr = cpu_to_be64(buf->sg[0].dma_addr);

	conn->qp.rq.pc++;
	conn->qp.rq.bufs[ix] = buf;

	/* Make sure that descriptors are written before doorbell record. */
	dma_wmb();
	*conn->qp.wq.rq.db = cpu_to_be32(conn->qp.rq.pc & 0xffff);
out:
	return err;
}
130
/* Publish a posted send WQE to hardware: update the SQ doorbell record,
 * then ring the doorbell by writing the WQE's control segment to the
 * UAR BlueFlame area.
 */
static void mlx5_fpga_conn_notify_hw(struct mlx5_fpga_conn *conn, void *wqe)
{
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();
	*conn->qp.wq.sq.db = cpu_to_be32(conn->qp.sq.pc);
	/* Make sure that doorbell record is visible before ringing */
	wmb();
	mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET, NULL);
}
140
/* Build a SEND WQE for @buf on @conn's SQ and notify hardware.
 * Assumes the SQ has room and that callers hold conn->qp.sq.lock
 * (both mlx5_fpga_conn_send() and the CQE backlog path do).
 * @buf must already be DMA-mapped.
 */
static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn,
				     struct mlx5_fpga_dma_buf *buf)
{
	struct mlx5_wqe_ctrl_seg *ctrl;
	struct mlx5_wqe_data_seg *data;
	unsigned int ix, sgi;
	int size = 1;	/* WQE size in DS units; 1 counts the ctrl segment */

	ix = conn->qp.sq.pc & (conn->qp.sq.size - 1);

	ctrl = mlx5_wq_cyc_get_wqe(&conn->qp.wq.sq, ix);
	data = (void *)(ctrl + 1);

	/* One data segment per populated sg entry; stop at the first empty */
	for (sgi = 0; sgi < ARRAY_SIZE(buf->sg); sgi++) {
		if (!buf->sg[sgi].data)
			break;
		data->byte_count = cpu_to_be32(buf->sg[sgi].size);
		data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key);
		data->addr = cpu_to_be64(buf->sg[sgi].dma_addr);
		data++;
		size++;
	}

	ctrl->imm = 0;
	ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	ctrl->opmod_idx_opcode = cpu_to_be32(((conn->qp.sq.pc & 0xffff) << 8) |
					     MLX5_OPCODE_SEND);
	ctrl->qpn_ds = cpu_to_be32(size | (conn->qp.mqp.qpn << 8));

	conn->qp.sq.pc++;
	conn->qp.sq.bufs[ix] = buf;
	mlx5_fpga_conn_notify_hw(conn, ctrl);
}
174
/* Send @buf over @conn. Maps the buffer, then either posts it directly
 * or queues it on the SQ backlog when the SQ is full (the backlog is
 * drained in completion order by mlx5_fpga_conn_sq_cqe()).
 * Returns -ENOTCONN if the QP is no longer active, or a mapping error.
 * A 0 return means the buffer was accepted, not that it was transmitted.
 */
int mlx5_fpga_conn_send(struct mlx5_fpga_conn *conn,
			struct mlx5_fpga_dma_buf *buf)
{
	unsigned long flags;
	int err;

	if (!conn->qp.active)
		return -ENOTCONN;

	err = mlx5_fpga_conn_map_buf(conn, buf);
	if (err)
		return err;

	spin_lock_irqsave(&conn->qp.sq.lock, flags);

	if (conn->qp.sq.pc - conn->qp.sq.cc >= conn->qp.sq.size) {
		/* SQ full: defer; err stays 0 since the buf is queued */
		list_add_tail(&buf->list, &conn->qp.sq.backlog);
		goto out_unlock;
	}

	mlx5_fpga_conn_post_send(conn, buf);

out_unlock:
	spin_unlock_irqrestore(&conn->qp.sq.lock, flags);
	return err;
}
201
202static int mlx5_fpga_conn_post_recv_buf(struct mlx5_fpga_conn *conn)
203{
204 struct mlx5_fpga_dma_buf *buf;
205 int err;
206
207 buf = kzalloc(sizeof(*buf) + MLX5_FPGA_RECV_SIZE, 0);
208 if (!buf)
209 return -ENOMEM;
210
211 buf->sg[0].data = (void *)(buf + 1);
212 buf->sg[0].size = MLX5_FPGA_RECV_SIZE;
213 buf->dma_dir = DMA_FROM_DEVICE;
214
215 err = mlx5_fpga_conn_post_recv(conn, buf);
216 if (err)
217 kfree(buf);
218
219 return err;
220}
221
/* Create a whole-address-space (length64, PA access mode) memory key on
 * PD @pdn, with local read/write rights, for use by connection QPs.
 * Returns 0 and fills @mkey on success, negative errno otherwise.
 */
static int mlx5_fpga_conn_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
				      struct mlx5_core_mkey *mkey)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET(mkc, mkc, pd, pdn);
	MLX5_SET(mkc, mkc, length64, 1);
	/* 0xffffff: mkey is not bound to a particular QP */
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);

	kvfree(in);
	return err;
}
248
/* Handle one receive completion: hand the received data to the
 * connection's recv_cb and re-post the buffer. On completion error (or
 * if the QP already went inactive) the connection is marked inactive
 * and the buffer is freed instead.
 */
static void mlx5_fpga_conn_rq_cqe(struct mlx5_fpga_conn *conn,
				  struct mlx5_cqe64 *cqe, u8 status)
{
	struct mlx5_fpga_dma_buf *buf;
	int ix, err;

	ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1);
	buf = conn->qp.rq.bufs[ix];
	conn->qp.rq.bufs[ix] = NULL;
	if (!status)
		/* Shrink sg[0] to the actual number of bytes received */
		buf->sg[0].size = be32_to_cpu(cqe->byte_cnt);
	conn->qp.rq.cc++;

	/* Flush errors are expected during teardown; don't warn on them */
	if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR)))
		mlx5_fpga_warn(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n",
			       buf, conn->fpga_qpn, status);
	else
		mlx5_fpga_dbg(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n",
			      buf, conn->fpga_qpn, status);

	mlx5_fpga_conn_unmap_buf(conn, buf);

	if (unlikely(status || !conn->qp.active)) {
		conn->qp.active = false;
		kfree(buf);
		return;
	}

	mlx5_fpga_dbg(conn->fdev, "Message with %u bytes received successfully\n",
		      buf->sg[0].size);
	conn->recv_cb(conn->cb_arg, buf);

	/* Restore full size and recycle the buffer back onto the RQ */
	buf->sg[0].size = MLX5_FPGA_RECV_SIZE;
	err = mlx5_fpga_conn_post_recv(conn, buf);
	if (unlikely(err)) {
		mlx5_fpga_warn(conn->fdev,
			       "Failed to re-post recv buf: %d\n", err);
		kfree(buf);
	}
}
289
/* Handle one send completion: retire the WQE, post the next backlogged
 * buffer (if any) while still holding the SQ lock to preserve ordering,
 * then unmap the completed buffer and invoke its completion callback.
 * An error status deactivates the connection.
 */
static void mlx5_fpga_conn_sq_cqe(struct mlx5_fpga_conn *conn,
				  struct mlx5_cqe64 *cqe, u8 status)
{
	struct mlx5_fpga_dma_buf *buf, *nextbuf;
	unsigned long flags;
	int ix;

	spin_lock_irqsave(&conn->qp.sq.lock, flags);

	ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.sq.size - 1);
	buf = conn->qp.sq.bufs[ix];
	conn->qp.sq.bufs[ix] = NULL;
	conn->qp.sq.cc++;

	/* Handle backlog still under the spinlock to ensure message post order */
	if (unlikely(!list_empty(&conn->qp.sq.backlog))) {
		if (likely(conn->qp.active)) {
			nextbuf = list_first_entry(&conn->qp.sq.backlog,
						   struct mlx5_fpga_dma_buf, list);
			list_del(&nextbuf->list);
			mlx5_fpga_conn_post_send(conn, nextbuf);
		}
	}

	spin_unlock_irqrestore(&conn->qp.sq.lock, flags);

	/* Flush errors are expected during teardown; don't warn on them */
	if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR)))
		mlx5_fpga_warn(conn->fdev, "SQ buf %p on FPGA QP %u completion status %d\n",
			       buf, conn->fpga_qpn, status);
	else
		mlx5_fpga_dbg(conn->fdev, "SQ buf %p on FPGA QP %u completion status %d\n",
			      buf, conn->fpga_qpn, status);

	mlx5_fpga_conn_unmap_buf(conn, buf);

	/* The callback owns the buffer from here on */
	if (likely(buf->complete))
		buf->complete(conn, conn->fdev, buf, status);

	if (unlikely(status))
		conn->qp.active = false;
}
331
/* Dispatch a single CQE to the SQ or RQ handler based on its opcode.
 * Error opcodes extract the syndrome first, then fall through to the
 * matching non-error handler.
 */
static void mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn,
				      struct mlx5_cqe64 *cqe)
{
	u8 opcode, status = 0;

	opcode = cqe->op_own >> 4;

	switch (opcode) {
	case MLX5_CQE_REQ_ERR:
		status = ((struct mlx5_err_cqe *)cqe)->syndrome;
		/* Fall through */
	case MLX5_CQE_REQ:
		mlx5_fpga_conn_sq_cqe(conn, cqe, status);
		break;

	case MLX5_CQE_RESP_ERR:
		status = ((struct mlx5_err_cqe *)cqe)->syndrome;
		/* Fall through */
	case MLX5_CQE_RESP_SEND:
		mlx5_fpga_conn_rq_cqe(conn, cqe, status);
		break;
	default:
		mlx5_fpga_warn(conn->fdev, "Unexpected cqe opcode %u\n",
			       opcode);
	}
}
358
/* Re-arm the CQ so the next completion raises an interrupt */
static void mlx5_fpga_conn_arm_cq(struct mlx5_fpga_conn *conn)
{
	mlx5_cq_arm(&conn->cq.mcq, MLX5_CQ_DB_REQ_NOT,
		    conn->fdev->conn_res.uar->map, conn->cq.wq.cc);
}
364
/* Asynchronous CQ event handler; events are unexpected, so just log */
static void mlx5_fpga_conn_cq_event(struct mlx5_core_cq *mcq,
				    enum mlx5_event event)
{
	struct mlx5_fpga_conn *conn;

	conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);
	mlx5_fpga_warn(conn->fdev, "CQ event %u on CQ #%u\n", event, mcq->cqn);
}
373
/* Asynchronous QP event handler; events are unexpected, so just log */
static void mlx5_fpga_conn_event(struct mlx5_core_qp *mqp, int event)
{
	struct mlx5_fpga_conn *conn;

	conn = container_of(mqp, struct mlx5_fpga_conn, qp.mqp);
	mlx5_fpga_warn(conn->fdev, "QP event %u on QP #%u\n", event, mqp->qpn);
}
381
/* Poll up to @budget CQEs off the CQ. If the budget is exhausted,
 * reschedule the tasklet to continue later instead of re-arming;
 * otherwise re-arm the CQ for the next interrupt.
 */
static inline void mlx5_fpga_conn_cqes(struct mlx5_fpga_conn *conn,
				       unsigned int budget)
{
	struct mlx5_cqe64 *cqe;

	while (budget) {
		cqe = mlx5_cqwq_get_cqe(&conn->cq.wq);
		if (!cqe)
			break;

		budget--;
		mlx5_cqwq_pop(&conn->cq.wq);
		mlx5_fpga_conn_handle_cqe(conn, cqe);
		mlx5_cqwq_update_db_record(&conn->cq.wq);
	}
	if (!budget) {
		/* More work remains; continue from the tasklet */
		tasklet_schedule(&conn->cq.tasklet);
		return;
	}

	mlx5_fpga_dbg(conn->fdev, "Re-arming CQ with cc# %u\n", conn->cq.wq.cc);
	/* ensure cq space is freed before enabling more cqes */
	wmb();
	mlx5_fpga_conn_arm_cq(conn);
}
407
/* Tasklet continuation of CQ polling when the IRQ-time budget ran out */
static void mlx5_fpga_conn_cq_tasklet(unsigned long data)
{
	struct mlx5_fpga_conn *conn = (void *)data;

	if (unlikely(!conn->qp.active))
		return;
	mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET);
}
416
/* CQ completion (interrupt) handler: poll completions within budget */
static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq)
{
	struct mlx5_fpga_conn *conn;

	conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);
	if (unlikely(!conn->qp.active))
		return;
	mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET);
}
426
427static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
428{
429 struct mlx5_fpga_device *fdev = conn->fdev;
430 struct mlx5_core_dev *mdev = fdev->mdev;
431 u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {0};
432 struct mlx5_wq_param wqp;
433 struct mlx5_cqe64 *cqe;
434 int inlen, err, eqn;
435 unsigned int irqn;
436 void *cqc, *in;
437 __be64 *pas;
438 u32 i;
439
440 cq_size = roundup_pow_of_two(cq_size);
441 MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size));
442
443 wqp.buf_numa_node = mdev->priv.numa_node;
444 wqp.db_numa_node = mdev->priv.numa_node;
445
446 err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &conn->cq.wq,
447 &conn->cq.wq_ctrl);
448 if (err)
449 return err;
450
451 for (i = 0; i < mlx5_cqwq_get_size(&conn->cq.wq); i++) {
452 cqe = mlx5_cqwq_get_wqe(&conn->cq.wq, i);
453 cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK;
454 }
455
456 inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
457 sizeof(u64) * conn->cq.wq_ctrl.frag_buf.npages;
458 in = kvzalloc(inlen, GFP_KERNEL);
459 if (!in) {
460 err = -ENOMEM;
461 goto err_cqwq;
462 }
463
464 err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
465 if (err)
466 goto err_cqwq;
467
468 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
469 MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
470 MLX5_SET(cqc, cqc, c_eqn, eqn);
471 MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index);
472 MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.frag_buf.page_shift -
473 MLX5_ADAPTER_PAGE_SHIFT);
474 MLX5_SET64(cqc, cqc, dbr_addr, conn->cq.wq_ctrl.db.dma);
475
476 pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
477 mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.frag_buf, pas);
478
479 err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen);
480 kvfree(in);
481
482 if (err)
483 goto err_cqwq;
484
485 conn->cq.mcq.cqe_sz = 64;
486 conn->cq.mcq.set_ci_db = conn->cq.wq_ctrl.db.db;
487 conn->cq.mcq.arm_db = conn->cq.wq_ctrl.db.db + 1;
488 *conn->cq.mcq.set_ci_db = 0;
489 *conn->cq.mcq.arm_db = 0;
490 conn->cq.mcq.vector = 0;
491 conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
492 conn->cq.mcq.event = mlx5_fpga_conn_cq_event;
493 conn->cq.mcq.irqn = irqn;
494 conn->cq.mcq.uar = fdev->conn_res.uar;
495 tasklet_init(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet,
496 (unsigned long)conn);
497
498 mlx5_fpga_dbg(fdev, "Created CQ #0x%x\n", conn->cq.mcq.cqn);
499
500 goto out;
501
502err_cqwq:
503 mlx5_cqwq_destroy(&conn->cq.wq_ctrl);
504out:
505 return err;
506}
507
/* Tear down the CQ created by mlx5_fpga_conn_create_cq(): quiesce the
 * polling tasklet, then destroy the firmware CQ and its work queue.
 */
static void mlx5_fpga_conn_destroy_cq(struct mlx5_fpga_conn *conn)
{
	tasklet_disable(&conn->cq.tasklet);
	tasklet_kill(&conn->cq.tasklet);
	mlx5_core_destroy_cq(conn->fdev->mdev, &conn->cq.mcq);
	mlx5_cqwq_destroy(&conn->cq.wq_ctrl);
}
515
/* Allocate the QP work queues (SQ + RQ buffers and doorbell) on the
 * device's NUMA node, sized from the log_*_size fields already set in
 * @qpc. Returns 0 or negative errno.
 */
static int mlx5_fpga_conn_create_wq(struct mlx5_fpga_conn *conn, void *qpc)
{
	struct mlx5_fpga_device *fdev = conn->fdev;
	struct mlx5_core_dev *mdev = fdev->mdev;
	struct mlx5_wq_param wqp;

	wqp.buf_numa_node = mdev->priv.numa_node;
	wqp.db_numa_node  = mdev->priv.numa_node;

	return mlx5_wq_qp_create(mdev, &wqp, qpc, &conn->qp.wq,
				 &conn->qp.wq_ctrl);
}
528
/* Create the host-side RC QP for the connection: work queues, per-WQE
 * buffer tracking arrays, and the firmware CREATE_QP command. SQ/RQ
 * sizes are rounded up to powers of two. Both queues complete on the
 * connection's single CQ. Returns 0 or negative errno; on failure all
 * intermediate resources are released.
 */
static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
				    unsigned int tx_size, unsigned int rx_size)
{
	struct mlx5_fpga_device *fdev = conn->fdev;
	struct mlx5_core_dev *mdev = fdev->mdev;
	u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {0};
	void *in = NULL, *qpc;
	int err, inlen;

	conn->qp.rq.pc = 0;
	conn->qp.rq.cc = 0;
	conn->qp.rq.size = roundup_pow_of_two(rx_size);
	conn->qp.sq.pc = 0;
	conn->qp.sq.cc = 0;
	conn->qp.sq.size = roundup_pow_of_two(tx_size);

	/* RQ stride is one data segment per WQE */
	MLX5_SET(qpc, temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
	MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(conn->qp.rq.size));
	MLX5_SET(qpc, temp_qpc, log_sq_size, ilog2(conn->qp.sq.size));
	err = mlx5_fpga_conn_create_wq(conn, temp_qpc);
	if (err)
		goto out;

	/* Per-slot buffer pointers, indexed by pc & (size - 1) */
	conn->qp.rq.bufs = kvzalloc(sizeof(conn->qp.rq.bufs[0]) *
				    conn->qp.rq.size, GFP_KERNEL);
	if (!conn->qp.rq.bufs) {
		err = -ENOMEM;
		goto err_wq;
	}

	conn->qp.sq.bufs = kvzalloc(sizeof(conn->qp.sq.bufs[0]) *
				    conn->qp.sq.size, GFP_KERNEL);
	if (!conn->qp.sq.bufs) {
		err = -ENOMEM;
		goto err_rq_bufs;
	}

	inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
		MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) *
		conn->qp.wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_sq_bufs;
	}

	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
	MLX5_SET(qpc, qpc, uar_page, fdev->conn_res.uar->index);
	MLX5_SET(qpc, qpc, log_page_size,
		 conn->qp.wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(qpc, qpc, fre, 1);
	MLX5_SET(qpc, qpc, rlky, 1);
	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
	MLX5_SET(qpc, qpc, pd, fdev->conn_res.pdn);
	MLX5_SET(qpc, qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
	MLX5_SET(qpc, qpc, log_rq_size, ilog2(conn->qp.rq.size));
	MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
	MLX5_SET(qpc, qpc, log_sq_size, ilog2(conn->qp.sq.size));
	MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
	MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
	MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);
	if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
		MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);

	mlx5_fill_page_array(&conn->qp.wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas));

	err = mlx5_core_create_qp(mdev, &conn->qp.mqp, in, inlen);
	if (err)
		goto err_sq_bufs;

	conn->qp.mqp.event = mlx5_fpga_conn_event;
	mlx5_fpga_dbg(fdev, "Created QP #0x%x\n", conn->qp.mqp.qpn);

	goto out;

err_sq_bufs:
	kvfree(conn->qp.sq.bufs);
err_rq_bufs:
	kvfree(conn->qp.rq.bufs);
err_wq:
	mlx5_wq_destroy(&conn->qp.wq_ctrl);
out:
	/* 'in' is NULL-initialized, so this is safe on every path */
	kvfree(in);
	return err;
}
616
/* Unmap and free every receive buffer still posted on the RQ */
static void mlx5_fpga_conn_free_recv_bufs(struct mlx5_fpga_conn *conn)
{
	int ix;

	for (ix = 0; ix < conn->qp.rq.size; ix++) {
		if (!conn->qp.rq.bufs[ix])
			continue;
		mlx5_fpga_conn_unmap_buf(conn, conn->qp.rq.bufs[ix]);
		kfree(conn->qp.rq.bufs[ix]);
		conn->qp.rq.bufs[ix] = NULL;
	}
}
629
/* Flush all pending send buffers (posted WQEs and the SQ backlog):
 * unmap each and report WR_FLUSH_ERR through its completion callback,
 * which is expected to release the buffer.
 * NOTE(review): backlog entries are not list_del'ed here — presumably
 * fine because the connection is freed right after, but confirm no
 * later code walks conn->qp.sq.backlog again.
 */
static void mlx5_fpga_conn_flush_send_bufs(struct mlx5_fpga_conn *conn)
{
	struct mlx5_fpga_dma_buf *buf, *temp;
	int ix;

	for (ix = 0; ix < conn->qp.sq.size; ix++) {
		buf = conn->qp.sq.bufs[ix];
		if (!buf)
			continue;
		conn->qp.sq.bufs[ix] = NULL;
		mlx5_fpga_conn_unmap_buf(conn, buf);
		if (!buf->complete)
			continue;
		buf->complete(conn, conn->fdev, buf, MLX5_CQE_SYNDROME_WR_FLUSH_ERR);
	}
	list_for_each_entry_safe(buf, temp, &conn->qp.sq.backlog, list) {
		mlx5_fpga_conn_unmap_buf(conn, buf);
		if (!buf->complete)
			continue;
		buf->complete(conn, conn->fdev, buf, MLX5_CQE_SYNDROME_WR_FLUSH_ERR);
	}
}
652
/* Destroy the host QP and release all of its buffers and work queues.
 * The firmware QP is destroyed first so no further completions arrive
 * while buffers are being reclaimed.
 */
static void mlx5_fpga_conn_destroy_qp(struct mlx5_fpga_conn *conn)
{
	mlx5_core_destroy_qp(conn->fdev->mdev, &conn->qp.mqp);
	mlx5_fpga_conn_free_recv_bufs(conn);
	mlx5_fpga_conn_flush_send_bufs(conn);
	kvfree(conn->qp.sq.bufs);
	kvfree(conn->qp.rq.bufs);
	mlx5_wq_destroy(&conn->qp.wq_ctrl);
}
662
/* Move the host QP to the RESET state (2RST needs no mailbox) */
static inline int mlx5_fpga_conn_reset_qp(struct mlx5_fpga_conn *conn)
{
	struct mlx5_core_dev *mdev = conn->fdev->mdev;

	mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to RST\n", conn->qp.mqp.qpn);

	return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, NULL,
				   &conn->qp.mqp);
}
672
/* Transition the host QP RESET -> INIT: set the RC service type, pkey
 * index, port, PD and CQ numbers. Returns 0 or negative errno.
 */
static inline int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn)
{
	struct mlx5_fpga_device *fdev = conn->fdev;
	struct mlx5_core_dev *mdev = fdev->mdev;
	u32 *qpc = NULL;
	int err;

	mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to INIT\n", conn->qp.mqp.qpn);

	qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
	if (!qpc) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
	MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
	MLX5_SET(qpc, qpc, primary_address_path.port, MLX5_FPGA_PORT_NUM);
	MLX5_SET(qpc, qpc, pd, conn->fdev->conn_res.pdn);
	MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
	MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
	MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);

	err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc,
				  &conn->qp.mqp);
	if (err) {
		mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err);
		goto out;
	}

out:
	kfree(qpc);
	return err;
}
708
709static inline int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn)
710{
711 struct mlx5_fpga_device *fdev = conn->fdev;
712 struct mlx5_core_dev *mdev = fdev->mdev;
713 u32 *qpc = NULL;
714 int err;
715
716 mlx5_fpga_dbg(conn->fdev, "QP RTR\n");
717
718 qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
719 if (!qpc) {
720 err = -ENOMEM;
721 goto out;
722 }
723
724 MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_1K_BYTES);
725 MLX5_SET(qpc, qpc, log_msg_max, (u8)MLX5_CAP_GEN(mdev, log_max_msg));
726 MLX5_SET(qpc, qpc, remote_qpn, conn->fpga_qpn);
727 MLX5_SET(qpc, qpc, next_rcv_psn,
728 MLX5_GET(fpga_qpc, conn->fpga_qpc, next_send_psn));
729 MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
730 MLX5_SET(qpc, qpc, primary_address_path.port, MLX5_FPGA_PORT_NUM);
731 ether_addr_copy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32),
732 MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_mac_47_32));
733 MLX5_SET(qpc, qpc, primary_address_path.udp_sport,
734 MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port));
735 MLX5_SET(qpc, qpc, primary_address_path.src_addr_index,
736 conn->qp.sgid_index);
737 MLX5_SET(qpc, qpc, primary_address_path.hop_limit, 0);
738 memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rgid_rip),
739 MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_ip),
740 MLX5_FLD_SZ_BYTES(qpc, primary_address_path.rgid_rip));
741
742 err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc,
743 &conn->qp.mqp);
744 if (err) {
745 mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err);
746 goto out;
747 }
748
749out:
750 kfree(qpc);
751 return err;
752}
753
754static inline int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn)
755{
756 struct mlx5_fpga_device *fdev = conn->fdev;
757 struct mlx5_core_dev *mdev = fdev->mdev;
758 u32 *qpc = NULL;
759 u32 opt_mask;
760 int err;
761
762 mlx5_fpga_dbg(conn->fdev, "QP RTS\n");
763
764 qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
765 if (!qpc) {
766 err = -ENOMEM;
767 goto out;
768 }
769
770 MLX5_SET(qpc, qpc, log_ack_req_freq, 8);
771 MLX5_SET(qpc, qpc, min_rnr_nak, 0x12);
772 MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x12); /* ~1.07s */
773 MLX5_SET(qpc, qpc, next_send_psn,
774 MLX5_GET(fpga_qpc, conn->fpga_qpc, next_rcv_psn));
775 MLX5_SET(qpc, qpc, retry_count, 7);
776 MLX5_SET(qpc, qpc, rnr_retry, 7); /* Infinite retry if RNR NACK */
777
778 opt_mask = MLX5_QP_OPTPAR_RNR_TIMEOUT;
779 err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, opt_mask, qpc,
780 &conn->qp.mqp);
781 if (err) {
782 mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err);
783 goto out;
784 }
785
786out:
787 kfree(qpc);
788 return err;
789}
790
/* Bring the connection up: activate the FPGA-side QP, then walk the
 * host QP through RESET -> INIT -> RTR -> RTS, posting receive buffers
 * after INIT. On failure the FPGA QP is reverted to INIT and posted
 * receive buffers are freed. Returns 0 or negative errno.
 */
static int mlx5_fpga_conn_connect(struct mlx5_fpga_conn *conn)
{
	struct mlx5_fpga_device *fdev = conn->fdev;
	int err;

	MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_ACTIVE);
	err = mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn,
				  MLX5_FPGA_QPC_STATE, &conn->fpga_qpc);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to activate FPGA RC QP: %d\n", err);
		goto out;
	}

	err = mlx5_fpga_conn_reset_qp(conn);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to change QP state to reset\n");
		goto err_fpga_qp;
	}

	err = mlx5_fpga_conn_init_qp(conn);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to modify QP from RESET to INIT\n");
		goto err_fpga_qp;
	}
	conn->qp.active = true;

	/* Fill the RQ completely; stops when post_recv_buf returns -EBUSY */
	while (!mlx5_fpga_conn_post_recv_buf(conn))
		;

	err = mlx5_fpga_conn_rtr_qp(conn);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to change QP state from INIT to RTR\n");
		goto err_recv_bufs;
	}

	err = mlx5_fpga_conn_rts_qp(conn);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to change QP state from RTR to RTS\n");
		goto err_recv_bufs;
	}
	goto out;

err_recv_bufs:
	mlx5_fpga_conn_free_recv_bufs(conn);
err_fpga_qp:
	MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_INIT);
	if (mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn,
				MLX5_FPGA_QPC_STATE, &conn->fpga_qpc))
		mlx5_fpga_err(fdev, "Failed to revert FPGA QP to INIT\n");
out:
	return err;
}
843
/* Create a host<->FPGA RC connection of @qp_type.
 * Derives the "remote" (i.e. this NIC's) MAC from the NIC vport and a
 * link-local IPv6 address from it, reserves and programs a RoCE GID,
 * creates the CQ and host QP, creates the FPGA-side QP, and connects
 * the two. @attr->recv_cb is mandatory and is invoked for every
 * received buffer. Returns the connection or ERR_PTR on failure; all
 * acquired resources are released on the error paths.
 */
struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
					     struct mlx5_fpga_conn_attr *attr,
					     enum mlx5_ifc_fpga_qp_type qp_type)
{
	struct mlx5_fpga_conn *ret, *conn;
	u8 *remote_mac, *remote_ip;
	int err;

	if (!attr->recv_cb)
		return ERR_PTR(-EINVAL);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	conn->fdev = fdev;
	INIT_LIST_HEAD(&conn->qp.sq.backlog);

	spin_lock_init(&conn->qp.sq.lock);

	conn->recv_cb = attr->recv_cb;
	conn->cb_arg = attr->cb_arg;

	/* "remote" from the FPGA's point of view is this NIC */
	remote_mac = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_mac_47_32);
	err = mlx5_query_nic_vport_mac_address(fdev->mdev, 0, remote_mac);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to query local MAC: %d\n", err);
		ret = ERR_PTR(err);
		goto err;
	}

	/* Build Modified EUI-64 IPv6 address from the MAC address */
	remote_ip = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_ip);
	remote_ip[0] = 0xfe;
	remote_ip[1] = 0x80;
	addrconf_addr_eui48(&remote_ip[8], remote_mac);

	err = mlx5_core_reserved_gid_alloc(fdev->mdev, &conn->qp.sgid_index);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to allocate SGID: %d\n", err);
		ret = ERR_PTR(err);
		goto err;
	}

	err = mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index,
				     MLX5_ROCE_VERSION_2,
				     MLX5_ROCE_L3_TYPE_IPV6,
				     remote_ip, remote_mac, true, 0);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to set SGID: %d\n", err);
		ret = ERR_PTR(err);
		goto err_rsvd_gid;
	}
	mlx5_fpga_dbg(fdev, "Reserved SGID index %u\n", conn->qp.sgid_index);

	/* Allow for one cqe per rx/tx wqe, plus one cqe for the next wqe,
	 * created during processing of the cqe
	 */
	err = mlx5_fpga_conn_create_cq(conn,
				       (attr->tx_size + attr->rx_size) * 2);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to create CQ: %d\n", err);
		ret = ERR_PTR(err);
		goto err_gid;
	}

	mlx5_fpga_conn_arm_cq(conn);

	err = mlx5_fpga_conn_create_qp(conn, attr->tx_size, attr->rx_size);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to create QP: %d\n", err);
		ret = ERR_PTR(err);
		goto err_cq;
	}

	/* Describe the FPGA-side endpoint of the RC connection */
	MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_INIT);
	MLX5_SET(fpga_qpc, conn->fpga_qpc, qp_type, qp_type);
	MLX5_SET(fpga_qpc, conn->fpga_qpc, st, MLX5_FPGA_QPC_ST_RC);
	MLX5_SET(fpga_qpc, conn->fpga_qpc, ether_type, ETH_P_8021Q);
	MLX5_SET(fpga_qpc, conn->fpga_qpc, vid, 0);
	MLX5_SET(fpga_qpc, conn->fpga_qpc, next_rcv_psn, 1);
	MLX5_SET(fpga_qpc, conn->fpga_qpc, next_send_psn, 0);
	MLX5_SET(fpga_qpc, conn->fpga_qpc, pkey, MLX5_FPGA_PKEY);
	MLX5_SET(fpga_qpc, conn->fpga_qpc, remote_qpn, conn->qp.mqp.qpn);
	MLX5_SET(fpga_qpc, conn->fpga_qpc, rnr_retry, 7);
	MLX5_SET(fpga_qpc, conn->fpga_qpc, retry_count, 7);

	err = mlx5_fpga_create_qp(fdev->mdev, &conn->fpga_qpc,
				  &conn->fpga_qpn);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to create FPGA RC QP: %d\n", err);
		ret = ERR_PTR(err);
		goto err_qp;
	}

	err = mlx5_fpga_conn_connect(conn);
	if (err) {
		ret = ERR_PTR(err);
		goto err_conn;
	}

	mlx5_fpga_dbg(fdev, "FPGA QPN is %u\n", conn->fpga_qpn);
	ret = conn;
	goto out;

err_conn:
	mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn);
err_qp:
	mlx5_fpga_conn_destroy_qp(conn);
err_cq:
	mlx5_fpga_conn_destroy_cq(conn);
err_gid:
	mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index, 0, 0, NULL,
			       NULL, false, 0);
err_rsvd_gid:
	mlx5_core_reserved_gid_free(fdev->mdev, conn->qp.sgid_index);
err:
	kfree(conn);
out:
	return ret;
}
965
/* Destroy a connection created by mlx5_fpga_conn_create(): deactivate,
 * quiesce completion processing, destroy the FPGA QP, flush the host QP
 * via the error state, then release QP, CQ, GID and the conn itself.
 */
void mlx5_fpga_conn_destroy(struct mlx5_fpga_conn *conn)
{
	struct mlx5_fpga_device *fdev = conn->fdev;
	struct mlx5_core_dev *mdev = fdev->mdev;
	int err = 0;

	conn->qp.active = false;
	/* Ensure no CQ handling runs concurrently with the teardown below */
	tasklet_disable(&conn->cq.tasklet);
	synchronize_irq(conn->cq.mcq.irqn);

	mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn);
	/* Move the host QP to error so outstanding WQEs complete as flushed */
	err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, NULL,
				  &conn->qp.mqp);
	if (err)
		mlx5_fpga_warn(fdev, "qp_modify 2ERR failed: %d\n", err);
	mlx5_fpga_conn_destroy_qp(conn);
	mlx5_fpga_conn_destroy_cq(conn);

	mlx5_core_roce_gid_set(conn->fdev->mdev, conn->qp.sgid_index, 0, 0,
			       NULL, NULL, false, 0);
	mlx5_core_reserved_gid_free(conn->fdev->mdev, conn->qp.sgid_index);
	kfree(conn);
}
989
/* Set up per-device resources shared by all FPGA connections: enable
 * RoCE on the NIC vport, take a UAR page, allocate a PD and create the
 * whole-memory mkey. Returns 0 or negative errno; resources acquired
 * so far are released on failure.
 */
int mlx5_fpga_conn_device_init(struct mlx5_fpga_device *fdev)
{
	int err;

	err = mlx5_nic_vport_enable_roce(fdev->mdev);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to enable RoCE: %d\n", err);
		goto out;
	}

	fdev->conn_res.uar = mlx5_get_uars_page(fdev->mdev);
	if (IS_ERR(fdev->conn_res.uar)) {
		err = PTR_ERR(fdev->conn_res.uar);
		mlx5_fpga_err(fdev, "get_uars_page failed, %d\n", err);
		goto err_roce;
	}
	mlx5_fpga_dbg(fdev, "Allocated UAR index %u\n",
		      fdev->conn_res.uar->index);

	err = mlx5_core_alloc_pd(fdev->mdev, &fdev->conn_res.pdn);
	if (err) {
		mlx5_fpga_err(fdev, "alloc pd failed, %d\n", err);
		goto err_uar;
	}
	mlx5_fpga_dbg(fdev, "Allocated PD %u\n", fdev->conn_res.pdn);

	err = mlx5_fpga_conn_create_mkey(fdev->mdev, fdev->conn_res.pdn,
					 &fdev->conn_res.mkey);
	if (err) {
		mlx5_fpga_err(fdev, "create mkey failed, %d\n", err);
		goto err_dealloc_pd;
	}
	mlx5_fpga_dbg(fdev, "Created mkey 0x%x\n", fdev->conn_res.mkey.key);

	return 0;

err_dealloc_pd:
	mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn);
err_uar:
	mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar);
err_roce:
	mlx5_nic_vport_disable_roce(fdev->mdev);
out:
	return err;
}
1035
/* Release the per-device resources taken by mlx5_fpga_conn_device_init(),
 * in reverse order of acquisition.
 */
void mlx5_fpga_conn_device_cleanup(struct mlx5_fpga_device *fdev)
{
	mlx5_core_destroy_mkey(fdev->mdev, &fdev->conn_res.mkey);
	mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn);
	mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar);
	mlx5_nic_vport_disable_roce(fdev->mdev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h
new file mode 100644
index 000000000000..44bd9eccc711
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h
@@ -0,0 +1,96 @@
1/*
2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef __MLX5_FPGA_CONN_H__
35#define __MLX5_FPGA_CONN_H__
36
37#include <linux/mlx5/cq.h>
38#include <linux/mlx5/qp.h>
39
40#include "fpga/core.h"
41#include "fpga/sdk.h"
42#include "wq.h"
43
44struct mlx5_fpga_conn {
45 struct mlx5_fpga_device *fdev;
46
47 void (*recv_cb)(void *cb_arg, struct mlx5_fpga_dma_buf *buf);
48 void *cb_arg;
49
50 /* FPGA QP */
51 u32 fpga_qpc[MLX5_ST_SZ_DW(fpga_qpc)];
52 u32 fpga_qpn;
53
54 /* CQ */
55 struct {
56 struct mlx5_cqwq wq;
57 struct mlx5_frag_wq_ctrl wq_ctrl;
58 struct mlx5_core_cq mcq;
59 struct tasklet_struct tasklet;
60 } cq;
61
62 /* QP */
63 struct {
64 bool active;
65 int sgid_index;
66 struct mlx5_wq_qp wq;
67 struct mlx5_wq_ctrl wq_ctrl;
68 struct mlx5_core_qp mqp;
69 struct {
70 spinlock_t lock; /* Protects all SQ state */
71 unsigned int pc;
72 unsigned int cc;
73 unsigned int size;
74 struct mlx5_fpga_dma_buf **bufs;
75 struct list_head backlog;
76 } sq;
77 struct {
78 unsigned int pc;
79 unsigned int cc;
80 unsigned int size;
81 struct mlx5_fpga_dma_buf **bufs;
82 } rq;
83 } qp;
84};
85
86int mlx5_fpga_conn_device_init(struct mlx5_fpga_device *fdev);
87void mlx5_fpga_conn_device_cleanup(struct mlx5_fpga_device *fdev);
88struct mlx5_fpga_conn *
89mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
90 struct mlx5_fpga_conn_attr *attr,
91 enum mlx5_ifc_fpga_qp_type qp_type);
92void mlx5_fpga_conn_destroy(struct mlx5_fpga_conn *conn);
93int mlx5_fpga_conn_send(struct mlx5_fpga_conn *conn,
94 struct mlx5_fpga_dma_buf *buf);
95
96#endif /* __MLX5_FPGA_CONN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index d88b332e9669..31e5a2627eb8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -35,7 +35,9 @@
35#include <linux/mlx5/driver.h> 35#include <linux/mlx5/driver.h>
36 36
37#include "mlx5_core.h" 37#include "mlx5_core.h"
38#include "lib/mlx5.h"
38#include "fpga/core.h" 39#include "fpga/core.h"
40#include "fpga/conn.h"
39 41
40static const char *const mlx5_fpga_error_strings[] = { 42static const char *const mlx5_fpga_error_strings[] = {
41 "Null Syndrome", 43 "Null Syndrome",
@@ -100,10 +102,34 @@ static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev)
100 return 0; 102 return 0;
101} 103}
102 104
105int mlx5_fpga_device_brb(struct mlx5_fpga_device *fdev)
106{
107 int err;
108 struct mlx5_core_dev *mdev = fdev->mdev;
109
110 err = mlx5_fpga_ctrl_op(mdev, MLX5_FPGA_CTRL_OPERATION_SANDBOX_BYPASS_ON);
111 if (err) {
112 mlx5_fpga_err(fdev, "Failed to set bypass on: %d\n", err);
113 return err;
114 }
115 err = mlx5_fpga_ctrl_op(mdev, MLX5_FPGA_CTRL_OPERATION_RESET_SANDBOX);
116 if (err) {
117 mlx5_fpga_err(fdev, "Failed to reset SBU: %d\n", err);
118 return err;
119 }
120 err = mlx5_fpga_ctrl_op(mdev, MLX5_FPGA_CTRL_OPERATION_SANDBOX_BYPASS_OFF);
121 if (err) {
122 mlx5_fpga_err(fdev, "Failed to set bypass off: %d\n", err);
123 return err;
124 }
125 return 0;
126}
127
103int mlx5_fpga_device_start(struct mlx5_core_dev *mdev) 128int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
104{ 129{
105 struct mlx5_fpga_device *fdev = mdev->fpga; 130 struct mlx5_fpga_device *fdev = mdev->fpga;
106 unsigned long flags; 131 unsigned long flags;
132 unsigned int max_num_qps;
107 int err; 133 int err;
108 134
109 if (!fdev) 135 if (!fdev)
@@ -123,6 +149,28 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
123 mlx5_fpga_image_name(fdev->last_oper_image), 149 mlx5_fpga_image_name(fdev->last_oper_image),
124 MLX5_CAP_FPGA(fdev->mdev, image_version)); 150 MLX5_CAP_FPGA(fdev->mdev, image_version));
125 151
152 max_num_qps = MLX5_CAP_FPGA(mdev, shell_caps.max_num_qps);
153 err = mlx5_core_reserve_gids(mdev, max_num_qps);
154 if (err)
155 goto out;
156
157 err = mlx5_fpga_conn_device_init(fdev);
158 if (err)
159 goto err_rsvd_gid;
160
161 if (fdev->last_oper_image == MLX5_FPGA_IMAGE_USER) {
162 err = mlx5_fpga_device_brb(fdev);
163 if (err)
164 goto err_conn_init;
165 }
166
167 goto out;
168
169err_conn_init:
170 mlx5_fpga_conn_device_cleanup(fdev);
171
172err_rsvd_gid:
173 mlx5_core_unreserve_gids(mdev, max_num_qps);
126out: 174out:
127 spin_lock_irqsave(&fdev->state_lock, flags); 175 spin_lock_irqsave(&fdev->state_lock, flags);
128 fdev->state = err ? MLX5_FPGA_STATUS_FAILURE : MLX5_FPGA_STATUS_SUCCESS; 176 fdev->state = err ? MLX5_FPGA_STATUS_FAILURE : MLX5_FPGA_STATUS_SUCCESS;
@@ -130,7 +178,7 @@ out:
130 return err; 178 return err;
131} 179}
132 180
133int mlx5_fpga_device_init(struct mlx5_core_dev *mdev) 181int mlx5_fpga_init(struct mlx5_core_dev *mdev)
134{ 182{
135 struct mlx5_fpga_device *fdev = NULL; 183 struct mlx5_fpga_device *fdev = NULL;
136 184
@@ -151,9 +199,42 @@ int mlx5_fpga_device_init(struct mlx5_core_dev *mdev)
151 return 0; 199 return 0;
152} 200}
153 201
154void mlx5_fpga_device_cleanup(struct mlx5_core_dev *mdev) 202void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev)
203{
204 struct mlx5_fpga_device *fdev = mdev->fpga;
205 unsigned int max_num_qps;
206 unsigned long flags;
207 int err;
208
209 if (!fdev)
210 return;
211
212 spin_lock_irqsave(&fdev->state_lock, flags);
213 if (fdev->state != MLX5_FPGA_STATUS_SUCCESS) {
214 spin_unlock_irqrestore(&fdev->state_lock, flags);
215 return;
216 }
217 fdev->state = MLX5_FPGA_STATUS_NONE;
218 spin_unlock_irqrestore(&fdev->state_lock, flags);
219
220 if (fdev->last_oper_image == MLX5_FPGA_IMAGE_USER) {
221 err = mlx5_fpga_ctrl_op(mdev, MLX5_FPGA_CTRL_OPERATION_SANDBOX_BYPASS_ON);
222 if (err)
223 mlx5_fpga_err(fdev, "Failed to re-set SBU bypass on: %d\n",
224 err);
225 }
226
227 mlx5_fpga_conn_device_cleanup(fdev);
228 max_num_qps = MLX5_CAP_FPGA(mdev, shell_caps.max_num_qps);
229 mlx5_core_unreserve_gids(mdev, max_num_qps);
230}
231
232void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev)
155{ 233{
156 kfree(mdev->fpga); 234 struct mlx5_fpga_device *fdev = mdev->fpga;
235
236 mlx5_fpga_device_stop(mdev);
237 kfree(fdev);
157 mdev->fpga = NULL; 238 mdev->fpga = NULL;
158} 239}
159 240
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
index c55044d66778..82405ed84725 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
@@ -44,6 +44,15 @@ struct mlx5_fpga_device {
44 enum mlx5_fpga_status state; 44 enum mlx5_fpga_status state;
45 enum mlx5_fpga_image last_admin_image; 45 enum mlx5_fpga_image last_admin_image;
46 enum mlx5_fpga_image last_oper_image; 46 enum mlx5_fpga_image last_oper_image;
47
48 /* QP Connection resources */
49 struct {
50 u32 pdn;
51 struct mlx5_core_mkey mkey;
52 struct mlx5_uars_page *uar;
53 } conn_res;
54
55 struct mlx5_fpga_ipsec *ipsec;
47}; 56};
48 57
49#define mlx5_fpga_dbg(__adev, format, ...) \ 58#define mlx5_fpga_dbg(__adev, format, ...) \
@@ -68,19 +77,20 @@ struct mlx5_fpga_device {
68#define mlx5_fpga_info(__adev, format, ...) \ 77#define mlx5_fpga_info(__adev, format, ...) \
69 dev_info(&(__adev)->mdev->pdev->dev, "FPGA: " format, ##__VA_ARGS__) 78 dev_info(&(__adev)->mdev->pdev->dev, "FPGA: " format, ##__VA_ARGS__)
70 79
71int mlx5_fpga_device_init(struct mlx5_core_dev *mdev); 80int mlx5_fpga_init(struct mlx5_core_dev *mdev);
72void mlx5_fpga_device_cleanup(struct mlx5_core_dev *mdev); 81void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev);
73int mlx5_fpga_device_start(struct mlx5_core_dev *mdev); 82int mlx5_fpga_device_start(struct mlx5_core_dev *mdev);
83void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev);
74void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data); 84void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data);
75 85
76#else 86#else
77 87
78static inline int mlx5_fpga_device_init(struct mlx5_core_dev *mdev) 88static inline int mlx5_fpga_init(struct mlx5_core_dev *mdev)
79{ 89{
80 return 0; 90 return 0;
81} 91}
82 92
83static inline void mlx5_fpga_device_cleanup(struct mlx5_core_dev *mdev) 93static inline void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev)
84{ 94{
85} 95}
86 96
@@ -89,6 +99,10 @@ static inline int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
89 return 0; 99 return 0;
90} 100}
91 101
102static inline void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev)
103{
104}
105
92static inline void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, 106static inline void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event,
93 void *data) 107 void *data)
94{ 108{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
new file mode 100644
index 000000000000..42970e2a05ff
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -0,0 +1,376 @@
1/*
2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/mlx5/driver.h>
35
36#include "mlx5_core.h"
37#include "fpga/ipsec.h"
38#include "fpga/sdk.h"
39#include "fpga/core.h"
40
41#define SBU_QP_QUEUE_SIZE 8
42
43enum mlx5_ipsec_response_syndrome {
44 MLX5_IPSEC_RESPONSE_SUCCESS = 0,
45 MLX5_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1,
46 MLX5_IPSEC_RESPONSE_SADB_ISSUE = 2,
47 MLX5_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3,
48};
49
50enum mlx5_fpga_ipsec_sacmd_status {
51 MLX5_FPGA_IPSEC_SACMD_PENDING,
52 MLX5_FPGA_IPSEC_SACMD_SEND_FAIL,
53 MLX5_FPGA_IPSEC_SACMD_COMPLETE,
54};
55
56struct mlx5_ipsec_command_context {
57 struct mlx5_fpga_dma_buf buf;
58 struct mlx5_accel_ipsec_sa sa;
59 enum mlx5_fpga_ipsec_sacmd_status status;
60 int status_code;
61 struct completion complete;
62 struct mlx5_fpga_device *dev;
63 struct list_head list; /* Item in pending_cmds */
64};
65
66struct mlx5_ipsec_sadb_resp {
67 __be32 syndrome;
68 __be32 sw_sa_handle;
69 u8 reserved[24];
70} __packed;
71
72struct mlx5_fpga_ipsec {
73 struct list_head pending_cmds;
74 spinlock_t pending_cmds_lock; /* Protects pending_cmds */
75 u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)];
76 struct mlx5_fpga_conn *conn;
77};
78
79static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
80{
81 if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
82 return false;
83
84 if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
85 MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
86 return false;
87
88 if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
89 MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC)
90 return false;
91
92 return true;
93}
94
95static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn,
96 struct mlx5_fpga_device *fdev,
97 struct mlx5_fpga_dma_buf *buf,
98 u8 status)
99{
100 struct mlx5_ipsec_command_context *context;
101
102 if (status) {
103 context = container_of(buf, struct mlx5_ipsec_command_context,
104 buf);
105 mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n",
106 status);
107 context->status = MLX5_FPGA_IPSEC_SACMD_SEND_FAIL;
108 complete(&context->complete);
109 }
110}
111
112static inline int syndrome_to_errno(enum mlx5_ipsec_response_syndrome syndrome)
113{
114 switch (syndrome) {
115 case MLX5_IPSEC_RESPONSE_SUCCESS:
116 return 0;
117 case MLX5_IPSEC_RESPONSE_SADB_ISSUE:
118 return -EEXIST;
119 case MLX5_IPSEC_RESPONSE_ILLEGAL_REQUEST:
120 return -EINVAL;
121 case MLX5_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE:
122 return -EIO;
123 }
124 return -EIO;
125}
126
127static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
128{
129 struct mlx5_ipsec_sadb_resp *resp = buf->sg[0].data;
130 struct mlx5_ipsec_command_context *context;
131 enum mlx5_ipsec_response_syndrome syndrome;
132 struct mlx5_fpga_device *fdev = cb_arg;
133 unsigned long flags;
134
135 if (buf->sg[0].size < sizeof(*resp)) {
136 mlx5_fpga_warn(fdev, "Short receive from FPGA IPSec: %u < %zu bytes\n",
137 buf->sg[0].size, sizeof(*resp));
138 return;
139 }
140
141 mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x sa_id %x\n",
142 ntohl(resp->syndrome), ntohl(resp->sw_sa_handle));
143
144 spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
145 context = list_first_entry_or_null(&fdev->ipsec->pending_cmds,
146 struct mlx5_ipsec_command_context,
147 list);
148 if (context)
149 list_del(&context->list);
150 spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
151
152 if (!context) {
153 mlx5_fpga_warn(fdev, "Received IPSec offload response without pending command request\n");
154 return;
155 }
156 mlx5_fpga_dbg(fdev, "Handling response for %p\n", context);
157
158 if (context->sa.sw_sa_handle != resp->sw_sa_handle) {
159 mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n",
160 ntohl(context->sa.sw_sa_handle),
161 ntohl(resp->sw_sa_handle));
162 return;
163 }
164
165 syndrome = ntohl(resp->syndrome);
166 context->status_code = syndrome_to_errno(syndrome);
167 context->status = MLX5_FPGA_IPSEC_SACMD_COMPLETE;
168
169 if (context->status_code)
170 mlx5_fpga_warn(fdev, "IPSec SADB command failed with syndrome %08x\n",
171 syndrome);
172 complete(&context->complete);
173}
174
175void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
176 struct mlx5_accel_ipsec_sa *cmd)
177{
178 struct mlx5_ipsec_command_context *context;
179 struct mlx5_fpga_device *fdev = mdev->fpga;
180 unsigned long flags;
181 int res = 0;
182
183 BUILD_BUG_ON((sizeof(struct mlx5_accel_ipsec_sa) & 3) != 0);
184 if (!fdev || !fdev->ipsec)
185 return ERR_PTR(-EOPNOTSUPP);
186
187 context = kzalloc(sizeof(*context), GFP_ATOMIC);
188 if (!context)
189 return ERR_PTR(-ENOMEM);
190
191 memcpy(&context->sa, cmd, sizeof(*cmd));
192 context->buf.complete = mlx5_fpga_ipsec_send_complete;
193 context->buf.sg[0].size = sizeof(context->sa);
194 context->buf.sg[0].data = &context->sa;
195 init_completion(&context->complete);
196 context->dev = fdev;
197 spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
198 list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
199 spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
200
201 context->status = MLX5_FPGA_IPSEC_SACMD_PENDING;
202
203 res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
204 if (res) {
205 mlx5_fpga_warn(fdev, "Failure sending IPSec command: %d\n",
206 res);
207 spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
208 list_del(&context->list);
209 spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
210 kfree(context);
211 return ERR_PTR(res);
212 }
213 /* Context will be freed by wait func after completion */
214 return context;
215}
216
217int mlx5_fpga_ipsec_sa_cmd_wait(void *ctx)
218{
219 struct mlx5_ipsec_command_context *context = ctx;
220 int res;
221
222 res = wait_for_completion_killable(&context->complete);
223 if (res) {
224 mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command response\n");
225 return -EINTR;
226 }
227
228 if (context->status == MLX5_FPGA_IPSEC_SACMD_COMPLETE)
229 res = context->status_code;
230 else
231 res = -EIO;
232
233 kfree(context);
234 return res;
235}
236
237u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
238{
239 struct mlx5_fpga_device *fdev = mdev->fpga;
240 u32 ret = 0;
241
242 if (mlx5_fpga_is_ipsec_device(mdev))
243 ret |= MLX5_ACCEL_IPSEC_DEVICE;
244 else
245 return ret;
246
247 if (!fdev->ipsec)
248 return ret;
249
250 if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp))
251 ret |= MLX5_ACCEL_IPSEC_ESP;
252
253 if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6))
254 ret |= MLX5_ACCEL_IPSEC_IPV6;
255
256 if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso))
257 ret |= MLX5_ACCEL_IPSEC_LSO;
258
259 return ret;
260}
261
262unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
263{
264 struct mlx5_fpga_device *fdev = mdev->fpga;
265
266 if (!fdev || !fdev->ipsec)
267 return 0;
268
269 return MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
270 number_of_ipsec_counters);
271}
272
273int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
274 unsigned int counters_count)
275{
276 struct mlx5_fpga_device *fdev = mdev->fpga;
277 unsigned int i;
278 u32 *data;
279 u32 count;
280 u64 addr;
281 int ret;
282
283 if (!fdev || !fdev->ipsec)
284 return 0;
285
286 addr = (u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
287 ipsec_counters_addr_low) +
288 ((u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
289 ipsec_counters_addr_high) << 32);
290
291 count = mlx5_fpga_ipsec_counters_count(mdev);
292
293 data = kzalloc(sizeof(u32) * count * 2, GFP_KERNEL);
294 if (!data) {
295 ret = -ENOMEM;
296 goto out;
297 }
298
299 ret = mlx5_fpga_mem_read(fdev, count * sizeof(u64), addr, data,
300 MLX5_FPGA_ACCESS_TYPE_DONTCARE);
301 if (ret < 0) {
302 mlx5_fpga_err(fdev, "Failed to read IPSec counters from HW: %d\n",
303 ret);
304 goto out;
305 }
306 ret = 0;
307
308 if (count > counters_count)
309 count = counters_count;
310
311 /* Each counter is low word, then high. But each word is big-endian */
312 for (i = 0; i < count; i++)
313 counters[i] = (u64)ntohl(data[i * 2]) |
314 ((u64)ntohl(data[i * 2 + 1]) << 32);
315
316out:
317 kfree(data);
318 return ret;
319}
320
321int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
322{
323 struct mlx5_fpga_conn_attr init_attr = {0};
324 struct mlx5_fpga_device *fdev = mdev->fpga;
325 struct mlx5_fpga_conn *conn;
326 int err;
327
328 if (!mlx5_fpga_is_ipsec_device(mdev))
329 return 0;
330
331 fdev->ipsec = kzalloc(sizeof(*fdev->ipsec), GFP_KERNEL);
332 if (!fdev->ipsec)
333 return -ENOMEM;
334
335 err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps),
336 fdev->ipsec->caps);
337 if (err) {
338 mlx5_fpga_err(fdev, "Failed to retrieve IPSec extended capabilities: %d\n",
339 err);
340 goto error;
341 }
342
343 INIT_LIST_HEAD(&fdev->ipsec->pending_cmds);
344 spin_lock_init(&fdev->ipsec->pending_cmds_lock);
345
346 init_attr.rx_size = SBU_QP_QUEUE_SIZE;
347 init_attr.tx_size = SBU_QP_QUEUE_SIZE;
348 init_attr.recv_cb = mlx5_fpga_ipsec_recv;
349 init_attr.cb_arg = fdev;
350 conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
351 if (IS_ERR(conn)) {
352 err = PTR_ERR(conn);
353 mlx5_fpga_err(fdev, "Error creating IPSec command connection %d\n",
354 err);
355 goto error;
356 }
357 fdev->ipsec->conn = conn;
358 return 0;
359
360error:
361 kfree(fdev->ipsec);
362 fdev->ipsec = NULL;
363 return err;
364}
365
366void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
367{
368 struct mlx5_fpga_device *fdev = mdev->fpga;
369
370 if (!mlx5_fpga_is_ipsec_device(mdev))
371 return;
372
373 mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn);
374 kfree(fdev->ipsec);
375 fdev->ipsec = NULL;
376}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
new file mode 100644
index 000000000000..26a3e4b56972
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
@@ -0,0 +1,94 @@
1/*
2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef __MLX5_FPGA_IPSEC_H__
35#define __MLX5_FPGA_IPSEC_H__
36
37#include "accel/ipsec.h"
38
39#ifdef CONFIG_MLX5_FPGA
40
41void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
42 struct mlx5_accel_ipsec_sa *cmd);
43int mlx5_fpga_ipsec_sa_cmd_wait(void *context);
44
45u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
46unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev);
47int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
48 unsigned int counters_count);
49
50int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev);
51void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev);
52
53#else
54
55static inline void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
56 struct mlx5_accel_ipsec_sa *cmd)
57{
58 return ERR_PTR(-EOPNOTSUPP);
59}
60
61static inline int mlx5_fpga_ipsec_sa_cmd_wait(void *context)
62{
63 return -EOPNOTSUPP;
64}
65
66static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
67{
68 return 0;
69}
70
71static inline unsigned int
72mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
73{
74 return 0;
75}
76
77static inline int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev,
78 u64 *counters)
79{
80 return 0;
81}
82
83static inline int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
84{
85 return 0;
86}
87
88static inline void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
89{
90}
91
92#endif /* CONFIG_MLX5_FPGA */
93
94#endif /* __MLX5_FPGA_SADB_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
new file mode 100644
index 000000000000..3c11d6e2160a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
@@ -0,0 +1,164 @@
1/*
2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/mlx5/device.h>
35
36#include "fpga/core.h"
37#include "fpga/conn.h"
38#include "fpga/sdk.h"
39
40struct mlx5_fpga_conn *
41mlx5_fpga_sbu_conn_create(struct mlx5_fpga_device *fdev,
42 struct mlx5_fpga_conn_attr *attr)
43{
44 return mlx5_fpga_conn_create(fdev, attr, MLX5_FPGA_QPC_QP_TYPE_SANDBOX_QP);
45}
46EXPORT_SYMBOL(mlx5_fpga_sbu_conn_create);
47
48void mlx5_fpga_sbu_conn_destroy(struct mlx5_fpga_conn *conn)
49{
50 mlx5_fpga_conn_destroy(conn);
51}
52EXPORT_SYMBOL(mlx5_fpga_sbu_conn_destroy);
53
54int mlx5_fpga_sbu_conn_sendmsg(struct mlx5_fpga_conn *conn,
55 struct mlx5_fpga_dma_buf *buf)
56{
57 return mlx5_fpga_conn_send(conn, buf);
58}
59EXPORT_SYMBOL(mlx5_fpga_sbu_conn_sendmsg);
60
61static int mlx5_fpga_mem_read_i2c(struct mlx5_fpga_device *fdev, size_t size,
62 u64 addr, u8 *buf)
63{
64 size_t max_size = MLX5_FPGA_ACCESS_REG_SIZE_MAX;
65 size_t bytes_done = 0;
66 u8 actual_size;
67 int err;
68
69 if (!fdev->mdev)
70 return -ENOTCONN;
71
72 while (bytes_done < size) {
73 actual_size = min(max_size, (size - bytes_done));
74
75 err = mlx5_fpga_access_reg(fdev->mdev, actual_size,
76 addr + bytes_done,
77 buf + bytes_done, false);
78 if (err) {
79 mlx5_fpga_err(fdev, "Failed to read over I2C: %d\n",
80 err);
81 break;
82 }
83
84 bytes_done += actual_size;
85 }
86
87 return err;
88}
89
90static int mlx5_fpga_mem_write_i2c(struct mlx5_fpga_device *fdev, size_t size,
91 u64 addr, u8 *buf)
92{
93 size_t max_size = MLX5_FPGA_ACCESS_REG_SIZE_MAX;
94 size_t bytes_done = 0;
95 u8 actual_size;
96 int err;
97
98 if (!fdev->mdev)
99 return -ENOTCONN;
100
101 while (bytes_done < size) {
102 actual_size = min(max_size, (size - bytes_done));
103
104 err = mlx5_fpga_access_reg(fdev->mdev, actual_size,
105 addr + bytes_done,
106 buf + bytes_done, true);
107 if (err) {
108 mlx5_fpga_err(fdev, "Failed to write FPGA crspace\n");
109 break;
110 }
111
112 bytes_done += actual_size;
113 }
114
115 return err;
116}
117
118int mlx5_fpga_mem_read(struct mlx5_fpga_device *fdev, size_t size, u64 addr,
119 void *buf, enum mlx5_fpga_access_type access_type)
120{
121 int ret;
122
123 switch (access_type) {
124 case MLX5_FPGA_ACCESS_TYPE_I2C:
125 ret = mlx5_fpga_mem_read_i2c(fdev, size, addr, buf);
126 if (ret)
127 return ret;
128 break;
129 default:
130 mlx5_fpga_warn(fdev, "Unexpected read access_type %u\n",
131 access_type);
132 return -EACCES;
133 }
134
135 return size;
136}
137EXPORT_SYMBOL(mlx5_fpga_mem_read);
138
139int mlx5_fpga_mem_write(struct mlx5_fpga_device *fdev, size_t size, u64 addr,
140 void *buf, enum mlx5_fpga_access_type access_type)
141{
142 int ret;
143
144 switch (access_type) {
145 case MLX5_FPGA_ACCESS_TYPE_I2C:
146 ret = mlx5_fpga_mem_write_i2c(fdev, size, addr, buf);
147 if (ret)
148 return ret;
149 break;
150 default:
151 mlx5_fpga_warn(fdev, "Unexpected write access_type %u\n",
152 access_type);
153 return -EACCES;
154 }
155
156 return size;
157}
158EXPORT_SYMBOL(mlx5_fpga_mem_write);
159
160int mlx5_fpga_get_sbu_caps(struct mlx5_fpga_device *fdev, int size, void *buf)
161{
162 return mlx5_fpga_sbu_caps(fdev->mdev, buf, size);
163}
164EXPORT_SYMBOL(mlx5_fpga_get_sbu_caps);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h
new file mode 100644
index 000000000000..baa537e54a49
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h
@@ -0,0 +1,204 @@
1/*
2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef MLX5_FPGA_SDK_H
35#define MLX5_FPGA_SDK_H
36
37#include <linux/types.h>
38#include <linux/dma-direction.h>
39
40/**
41 * DOC: Innova SDK
42 * This header defines the in-kernel API for Innova FPGA client drivers.
43 */
44
45enum mlx5_fpga_access_type {
46 MLX5_FPGA_ACCESS_TYPE_I2C = 0x0,
47 MLX5_FPGA_ACCESS_TYPE_DONTCARE = 0x0,
48};
49
50struct mlx5_fpga_conn;
51struct mlx5_fpga_device;
52
53/**
54 * struct mlx5_fpga_dma_entry - A scatter-gather DMA entry
55 */
56struct mlx5_fpga_dma_entry {
57 /** @data: Virtual address pointer to the data */
58 void *data;
59 /** @size: Size in bytes of the data */
60 unsigned int size;
61 /** @dma_addr: Private member. Physical DMA-mapped address of the data */
62 dma_addr_t dma_addr;
63};
64
65/**
66 * struct mlx5_fpga_dma_buf - A packet buffer
67 * May contain up to 2 scatter-gather data entries
68 */
69struct mlx5_fpga_dma_buf {
70 /** @dma_dir: DMA direction */
71 enum dma_data_direction dma_dir;
72 /** @sg: Scatter-gather entries pointing to the data in memory */
73 struct mlx5_fpga_dma_entry sg[2];
74 /** @list: Item in SQ backlog, for TX packets */
75 struct list_head list;
76 /**
77 * @complete: Completion routine, for TX packets
78 * @conn: FPGA Connection this packet was sent to
79 * @fdev: FPGA device this packet was sent to
80 * @buf: The packet buffer
81 * @status: 0 if successful, or an error code otherwise
82 */
83 void (*complete)(struct mlx5_fpga_conn *conn,
84 struct mlx5_fpga_device *fdev,
85 struct mlx5_fpga_dma_buf *buf, u8 status);
86};
87
88/**
89 * struct mlx5_fpga_conn_attr - FPGA connection attributes
90 * Describes the attributes of a connection
91 */
92struct mlx5_fpga_conn_attr {
93 /** @tx_size: Size of connection TX queue, in packets */
94 unsigned int tx_size;
95 /** @rx_size: Size of connection RX queue, in packets */
96 unsigned int rx_size;
97 /**
98 * @recv_cb: Callback function which is called for received packets
99 * @cb_arg: The value provided in mlx5_fpga_conn_attr.cb_arg
100 * @buf: A buffer containing a received packet
101 *
102 * buf is guaranteed to only contain a single scatter-gather entry.
103 * The size of the actual packet received is specified in buf.sg[0].size
104 * When this callback returns, the packet buffer may be re-used for
105 * subsequent receives.
106 */
107 void (*recv_cb)(void *cb_arg, struct mlx5_fpga_dma_buf *buf);
108 void *cb_arg;
109};
110
111/**
112 * mlx5_fpga_sbu_conn_create() - Initialize a new FPGA SBU connection
113 * @fdev: The FPGA device
114 * @attr: Attributes of the new connection
115 *
116 * Sets up a new FPGA SBU connection with the specified attributes.
117 * The receive callback function may be called for incoming messages even
118 * before this function returns.
119 *
120 * The caller must eventually destroy the connection by calling
121 * mlx5_fpga_sbu_conn_destroy.
122 *
123 * Return: A new connection, or ERR_PTR() error value otherwise.
124 */
125struct mlx5_fpga_conn *
126mlx5_fpga_sbu_conn_create(struct mlx5_fpga_device *fdev,
127 struct mlx5_fpga_conn_attr *attr);
128
129/**
130 * mlx5_fpga_sbu_conn_destroy() - Destroy an FPGA SBU connection
131 * @conn: The FPGA SBU connection to destroy
132 *
133 * Cleans up an FPGA SBU connection which was previously created with
134 * mlx5_fpga_sbu_conn_create.
135 */
136void mlx5_fpga_sbu_conn_destroy(struct mlx5_fpga_conn *conn);
137
138/**
139 * mlx5_fpga_sbu_conn_sendmsg() - Queue the transmission of a packet
 * @conn: The FPGA SBU connection
141 * @buf: The packet buffer
142 *
143 * Queues a packet for transmission over an FPGA SBU connection.
144 * The buffer should not be modified or freed until completion.
145 * Upon completion, the buf's complete() callback is invoked, indicating the
146 * success or error status of the transmission.
147 *
148 * Return: 0 if successful, or an error value otherwise.
149 */
150int mlx5_fpga_sbu_conn_sendmsg(struct mlx5_fpga_conn *conn,
151 struct mlx5_fpga_dma_buf *buf);
152
153/**
154 * mlx5_fpga_mem_read() - Read from FPGA memory address space
155 * @fdev: The FPGA device
156 * @size: Size of chunk to read, in bytes
157 * @addr: Starting address to read from, in FPGA address space
158 * @buf: Buffer to read into
159 * @access_type: Method for reading
160 *
161 * Reads from the specified address into the specified buffer.
162 * The address may point to configuration space or to DDR.
163 * Large reads may be performed internally as several non-atomic operations.
164 * This function may sleep, so should not be called from atomic contexts.
165 *
 * Return: the number of bytes read if successful, or a negative error value
 * otherwise.
167 */
168int mlx5_fpga_mem_read(struct mlx5_fpga_device *fdev, size_t size, u64 addr,
169 void *buf, enum mlx5_fpga_access_type access_type);
170
171/**
172 * mlx5_fpga_mem_write() - Write to FPGA memory address space
173 * @fdev: The FPGA device
174 * @size: Size of chunk to write, in bytes
175 * @addr: Starting address to write to, in FPGA address space
176 * @buf: Buffer which contains data to write
177 * @access_type: Method for writing
178 *
179 * Writes the specified buffer data to FPGA memory at the specified address.
180 * The address may point to configuration space or to DDR.
181 * Large writes may be performed internally as several non-atomic operations.
182 * This function may sleep, so should not be called from atomic contexts.
183 *
 * Return: the number of bytes written if successful, or a negative error value
 * otherwise.
185 */
186int mlx5_fpga_mem_write(struct mlx5_fpga_device *fdev, size_t size, u64 addr,
187 void *buf, enum mlx5_fpga_access_type access_type);
188
189/**
190 * mlx5_fpga_get_sbu_caps() - Read the SBU capabilities
191 * @fdev: The FPGA device
192 * @size: Size of the buffer to read into
193 * @buf: Buffer to read the capabilities into
194 *
195 * Reads the FPGA SBU capabilities into the specified buffer.
196 * The format of the capabilities buffer is SBU-dependent.
197 *
198 * Return: 0 if successful
199 * -EINVAL if the buffer is not large enough to contain SBU caps
200 * or any other error value otherwise.
201 */
202int mlx5_fpga_get_sbu_caps(struct mlx5_fpga_device *fdev, int size, void *buf);
203
204#endif /* MLX5_FPGA_SDK_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
new file mode 100644
index 000000000000..4d0db481f6c4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
@@ -0,0 +1,154 @@
1/*
2 * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/mlx5/driver.h>
34#include <linux/etherdevice.h>
35#include <linux/idr.h>
36#include "mlx5_core.h"
37
38void mlx5_init_reserved_gids(struct mlx5_core_dev *dev)
39{
40 unsigned int tblsz = MLX5_CAP_ROCE(dev, roce_address_table_size);
41
42 ida_init(&dev->roce.reserved_gids.ida);
43 dev->roce.reserved_gids.start = tblsz;
44 dev->roce.reserved_gids.count = 0;
45}
46
47void mlx5_cleanup_reserved_gids(struct mlx5_core_dev *dev)
48{
49 WARN_ON(!ida_is_empty(&dev->roce.reserved_gids.ida));
50 dev->roce.reserved_gids.start = 0;
51 dev->roce.reserved_gids.count = 0;
52 ida_destroy(&dev->roce.reserved_gids.ida);
53}
54
55int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count)
56{
57 if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
58 mlx5_core_err(dev, "Cannot reserve GIDs when interfaces are up\n");
59 return -EPERM;
60 }
61 if (dev->roce.reserved_gids.start < count) {
62 mlx5_core_warn(dev, "GID table exhausted attempting to reserve %d more GIDs\n",
63 count);
64 return -ENOMEM;
65 }
66 if (dev->roce.reserved_gids.count + count > MLX5_MAX_RESERVED_GIDS) {
67 mlx5_core_warn(dev, "Unable to reserve %d more GIDs\n", count);
68 return -ENOMEM;
69 }
70
71 dev->roce.reserved_gids.start -= count;
72 dev->roce.reserved_gids.count += count;
73 mlx5_core_dbg(dev, "Reserved %u GIDs starting at %u\n",
74 dev->roce.reserved_gids.count,
75 dev->roce.reserved_gids.start);
76 return 0;
77}
78
/* Return @count reserved GIDs to general use by shrinking the reserved
 * window from the bottom.  Like the reserve path, this must only run
 * while no interfaces are up; the WARNs flag misuse rather than
 * preventing it (the bookkeeping is updated regardless).
 */
void mlx5_core_unreserve_gids(struct mlx5_core_dev *dev, unsigned int count)
{
	WARN(test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state), "Unreserving GIDs when interfaces are up");
	WARN(count > dev->roce.reserved_gids.count, "Unreserving %u GIDs when only %u reserved",
	     count, dev->roce.reserved_gids.count);

	dev->roce.reserved_gids.start += count;
	dev->roce.reserved_gids.count -= count;
	mlx5_core_dbg(dev, "%u GIDs starting at %u left reserved\n",
		      dev->roce.reserved_gids.count,
		      dev->roce.reserved_gids.start);
}
91
92int mlx5_core_reserved_gid_alloc(struct mlx5_core_dev *dev, int *gid_index)
93{
94 int end = dev->roce.reserved_gids.start +
95 dev->roce.reserved_gids.count;
96 int index = 0;
97
98 index = ida_simple_get(&dev->roce.reserved_gids.ida,
99 dev->roce.reserved_gids.start, end,
100 GFP_KERNEL);
101 if (index < 0)
102 return index;
103
104 mlx5_core_dbg(dev, "Allodating reserved GID %u\n", index);
105 *gid_index = index;
106 return 0;
107}
108
/* Release a GID index previously obtained via mlx5_core_reserved_gid_alloc().
 * NOTE(review): gid_index is an int printed with %u — harmless for the
 * non-negative indices the allocator hands out, but confirm no caller can
 * pass a negative value.
 */
void mlx5_core_reserved_gid_free(struct mlx5_core_dev *dev, int gid_index)
{
	mlx5_core_dbg(dev, "Freeing reserved GID %u\n", gid_index);
	ida_simple_remove(&dev->roce.reserved_gids.ida, gid_index);
}
114
/* Number of GIDs currently reserved at the top of the device GID table. */
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev)
{
	return dev->roce.reserved_gids.count;
}
EXPORT_SYMBOL_GPL(mlx5_core_reserved_gids_count);
120
/* Program one entry of the device RoCE address (GID) table via the
 * SET_ROCE_ADDRESS command.  Only valid on Ethernet ports.
 *
 * When @gid is NULL the address layout in @in stays all-zero (thanks to
 * the {0} initializer), so the entry is effectively cleared; @mac and the
 * vlan arguments are only consumed when @gid is non-NULL.
 *
 * Return: 0 on success, -EINVAL for a non-Ethernet port, or the
 * mlx5_cmd_exec() error.
 */
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
			   u8 roce_version, u8 roce_l3_type, const u8 *gid,
			   const u8 *mac, bool vlan, u16 vlan_id)
{
#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
	u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
	void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
	char *addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, in_addr,
					  source_l3_address);
	void *addr_mac = MLX5_ADDR_OF(roce_addr_layout, in_addr,
				      source_mac_47_32);
	/* GID is copied verbatim into the fixed-size source_l3_address field */
	int gidsz = MLX5_FLD_SZ_BYTES(roce_addr_layout, source_l3_address);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EINVAL;

	if (gid) {
		if (vlan) {
			MLX5_SET_RA(in_addr, vlan_valid, 1);
			MLX5_SET_RA(in_addr, vlan_id, vlan_id);
		}

		ether_addr_copy(addr_mac, mac);
		MLX5_SET_RA(in_addr, roce_version, roce_version);
		MLX5_SET_RA(in_addr, roce_l3_type, roce_l3_type);
		memcpy(addr_l3_addr, gid, gidsz);
	}

	MLX5_SET(set_roce_address_in, in, roce_address_index, index);
	MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_roce_gid_set);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
new file mode 100644
index 000000000000..7550b1cc8c6a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -0,0 +1,43 @@
1/*
2 * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef __LIB_MLX5_H__
34#define __LIB_MLX5_H__
35
/* Reserved-GID bookkeeping, implemented in lib/gid.c. */
void mlx5_init_reserved_gids(struct mlx5_core_dev *dev);
void mlx5_cleanup_reserved_gids(struct mlx5_core_dev *dev);
/* Grow/shrink the reserved window at the top of the device GID table. */
int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count);
void mlx5_core_unreserve_gids(struct mlx5_core_dev *dev, unsigned int count);
/* Allocate/free a single GID index inside the reserved window. */
int mlx5_core_reserved_gid_alloc(struct mlx5_core_dev *dev, int *gid_index);
void mlx5_core_reserved_gid_free(struct mlx5_core_dev *dev, int gid_index);
42
43#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c7f75e12c13b..719f8e974482 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -56,7 +56,9 @@
56#ifdef CONFIG_MLX5_CORE_EN 56#ifdef CONFIG_MLX5_CORE_EN
57#include "eswitch.h" 57#include "eswitch.h"
58#endif 58#endif
59#include "lib/mlx5.h"
59#include "fpga/core.h" 60#include "fpga/core.h"
61#include "accel/ipsec.h"
60 62
61MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); 63MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
62MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver"); 64MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
@@ -936,6 +938,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
936 938
937 mlx5_init_mkey_table(dev); 939 mlx5_init_mkey_table(dev);
938 940
941 mlx5_init_reserved_gids(dev);
942
939 err = mlx5_init_rl_table(dev); 943 err = mlx5_init_rl_table(dev);
940 if (err) { 944 if (err) {
941 dev_err(&pdev->dev, "Failed to init rate limiting\n"); 945 dev_err(&pdev->dev, "Failed to init rate limiting\n");
@@ -956,8 +960,16 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
956 goto err_eswitch_cleanup; 960 goto err_eswitch_cleanup;
957 } 961 }
958 962
963 err = mlx5_fpga_init(dev);
964 if (err) {
965 dev_err(&pdev->dev, "Failed to init fpga device %d\n", err);
966 goto err_sriov_cleanup;
967 }
968
959 return 0; 969 return 0;
960 970
971err_sriov_cleanup:
972 mlx5_sriov_cleanup(dev);
961err_eswitch_cleanup: 973err_eswitch_cleanup:
962#ifdef CONFIG_MLX5_CORE_EN 974#ifdef CONFIG_MLX5_CORE_EN
963 mlx5_eswitch_cleanup(dev->priv.eswitch); 975 mlx5_eswitch_cleanup(dev->priv.eswitch);
@@ -981,11 +993,13 @@ out:
981 993
982static void mlx5_cleanup_once(struct mlx5_core_dev *dev) 994static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
983{ 995{
996 mlx5_fpga_cleanup(dev);
984 mlx5_sriov_cleanup(dev); 997 mlx5_sriov_cleanup(dev);
985#ifdef CONFIG_MLX5_CORE_EN 998#ifdef CONFIG_MLX5_CORE_EN
986 mlx5_eswitch_cleanup(dev->priv.eswitch); 999 mlx5_eswitch_cleanup(dev->priv.eswitch);
987#endif 1000#endif
988 mlx5_cleanup_rl_table(dev); 1001 mlx5_cleanup_rl_table(dev);
1002 mlx5_cleanup_reserved_gids(dev);
989 mlx5_cleanup_mkey_table(dev); 1003 mlx5_cleanup_mkey_table(dev);
990 mlx5_cleanup_srq_table(dev); 1004 mlx5_cleanup_srq_table(dev);
991 mlx5_cleanup_qp_table(dev); 1005 mlx5_cleanup_qp_table(dev);
@@ -1117,16 +1131,10 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1117 goto err_disable_msix; 1131 goto err_disable_msix;
1118 } 1132 }
1119 1133
1120 err = mlx5_fpga_device_init(dev);
1121 if (err) {
1122 dev_err(&pdev->dev, "fpga device init failed %d\n", err);
1123 goto err_put_uars;
1124 }
1125
1126 err = mlx5_start_eqs(dev); 1134 err = mlx5_start_eqs(dev);
1127 if (err) { 1135 if (err) {
1128 dev_err(&pdev->dev, "Failed to start pages and async EQs\n"); 1136 dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
1129 goto err_fpga_init; 1137 goto err_put_uars;
1130 } 1138 }
1131 1139
1132 err = alloc_comp_eqs(dev); 1140 err = alloc_comp_eqs(dev);
@@ -1160,7 +1168,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1160 err = mlx5_fpga_device_start(dev); 1168 err = mlx5_fpga_device_start(dev);
1161 if (err) { 1169 if (err) {
1162 dev_err(&pdev->dev, "fpga device start failed %d\n", err); 1170 dev_err(&pdev->dev, "fpga device start failed %d\n", err);
1163 goto err_reg_dev; 1171 goto err_fpga_start;
1172 }
1173 err = mlx5_accel_ipsec_init(dev);
1174 if (err) {
1175 dev_err(&pdev->dev, "IPSec device start failed %d\n", err);
1176 goto err_ipsec_start;
1164 } 1177 }
1165 1178
1166 if (mlx5_device_registered(dev)) { 1179 if (mlx5_device_registered(dev)) {
@@ -1181,6 +1194,11 @@ out:
1181 return 0; 1194 return 0;
1182 1195
1183err_reg_dev: 1196err_reg_dev:
1197 mlx5_accel_ipsec_cleanup(dev);
1198err_ipsec_start:
1199 mlx5_fpga_device_stop(dev);
1200
1201err_fpga_start:
1184 mlx5_sriov_detach(dev); 1202 mlx5_sriov_detach(dev);
1185 1203
1186err_sriov: 1204err_sriov:
@@ -1198,9 +1216,6 @@ err_affinity_hints:
1198err_stop_eqs: 1216err_stop_eqs:
1199 mlx5_stop_eqs(dev); 1217 mlx5_stop_eqs(dev);
1200 1218
1201err_fpga_init:
1202 mlx5_fpga_device_cleanup(dev);
1203
1204err_put_uars: 1219err_put_uars:
1205 mlx5_put_uars_page(dev, priv->uar); 1220 mlx5_put_uars_page(dev, priv->uar);
1206 1221
@@ -1254,9 +1269,15 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1254 goto out; 1269 goto out;
1255 } 1270 }
1256 1271
1272 clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
1273 set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
1274
1257 if (mlx5_device_registered(dev)) 1275 if (mlx5_device_registered(dev))
1258 mlx5_detach_device(dev); 1276 mlx5_detach_device(dev);
1259 1277
1278 mlx5_accel_ipsec_cleanup(dev);
1279 mlx5_fpga_device_stop(dev);
1280
1260 mlx5_sriov_detach(dev); 1281 mlx5_sriov_detach(dev);
1261#ifdef CONFIG_MLX5_CORE_EN 1282#ifdef CONFIG_MLX5_CORE_EN
1262 mlx5_eswitch_detach(dev->priv.eswitch); 1283 mlx5_eswitch_detach(dev->priv.eswitch);
@@ -1265,7 +1286,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1265 mlx5_irq_clear_affinity_hints(dev); 1286 mlx5_irq_clear_affinity_hints(dev);
1266 free_comp_eqs(dev); 1287 free_comp_eqs(dev);
1267 mlx5_stop_eqs(dev); 1288 mlx5_stop_eqs(dev);
1268 mlx5_fpga_device_cleanup(dev);
1269 mlx5_put_uars_page(dev, priv->uar); 1289 mlx5_put_uars_page(dev, priv->uar);
1270 mlx5_disable_msix(dev); 1290 mlx5_disable_msix(dev);
1271 if (cleanup) 1291 if (cleanup)
@@ -1282,8 +1302,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1282 mlx5_cmd_cleanup(dev); 1302 mlx5_cmd_cleanup(dev);
1283 1303
1284out: 1304out:
1285 clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
1286 set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
1287 mutex_unlock(&dev->intf_state_mutex); 1305 mutex_unlock(&dev->intf_state_mutex);
1288 return err; 1306 return err;
1289} 1307}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 06019d00ab7b..5abfec1c3399 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -926,12 +926,16 @@ static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
926 926
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	/* Reference-count RoCE users: only the first enable programs HW. */
	if (atomic_inc_return(&mdev->roce.roce_en) != 1)
		return 0;
	return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
932 934
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	/* Only the last disable programs HW.  NOTE(review): unbalanced
	 * disables would drive the counter negative — confirm callers pair
	 * enable/disable correctly.
	 */
	if (atomic_dec_return(&mdev->roce.roce_en) != 0)
		return 0;
	return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 921673c42bc9..6bcfc25350f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -54,6 +54,12 @@ static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
54 return mlx5_wq_cyc_get_size(wq) << wq->log_stride; 54 return mlx5_wq_cyc_get_size(wq) << wq->log_stride;
55} 55}
56 56
/* Total buffer size of a QP work queue: the RQ region followed by the SQ
 * region, laid out back to back in one contiguous buffer (see
 * mlx5_wq_qp_create()).
 */
static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq)
{
	return mlx5_wq_cyc_get_byte_size(&wq->rq) +
	       mlx5_wq_cyc_get_byte_size(&wq->sq);
}
62
57static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq) 63static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
58{ 64{
59 return mlx5_cqwq_get_size(wq) << wq->log_stride; 65 return mlx5_cqwq_get_size(wq) << wq->log_stride;
@@ -99,6 +105,46 @@ err_db_free:
99 return err; 105 return err;
100} 106}
101 107
/* Create the RQ+SQ work queues of a QP from its QP context.
 *
 * Sizes and strides are decoded from @qpc; the doorbell record and a
 * single contiguous buffer (RQ first, SQ after it) are allocated on the
 * NUMA nodes requested in @param.
 *
 * Return: 0 on success, or a negative errno; on failure nothing remains
 * allocated.
 */
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		      void *qpc, struct mlx5_wq_qp *wq,
		      struct mlx5_wq_ctrl *wq_ctrl)
{
	int err;

	/* QPC encodes the RQ stride as log2(stride) - 4. */
	wq->rq.log_stride = MLX5_GET(qpc, qpc, log_rq_stride) + 4;
	wq->rq.sz_m1 = (1 << MLX5_GET(qpc, qpc, log_rq_size)) - 1;

	/* SQ entries are counted in send-WQE basic blocks. */
	wq->sq.log_stride = ilog2(MLX5_SEND_WQE_BB);
	wq->sq.sz_m1 = (1 << MLX5_GET(qpc, qpc, log_sq_size)) - 1;

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

	err = mlx5_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq),
				  &wq_ctrl->buf, param->buf_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
		goto err_db_free;
	}

	/* RQ occupies the start of the buffer, SQ follows immediately. */
	wq->rq.buf = wq_ctrl->buf.direct.buf;
	wq->sq.buf = wq->rq.buf + mlx5_wq_cyc_get_byte_size(&wq->rq);
	wq->rq.db  = &wq_ctrl->db.db[MLX5_RCV_DBR];
	wq->sq.db  = &wq_ctrl->db.db[MLX5_SND_DBR];

	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}
147
102int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, 148int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
103 void *cqc, struct mlx5_cqwq *wq, 149 void *cqc, struct mlx5_cqwq *wq,
104 struct mlx5_frag_wq_ctrl *wq_ctrl) 150 struct mlx5_frag_wq_ctrl *wq_ctrl)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index d8afed898c31..718589d0cec2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -34,6 +34,8 @@
34#define __MLX5_WQ_H__ 34#define __MLX5_WQ_H__
35 35
36#include <linux/mlx5/mlx5_ifc.h> 36#include <linux/mlx5/mlx5_ifc.h>
37#include <linux/mlx5/cq.h>
38#include <linux/mlx5/qp.h>
37 39
38struct mlx5_wq_param { 40struct mlx5_wq_param {
39 int linear; 41 int linear;
@@ -60,6 +62,11 @@ struct mlx5_wq_cyc {
60 u8 log_stride; 62 u8 log_stride;
61}; 63};
62 64
65struct mlx5_wq_qp {
66 struct mlx5_wq_cyc rq;
67 struct mlx5_wq_cyc sq;
68};
69
63struct mlx5_cqwq { 70struct mlx5_cqwq {
64 struct mlx5_frag_buf frag_buf; 71 struct mlx5_frag_buf frag_buf;
65 __be32 *db; 72 __be32 *db;
@@ -87,6 +94,10 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
87 struct mlx5_wq_ctrl *wq_ctrl); 94 struct mlx5_wq_ctrl *wq_ctrl);
88u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); 95u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
89 96
97int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
98 void *qpc, struct mlx5_wq_qp *wq,
99 struct mlx5_wq_ctrl *wq_ctrl);
100
90int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, 101int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
91 void *cqc, struct mlx5_cqwq *wq, 102 void *cqc, struct mlx5_cqwq *wq,
92 struct mlx5_frag_wq_ctrl *wq_ctrl); 103 struct mlx5_frag_wq_ctrl *wq_ctrl);
@@ -146,6 +157,22 @@ static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq)
146 *wq->db = cpu_to_be32(wq->cc & 0xffffff); 157 *wq->db = cpu_to_be32(wq->cc & 0xffffff);
147} 158}
148 159
/* Peek at the CQE at the current consumer index without consuming it.
 *
 * Return: the CQE if its ownership bit matches the software ownership
 * value (i.e. hardware has completed it), or NULL when no new CQE is
 * available.
 */
static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq)
{
	u32 ci = mlx5_cqwq_get_ci(wq);
	struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
	/* Ownership alternates each wrap of the queue. */
	u8 cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
	u8 sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;

	if (cqe_ownership_bit != sw_ownership_val)
		return NULL;

	/* ensure cqe content is read after cqe ownership bit */
	dma_rmb();

	return cqe;
}
175
149static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq) 176static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
150{ 177{
151 return wq->cur_sz == wq->sz_m1; 178 return wq->cur_sz == wq->sz_m1;
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 556e1c31b5d0..f31a0b5377e1 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1103,6 +1103,9 @@ enum mlx5_mcam_feature_groups {
1103#define MLX5_CAP_FPGA(mdev, cap) \ 1103#define MLX5_CAP_FPGA(mdev, cap) \
1104 MLX5_GET(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap) 1104 MLX5_GET(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap)
1105 1105
1106#define MLX5_CAP64_FPGA(mdev, cap) \
1107 MLX5_GET64(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap)
1108
1106enum { 1109enum {
1107 MLX5_CMD_STAT_OK = 0x0, 1110 MLX5_CMD_STAT_OK = 0x0,
1108 MLX5_CMD_STAT_INT_ERR = 0x1, 1111 MLX5_CMD_STAT_INT_ERR = 0x1,
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 750701b3b863..2ab4ae3e3a1a 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -44,6 +44,7 @@
44#include <linux/workqueue.h> 44#include <linux/workqueue.h>
45#include <linux/mempool.h> 45#include <linux/mempool.h>
46#include <linux/interrupt.h> 46#include <linux/interrupt.h>
47#include <linux/idr.h>
47 48
48#include <linux/mlx5/device.h> 49#include <linux/mlx5/device.h>
49#include <linux/mlx5/doorbell.h> 50#include <linux/mlx5/doorbell.h>
@@ -110,6 +111,7 @@ enum {
110 MLX5_REG_DCBX_APP = 0x4021, 111 MLX5_REG_DCBX_APP = 0x4021,
111 MLX5_REG_FPGA_CAP = 0x4022, 112 MLX5_REG_FPGA_CAP = 0x4022,
112 MLX5_REG_FPGA_CTRL = 0x4023, 113 MLX5_REG_FPGA_CTRL = 0x4023,
114 MLX5_REG_FPGA_ACCESS_REG = 0x4024,
113 MLX5_REG_PCAP = 0x5001, 115 MLX5_REG_PCAP = 0x5001,
114 MLX5_REG_PMTU = 0x5003, 116 MLX5_REG_PMTU = 0x5003,
115 MLX5_REG_PTYS = 0x5004, 117 MLX5_REG_PTYS = 0x5004,
@@ -737,6 +739,14 @@ struct mlx5e_resources {
737 struct mlx5_sq_bfreg bfreg; 739 struct mlx5_sq_bfreg bfreg;
738}; 740};
739 741
742#define MLX5_MAX_RESERVED_GIDS 8
743
744struct mlx5_rsvd_gids {
745 unsigned int start;
746 unsigned int count;
747 struct ida ida;
748};
749
740struct mlx5_core_dev { 750struct mlx5_core_dev {
741 struct pci_dev *pdev; 751 struct pci_dev *pdev;
742 /* sync pci state */ 752 /* sync pci state */
@@ -766,6 +776,10 @@ struct mlx5_core_dev {
766 atomic_t num_qps; 776 atomic_t num_qps;
767 u32 issi; 777 u32 issi;
768 struct mlx5e_resources mlx5e_res; 778 struct mlx5e_resources mlx5e_res;
779 struct {
780 struct mlx5_rsvd_gids reserved_gids;
781 atomic_t roce_en;
782 } roce;
769#ifdef CONFIG_MLX5_FPGA 783#ifdef CONFIG_MLX5_FPGA
770 struct mlx5_fpga_device *fpga; 784 struct mlx5_fpga_device *fpga;
771#endif 785#endif
@@ -1045,6 +1059,11 @@ int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
1045 bool map_wc, bool fast_path); 1059 bool map_wc, bool fast_path);
1046void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg); 1060void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
1047 1061
1062unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
1063int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
1064 u8 roce_version, u8 roce_l3_type, const u8 *gid,
1065 const u8 *mac, bool vlan, u16 vlan_id);
1066
1048static inline int fw_initializing(struct mlx5_core_dev *dev) 1067static inline int fw_initializing(struct mlx5_core_dev *dev)
1049{ 1068{
1050 return ioread32be(&dev->iseg->initializing) >> 31; 1069 return ioread32be(&dev->iseg->initializing) >> 31;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index d6b99d5d0f24..87869c04849a 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -232,6 +232,11 @@ enum {
232 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e, 232 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e,
233 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940, 233 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940,
234 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941, 234 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941,
235 MLX5_CMD_OP_FPGA_CREATE_QP = 0x960,
236 MLX5_CMD_OP_FPGA_MODIFY_QP = 0x961,
237 MLX5_CMD_OP_FPGA_QUERY_QP = 0x962,
238 MLX5_CMD_OP_FPGA_DESTROY_QP = 0x963,
239 MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS = 0x964,
235 MLX5_CMD_OP_MAX 240 MLX5_CMD_OP_MAX
236}; 241};
237 242
@@ -600,7 +605,10 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
600 u8 tunnel_statless_gre[0x1]; 605 u8 tunnel_statless_gre[0x1];
601 u8 tunnel_stateless_vxlan[0x1]; 606 u8 tunnel_stateless_vxlan[0x1];
602 607
603 u8 reserved_at_20[0x20]; 608 u8 swp[0x1];
609 u8 swp_csum[0x1];
610 u8 swp_lso[0x1];
611 u8 reserved_at_23[0x1d];
604 612
605 u8 reserved_at_40[0x10]; 613 u8 reserved_at_40[0x10];
606 u8 lro_min_mss_size[0x10]; 614 u8 lro_min_mss_size[0x10];
@@ -2433,7 +2441,8 @@ struct mlx5_ifc_sqc_bits {
2433 u8 min_wqe_inline_mode[0x3]; 2441 u8 min_wqe_inline_mode[0x3];
2434 u8 state[0x4]; 2442 u8 state[0x4];
2435 u8 reg_umr[0x1]; 2443 u8 reg_umr[0x1];
2436 u8 reserved_at_d[0x13]; 2444 u8 allow_swp[0x1];
2445 u8 reserved_at_e[0x12];
2437 2446
2438 u8 reserved_at_20[0x8]; 2447 u8 reserved_at_20[0x8];
2439 u8 user_index[0x18]; 2448 u8 user_index[0x18];
@@ -8304,6 +8313,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
8304 struct mlx5_ifc_sltp_reg_bits sltp_reg; 8313 struct mlx5_ifc_sltp_reg_bits sltp_reg;
8305 struct mlx5_ifc_mtpps_reg_bits mtpps_reg; 8314 struct mlx5_ifc_mtpps_reg_bits mtpps_reg;
8306 struct mlx5_ifc_mtppse_reg_bits mtppse_reg; 8315 struct mlx5_ifc_mtppse_reg_bits mtppse_reg;
8316 struct mlx5_ifc_fpga_access_reg_bits fpga_access_reg;
8307 struct mlx5_ifc_fpga_ctrl_bits fpga_ctrl_bits; 8317 struct mlx5_ifc_fpga_ctrl_bits fpga_ctrl_bits;
8308 struct mlx5_ifc_fpga_cap_bits fpga_cap_bits; 8318 struct mlx5_ifc_fpga_cap_bits fpga_cap_bits;
8309 struct mlx5_ifc_mcqi_reg_bits mcqi_reg; 8319 struct mlx5_ifc_mcqi_reg_bits mcqi_reg;
diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h
index 0032d10ac6cf..255a88d08078 100644
--- a/include/linux/mlx5/mlx5_ifc_fpga.h
+++ b/include/linux/mlx5/mlx5_ifc_fpga.h
@@ -32,6 +32,14 @@
32#ifndef MLX5_IFC_FPGA_H 32#ifndef MLX5_IFC_FPGA_H
33#define MLX5_IFC_FPGA_H 33#define MLX5_IFC_FPGA_H
34 34
35enum {
36 MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX = 0x2c9,
37};
38
39enum {
40 MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC = 0x2,
41};
42
35struct mlx5_ifc_fpga_shell_caps_bits { 43struct mlx5_ifc_fpga_shell_caps_bits {
36 u8 max_num_qps[0x10]; 44 u8 max_num_qps[0x10];
37 u8 reserved_at_10[0x8]; 45 u8 reserved_at_10[0x8];
@@ -108,6 +116,15 @@ struct mlx5_ifc_fpga_cap_bits {
108 u8 reserved_at_500[0x300]; 116 u8 reserved_at_500[0x300];
109}; 117};
110 118
119enum {
120 MLX5_FPGA_CTRL_OPERATION_LOAD = 0x1,
121 MLX5_FPGA_CTRL_OPERATION_RESET = 0x2,
122 MLX5_FPGA_CTRL_OPERATION_FLASH_SELECT = 0x3,
123 MLX5_FPGA_CTRL_OPERATION_SANDBOX_BYPASS_ON = 0x4,
124 MLX5_FPGA_CTRL_OPERATION_SANDBOX_BYPASS_OFF = 0x5,
125 MLX5_FPGA_CTRL_OPERATION_RESET_SANDBOX = 0x6,
126};
127
111struct mlx5_ifc_fpga_ctrl_bits { 128struct mlx5_ifc_fpga_ctrl_bits {
112 u8 reserved_at_0[0x8]; 129 u8 reserved_at_0[0x8];
113 u8 operation[0x8]; 130 u8 operation[0x8];
@@ -141,4 +158,275 @@ struct mlx5_ifc_fpga_error_event_bits {
141 u8 reserved_at_60[0x80]; 158 u8 reserved_at_60[0x80];
142}; 159};
143 160
161#define MLX5_FPGA_ACCESS_REG_SIZE_MAX 64
162
163struct mlx5_ifc_fpga_access_reg_bits {
164 u8 reserved_at_0[0x20];
165
166 u8 reserved_at_20[0x10];
167 u8 size[0x10];
168
169 u8 address[0x40];
170
171 u8 data[0][0x8];
172};
173
174enum mlx5_ifc_fpga_qp_state {
175 MLX5_FPGA_QPC_STATE_INIT = 0x0,
176 MLX5_FPGA_QPC_STATE_ACTIVE = 0x1,
177 MLX5_FPGA_QPC_STATE_ERROR = 0x2,
178};
179
180enum mlx5_ifc_fpga_qp_type {
181 MLX5_FPGA_QPC_QP_TYPE_SHELL_QP = 0x0,
182 MLX5_FPGA_QPC_QP_TYPE_SANDBOX_QP = 0x1,
183};
184
185enum mlx5_ifc_fpga_qp_service_type {
186 MLX5_FPGA_QPC_ST_RC = 0x0,
187};
188
189struct mlx5_ifc_fpga_qpc_bits {
190 u8 state[0x4];
191 u8 reserved_at_4[0x1b];
192 u8 qp_type[0x1];
193
194 u8 reserved_at_20[0x4];
195 u8 st[0x4];
196 u8 reserved_at_28[0x10];
197 u8 traffic_class[0x8];
198
199 u8 ether_type[0x10];
200 u8 prio[0x3];
201 u8 dei[0x1];
202 u8 vid[0xc];
203
204 u8 reserved_at_60[0x20];
205
206 u8 reserved_at_80[0x8];
207 u8 next_rcv_psn[0x18];
208
209 u8 reserved_at_a0[0x8];
210 u8 next_send_psn[0x18];
211
212 u8 reserved_at_c0[0x10];
213 u8 pkey[0x10];
214
215 u8 reserved_at_e0[0x8];
216 u8 remote_qpn[0x18];
217
218 u8 reserved_at_100[0x15];
219 u8 rnr_retry[0x3];
220 u8 reserved_at_118[0x5];
221 u8 retry_count[0x3];
222
223 u8 reserved_at_120[0x20];
224
225 u8 reserved_at_140[0x10];
226 u8 remote_mac_47_32[0x10];
227
228 u8 remote_mac_31_0[0x20];
229
230 u8 remote_ip[16][0x8];
231
232 u8 reserved_at_200[0x40];
233
234 u8 reserved_at_240[0x10];
235 u8 fpga_mac_47_32[0x10];
236
237 u8 fpga_mac_31_0[0x20];
238
239 u8 fpga_ip[16][0x8];
240};
241
242struct mlx5_ifc_fpga_create_qp_in_bits {
243 u8 opcode[0x10];
244 u8 reserved_at_10[0x10];
245
246 u8 reserved_at_20[0x10];
247 u8 op_mod[0x10];
248
249 u8 reserved_at_40[0x40];
250
251 struct mlx5_ifc_fpga_qpc_bits fpga_qpc;
252};
253
254struct mlx5_ifc_fpga_create_qp_out_bits {
255 u8 status[0x8];
256 u8 reserved_at_8[0x18];
257
258 u8 syndrome[0x20];
259
260 u8 reserved_at_40[0x8];
261 u8 fpga_qpn[0x18];
262
263 u8 reserved_at_60[0x20];
264
265 struct mlx5_ifc_fpga_qpc_bits fpga_qpc;
266};
267
268struct mlx5_ifc_fpga_modify_qp_in_bits {
269 u8 opcode[0x10];
270 u8 reserved_at_10[0x10];
271
272 u8 reserved_at_20[0x10];
273 u8 op_mod[0x10];
274
275 u8 reserved_at_40[0x8];
276 u8 fpga_qpn[0x18];
277
278 u8 field_select[0x20];
279
280 struct mlx5_ifc_fpga_qpc_bits fpga_qpc;
281};
282
283struct mlx5_ifc_fpga_modify_qp_out_bits {
284 u8 status[0x8];
285 u8 reserved_at_8[0x18];
286
287 u8 syndrome[0x20];
288
289 u8 reserved_at_40[0x40];
290};
291
292struct mlx5_ifc_fpga_query_qp_in_bits {
293 u8 opcode[0x10];
294 u8 reserved_at_10[0x10];
295
296 u8 reserved_at_20[0x10];
297 u8 op_mod[0x10];
298
299 u8 reserved_at_40[0x8];
300 u8 fpga_qpn[0x18];
301
302 u8 reserved_at_60[0x20];
303};
304
305struct mlx5_ifc_fpga_query_qp_out_bits {
306 u8 status[0x8];
307 u8 reserved_at_8[0x18];
308
309 u8 syndrome[0x20];
310
311 u8 reserved_at_40[0x40];
312
313 struct mlx5_ifc_fpga_qpc_bits fpga_qpc;
314};
315
316struct mlx5_ifc_fpga_query_qp_counters_in_bits {
317 u8 opcode[0x10];
318 u8 reserved_at_10[0x10];
319
320 u8 reserved_at_20[0x10];
321 u8 op_mod[0x10];
322
323 u8 clear[0x1];
324 u8 reserved_at_41[0x7];
325 u8 fpga_qpn[0x18];
326
327 u8 reserved_at_60[0x20];
328};
329
330struct mlx5_ifc_fpga_query_qp_counters_out_bits {
331 u8 status[0x8];
332 u8 reserved_at_8[0x18];
333
334 u8 syndrome[0x20];
335
336 u8 reserved_at_40[0x40];
337
338 u8 rx_ack_packets[0x40];
339
340 u8 rx_send_packets[0x40];
341
342 u8 tx_ack_packets[0x40];
343
344 u8 tx_send_packets[0x40];
345
346 u8 rx_total_drop[0x40];
347
348 u8 reserved_at_1c0[0x1c0];
349};
350
351struct mlx5_ifc_fpga_destroy_qp_in_bits {
352 u8 opcode[0x10];
353 u8 reserved_at_10[0x10];
354
355 u8 reserved_at_20[0x10];
356 u8 op_mod[0x10];
357
358 u8 reserved_at_40[0x8];
359 u8 fpga_qpn[0x18];
360
361 u8 reserved_at_60[0x20];
362};
363
364struct mlx5_ifc_fpga_destroy_qp_out_bits {
365 u8 status[0x8];
366 u8 reserved_at_8[0x18];
367
368 u8 syndrome[0x20];
369
370 u8 reserved_at_40[0x40];
371};
372
373struct mlx5_ifc_ipsec_extended_cap_bits {
374 u8 encapsulation[0x20];
375
376 u8 reserved_0[0x15];
377 u8 ipv4_fragment[0x1];
378 u8 ipv6[0x1];
379 u8 esn[0x1];
380 u8 lso[0x1];
381 u8 transport_and_tunnel_mode[0x1];
382 u8 tunnel_mode[0x1];
383 u8 transport_mode[0x1];
384 u8 ah_esp[0x1];
385 u8 esp[0x1];
386 u8 ah[0x1];
387 u8 ipv4_options[0x1];
388
389 u8 auth_alg[0x20];
390
391 u8 enc_alg[0x20];
392
393 u8 sa_cap[0x20];
394
395 u8 reserved_1[0x10];
396 u8 number_of_ipsec_counters[0x10];
397
398 u8 ipsec_counters_addr_low[0x20];
399 u8 ipsec_counters_addr_high[0x20];
400};
401
402struct mlx5_ifc_ipsec_counters_bits {
403 u8 dec_in_packets[0x40];
404
405 u8 dec_out_packets[0x40];
406
407 u8 dec_bypass_packets[0x40];
408
409 u8 enc_in_packets[0x40];
410
411 u8 enc_out_packets[0x40];
412
413 u8 enc_bypass_packets[0x40];
414
415 u8 drop_dec_packets[0x40];
416
417 u8 failed_auth_dec_packets[0x40];
418
419 u8 drop_enc_packets[0x40];
420
421 u8 success_add_sa[0x40];
422
423 u8 fail_add_sa[0x40];
424
425 u8 success_delete_sa[0x40];
426
427 u8 fail_delete_sa[0x40];
428
429 u8 dropped_cmd[0x40];
430};
431
144#endif /* MLX5_IFC_FPGA_H */ 432#endif /* MLX5_IFC_FPGA_H */
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 1f637f4d1265..6f41270d80c0 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -225,10 +225,20 @@ enum {
225 MLX5_ETH_WQE_INSERT_VLAN = 1 << 15, 225 MLX5_ETH_WQE_INSERT_VLAN = 1 << 15,
226}; 226};
227 227
228enum {
229 MLX5_ETH_WQE_SWP_INNER_L3_IPV6 = 1 << 0,
230 MLX5_ETH_WQE_SWP_INNER_L4_UDP = 1 << 1,
231 MLX5_ETH_WQE_SWP_OUTER_L3_IPV6 = 1 << 4,
232 MLX5_ETH_WQE_SWP_OUTER_L4_UDP = 1 << 5,
233};
234
228struct mlx5_wqe_eth_seg { 235struct mlx5_wqe_eth_seg {
229 u8 rsvd0[4]; 236 u8 swp_outer_l4_offset;
237 u8 swp_outer_l3_offset;
238 u8 swp_inner_l4_offset;
239 u8 swp_inner_l3_offset;
230 u8 cs_flags; 240 u8 cs_flags;
231 u8 rsvd1; 241 u8 swp_flags;
232 __be16 mss; 242 __be16 mss;
233 __be32 rsvd2; 243 __be32 rsvd2;
234 union { 244 union {