-rw-r--r--  drivers/infiniband/hw/mlx4/main.c              | 30
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c                |  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c       | 38
-rw-r--r--  include/linux/mlx4/device.h                    |  3
5 files changed, 80 insertions(+), 30 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index e1e558a3d692..af8256353c7d 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1089,6 +1089,30 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
 	return err;
 }
 
+static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
+				    u64 *reg_id)
+{
+	void *ib_flow;
+	union ib_flow_spec *ib_spec;
+	struct mlx4_dev	*dev = to_mdev(qp->device)->dev;
+	int err = 0;
+
+	if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+		return 0; /* do nothing */
+
+	ib_flow = flow_attr + 1;
+	ib_spec = (union ib_flow_spec *)ib_flow;
+
+	if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
+		return 0; /* do nothing */
+
+	err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
+				    flow_attr->port, qp->qp_num,
+				    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
+				    reg_id);
+	return err;
+}
+
 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 				    struct ib_flow_attr *flow_attr,
 				    int domain)
@@ -1136,6 +1160,12 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 		i++;
 	}
 
+	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
+		err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
+		if (err)
+			goto err_free;
+	}
+
 	return &mflow->ibflow;
 
 err_free:
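
Note on the new mlx4_ib_tunnel_steer_add(): it relies on the uverbs layout in which the flow specs sit in memory directly after struct ib_flow_attr, which is what makes the "flow_attr + 1" arithmetic valid. A minimal sketch of such a buffer holding the single Ethernet spec the helper expects (illustrative only, not part of this patch; field values are placeholders):

	struct {
		struct ib_flow_attr	attr;	/* header */
		struct ib_flow_spec_eth	eth;	/* lands at "flow_attr + 1" */
	} flow = { };

	flow.attr.type         = IB_FLOW_ATTR_NORMAL;
	flow.attr.num_of_specs = 1;		/* exactly one spec, as the helper requires */
	flow.attr.port         = 1;		/* placeholder port */
	flow.eth.type          = IB_FLOW_SPEC_ETH;
	flow.eth.size          = sizeof(flow.eth);
	/* flow.eth.val.dst_mac / flow.eth.mask.dst_mac hold the target MAC */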
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 67780452f0cf..efb9eff8906c 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1677,9 +1677,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		}
 	}
 
-	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
+	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
 		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
 					MLX4_IB_LINK_TYPE_ETH;
+		if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+			/* set QP to receive both tunneled & non-tunneled packets */
+			if (!(context->flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)))
+				context->srqn = cpu_to_be32(7 << 28);
+		}
+	}
 
 	if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
 		int is_eth = rdma_port_get_link_layer(
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index bb536aa613f4..abddcf8c40aa 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -474,39 +474,12 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad
 				    int qpn, u64 *reg_id)
 {
 	int err;
-	struct mlx4_spec_list spec_eth_outer = { {NULL} };
-	struct mlx4_spec_list spec_vxlan = { {NULL} };
-	struct mlx4_spec_list spec_eth_inner = { {NULL} };
-
-	struct mlx4_net_trans_rule rule = {
-		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
-		.exclusive = 0,
-		.allow_loopback = 1,
-		.promisc_mode = MLX4_FS_REGULAR,
-		.priority = MLX4_DOMAIN_NIC,
-	};
-
-	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
 
 	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
 		return 0; /* do nothing */
 
-	rule.port = priv->port;
-	rule.qpn = qpn;
-	INIT_LIST_HEAD(&rule.list);
-
-	spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
-	memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
-	memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
-
-	spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */
-	spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */
-
-	list_add_tail(&spec_eth_outer.list, &rule.list);
-	list_add_tail(&spec_vxlan.list, &rule.list);
-	list_add_tail(&spec_eth_inner.list, &rule.list);
-
-	err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id);
+	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
+				    MLX4_DOMAIN_NIC, reg_id);
 	if (err) {
 		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
 		return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index d80e7a6fac74..ca0f98c95105 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1020,6 +1020,44 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
 }
 EXPORT_SYMBOL_GPL(mlx4_flow_detach);
 
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+			  int port, int qpn, u16 prio, u64 *reg_id)
+{
+	int err;
+	struct mlx4_spec_list spec_eth_outer = { {NULL} };
+	struct mlx4_spec_list spec_vxlan = { {NULL} };
+	struct mlx4_spec_list spec_eth_inner = { {NULL} };
+
+	struct mlx4_net_trans_rule rule = {
+		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
+		.exclusive = 0,
+		.allow_loopback = 1,
+		.promisc_mode = MLX4_FS_REGULAR,
+	};
+
+	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+	rule.port = port;
+	rule.qpn = qpn;
+	rule.priority = prio;
+	INIT_LIST_HEAD(&rule.list);
+
+	spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
+	memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
+	memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+
+	spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */
+	spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */
+
+	list_add_tail(&spec_eth_outer.list, &rule.list);
+	list_add_tail(&spec_vxlan.list, &rule.list);
+	list_add_tail(&spec_eth_inner.list, &rule.list);
+
+	err = mlx4_flow_attach(dev, &rule, reg_id);
+	return err;
+}
+EXPORT_SYMBOL(mlx4_tunnel_steer_add);
+
 int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
 				      u32 max_range_qpn)
 {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 071f6b234604..511c6e0d21a9 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1196,6 +1196,9 @@ int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
 			      enum mlx4_net_trans_rule_id id);
 int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
 
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+			  int port, int qpn, u16 prio, u64 *reg_id);
+
 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
 			  int i, int val);
 
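
For reference, the reg_id returned by the new helper is the same registration id produced by mlx4_flow_attach(), so teardown goes through the existing mlx4_flow_detach(). A minimal usage sketch (illustrative only; dev, mac and qpn are placeholders, not part of this patch):

	u64 reg_id;
	int err;

	err = mlx4_tunnel_steer_add(dev, mac, 1 /* port */, qpn,
				    MLX4_DOMAIN_NIC, &reg_id);
	if (err)
		return err;

	/* ... later, remove the steering rule via the returned id ... */
	mlx4_flow_detach(dev, reg_id);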