author		Or Gerlitz <ogerlitz@mellanox.com>	2014-08-27 09:47:49 -0400
committer	David S. Miller <davem@davemloft.net>	2014-08-29 23:13:00 -0400
commit		d2fce8a9060db3af7e1b25e259b251da17f6a0d6 (patch)
tree		7b13e6461cae283319d915ddc01706694faaa6dc /drivers/infiniband
parent		b95089d00c04712a9d4655d5c638930ac24b7bd3 (diff)
mlx4: Set user-space raw Ethernet QPs to properly handle VXLAN traffic
Raw Ethernet QPs opened from user-space lack the proper setup to
receive/handle VXLAN traffic when VXLAN offloads are enabled. Fix that
by adding a tunnel steering rule on top of the normal unicast steering
rule and setting the tunnel_type field in the QP context.

Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
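For reference, the single-ETH-spec unicast rule this patch keys off of is what a user-space application attaches through the libibverbs flow steering API. The sketch below is illustrative only and not part of the patch: attach_unicast_rule is a made-up helper name, and device/QP setup is elided.

#include <stdint.h>
#include <string.h>
#include <infiniband/verbs.h>

/* Attach a unicast L2 steering rule to an existing raw packet QP.
 * On a VXLAN-offload-enabled mlx4 device, the driver now installs a
 * tunnel steering rule underneath a rule shaped like this one.
 */
static struct ibv_flow *attach_unicast_rule(struct ibv_qp *qp, uint8_t port,
					    const uint8_t dst_mac[6])
{
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
	} rule;

	memset(&rule, 0, sizeof(rule));

	rule.attr.type = IBV_FLOW_ATTR_NORMAL;	/* kernel side checks IB_FLOW_ATTR_NORMAL */
	rule.attr.size = sizeof(rule);
	rule.attr.num_of_specs = 1;		/* exactly one ETH spec, as the new helper expects */
	rule.attr.port = port;

	rule.eth.type = IBV_FLOW_SPEC_ETH;
	rule.eth.size = sizeof(rule.eth);
	memcpy(rule.eth.val.dst_mac, dst_mac, 6);
	memset(rule.eth.mask.dst_mac, 0xff, 6);	/* match the full destination MAC */

	return ibv_create_flow(qp, &rule.attr);
}

The flow is later detached with ibv_destroy_flow(); since the extra tunnel rule is tracked per flow in mflow->reg_id[], it is torn down together with the unicast rule.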
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/mlx4/main.c	30
-rw-r--r--	drivers/infiniband/hw/mlx4/qp.c	8
2 files changed, 37 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index e1e558a3d692..af8256353c7d 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1089,6 +1089,30 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
 	return err;
 }
 
+static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
+				    u64 *reg_id)
+{
+	void *ib_flow;
+	union ib_flow_spec *ib_spec;
+	struct mlx4_dev *dev = to_mdev(qp->device)->dev;
+	int err = 0;
+
+	if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+		return 0; /* do nothing */
+
+	ib_flow = flow_attr + 1;
+	ib_spec = (union ib_flow_spec *)ib_flow;
+
+	if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
+		return 0; /* do nothing */
+
+	err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
+				    flow_attr->port, qp->qp_num,
+				    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
+				    reg_id);
+	return err;
+}
+
 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 					   struct ib_flow_attr *flow_attr,
 					   int domain)
@@ -1136,6 +1160,12 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 		i++;
 	}
 
+	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
+		err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
+		if (err)
+			goto err_free;
+	}
+
 	return &mflow->ibflow;
 
 err_free:
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 67780452f0cf..efb9eff8906c 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1677,9 +1677,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		}
 	}
 
-	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
+	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
 		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
 					MLX4_IB_LINK_TYPE_ETH;
+		if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+			/* set QP to receive both tunneled & non-tunneled packets */
+			if (!(context->flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)))
+				context->srqn = cpu_to_be32(7 << 28);
+		}
+	}
 
 	if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
 		int is_eth = rdma_port_get_link_layer(