Diffstat (limited to 'drivers/infiniband/hw/mlx4/qp.c')
-rw-r--r--   drivers/infiniband/hw/mlx4/qp.c   62
1 files changed, 39 insertions, 23 deletions
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index efb9eff8906c..9c5150c3cb31 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -964,9 +964,10 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
 			pr_warn("modify QP %06x to RESET failed.\n",
 				qp->mqp.qpn);
-		if (qp->pri.smac) {
+		if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
 			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
 			qp->pri.smac = 0;
+			qp->pri.smac_port = 0;
 		}
 		if (qp->alt.smac) {
 			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
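The check above (and its repeats later in __mlx4_ib_modify_qp) treats the pair (smac, smac_port) as the record of a MAC registration: a registered MAC whose value happens to be 0 must still be released, and a non-zero smac_port is what marks that a registration is held. A minimal userspace sketch of that bookkeeping (structure and function names here are illustrative, not driver API):

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-in for the (smac, smac_port) pair kept per path */
struct smac_ref {
	uint64_t smac;		/* registered MAC value; 0 is a legal value */
	uint8_t  smac_port;	/* 0 means no registration is currently held */
};

static void release_smac(struct smac_ref *ref)
{
	/* same condition as the hunk above: release if a MAC value is set,
	 * or if the value is 0 but a port still records a registration */
	if (ref->smac || (!ref->smac && ref->smac_port)) {
		printf("unregister mac %012llx on port %u\n",
		       (unsigned long long)ref->smac, (unsigned int)ref->smac_port);
		ref->smac = 0;
		ref->smac_port = 0;
	}
}

int main(void)
{
	struct smac_ref held = { .smac = 0, .smac_port = 1 };	/* zero MAC, still registered */
	struct smac_ref none = { 0 };				/* nothing registered */

	release_smac(&held);	/* prints and clears */
	release_smac(&none);	/* no-op */
	return 0;
}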
@@ -1325,7 +1326,8 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 		 * If one was already assigned, but the new mac differs,
 		 * unregister the old one and register the new one.
 		 */
-		if (!smac_info->smac || smac_info->smac != smac) {
+		if ((!smac_info->smac && !smac_info->smac_port) ||
+		    smac_info->smac != smac) {
 			/* register candidate now, unreg if needed, after success */
 			smac_index = mlx4_register_mac(dev->dev, port, smac);
 			if (smac_index >= 0) {
@@ -1390,21 +1392,13 @@ static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac,
 				    struct mlx4_qp_context *context)
 {
-	struct net_device *ndev;
 	u64 u64_mac;
 	int smac_index;
 
-
-	ndev = dev->iboe.netdevs[qp->port - 1];
-	if (ndev) {
-		smac = ndev->dev_addr;
-		u64_mac = mlx4_mac_to_u64(smac);
-	} else {
-		u64_mac = dev->dev->caps.def_mac[qp->port];
-	}
+	u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]);
 
 	context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
-	if (!qp->pri.smac) {
+	if (!qp->pri.smac && !qp->pri.smac_port) {
 		smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
 		if (smac_index >= 0) {
 			qp->pri.candidate_smac_index = smac_index;
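handle_eth_ud_smac_index() now takes the port MAC from dev->iboe.mac[], a per-port 64-bit atomic kept up to date elsewhere in the driver, instead of dereferencing iboe.netdevs[] on the QP modify path. A rough userspace sketch of that publish/consume pattern, with C11 atomics standing in for the kernel's atomic64_set()/atomic64_read(); the array size, names, and MAC value are illustrative:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_PORTS 2	/* illustrative */

/* stand-in for dev->iboe.mac[]: one cached MAC per port */
static _Atomic uint64_t port_mac[NUM_PORTS];

/* producer side: called when the port's MAC appears or changes */
static void cache_port_mac(int port, uint64_t mac)
{
	atomic_store(&port_mac[port - 1], mac);
}

/* consumer side (QP modify path): no net_device dereference needed */
static uint64_t read_port_mac(int port)
{
	return atomic_load(&port_mac[port - 1]);
}

int main(void)
{
	cache_port_mac(1, 0x0002c9abcdefULL);
	printf("port 1 mac = %012llx\n",
	       (unsigned long long)read_port_mac(1));
	return 0;
}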
@@ -1432,6 +1426,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	int steer_qp = 0;
 	int err = -EINVAL;
 
+	/* APM is not supported under RoCE */
+	if (attr_mask & IB_QP_ALT_PATH &&
+	    rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
+	    IB_LINK_LAYER_ETHERNET)
+		return -ENOTSUPP;
+
 	context = kzalloc(sizeof *context, GFP_KERNEL);
 	if (!context)
 		return -ENOMEM;
@@ -1682,7 +1682,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 					MLX4_IB_LINK_TYPE_ETH;
 		if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
 			/* set QP to receive both tunneled & non-tunneled packets */
-			if (!(context->flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)))
+			if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET)))
 				context->srqn = cpu_to_be32(7 << 28);
 		}
 	}
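context->flags is a big-endian (__be32) field, so testing it with a host-order mask only matches on big-endian machines; the fix wraps the mask in cpu_to_be32(). A small userspace demonstration of the difference, with glibc's htobe32() standing in for cpu_to_be32() and an arbitrary bit position chosen for the demo:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_BIT 13	/* arbitrary bit position, for illustration only */

int main(void)
{
	/* the field is stored big-endian, as the device expects */
	uint32_t flags = htobe32(1u << FLAG_BIT);

	/* host-order mask: misses the bit on little-endian hosts */
	printf("host-order mask: %s\n",
	       (flags & (1u << FLAG_BIT)) ? "bit seen" : "bit missed");

	/* big-endian mask: matches the stored representation everywhere */
	printf("be32 mask:       %s\n",
	       (flags & htobe32(1u << FLAG_BIT)) ? "bit seen" : "bit missed");
	return 0;
}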
@@ -1786,9 +1786,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		if (qp->flags & MLX4_IB_QP_NETIF)
 			mlx4_ib_steer_qp_reg(dev, qp, 0);
 	}
-	if (qp->pri.smac) {
+	if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
 		mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
 		qp->pri.smac = 0;
+		qp->pri.smac_port = 0;
 	}
 	if (qp->alt.smac) {
 		mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
@@ -1812,11 +1813,12 @@ out:
 	if (err && steer_qp)
 		mlx4_ib_steer_qp_reg(dev, qp, 0);
 	kfree(context);
-	if (qp->pri.candidate_smac) {
+	if (qp->pri.candidate_smac ||
+	    (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) {
 		if (err) {
 			mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
 		} else {
-			if (qp->pri.smac)
+			if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port))
 				mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
 			qp->pri.smac = qp->pri.candidate_smac;
 			qp->pri.smac_index = qp->pri.candidate_smac_index;
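The two hunks around _mlx4_set_path and the out: label follow a register-candidate-then-commit pattern: the new MAC is registered up front ("register candidate now, unreg if needed, after success"); only once the QP modify succeeds is the old registration dropped and the candidate promoted, otherwise the candidate itself is released. A userspace mock of that flow under assumed names (register_mac/unregister_mac/modify_path_smac are illustrative, not driver API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct path_smac {
	uint64_t smac, candidate_smac;
	int smac_port, candidate_smac_port;
};

static int register_mac(int port, uint64_t mac)
{
	printf("  register   %012llx on port %d\n", (unsigned long long)mac, port);
	return 0;	/* pretend table index 0 */
}

static void unregister_mac(int port, uint64_t mac)
{
	printf("  unregister %012llx on port %d\n", (unsigned long long)mac, port);
}

/* register the candidate first; commit or roll back after the modify */
static void modify_path_smac(struct path_smac *p, int port, uint64_t new_mac, bool err)
{
	if (register_mac(port, new_mac) < 0)
		return;
	p->candidate_smac = new_mac;
	p->candidate_smac_port = port;

	/* ... the QP modify would happen here; 'err' is its outcome ... */

	if (err) {
		unregister_mac(p->candidate_smac_port, p->candidate_smac);
	} else {
		if (p->smac || (!p->smac && p->smac_port))
			unregister_mac(p->smac_port, p->smac);
		p->smac = p->candidate_smac;
		p->smac_port = p->candidate_smac_port;
	}
	p->candidate_smac = 0;
	p->candidate_smac_port = 0;
}

int main(void)
{
	struct path_smac p = { .smac = 0x0002c9000001ULL, .smac_port = 1 };

	puts("successful modify:");
	modify_path_smac(&p, 1, 0x0002c9000002ULL, false);
	puts("failed modify:");
	modify_path_smac(&p, 1, 0x0002c9000003ULL, true);
	return 0;
}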
@@ -2089,6 +2091,16 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
 	return 0;
 }
 
+static void mlx4_u64_to_smac(u8 *dst_mac, u64 src_mac)
+{
+	int i;
+
+	for (i = ETH_ALEN; i; i--) {
+		dst_mac[i - 1] = src_mac & 0xff;
+		src_mac >>= 8;
+	}
+}
+
 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 			    void *wqe, unsigned *mlx_seg_len)
 {
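The new mlx4_u64_to_smac() helper unpacks the low 48 bits of the cached u64 into a 6-byte MAC, most significant byte first. A standalone copy with a worked example (the MAC value is arbitrary):

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

static void u64_to_smac(uint8_t *dst_mac, uint64_t src_mac)
{
	int i;

	/* peel bytes off the low end, filling the array back to front */
	for (i = ETH_ALEN; i; i--) {
		dst_mac[i - 1] = src_mac & 0xff;
		src_mac >>= 8;
	}
}

int main(void)
{
	uint8_t mac[ETH_ALEN];

	u64_to_smac(mac, 0x0002c9abcdefULL);
	/* prints 00:02:c9:ab:cd:ef */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}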
@@ -2203,7 +2215,6 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 	}
 
 	if (is_eth) {
-		u8 *smac;
 		struct in6_addr in6;
 
 		u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
@@ -2216,12 +2227,17 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 		memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
 		memcpy(&in6, sgid.raw, sizeof(in6));
 
-		if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev))
-			smac = to_mdev(sqp->qp.ibqp.device)->
-				iboe.netdevs[sqp->qp.port - 1]->dev_addr;
-		else	/* use the src mac of the tunnel */
-			smac = ah->av.eth.s_mac;
-		memcpy(sqp->ud_header.eth.smac_h, smac, 6);
+		if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+			u64 mac = atomic64_read(&to_mdev(ib_dev)->iboe.mac[sqp->qp.port - 1]);
+			u8 smac[ETH_ALEN];
+
+			mlx4_u64_to_smac(smac, mac);
+			memcpy(sqp->ud_header.eth.smac_h, smac, ETH_ALEN);
+		} else {
+			/* use the src mac of the tunnel */
+			memcpy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac, ETH_ALEN);
+		}
+
 		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
 			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
 		if (!is_vlan) {
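The last hunk picks the source MAC for the RoCE QP1 header from one of two places: the cached port MAC in native mode, or the source MAC carried in the address vector when the header is built on behalf of a VF (mlx4_is_mfunc()). A compact userspace sketch of just that selection; function and variable names are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* native mode: use the port's own MAC; multi-function (SR-IOV) mode:
 * use the source MAC that arrived with the tunnelled address vector */
static void pick_qp1_smac(bool is_mfunc, const uint8_t *port_smac,
			  const uint8_t *tunnel_smac, uint8_t *smac_h)
{
	memcpy(smac_h, is_mfunc ? tunnel_smac : port_smac, ETH_ALEN);
}

int main(void)
{
	const uint8_t port_mac[ETH_ALEN]   = { 0x00, 0x02, 0xc9, 0xab, 0xcd, 0xef };
	const uint8_t tunnel_mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t smac_h[ETH_ALEN];

	pick_qp1_smac(false, port_mac, tunnel_mac, smac_h);
	printf("native mode: first byte %02x\n", smac_h[0]);	/* 00 from port_mac */

	pick_qp1_smac(true, port_mac, tunnel_mac, smac_h);
	printf("mfunc mode:  last byte  %02x\n", smac_h[5]);	/* 55 from tunnel_mac */
	return 0;
}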