aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/infiniband/hw/mlx4
diff options
context:
space:
mode:
authorJack Morgenstein <jackm@dev.mellanox.co.il>2014-05-29 09:31:02 -0400
committerRoland Dreier <roland@purestorage.com>2014-05-30 00:12:58 -0400
commit97982f5a91e91dab26dd0246083b9adf3ba8b2e3 (patch)
treed766cc5b7f15a0718f3eda2d8ea0cd23b2d25c0c /drivers/infiniband/hw/mlx4
parent61565013cf7024e8aa52e0a8e78208a955ce7e5f (diff)
IB/mlx4: Preparation for VFs to issue/receive SMI (QP0) requests/responses
Currently, VFs in SRIOV are denied QP0 access. The main reason for this decision is security, since Subnet Management Datagrams (SMPs) are not restricted by network partitioning and may affect the physical network topology. Moreover, even the SM may be denied access from portions of the network by setting management keys unknown to the SM. However, it is desirable to grant SMI access to certain privileged VFs, so that certain network management activities may be conducted within virtual machines instead of the hypervisor. This commit does the following: 1. Create QP0 tunnel QPs for all VFs. 2. Discard SMI mads sent-from/received-for non-privileged VFs in the hypervisor MAD multiplex/demultiplex logic. SMI mads from/for privileged VFs are allowed to pass. 3. MAD_IFC wrapper changes/fixes. For non-privileged VFs, only host-view MAD_IFC commands are allowed, and only for SMI LID-Routed GET mads. For privileged VFs, there are no restrictions. This commit does not allow privileged VFs as yet. To determine if a VF is privileged, it calls function mlx4_vf_smi_enabled(). This function returns 0 unconditionally for now. The next two commits allow defining and activating privileged VFs. Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il> Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com> Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers/infiniband/hw/mlx4')
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c40
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c16
2 files changed, 32 insertions, 24 deletions
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index fd36ec672632..287ad0564acd 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -478,10 +478,6 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
478 if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE) 478 if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
479 return -EAGAIN; 479 return -EAGAIN;
480 480
481 /* QP0 forwarding only for Dom0 */
482 if (!dest_qpt && (mlx4_master_func_num(dev->dev) != slave))
483 return -EINVAL;
484
485 if (!dest_qpt) 481 if (!dest_qpt)
486 tun_qp = &tun_ctx->qp[0]; 482 tun_qp = &tun_ctx->qp[0];
487 else 483 else
@@ -667,6 +663,21 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
667 } 663 }
668 /* Class-specific handling */ 664 /* Class-specific handling */
669 switch (mad->mad_hdr.mgmt_class) { 665 switch (mad->mad_hdr.mgmt_class) {
666 case IB_MGMT_CLASS_SUBN_LID_ROUTED:
667 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
668 /* 255 indicates the dom0 */
669 if (slave != 255 && slave != mlx4_master_func_num(dev->dev)) {
670 if (!mlx4_vf_smi_enabled(dev->dev, slave, port))
671 return -EPERM;
672 /* for a VF. drop unsolicited MADs */
673 if (!(mad->mad_hdr.method & IB_MGMT_METHOD_RESP)) {
674 mlx4_ib_warn(ibdev, "demux QP0. rejecting unsolicited mad for slave %d class 0x%x, method 0x%x\n",
675 slave, mad->mad_hdr.mgmt_class,
676 mad->mad_hdr.method);
677 return -EINVAL;
678 }
679 }
680 break;
670 case IB_MGMT_CLASS_SUBN_ADM: 681 case IB_MGMT_CLASS_SUBN_ADM:
671 if (mlx4_ib_demux_sa_handler(ibdev, port, slave, 682 if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
672 (struct ib_sa_mad *) mad)) 683 (struct ib_sa_mad *) mad))
@@ -1165,10 +1176,6 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
1165 if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE) 1176 if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
1166 return -EAGAIN; 1177 return -EAGAIN;
1167 1178
1168 /* QP0 forwarding only for Dom0 */
1169 if (dest_qpt == IB_QPT_SMI && (mlx4_master_func_num(dev->dev) != slave))
1170 return -EINVAL;
1171
1172 if (dest_qpt == IB_QPT_SMI) { 1179 if (dest_qpt == IB_QPT_SMI) {
1173 src_qpnum = 0; 1180 src_qpnum = 0;
1174 sqp = &sqp_ctx->qp[0]; 1181 sqp = &sqp_ctx->qp[0];
@@ -1285,11 +1292,6 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
1285 "belongs to another slave\n", wc->src_qp); 1292 "belongs to another slave\n", wc->src_qp);
1286 return; 1293 return;
1287 } 1294 }
1288 if (slave != mlx4_master_func_num(dev->dev) && !(wc->src_qp & 0x2)) {
1289 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
1290 "non-master trying to send QP0 packets\n", wc->src_qp);
1291 return;
1292 }
1293 1295
1294 /* Map transaction ID */ 1296 /* Map transaction ID */
1295 ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map, 1297 ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
@@ -1317,6 +1319,12 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
1317 1319
1318 /* Class-specific handling */ 1320 /* Class-specific handling */
1319 switch (tunnel->mad.mad_hdr.mgmt_class) { 1321 switch (tunnel->mad.mad_hdr.mgmt_class) {
1322 case IB_MGMT_CLASS_SUBN_LID_ROUTED:
1323 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
1324 if (slave != mlx4_master_func_num(dev->dev) &&
1325 !mlx4_vf_smi_enabled(dev->dev, slave, ctx->port))
1326 return;
1327 break;
1320 case IB_MGMT_CLASS_SUBN_ADM: 1328 case IB_MGMT_CLASS_SUBN_ADM:
1321 if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave, 1329 if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
1322 (struct ib_sa_mad *) &tunnel->mad)) 1330 (struct ib_sa_mad *) &tunnel->mad))
@@ -1749,9 +1757,9 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
1749 return -EEXIST; 1757 return -EEXIST;
1750 1758
1751 ctx->state = DEMUX_PV_STATE_STARTING; 1759 ctx->state = DEMUX_PV_STATE_STARTING;
1752 /* have QP0 only on port owner, and only if link layer is IB */ 1760 /* have QP0 only if link layer is IB */
1753 if (ctx->slave == mlx4_master_func_num(to_mdev(ctx->ib_dev)->dev) && 1761 if (rdma_port_get_link_layer(ibdev, ctx->port) ==
1754 rdma_port_get_link_layer(ibdev, ctx->port) == IB_LINK_LAYER_INFINIBAND) 1762 IB_LINK_LAYER_INFINIBAND)
1755 ctx->has_smi = 1; 1763 ctx->has_smi = 1;
1756 1764
1757 if (ctx->has_smi) { 1765 if (ctx->has_smi) {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 41308af4163c..2e8c58806e2f 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -2370,7 +2370,8 @@ static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
2370 2370
2371static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev, 2371static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
2372 struct mlx4_wqe_datagram_seg *dseg, 2372 struct mlx4_wqe_datagram_seg *dseg,
2373 struct ib_send_wr *wr, enum ib_qp_type qpt) 2373 struct ib_send_wr *wr,
2374 enum mlx4_ib_qp_type qpt)
2374{ 2375{
2375 union mlx4_ext_av *av = &to_mah(wr->wr.ud.ah)->av; 2376 union mlx4_ext_av *av = &to_mah(wr->wr.ud.ah)->av;
2376 struct mlx4_av sqp_av = {0}; 2377 struct mlx4_av sqp_av = {0};
@@ -2383,8 +2384,10 @@ static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
2383 cpu_to_be32(0xf0000000); 2384 cpu_to_be32(0xf0000000);
2384 2385
2385 memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av)); 2386 memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av));
2386 /* This function used only for sending on QP1 proxies */ 2387 if (qpt == MLX4_IB_QPT_PROXY_GSI)
2387 dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]); 2388 dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]);
2389 else
2390 dseg->dqpn = cpu_to_be32(dev->dev->caps.qp0_tunnel[port - 1]);
2388 /* Use QKEY from the QP context, which is set by master */ 2391 /* Use QKEY from the QP context, which is set by master */
2389 dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY); 2392 dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
2390} 2393}
@@ -2700,16 +2703,13 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2700 size += seglen / 16; 2703 size += seglen / 16;
2701 break; 2704 break;
2702 case MLX4_IB_QPT_PROXY_SMI: 2705 case MLX4_IB_QPT_PROXY_SMI:
2703 /* don't allow QP0 sends on guests */
2704 err = -ENOSYS;
2705 *bad_wr = wr;
2706 goto out;
2707 case MLX4_IB_QPT_PROXY_GSI: 2706 case MLX4_IB_QPT_PROXY_GSI:
2708 /* If we are tunneling special qps, this is a UD qp. 2707 /* If we are tunneling special qps, this is a UD qp.
2709 * In this case we first add a UD segment targeting 2708 * In this case we first add a UD segment targeting
2710 * the tunnel qp, and then add a header with address 2709 * the tunnel qp, and then add a header with address
2711 * information */ 2710 * information */
2712 set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, wr, ibqp->qp_type); 2711 set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, wr,
2712 qp->mlx4_ib_qp_type);
2713 wqe += sizeof (struct mlx4_wqe_datagram_seg); 2713 wqe += sizeof (struct mlx4_wqe_datagram_seg);
2714 size += sizeof (struct mlx4_wqe_datagram_seg) / 16; 2714 size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
2715 build_tunnel_header(wr, wqe, &seglen); 2715 build_tunnel_header(wr, wqe, &seglen);