aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/infiniband
diff options
context:
space:
mode:
author    Or Gerlitz <ogerlitz@mellanox.com>    2012-01-17 06:39:07 -0500
committer Roland Dreier <roland@purestorage.com>    2012-05-08 14:18:09 -0400
commit3987a2d3193cf267cb852f48b2b299c9574644c3 (patch)
treeab1dae8e556caee4c5a5b1f6b1f6e6ce2c579dbb /drivers/infiniband
parentc938a616aadb621b8e26b0ac09ac13d053c7ed1c (diff)
IB/mlx4: Add raw packet QP support
Implement raw packet QPs for Ethernet ports using the MLX transport (as
done by the mlx4_en Ethernet netdevice driver).

Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--    drivers/infiniband/hw/mlx4/qp.c | 26 +++++++++++++++++++++++---
1 file changed, 23 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 3a7848966627..4649d83203df 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -84,6 +84,11 @@ enum {
 	MLX4_IB_CACHE_LINE_SIZE	= 64,
 };
 
+enum {
+	MLX4_RAW_QP_MTU		= 7,
+	MLX4_RAW_QP_MSGMAX	= 31,
+};
+
 static const __be32 mlx4_ib_opcode[] = {
 	[IB_WR_SEND]			= cpu_to_be32(MLX4_OPCODE_SEND),
 	[IB_WR_LSO]			= cpu_to_be32(MLX4_OPCODE_LSO),
@@ -573,7 +578,12 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		if (sqpn) {
 			qpn = sqpn;
 		} else {
-			err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
+			/* Raw packet QPNs must be aligned to 8 bits. If not, the WQE
+			 * BlueFlame setup flow wrongly causes VLAN insertion. */
+			if (init_attr->qp_type == IB_QPT_RAW_PACKET)
+				err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn);
+			else
+				err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
 			if (err)
 				goto err_wrid;
 		}
@@ -791,6 +801,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 	case IB_QPT_RC:
 	case IB_QPT_UC:
 	case IB_QPT_UD:
+	case IB_QPT_RAW_PACKET:
 	{
 		qp = kzalloc(sizeof *qp, GFP_KERNEL);
 		if (!qp)
@@ -872,7 +883,8 @@ static int to_mlx4_st(enum ib_qp_type type)
 	case IB_QPT_XRC_INI:
 	case IB_QPT_XRC_TGT:	return MLX4_QP_ST_XRC;
 	case IB_QPT_SMI:
-	case IB_QPT_GSI:	return MLX4_QP_ST_MLX;
+	case IB_QPT_GSI:
+	case IB_QPT_RAW_PACKET:	return MLX4_QP_ST_MLX;
 	default:		return -1;
 	}
 }
@@ -1042,6 +1054,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 
 	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
 		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
+	else if (ibqp->qp_type == IB_QPT_RAW_PACKET)
+		context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
 	else if (ibqp->qp_type == IB_QPT_UD) {
 		if (qp->flags & MLX4_IB_QP_LSO)
 			context->mtu_msgmax = (IB_MTU_4096 << 5) |
@@ -1200,7 +1214,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	if (cur_state == IB_QPS_INIT &&
 	    new_state == IB_QPS_RTR  &&
 	    (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
-	     ibqp->qp_type == IB_QPT_UD)) {
+	     ibqp->qp_type == IB_QPT_UD ||
+	     ibqp->qp_type == IB_QPT_RAW_PACKET)) {
 		context->pri_path.sched_queue = (qp->port - 1) << 6;
 		if (is_qp0(dev, qp))
 			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
@@ -1319,6 +1334,11 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		goto out;
 	}
 
+	if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) &&
+	    (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) !=
+	     IB_LINK_LAYER_ETHERNET))
+		goto out;
+
 	if (attr_mask & IB_QP_PKEY_INDEX) {
 		int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
 		if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p])