author    Eli Cohen <eli@dev.mellanox.co.il>   2008-04-17 00:09:27 -0400
committer Roland Dreier <rolandd@cisco.com>    2008-04-17 00:09:27 -0400
commit    b832be1e4007f4a54954ec68bd865ff05d6babca
tree      f8780fb17293a5b02cd21fed468e1270daac91d8 /drivers/infiniband/hw
parent    40ca1988e03c001747d0b4cc1b25cf38297c9f9e
IB/mlx4: Add IPoIB LSO support
Add TSO support to the mlx4_ib driver.

Signed-off-by: Eli Cohen <eli@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw')
 drivers/infiniband/hw/mlx4/cq.c      |  3 +
 drivers/infiniband/hw/mlx4/main.c    |  2 +
 drivers/infiniband/hw/mlx4/mlx4_ib.h |  5 +
 drivers/infiniband/hw/mlx4/qp.c      | 72 ++++++++++++++++++++++++++++-----
 4 files changed, 73 insertions(+), 9 deletions(-)
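The patch threads LSO support through four layers of the driver: completion reporting (cq.c), device capability advertisement (main.c), a per-QP flag (mlx4_ib.h), and send-WQE construction (qp.c). For orientation before the diff, here is a minimal sketch of how a kernel ULP such as IPoIB would request an LSO-capable QP. It illustrates the interface this patch implements rather than quoting code from it; the pd/send_cq/recv_cq variables and the capacity values are assumptions:

	/* Sketch: create a kernel UD QP with LSO headroom reserved. */
	struct ib_qp_init_attr init_attr = {
		.send_cq      = send_cq,
		.recv_cq      = recv_cq,
		.cap          = {
			.max_send_wr  = 64,
			.max_recv_wr  = 256,
			.max_send_sge = 2,
			.max_recv_sge = 1,
		},
		.sq_sig_type  = IB_SIGNAL_ALL_WR,
		.qp_type      = IB_QPT_UD,
		/* New: mlx4 reserves 64 bytes of LSO headroom per WQE
		 * (see send_wqe_overhead() in the qp.c hunks below). */
		.create_flags = IB_QP_CREATE_IPOIB_UD_LSO,
	};
	struct ib_qp *qp = ib_create_qp(pd, &init_attr);

	if (IS_ERR(qp))
		return PTR_ERR(qp);	/* e.g. -EINVAL for userspace or non-UD QPs */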
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index d2e32b03e2f7..7d70af7952b0 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -420,6 +420,9 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 		case MLX4_OPCODE_BIND_MW:
 			wc->opcode    = IB_WC_BIND_MW;
 			break;
+		case MLX4_OPCODE_LSO:
+			wc->opcode    = IB_WC_LSO;
+			break;
 		}
 	} else {
 		wc->byte_len = be32_to_cpu(cqe->byte_cnt);
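The new MLX4_OPCODE_LSO case surfaces to consumers as the IB_WC_LSO work-completion opcode. A consumer's poll loop can treat it like any other send completion; a sketch (the loop shape and the complete_tx() helper are illustrative, not from this patch):

	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status != IB_WC_SUCCESS)
			continue;	/* real code would log/recover here */
		if (wc.opcode == IB_WC_LSO)
			complete_tx(wc.wr_id);	/* hypothetical: reclaim the skb */
	}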
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 6ea4746c2e9b..e9330a0d6c03 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -101,6 +101,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
+	if (dev->dev->caps.max_gso_sz)
+		props->device_cap_flags |= IB_DEVICE_UD_TSO;
 
 	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
 		0xffffff;
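Advertising IB_DEVICE_UD_TSO whenever the firmware reports a nonzero max_gso_sz lets ULPs gate their LSO path on a standard capability bit instead of driver-specific checks. A sketch using the ib_query_device() interface of this era (error handling elided):

	struct ib_device_attr attr;
	int use_lso = 0;

	if (!ib_query_device(ibdev, &attr) &&
	    (attr.device_cap_flags & IB_DEVICE_UD_TSO))
		use_lso = 1;	/* safe to request IB_QP_CREATE_IPOIB_UD_LSO */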
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 3726e451a327..3f8bd0a37b96 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -110,6 +110,10 @@ struct mlx4_ib_wq {
 	unsigned		tail;
 };
 
+enum mlx4_ib_qp_flags {
+	MLX4_IB_QP_LSO		= 1 << 0
+};
+
 struct mlx4_ib_qp {
 	struct ib_qp		ibqp;
 	struct mlx4_qp		mqp;
@@ -129,6 +133,7 @@ struct mlx4_ib_qp {
 	struct mlx4_mtt		mtt;
 	int			buf_size;
 	struct mutex		mutex;
+	u32			flags;
 	u8			port;
 	u8			alt_port;
 	u8			atomic_rd_en;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 320c25fa74b1..2ba243084089 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -71,6 +71,7 @@ enum {
 
 static const __be32 mlx4_ib_opcode[] = {
 	[IB_WR_SEND]			= __constant_cpu_to_be32(MLX4_OPCODE_SEND),
+	[IB_WR_LSO]			= __constant_cpu_to_be32(MLX4_OPCODE_LSO),
 	[IB_WR_SEND_WITH_IMM]		= __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM),
 	[IB_WR_RDMA_WRITE]		= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
 	[IB_WR_RDMA_WRITE_WITH_IMM]	= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
@@ -242,7 +243,7 @@ static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
 	}
 }
 
-static int send_wqe_overhead(enum ib_qp_type type)
+static int send_wqe_overhead(enum ib_qp_type type, u32 flags)
 {
 	/*
 	 * UD WQEs must have a datagram segment.
@@ -253,7 +254,8 @@ static int send_wqe_overhead(enum ib_qp_type type)
 	switch (type) {
 	case IB_QPT_UD:
 		return sizeof (struct mlx4_wqe_ctrl_seg) +
-			sizeof (struct mlx4_wqe_datagram_seg);
+			sizeof (struct mlx4_wqe_datagram_seg) +
+			((flags & MLX4_IB_QP_LSO) ? 64 : 0);
 	case IB_QPT_UC:
 		return sizeof (struct mlx4_wqe_ctrl_seg) +
 			sizeof (struct mlx4_wqe_raddr_seg);
@@ -315,7 +317,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 	/* Sanity check SQ size before proceeding */
 	if (cap->max_send_wr  > dev->dev->caps.max_wqes  ||
 	    cap->max_send_sge > dev->dev->caps.max_sq_sg ||
-	    cap->max_inline_data + send_wqe_overhead(type) +
+	    cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
 	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
 		return -EINVAL;
 
@@ -329,7 +331,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 
 	s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
 		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
-		send_wqe_overhead(type);
+		send_wqe_overhead(type, qp->flags);
 
 	/*
 	 * Hermon supports shrinking WQEs, such that a single work
@@ -394,7 +396,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 	}
 
 	qp->sq.max_gs = ((qp->sq_max_wqes_per_wr << qp->sq.wqe_shift) -
-			 send_wqe_overhead(type)) / sizeof (struct mlx4_wqe_data_seg);
+			 send_wqe_overhead(type, qp->flags)) /
+		sizeof (struct mlx4_wqe_data_seg);
 
 	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
 		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
@@ -503,6 +506,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	} else {
 		qp->sq_no_prefetch = 0;
 
+		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
+			qp->flags |= MLX4_IB_QP_LSO;
+
 		err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
 		if (err)
 			goto err;
@@ -673,7 +679,11 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 	struct mlx4_ib_qp *qp;
 	int err;
 
-	if (init_attr->create_flags)
+	/* We only support LSO, and only for kernel UD QPs. */
+	if (init_attr->create_flags & ~IB_QP_CREATE_IPOIB_UD_LSO)
+		return ERR_PTR(-EINVAL);
+	if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO &&
+	    (pd->uobject || init_attr->qp_type != IB_QPT_UD))
 		return ERR_PTR(-EINVAL);
 
 	switch (init_attr->qp_type) {
@@ -879,10 +889,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		}
 	}
 
-	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
-	    ibqp->qp_type == IB_QPT_UD)
+	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
 		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
-	else if (attr_mask & IB_QP_PATH_MTU) {
+	else if (ibqp->qp_type == IB_QPT_UD) {
+		if (qp->flags & MLX4_IB_QP_LSO)
+			context->mtu_msgmax = (IB_MTU_4096 << 5) |
+					      ilog2(dev->dev->caps.max_gso_sz);
+		else
+			context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
+	} else if (attr_mask & IB_QP_PATH_MTU) {
 		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
 			printk(KERN_ERR "path MTU (%u) is invalid\n",
 			       attr->path_mtu);
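Reading off the encoding above (an inference from the code, not stated in the patch): the low five bits of mtu_msgmax hold the log2 of the largest message the QP accepts, so a plain UD QP stays capped at 2^11 bytes while an LSO QP is raised to the device's max_gso_sz. For example, max_gso_sz = 65536 gives ilog2() = 16, letting a single send carry a 64 KB super-packet that the HCA segments into MTU-sized wire packets.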
@@ -1399,6 +1414,34 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
 	dseg->addr       = cpu_to_be64(sg->addr);
 }
 
+static int build_lso_seg(struct mlx4_lso_seg *wqe, struct ib_send_wr *wr,
+			 struct mlx4_ib_qp *qp, unsigned *lso_seg_len)
+{
+	unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);
+
+	/*
+	 * This is a temporary limitation and will be removed in
+	 * a forthcoming FW release:
+	 */
+	if (unlikely(halign > 64))
+		return -EINVAL;
+
+	if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
+		     wr->num_sge > qp->sq.max_gs - (halign >> 4)))
+		return -EINVAL;
+
+	memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
+
+	/* make sure LSO header is written before overwriting stamping */
+	wmb();
+
+	wqe->mss_hdr_size = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
+					wr->wr.ud.hlen);
+
+	*lso_seg_len = halign;
+	return 0;
+}
+
 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		      struct ib_send_wr **bad_wr)
 {
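A worked consequence of the halign check in build_lso_seg() (an inference, assuming the LSO segment begins with only the 32-bit mss_hdr_size word ahead of the inline header): halign = ALIGN(4 + hlen, 16) must not exceed 64, so the headers copied into the WQE can be at most 60 bytes. For example, hlen = 58 gives ALIGN(62, 16) = 64 and passes, while hlen = 61 gives ALIGN(65, 16) = 80 and is rejected until the firmware limitation noted in the comment is lifted. The (halign >> 4) term then charges those 16-byte header chunks against sq.max_gs on QPs that did not reserve LSO headroom at creation.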
@@ -1412,6 +1455,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	unsigned ind;
 	int uninitialized_var(stamp);
 	int uninitialized_var(size);
+	unsigned seglen;
 	int i;
 
 	spin_lock_irqsave(&qp->sq.lock, flags);
@@ -1490,6 +1534,16 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		set_datagram_seg(wqe, wr);
 		wqe  += sizeof (struct mlx4_wqe_datagram_seg);
 		size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
+
+		if (wr->opcode == IB_WR_LSO) {
+			err = build_lso_seg(wqe, wr, qp, &seglen);
+			if (unlikely(err)) {
+				*bad_wr = wr;
+				goto out;
+			}
+			wqe  += seglen;
+			size += seglen / 16;
+		}
 		break;
 
 	case IB_QPT_SMI:
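Finally, for completeness, a sketch of the sender's side that the IB_QPT_UD arm above services. The wr.ud field names (ah, header, hlen, mss) match those consumed by build_lso_seg(); the buffer and addressing variables are assumed to exist in the caller, and drop_packet() is a hypothetical error path:

	/* Sketch: post one LSO send on a UD QP created with
	 * IB_QP_CREATE_IPOIB_UD_LSO.  'headers'/'hlen' hold the
	 * IPoIB + IP + TCP headers (at most 60 bytes under the current
	 * firmware limit); 'payload_sge' describes the TCP payload the
	 * HCA will segment into 'mss'-sized chunks. */
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode     = IB_WR_LSO,
		.sg_list    = &payload_sge,
		.num_sge    = 1,
		.send_flags = IB_SEND_SIGNALED,
	};

	wr.wr.ud.ah          = ah;
	wr.wr.ud.remote_qpn  = remote_qpn;
	wr.wr.ud.remote_qkey = qkey;
	wr.wr.ud.header      = headers;	/* copied inline by build_lso_seg() */
	wr.wr.ud.hlen        = hlen;
	wr.wr.ud.mss         = mss;

	if (ib_post_send(qp, &wr, &bad_wr))
		drop_packet();	/* bad_wr points at the offending request */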