 drivers/infiniband/hw/mlx4/cq.c      |  3 +
 drivers/infiniband/hw/mlx4/main.c    |  2 +
 drivers/infiniband/hw/mlx4/mlx4_ib.h |  5 ++
 drivers/infiniband/hw/mlx4/qp.c      | 72 ++++++++++++++++++++++++++++-----
 drivers/net/mlx4/fw.c                |  9 ++++
 drivers/net/mlx4/fw.h                |  1 +
 drivers/net/mlx4/main.c              |  1 +
 include/linux/mlx4/device.h          |  1 +
 include/linux/mlx4/qp.h              |  5 ++
 9 files changed, 90 insertions(+), 9 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index d2e32b03e2f7..7d70af7952b0 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -420,6 +420,9 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 		case MLX4_OPCODE_BIND_MW:
 			wc->opcode    = IB_WC_BIND_MW;
 			break;
+		case MLX4_OPCODE_LSO:
+			wc->opcode    = IB_WC_LSO;
+			break;
 		}
 	} else {
 		wc->byte_len = be32_to_cpu(cqe->byte_cnt);
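
Note: the new case maps the hardware's MLX4_OPCODE_LSO completion onto IB_WC_LSO, so LSO sends complete with their own work-completion opcode. A minimal consumer-side sketch (hypothetical, not part of this patch; handle_lso_completion is an illustrative helper):

	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status != IB_WC_SUCCESS)
			continue;	/* error handling elided */
		if (wc.opcode == IB_WC_LSO)
			handle_lso_completion(&wc);	/* LSO send finished */
	}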
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 6ea4746c2e9b..e9330a0d6c03 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -101,6 +101,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
+	if (dev->dev->caps.max_gso_sz)
+		props->device_cap_flags |= IB_DEVICE_UD_TSO;
 
 	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
 		0xffffff;
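
Note: IB_DEVICE_UD_TSO is advertised exactly when firmware reports a nonzero max_gso_sz (see the QUERY_DEV_CAP change below), so ULPs can probe for LSO support before posting IB_WR_LSO work requests. A hedged sketch against the verbs API of this era (the helper name is illustrative):

	static int ud_tso_supported(struct ib_device *ibdev)
	{
		struct ib_device_attr attr;

		if (ib_query_device(ibdev, &attr))
			return 0;	/* be conservative if the query fails */

		return !!(attr.device_cap_flags & IB_DEVICE_UD_TSO);
	}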
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 3726e451a327..3f8bd0a37b96 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -110,6 +110,10 @@ struct mlx4_ib_wq {
 	unsigned		tail;
 };
 
+enum mlx4_ib_qp_flags {
+	MLX4_IB_QP_LSO		= 1 << 0
+};
+
 struct mlx4_ib_qp {
 	struct ib_qp		ibqp;
 	struct mlx4_qp		mqp;
@@ -129,6 +133,7 @@ struct mlx4_ib_qp {
 	struct mlx4_mtt		mtt;
 	int			buf_size;
 	struct mutex		mutex;
+	u32			flags;
 	u8			port;
 	u8			alt_port;
 	u8			atomic_rd_en;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 320c25fa74b1..2ba243084089 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -71,6 +71,7 @@ enum {
 
 static const __be32 mlx4_ib_opcode[] = {
 	[IB_WR_SEND]			= __constant_cpu_to_be32(MLX4_OPCODE_SEND),
+	[IB_WR_LSO]			= __constant_cpu_to_be32(MLX4_OPCODE_LSO),
 	[IB_WR_SEND_WITH_IMM]		= __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM),
 	[IB_WR_RDMA_WRITE]		= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
 	[IB_WR_RDMA_WRITE_WITH_IMM]	= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
@@ -242,7 +243,7 @@ static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
 	}
 }
 
-static int send_wqe_overhead(enum ib_qp_type type)
+static int send_wqe_overhead(enum ib_qp_type type, u32 flags)
 {
 	/*
 	 * UD WQEs must have a datagram segment.
@@ -253,7 +254,8 @@ static int send_wqe_overhead(enum ib_qp_type type)
 	switch (type) {
 	case IB_QPT_UD:
 		return sizeof (struct mlx4_wqe_ctrl_seg) +
-			sizeof (struct mlx4_wqe_datagram_seg);
+			sizeof (struct mlx4_wqe_datagram_seg) +
+			((flags & MLX4_IB_QP_LSO) ? 64 : 0);
 	case IB_QPT_UC:
 		return sizeof (struct mlx4_wqe_ctrl_seg) +
 			sizeof (struct mlx4_wqe_raddr_seg);
@@ -315,7 +317,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 	/* Sanity check SQ size before proceeding */
 	if (cap->max_send_wr  > dev->dev->caps.max_wqes  ||
 	    cap->max_send_sge > dev->dev->caps.max_sq_sg ||
-	    cap->max_inline_data + send_wqe_overhead(type) +
+	    cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
 	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
 		return -EINVAL;
 
@@ -329,7 +331,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 
 	s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
 		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
-		send_wqe_overhead(type);
+		send_wqe_overhead(type, qp->flags);
 
 	/*
 	 * Hermon supports shrinking WQEs, such that a single work
@@ -394,7 +396,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 	}
 
 	qp->sq.max_gs = ((qp->sq_max_wqes_per_wr << qp->sq.wqe_shift) -
-		send_wqe_overhead(type)) / sizeof (struct mlx4_wqe_data_seg);
+			 send_wqe_overhead(type, qp->flags)) /
+		sizeof (struct mlx4_wqe_data_seg);
 
 	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
 		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
@@ -503,6 +506,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	} else {
 		qp->sq_no_prefetch = 0;
 
+		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
+			qp->flags |= MLX4_IB_QP_LSO;
+
 		err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
 		if (err)
 			goto err;
@@ -673,7 +679,11 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 	struct mlx4_ib_qp *qp;
 	int err;
 
-	if (init_attr->create_flags)
+	/* We only support LSO, and only for kernel UD QPs. */
+	if (init_attr->create_flags & ~IB_QP_CREATE_IPOIB_UD_LSO)
+		return ERR_PTR(-EINVAL);
+	if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO &&
+	    (pd->uobject || init_attr->qp_type != IB_QPT_UD))
 		return ERR_PTR(-EINVAL);
 
 	switch (init_attr->qp_type) {
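
Note: LSO must be requested at creation time so that set_kernel_sq_size() can account for the extra 64 bytes of per-WQE overhead; userspace QPs (pd->uobject) and non-UD QP types are rejected. A hypothetical kernel caller might look like this (all capacities illustrative):

	struct ib_qp_init_attr init_attr = {
		.send_cq      = send_cq,
		.recv_cq      = recv_cq,
		.cap          = { .max_send_wr  = 64, .max_recv_wr  = 256,
				  .max_send_sge = 1,  .max_recv_sge = 1 },
		.sq_sig_type  = IB_SIGNAL_ALL_WR,
		.qp_type      = IB_QPT_UD,
		.create_flags = IB_QP_CREATE_IPOIB_UD_LSO,
	};
	struct ib_qp *qp = ib_create_qp(pd, &init_attr);

	if (IS_ERR(qp))
		qp = NULL;	/* e.g. fall back to non-LSO operation */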
@@ -879,10 +889,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		}
 	}
 
-	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
-	    ibqp->qp_type == IB_QPT_UD)
+	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
 		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
-	else if (attr_mask & IB_QP_PATH_MTU) {
+	else if (ibqp->qp_type == IB_QPT_UD) {
+		if (qp->flags & MLX4_IB_QP_LSO)
+			context->mtu_msgmax = (IB_MTU_4096 << 5) |
+				ilog2(dev->dev->caps.max_gso_sz);
+		else
+			context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
+	} else if (attr_mask & IB_QP_PATH_MTU) {
 		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
 			printk(KERN_ERR "path MTU (%u) is invalid\n",
 			       attr->path_mtu);
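
Note: for an LSO-enabled UD QP the maximum message size in the QP context is raised from 2^11 bytes to the firmware's GSO limit. Since max_gso_sz is always a power of two (it is decoded as 1 << field in mlx4_QUERY_DEV_CAP below), ilog2() recovers the exponent exactly; e.g. a reported max_gso_sz of 65536 sets the low bits of mtu_msgmax to 16.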
@@ -1399,6 +1414,34 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
 	dseg->addr       = cpu_to_be64(sg->addr);
 }
 
+static int build_lso_seg(struct mlx4_lso_seg *wqe, struct ib_send_wr *wr,
+			 struct mlx4_ib_qp *qp, unsigned *lso_seg_len)
+{
+	unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);
+
+	/*
+	 * This is a temporary limitation and will be removed in
+	 * a forthcoming FW release:
+	 */
+	if (unlikely(halign > 64))
+		return -EINVAL;
+
+	if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
+		     wr->num_sge > qp->sq.max_gs - (halign >> 4)))
+		return -EINVAL;
+
+	memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
+
+	/* make sure LSO header is written before overwriting stamping */
+	wmb();
+
+	wqe->mss_hdr_size = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
+					wr->wr.ud.hlen);
+
+	*lso_seg_len = halign;
+	return 0;
+}
+
 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		      struct ib_send_wr **bad_wr)
 {
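
Note (worked example, illustrative numbers): for wr.ud.hlen = 44 and wr.ud.mss = 2044, halign = ALIGN(4 + 44, 16) = 48, i.e. the LSO segment occupies three 16-byte WQE chunks, and mss_hdr_size becomes cpu_to_be32((2000 << 16) | 44): the hardware emits up to 2000 payload bytes per segment, each prefixed with a copy of the 44 header bytes stored in wqe->header[].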
@@ -1412,6 +1455,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	unsigned ind;
 	int uninitialized_var(stamp);
 	int uninitialized_var(size);
+	unsigned seglen;
 	int i;
 
 	spin_lock_irqsave(&qp->sq.lock, flags);
@@ -1490,6 +1534,16 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			set_datagram_seg(wqe, wr);
 			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
+
+			if (wr->opcode == IB_WR_LSO) {
+				err = build_lso_seg(wqe, wr, qp, &seglen);
+				if (unlikely(err)) {
+					*bad_wr = wr;
+					goto out;
+				}
+				wqe  += seglen;
+				size += seglen / 16;
+			}
 			break;
 
 		case IB_QPT_SMI:
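
Note: on the send path the LSO segment follows the datagram segment, and its size (halign bytes) is counted into the WQE in 16-byte units. A hedged sketch of posting a UD LSO work request, with all values (payload_dma, hdr, mss, etc.) illustrative:

	struct ib_sge sge = {
		.addr   = payload_dma,	/* DMA address of the TSO payload */
		.length = payload_len,	/* may exceed the path MTU */
		.lkey   = mr->lkey,
	};
	struct ib_send_wr wr = {
		.opcode     = IB_WR_LSO,
		.sg_list    = &sge,
		.num_sge    = 1,
		.send_flags = IB_SEND_SIGNALED,
	};
	struct ib_send_wr *bad_wr;

	wr.wr.ud.ah          = ah;	/* address handle for the destination */
	wr.wr.ud.remote_qpn  = qpn;
	wr.wr.ud.remote_qkey = qkey;
	wr.wr.ud.header      = hdr;	/* hlen header bytes copied into the WQE */
	wr.wr.ud.hlen        = hdr_len;
	wr.wr.ud.mss         = mss;

	if (ib_post_send(qp, &wr, &bad_wr))
		;	/* error handling elided */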
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index f494c3e8bce3..d82f2751d2c7 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -133,6 +133,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
 #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
 #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
+#define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
 #define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
 #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
 #define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
@@ -215,6 +216,13 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
 	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
+	field &= 0x1f;
+	if (!field)
+		dev_cap->max_gso_sz = 0;
+	else
+		dev_cap->max_gso_sz = 1 << field;
+
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
 	dev_cap->max_rdma_global = 1 << (field & 0x3f);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
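
Note: firmware encodes the GSO limit as a 5-bit log2 value at offset 0x2d of the QUERY_DEV_CAP mailbox; zero means LSO is unsupported, otherwise max_gso_sz = 2^field (e.g. field = 16 gives a 64 KB limit). The main.c and device.h hunks below simply propagate this value into struct mlx4_caps, where mlx4_ib_query_device() and the QP context code above consume it.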
@@ -377,6 +385,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
 	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
 		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
+	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
 
 	dump_dev_cap_flags(dev, dev_cap->flags);
 
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index e16dec890413..306cb9b0242d 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -96,6 +96,7 @@ struct mlx4_dev_cap {
 	u8  bmme_flags;
 	u32 reserved_lkey;
 	u64 max_icm_sz;
+	int max_gso_sz;
 };
 
 struct mlx4_adapter {
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 08bfc130a33e..7cfbe75114d1 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -159,6 +159,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
 	dev->caps.flags		     = dev_cap->flags;
 	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
+	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
 
 	return 0;
 }
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 6cdf813cd478..ff7df1a2222f 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -186,6 +186,7 @@ struct mlx4_caps {
 	u32			flags;
 	u16			stat_rate_support;
 	u8			port_width_cap[MLX4_MAX_PORTS + 1];
+	int			max_gso_sz;
 };
 
 struct mlx4_buf_list {
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 31f9eb3ccbb3..a5e43febee4f 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -219,6 +219,11 @@ struct mlx4_wqe_datagram_seg {
 	__be32			reservd[2];
 };
 
+struct mlx4_lso_seg {
+	__be32			mss_hdr_size;
+	__be32			header[0];
+};
+
 struct mlx4_wqe_bind_seg {
 	__be32			flags1;
 	__be32			flags2;
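
Note: mss_hdr_size packs two 16-bit fields exactly as build_lso_seg() writes them: bits 31:16 carry the per-segment payload size (wr.ud.mss - wr.ud.hlen) and bits 15:0 the header length; header[] is a zero-length array holding the hlen header bytes inline in the WQE.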