author	Roland Dreier <rolandd@cisco.com>	2008-05-20 17:00:02 -0400
committer	Roland Dreier <rolandd@cisco.com>	2008-05-20 17:00:02 -0400
commit	cd155c1c7c9e64df6afb5504d292fef7cb783a4f (patch)
tree	84981858bf7bbdd2e28b17557168124bc48650c3 /drivers/infiniband
parent	12103dca52e79e23afe2fbcaf3d9e7fc9ceb6b18 (diff)
IB/mlx4: Fix creation of kernel QP with max number of send s/g entries
When creating a kernel QP where the consumer asked for a send queue with lots of scatter/gather entries, set_kernel_sq_size() incorrectly returned an error if the send queue stride is larger than the hardware's maximum send work request descriptor size. This is not a problem; the only issue is to make sure that the actual descriptors used do not overflow the maximum descriptor size, so check this instead.

Clamp the returned max_send_sge value to be no bigger than what query_device returns for the max_sge to avoid confusing hapless users, even if the hardware is capable of handling a few more s/g entries.

This bug caused NFS/RDMA mounts to fail when the server adapter used the mlx4 driver.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
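To illustrate the arithmetic behind the two new min() clamps, here is a minimal standalone sketch in plain user-space C (not driver code); the device caps and per-QP numbers below are made-up example values, not real mlx4 limits:

	/* Sketch of the clamping arithmetic this patch introduces.
	 * All numeric values are hypothetical, chosen only for the example. */
	#include <stdio.h>

	#define MIN(a, b) ((a) < (b) ? (a) : (b))

	int main(void)
	{
		/* Hypothetical hardware limits, standing in for dev->dev->caps. */
		int max_sq_desc_sz = 1008;	/* max send WQE descriptor size, bytes */
		int max_sq_sg      = 32;	/* max send s/g entries query_device reports */
		int max_rq_sg      = 32;	/* max receive s/g entries */

		/* Hypothetical per-QP values after the shrink-WQE sizing loop. */
		int wqes_per_wr  = 2;		/* qp->sq_max_wqes_per_wr */
		int wqe_shift    = 9;		/* stride = 1 << wqe_shift = 512 bytes */
		int overhead     = 64;		/* send_wqe_overhead() for this QP type */
		int data_seg_sz  = 16;		/* sizeof (struct mlx4_wqe_data_seg) */

		/* A work request may span several units of 1 << wqe_shift, but the
		 * usable descriptor can never exceed max_sq_desc_sz. */
		int max_gs = (MIN(max_sq_desc_sz, wqes_per_wr << wqe_shift) - overhead)
			     / data_seg_sz;

		/* Report no more s/g entries than query_device advertises. */
		int max_send_sge = MIN(max_gs, MIN(max_sq_sg, max_rq_sg));

		printf("max_gs = %d, max_send_sge = %d\n", max_gs, max_send_sge);
		return 0;
	}

With these example numbers, max_gs comes out to 59 but max_send_sge is clamped to 32, matching what query_device would have advertised.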
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/mlx4/qp.c	| 13
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index cec030e118d1..a80df22deae8 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -333,6 +333,9 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
 		send_wqe_overhead(type, qp->flags);
 
+	if (s > dev->dev->caps.max_sq_desc_sz)
+		return -EINVAL;
+
 	/*
 	 * Hermon supports shrinking WQEs, such that a single work
 	 * request can include multiple units of 1 << wqe_shift.  This
@@ -372,9 +375,6 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 		qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));
 
 	for (;;) {
-		if (1 << qp->sq.wqe_shift > dev->dev->caps.max_sq_desc_sz)
-			return -EINVAL;
-
 		qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);
 
 		/*
@@ -395,7 +395,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 			++qp->sq.wqe_shift;
 	}
 
-	qp->sq.max_gs = ((qp->sq_max_wqes_per_wr << qp->sq.wqe_shift) -
+	qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz,
+			     (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) -
 			 send_wqe_overhead(type, qp->flags)) /
 		sizeof (struct mlx4_wqe_data_seg);
 
@@ -411,7 +412,9 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 
 	cap->max_send_wr  = qp->sq.max_post =
 		(qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
-	cap->max_send_sge = qp->sq.max_gs;
+	cap->max_send_sge = min(qp->sq.max_gs,
+				min(dev->dev->caps.max_sq_sg,
+				    dev->dev->caps.max_rq_sg));
 	/* We don't support inline sends for kernel QPs (yet) */
 	cap->max_inline_data = 0;
 