author		Roland Dreier <rolandd@cisco.com>	2008-05-16 17:58:44 -0400
committer	Roland Dreier <rolandd@cisco.com>	2008-05-16 17:58:44 -0400
commit		12103dca52e79e23afe2fbcaf3d9e7fc9ceb6b18 (patch)
tree		91c8f6f489e8e47a926c0bf6337660894b6a2e50 /drivers/infiniband
parent		21609ae3efa42f4118ce741f7e55d66d716cb17c (diff)
IB/mthca: Fix max_sge value returned by query_device
The mthca driver returns the maximum number of scatter/gather entries
returned by the firmware as the max_sge value when device properties
are queried.  However, the firmware also reports a limit on the maximum
descriptor size allowed, and because mthca takes into account the worst
case send request overhead when checking whether to allow a QP to be
created, the largest number of scatter/gather entries that can be used
with mthca may be limited by the maximum descriptor size rather than
just by the actual s/g entry limit.

This means that applications cannot actually create QPs with
max_send_sge equal to the limit returned by ib_query_device().  Fix
this by checking if the maximum descriptor size imposes a lower limit
and if so returning that lower limit.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
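[Editor's note: a minimal standalone sketch of the clamping arithmetic the
patch introduces.  The clamp_max_sg() helper and the three segment-size
constants are assumptions invented for this illustration -- the real values
are the sizeof()s of the WQE segment structs in mthca_wqe.h -- but it shows
how a descriptor-size cap can undercut the firmware's raw s/g limit.]

#include <stdio.h>

/* Illustrative sizes only -- assumptions for this sketch, not the
 * exact sizeof()s of the mthca WQE structs. */
#define NEXT_SEG_SIZE	16	/* assumed sizeof(struct mthca_next_seg) */
#define UD_SEG_SIZE	48	/* assumed worst-case UD segment overhead */
#define DATA_SEG_SIZE	16	/* assumed sizeof(struct mthca_data_seg) */

/* Hypothetical helper mirroring the min_t() clamp in the patch. */
static int clamp_max_sg(int fw_max_sg, int max_desc_sz)
{
	/* s/g entries that still fit after worst-case UD send overhead */
	int fit = (max_desc_sz - NEXT_SEG_SIZE - UD_SEG_SIZE) / DATA_SEG_SIZE;

	return fw_max_sg < fit ? fw_max_sg : fit;
}

int main(void)
{
	/* e.g. firmware reports max_sg = 32 but caps descriptors at 512
	 * bytes: only (512 - 16 - 48) / 16 = 28 data segments fit, so
	 * query_device must report 28, not 32. */
	printf("max_sg = %d\n", clamp_max_sg(32, 512));
	return 0;
}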
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/mthca/mthca_main.c	14
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 9ebadd6e0cfb..200cf13fc9bb 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -45,6 +45,7 @@
 #include "mthca_cmd.h"
 #include "mthca_profile.h"
 #include "mthca_memfree.h"
+#include "mthca_wqe.h"
 
 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver");
@@ -200,7 +201,18 @@ static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
 	mdev->limits.gid_table_len  = dev_lim->max_gids;
 	mdev->limits.pkey_table_len = dev_lim->max_pkeys;
 	mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay;
-	mdev->limits.max_sg         = dev_lim->max_sg;
+	/*
+	 * Need to allow for worst case send WQE overhead and check
+	 * whether max_desc_sz imposes a lower limit than max_sg; UD
+	 * send has the biggest overhead.
+	 */
+	mdev->limits.max_sg         = min_t(int, dev_lim->max_sg,
+					    (dev_lim->max_desc_sz -
+					     sizeof (struct mthca_next_seg) -
+					     (mthca_is_memfree(mdev) ?
+					      sizeof (struct mthca_arbel_ud_seg) :
+					      sizeof (struct mthca_tavor_ud_seg))) /
+					    sizeof (struct mthca_data_seg));
 	mdev->limits.max_wqes       = dev_lim->max_qp_sz;
 	mdev->limits.max_qp_init_rdma = dev_lim->max_requester_per_qp;
 	mdev->limits.reserved_qps   = dev_lim->reserved_qps;
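[Editor's note: the point of the fix is that a consumer can now trust the
queried limit.  Below is a hedged kernel-side sketch under the 2008-era
verbs API; create_max_sge_qp() and the queue depths are hypothetical,
while ib_query_device() and ib_create_qp() are the real calls of that
period.  Before this commit, mthca could reject such a QP even though
max_send_sge came straight from ib_query_device().]

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Hypothetical consumer: create a UD QP using the full queried max_sge. */
static struct ib_qp *create_max_sge_qp(struct ib_device *dev,
				       struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_device_attr attr;
	struct ib_qp_init_attr init_attr = { };
	int err;

	err = ib_query_device(dev, &attr);  /* now returns the clamped max_sge */
	if (err)
		return ERR_PTR(err);

	init_attr.send_cq          = cq;
	init_attr.recv_cq          = cq;
	init_attr.qp_type          = IB_QPT_UD;
	init_attr.cap.max_send_wr  = 16;    /* arbitrary queue depths */
	init_attr.cap.max_recv_wr  = 16;
	init_attr.cap.max_send_sge = attr.max_sge;  /* full queried limit */
	init_attr.cap.max_recv_sge = 1;

	return ib_create_qp(pd, &init_attr);
}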