author      Eli Cohen <eli@mellanox.co.il>        2007-05-17 03:32:41 -0400
committer   Roland Dreier <rolandd@cisco.com>     2007-05-20 13:18:04 -0400
commit      2446304dd687488c054d0437f2aeef1ef2bfbd02 (patch)
tree        70f999aca66563488d7b38ad226de34a071cfa90 /drivers/infiniband/hw/mlx4/qp.c
parent      59b0ed121297b57abb2352bdc8313959e7cb5635 (diff)
IB/mlx4: Pass send queue sizes from userspace to kernel
Pass the number of WQEs for the send queue and their size from userspace
to the kernel to avoid having to keep the QP size calculations in sync
between the kernel driver and libmlx4. This fixes a bug seen with the
current mlx4_ib driver and current libmlx4 caused by a difference in the
calculated sizes for SQ WQEs. Also, this gives more flexibility for
userspace to experiment with using multiple WQE BBs for a single SQ WQE.
Signed-off-by: Eli Cohen <eli@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
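
[Editor's note] The userspace half of this contract lives in libmlx4, which now owns the SQ sizing entirely. The sketch below shows roughly what a userspace driver hands the kernel; the two sizing fields match what set_user_sq_size() reads in the patch below, but the struct layout, helper names, and rounding policy are illustrative assumptions, not code taken from libmlx4.

/*
 * Illustrative only: the shape of the userspace -> kernel handoff.
 * The kernel reads exactly two sizing fields from the create-QP
 * command (see set_user_sq_size() in the diff); this mirror of the
 * ABI struct and the rounding helper are assumptions for the example.
 */
#include <stdint.h>

struct mlx4_create_qp_cmd {
        uint64_t buf_addr;              /* userspace QP buffer, later pinned by ib_umem_get() */
        uint8_t  log_sq_bb_count;       /* log2 of the number of SQ basic blocks (BBs) */
        uint8_t  log_sq_stride;         /* log2 of the SQ WQE stride in bytes */
        uint8_t  reserved[6];
};

/* Round v up to a power of two and return its log2. */
static uint8_t ulog2_roundup(uint32_t v)
{
        uint8_t n = 0;

        while ((1u << n) < v)
                n++;
        return n;
}

static void fill_sq_sizes(struct mlx4_create_qp_cmd *cmd,
                          uint32_t max_send_wr, uint32_t wqe_bytes)
{
        /* One BB per WQE in this example; the point of the patch is
         * that userspace may instead use several BBs per WQE. */
        cmd->log_sq_bb_count = ulog2_roundup(max_send_wr ? max_send_wr : 1);
        cmd->log_sq_stride   = ulog2_roundup(wqe_bytes);
}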
Diffstat (limited to 'drivers/infiniband/hw/mlx4/qp.c')
-rw-r--r--   drivers/infiniband/hw/mlx4/qp.c   63
1 file changed, 47 insertions(+), 16 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 5706f988e2ec..a824bc5f79fd 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -188,14 +188,32 @@ static int send_wqe_overhead(enum ib_qp_type type)
 	}
 }
 
-static int set_qp_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
-		       enum ib_qp_type type, struct mlx4_ib_qp *qp)
+static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
+		       struct mlx4_ib_qp *qp)
 {
-	/* Sanity check QP size before proceeding */
+	/* Sanity check RQ size before proceeding */
+	if (cap->max_recv_wr  > dev->dev->caps.max_wqes ||
+	    cap->max_recv_sge > dev->dev->caps.max_rq_sg)
+		return -EINVAL;
+
+	qp->rq.max = cap->max_recv_wr ? roundup_pow_of_two(cap->max_recv_wr) : 0;
+
+	qp->rq.wqe_shift = ilog2(roundup_pow_of_two(cap->max_recv_sge *
+						    sizeof (struct mlx4_wqe_data_seg)));
+	qp->rq.max_gs    = (1 << qp->rq.wqe_shift) / sizeof (struct mlx4_wqe_data_seg);
+
+	cap->max_recv_wr  = qp->rq.max;
+	cap->max_recv_sge = qp->rq.max_gs;
+
+	return 0;
+}
+
+static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
+			      enum ib_qp_type type, struct mlx4_ib_qp *qp)
+{
+	/* Sanity check SQ size before proceeding */
 	if (cap->max_send_wr     > dev->dev->caps.max_wqes  ||
-	    cap->max_recv_wr     > dev->dev->caps.max_wqes  ||
 	    cap->max_send_sge    > dev->dev->caps.max_sq_sg ||
-	    cap->max_recv_sge    > dev->dev->caps.max_rq_sg ||
 	    cap->max_inline_data + send_wqe_overhead(type) +
 	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
 		return -EINVAL;
@@ -208,12 +226,7 @@ static int set_qp_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
 		return -EINVAL;
 
-	qp->rq.max = cap->max_recv_wr ? roundup_pow_of_two(cap->max_recv_wr) : 0;
-	qp->sq.max = cap->max_send_wr ? roundup_pow_of_two(cap->max_send_wr) : 0;
-
-	qp->rq.wqe_shift = ilog2(roundup_pow_of_two(cap->max_recv_sge *
-						    sizeof (struct mlx4_wqe_data_seg)));
-	qp->rq.max_gs    = (1 << qp->rq.wqe_shift) / sizeof (struct mlx4_wqe_data_seg);
+	qp->sq.max = cap->max_send_wr ? roundup_pow_of_two(cap->max_send_wr) : 1;
 
 	qp->sq.wqe_shift = ilog2(roundup_pow_of_two(max(cap->max_send_sge *
							sizeof (struct mlx4_wqe_data_seg),
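
[Editor's note] To make the kernel-path arithmetic concrete: struct mlx4_wqe_data_seg is a 16-byte gather entry (byte count, lkey, 64-bit address), so the standalone rehearsal below shows how a request for 4 send SGEs turns into a 64-byte stride and sq.wqe_shift = 6, assuming the SGE term wins the max() against the inline-data term. The inputs are invented for the example.

/* Standalone rehearsal of the sq.wqe_shift computation above, with
 * invented inputs. Compiles and runs as-is. */
#include <stdio.h>

static unsigned roundup_pow_of_two(unsigned v)
{
        unsigned r = 1;

        while (r < v)
                r <<= 1;
        return r;
}

static unsigned ilog2(unsigned v)
{
        unsigned n = 0;

        while (v >>= 1)
                n++;
        return n;
}

int main(void)
{
        unsigned max_send_sge  = 4;     /* requested by the consumer */
        unsigned data_seg_size = 16;    /* sizeof(struct mlx4_wqe_data_seg) */
        unsigned stride = roundup_pow_of_two(max_send_sge * data_seg_size);

        /* Prints: stride = 64, wqe_shift = 6 */
        printf("stride = %u, wqe_shift = %u\n", stride, ilog2(stride));
        return 0;
}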
@@ -233,16 +246,26 @@ static int set_qp_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 		qp->sq.offset = 0;
 	}
 
 	cap->max_send_wr  = qp->sq.max;
-	cap->max_recv_wr  = qp->rq.max;
 	cap->max_send_sge = qp->sq.max_gs;
-	cap->max_recv_sge = qp->rq.max_gs;
 	cap->max_inline_data = (1 << qp->sq.wqe_shift) - send_wqe_overhead(type) -
 		sizeof (struct mlx4_wqe_inline_seg);
 
 	return 0;
 }
 
+static int set_user_sq_size(struct mlx4_ib_qp *qp,
+			    struct mlx4_ib_create_qp *ucmd)
+{
+	qp->sq.max	 = 1 << ucmd->log_sq_bb_count;
+	qp->sq.wqe_shift = ucmd->log_sq_stride;
+
+	qp->buf_size	 = (qp->rq.max << qp->rq.wqe_shift) +
+			   (qp->sq.max << qp->sq.wqe_shift);
+
+	return 0;
+}
+
 static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			    struct ib_qp_init_attr *init_attr,
 			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
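
[Editor's note] set_user_sq_size() is the whole kernel side of the new contract: two shifts in, one buffer size out. With invented example numbers, an RQ of 256 WQEs at a 32-byte stride plus an SQ of 128 BBs at a 64-byte stride pins 16 KB:

/* Worked example of the buf_size formula in set_user_sq_size(),
 * using invented sizes. */
#include <stdio.h>

int main(void)
{
        unsigned rq_max = 256, rq_wqe_shift = 5;  /* 256 RQ WQEs, 32 B each */
        unsigned sq_max = 128, sq_wqe_shift = 6;  /* 128 SQ BBs, 64 B each */

        unsigned buf_size = (rq_max << rq_wqe_shift) +
                            (sq_max << sq_wqe_shift);

        /* Prints: buf_size = 16384 (what ib_umem_get() later pins) */
        printf("buf_size = %u\n", buf_size);
        return 0;
}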
@@ -264,7 +287,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	qp->sq.head    = 0;
 	qp->sq.tail    = 0;
 
-	err = set_qp_size(dev, &init_attr->cap, init_attr->qp_type, qp);
+	err = set_rq_size(dev, &init_attr->cap, qp);
 	if (err)
 		goto err;
 
@@ -276,6 +299,10 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			goto err;
 		}
 
+		err = set_user_sq_size(qp, &ucmd);
+		if (err)
+			goto err;
+
 		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
 				       qp->buf_size, 0);
 		if (IS_ERR(qp->umem)) {
@@ -297,6 +324,10 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		if (err)
 			goto err_mtt;
 	} else {
+		err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
+		if (err)
+			goto err;
+
 		err = mlx4_ib_db_alloc(dev, &qp->db, 0);
 		if (err)
 			goto err;
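
[Editor's note] One thing worth flagging: as committed, set_user_sq_size() takes the userspace-supplied values on trust, while the kernel path still validates against device capabilities. A hardened variant might bound them the same way set_kernel_sq_size() does. The sketch below is along those lines and is not code from this commit; the function name and exact bounds are assumptions.

/* Hypothetical hardening, not part of this patch: sanity check the
 * userspace-supplied SQ sizes against device limits before use. */
static int set_user_sq_size_checked(struct mlx4_ib_dev *dev,
				    struct mlx4_ib_qp *qp,
				    struct mlx4_ib_create_qp *ucmd)
{
	/* Reject sizes the device cannot support. */
	if (ucmd->log_sq_bb_count > ilog2(roundup_pow_of_two(dev->dev->caps.max_wqes)) ||
	    ucmd->log_sq_stride   > ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)))
		return -EINVAL;

	qp->sq.max	 = 1 << ucmd->log_sq_bb_count;
	qp->sq.wqe_shift = ucmd->log_sq_stride;

	qp->buf_size	 = (qp->rq.max << qp->rq.wqe_shift) +
			   (qp->sq.max << qp->sq.wqe_shift);

	return 0;
}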