author      Vladimir Sokolovsky <vlad@dev.mellanox.co.il>    2008-04-17 00:09:33 -0400
committer   Roland Dreier <rolandd@cisco.com>                2008-04-17 00:09:33 -0400
commit      bbf8eed1a0f8949f7385146624f736f829992a70 (patch)
tree        5cf6a5c76ca4c038d3ca0f53abc5f2976872696b /drivers/infiniband/hw/mlx4/cq.c
parent      3fdcb97f0b8d8a29117dc36acd0b15965d2a2160 (diff)
IB/mlx4: Add support for resizing CQs
Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
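
[Annotation] The new code path is reachable through the standard verbs resize entry points. A hedged userspace sketch of how it would be exercised (assuming a libmlx4 build with matching resize support; the helper name grow_cq is invented for illustration, and error handling is trimmed):

    /* Hedged sketch: resize an existing CQ via libibverbs.  ibv_resize_cq()
     * traps into ib_uverbs, which calls the driver's resize_cq method --
     * mlx4_ib_resize_cq() after this patch.
     */
    #include <stdio.h>
    #include <infiniband/verbs.h>

    static int grow_cq(struct ibv_cq *cq, int new_depth)
    {
            int err = ibv_resize_cq(cq, new_depth);

            if (err)
                    return err;     /* e.g. EBUSY if a resize is in flight */

            /* The driver may round the depth up to a power of two. */
            printf("CQ now holds %d entries\n", cq->cqe);
            return 0;
    }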
Diffstat (limited to 'drivers/infiniband/hw/mlx4/cq.c')

-rw-r--r--    drivers/infiniband/hw/mlx4/cq.c    292
1 file changed, 259 insertions(+), 33 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index e4fb64b118e3..3557e7edc9b6 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -93,6 +93,74 @@ int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
 }
 
+static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
+{
+	int err;
+
+	err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
+			     PAGE_SIZE * 2, &buf->buf);
+
+	if (err)
+		goto out;
+
+	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
+			    &buf->mtt);
+	if (err)
+		goto err_buf;
+
+	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
+	if (err)
+		goto err_mtt;
+
+	return 0;
+
+err_mtt:
+	mlx4_mtt_cleanup(dev->dev, &buf->mtt);
+
+err_buf:
+	mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
+		      &buf->buf);
+
+out:
+	return err;
+}
+
+static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
+{
+	mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
+}
+
+static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
+			       struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
+			       u64 buf_addr, int cqe)
+{
+	int err;
+
+	*umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
+			    IB_ACCESS_LOCAL_WRITE);
+	if (IS_ERR(*umem))
+		return PTR_ERR(*umem);
+
+	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
+			    ilog2((*umem)->page_size), &buf->mtt);
+	if (err)
+		goto err_buf;
+
+	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
+	if (err)
+		goto err_mtt;
+
+	return 0;
+
+err_mtt:
+	mlx4_mtt_cleanup(dev->dev, &buf->mtt);
+
+err_buf:
+	ib_umem_release(*umem);
+
+	return err;
+}
+
 struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
 				struct ib_ucontext *context,
 				struct ib_udata *udata)
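
[Annotation] A quick aside on the shape of the two allocation helpers above: they follow the usual kernel goto-unwind pattern, where each failure label releases exactly what was acquired before the failing step, in reverse order. A minimal self-contained sketch of the idiom, with hypothetical malloc'd resources standing in for the buffer/MTT pair:

    /* Hedged, self-contained illustration (not mlx4 calls): step-by-step
     * acquisition with reverse-order unwind labels on failure.
     */
    #include <stdlib.h>

    static int acquire_pair(void **a, void **b)
    {
            int err = -1;

            *a = malloc(64);        /* step 1: like mlx4_buf_alloc()  */
            if (!*a)
                    goto out;

            *b = malloc(64);        /* step 2: like mlx4_mtt_init()   */
            if (!*b)
                    goto err_a;

            return 0;               /* success: caller owns *a and *b */

    err_a:
            free(*a);               /* undo step 1 only               */
    out:
            return err;
    }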
@@ -100,7 +168,6 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
 	struct mlx4_ib_cq *cq;
 	struct mlx4_uar *uar;
-	int buf_size;
 	int err;
 
 	if (entries < 1 || entries > dev->dev->caps.max_cqes)
@@ -112,8 +179,10 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 
 	entries      = roundup_pow_of_two(entries + 1);
 	cq->ibcq.cqe = entries - 1;
-	buf_size     = entries * sizeof (struct mlx4_cqe);
+	mutex_init(&cq->resize_mutex);
 	spin_lock_init(&cq->lock);
+	cq->resize_buf = NULL;
+	cq->resize_umem = NULL;
 
 	if (context) {
 		struct mlx4_ib_create_cq ucmd;
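
[Annotation] A hedged worked example of the sizing math just above (standalone demo, not driver code): the requested depth is bumped by one and rounded up to a power of two for the hardware ring, and ibcq.cqe reports one less than the ring size, presumably so index masking stays a cheap AND and a completely full ring is never ambiguous:

    /* Standalone demo of the CQ sizing arithmetic:
     * ring = roundup_pow_of_two(requested + 1), usable depth = ring - 1.
     */
    #include <stdio.h>

    static unsigned int roundup_pow_of_two_demo(unsigned int n)
    {
            unsigned int ring = 1;

            while (ring < n)
                    ring <<= 1;
            return ring;
    }

    int main(void)
    {
            int requested = 100;
            unsigned int ring = roundup_pow_of_two_demo(requested + 1);

            /* prints: ring=128 usable=127 */
            printf("ring=%u usable=%u\n", ring, ring - 1);
            return 0;
    }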
@@ -123,21 +192,10 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 			goto err_cq;
 		}
 
-		cq->umem = ib_umem_get(context, ucmd.buf_addr, buf_size,
-				       IB_ACCESS_LOCAL_WRITE);
-		if (IS_ERR(cq->umem)) {
-			err = PTR_ERR(cq->umem);
-			goto err_cq;
-		}
-
-		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(cq->umem),
-				    ilog2(cq->umem->page_size), &cq->buf.mtt);
+		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
+					  ucmd.buf_addr, entries);
 		if (err)
-			goto err_buf;
-
-		err = mlx4_ib_umem_write_mtt(dev, &cq->buf.mtt, cq->umem);
-		if (err)
-			goto err_mtt;
+			goto err_cq;
 
 		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
 					  &cq->db);
@@ -155,19 +213,9 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector | |||
155 | *cq->mcq.set_ci_db = 0; | 213 | *cq->mcq.set_ci_db = 0; |
156 | *cq->mcq.arm_db = 0; | 214 | *cq->mcq.arm_db = 0; |
157 | 215 | ||
158 | if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &cq->buf.buf)) { | 216 | err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries); |
159 | err = -ENOMEM; | ||
160 | goto err_db; | ||
161 | } | ||
162 | |||
163 | err = mlx4_mtt_init(dev->dev, cq->buf.buf.npages, cq->buf.buf.page_shift, | ||
164 | &cq->buf.mtt); | ||
165 | if (err) | 217 | if (err) |
166 | goto err_buf; | 218 | goto err_db; |
167 | |||
168 | err = mlx4_buf_write_mtt(dev->dev, &cq->buf.mtt, &cq->buf.buf); | ||
169 | if (err) | ||
170 | goto err_mtt; | ||
171 | 219 | ||
172 | uar = &dev->priv_uar; | 220 | uar = &dev->priv_uar; |
173 | } | 221 | } |
@@ -195,12 +243,10 @@ err_dbmap:
 err_mtt:
 	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);
 
-err_buf:
 	if (context)
 		ib_umem_release(cq->umem);
 	else
-		mlx4_buf_free(dev->dev, entries * sizeof (struct mlx4_cqe),
-			      &cq->buf.buf);
+		mlx4_ib_free_cq_buf(dev, &cq->buf, entries);
 
 err_db:
 	if (!context)
@@ -212,6 +258,170 @@ err_cq:
 	return ERR_PTR(err);
 }
 
+static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
+				 int entries)
+{
+	int err;
+
+	if (cq->resize_buf)
+		return -EBUSY;
+
+	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+	if (!cq->resize_buf)
+		return -ENOMEM;
+
+	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
+	if (err) {
+		kfree(cq->resize_buf);
+		cq->resize_buf = NULL;
+		return err;
+	}
+
+	cq->resize_buf->cqe = entries - 1;
+
+	return 0;
+}
+
+static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
+				  int entries, struct ib_udata *udata)
+{
+	struct mlx4_ib_resize_cq ucmd;
+	int err;
+
+	if (cq->resize_umem)
+		return -EBUSY;
+
+	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
+		return -EFAULT;
+
+	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+	if (!cq->resize_buf)
+		return -ENOMEM;
+
+	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
+				  &cq->resize_umem, ucmd.buf_addr, entries);
+	if (err) {
+		kfree(cq->resize_buf);
+		cq->resize_buf = NULL;
+		return err;
+	}
+
+	cq->resize_buf->cqe = entries - 1;
+
+	return 0;
+}
+
+static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
+{
+	u32 i;
+
+	i = cq->mcq.cons_index;
+	while (get_sw_cqe(cq, i & cq->ibcq.cqe))
+		++i;
+
+	return i - cq->mcq.cons_index;
+}
+
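[Annotation] The counting loop above relies on cons_index being a free-running u32 that is only masked (i & cq->ibcq.cqe) when a slot is touched, so the unsigned difference i - cons_index stays correct even across a 2^32 wrap. A standalone demonstration:

    /* Standalone demo: free-running unsigned indices masked into a
     * power-of-two ring survive wraparound, so "i - cons" is always the
     * number of entries walked.
     */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t ring_mask = 127;              /* 128-entry ring, like ibcq.cqe */
            uint32_t cons = UINT32_MAX - 1;        /* counter about to wrap         */
            uint32_t i = cons;

            i += 5;                                /* pretend 5 CQEs were found     */

            /* prints: slot=3 outstanding=5, despite the wrap */
            printf("slot=%u outstanding=%u\n", i & ring_mask, i - cons);
            return 0;
    }
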
+static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
+{
+	struct mlx4_cqe *cqe;
+	int i;
+
+	i = cq->mcq.cons_index;
+	cqe = get_cqe(cq, i & cq->ibcq.cqe);
+	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
+		memcpy(get_cqe_from_buf(&cq->resize_buf->buf,
+					(i + 1) & cq->resize_buf->cqe),
+		       get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
+		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
+	}
+	++cq->mcq.cons_index;
+}
+
+int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
+{
+	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
+	struct mlx4_ib_cq *cq = to_mcq(ibcq);
+	int outst_cqe;
+	int err;
+
+	mutex_lock(&cq->resize_mutex);
+
+	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	entries = roundup_pow_of_two(entries + 1);
+	if (entries == ibcq->cqe + 1) {
+		err = 0;
+		goto out;
+	}
+
+	if (ibcq->uobject) {
+		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
+		if (err)
+			goto out;
+	} else {
+		/* Can't be smaller then the number of outstanding CQEs */
+		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
+		if (entries < outst_cqe + 1) {
+			err = 0;
+			goto out;
+		}
+
+		err = mlx4_alloc_resize_buf(dev, cq, entries);
+		if (err)
+			goto out;
+	}
+
+	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
+	if (err)
+		goto err_buf;
+
+	if (ibcq->uobject) {
+		cq->buf      = cq->resize_buf->buf;
+		cq->ibcq.cqe = cq->resize_buf->cqe;
+		ib_umem_release(cq->umem);
+		cq->umem     = cq->resize_umem;
+
+		kfree(cq->resize_buf);
+		cq->resize_buf = NULL;
+		cq->resize_umem = NULL;
+	} else {
+		spin_lock_irq(&cq->lock);
+		if (cq->resize_buf) {
+			mlx4_ib_cq_resize_copy_cqes(cq);
+			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+			cq->buf      = cq->resize_buf->buf;
+			cq->ibcq.cqe = cq->resize_buf->cqe;
+
+			kfree(cq->resize_buf);
+			cq->resize_buf = NULL;
+		}
+		spin_unlock_irq(&cq->lock);
+	}
+
+	goto out;
+
+err_buf:
+	if (!ibcq->uobject)
+		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
+				    cq->resize_buf->cqe);
+
+	kfree(cq->resize_buf);
+	cq->resize_buf = NULL;
+
+	if (cq->resize_umem) {
+		ib_umem_release(cq->resize_umem);
+		cq->resize_umem = NULL;
+	}
+
+out:
+	mutex_unlock(&cq->resize_mutex);
+	return err;
+}
+
 int mlx4_ib_destroy_cq(struct ib_cq *cq)
 {
 	struct mlx4_ib_dev *dev = to_mdev(cq->device);
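
[Annotation] For the kernel-only branch above, a hedged sketch of how an in-kernel consumer would reach mlx4_ib_resize_cq() (the helper ulp_grow_cq is invented for illustration; ib_resize_cq() dispatches to the device's resize_cq method):

    /* Hedged sketch of an in-kernel caller; live CQEs are migrated for it
     * under the CQ lock by mlx4_ib_cq_resize_copy_cqes() above.
     */
    #include <rdma/ib_verbs.h>

    static int ulp_grow_cq(struct ib_cq *cq, int wanted)
    {
            int err = ib_resize_cq(cq, wanted);

            if (err)
                    return err;     /* -EINVAL, -EBUSY, -ENOMEM, ... */

            /* cq->cqe now reflects the new usable depth (power of two - 1). */
            return 0;
    }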
@@ -224,8 +434,7 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq)
 		mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
 		ib_umem_release(mcq->umem);
 	} else {
-		mlx4_buf_free(dev->dev, (cq->cqe + 1) * sizeof (struct mlx4_cqe),
-			      &mcq->buf.buf);
+		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1);
 		mlx4_ib_db_free(dev, &mcq->db);
 	}
 
@@ -332,6 +541,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 	u32 g_mlpath_rqpn;
 	u16 wqe_ctr;
 
+repoll:
 	cqe = next_cqe_sw(cq);
 	if (!cqe)
 		return -EAGAIN;
@@ -354,6 +564,22 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 		return -EINVAL;
 	}
 
+	/* Resize CQ in progress */
+	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
+		if (cq->resize_buf) {
+			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);
+
+			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+			cq->buf      = cq->resize_buf->buf;
+			cq->ibcq.cqe = cq->resize_buf->cqe;
+
+			kfree(cq->resize_buf);
+			cq->resize_buf = NULL;
+		}
+
+		goto repoll;
+	}
+
 	if (!*cur_qp ||
 	    (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
 		/*
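
[Annotation] A final consumer-side note: because mlx4_ib_poll_one() consumes RESIZE CQEs itself and jumps back to repoll, polling loops in ULPs observe only ordinary completions while a resize is in flight and need no changes. A hedged sketch of such a loop:

    /* Hedged sketch: ordinary CQ polling is unaffected by an in-flight
     * resize, since the driver swallows MLX4_CQE_OPCODE_RESIZE entries
     * and re-polls internally.
     */
    #include <rdma/ib_verbs.h>

    static void drain_cq(struct ib_cq *cq)
    {
            struct ib_wc wc;

            while (ib_poll_cq(cq, 1, &wc) > 0) {
                    /* handle wc.status / wc.opcode as usual */
            }
    }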