Diffstat (limited to 'drivers/infiniband/hw/qib/qib_mr.c')
-rw-r--r--   drivers/infiniband/hw/qib/qib_mr.c   46
1 file changed, 24 insertions, 22 deletions
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
index 19220dcb9a3b..294f5c706be9 100644
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -303,6 +303,7 @@ int qib_dereg_mr(struct ib_mr *ibmr)
 	int ret = 0;
 	unsigned long timeout;
 
+	kfree(mr->pages);
 	qib_free_lkey(&mr->mr);
 
 	qib_put_mr(&mr->mr); /* will set completion if last */
@@ -323,7 +324,7 @@ out:
 
 /*
  * Allocate a memory region usable with the
- * IB_WR_FAST_REG_MR send work request.
+ * IB_WR_REG_MR send work request.
  *
  * Return the memory region on success, otherwise return an errno.
  */
@@ -340,37 +341,38 @@ struct ib_mr *qib_alloc_mr(struct ib_pd *pd,
 	if (IS_ERR(mr))
 		return (struct ib_mr *)mr;
 
+	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
+	if (!mr->pages)
+		goto err;
+
 	return &mr->ibmr;
+
+err:
+	qib_dereg_mr(&mr->ibmr);
+	return ERR_PTR(-ENOMEM);
 }
 
-struct ib_fast_reg_page_list *
-qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
+static int qib_set_page(struct ib_mr *ibmr, u64 addr)
 {
-	unsigned size = page_list_len * sizeof(u64);
-	struct ib_fast_reg_page_list *pl;
-
-	if (size > PAGE_SIZE)
-		return ERR_PTR(-EINVAL);
-
-	pl = kzalloc(sizeof(*pl), GFP_KERNEL);
-	if (!pl)
-		return ERR_PTR(-ENOMEM);
+	struct qib_mr *mr = to_imr(ibmr);
 
-	pl->page_list = kzalloc(size, GFP_KERNEL);
-	if (!pl->page_list)
-		goto err_free;
+	if (unlikely(mr->npages == mr->mr.max_segs))
+		return -ENOMEM;
 
-	return pl;
+	mr->pages[mr->npages++] = addr;
 
-err_free:
-	kfree(pl);
-	return ERR_PTR(-ENOMEM);
+	return 0;
 }
 
-void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
+int qib_map_mr_sg(struct ib_mr *ibmr,
+		  struct scatterlist *sg,
+		  int sg_nents)
 {
-	kfree(pl->page_list);
-	kfree(pl);
+	struct qib_mr *mr = to_imr(ibmr);
+
+	mr->npages = 0;
+
+	return ib_sg_to_pages(ibmr, sg, sg_nents, qib_set_page);
 }
 
 /**
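
Note (added for context, not part of the patch): with this change, an upper-layer protocol registers memory by mapping a scatterlist onto the MR and posting an IB_WR_REG_MR work request, instead of allocating a separate fast-register page list. Below is a minimal sketch against the verbs API of this kernel generation; the helper name and the QP/MR setup are assumptions for illustration only, not code from this driver.

#include <rdma/ib_verbs.h>

/*
 * Hypothetical ULP helper (illustration only): map an SG list onto a
 * fast-registration MR and post an IB_WR_REG_MR work request.  Assumes
 * the caller already owns a connected QP and an MR obtained from
 * ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_num_sg).
 */
static int example_fast_register(struct ib_qp *qp, struct ib_mr *mr,
                                 struct scatterlist *sg, int sg_nents)
{
        struct ib_reg_wr reg_wr = { };
        struct ib_send_wr *bad_wr;
        int n;

        /*
         * The core dispatches to the driver's map_mr_sg hook
         * (qib_map_mr_sg above), which resets mr->npages and lets
         * ib_sg_to_pages() feed each page address to qib_set_page().
         */
        n = ib_map_mr_sg(mr, sg, sg_nents, PAGE_SIZE);
        if (n < sg_nents)
                return n < 0 ? n : -EINVAL;

        /* Post the registration work request on the send queue. */
        reg_wr.wr.opcode = IB_WR_REG_MR;
        reg_wr.wr.send_flags = IB_SEND_SIGNALED;
        reg_wr.mr = mr;
        reg_wr.key = mr->rkey;
        reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

        return ib_post_send(qp, &reg_wr.wr, &bad_wr);
}

The page-collection bookkeeping that qib_alloc_fast_reg_page_list() and qib_free_fast_reg_page_list() used to expose is now private to the driver: the page array is allocated in qib_alloc_mr() and freed in qib_dereg_mr(), as shown in the diff above.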