Diffstat (limited to 'drivers/infiniband/hw/mlx4/mr.c')
-rw-r--r--	drivers/infiniband/hw/mlx4/mr.c	34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 631272172a0b..5d73989d9771 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -277,20 +277,23 @@ mlx4_alloc_priv_pages(struct ib_device *device,
 		      struct mlx4_ib_mr *mr,
 		      int max_pages)
 {
-	int size = max_pages * sizeof(u64);
-	int add_size;
 	int ret;
 
-	add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
+	/* Ensure that size is aligned to DMA cacheline
+	 * requirements.
+	 * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
+	 * so page_map_size will never cross PAGE_SIZE.
+	 */
+	mr->page_map_size = roundup(max_pages * sizeof(u64),
+				    MLX4_MR_PAGES_ALIGN);
 
-	mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL);
-	if (!mr->pages_alloc)
+	/* Prevent cross page boundary allocation. */
+	mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
+	if (!mr->pages)
 		return -ENOMEM;
 
-	mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN);
-
 	mr->page_map = dma_map_single(device->dma_device, mr->pages,
-				      size, DMA_TO_DEVICE);
+				      mr->page_map_size, DMA_TO_DEVICE);
 
 	if (dma_mapping_error(device->dma_device, mr->page_map)) {
 		ret = -ENOMEM;
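Why the new sizing can never cross a page boundary: max_pages is capped at MLX4_MAX_FAST_REG_PAGES, so the rounded-up size fits inside the single zeroed page allocated above. A minimal userspace sketch of that arithmetic, assuming MLX4_MAX_FAST_REG_PAGES is 511 and MLX4_MR_PAGES_ALIGN is 0x40 as in the mlx4 headers (neither value appears in this patch):

	#include <stdio.h>

	#define MLX4_MAX_FAST_REG_PAGES	511	/* assumed cap on max_pages */
	#define MLX4_MR_PAGES_ALIGN	0x40	/* assumed 64-byte DMA alignment */
	#define MIN_PAGE_SIZE		4096	/* smallest PAGE_SIZE Linux supports */

	/* Same arithmetic as the kernel's roundup(x, y) macro. */
	static unsigned long roundup_to(unsigned long x, unsigned long y)
	{
		return ((x + y - 1) / y) * y;
	}

	int main(void)
	{
		/* 511 entries * 8 bytes = 4088, rounded up to 64 gives 4096. */
		unsigned long size =
			roundup_to(MLX4_MAX_FAST_REG_PAGES * sizeof(unsigned long long),
				   MLX4_MR_PAGES_ALIGN);

		printf("page_map_size = %lu, fits in one page: %s\n",
		       size, size <= MIN_PAGE_SIZE ? "yes" : "no");
		return 0;
	}

Since get_zeroed_page() returns a page-aligned buffer, a region of at most 4096 bytes starting at offset 0 cannot straddle a page boundary.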
@@ -298,9 +301,9 @@ mlx4_alloc_priv_pages(struct ib_device *device,
 	}
 
 	return 0;
-err:
-	kfree(mr->pages_alloc);
 
+err:
+	free_page((unsigned long)mr->pages);
 	return ret;
 }
 
@@ -309,11 +312,10 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
 {
 	if (mr->pages) {
 		struct ib_device *device = mr->ibmr.device;
-		int size = mr->max_pages * sizeof(u64);
 
 		dma_unmap_single(device->dma_device, mr->page_map,
-				 size, DMA_TO_DEVICE);
-		kfree(mr->pages_alloc);
+				 mr->page_map_size, DMA_TO_DEVICE);
+		free_page((unsigned long)mr->pages);
 		mr->pages = NULL;
 	}
 }
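The teardown now mirrors the setup exactly: dma_unmap_single() must be called with the same length that was passed to dma_map_single(), which is why the size is recorded once in mr->page_map_size rather than recomputed, and free_page() pairs with get_zeroed_page() both in the error path above and here. A simplified sketch of that pairing; the names dma_buf_state and dma_buf_teardown are hypothetical, not from the mlx4 driver:

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	struct dma_buf_state {			/* hypothetical example struct */
		void		*cpu_addr;	/* from get_zeroed_page() */
		dma_addr_t	dma_addr;	/* from dma_map_single() */
		size_t		mapped_len;	/* recorded at map time, reused here */
	};

	static void dma_buf_teardown(struct device *dev, struct dma_buf_state *s)
	{
		if (!s->cpu_addr)
			return;
		/* The DMA API requires the same size on unmap as on map. */
		dma_unmap_single(dev, s->dma_addr, s->mapped_len, DMA_TO_DEVICE);
		/* free_page() is the pair of get_zeroed_page(). */
		free_page((unsigned long)s->cpu_addr);
		s->cpu_addr = NULL;
	}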
@@ -537,14 +539,12 @@ int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	mr->npages = 0;
 
 	ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
-				   sizeof(u64) * mr->max_pages,
-				   DMA_TO_DEVICE);
+				   mr->page_map_size, DMA_TO_DEVICE);
 
 	rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);
 
 	ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
-				      sizeof(u64) * mr->max_pages,
-				      DMA_TO_DEVICE);
+				      mr->page_map_size, DMA_TO_DEVICE);
 
 	return rc;
 }
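The two sync calls bracket the CPU writes that ib_sg_to_pages() performs through mlx4_set_page(): ownership of the streaming DMA_TO_DEVICE buffer moves to the CPU, the page list is filled in, and ownership returns to the device. A sketch of the same pattern using the generic DMA API (the driver itself uses the ib_dma_* wrappers, which take a struct ib_device instead of a struct device):

	#include <linux/dma-mapping.h>
	#include <linux/types.h>
	#include <asm/byteorder.h>

	static void publish_one_entry(struct device *dev, dma_addr_t dma,
				      __be64 *cpu_buf, size_t len, u64 addr)
	{
		/* Hand the mapped buffer to the CPU before writing it. */
		dma_sync_single_for_cpu(dev, dma, len, DMA_TO_DEVICE);

		cpu_buf[0] = cpu_to_be64(addr);		/* CPU-side update */

		/* Hand it back so the device observes a coherent view. */
		dma_sync_single_for_device(dev, dma, len, DMA_TO_DEVICE);
	}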