author	Hiroshi Doyu <hdoyu@nvidia.com>	2012-08-28 01:13:01 -0400
committer	Marek Szyprowski <m.szyprowski@samsung.com>	2012-08-28 15:01:05 -0400
commit	6b3fe47264262fa082897ebe8ae01041eae65e14 (patch)
tree	a3802f904ad0f9b0b9e4565173ed24c95915987c /arch/arm/mm/dma-mapping.c
parent	cb01b633eeb77ae7128cab0a3b5d3de56da6e913 (diff)
ARM: dma-mapping: atomic_pool with struct page **pages
struct page **pages is necessary to align with the non-atomic path in __iommu_get_pages(). atomic_pool now carries the initialized **pages array instead of just a single *page.

Signed-off-by: Hiroshi Doyu <hdoyu@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
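To make the shape of the change concrete: the pool still allocates one contiguous block, but it now also records every backing page in an explicit struct page ** array, so later lookups go through an index instead of pointer arithmetic on a single base page. A minimal userspace sketch of that pattern follows; mock_page, mock_pool, and every other name below are hypothetical stand-ins, not kernel code:

/*
 * Illustration only (userspace, hypothetical names) of the pattern the
 * patch introduces: record each page of a contiguous block in an
 * explicit array, then look pages up by index instead of doing
 * pointer arithmetic on a single base-page pointer.
 */
#include <stdio.h>
#include <stdlib.h>

struct mock_page { int pfn; };            /* stand-in for struct page */

struct mock_pool {
	unsigned long nr_pages;
	struct mock_page **pages;         /* was: struct mock_page *page */
};

static int mock_pool_init(struct mock_pool *pool, struct mock_page *base,
			  unsigned long nr_pages)
{
	unsigned long i;

	pool->pages = calloc(nr_pages, sizeof(*pool->pages));
	if (!pool->pages)
		return -1;

	/* Contiguous backing: page i is simply base + i, as in the patch. */
	for (i = 0; i < nr_pages; i++)
		pool->pages[i] = base + i;

	pool->nr_pages = nr_pages;
	return 0;
}

int main(void)
{
	struct mock_page block[4] = { {0}, {1}, {2}, {3} };
	struct mock_pool pool;

	if (mock_pool_init(&pool, block, 4))
		return 1;

	/* Index lookup replaces "pool->page + pageno". */
	printf("page 2 -> pfn %d\n", pool.pages[2]->pfn);
	free(pool.pages);
	return 0;
}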
Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r--	arch/arm/mm/dma-mapping.c	17
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index acced9332109..9a21284a6ac4 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -275,7 +275,7 @@ struct dma_pool {
 	unsigned long *bitmap;
 	unsigned long nr_pages;
 	void *vaddr;
-	struct page *page;
+	struct page **pages;
 };
 
 static struct dma_pool atomic_pool = {
@@ -314,6 +314,7 @@ static int __init atomic_pool_init(void)
 	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
 	unsigned long *bitmap;
 	struct page *page;
+	struct page **pages;
 	void *ptr;
 	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
 
@@ -321,21 +322,31 @@ static int __init atomic_pool_init(void)
 	if (!bitmap)
 		goto no_bitmap;
 
+	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+	if (!pages)
+		goto no_pages;
+
 	if (IS_ENABLED(CONFIG_CMA))
 		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
 	else
 		ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
					   &page, NULL);
 	if (ptr) {
+		int i;
+
+		for (i = 0; i < nr_pages; i++)
+			pages[i] = page + i;
+
 		spin_lock_init(&pool->lock);
 		pool->vaddr = ptr;
-		pool->page = page;
+		pool->pages = pages;
 		pool->bitmap = bitmap;
 		pool->nr_pages = nr_pages;
 		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
 			(unsigned)pool->size / 1024);
 		return 0;
 	}
+no_pages:
 	kfree(bitmap);
 no_bitmap:
 	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
@@ -460,7 +471,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
 	if (pageno < pool->nr_pages) {
 		bitmap_set(pool->bitmap, pageno, count);
 		ptr = pool->vaddr + PAGE_SIZE * pageno;
-		*ret_page = pool->page + pageno;
+		*ret_page = pool->pages[pageno];
 	} else {
 		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
			    "Please increase it with coherent_pool= kernel parameter!\n",