author		Laura Abbott <lauraa@codeaurora.org>	2014-10-09 18:26:42 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-09 22:25:52 -0400
commit		36d0fd2198da3fd16b0e0da50ece05b4d295d2f1 (patch)
tree		da587f5f38ae3b28a2815dfae658700d03a3a14a /arch
parent		513510ddba9650fc7da456eefeb0ead7632324f6 (diff)
arm: use genalloc for the atomic pool
ARM currently uses a bitmap for tracking atomic allocations. genalloc
already handles this type of memory pool allocation, so switch to using
that instead.

Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: David Riley <davidriley@chromium.org>
Cc: Olof Johansson <olof@lixom.net>
Cc: Ritesh Harjain <ritesh.harjani@gmail.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Thierry Reding <thierry.reding@gmail.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
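[Editor's note: for background, genalloc (gen_pool) is the kernel's general-purpose memory pool allocator. The sketch below is a minimal illustration of the gen_pool_* calls used in the diff that follows; it is not code from the patch. The demo_* names are invented for the example, and it assumes an ARM build where phys_to_page() is available.]

#include <linux/genalloc.h>
#include <linux/mm.h>

static struct gen_pool *demo_pool;	/* illustrative stand-in for atomic_pool */

static int demo_pool_init(void *vaddr, phys_addr_t phys, size_t size)
{
	int ret;

	/* Page-granular pool; -1 means no NUMA node affinity. */
	demo_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!demo_pool)
		return -ENOMEM;

	/* Register the preallocated backing region. Recording both the
	 * virtual and physical base is what lets gen_pool_virt_to_phys()
	 * translate addresses later. */
	ret = gen_pool_add_virt(demo_pool, (unsigned long)vaddr, phys,
				size, -1);
	if (ret) {
		gen_pool_destroy(demo_pool);
		demo_pool = NULL;
	}
	return ret;
}

static void *demo_alloc(size_t size, struct page **ret_page)
{
	/* gen_pool_alloc() returns 0 when the pool is exhausted. */
	unsigned long val = gen_pool_alloc(demo_pool, size);

	if (!val)
		return NULL;
	*ret_page = phys_to_page(gen_pool_virt_to_phys(demo_pool, val));
	return (void *)val;
}

static void demo_free(void *vaddr, size_t size)
{
	gen_pool_free(demo_pool, (unsigned long)vaddr, size);
}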
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/Kconfig	1
-rw-r--r--	arch/arm/mm/dma-mapping.c	153
2 files changed, 50 insertions(+), 104 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index d9d32de9628c..36d47987a9e0 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -14,6 +14,7 @@ config ARM
 	select CLONE_BACKWARDS
 	select CPU_PM if (SUSPEND || CPU_IDLE)
 	select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
+	select GENERIC_ALLOCATOR
 	select GENERIC_ATOMIC64 if (CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI)
 	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
 	select GENERIC_IDLE_POLL_SETUP
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index eecc8e60deea..c245d903927f 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -12,6 +12,7 @@
 #include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/mm.h>
+#include <linux/genalloc.h>
 #include <linux/gfp.h>
 #include <linux/errno.h>
 #include <linux/list.h>
@@ -314,23 +315,13 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
 }
 
 #define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
+static struct gen_pool *atomic_pool;
 
-struct dma_pool {
-	size_t size;
-	spinlock_t lock;
-	unsigned long *bitmap;
-	unsigned long nr_pages;
-	void *vaddr;
-	struct page **pages;
-};
-
-static struct dma_pool atomic_pool = {
-	.size = DEFAULT_DMA_COHERENT_POOL_SIZE,
-};
+static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
 
 static int __init early_coherent_pool(char *p)
 {
-	atomic_pool.size = memparse(p, &p);
+	atomic_pool_size = memparse(p, &p);
 	return 0;
 }
 early_param("coherent_pool", early_coherent_pool);
@@ -340,14 +331,14 @@ void __init init_dma_coherent_pool_size(unsigned long size)
 	/*
 	 * Catch any attempt to set the pool size too late.
 	 */
-	BUG_ON(atomic_pool.vaddr);
+	BUG_ON(atomic_pool);
 
 	/*
 	 * Set architecture specific coherent pool size only if
 	 * it has not been changed by kernel command line parameter.
 	 */
-	if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
-		atomic_pool.size = size;
+	if (atomic_pool_size == DEFAULT_DMA_COHERENT_POOL_SIZE)
+		atomic_pool_size = size;
 }
 
 /*
@@ -355,52 +346,44 @@ void __init init_dma_coherent_pool_size(unsigned long size)
  */
 static int __init atomic_pool_init(void)
 {
-	struct dma_pool *pool = &atomic_pool;
 	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
 	gfp_t gfp = GFP_KERNEL | GFP_DMA;
-	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
-	unsigned long *bitmap;
 	struct page *page;
-	struct page **pages;
 	void *ptr;
-	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
-
-	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-	if (!bitmap)
-		goto no_bitmap;
 
-	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
-	if (!pages)
-		goto no_pages;
+	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
+	if (!atomic_pool)
+		goto out;
 
 	if (dev_get_cma_area(NULL))
-		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
-					      atomic_pool_init);
+		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
+					      &page, atomic_pool_init);
 	else
-		ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
-					   atomic_pool_init);
+		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
+					   &page, atomic_pool_init);
 	if (ptr) {
-		int i;
+		int ret;
 
-		for (i = 0; i < nr_pages; i++)
-			pages[i] = page + i;
-
-		spin_lock_init(&pool->lock);
-		pool->vaddr = ptr;
-		pool->pages = pages;
-		pool->bitmap = bitmap;
-		pool->nr_pages = nr_pages;
-		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
-		       (unsigned)pool->size / 1024);
+		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
+					page_to_phys(page),
+					atomic_pool_size, -1);
+		if (ret)
+			goto destroy_genpool;
+
+		gen_pool_set_algo(atomic_pool,
+				gen_pool_first_fit_order_align,
+				(void *)PAGE_SHIFT);
+		pr_info("DMA: preallocated %zd KiB pool for atomic coherent allocations\n",
+		       atomic_pool_size / 1024);
 		return 0;
 	}
 
-	kfree(pages);
-no_pages:
-	kfree(bitmap);
-no_bitmap:
-	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
-	       (unsigned)pool->size / 1024);
+destroy_genpool:
+	gen_pool_destroy(atomic_pool);
+	atomic_pool = NULL;
+out:
+	pr_err("DMA: failed to allocate %zx KiB pool for atomic coherent allocation\n",
+	       atomic_pool_size / 1024);
 	return -ENOMEM;
 }
 /*
@@ -504,76 +487,36 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
 
 static void *__alloc_from_pool(size_t size, struct page **ret_page)
 {
-	struct dma_pool *pool = &atomic_pool;
-	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned int pageno;
-	unsigned long flags;
+	unsigned long val;
 	void *ptr = NULL;
-	unsigned long align_mask;
 
-	if (!pool->vaddr) {
+	if (!atomic_pool) {
 		WARN(1, "coherent pool not initialised!\n");
 		return NULL;
 	}
 
-	/*
-	 * Align the region allocation - allocations from pool are rather
-	 * small, so align them to their order in pages, minimum is a page
-	 * size. This helps reduce fragmentation of the DMA space.
-	 */
-	align_mask = (1 << get_order(size)) - 1;
-
-	spin_lock_irqsave(&pool->lock, flags);
-	pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
-					    0, count, align_mask);
-	if (pageno < pool->nr_pages) {
-		bitmap_set(pool->bitmap, pageno, count);
-		ptr = pool->vaddr + PAGE_SIZE * pageno;
-		*ret_page = pool->pages[pageno];
-	} else {
-		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
-			    "Please increase it with coherent_pool= kernel parameter!\n",
-			    (unsigned)pool->size / 1024);
+	val = gen_pool_alloc(atomic_pool, size);
+	if (val) {
+		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
+
+		*ret_page = phys_to_page(phys);
+		ptr = (void *)val;
 	}
-	spin_unlock_irqrestore(&pool->lock, flags);
 
 	return ptr;
 }
 
 static bool __in_atomic_pool(void *start, size_t size)
 {
-	struct dma_pool *pool = &atomic_pool;
-	void *end = start + size;
-	void *pool_start = pool->vaddr;
-	void *pool_end = pool->vaddr + pool->size;
-
-	if (start < pool_start || start >= pool_end)
-		return false;
-
-	if (end <= pool_end)
-		return true;
-
-	WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
-	     start, end - 1, pool_start, pool_end - 1);
-
-	return false;
+	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
 }
 
 static int __free_from_pool(void *start, size_t size)
 {
-	struct dma_pool *pool = &atomic_pool;
-	unsigned long pageno, count;
-	unsigned long flags;
-
 	if (!__in_atomic_pool(start, size))
 		return 0;
 
-	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
-	count = size >> PAGE_SHIFT;
-
-	spin_lock_irqsave(&pool->lock, flags);
-	bitmap_clear(pool->bitmap, pageno, count);
-	spin_unlock_irqrestore(&pool->lock, flags);
+	gen_pool_free(atomic_pool, (unsigned long)start, size);
 
 	return 1;
 }
@@ -1316,11 +1259,13 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
 
 static struct page **__atomic_get_pages(void *addr)
 {
-	struct dma_pool *pool = &atomic_pool;
-	struct page **pages = pool->pages;
-	int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
+	struct page *page;
+	phys_addr_t phys;
+
+	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
+	page = phys_to_page(phys);
 
-	return pages + offs;
+	return (struct page **)page;
 }
 
 static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
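[Editor's note: on the algorithm choice in atomic_pool_init() above: gen_pool_set_algo() with gen_pool_first_fit_order_align makes the pool align each allocation to the power-of-two order of its size, e.g. a 3-page request lands on a 4-page boundary. This preserves the behaviour of the removed bitmap code's align_mask = (1 << get_order(size)) - 1, so fragmentation of the DMA space should stay comparable to the old allocator.]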