author     Laura Abbott <lauraa@codeaurora.org>    2014-10-09 18:26:44 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-10-09 22:25:52 -0400
commit     d4932f9e81ae7a7bf3c3967e48373909b9c98ee5 (patch)
tree       0f9cf08d5d95589b6bbe3fb0f479258a11a4a6f7 /arch/arm64/mm
parent     36d0fd2198da3fd16b0e0da50ece05b4d295d2f1 (diff)
arm64: add atomic pool for non-coherent and CMA allocations
Neither CMA nor noncoherent allocations support atomic allocations.
Add a dedicated atomic pool to support this.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: David Riley <davidriley@chromium.org>
Cc: Olof Johansson <olof@lixom.net>
Cc: Ritesh Harjain <ritesh.harjani@gmail.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Thierry Reding <thierry.reding@gmail.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--   arch/arm64/mm/dma-mapping.c   164
1 file changed, 145 insertions(+), 19 deletions(-)
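
The motivation is easiest to see from a caller's point of view: a driver that needs a coherent buffer while it cannot sleep (in an interrupt handler or under a spinlock) must pass GFP_ATOMIC, and before this patch both the CMA path and the vmap-based noncoherent path could block. The sketch below is illustrative only and is not part of the commit; the example_get_desc_ring() helper and its device pointer are hypothetical, while dma_alloc_coherent() and GFP_ATOMIC are the standard kernel API that now lands in the pool path.

/*
 * Illustrative sketch, not part of this patch: on a non-cache-coherent arm64
 * system an allocation like this, made from atomic context, is now carved
 * out of the pre-reserved gen_pool instead of going through CMA or vmap().
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical helper: grab a small descriptor ring from IRQ context. */
static void *example_get_desc_ring(struct device *dev, size_t size,
				   dma_addr_t *dma)
{
	/*
	 * GFP_ATOMIC does not include __GFP_WAIT, so __dma_alloc_noncoherent()
	 * takes the __alloc_from_pool() path added by this patch.
	 */
	return dma_alloc_coherent(dev, size, dma, GFP_ATOMIC);
}

The pool defaults to 256 KiB (DEFAULT_DMA_COHERENT_POOL_SIZE) and can be resized on the kernel command line via the coherent_pool= parameter parsed by early_coherent_pool(), e.g. coherent_pool=512K.
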
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 2c71077cacfd..d92094203913 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -20,6 +20,7 @@
 #include <linux/gfp.h>
 #include <linux/export.h>
 #include <linux/slab.h>
+#include <linux/genalloc.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
 #include <linux/vmalloc.h>
@@ -38,6 +39,54 @@ static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
 	return prot;
 }
 
+static struct gen_pool *atomic_pool;
+
+#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
+static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
+
+static int __init early_coherent_pool(char *p)
+{
+	atomic_pool_size = memparse(p, &p);
+	return 0;
+}
+early_param("coherent_pool", early_coherent_pool);
+
+static void *__alloc_from_pool(size_t size, struct page **ret_page)
+{
+	unsigned long val;
+	void *ptr = NULL;
+
+	if (!atomic_pool) {
+		WARN(1, "coherent pool not initialised!\n");
+		return NULL;
+	}
+
+	val = gen_pool_alloc(atomic_pool, size);
+	if (val) {
+		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
+
+		*ret_page = phys_to_page(phys);
+		ptr = (void *)val;
+	}
+
+	return ptr;
+}
+
+static bool __in_atomic_pool(void *start, size_t size)
+{
+	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
+}
+
+static int __free_from_pool(void *start, size_t size)
+{
+	if (!__in_atomic_pool(start, size))
+		return 0;
+
+	gen_pool_free(atomic_pool, (unsigned long)start, size);
+
+	return 1;
+}
+
 static void *__dma_alloc_coherent(struct device *dev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flags,
 				  struct dma_attrs *attrs)
@@ -50,7 +99,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
 	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
 		flags |= GFP_DMA;
-	if (IS_ENABLED(CONFIG_DMA_CMA)) {
+	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
 		struct page *page;
 
 		size = PAGE_ALIGN(size);
@@ -70,50 +119,54 @@ static void __dma_free_coherent(struct device *dev, size_t size,
 			       void *vaddr, dma_addr_t dma_handle,
 			       struct dma_attrs *attrs)
 {
+	bool freed;
+	phys_addr_t paddr = dma_to_phys(dev, dma_handle);
+
 	if (dev == NULL) {
 		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
 		return;
 	}
 
-	if (IS_ENABLED(CONFIG_DMA_CMA)) {
-		phys_addr_t paddr = dma_to_phys(dev, dma_handle);
-
-		dma_release_from_contiguous(dev,
+	freed = dma_release_from_contiguous(dev,
 					phys_to_page(paddr),
 					size >> PAGE_SHIFT);
-	} else {
+	if (!freed)
 		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-	}
 }
 
 static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
 				     dma_addr_t *dma_handle, gfp_t flags,
 				     struct dma_attrs *attrs)
 {
-	struct page *page, **map;
+	struct page *page;
 	void *ptr, *coherent_ptr;
-	int order, i;
 
 	size = PAGE_ALIGN(size);
-	order = get_order(size);
+
+	if (!(flags & __GFP_WAIT)) {
+		struct page *page = NULL;
+		void *addr = __alloc_from_pool(size, &page);
+
+		if (addr)
+			*dma_handle = phys_to_dma(dev, page_to_phys(page));
+
+		return addr;
+
+	}
 
 	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
 	if (!ptr)
 		goto no_mem;
-	map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA);
-	if (!map)
-		goto no_map;
 
 	/* remove any dirty cache lines on the kernel alias */
 	__dma_flush_range(ptr, ptr + size);
 
 	/* create a coherent mapping */
 	page = virt_to_page(ptr);
-	for (i = 0; i < (size >> PAGE_SHIFT); i++)
-		map[i] = page + i;
-	coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
-			    __get_dma_pgprot(attrs, __pgprot(PROT_NORMAL_NC), false));
-	kfree(map);
+	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
+				__get_dma_pgprot(attrs,
+					__pgprot(PROT_NORMAL_NC), false),
+					NULL);
 	if (!coherent_ptr)
 		goto no_map;
 
@@ -132,6 +185,8 @@ static void __dma_free_noncoherent(struct device *dev, size_t size,
 {
 	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
 
+	if (__free_from_pool(vaddr, size))
+		return;
 	vunmap(vaddr);
 	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
 }
@@ -307,6 +362,67 @@ EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
 
 extern int swiotlb_late_init_with_default_size(size_t default_size);
 
+static int __init atomic_pool_init(void)
+{
+	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
+	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
+	struct page *page;
+	void *addr;
+	unsigned int pool_size_order = get_order(atomic_pool_size);
+
+	if (dev_get_cma_area(NULL))
+		page = dma_alloc_from_contiguous(NULL, nr_pages,
+							pool_size_order);
+	else
+		page = alloc_pages(GFP_DMA, pool_size_order);
+
+	if (page) {
+		int ret;
+		void *page_addr = page_address(page);
+
+		memset(page_addr, 0, atomic_pool_size);
+		__dma_flush_range(page_addr, page_addr + atomic_pool_size);
+
+		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
+		if (!atomic_pool)
+			goto free_page;
+
+		addr = dma_common_contiguous_remap(page, atomic_pool_size,
+					VM_USERMAP, prot, atomic_pool_init);
+
+		if (!addr)
+			goto destroy_genpool;
+
+		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
+					page_to_phys(page),
+					atomic_pool_size, -1);
+		if (ret)
+			goto remove_mapping;
+
+		gen_pool_set_algo(atomic_pool,
+				  gen_pool_first_fit_order_align,
+				  (void *)PAGE_SHIFT);
+
+		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
+			atomic_pool_size / 1024);
+		return 0;
+	}
+	goto out;
+
+remove_mapping:
+	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
+destroy_genpool:
+	gen_pool_destroy(atomic_pool);
+	atomic_pool = NULL;
+free_page:
+	if (!dma_release_from_contiguous(NULL, page, nr_pages))
+		__free_pages(page, pool_size_order);
+out:
+	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
+		atomic_pool_size / 1024);
+	return -ENOMEM;
+}
+
 static int __init swiotlb_late_init(void)
 {
 	size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
@@ -315,7 +431,17 @@ static int __init swiotlb_late_init(void)
 
 	return swiotlb_late_init_with_default_size(swiotlb_size);
 }
-arch_initcall(swiotlb_late_init);
+
+static int __init arm64_dma_init(void)
+{
+	int ret = 0;
+
+	ret |= swiotlb_late_init();
+	ret |= atomic_pool_init();
+
+	return ret;
+}
+arch_initcall(arm64_dma_init);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES	4096
 