author     Marek Szyprowski <m.szyprowski@samsung.com>  2013-01-16 09:38:44 -0500
committer  Marek Szyprowski <m.szyprowski@samsung.com>  2013-02-25 09:30:42 -0500
commit     9848e48f4c316ccb64d6f29ff0ed85f11d7bf532 (patch)
tree       063c2d0a546557016f301607b39a7e41394c8eed /arch/arm
parent     18177d12c0cee5646c7c2045ea90ddf882011c97 (diff)
ARM: dma-mapping: add support for CMA regions placed in highmem zone
This patch adds missing pieces to correctly support memory pages served from CMA regions placed in high memory zones. Please note that the default global CMA area is still put into lowmem and is limited by optional architecture specific DMA zone. One can however put device specific CMA regions in high memory zone to reduce lowmem usage.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
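The "device specific CMA regions" mentioned above are reserved by platform code, not by this patch. As a rough, hedged sketch of what that setup looked like in this kernel generation: the board name, device, region size and physical base address below are all invented for illustration, and only dma_declare_contiguous(), the CMA reservation hook of that era (normally called from the machine descriptor's .reserve callback, before the page allocator is up), is the real interface.

/*
 * Illustrative sketch only: reserving a device-private CMA region that is
 * assumed to lie in highmem.  Device name, size and base address are
 * hypothetical.
 */
#include <linux/dma-contiguous.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/init.h>
#include <linux/printk.h>

static struct platform_device my_video_pdev = {
	.name	= "my-video",		/* hypothetical device */
	.id	= -1,
};

static void __init my_board_reserve(void)
{
	/*
	 * 16 MiB pinned at 0x50000000, assumed here to sit above the
	 * lowmem/highmem boundary of this imaginary board.  Reserving such
	 * a region was already possible before this patch; what the patch
	 * adds are the dma-mapping pieces needed to clear, remap and hand
	 * out pages from it.
	 */
	if (dma_declare_contiguous(&my_video_pdev.dev, SZ_16M,
				   0x50000000, 0))
		pr_warn("my-board: highmem CMA reservation failed\n");
}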
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/mm/dma-mapping.c | 57
1 file changed, 42 insertions(+), 15 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 6ab882a05050..94d7359074c2 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -186,13 +186,24 @@ static u64 get_coherent_dma_mask(struct device *dev)
 
 static void __dma_clear_buffer(struct page *page, size_t size)
 {
-	void *ptr;
 	/*
 	 * Ensure that the allocated pages are zeroed, and that any data
 	 * lurking in the kernel direct-mapped region is invalidated.
 	 */
-	ptr = page_address(page);
-	if (ptr) {
+	if (PageHighMem(page)) {
+		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
+		phys_addr_t end = base + size;
+		while (size > 0) {
+			void *ptr = kmap_atomic(page);
+			memset(ptr, 0, PAGE_SIZE);
+			dmac_flush_range(ptr, ptr + PAGE_SIZE);
+			kunmap_atomic(ptr);
+			page++;
+			size -= PAGE_SIZE;
+		}
+		outer_flush_range(base, end);
+	} else {
+		void *ptr = page_address(page);
 		memset(ptr, 0, size);
 		dmac_flush_range(ptr, ptr + size);
 		outer_flush_range(__pa(ptr), __pa(ptr) + size);
@@ -243,7 +254,8 @@ static void __dma_free_buffer(struct page *page, size_t size)
 #endif
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
-				     pgprot_t prot, struct page **ret_page);
+				     pgprot_t prot, struct page **ret_page,
+				     const void *caller);
 
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
 				 pgprot_t prot, struct page **ret_page,
@@ -346,10 +358,11 @@ static int __init atomic_pool_init(void)
 		goto no_pages;
 
 	if (IS_ENABLED(CONFIG_CMA))
-		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
+		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
+					      atomic_pool_init);
 	else
 		ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
-					   &page, NULL);
+					   &page, atomic_pool_init);
 	if (ptr) {
 		int i;
 
@@ -542,27 +555,41 @@ static int __free_from_pool(void *start, size_t size)
 }
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
-				     pgprot_t prot, struct page **ret_page)
+				     pgprot_t prot, struct page **ret_page,
+				     const void *caller)
 {
 	unsigned long order = get_order(size);
 	size_t count = size >> PAGE_SHIFT;
 	struct page *page;
+	void *ptr;
 
 	page = dma_alloc_from_contiguous(dev, count, order);
 	if (!page)
 		return NULL;
 
 	__dma_clear_buffer(page, size);
-	__dma_remap(page, size, prot);
 
+	if (PageHighMem(page)) {
+		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
+		if (!ptr) {
+			dma_release_from_contiguous(dev, page, count);
+			return NULL;
+		}
+	} else {
+		__dma_remap(page, size, prot);
+		ptr = page_address(page);
+	}
 	*ret_page = page;
-	return page_address(page);
+	return ptr;
 }
 
 static void __free_from_contiguous(struct device *dev, struct page *page,
-				   size_t size)
+				   void *cpu_addr, size_t size)
 {
-	__dma_remap(page, size, pgprot_kernel);
+	if (PageHighMem(page))
+		__dma_free_remap(cpu_addr, size);
+	else
+		__dma_remap(page, size, pgprot_kernel);
 	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
 
@@ -583,9 +610,9 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
 #define __get_dma_pgprot(attrs, prot)				__pgprot(0)
 #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
 #define __alloc_from_pool(size, ret_page)			NULL
-#define __alloc_from_contiguous(dev, size, prot, ret)		NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c)	NULL
 #define __free_from_pool(cpu_addr, size)			0
-#define __free_from_contiguous(dev, page, size)			do { } while (0)
+#define __free_from_contiguous(dev, page, cpu_addr, size)	do { } while (0)
 #define __dma_free_remap(cpu_addr, size)			do { } while (0)
 
 #endif /* CONFIG_MMU */
@@ -645,7 +672,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	else if (!IS_ENABLED(CONFIG_CMA))
 		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
 	else
-		addr = __alloc_from_contiguous(dev, size, prot, &page);
+		addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
 
 	if (addr)
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -739,7 +766,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		 * Non-atomic allocations cannot be freed with IRQs disabled
 		 */
 		WARN_ON(irqs_disabled());
-		__free_from_contiguous(dev, page, size);
+		__free_from_contiguous(dev, page, cpu_addr, size);
 	}
 }
 
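For completeness, the driver-facing coherent DMA API is unchanged by this patch; dma_alloc_coherent() remains the entry point. The minimal sketch below (device and buffer size are arbitrary, the helper names are made up) only shows where the change takes effect: with CMA enabled, a blocking allocation for a device whose CMA area sits in highmem now goes through the new PageHighMem() branch of __alloc_from_contiguous() and returns the __dma_alloc_remap() mapping, which is why the free path hands cpu_addr down to __free_from_contiguous().

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/sizes.h>

/* Consumer-side sketch; 1 MiB is an arbitrary example size. */
static void *example_alloc(struct device *dev, dma_addr_t *dma_handle)
{
	/* May now be backed by highmem CMA pages remapped into vmalloc space. */
	return dma_alloc_coherent(dev, SZ_1M, dma_handle, GFP_KERNEL);
}

static void example_free(struct device *dev, void *cpu_addr,
			 dma_addr_t dma_handle)
{
	/* cpu_addr is the remapped address that the new free path tears down. */
	dma_free_coherent(dev, SZ_1M, cpu_addr, dma_handle);
}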