author     Marek Szyprowski <m.szyprowski@samsung.com>  2012-05-30 04:48:29 -0400
committer  Marek Szyprowski <m.szyprowski@samsung.com>  2012-06-04 02:01:24 -0400
commit     f1ae98da8525c6b8b1c301c3a2b0bd2b6515cca2
tree       f73e377f98bbb452612a1f53b3d399cff6cac1fa /arch
parent     f8f5701bdaf9134b1f90e5044a82c66324d2073f
ARM: dma-mapping: remove unconditional dependency on CMA
CMA has been enabled unconditionally on all ARMv6+ systems to solve the
long-standing issue of double kernel mappings for all DMA coherent
buffers. This, however, created a dependency on CONFIG_EXPERIMENTAL for
the whole ARM architecture, which should really be avoided. This patch
removes that dependency and lets one use the old, well-tested
dma-mapping implementation on ARMv6+ systems as well, without having to
enable EXPERIMENTAL features.
Reported-by: Russell King <linux@arm.linux.org.uk>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
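The mechanism the patch switches to is the kernel's IS_ENABLED() helper, which expands to a constant 1 or 0 depending on whether the Kconfig symbol is set, so the disabled branch is still parsed and type-checked but discarded as dead code, unlike an #ifdef block that hides it from the compiler entirely. Below is a minimal stand-alone sketch of that pattern; FEATURE_ENABLED is a hypothetical stand-in for IS_ENABLED(CONFIG_CMA), and the two allocator functions are placeholders, not kernel APIs.

/*
 * Minimal illustration of the IS_ENABLED() pattern this patch adopts.
 * FEATURE_ENABLED plays the role of IS_ENABLED(CONFIG_CMA): it is an
 * ordinary constant expression, so the compiler type-checks both
 * branches and then eliminates the dead one.
 */
#include <stdio.h>

#ifndef FEATURE_ENABLED
#define FEATURE_ENABLED 0       /* build with -DFEATURE_ENABLED=1 to flip */
#endif

static void *alloc_feature_path(void)
{
        printf("feature path (CMA-style contiguous allocator)\n");
        return NULL;
}

static void *alloc_fallback_path(void)
{
        printf("fallback path (old remapping allocator)\n");
        return NULL;
}

int main(void)
{
        void *buf;

        /* Same shape as the patch's: else if (!IS_ENABLED(CONFIG_CMA)) ... */
        if (!FEATURE_ENABLED)
                buf = alloc_fallback_path();
        else
                buf = alloc_feature_path();

        (void)buf;
        return 0;
}

Building with -DFEATURE_ENABLED=1 selects the feature path at compile time, mirroring how setting CONFIG_CMA=y flips the branches changed by this patch.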
Diffstat (limited to 'arch')
 arch/arm/Kconfig          |  1 -
 arch/arm/mm/dma-mapping.c | 10 ++++------
 2 files changed, 4 insertions(+), 7 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b649c5904a4f..84449dd8f031 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -7,7 +7,6 @@ config ARM
 	select HAVE_IDE if PCI || ISA || PCMCIA
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
-	select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
 	select HAVE_MEMBLOCK
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ea6b43154090..106c4c0ebccd 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -268,10 +268,8 @@ static int __init consistent_init(void)
 	unsigned long base = consistent_base;
 	unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
 
-#ifndef CONFIG_ARM_DMA_USE_IOMMU
-	if (cpu_architecture() >= CPU_ARCH_ARMv6)
+	if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
 		return 0;
-#endif
 
 	consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
 	if (!consistent_pte) {
@@ -342,7 +340,7 @@ static int __init coherent_init(void)
 	struct page *page;
 	void *ptr;
 
-	if (cpu_architecture() < CPU_ARCH_ARMv6)
+	if (!IS_ENABLED(CONFIG_CMA))
 		return 0;
 
 	ptr = __alloc_from_contiguous(NULL, size, prot, &page);
@@ -704,7 +702,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
 	if (arch_is_coherent() || nommu())
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
-	else if (cpu_architecture() < CPU_ARCH_ARMv6)
+	else if (!IS_ENABLED(CONFIG_CMA))
 		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
 	else if (gfp & GFP_ATOMIC)
 		addr = __alloc_from_pool(dev, size, &page, caller);
@@ -773,7 +771,7 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 
 	if (arch_is_coherent() || nommu()) {
 		__dma_free_buffer(page, size);
-	} else if (cpu_architecture() < CPU_ARCH_ARMv6) {
+	} else if (!IS_ENABLED(CONFIG_CMA)) {
 		__dma_free_remap(cpu_addr, size);
 		__dma_free_buffer(page, size);
 	} else {
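Taken together, the dma-mapping.c hunks make allocator selection depend only on coherency, CONFIG_CMA and the GFP flags, never on cpu_architecture(). The runnable model below restates that decision chain with local stand-in names (none of them are the kernel's functions); the final contiguous branch is an assumption inferred from the __alloc_from_contiguous() call visible in coherent_init() above, since that part of __dma_alloc() is not shown in the hunks.

/*
 * Runnable model of the post-patch allocation policy: which allocator
 * __dma_alloc() picks depends on coherency, CONFIG_CMA and the GFP
 * flags.  All names here are local stand-ins for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

enum allocator { SIMPLE_BUFFER, REMAP_BUFFER, ATOMIC_POOL, CONTIGUOUS_CMA };

static enum allocator pick_allocator(bool coherent_or_nommu, bool cma_enabled,
                                     bool atomic_context)
{
        if (coherent_or_nommu)
                return SIMPLE_BUFFER;   /* arch_is_coherent() || nommu() */
        else if (!cma_enabled)
                return REMAP_BUFFER;    /* !IS_ENABLED(CONFIG_CMA) */
        else if (atomic_context)
                return ATOMIC_POOL;     /* gfp & GFP_ATOMIC */
        else
                return CONTIGUOUS_CMA;  /* assumed: CMA-backed allocation */
}

int main(void)
{
        static const char *const names[] = {
                "simple buffer", "remap buffer", "atomic pool", "contiguous (CMA)"
        };

        /* ARMv7 system built without CONFIG_CMA: old remapping path. */
        printf("%s\n", names[pick_allocator(false, false, false)]);
        /* Same system with CONFIG_CMA=y, sleeping allocation allowed. */
        printf("%s\n", names[pick_allocator(false, true, false)]);
        return 0;
}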