author     Linus Torvalds <torvalds@linux-foundation.org>  2013-02-26 13:45:25 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-02-26 13:45:25 -0500
commit     42a0a1b0fd343888c59afc8b243a77bcec2cc11c (patch)
tree       b136c088a244be8e0970767035c93d15127e8c83 /arch
parent     52caa59ed335616c5254adff7911465a57ed9f14 (diff)
parent     d589829107c5528164a9b7dfe50d0001780865ed (diff)
Merge branch 'for-v3.9' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping
Pull DMA-mapping updates from Marek Szyprowski:
 "This time all patches are related only to ARM DMA-mapping subsystem.
  The main extension provided by this pull request is highmem support.
  Besides that it contains a bunch of small bugfixes and cleanups."

* 'for-v3.9' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
  ARM: DMA-mapping: fix memory leak in IOMMU dma-mapping implementation
  ARM: dma-mapping: Add maximum alignment order for dma iommu buffers
  ARM: dma-mapping: use himem for DMA buffers for IOMMU-mapped devices
  ARM: dma-mapping: add support for CMA regions placed in highmem zone
  arm: dma mapping: export arm iommu functions
  ARM: dma-mapping: Add arm_iommu_detach_device()
  ARM: dma-mapping: Add macro to_dma_iommu_mapping()
  ARM: dma-mapping: Set arm_dma_set_mask() for iommu->set_dma_mask()
  ARM: iommu: Include linux/kref.h in asm/dma-iommu.h
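The helpers this pull exports and extends (arm_iommu_create_mapping(), arm_iommu_attach_device() and the new arm_iommu_detach_device()) are driven from bus or driver code. Below is an illustrative, hedged sketch of that usage against the v3.9-era API (four-argument arm_iommu_create_mapping() with an explicit order); the my_* names, the 0x80000000 IOVA base and the 128 MiB window are invented for illustration and are not taken from this commit.

/*
 * Illustrative driver-side sketch only -- not part of this commit.
 * Assumes the v3.9-era API: arm_iommu_create_mapping(bus, base, size, order).
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

static struct dma_iommu_mapping *my_mapping;    /* hypothetical driver state */

static int my_iommu_setup(struct device *dev)
{
        /* 128 MiB of IOVA space starting at 0x80000000, 1-page granularity */
        my_mapping = arm_iommu_create_mapping(&platform_bus_type,
                                              0x80000000, SZ_128M, 0);
        if (IS_ERR(my_mapping))
                return PTR_ERR(my_mapping);

        /* dma_alloc_coherent()/dma_map_*() on dev now go through the IOMMU */
        return arm_iommu_attach_device(dev, my_mapping);
}

static void my_iommu_teardown(struct device *dev)
{
        /* new in this pull: detach restores the default (non-IOMMU) dma_map_ops */
        arm_iommu_detach_device(dev);
        arm_iommu_release_mapping(my_mapping);
}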
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig                   21
-rw-r--r--  arch/arm/include/asm/device.h       6
-rw-r--r--  arch/arm/include/asm/dma-iommu.h    2
-rw-r--r--  arch/arm/mm/dma-mapping.c          108
4 files changed, 117 insertions(+), 20 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a955d89ed836..6ec8eb3149ea 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -77,6 +77,27 @@ config ARM_DMA_USE_IOMMU
         select ARM_HAS_SG_CHAIN
         select NEED_SG_DMA_LENGTH
 
+if ARM_DMA_USE_IOMMU
+
+config ARM_DMA_IOMMU_ALIGNMENT
+        int "Maximum PAGE_SIZE order of alignment for DMA IOMMU buffers"
+        range 4 9
+        default 8
+        help
+          DMA mapping framework by default aligns all buffers to the smallest
+          PAGE_SIZE order which is greater than or equal to the requested buffer
+          size. This works well for buffers up to a few hundreds kilobytes, but
+          for larger buffers it just a waste of address space. Drivers which has
+          relatively small addressing window (like 64Mib) might run out of
+          virtual space with just a few allocations.
+
+          With this parameter you can specify the maximum PAGE_SIZE order for
+          DMA IOMMU buffers. Larger buffers will be aligned only to this
+          specified order. The order is expressed as a power of two multiplied
+          by the PAGE_SIZE.
+
+endif
+
 config HAVE_PWM
         bool
 
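To make the help text above concrete, here is a small self-contained arithmetic sketch (ordinary userspace C, not kernel code) of how the alignment order grows with buffer size and how the new cap bounds it; it assumes 4 KiB pages and the Kconfig default of 8, and mirrors the clamp added to __alloc_iova() later in this diff.

/* Illustrative arithmetic only -- not kernel code. */
#include <stdio.h>

#define PAGE_SHIFT 12                   /* assume 4 KiB pages */
#define ARM_DMA_IOMMU_ALIGNMENT 8       /* the new Kconfig default */

/* smallest order such that 2^order pages hold `size` bytes (like get_order()) */
static unsigned int buffer_order(unsigned long size)
{
        unsigned int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
                order++;
                size >>= 1;
        }
        return order;
}

int main(void)
{
        unsigned long sizes[] = { 64UL << 10, 1UL << 20, 16UL << 20 };

        for (int i = 0; i < 3; i++) {
                unsigned int order = buffer_order(sizes[i]);

                /* the cap applied by __alloc_iova() in the dma-mapping.c hunk below */
                if (order > ARM_DMA_IOMMU_ALIGNMENT)
                        order = ARM_DMA_IOMMU_ALIGNMENT;
                printf("%8lu KiB buffer -> IOVA aligned to %lu KiB\n",
                       sizes[i] >> 10, 4UL << order);
        }
        return 0;
}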
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index b69c0d3285f8..dc662fca9230 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -27,4 +27,10 @@ struct pdev_archdata {
 #endif
 };
 
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+#define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping)
+#else
+#define to_dma_iommu_mapping(dev) NULL
+#endif
+
 #endif
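The new to_dma_iommu_mapping() macro gives callers a single, ifdef-free way to ask whether a device currently has an ARM IOMMU DMA mapping. A minimal hypothetical use (the helper name below is illustrative, not from this commit):

#include <linux/device.h>       /* pulls in asm/device.h and struct dev_archdata */

/* hypothetical helper: true if dev was attached via arm_iommu_attach_device() */
static bool my_dev_uses_iommu_dma(struct device *dev)
{
        /* NULL when not attached, or when CONFIG_ARM_DMA_USE_IOMMU is off */
        return to_dma_iommu_mapping(dev) != NULL;
}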
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 799b09409fad..a8c56acc8c98 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -7,6 +7,7 @@
 #include <linux/scatterlist.h>
 #include <linux/dma-debug.h>
 #include <linux/kmemcheck.h>
+#include <linux/kref.h>
 
 struct dma_iommu_mapping {
         /* iommu specific data */
@@ -29,6 +30,7 @@ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
 
 int arm_iommu_attach_device(struct device *dev,
                             struct dma_iommu_mapping *mapping);
+void arm_iommu_detach_device(struct device *dev);
 
 #endif /* __KERNEL__ */
 #endif
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index dda3904dc64c..c7e3759f16d3 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -186,13 +186,24 @@ static u64 get_coherent_dma_mask(struct device *dev)
 
 static void __dma_clear_buffer(struct page *page, size_t size)
 {
-        void *ptr;
         /*
          * Ensure that the allocated pages are zeroed, and that any data
          * lurking in the kernel direct-mapped region is invalidated.
          */
-        ptr = page_address(page);
-        if (ptr) {
+        if (PageHighMem(page)) {
+                phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
+                phys_addr_t end = base + size;
+                while (size > 0) {
+                        void *ptr = kmap_atomic(page);
+                        memset(ptr, 0, PAGE_SIZE);
+                        dmac_flush_range(ptr, ptr + PAGE_SIZE);
+                        kunmap_atomic(ptr);
+                        page++;
+                        size -= PAGE_SIZE;
+                }
+                outer_flush_range(base, end);
+        } else {
+                void *ptr = page_address(page);
                 memset(ptr, 0, size);
                 dmac_flush_range(ptr, ptr + size);
                 outer_flush_range(__pa(ptr), __pa(ptr) + size);
@@ -243,7 +254,8 @@ static void __dma_free_buffer(struct page *page, size_t size)
 #endif
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
-                                     pgprot_t prot, struct page **ret_page);
+                                     pgprot_t prot, struct page **ret_page,
+                                     const void *caller);
 
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
                                   pgprot_t prot, struct page **ret_page,
@@ -346,10 +358,11 @@ static int __init atomic_pool_init(void)
                 goto no_pages;
 
         if (IS_ENABLED(CONFIG_CMA))
-                ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
+                ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
+                                              atomic_pool_init);
         else
                 ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
-                                           &page, NULL);
+                                           &page, atomic_pool_init);
         if (ptr) {
                 int i;
 
@@ -542,27 +555,41 @@ static int __free_from_pool(void *start, size_t size)
 }
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
-                                     pgprot_t prot, struct page **ret_page)
+                                     pgprot_t prot, struct page **ret_page,
+                                     const void *caller)
 {
         unsigned long order = get_order(size);
         size_t count = size >> PAGE_SHIFT;
         struct page *page;
+        void *ptr;
 
         page = dma_alloc_from_contiguous(dev, count, order);
         if (!page)
                 return NULL;
 
         __dma_clear_buffer(page, size);
-        __dma_remap(page, size, prot);
 
+        if (PageHighMem(page)) {
+                ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
+                if (!ptr) {
+                        dma_release_from_contiguous(dev, page, count);
+                        return NULL;
+                }
+        } else {
+                __dma_remap(page, size, prot);
+                ptr = page_address(page);
+        }
         *ret_page = page;
-        return page_address(page);
+        return ptr;
 }
 
 static void __free_from_contiguous(struct device *dev, struct page *page,
-                                   size_t size)
+                                   void *cpu_addr, size_t size)
 {
-        __dma_remap(page, size, pgprot_kernel);
+        if (PageHighMem(page))
+                __dma_free_remap(cpu_addr, size);
+        else
+                __dma_remap(page, size, pgprot_kernel);
         dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
 
@@ -583,9 +610,9 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
 #define __get_dma_pgprot(attrs, prot)                           __pgprot(0)
 #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)      NULL
 #define __alloc_from_pool(size, ret_page)                       NULL
-#define __alloc_from_contiguous(dev, size, prot, ret)           NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c)        NULL
 #define __free_from_pool(cpu_addr, size)                        0
-#define __free_from_contiguous(dev, page, size)                 do { } while (0)
+#define __free_from_contiguous(dev, page, cpu_addr, size)       do { } while (0)
 #define __dma_free_remap(cpu_addr, size)                        do { } while (0)
 
 #endif  /* CONFIG_MMU */
@@ -645,7 +672,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
         else if (!IS_ENABLED(CONFIG_CMA))
                 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
         else
-                addr = __alloc_from_contiguous(dev, size, prot, &page);
+                addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
 
         if (addr)
                 *handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -739,7 +766,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
                  * Non-atomic allocations cannot be freed with IRQs disabled
                  */
                 WARN_ON(irqs_disabled());
-                __free_from_contiguous(dev, page, size);
+                __free_from_contiguous(dev, page, cpu_addr, size);
         }
 }
 
@@ -1002,6 +1029,9 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
         unsigned int count, start;
         unsigned long flags;
 
+        if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
+                order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
+
         count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
                  (1 << mapping->order) - 1) >> mapping->order;
 
@@ -1068,12 +1098,17 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
                 return pages;
         }
 
+        /*
+         * IOMMU can map any pages, so himem can also be used here
+         */
+        gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
+
         while (count) {
                 int j, order = __fls(count);
 
-                pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
+                pages[i] = alloc_pages(gfp, order);
                 while (!pages[i] && order)
-                        pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
+                        pages[i] = alloc_pages(gfp, --order);
                 if (!pages[i])
                         goto error;
 
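The hunk above also shows the strategy __iommu_alloc_buffer() uses to build a buffer once highmem pages are allowed: take the largest power-of-two block that still fits the remaining page count (__fls(count)) and fall back to smaller orders when allocation fails, so the buffer is assembled from as few chunks as possible. A standalone sketch of that loop follows (plain C with a pretend allocator; the names and the 4-page success limit are invented for illustration).

/* Illustrative sketch of the decreasing-order allocation loop -- not kernel code. */
#include <stdio.h>

/* pretend allocator: say only blocks of up to 4 pages (order 2) succeed */
static int alloc_block(unsigned int order)
{
        return order <= 2;
}

/* highest set bit of a non-zero value, like the kernel's __fls() */
static unsigned int fls_order(unsigned long count)
{
        unsigned int order = 0;

        while (count >>= 1)
                order++;
        return order;
}

int main(void)
{
        unsigned long count = 13;       /* pages still to allocate */

        while (count) {
                unsigned int order = fls_order(count);

                /* fall back to smaller blocks until one "succeeds" */
                while (!alloc_block(order) && order)
                        order--;
                if (!alloc_block(order))
                        return 1;       /* even a single page failed */
                printf("allocated a %u-page block\n", 1u << order);
                count -= 1ul << order;
        }
        return 0;
}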
@@ -1257,11 +1292,11 @@ err_mapping:
         return NULL;
 }
 
-static void __iommu_free_atomic(struct device *dev, struct page **pages,
+static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
                                 dma_addr_t handle, size_t size)
 {
         __iommu_remove_mapping(dev, handle, size);
-        __free_from_pool(page_address(pages[0]), size);
+        __free_from_pool(cpu_addr, size);
 }
 
 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
@@ -1344,7 +1379,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
         }
 
         if (__in_atomic_pool(cpu_addr, size)) {
-                __iommu_free_atomic(dev, pages, handle, size);
+                __iommu_free_atomic(dev, cpu_addr, handle, size);
                 return;
         }
 
@@ -1732,6 +1767,8 @@ struct dma_map_ops iommu_ops = {
         .unmap_sg               = arm_iommu_unmap_sg,
         .sync_sg_for_cpu        = arm_iommu_sync_sg_for_cpu,
         .sync_sg_for_device     = arm_iommu_sync_sg_for_device,
+
+        .set_dma_mask           = arm_dma_set_mask,
 };
 
 struct dma_map_ops iommu_coherent_ops = {
@@ -1745,6 +1782,8 @@ struct dma_map_ops iommu_coherent_ops = {
 
         .map_sg         = arm_coherent_iommu_map_sg,
         .unmap_sg       = arm_coherent_iommu_unmap_sg,
+
+        .set_dma_mask   = arm_dma_set_mask,
 };
 
 /**
@@ -1799,6 +1838,7 @@ err2:
 err:
         return ERR_PTR(err);
 }
+EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
 
 static void release_iommu_mapping(struct kref *kref)
 {
@@ -1815,6 +1855,7 @@ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
         if (mapping)
                 kref_put(&mapping->kref, release_iommu_mapping);
 }
+EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
 
 /**
  * arm_iommu_attach_device
@@ -1843,5 +1884,32 @@ int arm_iommu_attach_device(struct device *dev,
         pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
         return 0;
 }
+EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
+
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
+{
+        struct dma_iommu_mapping *mapping;
+
+        mapping = to_dma_iommu_mapping(dev);
+        if (!mapping) {
+                dev_warn(dev, "Not attached\n");
+                return;
+        }
+
+        iommu_detach_device(mapping->domain, dev);
+        kref_put(&mapping->kref, release_iommu_mapping);
+        mapping = NULL;
+        set_dma_ops(dev, NULL);
+
+        pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
+}
+EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
 #endif