author	Christoph Hellwig <hch@lst.de>	2018-09-08 05:22:43 -0400
committer	Christoph Hellwig <hch@lst.de>	2018-09-20 03:01:15 -0400
commit	bc3ec75de5452db59b683487867ba562b950708a (patch)
tree	ad93be8bbaea3429f83fb0afd6b7597ec90a1e7b
parent	f3ecc0ff0457eae93503792c6fc35921fa8a6204 (diff)
dma-mapping: merge direct and noncoherent ops
All the cache maintenance is already stubbed out when not enabled, but
merging the two allows us to nicely handle the case where cache
maintenance is required for some devices, but not others.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Paul Burton <paul.burton@mips.com> # MIPS parts
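The core of the merge is visible in the kernel/dma/direct.c hunk below: instead of selecting between two ops structures at build time, each helper now branches on a per-device coherency flag at runtime. A condensed sketch of that dispatch pattern, taken from this patch (dev_is_dma_coherent() reads dev->dma_coherent, which arch code such as arch_setup_dma_ops() sets):

	/*
	 * Condensed from kernel/dma/direct.c in this patch: the direct ops
	 * check the device's coherency flag and fall back to the arch
	 * cache-maintenance hooks only for noncoherent devices.
	 */
	void *dma_direct_alloc(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
	{
		if (!dev_is_dma_coherent(dev))
			return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
		return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
	}

	static void dma_direct_sync_single_for_device(struct device *dev,
			dma_addr_t addr, size_t size, enum dma_data_direction dir)
	{
		if (dev_is_dma_coherent(dev))
			return;	/* coherent device: no cache maintenance needed */
		arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
	}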
-rw-r--r--  arch/arc/Kconfig                      |   2
-rw-r--r--  arch/arc/mm/dma.c                     |  16
-rw-r--r--  arch/arm/mm/dma-mapping-nommu.c       |   5
-rw-r--r--  arch/c6x/Kconfig                      |   2
-rw-r--r--  arch/hexagon/Kconfig                  |   2
-rw-r--r--  arch/m68k/Kconfig                     |   2
-rw-r--r--  arch/microblaze/Kconfig               |   2
-rw-r--r--  arch/mips/Kconfig                     |   1
-rw-r--r--  arch/mips/include/asm/dma-mapping.h   |   2
-rw-r--r--  arch/mips/jazz/jazzdma.c              |   6
-rw-r--r--  arch/mips/mm/dma-noncoherent.c        |  29
-rw-r--r--  arch/nds32/Kconfig                    |   2
-rw-r--r--  arch/nios2/Kconfig                    |   2
-rw-r--r--  arch/openrisc/Kconfig                 |   2
-rw-r--r--  arch/parisc/Kconfig                   |   2
-rw-r--r--  arch/parisc/kernel/setup.c            |   2
-rw-r--r--  arch/sh/Kconfig                       |   3
-rw-r--r--  arch/sparc/Kconfig                    |   2
-rw-r--r--  arch/sparc/include/asm/dma-mapping.h  |   4
-rw-r--r--  arch/x86/kernel/amd_gart_64.c         |   6
-rw-r--r--  arch/xtensa/Kconfig                   |   2
-rw-r--r--  include/asm-generic/dma-mapping.h     |   9
-rw-r--r--  include/linux/dma-direct.h            |   4
-rw-r--r--  include/linux/dma-mapping.h           |   1
-rw-r--r--  include/linux/dma-noncoherent.h       |   5
-rw-r--r--  kernel/dma/Kconfig                    |   9
-rw-r--r--  kernel/dma/Makefile                   |   1
-rw-r--r--  kernel/dma/direct.c                   | 121
-rw-r--r--  kernel/dma/noncoherent.c              | 106
29 files changed, 160 insertions(+), 192 deletions(-)
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index b4441b0764d7..ca03694d518a 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -17,7 +17,7 @@ config ARC
 	select BUILDTIME_EXTABLE_SORT
 	select CLONE_BACKWARDS
 	select COMMON_CLK
-	select DMA_NONCOHERENT_OPS
+	select DMA_DIRECT_OPS
 	select DMA_NONCOHERENT_MMAP
 	select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC)
 	select GENERIC_CLOCKEVENTS
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index c75d5c3470e3..535ed4a068ef 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -167,7 +167,7 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
 }
 
 /*
- * Plug in coherent or noncoherent dma ops
+ * Plug in direct dma map ops.
  */
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
@@ -175,13 +175,11 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 	/*
 	 * IOC hardware snoops all DMA traffic keeping the caches consistent
 	 * with memory - eliding need for any explicit cache maintenance of
-	 * DMA buffers - so we can use dma_direct cache ops.
+	 * DMA buffers.
 	 */
-	if (is_isa_arcv2() && ioc_enable && coherent) {
-		set_dma_ops(dev, &dma_direct_ops);
-		dev_info(dev, "use dma_direct_ops cache ops\n");
-	} else {
-		set_dma_ops(dev, &dma_noncoherent_ops);
-		dev_info(dev, "use dma_noncoherent_ops cache ops\n");
-	}
+	if (is_isa_arcv2() && ioc_enable && coherent)
+		dev->dma_coherent = true;
+
+	dev_info(dev, "use %sncoherent DMA ops\n",
+		 dev->dma_coherent ? "" : "non");
 }
diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
index aa7aba302e76..0ad156f9985b 100644
--- a/arch/arm/mm/dma-mapping-nommu.c
+++ b/arch/arm/mm/dma-mapping-nommu.c
@@ -47,7 +47,8 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
 	 */
 
 	if (attrs & DMA_ATTR_NON_CONSISTENT)
-		return dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+		return dma_direct_alloc_pages(dev, size, dma_handle, gfp,
+				attrs);
 
 	ret = dma_alloc_from_global_coherent(size, dma_handle);
 
@@ -70,7 +71,7 @@ static void arm_nommu_dma_free(struct device *dev, size_t size,
 			       unsigned long attrs)
 {
 	if (attrs & DMA_ATTR_NON_CONSISTENT) {
-		dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
+		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
 	} else {
 		int ret = dma_release_from_global_coherent(get_order(size),
 							   cpu_addr);
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index a641b0bf1611..f65a084607fd 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -9,7 +9,7 @@ config C6X
 	select ARCH_HAS_SYNC_DMA_FOR_CPU
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select CLKDEV_LOOKUP
-	select DMA_NONCOHERENT_OPS
+	select DMA_DIRECT_OPS
 	select GENERIC_ATOMIC64
 	select GENERIC_IRQ_SHOW
 	select HAVE_ARCH_TRACEHOOK
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 6cee842a9b44..3ef46522e89f 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -30,7 +30,7 @@ config HEXAGON
 	select GENERIC_CLOCKEVENTS_BROADCAST
 	select MODULES_USE_ELF_RELA
 	select GENERIC_CPU_DEVICES
-	select DMA_NONCOHERENT_OPS
+	select DMA_DIRECT_OPS
 	---help---
 	  Qualcomm Hexagon is a processor architecture designed for high
 	  performance and low power across a wide variety of applications.
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 070553791e97..c7b2a8d60a41 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -26,7 +26,7 @@ config M68K
 	select MODULES_USE_ELF_RELA
 	select OLD_SIGSUSPEND3
 	select OLD_SIGACTION
-	select DMA_NONCOHERENT_OPS if HAS_DMA
+	select DMA_DIRECT_OPS if HAS_DMA
 	select HAVE_MEMBLOCK
 	select ARCH_DISCARD_MEMBLOCK
 	select NO_BOOTMEM
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index ace5c5bf1836..0f48ab6a8070 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -11,7 +11,7 @@ config MICROBLAZE
 	select TIMER_OF
 	select CLONE_BACKWARDS3
 	select COMMON_CLK
-	select DMA_NONCOHERENT_OPS
+	select DMA_DIRECT_OPS
 	select DMA_NONCOHERENT_MMAP
 	select GENERIC_ATOMIC64
 	select GENERIC_CLOCKEVENTS
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 54c52bd0d9d3..96da6e3396e1 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1121,7 +1121,6 @@ config DMA_NONCOHERENT
 	select NEED_DMA_MAP_STATE
 	select DMA_NONCOHERENT_MMAP
 	select DMA_NONCOHERENT_CACHE_SYNC
-	select DMA_NONCOHERENT_OPS
 
 config SYS_HAS_EARLY_PRINTK
 	bool
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 40d825c779de..b4c477eb46ce 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -12,8 +12,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &jazz_dma_ops;
 #elif defined(CONFIG_SWIOTLB)
 	return &swiotlb_dma_ops;
-#elif defined(CONFIG_DMA_NONCOHERENT_OPS)
-	return &dma_noncoherent_ops;
 #else
 	return &dma_direct_ops;
 #endif
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index d31bc2f01208..bb49dfa1a9a3 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -564,13 +564,13 @@ static void *jazz_dma_alloc(struct device *dev, size_t size,
 {
 	void *ret;
 
-	ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+	ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
 	if (!ret)
 		return NULL;
 
 	*dma_handle = vdma_alloc(virt_to_phys(ret), size);
 	if (*dma_handle == VDMA_ERROR) {
-		dma_direct_free(dev, size, ret, *dma_handle, attrs);
+		dma_direct_free_pages(dev, size, ret, *dma_handle, attrs);
 		return NULL;
 	}
 
@@ -587,7 +587,7 @@ static void jazz_dma_free(struct device *dev, size_t size, void *vaddr,
 	vdma_free(dma_handle);
 	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
 		vaddr = (void *)CAC_ADDR((unsigned long)vaddr);
-	return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+	dma_direct_free_pages(dev, size, vaddr, dma_handle, attrs);
 }
 
 static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index d408ac51f56c..b01b9a3e424f 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -29,9 +29,6 @@
  */
 static inline bool cpu_needs_post_dma_flush(struct device *dev)
 {
-	if (dev_is_dma_coherent(dev))
-		return false;
-
 	switch (boot_cpu_type()) {
 	case CPU_R10000:
 	case CPU_R12000:
@@ -52,11 +49,8 @@ void *arch_dma_alloc(struct device *dev, size_t size,
 {
 	void *ret;
 
-	ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
-	if (!ret)
-		return NULL;
-
-	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
+	ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
+	if (!ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
 		dma_cache_wback_inv((unsigned long) ret, size);
 		ret = (void *)UNCAC_ADDR(ret);
 	}
@@ -67,9 +61,9 @@ void *arch_dma_alloc(struct device *dev, size_t size,
 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs)
 {
-	if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !dev_is_dma_coherent(dev))
+	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
 		cpu_addr = (void *)CAC_ADDR((unsigned long)cpu_addr);
-	dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
+	dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
 }
 
 int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
@@ -78,16 +72,11 @@ int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 {
 	unsigned long user_count = vma_pages(vma);
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned long addr = (unsigned long)cpu_addr;
+	unsigned long addr = CAC_ADDR((unsigned long)cpu_addr);
 	unsigned long off = vma->vm_pgoff;
-	unsigned long pfn;
+	unsigned long pfn = page_to_pfn(virt_to_page((void *)addr));
 	int ret = -ENXIO;
 
-	if (!dev_is_dma_coherent(dev))
-		addr = CAC_ADDR(addr);
-
-	pfn = page_to_pfn(virt_to_page((void *)addr));
-
 	if (attrs & DMA_ATTR_WRITE_COMBINE)
 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 	else
@@ -167,8 +156,7 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
 		size_t size, enum dma_data_direction dir)
 {
-	if (!dev_is_dma_coherent(dev))
-		dma_sync_phys(paddr, size, dir);
+	dma_sync_phys(paddr, size, dir);
 }
 
 void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
@@ -183,6 +171,5 @@ void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 {
 	BUG_ON(direction == DMA_NONE);
 
-	if (!dev_is_dma_coherent(dev))
-		dma_sync_virt(vaddr, size, direction);
+	dma_sync_virt(vaddr, size, direction);
 }
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index 7068f341133d..56992330026a 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -11,7 +11,7 @@ config NDS32
 	select CLKSRC_MMIO
 	select CLONE_BACKWARDS
 	select COMMON_CLK
-	select DMA_NONCOHERENT_OPS
+	select DMA_DIRECT_OPS
 	select GENERIC_ATOMIC64
 	select GENERIC_CPU_DEVICES
 	select GENERIC_CLOCKEVENTS
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index f4ad1138e6b9..03965692fbfe 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -4,7 +4,7 @@ config NIOS2
 	select ARCH_HAS_SYNC_DMA_FOR_CPU
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select ARCH_NO_SWAP
-	select DMA_NONCOHERENT_OPS
+	select DMA_DIRECT_OPS
 	select TIMER_OF
 	select GENERIC_ATOMIC64
 	select GENERIC_CLOCKEVENTS
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index e0081e734827..a655ae280637 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -7,7 +7,7 @@
 config OPENRISC
 	def_bool y
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-	select DMA_NONCOHERENT_OPS
+	select DMA_DIRECT_OPS
 	select OF
 	select OF_EARLY_FLATTREE
 	select IRQ_DOMAIN
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 8e6d83f79e72..f1cd12afd943 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -186,7 +186,7 @@ config PA11
 	depends on PA7000 || PA7100LC || PA7200 || PA7300LC
 	select ARCH_HAS_SYNC_DMA_FOR_CPU
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-	select DMA_NONCOHERENT_OPS
+	select DMA_DIRECT_OPS
 	select DMA_NONCOHERENT_CACHE_SYNC
 
 config PREFETCH
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 4e87c35c22b7..755e89ec828a 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -102,7 +102,7 @@ void __init dma_ops_init(void)
 	case pcxl: /* falls through */
 	case pcxs:
 	case pcxt:
-		hppa_dma_ops = &dma_noncoherent_ops;
+		hppa_dma_ops = &dma_direct_ops;
 		break;
 	default:
 		break;
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 1fb7b6d72baf..475d786a65b0 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -7,6 +7,7 @@ config SUPERH
 	select ARCH_NO_COHERENT_DMA_MMAP if !MMU
 	select HAVE_PATA_PLATFORM
 	select CLKDEV_LOOKUP
+	select DMA_DIRECT_OPS
 	select HAVE_IDE if HAS_IOPORT_MAP
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
@@ -158,13 +159,11 @@ config SWAP_IO_SPACE
 	bool
 
 config DMA_COHERENT
-	select DMA_DIRECT_OPS
 	bool
 
 config DMA_NONCOHERENT
 	def_bool !DMA_COHERENT
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-	select DMA_NONCOHERENT_OPS
 
 config PGTABLE_LEVELS
 	default 3 if X2TLB
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index e6f2a38d2e61..7e2aa59fcc29 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -51,7 +51,7 @@ config SPARC
 config SPARC32
 	def_bool !64BIT
 	select ARCH_HAS_SYNC_DMA_FOR_CPU
-	select DMA_NONCOHERENT_OPS
+	select DMA_DIRECT_OPS
 	select GENERIC_ATOMIC64
 	select CLZ_TAB
 	select HAVE_UID16
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index e17566376934..b0bb2fcaf1c9 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -14,11 +14,11 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 #ifdef CONFIG_SPARC_LEON
 	if (sparc_cpu_model == sparc_leon)
-		return &dma_noncoherent_ops;
+		return &dma_direct_ops;
 #endif
 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
 	if (bus == &pci_bus_type)
-		return &dma_noncoherent_ops;
+		return &dma_direct_ops;
 #endif
 	return dma_ops;
 }
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index f299d8a479bb..3f9d1b4019bb 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -482,7 +482,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
 {
 	void *vaddr;
 
-	vaddr = dma_direct_alloc(dev, size, dma_addr, flag, attrs);
+	vaddr = dma_direct_alloc_pages(dev, size, dma_addr, flag, attrs);
 	if (!vaddr ||
 	    !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
 		return vaddr;
@@ -494,7 +494,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
 		goto out_free;
 	return vaddr;
 out_free:
-	dma_direct_free(dev, size, vaddr, *dma_addr, attrs);
+	dma_direct_free_pages(dev, size, vaddr, *dma_addr, attrs);
 	return NULL;
 }
 
@@ -504,7 +504,7 @@ gart_free_coherent(struct device *dev, size_t size, void *vaddr,
 	       dma_addr_t dma_addr, unsigned long attrs)
 {
 	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
-	dma_direct_free(dev, size, vaddr, dma_addr, attrs);
+	dma_direct_free_pages(dev, size, vaddr, dma_addr, attrs);
 }
 
 static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 04d038f3b6fa..516694937b7a 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -12,7 +12,7 @@ config XTENSA
 	select BUILDTIME_EXTABLE_SORT
 	select CLONE_BACKWARDS
 	select COMMON_CLK
-	select DMA_NONCOHERENT_OPS
+	select DMA_DIRECT_OPS
 	select GENERIC_ATOMIC64
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_IRQ_SHOW
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index ad2868263867..880a292d792f 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -4,16 +4,7 @@
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-	/*
-	 * Use the non-coherent ops if available.  If an architecture wants a
-	 * more fine-grained selection of operations it will have to implement
-	 * get_arch_dma_ops itself or use the per-device dma_ops.
-	 */
-#ifdef CONFIG_DMA_NONCOHERENT_OPS
-	return &dma_noncoherent_ops;
-#else
 	return &dma_direct_ops;
-#endif
 }
 
 #endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 8d9f33febde5..86a59ba5a7f3 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -59,6 +59,10 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t gfp, unsigned long attrs);
 void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs);
+void *dma_direct_alloc_pages(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
+void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
+		dma_addr_t dma_addr, unsigned long attrs);
 dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		unsigned long attrs);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index eafd6f318e78..8f2001181cd1 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -136,7 +136,6 @@ struct dma_map_ops {
 };
 
 extern const struct dma_map_ops dma_direct_ops;
-extern const struct dma_map_ops dma_noncoherent_ops;
 extern const struct dma_map_ops dma_virt_ops;
 
 #define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index ce9732506ef4..3f503025a0cd 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -24,14 +24,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t gfp, unsigned long attrs);
 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs);
-
-#ifdef CONFIG_DMA_NONCOHERENT_MMAP
 int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs);
-#else
-#define arch_dma_mmap NULL
-#endif /* CONFIG_DMA_NONCOHERENT_MMAP */
 
 #ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
 void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index 79476749f196..5617c9a76208 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -33,18 +33,13 @@ config DMA_DIRECT_OPS
 	bool
 	depends on HAS_DMA
 
-config DMA_NONCOHERENT_OPS
-	bool
-	depends on HAS_DMA
-	select DMA_DIRECT_OPS
-
 config DMA_NONCOHERENT_MMAP
 	bool
-	depends on DMA_NONCOHERENT_OPS
+	depends on DMA_DIRECT_OPS
 
 config DMA_NONCOHERENT_CACHE_SYNC
 	bool
-	depends on DMA_NONCOHERENT_OPS
+	depends on DMA_DIRECT_OPS
 
 config DMA_VIRT_OPS
 	bool
diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile
index 6de44e4eb454..7d581e4eea4a 100644
--- a/kernel/dma/Makefile
+++ b/kernel/dma/Makefile
@@ -4,7 +4,6 @@ obj-$(CONFIG_HAS_DMA) += mapping.o
 obj-$(CONFIG_DMA_CMA) += contiguous.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += coherent.o
 obj-$(CONFIG_DMA_DIRECT_OPS) += direct.o
-obj-$(CONFIG_DMA_NONCOHERENT_OPS) += noncoherent.o
 obj-$(CONFIG_DMA_VIRT_OPS) += virt.o
 obj-$(CONFIG_DMA_API_DEBUG) += debug.o
 obj-$(CONFIG_SWIOTLB) += swiotlb.o
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index de87b0282e74..09e85f6aa4ba 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -1,13 +1,15 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * DMA operations that map physical memory directly without using an IOMMU or
- * flushing caches.
+ * Copyright (C) 2018 Christoph Hellwig.
+ *
+ * DMA operations that map physical memory directly without using an IOMMU.
  */
 #include <linux/export.h>
 #include <linux/mm.h>
 #include <linux/dma-direct.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-contiguous.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/pfn.h>
 #include <linux/set_memory.h>
 
@@ -58,8 +60,8 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 	return addr + size - 1 <= dev->coherent_dma_mask;
 }
 
-void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t gfp, unsigned long attrs)
+void *dma_direct_alloc_pages(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	int page_order = get_order(size);
@@ -124,7 +126,7 @@ again:
  * NOTE: this function must never look at the dma_addr argument, because we want
  * to be able to use it as a helper for iommu implementations as well.
  */
-void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
+void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs)
 {
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -136,14 +138,106 @@ void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
 	free_pages((unsigned long)cpu_addr, page_order);
 }
 
+void *dma_direct_alloc(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+	if (!dev_is_dma_coherent(dev))
+		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
+	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
+}
+
+void dma_direct_free(struct device *dev, size_t size,
+		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
+{
+	if (!dev_is_dma_coherent(dev))
+		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
+	else
+		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
+}
+
+static int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs)
+{
+	if (!dev_is_dma_coherent(dev) &&
+	    IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP))
+		return arch_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+static void dma_direct_sync_single_for_device(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+	if (dev_is_dma_coherent(dev))
+		return;
+	arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
+}
+
+static void dma_direct_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	if (dev_is_dma_coherent(dev))
+		return;
+
+	for_each_sg(sgl, sg, nents, i)
+		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+}
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+static void dma_direct_sync_single_for_cpu(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+	if (dev_is_dma_coherent(dev))
+		return;
+	arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
+	arch_sync_dma_for_cpu_all(dev);
+}
+
+static void dma_direct_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	if (dev_is_dma_coherent(dev))
+		return;
+
+	for_each_sg(sgl, sg, nents, i)
+		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+	arch_sync_dma_for_cpu_all(dev);
+}
+
+static void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
+}
+#endif
+
 dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		unsigned long attrs)
 {
-	dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;
+	phys_addr_t phys = page_to_phys(page) + offset;
+	dma_addr_t dma_addr = phys_to_dma(dev, phys);
 
 	if (!check_addr(dev, dma_addr, size, __func__))
 		return DIRECT_MAPPING_ERROR;
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_direct_sync_single_for_device(dev, dma_addr, size, dir);
 	return dma_addr;
 }
 
@@ -162,6 +256,8 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 		sg_dma_len(sg) = sg->length;
 	}
 
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_direct_sync_sg_for_device(dev, sgl, nents, dir);
 	return nents;
 }
 
@@ -197,9 +293,22 @@ int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
 const struct dma_map_ops dma_direct_ops = {
 	.alloc = dma_direct_alloc,
 	.free = dma_direct_free,
+	.mmap = dma_direct_mmap,
 	.map_page = dma_direct_map_page,
 	.map_sg = dma_direct_map_sg,
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
+	.sync_single_for_device = dma_direct_sync_single_for_device,
+	.sync_sg_for_device = dma_direct_sync_sg_for_device,
+#endif
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+	.sync_single_for_cpu = dma_direct_sync_single_for_cpu,
+	.sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
+	.unmap_page = dma_direct_unmap_page,
+	.unmap_sg = dma_direct_unmap_sg,
+#endif
 	.dma_supported = dma_direct_supported,
 	.mapping_error = dma_direct_mapping_error,
+	.cache_sync = arch_dma_cache_sync,
 };
 EXPORT_SYMBOL(dma_direct_ops);
diff --git a/kernel/dma/noncoherent.c b/kernel/dma/noncoherent.c
deleted file mode 100644
index 031fe235d958..000000000000
--- a/kernel/dma/noncoherent.c
+++ /dev/null
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2018 Christoph Hellwig.
- *
- * DMA operations that map physical memory directly without providing cache
- * coherence.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/scatterlist.h>
-
-static void dma_noncoherent_sync_single_for_device(struct device *dev,
-		dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-	arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
-}
-
-static void dma_noncoherent_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
-}
-
-static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	dma_addr_t addr;
-
-	addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
-	if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		arch_sync_dma_for_device(dev, page_to_phys(page) + offset,
-				size, dir);
-	return addr;
-}
-
-static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-	nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs);
-	if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir);
-	return nents;
-}
-
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
-    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
-static void dma_noncoherent_sync_single_for_cpu(struct device *dev,
-		dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-	arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
-	arch_sync_dma_for_cpu_all(dev);
-}
-
-static void dma_noncoherent_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
-	arch_sync_dma_for_cpu_all(dev);
-}
-
-static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir);
-}
-#endif
-
-const struct dma_map_ops dma_noncoherent_ops = {
-	.alloc = arch_dma_alloc,
-	.free = arch_dma_free,
-	.mmap = arch_dma_mmap,
-	.sync_single_for_device = dma_noncoherent_sync_single_for_device,
-	.sync_sg_for_device = dma_noncoherent_sync_sg_for_device,
-	.map_page = dma_noncoherent_map_page,
-	.map_sg = dma_noncoherent_map_sg,
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
-    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
-	.sync_single_for_cpu = dma_noncoherent_sync_single_for_cpu,
-	.sync_sg_for_cpu = dma_noncoherent_sync_sg_for_cpu,
-	.unmap_page = dma_noncoherent_unmap_page,
-	.unmap_sg = dma_noncoherent_unmap_sg,
-#endif
-	.dma_supported = dma_direct_supported,
-	.mapping_error = dma_direct_mapping_error,
-	.cache_sync = arch_dma_cache_sync,
-};
-EXPORT_SYMBOL(dma_noncoherent_ops);