aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2018-03-19 06:38:24 -0400
committerIngo Molnar <mingo@kernel.org>2018-03-20 05:01:59 -0400
commitb6e05477c10c12e36141558fc14f04b00ea634d4 (patch)
tree10fa56168c0fdeb896a6c845fadac8bbd112f554
parente7de6c7cc207be78369d45fb833d7d53aeda47f8 (diff)
dma/direct: Handle the memory encryption bit in common code
Give the basic phys_to_dma() and dma_to_phys() helpers a __-prefix and add
the memory encryption mask to the non-prefixed versions. Use the __-prefixed
versions directly instead of clearing the mask again in various places.

Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Jon Mason <jdmason@kudzu.us>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Muli Ben-Yehuda <mulix@mulix.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: iommu@lists.linux-foundation.org
Link: http://lkml.kernel.org/r/20180319103826.12853-13-hch@lst.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--arch/arm/include/asm/dma-direct.h4
-rw-r--r--arch/mips/cavium-octeon/dma-octeon.c10
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h4
-rw-r--r--arch/mips/include/asm/mach-loongson64/dma-coherence.h10
-rw-r--r--arch/mips/loongson64/common/dma-swiotlb.c4
-rw-r--r--arch/powerpc/include/asm/dma-direct.h4
-rw-r--r--arch/x86/Kconfig2
-rw-r--r--arch/x86/include/asm/dma-direct.h25
-rw-r--r--arch/x86/mm/mem_encrypt.c2
-rw-r--r--arch/x86/pci/sta2x11-fixup.c6
-rw-r--r--include/linux/dma-direct.h21
-rw-r--r--lib/swiotlb.c25
12 files changed, 53 insertions(+), 64 deletions(-)
diff --git a/arch/arm/include/asm/dma-direct.h b/arch/arm/include/asm/dma-direct.h
index 5b0a8a421894..b67e5fc1fe43 100644
--- a/arch/arm/include/asm/dma-direct.h
+++ b/arch/arm/include/asm/dma-direct.h
@@ -2,13 +2,13 @@
2#ifndef ASM_ARM_DMA_DIRECT_H 2#ifndef ASM_ARM_DMA_DIRECT_H
3#define ASM_ARM_DMA_DIRECT_H 1 3#define ASM_ARM_DMA_DIRECT_H 1
4 4
5static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 5static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
6{ 6{
7 unsigned int offset = paddr & ~PAGE_MASK; 7 unsigned int offset = paddr & ~PAGE_MASK;
8 return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset; 8 return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
9} 9}
10 10
11static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) 11static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
12{ 12{
13 unsigned int offset = dev_addr & ~PAGE_MASK; 13 unsigned int offset = dev_addr & ~PAGE_MASK;
14 return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset; 14 return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index c7bb8a407041..7b335ab21697 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -10,7 +10,7 @@
10 * IP32 changes by Ilya. 10 * IP32 changes by Ilya.
11 * Copyright (C) 2010 Cavium Networks, Inc. 11 * Copyright (C) 2010 Cavium Networks, Inc.
12 */ 12 */
13#include <linux/dma-mapping.h> 13#include <linux/dma-direct.h>
14#include <linux/scatterlist.h> 14#include <linux/scatterlist.h>
15#include <linux/bootmem.h> 15#include <linux/bootmem.h>
16#include <linux/export.h> 16#include <linux/export.h>
@@ -182,7 +182,7 @@ struct octeon_dma_map_ops {
182 phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr); 182 phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
183}; 183};
184 184
185dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 185dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
186{ 186{
187 struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev), 187 struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
188 struct octeon_dma_map_ops, 188 struct octeon_dma_map_ops,
@@ -190,9 +190,9 @@ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
190 190
191 return ops->phys_to_dma(dev, paddr); 191 return ops->phys_to_dma(dev, paddr);
192} 192}
193EXPORT_SYMBOL(phys_to_dma); 193EXPORT_SYMBOL(__phys_to_dma);
194 194
195phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) 195phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
196{ 196{
197 struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev), 197 struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
198 struct octeon_dma_map_ops, 198 struct octeon_dma_map_ops,
@@ -200,7 +200,7 @@ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
200 200
201 return ops->dma_to_phys(dev, daddr); 201 return ops->dma_to_phys(dev, daddr);
202} 202}
203EXPORT_SYMBOL(dma_to_phys); 203EXPORT_SYMBOL(__dma_to_phys);
204 204
205static struct octeon_dma_map_ops octeon_linear_dma_map_ops = { 205static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
206 .dma_map_ops = { 206 .dma_map_ops = {
diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
index 138edf6b5b48..6eb1ee548b11 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
@@ -69,8 +69,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
69 return addr + size - 1 <= *dev->dma_mask; 69 return addr + size - 1 <= *dev->dma_mask;
70} 70}
71 71
72dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); 72dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
73phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); 73phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
74 74
75struct dma_map_ops; 75struct dma_map_ops;
76extern const struct dma_map_ops *octeon_pci_dma_map_ops; 76extern const struct dma_map_ops *octeon_pci_dma_map_ops;
diff --git a/arch/mips/include/asm/mach-loongson64/dma-coherence.h b/arch/mips/include/asm/mach-loongson64/dma-coherence.h
index b1b575f5c6c1..64fc44dec0a8 100644
--- a/arch/mips/include/asm/mach-loongson64/dma-coherence.h
+++ b/arch/mips/include/asm/mach-loongson64/dma-coherence.h
@@ -25,13 +25,13 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
25 return addr + size - 1 <= *dev->dma_mask; 25 return addr + size - 1 <= *dev->dma_mask;
26} 26}
27 27
28extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); 28extern dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
29extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); 29extern phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
30static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, 30static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
31 size_t size) 31 size_t size)
32{ 32{
33#ifdef CONFIG_CPU_LOONGSON3 33#ifdef CONFIG_CPU_LOONGSON3
34 return phys_to_dma(dev, virt_to_phys(addr)); 34 return __phys_to_dma(dev, virt_to_phys(addr));
35#else 35#else
36 return virt_to_phys(addr) | 0x80000000; 36 return virt_to_phys(addr) | 0x80000000;
37#endif 37#endif
@@ -41,7 +41,7 @@ static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
41 struct page *page) 41 struct page *page)
42{ 42{
43#ifdef CONFIG_CPU_LOONGSON3 43#ifdef CONFIG_CPU_LOONGSON3
44 return phys_to_dma(dev, page_to_phys(page)); 44 return __phys_to_dma(dev, page_to_phys(page));
45#else 45#else
46 return page_to_phys(page) | 0x80000000; 46 return page_to_phys(page) | 0x80000000;
47#endif 47#endif
@@ -51,7 +51,7 @@ static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
51 dma_addr_t dma_addr) 51 dma_addr_t dma_addr)
52{ 52{
53#if defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_64BIT) 53#if defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_64BIT)
54 return dma_to_phys(dev, dma_addr); 54 return __dma_to_phys(dev, dma_addr);
55#elif defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT) 55#elif defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
56 return (dma_addr > 0x8fffffff) ? dma_addr : (dma_addr & 0x0fffffff); 56 return (dma_addr > 0x8fffffff) ? dma_addr : (dma_addr & 0x0fffffff);
57#else 57#else
diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c
index 7bbcf89475f3..6a739f8ae110 100644
--- a/arch/mips/loongson64/common/dma-swiotlb.c
+++ b/arch/mips/loongson64/common/dma-swiotlb.c
@@ -63,7 +63,7 @@ static int loongson_dma_supported(struct device *dev, u64 mask)
63 return swiotlb_dma_supported(dev, mask); 63 return swiotlb_dma_supported(dev, mask);
64} 64}
65 65
66dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 66dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
67{ 67{
68 long nid; 68 long nid;
69#ifdef CONFIG_PHYS48_TO_HT40 69#ifdef CONFIG_PHYS48_TO_HT40
@@ -75,7 +75,7 @@ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
75 return paddr; 75 return paddr;
76} 76}
77 77
78phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) 78phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
79{ 79{
80 long nid; 80 long nid;
81#ifdef CONFIG_PHYS48_TO_HT40 81#ifdef CONFIG_PHYS48_TO_HT40
diff --git a/arch/powerpc/include/asm/dma-direct.h b/arch/powerpc/include/asm/dma-direct.h
index a5b59c765426..7702875aabb7 100644
--- a/arch/powerpc/include/asm/dma-direct.h
+++ b/arch/powerpc/include/asm/dma-direct.h
@@ -17,12 +17,12 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
17 return addr + size - 1 <= *dev->dma_mask; 17 return addr + size - 1 <= *dev->dma_mask;
18} 18}
19 19
20static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 20static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
21{ 21{
22 return paddr + get_dma_offset(dev); 22 return paddr + get_dma_offset(dev);
23} 23}
24 24
25static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) 25static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
26{ 26{
27 return daddr - get_dma_offset(dev); 27 return daddr - get_dma_offset(dev);
28} 28}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7dc347217d3a..5b4899de076f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -54,7 +54,6 @@ config X86
54 select ARCH_HAS_FORTIFY_SOURCE 54 select ARCH_HAS_FORTIFY_SOURCE
55 select ARCH_HAS_GCOV_PROFILE_ALL 55 select ARCH_HAS_GCOV_PROFILE_ALL
56 select ARCH_HAS_KCOV if X86_64 56 select ARCH_HAS_KCOV if X86_64
57 select ARCH_HAS_PHYS_TO_DMA
58 select ARCH_HAS_MEMBARRIER_SYNC_CORE 57 select ARCH_HAS_MEMBARRIER_SYNC_CORE
59 select ARCH_HAS_PMEM_API if X86_64 58 select ARCH_HAS_PMEM_API if X86_64
60 select ARCH_HAS_REFCOUNT 59 select ARCH_HAS_REFCOUNT
@@ -692,6 +691,7 @@ config X86_SUPPORTS_MEMORY_FAILURE
692config STA2X11 691config STA2X11
693 bool "STA2X11 Companion Chip Support" 692 bool "STA2X11 Companion Chip Support"
694 depends on X86_32_NON_STANDARD && PCI 693 depends on X86_32_NON_STANDARD && PCI
694 select ARCH_HAS_PHYS_TO_DMA
695 select X86_DEV_DMA_OPS 695 select X86_DEV_DMA_OPS
696 select X86_DMA_REMAP 696 select X86_DMA_REMAP
697 select SWIOTLB 697 select SWIOTLB
diff --git a/arch/x86/include/asm/dma-direct.h b/arch/x86/include/asm/dma-direct.h
index 1295bc622ebe..1a19251eaac9 100644
--- a/arch/x86/include/asm/dma-direct.h
+++ b/arch/x86/include/asm/dma-direct.h
@@ -2,29 +2,8 @@
2#ifndef ASM_X86_DMA_DIRECT_H 2#ifndef ASM_X86_DMA_DIRECT_H
3#define ASM_X86_DMA_DIRECT_H 1 3#define ASM_X86_DMA_DIRECT_H 1
4 4
5#include <linux/mem_encrypt.h>
6
7#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
8bool dma_capable(struct device *dev, dma_addr_t addr, size_t size); 5bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
9dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); 6dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
10phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); 7phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
11#else
12static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
13{
14 if (!dev->dma_mask)
15 return 0;
16
17 return addr + size - 1 <= *dev->dma_mask;
18}
19
20static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
21{
22 return __sme_set(paddr);
23}
24 8
25static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
26{
27 return __sme_clr(daddr);
28}
29#endif /* CONFIG_X86_DMA_REMAP */
30#endif /* ASM_X86_DMA_DIRECT_H */ 9#endif /* ASM_X86_DMA_DIRECT_H */
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index d243e8d80d89..1b396422d26f 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -211,7 +211,7 @@ static void *sev_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
211 * Since we will be clearing the encryption bit, check the 211 * Since we will be clearing the encryption bit, check the
212 * mask with it already cleared. 212 * mask with it already cleared.
213 */ 213 */
214 addr = __sme_clr(phys_to_dma(dev, page_to_phys(page))); 214 addr = __phys_to_dma(dev, page_to_phys(page));
215 if ((addr + size) > dev->coherent_dma_mask) { 215 if ((addr + size) > dev->coherent_dma_mask) {
216 __free_pages(page, get_order(size)); 216 __free_pages(page, get_order(size));
217 } else { 217 } else {
diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c
index eac58e03f43c..7a5bafb76d77 100644
--- a/arch/x86/pci/sta2x11-fixup.c
+++ b/arch/x86/pci/sta2x11-fixup.c
@@ -207,11 +207,11 @@ bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
207} 207}
208 208
209/** 209/**
210 * phys_to_dma - Return the DMA AMBA address used for this STA2x11 device 210 * __phys_to_dma - Return the DMA AMBA address used for this STA2x11 device
211 * @dev: device for a PCI device 211 * @dev: device for a PCI device
212 * @paddr: Physical address 212 * @paddr: Physical address
213 */ 213 */
214dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 214dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
215{ 215{
216 if (!dev->archdata.is_sta2x11) 216 if (!dev->archdata.is_sta2x11)
217 return paddr; 217 return paddr;
@@ -223,7 +223,7 @@ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
223 * @dev: device for a PCI device 223 * @dev: device for a PCI device
224 * @daddr: STA2x11 AMBA DMA address 224 * @daddr: STA2x11 AMBA DMA address
225 */ 225 */
226phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) 226phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
227{ 227{
228 if (!dev->archdata.is_sta2x11) 228 if (!dev->archdata.is_sta2x11)
229 return daddr; 229 return daddr;
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index bcdb1a3e4b1f..53ad6a47f513 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -3,18 +3,19 @@
3#define _LINUX_DMA_DIRECT_H 1 3#define _LINUX_DMA_DIRECT_H 1
4 4
5#include <linux/dma-mapping.h> 5#include <linux/dma-mapping.h>
6#include <linux/mem_encrypt.h>
6 7
7#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA 8#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
8#include <asm/dma-direct.h> 9#include <asm/dma-direct.h>
9#else 10#else
10static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 11static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
11{ 12{
12 dma_addr_t dev_addr = (dma_addr_t)paddr; 13 dma_addr_t dev_addr = (dma_addr_t)paddr;
13 14
14 return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT); 15 return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
15} 16}
16 17
17static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) 18static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
18{ 19{
19 phys_addr_t paddr = (phys_addr_t)dev_addr; 20 phys_addr_t paddr = (phys_addr_t)dev_addr;
20 21
@@ -30,6 +31,22 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
30} 31}
31#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */ 32#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
32 33
34/*
35 * If memory encryption is supported, phys_to_dma will set the memory encryption
36 * bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma
37 * and __dma_to_phys versions should only be used on non-encrypted memory for
38 * special occasions like DMA coherent buffers.
39 */
40static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
41{
42 return __sme_set(__phys_to_dma(dev, paddr));
43}
44
45static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
46{
47 return __sme_clr(__dma_to_phys(dev, daddr));
48}
49
33#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN 50#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
34void dma_mark_clean(void *addr, size_t size); 51void dma_mark_clean(void *addr, size_t size);
35#else 52#else
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 005d1d87bb2e..8b06b4485e65 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -157,13 +157,6 @@ unsigned long swiotlb_size_or_default(void)
157 return size ? size : (IO_TLB_DEFAULT_SIZE); 157 return size ? size : (IO_TLB_DEFAULT_SIZE);
158} 158}
159 159
160/* For swiotlb, clear memory encryption mask from dma addresses */
161static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
162 phys_addr_t address)
163{
164 return __sme_clr(phys_to_dma(hwdev, address));
165}
166
167/* Note that this doesn't work with highmem page */ 160/* Note that this doesn't work with highmem page */
168static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, 161static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
169 volatile void *address) 162 volatile void *address)
@@ -622,7 +615,7 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size,
622 return SWIOTLB_MAP_ERROR; 615 return SWIOTLB_MAP_ERROR;
623 } 616 }
624 617
625 start_dma_addr = swiotlb_phys_to_dma(hwdev, io_tlb_start); 618 start_dma_addr = __phys_to_dma(hwdev, io_tlb_start);
626 return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, 619 return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
627 dir, attrs); 620 dir, attrs);
628} 621}
@@ -726,12 +719,12 @@ swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
726 goto out_warn; 719 goto out_warn;
727 720
728 phys_addr = swiotlb_tbl_map_single(dev, 721 phys_addr = swiotlb_tbl_map_single(dev,
729 swiotlb_phys_to_dma(dev, io_tlb_start), 722 __phys_to_dma(dev, io_tlb_start),
730 0, size, DMA_FROM_DEVICE, 0); 723 0, size, DMA_FROM_DEVICE, 0);
731 if (phys_addr == SWIOTLB_MAP_ERROR) 724 if (phys_addr == SWIOTLB_MAP_ERROR)
732 goto out_warn; 725 goto out_warn;
733 726
734 *dma_handle = swiotlb_phys_to_dma(dev, phys_addr); 727 *dma_handle = __phys_to_dma(dev, phys_addr);
735 if (dma_coherent_ok(dev, *dma_handle, size)) 728 if (dma_coherent_ok(dev, *dma_handle, size))
736 goto out_unmap; 729 goto out_unmap;
737 730
@@ -867,10 +860,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
867 map = map_single(dev, phys, size, dir, attrs); 860 map = map_single(dev, phys, size, dir, attrs);
868 if (map == SWIOTLB_MAP_ERROR) { 861 if (map == SWIOTLB_MAP_ERROR) {
869 swiotlb_full(dev, size, dir, 1); 862 swiotlb_full(dev, size, dir, 1);
870 return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer); 863 return __phys_to_dma(dev, io_tlb_overflow_buffer);
871 } 864 }
872 865
873 dev_addr = swiotlb_phys_to_dma(dev, map); 866 dev_addr = __phys_to_dma(dev, map);
874 867
875 /* Ensure that the address returned is DMA'ble */ 868 /* Ensure that the address returned is DMA'ble */
876 if (dma_capable(dev, dev_addr, size)) 869 if (dma_capable(dev, dev_addr, size))
@@ -879,7 +872,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
879 attrs |= DMA_ATTR_SKIP_CPU_SYNC; 872 attrs |= DMA_ATTR_SKIP_CPU_SYNC;
880 swiotlb_tbl_unmap_single(dev, map, size, dir, attrs); 873 swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
881 874
882 return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer); 875 return __phys_to_dma(dev, io_tlb_overflow_buffer);
883} 876}
884 877
885/* 878/*
@@ -1009,7 +1002,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
1009 sg_dma_len(sgl) = 0; 1002 sg_dma_len(sgl) = 0;
1010 return 0; 1003 return 0;
1011 } 1004 }
1012 sg->dma_address = swiotlb_phys_to_dma(hwdev, map); 1005 sg->dma_address = __phys_to_dma(hwdev, map);
1013 } else 1006 } else
1014 sg->dma_address = dev_addr; 1007 sg->dma_address = dev_addr;
1015 sg_dma_len(sg) = sg->length; 1008 sg_dma_len(sg) = sg->length;
@@ -1073,7 +1066,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
1073int 1066int
1074swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) 1067swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
1075{ 1068{
1076 return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer)); 1069 return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer));
1077} 1070}
1078 1071
1079/* 1072/*
@@ -1085,7 +1078,7 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
1085int 1078int
1086swiotlb_dma_supported(struct device *hwdev, u64 mask) 1079swiotlb_dma_supported(struct device *hwdev, u64 mask)
1087{ 1080{
1088 return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask; 1081 return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
1089} 1082}
1090 1083
1091#ifdef CONFIG_DMA_DIRECT_OPS 1084#ifdef CONFIG_DMA_DIRECT_OPS