-rw-r--r--  Documentation/DMA-API-HOWTO.txt | 10
-rw-r--r--  Documentation/features/io/dma_map_attrs/arch-support.txt | 40
-rw-r--r--  arch/Kconfig | 3
-rw-r--r--  arch/alpha/Kconfig | 1
-rw-r--r--  arch/alpha/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/arc/Kconfig | 1
-rw-r--r--  arch/arc/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/arm/Kconfig | 1
-rw-r--r--  arch/arm/include/asm/dma-mapping.h | 7
-rw-r--r--  arch/arm64/Kconfig | 1
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/avr32/Kconfig | 1
-rw-r--r--  arch/avr32/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/blackfin/Kconfig | 1
-rw-r--r--  arch/blackfin/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/c6x/Kconfig | 1
-rw-r--r--  arch/c6x/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/cris/Kconfig | 1
-rw-r--r--  arch/cris/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/frv/Kconfig | 1
-rw-r--r--  arch/frv/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/h8300/Kconfig | 1
-rw-r--r--  arch/h8300/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/hexagon/Kconfig | 1
-rw-r--r--  arch/hexagon/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/ia64/Kconfig | 1
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/m68k/Kconfig | 1
-rw-r--r--  arch/m68k/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/metag/Kconfig | 1
-rw-r--r--  arch/metag/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/microblaze/Kconfig | 1
-rw-r--r--  arch/microblaze/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/mips/Kconfig | 1
-rw-r--r--  arch/mips/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/mn10300/Kconfig | 1
-rw-r--r--  arch/mn10300/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/nios2/Kconfig | 1
-rw-r--r--  arch/openrisc/Kconfig | 3
-rw-r--r--  arch/openrisc/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/parisc/Kconfig | 1
-rw-r--r--  arch/parisc/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/powerpc/Kconfig | 1
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/s390/Kconfig | 1
-rw-r--r--  arch/s390/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/sh/Kconfig | 1
-rw-r--r--  arch/sh/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/sparc/Kconfig | 1
-rw-r--r--  arch/sparc/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/tile/Kconfig | 1
-rw-r--r--  arch/tile/include/asm/dma-mapping.h | 3
-rw-r--r--  arch/unicore32/Kconfig | 1
-rw-r--r--  arch/unicore32/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/x86/Kconfig | 1
-rw-r--r--  arch/x86/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/xtensa/Kconfig | 1
-rw-r--r--  arch/xtensa/include/asm/dma-mapping.h | 2
-rw-r--r--  drivers/gpu/drm/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/imx/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/shmobile/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/sti/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/tilcdc/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/vc4/Kconfig | 2
-rw-r--r--  drivers/media/platform/Kconfig | 1
-rw-r--r--  include/asm-generic/dma-mapping-broken.h | 95
-rw-r--r--  include/asm-generic/dma-mapping-common.h | 358
-rw-r--r--  include/linux/dma-attrs.h | 10
-rw-r--r--  include/linux/dma-mapping.h | 379
70 files changed, 369 insertions, 633 deletions
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index d69b3fc64e14..781024ef9050 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -951,16 +951,6 @@ to "Closing".
 alignment constraints (e.g. the alignment constraints about 64-bit
 objects).
 
-3) Supporting multiple types of IOMMUs
-
-If your architecture needs to support multiple types of IOMMUs, you
-can use include/linux/asm-generic/dma-mapping-common.h. It's a
-library to support the DMA API with multiple types of IOMMUs. Lots
-of architectures (x86, powerpc, sh, alpha, ia64, microblaze and
-sparc) use it. Choose one to see how it can be used. If you need to
-support multiple types of IOMMUs in a single system, the example of
-x86 or powerpc helps.
-
 Closing
 
 This document, and the API itself, would not be in its current
diff --git a/Documentation/features/io/dma_map_attrs/arch-support.txt b/Documentation/features/io/dma_map_attrs/arch-support.txt
deleted file mode 100644
index 51d0f1c02a3e..000000000000
--- a/Documentation/features/io/dma_map_attrs/arch-support.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-# Feature name: dma_map_attrs
-# Kconfig: HAVE_DMA_ATTRS
-# description: arch provides dma_*map*_attrs() APIs
-#
-    -----------------------
-    |         arch |status|
-    -----------------------
-    |       alpha: |  ok  |
-    |         arc: | TODO |
-    |         arm: |  ok  |
-    |       arm64: |  ok  |
-    |       avr32: | TODO |
-    |    blackfin: | TODO |
-    |         c6x: | TODO |
-    |        cris: | TODO |
-    |         frv: | TODO |
-    |       h8300: |  ok  |
-    |     hexagon: |  ok  |
-    |        ia64: |  ok  |
-    |        m32r: | TODO |
-    |        m68k: | TODO |
-    |       metag: | TODO |
-    |  microblaze: |  ok  |
-    |        mips: |  ok  |
-    |     mn10300: | TODO |
-    |       nios2: | TODO |
-    |    openrisc: |  ok  |
-    |      parisc: | TODO |
-    |     powerpc: |  ok  |
-    |        s390: |  ok  |
-    |       score: | TODO |
-    |          sh: |  ok  |
-    |       sparc: |  ok  |
-    |        tile: |  ok  |
-    |          um: | TODO |
-    |   unicore32: |  ok  |
-    |         x86: |  ok  |
-    |      xtensa: | TODO |
-    -----------------------
diff --git a/arch/Kconfig b/arch/Kconfig
index 51c03efb4083..f6b649d88ec8 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -205,9 +205,6 @@ config HAVE_NMI_WATCHDOG
 config HAVE_ARCH_TRACEHOOK
 	bool
 
-config HAVE_DMA_ATTRS
-	bool
-
 config HAVE_DMA_CONTIGUOUS
 	bool
 
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index f515a4dbf7a0..9d8a85801ed1 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -9,7 +9,6 @@ config ALPHA
 	select HAVE_OPROFILE
 	select HAVE_PCSPKR_PLATFORM
 	select HAVE_PERF_EVENTS
-	select HAVE_DMA_ATTRS
 	select VIRT_TO_BUS
 	select GENERIC_IRQ_PROBE
 	select AUTO_IRQ_AFFINITY if SMP
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index 72a8ca7796d9..3c3451f58ff4 100644
--- a/arch/alpha/include/asm/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
@@ -10,8 +10,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return dma_ops;
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 #define dma_cache_sync(dev, va, size, dir) ((void)0)
 
 #endif /* _ALPHA_DMA_MAPPING_H */
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 8150c2783583..76dde9db7934 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -38,7 +38,6 @@ config ARC
 	select OF_EARLY_FLATTREE
 	select PERF_USE_VMALLOC
 	select HAVE_DEBUG_STACKOVERFLOW
-	select HAVE_DMA_ATTRS
 
 config TRACE_IRQFLAGS_SUPPORT
 	def_bool y
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
index 2a617f9c1e92..660205414f1d 100644
--- a/arch/arc/include/asm/dma-mapping.h
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -18,6 +18,4 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return &arc_dma_ops;
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 #endif
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 6a889afa6a2c..52311774e18e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -47,7 +47,6 @@ config ARM
 	select HAVE_C_RECORDMCOUNT
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
-	select HAVE_DMA_ATTRS
 	select HAVE_DMA_CONTIGUOUS if MMU
 	select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32 && MMU
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index ccb3aa64640d..6ad1ceda62a5 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -41,13 +41,6 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
 #define HAVE_ARCH_DMA_SUPPORTED 1
 extern int dma_supported(struct device *dev, u64 mask);
 
-/*
- * Note that while the generic code provides dummy dma_{alloc,free}_noncoherent
- * implementations, we don't provide a dma_cache_sync function so drivers using
- * this API are highlighted with build warnings.
- */
-#include <asm-generic/dma-mapping-common.h>
-
 #ifdef __arch_page_to_dma
 #error Please update to __arch_pfn_to_dma
 #endif
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6be3fa2310ee..8cc62289a63e 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -64,7 +64,6 @@ config ARM64
 	select HAVE_DEBUG_BUGVERBOSE
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
-	select HAVE_DMA_ATTRS
 	select HAVE_DMA_CONTIGUOUS
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 61e08f360e31..ba437f090a74 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -64,8 +64,6 @@ static inline bool is_device_dma_coherent(struct device *dev)
 	return dev->archdata.dma_coherent;
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
 	return (dma_addr_t)paddr;
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index aac3d6972c30..b6878eb64884 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -7,7 +7,6 @@ config AVR32
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
 	select VIRT_TO_BUS
-	select HAVE_DMA_ATTRS
 	select GENERIC_IRQ_PROBE
 	select GENERIC_ATOMIC64
 	select HARDIRQS_SW_RESEND
diff --git a/arch/avr32/include/asm/dma-mapping.h b/arch/avr32/include/asm/dma-mapping.h
index 0239ca84eb41..1115f2a645d1 100644
--- a/arch/avr32/include/asm/dma-mapping.h
+++ b/arch/avr32/include/asm/dma-mapping.h
@@ -11,6 +11,4 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return &avr32_dma_ops;
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 #endif /* __ASM_AVR32_DMA_MAPPING_H */
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 4be2f905198d..af76634f8d98 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -14,7 +14,6 @@ config BLACKFIN
 	def_bool y
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_TRACEHOOK
-	select HAVE_DMA_ATTRS
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h
index ea5a2e82db7c..3490570aaa82 100644
--- a/arch/blackfin/include/asm/dma-mapping.h
+++ b/arch/blackfin/include/asm/dma-mapping.h
@@ -43,6 +43,4 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return &bfin_dma_ops;
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 #endif /* _BLACKFIN_DMA_MAPPING_H */
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 8602f725e270..79049d432d3c 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -18,7 +18,6 @@ config C6X
 	select GENERIC_CLOCKEVENTS
 	select MODULES_USE_ELF_RELA
 	select ARCH_NO_COHERENT_DMA_MMAP
-	select HAVE_DMA_ATTRS
 
 config MMU
 	def_bool n
diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h
index f881e425d442..6b5cd7b0cf32 100644
--- a/arch/c6x/include/asm/dma-mapping.h
+++ b/arch/c6x/include/asm/dma-mapping.h
@@ -24,8 +24,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return &c6x_dma_ops;
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 extern void coherent_mem_init(u32 start, u32 size);
 void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 		    gfp_t gfp, struct dma_attrs *attrs);
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 20d919c93c7f..e086f9e93728 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -54,7 +54,6 @@ config CRIS
 	select GENERIC_ATOMIC64
 	select HAVE_UID16
 	select VIRT_TO_BUS
-	select HAVE_DMA_ATTRS
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select GENERIC_IRQ_SHOW
 	select GENERIC_IOMAP
diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h
index 34e7c7c7eccb..5a370178a0e9 100644
--- a/arch/cris/include/asm/dma-mapping.h
+++ b/arch/cris/include/asm/dma-mapping.h
@@ -16,8 +16,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 }
 #endif
 
-#include <asm-generic/dma-mapping-common.h>
-
 static inline void
 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index e3837814f593..eefd9a4ed156 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -16,7 +16,6 @@ config FRV
 	select OLD_SIGACTION
 	select HAVE_DEBUG_STACKOVERFLOW
 	select ARCH_NO_COHERENT_DMA_MMAP
-	select HAVE_DMA_ATTRS
 
 config ZONE_DMA
 	bool
diff --git a/arch/frv/include/asm/dma-mapping.h b/arch/frv/include/asm/dma-mapping.h
index 750951cbba88..9a82bfa4303b 100644
--- a/arch/frv/include/asm/dma-mapping.h
+++ b/arch/frv/include/asm/dma-mapping.h
@@ -21,6 +21,4 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	flush_write_buffers();
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 #endif /* _ASM_DMA_MAPPING_H */
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 2e20333cbce9..8c7c82586da0 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -15,7 +15,6 @@ config H8300
 	select OF_IRQ
 	select OF_EARLY_FLATTREE
 	select HAVE_MEMBLOCK
-	select HAVE_DMA_ATTRS
 	select CLKSRC_OF
 	select H8300_TMR8
 
diff --git a/arch/h8300/include/asm/dma-mapping.h b/arch/h8300/include/asm/dma-mapping.h
index d9b5b806afe6..7ac7fadffed0 100644
--- a/arch/h8300/include/asm/dma-mapping.h
+++ b/arch/h8300/include/asm/dma-mapping.h
@@ -8,6 +8,4 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return &h8300_dma_map_ops;
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 #endif
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 4dc89d1f9c48..57298e7b4867 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -27,7 +27,6 @@ config HEXAGON
 	select GENERIC_CLOCKEVENTS_BROADCAST
 	select MODULES_USE_ELF_RELA
 	select GENERIC_CPU_DEVICES
-	select HAVE_DMA_ATTRS
 	---help---
 	  Qualcomm Hexagon is a processor architecture designed for high
 	  performance and low power across a wide variety of applications.
diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h
index 268fde8a4575..aa6203464520 100644
--- a/arch/hexagon/include/asm/dma-mapping.h
+++ b/arch/hexagon/include/asm/dma-mapping.h
@@ -49,8 +49,6 @@ extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 			   enum dma_data_direction direction);
 
-#include <asm-generic/dma-mapping-common.h>
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	if (!dev->dma_mask)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index eb0249e37981..fb0515eb639b 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -25,7 +25,6 @@ config IA64
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE if (!ITANIUM)
 	select HAVE_FUNCTION_TRACER
-	select HAVE_DMA_ATTRS
 	select TTY
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_DMA_API_DEBUG
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 9beccf8010bd..d472805edfa9 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -25,8 +25,6 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
 
 #define get_dma_ops(dev) platform_dma_get_ops(dev)
 
-#include <asm-generic/dma-mapping-common.h>
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	if (!dev->dma_mask)
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index d5d75b3154a1..498b567f007b 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -23,7 +23,6 @@ config M68K
 	select MODULES_USE_ELF_RELA
 	select OLD_SIGSUSPEND3
 	select OLD_SIGACTION
-	select HAVE_DMA_ATTRS
 
 config RWSEM_GENERIC_SPINLOCK
 	bool
diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h
index 2c082a63af35..96c536194287 100644
--- a/arch/m68k/include/asm/dma-mapping.h
+++ b/arch/m68k/include/asm/dma-mapping.h
@@ -8,8 +8,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return &m68k_dma_ops;
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction dir)
 {
diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig
index ad8604c2d9f6..a0fa88da3e31 100644
--- a/arch/metag/Kconfig
+++ b/arch/metag/Kconfig
@@ -29,7 +29,6 @@ config METAG
 	select OF
 	select OF_EARLY_FLATTREE
 	select SPARSE_IRQ
-	select HAVE_DMA_ATTRS
 
 config STACKTRACE_SUPPORT
 	def_bool y
diff --git a/arch/metag/include/asm/dma-mapping.h b/arch/metag/include/asm/dma-mapping.h
index 768f2e30236d..27af5d479ce6 100644
--- a/arch/metag/include/asm/dma-mapping.h
+++ b/arch/metag/include/asm/dma-mapping.h
@@ -8,8 +8,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return &metag_dma_ops;
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 /*
  * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to
  * do any flushing here.
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 5ecd0287a874..53b69deceb99 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -19,7 +19,6 @@ config MICROBLAZE
 	select HAVE_ARCH_KGDB
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
-	select HAVE_DMA_ATTRS
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index 24b12970c9cf..1884783d15c0 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -44,8 +44,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return &dma_direct_ops;
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 static inline void __dma_sync(unsigned long paddr,
 			      size_t size, enum dma_data_direction direction)
 {
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 71683a853372..fbf3f6670b69 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -31,7 +31,6 @@ config MIPS
 	select RTC_LIB if !MACH_LOONGSON64
 	select GENERIC_ATOMIC64 if !64BIT
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-	select HAVE_DMA_ATTRS
 	select HAVE_DMA_CONTIGUOUS
 	select HAVE_DMA_API_DEBUG
 	select GENERIC_IRQ_PROBE
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index e604f760c4a0..12fa79e2f1b4 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -29,8 +29,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 
 static inline void dma_mark_clean(void *addr, size_t size) {}
 
-#include <asm-generic/dma-mapping-common.h>
-
 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 			   enum dma_data_direction direction);
 
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index e8ebf78f6d21..10607f0d2bcd 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -15,7 +15,6 @@ config MN10300
 	select OLD_SIGACTION
 	select HAVE_DEBUG_STACKOVERFLOW
 	select ARCH_NO_COHERENT_DMA_MMAP
-	select HAVE_DMA_ATTRS
 
 config AM33_2
 	def_bool n
diff --git a/arch/mn10300/include/asm/dma-mapping.h b/arch/mn10300/include/asm/dma-mapping.h
index e69b0130335c..1dcd44757f32 100644
--- a/arch/mn10300/include/asm/dma-mapping.h
+++ b/arch/mn10300/include/asm/dma-mapping.h
@@ -28,6 +28,4 @@ void dma_cache_sync(void *vaddr, size_t size,
 	mn10300_dcache_flush_inv();
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 #endif
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index 4b2504d28178..437555424bda 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -16,7 +16,6 @@ config NIOS2
 	select SOC_BUS
 	select SPARSE_IRQ
 	select USB_ARCH_HAS_HCD if USB_SUPPORT
-	select HAVE_DMA_ATTRS
 
 config GENERIC_CSUM
 	def_bool y
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 443f44de1020..e118c02cc79a 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -29,9 +29,6 @@ config OPENRISC
 config MMU
 	def_bool y
 
-config HAVE_DMA_ATTRS
-	def_bool y
-
 config RWSEM_GENERIC_SPINLOCK
 	def_bool y
 
diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h
index 413bfcf86384..1f260bccb368 100644
--- a/arch/openrisc/include/asm/dma-mapping.h
+++ b/arch/openrisc/include/asm/dma-mapping.h
@@ -42,6 +42,4 @@ static inline int dma_supported(struct device *dev, u64 dma_mask)
 	return dma_mask == DMA_BIT_MASK(32);
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 #endif /* __ASM_OPENRISC_DMA_MAPPING_H */
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 1489351134fa..14f655cf542e 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -30,7 +30,6 @@ config PARISC
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_ARCH_AUDITSYSCALL
 	select ARCH_NO_COHERENT_DMA_MMAP
-	select HAVE_DMA_ATTRS
 
 	help
 	  The PA-RISC microprocessor is designed by Hewlett-Packard and used
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index 4de518647612..16e024602737 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -83,6 +83,4 @@ struct parisc_device;
 void * sba_get_iommu(struct parisc_device *dev);
 #endif
 
-#include <asm-generic/dma-mapping-common.h>
-
 #endif
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 8310be4ffe31..e4824fd04bb7 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -108,7 +108,6 @@ config PPC
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
-	select HAVE_DMA_ATTRS
 	select HAVE_DMA_API_DEBUG
 	select HAVE_OPROFILE
 	select HAVE_DEBUG_KMEMLEAK
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 7f522c021dc3..77816acd4fd9 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -125,8 +125,6 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
 #define HAVE_ARCH_DMA_SET_MASK 1
 extern int dma_set_mask(struct device *dev, u64 dma_mask);
 
-#include <asm-generic/dma-mapping-common.h>
-
 extern int __dma_set_mask(struct device *dev, u64 dma_mask);
 extern u64 __dma_get_required_mask(struct device *dev);
 
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index dbeeb3a049f2..3be9c832dec1 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -579,7 +579,6 @@ config QDIO
 
 menuconfig PCI
 	bool "PCI support"
-	select HAVE_DMA_ATTRS
 	select PCI_MSI
 	select IOMMU_SUPPORT
 	help
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index b3fd54d93dd2..e64bfcb9702f 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -23,8 +23,6 @@ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 {
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	if (!dev->dma_mask)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 6c391a5d3e5c..e13da05505dc 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -11,7 +11,6 @@ config SUPERH
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_DMA_API_DEBUG
-	select HAVE_DMA_ATTRS
 	select HAVE_PERF_EVENTS
 	select HAVE_DEBUG_BUGVERBOSE
 	select ARCH_HAVE_CUSTOM_GPIO_H
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index a3745a3fe029..e11cf0c8206b 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -11,8 +11,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 
 #define DMA_ERROR_CODE 0
 
-#include <asm-generic/dma-mapping-common.h>
-
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction dir);
 
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 3203e42190dd..57ffaf285c2f 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -26,7 +26,6 @@ config SPARC
 	select RTC_CLASS
 	select RTC_DRV_M48T59
 	select RTC_SYSTOHC
-	select HAVE_DMA_ATTRS
 	select HAVE_DMA_API_DEBUG
 	select HAVE_ARCH_JUMP_LABEL if SPARC64
 	select GENERIC_IRQ_SHOW
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 2777092dd851..1180ae254154 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -37,6 +37,4 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return dma_ops;
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 #endif
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 6bfbe8b71e7e..de4a4fff9323 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -5,7 +5,6 @@ config TILE
 	def_bool y
 	select HAVE_PERF_EVENTS
 	select USE_PMC if PERF_EVENTS
-	select HAVE_DMA_ATTRS
 	select HAVE_DMA_API_DEBUG
 	select HAVE_KVM if !TILEGX
 	select GENERIC_FIND_FIRST_BIT
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index c342736e3f1f..01ceb4a895b0 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -73,9 +73,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 }
 
 #define HAVE_ARCH_DMA_SET_MASK 1
-
-#include <asm-generic/dma-mapping-common.h>
-
 int dma_set_mask(struct device *dev, u64 mask);
 
 /*
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index 877342640b6e..e5602ee9c610 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -5,7 +5,6 @@ config UNICORE32
 	select ARCH_MIGHT_HAVE_PC_SERIO
 	select HAVE_MEMBLOCK
 	select HAVE_GENERIC_DMA_COHERENT
-	select HAVE_DMA_ATTRS
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
 	select GENERIC_ATOMIC64
diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h
index 8140e053ccd3..4749854afd03 100644
--- a/arch/unicore32/include/asm/dma-mapping.h
+++ b/arch/unicore32/include/asm/dma-mapping.h
@@ -28,8 +28,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return &swiotlb_dma_map_ops;
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	if (dev && dev->dma_mask)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 92b2a73162ee..89159a6fa503 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -100,7 +100,6 @@ config X86
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_DMA_API_DEBUG
-	select HAVE_DMA_ATTRS
 	select HAVE_DMA_CONTIGUOUS
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 953b7263f844..3a27b93e6261 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -46,8 +46,6 @@ bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
 #define HAVE_ARCH_DMA_SUPPORTED 1
 extern int dma_supported(struct device *hwdev, u64 mask);
 
-#include <asm-generic/dma-mapping-common.h>
-
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 					dma_addr_t *dma_addr, gfp_t flag,
 					struct dma_attrs *attrs);
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 82044f732323..e9df1567d778 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -15,7 +15,6 @@ config XTENSA
 	select GENERIC_PCI_IOMAP
 	select GENERIC_SCHED_CLOCK
 	select HAVE_DMA_API_DEBUG
-	select HAVE_DMA_ATTRS
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUTEX_CMPXCHG if !MMU
 	select HAVE_IRQ_TIME_ACCOUNTING
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index 66c9ba261e30..87b7a7dfbcf3 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -30,8 +30,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return &xtensa_dma_map_ops;
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction direction);
 
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 59babd5a5396..8ae7ab68cb97 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -82,13 +82,13 @@ config DRM_TTM
 
 config DRM_GEM_CMA_HELPER
 	bool
-	depends on DRM && HAVE_DMA_ATTRS
+	depends on DRM
 	help
 	  Choose this if you need the GEM CMA helper functions
 
 config DRM_KMS_CMA_HELPER
 	bool
-	depends on DRM && HAVE_DMA_ATTRS
+	depends on DRM
 	select DRM_GEM_CMA_HELPER
 	select DRM_KMS_FB_HELPER
 	select FB_SYS_FILLRECT
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index 35ca4f007839..a1844b50546c 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -5,7 +5,7 @@ config DRM_IMX
 	select VIDEOMODE_HELPERS
 	select DRM_GEM_CMA_HELPER
 	select DRM_KMS_CMA_HELPER
-	depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM) && HAVE_DMA_ATTRS
+	depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM)
 	depends on IMX_IPUV3_CORE
 	help
 	  enable i.MX graphics support
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index d4e0a39568f6..96dcd4a78951 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -1,6 +1,6 @@
 config DRM_RCAR_DU
 	tristate "DRM Support for R-Car Display Unit"
-	depends on DRM && ARM && HAVE_DMA_ATTRS && OF
+	depends on DRM && ARM && OF
 	depends on ARCH_SHMOBILE || COMPILE_TEST
 	select DRM_KMS_HELPER
 	select DRM_KMS_CMA_HELPER
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index b9202aa6f8ab..8d17d00ddb4b 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -1,6 +1,6 @@
 config DRM_SHMOBILE
 	tristate "DRM Support for SH Mobile"
-	depends on DRM && ARM && HAVE_DMA_ATTRS
+	depends on DRM && ARM
 	depends on ARCH_SHMOBILE || COMPILE_TEST
 	depends on FB_SH_MOBILE_MERAM || !FB_SH_MOBILE_MERAM
 	select BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index 10c1b1926e6f..5ad43a1bb260 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -1,6 +1,6 @@
 config DRM_STI
 	tristate "DRM Support for STMicroelectronics SoC stiH41x Series"
-	depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM) && HAVE_DMA_ATTRS
+	depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM)
 	select RESET_CONTROLLER
 	select DRM_KMS_HELPER
 	select DRM_GEM_CMA_HELPER
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index 78beafb0742c..f60a1ec84fa4 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -1,6 +1,6 @@
 config DRM_TILCDC
 	tristate "DRM Support for TI LCDC Display Controller"
-	depends on DRM && OF && ARM && HAVE_DMA_ATTRS
+	depends on DRM && OF && ARM
 	select DRM_KMS_HELPER
 	select DRM_KMS_FB_HELPER
 	select DRM_KMS_CMA_HELPER
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 2d7d115ddf3f..584810474e5b 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -1,7 +1,7 @@
 config DRM_VC4
 	tristate "Broadcom VC4 Graphics"
 	depends on ARCH_BCM2835 || COMPILE_TEST
-	depends on DRM && HAVE_DMA_ATTRS
+	depends on DRM
 	select DRM_KMS_HELPER
 	select DRM_KMS_CMA_HELPER
 	select DRM_GEM_CMA_HELPER
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 0c53805dff0e..526359447ff9 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -216,7 +216,6 @@ config VIDEO_STI_BDISP
 	tristate "STMicroelectronics BDISP 2D blitter driver"
 	depends on VIDEO_DEV && VIDEO_V4L2
 	depends on ARCH_STI || COMPILE_TEST
-	depends on HAVE_DMA_ATTRS
 	select VIDEOBUF2_DMA_CONTIG
 	select V4L2_MEM2MEM_DEV
 	help
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h
deleted file mode 100644
index 6c32af918c2f..000000000000
--- a/include/asm-generic/dma-mapping-broken.h
+++ /dev/null
@@ -1,95 +0,0 @@
-#ifndef _ASM_GENERIC_DMA_MAPPING_H
-#define _ASM_GENERIC_DMA_MAPPING_H
-
-/* define the dma api to allow compilation but not linking of
- * dma dependent code. Code that depends on the dma-mapping
- * API needs to set 'depends on HAS_DMA' in its Kconfig
- */
-
-struct scatterlist;
-
-extern void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   gfp_t flag);
-
-extern void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		  dma_addr_t dma_handle);
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-				    dma_addr_t *dma_handle, gfp_t flag,
-				    struct dma_attrs *attrs)
-{
-	/* attrs is not supported and ignored */
-	return dma_alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
-				  void *cpu_addr, dma_addr_t dma_handle,
-				  struct dma_attrs *attrs)
-{
-	/* attrs is not supported and ignored */
-	dma_free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-extern dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-	       enum dma_data_direction direction);
-
-extern void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		 enum dma_data_direction direction);
-
-extern int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	   enum dma_data_direction direction);
-
-extern void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	     enum dma_data_direction direction);
-
-extern dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-	     size_t size, enum dma_data_direction direction);
-
-extern void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	       enum dma_data_direction direction);
-
-extern void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-			enum dma_data_direction direction);
-
-extern void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size,
-			      enum dma_data_direction direction);
-
-extern void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-		    enum dma_data_direction direction);
-
-#define dma_sync_single_for_device dma_sync_single_for_cpu
-#define dma_sync_single_range_for_device dma_sync_single_range_for_cpu
-#define dma_sync_sg_for_device dma_sync_sg_for_cpu
-
-extern int
-dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
-
-extern int
-dma_supported(struct device *dev, u64 mask);
-
-extern int
-dma_set_mask(struct device *dev, u64 mask);
-
-extern int
-dma_get_cache_alignment(void);
-
-extern void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction direction);
-
-#endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
deleted file mode 100644
index b1bc954eccf3..000000000000
--- a/include/asm-generic/dma-mapping-common.h
+++ /dev/null
@@ -1,358 +0,0 @@
-#ifndef _ASM_GENERIC_DMA_MAPPING_H
-#define _ASM_GENERIC_DMA_MAPPING_H
-
-#include <linux/kmemcheck.h>
-#include <linux/bug.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
-#include <linux/dma-attrs.h>
-#include <asm-generic/dma-coherent.h>
-
-static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
-					      size_t size,
-					      enum dma_data_direction dir,
-					      struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	dma_addr_t addr;
-
-	kmemcheck_mark_initialized(ptr, size);
-	BUG_ON(!valid_dma_direction(dir));
-	addr = ops->map_page(dev, virt_to_page(ptr),
-			     (unsigned long)ptr & ~PAGE_MASK, size,
-			     dir, attrs);
-	debug_dma_map_page(dev, virt_to_page(ptr),
-			   (unsigned long)ptr & ~PAGE_MASK, size,
-			   dir, addr, true);
-	return addr;
-}
-
-static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
-					  size_t size,
-					  enum dma_data_direction dir,
-					  struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->unmap_page)
-		ops->unmap_page(dev, addr, size, dir, attrs);
-	debug_dma_unmap_page(dev, addr, size, dir, true);
-}
-
-/*
- * dma_maps_sg_attrs returns 0 on error and > 0 on success.
- * It should never return a value < 0.
- */
-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
-				   int nents, enum dma_data_direction dir,
-				   struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	int i, ents;
-	struct scatterlist *s;
-
-	for_each_sg(sg, s, nents, i)
-		kmemcheck_mark_initialized(sg_virt(s), s->length);
-	BUG_ON(!valid_dma_direction(dir));
-	ents = ops->map_sg(dev, sg, nents, dir, attrs);
-	BUG_ON(ents < 0);
-	debug_dma_map_sg(dev, sg, nents, ents, dir);
-
-	return ents;
-}
-
-static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
-				      int nents, enum dma_data_direction dir,
-				      struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	debug_dma_unmap_sg(dev, sg, nents, dir);
-	if (ops->unmap_sg)
-		ops->unmap_sg(dev, sg, nents, dir, attrs);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      size_t offset, size_t size,
-				      enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	dma_addr_t addr;
-
-	kmemcheck_mark_initialized(page_address(page) + offset, size);
-	BUG_ON(!valid_dma_direction(dir));
-	addr = ops->map_page(dev, page, offset, size, dir, NULL);
-	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
-	return addr;
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
-				  size_t size, enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->unmap_page)
-		ops->unmap_page(dev, addr, size, dir, NULL);
-	debug_dma_unmap_page(dev, addr, size, dir, false);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
-					   size_t size,
-					   enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_cpu)
-		ops->sync_single_for_cpu(dev, addr, size, dir);
-	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-					      dma_addr_t addr, size_t size,
-					      enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_device)
-		ops->sync_single_for_device(dev, addr, size, dir);
-	debug_dma_sync_single_for_device(dev, addr, size, dir);
-}
-
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-						 dma_addr_t addr,
-						 unsigned long offset,
-						 size_t size,
-						 enum dma_data_direction dir)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_cpu)
-		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
-	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-						    dma_addr_t addr,
-						    unsigned long offset,
-						    size_t size,
-						    enum dma_data_direction dir)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_device)
-		ops->sync_single_for_device(dev, addr + offset, size, dir);
-	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-		    int nelems, enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_sg_for_cpu)
-		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
-	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-		       int nelems, enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_sg_for_device)
-		ops->sync_sg_for_device(dev, sg, nelems, dir);
-	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
-
-}
-
-#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
-#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
-#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
-#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
-
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
-
-void *dma_common_contiguous_remap(struct page *page, size_t size,
-				  unsigned long vm_flags,
-				  pgprot_t prot, const void *caller);
-
-void *dma_common_pages_remap(struct page **pages, size_t size,
-			     unsigned long vm_flags, pgprot_t prot,
-			     const void *caller);
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
-
-/**
- * dma_mmap_attrs - map a coherent DMA allocation into user space
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @vma: vm_area_struct describing requested user mapping
- * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
- * @handle: device-view address returned from dma_alloc_attrs
- * @size: size of memory originally requested in dma_alloc_attrs
- * @attrs: attributes of mapping properties requested in dma_alloc_attrs
- *
- * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
- * into user space. The coherent DMA buffer must not be freed by the
- * driver until the user space mapping has been released.
- */
-static inline int
-dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
-	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	BUG_ON(!ops);
-	if (ops->mmap)
-		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
-
-int
-dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
-		       void *cpu_addr, dma_addr_t dma_addr, size_t size);
-
-static inline int
-dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
-		      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	BUG_ON(!ops);
-	if (ops->get_sgtable)
-		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
-					attrs);
-	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
-}
-
-#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
-
-#ifndef arch_dma_alloc_attrs
-#define arch_dma_alloc_attrs(dev, flag) (true)
-#endif
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-				    dma_addr_t *dma_handle, gfp_t flag,
-				    struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	void *cpu_addr;
-
-	BUG_ON(!ops);
-
-	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
-		return cpu_addr;
-
-	if (!arch_dma_alloc_attrs(&dev, &flag))
-		return NULL;
-	if (!ops->alloc)
-		return NULL;
-
-	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
-	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
-	return cpu_addr;
-}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
-				  void *cpu_addr, dma_addr_t dma_handle,
-				  struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!ops);
-	WARN_ON(irqs_disabled());
-
-	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
-		return;
-
-	if (!ops->free)
-		return;
-
-	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-	ops->free(dev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag)
-{
-	return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
-{
-	return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
-}
-
-static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
-					  dma_addr_t *dma_handle, gfp_t gfp)
-{
-	DEFINE_DMA_ATTRS(attrs);
-
-	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
-	return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
-}
-
-static inline void dma_free_noncoherent(struct device *dev, size_t size,
-					void *cpu_addr, dma_addr_t dma_handle)
-{
-	DEFINE_DMA_ATTRS(attrs);
-
-	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
-	dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	debug_dma_mapping_error(dev, dma_addr);
-
-	if (get_dma_ops(dev)->mapping_error)
-		return get_dma_ops(dev)->mapping_error(dev, dma_addr);
-
-#ifdef DMA_ERROR_CODE
-	return dma_addr == DMA_ERROR_CODE;
-#else
-	return 0;
-#endif
-}
-
-#ifndef HAVE_ARCH_DMA_SUPPORTED
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	if (!ops)
-		return 0;
-	if (!ops->dma_supported)
-		return 1;
-	return ops->dma_supported(dev, mask);
-}
-#endif
-
-#ifndef HAVE_ARCH_DMA_SET_MASK
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	if (ops->set_dma_mask)
-		return ops->set_dma_mask(dev, mask);
-
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-	*dev->dma_mask = mask;
-	return 0;
-}
-#endif
-
-#endif
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
index c8e1831d7572..99c0be00b47c 100644
--- a/include/linux/dma-attrs.h
+++ b/include/linux/dma-attrs.h
@@ -41,7 +41,6 @@ static inline void init_dma_attrs(struct dma_attrs *attrs)
41 bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS); 41 bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS);
42} 42}
43 43
44#ifdef CONFIG_HAVE_DMA_ATTRS
45/** 44/**
46 * dma_set_attr - set a specific attribute 45 * dma_set_attr - set a specific attribute
47 * @attr: attribute to set 46 * @attr: attribute to set
@@ -67,14 +66,5 @@ static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
67 BUG_ON(attr >= DMA_ATTR_MAX); 66 BUG_ON(attr >= DMA_ATTR_MAX);
68 return test_bit(attr, attrs->flags); 67 return test_bit(attr, attrs->flags);
69} 68}
70#else /* !CONFIG_HAVE_DMA_ATTRS */
71static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
72{
73}
74 69
75static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
76{
77 return 0;
78}
79#endif /* CONFIG_HAVE_DMA_ATTRS */
80#endif /* _DMA_ATTR_H */ 70#endif /* _DMA_ATTR_H */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 2e551e2d2d03..cc0517b71c5e 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -6,8 +6,12 @@
6#include <linux/device.h> 6#include <linux/device.h>
7#include <linux/err.h> 7#include <linux/err.h>
8#include <linux/dma-attrs.h> 8#include <linux/dma-attrs.h>
9#include <linux/dma-debug.h>
9#include <linux/dma-direction.h> 10#include <linux/dma-direction.h>
10#include <linux/scatterlist.h> 11#include <linux/scatterlist.h>
12#include <linux/kmemcheck.h>
13#include <linux/bug.h>
14#include <asm-generic/dma-coherent.h>
11 15
12/* 16/*
13 * A dma_addr_t can hold any valid DMA or bus address for the platform. 17 * A dma_addr_t can hold any valid DMA or bus address for the platform.
@@ -86,7 +90,363 @@ static inline int is_device_dma_capable(struct device *dev)
86#ifdef CONFIG_HAS_DMA 90#ifdef CONFIG_HAS_DMA
87#include <asm/dma-mapping.h> 91#include <asm/dma-mapping.h>
88#else 92#else
89#include <asm-generic/dma-mapping-broken.h> 93/*
94 * Define the dma api to allow compilation but not linking of
95 * dma dependent code. Code that depends on the dma-mapping
96 * API needs to set 'depends on HAS_DMA' in its Kconfig
97 */
98extern struct dma_map_ops bad_dma_ops;
99static inline struct dma_map_ops *get_dma_ops(struct device *dev)
100{
101 return &bad_dma_ops;
102}
103#endif
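
As a hedged illustration of the comment above: a hypothetical driver calling into the stubbed API still compiles, but the inlined wrapper leaves an unresolved reference to bad_dma_ops, which is why such code must state "depends on HAS_DMA" in its Kconfig entry (foo_map_rx_page below is invented):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* With !CONFIG_HAS_DMA this compiles, but the call resolves to
 * bad_dma_ops->map_page and the final link fails, because bad_dma_ops
 * is only declared, never defined. */
static dma_addr_t foo_map_rx_page(struct device *dev, struct page *page)
{
	return dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
}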
104
105static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
106 size_t size,
107 enum dma_data_direction dir,
108 struct dma_attrs *attrs)
109{
110 struct dma_map_ops *ops = get_dma_ops(dev);
111 dma_addr_t addr;
112
113 kmemcheck_mark_initialized(ptr, size);
114 BUG_ON(!valid_dma_direction(dir));
115 addr = ops->map_page(dev, virt_to_page(ptr),
116 (unsigned long)ptr & ~PAGE_MASK, size,
117 dir, attrs);
118 debug_dma_map_page(dev, virt_to_page(ptr),
119 (unsigned long)ptr & ~PAGE_MASK, size,
120 dir, addr, true);
121 return addr;
122}
123
124static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
125 size_t size,
126 enum dma_data_direction dir,
127 struct dma_attrs *attrs)
128{
129 struct dma_map_ops *ops = get_dma_ops(dev);
130
131 BUG_ON(!valid_dma_direction(dir));
132 if (ops->unmap_page)
133 ops->unmap_page(dev, addr, size, dir, attrs);
134 debug_dma_unmap_page(dev, addr, size, dir, true);
135}
136
137/*
 138 * dma_map_sg_attrs returns 0 on error and > 0 on success.
139 * It should never return a value < 0.
140 */
141static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
142 int nents, enum dma_data_direction dir,
143 struct dma_attrs *attrs)
144{
145 struct dma_map_ops *ops = get_dma_ops(dev);
146 int i, ents;
147 struct scatterlist *s;
148
149 for_each_sg(sg, s, nents, i)
150 kmemcheck_mark_initialized(sg_virt(s), s->length);
151 BUG_ON(!valid_dma_direction(dir));
152 ents = ops->map_sg(dev, sg, nents, dir, attrs);
153 BUG_ON(ents < 0);
154 debug_dma_map_sg(dev, sg, nents, ents, dir);
155
156 return ents;
157}
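
A caller-side sketch of the return-value convention described above, using the plain dma_map_sg()/dma_unmap_sg() macros defined further down (hypothetical names, not part of this patch). Note that the unmap call takes the original nents, not the count returned by the mapping:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int foo_map_request(struct device *dev, struct scatterlist *sgl,
			   int nents)
{
	int mapped;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)			/* 0 means failure, never < 0 */
		return -ENOMEM;

	/* program the device with the 'mapped' entries, then later: */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);	/* original nents */
	return 0;
}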
158
159static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
160 int nents, enum dma_data_direction dir,
161 struct dma_attrs *attrs)
162{
163 struct dma_map_ops *ops = get_dma_ops(dev);
164
165 BUG_ON(!valid_dma_direction(dir));
166 debug_dma_unmap_sg(dev, sg, nents, dir);
167 if (ops->unmap_sg)
168 ops->unmap_sg(dev, sg, nents, dir, attrs);
169}
170
171static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
172 size_t offset, size_t size,
173 enum dma_data_direction dir)
174{
175 struct dma_map_ops *ops = get_dma_ops(dev);
176 dma_addr_t addr;
177
178 kmemcheck_mark_initialized(page_address(page) + offset, size);
179 BUG_ON(!valid_dma_direction(dir));
180 addr = ops->map_page(dev, page, offset, size, dir, NULL);
181 debug_dma_map_page(dev, page, offset, size, dir, addr, false);
182
183 return addr;
184}
185
186static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
187 size_t size, enum dma_data_direction dir)
188{
189 struct dma_map_ops *ops = get_dma_ops(dev);
190
191 BUG_ON(!valid_dma_direction(dir));
192 if (ops->unmap_page)
193 ops->unmap_page(dev, addr, size, dir, NULL);
194 debug_dma_unmap_page(dev, addr, size, dir, false);
195}
196
197static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
198 size_t size,
199 enum dma_data_direction dir)
200{
201 struct dma_map_ops *ops = get_dma_ops(dev);
202
203 BUG_ON(!valid_dma_direction(dir));
204 if (ops->sync_single_for_cpu)
205 ops->sync_single_for_cpu(dev, addr, size, dir);
206 debug_dma_sync_single_for_cpu(dev, addr, size, dir);
207}
208
209static inline void dma_sync_single_for_device(struct device *dev,
210 dma_addr_t addr, size_t size,
211 enum dma_data_direction dir)
212{
213 struct dma_map_ops *ops = get_dma_ops(dev);
214
215 BUG_ON(!valid_dma_direction(dir));
216 if (ops->sync_single_for_device)
217 ops->sync_single_for_device(dev, addr, size, dir);
218 debug_dma_sync_single_for_device(dev, addr, size, dir);
219}
220
221static inline void dma_sync_single_range_for_cpu(struct device *dev,
222 dma_addr_t addr,
223 unsigned long offset,
224 size_t size,
225 enum dma_data_direction dir)
226{
227 const struct dma_map_ops *ops = get_dma_ops(dev);
228
229 BUG_ON(!valid_dma_direction(dir));
230 if (ops->sync_single_for_cpu)
231 ops->sync_single_for_cpu(dev, addr + offset, size, dir);
232 debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
233}
234
235static inline void dma_sync_single_range_for_device(struct device *dev,
236 dma_addr_t addr,
237 unsigned long offset,
238 size_t size,
239 enum dma_data_direction dir)
240{
241 const struct dma_map_ops *ops = get_dma_ops(dev);
242
243 BUG_ON(!valid_dma_direction(dir));
244 if (ops->sync_single_for_device)
245 ops->sync_single_for_device(dev, addr + offset, size, dir);
246 debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
247}
248
249static inline void
250dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
251 int nelems, enum dma_data_direction dir)
252{
253 struct dma_map_ops *ops = get_dma_ops(dev);
254
255 BUG_ON(!valid_dma_direction(dir));
256 if (ops->sync_sg_for_cpu)
257 ops->sync_sg_for_cpu(dev, sg, nelems, dir);
258 debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
259}
260
261static inline void
262dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
263 int nelems, enum dma_data_direction dir)
264{
265 struct dma_map_ops *ops = get_dma_ops(dev);
266
267 BUG_ON(!valid_dma_direction(dir));
268 if (ops->sync_sg_for_device)
269 ops->sync_sg_for_device(dev, sg, nelems, dir);
270 debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
271
272}
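
A hedged sketch of the buffer-ownership handoff that the dma_sync_*() helpers above implement, for a streaming RX buffer that is reused across transfers (hypothetical names):

#include <linux/dma-mapping.h>

static void foo_rx_poll(struct device *dev, dma_addr_t buf_dma,
			void *buf, size_t len)
{
	/* hand the buffer to the CPU before reading what the device wrote */
	dma_sync_single_for_cpu(dev, buf_dma, len, DMA_FROM_DEVICE);

	/* inspect or copy the received data through 'buf' here */

	/* hand the buffer back to the device for the next transfer */
	dma_sync_single_for_device(dev, buf_dma, len, DMA_FROM_DEVICE);
}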
273
274#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
275#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
276#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
277#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
278
279extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
280 void *cpu_addr, dma_addr_t dma_addr, size_t size);
281
282void *dma_common_contiguous_remap(struct page *page, size_t size,
283 unsigned long vm_flags,
284 pgprot_t prot, const void *caller);
285
286void *dma_common_pages_remap(struct page **pages, size_t size,
287 unsigned long vm_flags, pgprot_t prot,
288 const void *caller);
289void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
290
291/**
292 * dma_mmap_attrs - map a coherent DMA allocation into user space
293 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
294 * @vma: vm_area_struct describing requested user mapping
295 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
296 * @handle: device-view address returned from dma_alloc_attrs
297 * @size: size of memory originally requested in dma_alloc_attrs
298 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
299 *
300 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
301 * into user space. The coherent DMA buffer must not be freed by the
302 * driver until the user space mapping has been released.
303 */
304static inline int
305dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
306 dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
307{
308 struct dma_map_ops *ops = get_dma_ops(dev);
309 BUG_ON(!ops);
310 if (ops->mmap)
311 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
312 return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
313}
314
315#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
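
A sketch of the user-space mapping path documented above, from a hypothetical character-device mmap handler (the foo_buf bookkeeping is invented):

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

struct foo_buf {
	struct device	*dev;
	void		*cpu_addr;
	dma_addr_t	dma_addr;
	size_t		size;
};

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct foo_buf *buf = file->private_data;

	/* buf->cpu_addr came from dma_alloc_coherent() and must not be
	 * freed until user space has unmapped it */
	return dma_mmap_coherent(buf->dev, vma, buf->cpu_addr,
				 buf->dma_addr, buf->size);
}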
316
317int
318dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
319 void *cpu_addr, dma_addr_t dma_addr, size_t size);
320
321static inline int
322dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
323 dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
324{
325 struct dma_map_ops *ops = get_dma_ops(dev);
326 BUG_ON(!ops);
327 if (ops->get_sgtable)
328 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
329 attrs);
330 return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
331}
332
333#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
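
A sketch of dma_get_sgtable() use, for the case where a coherent buffer has to be described as a scatterlist for another subsystem (hypothetical names):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int foo_export_sgt(struct device *dev, struct sg_table *sgt,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret;

	ret = dma_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
	if (ret)
		return ret;

	/* hand 'sgt' to the consumer; release it with sg_free_table()
	 * once it is no longer needed */
	return 0;
}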
334
335#ifndef arch_dma_alloc_attrs
336#define arch_dma_alloc_attrs(dev, flag) (true)
337#endif
338
339static inline void *dma_alloc_attrs(struct device *dev, size_t size,
340 dma_addr_t *dma_handle, gfp_t flag,
341 struct dma_attrs *attrs)
342{
343 struct dma_map_ops *ops = get_dma_ops(dev);
344 void *cpu_addr;
345
346 BUG_ON(!ops);
347
348 if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
349 return cpu_addr;
350
351 if (!arch_dma_alloc_attrs(&dev, &flag))
352 return NULL;
353 if (!ops->alloc)
354 return NULL;
355
356 cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
357 debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
358 return cpu_addr;
359}
360
361static inline void dma_free_attrs(struct device *dev, size_t size,
362 void *cpu_addr, dma_addr_t dma_handle,
363 struct dma_attrs *attrs)
364{
365 struct dma_map_ops *ops = get_dma_ops(dev);
366
367 BUG_ON(!ops);
368 WARN_ON(irqs_disabled());
369
370 if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
371 return;
372
373 if (!ops->free)
374 return;
375
376 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
377 ops->free(dev, size, cpu_addr, dma_handle, attrs);
378}
379
380static inline void *dma_alloc_coherent(struct device *dev, size_t size,
381 dma_addr_t *dma_handle, gfp_t flag)
382{
383 return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
384}
385
386static inline void dma_free_coherent(struct device *dev, size_t size,
387 void *cpu_addr, dma_addr_t dma_handle)
388{
389 return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
390}
391
392static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
393 dma_addr_t *dma_handle, gfp_t gfp)
394{
395 DEFINE_DMA_ATTRS(attrs);
396
397 dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
398 return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
399}
400
401static inline void dma_free_noncoherent(struct device *dev, size_t size,
402 void *cpu_addr, dma_addr_t dma_handle)
403{
404 DEFINE_DMA_ATTRS(attrs);
405
406 dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
407 dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
408}
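
The noncoherent wrappers above show the general dma_attrs pattern; a caller can stack several attributes on one descriptor in the same way. A sketch, assuming the backend honours the requested attributes:

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

static void *foo_alloc_wc_nomap(struct device *dev, size_t size,
				dma_addr_t *handle)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
	/* with DMA_ATTR_NO_KERNEL_MAPPING the return value is an opaque
	 * cookie, not necessarily a usable kernel virtual address */
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
}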
409
410static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
411{
412 debug_dma_mapping_error(dev, dma_addr);
413
414 if (get_dma_ops(dev)->mapping_error)
415 return get_dma_ops(dev)->mapping_error(dev, dma_addr);
416
417#ifdef DMA_ERROR_CODE
418 return dma_addr == DMA_ERROR_CODE;
419#else
420 return 0;
421#endif
422}
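
A sketch of the intended error check: every streaming mapping should be tested with dma_mapping_error() before the address is handed to hardware (hypothetical TX path, not part of this patch):

#include <linux/dma-mapping.h>

static int foo_map_tx(struct device *dev, void *data, size_t len,
		      dma_addr_t *out)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*out = addr;
	/* later: dma_unmap_single(dev, addr, len, DMA_TO_DEVICE) */
	return 0;
}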
423
424#ifndef HAVE_ARCH_DMA_SUPPORTED
425static inline int dma_supported(struct device *dev, u64 mask)
426{
427 struct dma_map_ops *ops = get_dma_ops(dev);
428
429 if (!ops)
430 return 0;
431 if (!ops->dma_supported)
432 return 1;
433 return ops->dma_supported(dev, mask);
434}
435#endif
436
437#ifndef HAVE_ARCH_DMA_SET_MASK
438static inline int dma_set_mask(struct device *dev, u64 mask)
439{
440 struct dma_map_ops *ops = get_dma_ops(dev);
441
442 if (ops->set_dma_mask)
443 return ops->set_dma_mask(dev, mask);
444
445 if (!dev->dma_mask || !dma_supported(dev, mask))
446 return -EIO;
447 *dev->dma_mask = mask;
448 return 0;
449}
90#endif 450#endif
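
A typical probe-time use of the mask helpers above, falling back from 64-bit to 32-bit addressing (a sketch; dma_set_coherent_mask() and DMA_BIT_MASK() are declared elsewhere in this header):

#include <linux/dma-mapping.h>

static int foo_setup_dma_masks(struct device *dev)
{
	/* try 64-bit DMA first */
	if (!dma_set_mask(dev, DMA_BIT_MASK(64)))
		return dma_set_coherent_mask(dev, DMA_BIT_MASK(64));

	/* fall back to 32-bit addressing */
	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
		return -EIO;
	return dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
}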
91 451
92static inline u64 dma_get_mask(struct device *dev) 452static inline u64 dma_get_mask(struct device *dev)
@@ -259,22 +619,6 @@ static inline void dmam_release_declared_memory(struct device *dev)
259} 619}
260#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */ 620#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
261 621
262#ifndef CONFIG_HAVE_DMA_ATTRS
263struct dma_attrs;
264
265#define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \
266 dma_map_single(dev, cpu_addr, size, dir)
267
268#define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \
269 dma_unmap_single(dev, dma_addr, size, dir)
270
271#define dma_map_sg_attrs(dev, sgl, nents, dir, attrs) \
272 dma_map_sg(dev, sgl, nents, dir)
273
274#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
275 dma_unmap_sg(dev, sgl, nents, dir)
276
277#else
278static inline void *dma_alloc_writecombine(struct device *dev, size_t size, 622static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
279 dma_addr_t *dma_addr, gfp_t gfp) 623 dma_addr_t *dma_addr, gfp_t gfp)
280{ 624{
@@ -300,7 +644,6 @@ static inline int dma_mmap_writecombine(struct device *dev,
300 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); 644 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
301 return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs); 645 return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
302} 646}
303#endif /* CONFIG_HAVE_DMA_ATTRS */
304 647
305#ifdef CONFIG_NEED_DMA_MAP_STATE 648#ifdef CONFIG_NEED_DMA_MAP_STATE
306#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME 649#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME