Diffstat (limited to 'include/asm-x86/dma-mapping.h')
-rw-r--r--   include/asm-x86/dma-mapping.h   238
1 file changed, 235 insertions(+), 3 deletions(-)
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
index 58f790f4df52..a1a4dc7fe6ec 100644
--- a/include/asm-x86/dma-mapping.h
+++ b/include/asm-x86/dma-mapping.h
@@ -1,5 +1,237 @@
+#ifndef _ASM_DMA_MAPPING_H_
+#define _ASM_DMA_MAPPING_H_
+
+/*
+ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
+ * documentation.
+ */
+
+#include <linux/scatterlist.h>
+#include <asm/io.h>
+#include <asm/swiotlb.h>
+
+extern dma_addr_t bad_dma_address;
+extern int iommu_merge;
+extern struct device fallback_dev;
+extern int panic_on_overflow;
+extern int forbid_dac;
+extern int force_iommu;
+
+struct dma_mapping_ops {
+        int             (*mapping_error)(dma_addr_t dma_addr);
+        void*           (*alloc_coherent)(struct device *dev, size_t size,
+                                dma_addr_t *dma_handle, gfp_t gfp);
+        void            (*free_coherent)(struct device *dev, size_t size,
+                                void *vaddr, dma_addr_t dma_handle);
+        dma_addr_t      (*map_single)(struct device *hwdev, phys_addr_t ptr,
+                                size_t size, int direction);
+        /* like map_single, but doesn't check the device mask */
+        dma_addr_t      (*map_simple)(struct device *hwdev, phys_addr_t ptr,
+                                size_t size, int direction);
+        void            (*unmap_single)(struct device *dev, dma_addr_t addr,
+                                size_t size, int direction);
+        void            (*sync_single_for_cpu)(struct device *hwdev,
+                                dma_addr_t dma_handle, size_t size,
+                                int direction);
+        void            (*sync_single_for_device)(struct device *hwdev,
+                                dma_addr_t dma_handle, size_t size,
+                                int direction);
+        void            (*sync_single_range_for_cpu)(struct device *hwdev,
+                                dma_addr_t dma_handle, unsigned long offset,
+                                size_t size, int direction);
+        void            (*sync_single_range_for_device)(struct device *hwdev,
+                                dma_addr_t dma_handle, unsigned long offset,
+                                size_t size, int direction);
+        void            (*sync_sg_for_cpu)(struct device *hwdev,
+                                struct scatterlist *sg, int nelems,
+                                int direction);
+        void            (*sync_sg_for_device)(struct device *hwdev,
+                                struct scatterlist *sg, int nelems,
+                                int direction);
+        int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
+                                int nents, int direction);
+        void            (*unmap_sg)(struct device *hwdev,
+                                struct scatterlist *sg, int nents,
+                                int direction);
+        int             (*dma_supported)(struct device *hwdev, u64 mask);
+        int             is_phys;
+};
+
+extern const struct dma_mapping_ops *dma_ops;
+
+static inline int dma_mapping_error(dma_addr_t dma_addr)
+{
+        if (dma_ops->mapping_error)
+                return dma_ops->mapping_error(dma_addr);
+
+        return (dma_addr == bad_dma_address);
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+                dma_addr_t *dma_handle, gfp_t flag);
+
+void dma_free_coherent(struct device *dev, size_t size,
+                void *vaddr, dma_addr_t dma_handle);
+
+
+extern int dma_supported(struct device *hwdev, u64 mask);
+extern int dma_set_mask(struct device *dev, u64 mask);
+
+static inline dma_addr_t
+dma_map_single(struct device *hwdev, void *ptr, size_t size,
+               int direction)
+{
+        BUG_ON(!valid_dma_direction(direction));
+        return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
+                 int direction)
+{
+        BUG_ON(!valid_dma_direction(direction));
+        if (dma_ops->unmap_single)
+                dma_ops->unmap_single(dev, addr, size, direction);
+}
+
+static inline int
+dma_map_sg(struct device *hwdev, struct scatterlist *sg,
+           int nents, int direction)
+{
+        BUG_ON(!valid_dma_direction(direction));
+        return dma_ops->map_sg(hwdev, sg, nents, direction);
+}
+
+static inline void
+dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
+             int direction)
+{
+        BUG_ON(!valid_dma_direction(direction));
+        if (dma_ops->unmap_sg)
+                dma_ops->unmap_sg(hwdev, sg, nents, direction);
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
+                        size_t size, int direction)
+{
+        BUG_ON(!valid_dma_direction(direction));
+        if (dma_ops->sync_single_for_cpu)
+                dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
+                                             direction);
+        flush_write_buffers();
+}
+
+static inline void
+dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
+                           size_t size, int direction)
+{
+        BUG_ON(!valid_dma_direction(direction));
+        if (dma_ops->sync_single_for_device)
+                dma_ops->sync_single_for_device(hwdev, dma_handle, size,
+                                                direction);
+        flush_write_buffers();
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
+                              unsigned long offset, size_t size, int direction)
+{
+        BUG_ON(!valid_dma_direction(direction));
+        if (dma_ops->sync_single_range_for_cpu)
+                dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
+                                                   size, direction);
+
+        flush_write_buffers();
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
+                                 unsigned long offset, size_t size,
+                                 int direction)
+{
+        BUG_ON(!valid_dma_direction(direction));
+        if (dma_ops->sync_single_range_for_device)
+                dma_ops->sync_single_range_for_device(hwdev, dma_handle,
+                                                      offset, size, direction);
+
+        flush_write_buffers();
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
+                    int nelems, int direction)
+{
+        BUG_ON(!valid_dma_direction(direction));
+        if (dma_ops->sync_sg_for_cpu)
+                dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+        flush_write_buffers();
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+                       int nelems, int direction)
+{
+        BUG_ON(!valid_dma_direction(direction));
+        if (dma_ops->sync_sg_for_device)
+                dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+
+        flush_write_buffers();
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+                                      size_t offset, size_t size,
+                                      int direction)
+{
+        BUG_ON(!valid_dma_direction(direction));
+        return dma_ops->map_single(dev, page_to_phys(page)+offset,
+                                   size, direction);
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+                                  size_t size, int direction)
+{
+        dma_unmap_single(dev, addr, size, direction);
+}
+
+static inline void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+               enum dma_data_direction dir)
+{
+        flush_write_buffers();
+}
+
+static inline int dma_get_cache_alignment(void)
+{
+        /* no easy way to get cache size on all x86, so return the
+         * maximum possible, to be safe */
+        return boot_cpu_data.x86_clflush_size;
+}
+
+#define dma_is_consistent(d, h) (1)
+
 #ifdef CONFIG_X86_32
-# include "dma-mapping_32.h"
-#else
-# include "dma-mapping_64.h"
+# define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+struct dma_coherent_mem {
+        void            *virt_base;
+        u32             device_base;
+        int             size;
+        int             flags;
+        unsigned long   *bitmap;
+};
+
+extern int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+                            dma_addr_t device_addr, size_t size, int flags);
+
+extern void
+dma_release_declared_memory(struct device *dev);
+
+extern void *
+dma_mark_declared_memory_occupied(struct device *dev,
+                                  dma_addr_t device_addr, size_t size);
+#endif /* CONFIG_X86_32 */
 #endif
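
Usage sketch: every dma_* helper added above dispatches through the shared dma_ops table, so a driver written against this generic API runs unchanged whichever dma_mapping_ops implementation (nommu, swiotlb, GART, etc.) is installed. The fragment below is a minimal, hypothetical illustration of the streaming path declared in this header (dma_map_single, dma_mapping_error, dma_unmap_single); the function my_start_tx, the dev/buf/len parameters, and the surrounding includes are invented for the example and are not part of the patch.

/*
 * Hypothetical caller of the interface above; only the dma_* calls and
 * DMA_TO_DEVICE come from the kernel headers, everything named "my_*"
 * is made up for illustration.
 */
#include <linux/dma-mapping.h>  /* pulls in asm/dma-mapping.h on x86 */
#include <linux/errno.h>

static int my_start_tx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        /* Dispatches to dma_ops->map_single() after the direction check. */
        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        /*
         * With no mapping_error hook installed, this reduces to a compare
         * against bad_dma_address.
         */
        if (dma_mapping_error(handle))
                return -EIO;

        /* ... program the device with 'handle' and wait for completion ... */

        /* No-op when the active dma_mapping_ops provides no unmap_single. */
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}

Note that dma_map_single() converts the virtual address with virt_to_phys() before calling into the ops table, which is why the map_single/map_simple hooks take a phys_addr_t rather than a void pointer.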