Diffstat (limited to 'include/asm-sparc64/dma-mapping.h')
-rw-r--r--  include/asm-sparc64/dma-mapping.h | 337 ++++++++++------------------------------
1 file changed, 82 insertions(+), 255 deletions(-)
diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h
index c58ec1661df8..0a1006692bb2 100644
--- a/include/asm-sparc64/dma-mapping.h
+++ b/include/asm-sparc64/dma-mapping.h
@@ -1,307 +1,134 @@
 #ifndef _ASM_SPARC64_DMA_MAPPING_H
 #define _ASM_SPARC64_DMA_MAPPING_H
 
-
-#ifdef CONFIG_PCI
-
-/* we implement the API below in terms of the existing PCI one,
- * so include it */
-#include <linux/pci.h>
-/* need struct page definitions */
+#include <linux/scatterlist.h>
 #include <linux/mm.h>
 
-#include <asm/of_device.h>
+#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
 
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_dma_supported(to_pci_dev(dev), mask);
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
-}
-
-static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   gfp_t flag)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_iommu_ops->alloc_consistent(to_pci_dev(dev), size, dma_handle, flag);
-}
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		  dma_addr_t dma_handle)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
+struct dma_ops {
+	void *(*alloc_coherent)(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t flag);
+	void (*free_coherent)(struct device *dev, size_t size,
+			      void *cpu_addr, dma_addr_t dma_handle);
+	dma_addr_t (*map_single)(struct device *dev, void *cpu_addr,
+				 size_t size,
+				 enum dma_data_direction direction);
+	void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
+			     size_t size,
+			     enum dma_data_direction direction);
+	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
+		      enum dma_data_direction direction);
+	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
+			 int nhwentries,
+			 enum dma_data_direction direction);
+	void (*sync_single_for_cpu)(struct device *dev,
+				    dma_addr_t dma_handle, size_t size,
+				    enum dma_data_direction direction);
+	void (*sync_single_for_device)(struct device *dev,
+				       dma_addr_t dma_handle, size_t size,
+				       enum dma_data_direction direction);
+	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
+				int nelems,
+				enum dma_data_direction direction);
+	void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
+				   int nelems,
+				   enum dma_data_direction direction);
+};
+extern const struct dma_ops *dma_ops;
 
-	pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		 enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	   enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
-}
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	     enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-			enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
-				    size, (int)direction);
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-			   enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
-				       size, (int)direction);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-		    enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		       enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
-}
-
-static inline int
-dma_mapping_error(dma_addr_t dma_addr)
-{
-	return pci_dma_mapping_error(dma_addr);
-}
-
-#else
-
-struct device;
-struct page;
-struct scatterlist;
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-	BUG();
-	return 0;
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	BUG();
-	return 0;
-}
+extern int dma_supported(struct device *dev, u64 mask);
+extern int dma_set_mask(struct device *dev, u64 dma_mask);
 
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
 				       dma_addr_t *dma_handle, gfp_t flag)
 {
-	BUG();
-	return NULL;
+	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
 }
 
 static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t dma_handle)
+				     void *cpu_addr, dma_addr_t dma_handle)
 {
-	BUG();
+	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
 }
 
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-	       enum dma_data_direction direction)
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+					size_t size,
+					enum dma_data_direction direction)
 {
-	BUG();
-	return 0;
+	return dma_ops->map_single(dev, cpu_addr, size, direction);
 }
 
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		 enum dma_data_direction direction)
+static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+				    size_t size,
+				    enum dma_data_direction direction)
 {
-	BUG();
+	dma_ops->unmap_single(dev, dma_addr, size, direction);
 }
 
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction direction)
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+				      unsigned long offset, size_t size,
+				      enum dma_data_direction direction)
 {
-	BUG();
-	return 0;
+	return dma_ops->map_single(dev, page_address(page) + offset,
+				   size, direction);
 }
 
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	       enum dma_data_direction direction)
+static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+				  size_t size,
+				  enum dma_data_direction direction)
 {
-	BUG();
+	dma_ops->unmap_single(dev, dma_address, size, direction);
 }
 
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	   enum dma_data_direction direction)
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
+			     int nents, enum dma_data_direction direction)
 {
-	BUG();
-	return 0;
+	return dma_ops->map_sg(dev, sg, nents, direction);
 }
 
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	     enum dma_data_direction direction)
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction)
 {
-	BUG();
+	dma_ops->unmap_sg(dev, sg, nents, direction);
 }
 
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-			enum dma_data_direction direction)
+static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
+					   enum dma_data_direction direction)
 {
-	BUG();
+	dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
 }
 
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-			   enum dma_data_direction direction)
+static inline void dma_sync_single_for_device(struct device *dev,
+					      dma_addr_t dma_handle,
+					      size_t size,
+					      enum dma_data_direction direction)
 {
-	BUG();
+	dma_ops->sync_single_for_device(dev, dma_handle, size, direction);
 }
 
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-		    enum dma_data_direction direction)
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+				       struct scatterlist *sg, int nelems,
+				       enum dma_data_direction direction)
 {
-	BUG();
+	dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
 }
 
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		       enum dma_data_direction direction)
+static inline void dma_sync_sg_for_device(struct device *dev,
+					  struct scatterlist *sg, int nelems,
+					  enum dma_data_direction direction)
 {
-	BUG();
+	dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
 }
 
-static inline int
-dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(dma_addr_t dma_addr)
 {
-	BUG();
-	return 0;
+	return (dma_addr == DMA_ERROR_CODE);
 }
 
-#endif /* PCI */
-
-
-/* Now for the API extensions over the pci_ one */
-
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #define dma_is_consistent(d, h)	(1)
 
-static inline int
-dma_get_cache_alignment(void)
-{
-	/* no easy way to get cache size on all processors, so return
-	 * the maximum possible, to be safe */
-	return (1 << INTERNODE_CACHE_SHIFT);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size,
-			      enum dma_data_direction direction)
-{
-	/* just sync everything, that's all the pci API can do */
-	dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction direction)
-{
-	/* just sync everything, that's all the pci API can do */
-	dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
-}
-
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction direction)
-{
-	/* could define this in terms of the dma_cache ... operations,
-	 * but if you get this on a platform, you should convert the platform
-	 * to using the generic device DMA API */
-	BUG();
-}
-
 #endif /* _ASM_SPARC64_DMA_MAPPING_H */
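
After this change every dma_* helper is a thin dispatch through the shared dma_ops table, so one driver-facing API can sit on top of whichever IOMMU backend the bus installs, and mapping failures are reported uniformly through DMA_ERROR_CODE rather than per-bus conventions. The sketch below is illustrative only and is not part of the commit: every "example_*" identifier is invented, and the stubbed backend merely stands in for the real sparc64 IOMMU code that actually populates dma_ops.

/* Illustrative sketch only -- all "example_*" names are invented. */
#include <linux/device.h>
#include <linux/dma-mapping.h>	/* pulls in the asm header shown above */
#include <linux/errno.h>

/* Backend side: a bus/IOMMU layer fills in the hooks it implements. */
static dma_addr_t example_map_single(struct device *dev, void *cpu_addr,
				     size_t size,
				     enum dma_data_direction direction)
{
	/* A real backend would set up an IOMMU translation here and
	 * return the resulting bus address; DMA_ERROR_CODE signals
	 * failure without needing a separate out-parameter. */
	return DMA_ERROR_CODE;	/* stub */
}

static const struct dma_ops example_dma_ops = {
	.map_single	= example_map_single,
	/* .alloc_coherent, .unmap_single, .map_sg, the sync hooks, etc.
	 * would be filled in likewise; every hook a driver can reach
	 * must be non-NULL, since the inlines dispatch unconditionally. */
};

/* Bus setup code would then switch the global pointer once, early:
 *
 *	dma_ops = &example_dma_ops;
 */

/* Driver side: the calls are unchanged, only the dispatch is new. */
static int example_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(busaddr))	/* i.e. busaddr == DMA_ERROR_CODE */
		return -ENOMEM;

	/* ... hand busaddr to the hardware, then unmap when done ... */
	dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
	return 0;
}

The trade is visible in the diff itself: one indirect call per DMA operation buys a header that no longer needs the #ifdef CONFIG_PCI split or the BUG() stub branch, because selecting a backend becomes a runtime assignment instead of a compile-time fork.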