path: root/include/asm-sparc64/dma-mapping.h
Diffstat (limited to 'include/asm-sparc64/dma-mapping.h')
-rw-r--r--  include/asm-sparc64/dma-mapping.h  337
1 file changed, 82 insertions(+), 255 deletions(-)
diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h
index c58ec1661df8..0a1006692bb2 100644
--- a/include/asm-sparc64/dma-mapping.h
+++ b/include/asm-sparc64/dma-mapping.h
@@ -1,307 +1,134 @@
 #ifndef _ASM_SPARC64_DMA_MAPPING_H
 #define _ASM_SPARC64_DMA_MAPPING_H
 
-
-#ifdef CONFIG_PCI
-
-/* we implement the API below in terms of the existing PCI one,
- * so include it */
-#include <linux/pci.h>
-/* need struct page definitions */
+#include <linux/scatterlist.h>
 #include <linux/mm.h>
 
-#include <asm/of_device.h>
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_dma_supported(to_pci_dev(dev), mask);
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
-}
-
-static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   gfp_t flag)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_iommu_ops->alloc_consistent(to_pci_dev(dev), size, dma_handle, flag);
-}
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		  dma_addr_t dma_handle)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		 enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	   enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
-}
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	     enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-			enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
-				    size, (int)direction);
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-			   enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
-				       size, (int)direction);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-		    enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		       enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
-}
-
-static inline int
-dma_mapping_error(dma_addr_t dma_addr)
-{
-	return pci_dma_mapping_error(dma_addr);
-}
-
-#else
-
-struct device;
-struct page;
-struct scatterlist;
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-	BUG();
-	return 0;
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	BUG();
-	return 0;
-}
+#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
+
+struct dma_ops {
+	void *(*alloc_coherent)(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t flag);
+	void (*free_coherent)(struct device *dev, size_t size,
+			      void *cpu_addr, dma_addr_t dma_handle);
+	dma_addr_t (*map_single)(struct device *dev, void *cpu_addr,
+				 size_t size,
+				 enum dma_data_direction direction);
+	void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
+			     size_t size,
+			     enum dma_data_direction direction);
+	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
+		      enum dma_data_direction direction);
+	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
+			 int nhwentries,
+			 enum dma_data_direction direction);
+	void (*sync_single_for_cpu)(struct device *dev,
+				    dma_addr_t dma_handle, size_t size,
+				    enum dma_data_direction direction);
+	void (*sync_single_for_device)(struct device *dev,
+				       dma_addr_t dma_handle, size_t size,
+				       enum dma_data_direction direction);
+	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
+				int nelems,
+				enum dma_data_direction direction);
+	void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
+				   int nelems,
+				   enum dma_data_direction direction);
+};
+extern const struct dma_ops *dma_ops;
+
+extern int dma_supported(struct device *dev, u64 mask);
+extern int dma_set_mask(struct device *dev, u64 dma_mask);
 
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *dma_handle, gfp_t flag)
+				       dma_addr_t *dma_handle, gfp_t flag)
 {
-	BUG();
-	return NULL;
+	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
 }
 
 static inline void dma_free_coherent(struct device *dev, size_t size,
-		       void *vaddr, dma_addr_t dma_handle)
+				     void *cpu_addr, dma_addr_t dma_handle)
 {
-	BUG();
+	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
 }
 
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-	       enum dma_data_direction direction)
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+					size_t size,
+					enum dma_data_direction direction)
 {
-	BUG();
-	return 0;
+	return dma_ops->map_single(dev, cpu_addr, size, direction);
 }
 
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		 enum dma_data_direction direction)
+static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+				    size_t size,
+				    enum dma_data_direction direction)
 {
-	BUG();
+	dma_ops->unmap_single(dev, dma_addr, size, direction);
 }
 
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction direction)
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+				      unsigned long offset, size_t size,
+				      enum dma_data_direction direction)
 {
-	BUG();
-	return 0;
+	return dma_ops->map_single(dev, page_address(page) + offset,
+				   size, direction);
 }
 
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	       enum dma_data_direction direction)
+static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+				  size_t size,
+				  enum dma_data_direction direction)
 {
-	BUG();
+	dma_ops->unmap_single(dev, dma_address, size, direction);
 }
 
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	   enum dma_data_direction direction)
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
+			     int nents, enum dma_data_direction direction)
 {
-	BUG();
-	return 0;
+	return dma_ops->map_sg(dev, sg, nents, direction);
 }
 
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	     enum dma_data_direction direction)
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction)
 {
-	BUG();
+	dma_ops->unmap_sg(dev, sg, nents, direction);
 }
 
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-			enum dma_data_direction direction)
+static inline void dma_sync_single_for_cpu(struct device *dev,
+					   dma_addr_t dma_handle, size_t size,
+					   enum dma_data_direction direction)
 {
-	BUG();
+	dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
 }
 
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-			   enum dma_data_direction direction)
+static inline void dma_sync_single_for_device(struct device *dev,
+					      dma_addr_t dma_handle,
+					      size_t size,
+					      enum dma_data_direction direction)
 {
-	BUG();
+	dma_ops->sync_single_for_device(dev, dma_handle, size, direction);
 }
 
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-		    enum dma_data_direction direction)
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+				       struct scatterlist *sg, int nelems,
+				       enum dma_data_direction direction)
 {
-	BUG();
+	dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
 }
 
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		       enum dma_data_direction direction)
+static inline void dma_sync_sg_for_device(struct device *dev,
+					  struct scatterlist *sg, int nelems,
+					  enum dma_data_direction direction)
 {
-	BUG();
+	dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
 }
 
-static inline int
-dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(dma_addr_t dma_addr)
 {
-	BUG();
-	return 0;
+	return (dma_addr == DMA_ERROR_CODE);
 }
 
-#endif /* PCI */
-
-
-/* Now for the API extensions over the pci_ one */
-
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #define dma_is_consistent(d, h)	(1)
 
-static inline int
-dma_get_cache_alignment(void)
-{
-	/* no easy way to get cache size on all processors, so return
-	 * the maximum possible, to be safe */
-	return (1 << INTERNODE_CACHE_SHIFT);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size,
-			      enum dma_data_direction direction)
-{
-	/* just sync everything, that's all the pci API can do */
-	dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction direction)
-{
-	/* just sync everything, that's all the pci API can do */
-	dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
-}
-
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction direction)
-{
-	/* could define this in terms of the dma_cache ... operations,
-	 * but if you get this on a platform, you should convert the platform
-	 * to using the generic device DMA API */
-	BUG();
-}
-
 #endif /* _ASM_SPARC64_DMA_MAPPING_H */
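
What the change buys in practice: the dma_* entry points above are now thin inline wrappers that dispatch through whichever dma_ops table the platform installed at boot, rather than being hard-wired to the PCI DMA API behind a CONFIG_PCI guard. A hypothetical provider-side sketch follows; the iommu_* handler names are invented for illustration and are not part of this commit. The one contract to respect is that map_single() must return DMA_ERROR_CODE on failure, because dma_mapping_error() is now a plain comparison against that value.

	/* Sketch of a bus layer installing its DMA implementation.
	 * Each handler must match the corresponding signature in
	 * struct dma_ops; these names are placeholders, not real
	 * kernel symbols.
	 */
	static const struct dma_ops iommu_dma_ops = {
		.alloc_coherent		= iommu_alloc_coherent,
		.free_coherent		= iommu_free_coherent,
		.map_single		= iommu_map_single,
		.unmap_single		= iommu_unmap_single,
		.map_sg			= iommu_map_sg,
		.unmap_sg		= iommu_unmap_sg,
		.sync_single_for_cpu	= iommu_sync_single_for_cpu,
		.sync_single_for_device	= iommu_sync_single_for_device,
		.sync_sg_for_cpu	= iommu_sync_sg_for_cpu,
		.sync_sg_for_device	= iommu_sync_sg_for_device,
	};

	/* The single definition behind the header's extern declaration,
	 * assigned during platform setup before any driver maps memory.
	 */
	const struct dma_ops *dma_ops = &iommu_dma_ops;

Driver-side usage is unchanged from the generic DMA API. A minimal sketch of a receive path, assuming a struct device *dev, a kernel buffer buf of len bytes, and hardware that DMAs into it:

	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(handle))		/* handle == DMA_ERROR_CODE */
		return -ENOMEM;

	/* ... hand "handle" to the device, wait for the DMA to finish ... */

	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* the CPU may now safely read buf */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);

Note also that dma_map_page() is now expressed as map_single() on page_address(page) + offset; that is valid on sparc64 only because the architecture has no highmem, so every page has a permanent kernel linear mapping.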