aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/include/asm/dma-mapping.h
diff options
context:
space:
mode:
authorFUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>2009-08-04 15:08:25 -0400
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2009-08-28 00:24:10 -0400
commit45223c549273bbb2c6e1bc6e3629174e8765ad01 (patch)
tree366f61a56de635c73faa1b779fec6694a075eaa8 /arch/powerpc/include/asm/dma-mapping.h
parentf726f30e32305a34a203ff975e60885aa7556c6a (diff)
powerpc: use dma_map_ops struct
This converts the code to use the dma_map_ops struct (in include/linux/dma-mapping.h) instead of the POWERPC homegrown dma_mapping_ops. Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> Acked-by: Becky Bruce <beckyb@kernel.crashing.org> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/include/asm/dma-mapping.h')
-rw-r--r--arch/powerpc/include/asm/dma-mapping.h84
1 files changed, 21 insertions, 63 deletions
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 1765c379138a..8ca2b5183c56 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -64,56 +64,14 @@ static inline unsigned long device_to_mask(struct device *dev)
64} 64}
65 65
66/* 66/*
67 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
68 */
69struct dma_mapping_ops {
70 void * (*alloc_coherent)(struct device *dev, size_t size,
71 dma_addr_t *dma_handle, gfp_t flag);
72 void (*free_coherent)(struct device *dev, size_t size,
73 void *vaddr, dma_addr_t dma_handle);
74 int (*map_sg)(struct device *dev, struct scatterlist *sg,
75 int nents, enum dma_data_direction direction,
76 struct dma_attrs *attrs);
77 void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
78 int nents, enum dma_data_direction direction,
79 struct dma_attrs *attrs);
80 int (*dma_supported)(struct device *dev, u64 mask);
81 int (*set_dma_mask)(struct device *dev, u64 dma_mask);
82 dma_addr_t (*map_page)(struct device *dev, struct page *page,
83 unsigned long offset, size_t size,
84 enum dma_data_direction direction,
85 struct dma_attrs *attrs);
86 void (*unmap_page)(struct device *dev,
87 dma_addr_t dma_address, size_t size,
88 enum dma_data_direction direction,
89 struct dma_attrs *attrs);
90#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
91 void (*sync_single_range_for_cpu)(struct device *hwdev,
92 dma_addr_t dma_handle, unsigned long offset,
93 size_t size,
94 enum dma_data_direction direction);
95 void (*sync_single_range_for_device)(struct device *hwdev,
96 dma_addr_t dma_handle, unsigned long offset,
97 size_t size,
98 enum dma_data_direction direction);
99 void (*sync_sg_for_cpu)(struct device *hwdev,
100 struct scatterlist *sg, int nelems,
101 enum dma_data_direction direction);
102 void (*sync_sg_for_device)(struct device *hwdev,
103 struct scatterlist *sg, int nelems,
104 enum dma_data_direction direction);
105#endif
106};
107
108/*
109 * Available generic sets of operations 67 * Available generic sets of operations
110 */ 68 */
111#ifdef CONFIG_PPC64 69#ifdef CONFIG_PPC64
112extern struct dma_mapping_ops dma_iommu_ops; 70extern struct dma_map_ops dma_iommu_ops;
113#endif 71#endif
114extern struct dma_mapping_ops dma_direct_ops; 72extern struct dma_map_ops dma_direct_ops;
115 73
116static inline struct dma_mapping_ops *get_dma_ops(struct device *dev) 74static inline struct dma_map_ops *get_dma_ops(struct device *dev)
117{ 75{
118 /* We don't handle the NULL dev case for ISA for now. We could 76 /* We don't handle the NULL dev case for ISA for now. We could
119 * do it via an out of line call but it is not needed for now. The 77 * do it via an out of line call but it is not needed for now. The
@@ -126,14 +84,14 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
126 return dev->archdata.dma_ops; 84 return dev->archdata.dma_ops;
127} 85}
128 86
129static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops) 87static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
130{ 88{
131 dev->archdata.dma_ops = ops; 89 dev->archdata.dma_ops = ops;
132} 90}
133 91
134static inline int dma_supported(struct device *dev, u64 mask) 92static inline int dma_supported(struct device *dev, u64 mask)
135{ 93{
136 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 94 struct dma_map_ops *dma_ops = get_dma_ops(dev);
137 95
138 if (unlikely(dma_ops == NULL)) 96 if (unlikely(dma_ops == NULL))
139 return 0; 97 return 0;
@@ -147,7 +105,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
147 105
148static inline int dma_set_mask(struct device *dev, u64 dma_mask) 106static inline int dma_set_mask(struct device *dev, u64 dma_mask)
149{ 107{
150 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 108 struct dma_map_ops *dma_ops = get_dma_ops(dev);
151 109
152 if (unlikely(dma_ops == NULL)) 110 if (unlikely(dma_ops == NULL))
153 return -EIO; 111 return -EIO;
@@ -161,7 +119,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
161 119
162/* 120/*
163 * map_/unmap_single actually call through to map/unmap_page now that all the 121 * map_/unmap_single actually call through to map/unmap_page now that all the
164 * dma_mapping_ops have been converted over. We just have to get the page and 122 * dma_map_ops have been converted over. We just have to get the page and
165 * offset to pass through to map_page 123 * offset to pass through to map_page
166 */ 124 */
167static inline dma_addr_t dma_map_single_attrs(struct device *dev, 125static inline dma_addr_t dma_map_single_attrs(struct device *dev,
@@ -170,7 +128,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev,
170 enum dma_data_direction direction, 128 enum dma_data_direction direction,
171 struct dma_attrs *attrs) 129 struct dma_attrs *attrs)
172{ 130{
173 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 131 struct dma_map_ops *dma_ops = get_dma_ops(dev);
174 132
175 BUG_ON(!dma_ops); 133 BUG_ON(!dma_ops);
176 134
@@ -185,7 +143,7 @@ static inline void dma_unmap_single_attrs(struct device *dev,
185 enum dma_data_direction direction, 143 enum dma_data_direction direction,
186 struct dma_attrs *attrs) 144 struct dma_attrs *attrs)
187{ 145{
188 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 146 struct dma_map_ops *dma_ops = get_dma_ops(dev);
189 147
190 BUG_ON(!dma_ops); 148 BUG_ON(!dma_ops);
191 149
@@ -198,7 +156,7 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
198 enum dma_data_direction direction, 156 enum dma_data_direction direction,
199 struct dma_attrs *attrs) 157 struct dma_attrs *attrs)
200{ 158{
201 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 159 struct dma_map_ops *dma_ops = get_dma_ops(dev);
202 160
203 BUG_ON(!dma_ops); 161 BUG_ON(!dma_ops);
204 162
@@ -211,7 +169,7 @@ static inline void dma_unmap_page_attrs(struct device *dev,
211 enum dma_data_direction direction, 169 enum dma_data_direction direction,
212 struct dma_attrs *attrs) 170 struct dma_attrs *attrs)
213{ 171{
214 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 172 struct dma_map_ops *dma_ops = get_dma_ops(dev);
215 173
216 BUG_ON(!dma_ops); 174 BUG_ON(!dma_ops);
217 175
@@ -222,7 +180,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
222 int nents, enum dma_data_direction direction, 180 int nents, enum dma_data_direction direction,
223 struct dma_attrs *attrs) 181 struct dma_attrs *attrs)
224{ 182{
225 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 183 struct dma_map_ops *dma_ops = get_dma_ops(dev);
226 184
227 BUG_ON(!dma_ops); 185 BUG_ON(!dma_ops);
228 return dma_ops->map_sg(dev, sg, nents, direction, attrs); 186 return dma_ops->map_sg(dev, sg, nents, direction, attrs);
@@ -234,7 +192,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev,
234 enum dma_data_direction direction, 192 enum dma_data_direction direction,
235 struct dma_attrs *attrs) 193 struct dma_attrs *attrs)
236{ 194{
237 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 195 struct dma_map_ops *dma_ops = get_dma_ops(dev);
238 196
239 BUG_ON(!dma_ops); 197 BUG_ON(!dma_ops);
240 dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs); 198 dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
@@ -243,7 +201,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev,
243static inline void *dma_alloc_coherent(struct device *dev, size_t size, 201static inline void *dma_alloc_coherent(struct device *dev, size_t size,
244 dma_addr_t *dma_handle, gfp_t flag) 202 dma_addr_t *dma_handle, gfp_t flag)
245{ 203{
246 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 204 struct dma_map_ops *dma_ops = get_dma_ops(dev);
247 205
248 BUG_ON(!dma_ops); 206 BUG_ON(!dma_ops);
249 return dma_ops->alloc_coherent(dev, size, dma_handle, flag); 207 return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
@@ -252,7 +210,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
252static inline void dma_free_coherent(struct device *dev, size_t size, 210static inline void dma_free_coherent(struct device *dev, size_t size,
253 void *cpu_addr, dma_addr_t dma_handle) 211 void *cpu_addr, dma_addr_t dma_handle)
254{ 212{
255 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 213 struct dma_map_ops *dma_ops = get_dma_ops(dev);
256 214
257 BUG_ON(!dma_ops); 215 BUG_ON(!dma_ops);
258 dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); 216 dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
@@ -304,7 +262,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev,
304 dma_addr_t dma_handle, size_t size, 262 dma_addr_t dma_handle, size_t size,
305 enum dma_data_direction direction) 263 enum dma_data_direction direction)
306{ 264{
307 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 265 struct dma_map_ops *dma_ops = get_dma_ops(dev);
308 266
309 BUG_ON(!dma_ops); 267 BUG_ON(!dma_ops);
310 268
@@ -317,7 +275,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
317 dma_addr_t dma_handle, size_t size, 275 dma_addr_t dma_handle, size_t size,
318 enum dma_data_direction direction) 276 enum dma_data_direction direction)
319{ 277{
320 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 278 struct dma_map_ops *dma_ops = get_dma_ops(dev);
321 279
322 BUG_ON(!dma_ops); 280 BUG_ON(!dma_ops);
323 281
@@ -330,7 +288,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev,
330 struct scatterlist *sgl, int nents, 288 struct scatterlist *sgl, int nents,
331 enum dma_data_direction direction) 289 enum dma_data_direction direction)
332{ 290{
333 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 291 struct dma_map_ops *dma_ops = get_dma_ops(dev);
334 292
335 BUG_ON(!dma_ops); 293 BUG_ON(!dma_ops);
336 294
@@ -342,7 +300,7 @@ static inline void dma_sync_sg_for_device(struct device *dev,
342 struct scatterlist *sgl, int nents, 300 struct scatterlist *sgl, int nents,
343 enum dma_data_direction direction) 301 enum dma_data_direction direction)
344{ 302{
345 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 303 struct dma_map_ops *dma_ops = get_dma_ops(dev);
346 304
347 BUG_ON(!dma_ops); 305 BUG_ON(!dma_ops);
348 306
@@ -354,7 +312,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
354 dma_addr_t dma_handle, unsigned long offset, size_t size, 312 dma_addr_t dma_handle, unsigned long offset, size_t size,
355 enum dma_data_direction direction) 313 enum dma_data_direction direction)
356{ 314{
357 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 315 struct dma_map_ops *dma_ops = get_dma_ops(dev);
358 316
359 BUG_ON(!dma_ops); 317 BUG_ON(!dma_ops);
360 318
@@ -367,7 +325,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
367 dma_addr_t dma_handle, unsigned long offset, size_t size, 325 dma_addr_t dma_handle, unsigned long offset, size_t size,
368 enum dma_data_direction direction) 326 enum dma_data_direction direction)
369{ 327{
370 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 328 struct dma_map_ops *dma_ops = get_dma_ops(dev);
371 329
372 BUG_ON(!dma_ops); 330 BUG_ON(!dma_ops);
373 331