author    Russell King <rmk@dyn-67.arm.linux.org.uk>    2008-09-25 17:23:31 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>    2008-09-29 05:41:02 -0400
commit    3216a97bb0d5166ec5795aa3db1c3a02415ac060
tree      1e2ecda2ab14a76e3ce785fcb0f531976c6f6036 /arch/arm/include
parent    125ab12acf64ff86b55d20e14db20becd917b7c4
[ARM] dma: coding style cleanups
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/include')
-rw-r--r--    arch/arm/include/asm/dma-mapping.h    104
1 file changed, 48 insertions, 56 deletions
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 1204dc958c43..1532b7a6079d 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -104,15 +104,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  * Dummy noncoherent implementation. We don't provide a dma_cache_sync
  * function so drivers using this API are highlighted with build warnings.
  */
-static inline void *
-dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+		dma_addr_t *handle, gfp_t gfp)
 {
 	return NULL;
 }
 
-static inline void
-dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t handle)
+static inline void dma_free_noncoherent(struct device *dev, size_t size,
+		void *cpu_addr, dma_addr_t handle)
 {
 }
 
@@ -127,8 +126,7 @@ dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
  * return the CPU-viewed address, and sets @handle to be the
  * device-viewed address.
  */
-extern void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
+extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
 
 /**
  * dma_free_coherent - free memory allocated by dma_alloc_coherent
@@ -143,9 +141,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gf
  * References to memory and mappings associated with cpu_addr/handle
  * during and after this call executing are illegal.
  */
-extern void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t handle);
+extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
 
 /**
  * dma_mmap_coherent - map a coherent DMA allocation into user space
@@ -159,8 +155,8 @@ dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
  * into user space. The coherent DMA buffer must not be freed by the
  * driver until the user space mapping has been released.
  */
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t handle, size_t size);
+int dma_mmap_coherent(struct device *, struct vm_area_struct *,
+		void *, dma_addr_t, size_t);
 
 
 /**
@@ -174,14 +170,14 @@ int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
  * return the CPU-viewed address, and sets @handle to be the
  * device-viewed address.
  */
-extern void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
+extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
+		gfp_t);
 
 #define dma_free_writecombine(dev,size,cpu_addr,handle) \
 	dma_free_coherent(dev,size,cpu_addr,handle)
 
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t handle, size_t size);
+int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
+		void *, dma_addr_t, size_t);
 
 
 #ifdef CONFIG_DMABOUNCE
@@ -209,7 +205,8 @@ int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
  * appropriate DMA pools for the device.
  *
  */
-extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
+extern int dmabounce_register_dev(struct device *, unsigned long,
+		unsigned long);
 
 /**
  * dmabounce_unregister_dev
@@ -244,19 +241,20 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
 /*
  * The DMA API, implemented by dmabounce.c. See below for descriptions.
  */
-extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size,
-		enum dma_data_direction dir);
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
+extern dma_addr_t dma_map_single(struct device *, void *, size_t,
+		enum dma_data_direction);
+extern dma_addr_t dma_map_page(struct device *, struct page *,
+		unsigned long, size_t, enum dma_data_direction);
+extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+		enum dma_data_direction);
 
 /*
  * Private functions
  */
 int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
 		size_t, enum dma_data_direction);
 int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
 		size_t, enum dma_data_direction);
 #else
 #define dmabounce_sync_for_cpu(dev,dma,off,sz,dir)	(1)
 #define dmabounce_sync_for_device(dev,dma,off,sz,dir)	(1)
@@ -276,9 +274,8 @@ int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
  * can regain ownership by calling dma_unmap_single() or
  * dma_sync_single_for_cpu().
  */
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-		enum dma_data_direction dir)
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+		size_t size, enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
 		dma_cache_maint(cpu_addr, size, dir);
@@ -286,7 +283,6 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 	return virt_to_dma(dev, cpu_addr);
 }
 
-
 /**
  * dma_map_page - map a portion of a page for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -302,10 +298,8 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
  * can regain ownership by calling dma_unmap_page() or
  * dma_sync_single_for_cpu().
  */
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size,
-		enum dma_data_direction dir)
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
 		dma_cache_maint(page_address(page) + offset, size, dir);
@@ -327,9 +321,8 @@ dma_map_page(struct device *dev, struct page *page,
  * After this call, reads by the CPU to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
-		enum dma_data_direction dir)
+static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
 {
 	/* nothing to do */
 }
@@ -349,9 +342,8 @@ dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
  * After this call, reads by the CPU to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
-		enum dma_data_direction dir)
+static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
 {
 	dma_unmap_single(dev, handle, size, dir);
 }
@@ -374,10 +366,9 @@ dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
  * must first the perform a dma_sync_for_device, and then the
  * device again owns the buffer.
  */
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
-		unsigned long offset, size_t size,
-		enum dma_data_direction dir)
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+		dma_addr_t handle, unsigned long offset, size_t size,
+		enum dma_data_direction dir)
 {
 	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
 		return;
@@ -386,10 +377,9 @@ dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
 	dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
 }
 
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
-		unsigned long offset, size_t size,
-		enum dma_data_direction dir)
+static inline void dma_sync_single_range_for_device(struct device *dev,
+		dma_addr_t handle, unsigned long offset, size_t size,
+		enum dma_data_direction dir)
 {
 	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
 		return;
@@ -398,16 +388,14 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
 	dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
 }
 
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
-		enum dma_data_direction dir)
+static inline void dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
 }
 
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
-		enum dma_data_direction dir)
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
 }
@@ -415,10 +403,14 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
 /*
  * The scatter list versions of the above methods.
  */
-extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
-extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
-extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum dma_data_direction);
-extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction);
+extern int dma_map_sg(struct device *, struct scatterlist *, int,
+		enum dma_data_direction);
+extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
+		enum dma_data_direction);
+extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
+		enum dma_data_direction);
+extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
+		enum dma_data_direction);
 
 
 #endif /* __KERNEL__ */
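
For context, a minimal sketch (not part of this commit) of how a driver typically uses the streaming declarations reformatted above; the function name, device, buffer and length are hypothetical, and only calls declared in this header are used.

/*
 * Illustrative only: the usual map/do-DMA/unmap pattern for a
 * CPU-to-device transfer using the API declared in dma-mapping.h.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_dma_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/*
	 * Hand ownership of the buffer to the device.  On non-coherent ARM
	 * this performs cache maintenance via dma_cache_maint(), as shown
	 * in dma_map_single() above.
	 */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program 'handle' into the device and wait for completion ... */

	/* Return ownership of the buffer to the CPU. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}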