author	Russell King <rmk@dyn-67.arm.linux.org.uk>	2008-09-25 17:23:31 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2008-09-29 05:41:02 -0400
commit	3216a97bb0d5166ec5795aa3db1c3a02415ac060 (patch)
tree	1e2ecda2ab14a76e3ce785fcb0f531976c6f6036 /arch/arm/common
parent	125ab12acf64ff86b55d20e14db20becd917b7c4 (diff)
[ARM] dma: coding style cleanups
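The reflow applies two common kernel style rules: a function's return type and
name are kept on the same line, and each EXPORT_SYMBOL() follows immediately
after its function's closing brace rather than being grouped at the bottom of
the file. A minimal before/after sketch of the pattern (example_fn is a
hypothetical function, not taken from dmabounce.c):

	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/module.h>

	/* Before: return type on its own line, export deferred to file end. */
	int
	example_fn(struct device *dev, unsigned long size)
	{
		return size ? 0 : -EINVAL;
	}

	/* After: signature joined, export right after the closing brace. */
	int example_fn(struct device *dev, unsigned long size)
	{
		return size ? 0 : -EINVAL;
	}
	EXPORT_SYMBOL(example_fn);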
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/common')
-rw-r--r--	arch/arm/common/dmabounce.c	48
1 file changed, 16 insertions(+), 32 deletions(-)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index d4b0c608fdee..22aec95c9863 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -154,9 +154,7 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
 #endif
 
 	write_lock_irqsave(&device_info->lock, flags);
-
 	list_add(&buf->node, &device_info->safe_buffers);
-
 	write_unlock_irqrestore(&device_info->lock, flags);
 
 	return buf;
@@ -220,8 +218,7 @@ static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
 	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
 }
 
-static inline dma_addr_t
-map_single(struct device *dev, void *ptr, size_t size,
+static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
 		enum dma_data_direction dir)
 {
 	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
@@ -285,9 +282,8 @@ map_single(struct device *dev, void *ptr, size_t size,
 	return dma_addr;
 }
 
-static inline void
-unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction dir)
+static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
+		size_t size, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");
 
@@ -332,25 +328,20 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
+dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
 		enum dma_data_direction dir)
 {
-	dma_addr_t dma_addr;
-
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
 		__func__, ptr, size, dir);
 
 	BUG_ON(dir == DMA_NONE);
 
-	dma_addr = map_single(dev, ptr, size, dir);
-
-	return dma_addr;
+	return map_single(dev, ptr, size, dir);
 }
+EXPORT_SYMBOL(dma_map_single);
 
 dma_addr_t dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size,
-		enum dma_data_direction dir)
+		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
 	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
 		__func__, page, offset, size, dir);
@@ -368,9 +359,8 @@ EXPORT_SYMBOL(dma_map_page);
  * should be)
  */
 
-void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction dir)
+void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		enum dma_data_direction dir)
 {
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
 		__func__, (void *) dma_addr, size, dir);
@@ -379,6 +369,7 @@ dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 	unmap_single(dev, dma_addr, size, dir);
 }
+EXPORT_SYMBOL(dma_unmap_single);
 
 int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 		unsigned long off, size_t sz, enum dma_data_direction dir)
@@ -434,9 +425,8 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 }
 EXPORT_SYMBOL(dmabounce_sync_for_device);
 
-static int
-dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char *name,
-		unsigned long size)
+static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
+		const char *name, unsigned long size)
 {
 	pool->size = size;
 	DO_STATS(pool->allocs = 0);
@@ -447,9 +437,8 @@ dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char
 	return pool->pool ? 0 : -ENOMEM;
 }
 
-int
-dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
-		unsigned long large_buffer_size)
+int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
+		unsigned long large_buffer_size)
 {
 	struct dmabounce_device_info *device_info;
 	int ret;
@@ -505,9 +494,9 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 	kfree(device_info);
 	return ret;
 }
+EXPORT_SYMBOL(dmabounce_register_dev);
 
-void
-dmabounce_unregister_dev(struct device *dev)
+void dmabounce_unregister_dev(struct device *dev)
 {
 	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 
@@ -540,11 +529,6 @@ dmabounce_unregister_dev(struct device *dev)
 
 	dev_info(dev, "dmabounce: device unregistered\n");
 }
-
-
-EXPORT_SYMBOL(dma_map_single);
-EXPORT_SYMBOL(dma_unmap_single);
-EXPORT_SYMBOL(dmabounce_register_dev);
-EXPORT_SYMBOL(dmabounce_unregister_dev);
+EXPORT_SYMBOL(dmabounce_unregister_dev);
 
 MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");