Diffstat (limited to 'arch/arm/common/dmabounce.c')
-rw-r--r--	arch/arm/common/dmabounce.c	87
1 file changed, 36 insertions(+), 51 deletions(-)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 2e635b814c14..6fbe7722aa44 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -32,7 +32,6 @@
 
 #include <asm/cacheflush.h>
 
-#undef DEBUG
 #undef STATS
 
 #ifdef STATS
@@ -66,14 +65,13 @@ struct dmabounce_pool {
 };
 
 struct dmabounce_device_info {
-	struct list_head node;
-
 	struct device *dev;
 	struct list_head safe_buffers;
 #ifdef STATS
 	unsigned long total_allocs;
 	unsigned long map_op_count;
 	unsigned long bounce_count;
+	int attr_res;
 #endif
 	struct dmabounce_pool small;
 	struct dmabounce_pool large;
@@ -81,33 +79,23 @@ struct dmabounce_device_info {
 	rwlock_t lock;
 };
 
-static LIST_HEAD(dmabounce_devs);
-
 #ifdef STATS
-static void print_alloc_stats(struct dmabounce_device_info *device_info)
+static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
 {
-	printk(KERN_INFO
-		"%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
-		device_info->dev->bus_id,
-		device_info->small.allocs, device_info->large.allocs,
-		device_info->total_allocs - device_info->small.allocs -
-			device_info->large.allocs,
-		device_info->total_allocs);
+	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
+	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
+		device_info->small.allocs,
+		device_info->large.allocs,
+		device_info->total_allocs - device_info->small.allocs -
+			device_info->large.allocs,
+		device_info->total_allocs,
+		device_info->map_op_count,
+		device_info->bounce_count);
 }
-#endif
-
-/* find the given device in the dmabounce device list */
-static inline struct dmabounce_device_info *
-find_dmabounce_dev(struct device *dev)
-{
-	struct dmabounce_device_info *d;
 
-	list_for_each_entry(d, &dmabounce_devs, node)
-		if (d->dev == dev)
-			return d;
-
-	return NULL;
-}
+static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
+#endif
 
 
 /* allocate a 'safe' buffer and keep track of it */
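
The hunk above replaces the periodic printk statistics with a sysfs attribute: DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL) declares a read-only struct device_attribute named dev_attr_dmabounce_stats, whose show callback formats the counters into the page buffer sysfs supplies. A minimal sketch of the same pattern for a hypothetical "foo" attribute (not part of this patch):

	#include <linux/device.h>

	/* Hypothetical "foo" attribute illustrating the DEVICE_ATTR pattern. */
	static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
				char *buf)
	{
		/* sysfs hands us a PAGE_SIZE buffer; return the byte count written */
		return sprintf(buf, "%d\n", 42);
	}
	static DEVICE_ATTR(foo, 0400, foo_show, NULL);	/* declares dev_attr_foo */

The attribute appears as a file under the device's sysfs directory once device_create_file() succeeds (see the registration hunk further down).
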
@@ -162,8 +150,6 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
 	if (pool)
 		pool->allocs++;
 	device_info->total_allocs++;
-	if (device_info->total_allocs % 1000 == 0)
-		print_alloc_stats(device_info);
 #endif
 
 	write_lock_irqsave(&device_info->lock, flags);
@@ -218,20 +204,11 @@ free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *
 
 /* ************************************************** */
 
-#ifdef STATS
-static void print_map_stats(struct dmabounce_device_info *device_info)
-{
-	dev_info(device_info->dev,
-		"dmabounce: map_op_count=%lu, bounce_count=%lu\n",
-		device_info->map_op_count, device_info->bounce_count);
-}
-#endif
-
 static inline dma_addr_t
 map_single(struct device *dev, void *ptr, size_t size,
 		enum dma_data_direction dir)
 {
-	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
+	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 	dma_addr_t dma_addr;
 	int needs_bounce = 0;
 
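
With this change the per-call list walk in find_dmabounce_dev() disappears: the dmabounce state hangs directly off the device through dev->archdata, turning the lookup into a single pointer dereference and making the global dmabounce_devs list unnecessary. This assumes ARM's struct dev_archdata gains a dmabounce field in a companion change; a sketch of what that definition would look like:

	/* include/asm-arm/device.h (sketch of the assumed companion change) */
	struct dev_archdata {
	#ifdef CONFIG_DMABOUNCE
		struct dmabounce_device_info *dmabounce;
	#endif
	};
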
@@ -281,10 +258,14 @@ map_single(struct device *dev, void *ptr, size_t size,
 		ptr = buf->safe;
 
 		dma_addr = buf->safe_dma_addr;
+	} else {
+		/*
+		 * We don't need to sync the DMA buffer since
+		 * it was allocated via the coherent allocators.
+		 */
+		consistent_sync(ptr, size, dir);
 	}
 
-	consistent_sync(ptr, size, dir);
-
 	return dma_addr;
 }
 
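
The new comment is easiest to read as a note about the branch not taken: when a bounce buffer is used, its memory comes from a dma_pool (coherent, so no cache maintenance is needed), and only in the non-bounced case must the caller's buffer be synced with consistent_sync(). Reduced to its control flow (illustrative only, names from the surrounding function):

	if (bounced) {
		/* safe buffer is coherent - nothing to sync */
		dma_addr = buf->safe_dma_addr;
	} else {
		/* caller's buffer may be cached - write back/invalidate */
		consistent_sync(ptr, size, dir);
	}
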
@@ -292,7 +273,7 @@ static inline void
 unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
-	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
+	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 	struct safe_buffer *buf = NULL;
 
 	/*
@@ -317,12 +298,12 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		DO_STATS ( device_info->bounce_count++ );
 
 		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
-			unsigned long ptr;
+			void *ptr = buf->ptr;
 
 			dev_dbg(dev,
 				"%s: copy back safe %p to unsafe %p size %d\n",
-				__func__, buf->safe, buf->ptr, size);
-			memcpy(buf->ptr, buf->safe, size);
+				__func__, buf->safe, ptr, size);
+			memcpy(ptr, buf->safe, size);
 
 			/*
 			 * DMA buffers must have the same cache properties
@@ -332,8 +313,8 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 			 * bidirectional case because we know the cache
 			 * lines will be coherent with the data written.
 			 */
-			ptr = (unsigned long)buf->ptr;
 			dmac_clean_range(ptr, ptr + size);
+			outer_clean_range(__pa(ptr), __pa(ptr) + size);
 		}
 		free_safe_buffer(device_info, buf);
 	}
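
Switching ptr to void * also lets the clean-up path cover both cache levels: dmac_clean_range() maintains the CPU's (inner) cache by virtual address, while the newly added outer_clean_range() maintains any outer cache (an L2, where present) and therefore takes physical addresses, hence the __pa() conversion. A hypothetical helper combining the two (for illustration, not in this patch):

	/* Hypothetical helper, assuming a virtually contiguous lowmem buffer. */
	static inline void clean_dcache_all_levels(void *start, size_t size)
	{
		dmac_clean_range(start, start + size);			/* inner: virtual */
		outer_clean_range(__pa(start), __pa(start) + size);	/* outer: physical */
	}
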
@@ -343,7 +324,7 @@ static inline void
 sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
-	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
+	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 	struct safe_buffer *buf = NULL;
 
 	if (device_info)
@@ -397,7 +378,10 @@ sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		default:
 			BUG();
 		}
-		consistent_sync(buf->safe, size, dir);
+		/*
+		 * No need to sync the safe buffer - it was allocated
+		 * via the coherent allocators.
+		 */
 	} else {
 		consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
 	}
@@ -604,9 +588,10 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 	device_info->total_allocs = 0;
 	device_info->map_op_count = 0;
 	device_info->bounce_count = 0;
+	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
 #endif
 
-	list_add(&device_info->node, &dmabounce_devs);
+	dev->archdata.dmabounce = device_info;
 
 	printk(KERN_INFO "dmabounce: registered device %s on %s bus\n",
 		dev->bus_id, dev->bus->name);
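
device_create_file() returns 0 on success, so the result is remembered in attr_res; the unregister path below removes the attribute only if it was actually created. Once registered, the counters can be read from userspace, e.g. (path and values illustrative):

	# cat /sys/devices/.../dmabounce_stats
	12 3 0 15 1024 15
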
@@ -623,7 +608,9 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 void
 dmabounce_unregister_dev(struct device *dev)
 {
-	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
+	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
+
+	dev->archdata.dmabounce = NULL;
 
 	if (!device_info) {
 		printk(KERN_WARNING
@@ -645,12 +632,10 @@ dmabounce_unregister_dev(struct device *dev)
 		dma_pool_destroy(device_info->large.pool);
 
 #ifdef STATS
-	print_alloc_stats(device_info);
-	print_map_stats(device_info);
+	if (device_info->attr_res == 0)
+		device_remove_file(dev, &dev_attr_dmabounce_stats);
 #endif
 
-	list_del(&device_info->node);
-
 	kfree(device_info);
 
 	printk(KERN_INFO "dmabounce: device %s on %s bus unregistered\n",