author	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
commit	8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree	a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /arch/arm/common/dmabounce.c
parent	406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'arch/arm/common/dmabounce.c')
-rw-r--r--  arch/arm/common/dmabounce.c  101
1 file changed, 27 insertions(+), 74 deletions(-)
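
The hunks below back out the struct dma_map_ops based dmabounce glue (dmabounce_ops, dmabounce_map_page, dmabounce_unmap_page, set_dma_ops) and restore the older exported entry points __dma_map_page, __dma_unmap_page, dmabounce_sync_for_cpu and dmabounce_sync_for_device, with a failed mapping reported as ~0 rather than DMA_ERROR_CODE. The following is a minimal sketch of how a caller might drive the restored interface in a kernel tree of this vintage; the example_tx() helper and its error handling are illustrative assumptions, not part of this patch.

/*
 * Illustrative sketch only -- assumes the pre-dma_map_ops ARM DMA API
 * restored by this patch; example_tx() is a hypothetical helper.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_tx(struct device *dev, struct page *page,
		      unsigned long offset, size_t size)
{
	dma_addr_t handle;

	/* Bounce into a DMA-safe buffer if this device needs it. */
	handle = __dma_map_page(dev, page, offset, size, DMA_TO_DEVICE);
	if (handle == ~0)	/* this tree reports mapping failure as ~0 */
		return -ENOMEM;

	/* ... start the transfer using 'handle' and wait for completion ... */

	/* Release (and, for DMA_FROM_DEVICE, copy back) any safe buffer. */
	__dma_unmap_page(dev, handle, size, DMA_TO_DEVICE);
	return 0;
}
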
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 1143c4d5c56..595ecd290eb 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -173,8 +173,7 @@ find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_
 	read_lock_irqsave(&device_info->lock, flags);
 
 	list_for_each_entry(b, &device_info->safe_buffers, node)
-		if (b->safe_dma_addr <= safe_dma_addr &&
-		    b->safe_dma_addr + b->size > safe_dma_addr) {
+		if (b->safe_dma_addr == safe_dma_addr) {
 			rb = b;
 			break;
 		}
@@ -255,7 +254,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
 	if (buf == NULL) {
 		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
 			__func__, ptr);
-		return DMA_ERROR_CODE;
+		return ~0;
 	}
 
 	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -308,9 +307,8 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir,
-		struct dma_attrs *attrs)
+dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
 	dma_addr_t dma_addr;
 	int ret;
@@ -322,20 +320,21 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
 
 	ret = needs_bounce(dev, dma_addr, size);
 	if (ret < 0)
-		return DMA_ERROR_CODE;
+		return ~0;
 
 	if (ret == 0) {
-		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
+		__dma_page_cpu_to_dev(page, offset, size, dir);
 		return dma_addr;
 	}
 
 	if (PageHighMem(page)) {
 		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
-		return DMA_ERROR_CODE;
+		return ~0;
 	}
 
 	return map_single(dev, page_address(page) + offset, size, dir);
 }
+EXPORT_SYMBOL(__dma_map_page);
 
 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
@@ -343,8 +342,8 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
  * the safe buffer. (basically return things back to the way they
  * should be)
  */
-static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction dir, struct dma_attrs *attrs)
+void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+		enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
 
@@ -353,32 +352,31 @@ static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t
 
 	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
 	if (!buf) {
-		arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
+		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
+			dma_addr & ~PAGE_MASK, size, dir);
 		return;
 	}
 
 	unmap_single(dev, buf, size, dir);
 }
+EXPORT_SYMBOL(__dma_unmap_page);
 
-static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
-		size_t sz, enum dma_data_direction dir)
+int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+		unsigned long off, size_t sz, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
-	unsigned long off;
 
-	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
-		__func__, addr, sz, dir);
+	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
+		__func__, addr, off, sz, dir);
 
 	buf = find_safe_buffer_dev(dev, addr, __func__);
 	if (!buf)
 		return 1;
 
-	off = addr - buf->safe_dma_addr;
-
 	BUG_ON(buf->direction != dir);
 
-	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
-		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
 		buf->safe, buf->safe_dma_addr);
 
 	DO_STATS(dev->archdata.dmabounce->bounce_count++);
@@ -390,35 +388,24 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 	}
 	return 0;
 }
+EXPORT_SYMBOL(dmabounce_sync_for_cpu);
 
-static void dmabounce_sync_for_cpu(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
-		return;
-
-	arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
-}
-
-static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
-		size_t sz, enum dma_data_direction dir)
+int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+		unsigned long off, size_t sz, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
-	unsigned long off;
 
-	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
-		__func__, addr, sz, dir);
+	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
+		__func__, addr, off, sz, dir);
 
 	buf = find_safe_buffer_dev(dev, addr, __func__);
 	if (!buf)
 		return 1;
 
-	off = addr - buf->safe_dma_addr;
-
 	BUG_ON(buf->direction != dir);
 
-	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
-		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
 		buf->safe, buf->safe_dma_addr);
 
 	DO_STATS(dev->archdata.dmabounce->bounce_count++);
@@ -430,39 +417,7 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 	}
 	return 0;
 }
-
-static void dmabounce_sync_for_device(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	if (!__dmabounce_sync_for_device(dev, handle, size, dir))
-		return;
-
-	arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
-}
-
-static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
-{
-	if (dev->archdata.dmabounce)
-		return 0;
-
-	return arm_dma_ops.set_dma_mask(dev, dma_mask);
-}
-
-static struct dma_map_ops dmabounce_ops = {
-	.alloc = arm_dma_alloc,
-	.free = arm_dma_free,
-	.mmap = arm_dma_mmap,
-	.get_sgtable = arm_dma_get_sgtable,
-	.map_page = dmabounce_map_page,
-	.unmap_page = dmabounce_unmap_page,
-	.sync_single_for_cpu = dmabounce_sync_for_cpu,
-	.sync_single_for_device = dmabounce_sync_for_device,
-	.map_sg = arm_dma_map_sg,
-	.unmap_sg = arm_dma_unmap_sg,
-	.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
-	.sync_sg_for_device = arm_dma_sync_sg_for_device,
-	.set_dma_mask = dmabounce_set_mask,
-};
+EXPORT_SYMBOL(dmabounce_sync_for_device);
 
 static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
 		const char *name, unsigned long size)
@@ -524,7 +479,6 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 #endif
 
 	dev->archdata.dmabounce = device_info;
-	set_dma_ops(dev, &dmabounce_ops);
 
 	dev_info(dev, "dmabounce: registered device\n");
 
@@ -543,7 +497,6 @@ void dmabounce_unregister_dev(struct device *dev)
 	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 
 	dev->archdata.dmabounce = NULL;
-	set_dma_ops(dev, NULL);
 
 	if (!device_info) {
 		dev_warn(dev,