path: root/drivers/dma/dmaengine.c
author		Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
commit		8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree		a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /drivers/dma/dmaengine.c
parent		406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r--	drivers/dma/dmaengine.c	42
1 file changed, 12 insertions(+), 30 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index a815d44c70a..b48967b499d 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -45,8 +45,6 @@
  * See Documentation/dmaengine.txt for more details
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <linux/module.h>
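The define deleted here is what lets the rest of the file log without spelling out its own prefix: when pr_fmt() is defined before the printk headers are pulled in, every pr_*() call expands with KBUILD_MODNAME ("dmaengine") prepended. Its removal is why later hunks in this diff re-add a literal "dmaengine: " prefix by hand. Roughly:

	/* With the define in place (it must precede the printk headers): */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	pr_err("initialization failure\n");
	/* logs: "dmaengine: initialization failure" */

	/* Without it, each call site spells the prefix out itself: */
	pr_err("dmaengine: initialization failure\n");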
@@ -263,7 +261,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 	do {
 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
-			pr_err("%s: timeout!\n", __func__);
+			printk(KERN_ERR "dma_sync_wait_timeout!\n");
 			return DMA_ERROR;
 		}
 	} while (status == DMA_IN_PROGRESS);
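Both sides of this hunk poll dma_async_is_tx_complete() until the cookie resolves or the 5-second dma_sync_wait_timeout window expires; only the timeout message differs. A minimal sketch of the call side, using the async memcpy helper this kernel generation still provides (channel acquisition and error handling elided):

	dma_cookie_t cookie;

	/* queue a mem-to-mem copy on a previously obtained channel */
	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);

	/* busy-wait for completion; DMA_ERROR on timeout */
	if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
		pr_err("copy failed or timed out\n");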
@@ -314,7 +312,7 @@ static int __init dma_channel_table_init(void)
 	}
 
 	if (err) {
-		pr_err("initialization failure\n");
+		pr_err("dmaengine: initialization failure\n");
 		for_each_dma_cap_mask(cap, dma_cap_mask_all)
 			if (channel_table[cap])
 				free_percpu(channel_table[cap]);
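Only the log line changes here; for context, this error path unwinds the per-capability table of per-CPU channel pointers that the loop just above it allocates, roughly:

	enum dma_transaction_type cap;
	int err = 0;

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;	/* fall through to the cleanup in the hunk above */
		}
	}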
@@ -334,20 +332,6 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 }
 EXPORT_SYMBOL(dma_find_channel);
 
-/*
- * net_dma_find_channel - find a channel for net_dma
- * net_dma has alignment requirements
- */
-struct dma_chan *net_dma_find_channel(void)
-{
-	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
-	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
-		return NULL;
-
-	return chan;
-}
-EXPORT_SYMBOL(net_dma_find_channel);
-
 /**
  * dma_issue_pending_all - flush all pending operations across all channels
  */
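This hunk drops net_dma_find_channel() entirely; the helper does not exist in the older code base the commit restores. A caller that still needs an alignment-checked memcpy channel can open-code the same test the removed function performed:

	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);

	/* net_dma copies at byte granularity, so the device must accept
	 * 1-byte alignment for source, destination and length */
	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
		chan = NULL;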
@@ -522,12 +506,12 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
 			err = dma_chan_get(chan);
 
 			if (err == -ENODEV) {
-				pr_debug("%s: %s module removed\n",
-					 __func__, dma_chan_name(chan));
+				pr_debug("%s: %s module removed\n", __func__,
+					 dma_chan_name(chan));
 				list_del_rcu(&device->global_node);
 			} else if (err)
-				pr_debug("%s: failed to get %s: (%d)\n",
-					 __func__, dma_chan_name(chan), err);
+				pr_debug("dmaengine: failed to get %s: (%d)\n",
+					 dma_chan_name(chan), err);
 			else
 				break;
 			if (--device->privatecnt == 0)
@@ -537,9 +521,7 @@
 	}
 	mutex_unlock(&dma_list_mutex);
 
-	pr_debug("%s: %s (%s)\n",
-		 __func__,
-		 chan ? "success" : "fail",
+	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
 		 chan ? dma_chan_name(chan) : NULL);
 
 	return chan;
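__dma_request_channel() is normally reached through the dma_request_channel() macro, which offers each public channel to a caller-supplied filter until one is accepted. A minimal sketch of a client; my_filter() and my_get_chan() are hypothetical names:

	static bool my_filter(struct dma_chan *chan, void *param)
	{
		/* accept only channels provided by our DMA controller */
		return chan->device->dev == param;
	}

	static struct dma_chan *my_get_chan(struct device *dma_dev)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		return dma_request_channel(mask, my_filter, dma_dev);
	}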
@@ -582,8 +564,8 @@ void dmaengine_get(void)
 				list_del_rcu(&device->global_node);
 				break;
 			} else if (err)
-				pr_debug("%s: failed to get %s: (%d)\n",
-					 __func__, dma_chan_name(chan), err);
+				pr_err("dmaengine: failed to get %s: (%d)\n",
+				       dma_chan_name(chan), err);
 		}
 	}
 
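dmaengine_get() walks every public device and takes a reference on each of its channels; anything other than -ENODEV is merely logged, as in the hunk above. Clients bracket opportunistic channel use with the get/put pair, roughly:

	struct dma_chan *chan;

	dmaengine_get();	/* pin all public channels */

	chan = dma_find_channel(DMA_MEMCPY);	/* cheap per-cpu lookup */
	if (chan) {
		/* ... issue async operations on chan ... */
	}

	dmaengine_put();	/* release; providers may unload again */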
@@ -711,12 +693,12 @@ int dma_async_device_register(struct dma_device *device)
 		!device->device_prep_dma_interrupt);
 	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
 		!device->device_prep_dma_sg);
+	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
+		!device->device_prep_slave_sg);
 	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
 		!device->device_prep_dma_cyclic);
 	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
 		!device->device_control);
-	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
-		!device->device_prep_interleaved_dma);
 
 	BUG_ON(!device->device_alloc_chan_resources);
 	BUG_ON(!device->device_free_chan_resources);
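On the restored side a DMA_SLAVE provider must supply device_prep_slave_sg in addition to device_control, while the DMA_INTERLEAVE hook disappears along with its capability. A sketch of the registration half of a hypothetical slave driver (the my_* callbacks are placeholders; the remaining BUG_ON checks in this function also demand the alloc/free, tx_status and issue_pending hooks):

	struct dma_device *dd = &my_dev->ddev;
	int err;

	dma_cap_zero(dd->cap_mask);
	dma_cap_set(DMA_SLAVE, dd->cap_mask);

	/* required for DMA_SLAVE by the checks above */
	dd->device_prep_slave_sg = my_prep_slave_sg;
	dd->device_control = my_control;

	/* required of every dma_device */
	dd->device_alloc_chan_resources = my_alloc_chan_resources;
	dd->device_free_chan_resources = my_free_chan_resources;
	dd->device_tx_status = my_tx_status;
	dd->device_issue_pending = my_issue_pending;
	dd->dev = &pdev->dev;

	err = dma_async_device_register(dd);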
@@ -1019,7 +1001,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 	while (tx->cookie == -EBUSY) {
 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
-			pr_err("%s timeout waiting for descriptor submission\n",
-			       __func__);
+			pr_err("%s timeout waiting for descriptor submission\n",
+				__func__);
 			return DMA_ERROR;
 		}
 		cpu_relax();
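The change in this last hunk is whitespace-only (continuation-line alignment). For context, the surrounding function spins while the descriptor is still unsubmitted (cookie == -EBUSY) inside the same 5-second window dma_sync_wait() uses, then falls through to wait for actual completion; roughly:

	enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
	{
		unsigned long dma_sync_wait_timeout =
			jiffies + msecs_to_jiffies(5000);

		if (!tx)
			return DMA_SUCCESS;

		while (tx->cookie == -EBUSY) {
			if (time_after_eq(jiffies, dma_sync_wait_timeout))
				return DMA_ERROR;	/* never submitted */
			cpu_relax();
		}
		return dma_sync_wait(tx->chan, tx->cookie);
	}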