diff options
Diffstat (limited to 'drivers/dma/intel_mid_dma.c')
-rw-r--r-- | drivers/dma/intel_mid_dma.c | 46 |
1 file changed, 15 insertions(+), 31 deletions(-)
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index 74f70aadf9e4..c900ca7aaec4 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c | |||
@@ -29,6 +29,8 @@ | |||
29 | #include <linux/intel_mid_dma.h> | 29 | #include <linux/intel_mid_dma.h> |
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | 31 | ||
32 | #include "dmaengine.h" | ||
33 | |||
32 | #define MAX_CHAN 4 /*max ch across controllers*/ | 34 | #define MAX_CHAN 4 /*max ch across controllers*/ |
33 | #include "intel_mid_dma_regs.h" | 35 | #include "intel_mid_dma_regs.h" |
34 | 36 | ||
@@ -288,7 +290,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, | |||
288 | struct intel_mid_dma_lli *llitem; | 290 | struct intel_mid_dma_lli *llitem; |
289 | void *param_txd = NULL; | 291 | void *param_txd = NULL; |
290 | 292 | ||
291 | midc->completed = txd->cookie; | 293 | dma_cookie_complete(txd); |
292 | callback_txd = txd->callback; | 294 | callback_txd = txd->callback; |
293 | param_txd = txd->callback_param; | 295 | param_txd = txd->callback_param; |
294 | 296 | ||
@@ -434,14 +436,7 @@ static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
434 | dma_cookie_t cookie; | 436 | dma_cookie_t cookie; |
435 | 437 | ||
436 | spin_lock_bh(&midc->lock); | 438 | spin_lock_bh(&midc->lock); |
437 | cookie = midc->chan.cookie; | 439 | cookie = dma_cookie_assign(tx); |
438 | |||
439 | if (++cookie < 0) | ||
440 | cookie = 1; | ||
441 | |||
442 | midc->chan.cookie = cookie; | ||
443 | desc->txd.cookie = cookie; | ||
444 | |||
445 | 440 | ||
446 | if (list_empty(&midc->active_list)) | 441 | if (list_empty(&midc->active_list)) |
447 | list_add_tail(&desc->desc_node, &midc->active_list); | 442 | list_add_tail(&desc->desc_node, &midc->active_list); |
@@ -482,31 +477,18 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, | |||
482 | dma_cookie_t cookie, | 477 | dma_cookie_t cookie, |
483 | struct dma_tx_state *txstate) | 478 | struct dma_tx_state *txstate) |
484 | { | 479 | { |
485 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); | 480 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); |
486 | dma_cookie_t last_used; | 481 | enum dma_status ret; |
487 | dma_cookie_t last_complete; | ||
488 | int ret; | ||
489 | 482 | ||
490 | last_complete = midc->completed; | 483 | ret = dma_cookie_status(chan, cookie, txstate); |
491 | last_used = chan->cookie; | ||
492 | |||
493 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
494 | if (ret != DMA_SUCCESS) { | 484 | if (ret != DMA_SUCCESS) { |
495 | spin_lock_bh(&midc->lock); | 485 | spin_lock_bh(&midc->lock); |
496 | midc_scan_descriptors(to_middma_device(chan->device), midc); | 486 | midc_scan_descriptors(to_middma_device(chan->device), midc); |
497 | spin_unlock_bh(&midc->lock); | 487 | spin_unlock_bh(&midc->lock); |
498 | 488 | ||
499 | last_complete = midc->completed; | 489 | ret = dma_cookie_status(chan, cookie, txstate); |
500 | last_used = chan->cookie; | ||
501 | |||
502 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
503 | } | 490 | } |
504 | 491 | ||
505 | if (txstate) { | ||
506 | txstate->last = last_complete; | ||
507 | txstate->used = last_used; | ||
508 | txstate->residue = 0; | ||
509 | } | ||
510 | return ret; | 492 | return ret; |
511 | } | 493 | } |
512 | 494 | ||
@@ -732,13 +714,14 @@ err_desc_get: | |||
732 | * @sg_len: length of sg txn | 714 | * @sg_len: length of sg txn |
733 | * @direction: DMA transfer dirtn | 715 | * @direction: DMA transfer dirtn |
734 | * @flags: DMA flags | 716 | * @flags: DMA flags |
717 | * @context: transfer context (ignored) | ||
735 | * | 718 | * |
736 | * Prepares LLI based periphral transfer | 719 | * Prepares LLI based periphral transfer |
737 | */ | 720 | */ |
738 | static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( | 721 | static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( |
739 | struct dma_chan *chan, struct scatterlist *sgl, | 722 | struct dma_chan *chan, struct scatterlist *sgl, |
740 | unsigned int sg_len, enum dma_transfer_direction direction, | 723 | unsigned int sg_len, enum dma_transfer_direction direction, |
741 | unsigned long flags) | 724 | unsigned long flags, void *context) |
742 | { | 725 | { |
743 | struct intel_mid_dma_chan *midc = NULL; | 726 | struct intel_mid_dma_chan *midc = NULL; |
744 | struct intel_mid_dma_slave *mids = NULL; | 727 | struct intel_mid_dma_slave *mids = NULL; |
@@ -832,7 +815,6 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) | |||
832 | /*trying to free ch in use!!!!!*/ | 815 | /*trying to free ch in use!!!!!*/ |
833 | pr_err("ERR_MDMA: trying to free ch in use\n"); | 816 | pr_err("ERR_MDMA: trying to free ch in use\n"); |
834 | } | 817 | } |
835 | pm_runtime_put(&mid->pdev->dev); | ||
836 | spin_lock_bh(&midc->lock); | 818 | spin_lock_bh(&midc->lock); |
837 | midc->descs_allocated = 0; | 819 | midc->descs_allocated = 0; |
838 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { | 820 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { |
@@ -853,6 +835,7 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) | |||
853 | /* Disable CH interrupts */ | 835 | /* Disable CH interrupts */ |
854 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); | 836 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); |
855 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); | 837 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); |
838 | pm_runtime_put(&mid->pdev->dev); | ||
856 | } | 839 | } |
857 | 840 | ||
858 | /** | 841 | /** |
@@ -886,7 +869,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) | |||
886 | pm_runtime_put(&mid->pdev->dev); | 869 | pm_runtime_put(&mid->pdev->dev); |
887 | return -EIO; | 870 | return -EIO; |
888 | } | 871 | } |
889 | midc->completed = chan->cookie = 1; | 872 | dma_cookie_init(chan); |
890 | 873 | ||
891 | spin_lock_bh(&midc->lock); | 874 | spin_lock_bh(&midc->lock); |
892 | while (midc->descs_allocated < DESCS_PER_CHANNEL) { | 875 | while (midc->descs_allocated < DESCS_PER_CHANNEL) { |
@@ -1056,7 +1039,8 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) | |||
1056 | } | 1039 | } |
1057 | err_status &= mid->intr_mask; | 1040 | err_status &= mid->intr_mask; |
1058 | if (err_status) { | 1041 | if (err_status) { |
1059 | iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR); | 1042 | iowrite32((err_status << INT_MASK_WE), |
1043 | mid->dma_base + MASK_ERR); | ||
1060 | call_tasklet = 1; | 1044 | call_tasklet = 1; |
1061 | } | 1045 | } |
1062 | if (call_tasklet) | 1046 | if (call_tasklet) |
@@ -1118,7 +1102,7 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
1118 | struct intel_mid_dma_chan *midch = &dma->ch[i]; | 1102 | struct intel_mid_dma_chan *midch = &dma->ch[i]; |
1119 | 1103 | ||
1120 | midch->chan.device = &dma->common; | 1104 | midch->chan.device = &dma->common; |
1121 | midch->chan.cookie = 1; | 1105 | dma_cookie_init(&midch->chan); |
1122 | midch->ch_id = dma->chan_base + i; | 1106 | midch->ch_id = dma->chan_base + i; |
1123 | pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id); | 1107 | pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id); |
1124 | 1108 | ||