Diffstat (limited to 'drivers/dma/intel_mid_dma.c')
-rw-r--r--  drivers/dma/intel_mid_dma.c | 39
1 files changed, 25 insertions(+), 14 deletions(-)
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 19a0c64d45d..74f70aadf9e 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -280,7 +280,8 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
  * callbacks but must be called with the lock held.
  */
 static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
 		struct intel_mid_dma_desc *desc)
+		__releases(&midc->lock) __acquires(&midc->lock)
 {
 	struct dma_async_tx_descriptor	*txd = &desc->txd;
 	dma_async_tx_callback callback_txd = NULL;
@@ -311,6 +312,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
 		pci_pool_free(desc->lli_pool, desc->lli,
 					desc->lli_phys);
 		pci_pool_destroy(desc->lli_pool);
+		desc->lli = NULL;
 	}
 	list_move(&desc->desc_node, &midc->free_list);
 	midc->busy = false;
@@ -395,10 +397,10 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
 						midc->dma->block_size);
 		/*Populate SAR and DAR values*/
 		sg_phy_addr = sg_phys(sg);
-		if (desc->dirn == DMA_TO_DEVICE) {
+		if (desc->dirn == DMA_MEM_TO_DEV) {
 			lli_bloc_desc->sar = sg_phy_addr;
 			lli_bloc_desc->dar = mids->dma_slave.dst_addr;
-		} else if (desc->dirn == DMA_FROM_DEVICE) {
+		} else if (desc->dirn == DMA_DEV_TO_MEM) {
 			lli_bloc_desc->sar = mids->dma_slave.src_addr;
 			lli_bloc_desc->dar = sg_phy_addr;
 		}
@@ -490,7 +492,9 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
 
 	ret = dma_async_is_complete(cookie, last_complete, last_used);
 	if (ret != DMA_SUCCESS) {
+		spin_lock_bh(&midc->lock);
 		midc_scan_descriptors(to_middma_device(chan->device), midc);
+		spin_unlock_bh(&midc->lock);
 
 		last_complete = midc->completed;
 		last_used = chan->cookie;
@@ -566,6 +570,7 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
 			pci_pool_free(desc->lli_pool, desc->lli,
 						desc->lli_phys);
 			pci_pool_destroy(desc->lli_pool);
+			desc->lli = NULL;
 		}
 		list_move(&desc->desc_node, &midc->free_list);
 	}
@@ -632,13 +637,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
 	if (midc->dma->pimr_mask) {
 		cfg_hi.cfgx.protctl = 0x0;	/*default value*/
 		cfg_hi.cfgx.fifo_mode = 1;
-		if (mids->dma_slave.direction == DMA_TO_DEVICE) {
+		if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
 			cfg_hi.cfgx.src_per = 0;
 			if (mids->device_instance == 0)
 				cfg_hi.cfgx.dst_per = 3;
 			if (mids->device_instance == 1)
 				cfg_hi.cfgx.dst_per = 1;
-		} else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
+		} else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
 			if (mids->device_instance == 0)
 				cfg_hi.cfgx.src_per = 2;
 			if (mids->device_instance == 1)
@@ -682,11 +687,11 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
 		ctl_lo.ctlx.sinc = 0;
 		ctl_lo.ctlx.dinc = 0;
 	} else {
-		if (mids->dma_slave.direction == DMA_TO_DEVICE) {
+		if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
 			ctl_lo.ctlx.sinc = 0;
 			ctl_lo.ctlx.dinc = 2;
 			ctl_lo.ctlx.tt_fc = 1;
-		} else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
+		} else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
 			ctl_lo.ctlx.sinc = 2;
 			ctl_lo.ctlx.dinc = 0;
 			ctl_lo.ctlx.tt_fc = 2;
@@ -732,7 +737,7 @@ err_desc_get:
  */
 static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
 			struct dma_chan *chan, struct scatterlist *sgl,
-			unsigned int sg_len, enum dma_data_direction direction,
+			unsigned int sg_len, enum dma_transfer_direction direction,
 			unsigned long flags)
 {
 	struct intel_mid_dma_chan	*midc = NULL;
@@ -868,7 +873,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
 	pm_runtime_get_sync(&mid->pdev->dev);
 
 	if (mid->state == SUSPENDED) {
-		if (dma_resume(mid->pdev)) {
+		if (dma_resume(&mid->pdev->dev)) {
 			pr_err("ERR_MDMA: resume failed");
 			return -EFAULT;
 		}
@@ -1099,7 +1104,8 @@ static int mid_setup_dma(struct pci_dev *pdev)
 					LNW_PERIPHRAL_MASK_SIZE);
 		if (dma->mask_reg == NULL) {
 			pr_err("ERR_MDMA:Can't map periphral intr space !!\n");
-			return -ENOMEM;
+			err = -ENOMEM;
+			goto err_ioremap;
 		}
 	} else
 		dma->mask_reg = NULL;
@@ -1196,6 +1202,9 @@ static int mid_setup_dma(struct pci_dev *pdev)
 err_engine:
 	free_irq(pdev->irq, dma);
 err_irq:
+	if (dma->mask_reg)
+		iounmap(dma->mask_reg);
+err_ioremap:
 	pci_pool_destroy(dma->dma_pool);
 err_dma_pool:
 	pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
@@ -1337,8 +1346,9 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
 *
 * This function is called by OS when a power event occurs
 */
-int dma_suspend(struct pci_dev *pci, pm_message_t state)
+static int dma_suspend(struct device *dev)
 {
+	struct pci_dev *pci = to_pci_dev(dev);
 	int i;
 	struct middma_device *device = pci_get_drvdata(pci);
 	pr_debug("MDMA: dma_suspend called\n");
@@ -1362,8 +1372,9 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state)
 *
 * This function is called by OS when a power event occurs
 */
-int dma_resume(struct pci_dev *pci)
+int dma_resume(struct device *dev)
 {
+	struct pci_dev *pci = to_pci_dev(dev);
 	int ret;
 	struct middma_device *device = pci_get_drvdata(pci);
 
@@ -1429,6 +1440,8 @@ static const struct dev_pm_ops intel_mid_dma_pm = {
 	.runtime_suspend = dma_runtime_suspend,
 	.runtime_resume = dma_runtime_resume,
 	.runtime_idle = dma_runtime_idle,
+	.suspend = dma_suspend,
+	.resume = dma_resume,
 };
 
 static struct pci_driver intel_mid_dma_pci_driver = {
@@ -1437,8 +1450,6 @@ static struct pci_driver intel_mid_dma_pci_driver = {
 	.probe = intel_mid_dma_probe,
 	.remove = __devexit_p(intel_mid_dma_remove),
 #ifdef CONFIG_PM
-	.suspend = dma_suspend,
-	.resume = dma_resume,
 	.driver = {
 		.pm = &intel_mid_dma_pm,
 	},
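
The last four hunks are the standard migration from the legacy pci_driver .suspend/.resume callbacks to the dev_pm_ops interface: the callbacks now take a struct device * and recover the pci_dev with to_pci_dev(), which is also why the caller in intel_mid_dma_alloc_chan_resources() now passes &mid->pdev->dev. A minimal sketch of the same pattern follows; the foo_* names are hypothetical and not part of intel_mid_dma.c.

/* Sketch only: legacy PCI PM hooks migrated to dev_pm_ops.
 * All foo_* identifiers are illustrative assumptions. */
#include <linux/pci.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* dev_pm_ops callbacks receive a struct device *, so a PCI
	 * driver recovers its pci_dev first, as dma_suspend() does. */
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,	/* system sleep entry */
	.resume  = foo_resume,	/* system sleep exit */
	/* runtime_suspend/resume/idle can share this table,
	 * as intel_mid_dma_pm does above. */
};

static struct pci_driver foo_driver = {
	.name = "foo",
	/* No legacy .suspend/.resume members: the PM core reaches
	 * the callbacks through driver.pm instead. */
	.driver = {
		.pm = &foo_pm_ops,
	},
};

Because the callback now receives the embedded struct device rather than the pci_dev itself, any direct caller must pass &pdev->dev, matching the dma_resume(&mid->pdev->dev) change above.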