author     Dan Williams <dan.j.williams@intel.com>   2010-03-03 23:22:21 -0500
committer  Dan Williams <dan.j.williams@intel.com>   2010-03-03 23:22:21 -0500
commit     dd58ffcf5a5352fc10820c8ffbcd5fed416a2c3a
tree       f36172b40f9f3fc2c646f70da40e01705399b6b8 /drivers
parent     aa4d72ae946a4fa40486b871717778734184fa29
parent     56a5d3cf21c71963c8fc506e9b9d3f71641d9c71

Merge branch 'coh' into dmaengine
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dma/coh901318.c      184
-rw-r--r--  drivers/dma/coh901318_lli.c   23
-rw-r--r--  drivers/dma/dmaengine.c        1
-rw-r--r--  drivers/dma/dmatest.c          2
-rw-r--r--  drivers/dma/ioat/dma_v2.c      2
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c   25
6 files changed, 115 insertions(+), 122 deletions(-)
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index b5f2ee0f8e2c..1656fdcdb6c2 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -39,7 +39,6 @@ struct coh901318_desc {
         unsigned int sg_len;
         struct coh901318_lli *data;
         enum dma_data_direction dir;
-        int pending_irqs;
         unsigned long flags;
 };
 
@@ -72,7 +71,6 @@ struct coh901318_chan {
 
         unsigned long nbr_active_done;
         unsigned long busy;
-        int pending_irqs;
 
         struct coh901318_base *base;
 };
@@ -80,18 +78,16 @@ struct coh901318_chan {
 static void coh901318_list_print(struct coh901318_chan *cohc,
                                  struct coh901318_lli *lli)
 {
-        struct coh901318_lli *l;
-        dma_addr_t addr = virt_to_phys(lli);
+        struct coh901318_lli *l = lli;
         int i = 0;
 
-        while (addr) {
-                l = phys_to_virt(addr);
+        while (l) {
                 dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x"
-                         ", dst 0x%x, link 0x%x link_virt 0x%p\n",
+                         ", dst 0x%x, link 0x%x virt_link_addr 0x%p\n",
                          i, l, l->control, l->src_addr, l->dst_addr,
-                         l->link_addr, phys_to_virt(l->link_addr));
+                         l->link_addr, l->virt_link_addr);
                 i++;
-                addr = l->link_addr;
+                l = l->virt_link_addr;
         }
 }
 
@@ -125,7 +121,7 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
                 goto err_kmalloc;
         tmp = dev_buf;
 
-        tmp += sprintf(tmp, "DMA -- enable dma channels\n");
+        tmp += sprintf(tmp, "DMA -- enabled dma channels\n");
 
         for (i = 0; i < debugfs_dma_base->platform->max_channels; i++)
                 if (started_channels & (1 << i))
@@ -337,16 +333,22 @@ coh901318_desc_get(struct coh901318_chan *cohc)
                  * TODO: alloc a pile of descs instead of just one,
                  * avoid many small allocations.
                  */
-                desc = kmalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
+                desc = kzalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
                 if (desc == NULL)
                         goto out;
                 INIT_LIST_HEAD(&desc->node);
+                dma_async_tx_descriptor_init(&desc->desc, &cohc->chan);
         } else {
                 /* Reuse an old desc. */
                 desc = list_first_entry(&cohc->free,
                                         struct coh901318_desc,
                                         node);
                 list_del(&desc->node);
+                /* Initialize it a bit so it's not insane */
+                desc->sg = NULL;
+                desc->sg_len = 0;
+                desc->desc.callback = NULL;
+                desc->desc.callback_param = NULL;
         }
 
 out:
@@ -364,10 +366,6 @@ static void
 coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc)
 {
         list_add_tail(&desc->node, &cohc->active);
-
-        BUG_ON(cohc->pending_irqs != 0);
-
-        cohc->pending_irqs = desc->pending_irqs;
 }
 
 static struct coh901318_desc *
@@ -592,6 +590,10 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
         return cohd_que;
 }
 
+/*
+ * This tasklet is called from the interrupt handler to
+ * handle each descriptor (DMA job) that is sent to a channel.
+ */
 static void dma_tasklet(unsigned long data)
 {
         struct coh901318_chan *cohc = (struct coh901318_chan *) data;
@@ -600,57 +602,58 @@ static void dma_tasklet(unsigned long data)
         dma_async_tx_callback callback;
         void *callback_param;
 
+        dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d"
+                 " nbr_active_done %ld\n", __func__,
+                 cohc->id, cohc->nbr_active_done);
+
         spin_lock_irqsave(&cohc->lock, flags);
 
-        /* get first active entry from list */
+        /* get first active descriptor entry from list */
         cohd_fin = coh901318_first_active_get(cohc);
 
-        BUG_ON(cohd_fin->pending_irqs == 0);
-
         if (cohd_fin == NULL)
                 goto err;
 
-        cohd_fin->pending_irqs--;
-        cohc->completed = cohd_fin->desc.cookie;
-
-        BUG_ON(cohc->nbr_active_done && cohd_fin == NULL);
-
-        if (cohc->nbr_active_done == 0)
-                return;
+        /* locate callback to client */
+        callback = cohd_fin->desc.callback;
+        callback_param = cohd_fin->desc.callback_param;
 
-        if (!cohd_fin->pending_irqs) {
-                /* release the lli allocation*/
-                coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
-        }
+        /* sign this job as completed on the channel */
+        cohc->completed = cohd_fin->desc.cookie;
 
-        dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d pending_irqs %d"
-                 " nbr_active_done %ld\n", __func__,
-                 cohc->id, cohc->pending_irqs, cohc->nbr_active_done);
+        /* release the lli allocation and remove the descriptor */
+        coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
 
-        /* callback to client */
-        callback = cohd_fin->desc.callback;
-        callback_param = cohd_fin->desc.callback_param;
+        /* return desc to free-list */
+        coh901318_desc_remove(cohd_fin);
+        coh901318_desc_free(cohc, cohd_fin);
 
-        if (!cohd_fin->pending_irqs) {
-                coh901318_desc_remove(cohd_fin);
+        spin_unlock_irqrestore(&cohc->lock, flags);
 
-                /* return desc to free-list */
-                coh901318_desc_free(cohc, cohd_fin);
-        }
+        /* Call the callback when we're done */
+        if (callback)
+                callback(callback_param);
 
-        if (cohc->nbr_active_done)
-                cohc->nbr_active_done--;
+        spin_lock_irqsave(&cohc->lock, flags);
 
+        /*
+         * If another interrupt fired while the tasklet was scheduling,
+         * we don't get called twice, so we have this number of active
+         * counter that keep track of the number of IRQs expected to
+         * be handled for this channel. If there happen to be more than
+         * one IRQ to be ack:ed, we simply schedule this tasklet again.
+         */
+        cohc->nbr_active_done--;
         if (cohc->nbr_active_done) {
+                dev_dbg(COHC_2_DEV(cohc), "scheduling tasklet again, new IRQs "
+                        "came in while we were scheduling this tasklet\n");
                 if (cohc_chan_conf(cohc)->priority_high)
                         tasklet_hi_schedule(&cohc->tasklet);
                 else
                         tasklet_schedule(&cohc->tasklet);
         }
-        spin_unlock_irqrestore(&cohc->lock, flags);
 
-        if (callback)
-                callback(callback_param);
+        spin_unlock_irqrestore(&cohc->lock, flags);
 
         return;
 
@@ -669,16 +672,17 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
         if (!cohc->allocated)
                 return;
 
-        BUG_ON(cohc->pending_irqs == 0);
+        spin_lock(&cohc->lock);
 
-        cohc->pending_irqs--;
         cohc->nbr_active_done++;
 
-        if (cohc->pending_irqs == 0 && coh901318_queue_start(cohc) == NULL)
+        if (coh901318_queue_start(cohc) == NULL)
                 cohc->busy = 0;
 
         BUG_ON(list_empty(&cohc->active));
 
+        spin_unlock(&cohc->lock);
+
         if (cohc_chan_conf(cohc)->priority_high)
                 tasklet_hi_schedule(&cohc->tasklet);
         else
@@ -872,6 +876,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
         struct coh901318_chan *cohc = to_coh901318_chan(chan);
         int lli_len;
         u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
+        int ret;
 
         spin_lock_irqsave(&cohc->lock, flg);
 
@@ -892,22 +897,19 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
         if (data == NULL)
                 goto err;
 
-        cohd = coh901318_desc_get(cohc);
-        cohd->sg = NULL;
-        cohd->sg_len = 0;
-        cohd->data = data;
-
-        cohd->pending_irqs =
-                coh901318_lli_fill_memcpy(
-                        &cohc->base->pool, data, src, size, dest,
-                        cohc_chan_param(cohc)->ctrl_lli_chained,
-                        ctrl_last);
-        cohd->flags = flags;
+        ret = coh901318_lli_fill_memcpy(
+                &cohc->base->pool, data, src, size, dest,
+                cohc_chan_param(cohc)->ctrl_lli_chained,
+                ctrl_last);
+        if (ret)
+                goto err;
 
         COH_DBG(coh901318_list_print(cohc, data));
 
-        dma_async_tx_descriptor_init(&cohd->desc, chan);
-
+        /* Pick a descriptor to handle this transfer */
+        cohd = coh901318_desc_get(cohc);
+        cohd->data = data;
+        cohd->flags = flags;
         cohd->desc.tx_submit = coh901318_tx_submit;
 
         spin_unlock_irqrestore(&cohc->lock, flg);
@@ -926,6 +928,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
         struct coh901318_chan *cohc = to_coh901318_chan(chan);
         struct coh901318_lli *data;
         struct coh901318_desc *cohd;
+        const struct coh901318_params *params;
         struct scatterlist *sg;
         int len = 0;
         int size;
@@ -933,7 +936,9 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
         u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained;
         u32 ctrl = cohc_chan_param(cohc)->ctrl_lli;
         u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
+        u32 config;
         unsigned long flg;
+        int ret;
 
         if (!sgl)
                 goto out;
@@ -949,15 +954,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
         /* Trigger interrupt after last lli */
         ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
 
-        cohd = coh901318_desc_get(cohc);
-        cohd->sg = NULL;
-        cohd->sg_len = 0;
-        cohd->dir = direction;
+        params = cohc_chan_param(cohc);
+        config = params->config;
 
         if (direction == DMA_TO_DEVICE) {
                 u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
                         COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
 
+                config |= COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY;
                 ctrl_chained |= tx_flags;
                 ctrl_last |= tx_flags;
                 ctrl |= tx_flags;
@@ -965,16 +969,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                 u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
                         COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
 
+                config |= COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY;
                 ctrl_chained |= rx_flags;
                 ctrl_last |= rx_flags;
                 ctrl |= rx_flags;
         } else
                 goto err_direction;
 
-        dma_async_tx_descriptor_init(&cohd->desc, chan);
-
-        cohd->desc.tx_submit = coh901318_tx_submit;
-
+        coh901318_set_conf(cohc, config);
 
         /* The dma only supports transmitting packages up to
          * MAX_DMA_PACKET_SIZE. Calculate to total number of
@@ -996,32 +998,37 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                 len += factor;
         }
 
+        pr_debug("Allocate %d lli:s for this transfer\n", len);
         data = coh901318_lli_alloc(&cohc->base->pool, len);
 
         if (data == NULL)
                 goto err_dma_alloc;
 
         /* initiate allocated data list */
-        cohd->pending_irqs =
-                coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
-                                      cohc_dev_addr(cohc),
-                                      ctrl_chained,
-                                      ctrl,
-                                      ctrl_last,
-                                      direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
-        cohd->data = data;
-
-        cohd->flags = flags;
+        ret = coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
+                                    cohc_dev_addr(cohc),
+                                    ctrl_chained,
+                                    ctrl,
+                                    ctrl_last,
+                                    direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
+        if (ret)
+                goto err_lli_fill;
 
         COH_DBG(coh901318_list_print(cohc, data));
 
+        /* Pick a descriptor to handle this transfer */
+        cohd = coh901318_desc_get(cohc);
+        cohd->dir = direction;
+        cohd->flags = flags;
+        cohd->desc.tx_submit = coh901318_tx_submit;
+        cohd->data = data;
+
         spin_unlock_irqrestore(&cohc->lock, flg);
 
         return &cohd->desc;
+ err_lli_fill:
 err_dma_alloc:
 err_direction:
-        coh901318_desc_remove(cohd);
-        coh901318_desc_free(cohc, cohd);
         spin_unlock_irqrestore(&cohc->lock, flg);
 out:
         return NULL;
@@ -1094,9 +1101,8 @@ coh901318_terminate_all(struct dma_chan *chan)
                 /* release the lli allocation*/
                 coh901318_lli_free(&cohc->base->pool, &cohd->data);
 
-                coh901318_desc_remove(cohd);
-
                 /* return desc to free-list */
+                coh901318_desc_remove(cohd);
                 coh901318_desc_free(cohc, cohd);
         }
 
@@ -1104,16 +1110,14 @@ coh901318_terminate_all(struct dma_chan *chan)
                 /* release the lli allocation*/
                 coh901318_lli_free(&cohc->base->pool, &cohd->data);
 
-                coh901318_desc_remove(cohd);
-
                 /* return desc to free-list */
+                coh901318_desc_remove(cohd);
                 coh901318_desc_free(cohc, cohd);
         }
 
 
         cohc->nbr_active_done = 0;
         cohc->busy = 0;
-        cohc->pending_irqs = 0;
 
         spin_unlock_irqrestore(&cohc->lock, flags);
 }
@@ -1140,7 +1144,6 @@ void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
 
                 spin_lock_init(&cohc->lock);
 
-                cohc->pending_irqs = 0;
                 cohc->nbr_active_done = 0;
                 cohc->busy = 0;
                 INIT_LIST_HEAD(&cohc->free);
@@ -1256,12 +1259,17 @@ static int __init coh901318_probe(struct platform_device *pdev)
         base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
         base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
         base->dma_memcpy.dev = &pdev->dev;
+        /*
+         * This controller can only access address at even 32bit boundaries,
+         * i.e. 2^2
+         */
+        base->dma_memcpy.copy_align = 2;
         err = dma_async_device_register(&base->dma_memcpy);
 
         if (err)
                 goto err_register_memcpy;
 
-        dev_dbg(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
+        dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
                 (u32) base->virtbase);
 
         return err;
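For orientation, a minimal, hedged sketch (not part of the patch) of how a slave client of this era's dmaengine API would drive the paths reworked above: device_prep_slave_sg(), tx_submit(), issue_pending(), and the completion callback that dma_tasklet() now invokes with the channel lock dropped. The client-side names my_done() and my_start_rx() are invented for the example; exact client helpers differ between kernel versions.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>
#include <linux/errno.h>

/* Invented example callback: runs from the driver tasklet via desc.callback */
static void my_done(void *param)
{
        struct completion *done = param;

        complete(done);
}

/* Invented example: queue an RX scatterlist on a slave channel */
static int my_start_rx(struct dma_chan *chan, struct scatterlist *sgl,
                       unsigned int sg_len, struct completion *done)
{
        struct dma_async_tx_descriptor *tx;

        /* ends up in the driver's prep hook, e.g. coh901318_prep_slave_sg() */
        tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
                                                DMA_FROM_DEVICE,
                                                DMA_PREP_INTERRUPT);
        if (!tx)
                return -ENOMEM;

        tx->callback = my_done;                 /* read back by the driver's tasklet */
        tx->callback_param = done;

        tx->tx_submit(tx);                              /* e.g. coh901318_tx_submit() */
        chan->device->device_issue_pending(chan);       /* kick the transfer */
        return 0;
}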
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index f5120f238a4d..71d58c1a1e86 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -74,6 +74,8 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
 
         lli = head;
         lli->phy_this = phy;
+        lli->link_addr = 0x00000000;
+        lli->virt_link_addr = 0x00000000U;
 
         for (i = 1; i < len; i++) {
                 lli_prev = lli;
@@ -85,13 +87,13 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
 
                 DEBUGFS_POOL_COUNTER_ADD(pool, 1);
                 lli->phy_this = phy;
+                lli->link_addr = 0x00000000;
+                lli->virt_link_addr = 0x00000000U;
 
                 lli_prev->link_addr = phy;
                 lli_prev->virt_link_addr = lli;
         }
 
-        lli->link_addr = 0x00000000U;
-
         spin_unlock(&pool->lock);
 
         return head;
@@ -166,8 +168,7 @@ coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
         lli->src_addr = src;
         lli->dst_addr = dst;
 
-        /* One irq per single transfer */
-        return 1;
+        return 0;
 }
 
 int
@@ -223,8 +224,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
         lli->src_addr = src;
         lli->dst_addr = dst;
 
-        /* One irq per single transfer */
-        return 1;
+        return 0;
 }
 
 int
@@ -240,7 +240,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
         u32 ctrl_sg;
         dma_addr_t src = 0;
         dma_addr_t dst = 0;
-        int nbr_of_irq = 0;
         u32 bytes_to_transfer;
         u32 elem_size;
 
@@ -269,15 +268,12 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
                 ctrl_sg = ctrl ? ctrl : ctrl_last;
 
 
-                if ((ctrl_sg & ctrl_irq_mask))
-                        nbr_of_irq++;
-
                 if (dir == DMA_TO_DEVICE)
                         /* increment source address */
-                        src = sg_dma_address(sg);
+                        src = sg_phys(sg);
                 else
                         /* increment destination address */
-                        dst = sg_dma_address(sg);
+                        dst = sg_phys(sg);
 
                 bytes_to_transfer = sg_dma_len(sg);
 
@@ -310,8 +306,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
         }
         spin_unlock(&pool->lock);
 
-        /* There can be many IRQs per sg transfer */
-        return nbr_of_irq;
+        return 0;
 err:
         spin_unlock(&pool->lock);
         return -EINVAL;
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 6f51a0a7a8bb..e7a3230fb7d5 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -826,6 +826,7 @@ void dma_async_device_unregister(struct dma_device *device)
                 chan->dev->chan = NULL;
                 mutex_unlock(&dma_list_mutex);
                 device_unregister(&chan->dev->device);
+                free_percpu(chan->local);
         }
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
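The single added line in dmaengine.c is a leak fix: dma_async_device_register() sets up chan->local with alloc_percpu(), so the unregister path has to hand that per-cpu memory back. A self-contained sketch of the general pairing (not the dmaengine code itself), using a made-up struct my_stats and module wrapper:

#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/errno.h>

struct my_stats {
        unsigned long bytes_copied;
};

static struct my_stats __percpu *stats;

static int __init my_init(void)
{
        stats = alloc_percpu(struct my_stats);  /* one zeroed instance per CPU */
        return stats ? 0 : -ENOMEM;
}

static void __exit my_exit(void)
{
        free_percpu(stats);     /* must mirror alloc_percpu(), otherwise the memory leaks */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");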
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 8e409fb50fc0..6fa55fe3dd24 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -467,7 +467,7 @@ err_srcs:
 
         if (iterations > 0)
                 while (!kthread_should_stop()) {
-                        DECLARE_WAIT_QUEUE_HEAD(wait_dmatest_exit);
+                        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
                         interruptible_sleep_on(&wait_dmatest_exit);
                 }
 
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 25a3c72b2941..1ed5d66d7dca 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -241,7 +241,7 @@ int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
         if (is_ioat_active(status) || is_ioat_idle(status))
                 ioat_suspend(chan);
         while (is_ioat_active(status) || is_ioat_idle(status)) {
-                if (end && time_after(jiffies, end)) {
+                if (tmo && time_after(jiffies, end)) {
                         err = -ETIMEDOUT;
                         break;
                 }
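The ioat hunk corrects a timeout guard: judging from the tmo parameter, end is a deadline in the usual jiffies + tmo pattern and is therefore essentially never zero, while tmo == 0 is the "wait forever" case, so the guard has to test tmo. A standalone sketch of that pattern (not the driver code), with hw_busy() as a made-up stand-in for the real channel-status check:

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

static bool hw_busy(void)
{
        return false;   /* stand-in: poll the real hardware status register here */
}

static int wait_until_idle(unsigned long tmo)
{
        unsigned long end = jiffies + tmo;      /* not a real deadline when tmo == 0 */

        while (hw_busy()) {
                /* tmo == 0 means "no timeout", so gate on tmo rather than on end */
                if (tmo && time_after(jiffies, end))
                        return -ETIMEDOUT;
        }
        return 0;
}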
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 1c518f1cc49b..2a446397c884 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -748,12 +748,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
  * @buffer_n:  buffer number to update.
  *             0 or 1 are the only valid values.
  * @phyaddr:   buffer physical address.
- * @return:    Returns 0 on success or negative error code on failure. This
- *             function will fail if the buffer is set to ready.
  */
 /* Called under spin_lock(_irqsave)(&ichan->lock) */
-static int ipu_update_channel_buffer(struct idmac_channel *ichan,
+static void ipu_update_channel_buffer(struct idmac_channel *ichan,
                                       int buffer_n, dma_addr_t phyaddr)
 {
         enum ipu_channel channel = ichan->dma_chan.chan_id;
         uint32_t reg;
@@ -793,8 +791,6 @@ static int ipu_update_channel_buffer(struct idmac_channel *ichan,
         }
 
         spin_unlock_irqrestore(&ipu_data.lock, flags);
-
-        return 0;
 }
 
 /* Called under spin_lock_irqsave(&ichan->lock) */
@@ -803,7 +799,6 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
 {
         unsigned int chan_id = ichan->dma_chan.chan_id;
         struct device *dev = &ichan->dma_chan.dev->device;
-        int ret;
 
         if (async_tx_test_ack(&desc->txd))
                 return -EINTR;
@@ -814,14 +809,7 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
          * could make it conditional on status >= IPU_CHANNEL_ENABLED, but
          * doing it again shouldn't hurt either.
          */
-        ret = ipu_update_channel_buffer(ichan, buf_idx,
-                                        sg_dma_address(sg));
-
-        if (ret < 0) {
-                dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n",
-                        sg, chan_id, buf_idx);
-                return ret;
-        }
+        ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg));
 
         ipu_select_buffer(chan_id, buf_idx);
         dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
@@ -1366,10 +1354,11 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 
         if (likely(sgnew) &&
             ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
-                callback = desc->txd.callback;
-                callback_param = desc->txd.callback_param;
+                callback = descnew->txd.callback;
+                callback_param = descnew->txd.callback_param;
                 spin_unlock(&ichan->lock);
-                callback(callback_param);
+                if (callback)
+                        callback(callback_param);
                 spin_lock(&ichan->lock);
         }
 