Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig          |   23
-rw-r--r--  drivers/dma/Makefile         |    3
-rw-r--r--  drivers/dma/at_hdmac.c       |   35
-rw-r--r--  drivers/dma/coh901318.c      |  263
-rw-r--r--  drivers/dma/dmaengine.c      |   22
-rw-r--r--  drivers/dma/dw_dmac.c        |   24
-rw-r--r--  drivers/dma/fsldma.c         |   45
-rw-r--r--  drivers/dma/ioat/dma.c       |   12
-rw-r--r--  drivers/dma/ioat/dma.h       |   19
-rw-r--r--  drivers/dma/ioat/dma_v2.c    |  186
-rw-r--r--  drivers/dma/ioat/dma_v2.h    |   33
-rw-r--r--  drivers/dma/ioat/dma_v3.c    |  143
-rw-r--r--  drivers/dma/ioat/pci.c       |    7
-rw-r--r--  drivers/dma/iop-adma.c       |   39
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c  |   34
-rw-r--r--  drivers/dma/mpc512x_dma.c    |   25
-rw-r--r--  drivers/dma/mv_xor.c         |   25
-rw-r--r--  drivers/dma/pl330.c          |  866
-rw-r--r--  drivers/dma/ppc4xx/adma.c    |   25
-rw-r--r--  drivers/dma/shdma.c          |   32
-rw-r--r--  drivers/dma/ste_dma40.c      | 2657
-rw-r--r--  drivers/dma/ste_dma40_ll.c   |  454
-rw-r--r--  drivers/dma/ste_dma40_ll.h   |  354
-rw-r--r--  drivers/dma/timb_dma.c       |  860
-rw-r--r--  drivers/dma/txx9dmac.c       |   23
25 files changed, 5744 insertions, 465 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index c27f80e5d531..9e01e96fee94 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -141,6 +141,13 @@ config COH901318
141 help 141 help
142 Enable support for ST-Ericsson COH 901 318 DMA. 142 Enable support for ST-Ericsson COH 901 318 DMA.
143 143
144config STE_DMA40
145 bool "ST-Ericsson DMA40 support"
146 depends on ARCH_U8500
147 select DMA_ENGINE
148 help
149 Support for ST-Ericsson DMA40 controller
150
144config AMCC_PPC440SPE_ADMA 151config AMCC_PPC440SPE_ADMA
145 tristate "AMCC PPC440SPe ADMA support" 152 tristate "AMCC PPC440SPe ADMA support"
146 depends on 440SPe || 440SP 153 depends on 440SPe || 440SP
@@ -149,9 +156,25 @@ config AMCC_PPC440SPE_ADMA
149 help 156 help
150 Enable support for the AMCC PPC440SPe RAID engines. 157 Enable support for the AMCC PPC440SPe RAID engines.
151 158
159config TIMB_DMA
160 tristate "Timberdale FPGA DMA support"
161 depends on MFD_TIMBERDALE || HAS_IOMEM
162 select DMA_ENGINE
163 help
164 Enable support for the Timberdale FPGA DMA engine.
165
152config ARCH_HAS_ASYNC_TX_FIND_CHANNEL 166config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
153 bool 167 bool
154 168
169config PL330_DMA
170 tristate "DMA API Driver for PL330"
171 select DMA_ENGINE
172 depends on PL330
173 help
174 Select if your platform has one or more PL330 DMACs.
175 You need to provide platform specific settings via
176 platform_data for a dma-pl330 device.
177
155config DMA_ENGINE 178config DMA_ENGINE
156 bool 179 bool
157 180
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 22bba3d5e2b6..0fe5ebbfda5d 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -20,3 +20,6 @@ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
20obj-$(CONFIG_SH_DMAE) += shdma.o 20obj-$(CONFIG_SH_DMAE) += shdma.o
21obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o 21obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
22obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ 22obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
23obj-$(CONFIG_TIMB_DMA) += timb_dma.o
24obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
25obj-$(CONFIG_PL330_DMA) += pl330.o
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 308ab320e20b..e88076022a7a 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -760,13 +760,18 @@ err_desc_get:
760 return NULL; 760 return NULL;
761} 761}
762 762
763static void atc_terminate_all(struct dma_chan *chan) 763static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
764 unsigned long arg)
764{ 765{
765 struct at_dma_chan *atchan = to_at_dma_chan(chan); 766 struct at_dma_chan *atchan = to_at_dma_chan(chan);
766 struct at_dma *atdma = to_at_dma(chan->device); 767 struct at_dma *atdma = to_at_dma(chan->device);
767 struct at_desc *desc, *_desc; 768 struct at_desc *desc, *_desc;
768 LIST_HEAD(list); 769 LIST_HEAD(list);
769 770
771 /* Only supports DMA_TERMINATE_ALL */
772 if (cmd != DMA_TERMINATE_ALL)
773 return -ENXIO;
774
770 /* 775 /*
771 * This is only called when something went wrong elsewhere, so 776 * This is only called when something went wrong elsewhere, so
772 * we don't really care about the data. Just disable the 777 * we don't really care about the data. Just disable the
@@ -790,32 +795,30 @@ static void atc_terminate_all(struct dma_chan *chan)
790 /* Flush all pending and queued descriptors */ 795 /* Flush all pending and queued descriptors */
791 list_for_each_entry_safe(desc, _desc, &list, desc_node) 796 list_for_each_entry_safe(desc, _desc, &list, desc_node)
792 atc_chain_complete(atchan, desc); 797 atc_chain_complete(atchan, desc);
798
799 return 0;
793} 800}
794 801
795/** 802/**
796 * atc_is_tx_complete - poll for transaction completion 803 * atc_tx_status - poll for transaction completion
797 * @chan: DMA channel 804 * @chan: DMA channel
798 * @cookie: transaction identifier to check status of 805 * @cookie: transaction identifier to check status of
 799 * @done: if not %NULL, updated with last completed transaction 806 * @txstate: if not %NULL, updated with transaction state
800 * @used: if not %NULL, updated with last used transaction
801 * 807 *
 802 * If @done and @used are passed in, upon return they reflect the driver 808 * If @txstate is passed in, upon return it reflects the driver
803 * internal state and can be used with dma_async_is_complete() to check 809 * internal state and can be used with dma_async_is_complete() to check
804 * the status of multiple cookies without re-checking hardware state. 810 * the status of multiple cookies without re-checking hardware state.
805 */ 811 */
806static enum dma_status 812static enum dma_status
807atc_is_tx_complete(struct dma_chan *chan, 813atc_tx_status(struct dma_chan *chan,
808 dma_cookie_t cookie, 814 dma_cookie_t cookie,
809 dma_cookie_t *done, dma_cookie_t *used) 815 struct dma_tx_state *txstate)
810{ 816{
811 struct at_dma_chan *atchan = to_at_dma_chan(chan); 817 struct at_dma_chan *atchan = to_at_dma_chan(chan);
812 dma_cookie_t last_used; 818 dma_cookie_t last_used;
813 dma_cookie_t last_complete; 819 dma_cookie_t last_complete;
814 enum dma_status ret; 820 enum dma_status ret;
815 821
816 dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
817 cookie, done ? *done : 0, used ? *used : 0);
818
819 spin_lock_bh(&atchan->lock); 822 spin_lock_bh(&atchan->lock);
820 823
821 last_complete = atchan->completed_cookie; 824 last_complete = atchan->completed_cookie;
@@ -833,10 +836,10 @@ atc_is_tx_complete(struct dma_chan *chan,
833 836
834 spin_unlock_bh(&atchan->lock); 837 spin_unlock_bh(&atchan->lock);
835 838
836 if (done) 839 dma_set_tx_state(txstate, last_complete, last_used, 0);
837 *done = last_complete; 840 dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n",
838 if (used) 841 cookie, last_complete ? last_complete : 0,
839 *used = last_used; 842 last_used ? last_used : 0);
840 843
841 return ret; 844 return ret;
842} 845}
@@ -1082,7 +1085,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
1082 /* set base routines */ 1085 /* set base routines */
1083 atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources; 1086 atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
1084 atdma->dma_common.device_free_chan_resources = atc_free_chan_resources; 1087 atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
1085 atdma->dma_common.device_is_tx_complete = atc_is_tx_complete; 1088 atdma->dma_common.device_tx_status = atc_tx_status;
1086 atdma->dma_common.device_issue_pending = atc_issue_pending; 1089 atdma->dma_common.device_issue_pending = atc_issue_pending;
1087 atdma->dma_common.dev = &pdev->dev; 1090 atdma->dma_common.dev = &pdev->dev;
1088 1091
@@ -1092,7 +1095,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
1092 1095
1093 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { 1096 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
1094 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; 1097 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
1095 atdma->dma_common.device_terminate_all = atc_terminate_all; 1098 atdma->dma_common.device_control = atc_control;
1096 } 1099 }
1097 1100
1098 dma_writel(atdma, EN, AT_DMA_ENABLE); 1101 dma_writel(atdma, EN, AT_DMA_ENABLE);
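
Note: the at_hdmac conversion above is the pattern every driver in this series follows for status reporting. As a reference, here is a minimal sketch of a device_tx_status() implementation, assuming a hypothetical driver-private channel (my_chan, my_tx_status and the completed_cookie field are placeholders, not code from this patch):

	#include <linux/dmaengine.h>

	struct my_chan {
		struct dma_chan chan;
		dma_cookie_t completed_cookie;	/* last cookie seen complete */
	};

	static enum dma_status my_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
	{
		struct my_chan *mc = container_of(chan, struct my_chan, chan);
		dma_cookie_t last_complete = mc->completed_cookie;
		dma_cookie_t last_used = chan->cookie;

		/* One call replaces the old done/used output pointers; a
		 * residue of 0 is reported by drivers that cannot compute it. */
		dma_set_tx_state(txstate, last_complete, last_used, 0);

		return dma_async_is_complete(cookie, last_complete, last_used);
	}

Drivers that can compute a byte residue (coh901318 below) pass it as the last argument instead of 0.
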
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 1656fdcdb6c2..a724e6be1b4d 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -37,7 +37,7 @@ struct coh901318_desc {
37 struct list_head node; 37 struct list_head node;
38 struct scatterlist *sg; 38 struct scatterlist *sg;
39 unsigned int sg_len; 39 unsigned int sg_len;
40 struct coh901318_lli *data; 40 struct coh901318_lli *lli;
41 enum dma_data_direction dir; 41 enum dma_data_direction dir;
42 unsigned long flags; 42 unsigned long flags;
43}; 43};
@@ -283,7 +283,7 @@ static int coh901318_start(struct coh901318_chan *cohc)
283} 283}
284 284
285static int coh901318_prep_linked_list(struct coh901318_chan *cohc, 285static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
286 struct coh901318_lli *data) 286 struct coh901318_lli *lli)
287{ 287{
288 int channel = cohc->id; 288 int channel = cohc->id;
289 void __iomem *virtbase = cohc->base->virtbase; 289 void __iomem *virtbase = cohc->base->virtbase;
@@ -292,18 +292,18 @@ static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
292 COH901318_CX_STAT_SPACING*channel) & 292 COH901318_CX_STAT_SPACING*channel) &
293 COH901318_CX_STAT_ACTIVE); 293 COH901318_CX_STAT_ACTIVE);
294 294
295 writel(data->src_addr, 295 writel(lli->src_addr,
296 virtbase + COH901318_CX_SRC_ADDR + 296 virtbase + COH901318_CX_SRC_ADDR +
297 COH901318_CX_SRC_ADDR_SPACING * channel); 297 COH901318_CX_SRC_ADDR_SPACING * channel);
298 298
299 writel(data->dst_addr, virtbase + 299 writel(lli->dst_addr, virtbase +
300 COH901318_CX_DST_ADDR + 300 COH901318_CX_DST_ADDR +
301 COH901318_CX_DST_ADDR_SPACING * channel); 301 COH901318_CX_DST_ADDR_SPACING * channel);
302 302
303 writel(data->link_addr, virtbase + COH901318_CX_LNK_ADDR + 303 writel(lli->link_addr, virtbase + COH901318_CX_LNK_ADDR +
304 COH901318_CX_LNK_ADDR_SPACING * channel); 304 COH901318_CX_LNK_ADDR_SPACING * channel);
305 305
306 writel(data->control, virtbase + COH901318_CX_CTRL + 306 writel(lli->control, virtbase + COH901318_CX_CTRL +
307 COH901318_CX_CTRL_SPACING * channel); 307 COH901318_CX_CTRL_SPACING * channel);
308 308
309 return 0; 309 return 0;
@@ -408,33 +408,107 @@ coh901318_first_queued(struct coh901318_chan *cohc)
408 return d; 408 return d;
409} 409}
410 410
411static inline u32 coh901318_get_bytes_in_lli(struct coh901318_lli *in_lli)
412{
413 struct coh901318_lli *lli = in_lli;
414 u32 bytes = 0;
415
416 while (lli) {
417 bytes += lli->control & COH901318_CX_CTRL_TC_VALUE_MASK;
418 lli = lli->virt_link_addr;
419 }
420 return bytes;
421}
422
411/* 423/*
412 * DMA start/stop controls 424 * Get the number of bytes left to transfer on this channel,
 425 * it is unwise to call this before stopping the channel if an
 426 * exact figure is needed, but for a rough guess you can still
 427 * call it.
413 */ 428 */
414u32 coh901318_get_bytes_left(struct dma_chan *chan) 429static u32 coh901318_get_bytes_left(struct dma_chan *chan)
415{ 430{
416 unsigned long flags;
417 u32 ret;
418 struct coh901318_chan *cohc = to_coh901318_chan(chan); 431 struct coh901318_chan *cohc = to_coh901318_chan(chan);
432 struct coh901318_desc *cohd;
433 struct list_head *pos;
434 unsigned long flags;
435 u32 left = 0;
436 int i = 0;
419 437
420 spin_lock_irqsave(&cohc->lock, flags); 438 spin_lock_irqsave(&cohc->lock, flags);
421 439
422 /* Read transfer count value */ 440 /*
423 ret = readl(cohc->base->virtbase + 441 * If there are many queued jobs, we iterate and add the
424 COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING * 442 * size of them all. We take a special look on the first
425 cohc->id) & COH901318_CX_CTRL_TC_VALUE_MASK; 443 * job though, since it is probably active.
444 */
445 list_for_each(pos, &cohc->active) {
446 /*
447 * The first job in the list will be working on the
448 * hardware. The job can be stopped but still active,
449 * so that the transfer counter is somewhere inside
450 * the buffer.
451 */
452 cohd = list_entry(pos, struct coh901318_desc, node);
453
454 if (i == 0) {
455 struct coh901318_lli *lli;
456 dma_addr_t ladd;
457
458 /* Read current transfer count value */
459 left = readl(cohc->base->virtbase +
460 COH901318_CX_CTRL +
461 COH901318_CX_CTRL_SPACING * cohc->id) &
462 COH901318_CX_CTRL_TC_VALUE_MASK;
463
464 /* See if the transfer is linked... */
465 ladd = readl(cohc->base->virtbase +
466 COH901318_CX_LNK_ADDR +
467 COH901318_CX_LNK_ADDR_SPACING *
468 cohc->id) &
469 ~COH901318_CX_LNK_LINK_IMMEDIATE;
470 /* Single transaction */
471 if (!ladd)
472 continue;
473
474 /*
475 * Linked transaction, follow the lli, find the
476 * currently processing lli, and proceed to the next
477 */
478 lli = cohd->lli;
479 while (lli && lli->link_addr != ladd)
480 lli = lli->virt_link_addr;
481
482 if (lli)
483 lli = lli->virt_link_addr;
484
485 /*
486 * Follow remaining lli links around to count the total
487 * number of bytes left
488 */
489 left += coh901318_get_bytes_in_lli(lli);
490 } else {
491 left += coh901318_get_bytes_in_lli(cohd->lli);
492 }
493 i++;
494 }
495
496 /* Also count bytes in the queued jobs */
497 list_for_each(pos, &cohc->queue) {
498 cohd = list_entry(pos, struct coh901318_desc, node);
499 left += coh901318_get_bytes_in_lli(cohd->lli);
500 }
426 501
427 spin_unlock_irqrestore(&cohc->lock, flags); 502 spin_unlock_irqrestore(&cohc->lock, flags);
428 503
429 return ret; 504 return left;
430} 505}
431EXPORT_SYMBOL(coh901318_get_bytes_left);
432
433 506
434/* Stops a transfer without losing data. Enables power save. 507/*
435 Use this function in conjunction with coh901318_continue(..) 508 * Pauses a transfer without losing data. Enables power save.
436*/ 509 * Use this function in conjunction with coh901318_resume.
437void coh901318_stop(struct dma_chan *chan) 510 */
511static void coh901318_pause(struct dma_chan *chan)
438{ 512{
439 u32 val; 513 u32 val;
440 unsigned long flags; 514 unsigned long flags;
@@ -475,12 +549,11 @@ void coh901318_stop(struct dma_chan *chan)
475 549
476 spin_unlock_irqrestore(&cohc->lock, flags); 550 spin_unlock_irqrestore(&cohc->lock, flags);
477} 551}
478EXPORT_SYMBOL(coh901318_stop);
479 552
 480/* Continues a transfer that has been stopped via 300_dma_stop(..). 553/* Resumes a transfer that has been paused via coh901318_pause().
481 Power save is handled. 554 Power save is handled.
482*/ 555*/
483void coh901318_continue(struct dma_chan *chan) 556static void coh901318_resume(struct dma_chan *chan)
484{ 557{
485 u32 val; 558 u32 val;
486 unsigned long flags; 559 unsigned long flags;
@@ -506,7 +579,6 @@ void coh901318_continue(struct dma_chan *chan)
506 579
507 spin_unlock_irqrestore(&cohc->lock, flags); 580 spin_unlock_irqrestore(&cohc->lock, flags);
508} 581}
509EXPORT_SYMBOL(coh901318_continue);
510 582
511bool coh901318_filter_id(struct dma_chan *chan, void *chan_id) 583bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
512{ 584{
@@ -565,29 +637,30 @@ static int coh901318_config(struct coh901318_chan *cohc,
565 */ 637 */
566static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc) 638static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
567{ 639{
568 struct coh901318_desc *cohd_que; 640 struct coh901318_desc *cohd;
569 641
570 /* start queued jobs, if any 642 /*
643 * start queued jobs, if any
571 * TODO: transmit all queued jobs in one go 644 * TODO: transmit all queued jobs in one go
572 */ 645 */
573 cohd_que = coh901318_first_queued(cohc); 646 cohd = coh901318_first_queued(cohc);
574 647
575 if (cohd_que != NULL) { 648 if (cohd != NULL) {
576 /* Remove from queue */ 649 /* Remove from queue */
577 coh901318_desc_remove(cohd_que); 650 coh901318_desc_remove(cohd);
578 /* initiate DMA job */ 651 /* initiate DMA job */
579 cohc->busy = 1; 652 cohc->busy = 1;
580 653
581 coh901318_desc_submit(cohc, cohd_que); 654 coh901318_desc_submit(cohc, cohd);
582 655
583 coh901318_prep_linked_list(cohc, cohd_que->data); 656 coh901318_prep_linked_list(cohc, cohd->lli);
584 657
585 /* start dma job */ 658 /* start dma job on this channel */
586 coh901318_start(cohc); 659 coh901318_start(cohc);
587 660
588 } 661 }
589 662
590 return cohd_que; 663 return cohd;
591} 664}
592 665
593/* 666/*
@@ -622,7 +695,7 @@ static void dma_tasklet(unsigned long data)
622 cohc->completed = cohd_fin->desc.cookie; 695 cohc->completed = cohd_fin->desc.cookie;
623 696
624 /* release the lli allocation and remove the descriptor */ 697 /* release the lli allocation and remove the descriptor */
625 coh901318_lli_free(&cohc->base->pool, &cohd_fin->data); 698 coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli);
626 699
627 /* return desc to free-list */ 700 /* return desc to free-list */
628 coh901318_desc_remove(cohd_fin); 701 coh901318_desc_remove(cohd_fin);
@@ -666,23 +739,44 @@ static void dma_tasklet(unsigned long data)
666/* called from interrupt context */ 739/* called from interrupt context */
667static void dma_tc_handle(struct coh901318_chan *cohc) 740static void dma_tc_handle(struct coh901318_chan *cohc)
668{ 741{
669 BUG_ON(!cohc->allocated && (list_empty(&cohc->active) || 742 /*
670 list_empty(&cohc->queue))); 743 * If the channel is not allocated, then we shouldn't have
671 744 * any TC interrupts on it.
672 if (!cohc->allocated) 745 */
746 if (!cohc->allocated) {
747 dev_err(COHC_2_DEV(cohc), "spurious interrupt from "
748 "unallocated channel\n");
673 return; 749 return;
750 }
674 751
675 spin_lock(&cohc->lock); 752 spin_lock(&cohc->lock);
676 753
754 /*
755 * When we reach this point, at least one queue item
756 * should have been moved over from cohc->queue to
757 * cohc->active and run to completion, that is why we're
758 * getting a terminal count interrupt is it not?
759 * If you get this BUG() the most probable cause is that
760 * the individual nodes in the lli chain have IRQ enabled,
761 * so check your platform config for lli chain ctrl.
762 */
763 BUG_ON(list_empty(&cohc->active));
764
677 cohc->nbr_active_done++; 765 cohc->nbr_active_done++;
678 766
767 /*
 768 * This attempts to take a job from cohc->queue, put it
769 * into cohc->active and start it.
770 */
679 if (coh901318_queue_start(cohc) == NULL) 771 if (coh901318_queue_start(cohc) == NULL)
680 cohc->busy = 0; 772 cohc->busy = 0;
681 773
682 BUG_ON(list_empty(&cohc->active));
683
684 spin_unlock(&cohc->lock); 774 spin_unlock(&cohc->lock);
685 775
776 /*
777 * This tasklet will remove items from cohc->active
 778 * and thus terminate them.
779 */
686 if (cohc_chan_conf(cohc)->priority_high) 780 if (cohc_chan_conf(cohc)->priority_high)
687 tasklet_hi_schedule(&cohc->tasklet); 781 tasklet_hi_schedule(&cohc->tasklet);
688 else 782 else
@@ -809,6 +903,7 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
809static int coh901318_alloc_chan_resources(struct dma_chan *chan) 903static int coh901318_alloc_chan_resources(struct dma_chan *chan)
810{ 904{
811 struct coh901318_chan *cohc = to_coh901318_chan(chan); 905 struct coh901318_chan *cohc = to_coh901318_chan(chan);
906 unsigned long flags;
812 907
813 dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n", 908 dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n",
814 __func__, cohc->id); 909 __func__, cohc->id);
@@ -816,11 +911,15 @@ static int coh901318_alloc_chan_resources(struct dma_chan *chan)
816 if (chan->client_count > 1) 911 if (chan->client_count > 1)
817 return -EBUSY; 912 return -EBUSY;
818 913
914 spin_lock_irqsave(&cohc->lock, flags);
915
819 coh901318_config(cohc, NULL); 916 coh901318_config(cohc, NULL);
820 917
821 cohc->allocated = 1; 918 cohc->allocated = 1;
822 cohc->completed = chan->cookie = 1; 919 cohc->completed = chan->cookie = 1;
823 920
921 spin_unlock_irqrestore(&cohc->lock, flags);
922
824 return 1; 923 return 1;
825} 924}
826 925
@@ -843,7 +942,7 @@ coh901318_free_chan_resources(struct dma_chan *chan)
843 942
844 spin_unlock_irqrestore(&cohc->lock, flags); 943 spin_unlock_irqrestore(&cohc->lock, flags);
845 944
846 chan->device->device_terminate_all(chan); 945 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
847} 946}
848 947
849 948
@@ -870,7 +969,7 @@ static struct dma_async_tx_descriptor *
870coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 969coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
871 size_t size, unsigned long flags) 970 size_t size, unsigned long flags)
872{ 971{
873 struct coh901318_lli *data; 972 struct coh901318_lli *lli;
874 struct coh901318_desc *cohd; 973 struct coh901318_desc *cohd;
875 unsigned long flg; 974 unsigned long flg;
876 struct coh901318_chan *cohc = to_coh901318_chan(chan); 975 struct coh901318_chan *cohc = to_coh901318_chan(chan);
@@ -892,23 +991,23 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
892 if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size) 991 if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size)
893 lli_len++; 992 lli_len++;
894 993
895 data = coh901318_lli_alloc(&cohc->base->pool, lli_len); 994 lli = coh901318_lli_alloc(&cohc->base->pool, lli_len);
896 995
897 if (data == NULL) 996 if (lli == NULL)
898 goto err; 997 goto err;
899 998
900 ret = coh901318_lli_fill_memcpy( 999 ret = coh901318_lli_fill_memcpy(
901 &cohc->base->pool, data, src, size, dest, 1000 &cohc->base->pool, lli, src, size, dest,
902 cohc_chan_param(cohc)->ctrl_lli_chained, 1001 cohc_chan_param(cohc)->ctrl_lli_chained,
903 ctrl_last); 1002 ctrl_last);
904 if (ret) 1003 if (ret)
905 goto err; 1004 goto err;
906 1005
907 COH_DBG(coh901318_list_print(cohc, data)); 1006 COH_DBG(coh901318_list_print(cohc, lli));
908 1007
909 /* Pick a descriptor to handle this transfer */ 1008 /* Pick a descriptor to handle this transfer */
910 cohd = coh901318_desc_get(cohc); 1009 cohd = coh901318_desc_get(cohc);
911 cohd->data = data; 1010 cohd->lli = lli;
912 cohd->flags = flags; 1011 cohd->flags = flags;
913 cohd->desc.tx_submit = coh901318_tx_submit; 1012 cohd->desc.tx_submit = coh901318_tx_submit;
914 1013
@@ -926,7 +1025,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
926 unsigned long flags) 1025 unsigned long flags)
927{ 1026{
928 struct coh901318_chan *cohc = to_coh901318_chan(chan); 1027 struct coh901318_chan *cohc = to_coh901318_chan(chan);
929 struct coh901318_lli *data; 1028 struct coh901318_lli *lli;
930 struct coh901318_desc *cohd; 1029 struct coh901318_desc *cohd;
931 const struct coh901318_params *params; 1030 const struct coh901318_params *params;
932 struct scatterlist *sg; 1031 struct scatterlist *sg;
@@ -999,13 +1098,13 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
999 } 1098 }
1000 1099
1001 pr_debug("Allocate %d lli:s for this transfer\n", len); 1100 pr_debug("Allocate %d lli:s for this transfer\n", len);
1002 data = coh901318_lli_alloc(&cohc->base->pool, len); 1101 lli = coh901318_lli_alloc(&cohc->base->pool, len);
1003 1102
1004 if (data == NULL) 1103 if (lli == NULL)
1005 goto err_dma_alloc; 1104 goto err_dma_alloc;
1006 1105
1007 /* initiate allocated data list */ 1106 /* initiate allocated lli list */
1008 ret = coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len, 1107 ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len,
1009 cohc_dev_addr(cohc), 1108 cohc_dev_addr(cohc),
1010 ctrl_chained, 1109 ctrl_chained,
1011 ctrl, 1110 ctrl,
@@ -1014,14 +1113,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1014 if (ret) 1113 if (ret)
1015 goto err_lli_fill; 1114 goto err_lli_fill;
1016 1115
1017 COH_DBG(coh901318_list_print(cohc, data)); 1116 COH_DBG(coh901318_list_print(cohc, lli));
1018 1117
1019 /* Pick a descriptor to handle this transfer */ 1118 /* Pick a descriptor to handle this transfer */
1020 cohd = coh901318_desc_get(cohc); 1119 cohd = coh901318_desc_get(cohc);
1021 cohd->dir = direction; 1120 cohd->dir = direction;
1022 cohd->flags = flags; 1121 cohd->flags = flags;
1023 cohd->desc.tx_submit = coh901318_tx_submit; 1122 cohd->desc.tx_submit = coh901318_tx_submit;
1024 cohd->data = data; 1123 cohd->lli = lli;
1025 1124
1026 spin_unlock_irqrestore(&cohc->lock, flg); 1125 spin_unlock_irqrestore(&cohc->lock, flg);
1027 1126
@@ -1035,9 +1134,8 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1035} 1134}
1036 1135
1037static enum dma_status 1136static enum dma_status
1038coh901318_is_tx_complete(struct dma_chan *chan, 1137coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1039 dma_cookie_t cookie, dma_cookie_t *done, 1138 struct dma_tx_state *txstate)
1040 dma_cookie_t *used)
1041{ 1139{
1042 struct coh901318_chan *cohc = to_coh901318_chan(chan); 1140 struct coh901318_chan *cohc = to_coh901318_chan(chan);
1043 dma_cookie_t last_used; 1141 dma_cookie_t last_used;
@@ -1049,10 +1147,10 @@ coh901318_is_tx_complete(struct dma_chan *chan,
1049 1147
1050 ret = dma_async_is_complete(cookie, last_complete, last_used); 1148 ret = dma_async_is_complete(cookie, last_complete, last_used);
1051 1149
1052 if (done) 1150 dma_set_tx_state(txstate, last_complete, last_used,
1053 *done = last_complete; 1151 coh901318_get_bytes_left(chan));
1054 if (used) 1152 if (ret == DMA_IN_PROGRESS && cohc->stopped)
1055 *used = last_used; 1153 ret = DMA_PAUSED;
1056 1154
1057 return ret; 1155 return ret;
1058} 1156}
@@ -1065,23 +1163,42 @@ coh901318_issue_pending(struct dma_chan *chan)
1065 1163
1066 spin_lock_irqsave(&cohc->lock, flags); 1164 spin_lock_irqsave(&cohc->lock, flags);
1067 1165
1068 /* Busy means that pending jobs are already being processed */ 1166 /*
1167 * Busy means that pending jobs are already being processed,
1168 * and then there is no point in starting the queue: the
1169 * terminal count interrupt on the channel will take the next
1170 * job on the queue and execute it anyway.
1171 */
1069 if (!cohc->busy) 1172 if (!cohc->busy)
1070 coh901318_queue_start(cohc); 1173 coh901318_queue_start(cohc);
1071 1174
1072 spin_unlock_irqrestore(&cohc->lock, flags); 1175 spin_unlock_irqrestore(&cohc->lock, flags);
1073} 1176}
1074 1177
1075static void 1178static int
1076coh901318_terminate_all(struct dma_chan *chan) 1179coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1180 unsigned long arg)
1077{ 1181{
1078 unsigned long flags; 1182 unsigned long flags;
1079 struct coh901318_chan *cohc = to_coh901318_chan(chan); 1183 struct coh901318_chan *cohc = to_coh901318_chan(chan);
1080 struct coh901318_desc *cohd; 1184 struct coh901318_desc *cohd;
1081 void __iomem *virtbase = cohc->base->virtbase; 1185 void __iomem *virtbase = cohc->base->virtbase;
1082 1186
1083 coh901318_stop(chan); 1187 if (cmd == DMA_PAUSE) {
1188 coh901318_pause(chan);
1189 return 0;
1190 }
1191
1192 if (cmd == DMA_RESUME) {
1193 coh901318_resume(chan);
1194 return 0;
1195 }
1084 1196
1197 if (cmd != DMA_TERMINATE_ALL)
1198 return -ENXIO;
1199
1200 /* The remainder of this function terminates the transfer */
1201 coh901318_pause(chan);
1085 spin_lock_irqsave(&cohc->lock, flags); 1202 spin_lock_irqsave(&cohc->lock, flags);
1086 1203
1087 /* Clear any pending BE or TC interrupt */ 1204 /* Clear any pending BE or TC interrupt */
@@ -1099,7 +1216,7 @@ coh901318_terminate_all(struct dma_chan *chan)
1099 1216
1100 while ((cohd = coh901318_first_active_get(cohc))) { 1217 while ((cohd = coh901318_first_active_get(cohc))) {
1101 /* release the lli allocation*/ 1218 /* release the lli allocation*/
1102 coh901318_lli_free(&cohc->base->pool, &cohd->data); 1219 coh901318_lli_free(&cohc->base->pool, &cohd->lli);
1103 1220
1104 /* return desc to free-list */ 1221 /* return desc to free-list */
1105 coh901318_desc_remove(cohd); 1222 coh901318_desc_remove(cohd);
@@ -1108,7 +1225,7 @@ coh901318_terminate_all(struct dma_chan *chan)
1108 1225
1109 while ((cohd = coh901318_first_queued(cohc))) { 1226 while ((cohd = coh901318_first_queued(cohc))) {
1110 /* release the lli allocation*/ 1227 /* release the lli allocation*/
1111 coh901318_lli_free(&cohc->base->pool, &cohd->data); 1228 coh901318_lli_free(&cohc->base->pool, &cohd->lli);
1112 1229
1113 /* return desc to free-list */ 1230 /* return desc to free-list */
1114 coh901318_desc_remove(cohd); 1231 coh901318_desc_remove(cohd);
@@ -1120,6 +1237,8 @@ coh901318_terminate_all(struct dma_chan *chan)
1120 cohc->busy = 0; 1237 cohc->busy = 0;
1121 1238
1122 spin_unlock_irqrestore(&cohc->lock, flags); 1239 spin_unlock_irqrestore(&cohc->lock, flags);
1240
1241 return 0;
1123} 1242}
1124void coh901318_base_init(struct dma_device *dma, const int *pick_chans, 1243void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
1125 struct coh901318_base *base) 1244 struct coh901318_base *base)
@@ -1235,9 +1354,9 @@ static int __init coh901318_probe(struct platform_device *pdev)
1235 base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources; 1354 base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources;
1236 base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources; 1355 base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources;
1237 base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg; 1356 base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
1238 base->dma_slave.device_is_tx_complete = coh901318_is_tx_complete; 1357 base->dma_slave.device_tx_status = coh901318_tx_status;
1239 base->dma_slave.device_issue_pending = coh901318_issue_pending; 1358 base->dma_slave.device_issue_pending = coh901318_issue_pending;
1240 base->dma_slave.device_terminate_all = coh901318_terminate_all; 1359 base->dma_slave.device_control = coh901318_control;
1241 base->dma_slave.dev = &pdev->dev; 1360 base->dma_slave.dev = &pdev->dev;
1242 1361
1243 err = dma_async_device_register(&base->dma_slave); 1362 err = dma_async_device_register(&base->dma_slave);
@@ -1255,9 +1374,9 @@ static int __init coh901318_probe(struct platform_device *pdev)
1255 base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources; 1374 base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources;
1256 base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources; 1375 base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources;
1257 base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy; 1376 base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
1258 base->dma_memcpy.device_is_tx_complete = coh901318_is_tx_complete; 1377 base->dma_memcpy.device_tx_status = coh901318_tx_status;
1259 base->dma_memcpy.device_issue_pending = coh901318_issue_pending; 1378 base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
1260 base->dma_memcpy.device_terminate_all = coh901318_terminate_all; 1379 base->dma_memcpy.device_control = coh901318_control;
1261 base->dma_memcpy.dev = &pdev->dev; 1380 base->dma_memcpy.dev = &pdev->dev;
1262 /* 1381 /*
1263 * This controller can only access address at even 32bit boundaries, 1382 * This controller can only access address at even 32bit boundaries,
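
Note: coh901318_control() above is the fullest example in this series of the consolidated control entry point: one callback multiplexes pause, resume and terminate, and unknown commands return -ENXIO. A hedged sketch of that dispatch shape, with hypothetical my_* helpers stubbed out (a real driver would touch hardware and its descriptor lists in them):

	#include <linux/errno.h>
	#include <linux/dmaengine.h>

	/* Hypothetical helpers, stubbed for the sketch. */
	static void my_pause(struct dma_chan *chan) { }
	static void my_resume(struct dma_chan *chan) { }
	static void my_terminate(struct dma_chan *chan) { }

	static int my_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			      unsigned long arg)
	{
		switch (cmd) {
		case DMA_PAUSE:
			my_pause(chan);
			return 0;
		case DMA_RESUME:
			my_resume(chan);
			return 0;
		case DMA_TERMINATE_ALL:
			my_terminate(chan);
			return 0;
		default:
			/* Commands this driver does not implement. */
			return -ENXIO;
		}
	}

Drivers that only support termination (at_hdmac, dw_dmac, fsldma in this patch) simply reject everything except DMA_TERMINATE_ALL.
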
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index d18b5d069d7e..9d31d5eb95c1 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -515,7 +515,6 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
515 break; 515 break;
516 if (--device->privatecnt == 0) 516 if (--device->privatecnt == 0)
517 dma_cap_clear(DMA_PRIVATE, device->cap_mask); 517 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
518 chan->private = NULL;
519 chan = NULL; 518 chan = NULL;
520 } 519 }
521 } 520 }
@@ -537,7 +536,6 @@ void dma_release_channel(struct dma_chan *chan)
537 /* drop PRIVATE cap enabled by __dma_request_channel() */ 536 /* drop PRIVATE cap enabled by __dma_request_channel() */
538 if (--chan->device->privatecnt == 0) 537 if (--chan->device->privatecnt == 0)
539 dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); 538 dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
540 chan->private = NULL;
541 mutex_unlock(&dma_list_mutex); 539 mutex_unlock(&dma_list_mutex);
542} 540}
543EXPORT_SYMBOL_GPL(dma_release_channel); 541EXPORT_SYMBOL_GPL(dma_release_channel);
@@ -695,11 +693,11 @@ int dma_async_device_register(struct dma_device *device)
695 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && 693 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
696 !device->device_prep_slave_sg); 694 !device->device_prep_slave_sg);
697 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && 695 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
698 !device->device_terminate_all); 696 !device->device_control);
699 697
700 BUG_ON(!device->device_alloc_chan_resources); 698 BUG_ON(!device->device_alloc_chan_resources);
701 BUG_ON(!device->device_free_chan_resources); 699 BUG_ON(!device->device_free_chan_resources);
702 BUG_ON(!device->device_is_tx_complete); 700 BUG_ON(!device->device_tx_status);
703 BUG_ON(!device->device_issue_pending); 701 BUG_ON(!device->device_issue_pending);
704 BUG_ON(!device->dev); 702 BUG_ON(!device->dev);
705 703
@@ -978,7 +976,9 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
978 struct dma_chan *chan) 976 struct dma_chan *chan)
979{ 977{
980 tx->chan = chan; 978 tx->chan = chan;
979 #ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
981 spin_lock_init(&tx->lock); 980 spin_lock_init(&tx->lock);
981 #endif
982} 982}
983EXPORT_SYMBOL(dma_async_tx_descriptor_init); 983EXPORT_SYMBOL(dma_async_tx_descriptor_init);
984 984
@@ -1011,7 +1011,7 @@ EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
1011 */ 1011 */
1012void dma_run_dependencies(struct dma_async_tx_descriptor *tx) 1012void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1013{ 1013{
1014 struct dma_async_tx_descriptor *dep = tx->next; 1014 struct dma_async_tx_descriptor *dep = txd_next(tx);
1015 struct dma_async_tx_descriptor *dep_next; 1015 struct dma_async_tx_descriptor *dep_next;
1016 struct dma_chan *chan; 1016 struct dma_chan *chan;
1017 1017
@@ -1019,7 +1019,7 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1019 return; 1019 return;
1020 1020
1021 /* we'll submit tx->next now, so clear the link */ 1021 /* we'll submit tx->next now, so clear the link */
1022 tx->next = NULL; 1022 txd_clear_next(tx);
1023 chan = dep->chan; 1023 chan = dep->chan;
1024 1024
1025 /* keep submitting up until a channel switch is detected 1025 /* keep submitting up until a channel switch is detected
@@ -1027,14 +1027,14 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1027 * processing the interrupt from async_tx_channel_switch 1027 * processing the interrupt from async_tx_channel_switch
1028 */ 1028 */
1029 for (; dep; dep = dep_next) { 1029 for (; dep; dep = dep_next) {
1030 spin_lock_bh(&dep->lock); 1030 txd_lock(dep);
1031 dep->parent = NULL; 1031 txd_clear_parent(dep);
1032 dep_next = dep->next; 1032 dep_next = txd_next(dep);
1033 if (dep_next && dep_next->chan == chan) 1033 if (dep_next && dep_next->chan == chan)
1034 dep->next = NULL; /* ->next will be submitted */ 1034 txd_clear_next(dep); /* ->next will be submitted */
1035 else 1035 else
1036 dep_next = NULL; /* submit current dep and terminate */ 1036 dep_next = NULL; /* submit current dep and terminate */
1037 spin_unlock_bh(&dep->lock); 1037 txd_unlock(dep);
1038 1038
1039 dep->tx_submit(dep); 1039 dep->tx_submit(dep);
1040 } 1040 }
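
Note: from the client side, termination now goes through the same hook that the BUG_ON() above makes mandatory for DMA_SLAVE drivers. A sketch of requesting, aborting and releasing a channel under that assumption (my_abort_and_release is a placeholder; the device_control check covers channels whose driver does not provide the hook):

	#include <linux/dmaengine.h>

	static void my_abort_and_release(void)
	{
		dma_cap_mask_t mask;
		struct dma_chan *chan;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);

		chan = dma_request_channel(mask, NULL, NULL);	/* no filter */
		if (!chan)
			return;

		/* Abort anything active or queued before handing it back. */
		if (chan->device->device_control)
			chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);

		dma_release_channel(chan);
	}
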
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index d28369f7afd2..a3991ab0d67e 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -781,13 +781,18 @@ err_desc_get:
781 return NULL; 781 return NULL;
782} 782}
783 783
784static void dwc_terminate_all(struct dma_chan *chan) 784static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
785 unsigned long arg)
785{ 786{
786 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 787 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
787 struct dw_dma *dw = to_dw_dma(chan->device); 788 struct dw_dma *dw = to_dw_dma(chan->device);
788 struct dw_desc *desc, *_desc; 789 struct dw_desc *desc, *_desc;
789 LIST_HEAD(list); 790 LIST_HEAD(list);
790 791
792 /* Only supports DMA_TERMINATE_ALL */
793 if (cmd != DMA_TERMINATE_ALL)
794 return -ENXIO;
795
791 /* 796 /*
792 * This is only called when something went wrong elsewhere, so 797 * This is only called when something went wrong elsewhere, so
793 * we don't really care about the data. Just disable the 798 * we don't really care about the data. Just disable the
@@ -810,12 +815,14 @@ static void dwc_terminate_all(struct dma_chan *chan)
810 /* Flush all pending and queued descriptors */ 815 /* Flush all pending and queued descriptors */
811 list_for_each_entry_safe(desc, _desc, &list, desc_node) 816 list_for_each_entry_safe(desc, _desc, &list, desc_node)
812 dwc_descriptor_complete(dwc, desc); 817 dwc_descriptor_complete(dwc, desc);
818
819 return 0;
813} 820}
814 821
815static enum dma_status 822static enum dma_status
816dwc_is_tx_complete(struct dma_chan *chan, 823dwc_tx_status(struct dma_chan *chan,
817 dma_cookie_t cookie, 824 dma_cookie_t cookie,
818 dma_cookie_t *done, dma_cookie_t *used) 825 struct dma_tx_state *txstate)
819{ 826{
820 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 827 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
821 dma_cookie_t last_used; 828 dma_cookie_t last_used;
@@ -835,10 +842,7 @@ dwc_is_tx_complete(struct dma_chan *chan,
835 ret = dma_async_is_complete(cookie, last_complete, last_used); 842 ret = dma_async_is_complete(cookie, last_complete, last_used);
836 } 843 }
837 844
838 if (done) 845 dma_set_tx_state(txstate, last_complete, last_used, 0);
839 *done = last_complete;
840 if (used)
841 *used = last_used;
842 846
843 return ret; 847 return ret;
844} 848}
@@ -1338,9 +1342,9 @@ static int __init dw_probe(struct platform_device *pdev)
1338 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; 1342 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
1339 1343
1340 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; 1344 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
1341 dw->dma.device_terminate_all = dwc_terminate_all; 1345 dw->dma.device_control = dwc_control;
1342 1346
1343 dw->dma.device_is_tx_complete = dwc_is_tx_complete; 1347 dw->dma.device_tx_status = dwc_tx_status;
1344 dw->dma.device_issue_pending = dwc_issue_pending; 1348 dw->dma.device_issue_pending = dwc_issue_pending;
1345 1349
1346 dma_writel(dw, CFG, DW_CFG_DMA_EN); 1350 dma_writel(dw, CFG, DW_CFG_DMA_EN);
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 88f470f0d820..8088b14ba5f7 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -775,13 +775,18 @@ fail:
775 return NULL; 775 return NULL;
776} 776}
777 777
778static void fsl_dma_device_terminate_all(struct dma_chan *dchan) 778static int fsl_dma_device_control(struct dma_chan *dchan,
779 enum dma_ctrl_cmd cmd, unsigned long arg)
779{ 780{
780 struct fsldma_chan *chan; 781 struct fsldma_chan *chan;
781 unsigned long flags; 782 unsigned long flags;
782 783
784 /* Only supports DMA_TERMINATE_ALL */
785 if (cmd != DMA_TERMINATE_ALL)
786 return -ENXIO;
787
783 if (!dchan) 788 if (!dchan)
784 return; 789 return -EINVAL;
785 790
786 chan = to_fsl_chan(dchan); 791 chan = to_fsl_chan(dchan);
787 792
@@ -795,6 +800,8 @@ static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
795 fsldma_free_desc_list(chan, &chan->ld_running); 800 fsldma_free_desc_list(chan, &chan->ld_running);
796 801
797 spin_unlock_irqrestore(&chan->desc_lock, flags); 802 spin_unlock_irqrestore(&chan->desc_lock, flags);
803
804 return 0;
798} 805}
799 806
800/** 807/**
@@ -965,13 +972,12 @@ static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
965} 972}
966 973
967/** 974/**
968 * fsl_dma_is_complete - Determine the DMA status 975 * fsl_tx_status - Determine the DMA status
969 * @chan : Freescale DMA channel 976 * @chan : Freescale DMA channel
970 */ 977 */
971static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan, 978static enum dma_status fsl_tx_status(struct dma_chan *dchan,
972 dma_cookie_t cookie, 979 dma_cookie_t cookie,
973 dma_cookie_t *done, 980 struct dma_tx_state *txstate)
974 dma_cookie_t *used)
975{ 981{
976 struct fsldma_chan *chan = to_fsl_chan(dchan); 982 struct fsldma_chan *chan = to_fsl_chan(dchan);
977 dma_cookie_t last_used; 983 dma_cookie_t last_used;
@@ -982,11 +988,7 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan,
982 last_used = dchan->cookie; 988 last_used = dchan->cookie;
983 last_complete = chan->completed_cookie; 989 last_complete = chan->completed_cookie;
984 990
985 if (done) 991 dma_set_tx_state(txstate, last_complete, last_used, 0);
986 *done = last_complete;
987
988 if (used)
989 *used = last_used;
990 992
991 return dma_async_is_complete(cookie, last_complete, last_used); 993 return dma_async_is_complete(cookie, last_complete, last_used);
992} 994}
@@ -1313,7 +1315,7 @@ static int __devinit fsldma_of_probe(struct of_device *op,
1313 INIT_LIST_HEAD(&fdev->common.channels); 1315 INIT_LIST_HEAD(&fdev->common.channels);
1314 1316
1315 /* ioremap the registers for use */ 1317 /* ioremap the registers for use */
1316 fdev->regs = of_iomap(op->node, 0); 1318 fdev->regs = of_iomap(op->dev.of_node, 0);
1317 if (!fdev->regs) { 1319 if (!fdev->regs) {
1318 dev_err(&op->dev, "unable to ioremap registers\n"); 1320 dev_err(&op->dev, "unable to ioremap registers\n");
1319 err = -ENOMEM; 1321 err = -ENOMEM;
@@ -1321,7 +1323,7 @@ static int __devinit fsldma_of_probe(struct of_device *op,
1321 } 1323 }
1322 1324
1323 /* map the channel IRQ if it exists, but don't hookup the handler yet */ 1325 /* map the channel IRQ if it exists, but don't hookup the handler yet */
1324 fdev->irq = irq_of_parse_and_map(op->node, 0); 1326 fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
1325 1327
1326 dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); 1328 dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
1327 dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); 1329 dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
@@ -1330,10 +1332,10 @@ static int __devinit fsldma_of_probe(struct of_device *op,
1330 fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; 1332 fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
1331 fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; 1333 fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
1332 fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; 1334 fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
1333 fdev->common.device_is_tx_complete = fsl_dma_is_complete; 1335 fdev->common.device_tx_status = fsl_tx_status;
1334 fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; 1336 fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
1335 fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; 1337 fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
1336 fdev->common.device_terminate_all = fsl_dma_device_terminate_all; 1338 fdev->common.device_control = fsl_dma_device_control;
1337 fdev->common.dev = &op->dev; 1339 fdev->common.dev = &op->dev;
1338 1340
1339 dev_set_drvdata(&op->dev, fdev); 1341 dev_set_drvdata(&op->dev, fdev);
@@ -1343,7 +1345,7 @@ static int __devinit fsldma_of_probe(struct of_device *op,
1343 * of_platform_bus_remove(). Instead, we manually instantiate every DMA 1345 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
1344 * channel object. 1346 * channel object.
1345 */ 1347 */
1346 for_each_child_of_node(op->node, child) { 1348 for_each_child_of_node(op->dev.of_node, child) {
1347 if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) { 1349 if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
1348 fsl_dma_chan_probe(fdev, child, 1350 fsl_dma_chan_probe(fdev, child,
1349 FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN, 1351 FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
@@ -1409,10 +1411,13 @@ static const struct of_device_id fsldma_of_ids[] = {
1409}; 1411};
1410 1412
1411static struct of_platform_driver fsldma_of_driver = { 1413static struct of_platform_driver fsldma_of_driver = {
1412 .name = "fsl-elo-dma", 1414 .driver = {
1413 .match_table = fsldma_of_ids, 1415 .name = "fsl-elo-dma",
1414 .probe = fsldma_of_probe, 1416 .owner = THIS_MODULE,
1415 .remove = fsldma_of_remove, 1417 .of_match_table = fsldma_of_ids,
1418 },
1419 .probe = fsldma_of_probe,
1420 .remove = fsldma_of_remove,
1416}; 1421};
1417 1422
1418/*----------------------------------------------------------------------------*/ 1423/*----------------------------------------------------------------------------*/
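
Note: besides the dmaengine conversion, fsldma also follows the OF API move above: the device node is reached through op->dev.of_node, and the bus-specific name/match fields migrate into the embedded struct device_driver. A sketch of the new registration layout; all my_* names and the compatible string are placeholders, and the probe/remove prototypes assume the of_platform convention of this kernel generation:

	#include <linux/module.h>
	#include <linux/of_platform.h>

	static int my_probe(struct of_device *op,
			    const struct of_device_id *match)
	{
		return 0;	/* stub */
	}

	static int my_remove(struct of_device *op)
	{
		return 0;	/* stub */
	}

	static const struct of_device_id my_of_ids[] = {
		{ .compatible = "vendor,my-dma", },	/* hypothetical */
		{}
	};

	static struct of_platform_driver my_of_driver = {
		.driver = {
			.name = "my-dma",
			.owner = THIS_MODULE,
			.of_match_table = my_of_ids,
		},
		.probe = my_probe,
		.remove = my_remove,
	};
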
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 3e5a8005c62b..c9213ead4a26 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -727,18 +727,18 @@ static void ioat1_timer_event(unsigned long data)
727} 727}
728 728
729enum dma_status 729enum dma_status
730ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie, 730ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
731 dma_cookie_t *done, dma_cookie_t *used) 731 struct dma_tx_state *txstate)
732{ 732{
733 struct ioat_chan_common *chan = to_chan_common(c); 733 struct ioat_chan_common *chan = to_chan_common(c);
734 struct ioatdma_device *device = chan->device; 734 struct ioatdma_device *device = chan->device;
735 735
736 if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) 736 if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS)
737 return DMA_SUCCESS; 737 return DMA_SUCCESS;
738 738
739 device->cleanup_fn((unsigned long) c); 739 device->cleanup_fn((unsigned long) c);
740 740
741 return ioat_is_complete(c, cookie, done, used); 741 return ioat_tx_status(c, cookie, txstate);
742} 742}
743 743
744static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) 744static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
@@ -858,7 +858,7 @@ int __devinit ioat_dma_self_test(struct ioatdma_device *device)
858 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 858 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
859 859
860 if (tmo == 0 || 860 if (tmo == 0 ||
861 dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) 861 dma->device_tx_status(dma_chan, cookie, NULL)
862 != DMA_SUCCESS) { 862 != DMA_SUCCESS) {
863 dev_err(dev, "Self-test copy timed out, disabling\n"); 863 dev_err(dev, "Self-test copy timed out, disabling\n");
864 err = -ENODEV; 864 err = -ENODEV;
@@ -1199,7 +1199,7 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
1199 dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; 1199 dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
1200 dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources; 1200 dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
1201 dma->device_free_chan_resources = ioat1_dma_free_chan_resources; 1201 dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
1202 dma->device_is_tx_complete = ioat_is_dma_complete; 1202 dma->device_tx_status = ioat_dma_tx_status;
1203 1203
1204 err = ioat_probe(device); 1204 err = ioat_probe(device);
1205 if (err) 1205 if (err)
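
Note: the self-test conversion above passes a NULL dma_tx_state because it only cares whether the copy finished. A sketch of that polling idiom (my_copy_done and its arguments are placeholders):

	#include <linux/dmaengine.h>

	static bool my_copy_done(struct dma_chan *chan, dma_cookie_t cookie)
	{
		/* NULL txstate: status only, no cookie/residue reporting. */
		return chan->device->device_tx_status(chan, cookie, NULL)
			== DMA_SUCCESS;
	}
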
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 86b97ac8774e..6d3a73b57e54 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -96,6 +96,7 @@ struct ioat_chan_common {
96 #define IOAT_COMPLETION_ACK 1 96 #define IOAT_COMPLETION_ACK 1
97 #define IOAT_RESET_PENDING 2 97 #define IOAT_RESET_PENDING 2
98 #define IOAT_KOBJ_INIT_FAIL 3 98 #define IOAT_KOBJ_INIT_FAIL 3
99 #define IOAT_RESHAPE_PENDING 4
99 struct timer_list timer; 100 struct timer_list timer;
100 #define COMPLETION_TIMEOUT msecs_to_jiffies(100) 101 #define COMPLETION_TIMEOUT msecs_to_jiffies(100)
101 #define IDLE_TIMEOUT msecs_to_jiffies(2000) 102 #define IDLE_TIMEOUT msecs_to_jiffies(2000)
@@ -142,15 +143,14 @@ static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
142} 143}
143 144
144/** 145/**
145 * ioat_is_complete - poll the status of an ioat transaction 146 * ioat_tx_status - poll the status of an ioat transaction
146 * @c: channel handle 147 * @c: channel handle
147 * @cookie: transaction identifier 148 * @cookie: transaction identifier
148 * @done: if set, updated with last completed transaction 149 * @txstate: if set, updated with the transaction state
149 * @used: if set, updated with last used transaction
150 */ 150 */
151static inline enum dma_status 151static inline enum dma_status
152ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie, 152ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
153 dma_cookie_t *done, dma_cookie_t *used) 153 struct dma_tx_state *txstate)
154{ 154{
155 struct ioat_chan_common *chan = to_chan_common(c); 155 struct ioat_chan_common *chan = to_chan_common(c);
156 dma_cookie_t last_used; 156 dma_cookie_t last_used;
@@ -159,10 +159,7 @@ ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie,
159 last_used = c->cookie; 159 last_used = c->cookie;
160 last_complete = chan->completed_cookie; 160 last_complete = chan->completed_cookie;
161 161
162 if (done) 162 dma_set_tx_state(txstate, last_complete, last_used, 0);
163 *done = last_complete;
164 if (used)
165 *used = last_used;
166 163
167 return dma_async_is_complete(cookie, last_complete, last_used); 164 return dma_async_is_complete(cookie, last_complete, last_used);
168} 165}
@@ -338,8 +335,8 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
338unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); 335unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
339void ioat_init_channel(struct ioatdma_device *device, 336void ioat_init_channel(struct ioatdma_device *device,
340 struct ioat_chan_common *chan, int idx); 337 struct ioat_chan_common *chan, int idx);
341enum dma_status ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie, 338enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
342 dma_cookie_t *done, dma_cookie_t *used); 339 struct dma_tx_state *txstate);
343void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, 340void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
344 size_t len, struct ioat_dma_descriptor *hw); 341 size_t len, struct ioat_dma_descriptor *hw);
345bool ioat_cleanup_preamble(struct ioat_chan_common *chan, 342bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
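
Note: ioat_tx_status() above calls dma_set_tx_state() unconditionally even though the self-test hands it a NULL txstate, which implies the helper tolerates a NULL state pointer. A sketch of that assumed behaviour, written with a hypothetical name and assuming the dma_tx_state fields are last/used/residue (this is not code from the patch):

	#include <linux/dmaengine.h>

	static inline void my_set_tx_state(struct dma_tx_state *st,
					   dma_cookie_t last, dma_cookie_t used,
					   u32 residue)
	{
		/* Tolerate callers that do not need the detailed state. */
		if (st) {
			st->last = last;
			st->used = used;
			st->residue = residue;
		}
	}
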
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index b5ae56c211e6..3c8b32a83794 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -56,8 +56,6 @@ void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
56 56
57 ioat->dmacount += ioat2_ring_pending(ioat); 57 ioat->dmacount += ioat2_ring_pending(ioat);
58 ioat->issued = ioat->head; 58 ioat->issued = ioat->head;
59 /* make descriptor updates globally visible before notifying channel */
60 wmb();
61 writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); 59 writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
62 dev_dbg(to_dev(chan), 60 dev_dbg(to_dev(chan),
63 "%s: head: %#x tail: %#x issued: %#x count: %#x\n", 61 "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
@@ -69,9 +67,9 @@ void ioat2_issue_pending(struct dma_chan *c)
69 struct ioat2_dma_chan *ioat = to_ioat2_chan(c); 67 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
70 68
71 if (ioat2_ring_pending(ioat)) { 69 if (ioat2_ring_pending(ioat)) {
72 spin_lock_bh(&ioat->ring_lock); 70 spin_lock_bh(&ioat->prep_lock);
73 __ioat2_issue_pending(ioat); 71 __ioat2_issue_pending(ioat);
74 spin_unlock_bh(&ioat->ring_lock); 72 spin_unlock_bh(&ioat->prep_lock);
75 } 73 }
76} 74}
77 75
@@ -80,7 +78,7 @@ void ioat2_issue_pending(struct dma_chan *c)
80 * @ioat: ioat2+ channel 78 * @ioat: ioat2+ channel
81 * 79 *
82 * Check if the number of unsubmitted descriptors has exceeded the 80 * Check if the number of unsubmitted descriptors has exceeded the
83 * watermark. Called with ring_lock held 81 * watermark. Called with prep_lock held
84 */ 82 */
85static void ioat2_update_pending(struct ioat2_dma_chan *ioat) 83static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
86{ 84{
@@ -92,7 +90,6 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
92{ 90{
93 struct ioat_ring_ent *desc; 91 struct ioat_ring_ent *desc;
94 struct ioat_dma_descriptor *hw; 92 struct ioat_dma_descriptor *hw;
95 int idx;
96 93
97 if (ioat2_ring_space(ioat) < 1) { 94 if (ioat2_ring_space(ioat) < 1) {
98 dev_err(to_dev(&ioat->base), 95 dev_err(to_dev(&ioat->base),
@@ -102,8 +99,7 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
102 99
103 dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n", 100 dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
104 __func__, ioat->head, ioat->tail, ioat->issued); 101 __func__, ioat->head, ioat->tail, ioat->issued);
105 idx = ioat2_desc_alloc(ioat, 1); 102 desc = ioat2_get_ring_ent(ioat, ioat->head);
106 desc = ioat2_get_ring_ent(ioat, idx);
107 103
108 hw = desc->hw; 104 hw = desc->hw;
109 hw->ctl = 0; 105 hw->ctl = 0;
@@ -117,14 +113,16 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
117 async_tx_ack(&desc->txd); 113 async_tx_ack(&desc->txd);
118 ioat2_set_chainaddr(ioat, desc->txd.phys); 114 ioat2_set_chainaddr(ioat, desc->txd.phys);
119 dump_desc_dbg(ioat, desc); 115 dump_desc_dbg(ioat, desc);
116 wmb();
117 ioat->head += 1;
120 __ioat2_issue_pending(ioat); 118 __ioat2_issue_pending(ioat);
121} 119}
122 120
123static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat) 121static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
124{ 122{
125 spin_lock_bh(&ioat->ring_lock); 123 spin_lock_bh(&ioat->prep_lock);
126 __ioat2_start_null_desc(ioat); 124 __ioat2_start_null_desc(ioat);
127 spin_unlock_bh(&ioat->ring_lock); 125 spin_unlock_bh(&ioat->prep_lock);
128} 126}
129 127
130static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) 128static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
@@ -134,15 +132,16 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
134 struct ioat_ring_ent *desc; 132 struct ioat_ring_ent *desc;
135 bool seen_current = false; 133 bool seen_current = false;
136 u16 active; 134 u16 active;
137 int i; 135 int idx = ioat->tail, i;
138 136
139 dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", 137 dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
140 __func__, ioat->head, ioat->tail, ioat->issued); 138 __func__, ioat->head, ioat->tail, ioat->issued);
141 139
142 active = ioat2_ring_active(ioat); 140 active = ioat2_ring_active(ioat);
143 for (i = 0; i < active && !seen_current; i++) { 141 for (i = 0; i < active && !seen_current; i++) {
144 prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1)); 142 smp_read_barrier_depends();
145 desc = ioat2_get_ring_ent(ioat, ioat->tail + i); 143 prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
144 desc = ioat2_get_ring_ent(ioat, idx + i);
146 tx = &desc->txd; 145 tx = &desc->txd;
147 dump_desc_dbg(ioat, desc); 146 dump_desc_dbg(ioat, desc);
148 if (tx->cookie) { 147 if (tx->cookie) {
@@ -158,11 +157,12 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
158 if (tx->phys == phys_complete) 157 if (tx->phys == phys_complete)
159 seen_current = true; 158 seen_current = true;
160 } 159 }
161 ioat->tail += i; 160 smp_mb(); /* finish all descriptor reads before incrementing tail */
161 ioat->tail = idx + i;
162 BUG_ON(active && !seen_current); /* no active descs have written a completion? */ 162 BUG_ON(active && !seen_current); /* no active descs have written a completion? */
163 163
164 chan->last_completion = phys_complete; 164 chan->last_completion = phys_complete;
165 if (ioat->head == ioat->tail) { 165 if (active - i == 0) {
166 dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", 166 dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
167 __func__); 167 __func__);
168 clear_bit(IOAT_COMPLETION_PENDING, &chan->state); 168 clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
@@ -179,24 +179,9 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
179 struct ioat_chan_common *chan = &ioat->base; 179 struct ioat_chan_common *chan = &ioat->base;
180 unsigned long phys_complete; 180 unsigned long phys_complete;
181 181
182 prefetch(chan->completion); 182 spin_lock_bh(&chan->cleanup_lock);
183 183 if (ioat_cleanup_preamble(chan, &phys_complete))
184 if (!spin_trylock_bh(&chan->cleanup_lock)) 184 __cleanup(ioat, phys_complete);
185 return;
186
187 if (!ioat_cleanup_preamble(chan, &phys_complete)) {
188 spin_unlock_bh(&chan->cleanup_lock);
189 return;
190 }
191
192 if (!spin_trylock_bh(&ioat->ring_lock)) {
193 spin_unlock_bh(&chan->cleanup_lock);
194 return;
195 }
196
197 __cleanup(ioat, phys_complete);
198
199 spin_unlock_bh(&ioat->ring_lock);
200 spin_unlock_bh(&chan->cleanup_lock); 185 spin_unlock_bh(&chan->cleanup_lock);
201} 186}
202 187
@@ -287,12 +272,10 @@ void ioat2_timer_event(unsigned long data)
287 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); 272 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
288 struct ioat_chan_common *chan = &ioat->base; 273 struct ioat_chan_common *chan = &ioat->base;
289 274
290 spin_lock_bh(&chan->cleanup_lock);
291 if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { 275 if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
292 unsigned long phys_complete; 276 unsigned long phys_complete;
293 u64 status; 277 u64 status;
294 278
295 spin_lock_bh(&ioat->ring_lock);
296 status = ioat_chansts(chan); 279 status = ioat_chansts(chan);
297 280
298 /* when halted due to errors check for channel 281 /* when halted due to errors check for channel
@@ -311,26 +294,31 @@ void ioat2_timer_event(unsigned long data)
311 * acknowledged a pending completion once, then be more 294 * acknowledged a pending completion once, then be more
312 * forceful with a restart 295 * forceful with a restart
313 */ 296 */
314 if (ioat_cleanup_preamble(chan, &phys_complete)) 297 spin_lock_bh(&chan->cleanup_lock);
298 if (ioat_cleanup_preamble(chan, &phys_complete)) {
315 __cleanup(ioat, phys_complete); 299 __cleanup(ioat, phys_complete);
316 else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) 300 } else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
301 spin_lock_bh(&ioat->prep_lock);
317 ioat2_restart_channel(ioat); 302 ioat2_restart_channel(ioat);
318 else { 303 spin_unlock_bh(&ioat->prep_lock);
304 } else {
319 set_bit(IOAT_COMPLETION_ACK, &chan->state); 305 set_bit(IOAT_COMPLETION_ACK, &chan->state);
320 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 306 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
321 } 307 }
322 spin_unlock_bh(&ioat->ring_lock); 308 spin_unlock_bh(&chan->cleanup_lock);
323 } else { 309 } else {
324 u16 active; 310 u16 active;
325 311
326 /* if the ring is idle, empty, and oversized try to step 312 /* if the ring is idle, empty, and oversized try to step
327 * down the size 313 * down the size
328 */ 314 */
329 spin_lock_bh(&ioat->ring_lock); 315 spin_lock_bh(&chan->cleanup_lock);
316 spin_lock_bh(&ioat->prep_lock);
330 active = ioat2_ring_active(ioat); 317 active = ioat2_ring_active(ioat);
331 if (active == 0 && ioat->alloc_order > ioat_get_alloc_order()) 318 if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
332 reshape_ring(ioat, ioat->alloc_order-1); 319 reshape_ring(ioat, ioat->alloc_order-1);
333 spin_unlock_bh(&ioat->ring_lock); 320 spin_unlock_bh(&ioat->prep_lock);
321 spin_unlock_bh(&chan->cleanup_lock);
334 322
335 /* keep shrinking until we get back to our minimum 323 /* keep shrinking until we get back to our minimum
336 * default size 324 * default size
@@ -338,7 +326,6 @@ void ioat2_timer_event(unsigned long data)
338 if (ioat->alloc_order > ioat_get_alloc_order()) 326 if (ioat->alloc_order > ioat_get_alloc_order())
339 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); 327 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
340 } 328 }
341 spin_unlock_bh(&chan->cleanup_lock);
342} 329}
343 330
344static int ioat2_reset_hw(struct ioat_chan_common *chan) 331static int ioat2_reset_hw(struct ioat_chan_common *chan)
@@ -392,7 +379,7 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
392 379
393 ioat_init_channel(device, &ioat->base, i); 380 ioat_init_channel(device, &ioat->base, i);
394 ioat->xfercap_log = xfercap_log; 381 ioat->xfercap_log = xfercap_log;
395 spin_lock_init(&ioat->ring_lock); 382 spin_lock_init(&ioat->prep_lock);
396 if (device->reset_hw(&ioat->base)) { 383 if (device->reset_hw(&ioat->base)) {
397 i = 0; 384 i = 0;
398 break; 385 break;
@@ -418,8 +405,17 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
418 405
419 if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) 406 if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
420 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 407 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
408
409 /* make descriptor updates visible before advancing ioat->head,
410 * this is purposefully not smp_wmb() since we are also
411 * publishing the descriptor updates to a dma device
412 */
413 wmb();
414
415 ioat->head += ioat->produce;
416
421 ioat2_update_pending(ioat); 417 ioat2_update_pending(ioat);
422 spin_unlock_bh(&ioat->ring_lock); 418 spin_unlock_bh(&ioat->prep_lock);
423 419
424 return cookie; 420 return cookie;
425} 421}
@@ -531,13 +527,15 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
531 if (!ring) 527 if (!ring)
532 return -ENOMEM; 528 return -ENOMEM;
533 529
534 spin_lock_bh(&ioat->ring_lock); 530 spin_lock_bh(&chan->cleanup_lock);
531 spin_lock_bh(&ioat->prep_lock);
535 ioat->ring = ring; 532 ioat->ring = ring;
536 ioat->head = 0; 533 ioat->head = 0;
537 ioat->issued = 0; 534 ioat->issued = 0;
538 ioat->tail = 0; 535 ioat->tail = 0;
539 ioat->alloc_order = order; 536 ioat->alloc_order = order;
540 spin_unlock_bh(&ioat->ring_lock); 537 spin_unlock_bh(&ioat->prep_lock);
538 spin_unlock_bh(&chan->cleanup_lock);
541 539
542 tasklet_enable(&chan->cleanup_task); 540 tasklet_enable(&chan->cleanup_task);
543 ioat2_start_null_desc(ioat); 541 ioat2_start_null_desc(ioat);
@@ -553,7 +551,7 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
553 */ 551 */
554 struct ioat_chan_common *chan = &ioat->base; 552 struct ioat_chan_common *chan = &ioat->base;
555 struct dma_chan *c = &chan->common; 553 struct dma_chan *c = &chan->common;
556 const u16 curr_size = ioat2_ring_mask(ioat) + 1; 554 const u16 curr_size = ioat2_ring_size(ioat);
557 const u16 active = ioat2_ring_active(ioat); 555 const u16 active = ioat2_ring_active(ioat);
558 const u16 new_size = 1 << order; 556 const u16 new_size = 1 << order;
559 struct ioat_ring_ent **ring; 557 struct ioat_ring_ent **ring;
@@ -653,54 +651,61 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
653} 651}
654 652
655/** 653/**
656 * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops 654 * ioat2_check_space_lock - verify space and grab ring producer lock
657 * @idx: gets starting descriptor index on successful allocation
658 * @ioat: ioat2,3 channel (ring) to operate on 655 * @ioat: ioat2,3 channel (ring) to operate on
659 * @num_descs: allocation length 656 * @num_descs: allocation length
660 */ 657 */
661int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs) 658int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
662{ 659{
663 struct ioat_chan_common *chan = &ioat->base; 660 struct ioat_chan_common *chan = &ioat->base;
661 bool retry;
664 662
665 spin_lock_bh(&ioat->ring_lock); 663 retry:
664 spin_lock_bh(&ioat->prep_lock);
666 /* never allow the last descriptor to be consumed, we need at 665 /* never allow the last descriptor to be consumed, we need at
667 * least one free at all times to allow for on-the-fly ring 666 * least one free at all times to allow for on-the-fly ring
668 * resizing. 667 * resizing.
669 */ 668 */
670 while (unlikely(ioat2_ring_space(ioat) <= num_descs)) { 669 if (likely(ioat2_ring_space(ioat) > num_descs)) {
671 if (reshape_ring(ioat, ioat->alloc_order + 1) && 670 dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
672 ioat2_ring_space(ioat) > num_descs) 671 __func__, num_descs, ioat->head, ioat->tail, ioat->issued);
673 break; 672 ioat->produce = num_descs;
674 673 return 0; /* with ioat->prep_lock held */
675 if (printk_ratelimit())
676 dev_dbg(to_dev(chan),
677 "%s: ring full! num_descs: %d (%x:%x:%x)\n",
678 __func__, num_descs, ioat->head, ioat->tail,
679 ioat->issued);
680 spin_unlock_bh(&ioat->ring_lock);
681
682 /* progress reclaim in the allocation failure case we
683 * may be called under bh_disabled so we need to trigger
684 * the timer event directly
685 */
686 spin_lock_bh(&chan->cleanup_lock);
687 if (jiffies > chan->timer.expires &&
688 timer_pending(&chan->timer)) {
689 struct ioatdma_device *device = chan->device;
690
691 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
692 spin_unlock_bh(&chan->cleanup_lock);
693 device->timer_fn((unsigned long) &chan->common);
694 } else
695 spin_unlock_bh(&chan->cleanup_lock);
696 return -ENOMEM;
697 } 674 }
675 retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state);
676 spin_unlock_bh(&ioat->prep_lock);
698 677
699 dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n", 678 /* is another cpu already trying to expand the ring? */
700 __func__, num_descs, ioat->head, ioat->tail, ioat->issued); 679 if (retry)
680 goto retry;
701 681
702 *idx = ioat2_desc_alloc(ioat, num_descs); 682 spin_lock_bh(&chan->cleanup_lock);
703 return 0; /* with ioat->ring_lock held */ 683 spin_lock_bh(&ioat->prep_lock);
684 retry = reshape_ring(ioat, ioat->alloc_order + 1);
685 clear_bit(IOAT_RESHAPE_PENDING, &chan->state);
686 spin_unlock_bh(&ioat->prep_lock);
687 spin_unlock_bh(&chan->cleanup_lock);
688
689 /* if we were able to expand the ring retry the allocation */
690 if (retry)
691 goto retry;
692
693 if (printk_ratelimit())
694 dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n",
695 __func__, num_descs, ioat->head, ioat->tail, ioat->issued);
696
 697 /* progress reclaim in the allocation failure case; we may be
698 * called under bh_disabled so we need to trigger the timer
699 * event directly
700 */
701 if (jiffies > chan->timer.expires && timer_pending(&chan->timer)) {
702 struct ioatdma_device *device = chan->device;
703
704 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
705 device->timer_fn((unsigned long) &chan->common);
706 }
707
708 return -ENOMEM;
704} 709}
705 710
706struct dma_async_tx_descriptor * 711struct dma_async_tx_descriptor *
@@ -713,14 +718,11 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
713 dma_addr_t dst = dma_dest; 718 dma_addr_t dst = dma_dest;
714 dma_addr_t src = dma_src; 719 dma_addr_t src = dma_src;
715 size_t total_len = len; 720 size_t total_len = len;
716 int num_descs; 721 int num_descs, idx, i;
717 u16 idx;
718 int i;
719 722
720 num_descs = ioat2_xferlen_to_descs(ioat, len); 723 num_descs = ioat2_xferlen_to_descs(ioat, len);
721 if (likely(num_descs) && 724 if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
722 ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0) 725 idx = ioat->head;
723 /* pass */;
724 else 726 else
725 return NULL; 727 return NULL;
726 i = 0; 728 i = 0;
@@ -777,7 +779,8 @@ void ioat2_free_chan_resources(struct dma_chan *c)
777 device->cleanup_fn((unsigned long) c); 779 device->cleanup_fn((unsigned long) c);
778 device->reset_hw(chan); 780 device->reset_hw(chan);
779 781
780 spin_lock_bh(&ioat->ring_lock); 782 spin_lock_bh(&chan->cleanup_lock);
783 spin_lock_bh(&ioat->prep_lock);
781 descs = ioat2_ring_space(ioat); 784 descs = ioat2_ring_space(ioat);
782 dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs); 785 dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
783 for (i = 0; i < descs; i++) { 786 for (i = 0; i < descs; i++) {
@@ -800,7 +803,8 @@ void ioat2_free_chan_resources(struct dma_chan *c)
800 ioat->alloc_order = 0; 803 ioat->alloc_order = 0;
801 pci_pool_free(device->completion_pool, chan->completion, 804 pci_pool_free(device->completion_pool, chan->completion,
802 chan->completion_dma); 805 chan->completion_dma);
803 spin_unlock_bh(&ioat->ring_lock); 806 spin_unlock_bh(&ioat->prep_lock);
807 spin_unlock_bh(&chan->cleanup_lock);
804 808
805 chan->last_completion = 0; 809 chan->last_completion = 0;
806 chan->completion_dma = 0; 810 chan->completion_dma = 0;
@@ -855,7 +859,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
855 dma->device_issue_pending = ioat2_issue_pending; 859 dma->device_issue_pending = ioat2_issue_pending;
856 dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; 860 dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
857 dma->device_free_chan_resources = ioat2_free_chan_resources; 861 dma->device_free_chan_resources = ioat2_free_chan_resources;
858 dma->device_is_tx_complete = ioat_is_dma_complete; 862 dma->device_tx_status = ioat_tx_status;
859 863
860 err = ioat_probe(device); 864 err = ioat_probe(device);
861 if (err) 865 if (err)
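
The dma_v2.c hunks above split the old ring_lock into a prep_lock for descriptor producers while the completion path keeps cleanup_lock to itself, and ioat->head now only advances at submit time behind a write barrier. Below is a minimal user-space sketch of that locking shape, not the driver itself: pthread mutexes stand in for the spinlocks, __sync_synchronize() for wmb(), and the ring_model / ring_check_space_lock names are invented for the example.

/* Sketch only: models the prep_lock/cleanup_lock split with pthreads.
 * Names (ring_model, ring_check_space_lock, ...) are illustrative,
 * not the kernel symbols. */
#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define RING_ORDER 4
#define RING_SIZE  (1u << RING_ORDER)

struct ring_model {
	pthread_mutex_t prep_lock;     /* serializes producers */
	pthread_mutex_t cleanup_lock;  /* serializes the completion path */
	uint16_t head;                 /* produced (monotonic u16 index) */
	uint16_t tail;                 /* cleaned up */
	uint16_t produce;              /* descriptors reserved by the caller */
};

static uint16_t ring_active(struct ring_model *r)
{
	/* monotonic u16 indices: the difference is the in-flight count */
	return (uint16_t)(r->head - r->tail);
}

/* Returns 0 with prep_lock held when space is available, -1 otherwise.
 * The strict '>' keeps one slot free, as the driver does for reshaping. */
static int ring_check_space_lock(struct ring_model *r, int num)
{
	pthread_mutex_lock(&r->prep_lock);
	if (RING_SIZE - ring_active(r) > (unsigned int)num) {
		r->produce = num;
		return 0;              /* caller fills descriptors, then submits */
	}
	pthread_mutex_unlock(&r->prep_lock);
	return -1;
}

/* Submit path: publish descriptor contents, then advance head and unlock. */
static void ring_submit_unlock(struct ring_model *r)
{
	__sync_synchronize();          /* stands in for wmb() before the head update */
	r->head += r->produce;
	pthread_mutex_unlock(&r->prep_lock);
}

/* Cleanup path only needs cleanup_lock; it never blocks producers. */
static void ring_cleanup(struct ring_model *r, uint16_t done)
{
	pthread_mutex_lock(&r->cleanup_lock);
	r->tail += done;
	pthread_mutex_unlock(&r->cleanup_lock);
}

int main(void)
{
	struct ring_model r = {
		.prep_lock = PTHREAD_MUTEX_INITIALIZER,
		.cleanup_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	assert(ring_check_space_lock(&r, 4) == 0);
	ring_submit_unlock(&r);
	assert(ring_active(&r) == 4);
	ring_cleanup(&r, 4);
	assert(ring_active(&r) == 0);
	printf("ok\n");
	return 0;
}

The point of the split is that completion work no longer has to trylock against submitters and bail out; producers and the cleanup path only meet when both locks are taken, as in reshape_ring() and free_chan_resources() above.
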
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index ef2871fd7868..a2c413b2b8d8 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -22,6 +22,7 @@
22#define IOATDMA_V2_H 22#define IOATDMA_V2_H
23 23
24#include <linux/dmaengine.h> 24#include <linux/dmaengine.h>
25#include <linux/circ_buf.h>
25#include "dma.h" 26#include "dma.h"
26#include "hw.h" 27#include "hw.h"
27 28
@@ -49,8 +50,9 @@ extern int ioat_ring_alloc_order;
49 * @tail: cleanup index 50 * @tail: cleanup index
50 * @dmacount: identical to 'head' except for occasionally resetting to zero 51 * @dmacount: identical to 'head' except for occasionally resetting to zero
51 * @alloc_order: log2 of the number of allocated descriptors 52 * @alloc_order: log2 of the number of allocated descriptors
53 * @produce: number of descriptors to produce at submit time
52 * @ring: software ring buffer implementation of hardware ring 54 * @ring: software ring buffer implementation of hardware ring
53 * @ring_lock: protects ring attributes 55 * @prep_lock: serializes descriptor preparation (producers)
54 */ 56 */
55struct ioat2_dma_chan { 57struct ioat2_dma_chan {
56 struct ioat_chan_common base; 58 struct ioat_chan_common base;
@@ -60,8 +62,9 @@ struct ioat2_dma_chan {
60 u16 tail; 62 u16 tail;
61 u16 dmacount; 63 u16 dmacount;
62 u16 alloc_order; 64 u16 alloc_order;
65 u16 produce;
63 struct ioat_ring_ent **ring; 66 struct ioat_ring_ent **ring;
64 spinlock_t ring_lock; 67 spinlock_t prep_lock;
65}; 68};
66 69
67static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c) 70static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
@@ -71,38 +74,26 @@ static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
71 return container_of(chan, struct ioat2_dma_chan, base); 74 return container_of(chan, struct ioat2_dma_chan, base);
72} 75}
73 76
74static inline u16 ioat2_ring_mask(struct ioat2_dma_chan *ioat) 77static inline u16 ioat2_ring_size(struct ioat2_dma_chan *ioat)
75{ 78{
76 return (1 << ioat->alloc_order) - 1; 79 return 1 << ioat->alloc_order;
77} 80}
78 81
79/* count of descriptors in flight with the engine */ 82/* count of descriptors in flight with the engine */
80static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat) 83static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat)
81{ 84{
82 return (ioat->head - ioat->tail) & ioat2_ring_mask(ioat); 85 return CIRC_CNT(ioat->head, ioat->tail, ioat2_ring_size(ioat));
83} 86}
84 87
85/* count of descriptors pending submission to hardware */ 88/* count of descriptors pending submission to hardware */
86static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat) 89static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
87{ 90{
88 return (ioat->head - ioat->issued) & ioat2_ring_mask(ioat); 91 return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat));
89} 92}
90 93
91static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat) 94static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat)
92{ 95{
93 u16 num_descs = ioat2_ring_mask(ioat) + 1; 96 return ioat2_ring_size(ioat) - ioat2_ring_active(ioat);
94 u16 active = ioat2_ring_active(ioat);
95
96 BUG_ON(active > num_descs);
97
98 return num_descs - active;
99}
100
101/* assumes caller already checked space */
102static inline u16 ioat2_desc_alloc(struct ioat2_dma_chan *ioat, u16 len)
103{
104 ioat->head += len;
105 return ioat->head - len;
106} 97}
107 98
108static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len) 99static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len)
@@ -151,7 +142,7 @@ struct ioat_ring_ent {
151static inline struct ioat_ring_ent * 142static inline struct ioat_ring_ent *
152ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx) 143ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx)
153{ 144{
154 return ioat->ring[idx & ioat2_ring_mask(ioat)]; 145 return ioat->ring[idx & (ioat2_ring_size(ioat) - 1)];
155} 146}
156 147
157static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr) 148static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
@@ -168,7 +159,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca);
168int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca); 159int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca);
169struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); 160struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
170struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); 161struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
171int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs); 162int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);
172int ioat2_enumerate_channels(struct ioatdma_device *device); 163int ioat2_enumerate_channels(struct ioatdma_device *device);
173struct dma_async_tx_descriptor * 164struct dma_async_tx_descriptor *
174ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, 165ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
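
dma_v2.h drops the hand-rolled mask helpers in favour of <linux/circ_buf.h> accounting over a power-of-two ring. The stand-alone check below exercises the same head/issued/tail arithmetic, including the u16 wraparound; the CIRC_CNT macro is reproduced locally so the snippet compiles on its own.

/* Sketch: CIRC_CNT-style accounting over monotonically increasing u16
 * indices, as used for head/issued/tail in the patch above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CIRC_CNT(head, tail, size)  (((head) - (tail)) & ((size) - 1))

struct ring {
	uint16_t head;    /* next descriptor to produce */
	uint16_t issued;  /* last descriptor handed to hardware */
	uint16_t tail;    /* next descriptor to clean up */
	uint16_t order;   /* log2 of the ring size */
};

static uint16_t ring_size(const struct ring *r)    { return 1u << r->order; }
static uint16_t ring_active(const struct ring *r)  { return CIRC_CNT(r->head, r->tail, ring_size(r)); }
static uint16_t ring_pending(const struct ring *r) { return CIRC_CNT(r->head, r->issued, ring_size(r)); }
static uint16_t ring_space(const struct ring *r)   { return ring_size(r) - ring_active(r); }

int main(void)
{
	struct ring r = { .order = 4 };   /* 16 entries */

	r.head = 65534;                   /* indices about to wrap through 0 */
	r.issued = 65533;
	r.tail = 65530;
	assert(ring_active(&r) == 4);
	assert(ring_pending(&r) == 1);
	assert(ring_space(&r) == 12);

	r.head += 5;                      /* wraps; the masked count still tracks */
	assert(ring_active(&r) == 9);
	printf("ok\n");
	return 0;
}
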
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 6740e319c9cf..1cdd22e1051b 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -260,8 +260,8 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
260 struct ioat_chan_common *chan = &ioat->base; 260 struct ioat_chan_common *chan = &ioat->base;
261 struct ioat_ring_ent *desc; 261 struct ioat_ring_ent *desc;
262 bool seen_current = false; 262 bool seen_current = false;
263 int idx = ioat->tail, i;
263 u16 active; 264 u16 active;
264 int i;
265 265
266 dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", 266 dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
267 __func__, ioat->head, ioat->tail, ioat->issued); 267 __func__, ioat->head, ioat->tail, ioat->issued);
@@ -270,13 +270,14 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
270 for (i = 0; i < active && !seen_current; i++) { 270 for (i = 0; i < active && !seen_current; i++) {
271 struct dma_async_tx_descriptor *tx; 271 struct dma_async_tx_descriptor *tx;
272 272
273 prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1)); 273 smp_read_barrier_depends();
274 desc = ioat2_get_ring_ent(ioat, ioat->tail + i); 274 prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
275 desc = ioat2_get_ring_ent(ioat, idx + i);
275 dump_desc_dbg(ioat, desc); 276 dump_desc_dbg(ioat, desc);
276 tx = &desc->txd; 277 tx = &desc->txd;
277 if (tx->cookie) { 278 if (tx->cookie) {
278 chan->completed_cookie = tx->cookie; 279 chan->completed_cookie = tx->cookie;
279 ioat3_dma_unmap(ioat, desc, ioat->tail + i); 280 ioat3_dma_unmap(ioat, desc, idx + i);
280 tx->cookie = 0; 281 tx->cookie = 0;
281 if (tx->callback) { 282 if (tx->callback) {
282 tx->callback(tx->callback_param); 283 tx->callback(tx->callback_param);
@@ -293,69 +294,30 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
293 i++; 294 i++;
294 } 295 }
295 } 296 }
296 ioat->tail += i; 297 smp_mb(); /* finish all descriptor reads before incrementing tail */
298 ioat->tail = idx + i;
297 BUG_ON(active && !seen_current); /* no active descs have written a completion? */ 299 BUG_ON(active && !seen_current); /* no active descs have written a completion? */
298 chan->last_completion = phys_complete; 300 chan->last_completion = phys_complete;
299 301
300 active = ioat2_ring_active(ioat); 302 if (active - i == 0) {
301 if (active == 0) {
302 dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", 303 dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
303 __func__); 304 __func__);
304 clear_bit(IOAT_COMPLETION_PENDING, &chan->state); 305 clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
305 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); 306 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
306 } 307 }
307 /* 5 microsecond delay per pending descriptor */ 308 /* 5 microsecond delay per pending descriptor */
308 writew(min((5 * active), IOAT_INTRDELAY_MASK), 309 writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
309 chan->device->reg_base + IOAT_INTRDELAY_OFFSET); 310 chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
310} 311}
311 312
312/* try to cleanup, but yield (via spin_trylock) to incoming submissions 313static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
313 * with the expectation that we will immediately poll again shortly
314 */
315static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat)
316{ 314{
317 struct ioat_chan_common *chan = &ioat->base; 315 struct ioat_chan_common *chan = &ioat->base;
318 unsigned long phys_complete; 316 unsigned long phys_complete;
319 317
320 prefetch(chan->completion);
321
322 if (!spin_trylock_bh(&chan->cleanup_lock))
323 return;
324
325 if (!ioat_cleanup_preamble(chan, &phys_complete)) {
326 spin_unlock_bh(&chan->cleanup_lock);
327 return;
328 }
329
330 if (!spin_trylock_bh(&ioat->ring_lock)) {
331 spin_unlock_bh(&chan->cleanup_lock);
332 return;
333 }
334
335 __cleanup(ioat, phys_complete);
336
337 spin_unlock_bh(&ioat->ring_lock);
338 spin_unlock_bh(&chan->cleanup_lock);
339}
340
341/* run cleanup now because we already delayed the interrupt via INTRDELAY */
342static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat)
343{
344 struct ioat_chan_common *chan = &ioat->base;
345 unsigned long phys_complete;
346
347 prefetch(chan->completion);
348
349 spin_lock_bh(&chan->cleanup_lock); 318 spin_lock_bh(&chan->cleanup_lock);
350 if (!ioat_cleanup_preamble(chan, &phys_complete)) { 319 if (ioat_cleanup_preamble(chan, &phys_complete))
351 spin_unlock_bh(&chan->cleanup_lock); 320 __cleanup(ioat, phys_complete);
352 return;
353 }
354 spin_lock_bh(&ioat->ring_lock);
355
356 __cleanup(ioat, phys_complete);
357
358 spin_unlock_bh(&ioat->ring_lock);
359 spin_unlock_bh(&chan->cleanup_lock); 321 spin_unlock_bh(&chan->cleanup_lock);
360} 322}
361 323
@@ -363,7 +325,7 @@ static void ioat3_cleanup_event(unsigned long data)
363{ 325{
364 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); 326 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
365 327
366 ioat3_cleanup_sync(ioat); 328 ioat3_cleanup(ioat);
367 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); 329 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
368} 330}
369 331
@@ -384,12 +346,10 @@ static void ioat3_timer_event(unsigned long data)
384 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); 346 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
385 struct ioat_chan_common *chan = &ioat->base; 347 struct ioat_chan_common *chan = &ioat->base;
386 348
387 spin_lock_bh(&chan->cleanup_lock);
388 if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { 349 if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
389 unsigned long phys_complete; 350 unsigned long phys_complete;
390 u64 status; 351 u64 status;
391 352
392 spin_lock_bh(&ioat->ring_lock);
393 status = ioat_chansts(chan); 353 status = ioat_chansts(chan);
394 354
395 /* when halted due to errors check for channel 355 /* when halted due to errors check for channel
@@ -408,26 +368,31 @@ static void ioat3_timer_event(unsigned long data)
408 * acknowledged a pending completion once, then be more 368 * acknowledged a pending completion once, then be more
409 * forceful with a restart 369 * forceful with a restart
410 */ 370 */
371 spin_lock_bh(&chan->cleanup_lock);
411 if (ioat_cleanup_preamble(chan, &phys_complete)) 372 if (ioat_cleanup_preamble(chan, &phys_complete))
412 __cleanup(ioat, phys_complete); 373 __cleanup(ioat, phys_complete);
413 else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) 374 else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
375 spin_lock_bh(&ioat->prep_lock);
414 ioat3_restart_channel(ioat); 376 ioat3_restart_channel(ioat);
415 else { 377 spin_unlock_bh(&ioat->prep_lock);
378 } else {
416 set_bit(IOAT_COMPLETION_ACK, &chan->state); 379 set_bit(IOAT_COMPLETION_ACK, &chan->state);
417 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 380 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
418 } 381 }
419 spin_unlock_bh(&ioat->ring_lock); 382 spin_unlock_bh(&chan->cleanup_lock);
420 } else { 383 } else {
421 u16 active; 384 u16 active;
422 385
423 /* if the ring is idle, empty, and oversized try to step 386 /* if the ring is idle, empty, and oversized try to step
424 * down the size 387 * down the size
425 */ 388 */
426 spin_lock_bh(&ioat->ring_lock); 389 spin_lock_bh(&chan->cleanup_lock);
390 spin_lock_bh(&ioat->prep_lock);
427 active = ioat2_ring_active(ioat); 391 active = ioat2_ring_active(ioat);
428 if (active == 0 && ioat->alloc_order > ioat_get_alloc_order()) 392 if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
429 reshape_ring(ioat, ioat->alloc_order-1); 393 reshape_ring(ioat, ioat->alloc_order-1);
430 spin_unlock_bh(&ioat->ring_lock); 394 spin_unlock_bh(&ioat->prep_lock);
395 spin_unlock_bh(&chan->cleanup_lock);
431 396
432 /* keep shrinking until we get back to our minimum 397 /* keep shrinking until we get back to our minimum
433 * default size 398 * default size
@@ -435,21 +400,20 @@ static void ioat3_timer_event(unsigned long data)
435 if (ioat->alloc_order > ioat_get_alloc_order()) 400 if (ioat->alloc_order > ioat_get_alloc_order())
436 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); 401 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
437 } 402 }
438 spin_unlock_bh(&chan->cleanup_lock);
439} 403}
440 404
441static enum dma_status 405static enum dma_status
442ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie, 406ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
443 dma_cookie_t *done, dma_cookie_t *used) 407 struct dma_tx_state *txstate)
444{ 408{
445 struct ioat2_dma_chan *ioat = to_ioat2_chan(c); 409 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
446 410
447 if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) 411 if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS)
448 return DMA_SUCCESS; 412 return DMA_SUCCESS;
449 413
450 ioat3_cleanup_poll(ioat); 414 ioat3_cleanup(ioat);
451 415
452 return ioat_is_complete(c, cookie, done, used); 416 return ioat_tx_status(c, cookie, txstate);
453} 417}
454 418
455static struct dma_async_tx_descriptor * 419static struct dma_async_tx_descriptor *
@@ -460,15 +424,12 @@ ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
460 struct ioat_ring_ent *desc; 424 struct ioat_ring_ent *desc;
461 size_t total_len = len; 425 size_t total_len = len;
462 struct ioat_fill_descriptor *fill; 426 struct ioat_fill_descriptor *fill;
463 int num_descs;
464 u64 src_data = (0x0101010101010101ULL) * (value & 0xff); 427 u64 src_data = (0x0101010101010101ULL) * (value & 0xff);
465 u16 idx; 428 int num_descs, idx, i;
466 int i;
467 429
468 num_descs = ioat2_xferlen_to_descs(ioat, len); 430 num_descs = ioat2_xferlen_to_descs(ioat, len);
469 if (likely(num_descs) && 431 if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
470 ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0) 432 idx = ioat->head;
471 /* pass */;
472 else 433 else
473 return NULL; 434 return NULL;
474 i = 0; 435 i = 0;
@@ -513,11 +474,8 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
513 struct ioat_xor_descriptor *xor; 474 struct ioat_xor_descriptor *xor;
514 struct ioat_xor_ext_descriptor *xor_ex = NULL; 475 struct ioat_xor_ext_descriptor *xor_ex = NULL;
515 struct ioat_dma_descriptor *hw; 476 struct ioat_dma_descriptor *hw;
477 int num_descs, with_ext, idx, i;
516 u32 offset = 0; 478 u32 offset = 0;
517 int num_descs;
518 int with_ext;
519 int i;
520 u16 idx;
521 u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR; 479 u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;
522 480
523 BUG_ON(src_cnt < 2); 481 BUG_ON(src_cnt < 2);
@@ -537,9 +495,8 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
537 * (legacy) descriptor to ensure all completion writes arrive in 495 * (legacy) descriptor to ensure all completion writes arrive in
538 * order. 496 * order.
539 */ 497 */
540 if (likely(num_descs) && 498 if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
541 ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0) 499 idx = ioat->head;
542 /* pass */;
543 else 500 else
544 return NULL; 501 return NULL;
545 i = 0; 502 i = 0;
@@ -657,11 +614,8 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
657 struct ioat_pq_ext_descriptor *pq_ex = NULL; 614 struct ioat_pq_ext_descriptor *pq_ex = NULL;
658 struct ioat_dma_descriptor *hw; 615 struct ioat_dma_descriptor *hw;
659 u32 offset = 0; 616 u32 offset = 0;
660 int num_descs;
661 int with_ext;
662 int i, s;
663 u16 idx;
664 u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ; 617 u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
618 int i, s, idx, with_ext, num_descs;
665 619
666 dev_dbg(to_dev(chan), "%s\n", __func__); 620 dev_dbg(to_dev(chan), "%s\n", __func__);
667 /* the engine requires at least two sources (we provide 621 /* the engine requires at least two sources (we provide
@@ -687,8 +641,8 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
687 * order. 641 * order.
688 */ 642 */
689 if (likely(num_descs) && 643 if (likely(num_descs) &&
690 ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0) 644 ioat2_check_space_lock(ioat, num_descs+1) == 0)
691 /* pass */; 645 idx = ioat->head;
692 else 646 else
693 return NULL; 647 return NULL;
694 i = 0; 648 i = 0;
@@ -851,10 +805,9 @@ ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
851 struct ioat2_dma_chan *ioat = to_ioat2_chan(c); 805 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
852 struct ioat_ring_ent *desc; 806 struct ioat_ring_ent *desc;
853 struct ioat_dma_descriptor *hw; 807 struct ioat_dma_descriptor *hw;
854 u16 idx;
855 808
856 if (ioat2_alloc_and_lock(&idx, ioat, 1) == 0) 809 if (ioat2_check_space_lock(ioat, 1) == 0)
857 desc = ioat2_get_ring_ent(ioat, idx); 810 desc = ioat2_get_ring_ent(ioat, ioat->head);
858 else 811 else
859 return NULL; 812 return NULL;
860 813
@@ -977,7 +930,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
977 930
978 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 931 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
979 932
980 if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { 933 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
981 dev_err(dev, "Self-test xor timed out\n"); 934 dev_err(dev, "Self-test xor timed out\n");
982 err = -ENODEV; 935 err = -ENODEV;
983 goto free_resources; 936 goto free_resources;
@@ -1031,7 +984,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
1031 984
1032 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 985 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1033 986
1034 if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { 987 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
1035 dev_err(dev, "Self-test validate timed out\n"); 988 dev_err(dev, "Self-test validate timed out\n");
1036 err = -ENODEV; 989 err = -ENODEV;
1037 goto free_resources; 990 goto free_resources;
@@ -1072,7 +1025,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
1072 1025
1073 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 1026 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1074 1027
1075 if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { 1028 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
1076 dev_err(dev, "Self-test memset timed out\n"); 1029 dev_err(dev, "Self-test memset timed out\n");
1077 err = -ENODEV; 1030 err = -ENODEV;
1078 goto free_resources; 1031 goto free_resources;
@@ -1115,7 +1068,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
1115 1068
1116 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 1069 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1117 1070
1118 if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { 1071 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
1119 dev_err(dev, "Self-test 2nd validate timed out\n"); 1072 dev_err(dev, "Self-test 2nd validate timed out\n");
1120 err = -ENODEV; 1073 err = -ENODEV;
1121 goto free_resources; 1074 goto free_resources;
@@ -1222,7 +1175,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1222 if (cap & IOAT_CAP_XOR) { 1175 if (cap & IOAT_CAP_XOR) {
1223 is_raid_device = true; 1176 is_raid_device = true;
1224 dma->max_xor = 8; 1177 dma->max_xor = 8;
1225 dma->xor_align = 2; 1178 dma->xor_align = 6;
1226 1179
1227 dma_cap_set(DMA_XOR, dma->cap_mask); 1180 dma_cap_set(DMA_XOR, dma->cap_mask);
1228 dma->device_prep_dma_xor = ioat3_prep_xor; 1181 dma->device_prep_dma_xor = ioat3_prep_xor;
@@ -1233,7 +1186,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1233 if (cap & IOAT_CAP_PQ) { 1186 if (cap & IOAT_CAP_PQ) {
1234 is_raid_device = true; 1187 is_raid_device = true;
1235 dma_set_maxpq(dma, 8, 0); 1188 dma_set_maxpq(dma, 8, 0);
1236 dma->pq_align = 2; 1189 dma->pq_align = 6;
1237 1190
1238 dma_cap_set(DMA_PQ, dma->cap_mask); 1191 dma_cap_set(DMA_PQ, dma->cap_mask);
1239 dma->device_prep_dma_pq = ioat3_prep_pq; 1192 dma->device_prep_dma_pq = ioat3_prep_pq;
@@ -1243,7 +1196,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1243 1196
1244 if (!(cap & IOAT_CAP_XOR)) { 1197 if (!(cap & IOAT_CAP_XOR)) {
1245 dma->max_xor = 8; 1198 dma->max_xor = 8;
1246 dma->xor_align = 2; 1199 dma->xor_align = 6;
1247 1200
1248 dma_cap_set(DMA_XOR, dma->cap_mask); 1201 dma_cap_set(DMA_XOR, dma->cap_mask);
1249 dma->device_prep_dma_xor = ioat3_prep_pqxor; 1202 dma->device_prep_dma_xor = ioat3_prep_pqxor;
@@ -1259,11 +1212,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1259 1212
1260 1213
1261 if (is_raid_device) { 1214 if (is_raid_device) {
1262 dma->device_is_tx_complete = ioat3_is_complete; 1215 dma->device_tx_status = ioat3_tx_status;
1263 device->cleanup_fn = ioat3_cleanup_event; 1216 device->cleanup_fn = ioat3_cleanup_event;
1264 device->timer_fn = ioat3_timer_event; 1217 device->timer_fn = ioat3_timer_event;
1265 } else { 1218 } else {
1266 dma->device_is_tx_complete = ioat_is_dma_complete; 1219 dma->device_tx_status = ioat_dma_tx_status;
1267 device->cleanup_fn = ioat2_cleanup_event; 1220 device->cleanup_fn = ioat2_cleanup_event;
1268 device->timer_fn = ioat2_timer_event; 1221 device->timer_fn = ioat2_timer_event;
1269 } 1222 }
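
In the dma_v3.c __cleanup() above, the interrupt-coalescing delay is now derived from the descriptors still outstanding after the pass (active - i) rather than the pre-cleanup count. A small sketch of that clamp follows; the 0x3FFF mask is only a placeholder standing in for IOAT_INTRDELAY_MASK, whose real value comes from the register definitions.

/* Sketch: 5 microseconds of interrupt delay per still-outstanding
 * descriptor, clamped to the register mask.  DEMO_INTRDELAY_MASK is an
 * assumed placeholder, not the driver constant. */
#include <assert.h>
#include <stdint.h>

#define DEMO_INTRDELAY_MASK 0x3FFF   /* placeholder for IOAT_INTRDELAY_MASK */

static uint16_t intr_delay(unsigned int active, unsigned int cleaned)
{
	unsigned int remaining = active - cleaned;
	unsigned int delay = 5 * remaining;   /* 5 us per pending descriptor */

	return delay < DEMO_INTRDELAY_MASK ? delay : DEMO_INTRDELAY_MASK;
}

int main(void)
{
	assert(intr_delay(10, 10) == 0);      /* ring drained: no added latency */
	assert(intr_delay(40, 8) == 160);     /* 32 outstanding -> 160 us */
	assert(intr_delay(9000, 0) == DEMO_INTRDELAY_MASK);   /* clamped */
	return 0;
}
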
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 99ec26725bae..fab37d1cf48d 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -138,15 +138,10 @@ static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_devic
138 if (err) 138 if (err)
139 return err; 139 return err;
140 140
141 device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL);
142 if (!device)
143 return -ENOMEM;
144
145 pci_set_master(pdev);
146
147 device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]); 141 device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
148 if (!device) 142 if (!device)
149 return -ENOMEM; 143 return -ENOMEM;
144 pci_set_master(pdev);
150 pci_set_drvdata(pdev, device); 145 pci_set_drvdata(pdev, device);
151 146
152 device->version = readb(device->reg_base + IOAT_VER_OFFSET); 147 device->version = readb(device->reg_base + IOAT_VER_OFFSET);
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 1ebc801678b0..161c452923b8 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -894,14 +894,14 @@ static void iop_adma_free_chan_resources(struct dma_chan *chan)
894} 894}
895 895
896/** 896/**
897 * iop_adma_is_complete - poll the status of an ADMA transaction 897 * iop_adma_status - poll the status of an ADMA transaction
898 * @chan: ADMA channel handle 898 * @chan: ADMA channel handle
899 * @cookie: ADMA transaction identifier 899 * @cookie: ADMA transaction identifier
900 * @txstate: a holder for the current state of the channel or NULL
900 */ 901 */
901static enum dma_status iop_adma_is_complete(struct dma_chan *chan, 902static enum dma_status iop_adma_status(struct dma_chan *chan,
902 dma_cookie_t cookie, 903 dma_cookie_t cookie,
903 dma_cookie_t *done, 904 struct dma_tx_state *txstate)
904 dma_cookie_t *used)
905{ 905{
906 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 906 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
907 dma_cookie_t last_used; 907 dma_cookie_t last_used;
@@ -910,12 +910,7 @@ static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
910 910
911 last_used = chan->cookie; 911 last_used = chan->cookie;
912 last_complete = iop_chan->completed_cookie; 912 last_complete = iop_chan->completed_cookie;
913 913 dma_set_tx_state(txstate, last_complete, last_used, 0);
914 if (done)
915 *done = last_complete;
916 if (used)
917 *used = last_used;
918
919 ret = dma_async_is_complete(cookie, last_complete, last_used); 914 ret = dma_async_is_complete(cookie, last_complete, last_used);
920 if (ret == DMA_SUCCESS) 915 if (ret == DMA_SUCCESS)
921 return ret; 916 return ret;
@@ -924,11 +919,7 @@ static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
924 919
925 last_used = chan->cookie; 920 last_used = chan->cookie;
926 last_complete = iop_chan->completed_cookie; 921 last_complete = iop_chan->completed_cookie;
927 922 dma_set_tx_state(txstate, last_complete, last_used, 0);
928 if (done)
929 *done = last_complete;
930 if (used)
931 *used = last_used;
932 923
933 return dma_async_is_complete(cookie, last_complete, last_used); 924 return dma_async_is_complete(cookie, last_complete, last_used);
934} 925}
@@ -1043,7 +1034,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
1043 iop_adma_issue_pending(dma_chan); 1034 iop_adma_issue_pending(dma_chan);
1044 msleep(1); 1035 msleep(1);
1045 1036
1046 if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != 1037 if (iop_adma_status(dma_chan, cookie, NULL) !=
1047 DMA_SUCCESS) { 1038 DMA_SUCCESS) {
1048 dev_printk(KERN_ERR, dma_chan->device->dev, 1039 dev_printk(KERN_ERR, dma_chan->device->dev,
1049 "Self-test copy timed out, disabling\n"); 1040 "Self-test copy timed out, disabling\n");
@@ -1143,7 +1134,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
1143 iop_adma_issue_pending(dma_chan); 1134 iop_adma_issue_pending(dma_chan);
1144 msleep(8); 1135 msleep(8);
1145 1136
1146 if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != 1137 if (iop_adma_status(dma_chan, cookie, NULL) !=
1147 DMA_SUCCESS) { 1138 DMA_SUCCESS) {
1148 dev_printk(KERN_ERR, dma_chan->device->dev, 1139 dev_printk(KERN_ERR, dma_chan->device->dev,
1149 "Self-test xor timed out, disabling\n"); 1140 "Self-test xor timed out, disabling\n");
@@ -1190,7 +1181,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
1190 iop_adma_issue_pending(dma_chan); 1181 iop_adma_issue_pending(dma_chan);
1191 msleep(8); 1182 msleep(8);
1192 1183
1193 if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { 1184 if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
1194 dev_printk(KERN_ERR, dma_chan->device->dev, 1185 dev_printk(KERN_ERR, dma_chan->device->dev,
1195 "Self-test zero sum timed out, disabling\n"); 1186 "Self-test zero sum timed out, disabling\n");
1196 err = -ENODEV; 1187 err = -ENODEV;
@@ -1214,7 +1205,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
1214 iop_adma_issue_pending(dma_chan); 1205 iop_adma_issue_pending(dma_chan);
1215 msleep(8); 1206 msleep(8);
1216 1207
1217 if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { 1208 if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
1218 dev_printk(KERN_ERR, dma_chan->device->dev, 1209 dev_printk(KERN_ERR, dma_chan->device->dev,
1219 "Self-test memset timed out, disabling\n"); 1210 "Self-test memset timed out, disabling\n");
1220 err = -ENODEV; 1211 err = -ENODEV;
@@ -1246,7 +1237,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
1246 iop_adma_issue_pending(dma_chan); 1237 iop_adma_issue_pending(dma_chan);
1247 msleep(8); 1238 msleep(8);
1248 1239
1249 if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { 1240 if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
1250 dev_printk(KERN_ERR, dma_chan->device->dev, 1241 dev_printk(KERN_ERR, dma_chan->device->dev,
1251 "Self-test non-zero sum timed out, disabling\n"); 1242 "Self-test non-zero sum timed out, disabling\n");
1252 err = -ENODEV; 1243 err = -ENODEV;
@@ -1341,7 +1332,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
1341 iop_adma_issue_pending(dma_chan); 1332 iop_adma_issue_pending(dma_chan);
1342 msleep(8); 1333 msleep(8);
1343 1334
1344 if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != 1335 if (iop_adma_status(dma_chan, cookie, NULL) !=
1345 DMA_SUCCESS) { 1336 DMA_SUCCESS) {
1346 dev_err(dev, "Self-test pq timed out, disabling\n"); 1337 dev_err(dev, "Self-test pq timed out, disabling\n");
1347 err = -ENODEV; 1338 err = -ENODEV;
@@ -1378,7 +1369,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
1378 iop_adma_issue_pending(dma_chan); 1369 iop_adma_issue_pending(dma_chan);
1379 msleep(8); 1370 msleep(8);
1380 1371
1381 if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != 1372 if (iop_adma_status(dma_chan, cookie, NULL) !=
1382 DMA_SUCCESS) { 1373 DMA_SUCCESS) {
1383 dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n"); 1374 dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
1384 err = -ENODEV; 1375 err = -ENODEV;
@@ -1410,7 +1401,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
1410 iop_adma_issue_pending(dma_chan); 1401 iop_adma_issue_pending(dma_chan);
1411 msleep(8); 1402 msleep(8);
1412 1403
1413 if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != 1404 if (iop_adma_status(dma_chan, cookie, NULL) !=
1414 DMA_SUCCESS) { 1405 DMA_SUCCESS) {
1415 dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n"); 1406 dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
1416 err = -ENODEV; 1407 err = -ENODEV;
@@ -1508,7 +1499,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
1508 /* set base routines */ 1499 /* set base routines */
1509 dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources; 1500 dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
1510 dma_dev->device_free_chan_resources = iop_adma_free_chan_resources; 1501 dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
1511 dma_dev->device_is_tx_complete = iop_adma_is_complete; 1502 dma_dev->device_tx_status = iop_adma_status;
1512 dma_dev->device_issue_pending = iop_adma_issue_pending; 1503 dma_dev->device_issue_pending = iop_adma_issue_pending;
1513 dma_dev->dev = &pdev->dev; 1504 dma_dev->dev = &pdev->dev;
1514 1505
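
iop-adma.c shows the conversion that recurs throughout this series: the done/used cookie out-parameters become a single struct dma_tx_state filled by dma_set_tx_state(). The sketch below models that pattern with locally defined types; the completion check is deliberately simplified and ignores cookie wraparound, which the real dma_async_is_complete() handles.

/* Sketch: the dma_tx_state fill + completion check pattern.  Types are
 * modelled locally; only the "completed >= requested => done" rule and
 * the NULL-tolerant state fill are the point. */
#include <assert.h>
#include <stdint.h>

typedef int32_t dma_cookie_t;

enum demo_status { DEMO_SUCCESS, DEMO_IN_PROGRESS };

struct demo_tx_state {
	dma_cookie_t last;     /* last completed cookie */
	dma_cookie_t used;     /* last issued cookie */
	uint32_t residue;      /* bytes left; 0 when unknown, as in these drivers */
};

static void demo_set_tx_state(struct demo_tx_state *st, dma_cookie_t last,
			      dma_cookie_t used, uint32_t residue)
{
	if (st) {              /* callers may pass NULL, as the self-tests do */
		st->last = last;
		st->used = used;
		st->residue = residue;
	}
}

static enum demo_status demo_is_complete(dma_cookie_t cookie,
					 dma_cookie_t last, dma_cookie_t used)
{
	(void)used;            /* the real helper also handles cookie wraparound */
	return cookie <= last ? DEMO_SUCCESS : DEMO_IN_PROGRESS;
}

int main(void)
{
	struct demo_tx_state st;

	demo_set_tx_state(&st, /*last=*/7, /*used=*/9, 0);
	assert(demo_is_complete(8, st.last, st.used) == DEMO_IN_PROGRESS);
	assert(demo_is_complete(7, st.last, st.used) == DEMO_SUCCESS);
	demo_set_tx_state(NULL, 7, 9, 0);   /* NULL txstate must be tolerated */
	return 0;
}
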
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 2a446397c884..cb26ee9773d6 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1472,13 +1472,18 @@ static void idmac_issue_pending(struct dma_chan *chan)
1472 */ 1472 */
1473} 1473}
1474 1474
1475static void __idmac_terminate_all(struct dma_chan *chan) 1475static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1476 unsigned long arg)
1476{ 1477{
1477 struct idmac_channel *ichan = to_idmac_chan(chan); 1478 struct idmac_channel *ichan = to_idmac_chan(chan);
1478 struct idmac *idmac = to_idmac(chan->device); 1479 struct idmac *idmac = to_idmac(chan->device);
1479 unsigned long flags; 1480 unsigned long flags;
1480 int i; 1481 int i;
1481 1482
1483 /* Only supports DMA_TERMINATE_ALL */
1484 if (cmd != DMA_TERMINATE_ALL)
1485 return -ENXIO;
1486
1482 ipu_disable_channel(idmac, ichan, 1487 ipu_disable_channel(idmac, ichan,
1483 ichan->status >= IPU_CHANNEL_ENABLED); 1488 ichan->status >= IPU_CHANNEL_ENABLED);
1484 1489
@@ -1505,17 +1510,23 @@ static void __idmac_terminate_all(struct dma_chan *chan)
1505 tasklet_enable(&to_ipu(idmac)->tasklet); 1510 tasklet_enable(&to_ipu(idmac)->tasklet);
1506 1511
1507 ichan->status = IPU_CHANNEL_INITIALIZED; 1512 ichan->status = IPU_CHANNEL_INITIALIZED;
1513
1514 return 0;
1508} 1515}
1509 1516
1510static void idmac_terminate_all(struct dma_chan *chan) 1517static int idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1518 unsigned long arg)
1511{ 1519{
1512 struct idmac_channel *ichan = to_idmac_chan(chan); 1520 struct idmac_channel *ichan = to_idmac_chan(chan);
1521 int ret;
1513 1522
1514 mutex_lock(&ichan->chan_mutex); 1523 mutex_lock(&ichan->chan_mutex);
1515 1524
1516 __idmac_terminate_all(chan); 1525 ret = __idmac_control(chan, cmd, arg);
1517 1526
1518 mutex_unlock(&ichan->chan_mutex); 1527 mutex_unlock(&ichan->chan_mutex);
1528
1529 return ret;
1519} 1530}
1520 1531
1521#ifdef DEBUG 1532#ifdef DEBUG
@@ -1607,7 +1618,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
1607 1618
1608 mutex_lock(&ichan->chan_mutex); 1619 mutex_lock(&ichan->chan_mutex);
1609 1620
1610 __idmac_terminate_all(chan); 1621 __idmac_control(chan, DMA_TERMINATE_ALL, 0);
1611 1622
1612 if (ichan->status > IPU_CHANNEL_FREE) { 1623 if (ichan->status > IPU_CHANNEL_FREE) {
1613#ifdef DEBUG 1624#ifdef DEBUG
@@ -1637,15 +1648,12 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
1637 tasklet_schedule(&to_ipu(idmac)->tasklet); 1648 tasklet_schedule(&to_ipu(idmac)->tasklet);
1638} 1649}
1639 1650
1640static enum dma_status idmac_is_tx_complete(struct dma_chan *chan, 1651static enum dma_status idmac_tx_status(struct dma_chan *chan,
1641 dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used) 1652 dma_cookie_t cookie, struct dma_tx_state *txstate)
1642{ 1653{
1643 struct idmac_channel *ichan = to_idmac_chan(chan); 1654 struct idmac_channel *ichan = to_idmac_chan(chan);
1644 1655
1645 if (done) 1656 dma_set_tx_state(txstate, ichan->completed, chan->cookie, 0);
1646 *done = ichan->completed;
1647 if (used)
1648 *used = chan->cookie;
1649 if (cookie != chan->cookie) 1657 if (cookie != chan->cookie)
1650 return DMA_ERROR; 1658 return DMA_ERROR;
1651 return DMA_SUCCESS; 1659 return DMA_SUCCESS;
@@ -1664,12 +1672,12 @@ static int __init ipu_idmac_init(struct ipu *ipu)
1664 dma->dev = ipu->dev; 1672 dma->dev = ipu->dev;
1665 dma->device_alloc_chan_resources = idmac_alloc_chan_resources; 1673 dma->device_alloc_chan_resources = idmac_alloc_chan_resources;
1666 dma->device_free_chan_resources = idmac_free_chan_resources; 1674 dma->device_free_chan_resources = idmac_free_chan_resources;
1667 dma->device_is_tx_complete = idmac_is_tx_complete; 1675 dma->device_tx_status = idmac_tx_status;
1668 dma->device_issue_pending = idmac_issue_pending; 1676 dma->device_issue_pending = idmac_issue_pending;
1669 1677
1670 /* Compulsory for DMA_SLAVE fields */ 1678 /* Compulsory for DMA_SLAVE fields */
1671 dma->device_prep_slave_sg = idmac_prep_slave_sg; 1679 dma->device_prep_slave_sg = idmac_prep_slave_sg;
1672 dma->device_terminate_all = idmac_terminate_all; 1680 dma->device_control = idmac_control;
1673 1681
1674 INIT_LIST_HEAD(&dma->channels); 1682 INIT_LIST_HEAD(&dma->channels);
1675 for (i = 0; i < IPU_CHANNELS_NUM; i++) { 1683 for (i = 0; i < IPU_CHANNELS_NUM; i++) {
@@ -1703,7 +1711,7 @@ static void __exit ipu_idmac_exit(struct ipu *ipu)
1703 for (i = 0; i < IPU_CHANNELS_NUM; i++) { 1711 for (i = 0; i < IPU_CHANNELS_NUM; i++) {
1704 struct idmac_channel *ichan = ipu->channel + i; 1712 struct idmac_channel *ichan = ipu->channel + i;
1705 1713
1706 idmac_terminate_all(&ichan->dma_chan); 1714 idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL, 0);
1707 idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0); 1715 idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0);
1708 } 1716 }
1709 1717
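
ipu_idmac.c is converted from device_terminate_all to the generic device_control callback, which takes a command plus argument and must return an error for commands it does not implement. A hedged model of that dispatch is below; demo_chan and demo_terminate() are stand-ins invented for the example, and the dma_ctrl_cmd values are defined locally.

/* Sketch: the device_control() dispatch shape, returning -ENXIO for
 * unsupported commands as __idmac_control() does above. */
#include <assert.h>
#include <errno.h>
#include <stdbool.h>

enum demo_ctrl_cmd { DEMO_TERMINATE_ALL, DEMO_PAUSE, DEMO_RESUME, DEMO_SLAVE_CONFIG };

struct demo_chan {
	bool running;
};

static void demo_terminate(struct demo_chan *c)
{
	c->running = false;        /* drop queued work and stop the channel */
}

static int demo_control(struct demo_chan *c, enum demo_ctrl_cmd cmd,
			unsigned long arg)
{
	(void)arg;                 /* unused for terminate-all */

	if (cmd != DEMO_TERMINATE_ALL)
		return -ENXIO;     /* only terminate-all is supported here */

	demo_terminate(c);
	return 0;
}

int main(void)
{
	struct demo_chan c = { .running = true };

	assert(demo_control(&c, DEMO_PAUSE, 0) == -ENXIO);
	assert(c.running);                          /* unsupported cmd is a no-op */
	assert(demo_control(&c, DEMO_TERMINATE_ALL, 0) == 0);
	assert(!c.running);
	return 0;
}

The int return is what lets free_chan_resources and the exit path above reuse the same entry point (__idmac_control(chan, DMA_TERMINATE_ALL, 0)) instead of a dedicated terminate helper.
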
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index bbbd58566625..14a8c0f1698e 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -541,8 +541,8 @@ static void mpc_dma_issue_pending(struct dma_chan *chan)
541 541
542/* Check request completion status */ 542/* Check request completion status */
543static enum dma_status 543static enum dma_status
544mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie, 544mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
545 dma_cookie_t *done, dma_cookie_t *used) 545 struct dma_tx_state *txstate)
546{ 546{
547 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); 547 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
548 unsigned long flags; 548 unsigned long flags;
@@ -554,12 +554,7 @@ mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie,
554 last_complete = mchan->completed_cookie; 554 last_complete = mchan->completed_cookie;
555 spin_unlock_irqrestore(&mchan->lock, flags); 555 spin_unlock_irqrestore(&mchan->lock, flags);
556 556
557 if (done) 557 dma_set_tx_state(txstate, last_complete, last_used, 0);
558 *done = last_complete;
559
560 if (used)
561 *used = last_used;
562
563 return dma_async_is_complete(cookie, last_complete, last_used); 558 return dma_async_is_complete(cookie, last_complete, last_used);
564} 559}
565 560
@@ -635,7 +630,7 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
635static int __devinit mpc_dma_probe(struct of_device *op, 630static int __devinit mpc_dma_probe(struct of_device *op,
636 const struct of_device_id *match) 631 const struct of_device_id *match)
637{ 632{
638 struct device_node *dn = op->node; 633 struct device_node *dn = op->dev.of_node;
639 struct device *dev = &op->dev; 634 struct device *dev = &op->dev;
640 struct dma_device *dma; 635 struct dma_device *dma;
641 struct mpc_dma *mdma; 636 struct mpc_dma *mdma;
@@ -663,7 +658,7 @@ static int __devinit mpc_dma_probe(struct of_device *op,
663 } 658 }
664 659
665 regs_start = res.start; 660 regs_start = res.start;
666 regs_size = res.end - res.start + 1; 661 regs_size = resource_size(&res);
667 662
668 if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) { 663 if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
669 dev_err(dev, "Error requesting memory region!\n"); 664 dev_err(dev, "Error requesting memory region!\n");
@@ -694,7 +689,7 @@ static int __devinit mpc_dma_probe(struct of_device *op,
694 dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources; 689 dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
695 dma->device_free_chan_resources = mpc_dma_free_chan_resources; 690 dma->device_free_chan_resources = mpc_dma_free_chan_resources;
696 dma->device_issue_pending = mpc_dma_issue_pending; 691 dma->device_issue_pending = mpc_dma_issue_pending;
697 dma->device_is_tx_complete = mpc_dma_is_tx_complete; 692 dma->device_tx_status = mpc_dma_tx_status;
698 dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy; 693 dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
699 694
700 INIT_LIST_HEAD(&dma->channels); 695 INIT_LIST_HEAD(&dma->channels);
@@ -776,12 +771,12 @@ static struct of_device_id mpc_dma_match[] = {
776}; 771};
777 772
778static struct of_platform_driver mpc_dma_driver = { 773static struct of_platform_driver mpc_dma_driver = {
779 .match_table = mpc_dma_match,
780 .probe = mpc_dma_probe, 774 .probe = mpc_dma_probe,
781 .remove = __devexit_p(mpc_dma_remove), 775 .remove = __devexit_p(mpc_dma_remove),
782 .driver = { 776 .driver = {
783 .name = DRV_NAME, 777 .name = DRV_NAME,
784 .owner = THIS_MODULE, 778 .owner = THIS_MODULE,
779 .of_match_table = mpc_dma_match,
785 }, 780 },
786}; 781};
787 782
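
Among the smaller mpc512x_dma.c cleanups, the open-coded res.end - res.start + 1 becomes resource_size(). Since struct resource uses an inclusive end address, the +1 is easy to drop by accident; the tiny stand-alone check below reproduces only the start/end fields and the arithmetic.

/* Sketch: resource_size() semantics - an inclusive end address means the
 * size is end - start + 1.  demo_resource is a local stand-in. */
#include <assert.h>
#include <stdint.h>

struct demo_resource {
	uint64_t start;
	uint64_t end;      /* inclusive, matching struct resource */
};

static uint64_t demo_resource_size(const struct demo_resource *res)
{
	return res->end - res->start + 1;
}

int main(void)
{
	/* a 4 KiB register window at 0x8000: 0x8000..0x8fff inclusive */
	struct demo_resource res = { .start = 0x8000, .end = 0x8fff };

	assert(demo_resource_size(&res) == 0x1000);
	return 0;
}
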
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index e2fd34da64f2..86c5ae9fde34 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -810,14 +810,14 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan)
810} 810}
811 811
812/** 812/**
813 * mv_xor_is_complete - poll the status of an XOR transaction 813 * mv_xor_status - poll the status of an XOR transaction
814 * @chan: XOR channel handle 814 * @chan: XOR channel handle
815 * @cookie: XOR transaction identifier 815 * @cookie: XOR transaction identifier
816 * @txstate: XOR transactions state holder (or NULL)
816 */ 817 */
817static enum dma_status mv_xor_is_complete(struct dma_chan *chan, 818static enum dma_status mv_xor_status(struct dma_chan *chan,
818 dma_cookie_t cookie, 819 dma_cookie_t cookie,
819 dma_cookie_t *done, 820 struct dma_tx_state *txstate)
820 dma_cookie_t *used)
821{ 821{
822 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 822 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
823 dma_cookie_t last_used; 823 dma_cookie_t last_used;
@@ -827,10 +827,7 @@ static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
827 last_used = chan->cookie; 827 last_used = chan->cookie;
828 last_complete = mv_chan->completed_cookie; 828 last_complete = mv_chan->completed_cookie;
829 mv_chan->is_complete_cookie = cookie; 829 mv_chan->is_complete_cookie = cookie;
830 if (done) 830 dma_set_tx_state(txstate, last_complete, last_used, 0);
831 *done = last_complete;
832 if (used)
833 *used = last_used;
834 831
835 ret = dma_async_is_complete(cookie, last_complete, last_used); 832 ret = dma_async_is_complete(cookie, last_complete, last_used);
836 if (ret == DMA_SUCCESS) { 833 if (ret == DMA_SUCCESS) {
@@ -842,11 +839,7 @@ static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
842 last_used = chan->cookie; 839 last_used = chan->cookie;
843 last_complete = mv_chan->completed_cookie; 840 last_complete = mv_chan->completed_cookie;
844 841
845 if (done) 842 dma_set_tx_state(txstate, last_complete, last_used, 0);
846 *done = last_complete;
847 if (used)
848 *used = last_used;
849
850 return dma_async_is_complete(cookie, last_complete, last_used); 843 return dma_async_is_complete(cookie, last_complete, last_used);
851} 844}
852 845
@@ -975,7 +968,7 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
975 async_tx_ack(tx); 968 async_tx_ack(tx);
976 msleep(1); 969 msleep(1);
977 970
978 if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) != 971 if (mv_xor_status(dma_chan, cookie, NULL) !=
979 DMA_SUCCESS) { 972 DMA_SUCCESS) {
980 dev_printk(KERN_ERR, dma_chan->device->dev, 973 dev_printk(KERN_ERR, dma_chan->device->dev,
981 "Self-test copy timed out, disabling\n"); 974 "Self-test copy timed out, disabling\n");
@@ -1073,7 +1066,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
1073 async_tx_ack(tx); 1066 async_tx_ack(tx);
1074 msleep(8); 1067 msleep(8);
1075 1068
1076 if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) != 1069 if (mv_xor_status(dma_chan, cookie, NULL) !=
1077 DMA_SUCCESS) { 1070 DMA_SUCCESS) {
1078 dev_printk(KERN_ERR, dma_chan->device->dev, 1071 dev_printk(KERN_ERR, dma_chan->device->dev,
1079 "Self-test xor timed out, disabling\n"); 1072 "Self-test xor timed out, disabling\n");
@@ -1168,7 +1161,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
1168 /* set base routines */ 1161 /* set base routines */
1169 dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources; 1162 dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1170 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; 1163 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1171 dma_dev->device_is_tx_complete = mv_xor_is_complete; 1164 dma_dev->device_tx_status = mv_xor_status;
1172 dma_dev->device_issue_pending = mv_xor_issue_pending; 1165 dma_dev->device_issue_pending = mv_xor_issue_pending;
1173 dma_dev->dev = &pdev->dev; 1166 dma_dev->dev = &pdev->dev;
1174 1167
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
new file mode 100644
index 000000000000..7c50f6dfd3f4
--- /dev/null
+++ b/drivers/dma/pl330.c
@@ -0,0 +1,866 @@
1/* linux/drivers/dma/pl330.c
2 *
3 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
4 * Jaswinder Singh <jassi.brar@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/io.h>
13#include <linux/init.h>
14#include <linux/slab.h>
15#include <linux/module.h>
16#include <linux/dmaengine.h>
17#include <linux/interrupt.h>
18#include <linux/amba/bus.h>
19#include <linux/amba/pl330.h>
20
21#define NR_DEFAULT_DESC 16
22
23enum desc_status {
24 /* In the DMAC pool */
25 FREE,
26 /*
 27 * Allocated to some channel during prep_xxx
28 * Also may be sitting on the work_list.
29 */
30 PREP,
31 /*
32 * Sitting on the work_list and already submitted
33 * to the PL330 core. Not more than two descriptors
34 * of a channel can be BUSY at any time.
35 */
36 BUSY,
37 /*
38 * Sitting on the channel work_list but xfer done
39 * by PL330 core
40 */
41 DONE,
42};
43
44struct dma_pl330_chan {
45 /* Schedule desc completion */
46 struct tasklet_struct task;
47
48 /* DMA-Engine Channel */
49 struct dma_chan chan;
50
51 /* Last completed cookie */
52 dma_cookie_t completed;
53
54 /* List of to be xfered descriptors */
55 struct list_head work_list;
56
57 /* Pointer to the DMAC that manages this channel,
58 * NULL if the channel is available to be acquired.
59 * As the parent, this DMAC also provides descriptors
60 * to the channel.
61 */
62 struct dma_pl330_dmac *dmac;
63
64 /* To protect channel manipulation */
65 spinlock_t lock;
66
67 /* Token of a hardware channel thread of PL330 DMAC
68 * NULL if the channel is available to be acquired.
69 */
70 void *pl330_chid;
71};
72
73struct dma_pl330_dmac {
74 struct pl330_info pif;
75
76 /* DMA-Engine Device */
77 struct dma_device ddma;
78
79 /* Pool of descriptors available for the DMAC's channels */
80 struct list_head desc_pool;
81 /* To protect desc_pool manipulation */
82 spinlock_t pool_lock;
83
84 /* Peripheral channels connected to this DMAC */
85 struct dma_pl330_chan peripherals[0]; /* keep at end */
86};
87
88struct dma_pl330_desc {
89 /* To attach to a queue as child */
90 struct list_head node;
91
92 /* Descriptor for the DMA Engine API */
93 struct dma_async_tx_descriptor txd;
94
95 /* Xfer for PL330 core */
96 struct pl330_xfer px;
97
98 struct pl330_reqcfg rqcfg;
99 struct pl330_req req;
100
101 enum desc_status status;
102
103 /* The channel which currently holds this desc */
104 struct dma_pl330_chan *pchan;
105};
106
107static inline struct dma_pl330_chan *
108to_pchan(struct dma_chan *ch)
109{
110 if (!ch)
111 return NULL;
112
113 return container_of(ch, struct dma_pl330_chan, chan);
114}
115
116static inline struct dma_pl330_desc *
117to_desc(struct dma_async_tx_descriptor *tx)
118{
119 return container_of(tx, struct dma_pl330_desc, txd);
120}
121
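/*
 * Invoke the client callback of every completed descriptor on the
 * list, then return all of them to the DMAC descriptor pool in one
 * splice under the pool lock.
 */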
122static inline void free_desc_list(struct list_head *list)
123{
124 struct dma_pl330_dmac *pdmac;
125 struct dma_pl330_desc *desc;
126 struct dma_pl330_chan *pch;
127 unsigned long flags;
128
129 if (list_empty(list))
130 return;
131
132 /* Finish off the work list */
133 list_for_each_entry(desc, list, node) {
134 dma_async_tx_callback callback;
135 void *param;
136
137 /* All desc in a list belong to same channel */
138 pch = desc->pchan;
139 callback = desc->txd.callback;
140 param = desc->txd.callback_param;
141
142 if (callback)
143 callback(param);
144
145 desc->pchan = NULL;
146 }
147
148 pdmac = pch->dmac;
149
150 spin_lock_irqsave(&pdmac->pool_lock, flags);
151 list_splice_tail_init(list, &pdmac->desc_pool);
152 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
153}
154
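/*
 * Push prepared descriptors from the work_list to the PL330 core,
 * stopping at the first one that is already BUSY. At most one new
 * request is submitted per call; -EAGAIN (queue full or DMAC dying)
 * ends the attempt, while any other error marks the descriptor DONE
 * so the tasklet can hand it back to the client.
 */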
155static inline void fill_queue(struct dma_pl330_chan *pch)
156{
157 struct dma_pl330_desc *desc;
158 int ret;
159
160 list_for_each_entry(desc, &pch->work_list, node) {
161
162 /* If already submitted */
163 if (desc->status == BUSY)
164 break;
165
166 ret = pl330_submit_req(pch->pl330_chid,
167 &desc->req);
168 if (!ret) {
169 desc->status = BUSY;
170 break;
171 } else if (ret == -EAGAIN) {
172 /* QFull or DMAC Dying */
173 break;
174 } else {
175 /* Unacceptable request */
176 desc->status = DONE;
177 dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
178 __func__, __LINE__, desc->txd.cookie);
179 tasklet_schedule(&pch->task);
180 }
181 }
182}
183
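/*
 * Channel tasklet: under the channel lock, collect all DONE
 * descriptors (updating the last completed cookie), refill the PL330
 * request queue and make sure the channel thread is running; then,
 * outside the lock, run the client callbacks and return the collected
 * descriptors to the DMAC pool.
 */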
184static void pl330_tasklet(unsigned long data)
185{
186 struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
187 struct dma_pl330_desc *desc, *_dt;
188 unsigned long flags;
189 LIST_HEAD(list);
190
191 spin_lock_irqsave(&pch->lock, flags);
192
193 /* Pick up ripe tomatoes */
194 list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
195 if (desc->status == DONE) {
196 pch->completed = desc->txd.cookie;
197 list_move_tail(&desc->node, &list);
198 }
199
200 /* Try to submit a req imm. next to the last completed cookie */
201 fill_queue(pch);
202
203 /* Make sure the PL330 Channel thread is active */
204 pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
205
206 spin_unlock_irqrestore(&pch->lock, flags);
207
208 free_desc_list(&list);
209}
210
211static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
212{
213 struct dma_pl330_desc *desc = token;
214 struct dma_pl330_chan *pch = desc->pchan;
215 unsigned long flags;
216
217 /* If desc aborted */
218 if (!pch)
219 return;
220
221 spin_lock_irqsave(&pch->lock, flags);
222
223 desc->status = DONE;
224
225 spin_unlock_irqrestore(&pch->lock, flags);
226
227 tasklet_schedule(&pch->task);
228}
229
230static int pl330_alloc_chan_resources(struct dma_chan *chan)
231{
232 struct dma_pl330_chan *pch = to_pchan(chan);
233 struct dma_pl330_dmac *pdmac = pch->dmac;
234 unsigned long flags;
235
236 spin_lock_irqsave(&pch->lock, flags);
237
238 pch->completed = chan->cookie = 1;
239
240 pch->pl330_chid = pl330_request_channel(&pdmac->pif);
241 if (!pch->pl330_chid) {
242 spin_unlock_irqrestore(&pch->lock, flags);
243 return 0;
244 }
245
246 tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
247
248 spin_unlock_irqrestore(&pch->lock, flags);
249
250 return 1;
251}
252
253static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
254{
255 struct dma_pl330_chan *pch = to_pchan(chan);
256 struct dma_pl330_desc *desc;
257 unsigned long flags;
258
259 /* Only supports DMA_TERMINATE_ALL */
260 if (cmd != DMA_TERMINATE_ALL)
261 return -ENXIO;
262
263 spin_lock_irqsave(&pch->lock, flags);
264
265 /* FLUSH the PL330 Channel thread */
266 pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
267
268 /* Mark all desc done */
269 list_for_each_entry(desc, &pch->work_list, node)
270 desc->status = DONE;
271
272 spin_unlock_irqrestore(&pch->lock, flags);
273
274 pl330_tasklet((unsigned long) pch);
275
276 return 0;
277}
278
279static void pl330_free_chan_resources(struct dma_chan *chan)
280{
281 struct dma_pl330_chan *pch = to_pchan(chan);
282 unsigned long flags;
283
284 spin_lock_irqsave(&pch->lock, flags);
285
286 tasklet_kill(&pch->task);
287
288 pl330_release_channel(pch->pl330_chid);
289 pch->pl330_chid = NULL;
290
291 spin_unlock_irqrestore(&pch->lock, flags);
292}
293
294static enum dma_status
295pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
296 struct dma_tx_state *txstate)
297{
298 struct dma_pl330_chan *pch = to_pchan(chan);
299 dma_cookie_t last_done, last_used;
300 int ret;
301
302 last_done = pch->completed;
303 last_used = chan->cookie;
304
305 ret = dma_async_is_complete(cookie, last_done, last_used);
306
307 dma_set_tx_state(txstate, last_done, last_used, 0);
308
309 return ret;
310}
311
312static void pl330_issue_pending(struct dma_chan *chan)
313{
314 pl330_tasklet((unsigned long) to_pchan(chan));
315}
316
317/*
318 * We returned the last one of the circular list of descriptor(s)
319 * from prep_xxx, so the argument to submit corresponds to the last
320 * descriptor of the list.
321 */
322static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
323{
324 struct dma_pl330_desc *desc, *last = to_desc(tx);
325 struct dma_pl330_chan *pch = to_pchan(tx->chan);
326 dma_cookie_t cookie;
327 unsigned long flags;
328
329 spin_lock_irqsave(&pch->lock, flags);
330
331 /* Assign cookies to all nodes */
332 cookie = tx->chan->cookie;
333
334 while (!list_empty(&last->node)) {
335 desc = list_entry(last->node.next, struct dma_pl330_desc, node);
336
337 if (++cookie < 0)
338 cookie = 1;
339 desc->txd.cookie = cookie;
340
341 list_move_tail(&desc->node, &pch->work_list);
342 }
343
344 if (++cookie < 0)
345 cookie = 1;
346 last->txd.cookie = cookie;
347
348 list_add_tail(&last->node, &pch->work_list);
349
350 tx->chan->cookie = cookie;
351
352 spin_unlock_irqrestore(&pch->lock, flags);
353
354 return cookie;
355}
356
357static inline void _init_desc(struct dma_pl330_desc *desc)
358{
359 desc->pchan = NULL;
360 desc->req.x = &desc->px;
361 desc->req.token = desc;
362 desc->rqcfg.swap = SWAP_NO;
363 desc->rqcfg.privileged = 0;
364 desc->rqcfg.insnaccess = 0;
365 desc->rqcfg.scctl = SCCTRL0;
366 desc->rqcfg.dcctl = DCCTRL0;
367 desc->req.cfg = &desc->rqcfg;
368 desc->req.xfer_cb = dma_pl330_rqcb;
369 desc->txd.tx_submit = pl330_tx_submit;
370
371 INIT_LIST_HEAD(&desc->node);
372}
373
374/* Returns the number of descriptors added to the DMAC pool */
375int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
376{
377 struct dma_pl330_desc *desc;
378 unsigned long flags;
379 int i;
380
381 if (!pdmac)
382 return 0;
383
384 desc = kmalloc(count * sizeof(*desc), flg);
385 if (!desc)
386 return 0;
387
388 spin_lock_irqsave(&pdmac->pool_lock, flags);
389
390 for (i = 0; i < count; i++) {
391 _init_desc(&desc[i]);
392 list_add_tail(&desc[i].node, &pdmac->desc_pool);
393 }
394
395 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
396
397 return count;
398}
399
400static struct dma_pl330_desc *
401pluck_desc(struct dma_pl330_dmac *pdmac)
402{
403 struct dma_pl330_desc *desc = NULL;
404 unsigned long flags;
405
406 if (!pdmac)
407 return NULL;
408
409 spin_lock_irqsave(&pdmac->pool_lock, flags);
410
411 if (!list_empty(&pdmac->desc_pool)) {
412 desc = list_entry(pdmac->desc_pool.next,
413 struct dma_pl330_desc, node);
414
415 list_del_init(&desc->node);
416
417 desc->status = PREP;
418 desc->txd.callback = NULL;
419 }
420
421 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
422
423 return desc;
424}
425
426static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
427{
428 struct dma_pl330_dmac *pdmac = pch->dmac;
429 struct dma_pl330_peri *peri = pch->chan.private;
430 struct dma_pl330_desc *desc;
431
432 /* Pluck one desc from the pool of DMAC */
433 desc = pluck_desc(pdmac);
434
435 /* If the DMAC pool is empty, alloc new */
436 if (!desc) {
437 if (!add_desc(pdmac, GFP_ATOMIC, 1))
438 return NULL;
439
440 /* Try again */
441 desc = pluck_desc(pdmac);
442 if (!desc) {
443 dev_err(pch->dmac->pif.dev,
444 "%s:%d ALERT!\n", __func__, __LINE__);
445 return NULL;
446 }
447 }
448
449 /* Initialize the descriptor */
450 desc->pchan = pch;
451 desc->txd.cookie = 0;
452 async_tx_ack(&desc->txd);
453
454 desc->req.rqtype = peri->rqtype;
455 desc->req.peri = peri->peri_id;
456
457 dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
458
459 return desc;
460}
461
462static inline void fill_px(struct pl330_xfer *px,
463 dma_addr_t dst, dma_addr_t src, size_t len)
464{
465 px->next = NULL;
466 px->bytes = len;
467 px->dst_addr = dst;
468 px->src_addr = src;
469}
470
471static struct dma_pl330_desc *
472__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
473 dma_addr_t src, size_t len)
474{
475 struct dma_pl330_desc *desc = pl330_get_desc(pch);
476
477 if (!desc) {
478 dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
479 __func__, __LINE__);
480 return NULL;
481 }
482
483 /*
 484	 * Ideally we should look out for reqs bigger than
 485	 * those that can be programmed with 256 bytes of
 486	 * MC buffer, but considering a req size is seldom
 487	 * going to be word-unaligned and more than 200MB,
 488	 * we take it easy.
 489	 * Also, should the limit be reached, we'd rather
 490	 * have the platform increase the MC buffer size
 491	 * than complicate this API driver.
492 */
493 fill_px(&desc->px, dst, src, len);
494
495 return desc;
496}
497
498/* Call after fixing burst size */
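/*
 * Start with the whole data buffer expressed in units of the chosen
 * burst size, cap it at 16 (the PL330 limit for src/dst burst length)
 * and then shrink it until the transfer length is a whole number of
 * bursts.
 */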
499static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
500{
501 struct dma_pl330_chan *pch = desc->pchan;
502 struct pl330_info *pi = &pch->dmac->pif;
503 int burst_len;
504
505 burst_len = pi->pcfg.data_bus_width / 8;
506 burst_len *= pi->pcfg.data_buf_dep;
507 burst_len >>= desc->rqcfg.brst_size;
508
509 /* src/dst_burst_len can't be more than 16 */
510 if (burst_len > 16)
511 burst_len = 16;
512
513 while (burst_len > 1) {
514 if (!(len % (burst_len << desc->rqcfg.brst_size)))
515 break;
516 burst_len--;
517 }
518
519 return burst_len;
520}
521
522static struct dma_async_tx_descriptor *
523pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
524 dma_addr_t src, size_t len, unsigned long flags)
525{
526 struct dma_pl330_desc *desc;
527 struct dma_pl330_chan *pch = to_pchan(chan);
528 struct dma_pl330_peri *peri = chan->private;
529 struct pl330_info *pi;
530 int burst;
531
532 if (unlikely(!pch || !len || !peri))
533 return NULL;
534
535 if (peri->rqtype != MEMTOMEM)
536 return NULL;
537
538 pi = &pch->dmac->pif;
539
540 desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
541 if (!desc)
542 return NULL;
543
544 desc->rqcfg.src_inc = 1;
545 desc->rqcfg.dst_inc = 1;
546
547 /* Select max possible burst size */
548 burst = pi->pcfg.data_bus_width / 8;
549
550 while (burst > 1) {
551 if (!(len % burst))
552 break;
553 burst /= 2;
554 }
555
556 desc->rqcfg.brst_size = 0;
557 while (burst != (1 << desc->rqcfg.brst_size))
558 desc->rqcfg.brst_size++;
559
560 desc->rqcfg.brst_len = get_burst_len(desc, len);
561
562 desc->txd.flags = flags;
563
564 return &desc->txd;
565}
566
567static struct dma_async_tx_descriptor *
568pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
569 unsigned int sg_len, enum dma_data_direction direction,
570 unsigned long flg)
571{
572 struct dma_pl330_desc *first, *desc = NULL;
573 struct dma_pl330_chan *pch = to_pchan(chan);
574 struct dma_pl330_peri *peri = chan->private;
575 struct scatterlist *sg;
576 unsigned long flags;
577 int i, burst_size;
578 dma_addr_t addr;
579
580 if (unlikely(!pch || !sgl || !sg_len))
581 return NULL;
582
583 /* Make sure the direction is consistent */
584 if ((direction == DMA_TO_DEVICE &&
585 peri->rqtype != MEMTODEV) ||
586 (direction == DMA_FROM_DEVICE &&
587 peri->rqtype != DEVTOMEM)) {
588 dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
589 __func__, __LINE__);
590 return NULL;
591 }
592
593 addr = peri->fifo_addr;
594 burst_size = peri->burst_sz;
595
596 first = NULL;
597
598 for_each_sg(sgl, sg, sg_len, i) {
599
600 desc = pl330_get_desc(pch);
601 if (!desc) {
602 struct dma_pl330_dmac *pdmac = pch->dmac;
603
604 dev_err(pch->dmac->pif.dev,
605 "%s:%d Unable to fetch desc\n",
606 __func__, __LINE__);
607 if (!first)
608 return NULL;
609
610 spin_lock_irqsave(&pdmac->pool_lock, flags);
611
612 while (!list_empty(&first->node)) {
613 desc = list_entry(first->node.next,
614 struct dma_pl330_desc, node);
615 list_move_tail(&desc->node, &pdmac->desc_pool);
616 }
617
618 list_move_tail(&first->node, &pdmac->desc_pool);
619
620 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
621
622 return NULL;
623 }
624
625 if (!first)
626 first = desc;
627 else
628 list_add_tail(&desc->node, &first->node);
629
630 if (direction == DMA_TO_DEVICE) {
631 desc->rqcfg.src_inc = 1;
632 desc->rqcfg.dst_inc = 0;
633 fill_px(&desc->px,
634 addr, sg_dma_address(sg), sg_dma_len(sg));
635 } else {
636 desc->rqcfg.src_inc = 0;
637 desc->rqcfg.dst_inc = 1;
638 fill_px(&desc->px,
639 sg_dma_address(sg), addr, sg_dma_len(sg));
640 }
641
642 desc->rqcfg.brst_size = burst_size;
643 desc->rqcfg.brst_len = 1;
644 }
645
646 /* Return the last desc in the chain */
647 desc->txd.flags = flg;
648 return &desc->txd;
649}
650
651static irqreturn_t pl330_irq_handler(int irq, void *data)
652{
653 if (pl330_update(data))
654 return IRQ_HANDLED;
655 else
656 return IRQ_NONE;
657}
658
659static int __devinit
660pl330_probe(struct amba_device *adev, struct amba_id *id)
661{
662 struct dma_pl330_platdata *pdat;
663 struct dma_pl330_dmac *pdmac;
664 struct dma_pl330_chan *pch;
665 struct pl330_info *pi;
666 struct dma_device *pd;
667 struct resource *res;
668 int i, ret, irq;
669
670 pdat = adev->dev.platform_data;
671
672 if (!pdat || !pdat->nr_valid_peri) {
673 dev_err(&adev->dev, "platform data missing\n");
674 return -ENODEV;
675 }
676
677 /* Allocate a new DMAC and its Channels */
678 pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch)
679 + sizeof(*pdmac), GFP_KERNEL);
680 if (!pdmac) {
681 dev_err(&adev->dev, "unable to allocate mem\n");
682 return -ENOMEM;
683 }
684
685 pi = &pdmac->pif;
686 pi->dev = &adev->dev;
687 pi->pl330_data = NULL;
688 pi->mcbufsz = pdat->mcbuf_sz;
689
690 res = &adev->res;
691 request_mem_region(res->start, resource_size(res), "dma-pl330");
692
693 pi->base = ioremap(res->start, resource_size(res));
694 if (!pi->base) {
695 ret = -ENXIO;
696 goto probe_err1;
697 }
698
699 irq = adev->irq[0];
700 ret = request_irq(irq, pl330_irq_handler, 0,
701 dev_name(&adev->dev), pi);
702 if (ret)
703 goto probe_err2;
704
705 ret = pl330_add(pi);
706 if (ret)
707 goto probe_err3;
708
709 INIT_LIST_HEAD(&pdmac->desc_pool);
710 spin_lock_init(&pdmac->pool_lock);
711
712 /* Create a descriptor pool of default size */
713 if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
714 dev_warn(&adev->dev, "unable to allocate desc\n");
715
716 pd = &pdmac->ddma;
717 INIT_LIST_HEAD(&pd->channels);
718
719 /* Initialize channel parameters */
720 for (i = 0; i < pdat->nr_valid_peri; i++) {
721 struct dma_pl330_peri *peri = &pdat->peri[i];
722 pch = &pdmac->peripherals[i];
723
724 switch (peri->rqtype) {
725 case MEMTOMEM:
726 dma_cap_set(DMA_MEMCPY, pd->cap_mask);
727 break;
728 case MEMTODEV:
729 case DEVTOMEM:
730 dma_cap_set(DMA_SLAVE, pd->cap_mask);
731 break;
732 default:
733 dev_err(&adev->dev, "DEVTODEV Not Supported\n");
734 continue;
735 }
736
737 INIT_LIST_HEAD(&pch->work_list);
738 spin_lock_init(&pch->lock);
739 pch->pl330_chid = NULL;
740 pch->chan.private = peri;
741 pch->chan.device = pd;
742 pch->chan.chan_id = i;
743 pch->dmac = pdmac;
744
745 /* Add the channel to the DMAC list */
746 pd->chancnt++;
747 list_add_tail(&pch->chan.device_node, &pd->channels);
748 }
749
750 pd->dev = &adev->dev;
751
752 pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
753 pd->device_free_chan_resources = pl330_free_chan_resources;
754 pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
755 pd->device_tx_status = pl330_tx_status;
756 pd->device_prep_slave_sg = pl330_prep_slave_sg;
757 pd->device_control = pl330_control;
758 pd->device_issue_pending = pl330_issue_pending;
759
760 ret = dma_async_device_register(pd);
761 if (ret) {
762 dev_err(&adev->dev, "unable to register DMAC\n");
763 goto probe_err4;
764 }
765
766 amba_set_drvdata(adev, pdmac);
767
768 dev_info(&adev->dev,
769 "Loaded driver for PL330 DMAC-%d\n", adev->periphid);
770 dev_info(&adev->dev,
771 "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
772 pi->pcfg.data_buf_dep,
773 pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
774 pi->pcfg.num_peri, pi->pcfg.num_events);
775
776 return 0;
777
778probe_err4:
779 pl330_del(pi);
780probe_err3:
781 free_irq(irq, pi);
782probe_err2:
783 iounmap(pi->base);
784probe_err1:
785 release_mem_region(res->start, resource_size(res));
786 kfree(pdmac);
787
788 return ret;
789}
790
791static int __devexit pl330_remove(struct amba_device *adev)
792{
793 struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
794 struct dma_pl330_chan *pch, *_p;
795 struct pl330_info *pi;
796 struct resource *res;
797 int irq;
798
799 if (!pdmac)
800 return 0;
801
802 amba_set_drvdata(adev, NULL);
803
804 /* Idle the DMAC */
805 list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
806 chan.device_node) {
807
808 /* Remove the channel */
809 list_del(&pch->chan.device_node);
810
811 /* Flush the channel */
812 pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
813 pl330_free_chan_resources(&pch->chan);
814 }
815
816 pi = &pdmac->pif;
817
818 pl330_del(pi);
819
820 irq = adev->irq[0];
821 free_irq(irq, pi);
822
823 iounmap(pi->base);
824
825 res = &adev->res;
826 release_mem_region(res->start, resource_size(res));
827
828 kfree(pdmac);
829
830 return 0;
831}
832
833static struct amba_id pl330_ids[] = {
834 {
835 .id = 0x00041330,
836 .mask = 0x000fffff,
837 },
838 { 0, 0 },
839};
840
841static struct amba_driver pl330_driver = {
842 .drv = {
843 .owner = THIS_MODULE,
844 .name = "dma-pl330",
845 },
846 .id_table = pl330_ids,
847 .probe = pl330_probe,
848 .remove = pl330_remove,
849};
850
851static int __init pl330_init(void)
852{
853 return amba_driver_register(&pl330_driver);
854}
855module_init(pl330_init);
856
857static void __exit pl330_exit(void)
858{
859 amba_driver_unregister(&pl330_driver);
860 return;
861}
862module_exit(pl330_exit);
863
864MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
865MODULE_DESCRIPTION("API Driver for PL330 DMAC");
866MODULE_LICENSE("GPL");
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index d44626fa35ad..5a22ca6927e5 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -3935,12 +3935,13 @@ static void ppc440spe_adma_free_chan_resources(struct dma_chan *chan)
3935} 3935}
3936 3936
3937/** 3937/**
3938 * ppc440spe_adma_is_complete - poll the status of an ADMA transaction 3938 * ppc440spe_adma_tx_status - poll the status of an ADMA transaction
3939 * @chan: ADMA channel handle 3939 * @chan: ADMA channel handle
3940 * @cookie: ADMA transaction identifier 3940 * @cookie: ADMA transaction identifier
3941 * @txstate: a holder for the current state of the channel
3941 */ 3942 */
3942static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan, 3943static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
3943 dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used) 3944 dma_cookie_t cookie, struct dma_tx_state *txstate)
3944{ 3945{
3945 struct ppc440spe_adma_chan *ppc440spe_chan; 3946 struct ppc440spe_adma_chan *ppc440spe_chan;
3946 dma_cookie_t last_used; 3947 dma_cookie_t last_used;
@@ -3951,10 +3952,7 @@ static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan,
3951 last_used = chan->cookie; 3952 last_used = chan->cookie;
3952 last_complete = ppc440spe_chan->completed_cookie; 3953 last_complete = ppc440spe_chan->completed_cookie;
3953 3954
3954 if (done) 3955 dma_set_tx_state(txstate, last_complete, last_used, 0);
3955 *done = last_complete;
3956 if (used)
3957 *used = last_used;
3958 3956
3959 ret = dma_async_is_complete(cookie, last_complete, last_used); 3957 ret = dma_async_is_complete(cookie, last_complete, last_used);
3960 if (ret == DMA_SUCCESS) 3958 if (ret == DMA_SUCCESS)
@@ -3965,10 +3963,7 @@ static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan,
3965 last_used = chan->cookie; 3963 last_used = chan->cookie;
3966 last_complete = ppc440spe_chan->completed_cookie; 3964 last_complete = ppc440spe_chan->completed_cookie;
3967 3965
3968 if (done) 3966 dma_set_tx_state(txstate, last_complete, last_used, 0);
3969 *done = last_complete;
3970 if (used)
3971 *used = last_used;
3972 3967
3973 return dma_async_is_complete(cookie, last_complete, last_used); 3968 return dma_async_is_complete(cookie, last_complete, last_used);
3974} 3969}
@@ -4180,7 +4175,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
4180 ppc440spe_adma_alloc_chan_resources; 4175 ppc440spe_adma_alloc_chan_resources;
4181 adev->common.device_free_chan_resources = 4176 adev->common.device_free_chan_resources =
4182 ppc440spe_adma_free_chan_resources; 4177 ppc440spe_adma_free_chan_resources;
4183 adev->common.device_is_tx_complete = ppc440spe_adma_is_complete; 4178 adev->common.device_tx_status = ppc440spe_adma_tx_status;
4184 adev->common.device_issue_pending = ppc440spe_adma_issue_pending; 4179 adev->common.device_issue_pending = ppc440spe_adma_issue_pending;
4185 4180
4186 /* Set prep routines based on capability */ 4181 /* Set prep routines based on capability */
@@ -4399,7 +4394,7 @@ static void ppc440spe_adma_release_irqs(struct ppc440spe_adma_device *adev,
4399static int __devinit ppc440spe_adma_probe(struct of_device *ofdev, 4394static int __devinit ppc440spe_adma_probe(struct of_device *ofdev,
4400 const struct of_device_id *match) 4395 const struct of_device_id *match)
4401{ 4396{
4402 struct device_node *np = ofdev->node; 4397 struct device_node *np = ofdev->dev.of_node;
4403 struct resource res; 4398 struct resource res;
4404 struct ppc440spe_adma_device *adev; 4399 struct ppc440spe_adma_device *adev;
4405 struct ppc440spe_adma_chan *chan; 4400 struct ppc440spe_adma_chan *chan;
@@ -4631,7 +4626,7 @@ out:
4631static int __devexit ppc440spe_adma_remove(struct of_device *ofdev) 4626static int __devexit ppc440spe_adma_remove(struct of_device *ofdev)
4632{ 4627{
4633 struct ppc440spe_adma_device *adev = dev_get_drvdata(&ofdev->dev); 4628 struct ppc440spe_adma_device *adev = dev_get_drvdata(&ofdev->dev);
4634 struct device_node *np = ofdev->node; 4629 struct device_node *np = ofdev->dev.of_node;
4635 struct resource res; 4630 struct resource res;
4636 struct dma_chan *chan, *_chan; 4631 struct dma_chan *chan, *_chan;
4637 struct ppc_dma_chan_ref *ref, *_ref; 4632 struct ppc_dma_chan_ref *ref, *_ref;
@@ -4949,12 +4944,12 @@ static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = {
4949MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match); 4944MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match);
4950 4945
4951static struct of_platform_driver ppc440spe_adma_driver = { 4946static struct of_platform_driver ppc440spe_adma_driver = {
4952 .match_table = ppc440spe_adma_of_match,
4953 .probe = ppc440spe_adma_probe, 4947 .probe = ppc440spe_adma_probe,
4954 .remove = __devexit_p(ppc440spe_adma_remove), 4948 .remove = __devexit_p(ppc440spe_adma_remove),
4955 .driver = { 4949 .driver = {
4956 .name = "PPC440SP(E)-ADMA", 4950 .name = "PPC440SP(E)-ADMA",
4957 .owner = THIS_MODULE, 4951 .owner = THIS_MODULE,
4952 .of_match_table = ppc440spe_adma_of_match,
4958 }, 4953 },
4959}; 4954};
4960 4955
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 323afef77802..a2a519fd2a24 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -597,12 +597,17 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
597 direction, flags); 597 direction, flags);
598} 598}
599 599
600static void sh_dmae_terminate_all(struct dma_chan *chan) 600static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
601 unsigned long arg)
601{ 602{
602 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 603 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
603 604
605 /* Only supports DMA_TERMINATE_ALL */
606 if (cmd != DMA_TERMINATE_ALL)
607 return -ENXIO;
608
604 if (!chan) 609 if (!chan)
605 return; 610 return -EINVAL;
606 611
607 dmae_halt(sh_chan); 612 dmae_halt(sh_chan);
608 613
@@ -618,6 +623,8 @@ static void sh_dmae_terminate_all(struct dma_chan *chan)
618 spin_unlock_bh(&sh_chan->desc_lock); 623 spin_unlock_bh(&sh_chan->desc_lock);
619 624
620 sh_dmae_chan_ld_cleanup(sh_chan, true); 625 sh_dmae_chan_ld_cleanup(sh_chan, true);
626
627 return 0;
621} 628}
622 629
623static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) 630static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
@@ -715,6 +722,10 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
715{ 722{
716 while (__ld_cleanup(sh_chan, all)) 723 while (__ld_cleanup(sh_chan, all))
717 ; 724 ;
725
726 if (all)
727 /* Terminating - forgive uncompleted cookies */
728 sh_chan->completed_cookie = sh_chan->common.cookie;
718} 729}
719 730
720static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) 731static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
@@ -749,10 +760,9 @@ static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
749 sh_chan_xfer_ld_queue(sh_chan); 760 sh_chan_xfer_ld_queue(sh_chan);
750} 761}
751 762
752static enum dma_status sh_dmae_is_complete(struct dma_chan *chan, 763static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
753 dma_cookie_t cookie, 764 dma_cookie_t cookie,
754 dma_cookie_t *done, 765 struct dma_tx_state *txstate)
755 dma_cookie_t *used)
756{ 766{
757 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 767 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
758 dma_cookie_t last_used; 768 dma_cookie_t last_used;
@@ -764,12 +774,7 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
764 last_used = chan->cookie; 774 last_used = chan->cookie;
765 last_complete = sh_chan->completed_cookie; 775 last_complete = sh_chan->completed_cookie;
766 BUG_ON(last_complete < 0); 776 BUG_ON(last_complete < 0);
767 777 dma_set_tx_state(txstate, last_complete, last_used, 0);
768 if (done)
769 *done = last_complete;
770
771 if (used)
772 *used = last_used;
773 778
774 spin_lock_bh(&sh_chan->desc_lock); 779 spin_lock_bh(&sh_chan->desc_lock);
775 780
@@ -1041,12 +1046,12 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1041 = sh_dmae_alloc_chan_resources; 1046 = sh_dmae_alloc_chan_resources;
1042 shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources; 1047 shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
1043 shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy; 1048 shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
1044 shdev->common.device_is_tx_complete = sh_dmae_is_complete; 1049 shdev->common.device_tx_status = sh_dmae_tx_status;
1045 shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending; 1050 shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
1046 1051
1047 /* Compulsory for DMA_SLAVE fields */ 1052 /* Compulsory for DMA_SLAVE fields */
1048 shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg; 1053 shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
1049 shdev->common.device_terminate_all = sh_dmae_terminate_all; 1054 shdev->common.device_control = sh_dmae_control;
1050 1055
1051 shdev->common.dev = &pdev->dev; 1056 shdev->common.dev = &pdev->dev;
1052 /* Default transfer size of 32 bytes requires 32-byte alignment */ 1057 /* Default transfer size of 32 bytes requires 32-byte alignment */
@@ -1187,6 +1192,7 @@ static struct platform_driver sh_dmae_driver = {
1187 .remove = __exit_p(sh_dmae_remove), 1192 .remove = __exit_p(sh_dmae_remove),
1188 .shutdown = sh_dmae_shutdown, 1193 .shutdown = sh_dmae_shutdown,
1189 .driver = { 1194 .driver = {
1195 .owner = THIS_MODULE,
1190 .name = "sh-dma-engine", 1196 .name = "sh-dma-engine",
1191 }, 1197 },
1192}; 1198};
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
new file mode 100644
index 000000000000..c426829f6ab8
--- /dev/null
+++ b/drivers/dma/ste_dma40.c
@@ -0,0 +1,2657 @@
1/*
 2 * drivers/dma/ste_dma40.c
3 *
4 * Copyright (C) ST-Ericsson 2007-2010
5 * License terms: GNU General Public License (GPL) version 2
6 * Author: Per Friden <per.friden@stericsson.com>
7 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
8 *
9 */
10
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/dmaengine.h>
14#include <linux/platform_device.h>
15#include <linux/clk.h>
16#include <linux/delay.h>
17
18#include <plat/ste_dma40.h>
19
20#include "ste_dma40_ll.h"
21
22#define D40_NAME "dma40"
23
24#define D40_PHY_CHAN -1
25
26/* For masking out/in 2 bit channel positions */
27#define D40_CHAN_POS(chan) (2 * (chan / 2))
28#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
29
30/* Maximum iterations taken before giving up suspending a channel */
31#define D40_SUSPEND_MAX_IT 500
32
33#define D40_ALLOC_FREE (1 << 31)
34#define D40_ALLOC_PHY (1 << 30)
35#define D40_ALLOC_LOG_FREE 0
36
37/* The number of free d40_desc to keep in memory before starting
38 * to kfree() them */
39#define D40_DESC_CACHE_SIZE 50
40
41/* Hardware designer of the block */
42#define D40_PERIPHID2_DESIGNER 0x8
43
44/**
 45 * enum d40_command - The different commands and/or statuses.
46 *
 47 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 48 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
49 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
50 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
51 */
52enum d40_command {
53 D40_DMA_STOP = 0,
54 D40_DMA_RUN = 1,
55 D40_DMA_SUSPEND_REQ = 2,
56 D40_DMA_SUSPENDED = 3
57};
58
59/**
60 * struct d40_lli_pool - Structure for keeping LLIs in memory
61 *
62 * @base: Pointer to memory area when the pre_alloc_lli's are not large
63 * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if
64 * pre_alloc_lli is used.
65 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
66 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
67 * one buffer to one buffer.
68 */
69struct d40_lli_pool {
70 void *base;
71 int size;
72 /* Space for dst and src, plus an extra for padding */
73 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
74};
75
76/**
77 * struct d40_desc - A descriptor is one DMA job.
78 *
 79 * @lli_phy: LLI settings for physical channel. Both src and dst
 80 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
81 * lli_len equals one.
82 * @lli_log: Same as above but for logical channels.
83 * @lli_pool: The pool with two entries pre-allocated.
84 * @lli_len: Number of LLI's in lli_pool
 85 * @lli_tcount: Number of LLIs processed in the transfer. When it equals
 86 * lli_len, this transfer job is done.
87 * @txd: DMA engine struct. Used for among other things for communication
88 * during a transfer.
89 * @node: List entry.
90 * @dir: The transfer direction of this job.
91 * @is_in_client_list: true if the client owns this descriptor.
92 *
93 * This descriptor is used for both logical and physical transfers.
94 */
95
96struct d40_desc {
97 /* LLI physical */
98 struct d40_phy_lli_bidir lli_phy;
99 /* LLI logical */
100 struct d40_log_lli_bidir lli_log;
101
102 struct d40_lli_pool lli_pool;
103 u32 lli_len;
104 u32 lli_tcount;
105
106 struct dma_async_tx_descriptor txd;
107 struct list_head node;
108
109 enum dma_data_direction dir;
110 bool is_in_client_list;
111};
112
113/**
114 * struct d40_lcla_pool - LCLA pool settings and data.
115 *
116 * @base: The virtual address of LCLA.
117 * @phy: Physical base address of LCLA.
118 * @base_size: size of lcla.
119 * @lock: Lock to protect the content in this struct.
120 * @alloc_map: Mapping between physical channel and LCLA entries.
 121 * @num_blocks: The number of entries of alloc_map. Equals the
122 * number of physical channels.
123 */
124struct d40_lcla_pool {
125 void *base;
126 dma_addr_t phy;
127 resource_size_t base_size;
128 spinlock_t lock;
129 u32 *alloc_map;
130 int num_blocks;
131};
132
133/**
134 * struct d40_phy_res - struct for handling eventlines mapped to physical
135 * channels.
136 *
 137 * @lock: A lock protecting this entity.
138 * @num: The physical channel number of this entity.
 139 * @allocated_src: Bit map showing which src event lines are mapped to
 140 * this physical channel. Can also be free or physically allocated.
 141 * @allocated_dst: Same as for src but is dst.
 142 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 143 * the event line number. Both allocated_src and allocated_dst cannot be
 144 * allocated to a physical channel, since the interrupt handler would then
 145 * have no way of figuring out which one the interrupt belongs to.
146 */
147struct d40_phy_res {
148 spinlock_t lock;
149 int num;
150 u32 allocated_src;
151 u32 allocated_dst;
152};
153
154struct d40_base;
155
156/**
157 * struct d40_chan - Struct that describes a channel.
158 *
159 * @lock: A spinlock to protect this struct.
160 * @log_num: The logical number, if any of this channel.
161 * @completed: Starts with 1, after first interrupt it is set to dma engine's
162 * current cookie.
163 * @pending_tx: The number of pending transfers. Used between interrupt handler
164 * and tasklet.
165 * @busy: Set to true when transfer is ongoing on this channel.
166 * @phy_chan: Pointer to physical channel which this instance runs on.
167 * @chan: DMA engine handle.
168 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
169 * transfer and call client callback.
 170 * @client: Client owned descriptor list.
171 * @active: Active descriptor.
172 * @queue: Queued jobs.
 173 * @free: List of free descriptors, ready to be reused.
174 * @free_len: Number of descriptors in the free list.
175 * @dma_cfg: The client configuration of this dma channel.
176 * @base: Pointer to the device instance struct.
177 * @src_def_cfg: Default cfg register setting for src.
178 * @dst_def_cfg: Default cfg register setting for dst.
179 * @log_def: Default logical channel settings.
180 * @lcla: Space for one dst src pair for logical channel transfers.
181 * @lcpa: Pointer to dst and src lcpa settings.
182 *
183 * This struct can either "be" a logical or a physical channel.
184 */
185struct d40_chan {
186 spinlock_t lock;
187 int log_num;
188 /* ID of the most recent completed transfer */
189 int completed;
190 int pending_tx;
191 bool busy;
192 struct d40_phy_res *phy_chan;
193 struct dma_chan chan;
194 struct tasklet_struct tasklet;
195 struct list_head client;
196 struct list_head active;
197 struct list_head queue;
198 struct list_head free;
199 int free_len;
200 struct stedma40_chan_cfg dma_cfg;
201 struct d40_base *base;
202 /* Default register configurations */
203 u32 src_def_cfg;
204 u32 dst_def_cfg;
205 struct d40_def_lcsp log_def;
206 struct d40_lcla_elem lcla;
207 struct d40_log_lli_full *lcpa;
208};
209
210/**
211 * struct d40_base - The big global struct, one for each probe'd instance.
212 *
 213 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
214 * @execmd_lock: Lock for execute command usage since several channels share
215 * the same physical register.
216 * @dev: The device structure.
217 * @virtbase: The virtual base address of the DMA's register.
218 * @clk: Pointer to the DMA clock structure.
219 * @phy_start: Physical memory start of the DMA registers.
220 * @phy_size: Size of the DMA register map.
221 * @irq: The IRQ number.
222 * @num_phy_chans: The number of physical channels. Read from HW. This
223 * is the number of available channels for this driver, not counting "Secure
224 * mode" allocated physical channels.
225 * @num_log_chans: The number of logical channels. Calculated from
226 * num_phy_chans.
227 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 228 * @dma_slave: dma_device channels that can only do slave transfers.
 229 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
230 * @phy_chans: Room for all possible physical channels in system.
231 * @log_chans: Room for all possible logical channels in system.
232 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
233 * to log_chans entries.
234 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
235 * to phy_chans entries.
236 * @plat_data: Pointer to provided platform_data which is the driver
237 * configuration.
238 * @phy_res: Vector containing all physical channels.
239 * @lcla_pool: lcla pool settings and data.
240 * @lcpa_base: The virtual mapped address of LCPA.
241 * @phy_lcpa: The physical address of the LCPA.
242 * @lcpa_size: The size of the LCPA area.
243 */
244struct d40_base {
245 spinlock_t interrupt_lock;
246 spinlock_t execmd_lock;
247 struct device *dev;
248 void __iomem *virtbase;
249 struct clk *clk;
250 phys_addr_t phy_start;
251 resource_size_t phy_size;
252 int irq;
253 int num_phy_chans;
254 int num_log_chans;
255 struct dma_device dma_both;
256 struct dma_device dma_slave;
257 struct dma_device dma_memcpy;
258 struct d40_chan *phy_chans;
259 struct d40_chan *log_chans;
260 struct d40_chan **lookup_log_chans;
261 struct d40_chan **lookup_phy_chans;
262 struct stedma40_platform_data *plat_data;
263 /* Physical half channels */
264 struct d40_phy_res *phy_res;
265 struct d40_lcla_pool lcla_pool;
266 void *lcpa_base;
267 dma_addr_t phy_lcpa;
268 resource_size_t lcpa_size;
269};
270
271/**
272 * struct d40_interrupt_lookup - lookup table for interrupt handler
273 *
274 * @src: Interrupt mask register.
275 * @clr: Interrupt clear register.
276 * @is_error: true if this is an error interrupt.
 277 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
278 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
279 */
280struct d40_interrupt_lookup {
281 u32 src;
282 u32 clr;
283 bool is_error;
284 int offset;
285};
286
287/**
288 * struct d40_reg_val - simple lookup struct
289 *
290 * @reg: The register.
291 * @val: The value that belongs to the register in reg.
292 */
293struct d40_reg_val {
294 unsigned int reg;
295 unsigned int val;
296};
297
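/*
 * Set up the LLI area of a descriptor. A single-link transfer uses
 * the pre_alloc_lli space embedded in the descriptor; longer chains
 * get a kmalloc'd area big enough for both the src and the dst LLI
 * lists, aligned to the LLI size. Physical channels also need the
 * physical addresses of the two lists.
 */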
298static int d40_pool_lli_alloc(struct d40_desc *d40d,
299 int lli_len, bool is_log)
300{
301 u32 align;
302 void *base;
303
304 if (is_log)
305 align = sizeof(struct d40_log_lli);
306 else
307 align = sizeof(struct d40_phy_lli);
308
309 if (lli_len == 1) {
310 base = d40d->lli_pool.pre_alloc_lli;
311 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
312 d40d->lli_pool.base = NULL;
313 } else {
314 d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
315
316 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
317 d40d->lli_pool.base = base;
318
319 if (d40d->lli_pool.base == NULL)
320 return -ENOMEM;
321 }
322
323 if (is_log) {
324 d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
325 align);
326 d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
327 align);
328 } else {
329 d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
330 align);
331 d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
332 align);
333
334 d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
335 d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
336 }
337
338 return 0;
339}
340
341static void d40_pool_lli_free(struct d40_desc *d40d)
342{
343 kfree(d40d->lli_pool.base);
344 d40d->lli_pool.base = NULL;
345 d40d->lli_pool.size = 0;
346 d40d->lli_log.src = NULL;
347 d40d->lli_log.dst = NULL;
348 d40d->lli_phy.src = NULL;
349 d40d->lli_phy.dst = NULL;
350 d40d->lli_phy.src_addr = 0;
351 d40d->lli_phy.dst_addr = 0;
352}
353
354static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
355 struct d40_desc *desc)
356{
357 dma_cookie_t cookie = d40c->chan.cookie;
358
359 if (++cookie < 0)
360 cookie = 1;
361
362 d40c->chan.cookie = cookie;
363 desc->txd.cookie = cookie;
364
365 return cookie;
366}
367
368static void d40_desc_reset(struct d40_desc *d40d)
369{
370 d40d->lli_tcount = 0;
371}
372
373static void d40_desc_remove(struct d40_desc *d40d)
374{
375 list_del(&d40d->node);
376}
377
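/*
 * Get a descriptor for a new job: reclaim an acked descriptor from
 * the client list if possible, otherwise reuse one from the free
 * cache, otherwise allocate a fresh one with GFP_NOWAIT.
 */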
378static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
379{
380 struct d40_desc *desc;
381 struct d40_desc *d;
382 struct d40_desc *_d;
383
384 if (!list_empty(&d40c->client)) {
385 list_for_each_entry_safe(d, _d, &d40c->client, node)
386 if (async_tx_test_ack(&d->txd)) {
387 d40_pool_lli_free(d);
388 d40_desc_remove(d);
389 desc = d;
390 goto out;
391 }
392 }
393
394 if (list_empty(&d40c->free)) {
395 /* Alloc new desc because we're out of used ones */
396 desc = kzalloc(sizeof(struct d40_desc), GFP_NOWAIT);
397 if (desc == NULL)
398 goto out;
399 INIT_LIST_HEAD(&desc->node);
400 } else {
401 /* Reuse an old desc. */
402 desc = list_first_entry(&d40c->free,
403 struct d40_desc,
404 node);
405 list_del(&desc->node);
406 d40c->free_len--;
407 }
408out:
409 return desc;
410}
411
412static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
413{
414 if (d40c->free_len < D40_DESC_CACHE_SIZE) {
415 list_add_tail(&d40d->node, &d40c->free);
416 d40c->free_len++;
417 } else
418 kfree(d40d);
419}
420
421static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
422{
423 list_add_tail(&desc->node, &d40c->active);
424}
425
426static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
427{
428 struct d40_desc *d;
429
430 if (list_empty(&d40c->active))
431 return NULL;
432
433 d = list_first_entry(&d40c->active,
434 struct d40_desc,
435 node);
436 return d;
437}
438
439static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
440{
441 list_add_tail(&desc->node, &d40c->queue);
442}
443
444static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
445{
446 struct d40_desc *d;
447
448 if (list_empty(&d40c->queue))
449 return NULL;
450
451 d = list_first_entry(&d40c->queue,
452 struct d40_desc,
453 node);
454 return d;
455}
456
457/* Support functions for logical channels */
458
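/*
 * Reserve one src and one dst LCLA entry for a logical channel. Each
 * physical channel owns a 32 bit word in alloc_map; two free bits are
 * claimed and translated into pointers into the per-channel 1 kB LCLA
 * area, each entry llis_per_log LLIs apart.
 */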
459static int d40_lcla_id_get(struct d40_chan *d40c,
460 struct d40_lcla_pool *pool)
461{
462 int src_id = 0;
463 int dst_id = 0;
464 struct d40_log_lli *lcla_lidx_base =
465 pool->base + d40c->phy_chan->num * 1024;
466 int i;
467 int lli_per_log = d40c->base->plat_data->llis_per_log;
468
469 if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
470 return 0;
471
472 if (pool->num_blocks > 32)
473 return -EINVAL;
474
475 spin_lock(&pool->lock);
476
477 for (i = 0; i < pool->num_blocks; i++) {
478 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
479 pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
480 break;
481 }
482 }
483 src_id = i;
484 if (src_id >= pool->num_blocks)
485 goto err;
486
487 for (; i < pool->num_blocks; i++) {
488 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
489 pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
490 break;
491 }
492 }
493
494 dst_id = i;
495 if (dst_id == src_id)
496 goto err;
497
498 d40c->lcla.src_id = src_id;
499 d40c->lcla.dst_id = dst_id;
500 d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
501 d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
502
503
504 spin_unlock(&pool->lock);
505 return 0;
506err:
507 spin_unlock(&pool->lock);
508 return -EINVAL;
509}
510
511static void d40_lcla_id_put(struct d40_chan *d40c,
512 struct d40_lcla_pool *pool,
513 int id)
514{
515 if (id < 0)
516 return;
517
518 d40c->lcla.src_id = -1;
519 d40c->lcla.dst_id = -1;
520
521 spin_lock(&pool->lock);
522 pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
523 spin_unlock(&pool->lock);
524}
525
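/*
 * Write a channel command into the shared ACTIVE/ACTIVO register
 * (2 bits per physical channel, hence the execmd_lock). A suspend
 * request is skipped if the channel is already stopped or suspended,
 * and is otherwise polled for up to D40_SUSPEND_MAX_IT iterations
 * before giving up with -EBUSY.
 */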
526static int d40_channel_execute_command(struct d40_chan *d40c,
527 enum d40_command command)
528{
529 int status, i;
530 void __iomem *active_reg;
531 int ret = 0;
532 unsigned long flags;
533
534 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
535
536 if (d40c->phy_chan->num % 2 == 0)
537 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
538 else
539 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
540
541 if (command == D40_DMA_SUSPEND_REQ) {
542 status = (readl(active_reg) &
543 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
544 D40_CHAN_POS(d40c->phy_chan->num);
545
546 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
547 goto done;
548 }
549
550 writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg);
551
552 if (command == D40_DMA_SUSPEND_REQ) {
553
554 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
555 status = (readl(active_reg) &
556 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
557 D40_CHAN_POS(d40c->phy_chan->num);
558
559 cpu_relax();
560 /*
561 * Reduce the number of bus accesses while
562 * waiting for the DMA to suspend.
563 */
564 udelay(3);
565
566 if (status == D40_DMA_STOP ||
567 status == D40_DMA_SUSPENDED)
568 break;
569 }
570
571 if (i == D40_SUSPEND_MAX_IT) {
572 dev_err(&d40c->chan.dev->device,
573 "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
574 __func__, d40c->phy_chan->num, d40c->log_num,
575 status);
576 dump_stack();
577 ret = -EBUSY;
578 }
579
580 }
581done:
582 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
583 return ret;
584}
585
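/*
 * Terminate all work on a channel: return the active, queued and
 * client owned descriptors to the free list, release the channel's
 * LCLA entries and mark the channel idle.
 */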
586static void d40_term_all(struct d40_chan *d40c)
587{
588 struct d40_desc *d40d;
589 struct d40_desc *d;
590 struct d40_desc *_d;
591
592 /* Release active descriptors */
593 while ((d40d = d40_first_active_get(d40c))) {
594 d40_desc_remove(d40d);
595
596 /* Return desc to free-list */
597 d40_desc_free(d40c, d40d);
598 }
599
600 /* Release queued descriptors waiting for transfer */
601 while ((d40d = d40_first_queued(d40c))) {
602 d40_desc_remove(d40d);
603
604 /* Return desc to free-list */
605 d40_desc_free(d40c, d40d);
606 }
607
608 /* Release client owned descriptors */
609 if (!list_empty(&d40c->client))
610 list_for_each_entry_safe(d, _d, &d40c->client, node) {
611 d40_pool_lli_free(d);
612 d40_desc_remove(d);
613 /* Return desc to free-list */
 614			d40_desc_free(d40c, d);
615 }
616
617 d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
618 d40c->lcla.src_id);
619 d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
620 d40c->lcla.dst_id);
621
622 d40c->pending_tx = 0;
623 d40c->busy = false;
624}
625
626static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
627{
628 u32 val;
629 unsigned long flags;
630
631 if (do_enable)
632 val = D40_ACTIVATE_EVENTLINE;
633 else
634 val = D40_DEACTIVATE_EVENTLINE;
635
636 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
637
638 /* Enable event line connected to device (or memcpy) */
639 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
640 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
641 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
642
643 writel((val << D40_EVENTLINE_POS(event)) |
644 ~D40_EVENTLINE_MASK(event),
645 d40c->base->virtbase + D40_DREG_PCBASE +
646 d40c->phy_chan->num * D40_DREG_PCDELTA +
647 D40_CHAN_REG_SSLNK);
648 }
649 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
650 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
651
652 writel((val << D40_EVENTLINE_POS(event)) |
653 ~D40_EVENTLINE_MASK(event),
654 d40c->base->virtbase + D40_DREG_PCBASE +
655 d40c->phy_chan->num * D40_DREG_PCDELTA +
656 D40_CHAN_REG_SDLNK);
657 }
658
659 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
660}
661
662static u32 d40_chan_has_events(struct d40_chan *d40c)
663{
664 u32 val = 0;
665
666 /* If SSLNK or SDLNK is zero all events are disabled */
667 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
668 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
669 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
670 d40c->phy_chan->num * D40_DREG_PCDELTA +
671 D40_CHAN_REG_SSLNK);
672
673 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
674 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
675 d40c->phy_chan->num * D40_DREG_PCDELTA +
676 D40_CHAN_REG_SDLNK);
677 return val;
678}
679
680static void d40_config_enable_lidx(struct d40_chan *d40c)
681{
682 /* Set LIDX for lcla */
683 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
684 D40_SREG_ELEM_LOG_LIDX_MASK,
685 d40c->base->virtbase + D40_DREG_PCBASE +
686 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
687
688 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
689 D40_SREG_ELEM_LOG_LIDX_MASK,
690 d40c->base->virtbase + D40_DREG_PCBASE +
691 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
692}
693
694static int d40_config_write(struct d40_chan *d40c)
695{
696 u32 addr_base;
697 u32 var;
698 int res;
699
700 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
701 if (res)
702 return res;
703
704 /* Odd addresses are even addresses + 4 */
705 addr_base = (d40c->phy_chan->num % 2) * 4;
706 /* Setup channel mode to logical or physical */
707 var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
708 D40_CHAN_POS(d40c->phy_chan->num);
709 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
710
711 /* Setup operational mode option register */
712 var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
713 0x3) << D40_CHAN_POS(d40c->phy_chan->num);
714
715 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
716
717 if (d40c->log_num != D40_PHY_CHAN) {
718 /* Set default config for CFG reg */
719 writel(d40c->src_def_cfg,
720 d40c->base->virtbase + D40_DREG_PCBASE +
721 d40c->phy_chan->num * D40_DREG_PCDELTA +
722 D40_CHAN_REG_SSCFG);
723 writel(d40c->dst_def_cfg,
724 d40c->base->virtbase + D40_DREG_PCBASE +
725 d40c->phy_chan->num * D40_DREG_PCDELTA +
726 D40_CHAN_REG_SDCFG);
727
728 d40_config_enable_lidx(d40c);
729 }
730 return res;
731}
732
733static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
734{
735
736 if (d40d->lli_phy.dst && d40d->lli_phy.src) {
737 d40_phy_lli_write(d40c->base->virtbase,
738 d40c->phy_chan->num,
739 d40d->lli_phy.dst,
740 d40d->lli_phy.src);
741 d40d->lli_tcount = d40d->lli_len;
742 } else if (d40d->lli_log.dst && d40d->lli_log.src) {
743 u32 lli_len;
744 struct d40_log_lli *src = d40d->lli_log.src;
745 struct d40_log_lli *dst = d40d->lli_log.dst;
746
747 src += d40d->lli_tcount;
748 dst += d40d->lli_tcount;
749
750 if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
751 lli_len = d40d->lli_len;
752 else
753 lli_len = d40c->base->plat_data->llis_per_log;
754 d40d->lli_tcount += lli_len;
755 d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
756 d40c->lcla.dst,
757 dst, src,
758 d40c->base->plat_data->llis_per_log);
759 }
760}
761
762static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
763{
764 struct d40_chan *d40c = container_of(tx->chan,
765 struct d40_chan,
766 chan);
767 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
768 unsigned long flags;
769
770 spin_lock_irqsave(&d40c->lock, flags);
771
772 tx->cookie = d40_assign_cookie(d40c, d40d);
773
774 d40_desc_queue(d40c, d40d);
775
776 spin_unlock_irqrestore(&d40c->lock, flags);
777
778 return tx->cookie;
779}
780
781static int d40_start(struct d40_chan *d40c)
782{
783 int err;
784
785 if (d40c->log_num != D40_PHY_CHAN) {
786 err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
787 if (err)
788 return err;
789 d40_config_set_event(d40c, true);
790 }
791
792 err = d40_channel_execute_command(d40c, D40_DMA_RUN);
793
794 return err;
795}
796
797static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
798{
799 struct d40_desc *d40d;
800 int err;
801
802 /* Start queued jobs, if any */
803 d40d = d40_first_queued(d40c);
804
805 if (d40d != NULL) {
806 d40c->busy = true;
807
808 /* Remove from queue */
809 d40_desc_remove(d40d);
810
811 /* Add to active queue */
812 d40_desc_submit(d40c, d40d);
813
814 /* Initiate DMA job */
815 d40_desc_load(d40c, d40d);
816
817 /* Start dma job */
818 err = d40_start(d40c);
819
820 if (err)
821 return NULL;
822 }
823
824 return d40d;
825}
826
827/* called from interrupt context */
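/*
 * If the active descriptor still has LLIs left, load the next chunk
 * and restart the channel; otherwise start the next queued job (or
 * mark the channel idle) and let the tasklet complete the finished
 * descriptor.
 */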
828static void dma_tc_handle(struct d40_chan *d40c)
829{
830 struct d40_desc *d40d;
831
832 if (!d40c->phy_chan)
833 return;
834
835 /* Get first active entry from list */
836 d40d = d40_first_active_get(d40c);
837
838 if (d40d == NULL)
839 return;
840
841 if (d40d->lli_tcount < d40d->lli_len) {
842
843 d40_desc_load(d40c, d40d);
844 /* Start dma job */
845 (void) d40_start(d40c);
846 return;
847 }
848
849 if (d40_queue_start(d40c) == NULL)
850 d40c->busy = false;
851
852 d40c->pending_tx++;
853 tasklet_schedule(&d40c->tasklet);
854
855}
856
857static void dma_tasklet(unsigned long data)
858{
859 struct d40_chan *d40c = (struct d40_chan *) data;
860 struct d40_desc *d40d_fin;
861 unsigned long flags;
862 dma_async_tx_callback callback;
863 void *callback_param;
864
865 spin_lock_irqsave(&d40c->lock, flags);
866
867 /* Get first active entry from list */
868 d40d_fin = d40_first_active_get(d40c);
869
870 if (d40d_fin == NULL)
871 goto err;
872
873 d40c->completed = d40d_fin->txd.cookie;
874
875 /*
876 * If terminating a channel pending_tx is set to zero.
877 * This prevents any finished active jobs to return to the client.
878 */
879 if (d40c->pending_tx == 0) {
880 spin_unlock_irqrestore(&d40c->lock, flags);
881 return;
882 }
883
884 /* Callback to client */
885 callback = d40d_fin->txd.callback;
886 callback_param = d40d_fin->txd.callback_param;
887
888 if (async_tx_test_ack(&d40d_fin->txd)) {
889 d40_pool_lli_free(d40d_fin);
890 d40_desc_remove(d40d_fin);
891 /* Return desc to free-list */
892 d40_desc_free(d40c, d40d_fin);
893 } else {
894 d40_desc_reset(d40d_fin);
895 if (!d40d_fin->is_in_client_list) {
896 d40_desc_remove(d40d_fin);
897 list_add_tail(&d40d_fin->node, &d40c->client);
898 d40d_fin->is_in_client_list = true;
899 }
900 }
901
902 d40c->pending_tx--;
903
904 if (d40c->pending_tx)
905 tasklet_schedule(&d40c->tasklet);
906
907 spin_unlock_irqrestore(&d40c->lock, flags);
908
909 if (callback)
910 callback(callback_param);
911
912 return;
913
914 err:
915	/* Rescue manoeuvre in case of double interrupts */
916 if (d40c->pending_tx > 0)
917 d40c->pending_tx--;
918 spin_unlock_irqrestore(&d40c->lock, flags);
919}
920
921static irqreturn_t d40_handle_interrupt(int irq, void *data)
922{
923 static const struct d40_interrupt_lookup il[] = {
924 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
925 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
926 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
927 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
928 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
929 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
930 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
931 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
932 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
933 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
934 };
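	/*
	 * Each il[] row pairs an interrupt status register with its clear
	 * register: the first four rows are the transfer-complete status of
	 * the logical channels (32 channels per register), the next four the
	 * corresponding error status, and the last two rows cover the
	 * physical channels. The loop below scans regs[] as one long bitmap
	 * and handles every set bit in turn.
	 */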
935
936 int i;
937 u32 regs[ARRAY_SIZE(il)];
938 u32 tmp;
939 u32 idx;
940 u32 row;
941 long chan = -1;
942 struct d40_chan *d40c;
943 unsigned long flags;
944 struct d40_base *base = data;
945
946 spin_lock_irqsave(&base->interrupt_lock, flags);
947
948 /* Read interrupt status of both logical and physical channels */
949 for (i = 0; i < ARRAY_SIZE(il); i++)
950 regs[i] = readl(base->virtbase + il[i].src);
951
952 for (;;) {
953
954 chan = find_next_bit((unsigned long *)regs,
955 BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
956
957 /* No more set bits found? */
958 if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
959 break;
960
961 row = chan / BITS_PER_LONG;
962 idx = chan & (BITS_PER_LONG - 1);
963
964 /* ACK interrupt */
965 tmp = readl(base->virtbase + il[row].clr);
966 tmp |= 1 << idx;
967 writel(tmp, base->virtbase + il[row].clr);
968
969 if (il[row].offset == D40_PHY_CHAN)
970 d40c = base->lookup_phy_chans[idx];
971 else
972 d40c = base->lookup_log_chans[il[row].offset + idx];
973 spin_lock(&d40c->lock);
974
975 if (!il[row].is_error)
976 dma_tc_handle(d40c);
977 else
978			dev_err(base->dev, "[%s] Error IRQ chan: %ld offset %d idx %d\n",
979				__func__, chan, il[row].offset, idx);
980
981 spin_unlock(&d40c->lock);
982 }
983
984 spin_unlock_irqrestore(&base->interrupt_lock, flags);
985
986 return IRQ_HANDLED;
987}
988
989
990static int d40_validate_conf(struct d40_chan *d40c,
991 struct stedma40_chan_cfg *conf)
992{
993 int res = 0;
994 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
995 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
996 bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
997 == STEDMA40_CHANNEL_IN_LOG_MODE;
998
999 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
1000 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
1001 dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
1002 __func__);
1003 res = -EINVAL;
1004 }
1005
1006 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
1007 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
1008 dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
1009 __func__);
1010 res = -EINVAL;
1011 }
1012
1013 if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
1014 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
1015 dev_err(&d40c->chan.dev->device,
1016 "[%s] No event line\n", __func__);
1017 res = -EINVAL;
1018 }
1019
1020 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
1021 (src_event_group != dst_event_group)) {
1022 dev_err(&d40c->chan.dev->device,
1023 "[%s] Invalid event group\n", __func__);
1024 res = -EINVAL;
1025 }
1026
1027 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
1028 /*
1029		 * The DMAC HW supports it. Support will be added to this
1030		 * driver if any dma client requires it.
1031 */
1032 dev_err(&d40c->chan.dev->device,
1033 "[%s] periph to periph not supported\n",
1034 __func__);
1035 res = -EINVAL;
1036 }
1037
1038 return res;
1039}
1040
1041static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
1042 int log_event_line, bool is_log)
1043{
1044 unsigned long flags;
1045 spin_lock_irqsave(&phy->lock, flags);
1046 if (!is_log) {
1047 /* Physical interrupts are masked per physical full channel */
1048 if (phy->allocated_src == D40_ALLOC_FREE &&
1049 phy->allocated_dst == D40_ALLOC_FREE) {
1050 phy->allocated_dst = D40_ALLOC_PHY;
1051 phy->allocated_src = D40_ALLOC_PHY;
1052 goto found;
1053 } else
1054 goto not_found;
1055 }
1056
1057 /* Logical channel */
1058 if (is_src) {
1059 if (phy->allocated_src == D40_ALLOC_PHY)
1060 goto not_found;
1061
1062 if (phy->allocated_src == D40_ALLOC_FREE)
1063 phy->allocated_src = D40_ALLOC_LOG_FREE;
1064
1065 if (!(phy->allocated_src & (1 << log_event_line))) {
1066 phy->allocated_src |= 1 << log_event_line;
1067 goto found;
1068 } else
1069 goto not_found;
1070 } else {
1071 if (phy->allocated_dst == D40_ALLOC_PHY)
1072 goto not_found;
1073
1074 if (phy->allocated_dst == D40_ALLOC_FREE)
1075 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1076
1077 if (!(phy->allocated_dst & (1 << log_event_line))) {
1078 phy->allocated_dst |= 1 << log_event_line;
1079 goto found;
1080 } else
1081 goto not_found;
1082 }
1083
1084not_found:
1085 spin_unlock_irqrestore(&phy->lock, flags);
1086 return false;
1087found:
1088 spin_unlock_irqrestore(&phy->lock, flags);
1089 return true;
1090}
1091
1092static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1093 int log_event_line)
1094{
1095 unsigned long flags;
1096 bool is_free = false;
1097
1098 spin_lock_irqsave(&phy->lock, flags);
1099 if (!log_event_line) {
1100 /* Physical interrupts are masked per physical full channel */
1101 phy->allocated_dst = D40_ALLOC_FREE;
1102 phy->allocated_src = D40_ALLOC_FREE;
1103 is_free = true;
1104 goto out;
1105 }
1106
1107 /* Logical channel */
1108 if (is_src) {
1109 phy->allocated_src &= ~(1 << log_event_line);
1110 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1111 phy->allocated_src = D40_ALLOC_FREE;
1112 } else {
1113 phy->allocated_dst &= ~(1 << log_event_line);
1114 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1115 phy->allocated_dst = D40_ALLOC_FREE;
1116 }
1117
1118 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1119 D40_ALLOC_FREE);
1120
1121out:
1122 spin_unlock_irqrestore(&phy->lock, flags);
1123
1124 return is_free;
1125}
1126
1127static int d40_allocate_channel(struct d40_chan *d40c)
1128{
1129 int dev_type;
1130 int event_group;
1131 int event_line;
1132 struct d40_phy_res *phys;
1133 int i;
1134 int j;
1135 int log_num;
1136 bool is_src;
1137 bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
1138 == STEDMA40_CHANNEL_IN_LOG_MODE;
1139
1140
1141 phys = d40c->base->phy_res;
1142
1143 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1144 dev_type = d40c->dma_cfg.src_dev_type;
1145 log_num = 2 * dev_type;
1146 is_src = true;
1147 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1148 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1149 /* dst event lines are used for logical memcpy */
1150 dev_type = d40c->dma_cfg.dst_dev_type;
1151 log_num = 2 * dev_type + 1;
1152 is_src = false;
1153 } else
1154 return -EINVAL;
1155
1156 event_group = D40_TYPE_TO_GROUP(dev_type);
1157 event_line = D40_TYPE_TO_EVENT(dev_type);
1158
1159 if (!is_log) {
1160 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1161 /* Find physical half channel */
1162 for (i = 0; i < d40c->base->num_phy_chans; i++) {
1163
1164 if (d40_alloc_mask_set(&phys[i], is_src,
1165 0, is_log))
1166 goto found_phy;
1167 }
1168 } else
1169 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1170 int phy_num = j + event_group * 2;
1171 for (i = phy_num; i < phy_num + 2; i++) {
1172 if (d40_alloc_mask_set(&phys[i], is_src,
1173 0, is_log))
1174 goto found_phy;
1175 }
1176 }
1177 return -EINVAL;
1178found_phy:
1179 d40c->phy_chan = &phys[i];
1180 d40c->log_num = D40_PHY_CHAN;
1181 goto out;
1182 }
1183 if (dev_type == -1)
1184 return -EINVAL;
1185
1186 /* Find logical channel */
1187 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1188 int phy_num = j + event_group * 2;
1189 /*
1190		 * Spread logical channels across all available physical channels
1191		 * rather than packing every logical channel onto the first
1192		 * available phy channel.
1193 */
1194 if (is_src) {
1195 for (i = phy_num; i < phy_num + 2; i++) {
1196 if (d40_alloc_mask_set(&phys[i], is_src,
1197 event_line, is_log))
1198 goto found_log;
1199 }
1200 } else {
1201 for (i = phy_num + 1; i >= phy_num; i--) {
1202 if (d40_alloc_mask_set(&phys[i], is_src,
1203 event_line, is_log))
1204 goto found_log;
1205 }
1206 }
1207 }
1208 return -EINVAL;
1209
1210found_log:
1211 d40c->phy_chan = &phys[i];
1212 d40c->log_num = log_num;
1213out:
1214
1215 if (is_log)
1216 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1217 else
1218 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1219
1220 return 0;
1221
1222}
1223
1224static int d40_config_chan(struct d40_chan *d40c,
1225 struct stedma40_chan_cfg *info)
1226{
1227
1228 /* Fill in basic CFG register values */
1229 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1230 &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
1231
1232 if (d40c->log_num != D40_PHY_CHAN) {
1233 d40_log_cfg(&d40c->dma_cfg,
1234 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1235
1236 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1237 d40c->lcpa = d40c->base->lcpa_base +
1238 d40c->dma_cfg.src_dev_type * 32;
1239 else
1240 d40c->lcpa = d40c->base->lcpa_base +
1241 d40c->dma_cfg.dst_dev_type * 32 + 16;
1242 }
1243
1244 /* Write channel configuration to the DMA */
1245 return d40_config_write(d40c);
1246}
1247
1248static int d40_config_memcpy(struct d40_chan *d40c)
1249{
1250 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1251
1252 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1253 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
1254 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
1255 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
1256 memcpy[d40c->chan.chan_id];
1257
1258 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1259 dma_has_cap(DMA_SLAVE, cap)) {
1260 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1261 } else {
1262 dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
1263 __func__);
1264 return -EINVAL;
1265 }
1266
1267 return 0;
1268}
1269
1270
1271static int d40_free_dma(struct d40_chan *d40c)
1272{
1273
1274 int res = 0;
1275 u32 event, dir;
1276 struct d40_phy_res *phy = d40c->phy_chan;
1277 bool is_src;
1278
1279 /* Terminate all queued and active transfers */
1280 d40_term_all(d40c);
1281
1282 if (phy == NULL) {
1283 dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
1284 __func__);
1285 return -EINVAL;
1286 }
1287
1288 if (phy->allocated_src == D40_ALLOC_FREE &&
1289 phy->allocated_dst == D40_ALLOC_FREE) {
1290 dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
1291 __func__);
1292 return -EINVAL;
1293 }
1294
1295
1296 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1297 if (res) {
1298 dev_err(&d40c->chan.dev->device, "[%s] suspend\n",
1299 __func__);
1300 return res;
1301 }
1302
1303 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1304 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1305 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1306 dir = D40_CHAN_REG_SDLNK;
1307 is_src = false;
1308 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1309 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1310 dir = D40_CHAN_REG_SSLNK;
1311 is_src = true;
1312 } else {
1313 dev_err(&d40c->chan.dev->device,
1314 "[%s] Unknown direction\n", __func__);
1315 return -EINVAL;
1316 }
1317
1318 if (d40c->log_num != D40_PHY_CHAN) {
1319 /*
1320 * Release logical channel, deactivate the event line during
1321 * the time physical res is suspended.
1322 */
1323 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
1324 D40_EVENTLINE_MASK(event),
1325 d40c->base->virtbase + D40_DREG_PCBASE +
1326 phy->num * D40_DREG_PCDELTA + dir);
1327
1328 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1329
1330 /*
1331		 * Check if there are more logical allocations
1332		 * on this phy channel.
1333 */
1334 if (!d40_alloc_mask_free(phy, is_src, event)) {
1335 /* Resume the other logical channels if any */
1336 if (d40_chan_has_events(d40c)) {
1337 res = d40_channel_execute_command(d40c,
1338 D40_DMA_RUN);
1339 if (res) {
1340 dev_err(&d40c->chan.dev->device,
1341 "[%s] Executing RUN command\n",
1342 __func__);
1343 return res;
1344 }
1345 }
1346 return 0;
1347 }
1348 } else
1349 d40_alloc_mask_free(phy, is_src, 0);
1350
1351 /* Release physical channel */
1352 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1353 if (res) {
1354 dev_err(&d40c->chan.dev->device,
1355 "[%s] Failed to stop channel\n", __func__);
1356 return res;
1357 }
1358 d40c->phy_chan = NULL;
1359 /* Invalidate channel type */
1360 d40c->dma_cfg.channel_type = 0;
1361 d40c->base->lookup_phy_chans[phy->num] = NULL;
1362
1363 return 0;
1364
1365
1366}
1367
1368static int d40_pause(struct dma_chan *chan)
1369{
1370 struct d40_chan *d40c =
1371 container_of(chan, struct d40_chan, chan);
1372 int res;
1373
1374 unsigned long flags;
1375
1376 spin_lock_irqsave(&d40c->lock, flags);
1377
1378 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1379 if (res == 0) {
1380 if (d40c->log_num != D40_PHY_CHAN) {
1381 d40_config_set_event(d40c, false);
1382 /* Resume the other logical channels if any */
1383 if (d40_chan_has_events(d40c))
1384 res = d40_channel_execute_command(d40c,
1385 D40_DMA_RUN);
1386 }
1387 }
1388
1389 spin_unlock_irqrestore(&d40c->lock, flags);
1390 return res;
1391}
1392
1393static bool d40_is_paused(struct d40_chan *d40c)
1394{
1395 bool is_paused = false;
1396 unsigned long flags;
1397 void __iomem *active_reg;
1398 u32 status;
1399 u32 event;
1400 int res;
1401
1402 spin_lock_irqsave(&d40c->lock, flags);
1403
1404 if (d40c->log_num == D40_PHY_CHAN) {
1405 if (d40c->phy_chan->num % 2 == 0)
1406 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1407 else
1408 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1409
1410 status = (readl(active_reg) &
1411 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1412 D40_CHAN_POS(d40c->phy_chan->num);
1413 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1414 is_paused = true;
1415
1416 goto _exit;
1417 }
1418
1419 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1420 if (res != 0)
1421 goto _exit;
1422
1423 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1424 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
1425 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1426 else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1427 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1428 else {
1429 dev_err(&d40c->chan.dev->device,
1430 "[%s] Unknown direction\n", __func__);
1431 goto _exit;
1432 }
1433 status = d40_chan_has_events(d40c);
1434 status = (status & D40_EVENTLINE_MASK(event)) >>
1435 D40_EVENTLINE_POS(event);
1436
1437 if (status != D40_DMA_RUN)
1438 is_paused = true;
1439
1440 /* Resume the other logical channels if any */
1441 if (d40_chan_has_events(d40c))
1442 res = d40_channel_execute_command(d40c,
1443 D40_DMA_RUN);
1444
1445_exit:
1446 spin_unlock_irqrestore(&d40c->lock, flags);
1447 return is_paused;
1448
1449}
1450
1451
1452static bool d40_tx_is_linked(struct d40_chan *d40c)
1453{
1454 bool is_link;
1455
1456 if (d40c->log_num != D40_PHY_CHAN)
1457 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1458 else
1459 is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
1460 d40c->phy_chan->num * D40_DREG_PCDELTA +
1461 D40_CHAN_REG_SDLNK) &
1462 D40_SREG_LNK_PHYS_LNK_MASK;
1463 return is_link;
1464}
1465
1466static u32 d40_residue(struct d40_chan *d40c)
1467{
1468 u32 num_elt;
1469
1470 if (d40c->log_num != D40_PHY_CHAN)
1471 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1472 >> D40_MEM_LCSP2_ECNT_POS;
1473 else
1474 num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
1475 d40c->phy_chan->num * D40_DREG_PCDELTA +
1476 D40_CHAN_REG_SDELT) &
1477 D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS;
1478 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
1479}
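/*
 * Worked example (illustrative): if 25 elements remain and the destination
 * data width encodes 32-bit elements (data_width == 2, assuming the value is
 * the log2 of the element size in bytes, as the shift above implies), the
 * reported residue is 25 * (1 << 2) = 100 bytes.
 */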
1480
1481static int d40_resume(struct dma_chan *chan)
1482{
1483 struct d40_chan *d40c =
1484 container_of(chan, struct d40_chan, chan);
1485 int res = 0;
1486 unsigned long flags;
1487
1488 spin_lock_irqsave(&d40c->lock, flags);
1489
1490 if (d40c->log_num != D40_PHY_CHAN) {
1491 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1492 if (res)
1493 goto out;
1494
1495		/* Resume the job if there are bytes left to transfer or a tx is linked */
1496 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
1497 d40_config_set_event(d40c, true);
1498 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1499 }
1500 } else if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1501 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1502
1503out:
1504 spin_unlock_irqrestore(&d40c->lock, flags);
1505 return res;
1506}
1507
1508static u32 stedma40_residue(struct dma_chan *chan)
1509{
1510 struct d40_chan *d40c =
1511 container_of(chan, struct d40_chan, chan);
1512 u32 bytes_left;
1513 unsigned long flags;
1514
1515 spin_lock_irqsave(&d40c->lock, flags);
1516 bytes_left = d40_residue(d40c);
1517 spin_unlock_irqrestore(&d40c->lock, flags);
1518
1519 return bytes_left;
1520}
1521
1522/* Public DMA functions in addition to the DMA engine framework */
1523
1524int stedma40_set_psize(struct dma_chan *chan,
1525 int src_psize,
1526 int dst_psize)
1527{
1528 struct d40_chan *d40c =
1529 container_of(chan, struct d40_chan, chan);
1530 unsigned long flags;
1531
1532 spin_lock_irqsave(&d40c->lock, flags);
1533
1534 if (d40c->log_num != D40_PHY_CHAN) {
1535 d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1536 d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1537 d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
1538 d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
1539 goto out;
1540 }
1541
1542 if (src_psize == STEDMA40_PSIZE_PHY_1)
1543 d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1544 else {
1545 d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1546 d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1547 D40_SREG_CFG_PSIZE_POS);
1548 d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
1549 }
1550
1551 if (dst_psize == STEDMA40_PSIZE_PHY_1)
1552 d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1553 else {
1554 d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1555 d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1556 D40_SREG_CFG_PSIZE_POS);
1557 d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
1558 }
1559out:
1560 spin_unlock_irqrestore(&d40c->lock, flags);
1561 return 0;
1562}
1563EXPORT_SYMBOL(stedma40_set_psize);
1564
1565struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1566 struct scatterlist *sgl_dst,
1567 struct scatterlist *sgl_src,
1568 unsigned int sgl_len,
1569 unsigned long flags)
1570{
1571 int res;
1572 struct d40_desc *d40d;
1573 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1574 chan);
1575 unsigned long flg;
1576 int lli_max = d40c->base->plat_data->llis_per_log;
1577
1578
1579 spin_lock_irqsave(&d40c->lock, flg);
1580 d40d = d40_desc_get(d40c);
1581
1582 if (d40d == NULL)
1583 goto err;
1584
1585 memset(d40d, 0, sizeof(struct d40_desc));
1586 d40d->lli_len = sgl_len;
1587
1588 d40d->txd.flags = flags;
1589
1590 if (d40c->log_num != D40_PHY_CHAN) {
1591 if (sgl_len > 1)
1592 /*
1593 * Check if there is space available in lcla. If not,
1594 * split list into 1-length and run only in lcpa
1595 * space.
1596 */
1597 if (d40_lcla_id_get(d40c,
1598 &d40c->base->lcla_pool) != 0)
1599 lli_max = 1;
1600
1601 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
1602 dev_err(&d40c->chan.dev->device,
1603 "[%s] Out of memory\n", __func__);
1604 goto err;
1605 }
1606
1607 (void) d40_log_sg_to_lli(d40c->lcla.src_id,
1608 sgl_src,
1609 sgl_len,
1610 d40d->lli_log.src,
1611 d40c->log_def.lcsp1,
1612 d40c->dma_cfg.src_info.data_width,
1613 flags & DMA_PREP_INTERRUPT, lli_max,
1614 d40c->base->plat_data->llis_per_log);
1615
1616 (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
1617 sgl_dst,
1618 sgl_len,
1619 d40d->lli_log.dst,
1620 d40c->log_def.lcsp3,
1621 d40c->dma_cfg.dst_info.data_width,
1622 flags & DMA_PREP_INTERRUPT, lli_max,
1623 d40c->base->plat_data->llis_per_log);
1624
1625
1626 } else {
1627 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1628 dev_err(&d40c->chan.dev->device,
1629 "[%s] Out of memory\n", __func__);
1630 goto err;
1631 }
1632
1633 res = d40_phy_sg_to_lli(sgl_src,
1634 sgl_len,
1635 0,
1636 d40d->lli_phy.src,
1637 d40d->lli_phy.src_addr,
1638 d40c->src_def_cfg,
1639 d40c->dma_cfg.src_info.data_width,
1640 d40c->dma_cfg.src_info.psize,
1641 true);
1642
1643 if (res < 0)
1644 goto err;
1645
1646 res = d40_phy_sg_to_lli(sgl_dst,
1647 sgl_len,
1648 0,
1649 d40d->lli_phy.dst,
1650 d40d->lli_phy.dst_addr,
1651 d40c->dst_def_cfg,
1652 d40c->dma_cfg.dst_info.data_width,
1653 d40c->dma_cfg.dst_info.psize,
1654 true);
1655
1656 if (res < 0)
1657 goto err;
1658
1659 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1660 d40d->lli_pool.size, DMA_TO_DEVICE);
1661 }
1662
1663 dma_async_tx_descriptor_init(&d40d->txd, chan);
1664
1665 d40d->txd.tx_submit = d40_tx_submit;
1666
1667 spin_unlock_irqrestore(&d40c->lock, flg);
1668
1669 return &d40d->txd;
1670err:
1671 spin_unlock_irqrestore(&d40c->lock, flg);
1672 return NULL;
1673}
1674EXPORT_SYMBOL(stedma40_memcpy_sg);
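/*
 * Illustrative usage sketch (assuming a channel already obtained through
 * stedma40_filter() below, and two scatterlists of sg_len entries each
 * describing equal amounts of data):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = stedma40_memcpy_sg(chan, dst_sg, src_sg, sg_len,
 *				DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -EBUSY;
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 */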
1675
1676bool stedma40_filter(struct dma_chan *chan, void *data)
1677{
1678 struct stedma40_chan_cfg *info = data;
1679 struct d40_chan *d40c =
1680 container_of(chan, struct d40_chan, chan);
1681 int err;
1682
1683 if (data) {
1684 err = d40_validate_conf(d40c, info);
1685 if (!err)
1686 d40c->dma_cfg = *info;
1687 } else
1688 err = d40_config_memcpy(d40c);
1689
1690 return err == 0;
1691}
1692EXPORT_SYMBOL(stedma40_filter);
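/*
 * Illustrative usage sketch: stedma40_filter() is intended to be passed to
 * dma_request_channel() together with a stedma40_chan_cfg describing the
 * transfer, e.g.:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &my_chan_cfg);
 *
 * where my_chan_cfg is a hypothetical, platform-provided
 * struct stedma40_chan_cfg. Passing NULL data instead selects the default
 * memcpy configuration via d40_config_memcpy().
 */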
1693
1694/* DMA ENGINE functions */
1695static int d40_alloc_chan_resources(struct dma_chan *chan)
1696{
1697 int err;
1698 unsigned long flags;
1699 struct d40_chan *d40c =
1700 container_of(chan, struct d40_chan, chan);
1701
1702 spin_lock_irqsave(&d40c->lock, flags);
1703
1704 d40c->completed = chan->cookie = 1;
1705
1706 /*
1707 * If no dma configuration is set (channel_type == 0)
1708 * use default configuration
1709 */
1710 if (d40c->dma_cfg.channel_type == 0) {
1711 err = d40_config_memcpy(d40c);
1712 if (err)
1713 goto err_alloc;
1714 }
1715
1716 err = d40_allocate_channel(d40c);
1717 if (err) {
1718 dev_err(&d40c->chan.dev->device,
1719 "[%s] Failed to allocate channel\n", __func__);
1720 goto err_alloc;
1721 }
1722
1723 err = d40_config_chan(d40c, &d40c->dma_cfg);
1724 if (err) {
1725 dev_err(&d40c->chan.dev->device,
1726 "[%s] Failed to configure channel\n",
1727 __func__);
1728 goto err_config;
1729 }
1730
1731 spin_unlock_irqrestore(&d40c->lock, flags);
1732 return 0;
1733
1734 err_config:
1735 (void) d40_free_dma(d40c);
1736 err_alloc:
1737 spin_unlock_irqrestore(&d40c->lock, flags);
1738 dev_err(&d40c->chan.dev->device,
1739 "[%s] Channel allocation failed\n", __func__);
1740 return -EINVAL;
1741}
1742
1743static void d40_free_chan_resources(struct dma_chan *chan)
1744{
1745 struct d40_chan *d40c =
1746 container_of(chan, struct d40_chan, chan);
1747 int err;
1748 unsigned long flags;
1749
1750 spin_lock_irqsave(&d40c->lock, flags);
1751
1752 err = d40_free_dma(d40c);
1753
1754 if (err)
1755 dev_err(&d40c->chan.dev->device,
1756 "[%s] Failed to free channel\n", __func__);
1757 spin_unlock_irqrestore(&d40c->lock, flags);
1758}
1759
1760static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1761 dma_addr_t dst,
1762 dma_addr_t src,
1763 size_t size,
1764 unsigned long flags)
1765{
1766 struct d40_desc *d40d;
1767 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1768 chan);
1769 unsigned long flg;
1770 int err = 0;
1771
1772 spin_lock_irqsave(&d40c->lock, flg);
1773 d40d = d40_desc_get(d40c);
1774
1775 if (d40d == NULL) {
1776 dev_err(&d40c->chan.dev->device,
1777 "[%s] Descriptor is NULL\n", __func__);
1778 goto err;
1779 }
1780
1781 memset(d40d, 0, sizeof(struct d40_desc));
1782
1783 d40d->txd.flags = flags;
1784
1785 dma_async_tx_descriptor_init(&d40d->txd, chan);
1786
1787 d40d->txd.tx_submit = d40_tx_submit;
1788
1789 if (d40c->log_num != D40_PHY_CHAN) {
1790
1791 if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
1792 dev_err(&d40c->chan.dev->device,
1793 "[%s] Out of memory\n", __func__);
1794 goto err;
1795 }
1796 d40d->lli_len = 1;
1797
1798 d40_log_fill_lli(d40d->lli_log.src,
1799 src,
1800 size,
1801 0,
1802 d40c->log_def.lcsp1,
1803 d40c->dma_cfg.src_info.data_width,
1804 true, true);
1805
1806 d40_log_fill_lli(d40d->lli_log.dst,
1807 dst,
1808 size,
1809 0,
1810 d40c->log_def.lcsp3,
1811 d40c->dma_cfg.dst_info.data_width,
1812 true, true);
1813
1814 } else {
1815
1816 if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
1817 dev_err(&d40c->chan.dev->device,
1818 "[%s] Out of memory\n", __func__);
1819 goto err;
1820 }
1821
1822 err = d40_phy_fill_lli(d40d->lli_phy.src,
1823 src,
1824 size,
1825 d40c->dma_cfg.src_info.psize,
1826 0,
1827 d40c->src_def_cfg,
1828 true,
1829 d40c->dma_cfg.src_info.data_width,
1830 false);
1831 if (err)
1832 goto err_fill_lli;
1833
1834 err = d40_phy_fill_lli(d40d->lli_phy.dst,
1835 dst,
1836 size,
1837 d40c->dma_cfg.dst_info.psize,
1838 0,
1839 d40c->dst_def_cfg,
1840 true,
1841 d40c->dma_cfg.dst_info.data_width,
1842 false);
1843
1844 if (err)
1845 goto err_fill_lli;
1846
1847 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1848 d40d->lli_pool.size, DMA_TO_DEVICE);
1849 }
1850
1851 spin_unlock_irqrestore(&d40c->lock, flg);
1852 return &d40d->txd;
1853
1854err_fill_lli:
1855 dev_err(&d40c->chan.dev->device,
1856 "[%s] Failed filling in PHY LLI\n", __func__);
1857 d40_pool_lli_free(d40d);
1858err:
1859 spin_unlock_irqrestore(&d40c->lock, flg);
1860 return NULL;
1861}
1862
1863static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1864 struct d40_chan *d40c,
1865 struct scatterlist *sgl,
1866 unsigned int sg_len,
1867 enum dma_data_direction direction,
1868 unsigned long flags)
1869{
1870 dma_addr_t dev_addr = 0;
1871 int total_size;
1872 int lli_max = d40c->base->plat_data->llis_per_log;
1873
1874 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
1875 dev_err(&d40c->chan.dev->device,
1876 "[%s] Out of memory\n", __func__);
1877 return -ENOMEM;
1878 }
1879
1880 d40d->lli_len = sg_len;
1881 d40d->lli_tcount = 0;
1882
1883 if (sg_len > 1)
1884 /*
1885 * Check if there is space available in lcla.
1886 * If not, split list into 1-length and run only
1887 * in lcpa space.
1888 */
1889 if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
1890 lli_max = 1;
1891
1892 if (direction == DMA_FROM_DEVICE) {
1893 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1894 total_size = d40_log_sg_to_dev(&d40c->lcla,
1895 sgl, sg_len,
1896 &d40d->lli_log,
1897 &d40c->log_def,
1898 d40c->dma_cfg.src_info.data_width,
1899 d40c->dma_cfg.dst_info.data_width,
1900 direction,
1901 flags & DMA_PREP_INTERRUPT,
1902 dev_addr, lli_max,
1903 d40c->base->plat_data->llis_per_log);
1904 } else if (direction == DMA_TO_DEVICE) {
1905 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1906 total_size = d40_log_sg_to_dev(&d40c->lcla,
1907 sgl, sg_len,
1908 &d40d->lli_log,
1909 &d40c->log_def,
1910 d40c->dma_cfg.src_info.data_width,
1911 d40c->dma_cfg.dst_info.data_width,
1912 direction,
1913 flags & DMA_PREP_INTERRUPT,
1914 dev_addr, lli_max,
1915 d40c->base->plat_data->llis_per_log);
1916 } else
1917 return -EINVAL;
1918 if (total_size < 0)
1919 return -EINVAL;
1920
1921 return 0;
1922}
1923
1924static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1925 struct d40_chan *d40c,
1926 struct scatterlist *sgl,
1927 unsigned int sgl_len,
1928 enum dma_data_direction direction,
1929 unsigned long flags)
1930{
1931 dma_addr_t src_dev_addr;
1932 dma_addr_t dst_dev_addr;
1933 int res;
1934
1935 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1936 dev_err(&d40c->chan.dev->device,
1937 "[%s] Out of memory\n", __func__);
1938 return -ENOMEM;
1939 }
1940
1941 d40d->lli_len = sgl_len;
1942 d40d->lli_tcount = 0;
1943
1944 if (direction == DMA_FROM_DEVICE) {
1945 dst_dev_addr = 0;
1946 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1947 } else if (direction == DMA_TO_DEVICE) {
1948 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1949 src_dev_addr = 0;
1950 } else
1951 return -EINVAL;
1952
1953 res = d40_phy_sg_to_lli(sgl,
1954 sgl_len,
1955 src_dev_addr,
1956 d40d->lli_phy.src,
1957 d40d->lli_phy.src_addr,
1958 d40c->src_def_cfg,
1959 d40c->dma_cfg.src_info.data_width,
1960 d40c->dma_cfg.src_info.psize,
1961 true);
1962 if (res < 0)
1963 return res;
1964
1965 res = d40_phy_sg_to_lli(sgl,
1966 sgl_len,
1967 dst_dev_addr,
1968 d40d->lli_phy.dst,
1969 d40d->lli_phy.dst_addr,
1970 d40c->dst_def_cfg,
1971 d40c->dma_cfg.dst_info.data_width,
1972 d40c->dma_cfg.dst_info.psize,
1973 true);
1974 if (res < 0)
1975 return res;
1976
1977 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1978 d40d->lli_pool.size, DMA_TO_DEVICE);
1979 return 0;
1980}
1981
1982static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
1983 struct scatterlist *sgl,
1984 unsigned int sg_len,
1985 enum dma_data_direction direction,
1986 unsigned long flags)
1987{
1988 struct d40_desc *d40d;
1989 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1990 chan);
1991 unsigned long flg;
1992 int err;
1993
1994 if (d40c->dma_cfg.pre_transfer)
1995 d40c->dma_cfg.pre_transfer(chan,
1996 d40c->dma_cfg.pre_transfer_data,
1997 sg_dma_len(sgl));
1998
1999 spin_lock_irqsave(&d40c->lock, flg);
2000 d40d = d40_desc_get(d40c);
2001 spin_unlock_irqrestore(&d40c->lock, flg);
2002
2003 if (d40d == NULL)
2004 return NULL;
2005
2006 memset(d40d, 0, sizeof(struct d40_desc));
2007
2008 if (d40c->log_num != D40_PHY_CHAN)
2009 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
2010 direction, flags);
2011 else
2012 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
2013 direction, flags);
2014 if (err) {
2015 dev_err(&d40c->chan.dev->device,
2016 "[%s] Failed to prepare %s slave sg job: %d\n",
2017 __func__,
2018 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
2019 return NULL;
2020 }
2021
2022 d40d->txd.flags = flags;
2023
2024 dma_async_tx_descriptor_init(&d40d->txd, chan);
2025
2026 d40d->txd.tx_submit = d40_tx_submit;
2027
2028 return &d40d->txd;
2029}
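/*
 * Note: there is no runtime slave configuration in this version; the
 * device-side address of a slave transfer is looked up from the platform
 * data dev_rx[]/dev_tx[] tables using the src/dst dev_type from the channel
 * configuration, as done in the two helpers above.
 */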
2030
2031static enum dma_status d40_tx_status(struct dma_chan *chan,
2032 dma_cookie_t cookie,
2033 struct dma_tx_state *txstate)
2034{
2035 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2036 dma_cookie_t last_used;
2037 dma_cookie_t last_complete;
2038 int ret;
2039
2040 last_complete = d40c->completed;
2041 last_used = chan->cookie;
2042
2043 if (d40_is_paused(d40c))
2044 ret = DMA_PAUSED;
2045 else
2046 ret = dma_async_is_complete(cookie, last_complete, last_used);
2047
2048 dma_set_tx_state(txstate, last_complete, last_used,
2049 stedma40_residue(chan));
2050
2051 return ret;
2052}
2053
2054static void d40_issue_pending(struct dma_chan *chan)
2055{
2056 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2057 unsigned long flags;
2058
2059 spin_lock_irqsave(&d40c->lock, flags);
2060
2061 /* Busy means that pending jobs are already being processed */
2062 if (!d40c->busy)
2063 (void) d40_queue_start(d40c);
2064
2065 spin_unlock_irqrestore(&d40c->lock, flags);
2066}
2067
2068static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2069 unsigned long arg)
2070{
2071 unsigned long flags;
2072 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2073
2074 switch (cmd) {
2075 case DMA_TERMINATE_ALL:
2076 spin_lock_irqsave(&d40c->lock, flags);
2077 d40_term_all(d40c);
2078 spin_unlock_irqrestore(&d40c->lock, flags);
2079 return 0;
2080 case DMA_PAUSE:
2081 return d40_pause(chan);
2082 case DMA_RESUME:
2083 return d40_resume(chan);
2084 }
2085
2086 /* Other commands are unimplemented */
2087 return -ENXIO;
2088}
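/*
 * Illustrative usage: clients reach these commands through the generic
 * dmaengine control hook registered in d40_dmaengine_init() below, e.g.
 *
 *	chan->device->device_control(chan, DMA_PAUSE, 0);
 *	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 */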
2089
2090/* Initialization functions */
2091
2092static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2093 struct d40_chan *chans, int offset,
2094 int num_chans)
2095{
2096 int i = 0;
2097 struct d40_chan *d40c;
2098
2099 INIT_LIST_HEAD(&dma->channels);
2100
2101 for (i = offset; i < offset + num_chans; i++) {
2102 d40c = &chans[i];
2103 d40c->base = base;
2104 d40c->chan.device = dma;
2105
2106 /* Invalidate lcla element */
2107 d40c->lcla.src_id = -1;
2108 d40c->lcla.dst_id = -1;
2109
2110 spin_lock_init(&d40c->lock);
2111
2112 d40c->log_num = D40_PHY_CHAN;
2113
2114 INIT_LIST_HEAD(&d40c->free);
2115 INIT_LIST_HEAD(&d40c->active);
2116 INIT_LIST_HEAD(&d40c->queue);
2117 INIT_LIST_HEAD(&d40c->client);
2118
2119 d40c->free_len = 0;
2120
2121 tasklet_init(&d40c->tasklet, dma_tasklet,
2122 (unsigned long) d40c);
2123
2124 list_add_tail(&d40c->chan.device_node,
2125 &dma->channels);
2126 }
2127}
2128
2129static int __init d40_dmaengine_init(struct d40_base *base,
2130 int num_reserved_chans)
2131{
2132	int err;
2133
2134 d40_chan_init(base, &base->dma_slave, base->log_chans,
2135 0, base->num_log_chans);
2136
2137 dma_cap_zero(base->dma_slave.cap_mask);
2138 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2139
2140 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2141 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2142 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2143 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2144 base->dma_slave.device_tx_status = d40_tx_status;
2145 base->dma_slave.device_issue_pending = d40_issue_pending;
2146 base->dma_slave.device_control = d40_control;
2147 base->dma_slave.dev = base->dev;
2148
2149 err = dma_async_device_register(&base->dma_slave);
2150
2151 if (err) {
2152 dev_err(base->dev,
2153 "[%s] Failed to register slave channels\n",
2154 __func__);
2155 goto failure1;
2156 }
2157
2158 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2159 base->num_log_chans, base->plat_data->memcpy_len);
2160
2161 dma_cap_zero(base->dma_memcpy.cap_mask);
2162 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2163
2164 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
2165 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2166 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2167 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2168 base->dma_memcpy.device_tx_status = d40_tx_status;
2169 base->dma_memcpy.device_issue_pending = d40_issue_pending;
2170 base->dma_memcpy.device_control = d40_control;
2171 base->dma_memcpy.dev = base->dev;
2172 /*
2173	 * This controller can only access addresses aligned to
2174	 * 32-bit boundaries, i.e. 2^2 bytes
2175 */
2176 base->dma_memcpy.copy_align = 2;
2177
2178 err = dma_async_device_register(&base->dma_memcpy);
2179
2180 if (err) {
2181 dev_err(base->dev,
2182			"[%s] Failed to register memcpy only channels\n",
2183 __func__);
2184 goto failure2;
2185 }
2186
2187 d40_chan_init(base, &base->dma_both, base->phy_chans,
2188 0, num_reserved_chans);
2189
2190 dma_cap_zero(base->dma_both.cap_mask);
2191 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2192 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2193
2194 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
2195 base->dma_both.device_free_chan_resources = d40_free_chan_resources;
2196 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2197 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2198 base->dma_both.device_tx_status = d40_tx_status;
2199 base->dma_both.device_issue_pending = d40_issue_pending;
2200 base->dma_both.device_control = d40_control;
2201 base->dma_both.dev = base->dev;
2202 base->dma_both.copy_align = 2;
2203 err = dma_async_device_register(&base->dma_both);
2204
2205 if (err) {
2206 dev_err(base->dev,
2207 "[%s] Failed to register logical and physical capable channels\n",
2208 __func__);
2209 goto failure3;
2210 }
2211 return 0;
2212failure3:
2213 dma_async_device_unregister(&base->dma_memcpy);
2214failure2:
2215 dma_async_device_unregister(&base->dma_slave);
2216failure1:
2217 return err;
2218}
2219
2220/* Initialization functions. */
2221
2222static int __init d40_phy_res_init(struct d40_base *base)
2223{
2224 int i;
2225 int num_phy_chans_avail = 0;
2226 u32 val[2];
2227 int odd_even_bit = -2;
2228
2229 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2230 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2231
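	/*
	 * The mode/security state appears to be encoded as two bits per
	 * channel, with even-numbered channels in PRSME (val[0]) and
	 * odd-numbered channels in PRSMO (val[1]); a field value of 1 marks
	 * a secure-only channel, which the loop below treats as occupied.
	 */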
2232 for (i = 0; i < base->num_phy_chans; i++) {
2233 base->phy_res[i].num = i;
2234 odd_even_bit += 2 * ((i % 2) == 0);
2235 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2236 /* Mark security only channels as occupied */
2237 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2238 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2239 } else {
2240 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2241 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2242 num_phy_chans_avail++;
2243 }
2244 spin_lock_init(&base->phy_res[i].lock);
2245 }
2246 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2247 num_phy_chans_avail, base->num_phy_chans);
2248
2249 /* Verify settings extended vs standard */
2250 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2251
2252 for (i = 0; i < base->num_phy_chans; i++) {
2253
2254 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2255 (val[0] & 0x3) != 1)
2256 dev_info(base->dev,
2257 "[%s] INFO: channel %d is misconfigured (%d)\n",
2258 __func__, i, val[0] & 0x3);
2259
2260 val[0] = val[0] >> 2;
2261 }
2262
2263 return num_phy_chans_avail;
2264}
2265
2266static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2267{
2268 static const struct d40_reg_val dma_id_regs[] = {
2269 /* Peripheral Id */
2270 { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2271 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2272 /*
2273 * D40_DREG_PERIPHID2 Depends on HW revision:
2274 * MOP500/HREF ED has 0x0008,
2275 * ? has 0x0018,
2276 * HREF V1 has 0x0028
2277 */
2278 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2279
2280 /* PCell Id */
2281 { .reg = D40_DREG_CELLID0, .val = 0x000d},
2282 { .reg = D40_DREG_CELLID1, .val = 0x00f0},
2283 { .reg = D40_DREG_CELLID2, .val = 0x0005},
2284 { .reg = D40_DREG_CELLID3, .val = 0x00b1}
2285 };
2286 struct stedma40_platform_data *plat_data;
2287 struct clk *clk = NULL;
2288 void __iomem *virtbase = NULL;
2289 struct resource *res = NULL;
2290 struct d40_base *base = NULL;
2291 int num_log_chans = 0;
2292 int num_phy_chans;
2293 int i;
2294
2295 clk = clk_get(&pdev->dev, NULL);
2296
2297 if (IS_ERR(clk)) {
2298 dev_err(&pdev->dev, "[%s] No matching clock found\n",
2299 __func__);
2300 goto failure;
2301 }
2302
2303 clk_enable(clk);
2304
2305 /* Get IO for DMAC base address */
2306 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2307 if (!res)
2308 goto failure;
2309
2310 if (request_mem_region(res->start, resource_size(res),
2311 D40_NAME " I/O base") == NULL)
2312 goto failure;
2313
2314 virtbase = ioremap(res->start, resource_size(res));
2315 if (!virtbase)
2316 goto failure;
2317
2318 /* HW version check */
2319 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2320 if (dma_id_regs[i].val !=
2321 readl(virtbase + dma_id_regs[i].reg)) {
2322 dev_err(&pdev->dev,
2323 "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2324 __func__,
2325 dma_id_regs[i].val,
2326 dma_id_regs[i].reg,
2327 readl(virtbase + dma_id_regs[i].reg));
2328 goto failure;
2329 }
2330 }
2331
2332 i = readl(virtbase + D40_DREG_PERIPHID2);
2333
2334 if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
2335 dev_err(&pdev->dev,
2336 "[%s] Unknown designer! Got %x wanted %x\n",
2337 __func__, i & 0xf, D40_PERIPHID2_DESIGNER);
2338 goto failure;
2339 }
2340
2341 /* The number of physical channels on this HW */
2342 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
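	/* For example, an ICFG field value of 1 yields 4 * 1 + 4 = 8 channels. */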
2343
2344 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2345 (i >> 4) & 0xf, res->start);
2346
2347 plat_data = pdev->dev.platform_data;
2348
2349 /* Count the number of logical channels in use */
2350 for (i = 0; i < plat_data->dev_len; i++)
2351 if (plat_data->dev_rx[i] != 0)
2352 num_log_chans++;
2353
2354 for (i = 0; i < plat_data->dev_len; i++)
2355 if (plat_data->dev_tx[i] != 0)
2356 num_log_chans++;
2357
2358 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2359 (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2360 sizeof(struct d40_chan), GFP_KERNEL);
2361
2362 if (base == NULL) {
2363 dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
2364 goto failure;
2365 }
2366
2367 base->clk = clk;
2368 base->num_phy_chans = num_phy_chans;
2369 base->num_log_chans = num_log_chans;
2370 base->phy_start = res->start;
2371 base->phy_size = resource_size(res);
2372 base->virtbase = virtbase;
2373 base->plat_data = plat_data;
2374 base->dev = &pdev->dev;
2375 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2376 base->log_chans = &base->phy_chans[num_phy_chans];
2377
2378 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2379 GFP_KERNEL);
2380 if (!base->phy_res)
2381 goto failure;
2382
2383 base->lookup_phy_chans = kzalloc(num_phy_chans *
2384 sizeof(struct d40_chan *),
2385 GFP_KERNEL);
2386 if (!base->lookup_phy_chans)
2387 goto failure;
2388
2389 if (num_log_chans + plat_data->memcpy_len) {
2390 /*
2391		 * The max number of logical channels is the number of event
2392		 * lines for all src and dst devices
2393 */
2394 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2395 sizeof(struct d40_chan *),
2396 GFP_KERNEL);
2397 if (!base->lookup_log_chans)
2398 goto failure;
2399 }
2400 base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
2401 GFP_KERNEL);
2402 if (!base->lcla_pool.alloc_map)
2403 goto failure;
2404
2405 return base;
2406
2407failure:
2408	if (!IS_ERR(clk)) {
2409 clk_disable(clk);
2410 clk_put(clk);
2411 }
2412 if (virtbase)
2413 iounmap(virtbase);
2414 if (res)
2415 release_mem_region(res->start,
2416 resource_size(res));
2419
2420 if (base) {
2421 kfree(base->lcla_pool.alloc_map);
2422 kfree(base->lookup_log_chans);
2423 kfree(base->lookup_phy_chans);
2424 kfree(base->phy_res);
2425 kfree(base);
2426 }
2427
2428 return NULL;
2429}
2430
2431static void __init d40_hw_init(struct d40_base *base)
2432{
2433
2434 static const struct d40_reg_val dma_init_reg[] = {
2435 /* Clock every part of the DMA block from start */
2436 { .reg = D40_DREG_GCC, .val = 0x0000ff01},
2437
2438 /* Interrupts on all logical channels */
2439 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2440 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2441 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2442 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2443 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2444 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2445 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2446 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2447 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2448 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2449 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2450 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2451 };
2452 int i;
2453 u32 prmseo[2] = {0, 0};
2454 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2455 u32 pcmis = 0;
2456 u32 pcicr = 0;
2457
2458 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2459 writel(dma_init_reg[i].val,
2460 base->virtbase + dma_init_reg[i].reg);
2461
2462 /* Configure all our dma channels to default settings */
2463 for (i = 0; i < base->num_phy_chans; i++) {
2464
2465 activeo[i % 2] = activeo[i % 2] << 2;
2466
2467 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2468 == D40_ALLOC_PHY) {
2469 activeo[i % 2] |= 3;
2470 continue;
2471 }
2472
2473 /* Enable interrupt # */
2474 pcmis = (pcmis << 1) | 1;
2475
2476 /* Clear interrupt # */
2477 pcicr = (pcicr << 1) | 1;
2478
2479 /* Set channel to physical mode */
2480 prmseo[i % 2] = prmseo[i % 2] << 2;
2481 prmseo[i % 2] |= 1;
2482
2483 }
2484
2485 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2486 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2487 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2488 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2489
2490 /* Write which interrupt to enable */
2491 writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2492
2493 /* Write which interrupt to clear */
2494 writel(pcicr, base->virtbase + D40_DREG_PCICR);
2495
2496}
2497
2498static int __init d40_probe(struct platform_device *pdev)
2499{
2500 int err;
2501 int ret = -ENOENT;
2502 struct d40_base *base;
2503 struct resource *res = NULL;
2504 int num_reserved_chans;
2505 u32 val;
2506
2507 base = d40_hw_detect_init(pdev);
2508
2509 if (!base)
2510 goto failure;
2511
2512 num_reserved_chans = d40_phy_res_init(base);
2513
2514 platform_set_drvdata(pdev, base);
2515
2516 spin_lock_init(&base->interrupt_lock);
2517 spin_lock_init(&base->execmd_lock);
2518
2519 /* Get IO for logical channel parameter address */
2520 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2521 if (!res) {
2522 ret = -ENOENT;
2523 dev_err(&pdev->dev,
2524 "[%s] No \"lcpa\" memory resource\n",
2525 __func__);
2526 goto failure;
2527 }
2528 base->lcpa_size = resource_size(res);
2529 base->phy_lcpa = res->start;
2530
2531 if (request_mem_region(res->start, resource_size(res),
2532 D40_NAME " I/O lcpa") == NULL) {
2533 ret = -EBUSY;
2534 dev_err(&pdev->dev,
2535 "[%s] Failed to request LCPA region 0x%x-0x%x\n",
2536 __func__, res->start, res->end);
2537 goto failure;
2538 }
2539
2540 /* We make use of ESRAM memory for this. */
2541 val = readl(base->virtbase + D40_DREG_LCPA);
2542 if (res->start != val && val != 0) {
2543 dev_warn(&pdev->dev,
2544 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2545 __func__, val, res->start);
2546 } else
2547 writel(res->start, base->virtbase + D40_DREG_LCPA);
2548
2549 base->lcpa_base = ioremap(res->start, resource_size(res));
2550 if (!base->lcpa_base) {
2551 ret = -ENOMEM;
2552 dev_err(&pdev->dev,
2553 "[%s] Failed to ioremap LCPA region\n",
2554 __func__);
2555 goto failure;
2556 }
2557 /* Get IO for logical channel link address */
2558 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla");
2559 if (!res) {
2560 ret = -ENOENT;
2561 dev_err(&pdev->dev,
2562 "[%s] No \"lcla\" resource defined\n",
2563 __func__);
2564 goto failure;
2565 }
2566
2567 base->lcla_pool.base_size = resource_size(res);
2568 base->lcla_pool.phy = res->start;
2569
2570 if (request_mem_region(res->start, resource_size(res),
2571 D40_NAME " I/O lcla") == NULL) {
2572 ret = -EBUSY;
2573 dev_err(&pdev->dev,
2574 "[%s] Failed to request LCLA region 0x%x-0x%x\n",
2575 __func__, res->start, res->end);
2576 goto failure;
2577 }
2578 val = readl(base->virtbase + D40_DREG_LCLA);
2579 if (res->start != val && val != 0) {
2580 dev_warn(&pdev->dev,
2581 "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n",
2582 __func__, val, res->start);
2583 } else
2584 writel(res->start, base->virtbase + D40_DREG_LCLA);
2585
2586 base->lcla_pool.base = ioremap(res->start, resource_size(res));
2587 if (!base->lcla_pool.base) {
2588 ret = -ENOMEM;
2589 dev_err(&pdev->dev,
2590 "[%s] Failed to ioremap LCLA 0x%x-0x%x\n",
2591 __func__, res->start, res->end);
2592 goto failure;
2593 }
2594
2595 spin_lock_init(&base->lcla_pool.lock);
2596
2597 base->lcla_pool.num_blocks = base->num_phy_chans;
2598
2599 base->irq = platform_get_irq(pdev, 0);
2600
2601 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2602
2603 if (ret) {
2604 dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
2605 goto failure;
2606 }
2607
2608 err = d40_dmaengine_init(base, num_reserved_chans);
2609 if (err)
2610 goto failure;
2611
2612 d40_hw_init(base);
2613
2614 dev_info(base->dev, "initialized\n");
2615 return 0;
2616
2617failure:
2618 if (base) {
2619 if (base->virtbase)
2620 iounmap(base->virtbase);
2621 if (base->lcla_pool.phy)
2622 release_mem_region(base->lcla_pool.phy,
2623 base->lcla_pool.base_size);
2624 if (base->phy_lcpa)
2625 release_mem_region(base->phy_lcpa,
2626 base->lcpa_size);
2627 if (base->phy_start)
2628 release_mem_region(base->phy_start,
2629 base->phy_size);
2630 if (base->clk) {
2631 clk_disable(base->clk);
2632 clk_put(base->clk);
2633 }
2634
2635 kfree(base->lcla_pool.alloc_map);
2636 kfree(base->lookup_log_chans);
2637 kfree(base->lookup_phy_chans);
2638 kfree(base->phy_res);
2639 kfree(base);
2640 }
2641
2642 dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
2643 return ret;
2644}
2645
2646static struct platform_driver d40_driver = {
2647 .driver = {
2648 .owner = THIS_MODULE,
2649 .name = D40_NAME,
2650 },
2651};
2652
2653int __init stedma40_init(void)
2654{
2655 return platform_driver_probe(&d40_driver, d40_probe);
2656}
2657arch_initcall(stedma40_init);
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
new file mode 100644
index 000000000000..561fdd8a80c1
--- /dev/null
+++ b/drivers/dma/ste_dma40_ll.c
@@ -0,0 +1,454 @@
1/*
2 * drivers/dma/ste_dma40_ll.c
3 *
4 * Copyright (C) ST-Ericsson 2007-2010
5 * License terms: GNU General Public License (GPL) version 2
6 * Author: Per Friden <per.friden@stericsson.com>
7 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
8 */
9
10#include <linux/kernel.h>
11#include <plat/ste_dma40.h>
12
13#include "ste_dma40_ll.h"
14
15/* Sets up proper LCSP1 and LCSP3 register for a logical channel */
16void d40_log_cfg(struct stedma40_chan_cfg *cfg,
17 u32 *lcsp1, u32 *lcsp3)
18{
19 u32 l3 = 0; /* dst */
20 u32 l1 = 0; /* src */
21
22 /* src is mem? -> increase address pos */
23 if (cfg->dir == STEDMA40_MEM_TO_PERIPH ||
24 cfg->dir == STEDMA40_MEM_TO_MEM)
25 l1 |= 1 << D40_MEM_LCSP1_SCFG_INCR_POS;
26
27 /* dst is mem? -> increase address pos */
28 if (cfg->dir == STEDMA40_PERIPH_TO_MEM ||
29 cfg->dir == STEDMA40_MEM_TO_MEM)
30 l3 |= 1 << D40_MEM_LCSP3_DCFG_INCR_POS;
31
32 /* src is hw? -> master port 1 */
33 if (cfg->dir == STEDMA40_PERIPH_TO_MEM ||
34 cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
35 l1 |= 1 << D40_MEM_LCSP1_SCFG_MST_POS;
36
37 /* dst is hw? -> master port 1 */
38 if (cfg->dir == STEDMA40_MEM_TO_PERIPH ||
39 cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
40 l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS;
41
42 l3 |= 1 << D40_MEM_LCSP3_DCFG_TIM_POS;
43 l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS;
44 l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS;
45 l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS;
46 l3 |= 1 << D40_MEM_LCSP3_DTCP_POS;
47
48 l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS;
49 l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
50 l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS;
51 l1 |= 1 << D40_MEM_LCSP1_STCP_POS;
52
53 *lcsp1 = l1;
54 *lcsp3 = l3;
55
56}
57
58/* Sets up SRC and DST CFG register for both logical and physical channels */
59void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
60 u32 *src_cfg, u32 *dst_cfg, bool is_log)
61{
62 u32 src = 0;
63 u32 dst = 0;
64
65 if (!is_log) {
66 /* Physical channel */
67 if ((cfg->dir == STEDMA40_PERIPH_TO_MEM) ||
68 (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
69 /* Set master port to 1 */
70 src |= 1 << D40_SREG_CFG_MST_POS;
71 src |= D40_TYPE_TO_EVENT(cfg->src_dev_type);
72
73 if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
74 src |= 1 << D40_SREG_CFG_PHY_TM_POS;
75 else
76 src |= 3 << D40_SREG_CFG_PHY_TM_POS;
77 }
78 if ((cfg->dir == STEDMA40_MEM_TO_PERIPH) ||
79 (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
80 /* Set master port to 1 */
81 dst |= 1 << D40_SREG_CFG_MST_POS;
82 dst |= D40_TYPE_TO_EVENT(cfg->dst_dev_type);
83
84 if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
85 dst |= 1 << D40_SREG_CFG_PHY_TM_POS;
86 else
87 dst |= 3 << D40_SREG_CFG_PHY_TM_POS;
88 }
89 /* Interrupt on end of transfer for destination */
90 dst |= 1 << D40_SREG_CFG_TIM_POS;
91
92 /* Generate interrupt on error */
93 src |= 1 << D40_SREG_CFG_EIM_POS;
94 dst |= 1 << D40_SREG_CFG_EIM_POS;
95
96 /* PSIZE */
97 if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) {
98 src |= 1 << D40_SREG_CFG_PHY_PEN_POS;
99 src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS;
100 }
101 if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) {
102 dst |= 1 << D40_SREG_CFG_PHY_PEN_POS;
103 dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS;
104 }
105
106 /* Element size */
107 src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS;
108 dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS;
109
110 } else {
111 /* Logical channel */
112 dst |= 1 << D40_SREG_CFG_LOG_GIM_POS;
113 src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
114 }
115
116 if (cfg->channel_type & STEDMA40_HIGH_PRIORITY_CHANNEL) {
117 src |= 1 << D40_SREG_CFG_PRI_POS;
118 dst |= 1 << D40_SREG_CFG_PRI_POS;
119 }
120
121 src |= cfg->src_info.endianess << D40_SREG_CFG_LBE_POS;
122 dst |= cfg->dst_info.endianess << D40_SREG_CFG_LBE_POS;
123
124 *src_cfg = src;
125 *dst_cfg = dst;
126}
127
128int d40_phy_fill_lli(struct d40_phy_lli *lli,
129 dma_addr_t data,
130 u32 data_size,
131 int psize,
132 dma_addr_t next_lli,
133 u32 reg_cfg,
134 bool term_int,
135 u32 data_width,
136 bool is_device)
137{
138 int num_elems;
139
140 if (psize == STEDMA40_PSIZE_PHY_1)
141 num_elems = 1;
142 else
143 num_elems = 2 << psize;
144
145 /*
146	 * The size field is 16 bit. data_width is 8, 16, 32 or 64 bit.
147	 * Blocks larger than 64 KiB must be split.
148 */
149 if (data_size > (0xffff << data_width))
150 return -EINVAL;
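	/*
	 * Worked example (illustrative): with 32-bit elements
	 * (data_width == 2) a single LLI can cover at most
	 * 0xffff << 2 = 262140 bytes.
	 */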
151
152 /* Must be aligned */
153 if (!IS_ALIGNED(data, 0x1 << data_width))
154 return -EINVAL;
155
156	/* Transfer size can't be smaller than (num_elems * elem_size) */
157 if (data_size < num_elems * (0x1 << data_width))
158 return -EINVAL;
159
160	/* The number of elements, i.e. how many chunks */
161 lli->reg_elt = (data_size >> data_width) << D40_SREG_ELEM_PHY_ECNT_POS;
162
163 /*
164 * Distance to next element sized entry.
165 * Usually the size of the element unless you want gaps.
166 */
167 if (!is_device)
168 lli->reg_elt |= (0x1 << data_width) <<
169 D40_SREG_ELEM_PHY_EIDX_POS;
170
171 /* Where the data is */
172 lli->reg_ptr = data;
173 lli->reg_cfg = reg_cfg;
174
175 /* If this scatter list entry is the last one, no next link */
176 if (next_lli == 0)
177 lli->reg_lnk = 0x1 << D40_SREG_LNK_PHY_TCP_POS;
178 else
179 lli->reg_lnk = next_lli;
180
181	/* Set/clear interrupt generation on this link item. */
182 if (term_int)
183 lli->reg_cfg |= 0x1 << D40_SREG_CFG_TIM_POS;
184 else
185 lli->reg_cfg &= ~(0x1 << D40_SREG_CFG_TIM_POS);
186
187 /* Post link */
188 lli->reg_lnk |= 0 << D40_SREG_LNK_PHY_PRE_POS;
189
190 return 0;
191}
192
193int d40_phy_sg_to_lli(struct scatterlist *sg,
194 int sg_len,
195 dma_addr_t target,
196 struct d40_phy_lli *lli,
197 dma_addr_t lli_phys,
198 u32 reg_cfg,
199 u32 data_width,
200 int psize,
201 bool term_int)
202{
203 int total_size = 0;
204 int i;
205 struct scatterlist *current_sg = sg;
206 dma_addr_t next_lli_phys;
207 dma_addr_t dst;
208 int err = 0;
209
210 for_each_sg(sg, current_sg, sg_len, i) {
211
212 total_size += sg_dma_len(current_sg);
213
214 /* If this scatter list entry is the last one, no next link */
215 if (sg_len - 1 == i)
216 next_lli_phys = 0;
217 else
218 next_lli_phys = ALIGN(lli_phys + (i + 1) *
219 sizeof(struct d40_phy_lli),
220 D40_LLI_ALIGN);
221
222 if (target)
223 dst = target;
224 else
225 dst = sg_phys(current_sg);
226
227 err = d40_phy_fill_lli(&lli[i],
228 dst,
229 sg_dma_len(current_sg),
230 psize,
231 next_lli_phys,
232 reg_cfg,
233 !next_lli_phys,
234 data_width,
235 target == dst);
236 if (err)
237 goto err;
238 }
239
240 return total_size;
241 err:
242 return err;
243}
244
245
246void d40_phy_lli_write(void __iomem *virtbase,
247 u32 phy_chan_num,
248 struct d40_phy_lli *lli_dst,
249 struct d40_phy_lli *lli_src)
250{
251
252 writel(lli_src->reg_cfg, virtbase + D40_DREG_PCBASE +
253 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSCFG);
254 writel(lli_src->reg_elt, virtbase + D40_DREG_PCBASE +
255 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
256 writel(lli_src->reg_ptr, virtbase + D40_DREG_PCBASE +
257 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSPTR);
258 writel(lli_src->reg_lnk, virtbase + D40_DREG_PCBASE +
259 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSLNK);
260
261 writel(lli_dst->reg_cfg, virtbase + D40_DREG_PCBASE +
262 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDCFG);
263 writel(lli_dst->reg_elt, virtbase + D40_DREG_PCBASE +
264 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
265 writel(lli_dst->reg_ptr, virtbase + D40_DREG_PCBASE +
266 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDPTR);
267 writel(lli_dst->reg_lnk, virtbase + D40_DREG_PCBASE +
268 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDLNK);
269
270}
271
272/* DMA logical lli operations */
273
274void d40_log_fill_lli(struct d40_log_lli *lli,
275 dma_addr_t data, u32 data_size,
276 u32 lli_next_off, u32 reg_cfg,
277 u32 data_width,
278 bool term_int, bool addr_inc)
279{
280 lli->lcsp13 = reg_cfg;
281
282 /* The number of elements to transfer */
283 lli->lcsp02 = ((data_size >> data_width) <<
284 D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;
285 /* 16 LSBs address of the current element */
286 lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
287 /* 16 MSBs address of the current element */
288 lli->lcsp13 |= data & D40_MEM_LCSP1_SPTR_MASK;
289
290 if (addr_inc)
291 lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK;
292
293 lli->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
294 /* If this scatter list entry is the last one, no next link */
295 lli->lcsp13 |= (lli_next_off << D40_MEM_LCSP1_SLOS_POS) &
296 D40_MEM_LCSP1_SLOS_MASK;
297
298 if (term_int)
299 lli->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
300 else
301 lli->lcsp13 &= ~D40_MEM_LCSP1_SCFG_TIM_MASK;
302}
303
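A hedged, userspace-only sketch of how d40_log_fill_lli() above splits a transfer into the two logical parameter words: the element count and the 16 address LSBs land in lcsp02, the 16 address MSBs (plus config bits, omitted here) in lcsp13. The masks mirror the D40_MEM_LCSP0_ and D40_MEM_LCSP1_ defines rather than reusing them, and the sample address is arbitrary.

#include <stdint.h>
#include <stdio.h>

/* Split an address/size pair the way the logical LLI layout expects. */
static void split_log_lli(uint32_t addr, uint32_t size, uint32_t width,
			  uint32_t *lcsp02, uint32_t *lcsp13)
{
	*lcsp02 = ((size >> width) << 16) & 0xffff0000u;	/* ECNT      */
	*lcsp02 |= addr & 0x0000ffffu;				/* SPTR LSBs */
	*lcsp13 = addr & 0xffff0000u;				/* SPTR MSBs */
}

int main(void)
{
	uint32_t l02, l13;

	split_log_lli(0x80012345u, 1024, 2, &l02, &l13);
	printf("lcsp02 = 0x%08x, lcsp13 = 0x%08x\n", l02, l13);
	/* prints lcsp02 = 0x01002345, lcsp13 = 0x80010000 */
	return 0;
}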
304int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
305 struct scatterlist *sg,
306 int sg_len,
307 struct d40_log_lli_bidir *lli,
308 struct d40_def_lcsp *lcsp,
309 u32 src_data_width,
310 u32 dst_data_width,
311 enum dma_data_direction direction,
312 bool term_int, dma_addr_t dev_addr, int max_len,
313 int llis_per_log)
314{
315 int total_size = 0;
316 struct scatterlist *current_sg = sg;
317 int i;
318 u32 next_lli_off_dst;
319 u32 next_lli_off_src;
320
321 next_lli_off_src = 0;
322 next_lli_off_dst = 0;
323
324 for_each_sg(sg, current_sg, sg_len, i) {
325 total_size += sg_dma_len(current_sg);
326
327 /*
328 * If this scatter list entry is the last one or
329 * max length, terminate link.
330 */
331 if (sg_len - 1 == i || ((i+1) % max_len == 0)) {
332 next_lli_off_src = 0;
333 next_lli_off_dst = 0;
334 } else {
335 if (next_lli_off_dst == 0 &&
336 next_lli_off_src == 0) {
337 /* The first lli will be at next_lli_off */
338 next_lli_off_dst = (lcla->dst_id *
339 llis_per_log + 1);
340 next_lli_off_src = (lcla->src_id *
341 llis_per_log + 1);
342 } else {
343 next_lli_off_dst++;
344 next_lli_off_src++;
345 }
346 }
347
348 if (direction == DMA_TO_DEVICE) {
349 d40_log_fill_lli(&lli->src[i],
350 sg_phys(current_sg),
351 sg_dma_len(current_sg),
352 next_lli_off_src,
353 lcsp->lcsp1, src_data_width,
354 term_int && !next_lli_off_src,
355 true);
356 d40_log_fill_lli(&lli->dst[i],
357 dev_addr,
358 sg_dma_len(current_sg),
359 next_lli_off_dst,
360 lcsp->lcsp3, dst_data_width,
361 /* No next == terminal interrupt */
362 term_int && !next_lli_off_dst,
363 false);
364 } else {
365 d40_log_fill_lli(&lli->dst[i],
366 sg_phys(current_sg),
367 sg_dma_len(current_sg),
368 next_lli_off_dst,
369 lcsp->lcsp3, dst_data_width,
370 /* No next == terminal interrupt */
371 term_int && !next_lli_off_dst,
372 true);
373 d40_log_fill_lli(&lli->src[i],
374 dev_addr,
375 sg_dma_len(current_sg),
376 next_lli_off_src,
377 lcsp->lcsp1, src_data_width,
378 term_int && !next_lli_off_src,
379 false);
380 }
381 }
382 return total_size;
383}
384
385int d40_log_sg_to_lli(int lcla_id,
386 struct scatterlist *sg,
387 int sg_len,
388 struct d40_log_lli *lli_sg,
389 u32 lcsp13, /* src or dst*/
390 u32 data_width,
391 bool term_int, int max_len, int llis_per_log)
392{
393 int total_size = 0;
394 struct scatterlist *current_sg = sg;
395 int i;
396 u32 next_lli_off = 0;
397
398 for_each_sg(sg, current_sg, sg_len, i) {
399 total_size += sg_dma_len(current_sg);
400
401 /*
402 * If this scatter list entry is the last one or
403 * max length, terminate link.
404 */
405 if (sg_len - 1 == i || ((i+1) % max_len == 0))
406 next_lli_off = 0;
407 else {
408 if (next_lli_off == 0)
409 /* The first lli will be at next_lli_off */
410 next_lli_off = lcla_id * llis_per_log + 1;
411 else
412 next_lli_off++;
413 }
414
415 d40_log_fill_lli(&lli_sg[i],
416 sg_phys(current_sg),
417 sg_dma_len(current_sg),
418 next_lli_off,
419 lcsp13, data_width,
420 term_int && !next_lli_off,
421 true);
422 }
423 return total_size;
424}
425
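To make the chaining rule in d40_log_sg_to_lli() (and d40_log_sg_to_dev()) easier to follow, here is a small userspace sketch of the offset sequence it produces: the first chained entry lands at lcla_id * llis_per_log + 1, each later entry increments the offset, and the last entry of the list (or of a max_len window) gets offset 0, which terminates the link. All parameter values are examples.

#include <stdio.h>

int main(void)
{
	int sg_len = 5, max_len = 8, lcla_id = 3, llis_per_log = 8;
	int next_off = 0;
	int i;

	for (i = 0; i < sg_len; i++) {
		if (i == sg_len - 1 || (i + 1) % max_len == 0)
			next_off = 0;		/* terminate the chain */
		else if (next_off == 0)
			next_off = lcla_id * llis_per_log + 1;
		else
			next_off++;

		printf("lli[%d] -> next offset %d\n", i, next_off);
	}
	return 0;
}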
426void d40_log_lli_write(struct d40_log_lli_full *lcpa,
427 struct d40_log_lli *lcla_src,
428 struct d40_log_lli *lcla_dst,
429 struct d40_log_lli *lli_dst,
430 struct d40_log_lli *lli_src,
431 int llis_per_log)
432{
433 u32 slos = 0;
434 u32 dlos = 0;
435 int i;
436
437 lcpa->lcsp0 = lli_src->lcsp02;
438 lcpa->lcsp1 = lli_src->lcsp13;
439 lcpa->lcsp2 = lli_dst->lcsp02;
440 lcpa->lcsp3 = lli_dst->lcsp13;
441
442 slos = lli_src->lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
443 dlos = lli_dst->lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
444
445 for (i = 0; (i < llis_per_log) && slos && dlos; i++) {
446 writel(lli_src[i+1].lcsp02, &lcla_src[i].lcsp02);
447 writel(lli_src[i+1].lcsp13, &lcla_src[i].lcsp13);
448 writel(lli_dst[i+1].lcsp02, &lcla_dst[i].lcsp02);
449 writel(lli_dst[i+1].lcsp13, &lcla_dst[i].lcsp13);
450
451 slos = lli_src[i+1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
452 dlos = lli_dst[i+1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
453 }
454}
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
new file mode 100644
index 000000000000..2029280cb332
--- /dev/null
+++ b/drivers/dma/ste_dma40_ll.h
@@ -0,0 +1,354 @@
1/*
2 * drivers/dma/ste_dma40_ll.h
3 *
4 * Copyright (C) ST-Ericsson 2007-2010
5 * License terms: GNU General Public License (GPL) version 2
6 * Author: Per Friden <per.friden@stericsson.com>
7 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
8 */
9#ifndef STE_DMA40_LL_H
10#define STE_DMA40_LL_H
11
12#define D40_DREG_PCBASE 0x400
13#define D40_DREG_PCDELTA (8 * 4)
14#define D40_LLI_ALIGN 16 /* LLI alignment must be 16 bytes. */
15
16#define D40_TYPE_TO_GROUP(type) (type / 16)
17#define D40_TYPE_TO_EVENT(type) (type % 16)
18
19/* Most bits of the CFG register are the same in log as in phy mode */
20#define D40_SREG_CFG_MST_POS 15
21#define D40_SREG_CFG_TIM_POS 14
22#define D40_SREG_CFG_EIM_POS 13
23#define D40_SREG_CFG_LOG_INCR_POS 12
24#define D40_SREG_CFG_PHY_PEN_POS 12
25#define D40_SREG_CFG_PSIZE_POS 10
26#define D40_SREG_CFG_ESIZE_POS 8
27#define D40_SREG_CFG_PRI_POS 7
28#define D40_SREG_CFG_LBE_POS 6
29#define D40_SREG_CFG_LOG_GIM_POS 5
30#define D40_SREG_CFG_LOG_MFU_POS 4
31#define D40_SREG_CFG_PHY_TM_POS 4
32#define D40_SREG_CFG_PHY_EVTL_POS 0
33
34
35/* Standard channel parameters - basic mode (element register) */
36#define D40_SREG_ELEM_PHY_ECNT_POS 16
37#define D40_SREG_ELEM_PHY_EIDX_POS 0
38
39#define D40_SREG_ELEM_PHY_ECNT_MASK (0xFFFF << D40_SREG_ELEM_PHY_ECNT_POS)
40
41/* Standard channel parameters - basic mode (Link register) */
42#define D40_SREG_LNK_PHY_TCP_POS 0
43#define D40_SREG_LNK_PHY_LMP_POS 1
44#define D40_SREG_LNK_PHY_PRE_POS 2
45/*
46 * Source/destination link address. Contains the
47 * 29-bit, 8-byte-aligned address of the reload area.
48 */
49#define D40_SREG_LNK_PHYS_LNK_MASK 0xFFFFFFF8UL
50
51/* Standard basic channel logical mode */
52
53/* Element register */
54#define D40_SREG_ELEM_LOG_ECNT_POS 16
55#define D40_SREG_ELEM_LOG_LIDX_POS 8
56#define D40_SREG_ELEM_LOG_LOS_POS 1
57#define D40_SREG_ELEM_LOG_TCP_POS 0
58
59#define D40_SREG_ELEM_LOG_LIDX_MASK (0xFF << D40_SREG_ELEM_LOG_LIDX_POS)
60
61/* Link register */
62#define D40_DEACTIVATE_EVENTLINE 0x0
63#define D40_ACTIVATE_EVENTLINE 0x1
64#define D40_EVENTLINE_POS(i) (2 * i)
65#define D40_EVENTLINE_MASK(i) (0x3 << D40_EVENTLINE_POS(i))
66
67/* Standard basic channel logical params in memory */
68
69/* LCSP0 */
70#define D40_MEM_LCSP0_ECNT_POS 16
71#define D40_MEM_LCSP0_SPTR_POS 0
72
73#define D40_MEM_LCSP0_ECNT_MASK (0xFFFF << D40_MEM_LCSP0_ECNT_POS)
74#define D40_MEM_LCSP0_SPTR_MASK (0xFFFF << D40_MEM_LCSP0_SPTR_POS)
75
76/* LCSP1 */
77#define D40_MEM_LCSP1_SPTR_POS 16
78#define D40_MEM_LCSP1_SCFG_MST_POS 15
79#define D40_MEM_LCSP1_SCFG_TIM_POS 14
80#define D40_MEM_LCSP1_SCFG_EIM_POS 13
81#define D40_MEM_LCSP1_SCFG_INCR_POS 12
82#define D40_MEM_LCSP1_SCFG_PSIZE_POS 10
83#define D40_MEM_LCSP1_SCFG_ESIZE_POS 8
84#define D40_MEM_LCSP1_SLOS_POS 1
85#define D40_MEM_LCSP1_STCP_POS 0
86
87#define D40_MEM_LCSP1_SPTR_MASK (0xFFFF << D40_MEM_LCSP1_SPTR_POS)
88#define D40_MEM_LCSP1_SCFG_TIM_MASK (0x1 << D40_MEM_LCSP1_SCFG_TIM_POS)
89#define D40_MEM_LCSP1_SCFG_INCR_MASK (0x1 << D40_MEM_LCSP1_SCFG_INCR_POS)
90#define D40_MEM_LCSP1_SCFG_PSIZE_MASK (0x3 << D40_MEM_LCSP1_SCFG_PSIZE_POS)
91#define D40_MEM_LCSP1_SLOS_MASK (0x7F << D40_MEM_LCSP1_SLOS_POS)
92#define D40_MEM_LCSP1_STCP_MASK (0x1 << D40_MEM_LCSP1_STCP_POS)
93
94/* LCSP2 */
95#define D40_MEM_LCSP2_ECNT_POS 16
96
97#define D40_MEM_LCSP2_ECNT_MASK (0xFFFF << D40_MEM_LCSP2_ECNT_POS)
98
99/* LCSP3 */
100#define D40_MEM_LCSP3_DCFG_MST_POS 15
101#define D40_MEM_LCSP3_DCFG_TIM_POS 14
102#define D40_MEM_LCSP3_DCFG_EIM_POS 13
103#define D40_MEM_LCSP3_DCFG_INCR_POS 12
104#define D40_MEM_LCSP3_DCFG_PSIZE_POS 10
105#define D40_MEM_LCSP3_DCFG_ESIZE_POS 8
106#define D40_MEM_LCSP3_DLOS_POS 1
107#define D40_MEM_LCSP3_DTCP_POS 0
108
109#define D40_MEM_LCSP3_DLOS_MASK (0x7F << D40_MEM_LCSP3_DLOS_POS)
110#define D40_MEM_LCSP3_DTCP_MASK (0x1 << D40_MEM_LCSP3_DTCP_POS)
111
112
113/* Standard channel parameter register offsets */
114#define D40_CHAN_REG_SSCFG 0x00
115#define D40_CHAN_REG_SSELT 0x04
116#define D40_CHAN_REG_SSPTR 0x08
117#define D40_CHAN_REG_SSLNK 0x0C
118#define D40_CHAN_REG_SDCFG 0x10
119#define D40_CHAN_REG_SDELT 0x14
120#define D40_CHAN_REG_SDPTR 0x18
121#define D40_CHAN_REG_SDLNK 0x1C
122
123/* DMA Register Offsets */
124#define D40_DREG_GCC 0x000
125#define D40_DREG_PRTYP 0x004
126#define D40_DREG_PRSME 0x008
127#define D40_DREG_PRSMO 0x00C
128#define D40_DREG_PRMSE 0x010
129#define D40_DREG_PRMSO 0x014
130#define D40_DREG_PRMOE 0x018
131#define D40_DREG_PRMOO 0x01C
132#define D40_DREG_LCPA 0x020
133#define D40_DREG_LCLA 0x024
134#define D40_DREG_ACTIVE 0x050
135#define D40_DREG_ACTIVO 0x054
136#define D40_DREG_FSEB1 0x058
137#define D40_DREG_FSEB2 0x05C
138#define D40_DREG_PCMIS 0x060
139#define D40_DREG_PCICR 0x064
140#define D40_DREG_PCTIS 0x068
141#define D40_DREG_PCEIS 0x06C
142#define D40_DREG_LCMIS0 0x080
143#define D40_DREG_LCMIS1 0x084
144#define D40_DREG_LCMIS2 0x088
145#define D40_DREG_LCMIS3 0x08C
146#define D40_DREG_LCICR0 0x090
147#define D40_DREG_LCICR1 0x094
148#define D40_DREG_LCICR2 0x098
149#define D40_DREG_LCICR3 0x09C
150#define D40_DREG_LCTIS0 0x0A0
151#define D40_DREG_LCTIS1 0x0A4
152#define D40_DREG_LCTIS2 0x0A8
153#define D40_DREG_LCTIS3 0x0AC
154#define D40_DREG_LCEIS0 0x0B0
155#define D40_DREG_LCEIS1 0x0B4
156#define D40_DREG_LCEIS2 0x0B8
157#define D40_DREG_LCEIS3 0x0BC
158#define D40_DREG_STFU 0xFC8
159#define D40_DREG_ICFG 0xFCC
160#define D40_DREG_PERIPHID0 0xFE0
161#define D40_DREG_PERIPHID1 0xFE4
162#define D40_DREG_PERIPHID2 0xFE8
163#define D40_DREG_PERIPHID3 0xFEC
164#define D40_DREG_CELLID0 0xFF0
165#define D40_DREG_CELLID1 0xFF4
166#define D40_DREG_CELLID2 0xFF8
167#define D40_DREG_CELLID3 0xFFC
168
169/* LLI related structures */
170
171/**
172 * struct d40_phy_lli - The basic configuration register for each physical
173 * channel.
174 *
175 * @reg_cfg: The configuration register.
176 * @reg_elt: The element register.
177 * @reg_ptr: The pointer register.
178 * @reg_lnk: The link register.
179 *
180 * These registers are set up for both physical and logical transfers.
181 * Note that the bits in each register mean different things in logical
182 * and physical (standard) mode.
183 *
184 * This struct must be 16-byte aligned and only contain physical registers
185 * since it will be directly accessed by the DMA.
186 */
187struct d40_phy_lli {
188 u32 reg_cfg;
189 u32 reg_elt;
190 u32 reg_ptr;
191 u32 reg_lnk;
192};
193
194/**
195 * struct d40_phy_lli_bidir - struct for a transfer.
196 *
197 * @src: Register settings for src channel.
198 * @dst: Register settings for dst channel.
199 * @dst_addr: Physical destination address.
200 * @src_addr: Physical source address.
201 *
202 * All DMA transfers have a source and a destination.
203 */
204
205struct d40_phy_lli_bidir {
206 struct d40_phy_lli *src;
207 struct d40_phy_lli *dst;
208 dma_addr_t dst_addr;
209 dma_addr_t src_addr;
210};
211
212
213/**
214 * struct d40_log_lli - logical lli configuration
215 *
216 * @lcsp02: Maps to register lcsp0 if src, or to lcsp2 if dst.
217 * @lcsp13: Maps to register lcsp1 if src, or to lcsp3 if dst.
218 *
219 * This struct must be 8-byte aligned since it will be accessed directly by
220 * the DMA. Never add any non-hw-mapped registers to this struct.
221 */
222
223struct d40_log_lli {
224 u32 lcsp02;
225 u32 lcsp13;
226};
227
228/**
229 * struct d40_log_lli_bidir - For both src and dst
230 *
231 * @src: pointer to src lli configuration.
232 * @dst: pointer to dst lli configuration.
233 *
234 * You always have a src and a dst when doing DMA transfers.
235 */
236
237struct d40_log_lli_bidir {
238 struct d40_log_lli *src;
239 struct d40_log_lli *dst;
240};
241
242/**
243 * struct d40_log_lli_full - LCPA layout
244 *
245 * @lcsp0: Logical Channel Standard Param 0 - Src.
246 * @lcsp1: Logical Channel Standard Param 1 - Src.
247 * @lcsp2: Logical Channel Standard Param 2 - Dst.
248 * @lcsp3: Logical Channel Standard Param 3 - Dst.
249 *
250 * This struct maps to LCPA physical memory layout. Must map to
251 * the hw.
252 */
253struct d40_log_lli_full {
254 u32 lcsp0;
255 u32 lcsp1;
256 u32 lcsp2;
257 u32 lcsp3;
258};
259
260/**
261 * struct d40_def_lcsp - Default LCSP1 and LCSP3 settings
262 *
263 * @lcsp3: The default configuration for dst.
264 * @lcsp1: The default configuration for src.
265 */
266struct d40_def_lcsp {
267 u32 lcsp3;
268 u32 lcsp1;
269};
270
271/**
272 * struct d40_lcla_elem - Info for one LCLA element.
273 *
274 * @src_id: logical channel src id
275 * @dst_id: logical channel dst id
276 * @src: LCPA formatted src parameters
277 * @dst: LCPA formatted dst parameters
278 *
279 */
280struct d40_lcla_elem {
281 int src_id;
282 int dst_id;
283 struct d40_log_lli *src;
284 struct d40_log_lli *dst;
285};
286
287/* Physical channels */
288
289void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
290 u32 *src_cfg, u32 *dst_cfg, bool is_log);
291
292void d40_log_cfg(struct stedma40_chan_cfg *cfg,
293 u32 *lcsp1, u32 *lcsp2);
294
295int d40_phy_sg_to_lli(struct scatterlist *sg,
296 int sg_len,
297 dma_addr_t target,
298 struct d40_phy_lli *lli,
299 dma_addr_t lli_phys,
300 u32 reg_cfg,
301 u32 data_width,
302 int psize,
303 bool term_int);
304
305int d40_phy_fill_lli(struct d40_phy_lli *lli,
306 dma_addr_t data,
307 u32 data_size,
308 int psize,
309 dma_addr_t next_lli,
310 u32 reg_cfg,
311 bool term_int,
312 u32 data_width,
313 bool is_device);
314
315void d40_phy_lli_write(void __iomem *virtbase,
316 u32 phy_chan_num,
317 struct d40_phy_lli *lli_dst,
318 struct d40_phy_lli *lli_src);
319
320/* Logical channels */
321
322void d40_log_fill_lli(struct d40_log_lli *lli,
323 dma_addr_t data, u32 data_size,
324 u32 lli_next_off, u32 reg_cfg,
325 u32 data_width,
326 bool term_int, bool addr_inc);
327
328int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
329 struct scatterlist *sg,
330 int sg_len,
331 struct d40_log_lli_bidir *lli,
332 struct d40_def_lcsp *lcsp,
333 u32 src_data_width,
334 u32 dst_data_width,
335 enum dma_data_direction direction,
336 bool term_int, dma_addr_t dev_addr, int max_len,
337 int llis_per_log);
338
339void d40_log_lli_write(struct d40_log_lli_full *lcpa,
340 struct d40_log_lli *lcla_src,
341 struct d40_log_lli *lcla_dst,
342 struct d40_log_lli *lli_dst,
343 struct d40_log_lli *lli_src,
344 int llis_per_log);
345
346int d40_log_sg_to_lli(int lcla_id,
347 struct scatterlist *sg,
348 int sg_len,
349 struct d40_log_lli *lli_sg,
350 u32 lcsp13, /* src or dst*/
351 u32 data_width,
352 bool term_int, int max_len, int llis_per_log);
353
354#endif /* STE_DMA40_LL_H */
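A userspace sketch of the per-channel register addressing that d40_phy_lli_write() derives from the offsets above: each physical channel owns eight consecutive 32-bit registers starting at D40_DREG_PCBASE + channel * D40_DREG_PCDELTA. The constants are copied here only to keep the example self-contained, and the channel number is arbitrary.

#include <stdio.h>

#define PCBASE	0x400		/* mirrors D40_DREG_PCBASE  */
#define PCDELTA	(8 * 4)		/* mirrors D40_DREG_PCDELTA */

int main(void)
{
	static const char *name[] = {
		"SSCFG", "SSELT", "SSPTR", "SSLNK",
		"SDCFG", "SDELT", "SDPTR", "SDLNK",
	};
	unsigned int chan = 5;	/* example channel */
	unsigned int i;

	for (i = 0; i < 8; i++)
		printf("chan %u %s at offset 0x%03x\n",
		       chan, name[i], PCBASE + chan * PCDELTA + i * 4);
	return 0;
}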
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
new file mode 100644
index 000000000000..a1bf77c1993f
--- /dev/null
+++ b/drivers/dma/timb_dma.c
@@ -0,0 +1,860 @@
1/*
2 * timb_dma.c timberdale FPGA DMA driver
3 * Copyright (c) 2010 Intel Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19/* Supports:
20 * Timberdale FPGA DMA engine
21 */
22
23#include <linux/dmaengine.h>
24#include <linux/dma-mapping.h>
25#include <linux/init.h>
26#include <linux/interrupt.h>
27#include <linux/io.h>
28#include <linux/module.h>
29#include <linux/platform_device.h>
30#include <linux/slab.h>
31
32#include <linux/timb_dma.h>
33
34#define DRIVER_NAME "timb-dma"
35
36/* Global DMA registers */
37#define TIMBDMA_ACR 0x34
38#define TIMBDMA_32BIT_ADDR 0x01
39
40#define TIMBDMA_ISR 0x080000
41#define TIMBDMA_IPR 0x080004
42#define TIMBDMA_IER 0x080008
43
44/* Channel specific registers */
45/* RX instance base addresses are 0x00, 0x40, 0x80, ...
46 * TX instance base addresses are 0x18, 0x58, 0x98, ...
47 */
48#define TIMBDMA_INSTANCE_OFFSET 0x40
49#define TIMBDMA_INSTANCE_TX_OFFSET 0x18
50
51/* RX registers, relative to the instance base */
52#define TIMBDMA_OFFS_RX_DHAR 0x00
53#define TIMBDMA_OFFS_RX_DLAR 0x04
54#define TIMBDMA_OFFS_RX_LR 0x0C
55#define TIMBDMA_OFFS_RX_BLR 0x10
56#define TIMBDMA_OFFS_RX_ER 0x14
57#define TIMBDMA_RX_EN 0x01
58/* Bytes per row; a video-specific register
59 * which is placed after the TX registers...
60 */
61#define TIMBDMA_OFFS_RX_BPRR 0x30
62
63/* TX registers, relative to the instance base */
64#define TIMBDMA_OFFS_TX_DHAR 0x00
65#define TIMBDMA_OFFS_TX_DLAR 0x04
66#define TIMBDMA_OFFS_TX_BLR 0x0C
67#define TIMBDMA_OFFS_TX_LR 0x14
68
69
70#define TIMB_DMA_DESC_SIZE 8
71
72struct timb_dma_desc {
73 struct list_head desc_node;
74 struct dma_async_tx_descriptor txd;
75 u8 *desc_list;
76 unsigned int desc_list_len;
77 bool interrupt;
78};
79
80struct timb_dma_chan {
81 struct dma_chan chan;
82 void __iomem *membase;
83 spinlock_t lock; /* Used to protect data structures,
84 especially the lists and descriptors,
85 from races between the tasklet and calls
86 from above */
87 dma_cookie_t last_completed_cookie;
88 bool ongoing;
89 struct list_head active_list;
90 struct list_head queue;
91 struct list_head free_list;
92 unsigned int bytes_per_line;
93 enum dma_data_direction direction;
94 unsigned int descs; /* Descriptors to allocate */
95 unsigned int desc_elems; /* number of elems per descriptor */
96};
97
98struct timb_dma {
99 struct dma_device dma;
100 void __iomem *membase;
101 struct tasklet_struct tasklet;
102 struct timb_dma_chan channels[0];
103};
104
105static struct device *chan2dev(struct dma_chan *chan)
106{
107 return &chan->dev->device;
108}
109static struct device *chan2dmadev(struct dma_chan *chan)
110{
111 return chan2dev(chan)->parent->parent;
112}
113
114static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
115{
116 int id = td_chan->chan.chan_id;
117 return (struct timb_dma *)((u8 *)td_chan -
118 id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
119}
120
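The back-pointer arithmetic in tdchantotd() above (and repeated inline in __td_dma_done_ack()) relies on the channels[] array being the tail of struct timb_dma. A hedged userspace sketch with placeholder structs, not the driver's real types:

#include <stdio.h>
#include <stdlib.h>

struct chan { int id; };		/* stands in for timb_dma_chan */
struct dev  { int placeholder[4]; };	/* stands in for timb_dma      */

static struct dev *chan_to_dev(struct chan *c)
{
	/* step back over the preceding channels plus the device header */
	return (struct dev *)((char *)c -
			      c->id * sizeof(struct chan) -
			      sizeof(struct dev));
}

int main(void)
{
	struct dev *d = malloc(sizeof(*d) + 3 * sizeof(struct chan));
	struct chan *chans;
	int i;

	if (!d)
		return 1;
	chans = (struct chan *)(d + 1);

	for (i = 0; i < 3; i++)
		chans[i].id = i;

	for (i = 0; i < 3; i++)
		printf("chan %d -> dev %p (expect %p)\n", i,
		       (void *)chan_to_dev(&chans[i]), (void *)d);

	free(d);
	return 0;
}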
121/* Must be called with the spinlock held */
122static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
123{
124 int id = td_chan->chan.chan_id;
125 struct timb_dma *td = tdchantotd(td_chan);
126 u32 ier;
127
128 /* enable interrupt for this channel */
129 ier = ioread32(td->membase + TIMBDMA_IER);
130 ier |= 1 << id;
131 dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
132 ier);
133 iowrite32(ier, td->membase + TIMBDMA_IER);
134}
135
136/* Should be called with the spinlock held */
137static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
138{
139 int id = td_chan->chan.chan_id;
140 struct timb_dma *td = (struct timb_dma *)((u8 *)td_chan -
141 id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
142 u32 isr;
143 bool done = false;
144
145 dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);
146
147 isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
148 if (isr) {
149 iowrite32(isr, td->membase + TIMBDMA_ISR);
150 done = true;
151 }
152
153 return done;
154}
155
156static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
157 bool single)
158{
159 dma_addr_t addr;
160 int len;
161
162 addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) |
163 dma_desc[4];
164
165 len = (dma_desc[3] << 8) | dma_desc[2];
166
167 if (single)
168 dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
169 td_chan->direction);
170 else
171 dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
172 td_chan->direction);
173}
174
175static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
176{
177 struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan,
178 struct timb_dma_chan, chan);
179 u8 *descs;
180
181 for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) {
182 __td_unmap_desc(td_chan, descs, single);
183 if (descs[0] & 0x02)
184 break;
185 }
186}
187
188static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
189 struct scatterlist *sg, bool last)
190{
191 if (sg_dma_len(sg) > USHRT_MAX) {
192 dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
193 return -EINVAL;
194 }
195
196 /* length must be word aligned */
197 if (sg_dma_len(sg) % sizeof(u32)) {
198 dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
199 sg_dma_len(sg));
200 return -EINVAL;
201 }
202
203 dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: %p\n",
204 dma_desc, (void *)sg_dma_address(sg));
205
206 dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
207 dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
208 dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
209 dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;
210
211 dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
212 dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;
213
214 dma_desc[1] = 0x00;
215 dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */
216
217 return 0;
218}
219
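For reference, a standalone sketch of the 8-byte hardware descriptor that td_fill_desc() assembles: bytes 4-7 carry the DMA address least-significant byte first, bytes 2-3 the length, and byte 0 the control flags (0x21 appears to mean transfer + valid, with bit 1 marking the last descriptor of a chain). The address and length below are arbitrary example values, not driver data.

#include <stdint.h>
#include <stdio.h>

static void fill_desc(uint8_t *d, uint32_t addr, uint16_t len, int last)
{
	d[7] = (addr >> 24) & 0xff;
	d[6] = (addr >> 16) & 0xff;
	d[5] = (addr >> 8) & 0xff;
	d[4] = addr & 0xff;

	d[3] = (len >> 8) & 0xff;
	d[2] = len & 0xff;

	d[1] = 0x00;
	d[0] = 0x21 | (last ? 0x02 : 0);
}

int main(void)
{
	uint8_t desc[8];
	int i;

	fill_desc(desc, 0x12345678u, 4096, 1);
	for (i = 0; i < 8; i++)
		printf("%02x ", desc[i]);
	printf("\n");		/* 23 00 00 10 78 56 34 12 */
	return 0;
}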
220/* Must be called with the spinlock held */
221static void __td_start_dma(struct timb_dma_chan *td_chan)
222{
223 struct timb_dma_desc *td_desc;
224
225 if (td_chan->ongoing) {
226 dev_err(chan2dev(&td_chan->chan),
227 "Transfer already ongoing\n");
228 return;
229 }
230
231 td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
232 desc_node);
233
234 dev_dbg(chan2dev(&td_chan->chan),
235 "td_chan: %p, chan: %d, membase: %p\n",
236 td_chan, td_chan->chan.chan_id, td_chan->membase);
237
238 if (td_chan->direction == DMA_FROM_DEVICE) {
239
240 /* descriptor address */
241 iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
242 iowrite32(td_desc->txd.phys, td_chan->membase +
243 TIMBDMA_OFFS_RX_DLAR);
244 /* Bytes per line */
245 iowrite32(td_chan->bytes_per_line, td_chan->membase +
246 TIMBDMA_OFFS_RX_BPRR);
247 /* enable RX */
248 iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
249 } else {
250 /* address high */
251 iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
252 iowrite32(td_desc->txd.phys, td_chan->membase +
253 TIMBDMA_OFFS_TX_DLAR);
254 }
255
256 td_chan->ongoing = true;
257
258 if (td_desc->interrupt)
259 __td_enable_chan_irq(td_chan);
260}
261
262static void __td_finish(struct timb_dma_chan *td_chan)
263{
264 dma_async_tx_callback callback;
265 void *param;
266 struct dma_async_tx_descriptor *txd;
267 struct timb_dma_desc *td_desc;
268
269 /* can happen if the descriptor is canceled */
270 if (list_empty(&td_chan->active_list))
271 return;
272
273 td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
274 desc_node);
275 txd = &td_desc->txd;
276
277 dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
278 txd->cookie);
279
280 /* make sure to stop the transfer */
281 if (td_chan->direction == DMA_FROM_DEVICE)
282 iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
283/* Currently no support for stopping DMA transfers
284 else
285 iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
286*/
287 td_chan->last_completed_cookie = txd->cookie;
288 td_chan->ongoing = false;
289
290 callback = txd->callback;
291 param = txd->callback_param;
292
293 list_move(&td_desc->desc_node, &td_chan->free_list);
294
295 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
296 __td_unmap_descs(td_desc,
297 txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);
298
299 /*
300 * The API requires that no submissions are done from a
301 * callback, so we don't need to drop the lock here
302 */
303 if (callback)
304 callback(param);
305}
306
307static u32 __td_ier_mask(struct timb_dma *td)
308{
309 int i;
310 u32 ret = 0;
311
312 for (i = 0; i < td->dma.chancnt; i++) {
313 struct timb_dma_chan *td_chan = td->channels + i;
314 if (td_chan->ongoing) {
315 struct timb_dma_desc *td_desc =
316 list_entry(td_chan->active_list.next,
317 struct timb_dma_desc, desc_node);
318 if (td_desc->interrupt)
319 ret |= 1 << i;
320 }
321 }
322
323 return ret;
324}
325
326static void __td_start_next(struct timb_dma_chan *td_chan)
327{
328 struct timb_dma_desc *td_desc;
329
330 BUG_ON(list_empty(&td_chan->queue));
331 BUG_ON(td_chan->ongoing);
332
333 td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
334 desc_node);
335
336 dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
337 __func__, td_desc->txd.cookie);
338
339 list_move(&td_desc->desc_node, &td_chan->active_list);
340 __td_start_dma(td_chan);
341}
342
343static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
344{
345 struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
346 txd);
347 struct timb_dma_chan *td_chan = container_of(txd->chan,
348 struct timb_dma_chan, chan);
349 dma_cookie_t cookie;
350
351 spin_lock_bh(&td_chan->lock);
352
353 cookie = txd->chan->cookie;
354 if (++cookie < 0)
355 cookie = 1;
356 txd->chan->cookie = cookie;
357 txd->cookie = cookie;
358
359 if (list_empty(&td_chan->active_list)) {
360 dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
361 txd->cookie);
362 list_add_tail(&td_desc->desc_node, &td_chan->active_list);
363 __td_start_dma(td_chan);
364 } else {
365 dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
366 txd->cookie);
367
368 list_add_tail(&td_desc->desc_node, &td_chan->queue);
369 }
370
371 spin_unlock_bh(&td_chan->lock);
372
373 return cookie;
374}
375
376static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
377{
378 struct dma_chan *chan = &td_chan->chan;
379 struct timb_dma_desc *td_desc;
380 int err;
381
382 td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
383 if (!td_desc) {
384 dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
385		return NULL;	/* nothing allocated yet, no descriptor to free */
386 }
387
388 td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;
389
390 td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
391 if (!td_desc->desc_list) {
392 dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
393 goto err;
394 }
395
396 dma_async_tx_descriptor_init(&td_desc->txd, chan);
397 td_desc->txd.tx_submit = td_tx_submit;
398 td_desc->txd.flags = DMA_CTRL_ACK;
399
400 td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
401 td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);
402
403 err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
404 if (err) {
405 dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
406 goto err;
407 }
408
409 return td_desc;
410err:
411 kfree(td_desc->desc_list);
412 kfree(td_desc);
413
414 return NULL;
415
416}
417
418static void td_free_desc(struct timb_dma_desc *td_desc)
419{
420 dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
421 dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
422 td_desc->desc_list_len, DMA_TO_DEVICE);
423
424 kfree(td_desc->desc_list);
425 kfree(td_desc);
426}
427
428static void td_desc_put(struct timb_dma_chan *td_chan,
429 struct timb_dma_desc *td_desc)
430{
431 dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);
432
433 spin_lock_bh(&td_chan->lock);
434 list_add(&td_desc->desc_node, &td_chan->free_list);
435 spin_unlock_bh(&td_chan->lock);
436}
437
438static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
439{
440 struct timb_dma_desc *td_desc, *_td_desc;
441 struct timb_dma_desc *ret = NULL;
442
443 spin_lock_bh(&td_chan->lock);
444 list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
445 desc_node) {
446 if (async_tx_test_ack(&td_desc->txd)) {
447 list_del(&td_desc->desc_node);
448 ret = td_desc;
449 break;
450 }
451 dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
452 td_desc);
453 }
454 spin_unlock_bh(&td_chan->lock);
455
456 return ret;
457}
458
459static int td_alloc_chan_resources(struct dma_chan *chan)
460{
461 struct timb_dma_chan *td_chan =
462 container_of(chan, struct timb_dma_chan, chan);
463 int i;
464
465 dev_dbg(chan2dev(chan), "%s: entry\n", __func__);
466
467 BUG_ON(!list_empty(&td_chan->free_list));
468 for (i = 0; i < td_chan->descs; i++) {
469 struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
470 if (!td_desc) {
471 if (i)
472 break;
473 else {
474 dev_err(chan2dev(chan),
475				"Couldn't allocate any descriptors\n");
476 return -ENOMEM;
477 }
478 }
479
480 td_desc_put(td_chan, td_desc);
481 }
482
483 spin_lock_bh(&td_chan->lock);
484 td_chan->last_completed_cookie = 1;
485 chan->cookie = 1;
486 spin_unlock_bh(&td_chan->lock);
487
488 return 0;
489}
490
491static void td_free_chan_resources(struct dma_chan *chan)
492{
493 struct timb_dma_chan *td_chan =
494 container_of(chan, struct timb_dma_chan, chan);
495 struct timb_dma_desc *td_desc, *_td_desc;
496 LIST_HEAD(list);
497
498 dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
499
500 /* check that all descriptors are free */
501 BUG_ON(!list_empty(&td_chan->active_list));
502 BUG_ON(!list_empty(&td_chan->queue));
503
504 spin_lock_bh(&td_chan->lock);
505 list_splice_init(&td_chan->free_list, &list);
506 spin_unlock_bh(&td_chan->lock);
507
508 list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
509 dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
510 td_desc);
511 td_free_desc(td_desc);
512 }
513}
514
515static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
516 struct dma_tx_state *txstate)
517{
518 struct timb_dma_chan *td_chan =
519 container_of(chan, struct timb_dma_chan, chan);
520 dma_cookie_t last_used;
521 dma_cookie_t last_complete;
522 int ret;
523
524 dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
525
526 last_complete = td_chan->last_completed_cookie;
527 last_used = chan->cookie;
528
529 ret = dma_async_is_complete(cookie, last_complete, last_used);
530
531 dma_set_tx_state(txstate, last_complete, last_used, 0);
532
533 dev_dbg(chan2dev(chan),
534 "%s: exit, ret: %d, last_complete: %d, last_used: %d\n",
535 __func__, ret, last_complete, last_used);
536
537 return ret;
538}
539
540static void td_issue_pending(struct dma_chan *chan)
541{
542 struct timb_dma_chan *td_chan =
543 container_of(chan, struct timb_dma_chan, chan);
544
545 dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
546 spin_lock_bh(&td_chan->lock);
547
548 if (!list_empty(&td_chan->active_list))
549 /* transfer ongoing */
550 if (__td_dma_done_ack(td_chan))
551 __td_finish(td_chan);
552
553 if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
554 __td_start_next(td_chan);
555
556 spin_unlock_bh(&td_chan->lock);
557}
558
559static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
560 struct scatterlist *sgl, unsigned int sg_len,
561 enum dma_data_direction direction, unsigned long flags)
562{
563 struct timb_dma_chan *td_chan =
564 container_of(chan, struct timb_dma_chan, chan);
565 struct timb_dma_desc *td_desc;
566 struct scatterlist *sg;
567 unsigned int i;
568 unsigned int desc_usage = 0;
569
570 if (!sgl || !sg_len) {
571 dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
572 return NULL;
573 }
574
575 /* even channels are for RX, odd for TX */
576 if (td_chan->direction != direction) {
577 dev_err(chan2dev(chan),
578 "Requesting channel in wrong direction\n");
579 return NULL;
580 }
581
582 td_desc = td_desc_get(td_chan);
583 if (!td_desc) {
584 dev_err(chan2dev(chan), "Not enough descriptors available\n");
585 return NULL;
586 }
587
588 td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
589
590 for_each_sg(sgl, sg, sg_len, i) {
591 int err;
592 if (desc_usage > td_desc->desc_list_len) {
593 dev_err(chan2dev(chan), "No descriptor space\n");
594 return NULL;
595 }
596
597 err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
598 i == (sg_len - 1));
599 if (err) {
600 dev_err(chan2dev(chan), "Failed to update desc: %d\n",
601 err);
602 td_desc_put(td_chan, td_desc);
603 return NULL;
604 }
605 desc_usage += TIMB_DMA_DESC_SIZE;
606 }
607
608 dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
609 td_desc->desc_list_len, DMA_TO_DEVICE);
610
611 return &td_desc->txd;
612}
613
614static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
615 unsigned long arg)
616{
617 struct timb_dma_chan *td_chan =
618 container_of(chan, struct timb_dma_chan, chan);
619 struct timb_dma_desc *td_desc, *_td_desc;
620
621 dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
622
623 if (cmd != DMA_TERMINATE_ALL)
624 return -ENXIO;
625
626 /* first the easy part, put the queue into the free list */
627 spin_lock_bh(&td_chan->lock);
628 list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
629 desc_node)
630 list_move(&td_desc->desc_node, &td_chan->free_list);
631
632	/* now tear down the running transfer */
633 __td_finish(td_chan);
634 spin_unlock_bh(&td_chan->lock);
635
636 return 0;
637}
638
639static void td_tasklet(unsigned long data)
640{
641 struct timb_dma *td = (struct timb_dma *)data;
642 u32 isr;
643 u32 ipr;
644 u32 ier;
645 int i;
646
647 isr = ioread32(td->membase + TIMBDMA_ISR);
648 ipr = isr & __td_ier_mask(td);
649
650 /* ack the interrupts */
651 iowrite32(ipr, td->membase + TIMBDMA_ISR);
652
653 for (i = 0; i < td->dma.chancnt; i++)
654 if (ipr & (1 << i)) {
655 struct timb_dma_chan *td_chan = td->channels + i;
656 spin_lock(&td_chan->lock);
657 __td_finish(td_chan);
658 if (!list_empty(&td_chan->queue))
659 __td_start_next(td_chan);
660 spin_unlock(&td_chan->lock);
661 }
662
663 ier = __td_ier_mask(td);
664 iowrite32(ier, td->membase + TIMBDMA_IER);
665}
666
667
668static irqreturn_t td_irq(int irq, void *devid)
669{
670 struct timb_dma *td = devid;
671 u32 ipr = ioread32(td->membase + TIMBDMA_IPR);
672
673 if (ipr) {
674 /* disable interrupts, will be re-enabled in tasklet */
675 iowrite32(0, td->membase + TIMBDMA_IER);
676
677 tasklet_schedule(&td->tasklet);
678
679 return IRQ_HANDLED;
680 } else
681 return IRQ_NONE;
682}
683
684
685static int __devinit td_probe(struct platform_device *pdev)
686{
687 struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
688 struct timb_dma *td;
689 struct resource *iomem;
690 int irq;
691 int err;
692 int i;
693
694 if (!pdata) {
695 dev_err(&pdev->dev, "No platform data\n");
696 return -EINVAL;
697 }
698
699 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
700 if (!iomem)
701 return -EINVAL;
702
703 irq = platform_get_irq(pdev, 0);
704 if (irq < 0)
705 return irq;
706
707 if (!request_mem_region(iomem->start, resource_size(iomem),
708 DRIVER_NAME))
709 return -EBUSY;
710
711 td = kzalloc(sizeof(struct timb_dma) +
712 sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
713 if (!td) {
714 err = -ENOMEM;
715 goto err_release_region;
716 }
717
718 dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);
719
720 td->membase = ioremap(iomem->start, resource_size(iomem));
721 if (!td->membase) {
722 dev_err(&pdev->dev, "Failed to remap I/O memory\n");
723 err = -ENOMEM;
724 goto err_free_mem;
725 }
726
727 /* 32bit addressing */
728 iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);
729
730 /* disable and clear any interrupts */
731 iowrite32(0x0, td->membase + TIMBDMA_IER);
732 iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);
733
734 tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);
735
736 err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
737 if (err) {
738 dev_err(&pdev->dev, "Failed to request IRQ\n");
739 goto err_tasklet_kill;
740 }
741
742 td->dma.device_alloc_chan_resources = td_alloc_chan_resources;
743 td->dma.device_free_chan_resources = td_free_chan_resources;
744 td->dma.device_tx_status = td_tx_status;
745 td->dma.device_issue_pending = td_issue_pending;
746
747 dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
748 dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
749 td->dma.device_prep_slave_sg = td_prep_slave_sg;
750 td->dma.device_control = td_control;
751
752 td->dma.dev = &pdev->dev;
753
754 INIT_LIST_HEAD(&td->dma.channels);
755
756 for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) {
757 struct timb_dma_chan *td_chan = &td->channels[i];
758 struct timb_dma_platform_data_channel *pchan =
759 pdata->channels + i;
760
761 /* even channels are RX, odd are TX */
762 if (((i % 2) && pchan->rx) || (!(i % 2) && !pchan->rx)) {
763 dev_err(&pdev->dev, "Wrong channel configuration\n");
764 err = -EINVAL;
765 goto err_tasklet_kill;
766 }
767
768 td_chan->chan.device = &td->dma;
769 td_chan->chan.cookie = 1;
770 td_chan->chan.chan_id = i;
771 spin_lock_init(&td_chan->lock);
772 INIT_LIST_HEAD(&td_chan->active_list);
773 INIT_LIST_HEAD(&td_chan->queue);
774 INIT_LIST_HEAD(&td_chan->free_list);
775
776 td_chan->descs = pchan->descriptors;
777 td_chan->desc_elems = pchan->descriptor_elements;
778 td_chan->bytes_per_line = pchan->bytes_per_line;
779 td_chan->direction = pchan->rx ? DMA_FROM_DEVICE :
780 DMA_TO_DEVICE;
781
782 td_chan->membase = td->membase +
783 (i / 2) * TIMBDMA_INSTANCE_OFFSET +
784 (pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);
785
786 dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
787 i, td_chan->membase);
788
789 list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
790 }
791
792 err = dma_async_device_register(&td->dma);
793 if (err) {
794 dev_err(&pdev->dev, "Failed to register async device\n");
795 goto err_free_irq;
796 }
797
798 platform_set_drvdata(pdev, td);
799
800 dev_dbg(&pdev->dev, "Probe result: %d\n", err);
801 return err;
802
803err_free_irq:
804 free_irq(irq, td);
805err_tasklet_kill:
806 tasklet_kill(&td->tasklet);
807 iounmap(td->membase);
808err_free_mem:
809 kfree(td);
810err_release_region:
811 release_mem_region(iomem->start, resource_size(iomem));
812
813 return err;
814
815}
816
817static int __devexit td_remove(struct platform_device *pdev)
818{
819 struct timb_dma *td = platform_get_drvdata(pdev);
820 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
821 int irq = platform_get_irq(pdev, 0);
822
823 dma_async_device_unregister(&td->dma);
824 free_irq(irq, td);
825 tasklet_kill(&td->tasklet);
826 iounmap(td->membase);
827 kfree(td);
828 release_mem_region(iomem->start, resource_size(iomem));
829
830 platform_set_drvdata(pdev, NULL);
831
832 dev_dbg(&pdev->dev, "Removed...\n");
833 return 0;
834}
835
836static struct platform_driver td_driver = {
837 .driver = {
838 .name = DRIVER_NAME,
839 .owner = THIS_MODULE,
840 },
841 .probe = td_probe,
842	.remove	= __devexit_p(td_remove),
843};
844
845static int __init td_init(void)
846{
847 return platform_driver_register(&td_driver);
848}
849module_init(td_init);
850
851static void __exit td_exit(void)
852{
853 platform_driver_unregister(&td_driver);
854}
855module_exit(td_exit);
856
857MODULE_LICENSE("GPL v2");
858MODULE_DESCRIPTION("Timberdale DMA controller driver");
859MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
860MODULE_ALIAS("platform:"DRIVER_NAME);
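Finally, a small userspace sketch of the per-channel MMIO base computed in td_probe() above: even channels are RX and odd channels are TX, each RX/TX pair shares a 0x40-byte instance, and the TX half starts 0x18 bytes into it. Four channels are shown purely as an example.

#include <stdio.h>

#define INSTANCE_OFFSET		0x40	/* mirrors TIMBDMA_INSTANCE_OFFSET    */
#define INSTANCE_TX_OFFSET	0x18	/* mirrors TIMBDMA_INSTANCE_TX_OFFSET */

int main(void)
{
	int i;

	for (i = 0; i < 4; i++) {
		int rx = !(i % 2);
		int off = (i / 2) * INSTANCE_OFFSET +
			  (rx ? 0 : INSTANCE_TX_OFFSET);

		printf("chan %d (%s) at membase + 0x%02x\n",
		       i, rx ? "RX" : "TX", off);
	}
	return 0;
}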
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 75fcf1ac8bb7..cbd83e362b5e 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -938,12 +938,17 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
938 return &first->txd; 938 return &first->txd;
939} 939}
940 940
941static void txx9dmac_terminate_all(struct dma_chan *chan) 941static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
942 unsigned long arg)
942{ 943{
943 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); 944 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
944 struct txx9dmac_desc *desc, *_desc; 945 struct txx9dmac_desc *desc, *_desc;
945 LIST_HEAD(list); 946 LIST_HEAD(list);
946 947
948 /* Only supports DMA_TERMINATE_ALL */
949 if (cmd != DMA_TERMINATE_ALL)
950 return -EINVAL;
951
947 dev_vdbg(chan2dev(chan), "terminate_all\n"); 952 dev_vdbg(chan2dev(chan), "terminate_all\n");
948 spin_lock_bh(&dc->lock); 953 spin_lock_bh(&dc->lock);
949 954
@@ -958,12 +963,13 @@ static void txx9dmac_terminate_all(struct dma_chan *chan)
958 /* Flush all pending and queued descriptors */ 963 /* Flush all pending and queued descriptors */
959 list_for_each_entry_safe(desc, _desc, &list, desc_node) 964 list_for_each_entry_safe(desc, _desc, &list, desc_node)
960 txx9dmac_descriptor_complete(dc, desc); 965 txx9dmac_descriptor_complete(dc, desc);
966
967 return 0;
961} 968}
962 969
963static enum dma_status 970static enum dma_status
964txx9dmac_is_tx_complete(struct dma_chan *chan, 971txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
965 dma_cookie_t cookie, 972 struct dma_tx_state *txstate)
966 dma_cookie_t *done, dma_cookie_t *used)
967{ 973{
968 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); 974 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
969 dma_cookie_t last_used; 975 dma_cookie_t last_used;
@@ -985,10 +991,7 @@ txx9dmac_is_tx_complete(struct dma_chan *chan,
985 ret = dma_async_is_complete(cookie, last_complete, last_used); 991 ret = dma_async_is_complete(cookie, last_complete, last_used);
986 } 992 }
987 993
988 if (done) 994 dma_set_tx_state(txstate, last_complete, last_used, 0);
989 *done = last_complete;
990 if (used)
991 *used = last_used;
992 995
993 return ret; 996 return ret;
994} 997}
@@ -1153,8 +1156,8 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
1153 dc->dma.dev = &pdev->dev; 1156 dc->dma.dev = &pdev->dev;
1154 dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources; 1157 dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
1155 dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources; 1158 dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
1156 dc->dma.device_terminate_all = txx9dmac_terminate_all; 1159 dc->dma.device_control = txx9dmac_control;
1157 dc->dma.device_is_tx_complete = txx9dmac_is_tx_complete; 1160 dc->dma.device_tx_status = txx9dmac_tx_status;
1158 dc->dma.device_issue_pending = txx9dmac_issue_pending; 1161 dc->dma.device_issue_pending = txx9dmac_issue_pending;
1159 if (pdata && pdata->memcpy_chan == ch) { 1162 if (pdata && pdata->memcpy_chan == ch) {
1160 dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy; 1163 dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;