author    Linus Torvalds <torvalds@linux-foundation.org>  2011-05-28 15:35:15 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-05-28 15:35:15 -0400
commit    4cb865deec59ef31d966622d1ec87411ae32dfab (patch)
tree      e060d515f62e4f334aded38c9079485d50166693 /drivers/dma/at_hdmac.c
parent    55f08e1baa3ef11c952b626dbc7ef9e3e8332a63 (diff)
parent    19d78a61be6dd707dcec298c486303d4ba2c840a (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (33 commits)
x86: poll waiting for I/OAT DMA channel status
maintainers: add dma engine tree details
dmaengine: add TODO items for future work on dma drivers
dmaengine: Add API documentation for slave dma usage
dmaengine/dw_dmac: Update maintainer-ship
dmaengine: move link order
dmaengine/dw_dmac: implement pause and resume in dwc_control
dmaengine/dw_dmac: Replace spin_lock* with irqsave variants and enable submission from callback
dmaengine/dw_dmac: Divide one sg to many desc, if sg len is greater than DWC_MAX_COUNT
dmaengine/dw_dmac: set residue as total len in dwc_tx_status if status is !DMA_SUCCESS
dmaengine/dw_dmac: don't call callback routine in case dmaengine_terminate_all() is called
dmaengine: at_hdmac: pause: no need to wait for FIFO empty
pch_dma: modify pci device table definition
pch_dma: Support new device ML7223 IOH
pch_dma: Support I2S for ML7213 IOH
pch_dma: Fix DMA setting issue
pch_dma: modify for checkpatch
pch_dma: fix dma direction issue for ML7213 IOH video-in
dmaengine: at_hdmac: use descriptor chaining help function
dmaengine: at_hdmac: implement pause and resume in atc_control
...
Fix up trivial conflict in drivers/dma/dw_dmac.c
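
For orientation, the at_hdmac changes below wire DMA_PAUSE/DMA_RESUME into atc_control(). A minimal client-side sketch (not part of this merge) of how that is typically exercised, assuming the dmaengine_pause()/dmaengine_resume() wrappers of this kernel era and a channel already obtained from dma_request_channel():

#include <linux/dmaengine.h>

/* Sketch: suspend and later resume an in-flight slave transfer.
 * "chan" is assumed to come from dma_request_channel(). */
static void example_pause_resume(struct dma_chan *chan)
{
        /* reaches atc_control(chan, DMA_PAUSE, 0) on at_hdmac */
        if (dmaengine_pause(chan))
                pr_warn("channel cannot be paused\n");

        /* ... inspect progress via dmaengine_tx_status() ... */

        /* clears the suspend state; the transfer continues */
        if (dmaengine_resume(chan))
                pr_warn("channel cannot be resumed\n");
}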
Diffstat (limited to 'drivers/dma/at_hdmac.c')
-rw-r--r--  drivers/dma/at_hdmac.c | 376
1 file changed, 291 insertions(+), 85 deletions(-)
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 235f53bf494..36144f88d71 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -37,8 +37,8 @@
 
 #define ATC_DEFAULT_CFG         (ATC_FIFOCFG_HALFFIFO)
 #define ATC_DEFAULT_CTRLA       (0)
-#define ATC_DEFAULT_CTRLB       (ATC_SIF(0) \
-                                |ATC_DIF(1))
+#define ATC_DEFAULT_CTRLB       (ATC_SIF(AT_DMA_MEM_IF) \
+                                |ATC_DIF(AT_DMA_MEM_IF))
 
 /*
  * Initial number of descriptors to allocate for each channel. This could
@@ -165,6 +165,29 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
 }
 
 /**
+ * atc_desc_chain - build chain adding a descriptor
+ * @first: address of first descriptor of the chain
+ * @prev: address of previous descriptor of the chain
+ * @desc: descriptor to queue
+ *
+ * Called from prep_* functions
+ */
+static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
+                struct at_desc *desc)
+{
+        if (!(*first)) {
+                *first = desc;
+        } else {
+                /* inform the HW lli about chaining */
+                (*prev)->lli.dscr = desc->txd.phys;
+                /* insert the link descriptor to the LD ring */
+                list_add_tail(&desc->desc_node,
+                                &(*first)->tx_list);
+        }
+        *prev = desc;
+}
+
+/**
  * atc_assign_cookie - compute and assign new cookie
  * @atchan: channel we work on
  * @desc: descriptor to assign cookie for
@@ -237,16 +260,12 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
 static void
 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 {
-        dma_async_tx_callback callback;
-        void *param;
         struct dma_async_tx_descriptor *txd = &desc->txd;
 
         dev_vdbg(chan2dev(&atchan->chan_common),
                 "descriptor %u complete\n", txd->cookie);
 
         atchan->completed_cookie = txd->cookie;
-        callback = txd->callback;
-        param = txd->callback_param;
 
         /* move children to free_list */
         list_splice_init(&desc->tx_list, &atchan->free_list);
@@ -278,12 +297,19 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
                 }
         }
 
-        /*
-         * The API requires that no submissions are done from a
-         * callback, so we don't need to drop the lock here
-         */
-        if (callback)
-                callback(param);
+        /* for cyclic transfers,
+         * no need to replay callback function while stopping */
+        if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) {
+                dma_async_tx_callback callback = txd->callback;
+                void *param = txd->callback_param;
+
+                /*
+                 * The API requires that no submissions are done from a
+                 * callback, so we don't need to drop the lock here
+                 */
+                if (callback)
+                        callback(param);
+        }
 
         dma_run_dependencies(txd);
 }
@@ -419,6 +445,26 @@ static void atc_handle_error(struct at_dma_chan *atchan)
         atc_chain_complete(atchan, bad_desc);
 }
 
+/**
+ * atc_handle_cyclic - at the end of a period, run callback function
+ * @atchan: channel used for cyclic operations
+ *
+ * Called with atchan->lock held and bh disabled
+ */
+static void atc_handle_cyclic(struct at_dma_chan *atchan)
+{
+        struct at_desc *first = atc_first_active(atchan);
+        struct dma_async_tx_descriptor *txd = &first->txd;
+        dma_async_tx_callback callback = txd->callback;
+        void *param = txd->callback_param;
+
+        dev_vdbg(chan2dev(&atchan->chan_common),
+                        "new cyclic period llp 0x%08x\n",
+                        channel_readl(atchan, DSCR));
+
+        if (callback)
+                callback(param);
+}
 
 /*--  IRQ & Tasklet  ---------------------------------------------------*/
 
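
Since atc_handle_cyclic() now invokes the descriptor callback once per period from the tasklet, a client callback should do bookkeeping only. A hypothetical client-side sketch ("struct my_stream" is an assumed type, not from this patch):

/* Runs once per period with a cyclic transfer, not once per chain */
static void my_period_elapsed(void *param)
{
        struct my_stream *stream = param;

        /* bookkeeping only: the dmaengine API forbids submitting
         * new descriptors from a callback */
        stream->periods_done++;
}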
@@ -426,16 +472,11 @@ static void atc_tasklet(unsigned long data)
 {
         struct at_dma_chan *atchan = (struct at_dma_chan *)data;
 
-        /* Channel cannot be enabled here */
-        if (atc_chan_is_enabled(atchan)) {
-                dev_err(chan2dev(&atchan->chan_common),
-                        "BUG: channel enabled in tasklet\n");
-                return;
-        }
-
         spin_lock(&atchan->lock);
-        if (test_and_clear_bit(0, &atchan->error_status))
+        if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
                 atc_handle_error(atchan);
+        else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+                atc_handle_cyclic(atchan);
         else
                 atc_advance_work(atchan);
 
@@ -464,12 +505,13 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
 
                 for (i = 0; i < atdma->dma_common.chancnt; i++) {
                         atchan = &atdma->chan[i];
-                        if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) {
+                        if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
                                 if (pending & AT_DMA_ERR(i)) {
                                         /* Disable channel on AHB error */
-                                        dma_writel(atdma, CHDR, atchan->mask);
+                                        dma_writel(atdma, CHDR,
+                                                AT_DMA_RES(i) | atchan->mask);
                                         /* Give information to tasklet */
-                                        set_bit(0, &atchan->error_status);
+                                        set_bit(ATC_IS_ERROR, &atchan->status);
                                 }
                                 tasklet_schedule(&atchan->tasklet);
                                 ret = IRQ_HANDLED;
@@ -549,7 +591,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
         }
 
         ctrla = ATC_DEFAULT_CTRLA;
-        ctrlb = ATC_DEFAULT_CTRLB
+        ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
                 | ATC_SRC_ADDR_MODE_INCR
                 | ATC_DST_ADDR_MODE_INCR
                 | ATC_FC_MEM2MEM;
@@ -584,16 +626,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 
                 desc->txd.cookie = 0;
 
-                if (!first) {
-                        first = desc;
-                } else {
-                        /* inform the HW lli about chaining */
-                        prev->lli.dscr = desc->txd.phys;
-                        /* insert the link descriptor to the LD ring */
-                        list_add_tail(&desc->desc_node,
-                                        &first->tx_list);
-                }
-                prev = desc;
+                atc_desc_chain(&first, &prev, desc);
         }
 
         /* First descriptor of the chain embeds additional information */
@@ -639,7 +672,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
         struct scatterlist *sg;
         size_t total_len = 0;
 
-        dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n",
+        dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
+                        sg_len,
                         direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
                         flags);
 
@@ -651,14 +685,15 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
         reg_width = atslave->reg_width;
 
         ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
-        ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN;
+        ctrlb = ATC_IEN;
 
         switch (direction) {
         case DMA_TO_DEVICE:
                 ctrla |= ATC_DST_WIDTH(reg_width);
                 ctrlb |= ATC_DST_ADDR_MODE_FIXED
                         | ATC_SRC_ADDR_MODE_INCR
-                        | ATC_FC_MEM2PER;
+                        | ATC_FC_MEM2PER
+                        | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
                 reg = atslave->tx_reg;
                 for_each_sg(sgl, sg, sg_len, i) {
                         struct at_desc *desc;
@@ -682,16 +717,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                                         | len >> mem_width;
                         desc->lli.ctrlb = ctrlb;
 
-                        if (!first) {
-                                first = desc;
-                        } else {
-                                /* inform the HW lli about chaining */
-                                prev->lli.dscr = desc->txd.phys;
-                                /* insert the link descriptor to the LD ring */
-                                list_add_tail(&desc->desc_node,
-                                                &first->tx_list);
-                        }
-                        prev = desc;
+                        atc_desc_chain(&first, &prev, desc);
                         total_len += len;
                 }
                 break;
@@ -699,7 +725,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                 ctrla |= ATC_SRC_WIDTH(reg_width);
                 ctrlb |= ATC_DST_ADDR_MODE_INCR
                         | ATC_SRC_ADDR_MODE_FIXED
-                        | ATC_FC_PER2MEM;
+                        | ATC_FC_PER2MEM
+                        | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);
 
                 reg = atslave->rx_reg;
                 for_each_sg(sgl, sg, sg_len, i) {
@@ -724,16 +751,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                                         | len >> reg_width;
                         desc->lli.ctrlb = ctrlb;
 
-                        if (!first) {
-                                first = desc;
-                        } else {
-                                /* inform the HW lli about chaining */
-                                prev->lli.dscr = desc->txd.phys;
-                                /* insert the link descriptor to the LD ring */
-                                list_add_tail(&desc->desc_node,
-                                                &first->tx_list);
-                        }
-                        prev = desc;
+                        atc_desc_chain(&first, &prev, desc);
                         total_len += len;
                 }
                 break;
@@ -759,41 +777,211 @@ err_desc_get:
         return NULL;
 }
 
+/**
+ * atc_dma_cyclic_check_values
+ * Check for too big/unaligned periods and unaligned DMA buffer
+ */
+static int
+atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
+                size_t period_len, enum dma_data_direction direction)
+{
+        if (period_len > (ATC_BTSIZE_MAX << reg_width))
+                goto err_out;
+        if (unlikely(period_len & ((1 << reg_width) - 1)))
+                goto err_out;
+        if (unlikely(buf_addr & ((1 << reg_width) - 1)))
+                goto err_out;
+        if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+                goto err_out;
+
+        return 0;
+
+err_out:
+        return -EINVAL;
+}
+
+/**
+ * atc_dma_cyclic_fill_desc - Fill one period descriptor
+ */
+static int
+atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
+                unsigned int period_index, dma_addr_t buf_addr,
+                size_t period_len, enum dma_data_direction direction)
+{
+        u32 ctrla;
+        unsigned int reg_width = atslave->reg_width;
+
+        /* prepare common CTRLA value */
+        ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla
+                | ATC_DST_WIDTH(reg_width)
+                | ATC_SRC_WIDTH(reg_width)
+                | period_len >> reg_width;
+
+        switch (direction) {
+        case DMA_TO_DEVICE:
+                desc->lli.saddr = buf_addr + (period_len * period_index);
+                desc->lli.daddr = atslave->tx_reg;
+                desc->lli.ctrla = ctrla;
+                desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
+                                | ATC_SRC_ADDR_MODE_INCR
+                                | ATC_FC_MEM2PER
+                                | ATC_SIF(AT_DMA_MEM_IF)
+                                | ATC_DIF(AT_DMA_PER_IF);
+                break;
+
+        case DMA_FROM_DEVICE:
+                desc->lli.saddr = atslave->rx_reg;
+                desc->lli.daddr = buf_addr + (period_len * period_index);
+                desc->lli.ctrla = ctrla;
+                desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
+                                | ATC_SRC_ADDR_MODE_FIXED
+                                | ATC_FC_PER2MEM
+                                | ATC_SIF(AT_DMA_PER_IF)
+                                | ATC_DIF(AT_DMA_MEM_IF);
+                break;
+
+        default:
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
+/**
+ * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
+ * @chan: the DMA channel to prepare
+ * @buf_addr: physical DMA address where the buffer starts
+ * @buf_len: total number of bytes for the entire buffer
+ * @period_len: number of bytes for each period
+ * @direction: transfer direction, to or from device
+ */
+static struct dma_async_tx_descriptor *
+atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+                size_t period_len, enum dma_data_direction direction)
+{
+        struct at_dma_chan *atchan = to_at_dma_chan(chan);
+        struct at_dma_slave *atslave = chan->private;
+        struct at_desc *first = NULL;
+        struct at_desc *prev = NULL;
+        unsigned long was_cyclic;
+        unsigned int periods = buf_len / period_len;
+        unsigned int i;
+
+        dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
+                        direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+                        buf_addr,
+                        periods, buf_len, period_len);
+
+        if (unlikely(!atslave || !buf_len || !period_len)) {
+                dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
+                return NULL;
+        }
+
+        was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
+        if (was_cyclic) {
+                dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
+                return NULL;
+        }
+
+        /* Check for too big/unaligned periods and unaligned DMA buffer */
+        if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr,
+                        period_len, direction))
+                goto err_out;
+
+        /* build cyclic linked list */
+        for (i = 0; i < periods; i++) {
+                struct at_desc *desc;
+
+                desc = atc_desc_get(atchan);
+                if (!desc)
+                        goto err_desc_get;
+
+                if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr,
+                                period_len, direction))
+                        goto err_desc_get;
+
+                atc_desc_chain(&first, &prev, desc);
+        }
+
+        /* let's make a cyclic list */
+        prev->lli.dscr = first->txd.phys;
+
+        /* First descriptor of the chain embeds additional information */
+        first->txd.cookie = -EBUSY;
+        first->len = buf_len;
+
+        return &first->txd;
+
+err_desc_get:
+        dev_err(chan2dev(chan), "not enough descriptors available\n");
+        atc_desc_put(atchan, first);
+err_out:
+        clear_bit(ATC_IS_CYCLIC, &atchan->status);
+        return NULL;
+}
+
+
 static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                 unsigned long arg)
 {
         struct at_dma_chan *atchan = to_at_dma_chan(chan);
         struct at_dma *atdma = to_at_dma(chan->device);
-        struct at_desc *desc, *_desc;
+        int chan_id = atchan->chan_common.chan_id;
+
         LIST_HEAD(list);
 
-        /* Only supports DMA_TERMINATE_ALL */
-        if (cmd != DMA_TERMINATE_ALL)
-                return -ENXIO;
+        dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
 
-        /*
-         * This is only called when something went wrong elsewhere, so
-         * we don't really care about the data. Just disable the
-         * channel. We still have to poll the channel enable bit due
-         * to AHB/HSB limitations.
-         */
-        spin_lock_bh(&atchan->lock);
+        if (cmd == DMA_PAUSE) {
+                spin_lock_bh(&atchan->lock);
 
-        dma_writel(atdma, CHDR, atchan->mask);
+                dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
+                set_bit(ATC_IS_PAUSED, &atchan->status);
 
-        /* confirm that this channel is disabled */
-        while (dma_readl(atdma, CHSR) & atchan->mask)
-                cpu_relax();
+                spin_unlock_bh(&atchan->lock);
+        } else if (cmd == DMA_RESUME) {
+                if (!test_bit(ATC_IS_PAUSED, &atchan->status))
+                        return 0;
 
-        /* active_list entries will end up before queued entries */
-        list_splice_init(&atchan->queue, &list);
-        list_splice_init(&atchan->active_list, &list);
+                spin_lock_bh(&atchan->lock);
 
-        /* Flush all pending and queued descriptors */
-        list_for_each_entry_safe(desc, _desc, &list, desc_node)
-                atc_chain_complete(atchan, desc);
+                dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
+                clear_bit(ATC_IS_PAUSED, &atchan->status);
 
-        spin_unlock_bh(&atchan->lock);
+                spin_unlock_bh(&atchan->lock);
+        } else if (cmd == DMA_TERMINATE_ALL) {
+                struct at_desc *desc, *_desc;
+                /*
+                 * This is only called when something went wrong elsewhere, so
+                 * we don't really care about the data. Just disable the
+                 * channel. We still have to poll the channel enable bit due
+                 * to AHB/HSB limitations.
+                 */
+                spin_lock_bh(&atchan->lock);
+
+                /* disabling channel: must also remove suspend state */
+                dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
+
+                /* confirm that this channel is disabled */
+                while (dma_readl(atdma, CHSR) & atchan->mask)
+                        cpu_relax();
+
+                /* active_list entries will end up before queued entries */
+                list_splice_init(&atchan->queue, &list);
+                list_splice_init(&atchan->active_list, &list);
+
+                /* Flush all pending and queued descriptors */
+                list_for_each_entry_safe(desc, _desc, &list, desc_node)
+                        atc_chain_complete(atchan, desc);
+
+                clear_bit(ATC_IS_PAUSED, &atchan->status);
+                /* if channel dedicated to cyclic operations, free it */
+                clear_bit(ATC_IS_CYCLIC, &atchan->status);
+
+                spin_unlock_bh(&atchan->lock);
+        } else {
+                return -ENXIO;
+        }
 
         return 0;
 }
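
With atc_prep_dma_cyclic() in place, a client sets up a cyclic transfer by calling the device_prep_dma_cyclic hook directly (this kernel has no generic wrapper for it yet). A hedged sketch, assuming the at_dma_slave in chan->private is already configured and reusing the hypothetical my_period_elapsed() callback from earlier:

#include <linux/dmaengine.h>

static int start_cyclic_rx(struct dma_chan *chan, void *stream,
                dma_addr_t buf, size_t buf_len, size_t period_len)
{
        struct dma_async_tx_descriptor *txd;

        txd = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
                        period_len, DMA_FROM_DEVICE);
        if (!txd)
                return -EBUSY;  /* bad alignment, or channel already cyclic */

        txd->callback = my_period_elapsed;
        txd->callback_param = stream;
        dmaengine_submit(txd);
        /* atc_issue_pending() returns early for cyclic channels; on
         * at_hdmac an idle channel is started from the submit path */
        return 0;
}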
@@ -835,9 +1023,17 @@ atc_tx_status(struct dma_chan *chan,
 
         spin_unlock_bh(&atchan->lock);
 
-        dma_set_tx_state(txstate, last_complete, last_used, 0);
-        dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n",
-                cookie, last_complete ? last_complete : 0,
+        if (ret != DMA_SUCCESS)
+                dma_set_tx_state(txstate, last_complete, last_used,
+                        atc_first_active(atchan)->len);
+        else
+                dma_set_tx_state(txstate, last_complete, last_used, 0);
+
+        if (test_bit(ATC_IS_PAUSED, &atchan->status))
+                ret = DMA_PAUSED;
+
+        dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
+                ret, cookie, last_complete ? last_complete : 0,
                 last_used ? last_used : 0);
 
         return ret;
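
From the client side, the reworked status reporting means a paused channel reads back as DMA_PAUSED, and an unfinished transfer reports the first active descriptor's total length as a coarse, whole-descriptor residue. A minimal polling sketch under those assumptions:

#include <linux/dmaengine.h>

static void check_progress(struct dma_chan *chan, dma_cookie_t cookie)
{
        struct dma_tx_state state;
        enum dma_status status;

        status = dmaengine_tx_status(chan, cookie, &state);
        if (status == DMA_PAUSED)
                pr_info("paused, residue %u\n", state.residue);
        else if (status != DMA_SUCCESS)
                pr_info("running, residue %u\n", state.residue);
}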
@@ -853,6 +1049,10 @@ static void atc_issue_pending(struct dma_chan *chan)
 
         dev_vdbg(chan2dev(chan), "issue_pending\n");
 
+        /* Not needed for cyclic transfers */
+        if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+                return;
+
         spin_lock_bh(&atchan->lock);
         if (!atc_chan_is_enabled(atchan)) {
                 atc_advance_work(atchan);
@@ -959,6 +1159,7 @@ static void atc_free_chan_resources(struct dma_chan *chan)
         }
         list_splice_init(&atchan->free_list, &list);
         atchan->descs_allocated = 0;
+        atchan->status = 0;
 
         dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
 }
@@ -1092,10 +1293,15 @@ static int __init at_dma_probe(struct platform_device *pdev)
         if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
                 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
 
-        if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
+        if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask))
                 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
+
+        if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
+                atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
+
+        if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
+            dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
                 atdma->dma_common.device_control = atc_control;
-        }
 
         dma_writel(atdma, EN, AT_DMA_ENABLE);
 