author    | Nicolas Ferre <nicolas.ferre@atmel.com>   | 2009-07-22 14:04:45 -0400
committer | Dan Williams <dan.j.williams@intel.com>   | 2009-07-23 02:15:33 -0400
commit    | 808347f6a31792079e345ec865e9cfcb6e8ae6b2 (patch)
tree      | 05fe2e32712d84ec5dc7553033432712313f6ba2 /drivers/dma/at_hdmac.c
parent    | dc78baa2b90b289590911b40b6800f77d0dc935a (diff)
dmaengine: at_hdmac: add DMA slave transfers
This patch adds slave transfer capability to the at_hdmac driver for the
Atmel DMA controller available on some AT91 SoCs. It enables
peripheral-to-memory and memory-to-peripheral transfers with hardware
handshaking.

The slave structure carrying controller-specific information is passed
through the channel private data. This at_dma_slave structure is defined
in the at_hdmac.h header file, and the related hardware definitions are
moved there from at_hdmac_regs.h. This allows channels to be configured
from platform definition code.

This work is heavily based on dw_dmac and several other slave
implementations.
Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
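
The diff below reads the slave parameters from chan->private, so platform or
client code is expected to fill an at_dma_slave and attach it before the
channel is first used. A minimal sketch of that glue, assuming a hypothetical
peripheral; only the at_dma_slave field names come from this patch, while the
register offsets, helper names, and include path are placeholders:

#include <linux/dmaengine.h>
#include <mach/at_hdmac.h>	/* assumed include path for struct at_dma_slave */

/* Hypothetical peripheral data-register offsets (placeholders) */
#define EXAMPLE_PER_THR		0x24	/* TX holding register */
#define EXAMPLE_PER_RHR		0x20	/* RX holding register */

static struct at_dma_slave example_dma_slave;

/* dma_request_channel() filter: attach the slave data before first use */
static bool example_dma_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *sl = slave;

	/* only accept a channel sitting on the controller named in dma_dev */
	if (sl->dma_dev && sl->dma_dev != chan->device->dev)
		return false;

	chan->private = sl;
	return true;
}

static struct dma_chan *example_request_dma(struct device *dmac_dev,
					    dma_addr_t per_base)
{
	dma_cap_mask_t mask;

	/* describe the peripheral's handshake interface to at_hdmac */
	example_dma_slave.dma_dev   = dmac_dev;	/* the AHB DMA controller device */
	example_dma_slave.tx_reg    = per_base + EXAMPLE_PER_THR;
	example_dma_slave.rx_reg    = per_base + EXAMPLE_PER_RHR;
	example_dma_slave.reg_width = 2;	/* assumed: same encoding as mem_width, 2 => 32 bit */
	example_dma_slave.cfg       = 0;	/* 0 keeps ATC_DEFAULT_CFG */
	example_dma_slave.ctrla     = 0;	/* ORed into ATC_DEFAULT_CTRLA by atc_prep_slave_sg() */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, example_dma_filter, &example_dma_slave);
}

The filter rejects channels from other DMA controllers, which keeps the
BUG_ON() added to atc_alloc_chan_resources() below from triggering.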
Diffstat (limited to 'drivers/dma/at_hdmac.c')
-rw-r--r-- | drivers/dma/at_hdmac.c | 208
1 files changed, 206 insertions, 2 deletions
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 64dbf0ce128e..9a1e5fb412ed 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -608,6 +608,187 @@ err_desc_get:
 	return NULL;
 }
 
+
+/**
+ * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @chan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @scatterlist
+ * @direction: DMA direction
+ * @flags: tx descriptor status flags
+ */
+static struct dma_async_tx_descriptor *
+atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_data_direction direction,
+		unsigned long flags)
+{
+	struct at_dma_chan *atchan = to_at_dma_chan(chan);
+	struct at_dma_slave *atslave = chan->private;
+	struct at_desc *first = NULL;
+	struct at_desc *prev = NULL;
+	u32 ctrla;
+	u32 ctrlb;
+	dma_addr_t reg;
+	unsigned int reg_width;
+	unsigned int mem_width;
+	unsigned int i;
+	struct scatterlist *sg;
+	size_t total_len = 0;
+
+	dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n",
+			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+			flags);
+
+	if (unlikely(!atslave || !sg_len)) {
+		dev_dbg(chan2dev(chan), "prep_slave_sg: length is zero!\n");
+		return NULL;
+	}
+
+	reg_width = atslave->reg_width;
+
+	sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction);
+
+	ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
+	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN;
+
+	switch (direction) {
+	case DMA_TO_DEVICE:
+		ctrla |= ATC_DST_WIDTH(reg_width);
+		ctrlb |= ATC_DST_ADDR_MODE_FIXED
+				| ATC_SRC_ADDR_MODE_INCR
+				| ATC_FC_MEM2PER;
+		reg = atslave->tx_reg;
+		for_each_sg(sgl, sg, sg_len, i) {
+			struct at_desc *desc;
+			u32 len;
+			u32 mem;
+
+			desc = atc_desc_get(atchan);
+			if (!desc)
+				goto err_desc_get;
+
+			mem = sg_phys(sg);
+			len = sg_dma_len(sg);
+			mem_width = 2;
+			if (unlikely(mem & 3 || len & 3))
+				mem_width = 0;
+
+			desc->lli.saddr = mem;
+			desc->lli.daddr = reg;
+			desc->lli.ctrla = ctrla
+					| ATC_SRC_WIDTH(mem_width)
+					| len >> mem_width;
+			desc->lli.ctrlb = ctrlb;
+
+			if (!first) {
+				first = desc;
+			} else {
+				/* inform the HW lli about chaining */
+				prev->lli.dscr = desc->txd.phys;
+				/* insert the link descriptor to the LD ring */
+				list_add_tail(&desc->desc_node,
+						&first->txd.tx_list);
+			}
+			prev = desc;
+			total_len += len;
+		}
+		break;
+	case DMA_FROM_DEVICE:
+		ctrla |= ATC_SRC_WIDTH(reg_width);
+		ctrlb |= ATC_DST_ADDR_MODE_INCR
+				| ATC_SRC_ADDR_MODE_FIXED
+				| ATC_FC_PER2MEM;
+
+		reg = atslave->rx_reg;
+		for_each_sg(sgl, sg, sg_len, i) {
+			struct at_desc *desc;
+			u32 len;
+			u32 mem;
+
+			desc = atc_desc_get(atchan);
+			if (!desc)
+				goto err_desc_get;
+
+			mem = sg_phys(sg);
+			len = sg_dma_len(sg);
+			mem_width = 2;
+			if (unlikely(mem & 3 || len & 3))
+				mem_width = 0;
+
+			desc->lli.saddr = reg;
+			desc->lli.daddr = mem;
+			desc->lli.ctrla = ctrla
+					| ATC_DST_WIDTH(mem_width)
+					| len >> mem_width;
+			desc->lli.ctrlb = ctrlb;
+
+			if (!first) {
+				first = desc;
+			} else {
+				/* inform the HW lli about chaining */
+				prev->lli.dscr = desc->txd.phys;
+				/* insert the link descriptor to the LD ring */
+				list_add_tail(&desc->desc_node,
+						&first->txd.tx_list);
+			}
+			prev = desc;
+			total_len += len;
+		}
+		break;
+	default:
+		return NULL;
+	}
+
+	/* set end-of-link to the last link descriptor of list */
+	set_desc_eol(prev);
+
+	/* First descriptor of the chain embeds additional information */
+	first->txd.cookie = -EBUSY;
+	first->len = total_len;
+
+	/* last link descriptor of list is responsible for flags */
+	prev->txd.flags = flags; /* client is in control of this ack */
+
+	return &first->txd;
+
+err_desc_get:
+	dev_err(chan2dev(chan), "not enough descriptors available\n");
+	atc_desc_put(atchan, first);
+	return NULL;
+}
+
+static void atc_terminate_all(struct dma_chan *chan)
+{
+	struct at_dma_chan *atchan = to_at_dma_chan(chan);
+	struct at_dma *atdma = to_at_dma(chan->device);
+	struct at_desc *desc, *_desc;
+	LIST_HEAD(list);
+
+	/*
+	 * This is only called when something went wrong elsewhere, so
+	 * we don't really care about the data. Just disable the
+	 * channel. We still have to poll the channel enable bit due
+	 * to AHB/HSB limitations.
+	 */
+	spin_lock_bh(&atchan->lock);
+
+	dma_writel(atdma, CHDR, atchan->mask);
+
+	/* confirm that this channel is disabled */
+	while (dma_readl(atdma, CHSR) & atchan->mask)
+		cpu_relax();
+
+	/* active_list entries will end up before queued entries */
+	list_splice_init(&atchan->queue, &list);
+	list_splice_init(&atchan->active_list, &list);
+
+	spin_unlock_bh(&atchan->lock);
+
+	/* Flush all pending and queued descriptors */
+	list_for_each_entry_safe(desc, _desc, &list, desc_node)
+		atc_chain_complete(atchan, desc);
+}
+
 /**
  * atc_is_tx_complete - poll for transaction completion
  * @chan: DMA channel
@@ -686,7 +867,9 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
 	struct at_dma *atdma = to_at_dma(chan->device);
 	struct at_desc *desc;
+	struct at_dma_slave *atslave;
 	int i;
+	u32 cfg;
 	LIST_HEAD(tmp_list);
 
 	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
@@ -697,7 +880,23 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
 		return -EIO;
 	}
 
-	/* have we already been set up? */
+	cfg = ATC_DEFAULT_CFG;
+
+	atslave = chan->private;
+	if (atslave) {
+		/*
+		 * We need controller-specific data to set up slave
+		 * transfers.
+		 */
+		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
+
+		/* if a cfg value is specified, take it instead of the default */
+		if (atslave->cfg)
+			cfg = atslave->cfg;
+	}
+
+	/* have we already been set up?
+	 * reconfigure channel but no need to reallocate descriptors */
 	if (!list_empty(&atchan->free_list))
 		return atchan->descs_allocated;
 
@@ -719,7 +918,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
 	spin_unlock_bh(&atchan->lock);
 
 	/* channel parameters */
-	channel_writel(atchan, CFG, ATC_DEFAULT_CFG);
+	channel_writel(atchan, CFG, cfg);
 
 	dev_dbg(chan2dev(chan),
 		"alloc_chan_resources: allocated %d descriptors\n",
@@ -888,6 +1087,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
 	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
 		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
 
+	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
+		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
+		atdma->dma_common.device_terminate_all = atc_terminate_all;
+	}
+
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
 
 	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
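
The last hunk only registers atc_prep_slave_sg and atc_terminate_all when the
controller advertises DMA_SLAVE, so a client of this era drives them through
the raw dmaengine callbacks. A rough usage sketch under the same assumptions
as the platform glue above (hypothetical helper names, error handling
trimmed); note that atc_prep_slave_sg() maps the scatterlist itself via
dma_map_sg(), so the client passes it unmapped:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* hypothetical completion callback */
static void example_dma_done(void *param)
{
	/* e.g. complete() a waiting completion or wake the client */
}

/* Hypothetical client: push sg_len entries of sgl out to the peripheral. */
static int example_start_tx(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* 2.6.31-era call: there is no dmaengine_prep_slave_sg() wrapper yet */
	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_TO_DEVICE,
						  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	desc->callback = example_dma_done;
	desc->callback_param = NULL;
	cookie = desc->tx_submit(desc);		/* queue it on the channel */
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}

/* On a fatal error, drain everything with the new terminate hook */
static void example_abort_dma(struct dma_chan *chan)
{
	chan->device->device_terminate_all(chan);
}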