author     Ira Snyder <iws@ovro.caltech.edu>        2010-09-30 07:46:46 -0400
committer  Dan Williams <dan.j.williams@intel.com>  2010-10-07 17:41:41 -0400
commit     968f19ae802fdc6b6b6b5af6fe79cf23d281be0f (patch)
tree       122f3912ed717627b0e5bac8c72f42ef2eb0cb6e /drivers/dma
parent     c14330417ef2050f4bf38ac20e125785fea14351 (diff)
fsldma: improved DMA_SLAVE support
Now that the generic DMAEngine API has support for scatterlist to
scatterlist copying, the device_prep_slave_sg() portion of the
DMA_SLAVE API is no longer necessary and has been removed.

However, the device_control() portion of the DMA_SLAVE API is still
useful to control device specific parameters, such as externally
controlled DMA transfers and maximum burst length.

A special dma_ctrl_cmd has been added to enable externally controlled
DMA transfers. This is currently specific to the Freescale DMA
controller, but can easily be made generic when another user is found.

Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/fsldma.c | 226
1 file changed, 44 insertions(+), 182 deletions(-)
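
For context, clients reach the retained device_control() hook through the
channel's dma_device. What follows is a minimal sketch (not part of the
commit) of the DMA_SLAVE_CONFIG path, assuming the dmaengine API as of this
commit (no dmaengine_slave_config() wrapper exists yet) and a hypothetical
channel and device FIFO address. Per the diff below, fsldma multiplies the
address width by the maximum burst, so a 4-byte width with dst_maxburst = 8
programs a 32-byte request count through set_request_count():

#include <linux/dmaengine.h>

/* Hypothetical client: program a 32-byte burst on a Freescale DMA channel */
static int example_set_burst(struct dma_chan *chan, dma_addr_t fifo_addr)
{
        struct dma_slave_config config = {
                .direction      = DMA_TO_DEVICE,
                .dst_addr       = fifo_addr,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst   = 8,    /* fsldma programs 4 * 8 = 32 bytes */
        };

        /* no dmaengine_slave_config() wrapper yet; call the hook directly */
        return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
                                            (unsigned long)&config);
}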
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 1ed29d10a5fa..286c3ac6bdcc 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -35,7 +35,6 @@
 #include <linux/dmapool.h>
 #include <linux/of_platform.h>
 
-#include <asm/fsldma.h>
 #include "fsldma.h"
 
 static const char msg_ld_oom[] = "No free memory for link descriptor\n";
@@ -719,207 +718,70 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
         struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
         enum dma_data_direction direction, unsigned long flags)
 {
-        struct fsldma_chan *chan;
-        struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
-        struct fsl_dma_slave *slave;
-        size_t copy;
-
-        int i;
-        struct scatterlist *sg;
-        size_t sg_used;
-        size_t hw_used;
-        struct fsl_dma_hw_addr *hw;
-        dma_addr_t dma_dst, dma_src;
-
-        if (!dchan)
-                return NULL;
-
-        if (!dchan->private)
-                return NULL;
-
-        chan = to_fsl_chan(dchan);
-        slave = dchan->private;
-
-        if (list_empty(&slave->addresses))
-                return NULL;
-
-        hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry);
-        hw_used = 0;
-
         /*
-         * Build the hardware transaction to copy from the scatterlist to
-         * the hardware, or from the hardware to the scatterlist
+         * This operation is not supported on the Freescale DMA controller
          *
-         * If you are copying from the hardware to the scatterlist and it
-         * takes two hardware entries to fill an entire page, then both
-         * hardware entries will be coalesced into the same page
-         *
-         * If you are copying from the scatterlist to the hardware and a
-         * single page can fill two hardware entries, then the data will
-         * be read out of the page into the first hardware entry, and so on
+         * However, we need to provide the function pointer to allow the
+         * device_control() method to work.
          */
-        for_each_sg(sgl, sg, sg_len, i) {
-                sg_used = 0;
-
-                /* Loop until the entire scatterlist entry is used */
-                while (sg_used < sg_dma_len(sg)) {
-
-                        /*
-                         * If we've used up the current hardware address/length
-                         * pair, we need to load a new one
-                         *
-                         * This is done in a while loop so that descriptors with
-                         * length == 0 will be skipped
-                         */
-                        while (hw_used >= hw->length) {
-
-                                /*
-                                 * If the current hardware entry is the last
-                                 * entry in the list, we're finished
-                                 */
-                                if (list_is_last(&hw->entry, &slave->addresses))
-                                        goto finished;
-
-                                /* Get the next hardware address/length pair */
-                                hw = list_entry(hw->entry.next,
-                                                struct fsl_dma_hw_addr, entry);
-                                hw_used = 0;
-                        }
-
-                        /* Allocate the link descriptor from DMA pool */
-                        new = fsl_dma_alloc_descriptor(chan);
-                        if (!new) {
-                                dev_err(chan->dev, "No free memory for "
-                                                "link descriptor\n");
-                                goto fail;
-                        }
-#ifdef FSL_DMA_LD_DEBUG
-                        dev_dbg(chan->dev, "new link desc alloc %p\n", new);
-#endif
-
-                        /*
-                         * Calculate the maximum number of bytes to transfer,
-                         * making sure it is less than the DMA controller limit
-                         */
-                        copy = min_t(size_t, sg_dma_len(sg) - sg_used,
-                                        hw->length - hw_used);
-                        copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT);
-
-                        /*
-                         * DMA_FROM_DEVICE
-                         * from the hardware to the scatterlist
-                         *
-                         * DMA_TO_DEVICE
-                         * from the scatterlist to the hardware
-                         */
-                        if (direction == DMA_FROM_DEVICE) {
-                                dma_src = hw->address + hw_used;
-                                dma_dst = sg_dma_address(sg) + sg_used;
-                        } else {
-                                dma_src = sg_dma_address(sg) + sg_used;
-                                dma_dst = hw->address + hw_used;
-                        }
-
-                        /* Fill in the descriptor */
-                        set_desc_cnt(chan, &new->hw, copy);
-                        set_desc_src(chan, &new->hw, dma_src);
-                        set_desc_dst(chan, &new->hw, dma_dst);
-
-                        /*
-                         * If this is not the first descriptor, chain the
-                         * current descriptor after the previous descriptor
-                         */
-                        if (!first) {
-                                first = new;
-                        } else {
-                                set_desc_next(chan, &prev->hw,
-                                                new->async_tx.phys);
-                        }
-
-                        new->async_tx.cookie = 0;
-                        async_tx_ack(&new->async_tx);
-
-                        prev = new;
-                        sg_used += copy;
-                        hw_used += copy;
-
-                        /* Insert the link descriptor into the LD ring */
-                        list_add_tail(&new->node, &first->tx_list);
-                }
-        }
-
-finished:
-
-        /* All of the hardware address/length pairs had length == 0 */
-        if (!first || !new)
-                return NULL;
-
-        new->async_tx.flags = flags;
-        new->async_tx.cookie = -EBUSY;
-
-        /* Set End-of-link to the last link descriptor of new list */
-        set_ld_eol(chan, new);
-
-        /* Enable extra controller features */
-        if (chan->set_src_loop_size)
-                chan->set_src_loop_size(chan, slave->src_loop_size);
-
-        if (chan->set_dst_loop_size)
-                chan->set_dst_loop_size(chan, slave->dst_loop_size);
-
-        if (chan->toggle_ext_start)
-                chan->toggle_ext_start(chan, slave->external_start);
-
-        if (chan->toggle_ext_pause)
-                chan->toggle_ext_pause(chan, slave->external_pause);
-
-        if (chan->set_request_count)
-                chan->set_request_count(chan, slave->request_count);
-
-        return &first->async_tx;
-
-fail:
-        /* If first was not set, then we failed to allocate the very first
-         * descriptor, and we're done */
-        if (!first)
-                return NULL;
-
-        /*
-         * First is set, so all of the descriptors we allocated have been added
-         * to first->tx_list, INCLUDING "first" itself. Therefore we
-         * must traverse the list backwards freeing each descriptor in turn
-         *
-         * We're re-using variables for the loop, oh well
-         */
-        fsldma_free_desc_list_reverse(chan, &first->tx_list);
         return NULL;
 }
 
 static int fsl_dma_device_control(struct dma_chan *dchan,
                 enum dma_ctrl_cmd cmd, unsigned long arg)
 {
+        struct dma_slave_config *config;
         struct fsldma_chan *chan;
         unsigned long flags;
-
-        /* Only supports DMA_TERMINATE_ALL */
-        if (cmd != DMA_TERMINATE_ALL)
-                return -ENXIO;
+        int size;
 
         if (!dchan)
                 return -EINVAL;
 
         chan = to_fsl_chan(dchan);
 
-        /* Halt the DMA engine */
-        dma_halt(chan);
+        switch (cmd) {
+        case DMA_TERMINATE_ALL:
+                /* Halt the DMA engine */
+                dma_halt(chan);
 
-        spin_lock_irqsave(&chan->desc_lock, flags);
+                spin_lock_irqsave(&chan->desc_lock, flags);
 
-        /* Remove and free all of the descriptors in the LD queue */
-        fsldma_free_desc_list(chan, &chan->ld_pending);
-        fsldma_free_desc_list(chan, &chan->ld_running);
+                /* Remove and free all of the descriptors in the LD queue */
+                fsldma_free_desc_list(chan, &chan->ld_pending);
+                fsldma_free_desc_list(chan, &chan->ld_running);
 
-        spin_unlock_irqrestore(&chan->desc_lock, flags);
+                spin_unlock_irqrestore(&chan->desc_lock, flags);
+                return 0;
+
+        case DMA_SLAVE_CONFIG:
+                config = (struct dma_slave_config *)arg;
+
+                /* make sure the channel supports setting burst size */
+                if (!chan->set_request_count)
+                        return -ENXIO;
+
+                /* we set the controller burst size depending on direction */
+                if (config->direction == DMA_TO_DEVICE)
+                        size = config->dst_addr_width * config->dst_maxburst;
+                else
+                        size = config->src_addr_width * config->src_maxburst;
+
+                chan->set_request_count(chan, size);
+                return 0;
+
+        case FSLDMA_EXTERNAL_START:
+
+                /* make sure the channel supports external start */
+                if (!chan->toggle_ext_start)
+                        return -ENXIO;
+
+                chan->toggle_ext_start(chan, arg);
+                return 0;
+
+        default:
+                return -ENXIO;
+        }
 
         return 0;
 }
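
The new FSLDMA_EXTERNAL_START command takes the on/off state directly in
arg. A hedged sketch of a caller follows, again with a hypothetical channel
obtained elsewhere (e.g. via dma_request_channel()); as the diff above shows,
channels without the toggle_ext_start hook get -ENXIO back:

/* Hypothetical client: enable externally controlled (pin-started) DMA */
static int example_external_start(struct dma_chan *chan, bool enable)
{
        return chan->device->device_control(chan, FSLDMA_EXTERNAL_START,
                                            enable ? 1 : 0);
}

Passing the state as the raw arg keeps the command thin enough to lift out
of fsldma later, matching the commit message's note that it can be made
generic when another user is found.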