Diffstat (limited to 'drivers/dma/mpc512x_dma.c')
-rw-r--r--  drivers/dma/mpc512x_dma.c  111
1 file changed, 51 insertions(+), 60 deletions(-)
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 01bec4023de2..57d2457545f3 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -800,79 +800,69 @@ err_prep:
         return NULL;
 }
 
-static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-                                  unsigned long arg)
+static int mpc_dma_device_config(struct dma_chan *chan,
+                                 struct dma_slave_config *cfg)
 {
-        struct mpc_dma_chan *mchan;
-        struct mpc_dma *mdma;
-        struct dma_slave_config *cfg;
+        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
         unsigned long flags;
 
-        mchan = dma_chan_to_mpc_dma_chan(chan);
-        switch (cmd) {
-        case DMA_TERMINATE_ALL:
-                /* Disable channel requests */
-                mdma = dma_chan_to_mpc_dma(chan);
-
-                spin_lock_irqsave(&mchan->lock, flags);
-
-                out_8(&mdma->regs->dmacerq, chan->chan_id);
-                list_splice_tail_init(&mchan->prepared, &mchan->free);
-                list_splice_tail_init(&mchan->queued, &mchan->free);
-                list_splice_tail_init(&mchan->active, &mchan->free);
-
-                spin_unlock_irqrestore(&mchan->lock, flags);
+        /*
+         * Software constraints:
+         *  - only transfers between a peripheral device and
+         *    memory are supported;
+         *  - only peripheral devices with 4-byte FIFO access register
+         *    are supported;
+         *  - minimal transfer chunk is 4 bytes and consequently
+         *    source and destination addresses must be 4-byte aligned
+         *    and transfer size must be aligned on (4 * maxburst)
+         *    boundary;
+         *  - during the transfer RAM address is being incremented by
+         *    the size of minimal transfer chunk;
+         *  - peripheral port's address is constant during the transfer.
+         */
 
-                return 0;
+        if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
+            cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
+            !IS_ALIGNED(cfg->src_addr, 4) ||
+            !IS_ALIGNED(cfg->dst_addr, 4)) {
+                return -EINVAL;
+        }
 
-        case DMA_SLAVE_CONFIG:
-                /*
-                 * Software constraints:
-                 *  - only transfers between a peripheral device and
-                 *    memory are supported;
-                 *  - only peripheral devices with 4-byte FIFO access register
-                 *    are supported;
-                 *  - minimal transfer chunk is 4 bytes and consequently
-                 *    source and destination addresses must be 4-byte aligned
-                 *    and transfer size must be aligned on (4 * maxburst)
-                 *    boundary;
-                 *  - during the transfer RAM address is being incremented by
-                 *    the size of minimal transfer chunk;
-                 *  - peripheral port's address is constant during the transfer.
-                 */
+        spin_lock_irqsave(&mchan->lock, flags);
 
-                cfg = (void *)arg;
+        mchan->src_per_paddr = cfg->src_addr;
+        mchan->src_tcd_nunits = cfg->src_maxburst;
+        mchan->dst_per_paddr = cfg->dst_addr;
+        mchan->dst_tcd_nunits = cfg->dst_maxburst;
 
-                if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
-                    cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
-                    !IS_ALIGNED(cfg->src_addr, 4) ||
-                    !IS_ALIGNED(cfg->dst_addr, 4)) {
-                        return -EINVAL;
-                }
+        /* Apply defaults */
+        if (mchan->src_tcd_nunits == 0)
+                mchan->src_tcd_nunits = 1;
+        if (mchan->dst_tcd_nunits == 0)
+                mchan->dst_tcd_nunits = 1;
 
-                spin_lock_irqsave(&mchan->lock, flags);
+        spin_unlock_irqrestore(&mchan->lock, flags);
 
-                mchan->src_per_paddr = cfg->src_addr;
-                mchan->src_tcd_nunits = cfg->src_maxburst;
-                mchan->dst_per_paddr = cfg->dst_addr;
-                mchan->dst_tcd_nunits = cfg->dst_maxburst;
+        return 0;
+}
 
-                /* Apply defaults */
-                if (mchan->src_tcd_nunits == 0)
-                        mchan->src_tcd_nunits = 1;
-                if (mchan->dst_tcd_nunits == 0)
-                        mchan->dst_tcd_nunits = 1;
+static int mpc_dma_device_terminate_all(struct dma_chan *chan)
+{
+        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
+        unsigned long flags;
 
-                spin_unlock_irqrestore(&mchan->lock, flags);
+        /* Disable channel requests */
+        spin_lock_irqsave(&mchan->lock, flags);
 
-                return 0;
+        out_8(&mdma->regs->dmacerq, chan->chan_id);
+        list_splice_tail_init(&mchan->prepared, &mchan->free);
+        list_splice_tail_init(&mchan->queued, &mchan->free);
+        list_splice_tail_init(&mchan->active, &mchan->free);
 
-        default:
-                /* Unknown command */
-                break;
-        }
+        spin_unlock_irqrestore(&mchan->lock, flags);
 
-        return -ENXIO;
+        return 0;
 }
 
 static int mpc_dma_probe(struct platform_device *op)
@@ -963,7 +953,8 @@ static int mpc_dma_probe(struct platform_device *op)
         dma->device_tx_status = mpc_dma_tx_status;
         dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
         dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
-        dma->device_control = mpc_dma_device_control;
+        dma->device_config = mpc_dma_device_config;
+        dma->device_terminate_all = mpc_dma_device_terminate_all;
 
         INIT_LIST_HEAD(&dma->channels);
         dma_cap_set(DMA_MEMCPY, dma->cap_mask);
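
For context (not part of the patch): after this change a slave driver using this controller reaches mpc_dma_device_config() and mpc_dma_device_terminate_all() through the generic dmaengine helpers rather than the old device_control() callback. A minimal consumer-side sketch under the constraints documented in the comment above (4-byte bus width, 4-byte aligned addresses); the FIFO address and burst size below are made-up illustration values, not taken from this patch:

#include <linux/dmaengine.h>

/* Hypothetical example values; not taken from this patch. */
#define EXAMPLE_FIFO_PADDR      0xf0000040      /* 4-byte aligned peripheral FIFO register */

static int example_config_rx(struct dma_chan *chan)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_DEV_TO_MEM,
                .src_addr       = EXAMPLE_FIFO_PADDR,
                /* the driver accepts only 4-byte FIFO accesses */
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                /* transfer length must be a multiple of 4 * maxburst */
                .src_maxburst   = 16,
        };

        /* Ends up in mpc_dma_device_config() via the new .device_config hook */
        return dmaengine_slave_config(chan, &cfg);
}

static void example_stop(struct dma_chan *chan)
{
        /* Ends up in mpc_dma_device_terminate_all() via .device_terminate_all */
        dmaengine_terminate_all(chan);
}

From the consumer's point of view both calls behave the same before and after this patch; only the driver-side plumbing changes from one multiplexed device_control() handler to dedicated callbacks.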