diff options
author | Maxime Ripard <maxime.ripard@free-electrons.com> | 2014-11-17 08:42:33 -0500 |
---|---|---|
committer | Vinod Koul <vinod.koul@intel.com> | 2014-12-22 02:02:16 -0500 |
commit | be60f94074cf1caf165c0494aa393bcd2e322af4 (patch) | |
tree | 6fa413f8e363bfdbbbd27f2cc7a0a0319042c83b /drivers/dma/sh | |
parent | 4a533218fccf82d4e371aeae737ce2383175fd01 (diff) |
dmaengine: sh: Split device_control
Split the device_control callback of the Super-H DMA driver to make use of the
newly introduced callbacks, that will eventually be used to retrieve slave
capabilities.
Signed-off-by: Maxime Ripard <maxime.ripard@free-electrons.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma/sh')
-rw-r--r-- | drivers/dma/sh/shdma-base.c | 72 |
1 file changed, 33 insertions(+), 39 deletions(-)
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 3a2adb131d46..8ee383d339a5 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c | |||
@@ -729,57 +729,50 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( | |||
729 | return desc; | 729 | return desc; |
730 | } | 730 | } |
731 | 731 | ||
732 | static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 732 | static int shdma_terminate_all(struct dma_chan *chan) |
733 | unsigned long arg) | ||
734 | { | 733 | { |
735 | struct shdma_chan *schan = to_shdma_chan(chan); | 734 | struct shdma_chan *schan = to_shdma_chan(chan); |
736 | struct shdma_dev *sdev = to_shdma_dev(chan->device); | 735 | struct shdma_dev *sdev = to_shdma_dev(chan->device); |
737 | const struct shdma_ops *ops = sdev->ops; | 736 | const struct shdma_ops *ops = sdev->ops; |
738 | struct dma_slave_config *config; | ||
739 | unsigned long flags; | 737 | unsigned long flags; |
740 | int ret; | ||
741 | 738 | ||
742 | switch (cmd) { | 739 | spin_lock_irqsave(&schan->chan_lock, flags); |
743 | case DMA_TERMINATE_ALL: | 740 | ops->halt_channel(schan); |
744 | spin_lock_irqsave(&schan->chan_lock, flags); | ||
745 | ops->halt_channel(schan); | ||
746 | 741 | ||
747 | if (ops->get_partial && !list_empty(&schan->ld_queue)) { | 742 | if (ops->get_partial && !list_empty(&schan->ld_queue)) { |
748 | /* Record partial transfer */ | 743 | /* Record partial transfer */ |
749 | struct shdma_desc *desc = list_first_entry(&schan->ld_queue, | 744 | struct shdma_desc *desc = list_first_entry(&schan->ld_queue, |
750 | struct shdma_desc, node); | 745 | struct shdma_desc, node); |
751 | desc->partial = ops->get_partial(schan, desc); | 746 | desc->partial = ops->get_partial(schan, desc); |
752 | } | 747 | } |
753 | 748 | ||
754 | spin_unlock_irqrestore(&schan->chan_lock, flags); | 749 | spin_unlock_irqrestore(&schan->chan_lock, flags); |
755 | 750 | ||
756 | shdma_chan_ld_cleanup(schan, true); | 751 | shdma_chan_ld_cleanup(schan, true); |
757 | break; | ||
758 | case DMA_SLAVE_CONFIG: | ||
759 | /* | ||
760 | * So far only .slave_id is used, but the slave drivers are | ||
761 | * encouraged to also set a transfer direction and an address. | ||
762 | */ | ||
763 | if (!arg) | ||
764 | return -EINVAL; | ||
765 | /* | ||
766 | * We could lock this, but you shouldn't be configuring the | ||
767 | * channel, while using it... | ||
768 | */ | ||
769 | config = (struct dma_slave_config *)arg; | ||
770 | ret = shdma_setup_slave(schan, config->slave_id, | ||
771 | config->direction == DMA_DEV_TO_MEM ? | ||
772 | config->src_addr : config->dst_addr); | ||
773 | if (ret < 0) | ||
774 | return ret; | ||
775 | break; | ||
776 | default: | ||
777 | return -ENXIO; | ||
778 | } | ||
779 | 752 | ||
780 | return 0; | 753 | return 0; |
781 | } | 754 | } |
782 | 755 | ||
756 | static int shdma_config(struct dma_chan *chan, | ||
757 | struct dma_slave_config *config) | ||
758 | { | ||
759 | struct shdma_chan *schan = to_shdma_chan(chan); | ||
760 | |||
761 | /* | ||
762 | * So far only .slave_id is used, but the slave drivers are | ||
763 | * encouraged to also set a transfer direction and an address. | ||
764 | */ | ||
765 | if (!config) | ||
766 | return -EINVAL; | ||
767 | /* | ||
768 | * We could lock this, but you shouldn't be configuring the | ||
769 | * channel, while using it... | ||
770 | */ | ||
771 | return shdma_setup_slave(schan, config->slave_id, | ||
772 | config->direction == DMA_DEV_TO_MEM ? | ||
773 | config->src_addr : config->dst_addr); | ||
774 | } | ||
775 | |||
783 | static void shdma_issue_pending(struct dma_chan *chan) | 776 | static void shdma_issue_pending(struct dma_chan *chan) |
784 | { | 777 | { |
785 | struct shdma_chan *schan = to_shdma_chan(chan); | 778 | struct shdma_chan *schan = to_shdma_chan(chan); |
@@ -1002,7 +995,8 @@ int shdma_init(struct device *dev, struct shdma_dev *sdev, | |||
1002 | /* Compulsory for DMA_SLAVE fields */ | 995 | /* Compulsory for DMA_SLAVE fields */ |
1003 | dma_dev->device_prep_slave_sg = shdma_prep_slave_sg; | 996 | dma_dev->device_prep_slave_sg = shdma_prep_slave_sg; |
1004 | dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic; | 997 | dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic; |
1005 | dma_dev->device_control = shdma_control; | 998 | dma_dev->device_config = shdma_config; |
999 | dma_dev->device_terminate_all = shdma_terminate_all; | ||
1006 | 1000 | ||
1007 | dma_dev->dev = dev; | 1001 | dma_dev->dev = dev; |
1008 | 1002 | ||