-rw-r--r--  Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt |   4
-rw-r--r--  Documentation/driver-api/dmaengine/provider.rst           |  38
-rw-r--r--  drivers/acpi/bus.c                                         |  18
-rw-r--r--  drivers/acpi/property.c                                    |   8
-rw-r--r--  drivers/base/property.c                                    |   7
-rw-r--r--  drivers/dma/amba-pl08x.c                                   |  11
-rw-r--r--  drivers/dma/bcm2835-dma.c                                  |  10
-rw-r--r--  drivers/dma/cppi41.c                                       |   2
-rw-r--r--  drivers/dma/dma-jz4780.c                                   |  10
-rw-r--r--  drivers/dma/dmatest.c                                      |   2
-rw-r--r--  drivers/dma/edma.c                                         |   7
-rw-r--r--  drivers/dma/img-mdc-dma.c                                  |  17
-rw-r--r--  drivers/dma/imx-sdma.c                                     |   6
-rw-r--r--  drivers/dma/k3dma.c                                        |  10
-rw-r--r--  drivers/dma/mic_x100_dma.c                                 |   4
-rw-r--r--  drivers/dma/omap-dma.c                                     |   2
-rw-r--r--  drivers/dma/qcom/hidma.c                                   |  41
-rw-r--r--  drivers/dma/qcom/hidma_ll.c                                |   9
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.c                              |  61
-rw-r--r--  drivers/dma/s3c24xx-dma.c                                  |  11
-rw-r--r--  drivers/dma/sh/rcar-dmac.c                                 |  44
-rw-r--r--  drivers/dma/sprd-dma.c                                     |   2
-rw-r--r--  drivers/dma/stm32-dmamux.c                                 |   3
-rw-r--r--  drivers/dma/tegra20-apb-dma.c                              |  19
-rw-r--r--  drivers/dma/ti-dma-crossbar.c                              |  10
-rw-r--r--  drivers/dma/timb_dma.c                                     |   2
-rw-r--r--  drivers/dma/virt-dma.c                                     |   5
-rw-r--r--  drivers/dma/virt-dma.h                                     |  44
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c                            | 302
-rw-r--r--  drivers/dma/xilinx/zynqmp_dma.c                            | 179
-rw-r--r--  drivers/of/property.c                                      |   8
-rw-r--r--  include/linux/acpi.h                                       |   6
-rw-r--r--  include/linux/fwnode.h                                     |   4
-rw-r--r--  include/linux/property.h                                   |   2
34 files changed, 609 insertions, 299 deletions
diff --git a/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
index b3408cc57be6..1ae4748730a8 100644
--- a/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
+++ b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
@@ -47,8 +47,8 @@ When the OS is not in control of the management interface (i.e. it's a guest),
47the channel nodes appear on their own, not under a management node. 47the channel nodes appear on their own, not under a management node.
48 48
49Required properties: 49Required properties:
50- compatible: must contain "qcom,hidma-1.0" for initial HW or "qcom,hidma-1.1" 50- compatible: must contain "qcom,hidma-1.0" for initial HW or
51for MSI capable HW. 51 "qcom,hidma-1.1"/"qcom,hidma-1.2" for MSI capable HW.
52- reg: Addresses for the transfer and event channel 52- reg: Addresses for the transfer and event channel
53- interrupts: Should contain the event interrupt 53- interrupts: Should contain the event interrupt
54- desc-count: Number of asynchronous requests this channel can handle 54- desc-count: Number of asynchronous requests this channel can handle
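For reference, a minimal sketch (not part of this patch set) of how a channel driver might consume the binding above, modeled on the hidma_probe()/device_get_match_data() usage introduced later in this series; the example_* names are placeholders:

	#include <linux/platform_device.h>
	#include <linux/property.h>

	static int example_chan_probe(struct platform_device *pdev)
	{
		u32 nr_descriptors = 0;
		unsigned long caps;

		/* capability flags attached to the matched compatible string */
		caps = (unsigned long)device_get_match_data(&pdev->dev);

		/* "desc-count" property from the binding above */
		if (device_property_read_u32(&pdev->dev, "desc-count",
					     &nr_descriptors))
			return -EINVAL;

		/* a real driver would size its descriptor ring from these */
		dev_info(&pdev->dev, "caps %#lx, %u descriptors\n",
			 caps, nr_descriptors);
		return 0;
	}
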
diff --git a/Documentation/driver-api/dmaengine/provider.rst b/Documentation/driver-api/dmaengine/provider.rst
index 814acb4d2294..dfc4486b5743 100644
--- a/Documentation/driver-api/dmaengine/provider.rst
+++ b/Documentation/driver-api/dmaengine/provider.rst
@@ -111,40 +111,36 @@ The first thing you need to do in your driver is to allocate this
111structure. Any of the usual memory allocators will do, but you'll also 111structure. Any of the usual memory allocators will do, but you'll also
112need to initialize a few fields in there: 112need to initialize a few fields in there:
113 113
114- channels: should be initialized as a list using the 114- ``channels``: should be initialized as a list using the
115 INIT_LIST_HEAD macro for example 115 INIT_LIST_HEAD macro for example
116 116
117- src_addr_widths: 117- ``src_addr_widths``:
118 should contain a bitmask of the supported source transfer width 118 should contain a bitmask of the supported source transfer width
119 119
120- dst_addr_widths: 120- ``dst_addr_widths``:
121 should contain a bitmask of the supported destination transfer width 121 should contain a bitmask of the supported destination transfer width
122 122
123- directions: 123- ``directions``:
124 should contain a bitmask of the supported slave directions 124 should contain a bitmask of the supported slave directions
125 (i.e. excluding mem2mem transfers) 125 (i.e. excluding mem2mem transfers)
126 126
127- residue_granularity: 127- ``residue_granularity``:
128 granularity of the transfer residue reported to dma_set_residue.
129 This can be either:
128 130
129 - Granularity of the transfer residue reported to dma_set_residue. 131 - Descriptor:
130 This can be either: 132 your device doesn't support any kind of residue
133 reporting. The framework will only know that a particular
134 transaction descriptor is done.
131 135
132 - Descriptor 136 - Segment:
137 your device is able to report which chunks have been transferred
133 138
134 - Your device doesn't support any kind of residue 139 - Burst:
135 reporting. The framework will only know that a particular 140 your device is able to report which burst have been transferred
136 transaction descriptor is done.
137 141
138 - Segment 142- ``dev``: should hold the pointer to the ``struct device`` associated
139 143 to your current driver instance.
140 - Your device is able to report which chunks have been transferred
141
142 - Burst
143
144 - Your device is able to report which burst have been transferred
145
146 - dev: should hold the pointer to the ``struct device`` associated
147 to your current driver instance.
148 144
149Supported transaction types 145Supported transaction types
150--------------------------- 146---------------------------
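As a worked example of the fields documented above, a minimal, hypothetical probe-time initialization could look like the following (example_init_dma_device and the chosen buswidth/granularity values are illustrative, not taken from a specific driver):

	#include <linux/dmaengine.h>
	#include <linux/platform_device.h>

	static void example_init_dma_device(struct dma_device *dd,
					    struct platform_device *pdev)
	{
		/* the channel list the framework iterates over */
		INIT_LIST_HEAD(&dd->channels);

		/* bitmasks of supported transfer widths and directions */
		dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
		dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
		dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

		/* residue reported per completed burst, see the choices above */
		dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

		/* struct device of this driver instance */
		dd->dev = &pdev->dev;
	}
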
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 4d0979e02a28..f87ed3be779a 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -785,6 +785,24 @@ const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
785} 785}
786EXPORT_SYMBOL_GPL(acpi_match_device); 786EXPORT_SYMBOL_GPL(acpi_match_device);
787 787
788void *acpi_get_match_data(const struct device *dev)
789{
790 const struct acpi_device_id *match;
791
792 if (!dev->driver)
793 return NULL;
794
795 if (!dev->driver->acpi_match_table)
796 return NULL;
797
798 match = acpi_match_device(dev->driver->acpi_match_table, dev);
799 if (!match)
800 return NULL;
801
802 return (void *)match->driver_data;
803}
804EXPORT_SYMBOL_GPL(acpi_get_match_data);
805
788int acpi_match_device_ids(struct acpi_device *device, 806int acpi_match_device_ids(struct acpi_device *device,
789 const struct acpi_device_id *ids) 807 const struct acpi_device_id *ids)
790{ 808{
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index e26ea209b63e..466d1503aba0 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1271,9 +1271,17 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
1271 return 0; 1271 return 0;
1272} 1272}
1273 1273
1274static void *
1275acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
1276 const struct device *dev)
1277{
1278 return acpi_get_match_data(dev);
1279}
1280
1274#define DECLARE_ACPI_FWNODE_OPS(ops) \ 1281#define DECLARE_ACPI_FWNODE_OPS(ops) \
1275 const struct fwnode_operations ops = { \ 1282 const struct fwnode_operations ops = { \
1276 .device_is_available = acpi_fwnode_device_is_available, \ 1283 .device_is_available = acpi_fwnode_device_is_available, \
1284 .device_get_match_data = acpi_fwnode_device_get_match_data, \
1277 .property_present = acpi_fwnode_property_present, \ 1285 .property_present = acpi_fwnode_property_present, \
1278 .property_read_int_array = \ 1286 .property_read_int_array = \
1279 acpi_fwnode_property_read_int_array, \ 1287 acpi_fwnode_property_read_int_array, \
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 851b1b6596a4..09eaac9400ed 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -1340,3 +1340,10 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
1340 return fwnode_call_int_op(fwnode, graph_parse_endpoint, endpoint); 1340 return fwnode_call_int_op(fwnode, graph_parse_endpoint, endpoint);
1341} 1341}
1342EXPORT_SYMBOL(fwnode_graph_parse_endpoint); 1342EXPORT_SYMBOL(fwnode_graph_parse_endpoint);
1343
1344void *device_get_match_data(struct device *dev)
1345{
1346 return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data,
1347 dev);
1348}
1349EXPORT_SYMBOL_GPL(device_get_match_data);
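A hedged usage sketch for the new helper: the per-compatible driver data is returned whether the device was matched via DT or ACPI. The vendor compatible and EXAMPLE_* names below are made up; the hidma changes later in this series show the real in-tree user.

	#include <linux/mod_devicetable.h>
	#include <linux/of.h>
	#include <linux/property.h>

	enum example_cap { EXAMPLE_MSI_CAP = 1 };

	static const struct of_device_id example_match[] = {
		{ .compatible = "vendor,example-1.1",
		  .data = (void *)EXAMPLE_MSI_CAP },
		{ },
	};

	static bool example_has_msi(struct device *dev)
	{
		/* match data carries the capability flags set above */
		return (unsigned long)device_get_match_data(dev) &
		       EXAMPLE_MSI_CAP;
	}
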
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b52b0d55247e..97483df1f82e 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2182,7 +2182,7 @@ static int pl08x_terminate_all(struct dma_chan *chan)
2182 } 2182 }
2183 /* Dequeue jobs and free LLIs */ 2183 /* Dequeue jobs and free LLIs */
2184 if (plchan->at) { 2184 if (plchan->at) {
2185 pl08x_desc_free(&plchan->at->vd); 2185 vchan_terminate_vdesc(&plchan->at->vd);
2186 plchan->at = NULL; 2186 plchan->at = NULL;
2187 } 2187 }
2188 /* Dequeue jobs not yet fired as well */ 2188 /* Dequeue jobs not yet fired as well */
@@ -2193,6 +2193,13 @@ static int pl08x_terminate_all(struct dma_chan *chan)
2193 return 0; 2193 return 0;
2194} 2194}
2195 2195
2196static void pl08x_synchronize(struct dma_chan *chan)
2197{
2198 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
2199
2200 vchan_synchronize(&plchan->vc);
2201}
2202
2196static int pl08x_pause(struct dma_chan *chan) 2203static int pl08x_pause(struct dma_chan *chan)
2197{ 2204{
2198 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 2205 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
@@ -2773,6 +2780,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2773 pl08x->memcpy.device_pause = pl08x_pause; 2780 pl08x->memcpy.device_pause = pl08x_pause;
2774 pl08x->memcpy.device_resume = pl08x_resume; 2781 pl08x->memcpy.device_resume = pl08x_resume;
2775 pl08x->memcpy.device_terminate_all = pl08x_terminate_all; 2782 pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
2783 pl08x->memcpy.device_synchronize = pl08x_synchronize;
2776 pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS; 2784 pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
2777 pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS; 2785 pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
2778 pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM); 2786 pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
@@ -2802,6 +2810,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2802 pl08x->slave.device_pause = pl08x_pause; 2810 pl08x->slave.device_pause = pl08x_pause;
2803 pl08x->slave.device_resume = pl08x_resume; 2811 pl08x->slave.device_resume = pl08x_resume;
2804 pl08x->slave.device_terminate_all = pl08x_terminate_all; 2812 pl08x->slave.device_terminate_all = pl08x_terminate_all;
2813 pl08x->slave.device_synchronize = pl08x_synchronize;
2805 pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS; 2814 pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
2806 pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS; 2815 pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
2807 pl08x->slave.directions = 2816 pl08x->slave.directions =
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 6204cc32d09c..847f84a41a69 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -812,7 +812,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
812 * c->desc is NULL and exit.) 812 * c->desc is NULL and exit.)
813 */ 813 */
814 if (c->desc) { 814 if (c->desc) {
815 bcm2835_dma_desc_free(&c->desc->vd); 815 vchan_terminate_vdesc(&c->desc->vd);
816 c->desc = NULL; 816 c->desc = NULL;
817 bcm2835_dma_abort(c->chan_base); 817 bcm2835_dma_abort(c->chan_base);
818 818
@@ -836,6 +836,13 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
836 return 0; 836 return 0;
837} 837}
838 838
839static void bcm2835_dma_synchronize(struct dma_chan *chan)
840{
841 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
842
843 vchan_synchronize(&c->vc);
844}
845
839static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, 846static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
840 int irq, unsigned int irq_flags) 847 int irq, unsigned int irq_flags)
841{ 848{
@@ -942,6 +949,7 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
942 od->ddev.device_prep_dma_memcpy = bcm2835_dma_prep_dma_memcpy; 949 od->ddev.device_prep_dma_memcpy = bcm2835_dma_prep_dma_memcpy;
943 od->ddev.device_config = bcm2835_dma_slave_config; 950 od->ddev.device_config = bcm2835_dma_slave_config;
944 od->ddev.device_terminate_all = bcm2835_dma_terminate_all; 951 od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
952 od->ddev.device_synchronize = bcm2835_dma_synchronize;
945 od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); 953 od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
946 od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); 954 od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
947 od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | 955 od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index f7e965f63274..d9bee65a18a4 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -934,7 +934,7 @@ static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param)
934 934
935 BUILD_BUG_ON(ARRAY_SIZE(am335x_usb_queues_rx) != 935 BUILD_BUG_ON(ARRAY_SIZE(am335x_usb_queues_rx) !=
936 ARRAY_SIZE(am335x_usb_queues_tx)); 936 ARRAY_SIZE(am335x_usb_queues_tx));
937 if (WARN_ON(cchan->port_num > ARRAY_SIZE(am335x_usb_queues_rx))) 937 if (WARN_ON(cchan->port_num >= ARRAY_SIZE(am335x_usb_queues_rx)))
938 return false; 938 return false;
939 939
940 cchan->q_num = queues[cchan->port_num].submit; 940 cchan->q_num = queues[cchan->port_num].submit;
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 7373b7a555ec..85820a2d69d4 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -511,7 +511,7 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
511 /* Clear the DMA status and stop the transfer. */ 511 /* Clear the DMA status and stop the transfer. */
512 jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0); 512 jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
513 if (jzchan->desc) { 513 if (jzchan->desc) {
514 jz4780_dma_desc_free(&jzchan->desc->vdesc); 514 vchan_terminate_vdesc(&jzchan->desc->vdesc);
515 jzchan->desc = NULL; 515 jzchan->desc = NULL;
516 } 516 }
517 517
@@ -523,6 +523,13 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
523 return 0; 523 return 0;
524} 524}
525 525
526static void jz4780_dma_synchronize(struct dma_chan *chan)
527{
528 struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
529
530 vchan_synchronize(&jzchan->vchan);
531}
532
526static int jz4780_dma_config(struct dma_chan *chan, 533static int jz4780_dma_config(struct dma_chan *chan,
527 struct dma_slave_config *config) 534 struct dma_slave_config *config)
528{ 535{
@@ -813,6 +820,7 @@ static int jz4780_dma_probe(struct platform_device *pdev)
813 dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy; 820 dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
814 dd->device_config = jz4780_dma_config; 821 dd->device_config = jz4780_dma_config;
815 dd->device_terminate_all = jz4780_dma_terminate_all; 822 dd->device_terminate_all = jz4780_dma_terminate_all;
823 dd->device_synchronize = jz4780_dma_synchronize;
816 dd->device_tx_status = jz4780_dma_tx_status; 824 dd->device_tx_status = jz4780_dma_tx_status;
817 dd->device_issue_pending = jz4780_dma_issue_pending; 825 dd->device_issue_pending = jz4780_dma_issue_pending;
818 dd->src_addr_widths = JZ_DMA_BUSWIDTHS; 826 dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index ec5f9d2bc820..80cc2be6483c 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -355,7 +355,7 @@ static void dmatest_callback(void *arg)
355{ 355{
356 struct dmatest_done *done = arg; 356 struct dmatest_done *done = arg;
357 struct dmatest_thread *thread = 357 struct dmatest_thread *thread =
358 container_of(arg, struct dmatest_thread, done_wait); 358 container_of(done, struct dmatest_thread, test_done);
359 if (!thread->done) { 359 if (!thread->done) {
360 done->done = true; 360 done->done = true;
361 wake_up_all(done->wait); 361 wake_up_all(done->wait);
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 9364a3ed345a..948df1ab5f1a 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -860,11 +860,8 @@ static int edma_terminate_all(struct dma_chan *chan)
860 /* Move the cyclic channel back to default queue */ 860 /* Move the cyclic channel back to default queue */
861 if (!echan->tc && echan->edesc->cyclic) 861 if (!echan->tc && echan->edesc->cyclic)
862 edma_assign_channel_eventq(echan, EVENTQ_DEFAULT); 862 edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
863 /* 863
864 * free the running request descriptor 864 vchan_terminate_vdesc(&echan->edesc->vdesc);
865 * since it is not in any of the vdesc lists
866 */
867 edma_desc_free(&echan->edesc->vdesc);
868 echan->edesc = NULL; 865 echan->edesc = NULL;
869 } 866 }
870 867
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 0391f930aecc..25cec9c243e1 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -694,7 +694,6 @@ static unsigned int mdc_get_new_events(struct mdc_chan *mchan)
694static int mdc_terminate_all(struct dma_chan *chan) 694static int mdc_terminate_all(struct dma_chan *chan)
695{ 695{
696 struct mdc_chan *mchan = to_mdc_chan(chan); 696 struct mdc_chan *mchan = to_mdc_chan(chan);
697 struct mdc_tx_desc *mdesc;
698 unsigned long flags; 697 unsigned long flags;
699 LIST_HEAD(head); 698 LIST_HEAD(head);
700 699
@@ -703,21 +702,28 @@ static int mdc_terminate_all(struct dma_chan *chan)
703 mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL, 702 mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL,
704 MDC_CONTROL_AND_STATUS); 703 MDC_CONTROL_AND_STATUS);
705 704
706 mdesc = mchan->desc; 705 if (mchan->desc) {
707 mchan->desc = NULL; 706 vchan_terminate_vdesc(&mchan->desc->vd);
707 mchan->desc = NULL;
708 }
708 vchan_get_all_descriptors(&mchan->vc, &head); 709 vchan_get_all_descriptors(&mchan->vc, &head);
709 710
710 mdc_get_new_events(mchan); 711 mdc_get_new_events(mchan);
711 712
712 spin_unlock_irqrestore(&mchan->vc.lock, flags); 713 spin_unlock_irqrestore(&mchan->vc.lock, flags);
713 714
714 if (mdesc)
715 mdc_desc_free(&mdesc->vd);
716 vchan_dma_desc_free_list(&mchan->vc, &head); 715 vchan_dma_desc_free_list(&mchan->vc, &head);
717 716
718 return 0; 717 return 0;
719} 718}
720 719
720static void mdc_synchronize(struct dma_chan *chan)
721{
722 struct mdc_chan *mchan = to_mdc_chan(chan);
723
724 vchan_synchronize(&mchan->vc);
725}
726
721static int mdc_slave_config(struct dma_chan *chan, 727static int mdc_slave_config(struct dma_chan *chan,
722 struct dma_slave_config *config) 728 struct dma_slave_config *config)
723{ 729{
@@ -952,6 +958,7 @@ static int mdc_dma_probe(struct platform_device *pdev)
952 mdma->dma_dev.device_tx_status = mdc_tx_status; 958 mdma->dma_dev.device_tx_status = mdc_tx_status;
953 mdma->dma_dev.device_issue_pending = mdc_issue_pending; 959 mdma->dma_dev.device_issue_pending = mdc_issue_pending;
954 mdma->dma_dev.device_terminate_all = mdc_terminate_all; 960 mdma->dma_dev.device_terminate_all = mdc_terminate_all;
961 mdma->dma_dev.device_synchronize = mdc_synchronize;
955 mdma->dma_dev.device_config = mdc_slave_config; 962 mdma->dma_dev.device_config = mdc_slave_config;
956 963
957 mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 964 mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 2184881afe76..e7db24c67030 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1939,4 +1939,10 @@ module_platform_driver(sdma_driver);
1939 1939
1940MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); 1940MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
1941MODULE_DESCRIPTION("i.MX SDMA driver"); 1941MODULE_DESCRIPTION("i.MX SDMA driver");
1942#if IS_ENABLED(CONFIG_SOC_IMX6Q)
1943MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
1944#endif
1945#if IS_ENABLED(CONFIG_SOC_IMX7D)
1946MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
1947#endif
1942MODULE_LICENSE("GPL"); 1948MODULE_LICENSE("GPL");
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 01d2a750a621..26b67455208f 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -719,7 +719,7 @@ static int k3_dma_terminate_all(struct dma_chan *chan)
719 c->phy = NULL; 719 c->phy = NULL;
720 p->vchan = NULL; 720 p->vchan = NULL;
721 if (p->ds_run) { 721 if (p->ds_run) {
722 k3_dma_free_desc(&p->ds_run->vd); 722 vchan_terminate_vdesc(&p->ds_run->vd);
723 p->ds_run = NULL; 723 p->ds_run = NULL;
724 } 724 }
725 p->ds_done = NULL; 725 p->ds_done = NULL;
@@ -730,6 +730,13 @@ static int k3_dma_terminate_all(struct dma_chan *chan)
730 return 0; 730 return 0;
731} 731}
732 732
733static void k3_dma_synchronize(struct dma_chan *chan)
734{
735 struct k3_dma_chan *c = to_k3_chan(chan);
736
737 vchan_synchronize(&c->vc);
738}
739
733static int k3_dma_transfer_pause(struct dma_chan *chan) 740static int k3_dma_transfer_pause(struct dma_chan *chan)
734{ 741{
735 struct k3_dma_chan *c = to_k3_chan(chan); 742 struct k3_dma_chan *c = to_k3_chan(chan);
@@ -868,6 +875,7 @@ static int k3_dma_probe(struct platform_device *op)
868 d->slave.device_pause = k3_dma_transfer_pause; 875 d->slave.device_pause = k3_dma_transfer_pause;
869 d->slave.device_resume = k3_dma_transfer_resume; 876 d->slave.device_resume = k3_dma_transfer_resume;
870 d->slave.device_terminate_all = k3_dma_terminate_all; 877 d->slave.device_terminate_all = k3_dma_terminate_all;
878 d->slave.device_synchronize = k3_dma_synchronize;
871 d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES; 879 d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;
872 880
873 /* init virtual channel */ 881 /* init virtual channel */
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 5ba5714d0b7c..94d7bd7d2880 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -480,9 +480,7 @@ static int mic_dma_setup_irq(struct mic_dma_chan *ch)
480 to_mbus_hw_ops(ch)->request_threaded_irq(to_mbus_device(ch), 480 to_mbus_hw_ops(ch)->request_threaded_irq(to_mbus_device(ch),
481 mic_dma_intr_handler, mic_dma_thread_fn, 481 mic_dma_intr_handler, mic_dma_thread_fn,
482 "mic dma_channel", ch, ch->ch_num); 482 "mic dma_channel", ch, ch->ch_num);
483 if (IS_ERR(ch->cookie)) 483 return PTR_ERR_OR_ZERO(ch->cookie);
484 return PTR_ERR(ch->cookie);
485 return 0;
486} 484}
487 485
488static inline void mic_dma_free_irq(struct mic_dma_chan *ch) 486static inline void mic_dma_free_irq(struct mic_dma_chan *ch)
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index f6dd849159d8..d21c19822feb 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -1311,7 +1311,7 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
1311 * c->desc is NULL and exit.) 1311 * c->desc is NULL and exit.)
1312 */ 1312 */
1313 if (c->desc) { 1313 if (c->desc) {
1314 omap_dma_desc_free(&c->desc->vd); 1314 vchan_terminate_vdesc(&c->desc->vd);
1315 c->desc = NULL; 1315 c->desc = NULL;
1316 /* Avoid stopping the dma twice */ 1316 /* Avoid stopping the dma twice */
1317 if (!c->paused) 1317 if (!c->paused)
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index e3669850aef4..963cc5228d05 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -50,6 +50,7 @@
50#include <linux/slab.h> 50#include <linux/slab.h>
51#include <linux/spinlock.h> 51#include <linux/spinlock.h>
52#include <linux/of_dma.h> 52#include <linux/of_dma.h>
53#include <linux/of_device.h>
53#include <linux/property.h> 54#include <linux/property.h>
54#include <linux/delay.h> 55#include <linux/delay.h>
55#include <linux/acpi.h> 56#include <linux/acpi.h>
@@ -104,6 +105,10 @@ static unsigned int nr_desc_prm;
104module_param(nr_desc_prm, uint, 0644); 105module_param(nr_desc_prm, uint, 0644);
105MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)"); 106MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");
106 107
108enum hidma_cap {
109 HIDMA_MSI_CAP = 1,
110 HIDMA_IDENTITY_CAP,
111};
107 112
108/* process completed descriptors */ 113/* process completed descriptors */
109static void hidma_process_completed(struct hidma_chan *mchan) 114static void hidma_process_completed(struct hidma_chan *mchan)
@@ -736,25 +741,12 @@ static int hidma_request_msi(struct hidma_dev *dmadev,
736#endif 741#endif
737} 742}
738 743
739static bool hidma_msi_capable(struct device *dev) 744static bool hidma_test_capability(struct device *dev, enum hidma_cap test_cap)
740{ 745{
741 struct acpi_device *adev = ACPI_COMPANION(dev); 746 enum hidma_cap cap;
742 const char *of_compat;
743 int ret = -EINVAL;
744
745 if (!adev || acpi_disabled) {
746 ret = device_property_read_string(dev, "compatible",
747 &of_compat);
748 if (ret)
749 return false;
750 747
751 ret = strcmp(of_compat, "qcom,hidma-1.1"); 748 cap = (enum hidma_cap) device_get_match_data(dev);
752 } else { 749 return cap ? ((cap & test_cap) > 0) : 0;
753#ifdef CONFIG_ACPI
754 ret = strcmp(acpi_device_hid(adev), "QCOM8062");
755#endif
756 }
757 return ret == 0;
758} 750}
759 751
760static int hidma_probe(struct platform_device *pdev) 752static int hidma_probe(struct platform_device *pdev)
@@ -834,8 +826,7 @@ static int hidma_probe(struct platform_device *pdev)
834 * Determine the MSI capability of the platform. Old HW doesn't 826 * Determine the MSI capability of the platform. Old HW doesn't
835 * support MSI. 827 * support MSI.
836 */ 828 */
837 msi = hidma_msi_capable(&pdev->dev); 829 msi = hidma_test_capability(&pdev->dev, HIDMA_MSI_CAP);
838
839 device_property_read_u32(&pdev->dev, "desc-count", 830 device_property_read_u32(&pdev->dev, "desc-count",
840 &dmadev->nr_descriptors); 831 &dmadev->nr_descriptors);
841 832
@@ -848,7 +839,10 @@ static int hidma_probe(struct platform_device *pdev)
848 if (!dmadev->nr_descriptors) 839 if (!dmadev->nr_descriptors)
849 dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC; 840 dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;
850 841
851 dmadev->chidx = readl(dmadev->dev_trca + 0x28); 842 if (hidma_test_capability(&pdev->dev, HIDMA_IDENTITY_CAP))
843 dmadev->chidx = readl(dmadev->dev_trca + 0x40);
844 else
845 dmadev->chidx = readl(dmadev->dev_trca + 0x28);
852 846
853 /* Set DMA mask to 64 bits. */ 847 /* Set DMA mask to 64 bits. */
854 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 848 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
@@ -953,7 +947,8 @@ static int hidma_remove(struct platform_device *pdev)
953#if IS_ENABLED(CONFIG_ACPI) 947#if IS_ENABLED(CONFIG_ACPI)
954static const struct acpi_device_id hidma_acpi_ids[] = { 948static const struct acpi_device_id hidma_acpi_ids[] = {
955 {"QCOM8061"}, 949 {"QCOM8061"},
956 {"QCOM8062"}, 950 {"QCOM8062", HIDMA_MSI_CAP},
951 {"QCOM8063", (HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP)},
957 {}, 952 {},
958}; 953};
959MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids); 954MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
@@ -961,7 +956,9 @@ MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
961 956
962static const struct of_device_id hidma_match[] = { 957static const struct of_device_id hidma_match[] = {
963 {.compatible = "qcom,hidma-1.0",}, 958 {.compatible = "qcom,hidma-1.0",},
964 {.compatible = "qcom,hidma-1.1",}, 959 {.compatible = "qcom,hidma-1.1", .data = (void *)(HIDMA_MSI_CAP),},
960 {.compatible = "qcom,hidma-1.2",
961 .data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),},
965 {}, 962 {},
966}; 963};
967MODULE_DEVICE_TABLE(of, hidma_match); 964MODULE_DEVICE_TABLE(of, hidma_match);
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index 4999e266b2de..7c6e2ff212a2 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -393,6 +393,8 @@ static int hidma_ll_reset(struct hidma_lldev *lldev)
393 */ 393 */
394static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause) 394static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
395{ 395{
396 unsigned long irqflags;
397
396 if (cause & HIDMA_ERR_INT_MASK) { 398 if (cause & HIDMA_ERR_INT_MASK) {
397 dev_err(lldev->dev, "error 0x%x, disabling...\n", 399 dev_err(lldev->dev, "error 0x%x, disabling...\n",
398 cause); 400 cause);
@@ -410,6 +412,10 @@ static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
410 return; 412 return;
411 } 413 }
412 414
415 spin_lock_irqsave(&lldev->lock, irqflags);
416 writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
417 spin_unlock_irqrestore(&lldev->lock, irqflags);
418
413 /* 419 /*
414 * Fine tuned for this HW... 420 * Fine tuned for this HW...
415 * 421 *
@@ -421,9 +427,6 @@ static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
421 * Try to consume as many EVREs as possible. 427 * Try to consume as many EVREs as possible.
422 */ 428 */
423 hidma_handle_tre_completion(lldev); 429 hidma_handle_tre_completion(lldev);
424
425 /* We consumed TREs or there are pending TREs or EVREs. */
426 writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
427} 430}
428 431
429irqreturn_t hidma_ll_inthandler(int chirq, void *arg) 432irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 7335e2eb9b72..000c7019ca7d 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -17,6 +17,7 @@
17#include <linux/acpi.h> 17#include <linux/acpi.h>
18#include <linux/of.h> 18#include <linux/of.h>
19#include <linux/property.h> 19#include <linux/property.h>
20#include <linux/of_address.h>
20#include <linux/of_irq.h> 21#include <linux/of_irq.h>
21#include <linux/of_platform.h> 22#include <linux/of_platform.h>
22#include <linux/module.h> 23#include <linux/module.h>
@@ -356,67 +357,37 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
356{ 357{
357 struct platform_device *pdev_parent = of_find_device_by_node(np); 358 struct platform_device *pdev_parent = of_find_device_by_node(np);
358 struct platform_device_info pdevinfo; 359 struct platform_device_info pdevinfo;
359 struct of_phandle_args out_irq;
360 struct device_node *child; 360 struct device_node *child;
361 struct resource *res = NULL; 361 struct resource *res;
362 const __be32 *cell; 362 int ret = 0;
363 int ret = 0, size, i, num; 363
364 u64 addr, addr_size; 364 /* allocate a resource array */
365 res = kcalloc(3, sizeof(*res), GFP_KERNEL);
366 if (!res)
367 return -ENOMEM;
365 368
366 for_each_available_child_of_node(np, child) { 369 for_each_available_child_of_node(np, child) {
367 struct resource *res_iter;
368 struct platform_device *new_pdev; 370 struct platform_device *new_pdev;
369 371
370 cell = of_get_property(child, "reg", &size); 372 ret = of_address_to_resource(child, 0, &res[0]);
371 if (!cell) { 373 if (!ret)
372 ret = -EINVAL;
373 goto out; 374 goto out;
374 }
375
376 size /= sizeof(*cell);
377 num = size /
378 (of_n_addr_cells(child) + of_n_size_cells(child)) + 1;
379 375
380 /* allocate a resource array */ 376 ret = of_address_to_resource(child, 1, &res[1]);
381 res = kcalloc(num, sizeof(*res), GFP_KERNEL); 377 if (!ret)
382 if (!res) {
383 ret = -ENOMEM;
384 goto out; 378 goto out;
385 }
386
387 /* read each reg value */
388 i = 0;
389 res_iter = res;
390 while (i < size) {
391 addr = of_read_number(&cell[i],
392 of_n_addr_cells(child));
393 i += of_n_addr_cells(child);
394
395 addr_size = of_read_number(&cell[i],
396 of_n_size_cells(child));
397 i += of_n_size_cells(child);
398
399 res_iter->start = addr;
400 res_iter->end = res_iter->start + addr_size - 1;
401 res_iter->flags = IORESOURCE_MEM;
402 res_iter++;
403 }
404 379
405 ret = of_irq_parse_one(child, 0, &out_irq); 380 ret = of_irq_to_resource(child, 0, &res[2]);
406 if (ret) 381 if (ret <= 0)
407 goto out; 382 goto out;
408 383
409 res_iter->start = irq_create_of_mapping(&out_irq);
410 res_iter->name = "hidma event irq";
411 res_iter->flags = IORESOURCE_IRQ;
412
413 memset(&pdevinfo, 0, sizeof(pdevinfo)); 384 memset(&pdevinfo, 0, sizeof(pdevinfo));
414 pdevinfo.fwnode = &child->fwnode; 385 pdevinfo.fwnode = &child->fwnode;
415 pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL; 386 pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL;
416 pdevinfo.name = child->name; 387 pdevinfo.name = child->name;
417 pdevinfo.id = object_counter++; 388 pdevinfo.id = object_counter++;
418 pdevinfo.res = res; 389 pdevinfo.res = res;
419 pdevinfo.num_res = num; 390 pdevinfo.num_res = 3;
420 pdevinfo.data = NULL; 391 pdevinfo.data = NULL;
421 pdevinfo.size_data = 0; 392 pdevinfo.size_data = 0;
422 pdevinfo.dma_mask = DMA_BIT_MASK(64); 393 pdevinfo.dma_mask = DMA_BIT_MASK(64);
@@ -434,8 +405,6 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
434 */ 405 */
435 of_msi_configure(&new_pdev->dev, child); 406 of_msi_configure(&new_pdev->dev, child);
436 of_node_put(child); 407 of_node_put(child);
437 kfree(res);
438 res = NULL;
439 } 408 }
440out: 409out:
441 kfree(res); 410 kfree(res);
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index f04c4702d98b..cd92d696bcf9 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -732,7 +732,7 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
732 732
733 /* Dequeue current job */ 733 /* Dequeue current job */
734 if (s3cchan->at) { 734 if (s3cchan->at) {
735 s3c24xx_dma_desc_free(&s3cchan->at->vd); 735 vchan_terminate_vdesc(&s3cchan->at->vd);
736 s3cchan->at = NULL; 736 s3cchan->at = NULL;
737 } 737 }
738 738
@@ -744,6 +744,13 @@ unlock:
744 return ret; 744 return ret;
745} 745}
746 746
747static void s3c24xx_dma_synchronize(struct dma_chan *chan)
748{
749 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
750
751 vchan_synchronize(&s3cchan->vc);
752}
753
747static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan) 754static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
748{ 755{
749 /* Ensure all queued descriptors are freed */ 756 /* Ensure all queued descriptors are freed */
@@ -1282,6 +1289,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
1282 s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending; 1289 s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
1283 s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config; 1290 s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config;
1284 s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all; 1291 s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all;
1292 s3cdma->memcpy.device_synchronize = s3c24xx_dma_synchronize;
1285 1293
1286 /* Initialize slave engine for SoC internal dedicated peripherals */ 1294 /* Initialize slave engine for SoC internal dedicated peripherals */
1287 dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask); 1295 dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
@@ -1296,6 +1304,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
1296 s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic; 1304 s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
1297 s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config; 1305 s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
1298 s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all; 1306 s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;
1307 s3cdma->slave.device_synchronize = s3c24xx_dma_synchronize;
1299 s3cdma->slave.filter.map = pdata->slave_map; 1308 s3cdma->slave.filter.map = pdata->slave_map;
1300 s3cdma->slave.filter.mapcnt = pdata->slavecnt; 1309 s3cdma->slave.filter.mapcnt = pdata->slavecnt;
1301 s3cdma->slave.filter.fn = s3c24xx_dma_filter; 1310 s3cdma->slave.filter.fn = s3c24xx_dma_filter;
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 35c3936edc45..e3ff162c03fc 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -10,6 +10,7 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/delay.h>
13#include <linux/dma-mapping.h> 14#include <linux/dma-mapping.h>
14#include <linux/dmaengine.h> 15#include <linux/dmaengine.h>
15#include <linux/interrupt.h> 16#include <linux/interrupt.h>
@@ -741,6 +742,41 @@ static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
741/* ----------------------------------------------------------------------------- 742/* -----------------------------------------------------------------------------
742 * Stop and reset 743 * Stop and reset
743 */ 744 */
745static void rcar_dmac_chcr_de_barrier(struct rcar_dmac_chan *chan)
746{
747 u32 chcr;
748 unsigned int i;
749
750 /*
751 * Ensure that the setting of the DE bit is actually 0 after
752 * clearing it.
753 */
754 for (i = 0; i < 1024; i++) {
755 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
756 if (!(chcr & RCAR_DMACHCR_DE))
757 return;
758 udelay(1);
759 }
760
761 dev_err(chan->chan.device->dev, "CHCR DE check error\n");
762}
763
764static void rcar_dmac_sync_tcr(struct rcar_dmac_chan *chan)
765{
766 u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
767
768 if (!(chcr & RCAR_DMACHCR_DE))
769 return;
770
771 /* set DE=0 and flush remaining data */
772 rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE));
773
774 /* make sure all remaining data was flushed */
775 rcar_dmac_chcr_de_barrier(chan);
776
777 /* back DE */
778 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
779}
744 780
745static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan) 781static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
746{ 782{
@@ -749,6 +785,7 @@ static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
749 chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE | 785 chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
750 RCAR_DMACHCR_TE | RCAR_DMACHCR_DE); 786 RCAR_DMACHCR_TE | RCAR_DMACHCR_DE);
751 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr); 787 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
788 rcar_dmac_chcr_de_barrier(chan);
752} 789}
753 790
754static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan) 791static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
@@ -1309,8 +1346,11 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
1309 residue += chunk->size; 1346 residue += chunk->size;
1310 } 1347 }
1311 1348
1349 if (desc->direction == DMA_DEV_TO_MEM)
1350 rcar_dmac_sync_tcr(chan);
1351
1312 /* Add the residue for the current chunk. */ 1352 /* Add the residue for the current chunk. */
1313 residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift; 1353 residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;
1314 1354
1315 return residue; 1355 return residue;
1316} 1356}
@@ -1481,6 +1521,8 @@ static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
1481 if (chcr & RCAR_DMACHCR_TE) 1521 if (chcr & RCAR_DMACHCR_TE)
1482 mask |= RCAR_DMACHCR_DE; 1522 mask |= RCAR_DMACHCR_DE;
1483 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask); 1523 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
1524 if (mask & RCAR_DMACHCR_DE)
1525 rcar_dmac_chcr_de_barrier(chan);
1484 1526
1485 if (chcr & RCAR_DMACHCR_DSE) 1527 if (chcr & RCAR_DMACHCR_DSE)
1486 ret |= rcar_dmac_isr_desc_stage_end(chan); 1528 ret |= rcar_dmac_isr_desc_stage_end(chan);
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index b652071a2096..b106e8a60af6 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -710,7 +710,7 @@ static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
710 return 0; 710 return 0;
711} 711}
712 712
713struct dma_async_tx_descriptor * 713static struct dma_async_tx_descriptor *
714sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 714sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
715 size_t len, unsigned long flags) 715 size_t len, unsigned long flags)
716{ 716{
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index d5db0f6e1ff8..4dbb30cf94ac 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -253,9 +253,6 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
253 } 253 }
254 254
255 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 255 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
256 if (!res)
257 return -ENODEV;
258
259 iomem = devm_ioremap_resource(&pdev->dev, res); 256 iomem = devm_ioremap_resource(&pdev->dev, res);
260 if (IS_ERR(iomem)) 257 if (IS_ERR(iomem))
261 return PTR_ERR(iomem); 258 return PTR_ERR(iomem);
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index b9d75a54c896..9a558e30c461 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -353,7 +353,8 @@ static int tegra_dma_slave_config(struct dma_chan *dc,
353 } 353 }
354 354
355 memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig)); 355 memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
356 if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID) { 356 if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID &&
357 sconfig->device_fc) {
357 if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK) 358 if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK)
358 return -EINVAL; 359 return -EINVAL;
359 tdc->slave_id = sconfig->slave_id; 360 tdc->slave_id = sconfig->slave_id;
@@ -970,8 +971,13 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
970 TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT; 971 TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
971 ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; 972 ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
972 973
973 csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW; 974 csr |= TEGRA_APBDMA_CSR_ONCE;
974 csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; 975
976 if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
977 csr |= TEGRA_APBDMA_CSR_FLOW;
978 csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
979 }
980
975 if (flags & DMA_PREP_INTERRUPT) 981 if (flags & DMA_PREP_INTERRUPT)
976 csr |= TEGRA_APBDMA_CSR_IE_EOC; 982 csr |= TEGRA_APBDMA_CSR_IE_EOC;
977 983
@@ -1110,10 +1116,13 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
1110 TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT; 1116 TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
1111 ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; 1117 ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
1112 1118
1113 csr |= TEGRA_APBDMA_CSR_FLOW; 1119 if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
1120 csr |= TEGRA_APBDMA_CSR_FLOW;
1121 csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
1122 }
1123
1114 if (flags & DMA_PREP_INTERRUPT) 1124 if (flags & DMA_PREP_INTERRUPT)
1115 csr |= TEGRA_APBDMA_CSR_IE_EOC; 1125 csr |= TEGRA_APBDMA_CSR_IE_EOC;
1116 csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
1117 1126
1118 apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; 1127 apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
1119 1128
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 7df910e7c348..9272b173c746 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -54,7 +54,15 @@ struct ti_am335x_xbar_map {
54 54
55static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val) 55static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
56{ 56{
57 writeb_relaxed(val, iomem + event); 57 /*
58 * TPCC_EVT_MUX_60_63 register layout is different than the
59 * rest, in the sense, that event 63 is mapped to lowest byte
60 * and event 60 is mapped to highest, handle it separately.
61 */
62 if (event >= 60 && event <= 63)
63 writeb_relaxed(val, iomem + (63 - event % 4));
64 else
65 writeb_relaxed(val, iomem + event);
58} 66}
59 67
60static void ti_am335x_xbar_free(struct device *dev, void *route_data) 68static void ti_am335x_xbar_free(struct device *dev, void *route_data)
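To make the swizzle above concrete, these are the byte offsets the new expression (63 - event % 4) yields for the TPCC_EVT_MUX_60_63 window (worked from the patch, not additional documentation):

	event 60 -> iomem + 63
	event 61 -> iomem + 62
	event 62 -> iomem + 61
	event 63 -> iomem + 60
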
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 896bafb7a532..395c698edb4d 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -422,7 +422,7 @@ static int td_alloc_chan_resources(struct dma_chan *chan)
422 break; 422 break;
423 else { 423 else {
424 dev_err(chan2dev(chan), 424 dev_err(chan2dev(chan),
425 "Couldnt allocate any descriptors\n"); 425 "Couldn't allocate any descriptors\n");
426 return -ENOMEM; 426 return -ENOMEM;
427 } 427 }
428 } 428 }
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 545e97279083..88ad8ed2a8d6 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -107,10 +107,7 @@ static void vchan_complete(unsigned long arg)
107 dmaengine_desc_get_callback(&vd->tx, &cb); 107 dmaengine_desc_get_callback(&vd->tx, &cb);
108 108
109 list_del(&vd->node); 109 list_del(&vd->node);
110 if (dmaengine_desc_test_reuse(&vd->tx)) 110 vchan_vdesc_fini(vd);
111 list_add(&vd->node, &vc->desc_allocated);
112 else
113 vc->desc_free(vd);
114 111
115 dmaengine_desc_callback_invoke(&cb, NULL); 112 dmaengine_desc_callback_invoke(&cb, NULL);
116 } 113 }
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 3f776a46a29c..b09b75ab0751 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -35,6 +35,7 @@ struct virt_dma_chan {
35 struct list_head desc_completed; 35 struct list_head desc_completed;
36 36
37 struct virt_dma_desc *cyclic; 37 struct virt_dma_desc *cyclic;
38 struct virt_dma_desc *vd_terminated;
38}; 39};
39 40
40static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan) 41static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
@@ -104,6 +105,20 @@ static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
104} 105}
105 106
106/** 107/**
108 * vchan_vdesc_fini - Free or reuse a descriptor
109 * @vd: virtual descriptor to free/reuse
110 */
111static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
112{
113 struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
114
115 if (dmaengine_desc_test_reuse(&vd->tx))
116 list_add(&vd->node, &vc->desc_allocated);
117 else
118 vc->desc_free(vd);
119}
120
121/**
107 * vchan_cyclic_callback - report the completion of a period 122 * vchan_cyclic_callback - report the completion of a period
108 * @vd: virtual descriptor 123 * @vd: virtual descriptor
109 */ 124 */
@@ -116,6 +131,25 @@ static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
116} 131}
117 132
118/** 133/**
134 * vchan_terminate_vdesc - Disable pending cyclic callback
135 * @vd: virtual descriptor to be terminated
136 *
137 * vc.lock must be held by caller
138 */
139static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
140{
141 struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
142
143 /* free up stuck descriptor */
144 if (vc->vd_terminated)
145 vchan_vdesc_fini(vc->vd_terminated);
146
147 vc->vd_terminated = vd;
148 if (vc->cyclic == vd)
149 vc->cyclic = NULL;
150}
151
152/**
119 * vchan_next_desc - peek at the next descriptor to be processed 153 * vchan_next_desc - peek at the next descriptor to be processed
120 * @vc: virtual channel to obtain descriptor from 154 * @vc: virtual channel to obtain descriptor from
121 * 155 *
@@ -168,10 +202,20 @@ static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
168 * Makes sure that all scheduled or active callbacks have finished running. For 202 * Makes sure that all scheduled or active callbacks have finished running. For
169 * proper operation the caller has to ensure that no new callbacks are scheduled 203 * proper operation the caller has to ensure that no new callbacks are scheduled
170 * after the invocation of this function started. 204 * after the invocation of this function started.
205 * Free up the terminated cyclic descriptor to prevent memory leakage.
171 */ 206 */
172static inline void vchan_synchronize(struct virt_dma_chan *vc) 207static inline void vchan_synchronize(struct virt_dma_chan *vc)
173{ 208{
209 unsigned long flags;
210
174 tasklet_kill(&vc->task); 211 tasklet_kill(&vc->task);
212
213 spin_lock_irqsave(&vc->lock, flags);
214 if (vc->vd_terminated) {
215 vchan_vdesc_fini(vc->vd_terminated);
216 vc->vd_terminated = NULL;
217 }
218 spin_unlock_irqrestore(&vc->lock, flags);
175} 219}
176 220
177#endif 221#endif
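The per-driver pattern these helpers enable (and which the pl08x, bcm2835, jz4780, k3dma, omap and s3c24xx hunks above follow) can be sketched as below; my_chan, my_terminate_all and my_synchronize are placeholder names, not code from this series:

	#include "virt-dma.h"

	struct my_chan {
		struct virt_dma_chan vc;
		struct virt_dma_desc *at;	/* descriptor on the hardware */
	};

	static int my_terminate_all(struct dma_chan *chan)
	{
		struct my_chan *mc =
			container_of(to_virt_chan(chan), struct my_chan, vc);
		unsigned long flags;
		LIST_HEAD(head);

		spin_lock_irqsave(&mc->vc.lock, flags);
		/* stop the hardware here, then defer freeing the running desc */
		if (mc->at) {
			vchan_terminate_vdesc(mc->at);	/* vc.lock held */
			mc->at = NULL;
		}
		vchan_get_all_descriptors(&mc->vc, &head);
		spin_unlock_irqrestore(&mc->vc.lock, flags);

		vchan_dma_desc_free_list(&mc->vc, &head);
		return 0;
	}

	static void my_synchronize(struct dma_chan *chan)
	{
		/* kills the tasklet and frees the descriptor parked above */
		vchan_synchronize(to_virt_chan(chan));
	}
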
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 5eef13380ca8..27b523530c4a 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -99,7 +99,9 @@
99#define XILINX_DMA_REG_FRMPTR_STS 0x0024 99#define XILINX_DMA_REG_FRMPTR_STS 0x0024
100#define XILINX_DMA_REG_PARK_PTR 0x0028 100#define XILINX_DMA_REG_PARK_PTR 0x0028
101#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8 101#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
102#define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
102#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0 103#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
104#define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
103#define XILINX_DMA_REG_VDMA_VERSION 0x002c 105#define XILINX_DMA_REG_VDMA_VERSION 0x002c
104 106
105/* Register Direct Mode Registers */ 107/* Register Direct Mode Registers */
@@ -163,6 +165,7 @@
163#define XILINX_DMA_BD_SOP BIT(27) 165#define XILINX_DMA_BD_SOP BIT(27)
164#define XILINX_DMA_BD_EOP BIT(26) 166#define XILINX_DMA_BD_EOP BIT(26)
165#define XILINX_DMA_COALESCE_MAX 255 167#define XILINX_DMA_COALESCE_MAX 255
168#define XILINX_DMA_NUM_DESCS 255
166#define XILINX_DMA_NUM_APP_WORDS 5 169#define XILINX_DMA_NUM_APP_WORDS 5
167 170
168/* Multi-Channel DMA Descriptor offsets*/ 171/* Multi-Channel DMA Descriptor offsets*/
@@ -211,8 +214,8 @@ struct xilinx_vdma_desc_hw {
211 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 214 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
212 * @buf_addr: Buffer address @0x08 215 * @buf_addr: Buffer address @0x08
213 * @buf_addr_msb: MSB of Buffer address @0x0C 216 * @buf_addr_msb: MSB of Buffer address @0x0C
214 * @pad1: Reserved @0x10 217 * @mcdma_control: Control field for mcdma @0x10
215 * @pad2: Reserved @0x14 218 * @vsize_stride: Vsize and Stride field for mcdma @0x14
216 * @control: Control field @0x18 219 * @control: Control field @0x18
217 * @status: Status field @0x1C 220 * @status: Status field @0x1C
218 * @app: APP Fields @0x20 - 0x30 221 * @app: APP Fields @0x20 - 0x30
@@ -232,11 +235,11 @@ struct xilinx_axidma_desc_hw {
232/** 235/**
233 * struct xilinx_cdma_desc_hw - Hardware Descriptor 236 * struct xilinx_cdma_desc_hw - Hardware Descriptor
234 * @next_desc: Next Descriptor Pointer @0x00 237 * @next_desc: Next Descriptor Pointer @0x00
235 * @next_descmsb: Next Descriptor Pointer MSB @0x04 238 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
236 * @src_addr: Source address @0x08 239 * @src_addr: Source address @0x08
237 * @src_addrmsb: Source address MSB @0x0C 240 * @src_addr_msb: Source address MSB @0x0C
238 * @dest_addr: Destination address @0x10 241 * @dest_addr: Destination address @0x10
239 * @dest_addrmsb: Destination address MSB @0x14 242 * @dest_addr_msb: Destination address MSB @0x14
240 * @control: Control field @0x18 243 * @control: Control field @0x18
241 * @status: Status field @0x1C 244 * @status: Status field @0x1C
242 */ 245 */
@@ -310,6 +313,7 @@ struct xilinx_dma_tx_descriptor {
310 * @pending_list: Descriptors waiting 313 * @pending_list: Descriptors waiting
311 * @active_list: Descriptors ready to submit 314 * @active_list: Descriptors ready to submit
312 * @done_list: Complete descriptors 315 * @done_list: Complete descriptors
316 * @free_seg_list: Free descriptors
313 * @common: DMA common channel 317 * @common: DMA common channel
314 * @desc_pool: Descriptors pool 318 * @desc_pool: Descriptors pool
315 * @dev: The dma device 319 * @dev: The dma device
@@ -321,6 +325,7 @@ struct xilinx_dma_tx_descriptor {
321 * @cyclic: Check for cyclic transfers. 325 * @cyclic: Check for cyclic transfers.
322 * @genlock: Support genlock mode 326 * @genlock: Support genlock mode
323 * @err: Channel has errors 327 * @err: Channel has errors
328 * @idle: Check for channel idle
324 * @tasklet: Cleanup work after irq 329 * @tasklet: Cleanup work after irq
325 * @config: Device configuration info 330 * @config: Device configuration info
326 * @flush_on_fsync: Flush on Frame sync 331 * @flush_on_fsync: Flush on Frame sync
@@ -329,9 +334,12 @@ struct xilinx_dma_tx_descriptor {
329 * @desc_submitcount: Descriptor h/w submitted count 334 * @desc_submitcount: Descriptor h/w submitted count
330 * @residue: Residue for AXI DMA 335 * @residue: Residue for AXI DMA
331 * @seg_v: Statically allocated segments base 336 * @seg_v: Statically allocated segments base
337 * @seg_p: Physical allocated segments base
332 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers 338 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
339 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
333 * @start_transfer: Differentiate b/w DMA IP's transfer 340 * @start_transfer: Differentiate b/w DMA IP's transfer
334 * @stop_transfer: Differentiate b/w DMA IP's quiesce 341 * @stop_transfer: Differentiate b/w DMA IP's quiesce
342 * @tdest: TDEST value for mcdma
335 */ 343 */
336struct xilinx_dma_chan { 344struct xilinx_dma_chan {
337 struct xilinx_dma_device *xdev; 345 struct xilinx_dma_device *xdev;
@@ -341,6 +349,7 @@ struct xilinx_dma_chan {
341 struct list_head pending_list; 349 struct list_head pending_list;
342 struct list_head active_list; 350 struct list_head active_list;
343 struct list_head done_list; 351 struct list_head done_list;
352 struct list_head free_seg_list;
344 struct dma_chan common; 353 struct dma_chan common;
345 struct dma_pool *desc_pool; 354 struct dma_pool *desc_pool;
346 struct device *dev; 355 struct device *dev;
@@ -352,6 +361,7 @@ struct xilinx_dma_chan {
352 bool cyclic; 361 bool cyclic;
353 bool genlock; 362 bool genlock;
354 bool err; 363 bool err;
364 bool idle;
355 struct tasklet_struct tasklet; 365 struct tasklet_struct tasklet;
356 struct xilinx_vdma_config config; 366 struct xilinx_vdma_config config;
357 bool flush_on_fsync; 367 bool flush_on_fsync;
@@ -360,18 +370,20 @@ struct xilinx_dma_chan {
360 u32 desc_submitcount; 370 u32 desc_submitcount;
361 u32 residue; 371 u32 residue;
362 struct xilinx_axidma_tx_segment *seg_v; 372 struct xilinx_axidma_tx_segment *seg_v;
373 dma_addr_t seg_p;
363 struct xilinx_axidma_tx_segment *cyclic_seg_v; 374 struct xilinx_axidma_tx_segment *cyclic_seg_v;
375 dma_addr_t cyclic_seg_p;
364 void (*start_transfer)(struct xilinx_dma_chan *chan); 376 void (*start_transfer)(struct xilinx_dma_chan *chan);
365 int (*stop_transfer)(struct xilinx_dma_chan *chan); 377 int (*stop_transfer)(struct xilinx_dma_chan *chan);
366 u16 tdest; 378 u16 tdest;
367}; 379};
368 380
369/** 381/**
370 * enum xdma_ip_type: DMA IP type. 382 * enum xdma_ip_type - DMA IP type.
371 * 383 *
372 * XDMA_TYPE_AXIDMA: Axi dma ip. 384 * @XDMA_TYPE_AXIDMA: Axi dma ip.
373 * XDMA_TYPE_CDMA: Axi cdma ip. 385 * @XDMA_TYPE_CDMA: Axi cdma ip.
374 * XDMA_TYPE_VDMA: Axi vdma ip. 386 * @XDMA_TYPE_VDMA: Axi vdma ip.
375 * 387 *
376 */ 388 */
377enum xdma_ip_type { 389enum xdma_ip_type {
@@ -580,18 +592,32 @@ xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
580static struct xilinx_axidma_tx_segment * 592static struct xilinx_axidma_tx_segment *
581xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan) 593xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
582{ 594{
583 struct xilinx_axidma_tx_segment *segment; 595 struct xilinx_axidma_tx_segment *segment = NULL;
584 dma_addr_t phys; 596 unsigned long flags;
585
586 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
587 if (!segment)
588 return NULL;
589 597
590 segment->phys = phys; 598 spin_lock_irqsave(&chan->lock, flags);
599 if (!list_empty(&chan->free_seg_list)) {
600 segment = list_first_entry(&chan->free_seg_list,
601 struct xilinx_axidma_tx_segment,
602 node);
603 list_del(&segment->node);
604 }
605 spin_unlock_irqrestore(&chan->lock, flags);
591 606
592 return segment; 607 return segment;
593} 608}
594 609
610static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
611{
612 u32 next_desc = hw->next_desc;
613 u32 next_desc_msb = hw->next_desc_msb;
614
615 memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
616
617 hw->next_desc = next_desc;
618 hw->next_desc_msb = next_desc_msb;
619}
620
595/** 621/**
596 * xilinx_dma_free_tx_segment - Free transaction segment 622 * xilinx_dma_free_tx_segment - Free transaction segment
597 * @chan: Driver specific DMA channel 623 * @chan: Driver specific DMA channel
@@ -600,7 +626,9 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
600static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan, 626static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
601 struct xilinx_axidma_tx_segment *segment) 627 struct xilinx_axidma_tx_segment *segment)
602{ 628{
603 dma_pool_free(chan->desc_pool, segment, segment->phys); 629 xilinx_dma_clean_hw_desc(&segment->hw);
630
631 list_add_tail(&segment->node, &chan->free_seg_list);
604} 632}
605 633
606/** 634/**
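
The two hunks above replace dma_pool_zalloc()/dma_pool_free() in the AXI DMA segment hot path with a pre-allocated free list protected by the channel lock. A minimal sketch of that recycling pattern, using illustrative structures (my_chan, my_seg) rather than the driver's own:

        #include <linux/list.h>
        #include <linux/spinlock.h>

        struct my_seg {
                struct list_head node;
                /* hardware descriptor fields would live here */
        };

        struct my_chan {
                spinlock_t lock;
                struct list_head free_seg_list; /* filled once at alloc time */
        };

        /* Hot path: unlink a pre-allocated segment, no allocator involved. */
        static struct my_seg *my_seg_get(struct my_chan *chan)
        {
                struct my_seg *seg = NULL;
                unsigned long flags;

                spin_lock_irqsave(&chan->lock, flags);
                if (!list_empty(&chan->free_seg_list)) {
                        seg = list_first_entry(&chan->free_seg_list,
                                               struct my_seg, node);
                        list_del(&seg->node);
                }
                spin_unlock_irqrestore(&chan->lock, flags);

                return seg;
        }

        /* Callers are assumed to hold chan->lock, mirroring the free path above. */
        static void my_seg_put(struct my_chan *chan, struct my_seg *seg)
        {
                list_add_tail(&seg->node, &chan->free_seg_list);
        }
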
@@ -725,16 +753,31 @@ static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
725static void xilinx_dma_free_chan_resources(struct dma_chan *dchan) 753static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
726{ 754{
727 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 755 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
756 unsigned long flags;
728 757
729 dev_dbg(chan->dev, "Free all channel resources.\n"); 758 dev_dbg(chan->dev, "Free all channel resources.\n");
730 759
731 xilinx_dma_free_descriptors(chan); 760 xilinx_dma_free_descriptors(chan);
761
732 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 762 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
733 xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v); 763 spin_lock_irqsave(&chan->lock, flags);
734 xilinx_dma_free_tx_segment(chan, chan->seg_v); 764 INIT_LIST_HEAD(&chan->free_seg_list);
765 spin_unlock_irqrestore(&chan->lock, flags);
766
767 /* Free memory that is allocated for BD */
768 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
769 XILINX_DMA_NUM_DESCS, chan->seg_v,
770 chan->seg_p);
771
 772		/* Free memory that is allocated for cyclic DMA mode */
773 dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
774 chan->cyclic_seg_v, chan->cyclic_seg_p);
775 }
776
777 if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
778 dma_pool_destroy(chan->desc_pool);
779 chan->desc_pool = NULL;
735 } 780 }
736 dma_pool_destroy(chan->desc_pool);
737 chan->desc_pool = NULL;
738} 781}
739 782
740/** 783/**
@@ -817,6 +860,7 @@ static void xilinx_dma_do_tasklet(unsigned long data)
817static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) 860static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
818{ 861{
819 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 862 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
863 int i;
820 864
821 /* Has this channel already been allocated? */ 865 /* Has this channel already been allocated? */
822 if (chan->desc_pool) 866 if (chan->desc_pool)
@@ -827,11 +871,30 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
827 * for meeting Xilinx VDMA specification requirement. 871 * for meeting Xilinx VDMA specification requirement.
828 */ 872 */
829 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 873 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
830 chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool", 874 /* Allocate the buffer descriptors. */
831 chan->dev, 875 chan->seg_v = dma_zalloc_coherent(chan->dev,
832 sizeof(struct xilinx_axidma_tx_segment), 876 sizeof(*chan->seg_v) *
833 __alignof__(struct xilinx_axidma_tx_segment), 877 XILINX_DMA_NUM_DESCS,
834 0); 878 &chan->seg_p, GFP_KERNEL);
879 if (!chan->seg_v) {
880 dev_err(chan->dev,
881 "unable to allocate channel %d descriptors\n",
882 chan->id);
883 return -ENOMEM;
884 }
885
886 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
887 chan->seg_v[i].hw.next_desc =
888 lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
889 ((i + 1) % XILINX_DMA_NUM_DESCS));
890 chan->seg_v[i].hw.next_desc_msb =
891 upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
892 ((i + 1) % XILINX_DMA_NUM_DESCS));
893 chan->seg_v[i].phys = chan->seg_p +
894 sizeof(*chan->seg_v) * i;
895 list_add_tail(&chan->seg_v[i].node,
896 &chan->free_seg_list);
897 }
835 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 898 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
836 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool", 899 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
837 chan->dev, 900 chan->dev,
@@ -846,7 +909,8 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
846 0); 909 0);
847 } 910 }
848 911
849 if (!chan->desc_pool) { 912 if (!chan->desc_pool &&
913 (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
850 dev_err(chan->dev, 914 dev_err(chan->dev,
851 "unable to allocate channel %d descriptor pool\n", 915 "unable to allocate channel %d descriptor pool\n",
852 chan->id); 916 chan->id);
@@ -855,22 +919,20 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
855 919
856 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 920 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
857 /* 921 /*
858 * For AXI DMA case after submitting a pending_list, keep
859 * an extra segment allocated so that the "next descriptor"
860 * pointer on the tail descriptor always points to a
861 * valid descriptor, even when paused after reaching taildesc.
862 * This way, it is possible to issue additional
863 * transfers without halting and restarting the channel.
864 */
865 chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);
866
867 /*
868 * For cyclic DMA mode we need to program the tail Descriptor 922 * For cyclic DMA mode we need to program the tail Descriptor
869 * register with a value which is not a part of the BD chain 923 * register with a value which is not a part of the BD chain
870 * so allocating a desc segment during channel allocation for 924 * so allocating a desc segment during channel allocation for
871 * programming tail descriptor. 925 * programming tail descriptor.
872 */ 926 */
873 chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan); 927 chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
928 sizeof(*chan->cyclic_seg_v),
929 &chan->cyclic_seg_p, GFP_KERNEL);
930 if (!chan->cyclic_seg_v) {
931 dev_err(chan->dev,
932 "unable to allocate desc segment for cyclic DMA\n");
933 return -ENOMEM;
934 }
935 chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
874 } 936 }
875 937
876 dma_cookie_init(dchan); 938 dma_cookie_init(dchan);
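
The allocation hunk above carves XILINX_DMA_NUM_DESCS buffer descriptors out of a single coherent allocation and chains them into a ring, with each descriptor's next_desc/next_desc_msb holding the split DMA address of the following descriptor. A self-contained sketch of that address arithmetic, with a stand-in descriptor layout and count (hw_desc, NUM_DESCS) instead of the driver's:

        #include <linux/kernel.h>       /* lower_32_bits(), upper_32_bits() */
        #include <linux/types.h>        /* dma_addr_t, u32 */

        #define NUM_DESCS 64            /* stand-in for XILINX_DMA_NUM_DESCS */

        struct hw_desc {
                u32 next_desc;          /* low 32 bits of next descriptor */
                u32 next_desc_msb;      /* high 32 bits of next descriptor */
                /* remaining hardware fields omitted */
        };

        /*
         * descs[] is laid out contiguously in coherent memory starting at
         * DMA address base_p; link descriptor i to (i + 1) % NUM_DESCS.
         */
        static void link_desc_ring(struct hw_desc *descs, dma_addr_t base_p)
        {
                int i;

                for (i = 0; i < NUM_DESCS; i++) {
                        dma_addr_t next = base_p +
                                sizeof(*descs) * ((i + 1) % NUM_DESCS);

                        descs[i].next_desc = lower_32_bits(next);
                        descs[i].next_desc_msb = upper_32_bits(next);
                }
        }

Because the last element wraps back to index 0, the tail descriptor always carries a valid next pointer, which is presumably why the driver no longer needs the spare seg_v segment it previously kept around.
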
@@ -936,34 +998,10 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
936} 998}
937 999
938/** 1000/**
939 * xilinx_dma_is_running - Check if DMA channel is running
940 * @chan: Driver specific DMA channel
941 *
942 * Return: '1' if running, '0' if not.
943 */
944static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
945{
946 return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
947 XILINX_DMA_DMASR_HALTED) &&
948 (dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
949 XILINX_DMA_DMACR_RUNSTOP);
950}
951
952/**
953 * xilinx_dma_is_idle - Check if DMA channel is idle
954 * @chan: Driver specific DMA channel
955 *
956 * Return: '1' if idle, '0' if not.
957 */
958static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
959{
960 return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
961 XILINX_DMA_DMASR_IDLE;
962}
963
964/**
965 * xilinx_dma_stop_transfer - Halt DMA channel 1001 * xilinx_dma_stop_transfer - Halt DMA channel
966 * @chan: Driver specific DMA channel 1002 * @chan: Driver specific DMA channel
1003 *
1004 * Return: '0' on success and failure value on error
967 */ 1005 */
968static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan) 1006static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
969{ 1007{
@@ -980,6 +1018,8 @@ static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
980/** 1018/**
981 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete 1019 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
982 * @chan: Driver specific DMA channel 1020 * @chan: Driver specific DMA channel
1021 *
1022 * Return: '0' on success and failure value on error
983 */ 1023 */
984static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan) 1024static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
985{ 1025{
@@ -1022,13 +1062,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1022{ 1062{
1023 struct xilinx_vdma_config *config = &chan->config; 1063 struct xilinx_vdma_config *config = &chan->config;
1024 struct xilinx_dma_tx_descriptor *desc, *tail_desc; 1064 struct xilinx_dma_tx_descriptor *desc, *tail_desc;
1025 u32 reg; 1065 u32 reg, j;
1026 struct xilinx_vdma_tx_segment *tail_segment; 1066 struct xilinx_vdma_tx_segment *tail_segment;
1027 1067
1028 /* This function was invoked with lock held */ 1068 /* This function was invoked with lock held */
1029 if (chan->err) 1069 if (chan->err)
1030 return; 1070 return;
1031 1071
1072 if (!chan->idle)
1073 return;
1074
1032 if (list_empty(&chan->pending_list)) 1075 if (list_empty(&chan->pending_list))
1033 return; 1076 return;
1034 1077
@@ -1040,13 +1083,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1040 tail_segment = list_last_entry(&tail_desc->segments, 1083 tail_segment = list_last_entry(&tail_desc->segments,
1041 struct xilinx_vdma_tx_segment, node); 1084 struct xilinx_vdma_tx_segment, node);
1042 1085
1043 /* If it is SG mode and hardware is busy, cannot submit */
1044 if (chan->has_sg && xilinx_dma_is_running(chan) &&
1045 !xilinx_dma_is_idle(chan)) {
1046 dev_dbg(chan->dev, "DMA controller still busy\n");
1047 return;
1048 }
1049
1050 /* 1086 /*
1051 * If hardware is idle, then all descriptors on the running lists are 1087 * If hardware is idle, then all descriptors on the running lists are
1052 * done, start new transfers 1088 * done, start new transfers
@@ -1063,10 +1099,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1063 else 1099 else
1064 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN; 1100 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
1065 1101
1066 /* Configure channel to allow number frame buffers */
1067 dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
1068 chan->desc_pendingcount);
1069
1070 /* 1102 /*
1071 * With SG, start with circular mode, so that BDs can be fetched. 1103 * With SG, start with circular mode, so that BDs can be fetched.
1072 * In direct register mode, if not parking, enable circular mode 1104 * In direct register mode, if not parking, enable circular mode
@@ -1079,17 +1111,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1079 1111
1080 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); 1112 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1081 1113
1082 if (config->park && (config->park_frm >= 0) && 1114 j = chan->desc_submitcount;
1083 (config->park_frm < chan->num_frms)) { 1115 reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
1084 if (chan->direction == DMA_MEM_TO_DEV) 1116 if (chan->direction == DMA_MEM_TO_DEV) {
1085 dma_write(chan, XILINX_DMA_REG_PARK_PTR, 1117 reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
1086 config->park_frm << 1118 reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
1087 XILINX_DMA_PARK_PTR_RD_REF_SHIFT); 1119 } else {
1088 else 1120 reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
1089 dma_write(chan, XILINX_DMA_REG_PARK_PTR, 1121 reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
1090 config->park_frm <<
1091 XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
1092 } 1122 }
1123 dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
1093 1124
1094 /* Start the hardware */ 1125 /* Start the hardware */
1095 xilinx_dma_start(chan); 1126 xilinx_dma_start(chan);
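
The park-pointer hunk above switches from two unconditional writes to a read-modify-write of XILINX_DMA_REG_PARK_PTR, so only the read-frame field is touched for DMA_MEM_TO_DEV and only the write-frame field otherwise. The masked field-update idiom it relies on looks roughly like this; the register helpers here are placeholders, not the driver's accessors:

        #include <linux/io.h>
        #include <linux/types.h>

        /* Update one bit-field of a 32-bit register without touching the rest. */
        static void update_reg_field(void __iomem *reg, u32 mask, u32 shift, u32 val)
        {
                u32 tmp = readl(reg);

                tmp &= ~mask;                   /* clear the target field */
                tmp |= (val << shift) & mask;   /* insert the new value   */
                writel(tmp, reg);
        }
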
@@ -1101,6 +1132,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1101 if (chan->has_sg) { 1132 if (chan->has_sg) {
1102 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, 1133 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1103 tail_segment->phys); 1134 tail_segment->phys);
1135 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1136 chan->desc_pendingcount = 0;
1104 } else { 1137 } else {
1105 struct xilinx_vdma_tx_segment *segment, *last = NULL; 1138 struct xilinx_vdma_tx_segment *segment, *last = NULL;
1106 int i = 0; 1139 int i = 0;
@@ -1130,19 +1163,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1130 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE, 1163 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1131 last->hw.stride); 1164 last->hw.stride);
1132 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize); 1165 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1133 }
1134 1166
1135 if (!chan->has_sg) {
1136 list_del(&desc->node);
1137 list_add_tail(&desc->node, &chan->active_list);
1138 chan->desc_submitcount++; 1167 chan->desc_submitcount++;
1139 chan->desc_pendingcount--; 1168 chan->desc_pendingcount--;
1169 list_del(&desc->node);
1170 list_add_tail(&desc->node, &chan->active_list);
1140 if (chan->desc_submitcount == chan->num_frms) 1171 if (chan->desc_submitcount == chan->num_frms)
1141 chan->desc_submitcount = 0; 1172 chan->desc_submitcount = 0;
1142 } else {
1143 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1144 chan->desc_pendingcount = 0;
1145 } 1173 }
1174
1175 chan->idle = false;
1146} 1176}
1147 1177
1148/** 1178/**
@@ -1158,6 +1188,9 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1158 if (chan->err) 1188 if (chan->err)
1159 return; 1189 return;
1160 1190
1191 if (!chan->idle)
1192 return;
1193
1161 if (list_empty(&chan->pending_list)) 1194 if (list_empty(&chan->pending_list))
1162 return; 1195 return;
1163 1196
@@ -1176,6 +1209,12 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1176 } 1209 }
1177 1210
1178 if (chan->has_sg) { 1211 if (chan->has_sg) {
1212 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1213 XILINX_CDMA_CR_SGMODE);
1214
1215 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1216 XILINX_CDMA_CR_SGMODE);
1217
1179 xilinx_write(chan, XILINX_DMA_REG_CURDESC, 1218 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1180 head_desc->async_tx.phys); 1219 head_desc->async_tx.phys);
1181 1220
@@ -1203,6 +1242,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1203 1242
1204 list_splice_tail_init(&chan->pending_list, &chan->active_list); 1243 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1205 chan->desc_pendingcount = 0; 1244 chan->desc_pendingcount = 0;
1245 chan->idle = false;
1206} 1246}
1207 1247
1208/** 1248/**
@@ -1212,7 +1252,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1212static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) 1252static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1213{ 1253{
1214 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; 1254 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1215 struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head; 1255 struct xilinx_axidma_tx_segment *tail_segment;
1216 u32 reg; 1256 u32 reg;
1217 1257
1218 if (chan->err) 1258 if (chan->err)
@@ -1221,12 +1261,8 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1221 if (list_empty(&chan->pending_list)) 1261 if (list_empty(&chan->pending_list))
1222 return; 1262 return;
1223 1263
1224 /* If it is SG mode and hardware is busy, cannot submit */ 1264 if (!chan->idle)
1225 if (chan->has_sg && xilinx_dma_is_running(chan) &&
1226 !xilinx_dma_is_idle(chan)) {
1227 dev_dbg(chan->dev, "DMA controller still busy\n");
1228 return; 1265 return;
1229 }
1230 1266
1231 head_desc = list_first_entry(&chan->pending_list, 1267 head_desc = list_first_entry(&chan->pending_list,
1232 struct xilinx_dma_tx_descriptor, node); 1268 struct xilinx_dma_tx_descriptor, node);
@@ -1235,21 +1271,6 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1235 tail_segment = list_last_entry(&tail_desc->segments, 1271 tail_segment = list_last_entry(&tail_desc->segments,
1236 struct xilinx_axidma_tx_segment, node); 1272 struct xilinx_axidma_tx_segment, node);
1237 1273
1238 if (chan->has_sg && !chan->xdev->mcdma) {
1239 old_head = list_first_entry(&head_desc->segments,
1240 struct xilinx_axidma_tx_segment, node);
1241 new_head = chan->seg_v;
1242 /* Copy Buffer Descriptor fields. */
1243 new_head->hw = old_head->hw;
1244
1245 /* Swap and save new reserve */
1246 list_replace_init(&old_head->node, &new_head->node);
1247 chan->seg_v = old_head;
1248
1249 tail_segment->hw.next_desc = chan->seg_v->phys;
1250 head_desc->async_tx.phys = new_head->phys;
1251 }
1252
1253 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 1274 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1254 1275
1255 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { 1276 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
@@ -1324,6 +1345,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1324 1345
1325 list_splice_tail_init(&chan->pending_list, &chan->active_list); 1346 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1326 chan->desc_pendingcount = 0; 1347 chan->desc_pendingcount = 0;
1348 chan->idle = false;
1327} 1349}
1328 1350
1329/** 1351/**
@@ -1388,6 +1410,8 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
1388 } 1410 }
1389 1411
1390 chan->err = false; 1412 chan->err = false;
1413 chan->idle = true;
1414 chan->desc_submitcount = 0;
1391 1415
1392 return err; 1416 return err;
1393} 1417}
@@ -1469,6 +1493,7 @@ static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
1469 if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) { 1493 if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
1470 spin_lock(&chan->lock); 1494 spin_lock(&chan->lock);
1471 xilinx_dma_complete_descriptor(chan); 1495 xilinx_dma_complete_descriptor(chan);
1496 chan->idle = true;
1472 chan->start_transfer(chan); 1497 chan->start_transfer(chan);
1473 spin_unlock(&chan->lock); 1498 spin_unlock(&chan->lock);
1474 } 1499 }
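
In this hunk the interrupt handler marks the channel idle before re-invoking start_transfer, which together with the earlier '!chan->idle' checks replaces the removed xilinx_dma_is_running()/xilinx_dma_is_idle() register polling. A rough sketch of that software-idle gating, with illustrative structures rather than the driver's:

        #include <linux/list.h>
        #include <linux/spinlock.h>

        struct my_chan {
                spinlock_t lock;
                bool idle;                      /* true when hardware is quiescent */
                struct list_head pending_list;
        };

        /* Called with chan->lock held; programs hardware only when it is idle. */
        static void my_start_transfer(struct my_chan *chan)
        {
                if (!chan->idle || list_empty(&chan->pending_list))
                        return;

                /* ... program descriptors and kick the hardware ... */
                chan->idle = false;
        }

        /* Completion interrupt: retire work, mark idle, try to start more. */
        static void my_irq_complete(struct my_chan *chan)
        {
                spin_lock(&chan->lock);
                /* ... move finished descriptors to the done list ... */
                chan->idle = true;
                my_start_transfer(chan);
                spin_unlock(&chan->lock);
        }
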
@@ -1591,7 +1616,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1591{ 1616{
1592 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1617 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1593 struct xilinx_dma_tx_descriptor *desc; 1618 struct xilinx_dma_tx_descriptor *desc;
1594 struct xilinx_vdma_tx_segment *segment, *prev = NULL; 1619 struct xilinx_vdma_tx_segment *segment;
1595 struct xilinx_vdma_desc_hw *hw; 1620 struct xilinx_vdma_desc_hw *hw;
1596 1621
1597 if (!is_slave_direction(xt->dir)) 1622 if (!is_slave_direction(xt->dir))
@@ -1645,8 +1670,6 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1645 /* Insert the segment into the descriptor segments list. */ 1670 /* Insert the segment into the descriptor segments list. */
1646 list_add_tail(&segment->node, &desc->segments); 1671 list_add_tail(&segment->node, &desc->segments);
1647 1672
1648 prev = segment;
1649
1650 /* Link the last hardware descriptor with the first. */ 1673 /* Link the last hardware descriptor with the first. */
1651 segment = list_first_entry(&desc->segments, 1674 segment = list_first_entry(&desc->segments,
1652 struct xilinx_vdma_tx_segment, node); 1675 struct xilinx_vdma_tx_segment, node);
@@ -1733,7 +1756,7 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
1733{ 1756{
1734 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1757 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1735 struct xilinx_dma_tx_descriptor *desc; 1758 struct xilinx_dma_tx_descriptor *desc;
1736 struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL; 1759 struct xilinx_axidma_tx_segment *segment = NULL;
1737 u32 *app_w = (u32 *)context; 1760 u32 *app_w = (u32 *)context;
1738 struct scatterlist *sg; 1761 struct scatterlist *sg;
1739 size_t copy; 1762 size_t copy;
@@ -1784,10 +1807,6 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
1784 XILINX_DMA_NUM_APP_WORDS); 1807 XILINX_DMA_NUM_APP_WORDS);
1785 } 1808 }
1786 1809
1787 if (prev)
1788 prev->hw.next_desc = segment->phys;
1789
1790 prev = segment;
1791 sg_used += copy; 1810 sg_used += copy;
1792 1811
1793 /* 1812 /*
@@ -1801,7 +1820,6 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
1801 segment = list_first_entry(&desc->segments, 1820 segment = list_first_entry(&desc->segments,
1802 struct xilinx_axidma_tx_segment, node); 1821 struct xilinx_axidma_tx_segment, node);
1803 desc->async_tx.phys = segment->phys; 1822 desc->async_tx.phys = segment->phys;
1804 prev->hw.next_desc = segment->phys;
1805 1823
1806 /* For the last DMA_MEM_TO_DEV transfer, set EOP */ 1824 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
1807 if (chan->direction == DMA_MEM_TO_DEV) { 1825 if (chan->direction == DMA_MEM_TO_DEV) {
@@ -1821,11 +1839,14 @@ error:
1821 1839
1822/** 1840/**
1823 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction 1841 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction
1824 * @chan: DMA channel 1842 * @dchan: DMA channel
1825 * @sgl: scatterlist to transfer to/from 1843 * @buf_addr: Physical address of the buffer
1826 * @sg_len: number of entries in @scatterlist 1844 * @buf_len: Total length of the cyclic buffers
1845 * @period_len: length of individual cyclic buffer
1827 * @direction: DMA direction 1846 * @direction: DMA direction
1828 * @flags: transfer ack flags 1847 * @flags: transfer ack flags
1848 *
1849 * Return: Async transaction descriptor on success and NULL on failure
1829 */ 1850 */
1830static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( 1851static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
1831 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len, 1852 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
@@ -2009,7 +2030,9 @@ error:
2009 2030
2010/** 2031/**
2011 * xilinx_dma_terminate_all - Halt the channel and free descriptors 2032 * xilinx_dma_terminate_all - Halt the channel and free descriptors
2012 * @chan: Driver specific DMA Channel pointer 2033 * @dchan: Driver specific DMA Channel pointer
2034 *
2035 * Return: '0' always.
2013 */ 2036 */
2014static int xilinx_dma_terminate_all(struct dma_chan *dchan) 2037static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2015{ 2038{
@@ -2029,6 +2052,7 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2029 2052
2030 /* Remove and free all of the descriptors in the lists */ 2053 /* Remove and free all of the descriptors in the lists */
2031 xilinx_dma_free_descriptors(chan); 2054 xilinx_dma_free_descriptors(chan);
2055 chan->idle = true;
2032 2056
2033 if (chan->cyclic) { 2057 if (chan->cyclic) {
2034 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 2058 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
@@ -2037,6 +2061,10 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2037 chan->cyclic = false; 2061 chan->cyclic = false;
2038 } 2062 }
2039 2063
2064 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
2065 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2066 XILINX_CDMA_CR_SGMODE);
2067
2040 return 0; 2068 return 0;
2041} 2069}
2042 2070
@@ -2323,6 +2351,7 @@ static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2323 * 2351 *
2324 * @xdev: Driver specific device structure 2352 * @xdev: Driver specific device structure
2325 * @node: Device node 2353 * @node: Device node
2354 * @chan_id: DMA Channel id
2326 * 2355 *
2327 * Return: '0' on success and failure value on error 2356 * Return: '0' on success and failure value on error
2328 */ 2357 */
@@ -2344,11 +2373,18 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2344 chan->has_sg = xdev->has_sg; 2373 chan->has_sg = xdev->has_sg;
2345 chan->desc_pendingcount = 0x0; 2374 chan->desc_pendingcount = 0x0;
2346 chan->ext_addr = xdev->ext_addr; 2375 chan->ext_addr = xdev->ext_addr;
 2376	/* This variable ensures that descriptors are not
 2377	 * submitted when the DMA engine is in progress. It is
 2378	 * added to avoid polling a bit in the status register to
 2379	 * know the DMA state in the driver hot path.
 2380	 */
2381 chan->idle = true;
2347 2382
2348 spin_lock_init(&chan->lock); 2383 spin_lock_init(&chan->lock);
2349 INIT_LIST_HEAD(&chan->pending_list); 2384 INIT_LIST_HEAD(&chan->pending_list);
2350 INIT_LIST_HEAD(&chan->done_list); 2385 INIT_LIST_HEAD(&chan->done_list);
2351 INIT_LIST_HEAD(&chan->active_list); 2386 INIT_LIST_HEAD(&chan->active_list);
2387 INIT_LIST_HEAD(&chan->free_seg_list);
2352 2388
2353 /* Retrieve the channel properties from the device tree */ 2389 /* Retrieve the channel properties from the device tree */
2354 has_dre = of_property_read_bool(node, "xlnx,include-dre"); 2390 has_dre = of_property_read_bool(node, "xlnx,include-dre");
@@ -2379,6 +2415,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2379 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; 2415 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
2380 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 2416 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2381 chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET; 2417 chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
2418 chan->config.park = 1;
2382 2419
2383 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || 2420 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2384 xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S) 2421 xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
@@ -2395,6 +2432,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2395 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; 2432 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
2396 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 2433 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2397 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; 2434 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
2435 chan->config.park = 1;
2398 2436
2399 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || 2437 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2400 xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM) 2438 xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
@@ -2459,7 +2497,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2459 * Return: 0 always. 2497 * Return: 0 always.
2460 */ 2498 */
2461static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, 2499static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
2462 struct device_node *node) { 2500 struct device_node *node)
2501{
2463 int ret, i, nr_channels = 1; 2502 int ret, i, nr_channels = 1;
2464 2503
2465 ret = of_property_read_u32(node, "dma-channels", &nr_channels); 2504 ret = of_property_read_u32(node, "dma-channels", &nr_channels);
@@ -2654,7 +2693,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
2654 goto error; 2693 goto error;
2655 } 2694 }
2656 2695
2657 dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n"); 2696 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
2697 dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
2698 else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
2699 dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
2700 else
2701 dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
2658 2702
2659 return 0; 2703 return 0;
2660 2704
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 1ee1241ca797..f14645817ed8 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -23,6 +23,7 @@
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/clk.h> 24#include <linux/clk.h>
25#include <linux/io-64-nonatomic-lo-hi.h> 25#include <linux/io-64-nonatomic-lo-hi.h>
26#include <linux/pm_runtime.h>
26 27
27#include "../dmaengine.h" 28#include "../dmaengine.h"
28 29
@@ -47,6 +48,7 @@
47#define ZYNQMP_DMA_SRC_START_MSB 0x15C 48#define ZYNQMP_DMA_SRC_START_MSB 0x15C
48#define ZYNQMP_DMA_DST_START_LSB 0x160 49#define ZYNQMP_DMA_DST_START_LSB 0x160
49#define ZYNQMP_DMA_DST_START_MSB 0x164 50#define ZYNQMP_DMA_DST_START_MSB 0x164
51#define ZYNQMP_DMA_TOTAL_BYTE 0x188
50#define ZYNQMP_DMA_RATE_CTRL 0x18C 52#define ZYNQMP_DMA_RATE_CTRL 0x18C
51#define ZYNQMP_DMA_IRQ_SRC_ACCT 0x190 53#define ZYNQMP_DMA_IRQ_SRC_ACCT 0x190
52#define ZYNQMP_DMA_IRQ_DST_ACCT 0x194 54#define ZYNQMP_DMA_IRQ_DST_ACCT 0x194
@@ -138,6 +140,8 @@
138#define ZYNQMP_DMA_BUS_WIDTH_64 64 140#define ZYNQMP_DMA_BUS_WIDTH_64 64
139#define ZYNQMP_DMA_BUS_WIDTH_128 128 141#define ZYNQMP_DMA_BUS_WIDTH_128 128
140 142
143#define ZDMA_PM_TIMEOUT 100
144
141#define ZYNQMP_DMA_DESC_SIZE(chan) (chan->desc_size) 145#define ZYNQMP_DMA_DESC_SIZE(chan) (chan->desc_size)
142 146
143#define to_chan(chan) container_of(chan, struct zynqmp_dma_chan, \ 147#define to_chan(chan) container_of(chan, struct zynqmp_dma_chan, \
@@ -211,8 +215,6 @@ struct zynqmp_dma_desc_sw {
211 * @bus_width: Bus width 215 * @bus_width: Bus width
212 * @src_burst_len: Source burst length 216 * @src_burst_len: Source burst length
213 * @dst_burst_len: Dest burst length 217 * @dst_burst_len: Dest burst length
214 * @clk_main: Pointer to main clock
215 * @clk_apb: Pointer to apb clock
216 */ 218 */
217struct zynqmp_dma_chan { 219struct zynqmp_dma_chan {
218 struct zynqmp_dma_device *zdev; 220 struct zynqmp_dma_device *zdev;
@@ -237,8 +239,6 @@ struct zynqmp_dma_chan {
237 u32 bus_width; 239 u32 bus_width;
238 u32 src_burst_len; 240 u32 src_burst_len;
239 u32 dst_burst_len; 241 u32 dst_burst_len;
240 struct clk *clk_main;
241 struct clk *clk_apb;
242}; 242};
243 243
244/** 244/**
@@ -246,11 +246,15 @@ struct zynqmp_dma_chan {
246 * @dev: Device Structure 246 * @dev: Device Structure
247 * @common: DMA device structure 247 * @common: DMA device structure
248 * @chan: Driver specific DMA channel 248 * @chan: Driver specific DMA channel
249 * @clk_main: Pointer to main clock
250 * @clk_apb: Pointer to apb clock
249 */ 251 */
250struct zynqmp_dma_device { 252struct zynqmp_dma_device {
251 struct device *dev; 253 struct device *dev;
252 struct dma_device common; 254 struct dma_device common;
253 struct zynqmp_dma_chan *chan; 255 struct zynqmp_dma_chan *chan;
256 struct clk *clk_main;
257 struct clk *clk_apb;
254}; 258};
255 259
256static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg, 260static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg,
@@ -461,7 +465,11 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
461{ 465{
462 struct zynqmp_dma_chan *chan = to_chan(dchan); 466 struct zynqmp_dma_chan *chan = to_chan(dchan);
463 struct zynqmp_dma_desc_sw *desc; 467 struct zynqmp_dma_desc_sw *desc;
464 int i; 468 int i, ret;
469
470 ret = pm_runtime_get_sync(chan->dev);
471 if (ret < 0)
472 return ret;
465 473
466 chan->sw_desc_pool = kzalloc(sizeof(*desc) * ZYNQMP_DMA_NUM_DESCS, 474 chan->sw_desc_pool = kzalloc(sizeof(*desc) * ZYNQMP_DMA_NUM_DESCS,
467 GFP_KERNEL); 475 GFP_KERNEL);
@@ -506,6 +514,7 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
506static void zynqmp_dma_start(struct zynqmp_dma_chan *chan) 514static void zynqmp_dma_start(struct zynqmp_dma_chan *chan)
507{ 515{
508 writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER); 516 writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER);
517 writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE);
509 chan->idle = false; 518 chan->idle = false;
510 writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2); 519 writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2);
511} 520}
@@ -517,12 +526,12 @@ static void zynqmp_dma_start(struct zynqmp_dma_chan *chan)
517 */ 526 */
518static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status) 527static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
519{ 528{
520 u32 val; 529 if (status & ZYNQMP_DMA_BYTE_CNT_OVRFL)
521 530 writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE);
522 if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR) 531 if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
523 val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT); 532 readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
524 if (status & ZYNQMP_DMA_IRQ_SRC_ACCT_ERR) 533 if (status & ZYNQMP_DMA_IRQ_SRC_ACCT_ERR)
525 val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT); 534 readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
526} 535}
527 536
528static void zynqmp_dma_config(struct zynqmp_dma_chan *chan) 537static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
@@ -545,6 +554,8 @@ static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
545 * zynqmp_dma_device_config - Zynqmp dma device configuration 554 * zynqmp_dma_device_config - Zynqmp dma device configuration
546 * @dchan: DMA channel 555 * @dchan: DMA channel
547 * @config: DMA device config 556 * @config: DMA device config
557 *
558 * Return: 0 always
548 */ 559 */
549static int zynqmp_dma_device_config(struct dma_chan *dchan, 560static int zynqmp_dma_device_config(struct dma_chan *dchan,
550 struct dma_slave_config *config) 561 struct dma_slave_config *config)
@@ -640,7 +651,7 @@ static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
640 651
641/** 652/**
642 * zynqmp_dma_free_descriptors - Free channel descriptors 653 * zynqmp_dma_free_descriptors - Free channel descriptors
643 * @dchan: DMA channel pointer 654 * @chan: ZynqMP DMA channel pointer
644 */ 655 */
645static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan) 656static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
646{ 657{
@@ -664,6 +675,8 @@ static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
664 (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS), 675 (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
665 chan->desc_pool_v, chan->desc_pool_p); 676 chan->desc_pool_v, chan->desc_pool_p);
666 kfree(chan->sw_desc_pool); 677 kfree(chan->sw_desc_pool);
678 pm_runtime_mark_last_busy(chan->dev);
679 pm_runtime_put_autosuspend(chan->dev);
667} 680}
668 681
669/** 682/**
@@ -715,7 +728,7 @@ static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data)
715 728
716 if (status & ZYNQMP_DMA_INT_OVRFL) { 729 if (status & ZYNQMP_DMA_INT_OVRFL) {
717 zynqmp_dma_handle_ovfl_int(chan, status); 730 zynqmp_dma_handle_ovfl_int(chan, status);
718 dev_info(chan->dev, "Channel %p overflow interrupt\n", chan); 731 dev_dbg(chan->dev, "Channel %p overflow interrupt\n", chan);
719 ret = IRQ_HANDLED; 732 ret = IRQ_HANDLED;
720 } 733 }
721 734
@@ -838,11 +851,10 @@ static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan)
838 if (!chan) 851 if (!chan)
839 return; 852 return;
840 853
841 devm_free_irq(chan->zdev->dev, chan->irq, chan); 854 if (chan->irq)
855 devm_free_irq(chan->zdev->dev, chan->irq, chan);
842 tasklet_kill(&chan->tasklet); 856 tasklet_kill(&chan->tasklet);
843 list_del(&chan->common.device_node); 857 list_del(&chan->common.device_node);
844 clk_disable_unprepare(chan->clk_apb);
845 clk_disable_unprepare(chan->clk_main);
846} 858}
847 859
848/** 860/**
@@ -907,30 +919,6 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
907 "zynqmp-dma", chan); 919 "zynqmp-dma", chan);
908 if (err) 920 if (err)
909 return err; 921 return err;
910 chan->clk_main = devm_clk_get(&pdev->dev, "clk_main");
911 if (IS_ERR(chan->clk_main)) {
912 dev_err(&pdev->dev, "main clock not found.\n");
913 return PTR_ERR(chan->clk_main);
914 }
915
916 chan->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
917 if (IS_ERR(chan->clk_apb)) {
918 dev_err(&pdev->dev, "apb clock not found.\n");
919 return PTR_ERR(chan->clk_apb);
920 }
921
922 err = clk_prepare_enable(chan->clk_main);
923 if (err) {
924 dev_err(&pdev->dev, "Unable to enable main clock.\n");
925 return err;
926 }
927
928 err = clk_prepare_enable(chan->clk_apb);
929 if (err) {
930 clk_disable_unprepare(chan->clk_main);
931 dev_err(&pdev->dev, "Unable to enable apb clock.\n");
932 return err;
933 }
934 922
935 chan->desc_size = sizeof(struct zynqmp_dma_desc_ll); 923 chan->desc_size = sizeof(struct zynqmp_dma_desc_ll);
936 chan->idle = true; 924 chan->idle = true;
@@ -953,6 +941,87 @@ static struct dma_chan *of_zynqmp_dma_xlate(struct of_phandle_args *dma_spec,
953} 941}
954 942
955/** 943/**
944 * zynqmp_dma_suspend - Suspend method for the driver
945 * @dev: Address of the device structure
946 *
947 * Put the driver into low power mode.
948 * Return: 0 on success and failure value on error
949 */
950static int __maybe_unused zynqmp_dma_suspend(struct device *dev)
951{
952 if (!device_may_wakeup(dev))
953 return pm_runtime_force_suspend(dev);
954
955 return 0;
956}
957
958/**
959 * zynqmp_dma_resume - Resume from suspend
960 * @dev: Address of the device structure
961 *
962 * Resume operation after suspend.
963 * Return: 0 on success and failure value on error
964 */
965static int __maybe_unused zynqmp_dma_resume(struct device *dev)
966{
967 if (!device_may_wakeup(dev))
968 return pm_runtime_force_resume(dev);
969
970 return 0;
971}
972
973/**
974 * zynqmp_dma_runtime_suspend - Runtime suspend method for the driver
975 * @dev: Address of the device structure
976 *
977 * Put the driver into low power mode.
978 * Return: 0 always
979 */
980static int __maybe_unused zynqmp_dma_runtime_suspend(struct device *dev)
981{
982 struct zynqmp_dma_device *zdev = dev_get_drvdata(dev);
983
984 clk_disable_unprepare(zdev->clk_main);
985 clk_disable_unprepare(zdev->clk_apb);
986
987 return 0;
988}
989
990/**
 991 * zynqmp_dma_runtime_resume - Runtime resume method for the driver
992 * @dev: Address of the device structure
993 *
 994 * Resume operation after runtime suspend.
995 * Return: 0 always
996 */
997static int __maybe_unused zynqmp_dma_runtime_resume(struct device *dev)
998{
999 struct zynqmp_dma_device *zdev = dev_get_drvdata(dev);
1000 int err;
1001
1002 err = clk_prepare_enable(zdev->clk_main);
1003 if (err) {
1004 dev_err(dev, "Unable to enable main clock.\n");
1005 return err;
1006 }
1007
1008 err = clk_prepare_enable(zdev->clk_apb);
1009 if (err) {
1010 dev_err(dev, "Unable to enable apb clock.\n");
1011 clk_disable_unprepare(zdev->clk_main);
1012 return err;
1013 }
1014
1015 return 0;
1016}
1017
1018static const struct dev_pm_ops zynqmp_dma_dev_pm_ops = {
1019 SET_SYSTEM_SLEEP_PM_OPS(zynqmp_dma_suspend, zynqmp_dma_resume)
1020 SET_RUNTIME_PM_OPS(zynqmp_dma_runtime_suspend,
1021 zynqmp_dma_runtime_resume, NULL)
1022};
1023
1024/**
956 * zynqmp_dma_probe - Driver probe function 1025 * zynqmp_dma_probe - Driver probe function
957 * @pdev: Pointer to the platform_device structure 1026 * @pdev: Pointer to the platform_device structure
958 * 1027 *
@@ -984,12 +1053,33 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
984 p->device_config = zynqmp_dma_device_config; 1053 p->device_config = zynqmp_dma_device_config;
985 p->dev = &pdev->dev; 1054 p->dev = &pdev->dev;
986 1055
1056 zdev->clk_main = devm_clk_get(&pdev->dev, "clk_main");
1057 if (IS_ERR(zdev->clk_main)) {
1058 dev_err(&pdev->dev, "main clock not found.\n");
1059 return PTR_ERR(zdev->clk_main);
1060 }
1061
1062 zdev->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
1063 if (IS_ERR(zdev->clk_apb)) {
1064 dev_err(&pdev->dev, "apb clock not found.\n");
1065 return PTR_ERR(zdev->clk_apb);
1066 }
1067
987 platform_set_drvdata(pdev, zdev); 1068 platform_set_drvdata(pdev, zdev);
1069 pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT);
1070 pm_runtime_use_autosuspend(zdev->dev);
1071 pm_runtime_enable(zdev->dev);
1072 pm_runtime_get_sync(zdev->dev);
1073 if (!pm_runtime_enabled(zdev->dev)) {
1074 ret = zynqmp_dma_runtime_resume(zdev->dev);
1075 if (ret)
1076 return ret;
1077 }
988 1078
989 ret = zynqmp_dma_chan_probe(zdev, pdev); 1079 ret = zynqmp_dma_chan_probe(zdev, pdev);
990 if (ret) { 1080 if (ret) {
991 dev_err(&pdev->dev, "Probing channel failed\n"); 1081 dev_err(&pdev->dev, "Probing channel failed\n");
992 goto free_chan_resources; 1082 goto err_disable_pm;
993 } 1083 }
994 1084
995 p->dst_addr_widths = BIT(zdev->chan->bus_width / 8); 1085 p->dst_addr_widths = BIT(zdev->chan->bus_width / 8);
@@ -1005,12 +1095,19 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
1005 goto free_chan_resources; 1095 goto free_chan_resources;
1006 } 1096 }
1007 1097
1098 pm_runtime_mark_last_busy(zdev->dev);
1099 pm_runtime_put_sync_autosuspend(zdev->dev);
1100
1008 dev_info(&pdev->dev, "ZynqMP DMA driver Probe success\n"); 1101 dev_info(&pdev->dev, "ZynqMP DMA driver Probe success\n");
1009 1102
1010 return 0; 1103 return 0;
1011 1104
1012free_chan_resources: 1105free_chan_resources:
1013 zynqmp_dma_chan_remove(zdev->chan); 1106 zynqmp_dma_chan_remove(zdev->chan);
1107err_disable_pm:
1108 if (!pm_runtime_enabled(zdev->dev))
1109 zynqmp_dma_runtime_suspend(zdev->dev);
1110 pm_runtime_disable(zdev->dev);
1014 return ret; 1111 return ret;
1015} 1112}
1016 1113
@@ -1028,6 +1125,9 @@ static int zynqmp_dma_remove(struct platform_device *pdev)
1028 dma_async_device_unregister(&zdev->common); 1125 dma_async_device_unregister(&zdev->common);
1029 1126
1030 zynqmp_dma_chan_remove(zdev->chan); 1127 zynqmp_dma_chan_remove(zdev->chan);
1128 pm_runtime_disable(zdev->dev);
1129 if (!pm_runtime_enabled(zdev->dev))
1130 zynqmp_dma_runtime_suspend(zdev->dev);
1031 1131
1032 return 0; 1132 return 0;
1033} 1133}
@@ -1042,6 +1142,7 @@ static struct platform_driver zynqmp_dma_driver = {
1042 .driver = { 1142 .driver = {
1043 .name = "xilinx-zynqmp-dma", 1143 .name = "xilinx-zynqmp-dma",
1044 .of_match_table = zynqmp_dma_of_match, 1144 .of_match_table = zynqmp_dma_of_match,
1145 .pm = &zynqmp_dma_dev_pm_ops,
1045 }, 1146 },
1046 .probe = zynqmp_dma_probe, 1147 .probe = zynqmp_dma_probe,
1047 .remove = zynqmp_dma_remove, 1148 .remove = zynqmp_dma_remove,
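
The zynqmp_dma portion of the patch moves clock management behind runtime PM: probe enables runtime PM with autosuspend and holds a reference during setup, channel allocation takes a reference, and channel teardown drops it via the autosuspend variant. A condensed sketch of that lifecycle, assuming runtime callbacks that gate the clocks as in the patch (my_probe and the channel hooks are illustrative names):

        #include <linux/platform_device.h>
        #include <linux/pm_runtime.h>

        #define MY_PM_TIMEOUT_MS 100            /* mirrors ZDMA_PM_TIMEOUT */

        static int my_probe(struct platform_device *pdev)
        {
                pm_runtime_set_autosuspend_delay(&pdev->dev, MY_PM_TIMEOUT_MS);
                pm_runtime_use_autosuspend(&pdev->dev);
                pm_runtime_enable(&pdev->dev);
                pm_runtime_get_sync(&pdev->dev);        /* clocks on for setup */

                /* ... register channels, dma_async_device_register(), ... */

                pm_runtime_mark_last_busy(&pdev->dev);
                pm_runtime_put_sync_autosuspend(&pdev->dev);
                return 0;
        }

        static int my_alloc_chan_resources(struct device *dev)
        {
                int ret = pm_runtime_get_sync(dev);     /* clocks on while in use */

                if (ret < 0)
                        return ret;
                /* ... allocate descriptors ... */
                return 0;
        }

        static void my_free_chan_resources(struct device *dev)
        {
                /* ... free descriptors ... */
                pm_runtime_mark_last_busy(dev);
                pm_runtime_put_autosuspend(dev);
        }
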
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 8ad33a44a7b8..f25d36358187 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -981,10 +981,18 @@ static int of_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
981 return 0; 981 return 0;
982} 982}
983 983
984static void *
985of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
986 const struct device *dev)
987{
988 return (void *)of_device_get_match_data(dev);
989}
990
984const struct fwnode_operations of_fwnode_ops = { 991const struct fwnode_operations of_fwnode_ops = {
985 .get = of_fwnode_get, 992 .get = of_fwnode_get,
986 .put = of_fwnode_put, 993 .put = of_fwnode_put,
987 .device_is_available = of_fwnode_device_is_available, 994 .device_is_available = of_fwnode_device_is_available,
995 .device_get_match_data = of_fwnode_device_get_match_data,
988 .property_present = of_fwnode_property_present, 996 .property_present = of_fwnode_property_present,
989 .property_read_int_array = of_fwnode_property_read_int_array, 997 .property_read_int_array = of_fwnode_property_read_int_array,
990 .property_read_string_array = of_fwnode_property_read_string_array, 998 .property_read_string_array = of_fwnode_property_read_string_array,
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index b8f4c3c776e5..a933f87ef98d 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -585,6 +585,7 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
585const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, 585const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
586 const struct device *dev); 586 const struct device *dev);
587 587
588void *acpi_get_match_data(const struct device *dev);
588extern bool acpi_driver_match_device(struct device *dev, 589extern bool acpi_driver_match_device(struct device *dev,
589 const struct device_driver *drv); 590 const struct device_driver *drv);
590int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); 591int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
@@ -762,6 +763,11 @@ static inline const struct acpi_device_id *acpi_match_device(
762 return NULL; 763 return NULL;
763} 764}
764 765
766static inline void *acpi_get_match_data(const struct device *dev)
767{
768 return NULL;
769}
770
765static inline bool acpi_driver_match_device(struct device *dev, 771static inline bool acpi_driver_match_device(struct device *dev,
766 const struct device_driver *drv) 772 const struct device_driver *drv)
767{ 773{
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 411a84c6c400..4fa1a489efe4 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -15,6 +15,7 @@
15#include <linux/types.h> 15#include <linux/types.h>
16 16
17struct fwnode_operations; 17struct fwnode_operations;
18struct device;
18 19
19struct fwnode_handle { 20struct fwnode_handle {
20 struct fwnode_handle *secondary; 21 struct fwnode_handle *secondary;
@@ -51,6 +52,7 @@ struct fwnode_reference_args {
51 * struct fwnode_operations - Operations for fwnode interface 52 * struct fwnode_operations - Operations for fwnode interface
52 * @get: Get a reference to an fwnode. 53 * @get: Get a reference to an fwnode.
53 * @put: Put a reference to an fwnode. 54 * @put: Put a reference to an fwnode.
55 * @device_get_match_data: Return the device driver match data.
54 * @property_present: Return true if a property is present. 56 * @property_present: Return true if a property is present.
55 * @property_read_integer_array: Read an array of integer properties. Return 57 * @property_read_integer_array: Read an array of integer properties. Return
56 * zero on success, a negative error code 58 * zero on success, a negative error code
@@ -71,6 +73,8 @@ struct fwnode_operations {
71 struct fwnode_handle *(*get)(struct fwnode_handle *fwnode); 73 struct fwnode_handle *(*get)(struct fwnode_handle *fwnode);
72 void (*put)(struct fwnode_handle *fwnode); 74 void (*put)(struct fwnode_handle *fwnode);
73 bool (*device_is_available)(const struct fwnode_handle *fwnode); 75 bool (*device_is_available)(const struct fwnode_handle *fwnode);
76 void *(*device_get_match_data)(const struct fwnode_handle *fwnode,
77 const struct device *dev);
74 bool (*property_present)(const struct fwnode_handle *fwnode, 78 bool (*property_present)(const struct fwnode_handle *fwnode,
75 const char *propname); 79 const char *propname);
76 int (*property_read_int_array)(const struct fwnode_handle *fwnode, 80 int (*property_read_int_array)(const struct fwnode_handle *fwnode,
diff --git a/include/linux/property.h b/include/linux/property.h
index f6189a3ac63c..6653ed4b99f9 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -275,6 +275,8 @@ bool device_dma_supported(struct device *dev);
275 275
276enum dev_dma_attr device_get_dma_attr(struct device *dev); 276enum dev_dma_attr device_get_dma_attr(struct device *dev);
277 277
278void *device_get_match_data(struct device *dev);
279
278int device_get_phy_mode(struct device *dev); 280int device_get_phy_mode(struct device *dev);
279 281
280void *device_get_mac_address(struct device *dev, char *addr, int alen); 282void *device_get_mac_address(struct device *dev, char *addr, int alen);
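
The fwnode/property additions at the end give drivers a bus-agnostic way to fetch their match data. With the prototype added to <linux/property.h>, a driver enumerated via either DT or ACPI could retrieve per-variant data roughly as follows (the variant structure and probe function are hypothetical):

        #include <linux/device.h>
        #include <linux/platform_device.h>
        #include <linux/property.h>

        /* Hypothetical per-variant configuration, normally referenced from
         * the OF/ACPI match tables. */
        struct my_variant_data {
                unsigned int max_channels;
        };

        static int my_probe(struct platform_device *pdev)
        {
                const struct my_variant_data *data;

                /* Resolves through of_device_get_match_data() or the ACPI
                 * equivalent, depending on how the device was enumerated. */
                data = device_get_match_data(&pdev->dev);
                if (!data)
                        return -ENODEV;

                dev_info(&pdev->dev, "variant supports %u channels\n",
                         data->max_channels);
                return 0;
        }
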