author		Peter Ujfalusi <peter.ujfalusi@ti.com>	2015-10-16 03:18:10 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2015-10-26 21:22:45 -0400
commit		1be5336bc7ba050ee07d352643bf4c01c513553c (patch)
tree		d6a71da9c27970840895e7dec600243484b5fdef
parent		f7c7cae94832fc09ccff080b4cc2358ac11e2150 (diff)
dmaengine: edma: New device tree binding
With the old binding and driver architecture we had many issues:

No way to assign eDMA channels to event queues, thus no way to tune the
system by moving specific DMA channels to low/high priority servicing. We
moved the cyclic channels to high priority within the code, but that was
just a workaround for this issue.

Memcpy was fundamentally broken: even if the driver scanned the DT/devices
in the booted system for direct DMA users (which is not effective when the
events are going through a crossbar) and created a map of 'used' channels,
this information was not really usable. Since the eDMA driver is called via
the dmaengine API with _some_ channel number, it would try to request that
channel whenever any channel was requested for memcpy. By luck we usually
got a channel not used by any device, so things worked, but if a device
owned the given channel without having requested it, the memcpy channel
would have been left waiting for a HW event.

The old code had the am33xx/am43xx DMA event router handling embedded, but
this should be done in a separate driver since the router is not part of
the actual eDMA IP.

There was no way to 'lock' PaRAM slots for use by, for example, the DSP
when booting with DT.

In DT boot the edma node used more than one hwmod, which is not good
practice and makes the kernel print a warning.

With the new bindings and the changes in the driver we get:
- no regression with the legacy binding and non-DT boot,
- DMA channels can be assigned to any TC (to set their priority),
- PaRAM slots can be reserved for other cores to use,
- dynamic power management for CC and TCs; if only TC0 is used, all
  other TCs can be powered down, for example.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--	Documentation/devicetree/bindings/dma/ti-edma.txt	117
-rw-r--r--	drivers/dma/edma.c					486
-rw-r--r--	include/linux/platform_data/edma.h			  3
3 files changed, 459 insertions, 147 deletions
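As a side note on the memcpy rework described above: under the new binding,
only the channels listed in ti,edma-memcpy-channels back the DMA_MEMCPY
capability, so a generic dmaengine client can no longer steal a channel that
belongs to a HW event. A minimal sketch of such a client follows; it uses
only the generic dmaengine API (dma_request_channel, dmaengine_prep_dma_memcpy
and friends), not code from this patch, and dst, src and len are assumed to
be set up by the caller:

	/* Hedged sketch of a generic dmaengine memcpy user (not from this patch) */
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	/* With ti,edma-memcpy-channels this can only yield a SW-triggered channel */
	chan = dma_request_channel(mask, NULL, NULL);
	if (chan) {
		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					       DMA_PREP_INTERRUPT);
		if (tx) {
			dmaengine_submit(tx);
			dma_async_issue_pending(chan);
		}
		/* ... wait for completion, then ... */
		dma_release_channel(chan);
	}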
diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt
index 5ba525a10035..d3d0a4fb1c73 100644
--- a/Documentation/devicetree/bindings/dma/ti-edma.txt
+++ b/Documentation/devicetree/bindings/dma/ti-edma.txt
@@ -1,4 +1,119 @@
-TI EDMA
+Texas Instruments eDMA
+
+The eDMA3 consists of two components: Channel controller (CC) and Transfer
+Controller(s) (TC). The CC is the main entry for DMA users since it is
+responsible for the DMA channel handling, while the TCs are responsible to
+execute the actual DMA transfer.
+
+------------------------------------------------------------------------------
+eDMA3 Channel Controller
+
+Required properties:
+- compatible:	"ti,edma3-tpcc" for the channel controller(s)
+- #dma-cells:	Should be set to <2>. The first number is the DMA request
+		number and the second is the TC the channel is serviced on.
+- reg:		Memory map of eDMA CC
+- reg-names:	"edma3_cc"
+- interrupts:	Interrupt lines for CCINT, MPERR and CCERRINT.
+- interrupt-names:	"edma3_ccint", "emda3_mperr" and "edma3_ccerrint"
+- ti,tptcs:	List of TPTCs associated with the eDMA in the following form:
+		<&tptc_phandle TC_priority_number>. The highest priority is 0.
+
+Optional properties:
+- ti,hwmods:	Name of the hwmods associated to the eDMA CC
+- ti,edma-memcpy-channels: List of channels allocated to be used for memcpy, iow
+		these channels will be SW triggered channels. The list must
+		contain 16-bit numbers, see example.
+- ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by
+		the driver, they are allocated to be used by for example the
+		DSP. See example.
+
+------------------------------------------------------------------------------
+eDMA3 Transfer Controller
+
+Required properties:
+- compatible:	"ti,edma3-tptc" for the transfer controller(s)
+- reg:		Memory map of eDMA TC
+- interrupts:	Interrupt number for TCerrint.
+
+Optional properties:
+- ti,hwmods:	Name of the hwmods associated to the given eDMA TC
+- interrupt-names:	"edma3_tcerrint"
+
+------------------------------------------------------------------------------
+Example:
+
+edma: edma@49000000 {
+	compatible = "ti,edma3-tpcc";
+	ti,hwmods = "tpcc";
+	reg = <0x49000000 0x10000>;
+	reg-names = "edma3_cc";
+	interrupts = <12 13 14>;
+	interrupt-names = "edma3_ccint", "emda3_mperr", "edma3_ccerrint";
+	dma-requests = <64>;
+	#dma-cells = <2>;
+
+	ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 7>, <&edma_tptc2 0>;
+
+	/* Channels 20 and 21 are allocated for memcpy */
+	ti,edma-memcpy-channels = /bits/ 16 <20 21>;
+	/* The following PaRAM slots are reserved: 35-44 and 100-109 */
+	ti,edma-reserved-slot-ranges = /bits/ 16 <35 10>,
+				       /bits/ 16 <100 10>;
+};
+
+edma_tptc0: tptc@49800000 {
+	compatible = "ti,edma3-tptc";
+	ti,hwmods = "tptc0";
+	reg = <0x49800000 0x100000>;
+	interrupts = <112>;
+	interrupt-names = "edm3_tcerrint";
+};
+
+edma_tptc1: tptc@49900000 {
+	compatible = "ti,edma3-tptc";
+	ti,hwmods = "tptc1";
+	reg = <0x49900000 0x100000>;
+	interrupts = <113>;
+	interrupt-names = "edm3_tcerrint";
+};
+
+edma_tptc2: tptc@49a00000 {
+	compatible = "ti,edma3-tptc";
+	ti,hwmods = "tptc2";
+	reg = <0x49a00000 0x100000>;
+	interrupts = <114>;
+	interrupt-names = "edm3_tcerrint";
+};
+
+sham: sham@53100000 {
+	compatible = "ti,omap4-sham";
+	ti,hwmods = "sham";
+	reg = <0x53100000 0x200>;
+	interrupts = <109>;
+	/* DMA channel 36 executed on eDMA TC0 - low priority queue */
+	dmas = <&edma 36 0>;
+	dma-names = "rx";
+};
+
+mcasp0: mcasp@48038000 {
+	compatible = "ti,am33xx-mcasp-audio";
+	ti,hwmods = "mcasp0";
+	reg = <0x48038000 0x2000>,
+	      <0x46000000 0x400000>;
+	reg-names = "mpu", "dat";
+	interrupts = <80>, <81>;
+	interrupt-names = "tx", "rx";
+	status = "disabled";
+	/* DMA channels 8 and 9 executed on eDMA TC2 - high priority queue */
+	dmas = <&edma 8 2>,
+	       <&edma 9 2>;
+	dma-names = "tx", "rx";
+};
+
+------------------------------------------------------------------------------
+DEPRECATED binding, new DTS files must use the ti,edma3-tpcc/ti,edma3-tptc
+binding.
 
 Required properties:
 - compatible : "ti,edma3"
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index d4d71e60da1b..31722d436a42 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -201,13 +201,20 @@ struct edma_desc {
 
 struct edma_cc;
 
+struct edma_tc {
+	struct device_node		*node;
+	u16				id;
+};
+
 struct edma_chan {
 	struct virt_dma_chan		vchan;
 	struct list_head		node;
 	struct edma_desc		*edesc;
 	struct edma_cc			*ecc;
+	struct edma_tc			*tc;
 	int				ch_num;
 	bool				alloced;
+	bool				hw_triggered;
 	int				slot[EDMA_MAX_SLOTS];
 	int				missed;
 	struct dma_slave_config		cfg;
@@ -218,6 +225,7 @@ struct edma_cc {
 	struct edma_soc_info		*info;
 	void __iomem			*base;
 	int				id;
+	bool				legacy_mode;
 
 	/* eDMA3 resource information */
 	unsigned			num_channels;
@@ -228,20 +236,16 @@ struct edma_cc {
 	bool				chmap_exist;
 	enum dma_event_q		default_queue;
 
-	bool				unused_chan_list_done;
-	/* The slot_inuse bit for each PaRAM slot is clear unless the
-	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
+	/*
+	 * The slot_inuse bit for each PaRAM slot is clear unless the slot is
+	 * in use by Linux or if it is allocated to be used by DSP.
 	 */
 	unsigned long *slot_inuse;
 
-	/* The channel_unused bit for each channel is clear unless
-	 * it is not being used on this platform. It uses a bit
-	 * of SOC-specific initialization code.
-	 */
-	unsigned long *channel_unused;
-
 	struct dma_device		dma_slave;
+	struct dma_device		*dma_memcpy;
 	struct edma_chan		*slave_chans;
+	struct edma_tc			*tc_list;
 	int				dummy_slot;
 };
 
@@ -251,8 +255,17 @@ static const struct edmacc_param dummy_paramset = {
 	.ccnt = 1,
 };
 
+#define EDMA_BINDING_LEGACY	0
+#define EDMA_BINDING_TPCC	1
 static const struct of_device_id edma_of_ids[] = {
-	{ .compatible = "ti,edma3", },
+	{
+		.compatible = "ti,edma3",
+		.data = (void *)EDMA_BINDING_LEGACY,
+	},
+	{
+		.compatible = "ti,edma3-tpcc",
+		.data = (void *)EDMA_BINDING_TPCC,
+	},
 	{}
 };
 
@@ -412,60 +425,6 @@ static void edma_set_chmap(struct edma_chan *echan, int slot)
 	}
 }
 
-static int prepare_unused_channel_list(struct device *dev, void *data)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct edma_cc *ecc = data;
-	int dma_req_min = EDMA_CTLR_CHAN(ecc->id, 0);
-	int dma_req_max = dma_req_min + ecc->num_channels;
-	int i, count;
-	struct of_phandle_args dma_spec;
-
-	if (dev->of_node) {
-		struct platform_device *dma_pdev;
-
-		count = of_property_count_strings(dev->of_node, "dma-names");
-		if (count < 0)
-			return 0;
-		for (i = 0; i < count; i++) {
-			if (of_parse_phandle_with_args(dev->of_node, "dmas",
-						       "#dma-cells", i,
-						       &dma_spec))
-				continue;
-
-			if (!of_match_node(edma_of_ids, dma_spec.np)) {
-				of_node_put(dma_spec.np);
-				continue;
-			}
-
-			dma_pdev = of_find_device_by_node(dma_spec.np);
-			if (&dma_pdev->dev != ecc->dev)
-				continue;
-
-			clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]),
-				  ecc->channel_unused);
-			of_node_put(dma_spec.np);
-		}
-		return 0;
-	}
-
-	/* For non-OF case */
-	for (i = 0; i < pdev->num_resources; i++) {
-		struct resource *res = &pdev->resource[i];
-		int dma_req;
-
-		if (!(res->flags & IORESOURCE_DMA))
-			continue;
-
-		dma_req = (int)res->start;
-		if (dma_req >= dma_req_min && dma_req < dma_req_max)
-			clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
-				  ecc->channel_unused);
-	}
-
-	return 0;
-}
-
 static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
 {
 	struct edma_cc *ecc = echan->ecc;
@@ -617,7 +576,7 @@ static void edma_start(struct edma_chan *echan)
 	int j = (channel >> 5);
 	unsigned int mask = BIT(channel & 0x1f);
 
-	if (test_bit(channel, ecc->channel_unused)) {
+	if (!echan->hw_triggered) {
 		/* EDMA channels without event association */
 		dev_dbg(ecc->dev, "ESR%d %08x\n", j,
 			edma_shadow0_read_array(ecc, SH_ESR, j));
@@ -734,20 +693,6 @@ static int edma_alloc_channel(struct edma_chan *echan,
 	struct edma_cc *ecc = echan->ecc;
 	int channel = EDMA_CHAN_SLOT(echan->ch_num);
 
-	if (!ecc->unused_chan_list_done) {
-		/*
-		 * Scan all the platform devices to find out the EDMA channels
-		 * used and clear them in the unused list, making the rest
-		 * available for ARM usage.
-		 */
-		int ret = bus_for_each_dev(&platform_bus_type, NULL, ecc,
-					   prepare_unused_channel_list);
-		if (ret < 0)
-			return ret;
-
-		ecc->unused_chan_list_done = true;
-	}
-
 	/* ensure access through shadow region 0 */
 	edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
 
@@ -899,7 +844,7 @@ static int edma_terminate_all(struct dma_chan *chan)
 	if (echan->edesc) {
 		edma_stop(echan);
 		/* Move the cyclic channel back to default queue */
-		if (echan->edesc->cyclic)
+		if (!echan->tc && echan->edesc->cyclic)
 			edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
 		/*
 		 * free the running request descriptor
@@ -1403,7 +1348,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 	}
 
 	/* Place the cyclic channel to highest priority queue */
-	edma_assign_channel_eventq(echan, EVENTQ_0);
+	if (!echan->tc)
+		edma_assign_channel_eventq(echan, EVENTQ_0);
 
 	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
 }
@@ -1609,18 +1555,54 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
+{
+	struct platform_device *tc_pdev;
+	int ret;
+
+	if (!tc)
+		return;
+
+	tc_pdev = of_find_device_by_node(tc->node);
+	if (!tc_pdev) {
+		pr_err("%s: TPTC device is not found\n", __func__);
+		return;
+	}
+	if (!pm_runtime_enabled(&tc_pdev->dev))
+		pm_runtime_enable(&tc_pdev->dev);
+
+	if (enable)
+		ret = pm_runtime_get_sync(&tc_pdev->dev);
+	else
+		ret = pm_runtime_put_sync(&tc_pdev->dev);
+
+	if (ret < 0)
+		pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
+		       enable ? "get" : "put", dev_name(&tc_pdev->dev));
+}
+
 /* Alloc channel resources */
 static int edma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct edma_chan *echan = to_edma_chan(chan);
-	struct device *dev = chan->device->dev;
+	struct edma_cc *ecc = echan->ecc;
+	struct device *dev = ecc->dev;
+	enum dma_event_q eventq_no = EVENTQ_DEFAULT;
 	int ret;
 
-	ret = edma_alloc_channel(echan, EVENTQ_DEFAULT);
+	if (echan->tc) {
+		eventq_no = echan->tc->id;
+	} else if (ecc->tc_list) {
+		/* memcpy channel */
+		echan->tc = &ecc->tc_list[ecc->info->default_queue];
+		eventq_no = echan->tc->id;
+	}
+
+	ret = edma_alloc_channel(echan, eventq_no);
 	if (ret)
 		return ret;
 
-	echan->slot[0] = edma_alloc_slot(echan->ecc, echan->ch_num);
+	echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num);
 	if (echan->slot[0] < 0) {
 		dev_err(dev, "Entry slot allocation failed for channel %u\n",
 			EDMA_CHAN_SLOT(echan->ch_num));
@@ -1631,8 +1613,11 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
 	edma_set_chmap(echan, echan->slot[0]);
 	echan->alloced = true;
 
-	dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
-		EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
+	dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n",
+		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
+		echan->hw_triggered ? "HW" : "SW");
+
+	edma_tc_set_pm_state(echan->tc, true);
 
 	return 0;
 
@@ -1645,6 +1630,7 @@ err_slot:
 static void edma_free_chan_resources(struct dma_chan *chan)
 {
 	struct edma_chan *echan = to_edma_chan(chan);
+	struct device *dev = echan->ecc->dev;
 	int i;
 
 	/* Terminate transfers */
@@ -1669,7 +1655,12 @@ static void edma_free_chan_resources(struct dma_chan *chan)
 		echan->alloced = false;
 	}
 
-	dev_dbg(chan->device->dev, "freeing channel for %u\n", echan->ch_num);
+	edma_tc_set_pm_state(echan->tc, false);
+	echan->tc = NULL;
+	echan->hw_triggered = false;
+
+	dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n",
+		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id);
 }
 
 /* Send pending descriptor to hardware */
@@ -1756,41 +1747,90 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
 	return ret;
 }
 
+static bool edma_is_memcpy_channel(int ch_num, u16 *memcpy_channels)
+{
+	s16 *memcpy_ch = memcpy_channels;
+
+	if (!memcpy_channels)
+		return false;
+	while (*memcpy_ch != -1) {
+		if (*memcpy_ch == ch_num)
+			return true;
+		memcpy_ch++;
+	}
+	return false;
+}
+
 #define EDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
 				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
 				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
 				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 
-static void edma_dma_init(struct edma_cc *ecc)
+static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
 {
-	struct dma_device *ddev = &ecc->dma_slave;
+	struct dma_device *s_ddev = &ecc->dma_slave;
+	struct dma_device *m_ddev = NULL;
+	s16 *memcpy_channels = ecc->info->memcpy_channels;
 	int i, j;
 
-	dma_cap_zero(ddev->cap_mask);
-	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
-	dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
-	dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
+	dma_cap_zero(s_ddev->cap_mask);
+	dma_cap_set(DMA_SLAVE, s_ddev->cap_mask);
+	dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask);
+	if (ecc->legacy_mode && !memcpy_channels) {
+		dev_warn(ecc->dev,
+			 "Legacy memcpy is enabled, things might not work\n");
 
-	ddev->device_prep_slave_sg = edma_prep_slave_sg;
-	ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic;
-	ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
-	ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
-	ddev->device_free_chan_resources = edma_free_chan_resources;
-	ddev->device_issue_pending = edma_issue_pending;
-	ddev->device_tx_status = edma_tx_status;
-	ddev->device_config = edma_slave_config;
-	ddev->device_pause = edma_dma_pause;
-	ddev->device_resume = edma_dma_resume;
-	ddev->device_terminate_all = edma_terminate_all;
-
-	ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
-	ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
-	ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-
-	ddev->dev = ecc->dev;
-
-	INIT_LIST_HEAD(&ddev->channels);
+		dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
+		s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
+		s_ddev->directions = BIT(DMA_MEM_TO_MEM);
+	}
+
+	s_ddev->device_prep_slave_sg = edma_prep_slave_sg;
+	s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic;
+	s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
+	s_ddev->device_free_chan_resources = edma_free_chan_resources;
+	s_ddev->device_issue_pending = edma_issue_pending;
+	s_ddev->device_tx_status = edma_tx_status;
+	s_ddev->device_config = edma_slave_config;
+	s_ddev->device_pause = edma_dma_pause;
+	s_ddev->device_resume = edma_dma_resume;
+	s_ddev->device_terminate_all = edma_terminate_all;
+
+	s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
+	s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
+	s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV));
+	s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+	s_ddev->dev = ecc->dev;
+	INIT_LIST_HEAD(&s_ddev->channels);
+
+	if (memcpy_channels) {
+		m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL);
+		ecc->dma_memcpy = m_ddev;
+
+		dma_cap_zero(m_ddev->cap_mask);
+		dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);
+
+		m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
+		m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
+		m_ddev->device_free_chan_resources = edma_free_chan_resources;
+		m_ddev->device_issue_pending = edma_issue_pending;
+		m_ddev->device_tx_status = edma_tx_status;
+		m_ddev->device_config = edma_slave_config;
+		m_ddev->device_pause = edma_dma_pause;
+		m_ddev->device_resume = edma_dma_resume;
+		m_ddev->device_terminate_all = edma_terminate_all;
+
+		m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
+		m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
+		m_ddev->directions = BIT(DMA_MEM_TO_MEM);
+		m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+		m_ddev->dev = ecc->dev;
+		INIT_LIST_HEAD(&m_ddev->channels);
+	} else if (!ecc->legacy_mode) {
+		dev_info(ecc->dev, "memcpy is disabled\n");
+	}
 
 	for (i = 0; i < ecc->num_channels; i++) {
 		struct edma_chan *echan = &ecc->slave_chans[i];
@@ -1798,7 +1838,10 @@ static void edma_dma_init(struct edma_cc *ecc)
 		echan->ecc = ecc;
 		echan->vchan.desc_free = edma_desc_free;
 
-		vchan_init(&echan->vchan, ddev);
+		if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels))
+			vchan_init(&echan->vchan, m_ddev);
+		else
+			vchan_init(&echan->vchan, s_ddev);
 
 		INIT_LIST_HEAD(&echan->node);
 		for (j = 0; j < EDMA_MAX_SLOTS; j++)
@@ -1921,7 +1964,8 @@ static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
 	return 0;
 }
 
-static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev)
+static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
+						     bool legacy_mode)
 {
 	struct edma_soc_info *info;
 	struct property *prop;
@@ -1932,20 +1976,121 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev)
 	if (!info)
 		return ERR_PTR(-ENOMEM);
 
-	prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map", &sz);
+	if (legacy_mode) {
+		prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map",
+					&sz);
+		if (prop) {
+			ret = edma_xbar_event_map(dev, info, sz);
+			if (ret)
+				return ERR_PTR(ret);
+		}
+		return info;
+	}
+
+	/* Get the list of channels allocated to be used for memcpy */
+	prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
+	if (prop) {
+		const char pname[] = "ti,edma-memcpy-channels";
+		size_t nelm = sz / sizeof(s16);
+		s16 *memcpy_ch;
+
+		memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s16),
+					 GFP_KERNEL);
+		if (!memcpy_ch)
+			return ERR_PTR(-ENOMEM);
+
+		ret = of_property_read_u16_array(dev->of_node, pname,
+						 (u16 *)memcpy_ch, nelm);
+		if (ret)
+			return ERR_PTR(ret);
+
+		memcpy_ch[nelm] = -1;
+		info->memcpy_channels = memcpy_ch;
+	}
+
+	prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges",
+				&sz);
 	if (prop) {
-		ret = edma_xbar_event_map(dev, info, sz);
+		const char pname[] = "ti,edma-reserved-slot-ranges";
+		s16 (*rsv_slots)[2];
+		size_t nelm = sz / sizeof(*rsv_slots);
+		struct edma_rsv_info *rsv_info;
+
+		if (!nelm)
+			return info;
+
+		rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
+		if (!rsv_info)
+			return ERR_PTR(-ENOMEM);
+
+		rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
+					 GFP_KERNEL);
+		if (!rsv_slots)
+			return ERR_PTR(-ENOMEM);
+
+		ret = of_property_read_u16_array(dev->of_node, pname,
+						 (u16 *)rsv_slots, nelm * 2);
 		if (ret)
 			return ERR_PTR(ret);
+
+		rsv_slots[nelm][0] = -1;
+		rsv_slots[nelm][1] = -1;
+		info->rsv = rsv_info;
+		info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;
 	}
 
 	return info;
 }
+
+static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
+				      struct of_dma *ofdma)
+{
+	struct edma_cc *ecc = ofdma->of_dma_data;
+	struct dma_chan *chan = NULL;
+	struct edma_chan *echan;
+	int i;
+
+	if (!ecc || dma_spec->args_count < 1)
+		return NULL;
+
+	for (i = 0; i < ecc->num_channels; i++) {
+		echan = &ecc->slave_chans[i];
+		if (echan->ch_num == dma_spec->args[0]) {
+			chan = &echan->vchan.chan;
+			break;
+		}
+	}
+
+	if (!chan)
+		return NULL;
+
+	if (echan->ecc->legacy_mode && dma_spec->args_count == 1)
+		goto out;
+
+	if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 &&
+	    dma_spec->args[1] < echan->ecc->num_tc) {
+		echan->tc = &echan->ecc->tc_list[dma_spec->args[1]];
+		goto out;
+	}
+
+	return NULL;
+out:
+	/* The channel is going to be used as HW synchronized */
+	echan->hw_triggered = true;
+	return dma_get_slave_channel(chan);
+}
 #else
-static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev)
+static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
+						     bool legacy_mode)
 {
 	return ERR_PTR(-EINVAL);
 }
+
+static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
+				      struct of_dma *ofdma)
+{
+	return NULL;
+}
 #endif
 
 static int edma_probe(struct platform_device *pdev)
@@ -1953,7 +2098,6 @@ static int edma_probe(struct platform_device *pdev)
 	struct edma_soc_info *info = pdev->dev.platform_data;
 	s8 (*queue_priority_mapping)[2];
 	int i, off, ln;
-	const s16 (*rsv_chans)[2];
 	const s16 (*rsv_slots)[2];
 	const s16 (*xbar_chans)[2];
 	int irq;
@@ -1962,10 +2106,17 @@ static int edma_probe(struct platform_device *pdev)
 	struct device_node *node = pdev->dev.of_node;
 	struct device *dev = &pdev->dev;
 	struct edma_cc *ecc;
+	bool legacy_mode = true;
 	int ret;
 
 	if (node) {
-		info = edma_setup_info_from_dt(dev);
+		const struct of_device_id *match;
+
+		match = of_match_node(edma_of_ids, node);
+		if (match && (u32)match->data == EDMA_BINDING_TPCC)
+			legacy_mode = false;
+
+		info = edma_setup_info_from_dt(dev, legacy_mode);
 		if (IS_ERR(info)) {
 			dev_err(dev, "failed to get DT data\n");
 			return PTR_ERR(info);
@@ -1994,6 +2145,7 @@ static int edma_probe(struct platform_device *pdev)
 
 	ecc->dev = dev;
 	ecc->id = pdev->id;
+	ecc->legacy_mode = legacy_mode;
 	/* When booting with DT the pdev->id is -1 */
 	if (ecc->id < 0)
 		ecc->id = 0;
@@ -2024,12 +2176,6 @@ static int edma_probe(struct platform_device *pdev)
 	if (!ecc->slave_chans)
 		return -ENOMEM;
 
-	ecc->channel_unused = devm_kcalloc(dev,
-					   BITS_TO_LONGS(ecc->num_channels),
-					   sizeof(unsigned long), GFP_KERNEL);
-	if (!ecc->channel_unused)
-		return -ENOMEM;
-
 	ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
 				       sizeof(unsigned long), GFP_KERNEL);
 	if (!ecc->slot_inuse)
@@ -2040,20 +2186,7 @@ static int edma_probe(struct platform_device *pdev)
 	for (i = 0; i < ecc->num_slots; i++)
 		edma_write_slot(ecc, i, &dummy_paramset);
 
-	/* Mark all channels as unused */
-	memset(ecc->channel_unused, 0xff, sizeof(ecc->channel_unused));
-
 	if (info->rsv) {
-		/* Clear the reserved channels in unused list */
-		rsv_chans = info->rsv->rsv_chans;
-		if (rsv_chans) {
-			for (i = 0; rsv_chans[i][0] != -1; i++) {
-				off = rsv_chans[i][0];
-				ln = rsv_chans[i][1];
-				clear_bits(off, ln, ecc->channel_unused);
-			}
-		}
-
 		/* Set the reserved slots in inuse list */
 		rsv_slots = info->rsv->rsv_slots;
 		if (rsv_slots) {
@@ -2070,7 +2203,6 @@ static int edma_probe(struct platform_device *pdev)
 		if (xbar_chans) {
 			for (i = 0; xbar_chans[i][1] != -1; i++) {
 				off = xbar_chans[i][1];
-				clear_bits(off, 1, ecc->channel_unused);
 			}
 		}
 
@@ -2112,6 +2244,31 @@ static int edma_probe(struct platform_device *pdev)
 
 	queue_priority_mapping = info->queue_priority_mapping;
 
+	if (!ecc->legacy_mode) {
+		int lowest_priority = 0;
+		struct of_phandle_args tc_args;
+
+		ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
+					    sizeof(*ecc->tc_list), GFP_KERNEL);
+		if (!ecc->tc_list)
+			return -ENOMEM;
+
+		for (i = 0;; i++) {
+			ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
+							       1, i, &tc_args);
+			if (ret || i == ecc->num_tc)
+				break;
+
+			ecc->tc_list[i].node = tc_args.np;
+			ecc->tc_list[i].id = i;
+			queue_priority_mapping[i][1] = tc_args.args[0];
+			if (queue_priority_mapping[i][1] > lowest_priority) {
+				lowest_priority = queue_priority_mapping[i][1];
+				info->default_queue = i;
+			}
+		}
+	}
+
 	/* Event queue priority mapping */
 	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
 		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
@@ -2125,7 +2282,7 @@ static int edma_probe(struct platform_device *pdev)
 	ecc->info = info;
 
 	/* Init the dma device and channels */
-	edma_dma_init(ecc);
+	edma_dma_init(ecc, legacy_mode);
 
 	for (i = 0; i < ecc->num_channels; i++) {
 		/* Assign all channels to the default queue */
@@ -2136,12 +2293,23 @@ static int edma_probe(struct platform_device *pdev)
 	}
 
 	ret = dma_async_device_register(&ecc->dma_slave);
-	if (ret)
+	if (ret) {
+		dev_err(dev, "slave ddev registration failed (%d)\n", ret);
 		goto err_reg1;
+	}
+
+	if (ecc->dma_memcpy) {
+		ret = dma_async_device_register(ecc->dma_memcpy);
+		if (ret) {
+			dev_err(dev, "memcpy ddev registration failed (%d)\n",
+				ret);
+			dma_async_device_unregister(&ecc->dma_slave);
+			goto err_reg1;
+		}
+	}
 
 	if (node)
-		of_dma_controller_register(node, of_dma_xlate_by_chan_id,
-					   &ecc->dma_slave);
+		of_dma_controller_register(node, of_edma_xlate, ecc);
 
 	dev_info(dev, "TI EDMA DMA engine driver\n");
 
@@ -2160,12 +2328,30 @@ static int edma_remove(struct platform_device *pdev)
 	if (dev->of_node)
 		of_dma_controller_free(dev->of_node);
 	dma_async_device_unregister(&ecc->dma_slave);
+	if (ecc->dma_memcpy)
+		dma_async_device_unregister(ecc->dma_memcpy);
 	edma_free_slot(ecc, ecc->dummy_slot);
 
 	return 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
+static int edma_pm_suspend(struct device *dev)
+{
+	struct edma_cc *ecc = dev_get_drvdata(dev);
+	struct edma_chan *echan = ecc->slave_chans;
+	int i;
+
+	for (i = 0; i < ecc->num_channels; i++) {
+		if (echan[i].alloced) {
+			edma_setup_interrupt(&echan[i], false);
+			edma_tc_set_pm_state(echan[i].tc, false);
+		}
+	}
+
+	return 0;
+}
+
 static int edma_pm_resume(struct device *dev)
 {
 	struct edma_cc *ecc = dev_get_drvdata(dev);
@@ -2190,6 +2376,8 @@ static int edma_pm_resume(struct device *dev)
 
 			/* Set up channel -> slot mapping for the entry slot */
 			edma_set_chmap(&echan[i], echan[i].slot[0]);
+
+			edma_tc_set_pm_state(echan[i].tc, true);
 		}
 	}
 
@@ -2198,7 +2386,7 @@ static int edma_pm_resume(struct device *dev)
 #endif
 
 static const struct dev_pm_ops edma_pm_ops = {
-	SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, edma_pm_resume)
+	SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume)
 };
 
 static struct platform_driver edma_driver = {
@@ -2213,12 +2401,18 @@ static struct platform_driver edma_driver = {
 
 bool edma_filter_fn(struct dma_chan *chan, void *param)
 {
+	bool match = false;
+
 	if (chan->device->dev->driver == &edma_driver.driver) {
 		struct edma_chan *echan = to_edma_chan(chan);
 		unsigned ch_req = *(unsigned *)param;
-		return ch_req == echan->ch_num;
+		if (ch_req == echan->ch_num) {
+			/* The channel is going to be used as HW synchronized */
+			echan->hw_triggered = true;
+			match = true;
+		}
 	}
-	return false;
+	return match;
 }
 EXPORT_SYMBOL(edma_filter_fn);
 
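For reference, legacy (non-DT) clients reach edma_filter_fn above through
dma_request_channel(), and the filter now also marks the matched channel as
HW triggered. A hedged sketch of such a request; the request number 36 is
hypothetical, mirroring the sham example in the binding document, and real
board code would take it from its platform resources:

	/* Sketch of a legacy board-file style channel request (not from this patch) */
	dma_cap_mask_t mask;
	unsigned int dma_req = 36;	/* hypothetical eDMA event number */
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, edma_filter_fn, &dma_req);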
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
index 6b9d500956e4..e2878baeb90e 100644
--- a/include/linux/platform_data/edma.h
+++ b/include/linux/platform_data/edma.h
@@ -71,6 +71,9 @@ struct edma_soc_info {
 	/* Resource reservation for other cores */
 	struct edma_rsv_info	*rsv;
 
+	/* List of channels allocated for memcpy, terminated with -1 */
+	s16			*memcpy_channels;
+
 	s8	(*queue_priority_mapping)[2];
 	const s16	(*xbar_chans)[2];
 };
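To illustrate the new field, a hypothetical non-DT board file could hand a
-1 terminated channel list to the driver like this (the names are made up;
only the field and its terminator convention come from the header above):

	/* Hypothetical platform data: reserve channels 20 and 21 for memcpy */
	static s16 board_memcpy_channels[] = { 20, 21, -1 };

	static struct edma_soc_info board_edma_info = {
		/* ... other fields ... */
		.memcpy_channels = board_memcpy_channels,
	};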