author		Olof Johansson <olof@lixom.net>	2014-05-26 17:59:05 -0400
committer	Olof Johansson <olof@lixom.net>	2014-05-26 17:59:05 -0400
commit		3a5e23cf9e553cd32e68bf29fb5429e3e9a95467 (patch)
tree		d6b028bb8ac6c9866be6d0ae14f7bed5ab6edad1
parent		7741fa197ccb4862403554e7829be727a8fa3ba0 (diff)
parent		903ed4913c7fe78d2746445564634264291c7493 (diff)
Merge tag 'davinci-for-v3.16/edma' of git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci into next/drivers
Merge "DaVinci EDMA clean-up for v3.16" from Sekhar Nori:
This series makes edma use configuration information available within
the IP instead of reading it from platform data or DT. Some other useful
clean-ups are included too.
* tag 'davinci-for-v3.16/edma' of git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci: (34 commits)
ARM: edma: Remove redundant/unused parameters from edma_soc_info
ARM: davinci: Remove redundant/unused parameters for edma
ARM: dts: am4372: Remove obsolete properties from edma node
ARM: dts: am33xx: Remove obsolete properties from edma node
dt/bindings: ti,edma: Remove redundant properties from documentation
ARM: edma: Get IP configuration from HW (number of channels, tc, etc)
ARM: edma: Save number of regions from pdata to struct edma
ARM: edma: Remove num_cc member from struct edma
ARM: edma: Remove queue_tc_mapping data from edma_soc_info
ARM: davinci: Remove eDMA3 queue_tc_mapping data from edma_soc_info
ARM: edma: Do not change TC -> Queue mapping, leave it to default.
ARM: edma: Take the number of tc from edma_soc_info (pdata)
ARM: edma: No need to clean the pdata in edma_of_parse_dt()
ARM: edma: Clean up and simplify the code around irq request
dmaengine: edma: update DMA memcpy to use new param element
dmaengine: edma: Document variables used for residue accounting
dmaengine: edma: Provide granular accounting
dmaengine: edma: Make reading the position of active channels work
dmaengine: edma: Store transfer data in edma_desc and edma_pset
dmaengine: edma: Create private pset struct
...
Signed-off-by: Olof Johansson <olof@lixom.net>
-rw-r--r--	Documentation/devicetree/bindings/dma/ti-edma.txt	13
-rw-r--r--	arch/arm/boot/dts/am33xx.dtsi				3
-rw-r--r--	arch/arm/boot/dts/am4372.dtsi				3
-rw-r--r--	arch/arm/common/edma.c					197
-rw-r--r--	arch/arm/mach-davinci/devices-da8xx.c			31
-rw-r--r--	arch/arm/mach-davinci/dm355.c				14
-rw-r--r--	arch/arm/mach-davinci/dm365.c				16
-rw-r--r--	arch/arm/mach-davinci/dm644x.c				14
-rw-r--r--	arch/arm/mach-davinci/dm646x.c				16
-rw-r--r--	drivers/dma/edma.c					335
-rw-r--r--	include/linux/platform_data/edma.h			28
11 files changed, 373 insertions, 297 deletions
diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt
index 68ff2137bae7..5ba525a10035 100644
--- a/Documentation/devicetree/bindings/dma/ti-edma.txt
+++ b/Documentation/devicetree/bindings/dma/ti-edma.txt
@@ -2,11 +2,8 @@ TI EDMA
 
 Required properties:
 - compatible : "ti,edma3"
-- ti,edma-regions: Number of regions
-- ti,edma-slots: Number of slots
 - #dma-cells: Should be set to <1>
               Clients should use a single channel number per DMA request.
-- dma-channels: Specify total DMA channels per CC
 - reg: Memory map for accessing module
 - interrupt-parent: Interrupt controller the interrupt is routed through
 - interrupts: Exactly 3 interrupts need to be specified in the order:
@@ -17,6 +14,13 @@ Optional properties:
 - ti,hwmods: Name of the hwmods associated to the EDMA
 - ti,edma-xbar-event-map: Crossbar event to channel map
 
+Deprecated properties:
+Listed here in case one wants to boot an old kernel with new DTB. These
+properties might need to be added to the new DTS files.
+- ti,edma-regions: Number of regions
+- ti,edma-slots: Number of slots
+- dma-channels: Specify total DMA channels per CC
+
 Example:
 
 edma: edma@49000000 {
@@ -26,9 +30,6 @@ edma: edma@49000000 {
 	compatible = "ti,edma3";
 	ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
 	#dma-cells = <1>;
-	dma-channels = <64>;
-	ti,edma-regions = <4>;
-	ti,edma-slots = <256>;
 	ti,edma-xbar-event-map = /bits/ 16 <1 12
 					    2 13>;
 };
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 7ad75b4e0663..99e572c45244 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -147,9 +147,6 @@
 			      <0x44e10f90 0x40>;
 			interrupts = <12 13 14>;
 			#dma-cells = <1>;
-			dma-channels = <64>;
-			ti,edma-regions = <4>;
-			ti,edma-slots = <256>;
 		};
 
 		gpio0: gpio@44e07000 {
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index d1f8707ff1df..befb680e5719 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -108,9 +108,6 @@
 				     <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
 			#dma-cells = <1>;
-			dma-channels = <64>;
-			ti,edma-regions = <4>;
-			ti,edma-slots = <256>;
 		};
 
 		uart0: serial@44e09000 {
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index 5339009b3c0c..485be42519b9 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -102,7 +102,13 @@
 #define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))
 
 #define EDMA_DCHMAP	0x0100  /* 64 registers */
-#define CHMAP_EXIST	BIT(24)
+
+/* CCCFG register */
+#define GET_NUM_DMACH(x)	(x & 0x7) /* bits 0-2 */
+#define GET_NUM_PAENTRY(x)	((x & 0x7000) >> 12) /* bits 12-14 */
+#define GET_NUM_EVQUE(x)	((x & 0x70000) >> 16) /* bits 16-18 */
+#define GET_NUM_REGN(x)		((x & 0x300000) >> 20) /* bits 20-21 */
+#define CHMAP_EXIST		BIT(24)
 
 #define EDMA_MAX_DMACH           64
 #define EDMA_MAX_PARAMENTRY     512
@@ -233,7 +239,6 @@ struct edma {
 	unsigned	num_region;
 	unsigned	num_slots;
 	unsigned	num_tc;
-	unsigned	num_cc;
 	enum dma_event_q	default_queue;
 
 	/* list of channels with no even trigger; terminated by "-1" */
@@ -290,12 +295,6 @@ static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
 			~(0x7 << bit), queue_no << bit);
 }
 
-static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
-{
-	int bit = queue_no * 4;
-	edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
-}
-
 static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
 		int priority)
 {
@@ -994,29 +993,23 @@ void edma_set_dest(unsigned slot, dma_addr_t dest_port,
 EXPORT_SYMBOL(edma_set_dest);
 
 /**
- * edma_get_position - returns the current transfer points
+ * edma_get_position - returns the current transfer point
  * @slot: parameter RAM slot being examined
- * @src: pointer to source port position
- * @dst: pointer to destination port position
+ * @dst: true selects the dest position, false the source
  *
- * Returns current source and destination addresses for a particular
- * parameter RAM slot. Its channel should not be active when this is called.
+ * Returns the position of the current active slot
  */
-void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst)
+dma_addr_t edma_get_position(unsigned slot, bool dst)
 {
-	struct edmacc_param temp;
-	unsigned ctlr;
+	u32 offs, ctlr = EDMA_CTLR(slot);
 
-	ctlr = EDMA_CTLR(slot);
 	slot = EDMA_CHAN_SLOT(slot);
 
-	edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp);
-	if (src != NULL)
-		*src = temp.src;
-	if (dst != NULL)
-		*dst = temp.dst;
+	offs = PARM_OFFSET(slot);
+	offs += dst ? PARM_DST : PARM_SRC;
+
+	return edma_read(ctlr, offs);
 }
-EXPORT_SYMBOL(edma_get_position);
 
 /**
  * edma_set_src_index - configure DMA source address indexing
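The reworked edma_get_position() reads the live SRC or DST pointer straight out of the PaRAM slot registers rather than copying the whole slot, so it can be sampled while the channel is active. A hypothetical caller-side sketch (names assumed, mirroring how edma_residue() in drivers/dma/edma.c further down picks which pointer to read):

	/*
	 * Sketch only: for DEV_TO_MEM the memory side is the destination,
	 * so the destination pointer is the one that advances through the
	 * buffer; for MEM_TO_DEV it is the source pointer.
	 */
	static dma_addr_t sample_position(struct edma_chan *echan,
					  enum dma_transfer_direction dir)
	{
		bool dst = (dir == DMA_DEV_TO_MEM);

		return edma_get_position(echan->slot[0], dst);
	}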
@@ -1421,6 +1414,67 @@ void edma_clear_event(unsigned channel) | |||
1421 | } | 1414 | } |
1422 | EXPORT_SYMBOL(edma_clear_event); | 1415 | EXPORT_SYMBOL(edma_clear_event); |
1423 | 1416 | ||
1417 | static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata, | ||
1418 | struct edma *edma_cc) | ||
1419 | { | ||
1420 | int i; | ||
1421 | u32 value, cccfg; | ||
1422 | s8 (*queue_priority_map)[2]; | ||
1423 | |||
1424 | /* Decode the eDMA3 configuration from CCCFG register */ | ||
1425 | cccfg = edma_read(0, EDMA_CCCFG); | ||
1426 | |||
1427 | value = GET_NUM_REGN(cccfg); | ||
1428 | edma_cc->num_region = BIT(value); | ||
1429 | |||
1430 | value = GET_NUM_DMACH(cccfg); | ||
1431 | edma_cc->num_channels = BIT(value + 1); | ||
1432 | |||
1433 | value = GET_NUM_PAENTRY(cccfg); | ||
1434 | edma_cc->num_slots = BIT(value + 4); | ||
1435 | |||
1436 | value = GET_NUM_EVQUE(cccfg); | ||
1437 | edma_cc->num_tc = value + 1; | ||
1438 | |||
1439 | dev_dbg(dev, "eDMA3 HW configuration (cccfg: 0x%08x):\n", cccfg); | ||
1440 | dev_dbg(dev, "num_region: %u\n", edma_cc->num_region); | ||
1441 | dev_dbg(dev, "num_channel: %u\n", edma_cc->num_channels); | ||
1442 | dev_dbg(dev, "num_slot: %u\n", edma_cc->num_slots); | ||
1443 | dev_dbg(dev, "num_tc: %u\n", edma_cc->num_tc); | ||
1444 | |||
1445 | /* Nothing need to be done if queue priority is provided */ | ||
1446 | if (pdata->queue_priority_mapping) | ||
1447 | return 0; | ||
1448 | |||
1449 | /* | ||
1450 | * Configure TC/queue priority as follows: | ||
1451 | * Q0 - priority 0 | ||
1452 | * Q1 - priority 1 | ||
1453 | * Q2 - priority 2 | ||
1454 | * ... | ||
1455 | * The meaning of priority numbers: 0 highest priority, 7 lowest | ||
1456 | * priority. So Q0 is the highest priority queue and the last queue has | ||
1457 | * the lowest priority. | ||
1458 | */ | ||
1459 | queue_priority_map = devm_kzalloc(dev, | ||
1460 | (edma_cc->num_tc + 1) * sizeof(s8), | ||
1461 | GFP_KERNEL); | ||
1462 | if (!queue_priority_map) | ||
1463 | return -ENOMEM; | ||
1464 | |||
1465 | for (i = 0; i < edma_cc->num_tc; i++) { | ||
1466 | queue_priority_map[i][0] = i; | ||
1467 | queue_priority_map[i][1] = i; | ||
1468 | } | ||
1469 | queue_priority_map[i][0] = -1; | ||
1470 | queue_priority_map[i][1] = -1; | ||
1471 | |||
1472 | pdata->queue_priority_mapping = queue_priority_map; | ||
1473 | pdata->default_queue = 0; | ||
1474 | |||
1475 | return 0; | ||
1476 | } | ||
1477 | |||
1424 | #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES) | 1478 | #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES) |
1425 | 1479 | ||
1426 | static int edma_xbar_event_map(struct device *dev, struct device_node *node, | 1480 | static int edma_xbar_event_map(struct device *dev, struct device_node *node, |
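The decode in edma_setup_from_hw() is pure bit arithmetic: the channel and PaRAM slot counts are stored log2-encoded, and the event-queue/TC count is stored minus one. A self-contained worked example with a hypothetical CCCFG readout (the value 0x00224005 is illustrative, not taken from any particular SoC):

	#include <assert.h>
	#include <stdint.h>

	/* Userspace re-statement of the GET_NUM_* macros added above. */
	#define GET_NUM_DMACH(x)	((x) & 0x7)			/* bits 0-2 */
	#define GET_NUM_PAENTRY(x)	(((x) & 0x7000) >> 12)		/* bits 12-14 */
	#define GET_NUM_EVQUE(x)	(((x) & 0x70000) >> 16)		/* bits 16-18 */
	#define GET_NUM_REGN(x)		(((x) & 0x300000) >> 20)	/* bits 20-21 */
	#define BIT(n)			(1u << (n))

	int main(void)
	{
		uint32_t cccfg = 0x00224005;	/* hypothetical register readout */

		assert(BIT(GET_NUM_DMACH(cccfg) + 1) == 64);	/* 2^(5+1) channels */
		assert(BIT(GET_NUM_PAENTRY(cccfg) + 4) == 256);	/* 2^(4+4) PaRAM slots */
		assert(GET_NUM_EVQUE(cccfg) + 1 == 3);		/* 3 event queues/TCs */
		assert(BIT(GET_NUM_REGN(cccfg)) == 4);		/* 2^2 regions */
		return 0;
	}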
@@ -1471,65 +1525,16 @@ static int edma_of_parse_dt(struct device *dev,
 			    struct device_node *node,
 			    struct edma_soc_info *pdata)
 {
-	int ret = 0, i;
-	u32 value;
+	int ret = 0;
 	struct property *prop;
 	size_t sz;
 	struct edma_rsv_info *rsv_info;
-	s8 (*queue_tc_map)[2], (*queue_priority_map)[2];
-
-	memset(pdata, 0, sizeof(struct edma_soc_info));
-
-	ret = of_property_read_u32(node, "dma-channels", &value);
-	if (ret < 0)
-		return ret;
-	pdata->n_channel = value;
-
-	ret = of_property_read_u32(node, "ti,edma-regions", &value);
-	if (ret < 0)
-		return ret;
-	pdata->n_region = value;
-
-	ret = of_property_read_u32(node, "ti,edma-slots", &value);
-	if (ret < 0)
-		return ret;
-	pdata->n_slot = value;
-
-	pdata->n_cc = 1;
 
 	rsv_info = devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL);
 	if (!rsv_info)
 		return -ENOMEM;
 	pdata->rsv = rsv_info;
 
-	queue_tc_map = devm_kzalloc(dev, 8*sizeof(s8), GFP_KERNEL);
-	if (!queue_tc_map)
-		return -ENOMEM;
-
-	for (i = 0; i < 3; i++) {
-		queue_tc_map[i][0] = i;
-		queue_tc_map[i][1] = i;
-	}
-	queue_tc_map[i][0] = -1;
-	queue_tc_map[i][1] = -1;
-
-	pdata->queue_tc_mapping = queue_tc_map;
-
-	queue_priority_map = devm_kzalloc(dev, 8*sizeof(s8), GFP_KERNEL);
-	if (!queue_priority_map)
-		return -ENOMEM;
-
-	for (i = 0; i < 3; i++) {
-		queue_priority_map[i][0] = i;
-		queue_priority_map[i][1] = i;
-	}
-	queue_priority_map[i][0] = -1;
-	queue_priority_map[i][1] = -1;
-
-	pdata->queue_priority_mapping = queue_priority_map;
-
-	pdata->default_queue = 0;
-
 	prop = of_find_property(node, "ti,edma-xbar-event-map", &sz);
 	if (prop)
 		ret = edma_xbar_event_map(dev, node, pdata, sz);
@@ -1556,6 +1561,7 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
 		return ERR_PTR(ret);
 
 	dma_cap_set(DMA_SLAVE, edma_filter_info.dma_cap);
+	dma_cap_set(DMA_CYCLIC, edma_filter_info.dma_cap);
 	of_dma_controller_register(dev->of_node, of_dma_simple_xlate,
 				   &edma_filter_info);
 
@@ -1574,7 +1580,6 @@ static int edma_probe(struct platform_device *pdev)
 	struct edma_soc_info	**info = pdev->dev.platform_data;
 	struct edma_soc_info	*ninfo[EDMA_MAX_CC] = {NULL};
 	s8			(*queue_priority_mapping)[2];
-	s8			(*queue_tc_mapping)[2];
 	int			i, j, off, ln, found = 0;
 	int			status = -1;
 	const s16		(*rsv_chans)[2];
@@ -1585,7 +1590,6 @@ static int edma_probe(struct platform_device *pdev)
 	struct resource		*r[EDMA_MAX_CC] = {NULL};
 	struct resource		res[EDMA_MAX_CC];
 	char			res_name[10];
-	char			irq_name[10];
 	struct device_node	*node = pdev->dev.of_node;
 	struct device		*dev = &pdev->dev;
 	int			ret;
@@ -1650,12 +1654,10 @@ static int edma_probe(struct platform_device *pdev)
 		if (!edma_cc[j])
 			return -ENOMEM;
 
-		edma_cc[j]->num_channels = min_t(unsigned, info[j]->n_channel,
-							EDMA_MAX_DMACH);
-		edma_cc[j]->num_slots = min_t(unsigned, info[j]->n_slot,
-							EDMA_MAX_PARAMENTRY);
-		edma_cc[j]->num_cc = min_t(unsigned, info[j]->n_cc,
-						EDMA_MAX_CC);
+		/* Get eDMA3 configuration from IP */
+		ret = edma_setup_from_hw(dev, info[j], edma_cc[j]);
+		if (ret)
+			return ret;
 
 		edma_cc[j]->default_queue = info[j]->default_queue;
 
@@ -1707,14 +1709,21 @@ static int edma_probe(struct platform_device *pdev)
 
 		if (node) {
 			irq[j] = irq_of_parse_and_map(node, 0);
+			err_irq[j] = irq_of_parse_and_map(node, 2);
 		} else {
+			char irq_name[10];
+
 			sprintf(irq_name, "edma%d", j);
 			irq[j] = platform_get_irq_byname(pdev, irq_name);
+
+			sprintf(irq_name, "edma%d_err", j);
+			err_irq[j] = platform_get_irq_byname(pdev, irq_name);
 		}
 		edma_cc[j]->irq_res_start = irq[j];
-		status = devm_request_irq(&pdev->dev, irq[j],
-					  dma_irq_handler, 0, "edma",
-					  &pdev->dev);
+		edma_cc[j]->irq_res_end = err_irq[j];
+
+		status = devm_request_irq(dev, irq[j], dma_irq_handler, 0,
+					  "edma", dev);
 		if (status < 0) {
 			dev_dbg(&pdev->dev,
 				"devm_request_irq %d failed --> %d\n",
@@ -1722,16 +1731,8 @@ static int edma_probe(struct platform_device *pdev)
 			return status;
 		}
 
-		if (node) {
-			err_irq[j] = irq_of_parse_and_map(node, 2);
-		} else {
-			sprintf(irq_name, "edma%d_err", j);
-			err_irq[j] = platform_get_irq_byname(pdev, irq_name);
-		}
-		edma_cc[j]->irq_res_end = err_irq[j];
-		status = devm_request_irq(&pdev->dev, err_irq[j],
-					  dma_ccerr_handler, 0,
-					  "edma_error", &pdev->dev);
+		status = devm_request_irq(dev, err_irq[j], dma_ccerr_handler, 0,
+					  "edma_error", dev);
 		if (status < 0) {
 			dev_dbg(&pdev->dev,
 				"devm_request_irq %d failed --> %d\n",
@@ -1742,14 +1743,8 @@ static int edma_probe(struct platform_device *pdev)
 		for (i = 0; i < edma_cc[j]->num_channels; i++)
 			map_dmach_queue(j, i, info[j]->default_queue);
 
-		queue_tc_mapping = info[j]->queue_tc_mapping;
 		queue_priority_mapping = info[j]->queue_priority_mapping;
 
-		/* Event queue to TC mapping */
-		for (i = 0; queue_tc_mapping[i][0] != -1; i++)
-			map_queue_tc(j, queue_tc_mapping[i][0],
-					queue_tc_mapping[i][1]);
-
 		/* Event queue priority mapping */
 		for (i = 0; queue_priority_mapping[i][0] != -1; i++)
 			assign_priority_to_queue(j,
@@ -1762,7 +1757,7 @@ static int edma_probe(struct platform_device *pdev)
 		if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
 			map_dmach_param(j);
 
-		for (i = 0; i < info[j]->n_region; i++) {
+		for (i = 0; i < edma_cc[j]->num_region; i++) {
 			edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
 			edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
 			edma_write_array(j, EDMA_QRAE, i, 0x0);
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 56ea41d5f849..b85b781b05fd 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -134,13 +134,6 @@ struct platform_device da8xx_serial_device[] = {
 	}
 };
 
-static s8 da8xx_queue_tc_mapping[][2] = {
-	/* {event queue no, TC no} */
-	{0, 0},
-	{1, 1},
-	{-1, -1}
-};
-
 static s8 da8xx_queue_priority_mapping[][2] = {
 	/* {event queue no, Priority} */
 	{0, 3},
@@ -148,12 +141,6 @@ static s8 da8xx_queue_priority_mapping[][2] = {
 	{-1, -1}
 };
 
-static s8 da850_queue_tc_mapping[][2] = {
-	/* {event queue no, TC no} */
-	{0, 0},
-	{-1, -1}
-};
-
 static s8 da850_queue_priority_mapping[][2] = {
 	/* {event queue no, Priority} */
 	{0, 3},
@@ -161,12 +148,6 @@ static s8 da850_queue_priority_mapping[][2] = {
 };
 
 static struct edma_soc_info da830_edma_cc0_info = {
-	.n_channel		= 32,
-	.n_region		= 4,
-	.n_slot			= 128,
-	.n_tc			= 2,
-	.n_cc			= 1,
-	.queue_tc_mapping	= da8xx_queue_tc_mapping,
 	.queue_priority_mapping	= da8xx_queue_priority_mapping,
 	.default_queue		= EVENTQ_1,
 };
@@ -177,22 +158,10 @@ static struct edma_soc_info *da830_edma_info[EDMA_MAX_CC] = {
 
 static struct edma_soc_info da850_edma_cc_info[] = {
 	{
-		.n_channel		= 32,
-		.n_region		= 4,
-		.n_slot			= 128,
-		.n_tc			= 2,
-		.n_cc			= 1,
-		.queue_tc_mapping	= da8xx_queue_tc_mapping,
 		.queue_priority_mapping	= da8xx_queue_priority_mapping,
 		.default_queue		= EVENTQ_1,
 	},
 	{
-		.n_channel		= 32,
-		.n_region		= 4,
-		.n_slot			= 128,
-		.n_tc			= 1,
-		.n_cc			= 1,
-		.queue_tc_mapping	= da850_queue_tc_mapping,
 		.queue_priority_mapping	= da850_queue_priority_mapping,
 		.default_queue		= EVENTQ_0,
 	},
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 07381d8cea62..2f3ed3a58d57 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -569,14 +569,6 @@ static u8 dm355_default_priorities[DAVINCI_N_AINTC_IRQ] = {
 /*----------------------------------------------------------------------*/
 
 static s8
-queue_tc_mapping[][2] = {
-	/* {event queue no, TC no} */
-	{0, 0},
-	{1, 1},
-	{-1, -1},
-};
-
-static s8
 queue_priority_mapping[][2] = {
 	/* {event queue no, Priority} */
 	{0, 3},
@@ -585,12 +577,6 @@ queue_priority_mapping[][2] = {
 };
 
 static struct edma_soc_info edma_cc0_info = {
-	.n_channel		= 64,
-	.n_region		= 4,
-	.n_slot			= 128,
-	.n_tc			= 2,
-	.n_cc			= 1,
-	.queue_tc_mapping	= queue_tc_mapping,
 	.queue_priority_mapping	= queue_priority_mapping,
 	.default_queue		= EVENTQ_1,
 };
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 08a61b938333..0ae8114f5cc9 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -853,16 +853,6 @@ static u8 dm365_default_priorities[DAVINCI_N_AINTC_IRQ] = {
 
 /* Four Transfer Controllers on DM365 */
 static s8
-dm365_queue_tc_mapping[][2] = {
-	/* {event queue no, TC no} */
-	{0, 0},
-	{1, 1},
-	{2, 2},
-	{3, 3},
-	{-1, -1},
-};
-
-static s8
 dm365_queue_priority_mapping[][2] = {
 	/* {event queue no, Priority} */
 	{0, 7},
@@ -873,12 +863,6 @@ dm365_queue_priority_mapping[][2] = {
 };
 
 static struct edma_soc_info edma_cc0_info = {
-	.n_channel		= 64,
-	.n_region		= 4,
-	.n_slot			= 256,
-	.n_tc			= 4,
-	.n_cc			= 1,
-	.queue_tc_mapping	= dm365_queue_tc_mapping,
 	.queue_priority_mapping	= dm365_queue_priority_mapping,
 	.default_queue		= EVENTQ_3,
 };
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 5debffba4b24..dc52657909c4 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -499,14 +499,6 @@ static u8 dm644x_default_priorities[DAVINCI_N_AINTC_IRQ] = {
 /*----------------------------------------------------------------------*/
 
 static s8
-queue_tc_mapping[][2] = {
-	/* {event queue no, TC no} */
-	{0, 0},
-	{1, 1},
-	{-1, -1},
-};
-
-static s8
 queue_priority_mapping[][2] = {
 	/* {event queue no, Priority} */
 	{0, 3},
@@ -515,12 +507,6 @@ queue_priority_mapping[][2] = {
 };
 
 static struct edma_soc_info edma_cc0_info = {
-	.n_channel		= 64,
-	.n_region		= 4,
-	.n_slot			= 128,
-	.n_tc			= 2,
-	.n_cc			= 1,
-	.queue_tc_mapping	= queue_tc_mapping,
 	.queue_priority_mapping	= queue_priority_mapping,
 	.default_queue		= EVENTQ_1,
 };
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 332d00d24dc2..6c3bbea7d77d 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -533,16 +533,6 @@ static u8 dm646x_default_priorities[DAVINCI_N_AINTC_IRQ] = {
 
 /* Four Transfer Controllers on DM646x */
 static s8
-dm646x_queue_tc_mapping[][2] = {
-	/* {event queue no, TC no} */
-	{0, 0},
-	{1, 1},
-	{2, 2},
-	{3, 3},
-	{-1, -1},
-};
-
-static s8
 dm646x_queue_priority_mapping[][2] = {
 	/* {event queue no, Priority} */
 	{0, 4},
@@ -553,12 +543,6 @@ dm646x_queue_priority_mapping[][2] = {
 };
 
 static struct edma_soc_info edma_cc0_info = {
-	.n_channel		= 64,
-	.n_region		= 6,	/* 0-1, 4-7 */
-	.n_slot			= 512,
-	.n_tc			= 4,
-	.n_cc			= 1,
-	.queue_tc_mapping	= dm646x_queue_tc_mapping,
 	.queue_priority_mapping	= dm646x_queue_priority_mapping,
 	.default_queue		= EVENTQ_1,
 };
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 926360c2db6a..d08c4dedef35 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -57,14 +57,48 @@
 #define EDMA_MAX_SLOTS		MAX_NR_SG
 #define EDMA_DESCRIPTORS	16
 
+struct edma_pset {
+	u32				len;
+	dma_addr_t			addr;
+	struct edmacc_param		param;
+};
+
 struct edma_desc {
 	struct virt_dma_desc		vdesc;
 	struct list_head		node;
+	enum dma_transfer_direction	direction;
 	int				cyclic;
 	int				absync;
 	int				pset_nr;
+	struct edma_chan		*echan;
 	int				processed;
-	struct edmacc_param		pset[0];
+
+	/*
+	 * The following 4 elements are used for residue accounting.
+	 *
+	 * - processed_stat: the number of SG elements we have traversed
+	 * so far to cover accounting. This is updated directly to processed
+	 * during edma_callback and is always <= processed, because processed
+	 * refers to the number of pending transfer (programmed to EDMA
+	 * controller), where as processed_stat tracks number of transfers
+	 * accounted for so far.
+	 *
+	 * - residue: The amount of bytes we have left to transfer for this desc
+	 *
+	 * - residue_stat: The residue in bytes of data we have covered
+	 * so far for accounting. This is updated directly to residue
+	 * during callbacks to keep it current.
+	 *
+	 * - sg_len: Tracks the length of the current intermediate transfer,
+	 * this is required to update the residue during intermediate transfer
+	 * completion callback.
+	 */
+	int				processed_stat;
+	u32				sg_len;
+	u32				residue;
+	u32				residue_stat;
+
+	struct edma_pset		pset[0];
 };
 
 struct edma_cc;
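Note that edma_desc still ends in a zero-length array, now of the richer edma_pset type, so a descriptor and all of its psets come from a single allocation; the prep callbacks below size it as sizeof(*edesc) + n * sizeof(edesc->pset[0]). A standalone sketch of that layout (hypothetical names, not the driver's code):

	#include <stdint.h>
	#include <stdlib.h>

	struct pset {
		uint32_t len;
		/* ... per-transfer parameters ... */
	};

	struct desc {
		int		nr;
		struct pset	pset[];	/* trailing flexible array, as in edma_desc */
	};

	static struct desc *alloc_desc(int n)
	{
		/* one allocation covers the header plus n trailing psets */
		struct desc *d = calloc(1, sizeof(*d) + n * sizeof(d->pset[0]));

		if (d)
			d->nr = n;
		return d;
	}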
@@ -136,12 +170,14 @@ static void edma_execute(struct edma_chan *echan)
 	/* Find out how many left */
 	left = edesc->pset_nr - edesc->processed;
 	nslots = min(MAX_NR_SG, left);
+	edesc->sg_len = 0;
 
 	/* Write descriptor PaRAM set(s) */
 	for (i = 0; i < nslots; i++) {
 		j = i + edesc->processed;
-		edma_write_slot(echan->slot[i], &edesc->pset[j]);
-		dev_dbg(echan->vchan.chan.device->dev,
+		edma_write_slot(echan->slot[i], &edesc->pset[j].param);
+		edesc->sg_len += edesc->pset[j].len;
+		dev_vdbg(echan->vchan.chan.device->dev,
 			"\n pset[%d]:\n"
 			"  chnum\t%d\n"
 			"  slot\t%d\n"
@@ -154,14 +190,14 @@ static void edma_execute(struct edma_chan *echan)
 			"  cidx\t%08x\n"
 			"  lkrld\t%08x\n",
 			j, echan->ch_num, echan->slot[i],
-			edesc->pset[j].opt,
-			edesc->pset[j].src,
-			edesc->pset[j].dst,
-			edesc->pset[j].a_b_cnt,
-			edesc->pset[j].ccnt,
-			edesc->pset[j].src_dst_bidx,
-			edesc->pset[j].src_dst_cidx,
-			edesc->pset[j].link_bcntrld);
+			edesc->pset[j].param.opt,
+			edesc->pset[j].param.src,
+			edesc->pset[j].param.dst,
+			edesc->pset[j].param.a_b_cnt,
+			edesc->pset[j].param.ccnt,
+			edesc->pset[j].param.src_dst_bidx,
+			edesc->pset[j].param.src_dst_cidx,
+			edesc->pset[j].param.link_bcntrld);
 		/* Link to the previous slot if not the last set */
 		if (i != (nslots - 1))
 			edma_link(echan->slot[i], echan->slot[i+1]);
@@ -183,7 +219,8 @@ static void edma_execute(struct edma_chan *echan)
 	}
 
 	if (edesc->processed <= MAX_NR_SG) {
-		dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
+		dev_dbg(dev, "first transfer starting on channel %d\n",
+			echan->ch_num);
 		edma_start(echan->ch_num);
 	} else {
 		dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
@@ -197,7 +234,7 @@ static void edma_execute(struct edma_chan *echan)
 	 * MAX_NR_SG
 	 */
 	if (echan->missed) {
-		dev_dbg(dev, "missed event in execute detected\n");
+		dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
 		edma_clean_channel(echan->ch_num);
 		edma_stop(echan->ch_num);
 		edma_start(echan->ch_num);
@@ -242,6 +279,26 @@ static int edma_slave_config(struct edma_chan *echan,
 	return 0;
 }
 
+static int edma_dma_pause(struct edma_chan *echan)
+{
+	/* Pause/Resume only allowed with cyclic mode */
+	if (!echan->edesc->cyclic)
+		return -EINVAL;
+
+	edma_pause(echan->ch_num);
+	return 0;
+}
+
+static int edma_dma_resume(struct edma_chan *echan)
+{
+	/* Pause/Resume only allowed with cyclic mode */
+	if (!echan->edesc->cyclic)
+		return -EINVAL;
+
+	edma_resume(echan->ch_num);
+	return 0;
+}
+
 static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 			unsigned long arg)
 {
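With the DMA_PAUSE/DMA_RESUME cases wired into edma_control() below, clients reach these helpers through the generic dmaengine wrappers. A hypothetical client-side sketch (not part of the patch); note the driver only honours pause/resume for cyclic transfers, e.g. audio:

	#include <linux/dmaengine.h>

	static void pause_window(struct dma_chan *chan)
	{
		dmaengine_pause(chan);	/* -> edma_control(chan, DMA_PAUSE, 0) */
		/* ... channel is quiesced here ... */
		dmaengine_resume(chan);	/* -> edma_control(chan, DMA_RESUME, 0) */
	}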
@@ -257,6 +314,14 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		config = (struct dma_slave_config *)arg;
 		ret = edma_slave_config(echan, config);
 		break;
+	case DMA_PAUSE:
+		ret = edma_dma_pause(echan);
+		break;
+
+	case DMA_RESUME:
+		ret = edma_dma_resume(echan);
+		break;
+
 	default:
 		ret = -ENOSYS;
 	}
@@ -275,18 +340,23 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
  * @dma_length: Total length of the DMA transfer
  * @direction: Direction of the transfer
  */
-static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
+static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
 	dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
 	enum dma_slave_buswidth dev_width, unsigned int dma_length,
 	enum dma_transfer_direction direction)
 {
 	struct edma_chan *echan = to_edma_chan(chan);
 	struct device *dev = chan->device->dev;
+	struct edmacc_param *param = &epset->param;
 	int acnt, bcnt, ccnt, cidx;
 	int src_bidx, dst_bidx, src_cidx, dst_cidx;
 	int absync;
 
 	acnt = dev_width;
+
+	/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
+	if (!burst)
+		burst = 1;
 	/*
 	 * If the maxburst is equal to the fifo width, use
 	 * A-synced transfers. This allows for large contiguous
@@ -337,41 +407,50 @@ static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
 		cidx = acnt * bcnt;
 	}
 
+	epset->len = dma_length;
+
 	if (direction == DMA_MEM_TO_DEV) {
 		src_bidx = acnt;
 		src_cidx = cidx;
 		dst_bidx = 0;
 		dst_cidx = 0;
+		epset->addr = src_addr;
 	} else if (direction == DMA_DEV_TO_MEM)  {
 		src_bidx = 0;
 		src_cidx = 0;
 		dst_bidx = acnt;
 		dst_cidx = cidx;
+		epset->addr = dst_addr;
+	} else if (direction == DMA_MEM_TO_MEM)  {
+		src_bidx = acnt;
+		src_cidx = cidx;
+		dst_bidx = acnt;
+		dst_cidx = cidx;
 	} else {
 		dev_err(dev, "%s: direction not implemented yet\n", __func__);
 		return -EINVAL;
 	}
 
-	pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
 	/* Configure A or AB synchronized transfers */
 	if (absync)
-		pset->opt |= SYNCDIM;
+		param->opt |= SYNCDIM;
 
-	pset->src = src_addr;
-	pset->dst = dst_addr;
+	param->src = src_addr;
+	param->dst = dst_addr;
 
-	pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
-	pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;
+	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+	param->src_dst_cidx = (dst_cidx << 16) | src_cidx;
 
-	pset->a_b_cnt = bcnt << 16 | acnt;
-	pset->ccnt = ccnt;
+	param->a_b_cnt = bcnt << 16 | acnt;
+	param->ccnt = ccnt;
 	/*
 	 * Only time when (bcntrld) auto reload is required is for
 	 * A-sync case, and in this case, a requirement of reload value
 	 * of SZ_64K-1 only is assured. 'link' is initially set to NULL
 	 * and then later will be populated by edma_execute.
 	 */
-	pset->link_bcntrld = 0xffffffff;
+	param->link_bcntrld = 0xffffffff;
 	return absync;
 }
 
@@ -401,23 +480,26 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 		dev_width = echan->cfg.dst_addr_width;
 		burst = echan->cfg.dst_maxburst;
 	} else {
-		dev_err(dev, "%s: bad direction?\n", __func__);
+		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
 		return NULL;
 	}
 
 	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
-		dev_err(dev, "Undefined slave buswidth\n");
+		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
 		return NULL;
 	}
 
 	edesc = kzalloc(sizeof(*edesc) + sg_len *
 		sizeof(edesc->pset[0]), GFP_ATOMIC);
 	if (!edesc) {
-		dev_dbg(dev, "Failed to allocate a descriptor\n");
+		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
 		return NULL;
 	}
 
 	edesc->pset_nr = sg_len;
+	edesc->residue = 0;
+	edesc->direction = direction;
+	edesc->echan = echan;
 
 	/* Allocate a PaRAM slot, if needed */
 	nslots = min_t(unsigned, MAX_NR_SG, sg_len);
@@ -429,7 +511,8 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 						EDMA_SLOT_ANY);
 			if (echan->slot[i] < 0) {
 				kfree(edesc);
-				dev_err(dev, "Failed to allocate slot\n");
+				dev_err(dev, "%s: Failed to allocate slot\n",
+					__func__);
 				return NULL;
 			}
 		}
@@ -452,16 +535,56 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 		}
 
 		edesc->absync = ret;
+		edesc->residue += sg_dma_len(sg);
 
 		/* If this is the last in a current SG set of transactions,
 		   enable interrupts so that next set is processed */
 		if (!((i+1) % MAX_NR_SG))
-			edesc->pset[i].opt |= TCINTEN;
+			edesc->pset[i].param.opt |= TCINTEN;
 
 		/* If this is the last set, enable completion interrupt flag */
 		if (i == sg_len - 1)
-			edesc->pset[i].opt |= TCINTEN;
+			edesc->pset[i].param.opt |= TCINTEN;
 	}
+	edesc->residue_stat = edesc->residue;
+
+	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
+}
+
+struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
+	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+	size_t len, unsigned long tx_flags)
+{
+	int ret;
+	struct edma_desc *edesc;
+	struct device *dev = chan->device->dev;
+	struct edma_chan *echan = to_edma_chan(chan);
+
+	if (unlikely(!echan || !len))
+		return NULL;
+
+	edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC);
+	if (!edesc) {
+		dev_dbg(dev, "Failed to allocate a descriptor\n");
+		return NULL;
+	}
+
+	edesc->pset_nr = 1;
+
+	ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
+			       DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM);
+	if (ret < 0)
+		return NULL;
+
+	edesc->absync = ret;
+
+	/*
+	 * Enable intermediate transfer chaining to re-trigger channel
+	 * on completion of every TR, and enable transfer-completion
+	 * interrupt on completion of the whole transfer.
+	 */
+	edesc->pset[0].param.opt |= ITCCHEN;
+	edesc->pset[0].param.opt |= TCINTEN;
 
 	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
 }
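A hypothetical client-side sketch of driving the new memcpy path through the generic dmaengine flow (error handling mostly elided; the prep op is reached via the dma_device ops table):

	#include <linux/dmaengine.h>

	static void memcpy_via_edma(struct dma_chan *chan, dma_addr_t dst,
				    dma_addr_t src, size_t len)
	{
		struct dma_async_tx_descriptor *tx;

		tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
							  DMA_PREP_INTERRUPT);
		if (!tx)
			return;

		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
	}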
@@ -493,12 +616,12 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 		dev_width = echan->cfg.dst_addr_width;
 		burst = echan->cfg.dst_maxburst;
 	} else {
-		dev_err(dev, "%s: bad direction?\n", __func__);
+		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
 		return NULL;
 	}
 
 	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
-		dev_err(dev, "Undefined slave buswidth\n");
+		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
 		return NULL;
 	}
 
@@ -523,16 +646,18 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 	edesc = kzalloc(sizeof(*edesc) + nslots *
 		sizeof(edesc->pset[0]), GFP_ATOMIC);
 	if (!edesc) {
-		dev_dbg(dev, "Failed to allocate a descriptor\n");
+		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
 		return NULL;
 	}
 
 	edesc->cyclic = 1;
 	edesc->pset_nr = nslots;
+	edesc->residue = edesc->residue_stat = buf_len;
+	edesc->direction = direction;
+	edesc->echan = echan;
 
-	dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots);
-	dev_dbg(dev, "%s: period_len=%d\n", __func__, period_len);
-	dev_dbg(dev, "%s: buf_len=%d\n", __func__, buf_len);
+	dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
+		__func__, echan->ch_num, nslots, period_len, buf_len);
 
 	for (i = 0; i < nslots; i++) {
 		/* Allocate a PaRAM slot, if needed */
@@ -542,7 +667,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 						EDMA_SLOT_ANY);
 			if (echan->slot[i] < 0) {
 				kfree(edesc);
-				dev_err(dev, "Failed to allocate slot\n");
+				dev_err(dev, "%s: Failed to allocate slot\n",
+					__func__);
 				return NULL;
 			}
 		}
@@ -566,8 +692,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 		else
 			src_addr += period_len;
 
-		dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
-		dev_dbg(dev,
+		dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
+		dev_vdbg(dev,
 			"\n pset[%d]:\n"
 			"  chnum\t%d\n"
 			"  slot\t%d\n"
@@ -580,14 +706,14 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 			"  cidx\t%08x\n"
 			"  lkrld\t%08x\n",
 			i, echan->ch_num, echan->slot[i],
-			edesc->pset[i].opt,
-			edesc->pset[i].src,
-			edesc->pset[i].dst,
-			edesc->pset[i].a_b_cnt,
-			edesc->pset[i].ccnt,
-			edesc->pset[i].src_dst_bidx,
-			edesc->pset[i].src_dst_cidx,
-			edesc->pset[i].link_bcntrld);
+			edesc->pset[i].param.opt,
+			edesc->pset[i].param.src,
+			edesc->pset[i].param.dst,
+			edesc->pset[i].param.a_b_cnt,
+			edesc->pset[i].param.ccnt,
+			edesc->pset[i].param.src_dst_bidx,
+			edesc->pset[i].param.src_dst_cidx,
+			edesc->pset[i].param.link_bcntrld);
 
 		edesc->absync = ret;
 
@@ -595,7 +721,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 		 * Enable interrupts for every period because callback
 		 * has to be called for every period.
 		 */
-		edesc->pset[i].opt |= TCINTEN;
+		edesc->pset[i].param.opt |= TCINTEN;
 	}
 
 	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
@@ -606,7 +732,6 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
 	struct edma_chan *echan = data;
 	struct device *dev = echan->vchan.chan.device->dev;
 	struct edma_desc *edesc;
-	unsigned long flags;
 	struct edmacc_param p;
 
 	edesc = echan->edesc;
@@ -617,27 +742,34 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
 
 	switch (ch_status) {
 	case EDMA_DMA_COMPLETE:
-		spin_lock_irqsave(&echan->vchan.lock, flags);
+		spin_lock(&echan->vchan.lock);
 
 		if (edesc) {
 			if (edesc->cyclic) {
 				vchan_cyclic_callback(&edesc->vdesc);
 			} else if (edesc->processed == edesc->pset_nr) {
 				dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
+				edesc->residue = 0;
 				edma_stop(echan->ch_num);
 				vchan_cookie_complete(&edesc->vdesc);
 				edma_execute(echan);
 			} else {
 				dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
+
+				/* Update statistics for tx_status */
+				edesc->residue -= edesc->sg_len;
+				edesc->residue_stat = edesc->residue;
+				edesc->processed_stat = edesc->processed;
+
 				edma_execute(echan);
 			}
 		}
 
-		spin_unlock_irqrestore(&echan->vchan.lock, flags);
+		spin_unlock(&echan->vchan.lock);
 
 		break;
 	case EDMA_DMA_CC_ERROR:
-		spin_lock_irqsave(&echan->vchan.lock, flags);
+		spin_lock(&echan->vchan.lock);
 
 		edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);
 
@@ -668,7 +800,7 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
 			edma_trigger_channel(echan->ch_num);
 		}
 
-		spin_unlock_irqrestore(&echan->vchan.lock, flags);
+		spin_unlock(&echan->vchan.lock);
 
 		break;
 	default:
@@ -704,7 +836,7 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
 	echan->alloced = true;
 	echan->slot[0] = echan->ch_num;
 
-	dev_dbg(dev, "allocated channel for %u:%u\n",
+	dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
 		EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
 
 	return 0;
@@ -756,23 +888,52 @@ static void edma_issue_pending(struct dma_chan *chan) | |||
756 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | 888 | spin_unlock_irqrestore(&echan->vchan.lock, flags); |
757 | } | 889 | } |
758 | 890 | ||
759 | static size_t edma_desc_size(struct edma_desc *edesc) | 891 | static u32 edma_residue(struct edma_desc *edesc) |
760 | { | 892 | { |
893 | bool dst = edesc->direction == DMA_DEV_TO_MEM; | ||
894 | struct edma_pset *pset = edesc->pset; | ||
895 | dma_addr_t done, pos; | ||
761 | int i; | 896 | int i; |
762 | size_t size; | 897 | |
763 | 898 | /* | |
764 | if (edesc->absync) | 899 | * We always read the dst/src position from the first RamPar |
765 | for (size = i = 0; i < edesc->pset_nr; i++) | 900 | * pset. That's the one which is active now. |
766 | size += (edesc->pset[i].a_b_cnt & 0xffff) * | 901 | */ |
767 | (edesc->pset[i].a_b_cnt >> 16) * | 902 | pos = edma_get_position(edesc->echan->slot[0], dst); |
768 | edesc->pset[i].ccnt; | 903 | |
769 | else | 904 | /* |
770 | size = (edesc->pset[0].a_b_cnt & 0xffff) * | 905 | * Cyclic is simple. Just subtract pset[0].addr from pos. |
771 | (edesc->pset[0].a_b_cnt >> 16) + | 906 | * |
772 | (edesc->pset[0].a_b_cnt & 0xffff) * | 908 | * can report the remaining room to the end of the circular |
773 | (SZ_64K - 1) * edesc->pset[0].ccnt; | 908 | * can tell the remaining room to the end of the circular |
774 | 909 | * buffer. | |
775 | return size; | 910 | */ |
911 | if (edesc->cyclic) { | ||
912 | done = pos - pset->addr; | ||
913 | edesc->residue_stat = edesc->residue - done; | ||
914 | return edesc->residue_stat; | ||
915 | } | ||
916 | |||
917 | /* | ||
918 | * For SG operation we catch up with the last processed | ||
919 | * status. | ||
920 | */ | ||
921 | pset += edesc->processed_stat; | ||
922 | |||
923 | for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) { | ||
924 | /* | ||
925 | * If we are inside this pset address range, we know | ||
926 | * this is the active one. Get the current delta and | ||
927 | * stop walking the psets. | ||
928 | */ | ||
929 | if (pos >= pset->addr && pos < pset->addr + pset->len) | ||
930 | return edesc->residue_stat - (pos - pset->addr); | ||
931 | |||
932 | /* Otherwise mark it done and update residue_stat. */ | ||
933 | edesc->processed_stat++; | ||
934 | edesc->residue_stat -= pset->len; | ||
935 | } | ||
936 | return edesc->residue_stat; | ||
776 | } | 937 | } |
777 | 938 | ||
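edma_residue() above converts a raw controller position into bytes: cyclic transfers just subtract the buffer base, while scatter-gather transfers walk the psets between processed_stat and processed until one's [addr, addr + len) window contains the position. A standalone model of that walk, with a hypothetical flattened pset array:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical flattened pset: start address and length in bytes. */
struct pset { uint32_t addr; uint32_t len; };

/* Mirror of the SG branch of edma_residue(): walk the not-yet-accounted
 * psets; the one whose address window contains 'pos' is active. */
static uint32_t residue_walk(struct pset *pset, unsigned int *processed_stat,
			     unsigned int processed, uint32_t *residue_stat,
			     uint32_t pos)
{
	for (unsigned int i = *processed_stat; i < processed; i++) {
		struct pset *p = &pset[i];

		/* Inside this pset's window: it is the active one. */
		if (pos >= p->addr && pos < p->addr + p->len)
			return *residue_stat - (pos - p->addr);

		/* Otherwise mark it done and update the statistics. */
		(*processed_stat)++;
		*residue_stat -= p->len;
	}
	return *residue_stat;
}

int main(void)
{
	struct pset psets[] = { { 0x1000, 256 }, { 0x2000, 256 }, { 0x3000, 256 } };
	unsigned int processed_stat = 0;
	uint32_t residue_stat = 768;

	/* Position 100 bytes into the second pset: the first pset is
	 * folded in (768 - 256 = 512), then 512 - 100 = 412. */
	printf("residue = %u\n",
	       (unsigned)residue_walk(psets, &processed_stat, 3,
				      &residue_stat, 0x2000 + 100));
	return 0;
}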
778 | /* Check request completion status */ | 939 | /* Check request completion status */ |
@@ -790,13 +951,10 @@ static enum dma_status edma_tx_status(struct dma_chan *chan, | |||
790 | return ret; | 951 | return ret; |
791 | 952 | ||
792 | spin_lock_irqsave(&echan->vchan.lock, flags); | 953 | spin_lock_irqsave(&echan->vchan.lock, flags); |
793 | vdesc = vchan_find_desc(&echan->vchan, cookie); | 954 | if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) |
794 | if (vdesc) { | 955 | txstate->residue = edma_residue(echan->edesc); |
795 | txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx)); | 956 | else if ((vdesc = vchan_find_desc(&echan->vchan, cookie))) |
796 | } else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) { | 957 | txstate->residue = to_edma_desc(&vdesc->tx)->residue; |
797 | struct edma_desc *edesc = echan->edesc; | ||
798 | txstate->residue = edma_desc_size(edesc); | ||
799 | } | ||
800 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | 958 | spin_unlock_irqrestore(&echan->vchan.lock, flags); |
801 | 959 | ||
802 | return ret; | 960 | return ret; |
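On the consumer side this bookkeeping is reached through the generic dmaengine query path; a hedged sketch of a client checking progress (chan and cookie are assumed to come from the caller's earlier setup):

#include <linux/dmaengine.h>

/* Sketch: read back the residue that edma_tx_status() now reports
 * per descriptor, whether active or still queued. */
static void check_progress(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
		pr_info("transfer pending, %u bytes left\n", state.residue);
	else if (status == DMA_COMPLETE)
		pr_info("transfer done\n");
}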
@@ -822,18 +980,43 @@ static void __init edma_chan_init(struct edma_cc *ecc, | |||
822 | } | 980 | } |
823 | } | 981 | } |
824 | 982 | ||
983 | #define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ | ||
984 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ | ||
985 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) | ||
986 | |||
987 | static int edma_dma_device_slave_caps(struct dma_chan *dchan, | ||
988 | struct dma_slave_caps *caps) | ||
989 | { | ||
990 | caps->src_addr_widths = EDMA_DMA_BUSWIDTHS; | ||
991 | caps->dstn_addr_widths = EDMA_DMA_BUSWIDTHS; | ||
992 | caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
993 | caps->cmd_pause = true; | ||
994 | caps->cmd_terminate = true; | ||
995 | caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; | ||
996 | |||
997 | return 0; | ||
998 | } | ||
999 | |||
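A peer driver can discover those capabilities through dma_get_slave_caps() before committing to a configuration; a minimal sketch using the field names visible in the hunk above:

#include <linux/dmaengine.h>

/* Sketch: confirm the channel supports 4-byte accesses and warn if
 * it cannot be paused, before configuring a slave transfer. */
static int check_edma_caps(struct dma_chan *chan)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(chan, &caps);
	if (ret)
		return ret;

	if (!(caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
		return -EINVAL;
	if (!caps.cmd_pause)
		pr_warn("channel cannot be paused\n");

	return 0;
}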
825 | static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, | 1000 | static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, |
826 | struct device *dev) | 1001 | struct device *dev) |
827 | { | 1002 | { |
828 | dma->device_prep_slave_sg = edma_prep_slave_sg; | 1003 | dma->device_prep_slave_sg = edma_prep_slave_sg; |
829 | dma->device_prep_dma_cyclic = edma_prep_dma_cyclic; | 1004 | dma->device_prep_dma_cyclic = edma_prep_dma_cyclic; |
1005 | dma->device_prep_dma_memcpy = edma_prep_dma_memcpy; | ||
830 | dma->device_alloc_chan_resources = edma_alloc_chan_resources; | 1006 | dma->device_alloc_chan_resources = edma_alloc_chan_resources; |
831 | dma->device_free_chan_resources = edma_free_chan_resources; | 1007 | dma->device_free_chan_resources = edma_free_chan_resources; |
832 | dma->device_issue_pending = edma_issue_pending; | 1008 | dma->device_issue_pending = edma_issue_pending; |
833 | dma->device_tx_status = edma_tx_status; | 1009 | dma->device_tx_status = edma_tx_status; |
834 | dma->device_control = edma_control; | 1010 | dma->device_control = edma_control; |
1011 | dma->device_slave_caps = edma_dma_device_slave_caps; | ||
835 | dma->dev = dev; | 1012 | dma->dev = dev; |
836 | 1013 | ||
1014 | /* | ||
1015 | * Code using DMA memcpy must ensure the transfer length | ||
1016 | * is aligned to the dma->copy_align boundary. | ||
1017 | */ | ||
1018 | dma->copy_align = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
1019 | |||
837 | INIT_LIST_HEAD(&dma->channels); | 1020 | INIT_LIST_HEAD(&dma->channels); |
838 | } | 1021 | } |
839 | 1022 | ||
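Given that constraint, a memcpy client keeps the length aligned before preparing a descriptor. A hedged sketch calling the device_prep_dma_memcpy callback installed above, treating copy_align as a 4-byte requirement (dst, src and len are assumed to be valid DMA mappings from the caller):

#include <linux/dmaengine.h>

/* Sketch: issue a DMA memcpy with the length trimmed to the 4-byte
 * boundary the driver advertises via dma->copy_align. */
static dma_cookie_t do_copy(struct dma_chan *chan, dma_addr_t dst,
			    dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;

	len = round_down(len, 4);	/* respect copy_align */

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_PREP_INTERRUPT |
						  DMA_CTRL_ACK);
	if (!tx)
		return -EBUSY;

	return dmaengine_submit(tx);
}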
@@ -861,6 +1044,8 @@ static int edma_probe(struct platform_device *pdev) | |||
861 | 1044 | ||
862 | dma_cap_zero(ecc->dma_slave.cap_mask); | 1045 | dma_cap_zero(ecc->dma_slave.cap_mask); |
863 | dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask); | 1046 | dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask); |
1047 | dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask); | ||
1048 | dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask); | ||
864 | 1049 | ||
865 | edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev); | 1050 | edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev); |
866 | 1051 | ||
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h index f50821cb64be..eb8d5627d080 100644 --- a/include/linux/platform_data/edma.h +++ b/include/linux/platform_data/edma.h | |||
@@ -43,15 +43,15 @@ | |||
43 | 43 | ||
44 | /* PaRAM slots are laid out like this */ | 44 | /* PaRAM slots are laid out like this */ |
45 | struct edmacc_param { | 45 | struct edmacc_param { |
46 | unsigned int opt; | 46 | u32 opt; |
47 | unsigned int src; | 47 | u32 src; |
48 | unsigned int a_b_cnt; | 48 | u32 a_b_cnt; |
49 | unsigned int dst; | 49 | u32 dst; |
50 | unsigned int src_dst_bidx; | 50 | u32 src_dst_bidx; |
51 | unsigned int link_bcntrld; | 51 | u32 link_bcntrld; |
52 | unsigned int src_dst_cidx; | 52 | u32 src_dst_cidx; |
53 | unsigned int ccnt; | 53 | u32 ccnt; |
54 | }; | 54 | } __packed; |
55 | 55 | ||
56 | /* fields in edmacc_param.opt */ | 56 | /* fields in edmacc_param.opt */ |
57 | #define SAM BIT(0) | 57 | #define SAM BIT(0) |
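The move to fixed-width u32 fields plus __packed pins the structure to the hardware PaRAM slot layout, eight 32-bit words, independent of compiler padding. A standalone check of that 32-byte assumption (C11, user space, GCC/Clang attribute syntax):

#include <stdint.h>
#include <stddef.h>

typedef uint32_t u32;

/* As in the header after this change: eight 32-bit words, packed. */
struct edmacc_param {
	u32 opt;
	u32 src;
	u32 a_b_cnt;
	u32 dst;
	u32 src_dst_bidx;
	u32 link_bcntrld;
	u32 src_dst_cidx;
	u32 ccnt;
} __attribute__((packed));

/* A PaRAM slot is eight registers, i.e. 32 bytes. */
_Static_assert(sizeof(struct edmacc_param) == 32,
	       "PaRAM slot must be 32 bytes");
_Static_assert(offsetof(struct edmacc_param, ccnt) == 28,
	       "ccnt is the last word of the slot");

int main(void) { return 0; }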
@@ -130,7 +130,7 @@ void edma_set_src(unsigned slot, dma_addr_t src_port, | |||
130 | enum address_mode mode, enum fifo_width); | 130 | enum address_mode mode, enum fifo_width); |
131 | void edma_set_dest(unsigned slot, dma_addr_t dest_port, | 131 | void edma_set_dest(unsigned slot, dma_addr_t dest_port, |
132 | enum address_mode mode, enum fifo_width); | 132 | enum address_mode mode, enum fifo_width); |
133 | void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst); | 133 | dma_addr_t edma_get_position(unsigned slot, bool dst); |
134 | void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx); | 134 | void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx); |
135 | void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx); | 135 | void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx); |
136 | void edma_set_transfer_params(unsigned slot, u16 acnt, u16 bcnt, u16 ccnt, | 136 | void edma_set_transfer_params(unsigned slot, u16 acnt, u16 bcnt, u16 ccnt, |
@@ -158,13 +158,6 @@ struct edma_rsv_info { | |||
158 | 158 | ||
159 | /* platform_data for EDMA driver */ | 159 | /* platform_data for EDMA driver */ |
160 | struct edma_soc_info { | 160 | struct edma_soc_info { |
161 | |||
162 | /* how many dma resources of each type */ | ||
163 | unsigned n_channel; | ||
164 | unsigned n_region; | ||
165 | unsigned n_slot; | ||
166 | unsigned n_tc; | ||
167 | unsigned n_cc; | ||
168 | /* | 161 | /* |
169 | * Default queue is expected to be a low-priority queue. | 162 | * Default queue is expected to be a low-priority queue. |
170 | * This way, long transfers on the default queue started | 163 | * This way, long transfers on the default queue started |
@@ -175,7 +168,6 @@ struct edma_soc_info { | |||
175 | /* Resource reservation for other cores */ | 168 | /* Resource reservation for other cores */ |
176 | struct edma_rsv_info *rsv; | 169 | struct edma_rsv_info *rsv; |
177 | 170 | ||
178 | s8 (*queue_tc_mapping)[2]; | ||
179 | s8 (*queue_priority_mapping)[2]; | 171 | s8 (*queue_priority_mapping)[2]; |
180 | const s16 (*xbar_chans)[2]; | 172 | const s16 (*xbar_chans)[2]; |
181 | }; | 173 | }; |
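After this trim a board file supplies only policy, not resource counts, which the driver now reads from the IP's configuration registers. A hypothetical initializer showing the surviving fields (names of the table and values are illustrative, modeled on existing davinci board code):

#include <linux/platform_data/edma.h>

/* Hypothetical queue -> priority table, {-1, -1} terminated as in
 * the davinci board files touched by this series. */
static s8 queue_priority_mapping[][2] = {
	{ 0, 3 },	/* queue 0, priority 3 */
	{ 1, 7 },	/* queue 1, priority 7 */
	{ -1, -1 },
};

/* The counts (n_channel, n_slot, ...) are gone: the driver reads
 * them from the hardware instead. */
static struct edma_soc_info example_edma_info = {
	.default_queue		= EVENTQ_1,
	.queue_priority_mapping	= queue_priority_mapping,
};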