author     Vinod Koul <vinod.koul@intel.com>   2011-07-27 11:13:21 -0400
committer  Vinod Koul <vinod.koul@intel.com>   2011-07-27 11:13:21 -0400
commit     1ae105aa7416087f2920c35c3cd16831d0d09c9c (patch)
tree       935b2d7c2b902f77b37e38ec9108f905fb09f690 /drivers/dma
parent     02f8c6aee8df3cdc935e9bdd4f2d020306035dbe (diff)
parent     5a42fb93e6a33224774786691027ef2d9795c245 (diff)
Merge branch 'next' into for-linus-3.0
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig          |    7
-rw-r--r--  drivers/dma/Makefile         |    1
-rw-r--r--  drivers/dma/TODO             |    1
-rw-r--r--  drivers/dma/amba-pl08x.c     |  246
-rw-r--r--  drivers/dma/at_hdmac.c       |    4
-rw-r--r--  drivers/dma/coh901318.c      |   19
-rw-r--r--  drivers/dma/dmaengine.c      |    4
-rw-r--r--  drivers/dma/ep93xx_dma.c     | 1355
-rw-r--r--  drivers/dma/imx-sdma.c       |    4
-rw-r--r--  drivers/dma/intel_mid_dma.c  |    2
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c  |    6
-rw-r--r--  drivers/dma/mv_xor.c         |    4
-rw-r--r--  drivers/dma/mxs-dma.c        |   13
-rw-r--r--  drivers/dma/pch_dma.c        |  127
-rw-r--r--  drivers/dma/pl330.c          |   64
-rw-r--r--  drivers/dma/ste_dma40.c      |  270
-rw-r--r--  drivers/dma/ste_dma40_ll.h   |    3
17 files changed, 1804 insertions, 326 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 25cf327cd1cb..2e3b3d38c465 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -237,6 +237,13 @@ config MXS_DMA | |||
237 | Support the MXS DMA engine. This engine including APBH-DMA | 237 | Support the MXS DMA engine. This engine including APBH-DMA |
238 | and APBX-DMA is integrated into Freescale i.MX23/28 chips. | 238 | and APBX-DMA is integrated into Freescale i.MX23/28 chips. |
239 | 239 | ||
240 | config EP93XX_DMA | ||
241 | bool "Cirrus Logic EP93xx DMA support" | ||
242 | depends on ARCH_EP93XX | ||
243 | select DMA_ENGINE | ||
244 | help | ||
245 | Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller. | ||
246 | |||
240 | config DMA_ENGINE | 247 | config DMA_ENGINE |
241 | bool | 248 | bool |
242 | 249 | ||
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 836095ab3c5c..30cf3b1f0c5c 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o | |||
25 | obj-$(CONFIG_PL330_DMA) += pl330.o | 25 | obj-$(CONFIG_PL330_DMA) += pl330.o |
26 | obj-$(CONFIG_PCH_DMA) += pch_dma.o | 26 | obj-$(CONFIG_PCH_DMA) += pch_dma.o |
27 | obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o | 27 | obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o |
28 | obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o | ||
diff --git a/drivers/dma/TODO b/drivers/dma/TODO
index a4af8589330c..734ed0206cd5 100644
--- a/drivers/dma/TODO
+++ b/drivers/dma/TODO
@@ -9,6 +9,5 @@ TODO for slave dma | |||
9 | - mxs-dma.c | 9 | - mxs-dma.c |
10 | - dw_dmac | 10 | - dw_dmac |
11 | - intel_mid_dma | 11 | - intel_mid_dma |
12 | - ste_dma40 | ||
13 | 4. Check other subsystems for dma drivers and merge/move to dmaengine | 12 | 4. Check other subsystems for dma drivers and merge/move to dmaengine |
14 | 5. Remove dma_slave_config's dma direction. | 13 | 5. Remove dma_slave_config's dma direction. |
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index e6d7228b1479..196a7378d332 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -156,14 +156,10 @@ struct pl08x_driver_data { | |||
156 | #define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */ | 156 | #define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */ |
157 | #define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT) | 157 | #define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT) |
158 | 158 | ||
159 | /* Minimum period between work queue runs */ | ||
160 | #define PL08X_WQ_PERIODMIN 20 | ||
161 | |||
162 | /* Size (bytes) of each LLI buffer allocated for one transfer */ | 159 | /* Size (bytes) of each LLI buffer allocated for one transfer */ |
163 | # define PL08X_LLI_TSFR_SIZE 0x2000 | 160 | # define PL08X_LLI_TSFR_SIZE 0x2000 |
164 | 161 | ||
165 | /* Maximum times we call dma_pool_alloc on this pool without freeing */ | 162 | /* Maximum times we call dma_pool_alloc on this pool without freeing */ |
166 | #define PL08X_MAX_ALLOCS 0x40 | ||
167 | #define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli)) | 163 | #define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli)) |
168 | #define PL08X_ALIGN 8 | 164 | #define PL08X_ALIGN 8 |
169 | 165 | ||
@@ -495,10 +491,10 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth, | |||
495 | 491 | ||
496 | struct pl08x_lli_build_data { | 492 | struct pl08x_lli_build_data { |
497 | struct pl08x_txd *txd; | 493 | struct pl08x_txd *txd; |
498 | struct pl08x_driver_data *pl08x; | ||
499 | struct pl08x_bus_data srcbus; | 494 | struct pl08x_bus_data srcbus; |
500 | struct pl08x_bus_data dstbus; | 495 | struct pl08x_bus_data dstbus; |
501 | size_t remainder; | 496 | size_t remainder; |
497 | u32 lli_bus; | ||
502 | }; | 498 | }; |
503 | 499 | ||
504 | /* | 500 | /* |
@@ -551,8 +547,7 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd, | |||
551 | llis_va[num_llis].src = bd->srcbus.addr; | 547 | llis_va[num_llis].src = bd->srcbus.addr; |
552 | llis_va[num_llis].dst = bd->dstbus.addr; | 548 | llis_va[num_llis].dst = bd->dstbus.addr; |
553 | llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli); | 549 | llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli); |
554 | if (bd->pl08x->lli_buses & PL08X_AHB2) | 550 | llis_va[num_llis].lli |= bd->lli_bus; |
555 | llis_va[num_llis].lli |= PL080_LLI_LM_AHB2; | ||
556 | 551 | ||
557 | if (cctl & PL080_CONTROL_SRC_INCR) | 552 | if (cctl & PL080_CONTROL_SRC_INCR) |
558 | bd->srcbus.addr += len; | 553 | bd->srcbus.addr += len; |
@@ -605,9 +600,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, | |||
605 | cctl = txd->cctl; | 600 | cctl = txd->cctl; |
606 | 601 | ||
607 | bd.txd = txd; | 602 | bd.txd = txd; |
608 | bd.pl08x = pl08x; | ||
609 | bd.srcbus.addr = txd->src_addr; | 603 | bd.srcbus.addr = txd->src_addr; |
610 | bd.dstbus.addr = txd->dst_addr; | 604 | bd.dstbus.addr = txd->dst_addr; |
605 | bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0; | ||
611 | 606 | ||
612 | /* Find maximum width of the source bus */ | 607 | /* Find maximum width of the source bus */ |
613 | bd.srcbus.maxwidth = | 608 | bd.srcbus.maxwidth = |
@@ -622,25 +617,15 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, | |||
622 | /* Set up the bus widths to the maximum */ | 617 | /* Set up the bus widths to the maximum */ |
623 | bd.srcbus.buswidth = bd.srcbus.maxwidth; | 618 | bd.srcbus.buswidth = bd.srcbus.maxwidth; |
624 | bd.dstbus.buswidth = bd.dstbus.maxwidth; | 619 | bd.dstbus.buswidth = bd.dstbus.maxwidth; |
625 | dev_vdbg(&pl08x->adev->dev, | ||
626 | "%s source bus is %d bytes wide, dest bus is %d bytes wide\n", | ||
627 | __func__, bd.srcbus.buswidth, bd.dstbus.buswidth); | ||
628 | |||
629 | 620 | ||
630 | /* | 621 | /* |
631 | * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) | 622 | * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) |
632 | */ | 623 | */ |
633 | max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) * | 624 | max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) * |
634 | PL080_CONTROL_TRANSFER_SIZE_MASK; | 625 | PL080_CONTROL_TRANSFER_SIZE_MASK; |
635 | dev_vdbg(&pl08x->adev->dev, | ||
636 | "%s max bytes per lli = %zu\n", | ||
637 | __func__, max_bytes_per_lli); | ||
638 | 626 | ||
639 | /* We need to count this down to zero */ | 627 | /* We need to count this down to zero */ |
640 | bd.remainder = txd->len; | 628 | bd.remainder = txd->len; |
641 | dev_vdbg(&pl08x->adev->dev, | ||
642 | "%s remainder = %zu\n", | ||
643 | __func__, bd.remainder); | ||
644 | 629 | ||
645 | /* | 630 | /* |
646 | * Choose bus to align to | 631 | * Choose bus to align to |
@@ -649,6 +634,16 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, | |||
649 | */ | 634 | */ |
650 | pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); | 635 | pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); |
651 | 636 | ||
637 | dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n", | ||
638 | bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "", | ||
639 | bd.srcbus.buswidth, | ||
640 | bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "", | ||
641 | bd.dstbus.buswidth, | ||
642 | bd.remainder, max_bytes_per_lli); | ||
643 | dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n", | ||
644 | mbus == &bd.srcbus ? "src" : "dst", | ||
645 | sbus == &bd.srcbus ? "src" : "dst"); | ||
646 | |||
652 | if (txd->len < mbus->buswidth) { | 647 | if (txd->len < mbus->buswidth) { |
653 | /* Less than a bus width available - send as single bytes */ | 648 | /* Less than a bus width available - send as single bytes */ |
654 | while (bd.remainder) { | 649 | while (bd.remainder) { |
@@ -840,15 +835,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, | |||
840 | { | 835 | { |
841 | int i; | 836 | int i; |
842 | 837 | ||
838 | dev_vdbg(&pl08x->adev->dev, | ||
839 | "%-3s %-9s %-10s %-10s %-10s %s\n", | ||
840 | "lli", "", "csrc", "cdst", "clli", "cctl"); | ||
843 | for (i = 0; i < num_llis; i++) { | 841 | for (i = 0; i < num_llis; i++) { |
844 | dev_vdbg(&pl08x->adev->dev, | 842 | dev_vdbg(&pl08x->adev->dev, |
845 | "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n", | 843 | "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n", |
846 | i, | 844 | i, &llis_va[i], llis_va[i].src, |
847 | &llis_va[i], | 845 | llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl |
848 | llis_va[i].src, | ||
849 | llis_va[i].dst, | ||
850 | llis_va[i].cctl, | ||
851 | llis_va[i].lli | ||
852 | ); | 846 | ); |
853 | } | 847 | } |
854 | } | 848 | } |
@@ -1054,64 +1048,105 @@ pl08x_dma_tx_status(struct dma_chan *chan, | |||
1054 | 1048 | ||
1055 | /* PrimeCell DMA extension */ | 1049 | /* PrimeCell DMA extension */ |
1056 | struct burst_table { | 1050 | struct burst_table { |
1057 | int burstwords; | 1051 | u32 burstwords; |
1058 | u32 reg; | 1052 | u32 reg; |
1059 | }; | 1053 | }; |
1060 | 1054 | ||
1061 | static const struct burst_table burst_sizes[] = { | 1055 | static const struct burst_table burst_sizes[] = { |
1062 | { | 1056 | { |
1063 | .burstwords = 256, | 1057 | .burstwords = 256, |
1064 | .reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1058 | .reg = PL080_BSIZE_256, |
1065 | (PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1066 | }, | 1059 | }, |
1067 | { | 1060 | { |
1068 | .burstwords = 128, | 1061 | .burstwords = 128, |
1069 | .reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1062 | .reg = PL080_BSIZE_128, |
1070 | (PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1071 | }, | 1063 | }, |
1072 | { | 1064 | { |
1073 | .burstwords = 64, | 1065 | .burstwords = 64, |
1074 | .reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1066 | .reg = PL080_BSIZE_64, |
1075 | (PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1076 | }, | 1067 | }, |
1077 | { | 1068 | { |
1078 | .burstwords = 32, | 1069 | .burstwords = 32, |
1079 | .reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1070 | .reg = PL080_BSIZE_32, |
1080 | (PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1081 | }, | 1071 | }, |
1082 | { | 1072 | { |
1083 | .burstwords = 16, | 1073 | .burstwords = 16, |
1084 | .reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1074 | .reg = PL080_BSIZE_16, |
1085 | (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1086 | }, | 1075 | }, |
1087 | { | 1076 | { |
1088 | .burstwords = 8, | 1077 | .burstwords = 8, |
1089 | .reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1078 | .reg = PL080_BSIZE_8, |
1090 | (PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1091 | }, | 1079 | }, |
1092 | { | 1080 | { |
1093 | .burstwords = 4, | 1081 | .burstwords = 4, |
1094 | .reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1082 | .reg = PL080_BSIZE_4, |
1095 | (PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1096 | }, | 1083 | }, |
1097 | { | 1084 | { |
1098 | .burstwords = 1, | 1085 | .burstwords = 0, |
1099 | .reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1086 | .reg = PL080_BSIZE_1, |
1100 | (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1101 | }, | 1087 | }, |
1102 | }; | 1088 | }; |
1103 | 1089 | ||
1090 | /* | ||
1091 | * Given the source and destination available bus masks, select which | ||
1092 | * will be routed to each port. We try to have source and destination | ||
1093 | * on separate ports, but always respect the allowable settings. | ||
1094 | */ | ||
1095 | static u32 pl08x_select_bus(u8 src, u8 dst) | ||
1096 | { | ||
1097 | u32 cctl = 0; | ||
1098 | |||
1099 | if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1))) | ||
1100 | cctl |= PL080_CONTROL_DST_AHB2; | ||
1101 | if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2))) | ||
1102 | cctl |= PL080_CONTROL_SRC_AHB2; | ||
1103 | |||
1104 | return cctl; | ||
1105 | } | ||
1106 | |||
1107 | static u32 pl08x_cctl(u32 cctl) | ||
1108 | { | ||
1109 | cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 | | ||
1110 | PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR | | ||
1111 | PL080_CONTROL_PROT_MASK); | ||
1112 | |||
1113 | /* Access the cell in privileged mode, non-bufferable, non-cacheable */ | ||
1114 | return cctl | PL080_CONTROL_PROT_SYS; | ||
1115 | } | ||
1116 | |||
1117 | static u32 pl08x_width(enum dma_slave_buswidth width) | ||
1118 | { | ||
1119 | switch (width) { | ||
1120 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
1121 | return PL080_WIDTH_8BIT; | ||
1122 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
1123 | return PL080_WIDTH_16BIT; | ||
1124 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
1125 | return PL080_WIDTH_32BIT; | ||
1126 | default: | ||
1127 | return ~0; | ||
1128 | } | ||
1129 | } | ||
1130 | |||
1131 | static u32 pl08x_burst(u32 maxburst) | ||
1132 | { | ||
1133 | int i; | ||
1134 | |||
1135 | for (i = 0; i < ARRAY_SIZE(burst_sizes); i++) | ||
1136 | if (burst_sizes[i].burstwords <= maxburst) | ||
1137 | break; | ||
1138 | |||
1139 | return burst_sizes[i].reg; | ||
1140 | } | ||
1141 | |||
1104 | static int dma_set_runtime_config(struct dma_chan *chan, | 1142 | static int dma_set_runtime_config(struct dma_chan *chan, |
1105 | struct dma_slave_config *config) | 1143 | struct dma_slave_config *config) |
1106 | { | 1144 | { |
1107 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | 1145 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
1108 | struct pl08x_driver_data *pl08x = plchan->host; | 1146 | struct pl08x_driver_data *pl08x = plchan->host; |
1109 | struct pl08x_channel_data *cd = plchan->cd; | ||
1110 | enum dma_slave_buswidth addr_width; | 1147 | enum dma_slave_buswidth addr_width; |
1111 | dma_addr_t addr; | 1148 | u32 width, burst, maxburst; |
1112 | u32 maxburst; | ||
1113 | u32 cctl = 0; | 1149 | u32 cctl = 0; |
1114 | int i; | ||
1115 | 1150 | ||
1116 | if (!plchan->slave) | 1151 | if (!plchan->slave) |
1117 | return -EINVAL; | 1152 | return -EINVAL; |
@@ -1119,11 +1154,9 @@ static int dma_set_runtime_config(struct dma_chan *chan, | |||
1119 | /* Transfer direction */ | 1154 | /* Transfer direction */ |
1120 | plchan->runtime_direction = config->direction; | 1155 | plchan->runtime_direction = config->direction; |
1121 | if (config->direction == DMA_TO_DEVICE) { | 1156 | if (config->direction == DMA_TO_DEVICE) { |
1122 | addr = config->dst_addr; | ||
1123 | addr_width = config->dst_addr_width; | 1157 | addr_width = config->dst_addr_width; |
1124 | maxburst = config->dst_maxburst; | 1158 | maxburst = config->dst_maxburst; |
1125 | } else if (config->direction == DMA_FROM_DEVICE) { | 1159 | } else if (config->direction == DMA_FROM_DEVICE) { |
1126 | addr = config->src_addr; | ||
1127 | addr_width = config->src_addr_width; | 1160 | addr_width = config->src_addr_width; |
1128 | maxburst = config->src_maxburst; | 1161 | maxburst = config->src_maxburst; |
1129 | } else { | 1162 | } else { |
@@ -1132,46 +1165,40 @@ static int dma_set_runtime_config(struct dma_chan *chan, | |||
1132 | return -EINVAL; | 1165 | return -EINVAL; |
1133 | } | 1166 | } |
1134 | 1167 | ||
1135 | switch (addr_width) { | 1168 | width = pl08x_width(addr_width); |
1136 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | 1169 | if (width == ~0) { |
1137 | cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) | | ||
1138 | (PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT); | ||
1139 | break; | ||
1140 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
1141 | cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) | | ||
1142 | (PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT); | ||
1143 | break; | ||
1144 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
1145 | cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) | | ||
1146 | (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT); | ||
1147 | break; | ||
1148 | default: | ||
1149 | dev_err(&pl08x->adev->dev, | 1170 | dev_err(&pl08x->adev->dev, |
1150 | "bad runtime_config: alien address width\n"); | 1171 | "bad runtime_config: alien address width\n"); |
1151 | return -EINVAL; | 1172 | return -EINVAL; |
1152 | } | 1173 | } |
1153 | 1174 | ||
1175 | cctl |= width << PL080_CONTROL_SWIDTH_SHIFT; | ||
1176 | cctl |= width << PL080_CONTROL_DWIDTH_SHIFT; | ||
1177 | |||
1154 | /* | 1178 | /* |
1155 | * Now decide on a maxburst: | ||
1156 | * If this channel will only request single transfers, set this | 1179 | * If this channel will only request single transfers, set this |
1157 | * down to ONE element. Also select one element if no maxburst | 1180 | * down to ONE element. Also select one element if no maxburst |
1158 | * is specified. | 1181 | * is specified. |
1159 | */ | 1182 | */ |
1160 | if (plchan->cd->single || maxburst == 0) { | 1183 | if (plchan->cd->single) |
1161 | cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1184 | maxburst = 1; |
1162 | (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT); | 1185 | |
1186 | burst = pl08x_burst(maxburst); | ||
1187 | cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; | ||
1188 | cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; | ||
1189 | |||
1190 | if (plchan->runtime_direction == DMA_FROM_DEVICE) { | ||
1191 | plchan->src_addr = config->src_addr; | ||
1192 | plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR | | ||
1193 | pl08x_select_bus(plchan->cd->periph_buses, | ||
1194 | pl08x->mem_buses); | ||
1163 | } else { | 1195 | } else { |
1164 | for (i = 0; i < ARRAY_SIZE(burst_sizes); i++) | 1196 | plchan->dst_addr = config->dst_addr; |
1165 | if (burst_sizes[i].burstwords <= maxburst) | 1197 | plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR | |
1166 | break; | 1198 | pl08x_select_bus(pl08x->mem_buses, |
1167 | cctl |= burst_sizes[i].reg; | 1199 | plchan->cd->periph_buses); |
1168 | } | 1200 | } |
1169 | 1201 | ||
1170 | plchan->runtime_addr = addr; | ||
1171 | |||
1172 | /* Modify the default channel data to fit PrimeCell request */ | ||
1173 | cd->cctl = cctl; | ||
1174 | |||
1175 | dev_dbg(&pl08x->adev->dev, | 1202 | dev_dbg(&pl08x->adev->dev, |
1176 | "configured channel %s (%s) for %s, data width %d, " | 1203 | "configured channel %s (%s) for %s, data width %d, " |
1177 | "maxburst %d words, LE, CCTL=0x%08x\n", | 1204 | "maxburst %d words, LE, CCTL=0x%08x\n", |
@@ -1270,23 +1297,6 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, | |||
1270 | return 0; | 1297 | return 0; |
1271 | } | 1298 | } |
1272 | 1299 | ||
1273 | /* | ||
1274 | * Given the source and destination available bus masks, select which | ||
1275 | * will be routed to each port. We try to have source and destination | ||
1276 | * on separate ports, but always respect the allowable settings. | ||
1277 | */ | ||
1278 | static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst) | ||
1279 | { | ||
1280 | u32 cctl = 0; | ||
1281 | |||
1282 | if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1))) | ||
1283 | cctl |= PL080_CONTROL_DST_AHB2; | ||
1284 | if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2))) | ||
1285 | cctl |= PL080_CONTROL_SRC_AHB2; | ||
1286 | |||
1287 | return cctl; | ||
1288 | } | ||
1289 | |||
1290 | static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, | 1300 | static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, |
1291 | unsigned long flags) | 1301 | unsigned long flags) |
1292 | { | 1302 | { |
@@ -1338,8 +1348,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( | |||
1338 | txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; | 1348 | txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; |
1339 | 1349 | ||
1340 | if (pl08x->vd->dualmaster) | 1350 | if (pl08x->vd->dualmaster) |
1341 | txd->cctl |= pl08x_select_bus(pl08x, | 1351 | txd->cctl |= pl08x_select_bus(pl08x->mem_buses, |
1342 | pl08x->mem_buses, pl08x->mem_buses); | 1352 | pl08x->mem_buses); |
1343 | 1353 | ||
1344 | ret = pl08x_prep_channel_resources(plchan, txd); | 1354 | ret = pl08x_prep_channel_resources(plchan, txd); |
1345 | if (ret) | 1355 | if (ret) |
@@ -1356,7 +1366,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | |||
1356 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | 1366 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
1357 | struct pl08x_driver_data *pl08x = plchan->host; | 1367 | struct pl08x_driver_data *pl08x = plchan->host; |
1358 | struct pl08x_txd *txd; | 1368 | struct pl08x_txd *txd; |
1359 | u8 src_buses, dst_buses; | ||
1360 | int ret; | 1369 | int ret; |
1361 | 1370 | ||
1362 | /* | 1371 | /* |
@@ -1390,42 +1399,22 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | |||
1390 | txd->direction = direction; | 1399 | txd->direction = direction; |
1391 | txd->len = sgl->length; | 1400 | txd->len = sgl->length; |
1392 | 1401 | ||
1393 | txd->cctl = plchan->cd->cctl & | ||
1394 | ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 | | ||
1395 | PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR | | ||
1396 | PL080_CONTROL_PROT_MASK); | ||
1397 | |||
1398 | /* Access the cell in privileged mode, non-bufferable, non-cacheable */ | ||
1399 | txd->cctl |= PL080_CONTROL_PROT_SYS; | ||
1400 | |||
1401 | if (direction == DMA_TO_DEVICE) { | 1402 | if (direction == DMA_TO_DEVICE) { |
1402 | txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; | 1403 | txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; |
1403 | txd->cctl |= PL080_CONTROL_SRC_INCR; | 1404 | txd->cctl = plchan->dst_cctl; |
1404 | txd->src_addr = sgl->dma_address; | 1405 | txd->src_addr = sgl->dma_address; |
1405 | if (plchan->runtime_addr) | 1406 | txd->dst_addr = plchan->dst_addr; |
1406 | txd->dst_addr = plchan->runtime_addr; | ||
1407 | else | ||
1408 | txd->dst_addr = plchan->cd->addr; | ||
1409 | src_buses = pl08x->mem_buses; | ||
1410 | dst_buses = plchan->cd->periph_buses; | ||
1411 | } else if (direction == DMA_FROM_DEVICE) { | 1407 | } else if (direction == DMA_FROM_DEVICE) { |
1412 | txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; | 1408 | txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; |
1413 | txd->cctl |= PL080_CONTROL_DST_INCR; | 1409 | txd->cctl = plchan->src_cctl; |
1414 | if (plchan->runtime_addr) | 1410 | txd->src_addr = plchan->src_addr; |
1415 | txd->src_addr = plchan->runtime_addr; | ||
1416 | else | ||
1417 | txd->src_addr = plchan->cd->addr; | ||
1418 | txd->dst_addr = sgl->dma_address; | 1411 | txd->dst_addr = sgl->dma_address; |
1419 | src_buses = plchan->cd->periph_buses; | ||
1420 | dst_buses = pl08x->mem_buses; | ||
1421 | } else { | 1412 | } else { |
1422 | dev_err(&pl08x->adev->dev, | 1413 | dev_err(&pl08x->adev->dev, |
1423 | "%s direction unsupported\n", __func__); | 1414 | "%s direction unsupported\n", __func__); |
1424 | return NULL; | 1415 | return NULL; |
1425 | } | 1416 | } |
1426 | 1417 | ||
1427 | txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses); | ||
1428 | |||
1429 | ret = pl08x_prep_channel_resources(plchan, txd); | 1418 | ret = pl08x_prep_channel_resources(plchan, txd); |
1430 | if (ret) | 1419 | if (ret) |
1431 | return NULL; | 1420 | return NULL; |
@@ -1676,6 +1665,20 @@ static irqreturn_t pl08x_irq(int irq, void *dev) | |||
1676 | return mask ? IRQ_HANDLED : IRQ_NONE; | 1665 | return mask ? IRQ_HANDLED : IRQ_NONE; |
1677 | } | 1666 | } |
1678 | 1667 | ||
1668 | static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) | ||
1669 | { | ||
1670 | u32 cctl = pl08x_cctl(chan->cd->cctl); | ||
1671 | |||
1672 | chan->slave = true; | ||
1673 | chan->name = chan->cd->bus_id; | ||
1674 | chan->src_addr = chan->cd->addr; | ||
1675 | chan->dst_addr = chan->cd->addr; | ||
1676 | chan->src_cctl = cctl | PL080_CONTROL_DST_INCR | | ||
1677 | pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses); | ||
1678 | chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR | | ||
1679 | pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses); | ||
1680 | } | ||
1681 | |||
1679 | /* | 1682 | /* |
1680 | * Initialise the DMAC memcpy/slave channels. | 1683 | * Initialise the DMAC memcpy/slave channels. |
1681 | * Make a local wrapper to hold required data | 1684 | * Make a local wrapper to hold required data |
@@ -1707,9 +1710,8 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | |||
1707 | chan->state = PL08X_CHAN_IDLE; | 1710 | chan->state = PL08X_CHAN_IDLE; |
1708 | 1711 | ||
1709 | if (slave) { | 1712 | if (slave) { |
1710 | chan->slave = true; | ||
1711 | chan->name = pl08x->pd->slave_channels[i].bus_id; | ||
1712 | chan->cd = &pl08x->pd->slave_channels[i]; | 1713 | chan->cd = &pl08x->pd->slave_channels[i]; |
1714 | pl08x_dma_slave_init(chan); | ||
1713 | } else { | 1715 | } else { |
1714 | chan->cd = &pl08x->pd->memcpy_channel; | 1716 | chan->cd = &pl08x->pd->memcpy_channel; |
1715 | chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); | 1717 | chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); |
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 36144f88d718..6a483eac7b3f 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1216,7 +1216,7 @@ static int __init at_dma_probe(struct platform_device *pdev) | |||
1216 | atdma->dma_common.cap_mask = pdata->cap_mask; | 1216 | atdma->dma_common.cap_mask = pdata->cap_mask; |
1217 | atdma->all_chan_mask = (1 << pdata->nr_channels) - 1; | 1217 | atdma->all_chan_mask = (1 << pdata->nr_channels) - 1; |
1218 | 1218 | ||
1219 | size = io->end - io->start + 1; | 1219 | size = resource_size(io); |
1220 | if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { | 1220 | if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { |
1221 | err = -EBUSY; | 1221 | err = -EBUSY; |
1222 | goto err_kfree; | 1222 | goto err_kfree; |
@@ -1362,7 +1362,7 @@ static int __exit at_dma_remove(struct platform_device *pdev) | |||
1362 | atdma->regs = NULL; | 1362 | atdma->regs = NULL; |
1363 | 1363 | ||
1364 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1364 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1365 | release_mem_region(io->start, io->end - io->start + 1); | 1365 | release_mem_region(io->start, resource_size(io)); |
1366 | 1366 | ||
1367 | kfree(atdma); | 1367 | kfree(atdma); |
1368 | 1368 | ||
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index af8c0b5ed70f..a7fca1653933 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -40,6 +40,8 @@ struct coh901318_desc { | |||
40 | struct coh901318_lli *lli; | 40 | struct coh901318_lli *lli; |
41 | enum dma_data_direction dir; | 41 | enum dma_data_direction dir; |
42 | unsigned long flags; | 42 | unsigned long flags; |
43 | u32 head_config; | ||
44 | u32 head_ctrl; | ||
43 | }; | 45 | }; |
44 | 46 | ||
45 | struct coh901318_base { | 47 | struct coh901318_base { |
@@ -660,6 +662,9 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc) | |||
660 | 662 | ||
661 | coh901318_desc_submit(cohc, cohd); | 663 | coh901318_desc_submit(cohc, cohd); |
662 | 664 | ||
665 | /* Program the transaction head */ | ||
666 | coh901318_set_conf(cohc, cohd->head_config); | ||
667 | coh901318_set_ctrl(cohc, cohd->head_ctrl); | ||
663 | coh901318_prep_linked_list(cohc, cohd->lli); | 668 | coh901318_prep_linked_list(cohc, cohd->lli); |
664 | 669 | ||
665 | /* start dma job on this channel */ | 670 | /* start dma job on this channel */ |
@@ -1090,8 +1095,6 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
1090 | } else | 1095 | } else |
1091 | goto err_direction; | 1096 | goto err_direction; |
1092 | 1097 | ||
1093 | coh901318_set_conf(cohc, config); | ||
1094 | |||
1095 | /* The dma only supports transmitting packages up to | 1098 | /* The dma only supports transmitting packages up to |
1096 | * MAX_DMA_PACKET_SIZE. Calculate to total number of | 1099 | * MAX_DMA_PACKET_SIZE. Calculate to total number of |
1097 | * dma elemts required to send the entire sg list | 1100 | * dma elemts required to send the entire sg list |
@@ -1128,16 +1131,18 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
1128 | if (ret) | 1131 | if (ret) |
1129 | goto err_lli_fill; | 1132 | goto err_lli_fill; |
1130 | 1133 | ||
1131 | /* | ||
1132 | * Set the default ctrl for the channel to the one from the lli, | ||
1133 | * things may have changed due to odd buffer alignment etc. | ||
1134 | */ | ||
1135 | coh901318_set_ctrl(cohc, lli->control); | ||
1136 | 1134 | ||
1137 | COH_DBG(coh901318_list_print(cohc, lli)); | 1135 | COH_DBG(coh901318_list_print(cohc, lli)); |
1138 | 1136 | ||
1139 | /* Pick a descriptor to handle this transfer */ | 1137 | /* Pick a descriptor to handle this transfer */ |
1140 | cohd = coh901318_desc_get(cohc); | 1138 | cohd = coh901318_desc_get(cohc); |
1139 | cohd->head_config = config; | ||
1140 | /* | ||
1141 | * Set the default head ctrl for the channel to the one from the | ||
1142 | * lli, things may have changed due to odd buffer alignment | ||
1143 | * etc. | ||
1144 | */ | ||
1145 | cohd->head_ctrl = lli->control; | ||
1141 | cohd->dir = direction; | 1146 | cohd->dir = direction; |
1142 | cohd->flags = flags; | 1147 | cohd->flags = flags; |
1143 | cohd->desc.tx_submit = coh901318_tx_submit; | 1148 | cohd->desc.tx_submit = coh901318_tx_submit; |
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 8bcb15fb959d..f7f21a5de3e1 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -509,8 +509,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v | |||
509 | dma_chan_name(chan)); | 509 | dma_chan_name(chan)); |
510 | list_del_rcu(&device->global_node); | 510 | list_del_rcu(&device->global_node); |
511 | } else if (err) | 511 | } else if (err) |
512 | pr_err("dmaengine: failed to get %s: (%d)\n", | 512 | pr_debug("dmaengine: failed to get %s: (%d)\n", |
513 | dma_chan_name(chan), err); | 513 | dma_chan_name(chan), err); |
514 | else | 514 | else |
515 | break; | 515 | break; |
516 | if (--device->privatecnt == 0) | 516 | if (--device->privatecnt == 0) |
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
new file mode 100644
index 000000000000..5d7a49bd7c26
--- /dev/null
+++ b/drivers/dma/ep93xx_dma.c
@@ -0,0 +1,1355 @@ | |||
1 | /* | ||
2 | * Driver for the Cirrus Logic EP93xx DMA Controller | ||
3 | * | ||
4 | * Copyright (C) 2011 Mika Westerberg | ||
5 | * | ||
6 | * DMA M2P implementation is based on the original | ||
7 | * arch/arm/mach-ep93xx/dma-m2p.c which has following copyrights: | ||
8 | * | ||
9 | * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> | ||
10 | * Copyright (C) 2006 Applied Data Systems | ||
11 | * Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com> | ||
12 | * | ||
13 | * This driver is based on dw_dmac and amba-pl08x drivers. | ||
14 | * | ||
15 | * This program is free software; you can redistribute it and/or modify | ||
16 | * it under the terms of the GNU General Public License as published by | ||
17 | * the Free Software Foundation; either version 2 of the License, or | ||
18 | * (at your option) any later version. | ||
19 | */ | ||
20 | |||
21 | #include <linux/clk.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/dmaengine.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <linux/slab.h> | ||
27 | |||
28 | #include <mach/dma.h> | ||
29 | |||
30 | /* M2P registers */ | ||
31 | #define M2P_CONTROL 0x0000 | ||
32 | #define M2P_CONTROL_STALLINT BIT(0) | ||
33 | #define M2P_CONTROL_NFBINT BIT(1) | ||
34 | #define M2P_CONTROL_CH_ERROR_INT BIT(3) | ||
35 | #define M2P_CONTROL_ENABLE BIT(4) | ||
36 | #define M2P_CONTROL_ICE BIT(6) | ||
37 | |||
38 | #define M2P_INTERRUPT 0x0004 | ||
39 | #define M2P_INTERRUPT_STALL BIT(0) | ||
40 | #define M2P_INTERRUPT_NFB BIT(1) | ||
41 | #define M2P_INTERRUPT_ERROR BIT(3) | ||
42 | |||
43 | #define M2P_PPALLOC 0x0008 | ||
44 | #define M2P_STATUS 0x000c | ||
45 | |||
46 | #define M2P_MAXCNT0 0x0020 | ||
47 | #define M2P_BASE0 0x0024 | ||
48 | #define M2P_MAXCNT1 0x0030 | ||
49 | #define M2P_BASE1 0x0034 | ||
50 | |||
51 | #define M2P_STATE_IDLE 0 | ||
52 | #define M2P_STATE_STALL 1 | ||
53 | #define M2P_STATE_ON 2 | ||
54 | #define M2P_STATE_NEXT 3 | ||
55 | |||
56 | /* M2M registers */ | ||
57 | #define M2M_CONTROL 0x0000 | ||
58 | #define M2M_CONTROL_DONEINT BIT(2) | ||
59 | #define M2M_CONTROL_ENABLE BIT(3) | ||
60 | #define M2M_CONTROL_START BIT(4) | ||
61 | #define M2M_CONTROL_DAH BIT(11) | ||
62 | #define M2M_CONTROL_SAH BIT(12) | ||
63 | #define M2M_CONTROL_PW_SHIFT 9 | ||
64 | #define M2M_CONTROL_PW_8 (0 << M2M_CONTROL_PW_SHIFT) | ||
65 | #define M2M_CONTROL_PW_16 (1 << M2M_CONTROL_PW_SHIFT) | ||
66 | #define M2M_CONTROL_PW_32 (2 << M2M_CONTROL_PW_SHIFT) | ||
67 | #define M2M_CONTROL_PW_MASK (3 << M2M_CONTROL_PW_SHIFT) | ||
68 | #define M2M_CONTROL_TM_SHIFT 13 | ||
69 | #define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT) | ||
70 | #define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT) | ||
71 | #define M2M_CONTROL_RSS_SHIFT 22 | ||
72 | #define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT) | ||
73 | #define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT) | ||
74 | #define M2M_CONTROL_RSS_IDE (3 << M2M_CONTROL_RSS_SHIFT) | ||
75 | #define M2M_CONTROL_NO_HDSK BIT(24) | ||
76 | #define M2M_CONTROL_PWSC_SHIFT 25 | ||
77 | |||
78 | #define M2M_INTERRUPT 0x0004 | ||
79 | #define M2M_INTERRUPT_DONEINT BIT(1) | ||
80 | |||
81 | #define M2M_BCR0 0x0010 | ||
82 | #define M2M_BCR1 0x0014 | ||
83 | #define M2M_SAR_BASE0 0x0018 | ||
84 | #define M2M_SAR_BASE1 0x001c | ||
85 | #define M2M_DAR_BASE0 0x002c | ||
86 | #define M2M_DAR_BASE1 0x0030 | ||
87 | |||
88 | #define DMA_MAX_CHAN_BYTES 0xffff | ||
89 | #define DMA_MAX_CHAN_DESCRIPTORS 32 | ||
90 | |||
91 | struct ep93xx_dma_engine; | ||
92 | |||
93 | /** | ||
94 | * struct ep93xx_dma_desc - EP93xx specific transaction descriptor | ||
95 | * @src_addr: source address of the transaction | ||
96 | * @dst_addr: destination address of the transaction | ||
97 | * @size: size of the transaction (in bytes) | ||
98 | * @complete: this descriptor is completed | ||
99 | * @txd: dmaengine API descriptor | ||
100 | * @tx_list: list of linked descriptors | ||
101 | * @node: link used for putting this into a channel queue | ||
102 | */ | ||
103 | struct ep93xx_dma_desc { | ||
104 | u32 src_addr; | ||
105 | u32 dst_addr; | ||
106 | size_t size; | ||
107 | bool complete; | ||
108 | struct dma_async_tx_descriptor txd; | ||
109 | struct list_head tx_list; | ||
110 | struct list_head node; | ||
111 | }; | ||
112 | |||
113 | /** | ||
114 | * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel | ||
115 | * @chan: dmaengine API channel | ||
116 | * @edma: pointer to the engine device | ||
117 | * @regs: memory mapped registers | ||
118 | * @irq: interrupt number of the channel | ||
119 | * @clk: clock used by this channel | ||
120 | * @tasklet: channel specific tasklet used for callbacks | ||
121 | * @lock: lock protecting the fields following | ||
122 | * @flags: flags for the channel | ||
123 | * @buffer: which buffer to use next (0/1) | ||
124 | * @last_completed: last completed cookie value | ||
125 | * @active: flattened chain of descriptors currently being processed | ||
126 | * @queue: pending descriptors which are handled next | ||
127 | * @free_list: list of free descriptors which can be used | ||
128 | * @runtime_addr: physical address currently used as dest/src (M2M only). This | ||
129 | * is set via %DMA_SLAVE_CONFIG before slave operation is | ||
130 | * prepared | ||
131 | * @runtime_ctrl: M2M runtime values for the control register. | ||
132 | * | ||
133 | * As EP93xx DMA controller doesn't support real chained DMA descriptors we | ||
134 | * will have slightly different scheme here: @active points to a head of | ||
135 | * flattened DMA descriptor chain. | ||
136 | * | ||
137 | * @queue holds pending transactions. These are linked through the first | ||
138 | * descriptor in the chain. When a descriptor is moved to the @active queue, | ||
139 | * the first and chained descriptors are flattened into a single list. | ||
140 | * | ||
141 | * @chan.private holds pointer to &struct ep93xx_dma_data which contains | ||
142 | * necessary channel configuration information. For memcpy channels this must | ||
143 | * be %NULL. | ||
144 | */ | ||
145 | struct ep93xx_dma_chan { | ||
146 | struct dma_chan chan; | ||
147 | const struct ep93xx_dma_engine *edma; | ||
148 | void __iomem *regs; | ||
149 | int irq; | ||
150 | struct clk *clk; | ||
151 | struct tasklet_struct tasklet; | ||
152 | /* protects the fields following */ | ||
153 | spinlock_t lock; | ||
154 | unsigned long flags; | ||
155 | /* Channel is configured for cyclic transfers */ | ||
156 | #define EP93XX_DMA_IS_CYCLIC 0 | ||
157 | |||
158 | int buffer; | ||
159 | dma_cookie_t last_completed; | ||
160 | struct list_head active; | ||
161 | struct list_head queue; | ||
162 | struct list_head free_list; | ||
163 | u32 runtime_addr; | ||
164 | u32 runtime_ctrl; | ||
165 | }; | ||
166 | |||
167 | /** | ||
168 | * struct ep93xx_dma_engine - the EP93xx DMA engine instance | ||
169 | * @dma_dev: holds the dmaengine device | ||
170 | * @m2m: is this an M2M or M2P device | ||
171 | * @hw_setup: method which sets the channel up for operation | ||
172 | * @hw_shutdown: shuts the channel down and flushes whatever is left | ||
173 | * @hw_submit: pushes active descriptor(s) to the hardware | ||
174 | * @hw_interrupt: handle the interrupt | ||
175 | * @num_channels: number of channels for this instance | ||
176 | * @channels: array of channels | ||
177 | * | ||
178 | * There is one instance of this struct for the M2P channels and one for the | ||
179 | * M2M channels. hw_xxx() methods are used to perform operations which are | ||
180 | * different on M2M and M2P channels. These methods are called with channel | ||
181 | * lock held and interrupts disabled so they cannot sleep. | ||
182 | */ | ||
183 | struct ep93xx_dma_engine { | ||
184 | struct dma_device dma_dev; | ||
185 | bool m2m; | ||
186 | int (*hw_setup)(struct ep93xx_dma_chan *); | ||
187 | void (*hw_shutdown)(struct ep93xx_dma_chan *); | ||
188 | void (*hw_submit)(struct ep93xx_dma_chan *); | ||
189 | int (*hw_interrupt)(struct ep93xx_dma_chan *); | ||
190 | #define INTERRUPT_UNKNOWN 0 | ||
191 | #define INTERRUPT_DONE 1 | ||
192 | #define INTERRUPT_NEXT_BUFFER 2 | ||
193 | |||
194 | size_t num_channels; | ||
195 | struct ep93xx_dma_chan channels[]; | ||
196 | }; | ||
197 | |||
198 | static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac) | ||
199 | { | ||
200 | return &edmac->chan.dev->device; | ||
201 | } | ||
202 | |||
203 | static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan) | ||
204 | { | ||
205 | return container_of(chan, struct ep93xx_dma_chan, chan); | ||
206 | } | ||
207 | |||
208 | /** | ||
209 | * ep93xx_dma_set_active - set new active descriptor chain | ||
210 | * @edmac: channel | ||
211 | * @desc: head of the new active descriptor chain | ||
212 | * | ||
213 | * Sets @desc to be the head of the new active descriptor chain. This is the | ||
214 | * chain which is processed next. The active list must be empty before calling | ||
215 | * this function. | ||
216 | * | ||
217 | * Called with @edmac->lock held and interrupts disabled. | ||
218 | */ | ||
219 | static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac, | ||
220 | struct ep93xx_dma_desc *desc) | ||
221 | { | ||
222 | BUG_ON(!list_empty(&edmac->active)); | ||
223 | |||
224 | list_add_tail(&desc->node, &edmac->active); | ||
225 | |||
226 | /* Flatten the @desc->tx_list chain into @edmac->active list */ | ||
227 | while (!list_empty(&desc->tx_list)) { | ||
228 | struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list, | ||
229 | struct ep93xx_dma_desc, node); | ||
230 | |||
231 | /* | ||
232 | * We copy the callback parameters from the first descriptor | ||
233 | * to all the chained descriptors. This way we can call the | ||
234 | * callback without having to find out the first descriptor in | ||
235 | * the chain. Useful for cyclic transfers. | ||
236 | */ | ||
237 | d->txd.callback = desc->txd.callback; | ||
238 | d->txd.callback_param = desc->txd.callback_param; | ||
239 | |||
240 | list_move_tail(&d->node, &edmac->active); | ||
241 | } | ||
242 | } | ||
243 | |||
244 | /* Called with @edmac->lock held and interrupts disabled */ | ||
245 | static struct ep93xx_dma_desc * | ||
246 | ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac) | ||
247 | { | ||
248 | return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node); | ||
249 | } | ||
250 | |||
251 | /** | ||
252 | * ep93xx_dma_advance_active - advances to the next active descriptor | ||
253 | * @edmac: channel | ||
254 | * | ||
255 | * Function advances active descriptor to the next in the @edmac->active and | ||
256 | * returns %true if we still have descriptors in the chain to process. | ||
257 | * Otherwise returns %false. | ||
258 | * | ||
259 | * When the channel is in cyclic mode always returns %true. | ||
260 | * | ||
261 | * Called with @edmac->lock held and interrupts disabled. | ||
262 | */ | ||
263 | static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac) | ||
264 | { | ||
265 | list_rotate_left(&edmac->active); | ||
266 | |||
267 | if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) | ||
268 | return true; | ||
269 | |||
270 | /* | ||
271 | * If txd.cookie is set it means that we are back in the first | ||
272 | * descriptor in the chain and hence done with it. | ||
273 | */ | ||
274 | return !ep93xx_dma_get_active(edmac)->txd.cookie; | ||
275 | } | ||
276 | |||
277 | /* | ||
278 | * M2P DMA implementation | ||
279 | */ | ||
280 | |||
281 | static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control) | ||
282 | { | ||
283 | writel(control, edmac->regs + M2P_CONTROL); | ||
284 | /* | ||
285 | * EP93xx User's Guide states that we must perform a dummy read after | ||
286 | * write to the control register. | ||
287 | */ | ||
288 | readl(edmac->regs + M2P_CONTROL); | ||
289 | } | ||
290 | |||
291 | static int m2p_hw_setup(struct ep93xx_dma_chan *edmac) | ||
292 | { | ||
293 | struct ep93xx_dma_data *data = edmac->chan.private; | ||
294 | u32 control; | ||
295 | |||
296 | writel(data->port & 0xf, edmac->regs + M2P_PPALLOC); | ||
297 | |||
298 | control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE | ||
299 | | M2P_CONTROL_ENABLE; | ||
300 | m2p_set_control(edmac, control); | ||
301 | |||
302 | return 0; | ||
303 | } | ||
304 | |||
305 | static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac) | ||
306 | { | ||
307 | return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3; | ||
308 | } | ||
309 | |||
310 | static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac) | ||
311 | { | ||
312 | u32 control; | ||
313 | |||
314 | control = readl(edmac->regs + M2P_CONTROL); | ||
315 | control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT); | ||
316 | m2p_set_control(edmac, control); | ||
317 | |||
318 | while (m2p_channel_state(edmac) >= M2P_STATE_ON) | ||
319 | cpu_relax(); | ||
320 | |||
321 | m2p_set_control(edmac, 0); | ||
322 | |||
323 | while (m2p_channel_state(edmac) == M2P_STATE_STALL) | ||
324 | cpu_relax(); | ||
325 | } | ||
326 | |||
327 | static void m2p_fill_desc(struct ep93xx_dma_chan *edmac) | ||
328 | { | ||
329 | struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); | ||
330 | u32 bus_addr; | ||
331 | |||
332 | if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE) | ||
333 | bus_addr = desc->src_addr; | ||
334 | else | ||
335 | bus_addr = desc->dst_addr; | ||
336 | |||
337 | if (edmac->buffer == 0) { | ||
338 | writel(desc->size, edmac->regs + M2P_MAXCNT0); | ||
339 | writel(bus_addr, edmac->regs + M2P_BASE0); | ||
340 | } else { | ||
341 | writel(desc->size, edmac->regs + M2P_MAXCNT1); | ||
342 | writel(bus_addr, edmac->regs + M2P_BASE1); | ||
343 | } | ||
344 | |||
345 | edmac->buffer ^= 1; | ||
346 | } | ||
347 | |||
348 | static void m2p_hw_submit(struct ep93xx_dma_chan *edmac) | ||
349 | { | ||
350 | u32 control = readl(edmac->regs + M2P_CONTROL); | ||
351 | |||
352 | m2p_fill_desc(edmac); | ||
353 | control |= M2P_CONTROL_STALLINT; | ||
354 | |||
355 | if (ep93xx_dma_advance_active(edmac)) { | ||
356 | m2p_fill_desc(edmac); | ||
357 | control |= M2P_CONTROL_NFBINT; | ||
358 | } | ||
359 | |||
360 | m2p_set_control(edmac, control); | ||
361 | } | ||
362 | |||
363 | static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac) | ||
364 | { | ||
365 | u32 irq_status = readl(edmac->regs + M2P_INTERRUPT); | ||
366 | u32 control; | ||
367 | |||
368 | if (irq_status & M2P_INTERRUPT_ERROR) { | ||
369 | struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); | ||
370 | |||
371 | /* Clear the error interrupt */ | ||
372 | writel(1, edmac->regs + M2P_INTERRUPT); | ||
373 | |||
374 | /* | ||
375 | * It seems that there is no easy way of reporting errors back | ||
376 | * to client so we just report the error here and continue as | ||
377 | * usual. | ||
378 | * | ||
379 | * Revisit this when there is a mechanism to report back the | ||
380 | * errors. | ||
381 | */ | ||
382 | dev_err(chan2dev(edmac), | ||
383 | "DMA transfer failed! Details:\n" | ||
384 | "\tcookie : %d\n" | ||
385 | "\tsrc_addr : 0x%08x\n" | ||
386 | "\tdst_addr : 0x%08x\n" | ||
387 | "\tsize : %zu\n", | ||
388 | desc->txd.cookie, desc->src_addr, desc->dst_addr, | ||
389 | desc->size); | ||
390 | } | ||
391 | |||
392 | switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) { | ||
393 | case M2P_INTERRUPT_STALL: | ||
394 | /* Disable interrupts */ | ||
395 | control = readl(edmac->regs + M2P_CONTROL); | ||
396 | control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT); | ||
397 | m2p_set_control(edmac, control); | ||
398 | |||
399 | return INTERRUPT_DONE; | ||
400 | |||
401 | case M2P_INTERRUPT_NFB: | ||
402 | if (ep93xx_dma_advance_active(edmac)) | ||
403 | m2p_fill_desc(edmac); | ||
404 | |||
405 | return INTERRUPT_NEXT_BUFFER; | ||
406 | } | ||
407 | |||
408 | return INTERRUPT_UNKNOWN; | ||
409 | } | ||
410 | |||
411 | /* | ||
412 | * M2M DMA implementation | ||
413 | * | ||
414 | * For the M2M transfers we don't use NFB at all. This is because it simply | ||
415 | * doesn't work well with memcpy transfers. When you submit both buffers it is | ||
416 | * extremely unlikely that you get an NFB interrupt, but it instead reports | ||
417 | * DONE interrupt and both buffers are already transferred which means that we | ||
418 | * weren't able to update the next buffer. | ||
419 | * | ||
420 | * So for now we "simulate" NFB by just submitting buffer after buffer | ||
421 | * without double buffering. | ||
422 | */ | ||
423 | |||
424 | static int m2m_hw_setup(struct ep93xx_dma_chan *edmac) | ||
425 | { | ||
426 | const struct ep93xx_dma_data *data = edmac->chan.private; | ||
427 | u32 control = 0; | ||
428 | |||
429 | if (!data) { | ||
430 | /* This is memcpy channel, nothing to configure */ | ||
431 | writel(control, edmac->regs + M2M_CONTROL); | ||
432 | return 0; | ||
433 | } | ||
434 | |||
435 | switch (data->port) { | ||
436 | case EP93XX_DMA_SSP: | ||
437 | /* | ||
438 | * This was found via experimenting - anything less than 5 | ||
439 | * causes the channel to perform only a partial transfer which | ||
440 | * leads to problems since we don't get DONE interrupt then. | ||
441 | */ | ||
442 | control = (5 << M2M_CONTROL_PWSC_SHIFT); | ||
443 | control |= M2M_CONTROL_NO_HDSK; | ||
444 | |||
445 | if (data->direction == DMA_TO_DEVICE) { | ||
446 | control |= M2M_CONTROL_DAH; | ||
447 | control |= M2M_CONTROL_TM_TX; | ||
448 | control |= M2M_CONTROL_RSS_SSPTX; | ||
449 | } else { | ||
450 | control |= M2M_CONTROL_SAH; | ||
451 | control |= M2M_CONTROL_TM_RX; | ||
452 | control |= M2M_CONTROL_RSS_SSPRX; | ||
453 | } | ||
454 | break; | ||
455 | |||
456 | case EP93XX_DMA_IDE: | ||
457 | /* | ||
458 | * This IDE part is totally untested. Values below are taken | ||
459 | * from the EP93xx User's Guide and might not be correct. | ||
460 | */ | ||
461 | control |= M2M_CONTROL_NO_HDSK; | ||
462 | control |= M2M_CONTROL_RSS_IDE; | ||
463 | control |= M2M_CONTROL_PW_16; | ||
464 | |||
465 | if (data->direction == DMA_TO_DEVICE) { | ||
466 | /* Worst case from the UG */ | ||
467 | control = (3 << M2M_CONTROL_PWSC_SHIFT); | ||
468 | control |= M2M_CONTROL_DAH; | ||
469 | control |= M2M_CONTROL_TM_TX; | ||
470 | } else { | ||
471 | control = (2 << M2M_CONTROL_PWSC_SHIFT); | ||
472 | control |= M2M_CONTROL_SAH; | ||
473 | control |= M2M_CONTROL_TM_RX; | ||
474 | } | ||
475 | break; | ||
476 | |||
477 | default: | ||
478 | return -EINVAL; | ||
479 | } | ||
480 | |||
481 | writel(control, edmac->regs + M2M_CONTROL); | ||
482 | return 0; | ||
483 | } | ||
484 | |||
485 | static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac) | ||
486 | { | ||
487 | /* Just disable the channel */ | ||
488 | writel(0, edmac->regs + M2M_CONTROL); | ||
489 | } | ||
490 | |||
491 | static void m2m_fill_desc(struct ep93xx_dma_chan *edmac) | ||
492 | { | ||
493 | struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); | ||
494 | |||
495 | if (edmac->buffer == 0) { | ||
496 | writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0); | ||
497 | writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0); | ||
498 | writel(desc->size, edmac->regs + M2M_BCR0); | ||
499 | } else { | ||
500 | writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1); | ||
501 | writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1); | ||
502 | writel(desc->size, edmac->regs + M2M_BCR1); | ||
503 | } | ||
504 | |||
505 | edmac->buffer ^= 1; | ||
506 | } | ||
507 | |||
508 | static void m2m_hw_submit(struct ep93xx_dma_chan *edmac) | ||
509 | { | ||
510 | struct ep93xx_dma_data *data = edmac->chan.private; | ||
511 | u32 control = readl(edmac->regs + M2M_CONTROL); | ||
512 | |||
513 | /* | ||
514 | * Since we allow clients to configure PW (peripheral width) we always | ||
515 | * clear PW bits here and then set them according to what is given in | ||
516 | * the runtime configuration. | ||
517 | */ | ||
518 | control &= ~M2M_CONTROL_PW_MASK; | ||
519 | control |= edmac->runtime_ctrl; | ||
520 | |||
521 | m2m_fill_desc(edmac); | ||
522 | control |= M2M_CONTROL_DONEINT; | ||
523 | |||
524 | /* | ||
525 | * Now we can finally enable the channel. For M2M channel this must be | ||
526 | * done _after_ the BCRx registers are programmed. | ||
527 | */ | ||
528 | control |= M2M_CONTROL_ENABLE; | ||
529 | writel(control, edmac->regs + M2M_CONTROL); | ||
530 | |||
531 | if (!data) { | ||
532 | /* | ||
533 | * For memcpy channels the software trigger must be asserted | ||
534 | * in order to start the memcpy operation. | ||
535 | */ | ||
536 | control |= M2M_CONTROL_START; | ||
537 | writel(control, edmac->regs + M2M_CONTROL); | ||
538 | } | ||
539 | } | ||
540 | |||
541 | static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac) | ||
542 | { | ||
543 | u32 control; | ||
544 | |||
545 | if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT)) | ||
546 | return INTERRUPT_UNKNOWN; | ||
547 | |||
548 | /* Clear the DONE bit */ | ||
549 | writel(0, edmac->regs + M2M_INTERRUPT); | ||
550 | |||
551 | /* Disable interrupts and the channel */ | ||
552 | control = readl(edmac->regs + M2M_CONTROL); | ||
553 | control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE); | ||
554 | writel(control, edmac->regs + M2M_CONTROL); | ||
555 | |||
556 | /* | ||
557 | * Since we only get DONE interrupt we have to find out ourselves | ||
558 | * whether there still is something to process. So we try to advance | ||
559 | * the chain and see whether it succeeds. | ||
560 | */ | ||
561 | if (ep93xx_dma_advance_active(edmac)) { | ||
562 | edmac->edma->hw_submit(edmac); | ||
563 | return INTERRUPT_NEXT_BUFFER; | ||
564 | } | ||
565 | |||
566 | return INTERRUPT_DONE; | ||
567 | } | ||
568 | |||
569 | /* | ||
570 | * DMA engine API implementation | ||
571 | */ | ||
572 | |||
573 | static struct ep93xx_dma_desc * | ||
574 | ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac) | ||
575 | { | ||
576 | struct ep93xx_dma_desc *desc, *_desc; | ||
577 | struct ep93xx_dma_desc *ret = NULL; | ||
578 | unsigned long flags; | ||
579 | |||
580 | spin_lock_irqsave(&edmac->lock, flags); | ||
581 | list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) { | ||
582 | if (async_tx_test_ack(&desc->txd)) { | ||
583 | list_del_init(&desc->node); | ||
584 | |||
585 | /* Re-initialize the descriptor */ | ||
586 | desc->src_addr = 0; | ||
587 | desc->dst_addr = 0; | ||
588 | desc->size = 0; | ||
589 | desc->complete = false; | ||
590 | desc->txd.cookie = 0; | ||
591 | desc->txd.callback = NULL; | ||
592 | desc->txd.callback_param = NULL; | ||
593 | |||
594 | ret = desc; | ||
595 | break; | ||
596 | } | ||
597 | } | ||
598 | spin_unlock_irqrestore(&edmac->lock, flags); | ||
599 | return ret; | ||
600 | } | ||
601 | |||
602 | static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac, | ||
603 | struct ep93xx_dma_desc *desc) | ||
604 | { | ||
605 | if (desc) { | ||
606 | unsigned long flags; | ||
607 | |||
608 | spin_lock_irqsave(&edmac->lock, flags); | ||
609 | list_splice_init(&desc->tx_list, &edmac->free_list); | ||
610 | list_add(&desc->node, &edmac->free_list); | ||
611 | spin_unlock_irqrestore(&edmac->lock, flags); | ||
612 | } | ||
613 | } | ||
614 | |||
615 | /** | ||
616 | * ep93xx_dma_advance_work - start processing the next pending transaction | ||
617 | * @edmac: channel | ||
618 | * | ||
619 | * If we have pending transactions queued and we are currently idling, this | ||
620 | * function takes the next queued transaction from the @edmac->queue and | ||
621 | * pushes it to the hardware for execution. | ||
622 | */ | ||
623 | static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac) | ||
624 | { | ||
625 | struct ep93xx_dma_desc *new; | ||
626 | unsigned long flags; | ||
627 | |||
628 | spin_lock_irqsave(&edmac->lock, flags); | ||
629 | if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) { | ||
630 | spin_unlock_irqrestore(&edmac->lock, flags); | ||
631 | return; | ||
632 | } | ||
633 | |||
634 | /* Take the next descriptor from the pending queue */ | ||
635 | new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node); | ||
636 | list_del_init(&new->node); | ||
637 | |||
638 | ep93xx_dma_set_active(edmac, new); | ||
639 | |||
640 | /* Push it to the hardware */ | ||
641 | edmac->edma->hw_submit(edmac); | ||
642 | spin_unlock_irqrestore(&edmac->lock, flags); | ||
643 | } | ||
644 | |||
645 | static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc) | ||
646 | { | ||
647 | struct device *dev = desc->txd.chan->device->dev; | ||
648 | |||
649 | if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
650 | if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
651 | dma_unmap_single(dev, desc->src_addr, desc->size, | ||
652 | DMA_TO_DEVICE); | ||
653 | else | ||
654 | dma_unmap_page(dev, desc->src_addr, desc->size, | ||
655 | DMA_TO_DEVICE); | ||
656 | } | ||
657 | if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
658 | if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
659 | dma_unmap_single(dev, desc->dst_addr, desc->size, | ||
660 | DMA_FROM_DEVICE); | ||
661 | else | ||
662 | dma_unmap_page(dev, desc->dst_addr, desc->size, | ||
663 | DMA_FROM_DEVICE); | ||
664 | } | ||
665 | } | ||
666 | |||
667 | static void ep93xx_dma_tasklet(unsigned long data) | ||
668 | { | ||
669 | struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data; | ||
670 | struct ep93xx_dma_desc *desc, *d; | ||
671 | dma_async_tx_callback callback; | ||
672 | void *callback_param; | ||
673 | LIST_HEAD(list); | ||
674 | |||
675 | spin_lock_irq(&edmac->lock); | ||
676 | desc = ep93xx_dma_get_active(edmac); | ||
677 | if (desc->complete) { | ||
678 | edmac->last_completed = desc->txd.cookie; | ||
679 | list_splice_init(&edmac->active, &list); | ||
680 | } | ||
681 | spin_unlock_irq(&edmac->lock); | ||
682 | |||
683 | /* Pick up the next descriptor from the queue */ | ||
684 | ep93xx_dma_advance_work(edmac); | ||
685 | |||
686 | callback = desc->txd.callback; | ||
687 | callback_param = desc->txd.callback_param; | ||
688 | |||
689 | /* Now we can release all the chained descriptors */ | ||
690 | list_for_each_entry_safe(desc, d, &list, node) { | ||
691 | /* | ||
692 | * For the memcpy channels the API requires us to unmap the | ||
693 | * buffers unless requested otherwise. | ||
694 | */ | ||
695 | if (!edmac->chan.private) | ||
696 | ep93xx_dma_unmap_buffers(desc); | ||
697 | |||
698 | ep93xx_dma_desc_put(edmac, desc); | ||
699 | } | ||
700 | |||
701 | if (callback) | ||
702 | callback(callback_param); | ||
703 | } | ||
704 | |||
705 | static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id) | ||
706 | { | ||
707 | struct ep93xx_dma_chan *edmac = dev_id; | ||
708 | irqreturn_t ret = IRQ_HANDLED; | ||
709 | |||
710 | spin_lock(&edmac->lock); | ||
711 | |||
712 | switch (edmac->edma->hw_interrupt(edmac)) { | ||
713 | case INTERRUPT_DONE: | ||
714 | ep93xx_dma_get_active(edmac)->complete = true; | ||
715 | tasklet_schedule(&edmac->tasklet); | ||
716 | break; | ||
717 | |||
718 | case INTERRUPT_NEXT_BUFFER: | ||
719 | if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) | ||
720 | tasklet_schedule(&edmac->tasklet); | ||
721 | break; | ||
722 | |||
723 | default: | ||
724 | dev_warn(chan2dev(edmac), "unknown interrupt!\n"); | ||
725 | ret = IRQ_NONE; | ||
726 | break; | ||
727 | } | ||
728 | |||
729 | spin_unlock(&edmac->lock); | ||
730 | return ret; | ||
731 | } | ||
732 | |||
733 | /** | ||
734 | * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed | ||
735 | * @tx: descriptor to be executed | ||
736 | * | ||
737 | * Function will execute the given descriptor on the hardware or, if the | ||
738 | * hardware is busy, queue the descriptor to be executed later on. Returns a | ||
739 | * cookie which can be used to poll the status of the descriptor. | ||
740 | */ | ||
741 | static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
742 | { | ||
743 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan); | ||
744 | struct ep93xx_dma_desc *desc; | ||
745 | dma_cookie_t cookie; | ||
746 | unsigned long flags; | ||
747 | |||
748 | spin_lock_irqsave(&edmac->lock, flags); | ||
749 | |||
750 | cookie = edmac->chan.cookie; | ||
751 | |||
752 | if (++cookie < 0) | ||
753 | cookie = 1; | ||
754 | |||
755 | desc = container_of(tx, struct ep93xx_dma_desc, txd); | ||
756 | |||
757 | edmac->chan.cookie = cookie; | ||
758 | desc->txd.cookie = cookie; | ||
759 | |||
760 | /* | ||
761 | * If nothing is currently being processed, we push this descriptor | ||
762 | * directly to the hardware. Otherwise we put the descriptor | ||
763 | * on the pending queue. | ||
764 | */ | ||
765 | if (list_empty(&edmac->active)) { | ||
766 | ep93xx_dma_set_active(edmac, desc); | ||
767 | edmac->edma->hw_submit(edmac); | ||
768 | } else { | ||
769 | list_add_tail(&desc->node, &edmac->queue); | ||
770 | } | ||
771 | |||
772 | spin_unlock_irqrestore(&edmac->lock, flags); | ||
773 | return cookie; | ||
774 | } | ||
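For reference, a client reaches this path through the generic dmaengine wrappers rather than calling it directly. A minimal sketch, assuming a channel and a prepared descriptor obtained elsewhere (illustrative only, not part of the driver):

    #include <linux/dmaengine.h>

    /* Illustrative sketch: queue a prepared descriptor and start the engine. */
    static dma_cookie_t example_submit(struct dma_chan *chan,
    				       struct dma_async_tx_descriptor *txd)
    {
    	dma_cookie_t cookie;

    	cookie = dmaengine_submit(txd);	/* ends up in ep93xx_dma_tx_submit() */
    	if (dma_submit_error(cookie))
    		return cookie;

    	/* Flush anything still queued (ep93xx_dma_issue_pending() below). */
    	dma_async_issue_pending(chan);
    	return cookie;
    }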
775 | |||
776 | /** | ||
777 | * ep93xx_dma_alloc_chan_resources - allocate resources for the channel | ||
778 | * @chan: channel to allocate resources | ||
779 | * | ||
780 | * Function allocates the necessary resources for the given DMA channel and | ||
781 | * returns the number of allocated descriptors for the channel. A negative | ||
782 | * errno is returned in case of failure. | ||
783 | */ | ||
784 | static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan) | ||
785 | { | ||
786 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | ||
787 | struct ep93xx_dma_data *data = chan->private; | ||
788 | const char *name = dma_chan_name(chan); | ||
789 | int ret, i; | ||
790 | |||
791 | /* Sanity check the channel parameters */ | ||
792 | if (!edmac->edma->m2m) { | ||
793 | if (!data) | ||
794 | return -EINVAL; | ||
795 | if (data->port < EP93XX_DMA_I2S1 || | ||
796 | data->port > EP93XX_DMA_IRDA) | ||
797 | return -EINVAL; | ||
798 | if (data->direction != ep93xx_dma_chan_direction(chan)) | ||
799 | return -EINVAL; | ||
800 | } else { | ||
801 | if (data) { | ||
802 | switch (data->port) { | ||
803 | case EP93XX_DMA_SSP: | ||
804 | case EP93XX_DMA_IDE: | ||
805 | if (data->direction != DMA_TO_DEVICE && | ||
806 | data->direction != DMA_FROM_DEVICE) | ||
807 | return -EINVAL; | ||
808 | break; | ||
809 | default: | ||
810 | return -EINVAL; | ||
811 | } | ||
812 | } | ||
813 | } | ||
814 | |||
815 | if (data && data->name) | ||
816 | name = data->name; | ||
817 | |||
818 | ret = clk_enable(edmac->clk); | ||
819 | if (ret) | ||
820 | return ret; | ||
821 | |||
822 | ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac); | ||
823 | if (ret) | ||
824 | goto fail_clk_disable; | ||
825 | |||
826 | spin_lock_irq(&edmac->lock); | ||
827 | edmac->last_completed = 1; | ||
828 | edmac->chan.cookie = 1; | ||
829 | ret = edmac->edma->hw_setup(edmac); | ||
830 | spin_unlock_irq(&edmac->lock); | ||
831 | |||
832 | if (ret) | ||
833 | goto fail_free_irq; | ||
834 | |||
835 | for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) { | ||
836 | struct ep93xx_dma_desc *desc; | ||
837 | |||
838 | desc = kzalloc(sizeof(*desc), GFP_KERNEL); | ||
839 | if (!desc) { | ||
840 | dev_warn(chan2dev(edmac), "not enough descriptors\n"); | ||
841 | break; | ||
842 | } | ||
843 | |||
844 | INIT_LIST_HEAD(&desc->tx_list); | ||
845 | |||
846 | dma_async_tx_descriptor_init(&desc->txd, chan); | ||
847 | desc->txd.flags = DMA_CTRL_ACK; | ||
848 | desc->txd.tx_submit = ep93xx_dma_tx_submit; | ||
849 | |||
850 | ep93xx_dma_desc_put(edmac, desc); | ||
851 | } | ||
852 | |||
853 | return i; | ||
854 | |||
855 | fail_free_irq: | ||
856 | free_irq(edmac->irq, edmac); | ||
857 | fail_clk_disable: | ||
858 | clk_disable(edmac->clk); | ||
859 | |||
860 | return ret; | ||
861 | } | ||
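The private data checked above is normally attached to the channel by the client when requesting it. A hedged sketch of that step, assuming the ep93xx platform header that declares struct ep93xx_dma_data (mach/dma.h at this point in time); the port, direction and name values are illustrative:

    #include <linux/dmaengine.h>
    #include <mach/dma.h>		/* assumed location of struct ep93xx_dma_data */

    static bool example_filter(struct dma_chan *chan, void *filter_param)
    {
    	/* Hand the platform data to ep93xx_dma_alloc_chan_resources(). */
    	chan->private = filter_param;
    	return true;
    }

    static struct dma_chan *example_request_i2s_tx_chan(void)
    {
    	static struct ep93xx_dma_data data = {
    		.port		= EP93XX_DMA_I2S1,	/* within the M2P range */
    		.direction	= DMA_TO_DEVICE,
    		.name		= "i2s1-tx",		/* illustrative name */
    	};
    	dma_cap_mask_t mask;

    	dma_cap_zero(mask);
    	dma_cap_set(DMA_SLAVE, mask);

    	return dma_request_channel(mask, example_filter, &data);
    }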
862 | |||
863 | /** | ||
864 | * ep93xx_dma_free_chan_resources - release resources for the channel | ||
865 | * @chan: channel | ||
866 | * | ||
867 | * Function releases all the resources allocated for the given channel. | ||
868 | * The channel must be idle when this is called. | ||
869 | */ | ||
870 | static void ep93xx_dma_free_chan_resources(struct dma_chan *chan) | ||
871 | { | ||
872 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | ||
873 | struct ep93xx_dma_desc *desc, *d; | ||
874 | unsigned long flags; | ||
875 | LIST_HEAD(list); | ||
876 | |||
877 | BUG_ON(!list_empty(&edmac->active)); | ||
878 | BUG_ON(!list_empty(&edmac->queue)); | ||
879 | |||
880 | spin_lock_irqsave(&edmac->lock, flags); | ||
881 | edmac->edma->hw_shutdown(edmac); | ||
882 | edmac->runtime_addr = 0; | ||
883 | edmac->runtime_ctrl = 0; | ||
884 | edmac->buffer = 0; | ||
885 | list_splice_init(&edmac->free_list, &list); | ||
886 | spin_unlock_irqrestore(&edmac->lock, flags); | ||
887 | |||
888 | list_for_each_entry_safe(desc, d, &list, node) | ||
889 | kfree(desc); | ||
890 | |||
891 | clk_disable(edmac->clk); | ||
892 | free_irq(edmac->irq, edmac); | ||
893 | } | ||
894 | |||
895 | /** | ||
896 | * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation | ||
897 | * @chan: channel | ||
898 | * @dest: destination bus address | ||
899 | * @src: source bus address | ||
900 | * @len: size of the transaction | ||
901 | * @flags: flags for the descriptor | ||
902 | * | ||
903 | * Returns a valid DMA descriptor or %NULL in case of failure. | ||
904 | */ | ||
905 | static struct dma_async_tx_descriptor * | ||
906 | ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, | ||
907 | dma_addr_t src, size_t len, unsigned long flags) | ||
908 | { | ||
909 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | ||
910 | struct ep93xx_dma_desc *desc, *first; | ||
911 | size_t bytes, offset; | ||
912 | |||
913 | first = NULL; | ||
914 | for (offset = 0; offset < len; offset += bytes) { | ||
915 | desc = ep93xx_dma_desc_get(edmac); | ||
916 | if (!desc) { | ||
917 | dev_warn(chan2dev(edmac), "couldn't get descriptor\n"); | ||
918 | goto fail; | ||
919 | } | ||
920 | |||
921 | bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES); | ||
922 | |||
923 | desc->src_addr = src + offset; | ||
924 | desc->dst_addr = dest + offset; | ||
925 | desc->size = bytes; | ||
926 | |||
927 | if (!first) | ||
928 | first = desc; | ||
929 | else | ||
930 | list_add_tail(&desc->node, &first->tx_list); | ||
931 | } | ||
932 | |||
933 | first->txd.cookie = -EBUSY; | ||
934 | first->txd.flags = flags; | ||
935 | |||
936 | return &first->txd; | ||
937 | fail: | ||
938 | ep93xx_dma_desc_put(edmac, first); | ||
939 | return NULL; | ||
940 | } | ||
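Because the probe routine below wires this up as device_prep_dma_memcpy on M2M controllers, an M2M client could drive it roughly as follows. A sketch under the assumption that src and dst are already DMA mapped and the caller keeps ownership of the mappings:

    #include <linux/dmaengine.h>

    /* Illustrative sketch: queue one memcpy on an ep93xx M2M channel. */
    static int example_memcpy(struct dma_chan *chan, dma_addr_t dst,
    			      dma_addr_t src, size_t len)
    {
    	struct dma_async_tx_descriptor *txd;
    	unsigned long flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_SRC_UNMAP |
    			      DMA_COMPL_SKIP_DEST_UNMAP;

    	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
    	if (!txd)
    		return -ENOMEM;

    	dmaengine_submit(txd);
    	dma_async_issue_pending(chan);
    	return 0;
    }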
941 | |||
942 | /** | ||
943 | * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation | ||
944 | * @chan: channel | ||
945 | * @sgl: list of buffers to transfer | ||
946 | * @sg_len: number of entries in @sgl | ||
947 | * @dir: direction of the DMA transfer | ||
948 | * @flags: flags for the descriptor | ||
949 | * | ||
950 | * Returns a valid DMA descriptor or %NULL in case of failure. | ||
951 | */ | ||
952 | static struct dma_async_tx_descriptor * | ||
953 | ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | ||
954 | unsigned int sg_len, enum dma_data_direction dir, | ||
955 | unsigned long flags) | ||
956 | { | ||
957 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | ||
958 | struct ep93xx_dma_desc *desc, *first; | ||
959 | struct scatterlist *sg; | ||
960 | int i; | ||
961 | |||
962 | if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) { | ||
963 | dev_warn(chan2dev(edmac), | ||
964 | "channel was configured with different direction\n"); | ||
965 | return NULL; | ||
966 | } | ||
967 | |||
968 | if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) { | ||
969 | dev_warn(chan2dev(edmac), | ||
970 | "channel is already used for cyclic transfers\n"); | ||
971 | return NULL; | ||
972 | } | ||
973 | |||
974 | first = NULL; | ||
975 | for_each_sg(sgl, sg, sg_len, i) { | ||
976 | size_t sg_len = sg_dma_len(sg); | ||
977 | |||
978 | if (sg_len > DMA_MAX_CHAN_BYTES) { | ||
979 | dev_warn(chan2dev(edmac), "too big transfer size %zu\n", | ||
980 | sg_len); | ||
981 | goto fail; | ||
982 | } | ||
983 | |||
984 | desc = ep93xx_dma_desc_get(edmac); | ||
985 | if (!desc) { | ||
986 | dev_warn(chan2dev(edmac), "couldn't get descriptor\n"); | ||
987 | goto fail; | ||
988 | } | ||
989 | |||
990 | if (dir == DMA_TO_DEVICE) { | ||
991 | desc->src_addr = sg_dma_address(sg); | ||
992 | desc->dst_addr = edmac->runtime_addr; | ||
993 | } else { | ||
994 | desc->src_addr = edmac->runtime_addr; | ||
995 | desc->dst_addr = sg_dma_address(sg); | ||
996 | } | ||
997 | desc->size = sg_len; | ||
998 | |||
999 | if (!first) | ||
1000 | first = desc; | ||
1001 | else | ||
1002 | list_add_tail(&desc->node, &first->tx_list); | ||
1003 | } | ||
1004 | |||
1005 | first->txd.cookie = -EBUSY; | ||
1006 | first->txd.flags = flags; | ||
1007 | |||
1008 | return &first->txd; | ||
1009 | |||
1010 | fail: | ||
1011 | ep93xx_dma_desc_put(edmac, first); | ||
1012 | return NULL; | ||
1013 | } | ||
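A slave client maps its buffer into a scatterlist and then prepares the transfer; the helper below calls the device hook directly, since no generic wrapper for it existed at the time. A sketch, assuming the scatterlist has already been mapped with dma_map_sg():

    #include <linux/dmaengine.h>
    #include <linux/scatterlist.h>

    /* Illustrative sketch: start a mem-to-device transfer of a mapped sg list. */
    static int example_slave_tx(struct dma_chan *chan, struct scatterlist *sgl,
    			        unsigned int sg_len, dma_async_tx_callback done,
    			        void *done_param)
    {
    	struct dma_async_tx_descriptor *txd;

    	txd = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
    						 DMA_TO_DEVICE, DMA_CTRL_ACK);
    	if (!txd)
    		return -ENOMEM;

    	txd->callback = done;		/* invoked from ep93xx_dma_tasklet() */
    	txd->callback_param = done_param;

    	dmaengine_submit(txd);
    	dma_async_issue_pending(chan);
    	return 0;
    }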
1014 | |||
1015 | /** | ||
1016 | * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation | ||
1017 | * @chan: channel | ||
1018 | * @dma_addr: DMA mapped address of the buffer | ||
1019 | * @buf_len: length of the buffer (in bytes) | ||
1020 | * @period_len: length of a single period | ||
1021 | * @dir: direction of the operation | ||
1022 | * | ||
1023 | * Prepares a descriptor for a cyclic DMA operation. Once the descriptor is | ||
1024 | * submitted, the buffer is transferred in @period_len sized chunks and the | ||
1025 | * callback is called after each period has elapsed. The transfer terminates | ||
1026 | * only when the client calls dmaengine_terminate_all() for this | ||
1027 | * channel. | ||
1028 | * | ||
1029 | * Returns a valid DMA descriptor or %NULL in case of failure. | ||
1030 | */ | ||
1031 | static struct dma_async_tx_descriptor * | ||
1032 | ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, | ||
1033 | size_t buf_len, size_t period_len, | ||
1034 | enum dma_data_direction dir) | ||
1035 | { | ||
1036 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | ||
1037 | struct ep93xx_dma_desc *desc, *first; | ||
1038 | size_t offset = 0; | ||
1039 | |||
1040 | if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) { | ||
1041 | dev_warn(chan2dev(edmac), | ||
1042 | "channel was configured with different direction\n"); | ||
1043 | return NULL; | ||
1044 | } | ||
1045 | |||
1046 | if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) { | ||
1047 | dev_warn(chan2dev(edmac), | ||
1048 | "channel is already used for cyclic transfers\n"); | ||
1049 | return NULL; | ||
1050 | } | ||
1051 | |||
1052 | if (period_len > DMA_MAX_CHAN_BYTES) { | ||
1053 | dev_warn(chan2dev(edmac), "too big period length %zu\n", | ||
1054 | period_len); | ||
1055 | return NULL; | ||
1056 | } | ||
1057 | |||
1058 | /* Split the buffer into period size chunks */ | ||
1059 | first = NULL; | ||
1060 | for (offset = 0; offset < buf_len; offset += period_len) { | ||
1061 | desc = ep93xx_dma_desc_get(edmac); | ||
1062 | if (!desc) { | ||
1063 | dev_warn(chan2dev(edmac), "couldn't get descriptor\n"); | ||
1064 | goto fail; | ||
1065 | } | ||
1066 | |||
1067 | if (dir == DMA_TO_DEVICE) { | ||
1068 | desc->src_addr = dma_addr + offset; | ||
1069 | desc->dst_addr = edmac->runtime_addr; | ||
1070 | } else { | ||
1071 | desc->src_addr = edmac->runtime_addr; | ||
1072 | desc->dst_addr = dma_addr + offset; | ||
1073 | } | ||
1074 | |||
1075 | desc->size = period_len; | ||
1076 | |||
1077 | if (!first) | ||
1078 | first = desc; | ||
1079 | else | ||
1080 | list_add_tail(&desc->node, &first->tx_list); | ||
1081 | } | ||
1082 | |||
1083 | first->txd.cookie = -EBUSY; | ||
1084 | |||
1085 | return &first->txd; | ||
1086 | |||
1087 | fail: | ||
1088 | ep93xx_dma_desc_put(edmac, first); | ||
1089 | return NULL; | ||
1090 | } | ||
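Audio-style users typically split one DMA-mapped ring buffer into equally sized periods and take a callback per period. A hedged sketch of such a client, assuming buf is already mapped and buf_len is a multiple of period_len; the transfer then runs until dmaengine_terminate_all() is called on the channel:

    #include <linux/dmaengine.h>

    /* Illustrative sketch: run a cyclic device-to-memory transfer. */
    static int example_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
    			         size_t buf_len, size_t period_len,
    			         dma_async_tx_callback period_done, void *param)
    {
    	struct dma_async_tx_descriptor *txd;

    	txd = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
    						    period_len, DMA_FROM_DEVICE);
    	if (!txd)
    		return -ENOMEM;

    	txd->callback = period_done;	/* called once per elapsed period */
    	txd->callback_param = param;

    	dmaengine_submit(txd);
    	dma_async_issue_pending(chan);
    	return 0;
    }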
1091 | |||
1092 | /** | ||
1093 | * ep93xx_dma_terminate_all - terminate all transactions | ||
1094 | * @edmac: channel | ||
1095 | * | ||
1096 | * Stops all DMA transactions. All descriptors are put back to the | ||
1097 | * @edmac->free_list and callbacks are _not_ called. | ||
1098 | */ | ||
1099 | static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac) | ||
1100 | { | ||
1101 | struct ep93xx_dma_desc *desc, *_d; | ||
1102 | unsigned long flags; | ||
1103 | LIST_HEAD(list); | ||
1104 | |||
1105 | spin_lock_irqsave(&edmac->lock, flags); | ||
1106 | /* First we disable and flush the DMA channel */ | ||
1107 | edmac->edma->hw_shutdown(edmac); | ||
1108 | clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags); | ||
1109 | list_splice_init(&edmac->active, &list); | ||
1110 | list_splice_init(&edmac->queue, &list); | ||
1111 | /* | ||
1112 | * We then re-enable the channel. This way we can continue submitting | ||
1113 | * the descriptors by just calling ->hw_submit() again. | ||
1114 | */ | ||
1115 | edmac->edma->hw_setup(edmac); | ||
1116 | spin_unlock_irqrestore(&edmac->lock, flags); | ||
1117 | |||
1118 | list_for_each_entry_safe(desc, _d, &list, node) | ||
1119 | ep93xx_dma_desc_put(edmac, desc); | ||
1120 | |||
1121 | return 0; | ||
1122 | } | ||
1123 | |||
1124 | static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac, | ||
1125 | struct dma_slave_config *config) | ||
1126 | { | ||
1127 | enum dma_slave_buswidth width; | ||
1128 | unsigned long flags; | ||
1129 | u32 addr, ctrl; | ||
1130 | |||
1131 | if (!edmac->edma->m2m) | ||
1132 | return -EINVAL; | ||
1133 | |||
1134 | switch (config->direction) { | ||
1135 | case DMA_FROM_DEVICE: | ||
1136 | width = config->src_addr_width; | ||
1137 | addr = config->src_addr; | ||
1138 | break; | ||
1139 | |||
1140 | case DMA_TO_DEVICE: | ||
1141 | width = config->dst_addr_width; | ||
1142 | addr = config->dst_addr; | ||
1143 | break; | ||
1144 | |||
1145 | default: | ||
1146 | return -EINVAL; | ||
1147 | } | ||
1148 | |||
1149 | switch (width) { | ||
1150 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
1151 | ctrl = 0; | ||
1152 | break; | ||
1153 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
1154 | ctrl = M2M_CONTROL_PW_16; | ||
1155 | break; | ||
1156 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
1157 | ctrl = M2M_CONTROL_PW_32; | ||
1158 | break; | ||
1159 | default: | ||
1160 | return -EINVAL; | ||
1161 | } | ||
1162 | |||
1163 | spin_lock_irqsave(&edmac->lock, flags); | ||
1164 | edmac->runtime_addr = addr; | ||
1165 | edmac->runtime_ctrl = ctrl; | ||
1166 | spin_unlock_irqrestore(&edmac->lock, flags); | ||
1167 | |||
1168 | return 0; | ||
1169 | } | ||
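For M2M channels used in slave mode the client has to provide the device-side address and bus width before preparing transfers. A minimal sketch, with the FIFO address and width as placeholder values:

    #include <linux/dmaengine.h>

    /* Illustrative sketch: configure an M2M channel for 32-bit writes to a FIFO. */
    static int example_configure_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
    {
    	struct dma_slave_config cfg = {
    		.direction	= DMA_TO_DEVICE,
    		.dst_addr	= fifo_addr,	/* placeholder device address */
    		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
    	};

    	/* Routed through ep93xx_dma_control(DMA_SLAVE_CONFIG, ...) below. */
    	return dmaengine_slave_config(chan, &cfg);
    }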
1170 | |||
1171 | /** | ||
1172 | * ep93xx_dma_control - manipulate all pending operations on a channel | ||
1173 | * @chan: channel | ||
1174 | * @cmd: control command to perform | ||
1175 | * @arg: optional argument | ||
1176 | * | ||
1177 | * Controls the channel. Function returns %0 in case of success or negative | ||
1178 | * error in case of failure. | ||
1179 | */ | ||
1180 | static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
1181 | unsigned long arg) | ||
1182 | { | ||
1183 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | ||
1184 | struct dma_slave_config *config; | ||
1185 | |||
1186 | switch (cmd) { | ||
1187 | case DMA_TERMINATE_ALL: | ||
1188 | return ep93xx_dma_terminate_all(edmac); | ||
1189 | |||
1190 | case DMA_SLAVE_CONFIG: | ||
1191 | config = (struct dma_slave_config *)arg; | ||
1192 | return ep93xx_dma_slave_config(edmac, config); | ||
1193 | |||
1194 | default: | ||
1195 | break; | ||
1196 | } | ||
1197 | |||
1198 | return -ENOSYS; | ||
1199 | } | ||
1200 | |||
1201 | /** | ||
1202 | * ep93xx_dma_tx_status - check if a transaction is completed | ||
1203 | * @chan: channel | ||
1204 | * @cookie: transaction specific cookie | ||
1205 | * @state: state of the transaction is stored here if given | ||
1206 | * | ||
1207 | * This function can be used to query the state of a given transaction. | ||
1208 | */ | ||
1209 | static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan, | ||
1210 | dma_cookie_t cookie, | ||
1211 | struct dma_tx_state *state) | ||
1212 | { | ||
1213 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | ||
1214 | dma_cookie_t last_used, last_completed; | ||
1215 | enum dma_status ret; | ||
1216 | unsigned long flags; | ||
1217 | |||
1218 | spin_lock_irqsave(&edmac->lock, flags); | ||
1219 | last_used = chan->cookie; | ||
1220 | last_completed = edmac->last_completed; | ||
1221 | spin_unlock_irqrestore(&edmac->lock, flags); | ||
1222 | |||
1223 | ret = dma_async_is_complete(cookie, last_completed, last_used); | ||
1224 | dma_set_tx_state(state, last_completed, last_used, 0); | ||
1225 | |||
1226 | return ret; | ||
1227 | } | ||
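A caller that does not register a completion callback can instead poll the cookie returned at submit time. A small sketch built on the generic helper that ends up in this tx_status hook:

    #include <asm/processor.h>
    #include <linux/dmaengine.h>

    /* Illustrative sketch: busy-wait until a submitted transfer has completed. */
    static int example_wait(struct dma_chan *chan, dma_cookie_t cookie)
    {
    	enum dma_status status;

    	do {
    		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
    		cpu_relax();
    	} while (status == DMA_IN_PROGRESS);

    	return status == DMA_SUCCESS ? 0 : -EIO;
    }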
1228 | |||
1229 | /** | ||
1230 | * ep93xx_dma_issue_pending - push pending transactions to the hardware | ||
1231 | * @chan: channel | ||
1232 | * | ||
1233 | * When this function is called, all pending transactions are pushed to the | ||
1234 | * hardware and executed. | ||
1235 | */ | ||
1236 | static void ep93xx_dma_issue_pending(struct dma_chan *chan) | ||
1237 | { | ||
1238 | ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan)); | ||
1239 | } | ||
1240 | |||
1241 | static int __init ep93xx_dma_probe(struct platform_device *pdev) | ||
1242 | { | ||
1243 | struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev); | ||
1244 | struct ep93xx_dma_engine *edma; | ||
1245 | struct dma_device *dma_dev; | ||
1246 | size_t edma_size; | ||
1247 | int ret, i; | ||
1248 | |||
1249 | edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan); | ||
1250 | edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL); | ||
1251 | if (!edma) | ||
1252 | return -ENOMEM; | ||
1253 | |||
1254 | dma_dev = &edma->dma_dev; | ||
1255 | edma->m2m = platform_get_device_id(pdev)->driver_data; | ||
1256 | edma->num_channels = pdata->num_channels; | ||
1257 | |||
1258 | INIT_LIST_HEAD(&dma_dev->channels); | ||
1259 | for (i = 0; i < pdata->num_channels; i++) { | ||
1260 | const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i]; | ||
1261 | struct ep93xx_dma_chan *edmac = &edma->channels[i]; | ||
1262 | |||
1263 | edmac->chan.device = dma_dev; | ||
1264 | edmac->regs = cdata->base; | ||
1265 | edmac->irq = cdata->irq; | ||
1266 | edmac->edma = edma; | ||
1267 | |||
1268 | edmac->clk = clk_get(NULL, cdata->name); | ||
1269 | if (IS_ERR(edmac->clk)) { | ||
1270 | dev_warn(&pdev->dev, "failed to get clock for %s\n", | ||
1271 | cdata->name); | ||
1272 | continue; | ||
1273 | } | ||
1274 | |||
1275 | spin_lock_init(&edmac->lock); | ||
1276 | INIT_LIST_HEAD(&edmac->active); | ||
1277 | INIT_LIST_HEAD(&edmac->queue); | ||
1278 | INIT_LIST_HEAD(&edmac->free_list); | ||
1279 | tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet, | ||
1280 | (unsigned long)edmac); | ||
1281 | |||
1282 | list_add_tail(&edmac->chan.device_node, | ||
1283 | &dma_dev->channels); | ||
1284 | } | ||
1285 | |||
1286 | dma_cap_zero(dma_dev->cap_mask); | ||
1287 | dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); | ||
1288 | dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask); | ||
1289 | |||
1290 | dma_dev->dev = &pdev->dev; | ||
1291 | dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources; | ||
1292 | dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources; | ||
1293 | dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg; | ||
1294 | dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic; | ||
1295 | dma_dev->device_control = ep93xx_dma_control; | ||
1296 | dma_dev->device_issue_pending = ep93xx_dma_issue_pending; | ||
1297 | dma_dev->device_tx_status = ep93xx_dma_tx_status; | ||
1298 | |||
1299 | dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES); | ||
1300 | |||
1301 | if (edma->m2m) { | ||
1302 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); | ||
1303 | dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy; | ||
1304 | |||
1305 | edma->hw_setup = m2m_hw_setup; | ||
1306 | edma->hw_shutdown = m2m_hw_shutdown; | ||
1307 | edma->hw_submit = m2m_hw_submit; | ||
1308 | edma->hw_interrupt = m2m_hw_interrupt; | ||
1309 | } else { | ||
1310 | dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); | ||
1311 | |||
1312 | edma->hw_setup = m2p_hw_setup; | ||
1313 | edma->hw_shutdown = m2p_hw_shutdown; | ||
1314 | edma->hw_submit = m2p_hw_submit; | ||
1315 | edma->hw_interrupt = m2p_hw_interrupt; | ||
1316 | } | ||
1317 | |||
1318 | ret = dma_async_device_register(dma_dev); | ||
1319 | if (unlikely(ret)) { | ||
1320 | for (i = 0; i < edma->num_channels; i++) { | ||
1321 | struct ep93xx_dma_chan *edmac = &edma->channels[i]; | ||
1322 | if (!IS_ERR_OR_NULL(edmac->clk)) | ||
1323 | clk_put(edmac->clk); | ||
1324 | } | ||
1325 | kfree(edma); | ||
1326 | } else { | ||
1327 | dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n", | ||
1328 | edma->m2m ? "M" : "P"); | ||
1329 | } | ||
1330 | |||
1331 | return ret; | ||
1332 | } | ||
1333 | |||
1334 | static struct platform_device_id ep93xx_dma_driver_ids[] = { | ||
1335 | { "ep93xx-dma-m2p", 0 }, | ||
1336 | { "ep93xx-dma-m2m", 1 }, | ||
1337 | { }, | ||
1338 | }; | ||
1339 | |||
1340 | static struct platform_driver ep93xx_dma_driver = { | ||
1341 | .driver = { | ||
1342 | .name = "ep93xx-dma", | ||
1343 | }, | ||
1344 | .id_table = ep93xx_dma_driver_ids, | ||
1345 | }; | ||
1346 | |||
1347 | static int __init ep93xx_dma_module_init(void) | ||
1348 | { | ||
1349 | return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe); | ||
1350 | } | ||
1351 | subsys_initcall(ep93xx_dma_module_init); | ||
1352 | |||
1353 | MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>"); | ||
1354 | MODULE_DESCRIPTION("EP93xx DMA driver"); | ||
1355 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index b6d1455fa936..ec53980f8fcf 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -1281,8 +1281,10 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1281 | goto err_request_irq; | 1281 | goto err_request_irq; |
1282 | 1282 | ||
1283 | sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); | 1283 | sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); |
1284 | if (!sdma->script_addrs) | 1284 | if (!sdma->script_addrs) { |
1285 | ret = -ENOMEM; | ||
1285 | goto err_alloc; | 1286 | goto err_alloc; |
1287 | } | ||
1286 | 1288 | ||
1287 | sdma->version = pdata->sdma_version; | 1289 | sdma->version = pdata->sdma_version; |
1288 | 1290 | ||
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index f653517ef744..8a3fdd87db97 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c | |||
@@ -1351,7 +1351,6 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state) | |||
1351 | return -EAGAIN; | 1351 | return -EAGAIN; |
1352 | } | 1352 | } |
1353 | device->state = SUSPENDED; | 1353 | device->state = SUSPENDED; |
1354 | pci_set_drvdata(pci, device); | ||
1355 | pci_save_state(pci); | 1354 | pci_save_state(pci); |
1356 | pci_disable_device(pci); | 1355 | pci_disable_device(pci); |
1357 | pci_set_power_state(pci, PCI_D3hot); | 1356 | pci_set_power_state(pci, PCI_D3hot); |
@@ -1380,7 +1379,6 @@ int dma_resume(struct pci_dev *pci) | |||
1380 | } | 1379 | } |
1381 | device->state = RUNNING; | 1380 | device->state = RUNNING; |
1382 | iowrite32(REG_BIT0, device->dma_base + DMA_CFG); | 1381 | iowrite32(REG_BIT0, device->dma_base + DMA_CFG); |
1383 | pci_set_drvdata(pci, device); | ||
1384 | return 0; | 1382 | return 0; |
1385 | } | 1383 | } |
1386 | 1384 | ||
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index c1a125e7d1df..25447a8ca282 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c | |||
@@ -1705,16 +1705,14 @@ static int __init ipu_probe(struct platform_device *pdev) | |||
1705 | ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base); | 1705 | ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base); |
1706 | 1706 | ||
1707 | /* Remap IPU common registers */ | 1707 | /* Remap IPU common registers */ |
1708 | ipu_data.reg_ipu = ioremap(mem_ipu->start, | 1708 | ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu)); |
1709 | mem_ipu->end - mem_ipu->start + 1); | ||
1710 | if (!ipu_data.reg_ipu) { | 1709 | if (!ipu_data.reg_ipu) { |
1711 | ret = -ENOMEM; | 1710 | ret = -ENOMEM; |
1712 | goto err_ioremap_ipu; | 1711 | goto err_ioremap_ipu; |
1713 | } | 1712 | } |
1714 | 1713 | ||
1715 | /* Remap Image Converter and Image DMA Controller registers */ | 1714 | /* Remap Image Converter and Image DMA Controller registers */ |
1716 | ipu_data.reg_ic = ioremap(mem_ic->start, | 1715 | ipu_data.reg_ic = ioremap(mem_ic->start, resource_size(mem_ic)); |
1717 | mem_ic->end - mem_ic->start + 1); | ||
1718 | if (!ipu_data.reg_ic) { | 1716 | if (!ipu_data.reg_ic) { |
1719 | ret = -ENOMEM; | 1717 | ret = -ENOMEM; |
1720 | goto err_ioremap_ic; | 1718 | goto err_ioremap_ic; |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 954e334e01bb..9a353c2216d0 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -1305,7 +1305,7 @@ static int mv_xor_shared_probe(struct platform_device *pdev) | |||
1305 | return -ENODEV; | 1305 | return -ENODEV; |
1306 | 1306 | ||
1307 | msp->xor_base = devm_ioremap(&pdev->dev, res->start, | 1307 | msp->xor_base = devm_ioremap(&pdev->dev, res->start, |
1308 | res->end - res->start + 1); | 1308 | resource_size(res)); |
1309 | if (!msp->xor_base) | 1309 | if (!msp->xor_base) |
1310 | return -EBUSY; | 1310 | return -EBUSY; |
1311 | 1311 | ||
@@ -1314,7 +1314,7 @@ static int mv_xor_shared_probe(struct platform_device *pdev) | |||
1314 | return -ENODEV; | 1314 | return -ENODEV; |
1315 | 1315 | ||
1316 | msp->xor_high_base = devm_ioremap(&pdev->dev, res->start, | 1316 | msp->xor_high_base = devm_ioremap(&pdev->dev, res->start, |
1317 | res->end - res->start + 1); | 1317 | resource_size(res)); |
1318 | if (!msp->xor_high_base) | 1318 | if (!msp->xor_high_base) |
1319 | return -EBUSY; | 1319 | return -EBUSY; |
1320 | 1320 | ||
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 88aad4f54002..be641cbd36fc 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c | |||
@@ -327,10 +327,12 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) | |||
327 | 327 | ||
328 | memset(mxs_chan->ccw, 0, PAGE_SIZE); | 328 | memset(mxs_chan->ccw, 0, PAGE_SIZE); |
329 | 329 | ||
330 | ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, | 330 | if (mxs_chan->chan_irq != NO_IRQ) { |
331 | 0, "mxs-dma", mxs_dma); | 331 | ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, |
332 | if (ret) | 332 | 0, "mxs-dma", mxs_dma); |
333 | goto err_irq; | 333 | if (ret) |
334 | goto err_irq; | ||
335 | } | ||
334 | 336 | ||
335 | ret = clk_enable(mxs_dma->clk); | 337 | ret = clk_enable(mxs_dma->clk); |
336 | if (ret) | 338 | if (ret) |
@@ -535,6 +537,7 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
535 | switch (cmd) { | 537 | switch (cmd) { |
536 | case DMA_TERMINATE_ALL: | 538 | case DMA_TERMINATE_ALL: |
537 | mxs_dma_disable_chan(mxs_chan); | 539 | mxs_dma_disable_chan(mxs_chan); |
540 | mxs_dma_reset_chan(mxs_chan); | ||
538 | break; | 541 | break; |
539 | case DMA_PAUSE: | 542 | case DMA_PAUSE: |
540 | mxs_dma_pause_chan(mxs_chan); | 543 | mxs_dma_pause_chan(mxs_chan); |
@@ -707,6 +710,8 @@ static struct platform_device_id mxs_dma_type[] = { | |||
707 | }, { | 710 | }, { |
708 | .name = "mxs-dma-apbx", | 711 | .name = "mxs-dma-apbx", |
709 | .driver_data = MXS_DMA_APBX, | 712 | .driver_data = MXS_DMA_APBX, |
713 | }, { | ||
714 | /* end of list */ | ||
710 | } | 715 | } |
711 | }; | 716 | }; |
712 | 717 | ||
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index ff5b38f9d45b..1ac8d4b580b7 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c | |||
@@ -45,7 +45,8 @@ | |||
45 | #define DMA_STATUS_MASK_BITS 0x3 | 45 | #define DMA_STATUS_MASK_BITS 0x3 |
46 | #define DMA_STATUS_SHIFT_BITS 16 | 46 | #define DMA_STATUS_SHIFT_BITS 16 |
47 | #define DMA_STATUS_IRQ(x) (0x1 << (x)) | 47 | #define DMA_STATUS_IRQ(x) (0x1 << (x)) |
48 | #define DMA_STATUS_ERR(x) (0x1 << ((x) + 8)) | 48 | #define DMA_STATUS0_ERR(x) (0x1 << ((x) + 8)) |
49 | #define DMA_STATUS2_ERR(x) (0x1 << (x)) | ||
49 | 50 | ||
50 | #define DMA_DESC_WIDTH_SHIFT_BITS 12 | 51 | #define DMA_DESC_WIDTH_SHIFT_BITS 12 |
51 | #define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS) | 52 | #define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS) |
@@ -61,6 +62,9 @@ | |||
61 | 62 | ||
62 | #define MAX_CHAN_NR 8 | 63 | #define MAX_CHAN_NR 8 |
63 | 64 | ||
65 | #define DMA_MASK_CTL0_MODE 0x33333333 | ||
66 | #define DMA_MASK_CTL2_MODE 0x00003333 | ||
67 | |||
64 | static unsigned int init_nr_desc_per_channel = 64; | 68 | static unsigned int init_nr_desc_per_channel = 64; |
65 | module_param(init_nr_desc_per_channel, uint, 0644); | 69 | module_param(init_nr_desc_per_channel, uint, 0644); |
66 | MODULE_PARM_DESC(init_nr_desc_per_channel, | 70 | MODULE_PARM_DESC(init_nr_desc_per_channel, |
@@ -133,6 +137,7 @@ struct pch_dma { | |||
133 | #define PCH_DMA_CTL3 0x0C | 137 | #define PCH_DMA_CTL3 0x0C |
134 | #define PCH_DMA_STS0 0x10 | 138 | #define PCH_DMA_STS0 0x10 |
135 | #define PCH_DMA_STS1 0x14 | 139 | #define PCH_DMA_STS1 0x14 |
140 | #define PCH_DMA_STS2 0x18 | ||
136 | 141 | ||
137 | #define dma_readl(pd, name) \ | 142 | #define dma_readl(pd, name) \ |
138 | readl((pd)->membase + PCH_DMA_##name) | 143 | readl((pd)->membase + PCH_DMA_##name) |
@@ -183,13 +188,19 @@ static void pdc_enable_irq(struct dma_chan *chan, int enable) | |||
183 | { | 188 | { |
184 | struct pch_dma *pd = to_pd(chan->device); | 189 | struct pch_dma *pd = to_pd(chan->device); |
185 | u32 val; | 190 | u32 val; |
191 | int pos; | ||
192 | |||
193 | if (chan->chan_id < 8) | ||
194 | pos = chan->chan_id; | ||
195 | else | ||
196 | pos = chan->chan_id + 8; | ||
186 | 197 | ||
187 | val = dma_readl(pd, CTL2); | 198 | val = dma_readl(pd, CTL2); |
188 | 199 | ||
189 | if (enable) | 200 | if (enable) |
190 | val |= 0x1 << chan->chan_id; | 201 | val |= 0x1 << pos; |
191 | else | 202 | else |
192 | val &= ~(0x1 << chan->chan_id); | 203 | val &= ~(0x1 << pos); |
193 | 204 | ||
194 | dma_writel(pd, CTL2, val); | 205 | dma_writel(pd, CTL2, val); |
195 | 206 | ||
@@ -202,10 +213,17 @@ static void pdc_set_dir(struct dma_chan *chan) | |||
202 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); | 213 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); |
203 | struct pch_dma *pd = to_pd(chan->device); | 214 | struct pch_dma *pd = to_pd(chan->device); |
204 | u32 val; | 215 | u32 val; |
216 | u32 mask_mode; | ||
217 | u32 mask_ctl; | ||
205 | 218 | ||
206 | if (chan->chan_id < 8) { | 219 | if (chan->chan_id < 8) { |
207 | val = dma_readl(pd, CTL0); | 220 | val = dma_readl(pd, CTL0); |
208 | 221 | ||
222 | mask_mode = DMA_CTL0_MODE_MASK_BITS << | ||
223 | (DMA_CTL0_BITS_PER_CH * chan->chan_id); | ||
224 | mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS << | ||
225 | (DMA_CTL0_BITS_PER_CH * chan->chan_id)); | ||
226 | val &= mask_mode; | ||
209 | if (pd_chan->dir == DMA_TO_DEVICE) | 227 | if (pd_chan->dir == DMA_TO_DEVICE) |
210 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + | 228 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + |
211 | DMA_CTL0_DIR_SHIFT_BITS); | 229 | DMA_CTL0_DIR_SHIFT_BITS); |
@@ -213,18 +231,24 @@ static void pdc_set_dir(struct dma_chan *chan) | |||
213 | val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + | 231 | val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + |
214 | DMA_CTL0_DIR_SHIFT_BITS)); | 232 | DMA_CTL0_DIR_SHIFT_BITS)); |
215 | 233 | ||
234 | val |= mask_ctl; | ||
216 | dma_writel(pd, CTL0, val); | 235 | dma_writel(pd, CTL0, val); |
217 | } else { | 236 | } else { |
218 | int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ | 237 | int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ |
219 | val = dma_readl(pd, CTL3); | 238 | val = dma_readl(pd, CTL3); |
220 | 239 | ||
240 | mask_mode = DMA_CTL0_MODE_MASK_BITS << | ||
241 | (DMA_CTL0_BITS_PER_CH * ch); | ||
242 | mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS << | ||
243 | (DMA_CTL0_BITS_PER_CH * ch)); | ||
244 | val &= mask_mode; | ||
221 | if (pd_chan->dir == DMA_TO_DEVICE) | 245 | if (pd_chan->dir == DMA_TO_DEVICE) |
222 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch + | 246 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch + |
223 | DMA_CTL0_DIR_SHIFT_BITS); | 247 | DMA_CTL0_DIR_SHIFT_BITS); |
224 | else | 248 | else |
225 | val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch + | 249 | val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch + |
226 | DMA_CTL0_DIR_SHIFT_BITS)); | 250 | DMA_CTL0_DIR_SHIFT_BITS)); |
227 | 251 | val |= mask_ctl; | |
228 | dma_writel(pd, CTL3, val); | 252 | dma_writel(pd, CTL3, val); |
229 | } | 253 | } |
230 | 254 | ||
@@ -236,33 +260,37 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode) | |||
236 | { | 260 | { |
237 | struct pch_dma *pd = to_pd(chan->device); | 261 | struct pch_dma *pd = to_pd(chan->device); |
238 | u32 val; | 262 | u32 val; |
263 | u32 mask_ctl; | ||
264 | u32 mask_dir; | ||
239 | 265 | ||
240 | if (chan->chan_id < 8) { | 266 | if (chan->chan_id < 8) { |
267 | mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS << | ||
268 | (DMA_CTL0_BITS_PER_CH * chan->chan_id)); | ||
269 | mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +\ | ||
270 | DMA_CTL0_DIR_SHIFT_BITS); | ||
241 | val = dma_readl(pd, CTL0); | 271 | val = dma_readl(pd, CTL0); |
242 | 272 | val &= mask_dir; | |
243 | val &= ~(DMA_CTL0_MODE_MASK_BITS << | ||
244 | (DMA_CTL0_BITS_PER_CH * chan->chan_id)); | ||
245 | val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id); | 273 | val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id); |
246 | 274 | val |= mask_ctl; | |
247 | dma_writel(pd, CTL0, val); | 275 | dma_writel(pd, CTL0, val); |
248 | } else { | 276 | } else { |
249 | int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ | 277 | int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ |
250 | 278 | mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS << | |
279 | (DMA_CTL0_BITS_PER_CH * ch)); | ||
280 | mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +\ | ||
281 | DMA_CTL0_DIR_SHIFT_BITS); | ||
251 | val = dma_readl(pd, CTL3); | 282 | val = dma_readl(pd, CTL3); |
252 | 283 | val &= mask_dir; | |
253 | val &= ~(DMA_CTL0_MODE_MASK_BITS << | ||
254 | (DMA_CTL0_BITS_PER_CH * ch)); | ||
255 | val |= mode << (DMA_CTL0_BITS_PER_CH * ch); | 284 | val |= mode << (DMA_CTL0_BITS_PER_CH * ch); |
256 | 285 | val |= mask_ctl; | |
257 | dma_writel(pd, CTL3, val); | 286 | dma_writel(pd, CTL3, val); |
258 | |||
259 | } | 287 | } |
260 | 288 | ||
261 | dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n", | 289 | dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n", |
262 | chan->chan_id, val); | 290 | chan->chan_id, val); |
263 | } | 291 | } |
264 | 292 | ||
265 | static u32 pdc_get_status(struct pch_dma_chan *pd_chan) | 293 | static u32 pdc_get_status0(struct pch_dma_chan *pd_chan) |
266 | { | 294 | { |
267 | struct pch_dma *pd = to_pd(pd_chan->chan.device); | 295 | struct pch_dma *pd = to_pd(pd_chan->chan.device); |
268 | u32 val; | 296 | u32 val; |
@@ -272,9 +300,27 @@ static u32 pdc_get_status(struct pch_dma_chan *pd_chan) | |||
272 | DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id)); | 300 | DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id)); |
273 | } | 301 | } |
274 | 302 | ||
303 | static u32 pdc_get_status2(struct pch_dma_chan *pd_chan) | ||
304 | { | ||
305 | struct pch_dma *pd = to_pd(pd_chan->chan.device); | ||
306 | u32 val; | ||
307 | |||
308 | val = dma_readl(pd, STS2); | ||
309 | return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS + | ||
310 | DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8))); | ||
311 | } | ||
312 | |||
275 | static bool pdc_is_idle(struct pch_dma_chan *pd_chan) | 313 | static bool pdc_is_idle(struct pch_dma_chan *pd_chan) |
276 | { | 314 | { |
277 | if (pdc_get_status(pd_chan) == DMA_STATUS_IDLE) | 315 | u32 sts; |
316 | |||
317 | if (pd_chan->chan.chan_id < 8) | ||
318 | sts = pdc_get_status0(pd_chan); | ||
319 | else | ||
320 | sts = pdc_get_status2(pd_chan); | ||
321 | |||
322 | |||
323 | if (sts == DMA_STATUS_IDLE) | ||
278 | return true; | 324 | return true; |
279 | else | 325 | else |
280 | return false; | 326 | return false; |
@@ -495,11 +541,11 @@ static int pd_alloc_chan_resources(struct dma_chan *chan) | |||
495 | list_add_tail(&desc->desc_node, &tmp_list); | 541 | list_add_tail(&desc->desc_node, &tmp_list); |
496 | } | 542 | } |
497 | 543 | ||
498 | spin_lock_bh(&pd_chan->lock); | 544 | spin_lock_irq(&pd_chan->lock); |
499 | list_splice(&tmp_list, &pd_chan->free_list); | 545 | list_splice(&tmp_list, &pd_chan->free_list); |
500 | pd_chan->descs_allocated = i; | 546 | pd_chan->descs_allocated = i; |
501 | pd_chan->completed_cookie = chan->cookie = 1; | 547 | pd_chan->completed_cookie = chan->cookie = 1; |
502 | spin_unlock_bh(&pd_chan->lock); | 548 | spin_unlock_irq(&pd_chan->lock); |
503 | 549 | ||
504 | pdc_enable_irq(chan, 1); | 550 | pdc_enable_irq(chan, 1); |
505 | 551 | ||
@@ -517,10 +563,10 @@ static void pd_free_chan_resources(struct dma_chan *chan) | |||
517 | BUG_ON(!list_empty(&pd_chan->active_list)); | 563 | BUG_ON(!list_empty(&pd_chan->active_list)); |
518 | BUG_ON(!list_empty(&pd_chan->queue)); | 564 | BUG_ON(!list_empty(&pd_chan->queue)); |
519 | 565 | ||
520 | spin_lock_bh(&pd_chan->lock); | 566 | spin_lock_irq(&pd_chan->lock); |
521 | list_splice_init(&pd_chan->free_list, &tmp_list); | 567 | list_splice_init(&pd_chan->free_list, &tmp_list); |
522 | pd_chan->descs_allocated = 0; | 568 | pd_chan->descs_allocated = 0; |
523 | spin_unlock_bh(&pd_chan->lock); | 569 | spin_unlock_irq(&pd_chan->lock); |
524 | 570 | ||
525 | list_for_each_entry_safe(desc, _d, &tmp_list, desc_node) | 571 | list_for_each_entry_safe(desc, _d, &tmp_list, desc_node) |
526 | pci_pool_free(pd->pool, desc, desc->txd.phys); | 572 | pci_pool_free(pd->pool, desc, desc->txd.phys); |
@@ -536,10 +582,10 @@ static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
536 | dma_cookie_t last_completed; | 582 | dma_cookie_t last_completed; |
537 | int ret; | 583 | int ret; |
538 | 584 | ||
539 | spin_lock_bh(&pd_chan->lock); | 585 | spin_lock_irq(&pd_chan->lock); |
540 | last_completed = pd_chan->completed_cookie; | 586 | last_completed = pd_chan->completed_cookie; |
541 | last_used = chan->cookie; | 587 | last_used = chan->cookie; |
542 | spin_unlock_bh(&pd_chan->lock); | 588 | spin_unlock_irq(&pd_chan->lock); |
543 | 589 | ||
544 | ret = dma_async_is_complete(cookie, last_completed, last_used); | 590 | ret = dma_async_is_complete(cookie, last_completed, last_used); |
545 | 591 | ||
@@ -654,7 +700,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
654 | if (cmd != DMA_TERMINATE_ALL) | 700 | if (cmd != DMA_TERMINATE_ALL) |
655 | return -ENXIO; | 701 | return -ENXIO; |
656 | 702 | ||
657 | spin_lock_bh(&pd_chan->lock); | 703 | spin_lock_irq(&pd_chan->lock); |
658 | 704 | ||
659 | pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE); | 705 | pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE); |
660 | 706 | ||
@@ -664,7 +710,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
664 | list_for_each_entry_safe(desc, _d, &list, desc_node) | 710 | list_for_each_entry_safe(desc, _d, &list, desc_node) |
665 | pdc_chain_complete(pd_chan, desc); | 711 | pdc_chain_complete(pd_chan, desc); |
666 | 712 | ||
667 | spin_unlock_bh(&pd_chan->lock); | 713 | spin_unlock_irq(&pd_chan->lock); |
668 | 714 | ||
669 | return 0; | 715 | return 0; |
670 | } | 716 | } |
@@ -693,30 +739,45 @@ static irqreturn_t pd_irq(int irq, void *devid) | |||
693 | struct pch_dma *pd = (struct pch_dma *)devid; | 739 | struct pch_dma *pd = (struct pch_dma *)devid; |
694 | struct pch_dma_chan *pd_chan; | 740 | struct pch_dma_chan *pd_chan; |
695 | u32 sts0; | 741 | u32 sts0; |
742 | u32 sts2; | ||
696 | int i; | 743 | int i; |
697 | int ret = IRQ_NONE; | 744 | int ret0 = IRQ_NONE; |
745 | int ret2 = IRQ_NONE; | ||
698 | 746 | ||
699 | sts0 = dma_readl(pd, STS0); | 747 | sts0 = dma_readl(pd, STS0); |
748 | sts2 = dma_readl(pd, STS2); | ||
700 | 749 | ||
701 | dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0); | 750 | dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0); |
702 | 751 | ||
703 | for (i = 0; i < pd->dma.chancnt; i++) { | 752 | for (i = 0; i < pd->dma.chancnt; i++) { |
704 | pd_chan = &pd->channels[i]; | 753 | pd_chan = &pd->channels[i]; |
705 | 754 | ||
706 | if (sts0 & DMA_STATUS_IRQ(i)) { | 755 | if (i < 8) { |
707 | if (sts0 & DMA_STATUS_ERR(i)) | 756 | if (sts0 & DMA_STATUS_IRQ(i)) { |
708 | set_bit(0, &pd_chan->err_status); | 757 | if (sts0 & DMA_STATUS0_ERR(i)) |
758 | set_bit(0, &pd_chan->err_status); | ||
709 | 759 | ||
710 | tasklet_schedule(&pd_chan->tasklet); | 760 | tasklet_schedule(&pd_chan->tasklet); |
711 | ret = IRQ_HANDLED; | 761 | ret0 = IRQ_HANDLED; |
712 | } | 762 | } |
763 | } else { | ||
764 | if (sts2 & DMA_STATUS_IRQ(i - 8)) { | ||
765 | if (sts2 & DMA_STATUS2_ERR(i)) | ||
766 | set_bit(0, &pd_chan->err_status); | ||
713 | 767 | ||
768 | tasklet_schedule(&pd_chan->tasklet); | ||
769 | ret2 = IRQ_HANDLED; | ||
770 | } | ||
771 | } | ||
714 | } | 772 | } |
715 | 773 | ||
716 | /* clear interrupt bits in status register */ | 774 | /* clear interrupt bits in status register */ |
717 | dma_writel(pd, STS0, sts0); | 775 | if (ret0) |
776 | dma_writel(pd, STS0, sts0); | ||
777 | if (ret2) | ||
778 | dma_writel(pd, STS2, sts2); | ||
718 | 779 | ||
719 | return ret; | 780 | return ret0 | ret2; |
720 | } | 781 | } |
721 | 782 | ||
722 | #ifdef CONFIG_PM | 783 | #ifdef CONFIG_PM |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 6abe1ec1f2ce..00eee59e8b33 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -82,7 +82,7 @@ struct dma_pl330_dmac { | |||
82 | spinlock_t pool_lock; | 82 | spinlock_t pool_lock; |
83 | 83 | ||
84 | /* Peripheral channels connected to this DMAC */ | 84 | /* Peripheral channels connected to this DMAC */ |
85 | struct dma_pl330_chan peripherals[0]; /* keep at end */ | 85 | struct dma_pl330_chan *peripherals; /* keep at end */ |
86 | }; | 86 | }; |
87 | 87 | ||
88 | struct dma_pl330_desc { | 88 | struct dma_pl330_desc { |
@@ -451,8 +451,13 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) | |||
451 | desc->txd.cookie = 0; | 451 | desc->txd.cookie = 0; |
452 | async_tx_ack(&desc->txd); | 452 | async_tx_ack(&desc->txd); |
453 | 453 | ||
454 | desc->req.rqtype = peri->rqtype; | 454 | if (peri) { |
455 | desc->req.peri = peri->peri_id; | 455 | desc->req.rqtype = peri->rqtype; |
456 | desc->req.peri = peri->peri_id; | ||
457 | } else { | ||
458 | desc->req.rqtype = MEMTOMEM; | ||
459 | desc->req.peri = 0; | ||
460 | } | ||
456 | 461 | ||
457 | dma_async_tx_descriptor_init(&desc->txd, &pch->chan); | 462 | dma_async_tx_descriptor_init(&desc->txd, &pch->chan); |
458 | 463 | ||
@@ -529,10 +534,10 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, | |||
529 | struct pl330_info *pi; | 534 | struct pl330_info *pi; |
530 | int burst; | 535 | int burst; |
531 | 536 | ||
532 | if (unlikely(!pch || !len || !peri)) | 537 | if (unlikely(!pch || !len)) |
533 | return NULL; | 538 | return NULL; |
534 | 539 | ||
535 | if (peri->rqtype != MEMTOMEM) | 540 | if (peri && peri->rqtype != MEMTOMEM) |
536 | return NULL; | 541 | return NULL; |
537 | 542 | ||
538 | pi = &pch->dmac->pif; | 543 | pi = &pch->dmac->pif; |
@@ -577,7 +582,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
577 | int i, burst_size; | 582 | int i, burst_size; |
578 | dma_addr_t addr; | 583 | dma_addr_t addr; |
579 | 584 | ||
580 | if (unlikely(!pch || !sgl || !sg_len)) | 585 | if (unlikely(!pch || !sgl || !sg_len || !peri)) |
581 | return NULL; | 586 | return NULL; |
582 | 587 | ||
583 | /* Make sure the direction is consistent */ | 588 | /* Make sure the direction is consistent */ |
@@ -666,17 +671,12 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
666 | struct dma_device *pd; | 671 | struct dma_device *pd; |
667 | struct resource *res; | 672 | struct resource *res; |
668 | int i, ret, irq; | 673 | int i, ret, irq; |
674 | int num_chan; | ||
669 | 675 | ||
670 | pdat = adev->dev.platform_data; | 676 | pdat = adev->dev.platform_data; |
671 | 677 | ||
672 | if (!pdat || !pdat->nr_valid_peri) { | ||
673 | dev_err(&adev->dev, "platform data missing\n"); | ||
674 | return -ENODEV; | ||
675 | } | ||
676 | |||
677 | /* Allocate a new DMAC and its Channels */ | 678 | /* Allocate a new DMAC and its Channels */ |
678 | pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch) | 679 | pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL); |
679 | + sizeof(*pdmac), GFP_KERNEL); | ||
680 | if (!pdmac) { | 680 | if (!pdmac) { |
681 | dev_err(&adev->dev, "unable to allocate mem\n"); | 681 | dev_err(&adev->dev, "unable to allocate mem\n"); |
682 | return -ENOMEM; | 682 | return -ENOMEM; |
@@ -685,7 +685,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
685 | pi = &pdmac->pif; | 685 | pi = &pdmac->pif; |
686 | pi->dev = &adev->dev; | 686 | pi->dev = &adev->dev; |
687 | pi->pl330_data = NULL; | 687 | pi->pl330_data = NULL; |
688 | pi->mcbufsz = pdat->mcbuf_sz; | 688 | pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0; |
689 | 689 | ||
690 | res = &adev->res; | 690 | res = &adev->res; |
691 | request_mem_region(res->start, resource_size(res), "dma-pl330"); | 691 | request_mem_region(res->start, resource_size(res), "dma-pl330"); |
@@ -717,27 +717,35 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
717 | INIT_LIST_HEAD(&pd->channels); | 717 | INIT_LIST_HEAD(&pd->channels); |
718 | 718 | ||
719 | /* Initialize channel parameters */ | 719 | /* Initialize channel parameters */ |
720 | for (i = 0; i < pdat->nr_valid_peri; i++) { | 720 | num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan); |
721 | struct dma_pl330_peri *peri = &pdat->peri[i]; | 721 | pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); |
722 | pch = &pdmac->peripherals[i]; | ||
723 | 722 | ||
724 | switch (peri->rqtype) { | 723 | for (i = 0; i < num_chan; i++) { |
725 | case MEMTOMEM: | 724 | pch = &pdmac->peripherals[i]; |
725 | if (pdat) { | ||
726 | struct dma_pl330_peri *peri = &pdat->peri[i]; | ||
727 | |||
728 | switch (peri->rqtype) { | ||
729 | case MEMTOMEM: | ||
730 | dma_cap_set(DMA_MEMCPY, pd->cap_mask); | ||
731 | break; | ||
732 | case MEMTODEV: | ||
733 | case DEVTOMEM: | ||
734 | dma_cap_set(DMA_SLAVE, pd->cap_mask); | ||
735 | break; | ||
736 | default: | ||
737 | dev_err(&adev->dev, "DEVTODEV Not Supported\n"); | ||
738 | continue; | ||
739 | } | ||
740 | pch->chan.private = peri; | ||
741 | } else { | ||
726 | dma_cap_set(DMA_MEMCPY, pd->cap_mask); | 742 | dma_cap_set(DMA_MEMCPY, pd->cap_mask); |
727 | break; | 743 | pch->chan.private = NULL; |
728 | case MEMTODEV: | ||
729 | case DEVTOMEM: | ||
730 | dma_cap_set(DMA_SLAVE, pd->cap_mask); | ||
731 | break; | ||
732 | default: | ||
733 | dev_err(&adev->dev, "DEVTODEV Not Supported\n"); | ||
734 | continue; | ||
735 | } | 744 | } |
736 | 745 | ||
737 | INIT_LIST_HEAD(&pch->work_list); | 746 | INIT_LIST_HEAD(&pch->work_list); |
738 | spin_lock_init(&pch->lock); | 747 | spin_lock_init(&pch->lock); |
739 | pch->pl330_chid = NULL; | 748 | pch->pl330_chid = NULL; |
740 | pch->chan.private = peri; | ||
741 | pch->chan.device = pd; | 749 | pch->chan.device = pd; |
742 | pch->chan.chan_id = i; | 750 | pch->chan.chan_id = i; |
743 | pch->dmac = pdmac; | 751 | pch->dmac = pdmac; |
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 8f222d4db7de..75ba5865d7a4 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/clk.h> | 13 | #include <linux/clk.h> |
14 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
15 | #include <linux/err.h> | 15 | #include <linux/err.h> |
16 | #include <linux/amba/bus.h> | ||
16 | 17 | ||
17 | #include <plat/ste_dma40.h> | 18 | #include <plat/ste_dma40.h> |
18 | 19 | ||
@@ -44,9 +45,6 @@ | |||
44 | #define D40_ALLOC_PHY (1 << 30) | 45 | #define D40_ALLOC_PHY (1 << 30) |
45 | #define D40_ALLOC_LOG_FREE 0 | 46 | #define D40_ALLOC_LOG_FREE 0 |
46 | 47 | ||
47 | /* Hardware designer of the block */ | ||
48 | #define D40_HW_DESIGNER 0x8 | ||
49 | |||
50 | /** | 48 | /** |
51 | * enum 40_command - The different commands and/or statuses. | 49 | * enum 40_command - The different commands and/or statuses. |
52 | * | 50 | * |
@@ -185,6 +183,8 @@ struct d40_base; | |||
185 | * @log_def: Default logical channel settings. | 183 | * @log_def: Default logical channel settings. |
186 | * @lcla: Space for one dst src pair for logical channel transfers. | 184 | * @lcla: Space for one dst src pair for logical channel transfers. |
187 | * @lcpa: Pointer to dst and src lcpa settings. | 185 | * @lcpa: Pointer to dst and src lcpa settings. |
186 | * @runtime_addr: runtime configured address. | ||
187 | * @runtime_direction: runtime configured direction. | ||
188 | * | 188 | * |
189 | * This struct can either "be" a logical or a physical channel. | 189 | * This struct can either "be" a logical or a physical channel. |
190 | */ | 190 | */ |
@@ -199,6 +199,7 @@ struct d40_chan { | |||
199 | struct dma_chan chan; | 199 | struct dma_chan chan; |
200 | struct tasklet_struct tasklet; | 200 | struct tasklet_struct tasklet; |
201 | struct list_head client; | 201 | struct list_head client; |
202 | struct list_head pending_queue; | ||
202 | struct list_head active; | 203 | struct list_head active; |
203 | struct list_head queue; | 204 | struct list_head queue; |
204 | struct stedma40_chan_cfg dma_cfg; | 205 | struct stedma40_chan_cfg dma_cfg; |
@@ -644,7 +645,20 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) | |||
644 | 645 | ||
645 | static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) | 646 | static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) |
646 | { | 647 | { |
647 | list_add_tail(&desc->node, &d40c->queue); | 648 | list_add_tail(&desc->node, &d40c->pending_queue); |
649 | } | ||
650 | |||
651 | static struct d40_desc *d40_first_pending(struct d40_chan *d40c) | ||
652 | { | ||
653 | struct d40_desc *d; | ||
654 | |||
655 | if (list_empty(&d40c->pending_queue)) | ||
656 | return NULL; | ||
657 | |||
658 | d = list_first_entry(&d40c->pending_queue, | ||
659 | struct d40_desc, | ||
660 | node); | ||
661 | return d; | ||
648 | } | 662 | } |
649 | 663 | ||
650 | static struct d40_desc *d40_first_queued(struct d40_chan *d40c) | 664 | static struct d40_desc *d40_first_queued(struct d40_chan *d40c) |
@@ -801,6 +815,11 @@ static void d40_term_all(struct d40_chan *d40c) | |||
801 | d40_desc_free(d40c, d40d); | 815 | d40_desc_free(d40c, d40d); |
802 | } | 816 | } |
803 | 817 | ||
818 | /* Release pending descriptors */ | ||
819 | while ((d40d = d40_first_pending(d40c))) { | ||
820 | d40_desc_remove(d40d); | ||
821 | d40_desc_free(d40c, d40d); | ||
822 | } | ||
804 | 823 | ||
805 | d40c->pending_tx = 0; | 824 | d40c->pending_tx = 0; |
806 | d40c->busy = false; | 825 | d40c->busy = false; |
@@ -2091,7 +2110,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, | |||
2091 | struct scatterlist *sg; | 2110 | struct scatterlist *sg; |
2092 | int i; | 2111 | int i; |
2093 | 2112 | ||
2094 | sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL); | 2113 | sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT); |
2095 | for (i = 0; i < periods; i++) { | 2114 | for (i = 0; i < periods; i++) { |
2096 | sg_dma_address(&sg[i]) = dma_addr; | 2115 | sg_dma_address(&sg[i]) = dma_addr; |
2097 | sg_dma_len(&sg[i]) = period_len; | 2116 | sg_dma_len(&sg[i]) = period_len; |
@@ -2151,24 +2170,87 @@ static void d40_issue_pending(struct dma_chan *chan) | |||
2151 | 2170 | ||
2152 | spin_lock_irqsave(&d40c->lock, flags); | 2171 | spin_lock_irqsave(&d40c->lock, flags); |
2153 | 2172 | ||
2154 | /* Busy means that pending jobs are already being processed */ | 2173 | list_splice_tail_init(&d40c->pending_queue, &d40c->queue); |
2174 | |||
2175 | /* Busy means that queued jobs are already being processed */ | ||
2155 | if (!d40c->busy) | 2176 | if (!d40c->busy) |
2156 | (void) d40_queue_start(d40c); | 2177 | (void) d40_queue_start(d40c); |
2157 | 2178 | ||
2158 | spin_unlock_irqrestore(&d40c->lock, flags); | 2179 | spin_unlock_irqrestore(&d40c->lock, flags); |
2159 | } | 2180 | } |
2160 | 2181 | ||
2182 | static int | ||
2183 | dma40_config_to_halfchannel(struct d40_chan *d40c, | ||
2184 | struct stedma40_half_channel_info *info, | ||
2185 | enum dma_slave_buswidth width, | ||
2186 | u32 maxburst) | ||
2187 | { | ||
2188 | enum stedma40_periph_data_width addr_width; | ||
2189 | int psize; | ||
2190 | |||
2191 | switch (width) { | ||
2192 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
2193 | addr_width = STEDMA40_BYTE_WIDTH; | ||
2194 | break; | ||
2195 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
2196 | addr_width = STEDMA40_HALFWORD_WIDTH; | ||
2197 | break; | ||
2198 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
2199 | addr_width = STEDMA40_WORD_WIDTH; | ||
2200 | break; | ||
2201 | case DMA_SLAVE_BUSWIDTH_8_BYTES: | ||
2202 | addr_width = STEDMA40_DOUBLEWORD_WIDTH; | ||
2203 | break; | ||
2204 | default: | ||
2205 | dev_err(d40c->base->dev, | ||
2206 | "illegal peripheral address width " | ||
2207 | "requested (%d)\n", | ||
2208 | width); | ||
2209 | return -EINVAL; | ||
2210 | } | ||
2211 | |||
2212 | if (chan_is_logical(d40c)) { | ||
2213 | if (maxburst >= 16) | ||
2214 | psize = STEDMA40_PSIZE_LOG_16; | ||
2215 | else if (maxburst >= 8) | ||
2216 | psize = STEDMA40_PSIZE_LOG_8; | ||
2217 | else if (maxburst >= 4) | ||
2218 | psize = STEDMA40_PSIZE_LOG_4; | ||
2219 | else | ||
2220 | psize = STEDMA40_PSIZE_LOG_1; | ||
2221 | } else { | ||
2222 | if (maxburst >= 16) | ||
2223 | psize = STEDMA40_PSIZE_PHY_16; | ||
2224 | else if (maxburst >= 8) | ||
2225 | psize = STEDMA40_PSIZE_PHY_8; | ||
2226 | else if (maxburst >= 4) | ||
2227 | psize = STEDMA40_PSIZE_PHY_4; | ||
2228 | else | ||
2229 | psize = STEDMA40_PSIZE_PHY_1; | ||
2230 | } | ||
2231 | |||
2232 | info->data_width = addr_width; | ||
2233 | info->psize = psize; | ||
2234 | info->flow_ctrl = STEDMA40_NO_FLOW_CTRL; | ||
2235 | |||
2236 | return 0; | ||
2237 | } | ||
2238 | |||
2161 | /* Runtime reconfiguration extension */ | 2239 | /* Runtime reconfiguration extension */ |
2162 | static void d40_set_runtime_config(struct dma_chan *chan, | 2240 | static int d40_set_runtime_config(struct dma_chan *chan, |
2163 | struct dma_slave_config *config) | 2241 | struct dma_slave_config *config) |
2164 | { | 2242 | { |
2165 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 2243 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2166 | struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; | 2244 | struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; |
2167 | enum dma_slave_buswidth config_addr_width; | 2245 | enum dma_slave_buswidth src_addr_width, dst_addr_width; |
2168 | dma_addr_t config_addr; | 2246 | dma_addr_t config_addr; |
2169 | u32 config_maxburst; | 2247 | u32 src_maxburst, dst_maxburst; |
2170 | enum stedma40_periph_data_width addr_width; | 2248 | int ret; |
2171 | int psize; | 2249 | |
2250 | src_addr_width = config->src_addr_width; | ||
2251 | src_maxburst = config->src_maxburst; | ||
2252 | dst_addr_width = config->dst_addr_width; | ||
2253 | dst_maxburst = config->dst_maxburst; | ||
2172 | 2254 | ||
2173 | if (config->direction == DMA_FROM_DEVICE) { | 2255 | if (config->direction == DMA_FROM_DEVICE) { |
2174 | dma_addr_t dev_addr_rx = | 2256 | dma_addr_t dev_addr_rx = |
@@ -2187,8 +2269,11 @@ static void d40_set_runtime_config(struct dma_chan *chan, | |||
2187 | cfg->dir); | 2269 | cfg->dir); |
2188 | cfg->dir = STEDMA40_PERIPH_TO_MEM; | 2270 | cfg->dir = STEDMA40_PERIPH_TO_MEM; |
2189 | 2271 | ||
2190 | config_addr_width = config->src_addr_width; | 2272 | /* Configure the memory side */ |
2191 | config_maxburst = config->src_maxburst; | 2273 | if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) |
2274 | dst_addr_width = src_addr_width; | ||
2275 | if (dst_maxburst == 0) | ||
2276 | dst_maxburst = src_maxburst; | ||
2192 | 2277 | ||
2193 | } else if (config->direction == DMA_TO_DEVICE) { | 2278 | } else if (config->direction == DMA_TO_DEVICE) { |
2194 | dma_addr_t dev_addr_tx = | 2279 | dma_addr_t dev_addr_tx = |
@@ -2207,68 +2292,39 @@ static void d40_set_runtime_config(struct dma_chan *chan, | |||
2207 | cfg->dir); | 2292 | cfg->dir); |
2208 | cfg->dir = STEDMA40_MEM_TO_PERIPH; | 2293 | cfg->dir = STEDMA40_MEM_TO_PERIPH; |
2209 | 2294 | ||
2210 | config_addr_width = config->dst_addr_width; | 2295 | /* Configure the memory side */ |
2211 | config_maxburst = config->dst_maxburst; | 2296 | if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) |
2212 | 2297 | src_addr_width = dst_addr_width; | |
2298 | if (src_maxburst == 0) | ||
2299 | src_maxburst = dst_maxburst; | ||
2213 | } else { | 2300 | } else { |
2214 | dev_err(d40c->base->dev, | 2301 | dev_err(d40c->base->dev, |
2215 | "unrecognized channel direction %d\n", | 2302 | "unrecognized channel direction %d\n", |
2216 | config->direction); | 2303 | config->direction); |
2217 | return; | 2304 | return -EINVAL; |
2218 | } | 2305 | } |
2219 | 2306 | ||
2220 | switch (config_addr_width) { | 2307 | if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) { |
2221 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
2222 | addr_width = STEDMA40_BYTE_WIDTH; | ||
2223 | break; | ||
2224 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
2225 | addr_width = STEDMA40_HALFWORD_WIDTH; | ||
2226 | break; | ||
2227 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
2228 | addr_width = STEDMA40_WORD_WIDTH; | ||
2229 | break; | ||
2230 | case DMA_SLAVE_BUSWIDTH_8_BYTES: | ||
2231 | addr_width = STEDMA40_DOUBLEWORD_WIDTH; | ||
2232 | break; | ||
2233 | default: | ||
2234 | dev_err(d40c->base->dev, | 2308 | dev_err(d40c->base->dev, |
2235 | "illegal peripheral address width " | 2309 | "src/dst width/maxburst mismatch: %d*%d != %d*%d\n", |
2236 | "requested (%d)\n", | 2310 | src_maxburst, |
2237 | config->src_addr_width); | 2311 | src_addr_width, |
2238 | return; | 2312 | dst_maxburst, |
2313 | dst_addr_width); | ||
2314 | return -EINVAL; | ||
2239 | } | 2315 | } |
2240 | 2316 | ||
2241 | if (chan_is_logical(d40c)) { | 2317 | ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, |
2242 | if (config_maxburst >= 16) | 2318 | src_addr_width, |
2243 | psize = STEDMA40_PSIZE_LOG_16; | 2319 | src_maxburst); |
2244 | else if (config_maxburst >= 8) | 2320 | if (ret) |
2245 | psize = STEDMA40_PSIZE_LOG_8; | 2321 | return ret; |
2246 | else if (config_maxburst >= 4) | ||
2247 | psize = STEDMA40_PSIZE_LOG_4; | ||
2248 | else | ||
2249 | psize = STEDMA40_PSIZE_LOG_1; | ||
2250 | } else { | ||
2251 | if (config_maxburst >= 16) | ||
2252 | psize = STEDMA40_PSIZE_PHY_16; | ||
2253 | else if (config_maxburst >= 8) | ||
2254 | psize = STEDMA40_PSIZE_PHY_8; | ||
2255 | else if (config_maxburst >= 4) | ||
2256 | psize = STEDMA40_PSIZE_PHY_4; | ||
2257 | else if (config_maxburst >= 2) | ||
2258 | psize = STEDMA40_PSIZE_PHY_2; | ||
2259 | else | ||
2260 | psize = STEDMA40_PSIZE_PHY_1; | ||
2261 | } | ||
2262 | 2322 | ||
2263 | /* Set up all the endpoint configs */ | 2323 | ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info, |
2264 | cfg->src_info.data_width = addr_width; | 2324 | dst_addr_width, |
2265 | cfg->src_info.psize = psize; | 2325 | dst_maxburst); |
2266 | cfg->src_info.big_endian = false; | 2326 | if (ret) |
2267 | cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; | 2327 | return ret; |
2268 | cfg->dst_info.data_width = addr_width; | ||
2269 | cfg->dst_info.psize = psize; | ||
2270 | cfg->dst_info.big_endian = false; | ||
2271 | cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; | ||
2272 | 2328 | ||
2273 | /* Fill in register values */ | 2329 | /* Fill in register values */ |
2274 | if (chan_is_logical(d40c)) | 2330 | if (chan_is_logical(d40c)) |
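The new sanity check rejects configurations where the two half-channels would move different amounts of data per burst: src_maxburst * src_addr_width must equal dst_maxburst * dst_addr_width (the dma_slave_buswidth values are plain byte counts, so the products are bytes per burst). Together with the defaulting in the hunks above, where an unspecified memory-side width or burst simply inherits the device-side value, a typical setup works out as in this illustrative check:

#include <linux/types.h>

/* Worked example (hypothetical values): RX from an 8-bit peripheral FIFO
 * bursting 16 items, into memory accessed 4 bytes at a time:
 *
 *   src: 16 * 1 byte  = 16 bytes per burst
 *   dst:  4 * 4 bytes = 16 bytes per burst   -> accepted
 *
 * dst_maxburst = 8 (8 * 4 = 32 bytes) would now be refused with -EINVAL.
 */
static bool my_bursts_match(u32 src_maxburst, u32 src_width,
                            u32 dst_maxburst, u32 dst_width)
{
        return src_maxburst * src_width == dst_maxburst * dst_width;
}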
@@ -2281,12 +2337,14 @@ static void d40_set_runtime_config(struct dma_chan *chan, | |||
2281 | d40c->runtime_addr = config_addr; | 2337 | d40c->runtime_addr = config_addr; |
2282 | d40c->runtime_direction = config->direction; | 2338 | d40c->runtime_direction = config->direction; |
2283 | dev_dbg(d40c->base->dev, | 2339 | dev_dbg(d40c->base->dev, |
2284 | "configured channel %s for %s, data width %d, " | 2340 | "configured channel %s for %s, data width %d/%d, " |
2285 | "maxburst %d bytes, LE, no flow control\n", | 2341 | "maxburst %d/%d elements, LE, no flow control\n", |
2286 | dma_chan_name(chan), | 2342 | dma_chan_name(chan), |
2287 | (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", | 2343 | (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", |
2288 | config_addr_width, | 2344 | src_addr_width, dst_addr_width, |
2289 | config_maxburst); | 2345 | src_maxburst, dst_maxburst); |
2346 | |||
2347 | return 0; | ||
2290 | } | 2348 | } |
2291 | 2349 | ||
2292 | static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 2350 | static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
@@ -2307,9 +2365,8 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
2307 | case DMA_RESUME: | 2365 | case DMA_RESUME: |
2308 | return d40_resume(d40c); | 2366 | return d40_resume(d40c); |
2309 | case DMA_SLAVE_CONFIG: | 2367 | case DMA_SLAVE_CONFIG: |
2310 | d40_set_runtime_config(chan, | 2368 | return d40_set_runtime_config(chan, |
2311 | (struct dma_slave_config *) arg); | 2369 | (struct dma_slave_config *) arg); |
2312 | return 0; | ||
2313 | default: | 2370 | default: |
2314 | break; | 2371 | break; |
2315 | } | 2372 | } |
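Since d40_set_runtime_config() now returns an int, a bad slave configuration propagates back through the DMA_SLAVE_CONFIG case of device_control() to the requesting client instead of being silently ignored. From a client driver the call chain looks roughly like this sketch (the peripheral address and widths are made up; dmaengine_slave_config() is the generic inline wrapper around device_control()):

#include <linux/dmaengine.h>

/* Hypothetical client: set a channel up for RX from an 8-bit FIFO. */
static int my_client_configure(struct dma_chan *chan, dma_addr_t fifo_addr)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_FROM_DEVICE,
                .src_addr       = fifo_addr,
                .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
                .src_maxburst   = 16,
                /* dst_* left unset: the DMA40 driver now derives the
                 * memory side from the device side, as shown above. */
        };

        /* Lands in d40_control(chan, DMA_SLAVE_CONFIG, ...); an -EINVAL
         * from d40_set_runtime_config() is now returned to us. */
        return dmaengine_slave_config(chan, &cfg);
}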
@@ -2340,6 +2397,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, | |||
2340 | 2397 | ||
2341 | INIT_LIST_HEAD(&d40c->active); | 2398 | INIT_LIST_HEAD(&d40c->active); |
2342 | INIT_LIST_HEAD(&d40c->queue); | 2399 | INIT_LIST_HEAD(&d40c->queue); |
2400 | INIT_LIST_HEAD(&d40c->pending_queue); | ||
2343 | INIT_LIST_HEAD(&d40c->client); | 2401 | INIT_LIST_HEAD(&d40c->client); |
2344 | 2402 | ||
2345 | tasklet_init(&d40c->tasklet, dma_tasklet, | 2403 | tasklet_init(&d40c->tasklet, dma_tasklet, |
@@ -2501,25 +2559,6 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
2501 | 2559 | ||
2502 | static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | 2560 | static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) |
2503 | { | 2561 | { |
2504 | static const struct d40_reg_val dma_id_regs[] = { | ||
2505 | /* Peripheral Id */ | ||
2506 | { .reg = D40_DREG_PERIPHID0, .val = 0x0040}, | ||
2507 | { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, | ||
2508 | /* | ||
2509 | * D40_DREG_PERIPHID2 Depends on HW revision: | ||
2510 | * DB8500ed has 0x0008, | ||
2511 | * ? has 0x0018, | ||
2512 | * DB8500v1 has 0x0028 | ||
2513 | * DB8500v2 has 0x0038 | ||
2514 | */ | ||
2515 | { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, | ||
2516 | |||
2517 | /* PCell Id */ | ||
2518 | { .reg = D40_DREG_CELLID0, .val = 0x000d}, | ||
2519 | { .reg = D40_DREG_CELLID1, .val = 0x00f0}, | ||
2520 | { .reg = D40_DREG_CELLID2, .val = 0x0005}, | ||
2521 | { .reg = D40_DREG_CELLID3, .val = 0x00b1} | ||
2522 | }; | ||
2523 | struct stedma40_platform_data *plat_data; | 2562 | struct stedma40_platform_data *plat_data; |
2524 | struct clk *clk = NULL; | 2563 | struct clk *clk = NULL; |
2525 | void __iomem *virtbase = NULL; | 2564 | void __iomem *virtbase = NULL; |
@@ -2528,8 +2567,9 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2528 | int num_log_chans = 0; | 2567 | int num_log_chans = 0; |
2529 | int num_phy_chans; | 2568 | int num_phy_chans; |
2530 | int i; | 2569 | int i; |
2531 | u32 val; | 2570 | u32 pid; |
2532 | u32 rev; | 2571 | u32 cid; |
2572 | u8 rev; | ||
2533 | 2573 | ||
2534 | clk = clk_get(&pdev->dev, NULL); | 2574 | clk = clk_get(&pdev->dev, NULL); |
2535 | 2575 | ||
@@ -2553,32 +2593,32 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2553 | if (!virtbase) | 2593 | if (!virtbase) |
2554 | goto failure; | 2594 | goto failure; |
2555 | 2595 | ||
2556 | /* HW version check */ | 2596 | /* This is just a regular AMBA PrimeCell ID actually */ |
2557 | for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { | 2597 | for (pid = 0, i = 0; i < 4; i++) |
2558 | if (dma_id_regs[i].val != | 2598 | pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i) |
2559 | readl(virtbase + dma_id_regs[i].reg)) { | 2599 | & 255) << (i * 8); |
2560 | d40_err(&pdev->dev, | 2600 | for (cid = 0, i = 0; i < 4; i++) |
2561 | "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", | 2601 | cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i) |
2562 | dma_id_regs[i].val, | 2602 | & 255) << (i * 8); |
2563 | dma_id_regs[i].reg, | ||
2564 | readl(virtbase + dma_id_regs[i].reg)); | ||
2565 | goto failure; | ||
2566 | } | ||
2567 | } | ||
2568 | 2603 | ||
2569 | /* Get silicon revision and designer */ | 2604 | if (cid != AMBA_CID) { |
2570 | val = readl(virtbase + D40_DREG_PERIPHID2); | 2605 | d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n"); |
2571 | 2606 | goto failure; | |
2572 | if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) != | 2607 | } |
2573 | D40_HW_DESIGNER) { | 2608 | if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) { |
2574 | d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n", | 2609 | d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n", |
2575 | val & D40_DREG_PERIPHID2_DESIGNER_MASK, | 2610 | AMBA_MANF_BITS(pid), |
2576 | D40_HW_DESIGNER); | 2611 | AMBA_VENDOR_ST); |
2577 | goto failure; | 2612 | goto failure; |
2578 | } | 2613 | } |
2579 | 2614 | /* | |
2580 | rev = (val & D40_DREG_PERIPHID2_REV_MASK) >> | 2615 | * HW revision: |
2581 | D40_DREG_PERIPHID2_REV_POS; | 2616 | * DB8500ed has revision 0 |
2617 | * ? has revision 1 | ||
2618 | * DB8500v1 has revision 2 | ||
2619 | * DB8500v2 has revision 3 | ||
2620 | */ | ||
2621 | rev = AMBA_REV_BITS(pid); | ||
2582 | 2622 | ||
2583 | /* The number of physical channels on this HW */ | 2623 | /* The number of physical channels on this HW */ |
2584 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; | 2624 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; |
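Instead of comparing each ID register against a hard-coded table, the probe now decodes the standard AMBA PrimeCell identification: the four PERIPHIDx and four CELLIDx byte-wide registers at the top of the register window (0xFE0 and 0xFF0 in ste_dma40_ll.h) are assembled LSB-first into 32-bit pid and cid values, which the generic AMBA_CID, AMBA_MANF_BITS() and AMBA_REV_BITS() macros from <linux/amba/bus.h> can then check, leaving the revision available without any driver-local masks. A standalone sketch of the same decode (assuming the usual PrimeCell layout at the end of a size-byte window; the error codes are illustrative):

#include <linux/amba/bus.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* Assemble and verify the PrimeCell IDs of a memory-mapped peripheral. */
static int my_check_primecell(void __iomem *base, resource_size_t size,
                              u8 *rev)
{
        u32 pid = 0, cid = 0;
        int i;

        for (i = 0; i < 4; i++)
                pid |= (readl(base + size - 0x20 + 4 * i) & 0xff) << (i * 8);
        for (i = 0; i < 4; i++)
                cid |= (readl(base + size - 0x10 + 4 * i) & 0xff) << (i * 8);

        if (cid != AMBA_CID)                    /* 0xb105f00d */
                return -ENODEV;
        if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST)
                return -ENODEV;

        *rev = AMBA_REV_BITS(pid);      /* 0 = DB8500ed, ..., 3 = DB8500v2 */
        return 0;
}

This is why the ste_dma40_ll.h hunk below can drop the PERIPHID2 revision and designer masks: the same fields are now read out of the assembled pid.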
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 195ee65ee7f3..b44c455158de 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h | |||
@@ -184,9 +184,6 @@ | |||
184 | #define D40_DREG_PERIPHID0 0xFE0 | 184 | #define D40_DREG_PERIPHID0 0xFE0 |
185 | #define D40_DREG_PERIPHID1 0xFE4 | 185 | #define D40_DREG_PERIPHID1 0xFE4 |
186 | #define D40_DREG_PERIPHID2 0xFE8 | 186 | #define D40_DREG_PERIPHID2 0xFE8 |
187 | #define D40_DREG_PERIPHID2_REV_POS 4 | ||
188 | #define D40_DREG_PERIPHID2_REV_MASK (0xf << D40_DREG_PERIPHID2_REV_POS) | ||
189 | #define D40_DREG_PERIPHID2_DESIGNER_MASK 0xf | ||
190 | #define D40_DREG_PERIPHID3 0xFEC | 187 | #define D40_DREG_PERIPHID3 0xFEC |
191 | #define D40_DREG_CELLID0 0xFF0 | 188 | #define D40_DREG_CELLID0 0xFF0 |
192 | #define D40_DREG_CELLID1 0xFF4 | 189 | #define D40_DREG_CELLID1 0xFF4 |