author    Linus Torvalds <torvalds@linux-foundation.org>    2011-11-04 21:02:25 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2011-11-04 21:02:25 -0400
commit    fba9569924e06da076cb2ad12474bbd82d69f54d (patch)
tree      f0b7d9c82f8dd90f0dc757a4c00afc0872fc1484
parent    3d0a8d10cfb4cc3d1877c29a866ee7d8a46aa2fa (diff)
parent    4598fc2c94b68740e0269db03c98a1e7ad5af773 (diff)
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (63 commits)
  dmaengine: mid_dma: mask_peripheral_interrupt only when dmac is idle
  dmaengine/ep93xx_dma: add module.h include
  pch_dma: Reduce wasting memory
  pch_dma: Fix suspend issue
  dma/timberdale: free_irq() on an error path
  dma: shdma: transfer based runtime PM
  dmaengine: shdma: protect against the IRQ handler
  dmaengine i.MX DMA/SDMA: add missing include of linux/module.h
  dmaengine: delete redundant chan_id and chancnt initialization in dma drivers
  dmaengine/amba-pl08x: Check txd->llis_va before freeing dma_pool
  dmaengine/amba-pl08x: Add support for sg len greater than one for slave transfers
  serial: sh-sci: don't filter on DMA device, use only channel ID
  ARM: SAMSUNG: Remove Samsung specific enum type for dma direction
  ASoC: Samsung: Update DMA interface
  spi/s3c64xx: Merge dma control code
  spi/s3c64xx: Add support DMA engine API
  ARM: SAMSUNG: Remove S3C-PL330-DMA driver
  ARM: S5P64X0: Use generic DMA PL330 driver
  ARM: S5PC100: Use generic DMA PL330 driver
  ARM: S5PV210: Use generic DMA PL330 driver
  ...

Fix up fairly trivial conflicts in
 - arch/arm/mach-exynos4/{Kconfig,clock.c}
 - arch/arm/mach-s5p64x0/dma.c
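The common thread of the series is the move from the Samsung-private s3c-pl330 driver to the generic PL330 dmaengine driver. A hedged sketch (not code from this merge) of the client-side pattern the platforms are moved onto; the filter callback and the way the DMACH_* id reaches it are assumptions here — the real plumbing lands in plat-samsung/dma-ops.c:

    #include <linux/dmaengine.h>

    /* Illustrative filter: assumes the pl330 driver parks the requested
     * DMACH_* id where the client can match on it.  The actual matching
     * rule lives in the driver/platform code added by this series. */
    static bool example_pl330_filter(struct dma_chan *chan, void *param)
    {
            return chan->private == param;
    }

    static struct dma_chan *example_request_tx(void)
    {
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);

            /* DMACH_UART0_TX is one of the peri_id values wired up below */
            return dma_request_channel(mask, example_pl330_filter,
                                       (void *)(unsigned long)DMACH_UART0_TX);
    }

The old API handed out fixed per-SoC channel numbers; under dmaengine the controller driver owns the channels and clients only describe what they need.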
-rw-r--r--  arch/arm/include/asm/hardware/pl080.h  4
-rw-r--r--  arch/arm/mach-exynos4/Kconfig  2
-rw-r--r--  arch/arm/mach-exynos4/clock.c  14
-rw-r--r--  arch/arm/mach-exynos4/dma.c  299
-rw-r--r--  arch/arm/mach-exynos4/include/mach/dma.h  4
-rw-r--r--  arch/arm/mach-s3c2410/include/mach/dma.h  20
-rw-r--r--  arch/arm/mach-s3c2412/dma.c  4
-rw-r--r--  arch/arm/mach-s3c64xx/dma.c  10
-rw-r--r--  arch/arm/mach-s3c64xx/include/mach/dma.h  8
-rw-r--r--  arch/arm/mach-s5p64x0/Kconfig  4
-rw-r--r--  arch/arm/mach-s5p64x0/clock-s5p6440.c  9
-rw-r--r--  arch/arm/mach-s5p64x0/clock-s5p6450.c  9
-rw-r--r--  arch/arm/mach-s5p64x0/dma.c  269
-rw-r--r--  arch/arm/mach-s5p64x0/include/mach/dma.h  4
-rw-r--r--  arch/arm/mach-s5pc100/Kconfig  2
-rw-r--r--  arch/arm/mach-s5pc100/clock.c  11
-rw-r--r--  arch/arm/mach-s5pc100/dma.c  323
-rw-r--r--  arch/arm/mach-s5pc100/include/mach/dma.h  4
-rw-r--r--  arch/arm/mach-s5pv210/Kconfig  2
-rw-r--r--  arch/arm/mach-s5pv210/clock.c  10
-rw-r--r--  arch/arm/mach-s5pv210/dma.c  316
-rw-r--r--  arch/arm/mach-s5pv210/include/mach/dma.h  4
-rw-r--r--  arch/arm/plat-s3c24xx/dma.c  10
-rw-r--r--  arch/arm/plat-samsung/Kconfig  9
-rw-r--r--  arch/arm/plat-samsung/Makefile  4
-rw-r--r--  arch/arm/plat-samsung/dma-ops.c  131
-rw-r--r--  arch/arm/plat-samsung/include/plat/dma-ops.h  63
-rw-r--r--  arch/arm/plat-samsung/include/plat/dma-pl330.h (renamed from arch/arm/plat-samsung/include/plat/s3c-dma-pl330.h)  24
-rw-r--r--  arch/arm/plat-samsung/include/plat/dma-s3c24xx.h  2
-rw-r--r--  arch/arm/plat-samsung/include/plat/dma.h  10
-rw-r--r--  arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h  32
-rw-r--r--  arch/arm/plat-samsung/s3c-dma-ops.c  130
-rw-r--r--  arch/arm/plat-samsung/s3c-pl330.c  1244
-rw-r--r--  drivers/dma/Kconfig  3
-rw-r--r--  drivers/dma/amba-pl08x.c  640
-rw-r--r--  drivers/dma/at_hdmac.c  164
-rw-r--r--  drivers/dma/at_hdmac_regs.h  24
-rw-r--r--  drivers/dma/dmatest.c  23
-rw-r--r--  drivers/dma/dw_dmac.c  5
-rw-r--r--  drivers/dma/ep93xx_dma.c  1
-rw-r--r--  drivers/dma/imx-dma.c  1
-rw-r--r--  drivers/dma/imx-sdma.c  48
-rw-r--r--  drivers/dma/intel_mid_dma.c  9
-rw-r--r--  drivers/dma/mpc512x_dma.c  1
-rw-r--r--  drivers/dma/mxs-dma.c  45
-rw-r--r--  drivers/dma/pch_dma.c  7
-rw-r--r--  drivers/dma/pl330.c  231
-rw-r--r--  drivers/dma/shdma.c  129
-rw-r--r--  drivers/dma/shdma.h  7
-rw-r--r--  drivers/dma/timb_dma.c  5
-rw-r--r--  drivers/mmc/host/s3cmci.c  6
-rw-r--r--  drivers/spi/spi-s3c64xx.c  175
-rw-r--r--  drivers/tty/serial/sh-sci.c  25
-rw-r--r--  include/linux/amba/pl08x.h  30
-rw-r--r--  include/linux/amba/pl330.h  6
-rw-r--r--  include/linux/dmaengine.h  13
-rw-r--r--  include/linux/serial_sci.h  2
-rw-r--r--  sound/soc/samsung/ac97.c  10
-rw-r--r--  sound/soc/samsung/dma.c  146
-rw-r--r--  sound/soc/samsung/dma.h  4
60 files changed, 2304 insertions, 2447 deletions
diff --git a/arch/arm/include/asm/hardware/pl080.h b/arch/arm/include/asm/hardware/pl080.h
index e4a04e4e5627..33c78d7af2e1 100644
--- a/arch/arm/include/asm/hardware/pl080.h
+++ b/arch/arm/include/asm/hardware/pl080.h
@@ -21,6 +21,9 @@
21 * OneNAND features. 21 * OneNAND features.
22*/ 22*/
23 23
24#ifndef ASM_PL080_H
25#define ASM_PL080_H
26
24#define PL080_INT_STATUS (0x00) 27#define PL080_INT_STATUS (0x00)
25#define PL080_TC_STATUS (0x04) 28#define PL080_TC_STATUS (0x04)
26#define PL080_TC_CLEAR (0x08) 29#define PL080_TC_CLEAR (0x08)
@@ -138,3 +141,4 @@ struct pl080s_lli {
138 u32 control1; 141 u32 control1;
139}; 142};
140 143
144#endif /* ASM_PL080_H */
diff --git a/arch/arm/mach-exynos4/Kconfig b/arch/arm/mach-exynos4/Kconfig
index a65273598036..44013e0672fe 100644
--- a/arch/arm/mach-exynos4/Kconfig
+++ b/arch/arm/mach-exynos4/Kconfig
@@ -11,7 +11,7 @@ if ARCH_EXYNOS4
11 11
12config CPU_EXYNOS4210 12config CPU_EXYNOS4210
13 bool 13 bool
14 select S3C_PL330_DMA 14 select SAMSUNG_DMADEV
15 select ARM_CPU_SUSPEND if PM 15 select ARM_CPU_SUSPEND if PM
16 help 16 help
17 Enable EXYNOS4210 CPU support 17 Enable EXYNOS4210 CPU support
diff --git a/arch/arm/mach-exynos4/clock.c b/arch/arm/mach-exynos4/clock.c
index 0d59be3fa1fe..e21952dfb7e1 100644
--- a/arch/arm/mach-exynos4/clock.c
+++ b/arch/arm/mach-exynos4/clock.c
@@ -111,6 +111,11 @@ struct clk clk_sclk_usbphy1 = {
111 .name = "sclk_usbphy1", 111 .name = "sclk_usbphy1",
112}; 112};
113 113
114static struct clk dummy_apb_pclk = {
115 .name = "apb_pclk",
116 .id = -1,
117};
118
114static int exynos4_clksrc_mask_top_ctrl(struct clk *clk, int enable) 119static int exynos4_clksrc_mask_top_ctrl(struct clk *clk, int enable)
115{ 120{
116 return s5p_gatectrl(S5P_CLKSRC_MASK_TOP, clk, enable); 121 return s5p_gatectrl(S5P_CLKSRC_MASK_TOP, clk, enable);
@@ -503,12 +508,12 @@ static struct clk init_clocks_off[] = {
503 .enable = exynos4_clk_ip_fsys_ctrl, 508 .enable = exynos4_clk_ip_fsys_ctrl,
504 .ctrlbit = (1 << 9), 509 .ctrlbit = (1 << 9),
505 }, { 510 }, {
506 .name = "pdma", 511 .name = "dma",
507 .devname = "s3c-pl330.0", 512 .devname = "s3c-pl330.0",
508 .enable = exynos4_clk_ip_fsys_ctrl, 513 .enable = exynos4_clk_ip_fsys_ctrl,
509 .ctrlbit = (1 << 0), 514 .ctrlbit = (1 << 0),
510 }, { 515 }, {
511 .name = "pdma", 516 .name = "dma",
512 .devname = "s3c-pl330.1", 517 .devname = "s3c-pl330.1",
513 .enable = exynos4_clk_ip_fsys_ctrl, 518 .enable = exynos4_clk_ip_fsys_ctrl,
514 .ctrlbit = (1 << 1), 519 .ctrlbit = (1 << 1),
@@ -1281,6 +1286,8 @@
1281 s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); 1286 s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
1282 s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); 1287 s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
1283 1288
1284 register_syscore_ops(&exynos4_clock_syscore_ops); 1289 register_syscore_ops(&exynos4_clock_syscore_ops);
1290 s3c24xx_register_clock(&dummy_apb_pclk);
1291
1285 s3c_pwmclk_init(); 1292 s3c_pwmclk_init();
1286} 1293}
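The dummy apb_pclk registered above (and in the other clock files below) exists for the AMBA bus core: when a PL330 instance probes as an amba_device, the bus looks up and enables a bus clock named "apb_pclk" on its behalf. Roughly, paraphrasing the drivers/amba/bus.c behaviour of this era rather than quoting it:

    struct clk *pclk = clk_get(&adev->dev, "apb_pclk");

    if (IS_ERR(pclk))
            return PTR_ERR(pclk);    /* probe fails without such a clock */
    clk_enable(pclk);

Platforms whose PL330 has no dedicated APB gate therefore register a do-nothing clock under that name so the probe can succeed.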
diff --git a/arch/arm/mach-exynos4/dma.c b/arch/arm/mach-exynos4/dma.c
index 564bb530f332..d57d66255021 100644
--- a/arch/arm/mach-exynos4/dma.c
+++ b/arch/arm/mach-exynos4/dma.c
@@ -21,151 +21,228 @@
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */ 22 */
23 23
24#include <linux/platform_device.h>
25#include <linux/dma-mapping.h> 24#include <linux/dma-mapping.h>
25#include <linux/amba/bus.h>
26#include <linux/amba/pl330.h>
26 27
28#include <asm/irq.h>
27#include <plat/devs.h> 29#include <plat/devs.h>
28#include <plat/irqs.h> 30#include <plat/irqs.h>
29 31
30#include <mach/map.h> 32#include <mach/map.h>
31#include <mach/irqs.h> 33#include <mach/irqs.h>
32 34#include <mach/dma.h>
33#include <plat/s3c-pl330-pdata.h>
34 35
35static u64 dma_dmamask = DMA_BIT_MASK(32); 36static u64 dma_dmamask = DMA_BIT_MASK(32);
36 37
37static struct resource exynos4_pdma0_resource[] = { 38struct dma_pl330_peri pdma0_peri[28] = {
38 [0] = { 39 {
39 .start = EXYNOS4_PA_PDMA0, 40 .peri_id = (u8)DMACH_PCM0_RX,
40 .end = EXYNOS4_PA_PDMA0 + SZ_4K, 41 .rqtype = DEVTOMEM,
41 .flags = IORESOURCE_MEM, 42 }, {
42 }, 43 .peri_id = (u8)DMACH_PCM0_TX,
43 [1] = { 44 .rqtype = MEMTODEV,
44 .start = IRQ_PDMA0, 45 }, {
45 .end = IRQ_PDMA0, 46 .peri_id = (u8)DMACH_PCM2_RX,
46 .flags = IORESOURCE_IRQ, 47 .rqtype = DEVTOMEM,
48 }, {
49 .peri_id = (u8)DMACH_PCM2_TX,
50 .rqtype = MEMTODEV,
51 }, {
52 .peri_id = (u8)DMACH_MSM_REQ0,
53 }, {
54 .peri_id = (u8)DMACH_MSM_REQ2,
55 }, {
56 .peri_id = (u8)DMACH_SPI0_RX,
57 .rqtype = DEVTOMEM,
58 }, {
59 .peri_id = (u8)DMACH_SPI0_TX,
60 .rqtype = MEMTODEV,
61 }, {
62 .peri_id = (u8)DMACH_SPI2_RX,
63 .rqtype = DEVTOMEM,
64 }, {
65 .peri_id = (u8)DMACH_SPI2_TX,
66 .rqtype = MEMTODEV,
67 }, {
68 .peri_id = (u8)DMACH_I2S0S_TX,
69 .rqtype = MEMTODEV,
70 }, {
71 .peri_id = (u8)DMACH_I2S0_RX,
72 .rqtype = DEVTOMEM,
73 }, {
74 .peri_id = (u8)DMACH_I2S0_TX,
75 .rqtype = MEMTODEV,
76 }, {
77 .peri_id = (u8)DMACH_UART0_RX,
78 .rqtype = DEVTOMEM,
79 }, {
80 .peri_id = (u8)DMACH_UART0_TX,
81 .rqtype = MEMTODEV,
82 }, {
83 .peri_id = (u8)DMACH_UART2_RX,
84 .rqtype = DEVTOMEM,
85 }, {
86 .peri_id = (u8)DMACH_UART2_TX,
87 .rqtype = MEMTODEV,
88 }, {
89 .peri_id = (u8)DMACH_UART4_RX,
90 .rqtype = DEVTOMEM,
91 }, {
92 .peri_id = (u8)DMACH_UART4_TX,
93 .rqtype = MEMTODEV,
94 }, {
95 .peri_id = (u8)DMACH_SLIMBUS0_RX,
96 .rqtype = DEVTOMEM,
97 }, {
98 .peri_id = (u8)DMACH_SLIMBUS0_TX,
99 .rqtype = MEMTODEV,
100 }, {
101 .peri_id = (u8)DMACH_SLIMBUS2_RX,
102 .rqtype = DEVTOMEM,
103 }, {
104 .peri_id = (u8)DMACH_SLIMBUS2_TX,
105 .rqtype = MEMTODEV,
106 }, {
107 .peri_id = (u8)DMACH_SLIMBUS4_RX,
108 .rqtype = DEVTOMEM,
109 }, {
110 .peri_id = (u8)DMACH_SLIMBUS4_TX,
111 .rqtype = MEMTODEV,
112 }, {
113 .peri_id = (u8)DMACH_AC97_MICIN,
114 .rqtype = DEVTOMEM,
115 }, {
116 .peri_id = (u8)DMACH_AC97_PCMIN,
117 .rqtype = DEVTOMEM,
118 }, {
119 .peri_id = (u8)DMACH_AC97_PCMOUT,
120 .rqtype = MEMTODEV,
47 }, 121 },
48}; 122};
49 123
50static struct s3c_pl330_platdata exynos4_pdma0_pdata = { 124struct dma_pl330_platdata exynos4_pdma0_pdata = {
51 .peri = { 125 .nr_valid_peri = ARRAY_SIZE(pdma0_peri),
52 [0] = DMACH_PCM0_RX, 126 .peri = pdma0_peri,
53 [1] = DMACH_PCM0_TX,
54 [2] = DMACH_PCM2_RX,
55 [3] = DMACH_PCM2_TX,
56 [4] = DMACH_MSM_REQ0,
57 [5] = DMACH_MSM_REQ2,
58 [6] = DMACH_SPI0_RX,
59 [7] = DMACH_SPI0_TX,
60 [8] = DMACH_SPI2_RX,
61 [9] = DMACH_SPI2_TX,
62 [10] = DMACH_I2S0S_TX,
63 [11] = DMACH_I2S0_RX,
64 [12] = DMACH_I2S0_TX,
65 [13] = DMACH_I2S2_RX,
66 [14] = DMACH_I2S2_TX,
67 [15] = DMACH_UART0_RX,
68 [16] = DMACH_UART0_TX,
69 [17] = DMACH_UART2_RX,
70 [18] = DMACH_UART2_TX,
71 [19] = DMACH_UART4_RX,
72 [20] = DMACH_UART4_TX,
73 [21] = DMACH_SLIMBUS0_RX,
74 [22] = DMACH_SLIMBUS0_TX,
75 [23] = DMACH_SLIMBUS2_RX,
76 [24] = DMACH_SLIMBUS2_TX,
77 [25] = DMACH_SLIMBUS4_RX,
78 [26] = DMACH_SLIMBUS4_TX,
79 [27] = DMACH_AC97_MICIN,
80 [28] = DMACH_AC97_PCMIN,
81 [29] = DMACH_AC97_PCMOUT,
82 [30] = DMACH_MAX,
83 [31] = DMACH_MAX,
84 },
85}; 127};
86 128
87static struct platform_device exynos4_device_pdma0 = { 129struct amba_device exynos4_device_pdma0 = {
88 .name = "s3c-pl330", 130 .dev = {
89 .id = 0, 131 .init_name = "dma-pl330.0",
90 .num_resources = ARRAY_SIZE(exynos4_pdma0_resource),
91 .resource = exynos4_pdma0_resource,
92 .dev = {
93 .dma_mask = &dma_dmamask, 132 .dma_mask = &dma_dmamask,
94 .coherent_dma_mask = DMA_BIT_MASK(32), 133 .coherent_dma_mask = DMA_BIT_MASK(32),
95 .platform_data = &exynos4_pdma0_pdata, 134 .platform_data = &exynos4_pdma0_pdata,
96 }, 135 },
136 .res = {
137 .start = EXYNOS4_PA_PDMA0,
138 .end = EXYNOS4_PA_PDMA0 + SZ_4K,
139 .flags = IORESOURCE_MEM,
140 },
141 .irq = {IRQ_PDMA0, NO_IRQ},
142 .periphid = 0x00041330,
97}; 143};
98 144
99static struct resource exynos4_pdma1_resource[] = { 145struct dma_pl330_peri pdma1_peri[25] = {
100 [0] = { 146 {
101 .start = EXYNOS4_PA_PDMA1, 147 .peri_id = (u8)DMACH_PCM0_RX,
102 .end = EXYNOS4_PA_PDMA1 + SZ_4K, 148 .rqtype = DEVTOMEM,
103 .flags = IORESOURCE_MEM, 149 }, {
104 }, 150 .peri_id = (u8)DMACH_PCM0_TX,
105 [1] = { 151 .rqtype = MEMTODEV,
106 .start = IRQ_PDMA1, 152 }, {
107 .end = IRQ_PDMA1, 153 .peri_id = (u8)DMACH_PCM1_RX,
108 .flags = IORESOURCE_IRQ, 154 .rqtype = DEVTOMEM,
155 }, {
156 .peri_id = (u8)DMACH_PCM1_TX,
157 .rqtype = MEMTODEV,
158 }, {
159 .peri_id = (u8)DMACH_MSM_REQ1,
160 }, {
161 .peri_id = (u8)DMACH_MSM_REQ3,
162 }, {
163 .peri_id = (u8)DMACH_SPI1_RX,
164 .rqtype = DEVTOMEM,
165 }, {
166 .peri_id = (u8)DMACH_SPI1_TX,
167 .rqtype = MEMTODEV,
168 }, {
169 .peri_id = (u8)DMACH_I2S0S_TX,
170 .rqtype = MEMTODEV,
171 }, {
172 .peri_id = (u8)DMACH_I2S0_RX,
173 .rqtype = DEVTOMEM,
174 }, {
175 .peri_id = (u8)DMACH_I2S0_TX,
176 .rqtype = MEMTODEV,
177 }, {
178 .peri_id = (u8)DMACH_I2S1_RX,
179 .rqtype = DEVTOMEM,
180 }, {
181 .peri_id = (u8)DMACH_I2S1_TX,
182 .rqtype = MEMTODEV,
183 }, {
184 .peri_id = (u8)DMACH_UART0_RX,
185 .rqtype = DEVTOMEM,
186 }, {
187 .peri_id = (u8)DMACH_UART0_TX,
188 .rqtype = MEMTODEV,
189 }, {
190 .peri_id = (u8)DMACH_UART1_RX,
191 .rqtype = DEVTOMEM,
192 }, {
193 .peri_id = (u8)DMACH_UART1_TX,
194 .rqtype = MEMTODEV,
195 }, {
196 .peri_id = (u8)DMACH_UART3_RX,
197 .rqtype = DEVTOMEM,
198 }, {
199 .peri_id = (u8)DMACH_UART3_TX,
200 .rqtype = MEMTODEV,
201 }, {
202 .peri_id = (u8)DMACH_SLIMBUS1_RX,
203 .rqtype = DEVTOMEM,
204 }, {
205 .peri_id = (u8)DMACH_SLIMBUS1_TX,
206 .rqtype = MEMTODEV,
207 }, {
208 .peri_id = (u8)DMACH_SLIMBUS3_RX,
209 .rqtype = DEVTOMEM,
210 }, {
211 .peri_id = (u8)DMACH_SLIMBUS3_TX,
212 .rqtype = MEMTODEV,
213 }, {
214 .peri_id = (u8)DMACH_SLIMBUS5_RX,
215 .rqtype = DEVTOMEM,
216 }, {
217 .peri_id = (u8)DMACH_SLIMBUS5_TX,
218 .rqtype = MEMTODEV,
109 }, 219 },
110}; 220};
111 221
112static struct s3c_pl330_platdata exynos4_pdma1_pdata = { 222struct dma_pl330_platdata exynos4_pdma1_pdata = {
113 .peri = { 223 .nr_valid_peri = ARRAY_SIZE(pdma1_peri),
114 [0] = DMACH_PCM0_RX, 224 .peri = pdma1_peri,
115 [1] = DMACH_PCM0_TX,
116 [2] = DMACH_PCM1_RX,
117 [3] = DMACH_PCM1_TX,
118 [4] = DMACH_MSM_REQ1,
119 [5] = DMACH_MSM_REQ3,
120 [6] = DMACH_SPI1_RX,
121 [7] = DMACH_SPI1_TX,
122 [8] = DMACH_I2S0S_TX,
123 [9] = DMACH_I2S0_RX,
124 [10] = DMACH_I2S0_TX,
125 [11] = DMACH_I2S1_RX,
126 [12] = DMACH_I2S1_TX,
127 [13] = DMACH_UART0_RX,
128 [14] = DMACH_UART0_TX,
129 [15] = DMACH_UART1_RX,
130 [16] = DMACH_UART1_TX,
131 [17] = DMACH_UART3_RX,
132 [18] = DMACH_UART3_TX,
133 [19] = DMACH_SLIMBUS1_RX,
134 [20] = DMACH_SLIMBUS1_TX,
135 [21] = DMACH_SLIMBUS3_RX,
136 [22] = DMACH_SLIMBUS3_TX,
137 [23] = DMACH_SLIMBUS5_RX,
138 [24] = DMACH_SLIMBUS5_TX,
139 [25] = DMACH_SLIMBUS0AUX_RX,
140 [26] = DMACH_SLIMBUS0AUX_TX,
141 [27] = DMACH_SPDIF,
142 [28] = DMACH_MAX,
143 [29] = DMACH_MAX,
144 [30] = DMACH_MAX,
145 [31] = DMACH_MAX,
146 },
147}; 225};
148 226
149static struct platform_device exynos4_device_pdma1 = { 227struct amba_device exynos4_device_pdma1 = {
150 .name = "s3c-pl330", 228 .dev = {
151 .id = 1, 229 .init_name = "dma-pl330.1",
152 .num_resources = ARRAY_SIZE(exynos4_pdma1_resource),
153 .resource = exynos4_pdma1_resource,
154 .dev = {
155 .dma_mask = &dma_dmamask, 230 .dma_mask = &dma_dmamask,
156 .coherent_dma_mask = DMA_BIT_MASK(32), 231 .coherent_dma_mask = DMA_BIT_MASK(32),
157 .platform_data = &exynos4_pdma1_pdata, 232 .platform_data = &exynos4_pdma1_pdata,
158 }, 233 },
159}; 234 .res = {
160 235 .start = EXYNOS4_PA_PDMA1,
161static struct platform_device *exynos4_dmacs[] __initdata = { 236 .end = EXYNOS4_PA_PDMA1 + SZ_4K,
162 &exynos4_device_pdma0, 237 .flags = IORESOURCE_MEM,
163 &exynos4_device_pdma1, 238 },
239 .irq = {IRQ_PDMA1, NO_IRQ},
240 .periphid = 0x00041330,
164}; 241};
165 242
166static int __init exynos4_dma_init(void) 243static int __init exynos4_dma_init(void)
167{ 244{
168 platform_add_devices(exynos4_dmacs, ARRAY_SIZE(exynos4_dmacs)); 245 amba_device_register(&exynos4_device_pdma0, &iomem_resource);
169 246
170 return 0; 247 return 0;
171} 248}
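The hard-coded .periphid = 0x00041330 repeated across these files follows the standard AMBA PrimeCell ID layout; decoded with hypothetical helper macros (the field split is the PrimeCell encoding, the macro names are invented for illustration):

    #define EX_AMBA_PART(id)      ((id) & 0xfff)          /* 0x330 -> PL330 */
    #define EX_AMBA_DESIGNER(id)  (((id) >> 12) & 0xff)   /* 0x41  -> ARM Ltd. */
    #define EX_AMBA_REV(id)       (((id) >> 20) & 0x0f)   /* 0x0 */
    #define EX_AMBA_CONFIG(id)    (((id) >> 24) & 0xff)   /* 0x0 */

Supplying the id statically lets the AMBA core match the device to the pl330 driver without reading the peripheral ID registers from hardware.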
diff --git a/arch/arm/mach-exynos4/include/mach/dma.h b/arch/arm/mach-exynos4/include/mach/dma.h
index 81209eb1409b..201842a3769e 100644
--- a/arch/arm/mach-exynos4/include/mach/dma.h
+++ b/arch/arm/mach-exynos4/include/mach/dma.h
@@ -20,7 +20,7 @@
20#ifndef __MACH_DMA_H 20#ifndef __MACH_DMA_H
21#define __MACH_DMA_H 21#define __MACH_DMA_H
22 22
23/* This platform uses the common S3C DMA API driver for PL330 */ 23/* This platform uses the common DMA API driver for PL330 */
24#include <plat/s3c-dma-pl330.h> 24#include <plat/dma-pl330.h>
25 25
26#endif /* __MACH_DMA_H */ 26#endif /* __MACH_DMA_H */
diff --git a/arch/arm/mach-s3c2410/include/mach/dma.h b/arch/arm/mach-s3c2410/include/mach/dma.h
index b2b2a5bb275e..ae8e482b6427 100644
--- a/arch/arm/mach-s3c2410/include/mach/dma.h
+++ b/arch/arm/mach-s3c2410/include/mach/dma.h
@@ -13,7 +13,6 @@
13#ifndef __ASM_ARCH_DMA_H 13#ifndef __ASM_ARCH_DMA_H
14#define __ASM_ARCH_DMA_H __FILE__ 14#define __ASM_ARCH_DMA_H __FILE__
15 15
16#include <plat/dma.h>
17#include <linux/sysdev.h> 16#include <linux/sysdev.h>
18 17
19#define MAX_DMA_TRANSFER_SIZE 0x100000 /* Data Unit is half word */ 18#define MAX_DMA_TRANSFER_SIZE 0x100000 /* Data Unit is half word */
@@ -51,6 +50,18 @@ enum dma_ch {
51 DMACH_MAX, /* the end entry */ 50 DMACH_MAX, /* the end entry */
52}; 51};
53 52
53static inline bool samsung_dma_has_circular(void)
54{
55 return false;
56}
57
58static inline bool samsung_dma_is_dmadev(void)
59{
60 return false;
61}
62
63#include <plat/dma.h>
64
54#define DMACH_LOW_LEVEL (1<<28) /* use this to specify hardware ch no */ 65#define DMACH_LOW_LEVEL (1<<28) /* use this to specify hardware ch no */
55 66
56/* we have 4 dma channels */ 67/* we have 4 dma channels */
@@ -163,7 +174,7 @@ struct s3c2410_dma_chan {
163 struct s3c2410_dma_client *client; 174 struct s3c2410_dma_client *client;
164 175
165 /* channel configuration */ 176 /* channel configuration */
166 enum s3c2410_dmasrc source; 177 enum dma_data_direction source;
167 enum dma_ch req_ch; 178 enum dma_ch req_ch;
168 unsigned long dev_addr; 179 unsigned long dev_addr;
169 unsigned long load_timeout; 180 unsigned long load_timeout;
@@ -196,9 +207,4 @@ struct s3c2410_dma_chan {
196 207
197typedef unsigned long dma_device_t; 208typedef unsigned long dma_device_t;
198 209
199static inline bool s3c_dma_has_circular(void)
200{
201 return false;
202}
203
204#endif /* __ASM_ARCH_DMA_H */ 210#endif /* __ASM_ARCH_DMA_H */
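The two inline predicates added here let code shared across Samsung platforms pick a DMA strategy without #ifdefs. A hedged sketch of the intended use — the caller is hypothetical; the real consumers in this series are sound/soc/samsung and spi-s3c64xx:

    static void example_pick_dma_path(void)
    {
            /* dmaengine-backed platforms route through the generic PL330
             * driver; the rest keep the legacy S3C DMA calls */
            if (samsung_dma_is_dmadev())
                    pr_info("using dmaengine-backed DMA ops\n");
            else
                    pr_info("using legacy S3C DMA API\n");

            /* without hardware circular transfers, drivers re-queue
             * period-sized buffers themselves */
            if (!samsung_dma_has_circular())
                    pr_info("no circular-buffer support\n");
    }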
diff --git a/arch/arm/mach-s3c2412/dma.c b/arch/arm/mach-s3c2412/dma.c
index c61e3261615d..d2a7d5ef3e67 100644
--- a/arch/arm/mach-s3c2412/dma.c
+++ b/arch/arm/mach-s3c2412/dma.c
@@ -130,11 +130,11 @@ static struct s3c24xx_dma_map __initdata s3c2412_dma_mappings[] = {
130 130
131static void s3c2412_dma_direction(struct s3c2410_dma_chan *chan, 131static void s3c2412_dma_direction(struct s3c2410_dma_chan *chan,
132 struct s3c24xx_dma_map *map, 132 struct s3c24xx_dma_map *map,
133 enum s3c2410_dmasrc dir) 133 enum dma_data_direction dir)
134{ 134{
135 unsigned long chsel; 135 unsigned long chsel;
136 136
137 if (dir == S3C2410_DMASRC_HW) 137 if (dir == DMA_FROM_DEVICE)
138 chsel = map->channels_rx[0]; 138 chsel = map->channels_rx[0];
139 else 139 else
140 chsel = map->channels[0]; 140 chsel = map->channels[0];
diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c
index 204bfafe4bfc..67c97fab62fd 100644
--- a/arch/arm/mach-s3c64xx/dma.c
+++ b/arch/arm/mach-s3c64xx/dma.c
@@ -147,14 +147,14 @@ static void s3c64xx_dma_fill_lli(struct s3c2410_dma_chan *chan,
147 u32 control0, control1; 147 u32 control0, control1;
148 148
149 switch (chan->source) { 149 switch (chan->source) {
150 case S3C2410_DMASRC_HW: 150 case DMA_FROM_DEVICE:
151 src = chan->dev_addr; 151 src = chan->dev_addr;
152 dst = data; 152 dst = data;
153 control0 = PL080_CONTROL_SRC_AHB2; 153 control0 = PL080_CONTROL_SRC_AHB2;
154 control0 |= PL080_CONTROL_DST_INCR; 154 control0 |= PL080_CONTROL_DST_INCR;
155 break; 155 break;
156 156
157 case S3C2410_DMASRC_MEM: 157 case DMA_TO_DEVICE:
158 src = data; 158 src = data;
159 dst = chan->dev_addr; 159 dst = chan->dev_addr;
160 control0 = PL080_CONTROL_DST_AHB2; 160 control0 = PL080_CONTROL_DST_AHB2;
@@ -416,7 +416,7 @@ EXPORT_SYMBOL(s3c2410_dma_enqueue);
416 416
417 417
418int s3c2410_dma_devconfig(enum dma_ch channel, 418int s3c2410_dma_devconfig(enum dma_ch channel,
419 enum s3c2410_dmasrc source, 419 enum dma_data_direction source,
420 unsigned long devaddr) 420 unsigned long devaddr)
421{ 421{
422 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 422 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
@@ -437,11 +437,11 @@ int s3c2410_dma_devconfig(enum dma_ch channel,
437 pr_debug("%s: peripheral %d\n", __func__, peripheral); 437 pr_debug("%s: peripheral %d\n", __func__, peripheral);
438 438
439 switch (source) { 439 switch (source) {
440 case S3C2410_DMASRC_HW: 440 case DMA_FROM_DEVICE:
441 config = 2 << PL080_CONFIG_FLOW_CONTROL_SHIFT; 441 config = 2 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
442 config |= peripheral << PL080_CONFIG_SRC_SEL_SHIFT; 442 config |= peripheral << PL080_CONFIG_SRC_SEL_SHIFT;
443 break; 443 break;
444 case S3C2410_DMASRC_MEM: 444 case DMA_TO_DEVICE:
445 config = 1 << PL080_CONFIG_FLOW_CONTROL_SHIFT; 445 config = 1 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
446 config |= peripheral << PL080_CONFIG_DST_SEL_SHIFT; 446 config |= peripheral << PL080_CONFIG_DST_SEL_SHIFT;
447 break; 447 break;
diff --git a/arch/arm/mach-s3c64xx/include/mach/dma.h b/arch/arm/mach-s3c64xx/include/mach/dma.h
index 0a5d9268a23e..fe1a98cf0e4c 100644
--- a/arch/arm/mach-s3c64xx/include/mach/dma.h
+++ b/arch/arm/mach-s3c64xx/include/mach/dma.h
@@ -58,11 +58,15 @@ enum dma_ch {
58 DMACH_MAX /* the end */ 58 DMACH_MAX /* the end */
59}; 59};
60 60
61static __inline__ bool s3c_dma_has_circular(void) 61static inline bool samsung_dma_has_circular(void)
62{ 62{
63 return true; 63 return true;
64} 64}
65 65
66static inline bool samsung_dma_is_dmadev(void)
67{
68 return false;
69}
66#define S3C2410_DMAF_CIRCULAR (1 << 0) 70#define S3C2410_DMAF_CIRCULAR (1 << 0)
67 71
68#include <plat/dma.h> 72#include <plat/dma.h>
@@ -95,7 +99,7 @@ struct s3c2410_dma_chan {
95 unsigned char peripheral; 99 unsigned char peripheral;
96 100
97 unsigned int flags; 101 unsigned int flags;
98 enum s3c2410_dmasrc source; 102 enum dma_data_direction source;
99 103
100 104
101 dma_addr_t dev_addr; 105 dma_addr_t dev_addr;
diff --git a/arch/arm/mach-s5p64x0/Kconfig b/arch/arm/mach-s5p64x0/Kconfig
index 65c7518dad7f..9527ed24dbff 100644
--- a/arch/arm/mach-s5p64x0/Kconfig
+++ b/arch/arm/mach-s5p64x0/Kconfig
@@ -9,14 +9,14 @@ if ARCH_S5P64X0
9 9
10config CPU_S5P6440 10config CPU_S5P6440
11 bool 11 bool
12 select S3C_PL330_DMA 12 select SAMSUNG_DMADEV
13 select S5P_HRT 13 select S5P_HRT
14 help 14 help
15 Enable S5P6440 CPU support 15 Enable S5P6440 CPU support
16 16
17config CPU_S5P6450 17config CPU_S5P6450
18 bool 18 bool
19 select S3C_PL330_DMA 19 select SAMSUNG_DMADEV
20 select S5P_HRT 20 select S5P_HRT
21 help 21 help
22 Enable S5P6450 CPU support 22 Enable S5P6450 CPU support
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6440.c b/arch/arm/mach-s5p64x0/clock-s5p6440.c
index 0e9cd3092dd2..c1f548f69a0d 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6440.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6440.c
@@ -146,7 +146,7 @@ static struct clk init_clocks_off[] = {
146 .enable = s5p64x0_hclk0_ctrl, 146 .enable = s5p64x0_hclk0_ctrl,
147 .ctrlbit = (1 << 8), 147 .ctrlbit = (1 << 8),
148 }, { 148 }, {
149 .name = "pdma", 149 .name = "dma",
150 .parent = &clk_hclk_low.clk, 150 .parent = &clk_hclk_low.clk,
151 .enable = s5p64x0_hclk0_ctrl, 151 .enable = s5p64x0_hclk0_ctrl,
152 .ctrlbit = (1 << 12), 152 .ctrlbit = (1 << 12),
@@ -499,6 +499,11 @@ static struct clksrc_clk *sysclks[] = {
499 &clk_pclk_low, 499 &clk_pclk_low,
500}; 500};
501 501
502static struct clk dummy_apb_pclk = {
503 .name = "apb_pclk",
504 .id = -1,
505};
506
502void __init_or_cpufreq s5p6440_setup_clocks(void) 507void __init_or_cpufreq s5p6440_setup_clocks(void)
503{ 508{
504 struct clk *xtal_clk; 509 struct clk *xtal_clk;
@@ -581,5 +586,7 @@ void __init s5p6440_register_clocks(void)
581 s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); 586 s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
582 s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); 587 s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
583 588
589 s3c24xx_register_clock(&dummy_apb_pclk);
590
584 s3c_pwmclk_init(); 591 s3c_pwmclk_init();
585} 592}
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6450.c b/arch/arm/mach-s5p64x0/clock-s5p6450.c
index d9dc16cde109..3d9b60975570 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6450.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6450.c
@@ -179,7 +179,7 @@ static struct clk init_clocks_off[] = {
179 .enable = s5p64x0_hclk0_ctrl, 179 .enable = s5p64x0_hclk0_ctrl,
180 .ctrlbit = (1 << 3), 180 .ctrlbit = (1 << 3),
181 }, { 181 }, {
182 .name = "pdma", 182 .name = "dma",
183 .parent = &clk_hclk_low.clk, 183 .parent = &clk_hclk_low.clk,
184 .enable = s5p64x0_hclk0_ctrl, 184 .enable = s5p64x0_hclk0_ctrl,
185 .ctrlbit = (1 << 12), 185 .ctrlbit = (1 << 12),
@@ -553,6 +553,11 @@ static struct clksrc_clk *sysclks[] = {
553 &clk_sclk_audio0, 553 &clk_sclk_audio0,
554}; 554};
555 555
556static struct clk dummy_apb_pclk = {
557 .name = "apb_pclk",
558 .id = -1,
559};
560
556void __init_or_cpufreq s5p6450_setup_clocks(void) 561void __init_or_cpufreq s5p6450_setup_clocks(void)
557{ 562{
558 struct clk *xtal_clk; 563 struct clk *xtal_clk;
@@ -632,5 +637,7 @@ void __init s5p6450_register_clocks(void)
632 s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); 637 s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
633 s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); 638 s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
634 639
640 s3c24xx_register_clock(&dummy_apb_pclk);
641
635 s3c_pwmclk_init(); 642 s3c_pwmclk_init();
636} 643}
diff --git a/arch/arm/mach-s5p64x0/dma.c b/arch/arm/mach-s5p64x0/dma.c
index 0e5b3e63e5b3..442dd4ad12da 100644
--- a/arch/arm/mach-s5p64x0/dma.c
+++ b/arch/arm/mach-s5p64x0/dma.c
@@ -21,115 +21,208 @@
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22*/ 22*/
23 23
24#include <linux/platform_device.h>
25#include <linux/dma-mapping.h> 24#include <linux/dma-mapping.h>
25#include <linux/amba/bus.h>
26#include <linux/amba/pl330.h>
27
28#include <asm/irq.h>
26 29
27#include <mach/map.h> 30#include <mach/map.h>
28#include <mach/irqs.h> 31#include <mach/irqs.h>
29#include <mach/regs-clock.h> 32#include <mach/regs-clock.h>
33#include <mach/dma.h>
30 34
31#include <plat/cpu.h> 35#include <plat/cpu.h>
32#include <plat/devs.h> 36#include <plat/devs.h>
33#include <plat/s3c-pl330-pdata.h> 37#include <plat/irqs.h>
34 38
35static u64 dma_dmamask = DMA_BIT_MASK(32); 39static u64 dma_dmamask = DMA_BIT_MASK(32);
36 40
37static struct resource s5p64x0_pdma_resource[] = { 41struct dma_pl330_peri s5p6440_pdma_peri[22] = {
38 [0] = { 42 {
39 .start = S5P64X0_PA_PDMA, 43 .peri_id = (u8)DMACH_UART0_RX,
40 .end = S5P64X0_PA_PDMA + SZ_4K, 44 .rqtype = DEVTOMEM,
41 .flags = IORESOURCE_MEM, 45 }, {
42 }, 46 .peri_id = (u8)DMACH_UART0_TX,
43 [1] = { 47 .rqtype = MEMTODEV,
44 .start = IRQ_DMA0, 48 }, {
45 .end = IRQ_DMA0, 49 .peri_id = (u8)DMACH_UART1_RX,
46 .flags = IORESOURCE_IRQ, 50 .rqtype = DEVTOMEM,
51 }, {
52 .peri_id = (u8)DMACH_UART1_TX,
53 .rqtype = MEMTODEV,
54 }, {
55 .peri_id = (u8)DMACH_UART2_RX,
56 .rqtype = DEVTOMEM,
57 }, {
58 .peri_id = (u8)DMACH_UART2_TX,
59 .rqtype = MEMTODEV,
60 }, {
61 .peri_id = (u8)DMACH_UART3_RX,
62 .rqtype = DEVTOMEM,
63 }, {
64 .peri_id = (u8)DMACH_UART3_TX,
65 .rqtype = MEMTODEV,
66 }, {
67 .peri_id = DMACH_MAX,
68 }, {
69 .peri_id = DMACH_MAX,
70 }, {
71 .peri_id = (u8)DMACH_PCM0_TX,
72 .rqtype = MEMTODEV,
73 }, {
74 .peri_id = (u8)DMACH_PCM0_RX,
75 .rqtype = DEVTOMEM,
76 }, {
77 .peri_id = (u8)DMACH_I2S0_TX,
78 .rqtype = MEMTODEV,
79 }, {
80 .peri_id = (u8)DMACH_I2S0_RX,
81 .rqtype = DEVTOMEM,
82 }, {
83 .peri_id = (u8)DMACH_SPI0_TX,
84 .rqtype = MEMTODEV,
85 }, {
86 .peri_id = (u8)DMACH_SPI0_RX,
87 .rqtype = DEVTOMEM,
88 }, {
89 .peri_id = (u8)DMACH_MAX,
90 }, {
91 .peri_id = (u8)DMACH_MAX,
92 }, {
93 .peri_id = (u8)DMACH_MAX,
94 }, {
95 .peri_id = (u8)DMACH_MAX,
96 }, {
97 .peri_id = (u8)DMACH_SPI1_TX,
98 .rqtype = MEMTODEV,
99 }, {
100 .peri_id = (u8)DMACH_SPI1_RX,
101 .rqtype = DEVTOMEM,
47 }, 102 },
48}; 103};
49 104
50static struct s3c_pl330_platdata s5p6440_pdma_pdata = { 105struct dma_pl330_platdata s5p6440_pdma_pdata = {
51 .peri = { 106 .nr_valid_peri = ARRAY_SIZE(s5p6440_pdma_peri),
52 [0] = DMACH_UART0_RX, 107 .peri = s5p6440_pdma_peri,
53 [1] = DMACH_UART0_TX,
54 [2] = DMACH_UART1_RX,
55 [3] = DMACH_UART1_TX,
56 [4] = DMACH_UART2_RX,
57 [5] = DMACH_UART2_TX,
58 [6] = DMACH_UART3_RX,
59 [7] = DMACH_UART3_TX,
60 [8] = DMACH_MAX,
61 [9] = DMACH_MAX,
62 [10] = DMACH_PCM0_TX,
63 [11] = DMACH_PCM0_RX,
64 [12] = DMACH_I2S0_TX,
65 [13] = DMACH_I2S0_RX,
66 [14] = DMACH_SPI0_TX,
67 [15] = DMACH_SPI0_RX,
68 [16] = DMACH_MAX,
69 [17] = DMACH_MAX,
70 [18] = DMACH_MAX,
71 [19] = DMACH_MAX,
72 [20] = DMACH_SPI1_TX,
73 [21] = DMACH_SPI1_RX,
74 [22] = DMACH_MAX,
75 [23] = DMACH_MAX,
76 [24] = DMACH_MAX,
77 [25] = DMACH_MAX,
78 [26] = DMACH_MAX,
79 [27] = DMACH_MAX,
80 [28] = DMACH_MAX,
81 [29] = DMACH_PWM,
82 [30] = DMACH_MAX,
83 [31] = DMACH_MAX,
84 },
85}; 108};
86 109
87static struct s3c_pl330_platdata s5p6450_pdma_pdata = { 110struct dma_pl330_peri s5p6450_pdma_peri[32] = {
88 .peri = { 111 {
89 [0] = DMACH_UART0_RX, 112 .peri_id = (u8)DMACH_UART0_RX,
90 [1] = DMACH_UART0_TX, 113 .rqtype = DEVTOMEM,
91 [2] = DMACH_UART1_RX, 114 }, {
92 [3] = DMACH_UART1_TX, 115 .peri_id = (u8)DMACH_UART0_TX,
93 [4] = DMACH_UART2_RX, 116 .rqtype = MEMTODEV,
94 [5] = DMACH_UART2_TX, 117 }, {
95 [6] = DMACH_UART3_RX, 118 .peri_id = (u8)DMACH_UART1_RX,
96 [7] = DMACH_UART3_TX, 119 .rqtype = DEVTOMEM,
97 [8] = DMACH_UART4_RX, 120 }, {
98 [9] = DMACH_UART4_TX, 121 .peri_id = (u8)DMACH_UART1_TX,
99 [10] = DMACH_PCM0_TX, 122 .rqtype = MEMTODEV,
100 [11] = DMACH_PCM0_RX, 123 }, {
101 [12] = DMACH_I2S0_TX, 124 .peri_id = (u8)DMACH_UART2_RX,
102 [13] = DMACH_I2S0_RX, 125 .rqtype = DEVTOMEM,
103 [14] = DMACH_SPI0_TX, 126 }, {
104 [15] = DMACH_SPI0_RX, 127 .peri_id = (u8)DMACH_UART2_TX,
105 [16] = DMACH_PCM1_TX, 128 .rqtype = MEMTODEV,
106 [17] = DMACH_PCM1_RX, 129 }, {
107 [18] = DMACH_PCM2_TX, 130 .peri_id = (u8)DMACH_UART3_RX,
108 [19] = DMACH_PCM2_RX, 131 .rqtype = DEVTOMEM,
109 [20] = DMACH_SPI1_TX, 132 }, {
110 [21] = DMACH_SPI1_RX, 133 .peri_id = (u8)DMACH_UART3_TX,
111 [22] = DMACH_USI_TX, 134 .rqtype = MEMTODEV,
112 [23] = DMACH_USI_RX, 135 }, {
113 [24] = DMACH_MAX, 136 .peri_id = (u8)DMACH_UART4_RX,
114 [25] = DMACH_I2S1_TX, 137 .rqtype = DEVTOMEM,
115 [26] = DMACH_I2S1_RX, 138 }, {
116 [27] = DMACH_I2S2_TX, 139 .peri_id = (u8)DMACH_UART4_TX,
117 [28] = DMACH_I2S2_RX, 140 .rqtype = MEMTODEV,
118 [29] = DMACH_PWM, 141 }, {
119 [30] = DMACH_UART5_RX, 142 .peri_id = (u8)DMACH_PCM0_TX,
120 [31] = DMACH_UART5_TX, 143 .rqtype = MEMTODEV,
144 }, {
145 .peri_id = (u8)DMACH_PCM0_RX,
146 .rqtype = DEVTOMEM,
147 }, {
148 .peri_id = (u8)DMACH_I2S0_TX,
149 .rqtype = MEMTODEV,
150 }, {
151 .peri_id = (u8)DMACH_I2S0_RX,
152 .rqtype = DEVTOMEM,
153 }, {
154 .peri_id = (u8)DMACH_SPI0_TX,
155 .rqtype = MEMTODEV,
156 }, {
157 .peri_id = (u8)DMACH_SPI0_RX,
158 .rqtype = DEVTOMEM,
159 }, {
160 .peri_id = (u8)DMACH_PCM1_TX,
161 .rqtype = MEMTODEV,
162 }, {
163 .peri_id = (u8)DMACH_PCM1_RX,
164 .rqtype = DEVTOMEM,
165 }, {
166 .peri_id = (u8)DMACH_PCM2_TX,
167 .rqtype = MEMTODEV,
168 }, {
169 .peri_id = (u8)DMACH_PCM2_RX,
170 .rqtype = DEVTOMEM,
171 }, {
172 .peri_id = (u8)DMACH_SPI1_TX,
173 .rqtype = MEMTODEV,
174 }, {
175 .peri_id = (u8)DMACH_SPI1_RX,
176 .rqtype = DEVTOMEM,
177 }, {
178 .peri_id = (u8)DMACH_USI_TX,
179 .rqtype = MEMTODEV,
180 }, {
181 .peri_id = (u8)DMACH_USI_RX,
182 .rqtype = DEVTOMEM,
183 }, {
184 .peri_id = (u8)DMACH_MAX,
185 }, {
186 .peri_id = (u8)DMACH_I2S1_TX,
187 .rqtype = MEMTODEV,
188 }, {
189 .peri_id = (u8)DMACH_I2S1_RX,
190 .rqtype = DEVTOMEM,
191 }, {
192 .peri_id = (u8)DMACH_I2S2_TX,
193 .rqtype = MEMTODEV,
194 }, {
195 .peri_id = (u8)DMACH_I2S2_RX,
196 .rqtype = DEVTOMEM,
197 }, {
198 .peri_id = (u8)DMACH_PWM,
199 }, {
200 .peri_id = (u8)DMACH_UART5_RX,
201 .rqtype = DEVTOMEM,
202 }, {
203 .peri_id = (u8)DMACH_UART5_TX,
204 .rqtype = MEMTODEV,
121 }, 205 },
122}; 206};
123 207
124static struct platform_device s5p64x0_device_pdma = { 208struct dma_pl330_platdata s5p6450_pdma_pdata = {
125 .name = "s3c-pl330", 209 .nr_valid_peri = ARRAY_SIZE(s5p6450_pdma_peri),
126 .id = -1, 210 .peri = s5p6450_pdma_peri,
127 .num_resources = ARRAY_SIZE(s5p64x0_pdma_resource), 211};
128 .resource = s5p64x0_pdma_resource, 212
129 .dev = { 213struct amba_device s5p64x0_device_pdma = {
214 .dev = {
215 .init_name = "dma-pl330",
130 .dma_mask = &dma_dmamask, 216 .dma_mask = &dma_dmamask,
131 .coherent_dma_mask = DMA_BIT_MASK(32), 217 .coherent_dma_mask = DMA_BIT_MASK(32),
132 }, 218 },
219 .res = {
220 .start = S5P64X0_PA_PDMA,
221 .end = S5P64X0_PA_PDMA + SZ_4K,
222 .flags = IORESOURCE_MEM,
223 },
224 .irq = {IRQ_DMA0, NO_IRQ},
225 .periphid = 0x00041330,
133}; 226};
134 227
135static int __init s5p64x0_dma_init(void) 228static int __init s5p64x0_dma_init(void)
@@ -139,7 +232,7 @@ static int __init s5p64x0_dma_init(void)
139 else 232 else
140 s5p64x0_device_pdma.dev.platform_data = &s5p6440_pdma_pdata; 233 s5p64x0_device_pdma.dev.platform_data = &s5p6440_pdma_pdata;
141 234
142 platform_device_register(&s5p64x0_device_pdma); 235 amba_device_register(&s5p64x0_device_pdma, &iomem_resource);
143 236
144 return 0; 237 return 0;
145} 238}
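Only the else-arm of the SoC check is visible in the hunk above; the full init plausibly reads as follows (the soc_is_s5p6450() predicate name and the initcall level are assumptions, not taken from this diff):

    static int __init s5p64x0_dma_init(void)
    {
            if (soc_is_s5p6450())   /* assumed predicate name */
                    s5p64x0_device_pdma.dev.platform_data = &s5p6450_pdma_pdata;
            else
                    s5p64x0_device_pdma.dev.platform_data = &s5p6440_pdma_pdata;

            amba_device_register(&s5p64x0_device_pdma, &iomem_resource);

            return 0;
    }
    arch_initcall(s5p64x0_dma_init);    /* initcall level assumed */

One amba_device thus serves both SoCs, differing only in the peripheral map attached at boot.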
diff --git a/arch/arm/mach-s5p64x0/include/mach/dma.h b/arch/arm/mach-s5p64x0/include/mach/dma.h
index 81209eb1409b..5a622af461d7 100644
--- a/arch/arm/mach-s5p64x0/include/mach/dma.h
+++ b/arch/arm/mach-s5p64x0/include/mach/dma.h
@@ -20,7 +20,7 @@
20#ifndef __MACH_DMA_H 20#ifndef __MACH_DMA_H
21#define __MACH_DMA_H 21#define __MACH_DMA_H
22 22
23/* This platform uses the common S3C DMA API driver for PL330 */ 23/* This platform uses the common DMA API driver for PL330 */
24#include <plat/s3c-dma-pl330.h> 24#include <plat/dma-pl330.h>
25 25
26#endif /* __MACH_DMA_H */ 26#endif /* __MACH_DMA_H */
diff --git a/arch/arm/mach-s5pc100/Kconfig b/arch/arm/mach-s5pc100/Kconfig
index e8a33c4b054c..e538a4c67e9c 100644
--- a/arch/arm/mach-s5pc100/Kconfig
+++ b/arch/arm/mach-s5pc100/Kconfig
@@ -10,7 +10,7 @@ if ARCH_S5PC100
10config CPU_S5PC100 10config CPU_S5PC100
11 bool 11 bool
12 select S5P_EXT_INT 12 select S5P_EXT_INT
13 select S3C_PL330_DMA 13 select SAMSUNG_DMADEV
14 help 14 help
15 Enable S5PC100 CPU support 15 Enable S5PC100 CPU support
16 16
diff --git a/arch/arm/mach-s5pc100/clock.c b/arch/arm/mach-s5pc100/clock.c
index ff5cbb30de5b..6527c05c5fa1 100644
--- a/arch/arm/mach-s5pc100/clock.c
+++ b/arch/arm/mach-s5pc100/clock.c
@@ -33,6 +33,11 @@ static struct clk s5p_clk_otgphy = {
33 .name = "otg_phy", 33 .name = "otg_phy",
34}; 34};
35 35
36static struct clk dummy_apb_pclk = {
37 .name = "apb_pclk",
38 .id = -1,
39};
40
36static struct clk *clk_src_mout_href_list[] = { 41static struct clk *clk_src_mout_href_list[] = {
37 [0] = &s5p_clk_27m, 42 [0] = &s5p_clk_27m,
38 [1] = &clk_fin_hpll, 43 [1] = &clk_fin_hpll,
@@ -454,13 +459,13 @@ static struct clk init_clocks_off[] = {
454 .enable = s5pc100_d1_0_ctrl, 459 .enable = s5pc100_d1_0_ctrl,
455 .ctrlbit = (1 << 2), 460 .ctrlbit = (1 << 2),
456 }, { 461 }, {
457 .name = "pdma", 462 .name = "dma",
458 .devname = "s3c-pl330.1", 463 .devname = "s3c-pl330.1",
459 .parent = &clk_div_d1_bus.clk, 464 .parent = &clk_div_d1_bus.clk,
460 .enable = s5pc100_d1_0_ctrl, 465 .enable = s5pc100_d1_0_ctrl,
461 .ctrlbit = (1 << 1), 466 .ctrlbit = (1 << 1),
462 }, { 467 }, {
463 .name = "pdma", 468 .name = "dma",
464 .devname = "s3c-pl330.0", 469 .devname = "s3c-pl330.0",
465 .parent = &clk_div_d1_bus.clk, 470 .parent = &clk_div_d1_bus.clk,
466 .enable = s5pc100_d1_0_ctrl, 471 .enable = s5pc100_d1_0_ctrl,
@@ -1276,5 +1281,7 @@ void __init s5pc100_register_clocks(void)
1276 s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); 1281 s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
1277 s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); 1282 s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
1278 1283
1284 s3c24xx_register_clock(&dummy_apb_pclk);
1285
1279 s3c_pwmclk_init(); 1286 s3c_pwmclk_init();
1280} 1287}
diff --git a/arch/arm/mach-s5pc100/dma.c b/arch/arm/mach-s5pc100/dma.c
index bf4cd0fb97c6..ef803e92d35d 100644
--- a/arch/arm/mach-s5pc100/dma.c
+++ b/arch/arm/mach-s5pc100/dma.c
@@ -1,4 +1,8 @@
1/* 1/* linux/arch/arm/mach-s5pc100/dma.c
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
2 * Copyright (C) 2010 Samsung Electronics Co. Ltd. 6 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
3 * Jaswinder Singh <jassi.brar@samsung.com> 7 * Jaswinder Singh <jassi.brar@samsung.com>
4 * 8 *
@@ -17,150 +21,245 @@
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */ 22 */
19 23
20#include <linux/platform_device.h>
21#include <linux/dma-mapping.h> 24#include <linux/dma-mapping.h>
25#include <linux/amba/bus.h>
26#include <linux/amba/pl330.h>
22 27
28#include <asm/irq.h>
23#include <plat/devs.h> 29#include <plat/devs.h>
30#include <plat/irqs.h>
24 31
25#include <mach/map.h> 32#include <mach/map.h>
26#include <mach/irqs.h> 33#include <mach/irqs.h>
27 34#include <mach/dma.h>
28#include <plat/s3c-pl330-pdata.h>
29 35
30static u64 dma_dmamask = DMA_BIT_MASK(32); 36static u64 dma_dmamask = DMA_BIT_MASK(32);
31 37
32static struct resource s5pc100_pdma0_resource[] = { 38struct dma_pl330_peri pdma0_peri[30] = {
33 [0] = { 39 {
34 .start = S5PC100_PA_PDMA0, 40 .peri_id = (u8)DMACH_UART0_RX,
35 .end = S5PC100_PA_PDMA0 + SZ_4K, 41 .rqtype = DEVTOMEM,
36 .flags = IORESOURCE_MEM, 42 }, {
37 }, 43 .peri_id = (u8)DMACH_UART0_TX,
38 [1] = { 44 .rqtype = MEMTODEV,
39 .start = IRQ_PDMA0, 45 }, {
40 .end = IRQ_PDMA0, 46 .peri_id = (u8)DMACH_UART1_RX,
41 .flags = IORESOURCE_IRQ, 47 .rqtype = DEVTOMEM,
48 }, {
49 .peri_id = (u8)DMACH_UART1_TX,
50 .rqtype = MEMTODEV,
51 }, {
52 .peri_id = (u8)DMACH_UART2_RX,
53 .rqtype = DEVTOMEM,
54 }, {
55 .peri_id = (u8)DMACH_UART2_TX,
56 .rqtype = MEMTODEV,
57 }, {
58 .peri_id = (u8)DMACH_UART3_RX,
59 .rqtype = DEVTOMEM,
60 }, {
61 .peri_id = (u8)DMACH_UART3_TX,
62 .rqtype = MEMTODEV,
63 }, {
64 .peri_id = DMACH_IRDA,
65 }, {
66 .peri_id = (u8)DMACH_I2S0_RX,
67 .rqtype = DEVTOMEM,
68 }, {
69 .peri_id = (u8)DMACH_I2S0_TX,
70 .rqtype = MEMTODEV,
71 }, {
72 .peri_id = (u8)DMACH_I2S0S_TX,
73 .rqtype = MEMTODEV,
74 }, {
75 .peri_id = (u8)DMACH_I2S1_RX,
76 .rqtype = DEVTOMEM,
77 }, {
78 .peri_id = (u8)DMACH_I2S1_TX,
79 .rqtype = MEMTODEV,
80 }, {
81 .peri_id = (u8)DMACH_I2S2_RX,
82 .rqtype = DEVTOMEM,
83 }, {
84 .peri_id = (u8)DMACH_I2S2_TX,
85 .rqtype = MEMTODEV,
86 }, {
87 .peri_id = (u8)DMACH_SPI0_RX,
88 .rqtype = DEVTOMEM,
89 }, {
90 .peri_id = (u8)DMACH_SPI0_TX,
91 .rqtype = MEMTODEV,
92 }, {
93 .peri_id = (u8)DMACH_SPI1_RX,
94 .rqtype = DEVTOMEM,
95 }, {
96 .peri_id = (u8)DMACH_SPI1_TX,
97 .rqtype = MEMTODEV,
98 }, {
99 .peri_id = (u8)DMACH_SPI2_RX,
100 .rqtype = DEVTOMEM,
101 }, {
102 .peri_id = (u8)DMACH_SPI2_TX,
103 .rqtype = MEMTODEV,
104 }, {
105 .peri_id = (u8)DMACH_AC97_MICIN,
106 .rqtype = DEVTOMEM,
107 }, {
108 .peri_id = (u8)DMACH_AC97_PCMIN,
109 .rqtype = DEVTOMEM,
110 }, {
111 .peri_id = (u8)DMACH_AC97_PCMOUT,
112 .rqtype = MEMTODEV,
113 }, {
114 .peri_id = (u8)DMACH_EXTERNAL,
115 }, {
116 .peri_id = (u8)DMACH_PWM,
117 }, {
118 .peri_id = (u8)DMACH_SPDIF,
119 .rqtype = MEMTODEV,
120 }, {
121 .peri_id = (u8)DMACH_HSI_RX,
122 .rqtype = DEVTOMEM,
123 }, {
124 .peri_id = (u8)DMACH_HSI_TX,
125 .rqtype = MEMTODEV,
42 }, 126 },
43}; 127};
44 128
45static struct s3c_pl330_platdata s5pc100_pdma0_pdata = { 129struct dma_pl330_platdata s5pc100_pdma0_pdata = {
46 .peri = { 130 .nr_valid_peri = ARRAY_SIZE(pdma0_peri),
47 [0] = DMACH_UART0_RX, 131 .peri = pdma0_peri,
48 [1] = DMACH_UART0_TX,
49 [2] = DMACH_UART1_RX,
50 [3] = DMACH_UART1_TX,
51 [4] = DMACH_UART2_RX,
52 [5] = DMACH_UART2_TX,
53 [6] = DMACH_UART3_RX,
54 [7] = DMACH_UART3_TX,
55 [8] = DMACH_IRDA,
56 [9] = DMACH_I2S0_RX,
57 [10] = DMACH_I2S0_TX,
58 [11] = DMACH_I2S0S_TX,
59 [12] = DMACH_I2S1_RX,
60 [13] = DMACH_I2S1_TX,
61 [14] = DMACH_I2S2_RX,
62 [15] = DMACH_I2S2_TX,
63 [16] = DMACH_SPI0_RX,
64 [17] = DMACH_SPI0_TX,
65 [18] = DMACH_SPI1_RX,
66 [19] = DMACH_SPI1_TX,
67 [20] = DMACH_SPI2_RX,
68 [21] = DMACH_SPI2_TX,
69 [22] = DMACH_AC97_MICIN,
70 [23] = DMACH_AC97_PCMIN,
71 [24] = DMACH_AC97_PCMOUT,
72 [25] = DMACH_EXTERNAL,
73 [26] = DMACH_PWM,
74 [27] = DMACH_SPDIF,
75 [28] = DMACH_HSI_RX,
76 [29] = DMACH_HSI_TX,
77 [30] = DMACH_MAX,
78 [31] = DMACH_MAX,
79 },
80}; 132};
81 133
82static struct platform_device s5pc100_device_pdma0 = { 134struct amba_device s5pc100_device_pdma0 = {
83 .name = "s3c-pl330", 135 .dev = {
84 .id = 0, 136 .init_name = "dma-pl330.0",
85 .num_resources = ARRAY_SIZE(s5pc100_pdma0_resource),
86 .resource = s5pc100_pdma0_resource,
87 .dev = {
88 .dma_mask = &dma_dmamask, 137 .dma_mask = &dma_dmamask,
89 .coherent_dma_mask = DMA_BIT_MASK(32), 138 .coherent_dma_mask = DMA_BIT_MASK(32),
90 .platform_data = &s5pc100_pdma0_pdata, 139 .platform_data = &s5pc100_pdma0_pdata,
91 }, 140 },
92}; 141 .res = {
93 142 .start = S5PC100_PA_PDMA0,
94static struct resource s5pc100_pdma1_resource[] = { 143 .end = S5PC100_PA_PDMA0 + SZ_4K,
95 [0] = {
96 .start = S5PC100_PA_PDMA1,
97 .end = S5PC100_PA_PDMA1 + SZ_4K,
98 .flags = IORESOURCE_MEM, 144 .flags = IORESOURCE_MEM,
99 }, 145 },
100 [1] = { 146 .irq = {IRQ_PDMA0, NO_IRQ},
101 .start = IRQ_PDMA1, 147 .periphid = 0x00041330,
102 .end = IRQ_PDMA1,
103 .flags = IORESOURCE_IRQ,
104 },
105}; 148};
106 149
107static struct s3c_pl330_platdata s5pc100_pdma1_pdata = { 150struct dma_pl330_peri pdma1_peri[30] = {
108 .peri = { 151 {
109 [0] = DMACH_UART0_RX, 152 .peri_id = (u8)DMACH_UART0_RX,
110 [1] = DMACH_UART0_TX, 153 .rqtype = DEVTOMEM,
111 [2] = DMACH_UART1_RX, 154 }, {
112 [3] = DMACH_UART1_TX, 155 .peri_id = (u8)DMACH_UART0_TX,
113 [4] = DMACH_UART2_RX, 156 .rqtype = MEMTODEV,
114 [5] = DMACH_UART2_TX, 157 }, {
115 [6] = DMACH_UART3_RX, 158 .peri_id = (u8)DMACH_UART1_RX,
116 [7] = DMACH_UART3_TX, 159 .rqtype = DEVTOMEM,
117 [8] = DMACH_IRDA, 160 }, {
118 [9] = DMACH_I2S0_RX, 161 .peri_id = (u8)DMACH_UART1_TX,
119 [10] = DMACH_I2S0_TX, 162 .rqtype = MEMTODEV,
120 [11] = DMACH_I2S0S_TX, 163 }, {
121 [12] = DMACH_I2S1_RX, 164 .peri_id = (u8)DMACH_UART2_RX,
122 [13] = DMACH_I2S1_TX, 165 .rqtype = DEVTOMEM,
123 [14] = DMACH_I2S2_RX, 166 }, {
124 [15] = DMACH_I2S2_TX, 167 .peri_id = (u8)DMACH_UART2_TX,
125 [16] = DMACH_SPI0_RX, 168 .rqtype = MEMTODEV,
126 [17] = DMACH_SPI0_TX, 169 }, {
127 [18] = DMACH_SPI1_RX, 170 .peri_id = (u8)DMACH_UART3_RX,
128 [19] = DMACH_SPI1_TX, 171 .rqtype = DEVTOMEM,
129 [20] = DMACH_SPI2_RX, 172 }, {
130 [21] = DMACH_SPI2_TX, 173 .peri_id = (u8)DMACH_UART3_TX,
131 [22] = DMACH_PCM0_RX, 174 .rqtype = MEMTODEV,
132 [23] = DMACH_PCM0_TX, 175 }, {
133 [24] = DMACH_PCM1_RX, 176 .peri_id = DMACH_IRDA,
134 [25] = DMACH_PCM1_TX, 177 }, {
135 [26] = DMACH_MSM_REQ0, 178 .peri_id = (u8)DMACH_I2S0_RX,
136 [27] = DMACH_MSM_REQ1, 179 .rqtype = DEVTOMEM,
137 [28] = DMACH_MSM_REQ2, 180 }, {
138 [29] = DMACH_MSM_REQ3, 181 .peri_id = (u8)DMACH_I2S0_TX,
139 [30] = DMACH_MAX, 182 .rqtype = MEMTODEV,
140 [31] = DMACH_MAX, 183 }, {
184 .peri_id = (u8)DMACH_I2S0S_TX,
185 .rqtype = MEMTODEV,
186 }, {
187 .peri_id = (u8)DMACH_I2S1_RX,
188 .rqtype = DEVTOMEM,
189 }, {
190 .peri_id = (u8)DMACH_I2S1_TX,
191 .rqtype = MEMTODEV,
192 }, {
193 .peri_id = (u8)DMACH_I2S2_RX,
194 .rqtype = DEVTOMEM,
195 }, {
196 .peri_id = (u8)DMACH_I2S2_TX,
197 .rqtype = MEMTODEV,
198 }, {
199 .peri_id = (u8)DMACH_SPI0_RX,
200 .rqtype = DEVTOMEM,
201 }, {
202 .peri_id = (u8)DMACH_SPI0_TX,
203 .rqtype = MEMTODEV,
204 }, {
205 .peri_id = (u8)DMACH_SPI1_RX,
206 .rqtype = DEVTOMEM,
207 }, {
208 .peri_id = (u8)DMACH_SPI1_TX,
209 .rqtype = MEMTODEV,
210 }, {
211 .peri_id = (u8)DMACH_SPI2_RX,
212 .rqtype = DEVTOMEM,
213 }, {
214 .peri_id = (u8)DMACH_SPI2_TX,
215 .rqtype = MEMTODEV,
216 }, {
217 .peri_id = (u8)DMACH_PCM0_RX,
218 .rqtype = DEVTOMEM,
219 }, {
220 .peri_id = (u8)DMACH_PCM0_TX,
221 .rqtype = MEMTODEV,
222 }, {
223 .peri_id = (u8)DMACH_PCM1_RX,
224 .rqtype = DEVTOMEM,
225 }, {
226 .peri_id = (u8)DMACH_PCM1_TX,
227 .rqtype = MEMTODEV,
228 }, {
229 .peri_id = (u8)DMACH_MSM_REQ0,
230 }, {
231 .peri_id = (u8)DMACH_MSM_REQ1,
232 }, {
233 .peri_id = (u8)DMACH_MSM_REQ2,
234 }, {
235 .peri_id = (u8)DMACH_MSM_REQ3,
141 }, 236 },
142}; 237};
143 238
144static struct platform_device s5pc100_device_pdma1 = { 239struct dma_pl330_platdata s5pc100_pdma1_pdata = {
145 .name = "s3c-pl330", 240 .nr_valid_peri = ARRAY_SIZE(pdma1_peri),
146 .id = 1, 241 .peri = pdma1_peri,
147 .num_resources = ARRAY_SIZE(s5pc100_pdma1_resource), 242};
148 .resource = s5pc100_pdma1_resource, 243
149 .dev = { 244struct amba_device s5pc100_device_pdma1 = {
245 .dev = {
246 .init_name = "dma-pl330.1",
150 .dma_mask = &dma_dmamask, 247 .dma_mask = &dma_dmamask,
151 .coherent_dma_mask = DMA_BIT_MASK(32), 248 .coherent_dma_mask = DMA_BIT_MASK(32),
152 .platform_data = &s5pc100_pdma1_pdata, 249 .platform_data = &s5pc100_pdma1_pdata,
153 }, 250 },
154}; 251 .res = {
155 252 .start = S5PC100_PA_PDMA1,
156static struct platform_device *s5pc100_dmacs[] __initdata = { 253 .end = S5PC100_PA_PDMA1 + SZ_4K,
157 &s5pc100_device_pdma0, 254 .flags = IORESOURCE_MEM,
158 &s5pc100_device_pdma1, 255 },
256 .irq = {IRQ_PDMA1, NO_IRQ},
257 .periphid = 0x00041330,
159}; 258};
160 259
161static int __init s5pc100_dma_init(void) 260static int __init s5pc100_dma_init(void)
162{ 261{
163 platform_add_devices(s5pc100_dmacs, ARRAY_SIZE(s5pc100_dmacs)); 262 amba_device_register(&s5pc100_device_pdma0, &iomem_resource);
164 263
165 return 0; 264 return 0;
166} 265}
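In these platdata tables the position of each entry is the hardware request-line number of the PL330 instance, with DMACH_MAX marking unused lines and nr_valid_peri bounding the scan. A sketch of the lookup a channel filter would perform (the helper name is invented for illustration):

    static int example_find_request_line(struct dma_pl330_platdata *pd,
                                         u8 peri_id)
    {
            int i;

            for (i = 0; i < pd->nr_valid_peri; i++)
                    if (pd->peri[i].peri_id == peri_id)
                            return i;   /* hardware request line */

            return -1;  /* peripheral not routed through this controller */
    }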
diff --git a/arch/arm/mach-s5pc100/include/mach/dma.h b/arch/arm/mach-s5pc100/include/mach/dma.h
index 81209eb1409b..201842a3769e 100644
--- a/arch/arm/mach-s5pc100/include/mach/dma.h
+++ b/arch/arm/mach-s5pc100/include/mach/dma.h
@@ -20,7 +20,7 @@
20#ifndef __MACH_DMA_H 20#ifndef __MACH_DMA_H
21#define __MACH_DMA_H 21#define __MACH_DMA_H
22 22
23/* This platform uses the common S3C DMA API driver for PL330 */ 23/* This platform uses the common DMA API driver for PL330 */
24#include <plat/s3c-dma-pl330.h> 24#include <plat/dma-pl330.h>
25 25
26#endif /* __MACH_DMA_H */ 26#endif /* __MACH_DMA_H */
diff --git a/arch/arm/mach-s5pv210/Kconfig b/arch/arm/mach-s5pv210/Kconfig
index aaeb44a73716..e3ebe96923c8 100644
--- a/arch/arm/mach-s5pv210/Kconfig
+++ b/arch/arm/mach-s5pv210/Kconfig
@@ -11,7 +11,7 @@ if ARCH_S5PV210
11 11
12config CPU_S5PV210 12config CPU_S5PV210
13 bool 13 bool
14 select S3C_PL330_DMA 14 select SAMSUNG_DMADEV
15 select S5P_EXT_INT 15 select S5P_EXT_INT
16 select S5P_HRT 16 select S5P_HRT
17 help 17 help
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index f5f8fa89679c..1ab34000cc7e 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -203,6 +203,11 @@ static struct clk clk_pcmcdclk2 = {
203 .name = "pcmcdclk", 203 .name = "pcmcdclk",
204}; 204};
205 205
206static struct clk dummy_apb_pclk = {
207 .name = "apb_pclk",
208 .id = -1,
209};
210
206static struct clk *clkset_vpllsrc_list[] = { 211static struct clk *clkset_vpllsrc_list[] = {
207 [0] = &clk_fin_vpll, 212 [0] = &clk_fin_vpll,
208 [1] = &clk_sclk_hdmi27m, 213 [1] = &clk_sclk_hdmi27m,
@@ -289,13 +294,13 @@ static struct clk_ops clk_fout_apll_ops = {
289 294
290static struct clk init_clocks_off[] = { 295static struct clk init_clocks_off[] = {
291 { 296 {
292 .name = "pdma", 297 .name = "dma",
293 .devname = "s3c-pl330.0", 298 .devname = "s3c-pl330.0",
294 .parent = &clk_hclk_psys.clk, 299 .parent = &clk_hclk_psys.clk,
295 .enable = s5pv210_clk_ip0_ctrl, 300 .enable = s5pv210_clk_ip0_ctrl,
296 .ctrlbit = (1 << 3), 301 .ctrlbit = (1 << 3),
297 }, { 302 }, {
298 .name = "pdma", 303 .name = "dma",
299 .devname = "s3c-pl330.1", 304 .devname = "s3c-pl330.1",
300 .parent = &clk_hclk_psys.clk, 305 .parent = &clk_hclk_psys.clk,
301 .enable = s5pv210_clk_ip0_ctrl, 306 .enable = s5pv210_clk_ip0_ctrl,
@@ -1159,5 +1164,6 @@ void __init s5pv210_register_clocks(void)
1159 s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); 1164 s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
1160 s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); 1165 s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
1161 1166
1167 s3c24xx_register_clock(&dummy_apb_pclk);
1162 s3c_pwmclk_init(); 1168 s3c_pwmclk_init();
1163} 1169}
diff --git a/arch/arm/mach-s5pv210/dma.c b/arch/arm/mach-s5pv210/dma.c
index 497d3439a142..f79d0b06cbf9 100644
--- a/arch/arm/mach-s5pv210/dma.c
+++ b/arch/arm/mach-s5pv210/dma.c
@@ -1,4 +1,8 @@
1/* 1/* linux/arch/arm/mach-s5pv210/dma.c
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
2 * Copyright (C) 2010 Samsung Electronics Co. Ltd. 6 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
3 * Jaswinder Singh <jassi.brar@samsung.com> 7 * Jaswinder Singh <jassi.brar@samsung.com>
4 * 8 *
@@ -17,151 +21,239 @@
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */ 22 */
19 23
20#include <linux/platform_device.h>
21#include <linux/dma-mapping.h> 24#include <linux/dma-mapping.h>
25#include <linux/amba/bus.h>
26#include <linux/amba/pl330.h>
22 27
28#include <asm/irq.h>
23#include <plat/devs.h> 29#include <plat/devs.h>
24#include <plat/irqs.h> 30#include <plat/irqs.h>
25 31
26#include <mach/map.h> 32#include <mach/map.h>
27#include <mach/irqs.h> 33#include <mach/irqs.h>
28 34#include <mach/dma.h>
29#include <plat/s3c-pl330-pdata.h>
30 35
31static u64 dma_dmamask = DMA_BIT_MASK(32); 36static u64 dma_dmamask = DMA_BIT_MASK(32);
32 37
33static struct resource s5pv210_pdma0_resource[] = { 38struct dma_pl330_peri pdma0_peri[28] = {
34 [0] = { 39 {
35 .start = S5PV210_PA_PDMA0, 40 .peri_id = (u8)DMACH_UART0_RX,
36 .end = S5PV210_PA_PDMA0 + SZ_4K, 41 .rqtype = DEVTOMEM,
37 .flags = IORESOURCE_MEM, 42 }, {
38 }, 43 .peri_id = (u8)DMACH_UART0_TX,
39 [1] = { 44 .rqtype = MEMTODEV,
40 .start = IRQ_PDMA0, 45 }, {
41 .end = IRQ_PDMA0, 46 .peri_id = (u8)DMACH_UART1_RX,
42 .flags = IORESOURCE_IRQ, 47 .rqtype = DEVTOMEM,
48 }, {
49 .peri_id = (u8)DMACH_UART1_TX,
50 .rqtype = MEMTODEV,
51 }, {
52 .peri_id = (u8)DMACH_UART2_RX,
53 .rqtype = DEVTOMEM,
54 }, {
55 .peri_id = (u8)DMACH_UART2_TX,
56 .rqtype = MEMTODEV,
57 }, {
58 .peri_id = (u8)DMACH_UART3_RX,
59 .rqtype = DEVTOMEM,
60 }, {
61 .peri_id = (u8)DMACH_UART3_TX,
62 .rqtype = MEMTODEV,
63 }, {
64 .peri_id = DMACH_MAX,
65 }, {
66 .peri_id = (u8)DMACH_I2S0_RX,
67 .rqtype = DEVTOMEM,
68 }, {
69 .peri_id = (u8)DMACH_I2S0_TX,
70 .rqtype = MEMTODEV,
71 }, {
72 .peri_id = (u8)DMACH_I2S0S_TX,
73 .rqtype = MEMTODEV,
74 }, {
75 .peri_id = (u8)DMACH_I2S1_RX,
76 .rqtype = DEVTOMEM,
77 }, {
78 .peri_id = (u8)DMACH_I2S1_TX,
79 .rqtype = MEMTODEV,
80 }, {
81 .peri_id = (u8)DMACH_MAX,
82 }, {
83 .peri_id = (u8)DMACH_MAX,
84 }, {
85 .peri_id = (u8)DMACH_SPI0_RX,
86 .rqtype = DEVTOMEM,
87 }, {
88 .peri_id = (u8)DMACH_SPI0_TX,
89 .rqtype = MEMTODEV,
90 }, {
91 .peri_id = (u8)DMACH_SPI1_RX,
92 .rqtype = DEVTOMEM,
93 }, {
94 .peri_id = (u8)DMACH_SPI1_TX,
95 .rqtype = MEMTODEV,
96 }, {
97 .peri_id = (u8)DMACH_MAX,
98 }, {
99 .peri_id = (u8)DMACH_MAX,
100 }, {
101 .peri_id = (u8)DMACH_AC97_MICIN,
102 .rqtype = DEVTOMEM,
103 }, {
104 .peri_id = (u8)DMACH_AC97_PCMIN,
105 .rqtype = DEVTOMEM,
106 }, {
107 .peri_id = (u8)DMACH_AC97_PCMOUT,
108 .rqtype = MEMTODEV,
109 }, {
110 .peri_id = (u8)DMACH_MAX,
111 }, {
112 .peri_id = (u8)DMACH_PWM,
113 }, {
114 .peri_id = (u8)DMACH_SPDIF,
115 .rqtype = MEMTODEV,
43 }, 116 },
44}; 117};
45 118
46static struct s3c_pl330_platdata s5pv210_pdma0_pdata = { 119struct dma_pl330_platdata s5pv210_pdma0_pdata = {
47 .peri = { 120 .nr_valid_peri = ARRAY_SIZE(pdma0_peri),
48 [0] = DMACH_UART0_RX, 121 .peri = pdma0_peri,
49 [1] = DMACH_UART0_TX,
50 [2] = DMACH_UART1_RX,
51 [3] = DMACH_UART1_TX,
52 [4] = DMACH_UART2_RX,
53 [5] = DMACH_UART2_TX,
54 [6] = DMACH_UART3_RX,
55 [7] = DMACH_UART3_TX,
56 [8] = DMACH_MAX,
57 [9] = DMACH_I2S0_RX,
58 [10] = DMACH_I2S0_TX,
59 [11] = DMACH_I2S0S_TX,
60 [12] = DMACH_I2S1_RX,
61 [13] = DMACH_I2S1_TX,
62 [14] = DMACH_MAX,
63 [15] = DMACH_MAX,
64 [16] = DMACH_SPI0_RX,
65 [17] = DMACH_SPI0_TX,
66 [18] = DMACH_SPI1_RX,
67 [19] = DMACH_SPI1_TX,
68 [20] = DMACH_MAX,
69 [21] = DMACH_MAX,
70 [22] = DMACH_AC97_MICIN,
71 [23] = DMACH_AC97_PCMIN,
72 [24] = DMACH_AC97_PCMOUT,
73 [25] = DMACH_MAX,
74 [26] = DMACH_PWM,
75 [27] = DMACH_SPDIF,
76 [28] = DMACH_MAX,
77 [29] = DMACH_MAX,
78 [30] = DMACH_MAX,
79 [31] = DMACH_MAX,
80 },
81}; 122};
82 123
83static struct platform_device s5pv210_device_pdma0 = { 124struct amba_device s5pv210_device_pdma0 = {
84 .name = "s3c-pl330", 125 .dev = {
85 .id = 0, 126 .init_name = "dma-pl330.0",
86 .num_resources = ARRAY_SIZE(s5pv210_pdma0_resource),
87 .resource = s5pv210_pdma0_resource,
88 .dev = {
89 .dma_mask = &dma_dmamask, 127 .dma_mask = &dma_dmamask,
90 .coherent_dma_mask = DMA_BIT_MASK(32), 128 .coherent_dma_mask = DMA_BIT_MASK(32),
91 .platform_data = &s5pv210_pdma0_pdata, 129 .platform_data = &s5pv210_pdma0_pdata,
92 }, 130 },
93}; 131 .res = {
94 132 .start = S5PV210_PA_PDMA0,
95static struct resource s5pv210_pdma1_resource[] = { 133 .end = S5PV210_PA_PDMA0 + SZ_4K,
96 [0] = {
97 .start = S5PV210_PA_PDMA1,
98 .end = S5PV210_PA_PDMA1 + SZ_4K,
99 .flags = IORESOURCE_MEM, 134 .flags = IORESOURCE_MEM,
100 }, 135 },
101 [1] = { 136 .irq = {IRQ_PDMA0, NO_IRQ},
102 .start = IRQ_PDMA1, 137 .periphid = 0x00041330,
103 .end = IRQ_PDMA1,
104 .flags = IORESOURCE_IRQ,
105 },
106}; 138};
107 139
108static struct s3c_pl330_platdata s5pv210_pdma1_pdata = { 140struct dma_pl330_peri pdma1_peri[32] = {
109 .peri = { 141 {
110 [0] = DMACH_UART0_RX, 142 .peri_id = (u8)DMACH_UART0_RX,
111 [1] = DMACH_UART0_TX, 143 .rqtype = DEVTOMEM,
112 [2] = DMACH_UART1_RX, 144 }, {
113 [3] = DMACH_UART1_TX, 145 .peri_id = (u8)DMACH_UART0_TX,
114 [4] = DMACH_UART2_RX, 146 .rqtype = MEMTODEV,
115 [5] = DMACH_UART2_TX, 147 }, {
116 [6] = DMACH_UART3_RX, 148 .peri_id = (u8)DMACH_UART1_RX,
117 [7] = DMACH_UART3_TX, 149 .rqtype = DEVTOMEM,
118 [8] = DMACH_MAX, 150 }, {
119 [9] = DMACH_I2S0_RX, 151 .peri_id = (u8)DMACH_UART1_TX,
120 [10] = DMACH_I2S0_TX, 152 .rqtype = MEMTODEV,
121 [11] = DMACH_I2S0S_TX, 153 }, {
122 [12] = DMACH_I2S1_RX, 154 .peri_id = (u8)DMACH_UART2_RX,
123 [13] = DMACH_I2S1_TX, 155 .rqtype = DEVTOMEM,
124 [14] = DMACH_I2S2_RX, 156 }, {
125 [15] = DMACH_I2S2_TX, 157 .peri_id = (u8)DMACH_UART2_TX,
126 [16] = DMACH_SPI0_RX, 158 .rqtype = MEMTODEV,
127 [17] = DMACH_SPI0_TX, 159 }, {
128 [18] = DMACH_SPI1_RX, 160 .peri_id = (u8)DMACH_UART3_RX,
129 [19] = DMACH_SPI1_TX, 161 .rqtype = DEVTOMEM,
130 [20] = DMACH_MAX, 162 }, {
131 [21] = DMACH_MAX, 163 .peri_id = (u8)DMACH_UART3_TX,
132 [22] = DMACH_PCM0_RX, 164 .rqtype = MEMTODEV,
133 [23] = DMACH_PCM0_TX, 165 }, {
134 [24] = DMACH_PCM1_RX, 166 .peri_id = DMACH_MAX,
135 [25] = DMACH_PCM1_TX, 167 }, {
136 [26] = DMACH_MSM_REQ0, 168 .peri_id = (u8)DMACH_I2S0_RX,
137 [27] = DMACH_MSM_REQ1, 169 .rqtype = DEVTOMEM,
138 [28] = DMACH_MSM_REQ2, 170 }, {
139 [29] = DMACH_MSM_REQ3, 171 .peri_id = (u8)DMACH_I2S0_TX,
140 [30] = DMACH_PCM2_RX, 172 .rqtype = MEMTODEV,
141 [31] = DMACH_PCM2_TX, 173 }, {
174 .peri_id = (u8)DMACH_I2S0S_TX,
175 .rqtype = MEMTODEV,
176 }, {
177 .peri_id = (u8)DMACH_I2S1_RX,
178 .rqtype = DEVTOMEM,
179 }, {
180 .peri_id = (u8)DMACH_I2S1_TX,
181 .rqtype = MEMTODEV,
182 }, {
183 .peri_id = (u8)DMACH_I2S2_RX,
184 .rqtype = DEVTOMEM,
185 }, {
186 .peri_id = (u8)DMACH_I2S2_TX,
187 .rqtype = MEMTODEV,
188 }, {
189 .peri_id = (u8)DMACH_SPI0_RX,
190 .rqtype = DEVTOMEM,
191 }, {
192 .peri_id = (u8)DMACH_SPI0_TX,
193 .rqtype = MEMTODEV,
194 }, {
195 .peri_id = (u8)DMACH_SPI1_RX,
196 .rqtype = DEVTOMEM,
197 }, {
198 .peri_id = (u8)DMACH_SPI1_TX,
199 .rqtype = MEMTODEV,
200 }, {
201 .peri_id = (u8)DMACH_MAX,
202 }, {
203 .peri_id = (u8)DMACH_MAX,
204 }, {
205 .peri_id = (u8)DMACH_PCM0_RX,
206 .rqtype = DEVTOMEM,
207 }, {
208 .peri_id = (u8)DMACH_PCM0_TX,
209 .rqtype = MEMTODEV,
210 }, {
211 .peri_id = (u8)DMACH_PCM1_RX,
212 .rqtype = DEVTOMEM,
213 }, {
214 .peri_id = (u8)DMACH_PCM1_TX,
215 .rqtype = MEMTODEV,
216 }, {
217 .peri_id = (u8)DMACH_MSM_REQ0,
218 }, {
219 .peri_id = (u8)DMACH_MSM_REQ1,
220 }, {
221 .peri_id = (u8)DMACH_MSM_REQ2,
222 }, {
223 .peri_id = (u8)DMACH_MSM_REQ3,
224 }, {
225 .peri_id = (u8)DMACH_PCM2_RX,
226 .rqtype = DEVTOMEM,
227 }, {
228 .peri_id = (u8)DMACH_PCM2_TX,
229 .rqtype = MEMTODEV,
142 }, 230 },
143}; 231};
144 232
145static struct platform_device s5pv210_device_pdma1 = { 233struct dma_pl330_platdata s5pv210_pdma1_pdata = {
146 .name = "s3c-pl330", 234 .nr_valid_peri = ARRAY_SIZE(pdma1_peri),
147 .id = 1, 235 .peri = pdma1_peri,
148 .num_resources = ARRAY_SIZE(s5pv210_pdma1_resource), 236};
149 .resource = s5pv210_pdma1_resource, 237
150 .dev = { 238struct amba_device s5pv210_device_pdma1 = {
239 .dev = {
240 .init_name = "dma-pl330.1",
151 .dma_mask = &dma_dmamask, 241 .dma_mask = &dma_dmamask,
152 .coherent_dma_mask = DMA_BIT_MASK(32), 242 .coherent_dma_mask = DMA_BIT_MASK(32),
153 .platform_data = &s5pv210_pdma1_pdata, 243 .platform_data = &s5pv210_pdma1_pdata,
154 }, 244 },
155}; 245 .res = {
156 246 .start = S5PV210_PA_PDMA1,
157static struct platform_device *s5pv210_dmacs[] __initdata = { 247 .end = S5PV210_PA_PDMA1 + SZ_4K,
158 &s5pv210_device_pdma0, 248 .flags = IORESOURCE_MEM,
159 &s5pv210_device_pdma1, 249 },
250 .irq = {IRQ_PDMA1, NO_IRQ},
251 .periphid = 0x00041330,
160}; 252};
161 253
162static int __init s5pv210_dma_init(void) 254static int __init s5pv210_dma_init(void)
163{ 255{
164 platform_add_devices(s5pv210_dmacs, ARRAY_SIZE(s5pv210_dmacs)); 256 amba_device_register(&s5pv210_device_pdma0, &iomem_resource);
165 257
166 return 0; 258 return 0;
167} 259}
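The hunk above swaps the private "s3c-pl330" platform devices for amba_devices, so the generic PL330 dmaengine driver binds to them via periphid 0x00041330 and the per-peripheral tables become dma_pl330_peri platform data. A minimal, hedged sketch of how a client can now acquire one of those peripherals through the standard dmaengine API (the filter mirrors the pl330_filter added in dma-ops.c further down; my_-prefixed names are illustrative and error handling is omitted):

	static bool my_pl330_filter(struct dma_chan *chan, void *param)
	{
		/* chan->private points at the dma_pl330_peri entry set up above */
		struct dma_pl330_peri *peri = chan->private;

		return peri->peri_id == (unsigned)param;
	}

	static struct dma_chan *my_get_uart0_rx(void)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* DMACH_UART0_RX is the first pdma1_peri entry above */
		return dma_request_channel(mask, my_pl330_filter,
					   (void *)DMACH_UART0_RX);
	}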
diff --git a/arch/arm/mach-s5pv210/include/mach/dma.h b/arch/arm/mach-s5pv210/include/mach/dma.h
index 81209eb1409b..201842a3769e 100644
--- a/arch/arm/mach-s5pv210/include/mach/dma.h
+++ b/arch/arm/mach-s5pv210/include/mach/dma.h
@@ -20,7 +20,7 @@
20#ifndef __MACH_DMA_H 20#ifndef __MACH_DMA_H
21#define __MACH_DMA_H 21#define __MACH_DMA_H
22 22
23/* This platform uses the common S3C DMA API driver for PL330 */ 23/* This platform uses the common DMA API driver for PL330 */
24#include <plat/s3c-dma-pl330.h> 24#include <plat/dma-pl330.h>
25 25
26#endif /* __MACH_DMA_H */ 26#endif /* __MACH_DMA_H */
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
index 539bd0e3defd..53754bcf15a7 100644
--- a/arch/arm/plat-s3c24xx/dma.c
+++ b/arch/arm/plat-s3c24xx/dma.c
@@ -1094,14 +1094,14 @@ EXPORT_SYMBOL(s3c2410_dma_config);
1094 * 1094 *
1095 * configure the dma source/destination hardware type and address 1095 * configure the dma source/destination hardware type and address
1096 * 1096 *
1097 * source: S3C2410_DMASRC_HW: source is hardware 1097 * source: DMA_FROM_DEVICE: source is hardware
1098 * S3C2410_DMASRC_MEM: source is memory 1098 * DMA_TO_DEVICE: source is memory
1099 * 1099 *
1100 * devaddr: physical address of the source 1100 * devaddr: physical address of the source
1101*/ 1101*/
1102 1102
1103int s3c2410_dma_devconfig(enum dma_ch channel, 1103int s3c2410_dma_devconfig(enum dma_ch channel,
1104 enum s3c2410_dmasrc source, 1104 enum dma_data_direction source,
1105 unsigned long devaddr) 1105 unsigned long devaddr)
1106{ 1106{
1107 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 1107 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
@@ -1131,7 +1131,7 @@ int s3c2410_dma_devconfig(enum dma_ch channel,
1131 hwcfg |= S3C2410_DISRCC_INC; 1131 hwcfg |= S3C2410_DISRCC_INC;
1132 1132
1133 switch (source) { 1133 switch (source) {
1134 case S3C2410_DMASRC_HW: 1134 case DMA_FROM_DEVICE:
1135 /* source is hardware */ 1135 /* source is hardware */
1136 pr_debug("%s: hw source, devaddr=%08lx, hwcfg=%d\n", 1136 pr_debug("%s: hw source, devaddr=%08lx, hwcfg=%d\n",
1137 __func__, devaddr, hwcfg); 1137 __func__, devaddr, hwcfg);
@@ -1142,7 +1142,7 @@ int s3c2410_dma_devconfig(enum dma_ch channel,
1142 chan->addr_reg = dma_regaddr(chan, S3C2410_DMA_DIDST); 1142 chan->addr_reg = dma_regaddr(chan, S3C2410_DMA_DIDST);
1143 break; 1143 break;
1144 1144
1145 case S3C2410_DMASRC_MEM: 1145 case DMA_TO_DEVICE:
1146 /* source is memory */ 1146 /* source is memory */
1147 pr_debug("%s: mem source, devaddr=%08lx, hwcfg=%d\n", 1147 pr_debug("%s: mem source, devaddr=%08lx, hwcfg=%d\n",
1148 __func__, devaddr, hwcfg); 1148 __func__, devaddr, hwcfg);
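The same substitution of the generic enum dma_data_direction for the Samsung-private enum s3c2410_dmasrc is applied at every call site; a hedged before/after sketch (the channel and the MY_SDI_FIFO address are placeholders, not taken from this hunk):

	/* before: Samsung-private direction tokens */
	s3c2410_dma_devconfig(DMACH_SDI, S3C2410_DMASRC_HW, MY_SDI_FIFO);

	/* after: generic directions from <linux/dma-mapping.h> */
	s3c2410_dma_devconfig(DMACH_SDI, DMA_FROM_DEVICE, MY_SDI_FIFO); /* dev -> mem */
	s3c2410_dma_devconfig(DMACH_SDI, DMA_TO_DEVICE, MY_SDI_FIFO);   /* mem -> dev */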
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index 3895f9aff0dc..7a96198e3a78 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -295,11 +295,14 @@ config S3C_DMA
295 help 295 help
296 Internal configuration for S3C DMA core 296 Internal configuration for S3C DMA core
297 297
298config S3C_PL330_DMA 298config SAMSUNG_DMADEV
299 bool 299 bool
300 select PL330 300 select DMADEVICES
301 select PL330_DMA if (CPU_EXYNOS4210 || CPU_S5PV210 || CPU_S5PC100 || \
302 CPU_S5P6450 || CPU_S5P6440)
303 select ARM_AMBA
301 help 304 help
 302 S3C DMA API Driver for PL330 DMAC. 305 Use the DMA device engine for the PL330 DMAC.
303 306
304comment "Power management" 307comment "Power management"
305 308
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index 09adb84f2718..3dd5dbad55c6 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -62,9 +62,9 @@ obj-$(CONFIG_SAMSUNG_DEV_BACKLIGHT) += dev-backlight.o
62 62
63# DMA support 63# DMA support
64 64
65obj-$(CONFIG_S3C_DMA) += dma.o 65obj-$(CONFIG_S3C_DMA) += dma.o s3c-dma-ops.o
66 66
67obj-$(CONFIG_S3C_PL330_DMA) += s3c-pl330.o 67obj-$(CONFIG_SAMSUNG_DMADEV) += dma-ops.o
68 68
69# PM support 69# PM support
70 70
diff --git a/arch/arm/plat-samsung/dma-ops.c b/arch/arm/plat-samsung/dma-ops.c
new file mode 100644
index 000000000000..6e3d9abc9e2e
--- /dev/null
+++ b/arch/arm/plat-samsung/dma-ops.c
@@ -0,0 +1,131 @@
1/* linux/arch/arm/plat-samsung/dma-ops.c
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Samsung DMA Operations
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/kernel.h>
14#include <linux/errno.h>
15#include <linux/amba/pl330.h>
16#include <linux/scatterlist.h>
17
18#include <mach/dma.h>
19
20static inline bool pl330_filter(struct dma_chan *chan, void *param)
21{
22 struct dma_pl330_peri *peri = chan->private;
23 return peri->peri_id == (unsigned)param;
24}
25
26static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
27 struct samsung_dma_info *info)
28{
29 struct dma_chan *chan;
30 dma_cap_mask_t mask;
31 struct dma_slave_config slave_config;
32
33 dma_cap_zero(mask);
34 dma_cap_set(info->cap, mask);
35
36 chan = dma_request_channel(mask, pl330_filter, (void *)dma_ch);
37
38 if (info->direction == DMA_FROM_DEVICE) {
39 memset(&slave_config, 0, sizeof(struct dma_slave_config));
40 slave_config.direction = info->direction;
41 slave_config.src_addr = info->fifo;
42 slave_config.src_addr_width = info->width;
43 slave_config.src_maxburst = 1;
44 dmaengine_slave_config(chan, &slave_config);
45 } else if (info->direction == DMA_TO_DEVICE) {
46 memset(&slave_config, 0, sizeof(struct dma_slave_config));
47 slave_config.direction = info->direction;
48 slave_config.dst_addr = info->fifo;
49 slave_config.dst_addr_width = info->width;
50 slave_config.dst_maxburst = 1;
51 dmaengine_slave_config(chan, &slave_config);
52 }
53
54 return (unsigned)chan;
55}
56
57static int samsung_dmadev_release(unsigned ch,
58 struct s3c2410_dma_client *client)
59{
60 dma_release_channel((struct dma_chan *)ch);
61
62 return 0;
63}
64
65static int samsung_dmadev_prepare(unsigned ch,
66 struct samsung_dma_prep_info *info)
67{
68 struct scatterlist sg;
69 struct dma_chan *chan = (struct dma_chan *)ch;
70 struct dma_async_tx_descriptor *desc;
71
72 switch (info->cap) {
73 case DMA_SLAVE:
74 sg_init_table(&sg, 1);
75 sg_dma_len(&sg) = info->len;
76 sg_set_page(&sg, pfn_to_page(PFN_DOWN(info->buf)),
77 info->len, offset_in_page(info->buf));
78 sg_dma_address(&sg) = info->buf;
79
80 desc = chan->device->device_prep_slave_sg(chan,
81 &sg, 1, info->direction, DMA_PREP_INTERRUPT);
82 break;
83 case DMA_CYCLIC:
84 desc = chan->device->device_prep_dma_cyclic(chan,
85 info->buf, info->len, info->period, info->direction);
86 break;
87 default:
88 dev_err(&chan->dev->device, "unsupported format\n");
89 return -EFAULT;
90 }
91
92 if (!desc) {
93 dev_err(&chan->dev->device, "cannot prepare cyclic dma\n");
94 return -EFAULT;
95 }
96
97 desc->callback = info->fp;
98 desc->callback_param = info->fp_param;
99
100 dmaengine_submit((struct dma_async_tx_descriptor *)desc);
101
102 return 0;
103}
104
105static inline int samsung_dmadev_trigger(unsigned ch)
106{
107 dma_async_issue_pending((struct dma_chan *)ch);
108
109 return 0;
110}
111
112static inline int samsung_dmadev_flush(unsigned ch)
113{
114 return dmaengine_terminate_all((struct dma_chan *)ch);
115}
116
117struct samsung_dma_ops dmadev_ops = {
118 .request = samsung_dmadev_request,
119 .release = samsung_dmadev_release,
120 .prepare = samsung_dmadev_prepare,
121 .trigger = samsung_dmadev_trigger,
122 .started = NULL,
123 .flush = samsung_dmadev_flush,
124 .stop = samsung_dmadev_flush,
125};
126
127void *samsung_dmadev_get_ops(void)
128{
129 return &dmadev_ops;
130}
131EXPORT_SYMBOL(samsung_dmadev_get_ops);
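The ops table above hands channels back as an unsigned cookie (the struct dma_chan pointer cast down) so that this dmaengine backend and the legacy s3c-dma-ops.c backend below can share a single interface. A minimal, hedged sketch of a client driving one slave transfer through it (my_-prefixed names and the sizes are illustrative):

	struct samsung_dma_ops *ops = samsung_dma_get_ops();
	struct samsung_dma_info info = {
		.cap		= DMA_SLAVE,
		.direction	= DMA_FROM_DEVICE,
		.width		= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.fifo		= my_fifo_phys,		/* placeholder FIFO address */
	};
	struct samsung_dma_prep_info prep = {
		.cap		= DMA_SLAVE,
		.direction	= DMA_FROM_DEVICE,
		.buf		= my_buf,		/* placeholder dma_addr_t */
		.len		= 512,
		.fp		= my_complete,		/* placeholder callback */
		.fp_param	= my_dev,
	};
	unsigned ch = ops->request(DMACH_UART0_RX, &info);

	ops->prepare(ch, &prep);
	ops->trigger(ch);		/* dma_async_issue_pending() underneath */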
diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
new file mode 100644
index 000000000000..4c1a363526cf
--- /dev/null
+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
@@ -0,0 +1,63 @@
1/* arch/arm/plat-samsung/include/plat/dma-ops.h
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Samsung DMA support
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#ifndef __SAMSUNG_DMA_OPS_H_
14#define __SAMSUNG_DMA_OPS_H_ __FILE__
15
16#include <linux/dmaengine.h>
17
18struct samsung_dma_prep_info {
19 enum dma_transaction_type cap;
20 enum dma_data_direction direction;
21 dma_addr_t buf;
22 unsigned long period;
23 unsigned long len;
24 void (*fp)(void *data);
25 void *fp_param;
26};
27
28struct samsung_dma_info {
29 enum dma_transaction_type cap;
30 enum dma_data_direction direction;
31 enum dma_slave_buswidth width;
32 dma_addr_t fifo;
33 struct s3c2410_dma_client *client;
34};
35
36struct samsung_dma_ops {
37 unsigned (*request)(enum dma_ch ch, struct samsung_dma_info *info);
38 int (*release)(unsigned ch, struct s3c2410_dma_client *client);
39 int (*prepare)(unsigned ch, struct samsung_dma_prep_info *info);
40 int (*trigger)(unsigned ch);
41 int (*started)(unsigned ch);
42 int (*flush)(unsigned ch);
43 int (*stop)(unsigned ch);
44};
45
46extern void *samsung_dmadev_get_ops(void);
47extern void *s3c_dma_get_ops(void);
48
49static inline void *__samsung_dma_get_ops(void)
50{
51 if (samsung_dma_is_dmadev())
52 return samsung_dmadev_get_ops();
53 else
54 return s3c_dma_get_ops();
55}
56
57/*
58 * samsung_dma_get_ops
59 * get the set of samsung dma operations
60 */
61#define samsung_dma_get_ops() __samsung_dma_get_ops()
62
63#endif /* __SAMSUNG_DMA_OPS_H_ */
diff --git a/arch/arm/plat-samsung/include/plat/s3c-dma-pl330.h b/arch/arm/plat-samsung/include/plat/dma-pl330.h
index 810744213120..2e55e5958674 100644
--- a/arch/arm/plat-samsung/include/plat/s3c-dma-pl330.h
+++ b/arch/arm/plat-samsung/include/plat/dma-pl330.h
@@ -8,11 +8,8 @@
8 * (at your option) any later version. 8 * (at your option) any later version.
9 */ 9 */
10 10
11#ifndef __S3C_DMA_PL330_H_ 11#ifndef __DMA_PL330_H_
12#define __S3C_DMA_PL330_H_ 12#define __DMA_PL330_H_ __FILE__
13
14#define S3C2410_DMAF_AUTOSTART (1 << 0)
15#define S3C2410_DMAF_CIRCULAR (1 << 1)
16 13
17/* 14/*
18 * PL330 can assign any channel to communicate with 15 * PL330 can assign any channel to communicate with
@@ -20,7 +17,7 @@
20 * For the sake of consistency across client drivers, 17 * For the sake of consistency across client drivers,
21 * We keep the channel names unchanged and only add 18 * We keep the channel names unchanged and only add
 22 * the missing peripherals. 19 * the missing peripherals.
 23 * Order is not important since S3C PL330 API driver 20 * Order is not important since DMA PL330 API driver
 24 * uses these just as IDs. 21 * uses these just as IDs.
25 */ 22 */
26enum dma_ch { 23enum dma_ch {
@@ -88,11 +85,20 @@ enum dma_ch {
88 DMACH_MAX, 85 DMACH_MAX,
89}; 86};
90 87
91static inline bool s3c_dma_has_circular(void) 88struct s3c2410_dma_client {
89 char *name;
90};
91
92static inline bool samsung_dma_has_circular(void)
93{
94 return true;
95}
96
97static inline bool samsung_dma_is_dmadev(void)
92{ 98{
93 return true; 99 return true;
94} 100}
95 101
96#include <plat/dma.h> 102#include <plat/dma-ops.h>
97 103
98#endif /* __S3C_DMA_PL330_H_ */ 104#endif /* __DMA_PL330_H_ */
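samsung_dma_is_dmadev() is the compile-time switch consumed by __samsung_dma_get_ops() in dma-ops.h: platforms that include this PL330 header get the dmaengine backend, while legacy S3C-DMA platforms are expected to provide a variant returning false. A hedged illustration of that other side (hypothetical, not part of this hunk):

	/* hypothetical fragment of a legacy S3C24xx <mach/dma.h> */
	static inline bool samsung_dma_is_dmadev(void)
	{
		/* makes __samsung_dma_get_ops() resolve to s3c_dma_get_ops() */
		return false;
	}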
diff --git a/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h b/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h
index ab9bce637cbd..1c1ed5481253 100644
--- a/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h
+++ b/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h
@@ -41,7 +41,7 @@ struct s3c24xx_dma_selection {
41 41
42 void (*direction)(struct s3c2410_dma_chan *chan, 42 void (*direction)(struct s3c2410_dma_chan *chan,
43 struct s3c24xx_dma_map *map, 43 struct s3c24xx_dma_map *map,
44 enum s3c2410_dmasrc dir); 44 enum dma_data_direction dir);
45}; 45};
46 46
47extern int s3c24xx_dma_init_map(struct s3c24xx_dma_selection *sel); 47extern int s3c24xx_dma_init_map(struct s3c24xx_dma_selection *sel);
diff --git a/arch/arm/plat-samsung/include/plat/dma.h b/arch/arm/plat-samsung/include/plat/dma.h
index 8c273b7a6f56..b9061128abde 100644
--- a/arch/arm/plat-samsung/include/plat/dma.h
+++ b/arch/arm/plat-samsung/include/plat/dma.h
@@ -10,17 +10,14 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11*/ 11*/
12 12
13#include <linux/dma-mapping.h>
14
13enum s3c2410_dma_buffresult { 15enum s3c2410_dma_buffresult {
14 S3C2410_RES_OK, 16 S3C2410_RES_OK,
15 S3C2410_RES_ERR, 17 S3C2410_RES_ERR,
16 S3C2410_RES_ABORT 18 S3C2410_RES_ABORT
17}; 19};
18 20
19enum s3c2410_dmasrc {
 20 S3C2410_DMASRC_HW, /* source is hardware */
 21 S3C2410_DMASRC_MEM /* source is memory */
22};
23
24/* enum s3c2410_chan_op 21/* enum s3c2410_chan_op
25 * 22 *
26 * operation codes passed to the DMA code by the user, and also used 23 * operation codes passed to the DMA code by the user, and also used
@@ -112,7 +109,7 @@ extern int s3c2410_dma_config(enum dma_ch channel, int xferunit);
112*/ 109*/
113 110
114extern int s3c2410_dma_devconfig(enum dma_ch channel, 111extern int s3c2410_dma_devconfig(enum dma_ch channel,
115 enum s3c2410_dmasrc source, unsigned long devaddr); 112 enum dma_data_direction source, unsigned long devaddr);
116 113
117/* s3c2410_dma_getposition 114/* s3c2410_dma_getposition
118 * 115 *
@@ -126,3 +123,4 @@ extern int s3c2410_dma_set_opfn(enum dma_ch, s3c2410_dma_opfn_t rtn);
126extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn); 123extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn);
127 124
128 125
126#include <plat/dma-ops.h>
diff --git a/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h b/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h
deleted file mode 100644
index bf5e2a9d408d..000000000000
--- a/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/* linux/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h
2 *
3 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
4 * Jaswinder Singh <jassi.brar@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __S3C_PL330_PDATA_H
13#define __S3C_PL330_PDATA_H
14
15#include <plat/s3c-dma-pl330.h>
16
17/*
18 * Every PL330 DMAC has max 32 peripheral interfaces,
 19 * of which some may not be really used in your
20 * DMAC's configuration.
21 * Populate this array of 32 peri i/fs with relevant
22 * channel IDs for used peri i/f and DMACH_MAX for
23 * those unused.
24 *
25 * The platforms just need to provide this info
26 * to the S3C DMA API driver for PL330.
27 */
28struct s3c_pl330_platdata {
29 enum dma_ch peri[32];
30};
31
32#endif /* __S3C_PL330_PDATA_H */
diff --git a/arch/arm/plat-samsung/s3c-dma-ops.c b/arch/arm/plat-samsung/s3c-dma-ops.c
new file mode 100644
index 000000000000..582333c70585
--- /dev/null
+++ b/arch/arm/plat-samsung/s3c-dma-ops.c
@@ -0,0 +1,130 @@
1/* linux/arch/arm/plat-samsung/s3c-dma-ops.c
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Samsung S3C-DMA Operations
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/kernel.h>
14#include <linux/errno.h>
15#include <linux/slab.h>
16#include <linux/types.h>
17
18#include <mach/dma.h>
19
20struct cb_data {
21 void (*fp) (void *);
22 void *fp_param;
23 unsigned ch;
24 struct list_head node;
25};
26
27static LIST_HEAD(dma_list);
28
29static void s3c_dma_cb(struct s3c2410_dma_chan *channel, void *param,
30 int size, enum s3c2410_dma_buffresult res)
31{
32 struct cb_data *data = param;
33
34 data->fp(data->fp_param);
35}
36
37static unsigned s3c_dma_request(enum dma_ch dma_ch,
38 struct samsung_dma_info *info)
39{
40 struct cb_data *data;
41
42 if (s3c2410_dma_request(dma_ch, info->client, NULL) < 0) {
43 s3c2410_dma_free(dma_ch, info->client);
44 return 0;
45 }
46
47 data = kzalloc(sizeof(struct cb_data), GFP_KERNEL);
48 data->ch = dma_ch;
49 list_add_tail(&data->node, &dma_list);
50
51 s3c2410_dma_devconfig(dma_ch, info->direction, info->fifo);
52
53 if (info->cap == DMA_CYCLIC)
54 s3c2410_dma_setflags(dma_ch, S3C2410_DMAF_CIRCULAR);
55
56 s3c2410_dma_config(dma_ch, info->width);
57
58 return (unsigned)dma_ch;
59}
60
61static int s3c_dma_release(unsigned ch, struct s3c2410_dma_client *client)
62{
63 struct cb_data *data;
64
65 list_for_each_entry(data, &dma_list, node)
66 if (data->ch == ch)
67 break;
68 list_del(&data->node);
69
70 s3c2410_dma_free(ch, client);
71 kfree(data);
72
73 return 0;
74}
75
76static int s3c_dma_prepare(unsigned ch, struct samsung_dma_prep_info *info)
77{
78 struct cb_data *data;
79 int len = (info->cap == DMA_CYCLIC) ? info->period : info->len;
80
81 list_for_each_entry(data, &dma_list, node)
82 if (data->ch == ch)
83 break;
84
85 if (!data->fp) {
86 s3c2410_dma_set_buffdone_fn(ch, s3c_dma_cb);
87 data->fp = info->fp;
88 data->fp_param = info->fp_param;
89 }
90
91 s3c2410_dma_enqueue(ch, (void *)data, info->buf, len);
92
93 return 0;
94}
95
96static inline int s3c_dma_trigger(unsigned ch)
97{
98 return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_START);
99}
100
101static inline int s3c_dma_started(unsigned ch)
102{
103 return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STARTED);
104}
105
106static inline int s3c_dma_flush(unsigned ch)
107{
108 return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_FLUSH);
109}
110
111static inline int s3c_dma_stop(unsigned ch)
112{
113 return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STOP);
114}
115
116static struct samsung_dma_ops s3c_dma_ops = {
117 .request = s3c_dma_request,
118 .release = s3c_dma_release,
119 .prepare = s3c_dma_prepare,
120 .trigger = s3c_dma_trigger,
121 .started = s3c_dma_started,
122 .flush = s3c_dma_flush,
123 .stop = s3c_dma_stop,
124};
125
126void *s3c_dma_get_ops(void)
127{
128 return &s3c_dma_ops;
129}
130EXPORT_SYMBOL(s3c_dma_get_ops);
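For cyclic transfers this legacy backend enqueues one period per prepare() call and relies on the S3C2410_DMAF_CIRCULAR flag (set in s3c_dma_request() above) to keep buffers recycling. A hedged sketch of priming a two-period ring (the buffer, device, and callback names are placeholders):

	struct samsung_dma_ops *ops = s3c_dma_get_ops();
	struct samsung_dma_prep_info prep = {
		.cap		= DMA_CYCLIC,
		.direction	= DMA_TO_DEVICE,
		.buf		= my_buf,		/* placeholder dma_addr_t */
		.period		= 4096,			/* s3c_dma_prepare() enqueues this much */
		.fp		= my_period_done,	/* called from s3c_dma_cb() per buffer */
		.fp_param	= my_dev,
	};

	ops->prepare(ch, &prep);		/* first period */
	prep.buf += prep.period;
	ops->prepare(ch, &prep);		/* second period */
	ops->trigger(ch);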
diff --git a/arch/arm/plat-samsung/s3c-pl330.c b/arch/arm/plat-samsung/s3c-pl330.c
deleted file mode 100644
index f85638c6f5ae..000000000000
--- a/arch/arm/plat-samsung/s3c-pl330.c
+++ /dev/null
@@ -1,1244 +0,0 @@
1/* linux/arch/arm/plat-samsung/s3c-pl330.c
2 *
3 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
4 * Jaswinder Singh <jassi.brar@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/init.h>
13#include <linux/module.h>
14#include <linux/interrupt.h>
15#include <linux/io.h>
16#include <linux/slab.h>
17#include <linux/platform_device.h>
18#include <linux/clk.h>
19#include <linux/err.h>
20
21#include <asm/hardware/pl330.h>
22
23#include <plat/s3c-pl330-pdata.h>
24
25/**
26 * struct s3c_pl330_dmac - Logical representation of a PL330 DMAC.
27 * @busy_chan: Number of channels currently busy.
28 * @peri: List of IDs of peripherals this DMAC can work with.
29 * @node: To attach to the global list of DMACs.
30 * @pi: PL330 configuration info for the DMAC.
31 * @kmcache: Pool to quickly allocate xfers for all channels in the dmac.
 32 * @clk: Pointer to the DMAC operation clock.
33 */
34struct s3c_pl330_dmac {
35 unsigned busy_chan;
36 enum dma_ch *peri;
37 struct list_head node;
38 struct pl330_info *pi;
39 struct kmem_cache *kmcache;
40 struct clk *clk;
41};
42
43/**
44 * struct s3c_pl330_xfer - A request submitted by S3C DMA clients.
45 * @token: Xfer ID provided by the client.
46 * @node: To attach to the list of xfers on a channel.
47 * @px: Xfer for PL330 core.
48 * @chan: Owner channel of this xfer.
49 */
50struct s3c_pl330_xfer {
51 void *token;
52 struct list_head node;
53 struct pl330_xfer px;
54 struct s3c_pl330_chan *chan;
55};
56
57/**
58 * struct s3c_pl330_chan - Logical channel to communicate with
59 * a Physical peripheral.
60 * @pl330_chan_id: Token of a hardware channel thread of PL330 DMAC.
61 * NULL if the channel is available to be acquired.
62 * @id: ID of the peripheral that this channel can communicate with.
63 * @options: Options specified by the client.
64 * @sdaddr: Address provided via s3c2410_dma_devconfig.
65 * @node: To attach to the global list of channels.
66 * @lrq: Pointer to the last submitted pl330_req to PL330 core.
67 * @xfer_list: To manage list of xfers enqueued.
68 * @req: Two requests to communicate with the PL330 engine.
69 * @callback_fn: Callback function to the client.
70 * @rqcfg: Channel configuration for the xfers.
71 * @xfer_head: Pointer to the xfer to be next executed.
72 * @dmac: Pointer to the DMAC that manages this channel, NULL if the
73 * channel is available to be acquired.
74 * @client: Client of this channel. NULL if the
75 * channel is available to be acquired.
76 */
77struct s3c_pl330_chan {
78 void *pl330_chan_id;
79 enum dma_ch id;
80 unsigned int options;
81 unsigned long sdaddr;
82 struct list_head node;
83 struct pl330_req *lrq;
84 struct list_head xfer_list;
85 struct pl330_req req[2];
86 s3c2410_dma_cbfn_t callback_fn;
87 struct pl330_reqcfg rqcfg;
88 struct s3c_pl330_xfer *xfer_head;
89 struct s3c_pl330_dmac *dmac;
90 struct s3c2410_dma_client *client;
91};
92
93/* All DMACs in the platform */
94static LIST_HEAD(dmac_list);
95
96/* All channels to peripherals in the platform */
97static LIST_HEAD(chan_list);
98
99/*
100 * Since we add resources(DMACs and Channels) to the global pool,
101 * we need to guard access to the resources using a global lock
102 */
103static DEFINE_SPINLOCK(res_lock);
104
105/* Returns the channel with ID 'id' in the chan_list */
106static struct s3c_pl330_chan *id_to_chan(const enum dma_ch id)
107{
108 struct s3c_pl330_chan *ch;
109
110 list_for_each_entry(ch, &chan_list, node)
111 if (ch->id == id)
112 return ch;
113
114 return NULL;
115}
116
117/* Allocate a new channel with ID 'id' and add to chan_list */
118static void chan_add(const enum dma_ch id)
119{
120 struct s3c_pl330_chan *ch = id_to_chan(id);
121
122 /* Return if the channel already exists */
123 if (ch)
124 return;
125
126 ch = kmalloc(sizeof(*ch), GFP_KERNEL);
127 /* Return silently to work with other channels */
128 if (!ch)
129 return;
130
131 ch->id = id;
132 ch->dmac = NULL;
133
134 list_add_tail(&ch->node, &chan_list);
135}
136
137/* If the channel is not yet acquired by any client */
138static bool chan_free(struct s3c_pl330_chan *ch)
139{
140 if (!ch)
141 return false;
142
143 /* Channel points to some DMAC only when it's acquired */
144 return ch->dmac ? false : true;
145}
146
147/*
 148 * Returns 0 if peripheral i/f is invalid or not present on the dmac.
149 * Index + 1, otherwise.
150 */
151static unsigned iface_of_dmac(struct s3c_pl330_dmac *dmac, enum dma_ch ch_id)
152{
153 enum dma_ch *id = dmac->peri;
154 int i;
155
156 /* Discount invalid markers */
157 if (ch_id == DMACH_MAX)
158 return 0;
159
160 for (i = 0; i < PL330_MAX_PERI; i++)
161 if (id[i] == ch_id)
162 return i + 1;
163
164 return 0;
165}
166
167/* If all channel threads of the DMAC are busy */
168static inline bool dmac_busy(struct s3c_pl330_dmac *dmac)
169{
170 struct pl330_info *pi = dmac->pi;
171
172 return (dmac->busy_chan < pi->pcfg.num_chan) ? false : true;
173}
174
175/*
176 * Returns the number of free channels that
177 * can be handled by this dmac only.
178 */
179static unsigned ch_onlyby_dmac(struct s3c_pl330_dmac *dmac)
180{
181 enum dma_ch *id = dmac->peri;
182 struct s3c_pl330_dmac *d;
183 struct s3c_pl330_chan *ch;
184 unsigned found, count = 0;
185 enum dma_ch p;
186 int i;
187
188 for (i = 0; i < PL330_MAX_PERI; i++) {
189 p = id[i];
190 ch = id_to_chan(p);
191
192 if (p == DMACH_MAX || !chan_free(ch))
193 continue;
194
195 found = 0;
196 list_for_each_entry(d, &dmac_list, node) {
197 if (d != dmac && iface_of_dmac(d, ch->id)) {
198 found = 1;
199 break;
200 }
201 }
202 if (!found)
203 count++;
204 }
205
206 return count;
207}
208
209/*
210 * Measure of suitability of 'dmac' handling 'ch'
211 *
 212 * 0 indicates 'dmac' cannot handle 'ch' either
213 * because it is not supported by the hardware or
214 * because all dmac channels are currently busy.
215 *
 216 * >0 value indicates 'dmac' has the capability.
217 * The bigger the value the more suitable the dmac.
218 */
219#define MAX_SUIT UINT_MAX
220#define MIN_SUIT 0
221
 222static unsigned suitability(struct s3c_pl330_dmac *dmac,
223 struct s3c_pl330_chan *ch)
224{
225 struct pl330_info *pi = dmac->pi;
226 enum dma_ch *id = dmac->peri;
227 struct s3c_pl330_dmac *d;
228 unsigned s;
229 int i;
230
231 s = MIN_SUIT;
232 /* If all the DMAC channel threads are busy */
233 if (dmac_busy(dmac))
234 return s;
235
236 for (i = 0; i < PL330_MAX_PERI; i++)
237 if (id[i] == ch->id)
238 break;
239
240 /* If the 'dmac' can't talk to 'ch' */
241 if (i == PL330_MAX_PERI)
242 return s;
243
244 s = MAX_SUIT;
245 list_for_each_entry(d, &dmac_list, node) {
246 /*
247 * If some other dmac can talk to this
248 * peri and has some channel free.
249 */
250 if (d != dmac && iface_of_dmac(d, ch->id) && !dmac_busy(d)) {
251 s = 0;
252 break;
253 }
254 }
255 if (s)
256 return s;
257
258 s = 100;
259
260 /* Good if free chans are more, bad otherwise */
261 s += (pi->pcfg.num_chan - dmac->busy_chan) - ch_onlyby_dmac(dmac);
262
263 return s;
264}
265
266/* More than one DMAC may have capability to transfer data with the
267 * peripheral. This function assigns most suitable DMAC to manage the
268 * channel and hence communicate with the peripheral.
269 */
270static struct s3c_pl330_dmac *map_chan_to_dmac(struct s3c_pl330_chan *ch)
271{
272 struct s3c_pl330_dmac *d, *dmac = NULL;
273 unsigned sn, sl = MIN_SUIT;
274
275 list_for_each_entry(d, &dmac_list, node) {
 276 sn = suitability(d, ch);
277
278 if (sn == MAX_SUIT)
279 return d;
280
281 if (sn > sl)
282 dmac = d;
283 }
284
285 return dmac;
286}
287
288/* Acquire the channel for peripheral 'id' */
289static struct s3c_pl330_chan *chan_acquire(const enum dma_ch id)
290{
291 struct s3c_pl330_chan *ch = id_to_chan(id);
292 struct s3c_pl330_dmac *dmac;
293
294 /* If the channel doesn't exist or is already acquired */
295 if (!ch || !chan_free(ch)) {
296 ch = NULL;
297 goto acq_exit;
298 }
299
300 dmac = map_chan_to_dmac(ch);
301 /* If couldn't map */
302 if (!dmac) {
303 ch = NULL;
304 goto acq_exit;
305 }
306
307 dmac->busy_chan++;
308 ch->dmac = dmac;
309
310acq_exit:
311 return ch;
312}
313
314/* Delete xfer from the queue */
315static inline void del_from_queue(struct s3c_pl330_xfer *xfer)
316{
317 struct s3c_pl330_xfer *t;
318 struct s3c_pl330_chan *ch;
319 int found;
320
321 if (!xfer)
322 return;
323
324 ch = xfer->chan;
325
326 /* Make sure xfer is in the queue */
327 found = 0;
328 list_for_each_entry(t, &ch->xfer_list, node)
329 if (t == xfer) {
330 found = 1;
331 break;
332 }
333
334 if (!found)
335 return;
336
337 /* If xfer is last entry in the queue */
338 if (xfer->node.next == &ch->xfer_list)
339 t = list_entry(ch->xfer_list.next,
340 struct s3c_pl330_xfer, node);
341 else
342 t = list_entry(xfer->node.next,
343 struct s3c_pl330_xfer, node);
344
345 /* If there was only one node left */
346 if (t == xfer)
347 ch->xfer_head = NULL;
348 else if (ch->xfer_head == xfer)
349 ch->xfer_head = t;
350
351 list_del(&xfer->node);
352}
353
354/* Provides pointer to the next xfer in the queue.
355 * If CIRCULAR option is set, the list is left intact,
356 * otherwise the xfer is removed from the list.
357 * Forced delete 'pluck' can be set to override the CIRCULAR option.
358 */
359static struct s3c_pl330_xfer *get_from_queue(struct s3c_pl330_chan *ch,
360 int pluck)
361{
362 struct s3c_pl330_xfer *xfer = ch->xfer_head;
363
364 if (!xfer)
365 return NULL;
366
367 /* If xfer is last entry in the queue */
368 if (xfer->node.next == &ch->xfer_list)
369 ch->xfer_head = list_entry(ch->xfer_list.next,
370 struct s3c_pl330_xfer, node);
371 else
372 ch->xfer_head = list_entry(xfer->node.next,
373 struct s3c_pl330_xfer, node);
374
375 if (pluck || !(ch->options & S3C2410_DMAF_CIRCULAR))
376 del_from_queue(xfer);
377
378 return xfer;
379}
380
381static inline void add_to_queue(struct s3c_pl330_chan *ch,
382 struct s3c_pl330_xfer *xfer, int front)
383{
384 struct pl330_xfer *xt;
385
386 /* If queue empty */
387 if (ch->xfer_head == NULL)
388 ch->xfer_head = xfer;
389
390 xt = &ch->xfer_head->px;
391 /* If the head already submitted (CIRCULAR head) */
392 if (ch->options & S3C2410_DMAF_CIRCULAR &&
393 (xt == ch->req[0].x || xt == ch->req[1].x))
394 ch->xfer_head = xfer;
395
396 /* If this is a resubmission, it should go at the head */
397 if (front) {
398 ch->xfer_head = xfer;
399 list_add(&xfer->node, &ch->xfer_list);
400 } else {
401 list_add_tail(&xfer->node, &ch->xfer_list);
402 }
403}
404
405static inline void _finish_off(struct s3c_pl330_xfer *xfer,
406 enum s3c2410_dma_buffresult res, int ffree)
407{
408 struct s3c_pl330_chan *ch;
409
410 if (!xfer)
411 return;
412
413 ch = xfer->chan;
414
415 /* Do callback */
416 if (ch->callback_fn)
417 ch->callback_fn(NULL, xfer->token, xfer->px.bytes, res);
418
419 /* Force Free or if buffer is not needed anymore */
420 if (ffree || !(ch->options & S3C2410_DMAF_CIRCULAR))
421 kmem_cache_free(ch->dmac->kmcache, xfer);
422}
423
424static inline int s3c_pl330_submit(struct s3c_pl330_chan *ch,
425 struct pl330_req *r)
426{
427 struct s3c_pl330_xfer *xfer;
428 int ret = 0;
429
430 /* If already submitted */
431 if (r->x)
432 return 0;
433
434 xfer = get_from_queue(ch, 0);
435 if (xfer) {
436 r->x = &xfer->px;
437
438 /* Use max bandwidth for M<->M xfers */
439 if (r->rqtype == MEMTOMEM) {
440 struct pl330_info *pi = xfer->chan->dmac->pi;
441 int burst = 1 << ch->rqcfg.brst_size;
442 u32 bytes = r->x->bytes;
443 int bl;
444
445 bl = pi->pcfg.data_bus_width / 8;
446 bl *= pi->pcfg.data_buf_dep;
447 bl /= burst;
448
449 /* src/dst_burst_len can't be more than 16 */
450 if (bl > 16)
451 bl = 16;
452
453 while (bl > 1) {
454 if (!(bytes % (bl * burst)))
455 break;
456 bl--;
457 }
458
459 ch->rqcfg.brst_len = bl;
460 } else {
461 ch->rqcfg.brst_len = 1;
462 }
463
464 ret = pl330_submit_req(ch->pl330_chan_id, r);
465
466 /* If submission was successful */
467 if (!ret) {
468 ch->lrq = r; /* latest submitted req */
469 return 0;
470 }
471
472 r->x = NULL;
473
474 /* If both of the PL330 ping-pong buffers filled */
475 if (ret == -EAGAIN) {
476 dev_err(ch->dmac->pi->dev, "%s:%d!\n",
477 __func__, __LINE__);
478 /* Queue back again */
479 add_to_queue(ch, xfer, 1);
480 ret = 0;
481 } else {
482 dev_err(ch->dmac->pi->dev, "%s:%d!\n",
483 __func__, __LINE__);
484 _finish_off(xfer, S3C2410_RES_ERR, 0);
485 }
486 }
487
488 return ret;
489}
490
491static void s3c_pl330_rq(struct s3c_pl330_chan *ch,
492 struct pl330_req *r, enum pl330_op_err err)
493{
494 unsigned long flags;
495 struct s3c_pl330_xfer *xfer;
496 struct pl330_xfer *xl = r->x;
497 enum s3c2410_dma_buffresult res;
498
499 spin_lock_irqsave(&res_lock, flags);
500
501 r->x = NULL;
502
503 s3c_pl330_submit(ch, r);
504
505 spin_unlock_irqrestore(&res_lock, flags);
506
507 /* Map result to S3C DMA API */
508 if (err == PL330_ERR_NONE)
509 res = S3C2410_RES_OK;
510 else if (err == PL330_ERR_ABORT)
511 res = S3C2410_RES_ABORT;
512 else
513 res = S3C2410_RES_ERR;
514
515 /* If last request had some xfer */
516 if (xl) {
517 xfer = container_of(xl, struct s3c_pl330_xfer, px);
518 _finish_off(xfer, res, 0);
519 } else {
520 dev_info(ch->dmac->pi->dev, "%s:%d No Xfer?!\n",
521 __func__, __LINE__);
522 }
523}
524
525static void s3c_pl330_rq0(void *token, enum pl330_op_err err)
526{
527 struct pl330_req *r = token;
528 struct s3c_pl330_chan *ch = container_of(r,
529 struct s3c_pl330_chan, req[0]);
530 s3c_pl330_rq(ch, r, err);
531}
532
533static void s3c_pl330_rq1(void *token, enum pl330_op_err err)
534{
535 struct pl330_req *r = token;
536 struct s3c_pl330_chan *ch = container_of(r,
537 struct s3c_pl330_chan, req[1]);
538 s3c_pl330_rq(ch, r, err);
539}
540
541/* Release an acquired channel */
542static void chan_release(struct s3c_pl330_chan *ch)
543{
544 struct s3c_pl330_dmac *dmac;
545
546 if (chan_free(ch))
547 return;
548
549 dmac = ch->dmac;
550 ch->dmac = NULL;
551 dmac->busy_chan--;
552}
553
554int s3c2410_dma_ctrl(enum dma_ch id, enum s3c2410_chan_op op)
555{
556 struct s3c_pl330_xfer *xfer;
557 enum pl330_chan_op pl330op;
558 struct s3c_pl330_chan *ch;
559 unsigned long flags;
560 int idx, ret;
561
562 spin_lock_irqsave(&res_lock, flags);
563
564 ch = id_to_chan(id);
565
566 if (!ch || chan_free(ch)) {
567 ret = -EINVAL;
568 goto ctrl_exit;
569 }
570
571 switch (op) {
572 case S3C2410_DMAOP_START:
573 /* Make sure both reqs are enqueued */
574 idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
575 s3c_pl330_submit(ch, &ch->req[idx]);
576 s3c_pl330_submit(ch, &ch->req[1 - idx]);
577 pl330op = PL330_OP_START;
578 break;
579
580 case S3C2410_DMAOP_STOP:
581 pl330op = PL330_OP_ABORT;
582 break;
583
584 case S3C2410_DMAOP_FLUSH:
585 pl330op = PL330_OP_FLUSH;
586 break;
587
588 case S3C2410_DMAOP_PAUSE:
589 case S3C2410_DMAOP_RESUME:
590 case S3C2410_DMAOP_TIMEOUT:
591 case S3C2410_DMAOP_STARTED:
592 spin_unlock_irqrestore(&res_lock, flags);
593 return 0;
594
595 default:
596 spin_unlock_irqrestore(&res_lock, flags);
597 return -EINVAL;
598 }
599
600 ret = pl330_chan_ctrl(ch->pl330_chan_id, pl330op);
601
602 if (pl330op == PL330_OP_START) {
603 spin_unlock_irqrestore(&res_lock, flags);
604 return ret;
605 }
606
607 idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
608
609 /* Abort the current xfer */
610 if (ch->req[idx].x) {
611 xfer = container_of(ch->req[idx].x,
612 struct s3c_pl330_xfer, px);
613
614 /* Drop xfer during FLUSH */
615 if (pl330op == PL330_OP_FLUSH)
616 del_from_queue(xfer);
617
618 ch->req[idx].x = NULL;
619
620 spin_unlock_irqrestore(&res_lock, flags);
621 _finish_off(xfer, S3C2410_RES_ABORT,
622 pl330op == PL330_OP_FLUSH ? 1 : 0);
623 spin_lock_irqsave(&res_lock, flags);
624 }
625
626 /* Flush the whole queue */
627 if (pl330op == PL330_OP_FLUSH) {
628
629 if (ch->req[1 - idx].x) {
630 xfer = container_of(ch->req[1 - idx].x,
631 struct s3c_pl330_xfer, px);
632
633 del_from_queue(xfer);
634
635 ch->req[1 - idx].x = NULL;
636
637 spin_unlock_irqrestore(&res_lock, flags);
638 _finish_off(xfer, S3C2410_RES_ABORT, 1);
639 spin_lock_irqsave(&res_lock, flags);
640 }
641
642 /* Finish off the remaining in the queue */
643 xfer = ch->xfer_head;
644 while (xfer) {
645
646 del_from_queue(xfer);
647
648 spin_unlock_irqrestore(&res_lock, flags);
649 _finish_off(xfer, S3C2410_RES_ABORT, 1);
650 spin_lock_irqsave(&res_lock, flags);
651
652 xfer = ch->xfer_head;
653 }
654 }
655
656ctrl_exit:
657 spin_unlock_irqrestore(&res_lock, flags);
658
659 return ret;
660}
661EXPORT_SYMBOL(s3c2410_dma_ctrl);
662
663int s3c2410_dma_enqueue(enum dma_ch id, void *token,
664 dma_addr_t addr, int size)
665{
666 struct s3c_pl330_chan *ch;
667 struct s3c_pl330_xfer *xfer;
668 unsigned long flags;
669 int idx, ret = 0;
670
671 spin_lock_irqsave(&res_lock, flags);
672
673 ch = id_to_chan(id);
674
675 /* Error if invalid or free channel */
676 if (!ch || chan_free(ch)) {
677 ret = -EINVAL;
678 goto enq_exit;
679 }
680
681 /* Error if size is unaligned */
682 if (ch->rqcfg.brst_size && size % (1 << ch->rqcfg.brst_size)) {
683 ret = -EINVAL;
684 goto enq_exit;
685 }
686
687 xfer = kmem_cache_alloc(ch->dmac->kmcache, GFP_ATOMIC);
688 if (!xfer) {
689 ret = -ENOMEM;
690 goto enq_exit;
691 }
692
693 xfer->token = token;
694 xfer->chan = ch;
695 xfer->px.bytes = size;
696 xfer->px.next = NULL; /* Single request */
697
698 /* For S3C DMA API, direction is always fixed for all xfers */
699 if (ch->req[0].rqtype == MEMTODEV) {
700 xfer->px.src_addr = addr;
701 xfer->px.dst_addr = ch->sdaddr;
702 } else {
703 xfer->px.src_addr = ch->sdaddr;
704 xfer->px.dst_addr = addr;
705 }
706
707 add_to_queue(ch, xfer, 0);
708
709 /* Try submitting on either request */
710 idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
711
712 if (!ch->req[idx].x)
713 s3c_pl330_submit(ch, &ch->req[idx]);
714 else
715 s3c_pl330_submit(ch, &ch->req[1 - idx]);
716
717 spin_unlock_irqrestore(&res_lock, flags);
718
719 if (ch->options & S3C2410_DMAF_AUTOSTART)
720 s3c2410_dma_ctrl(id, S3C2410_DMAOP_START);
721
722 return 0;
723
724enq_exit:
725 spin_unlock_irqrestore(&res_lock, flags);
726
727 return ret;
728}
729EXPORT_SYMBOL(s3c2410_dma_enqueue);
730
731int s3c2410_dma_request(enum dma_ch id,
732 struct s3c2410_dma_client *client,
733 void *dev)
734{
735 struct s3c_pl330_dmac *dmac;
736 struct s3c_pl330_chan *ch;
737 unsigned long flags;
738 int ret = 0;
739
740 spin_lock_irqsave(&res_lock, flags);
741
742 ch = chan_acquire(id);
743 if (!ch) {
744 ret = -EBUSY;
745 goto req_exit;
746 }
747
748 dmac = ch->dmac;
749
750 ch->pl330_chan_id = pl330_request_channel(dmac->pi);
751 if (!ch->pl330_chan_id) {
752 chan_release(ch);
753 ret = -EBUSY;
754 goto req_exit;
755 }
756
757 ch->client = client;
758 ch->options = 0; /* Clear any option */
759 ch->callback_fn = NULL; /* Clear any callback */
760 ch->lrq = NULL;
761
762 ch->rqcfg.brst_size = 2; /* Default word size */
763 ch->rqcfg.swap = SWAP_NO;
764 ch->rqcfg.scctl = SCCTRL0; /* Noncacheable and nonbufferable */
765 ch->rqcfg.dcctl = DCCTRL0; /* Noncacheable and nonbufferable */
766 ch->rqcfg.privileged = 0;
767 ch->rqcfg.insnaccess = 0;
768
769 /* Set invalid direction */
770 ch->req[0].rqtype = DEVTODEV;
771 ch->req[1].rqtype = ch->req[0].rqtype;
772
773 ch->req[0].cfg = &ch->rqcfg;
774 ch->req[1].cfg = ch->req[0].cfg;
775
776 ch->req[0].peri = iface_of_dmac(dmac, id) - 1; /* Original index */
777 ch->req[1].peri = ch->req[0].peri;
778
779 ch->req[0].token = &ch->req[0];
780 ch->req[0].xfer_cb = s3c_pl330_rq0;
781 ch->req[1].token = &ch->req[1];
782 ch->req[1].xfer_cb = s3c_pl330_rq1;
783
784 ch->req[0].x = NULL;
785 ch->req[1].x = NULL;
786
787 /* Reset xfer list */
788 INIT_LIST_HEAD(&ch->xfer_list);
789 ch->xfer_head = NULL;
790
791req_exit:
792 spin_unlock_irqrestore(&res_lock, flags);
793
794 return ret;
795}
796EXPORT_SYMBOL(s3c2410_dma_request);
797
798int s3c2410_dma_free(enum dma_ch id, struct s3c2410_dma_client *client)
799{
800 struct s3c_pl330_chan *ch;
801 struct s3c_pl330_xfer *xfer;
802 unsigned long flags;
803 int ret = 0;
804 unsigned idx;
805
806 spin_lock_irqsave(&res_lock, flags);
807
808 ch = id_to_chan(id);
809
810 if (!ch || chan_free(ch))
811 goto free_exit;
812
813 /* Refuse if someone else wanted to free the channel */
814 if (ch->client != client) {
815 ret = -EBUSY;
816 goto free_exit;
817 }
818
 819 /* Stop any active xfer, flush the queue and do callbacks */
820 pl330_chan_ctrl(ch->pl330_chan_id, PL330_OP_FLUSH);
821
822 /* Abort the submitted requests */
823 idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
824
825 if (ch->req[idx].x) {
826 xfer = container_of(ch->req[idx].x,
827 struct s3c_pl330_xfer, px);
828
829 ch->req[idx].x = NULL;
830 del_from_queue(xfer);
831
832 spin_unlock_irqrestore(&res_lock, flags);
833 _finish_off(xfer, S3C2410_RES_ABORT, 1);
834 spin_lock_irqsave(&res_lock, flags);
835 }
836
837 if (ch->req[1 - idx].x) {
838 xfer = container_of(ch->req[1 - idx].x,
839 struct s3c_pl330_xfer, px);
840
841 ch->req[1 - idx].x = NULL;
842 del_from_queue(xfer);
843
844 spin_unlock_irqrestore(&res_lock, flags);
845 _finish_off(xfer, S3C2410_RES_ABORT, 1);
846 spin_lock_irqsave(&res_lock, flags);
847 }
848
849 /* Pluck and Abort the queued requests in order */
850 do {
851 xfer = get_from_queue(ch, 1);
852
853 spin_unlock_irqrestore(&res_lock, flags);
854 _finish_off(xfer, S3C2410_RES_ABORT, 1);
855 spin_lock_irqsave(&res_lock, flags);
856 } while (xfer);
857
858 ch->client = NULL;
859
860 pl330_release_channel(ch->pl330_chan_id);
861
862 ch->pl330_chan_id = NULL;
863
864 chan_release(ch);
865
866free_exit:
867 spin_unlock_irqrestore(&res_lock, flags);
868
869 return ret;
870}
871EXPORT_SYMBOL(s3c2410_dma_free);
872
873int s3c2410_dma_config(enum dma_ch id, int xferunit)
874{
875 struct s3c_pl330_chan *ch;
876 struct pl330_info *pi;
877 unsigned long flags;
878 int i, dbwidth, ret = 0;
879
880 spin_lock_irqsave(&res_lock, flags);
881
882 ch = id_to_chan(id);
883
884 if (!ch || chan_free(ch)) {
885 ret = -EINVAL;
886 goto cfg_exit;
887 }
888
889 pi = ch->dmac->pi;
890 dbwidth = pi->pcfg.data_bus_width / 8;
891
892 /* Max size of xfer can be pcfg.data_bus_width */
893 if (xferunit > dbwidth) {
894 ret = -EINVAL;
895 goto cfg_exit;
896 }
897
898 i = 0;
899 while (xferunit != (1 << i))
900 i++;
901
902 /* If valid value */
903 if (xferunit == (1 << i))
904 ch->rqcfg.brst_size = i;
905 else
906 ret = -EINVAL;
907
908cfg_exit:
909 spin_unlock_irqrestore(&res_lock, flags);
910
911 return ret;
912}
913EXPORT_SYMBOL(s3c2410_dma_config);
914
915/* Options that are supported by this driver */
916#define S3C_PL330_FLAGS (S3C2410_DMAF_CIRCULAR | S3C2410_DMAF_AUTOSTART)
917
918int s3c2410_dma_setflags(enum dma_ch id, unsigned int options)
919{
920 struct s3c_pl330_chan *ch;
921 unsigned long flags;
922 int ret = 0;
923
924 spin_lock_irqsave(&res_lock, flags);
925
926 ch = id_to_chan(id);
927
928 if (!ch || chan_free(ch) || options & ~(S3C_PL330_FLAGS))
929 ret = -EINVAL;
930 else
931 ch->options = options;
932
933 spin_unlock_irqrestore(&res_lock, flags);
934
 935 return ret;
936}
937EXPORT_SYMBOL(s3c2410_dma_setflags);
938
939int s3c2410_dma_set_buffdone_fn(enum dma_ch id, s3c2410_dma_cbfn_t rtn)
940{
941 struct s3c_pl330_chan *ch;
942 unsigned long flags;
943 int ret = 0;
944
945 spin_lock_irqsave(&res_lock, flags);
946
947 ch = id_to_chan(id);
948
949 if (!ch || chan_free(ch))
950 ret = -EINVAL;
951 else
952 ch->callback_fn = rtn;
953
954 spin_unlock_irqrestore(&res_lock, flags);
955
956 return ret;
957}
958EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
959
960int s3c2410_dma_devconfig(enum dma_ch id, enum s3c2410_dmasrc source,
961 unsigned long address)
962{
963 struct s3c_pl330_chan *ch;
964 unsigned long flags;
965 int ret = 0;
966
967 spin_lock_irqsave(&res_lock, flags);
968
969 ch = id_to_chan(id);
970
971 if (!ch || chan_free(ch)) {
972 ret = -EINVAL;
973 goto devcfg_exit;
974 }
975
976 switch (source) {
977 case S3C2410_DMASRC_HW: /* P->M */
978 ch->req[0].rqtype = DEVTOMEM;
979 ch->req[1].rqtype = DEVTOMEM;
980 ch->rqcfg.src_inc = 0;
981 ch->rqcfg.dst_inc = 1;
982 break;
983 case S3C2410_DMASRC_MEM: /* M->P */
984 ch->req[0].rqtype = MEMTODEV;
985 ch->req[1].rqtype = MEMTODEV;
986 ch->rqcfg.src_inc = 1;
987 ch->rqcfg.dst_inc = 0;
988 break;
989 default:
990 ret = -EINVAL;
991 goto devcfg_exit;
992 }
993
994 ch->sdaddr = address;
995
996devcfg_exit:
997 spin_unlock_irqrestore(&res_lock, flags);
998
999 return ret;
1000}
1001EXPORT_SYMBOL(s3c2410_dma_devconfig);
1002
1003int s3c2410_dma_getposition(enum dma_ch id, dma_addr_t *src, dma_addr_t *dst)
1004{
1005 struct s3c_pl330_chan *ch = id_to_chan(id);
1006 struct pl330_chanstatus status;
1007 int ret;
1008
1009 if (!ch || chan_free(ch))
1010 return -EINVAL;
1011
1012 ret = pl330_chan_status(ch->pl330_chan_id, &status);
1013 if (ret < 0)
1014 return ret;
1015
1016 *src = status.src_addr;
1017 *dst = status.dst_addr;
1018
1019 return 0;
1020}
1021EXPORT_SYMBOL(s3c2410_dma_getposition);
1022
1023static irqreturn_t pl330_irq_handler(int irq, void *data)
1024{
1025 if (pl330_update(data))
1026 return IRQ_HANDLED;
1027 else
1028 return IRQ_NONE;
1029}
1030
1031static int pl330_probe(struct platform_device *pdev)
1032{
1033 struct s3c_pl330_dmac *s3c_pl330_dmac;
1034 struct s3c_pl330_platdata *pl330pd;
1035 struct pl330_info *pl330_info;
1036 struct resource *res;
1037 int i, ret, irq;
1038
1039 pl330pd = pdev->dev.platform_data;
1040
1041 /* Can't do without the list of _32_ peripherals */
1042 if (!pl330pd || !pl330pd->peri) {
1043 dev_err(&pdev->dev, "platform data missing!\n");
1044 return -ENODEV;
1045 }
1046
1047 pl330_info = kzalloc(sizeof(*pl330_info), GFP_KERNEL);
1048 if (!pl330_info)
1049 return -ENOMEM;
1050
1051 pl330_info->pl330_data = NULL;
1052 pl330_info->dev = &pdev->dev;
1053
1054 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1055 if (!res) {
1056 ret = -ENODEV;
1057 goto probe_err1;
1058 }
1059
1060 request_mem_region(res->start, resource_size(res), pdev->name);
1061
1062 pl330_info->base = ioremap(res->start, resource_size(res));
1063 if (!pl330_info->base) {
1064 ret = -ENXIO;
1065 goto probe_err2;
1066 }
1067
1068 irq = platform_get_irq(pdev, 0);
1069 if (irq < 0) {
1070 ret = irq;
1071 goto probe_err3;
1072 }
1073
1074 ret = request_irq(irq, pl330_irq_handler, 0,
1075 dev_name(&pdev->dev), pl330_info);
1076 if (ret)
1077 goto probe_err4;
1078
1079 /* Allocate a new DMAC */
1080 s3c_pl330_dmac = kmalloc(sizeof(*s3c_pl330_dmac), GFP_KERNEL);
1081 if (!s3c_pl330_dmac) {
1082 ret = -ENOMEM;
1083 goto probe_err5;
1084 }
1085
1086 /* Get operation clock and enable it */
1087 s3c_pl330_dmac->clk = clk_get(&pdev->dev, "pdma");
1088 if (IS_ERR(s3c_pl330_dmac->clk)) {
1089 dev_err(&pdev->dev, "Cannot get operation clock.\n");
1090 ret = -EINVAL;
1091 goto probe_err6;
1092 }
1093 clk_enable(s3c_pl330_dmac->clk);
1094
1095 ret = pl330_add(pl330_info);
1096 if (ret)
1097 goto probe_err7;
1098
1099 /* Hook the info */
1100 s3c_pl330_dmac->pi = pl330_info;
1101
1102 /* No busy channels */
1103 s3c_pl330_dmac->busy_chan = 0;
1104
1105 s3c_pl330_dmac->kmcache = kmem_cache_create(dev_name(&pdev->dev),
1106 sizeof(struct s3c_pl330_xfer), 0, 0, NULL);
1107
1108 if (!s3c_pl330_dmac->kmcache) {
1109 ret = -ENOMEM;
1110 goto probe_err8;
1111 }
1112
1113 /* Get the list of peripherals */
1114 s3c_pl330_dmac->peri = pl330pd->peri;
1115
1116 /* Attach to the list of DMACs */
1117 list_add_tail(&s3c_pl330_dmac->node, &dmac_list);
1118
1119 /* Create a channel for each peripheral in the DMAC
1120 * that is, if it doesn't already exist
1121 */
1122 for (i = 0; i < PL330_MAX_PERI; i++)
1123 if (s3c_pl330_dmac->peri[i] != DMACH_MAX)
1124 chan_add(s3c_pl330_dmac->peri[i]);
1125
1126 printk(KERN_INFO
1127 "Loaded driver for PL330 DMAC-%d %s\n", pdev->id, pdev->name);
1128 printk(KERN_INFO
1129 "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
1130 pl330_info->pcfg.data_buf_dep,
1131 pl330_info->pcfg.data_bus_width / 8, pl330_info->pcfg.num_chan,
1132 pl330_info->pcfg.num_peri, pl330_info->pcfg.num_events);
1133
1134 return 0;
1135
1136probe_err8:
1137 pl330_del(pl330_info);
1138probe_err7:
1139 clk_disable(s3c_pl330_dmac->clk);
1140 clk_put(s3c_pl330_dmac->clk);
1141probe_err6:
1142 kfree(s3c_pl330_dmac);
1143probe_err5:
1144 free_irq(irq, pl330_info);
1145probe_err4:
1146probe_err3:
1147 iounmap(pl330_info->base);
1148probe_err2:
1149 release_mem_region(res->start, resource_size(res));
1150probe_err1:
1151 kfree(pl330_info);
1152
1153 return ret;
1154}
1155
1156static int pl330_remove(struct platform_device *pdev)
1157{
1158 struct s3c_pl330_dmac *dmac, *d;
1159 struct s3c_pl330_chan *ch;
1160 unsigned long flags;
1161 int del, found;
1162
1163 if (!pdev->dev.platform_data)
1164 return -EINVAL;
1165
1166 spin_lock_irqsave(&res_lock, flags);
1167
1168 found = 0;
1169 list_for_each_entry(d, &dmac_list, node)
1170 if (d->pi->dev == &pdev->dev) {
1171 found = 1;
1172 break;
1173 }
1174
1175 if (!found) {
1176 spin_unlock_irqrestore(&res_lock, flags);
1177 return 0;
1178 }
1179
1180 dmac = d;
1181
1182 /* Remove all Channels that are managed only by this DMAC */
1183 list_for_each_entry(ch, &chan_list, node) {
1184
1185 /* Only channels that are handled by this DMAC */
1186 if (iface_of_dmac(dmac, ch->id))
1187 del = 1;
1188 else
1189 continue;
1190
1191 /* Don't remove if some other DMAC has it too */
1192 list_for_each_entry(d, &dmac_list, node)
1193 if (d != dmac && iface_of_dmac(d, ch->id)) {
1194 del = 0;
1195 break;
1196 }
1197
1198 if (del) {
1199 spin_unlock_irqrestore(&res_lock, flags);
1200 s3c2410_dma_free(ch->id, ch->client);
1201 spin_lock_irqsave(&res_lock, flags);
1202 list_del(&ch->node);
1203 kfree(ch);
1204 }
1205 }
1206
1207 /* Disable operation clock */
1208 clk_disable(dmac->clk);
1209 clk_put(dmac->clk);
1210
1211 /* Remove the DMAC */
1212 list_del(&dmac->node);
1213 kfree(dmac);
1214
1215 spin_unlock_irqrestore(&res_lock, flags);
1216
1217 return 0;
1218}
1219
1220static struct platform_driver pl330_driver = {
1221 .driver = {
1222 .owner = THIS_MODULE,
1223 .name = "s3c-pl330",
1224 },
1225 .probe = pl330_probe,
1226 .remove = pl330_remove,
1227};
1228
1229static int __init pl330_init(void)
1230{
1231 return platform_driver_register(&pl330_driver);
1232}
1233module_init(pl330_init);
1234
1235static void __exit pl330_exit(void)
1236{
1237 platform_driver_unregister(&pl330_driver);
1238 return;
1239}
1240module_exit(pl330_exit);
1241
1242MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
1243MODULE_DESCRIPTION("Driver for PL330 DMA Controller");
1244MODULE_LICENSE("GPL");
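One detail of the deleted driver worth a worked example is the memory-to-memory burst-length heuristic in s3c_pl330_submit(): start from the largest burst the data buffer can sustain, cap it at the hardware limit of 16, then shrink it until it divides the transfer size evenly. A standalone restatement in plain C (the hardware parameters below are assumptions, not taken from any particular SoC):

	#include <stdio.h>

	int main(void)
	{
		unsigned bus_width = 64 / 8;	/* pcfg.data_bus_width / 8, in bytes */
		unsigned buf_dep = 16;		/* pcfg.data_buf_dep */
		unsigned burst = 1 << 2;	/* brst_size = 2 -> 4-byte beats */
		unsigned bytes = 4000;		/* transfer size */
		unsigned bl = bus_width * buf_dep / burst;	/* 32 here */

		if (bl > 16)
			bl = 16;	/* src/dst_burst_len can't exceed 16 */
		while (bl > 1 && (bytes % (bl * burst)))
			bl--;		/* largest burst dividing the transfer */

		printf("brst_len = %u\n", bl);	/* prints 10: 4000 % (10 * 4) == 0 */
		return 0;
	}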
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 2e3b3d38c465..ab8f469f5cf8 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -193,7 +193,8 @@ config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
193config PL330_DMA 193config PL330_DMA
194 tristate "DMA API Driver for PL330" 194 tristate "DMA API Driver for PL330"
195 select DMA_ENGINE 195 select DMA_ENGINE
196 depends on PL330 196 depends on ARM_AMBA
197 select PL330
197 help 198 help
198 Select if your platform has one or more PL330 DMACs. 199 Select if your platform has one or more PL330 DMACs.
199 You need to provide platform specific settings via 200 You need to provide platform specific settings via
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index be21e3f138a8..b7cbd1ab1db1 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -66,32 +66,29 @@
66 * after the final transfer signalled by LBREQ or LSREQ. The DMAC 66 * after the final transfer signalled by LBREQ or LSREQ. The DMAC
67 * will then move to the next LLI entry. 67 * will then move to the next LLI entry.
68 * 68 *
69 * Only the former works sanely with scatter lists, so we only implement
70 * the DMAC flow control method. However, peripherals which use the LBREQ
71 * and LSREQ signals (eg, MMCI) are unable to use this mode, which through
72 * these hardware restrictions prevents them from using scatter DMA.
73 *
74 * Global TODO: 69 * Global TODO:
75 * - Break out common code from arch/arm/mach-s3c64xx and share 70 * - Break out common code from arch/arm/mach-s3c64xx and share
76 */ 71 */
77#include <linux/device.h>
78#include <linux/init.h>
79#include <linux/module.h>
80#include <linux/interrupt.h>
81#include <linux/slab.h>
82#include <linux/delay.h>
83#include <linux/dma-mapping.h>
84#include <linux/dmapool.h>
85#include <linux/dmaengine.h>
86#include <linux/amba/bus.h> 72#include <linux/amba/bus.h>
87#include <linux/amba/pl08x.h> 73#include <linux/amba/pl08x.h>
88#include <linux/debugfs.h> 74#include <linux/debugfs.h>
75#include <linux/delay.h>
76#include <linux/device.h>
77#include <linux/dmaengine.h>
78#include <linux/dmapool.h>
79#include <linux/dma-mapping.h>
80#include <linux/init.h>
81#include <linux/interrupt.h>
82#include <linux/module.h>
83#include <linux/pm_runtime.h>
89#include <linux/seq_file.h> 84#include <linux/seq_file.h>
90 85#include <linux/slab.h>
91#include <asm/hardware/pl080.h> 86#include <asm/hardware/pl080.h>
92 87
93#define DRIVER_NAME "pl08xdmac" 88#define DRIVER_NAME "pl08xdmac"
94 89
90static struct amba_driver pl08x_amba_driver;
91
95/** 92/**
96 * struct vendor_data - vendor-specific config parameters for PL08x derivatives 93 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
97 * @channels: the number of channels available in this variant 94 * @channels: the number of channels available in this variant
@@ -126,7 +123,8 @@ struct pl08x_lli {
126 * @phy_chans: array of data for the physical channels 123 * @phy_chans: array of data for the physical channels
127 * @pool: a pool for the LLI descriptors 124 * @pool: a pool for the LLI descriptors
128 * @pool_ctr: counter of LLIs in the pool 125 * @pool_ctr: counter of LLIs in the pool
129 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches 126 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
127 * fetches
130 * @mem_buses: set to indicate memory transfers on AHB2. 128 * @mem_buses: set to indicate memory transfers on AHB2.
131 * @lock: a spinlock for this struct 129 * @lock: a spinlock for this struct
132 */ 130 */
@@ -149,14 +147,6 @@ struct pl08x_driver_data {
149 * PL08X specific defines 147 * PL08X specific defines
150 */ 148 */
151 149
152/*
153 * Memory boundaries: the manual for PL08x says that the controller
154 * cannot read past a 1KiB boundary, so these defines are used to
155 * create transfer LLIs that do not cross such boundaries.
156 */
157#define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */
158#define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT)
159
160/* Size (bytes) of each LLI buffer allocated for one transfer */ 150/* Size (bytes) of each LLI buffer allocated for one transfer */
161# define PL08X_LLI_TSFR_SIZE 0x2000 151# define PL08X_LLI_TSFR_SIZE 0x2000
162 152
@@ -272,7 +262,6 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
272 writel(val, ch->base + PL080_CH_CONFIG); 262 writel(val, ch->base + PL080_CH_CONFIG);
273} 263}
274 264
275
276/* 265/*
277 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and 266 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
278 * clears any pending interrupt status. This should not be used for 267 * clears any pending interrupt status. This should not be used for
@@ -363,7 +352,9 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
363 if (!list_empty(&plchan->pend_list)) { 352 if (!list_empty(&plchan->pend_list)) {
364 struct pl08x_txd *txdi; 353 struct pl08x_txd *txdi;
365 list_for_each_entry(txdi, &plchan->pend_list, node) { 354 list_for_each_entry(txdi, &plchan->pend_list, node) {
366 bytes += txdi->len; 355 struct pl08x_sg *dsg;
356 list_for_each_entry(dsg, &txd->dsg_list, node)
357 bytes += dsg->len;
367 } 358 }
368 } 359 }
369 360
@@ -407,6 +398,7 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
407 return NULL; 398 return NULL;
408 } 399 }
409 400
401 pm_runtime_get_sync(&pl08x->adev->dev);
410 return ch; 402 return ch;
411} 403}
412 404
@@ -420,6 +412,8 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
420 /* Stop the channel and clear its interrupts */ 412 /* Stop the channel and clear its interrupts */
421 pl08x_terminate_phy_chan(pl08x, ch); 413 pl08x_terminate_phy_chan(pl08x, ch);
422 414
415 pm_runtime_put(&pl08x->adev->dev);
416
423 /* Mark it as free */ 417 /* Mark it as free */
424 ch->serving = NULL; 418 ch->serving = NULL;
425 spin_unlock_irqrestore(&ch->lock, flags); 419 spin_unlock_irqrestore(&ch->lock, flags);
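
These two hunks bracket physical-channel use with a runtime-PM reference: pl08x_get_phy_channel() takes it, pl08x_put_phy_channel() drops it, so the controller may runtime-suspend whenever no physical channel is in service. A hedged sketch of the pattern; the my_* names are illustrative, not part of the driver:

	#include <linux/device.h>
	#include <linux/pm_runtime.h>

	static void my_get_channel(struct device *dev)
	{
		/* resumes the device if it was runtime-suspended */
		pm_runtime_get_sync(dev);
		/* ... claim and program the physical channel ... */
	}

	static void my_put_channel(struct device *dev)
	{
		/* ... stop the channel ... */
		/* asynchronous counterpart; may trigger runtime suspend */
		pm_runtime_put(dev);
	}
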
@@ -499,36 +493,30 @@ struct pl08x_lli_build_data {
499}; 493};
500 494
501/* 495/*
 502 * Autoselect a master bus to use for the transfer this prefers the 496 * Autoselect a master bus to use for the transfer. The slave bus is chosen as
 503 * destination bus if both available if fixed address on one bus the 497 * the victim when src & dst are not similarly aligned, i.e. if, after aligning
 504 * other will be chosen 498 * the master's address to the transfer width (by sending a few bytes one at
 499 * a time), the slave is still unaligned, then its width is reduced to
 500 * BYTE.
501 * - prefers the destination bus if both available
502 * - prefers bus with fixed address (i.e. peripheral)
505 */ 503 */
506static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd, 504static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
507 struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl) 505 struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
508{ 506{
509 if (!(cctl & PL080_CONTROL_DST_INCR)) { 507 if (!(cctl & PL080_CONTROL_DST_INCR)) {
510 *mbus = &bd->srcbus;
511 *sbus = &bd->dstbus;
512 } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
513 *mbus = &bd->dstbus; 508 *mbus = &bd->dstbus;
514 *sbus = &bd->srcbus; 509 *sbus = &bd->srcbus;
510 } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
511 *mbus = &bd->srcbus;
512 *sbus = &bd->dstbus;
515 } else { 513 } else {
516 if (bd->dstbus.buswidth == 4) { 514 if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
517 *mbus = &bd->dstbus; 515 *mbus = &bd->dstbus;
518 *sbus = &bd->srcbus; 516 *sbus = &bd->srcbus;
519 } else if (bd->srcbus.buswidth == 4) { 517 } else {
520 *mbus = &bd->srcbus;
521 *sbus = &bd->dstbus;
522 } else if (bd->dstbus.buswidth == 2) {
523 *mbus = &bd->dstbus;
524 *sbus = &bd->srcbus;
525 } else if (bd->srcbus.buswidth == 2) {
526 *mbus = &bd->srcbus; 518 *mbus = &bd->srcbus;
527 *sbus = &bd->dstbus; 519 *sbus = &bd->dstbus;
528 } else {
529 /* bd->srcbus.buswidth == 1 */
530 *mbus = &bd->dstbus;
531 *sbus = &bd->srcbus;
532 } 520 }
533 } 521 }
534} 522}
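
The replacement logic is simpler than the old width-by-width ladder: a bus with a fixed (peripheral) address always becomes the master, and otherwise the wider bus wins, with ties going to the destination. A standalone model of the rule, not driver code; struct bus here is a simplification of pl08x_bus_data:

	#include <stdbool.h>
	#include <stdio.h>

	struct bus { unsigned int width; bool incr; };

	static void choose_master(struct bus *src, struct bus *dst,
				  struct bus **mbus, struct bus **sbus)
	{
		if (!dst->incr) {		/* fixed (peripheral) destination */
			*mbus = dst; *sbus = src;
		} else if (!src->incr) {	/* fixed (peripheral) source */
			*mbus = src; *sbus = dst;
		} else if (dst->width >= src->width) {
			*mbus = dst; *sbus = src;	/* prefer destination */
		} else {
			*mbus = src; *sbus = dst;
		}
	}

	int main(void)
	{
		struct bus src = { 4, true }, dst = { 2, false }, *m, *s;

		choose_master(&src, &dst, &m, &s);
		/* prints "master width 2, slave width 4": fixed dst wins */
		printf("master width %u, slave width %u\n", m->width, s->width);
		return 0;
	}
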
@@ -547,7 +535,8 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
547 llis_va[num_llis].cctl = cctl; 535 llis_va[num_llis].cctl = cctl;
548 llis_va[num_llis].src = bd->srcbus.addr; 536 llis_va[num_llis].src = bd->srcbus.addr;
549 llis_va[num_llis].dst = bd->dstbus.addr; 537 llis_va[num_llis].dst = bd->dstbus.addr;
550 llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli); 538 llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
539 sizeof(struct pl08x_lli);
551 llis_va[num_llis].lli |= bd->lli_bus; 540 llis_va[num_llis].lli |= bd->lli_bus;
552 541
553 if (cctl & PL080_CONTROL_SRC_INCR) 542 if (cctl & PL080_CONTROL_SRC_INCR)
@@ -560,16 +549,12 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
560 bd->remainder -= len; 549 bd->remainder -= len;
561} 550}
562 551
563/* 552static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
564 * Return number of bytes to fill to boundary, or len. 553 u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
565 * This calculation works for any value of addr.
566 */
567static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
568{ 554{
569 size_t boundary_len = PL08X_BOUNDARY_SIZE - 555 *cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
570 (addr & (PL08X_BOUNDARY_SIZE - 1)); 556 pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
571 557 (*total_bytes) += len;
572 return min(boundary_len, len);
573} 558}
574 559
575/* 560/*
@@ -583,13 +568,12 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
583 struct pl08x_bus_data *mbus, *sbus; 568 struct pl08x_bus_data *mbus, *sbus;
584 struct pl08x_lli_build_data bd; 569 struct pl08x_lli_build_data bd;
585 int num_llis = 0; 570 int num_llis = 0;
586 u32 cctl; 571 u32 cctl, early_bytes = 0;
587 size_t max_bytes_per_lli; 572 size_t max_bytes_per_lli, total_bytes;
588 size_t total_bytes = 0;
589 struct pl08x_lli *llis_va; 573 struct pl08x_lli *llis_va;
574 struct pl08x_sg *dsg;
590 575
591 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, 576 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
592 &txd->llis_bus);
593 if (!txd->llis_va) { 577 if (!txd->llis_va) {
594 dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__); 578 dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
595 return 0; 579 return 0;
@@ -597,13 +581,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
597 581
598 pl08x->pool_ctr++; 582 pl08x->pool_ctr++;
599 583
600 /* Get the default CCTL */
601 cctl = txd->cctl;
602
603 bd.txd = txd; 584 bd.txd = txd;
604 bd.srcbus.addr = txd->src_addr;
605 bd.dstbus.addr = txd->dst_addr;
606 bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0; 585 bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
586 cctl = txd->cctl;
607 587
608 /* Find maximum width of the source bus */ 588 /* Find maximum width of the source bus */
609 bd.srcbus.maxwidth = 589 bd.srcbus.maxwidth =
@@ -615,215 +595,179 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
615 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >> 595 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
616 PL080_CONTROL_DWIDTH_SHIFT); 596 PL080_CONTROL_DWIDTH_SHIFT);
617 597
618 /* Set up the bus widths to the maximum */ 598 list_for_each_entry(dsg, &txd->dsg_list, node) {
619 bd.srcbus.buswidth = bd.srcbus.maxwidth; 599 total_bytes = 0;
620 bd.dstbus.buswidth = bd.dstbus.maxwidth; 600 cctl = txd->cctl;
621 601
622 /* 602 bd.srcbus.addr = dsg->src_addr;
623 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) 603 bd.dstbus.addr = dsg->dst_addr;
624 */ 604 bd.remainder = dsg->len;
625 max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) * 605 bd.srcbus.buswidth = bd.srcbus.maxwidth;
626 PL080_CONTROL_TRANSFER_SIZE_MASK; 606 bd.dstbus.buswidth = bd.dstbus.maxwidth;
627 607
628 /* We need to count this down to zero */ 608 pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
629 bd.remainder = txd->len;
630 609
631 /* 610 dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
632 * Choose bus to align to 611 bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
633 * - prefers destination bus if both available 612 bd.srcbus.buswidth,
634 * - if fixed address on one bus chooses other 613 bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
635 */ 614 bd.dstbus.buswidth,
636 pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); 615 bd.remainder);
637 616 dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
638 dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n", 617 mbus == &bd.srcbus ? "src" : "dst",
639 bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "", 618 sbus == &bd.srcbus ? "src" : "dst");
640 bd.srcbus.buswidth,
641 bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
642 bd.dstbus.buswidth,
643 bd.remainder, max_bytes_per_lli);
644 dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
645 mbus == &bd.srcbus ? "src" : "dst",
646 sbus == &bd.srcbus ? "src" : "dst");
647
648 if (txd->len < mbus->buswidth) {
649 /* Less than a bus width available - send as single bytes */
650 while (bd.remainder) {
651 dev_vdbg(&pl08x->adev->dev,
652 "%s single byte LLIs for a transfer of "
653 "less than a bus width (remain 0x%08x)\n",
654 __func__, bd.remainder);
655 cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
656 pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
657 total_bytes++;
658 }
659 } else {
660 /* Make one byte LLIs until master bus is aligned */
661 while ((mbus->addr) % (mbus->buswidth)) {
662 dev_vdbg(&pl08x->adev->dev,
663 "%s adjustment lli for less than bus width "
664 "(remain 0x%08x)\n",
665 __func__, bd.remainder);
666 cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
667 pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
668 total_bytes++;
669 }
670 619
671 /* 620 /*
672 * Master now aligned 621 * Zero length is only allowed if all these requirements are
673 * - if slave is not then we must set its width down 622 * met:
623 * - flow controller is peripheral.
624 * - src.addr is aligned to src.width
625 * - dst.addr is aligned to dst.width
626 *
 627 * sg_len == 1 should be true, as there are two cases here:
 628 *
 629 * - Memory addresses are contiguous and not scattered.
 630 *   Here only one sg is passed by the user driver, with a
 631 *   memory address and zero length. We hand this to the
 632 *   controller; after the transfer it receives the last burst
 633 *   request from the peripheral and the transfer finishes.
 634 *
 635 * - Memory addresses are scattered and not contiguous.
 636 *   Here, since the DMA controller doesn't know when an LLI's
 637 *   transfer is over, it can't load the next LLI. So in this
 638 *   case only one LLI can be assumed to be supported; thus we
 639 *   can't have scattered addresses.
674 */ 640 */
675 if (sbus->addr % sbus->buswidth) { 641 if (!bd.remainder) {
676 dev_dbg(&pl08x->adev->dev, 642 u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
677 "%s set down bus width to one byte\n", 643 PL080_CONFIG_FLOW_CONTROL_SHIFT;
678 __func__); 644 if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
645 (fc <= PL080_FLOW_SRC2DST_SRC))) {
646 dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
647 __func__);
648 return 0;
649 }
650
651 if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
 652 (bd.dstbus.addr % bd.dstbus.buswidth)) {
653 dev_err(&pl08x->adev->dev,
654 "%s src & dst address must be aligned to src"
655 " & dst width if peripheral is flow controller",
656 __func__);
657 return 0;
658 }
679 659
680 sbus->buswidth = 1; 660 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
661 bd.dstbus.buswidth, 0);
662 pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
663 break;
681 } 664 }
682 665
683 /* 666 /*
 684 * Make largest possible LLIs until less than one bus 667 * Send byte by byte in the following cases:
685 * width left 668 * - Less than a bus width available
669 * - until master bus is aligned
686 */ 670 */
687 while (bd.remainder > (mbus->buswidth - 1)) { 671 if (bd.remainder < mbus->buswidth)
688 size_t lli_len, target_len, tsize, odd_bytes; 672 early_bytes = bd.remainder;
673 else if ((mbus->addr) % (mbus->buswidth)) {
674 early_bytes = mbus->buswidth - (mbus->addr) %
675 (mbus->buswidth);
676 if ((bd.remainder - early_bytes) < mbus->buswidth)
677 early_bytes = bd.remainder;
678 }
689 679
680 if (early_bytes) {
681 dev_vdbg(&pl08x->adev->dev,
682 "%s byte width LLIs (remain 0x%08x)\n",
683 __func__, bd.remainder);
684 prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
685 &total_bytes);
686 }
687
688 if (bd.remainder) {
690 /* 689 /*
691 * If enough left try to send max possible, 690 * Master now aligned
692 * otherwise try to send the remainder 691 * - if slave is not then we must set its width down
693 */ 692 */
694 target_len = min(bd.remainder, max_bytes_per_lli); 693 if (sbus->addr % sbus->buswidth) {
694 dev_dbg(&pl08x->adev->dev,
695 "%s set down bus width to one byte\n",
696 __func__);
697
698 sbus->buswidth = 1;
699 }
695 700
696 /* 701 /*
697 * Set bus lengths for incrementing buses to the 702 * Bytes transferred = tsize * src width, not
698 * number of bytes which fill to next memory boundary, 703 * MIN(buswidths)
699 * limiting on the target length calculated above.
700 */ 704 */
701 if (cctl & PL080_CONTROL_SRC_INCR) 705 max_bytes_per_lli = bd.srcbus.buswidth *
702 bd.srcbus.fill_bytes = 706 PL080_CONTROL_TRANSFER_SIZE_MASK;
703 pl08x_pre_boundary(bd.srcbus.addr, 707 dev_vdbg(&pl08x->adev->dev,
704 target_len); 708 "%s max bytes per lli = %zu\n",
705 else 709 __func__, max_bytes_per_lli);
706 bd.srcbus.fill_bytes = target_len; 710
707 711 /*
708 if (cctl & PL080_CONTROL_DST_INCR) 712 * Make largest possible LLIs until less than one bus
709 bd.dstbus.fill_bytes = 713 * width left
710 pl08x_pre_boundary(bd.dstbus.addr, 714 */
711 target_len); 715 while (bd.remainder > (mbus->buswidth - 1)) {
712 else 716 size_t lli_len, tsize, width;
713 bd.dstbus.fill_bytes = target_len;
714
715 /* Find the nearest */
716 lli_len = min(bd.srcbus.fill_bytes,
717 bd.dstbus.fill_bytes);
718
719 BUG_ON(lli_len > bd.remainder);
720
721 if (lli_len <= 0) {
722 dev_err(&pl08x->adev->dev,
723 "%s lli_len is %zu, <= 0\n",
724 __func__, lli_len);
725 return 0;
726 }
727 717
728 if (lli_len == target_len) {
729 /*
730 * Can send what we wanted.
731 * Maintain alignment
732 */
733 lli_len = (lli_len/mbus->buswidth) *
734 mbus->buswidth;
735 odd_bytes = 0;
736 } else {
737 /* 718 /*
738 * So now we know how many bytes to transfer 719 * If enough left try to send max possible,
739 * to get to the nearest boundary. The next 720 * otherwise try to send the remainder
740 * LLI will past the boundary. However, we
741 * may be working to a boundary on the slave
742 * bus. We need to ensure the master stays
743 * aligned, and that we are working in
744 * multiples of the bus widths.
745 */ 721 */
746 odd_bytes = lli_len % mbus->buswidth; 722 lli_len = min(bd.remainder, max_bytes_per_lli);
747 lli_len -= odd_bytes;
748
749 }
750 723
751 if (lli_len) {
752 /* 724 /*
753 * Check against minimum bus alignment: 725 * Check against maximum bus alignment:
754 * Calculate actual transfer size in relation 726 * Calculate actual transfer size in relation to
755 * to bus width an get a maximum remainder of 727 * bus width an get a maximum remainder of the
756 * the smallest bus width - 1 728 * highest bus width - 1
757 */ 729 */
758 /* FIXME: use round_down()? */ 730 width = max(mbus->buswidth, sbus->buswidth);
759 tsize = lli_len / min(mbus->buswidth, 731 lli_len = (lli_len / width) * width;
760 sbus->buswidth); 732 tsize = lli_len / bd.srcbus.buswidth;
761 lli_len = tsize * min(mbus->buswidth,
762 sbus->buswidth);
763
764 if (target_len != lli_len) {
765 dev_vdbg(&pl08x->adev->dev,
766 "%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
767 __func__, target_len, lli_len, txd->len);
768 }
769
770 cctl = pl08x_cctl_bits(cctl,
771 bd.srcbus.buswidth,
772 bd.dstbus.buswidth,
773 tsize);
774 733
775 dev_vdbg(&pl08x->adev->dev, 734 dev_vdbg(&pl08x->adev->dev,
776 "%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n", 735 "%s fill lli with single lli chunk of "
736 "size 0x%08zx (remainder 0x%08zx)\n",
777 __func__, lli_len, bd.remainder); 737 __func__, lli_len, bd.remainder);
738
739 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
740 bd.dstbus.buswidth, tsize);
778 pl08x_fill_lli_for_desc(&bd, num_llis++, 741 pl08x_fill_lli_for_desc(&bd, num_llis++,
779 lli_len, cctl); 742 lli_len, cctl);
780 total_bytes += lli_len; 743 total_bytes += lli_len;
781 } 744 }
782 745
783 746 /*
784 if (odd_bytes) { 747 * Send any odd bytes
785 /* 748 */
786 * Creep past the boundary, maintaining 749 if (bd.remainder) {
787 * master alignment 750 dev_vdbg(&pl08x->adev->dev,
788 */ 751 "%s align with boundary, send odd bytes (remain %zu)\n",
789 int j; 752 __func__, bd.remainder);
790 for (j = 0; (j < mbus->buswidth) 753 prep_byte_width_lli(&bd, &cctl, bd.remainder,
791 && (bd.remainder); j++) { 754 num_llis++, &total_bytes);
792 cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
793 dev_vdbg(&pl08x->adev->dev,
794 "%s align with boundary, single byte (remain 0x%08zx)\n",
795 __func__, bd.remainder);
796 pl08x_fill_lli_for_desc(&bd,
797 num_llis++, 1, cctl);
798 total_bytes++;
799 }
800 } 755 }
801 } 756 }
802 757
803 /* 758 if (total_bytes != dsg->len) {
804 * Send any odd bytes 759 dev_err(&pl08x->adev->dev,
805 */ 760 "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
806 while (bd.remainder) { 761 __func__, total_bytes, dsg->len);
807 cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 762 return 0;
808 dev_vdbg(&pl08x->adev->dev,
809 "%s align with boundary, single odd byte (remain %zu)\n",
810 __func__, bd.remainder);
811 pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
812 total_bytes++;
813 } 763 }
814 }
815 if (total_bytes != txd->len) {
816 dev_err(&pl08x->adev->dev,
817 "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
818 __func__, total_bytes, txd->len);
819 return 0;
820 }
821 764
822 if (num_llis >= MAX_NUM_TSFR_LLIS) { 765 if (num_llis >= MAX_NUM_TSFR_LLIS) {
823 dev_err(&pl08x->adev->dev, 766 dev_err(&pl08x->adev->dev,
824 "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n", 767 "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
825 __func__, (u32) MAX_NUM_TSFR_LLIS); 768 __func__, (u32) MAX_NUM_TSFR_LLIS);
826 return 0; 769 return 0;
770 }
827 } 771 }
828 772
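
The size arithmetic in this loop is easy to get wrong: the PL080 transfer-size field counts units of the *source* width, so max_bytes_per_lli is src width times the field mask, and each chunk is rounded down to a multiple of the larger bus width so both buses stay aligned. A standalone worked example, not driver code; TRANSFER_SIZE_MASK mirrors PL080_CONTROL_TRANSFER_SIZE_MASK (0xfff) from <asm/hardware/pl080.h>:

	#include <stddef.h>
	#include <stdio.h>

	#define TRANSFER_SIZE_MASK 0xfff	/* mirrors PL080_CONTROL_TRANSFER_SIZE_MASK */

	int main(void)
	{
		unsigned int srcwidth = 4;		/* source bus width, bytes */
		unsigned int mwidth = 4, swidth = 2;	/* master/slave bus widths */
		size_t remainder = 20000;		/* bytes left in this dsg */

		/* tsize counts units of the source width, not min(widths) */
		size_t max_bytes = srcwidth * TRANSFER_SIZE_MASK;	/* 16380 */
		size_t lli_len = remainder < max_bytes ? remainder : max_bytes;
		unsigned int width = mwidth > swidth ? mwidth : swidth;

		lli_len = (lli_len / width) * width;	/* keep both buses aligned */
		/* prints "lli_len=16380 tsize=4095" */
		printf("lli_len=%zu tsize=%zu\n", lli_len, lli_len / srcwidth);
		return 0;
	}
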
829 llis_va = txd->llis_va; 773 llis_va = txd->llis_va;
@@ -856,11 +800,19 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
856static void pl08x_free_txd(struct pl08x_driver_data *pl08x, 800static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
857 struct pl08x_txd *txd) 801 struct pl08x_txd *txd)
858{ 802{
803 struct pl08x_sg *dsg, *_dsg;
804
859 /* Free the LLI */ 805 /* Free the LLI */
860 dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus); 806 if (txd->llis_va)
807 dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
861 808
862 pl08x->pool_ctr--; 809 pl08x->pool_ctr--;
863 810
811 list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
812 list_del(&dsg->node);
813 kfree(dsg);
814 }
815
864 kfree(txd); 816 kfree(txd);
865} 817}
866 818
@@ -917,9 +869,7 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
917 * need, but for slaves the physical signals may be muxed! 869 * need, but for slaves the physical signals may be muxed!
918 * Can the platform allow us to use this channel? 870 * Can the platform allow us to use this channel?
919 */ 871 */
920 if (plchan->slave && 872 if (plchan->slave && pl08x->pd->get_signal) {
921 ch->signal < 0 &&
922 pl08x->pd->get_signal) {
923 ret = pl08x->pd->get_signal(plchan); 873 ret = pl08x->pd->get_signal(plchan);
924 if (ret < 0) { 874 if (ret < 0) {
925 dev_dbg(&pl08x->adev->dev, 875 dev_dbg(&pl08x->adev->dev,
@@ -1008,10 +958,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
1008 * If slaves are relying on interrupts to signal completion this function 958 * If slaves are relying on interrupts to signal completion this function
1009 * must not be called with interrupts disabled. 959 * must not be called with interrupts disabled.
1010 */ 960 */
1011static enum dma_status 961static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
1012pl08x_dma_tx_status(struct dma_chan *chan, 962 dma_cookie_t cookie, struct dma_tx_state *txstate)
1013 dma_cookie_t cookie,
1014 struct dma_tx_state *txstate)
1015{ 963{
1016 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 964 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1017 dma_cookie_t last_used; 965 dma_cookie_t last_used;
@@ -1253,7 +1201,9 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
1253 1201
1254 num_llis = pl08x_fill_llis_for_desc(pl08x, txd); 1202 num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
1255 if (!num_llis) { 1203 if (!num_llis) {
1256 kfree(txd); 1204 spin_lock_irqsave(&plchan->lock, flags);
1205 pl08x_free_txd(pl08x, txd);
1206 spin_unlock_irqrestore(&plchan->lock, flags);
1257 return -EINVAL; 1207 return -EINVAL;
1258 } 1208 }
1259 1209
@@ -1301,13 +1251,14 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
1301static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, 1251static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
1302 unsigned long flags) 1252 unsigned long flags)
1303{ 1253{
1304 struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); 1254 struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
1305 1255
1306 if (txd) { 1256 if (txd) {
1307 dma_async_tx_descriptor_init(&txd->tx, &plchan->chan); 1257 dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
1308 txd->tx.flags = flags; 1258 txd->tx.flags = flags;
1309 txd->tx.tx_submit = pl08x_tx_submit; 1259 txd->tx.tx_submit = pl08x_tx_submit;
1310 INIT_LIST_HEAD(&txd->node); 1260 INIT_LIST_HEAD(&txd->node);
1261 INIT_LIST_HEAD(&txd->dsg_list);
1311 1262
1312 /* Always enable error and terminal interrupts */ 1263 /* Always enable error and terminal interrupts */
1313 txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK | 1264 txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
@@ -1326,6 +1277,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1326 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1277 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1327 struct pl08x_driver_data *pl08x = plchan->host; 1278 struct pl08x_driver_data *pl08x = plchan->host;
1328 struct pl08x_txd *txd; 1279 struct pl08x_txd *txd;
1280 struct pl08x_sg *dsg;
1329 int ret; 1281 int ret;
1330 1282
1331 txd = pl08x_get_txd(plchan, flags); 1283 txd = pl08x_get_txd(plchan, flags);
@@ -1335,10 +1287,19 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1335 return NULL; 1287 return NULL;
1336 } 1288 }
1337 1289
1290 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
1291 if (!dsg) {
1292 pl08x_free_txd(pl08x, txd);
1293 dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
1294 __func__);
1295 return NULL;
1296 }
1297 list_add_tail(&dsg->node, &txd->dsg_list);
1298
1338 txd->direction = DMA_NONE; 1299 txd->direction = DMA_NONE;
1339 txd->src_addr = src; 1300 dsg->src_addr = src;
1340 txd->dst_addr = dest; 1301 dsg->dst_addr = dest;
1341 txd->len = len; 1302 dsg->len = len;
1342 1303
1343 /* Set platform data for m2m */ 1304 /* Set platform data for m2m */
1344 txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1305 txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
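
The memcpy path now expresses its single transfer as a one-entry dsg list, so pl08x_fill_llis_for_desc() can treat memcpy and slave scatter-gather identically. A hedged sketch of the same bookkeeping with simplified stand-in types; xfer/xfer_sg are illustrative, not driver structures:

	#include <linux/errno.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct xfer_sg {
		dma_addr_t src, dst;
		size_t len;
		struct list_head node;
	};

	struct xfer {
		struct list_head sg_list;	/* INIT_LIST_HEAD()ed at alloc */
	};

	static int add_segment(struct xfer *x, dma_addr_t src, dma_addr_t dst,
			       size_t len)
	{
		struct xfer_sg *sg = kzalloc(sizeof(*sg), GFP_NOWAIT);

		if (!sg)
			return -ENOMEM;	/* caller frees the whole xfer */
		sg->src = src;
		sg->dst = dst;
		sg->len = len;
		list_add_tail(&sg->node, &x->sg_list);
		return 0;
	}
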
@@ -1367,19 +1328,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1367 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1328 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1368 struct pl08x_driver_data *pl08x = plchan->host; 1329 struct pl08x_driver_data *pl08x = plchan->host;
1369 struct pl08x_txd *txd; 1330 struct pl08x_txd *txd;
1370 int ret; 1331 struct pl08x_sg *dsg;
1371 1332 struct scatterlist *sg;
1372 /* 1333 dma_addr_t slave_addr;
1373 * Current implementation ASSUMES only one sg 1334 int ret, tmp;
1374 */
1375 if (sg_len != 1) {
1376 dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
1377 __func__);
1378 BUG();
1379 }
1380 1335
1381 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", 1336 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
1382 __func__, sgl->length, plchan->name); 1337 __func__, sgl->length, plchan->name);
1383 1338
1384 txd = pl08x_get_txd(plchan, flags); 1339 txd = pl08x_get_txd(plchan, flags);
1385 if (!txd) { 1340 if (!txd) {
@@ -1398,24 +1353,49 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1398 * channel target address dynamically at runtime. 1353 * channel target address dynamically at runtime.
1399 */ 1354 */
1400 txd->direction = direction; 1355 txd->direction = direction;
1401 txd->len = sgl->length;
1402 1356
1403 if (direction == DMA_TO_DEVICE) { 1357 if (direction == DMA_TO_DEVICE) {
1404 txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1405 txd->cctl = plchan->dst_cctl; 1358 txd->cctl = plchan->dst_cctl;
1406 txd->src_addr = sgl->dma_address; 1359 slave_addr = plchan->dst_addr;
1407 txd->dst_addr = plchan->dst_addr;
1408 } else if (direction == DMA_FROM_DEVICE) { 1360 } else if (direction == DMA_FROM_DEVICE) {
1409 txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1410 txd->cctl = plchan->src_cctl; 1361 txd->cctl = plchan->src_cctl;
1411 txd->src_addr = plchan->src_addr; 1362 slave_addr = plchan->src_addr;
1412 txd->dst_addr = sgl->dma_address;
1413 } else { 1363 } else {
1364 pl08x_free_txd(pl08x, txd);
1414 dev_err(&pl08x->adev->dev, 1365 dev_err(&pl08x->adev->dev,
1415 "%s direction unsupported\n", __func__); 1366 "%s direction unsupported\n", __func__);
1416 return NULL; 1367 return NULL;
1417 } 1368 }
1418 1369
1370 if (plchan->cd->device_fc)
1371 tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER :
1372 PL080_FLOW_PER2MEM_PER;
1373 else
1374 tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER :
1375 PL080_FLOW_PER2MEM;
1376
1377 txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1378
1379 for_each_sg(sgl, sg, sg_len, tmp) {
1380 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
1381 if (!dsg) {
1382 pl08x_free_txd(pl08x, txd);
1383 dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
1384 __func__);
1385 return NULL;
1386 }
1387 list_add_tail(&dsg->node, &txd->dsg_list);
1388
1389 dsg->len = sg_dma_len(sg);
1390 if (direction == DMA_TO_DEVICE) {
1391 dsg->src_addr = sg_phys(sg);
1392 dsg->dst_addr = slave_addr;
1393 } else {
1394 dsg->src_addr = slave_addr;
1395 dsg->dst_addr = sg_phys(sg);
1396 }
1397 }
1398
1419 ret = pl08x_prep_channel_resources(plchan, txd); 1399 ret = pl08x_prep_channel_resources(plchan, txd);
1420 if (ret) 1400 if (ret)
1421 return NULL; 1401 return NULL;
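
The old BUG() on sg_len != 1 is gone: the prep routine now builds one dsg per scatterlist entry with for_each_sg(). A minimal sketch of the iterator, where handle_seg is a hypothetical callback; for_each_sg() also follows chained scatterlists, which open-coded sgl[i] indexing would not:

	#include <linux/scatterlist.h>

	static void walk_sgl(struct scatterlist *sgl, unsigned int sg_len,
			     void (*handle_seg)(dma_addr_t addr, unsigned int len))
	{
		struct scatterlist *sg;
		int i;

		for_each_sg(sgl, sg, sg_len, i)
			handle_seg(sg_dma_address(sg), sg_dma_len(sg));
	}
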
@@ -1489,9 +1469,15 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1489 1469
1490bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) 1470bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
1491{ 1471{
1492 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1472 struct pl08x_dma_chan *plchan;
1493 char *name = chan_id; 1473 char *name = chan_id;
1494 1474
1475 /* Reject channels for devices not bound to this driver */
1476 if (chan->device->dev->driver != &pl08x_amba_driver.drv)
1477 return false;
1478
1479 plchan = to_pl08x_chan(chan);
1480
1495 /* Check that the channel is not taken! */ 1481 /* Check that the channel is not taken! */
1496 if (!strcmp(plchan->name, name)) 1482 if (!strcmp(plchan->name, name))
1497 return true; 1483 return true;
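
The added driver check matters because a filter function is called for every channel of every registered DMA controller, so blindly container_of()-ing a foreign chan into a pl08x_dma_chan would read garbage. Hedged caller-side usage, where "uart-tx" is a made-up channel name from platform data:

	#include <linux/amba/pl08x.h>
	#include <linux/dmaengine.h>

	static struct dma_chan *get_tx_chan(void)
	{
		static char name[] = "uart-tx";	/* hypothetical channel name */
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		/* the filter now safely rejects non-PL08x channels itself */
		return dma_request_channel(mask, pl08x_filter_id, name);
	}
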
@@ -1507,34 +1493,34 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
1507 */ 1493 */
1508static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) 1494static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
1509{ 1495{
1510 u32 val; 1496 writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
1511
1512 val = readl(pl08x->base + PL080_CONFIG);
1513 val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
1514 /* We implicitly clear bit 1 and that means little-endian mode */
1515 val |= PL080_CONFIG_ENABLE;
1516 writel(val, pl08x->base + PL080_CONFIG);
1517} 1497}
1518 1498
1519static void pl08x_unmap_buffers(struct pl08x_txd *txd) 1499static void pl08x_unmap_buffers(struct pl08x_txd *txd)
1520{ 1500{
1521 struct device *dev = txd->tx.chan->device->dev; 1501 struct device *dev = txd->tx.chan->device->dev;
1502 struct pl08x_sg *dsg;
1522 1503
1523 if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 1504 if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
1524 if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) 1505 if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
1525 dma_unmap_single(dev, txd->src_addr, txd->len, 1506 list_for_each_entry(dsg, &txd->dsg_list, node)
1526 DMA_TO_DEVICE); 1507 dma_unmap_single(dev, dsg->src_addr, dsg->len,
1527 else 1508 DMA_TO_DEVICE);
1528 dma_unmap_page(dev, txd->src_addr, txd->len, 1509 else {
1529 DMA_TO_DEVICE); 1510 list_for_each_entry(dsg, &txd->dsg_list, node)
1511 dma_unmap_page(dev, dsg->src_addr, dsg->len,
1512 DMA_TO_DEVICE);
1513 }
1530 } 1514 }
1531 if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 1515 if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
1532 if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) 1516 if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
1533 dma_unmap_single(dev, txd->dst_addr, txd->len, 1517 list_for_each_entry(dsg, &txd->dsg_list, node)
1534 DMA_FROM_DEVICE); 1518 dma_unmap_single(dev, dsg->dst_addr, dsg->len,
1519 DMA_FROM_DEVICE);
1535 else 1520 else
1536 dma_unmap_page(dev, txd->dst_addr, txd->len, 1521 list_for_each_entry(dsg, &txd->dsg_list, node)
1537 DMA_FROM_DEVICE); 1522 dma_unmap_page(dev, dsg->dst_addr, dsg->len,
1523 DMA_FROM_DEVICE);
1538 } 1524 }
1539} 1525}
1540 1526
@@ -1589,8 +1575,8 @@ static void pl08x_tasklet(unsigned long data)
1589 */ 1575 */
1590 list_for_each_entry(waiting, &pl08x->memcpy.channels, 1576 list_for_each_entry(waiting, &pl08x->memcpy.channels,
1591 chan.device_node) { 1577 chan.device_node) {
1592 if (waiting->state == PL08X_CHAN_WAITING && 1578 if (waiting->state == PL08X_CHAN_WAITING &&
1593 waiting->waiting != NULL) { 1579 waiting->waiting != NULL) {
1594 int ret; 1580 int ret;
1595 1581
1596 /* This should REALLY not fail now */ 1582 /* This should REALLY not fail now */
@@ -1630,38 +1616,40 @@ static void pl08x_tasklet(unsigned long data)
1630static irqreturn_t pl08x_irq(int irq, void *dev) 1616static irqreturn_t pl08x_irq(int irq, void *dev)
1631{ 1617{
1632 struct pl08x_driver_data *pl08x = dev; 1618 struct pl08x_driver_data *pl08x = dev;
1633 u32 mask = 0; 1619 u32 mask = 0, err, tc, i;
1634 u32 val; 1620
1635 int i; 1621 /* check & clear - ERR & TC interrupts */
1636 1622 err = readl(pl08x->base + PL080_ERR_STATUS);
1637 val = readl(pl08x->base + PL080_ERR_STATUS); 1623 if (err) {
1638 if (val) { 1624 dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
1639 /* An error interrupt (on one or more channels) */ 1625 __func__, err);
1640 dev_err(&pl08x->adev->dev, 1626 writel(err, pl08x->base + PL080_ERR_CLEAR);
1641 "%s error interrupt, register value 0x%08x\n",
1642 __func__, val);
1643 /*
1644 * Simply clear ALL PL08X error interrupts,
1645 * regardless of channel and cause
1646 * FIXME: should be 0x00000003 on PL081 really.
1647 */
1648 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
1649 } 1627 }
1650 val = readl(pl08x->base + PL080_INT_STATUS); 1628 tc = readl(pl08x->base + PL080_INT_STATUS);
1629 if (tc)
1630 writel(tc, pl08x->base + PL080_TC_CLEAR);
1631
1632 if (!err && !tc)
1633 return IRQ_NONE;
1634
1651 for (i = 0; i < pl08x->vd->channels; i++) { 1635 for (i = 0; i < pl08x->vd->channels; i++) {
1652 if ((1 << i) & val) { 1636 if (((1 << i) & err) || ((1 << i) & tc)) {
1653 /* Locate physical channel */ 1637 /* Locate physical channel */
1654 struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; 1638 struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
1655 struct pl08x_dma_chan *plchan = phychan->serving; 1639 struct pl08x_dma_chan *plchan = phychan->serving;
1656 1640
1641 if (!plchan) {
1642 dev_err(&pl08x->adev->dev,
1643 "%s Error TC interrupt on unused channel: 0x%08x\n",
1644 __func__, i);
1645 continue;
1646 }
1647
1657 /* Schedule tasklet on this channel */ 1648 /* Schedule tasklet on this channel */
1658 tasklet_schedule(&plchan->tasklet); 1649 tasklet_schedule(&plchan->tasklet);
1659
1660 mask |= (1 << i); 1650 mask |= (1 << i);
1661 } 1651 }
1662 } 1652 }
1663 /* Clear only the terminal interrupts on channels we processed */
1664 writel(mask, pl08x->base + PL080_TC_CLEAR);
1665 1653
1666 return mask ? IRQ_HANDLED : IRQ_NONE; 1654 return mask ? IRQ_HANDLED : IRQ_NONE;
1667} 1655}
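
The rewritten handler reads both status registers up front, acks exactly what it saw, and returns IRQ_NONE when neither fired, which is the standard discipline for shared interrupt lines. A generic sketch of the same shape; the register map here is invented for illustration:

	#include <linux/interrupt.h>
	#include <linux/io.h>

	/* hypothetical register layout, for illustration only */
	struct my_regs { void __iomem *base; };
	#define MY_ERR_STATUS	0x00
	#define MY_ERR_CLEAR	0x04
	#define MY_TC_STATUS	0x08
	#define MY_TC_CLEAR	0x0c

	static irqreturn_t my_irq(int irq, void *dev)
	{
		struct my_regs *r = dev;
		u32 err = readl(r->base + MY_ERR_STATUS);
		u32 tc = readl(r->base + MY_TC_STATUS);

		/* ack exactly what we observed, as the new handler does */
		if (err)
			writel(err, r->base + MY_ERR_CLEAR);
		if (tc)
			writel(tc, r->base + MY_TC_CLEAR);
		if (!err && !tc)
			return IRQ_NONE;	/* shared line: not ours */

		/* ... locate channels and schedule their tasklets ... */
		return IRQ_HANDLED;
	}
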
@@ -1685,9 +1673,7 @@ static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
1685 * Make a local wrapper to hold required data 1673 * Make a local wrapper to hold required data
1686 */ 1674 */
1687static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, 1675static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1688 struct dma_device *dmadev, 1676 struct dma_device *dmadev, unsigned int channels, bool slave)
1689 unsigned int channels,
1690 bool slave)
1691{ 1677{
1692 struct pl08x_dma_chan *chan; 1678 struct pl08x_dma_chan *chan;
1693 int i; 1679 int i;
@@ -1700,7 +1686,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1700 * to cope with that situation. 1686 * to cope with that situation.
1701 */ 1687 */
1702 for (i = 0; i < channels; i++) { 1688 for (i = 0; i < channels; i++) {
1703 chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL); 1689 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1704 if (!chan) { 1690 if (!chan) {
1705 dev_err(&pl08x->adev->dev, 1691 dev_err(&pl08x->adev->dev,
1706 "%s no memory for channel\n", __func__); 1692 "%s no memory for channel\n", __func__);
@@ -1728,7 +1714,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1728 kfree(chan); 1714 kfree(chan);
1729 continue; 1715 continue;
1730 } 1716 }
1731 dev_info(&pl08x->adev->dev, 1717 dev_dbg(&pl08x->adev->dev,
1732 "initialize virtual channel \"%s\"\n", 1718 "initialize virtual channel \"%s\"\n",
1733 chan->name); 1719 chan->name);
1734 1720
@@ -1837,9 +1823,9 @@ static const struct file_operations pl08x_debugfs_operations = {
1837static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 1823static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
1838{ 1824{
1839 /* Expose a simple debugfs interface to view all clocks */ 1825 /* Expose a simple debugfs interface to view all clocks */
1840 (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO, 1826 (void) debugfs_create_file(dev_name(&pl08x->adev->dev),
1841 NULL, pl08x, 1827 S_IFREG | S_IRUGO, NULL, pl08x,
1842 &pl08x_debugfs_operations); 1828 &pl08x_debugfs_operations);
1843} 1829}
1844 1830
1845#else 1831#else
@@ -1860,12 +1846,15 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1860 return ret; 1846 return ret;
1861 1847
1862 /* Create the driver state holder */ 1848 /* Create the driver state holder */
1863 pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL); 1849 pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
1864 if (!pl08x) { 1850 if (!pl08x) {
1865 ret = -ENOMEM; 1851 ret = -ENOMEM;
1866 goto out_no_pl08x; 1852 goto out_no_pl08x;
1867 } 1853 }
1868 1854
1855 pm_runtime_set_active(&adev->dev);
1856 pm_runtime_enable(&adev->dev);
1857
1869 /* Initialize memcpy engine */ 1858 /* Initialize memcpy engine */
1870 dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); 1859 dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
1871 pl08x->memcpy.dev = &adev->dev; 1860 pl08x->memcpy.dev = &adev->dev;
@@ -1939,7 +1928,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1939 } 1928 }
1940 1929
1941 /* Initialize physical channels */ 1930 /* Initialize physical channels */
1942 pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)), 1931 pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)),
1943 GFP_KERNEL); 1932 GFP_KERNEL);
1944 if (!pl08x->phy_chans) { 1933 if (!pl08x->phy_chans) {
1945 dev_err(&adev->dev, "%s failed to allocate " 1934 dev_err(&adev->dev, "%s failed to allocate "
@@ -1956,9 +1945,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1956 spin_lock_init(&ch->lock); 1945 spin_lock_init(&ch->lock);
1957 ch->serving = NULL; 1946 ch->serving = NULL;
1958 ch->signal = -1; 1947 ch->signal = -1;
1959 dev_info(&adev->dev, 1948 dev_dbg(&adev->dev, "physical channel %d is %s\n",
1960 "physical channel %d is %s\n", i, 1949 i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
1961 pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
1962 } 1950 }
1963 1951
1964 /* Register as many memcpy channels as there are physical channels */ 1952 /* Register as many memcpy channels as there are physical channels */
@@ -1974,8 +1962,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1974 1962
1975 /* Register slave channels */ 1963 /* Register slave channels */
1976 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, 1964 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
1977 pl08x->pd->num_slave_channels, 1965 pl08x->pd->num_slave_channels, true);
1978 true);
1979 if (ret <= 0) { 1966 if (ret <= 0) {
1980 dev_warn(&pl08x->adev->dev, 1967 dev_warn(&pl08x->adev->dev,
1981 "%s failed to enumerate slave channels - %d\n", 1968 "%s failed to enumerate slave channels - %d\n",
@@ -2005,6 +1992,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2005 dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n", 1992 dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
2006 amba_part(adev), amba_rev(adev), 1993 amba_part(adev), amba_rev(adev),
2007 (unsigned long long)adev->res.start, adev->irq[0]); 1994 (unsigned long long)adev->res.start, adev->irq[0]);
1995
1996 pm_runtime_put(&adev->dev);
2008 return 0; 1997 return 0;
2009 1998
2010out_no_slave_reg: 1999out_no_slave_reg:
@@ -2023,6 +2012,9 @@ out_no_ioremap:
2023 dma_pool_destroy(pl08x->pool); 2012 dma_pool_destroy(pl08x->pool);
2024out_no_lli_pool: 2013out_no_lli_pool:
2025out_no_platdata: 2014out_no_platdata:
2015 pm_runtime_put(&adev->dev);
2016 pm_runtime_disable(&adev->dev);
2017
2026 kfree(pl08x); 2018 kfree(pl08x);
2027out_no_pl08x: 2019out_no_pl08x:
2028 amba_release_regions(adev); 2020 amba_release_regions(adev);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 6a483eac7b3f..fcfa0a8b5c59 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -107,10 +107,11 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
107{ 107{
108 struct at_desc *desc, *_desc; 108 struct at_desc *desc, *_desc;
109 struct at_desc *ret = NULL; 109 struct at_desc *ret = NULL;
110 unsigned long flags;
110 unsigned int i = 0; 111 unsigned int i = 0;
111 LIST_HEAD(tmp_list); 112 LIST_HEAD(tmp_list);
112 113
113 spin_lock_bh(&atchan->lock); 114 spin_lock_irqsave(&atchan->lock, flags);
114 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { 115 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
115 i++; 116 i++;
116 if (async_tx_test_ack(&desc->txd)) { 117 if (async_tx_test_ack(&desc->txd)) {
@@ -121,7 +122,7 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
121 dev_dbg(chan2dev(&atchan->chan_common), 122 dev_dbg(chan2dev(&atchan->chan_common),
122 "desc %p not ACKed\n", desc); 123 "desc %p not ACKed\n", desc);
123 } 124 }
124 spin_unlock_bh(&atchan->lock); 125 spin_unlock_irqrestore(&atchan->lock, flags);
125 dev_vdbg(chan2dev(&atchan->chan_common), 126 dev_vdbg(chan2dev(&atchan->chan_common),
126 "scanned %u descriptors on freelist\n", i); 127 "scanned %u descriptors on freelist\n", i);
127 128
@@ -129,9 +130,9 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
129 if (!ret) { 130 if (!ret) {
130 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC); 131 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
131 if (ret) { 132 if (ret) {
132 spin_lock_bh(&atchan->lock); 133 spin_lock_irqsave(&atchan->lock, flags);
133 atchan->descs_allocated++; 134 atchan->descs_allocated++;
134 spin_unlock_bh(&atchan->lock); 135 spin_unlock_irqrestore(&atchan->lock, flags);
135 } else { 136 } else {
136 dev_err(chan2dev(&atchan->chan_common), 137 dev_err(chan2dev(&atchan->chan_common),
137 "not enough descriptors available\n"); 138 "not enough descriptors available\n");
@@ -150,8 +151,9 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
150{ 151{
151 if (desc) { 152 if (desc) {
152 struct at_desc *child; 153 struct at_desc *child;
154 unsigned long flags;
153 155
154 spin_lock_bh(&atchan->lock); 156 spin_lock_irqsave(&atchan->lock, flags);
155 list_for_each_entry(child, &desc->tx_list, desc_node) 157 list_for_each_entry(child, &desc->tx_list, desc_node)
156 dev_vdbg(chan2dev(&atchan->chan_common), 158 dev_vdbg(chan2dev(&atchan->chan_common),
157 "moving child desc %p to freelist\n", 159 "moving child desc %p to freelist\n",
@@ -160,7 +162,7 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
160 dev_vdbg(chan2dev(&atchan->chan_common), 162 dev_vdbg(chan2dev(&atchan->chan_common),
161 "moving desc %p to freelist\n", desc); 163 "moving desc %p to freelist\n", desc);
162 list_add(&desc->desc_node, &atchan->free_list); 164 list_add(&desc->desc_node, &atchan->free_list);
163 spin_unlock_bh(&atchan->lock); 165 spin_unlock_irqrestore(&atchan->lock, flags);
164 } 166 }
165} 167}
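
The at_hdmac changes from here on systematically replace spin_lock_bh() with spin_lock_irqsave(): the irqsave form saves and restores the caller's interrupt state, so it is safe in process, softirq and hardirq context alike, whereas the _bh form must not be used where interrupts may already be disabled. A minimal sketch of the resulting pattern:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(mylock);

	static void touch_shared_state(void)
	{
		unsigned long flags;

		/* saves the current IRQ state, so this is safe whether the
		 * caller runs in process, softirq or hardirq context */
		spin_lock_irqsave(&mylock, flags);
		/* ... modify state shared with the interrupt handler ... */
		spin_unlock_irqrestore(&mylock, flags);
	}
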
166 168
@@ -299,7 +301,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
299 301
300 /* for cyclic transfers, 302 /* for cyclic transfers,
301 * no need to replay callback function while stopping */ 303 * no need to replay callback function while stopping */
302 if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) { 304 if (!atc_chan_is_cyclic(atchan)) {
303 dma_async_tx_callback callback = txd->callback; 305 dma_async_tx_callback callback = txd->callback;
304 void *param = txd->callback_param; 306 void *param = txd->callback_param;
305 307
@@ -471,16 +473,17 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
471static void atc_tasklet(unsigned long data) 473static void atc_tasklet(unsigned long data)
472{ 474{
473 struct at_dma_chan *atchan = (struct at_dma_chan *)data; 475 struct at_dma_chan *atchan = (struct at_dma_chan *)data;
476 unsigned long flags;
474 477
475 spin_lock(&atchan->lock); 478 spin_lock_irqsave(&atchan->lock, flags);
476 if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status)) 479 if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
477 atc_handle_error(atchan); 480 atc_handle_error(atchan);
478 else if (test_bit(ATC_IS_CYCLIC, &atchan->status)) 481 else if (atc_chan_is_cyclic(atchan))
479 atc_handle_cyclic(atchan); 482 atc_handle_cyclic(atchan);
480 else 483 else
481 atc_advance_work(atchan); 484 atc_advance_work(atchan);
482 485
483 spin_unlock(&atchan->lock); 486 spin_unlock_irqrestore(&atchan->lock, flags);
484} 487}
485 488
486static irqreturn_t at_dma_interrupt(int irq, void *dev_id) 489static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
@@ -539,8 +542,9 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
539 struct at_desc *desc = txd_to_at_desc(tx); 542 struct at_desc *desc = txd_to_at_desc(tx);
540 struct at_dma_chan *atchan = to_at_dma_chan(tx->chan); 543 struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
541 dma_cookie_t cookie; 544 dma_cookie_t cookie;
545 unsigned long flags;
542 546
543 spin_lock_bh(&atchan->lock); 547 spin_lock_irqsave(&atchan->lock, flags);
544 cookie = atc_assign_cookie(atchan, desc); 548 cookie = atc_assign_cookie(atchan, desc);
545 549
546 if (list_empty(&atchan->active_list)) { 550 if (list_empty(&atchan->active_list)) {
@@ -554,7 +558,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
554 list_add_tail(&desc->desc_node, &atchan->queue); 558 list_add_tail(&desc->desc_node, &atchan->queue);
555 } 559 }
556 560
557 spin_unlock_bh(&atchan->lock); 561 spin_unlock_irqrestore(&atchan->lock, flags);
558 562
559 return cookie; 563 return cookie;
560} 564}
@@ -927,28 +931,29 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
927 struct at_dma_chan *atchan = to_at_dma_chan(chan); 931 struct at_dma_chan *atchan = to_at_dma_chan(chan);
928 struct at_dma *atdma = to_at_dma(chan->device); 932 struct at_dma *atdma = to_at_dma(chan->device);
929 int chan_id = atchan->chan_common.chan_id; 933 int chan_id = atchan->chan_common.chan_id;
934 unsigned long flags;
930 935
931 LIST_HEAD(list); 936 LIST_HEAD(list);
932 937
933 dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd); 938 dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
934 939
935 if (cmd == DMA_PAUSE) { 940 if (cmd == DMA_PAUSE) {
936 spin_lock_bh(&atchan->lock); 941 spin_lock_irqsave(&atchan->lock, flags);
937 942
938 dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); 943 dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
939 set_bit(ATC_IS_PAUSED, &atchan->status); 944 set_bit(ATC_IS_PAUSED, &atchan->status);
940 945
941 spin_unlock_bh(&atchan->lock); 946 spin_unlock_irqrestore(&atchan->lock, flags);
942 } else if (cmd == DMA_RESUME) { 947 } else if (cmd == DMA_RESUME) {
943 if (!test_bit(ATC_IS_PAUSED, &atchan->status)) 948 if (!atc_chan_is_paused(atchan))
944 return 0; 949 return 0;
945 950
946 spin_lock_bh(&atchan->lock); 951 spin_lock_irqsave(&atchan->lock, flags);
947 952
948 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); 953 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
949 clear_bit(ATC_IS_PAUSED, &atchan->status); 954 clear_bit(ATC_IS_PAUSED, &atchan->status);
950 955
951 spin_unlock_bh(&atchan->lock); 956 spin_unlock_irqrestore(&atchan->lock, flags);
952 } else if (cmd == DMA_TERMINATE_ALL) { 957 } else if (cmd == DMA_TERMINATE_ALL) {
953 struct at_desc *desc, *_desc; 958 struct at_desc *desc, *_desc;
954 /* 959 /*
@@ -957,7 +962,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
957 * channel. We still have to poll the channel enable bit due 962 * channel. We still have to poll the channel enable bit due
958 * to AHB/HSB limitations. 963 * to AHB/HSB limitations.
959 */ 964 */
960 spin_lock_bh(&atchan->lock); 965 spin_lock_irqsave(&atchan->lock, flags);
961 966
962 /* disabling channel: must also remove suspend state */ 967 /* disabling channel: must also remove suspend state */
963 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); 968 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
@@ -978,7 +983,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
978 /* if channel dedicated to cyclic operations, free it */ 983 /* if channel dedicated to cyclic operations, free it */
979 clear_bit(ATC_IS_CYCLIC, &atchan->status); 984 clear_bit(ATC_IS_CYCLIC, &atchan->status);
980 985
981 spin_unlock_bh(&atchan->lock); 986 spin_unlock_irqrestore(&atchan->lock, flags);
982 } else { 987 } else {
983 return -ENXIO; 988 return -ENXIO;
984 } 989 }
@@ -1004,9 +1009,10 @@ atc_tx_status(struct dma_chan *chan,
1004 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1009 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1005 dma_cookie_t last_used; 1010 dma_cookie_t last_used;
1006 dma_cookie_t last_complete; 1011 dma_cookie_t last_complete;
1012 unsigned long flags;
1007 enum dma_status ret; 1013 enum dma_status ret;
1008 1014
1009 spin_lock_bh(&atchan->lock); 1015 spin_lock_irqsave(&atchan->lock, flags);
1010 1016
1011 last_complete = atchan->completed_cookie; 1017 last_complete = atchan->completed_cookie;
1012 last_used = chan->cookie; 1018 last_used = chan->cookie;
@@ -1021,7 +1027,7 @@ atc_tx_status(struct dma_chan *chan,
1021 ret = dma_async_is_complete(cookie, last_complete, last_used); 1027 ret = dma_async_is_complete(cookie, last_complete, last_used);
1022 } 1028 }
1023 1029
1024 spin_unlock_bh(&atchan->lock); 1030 spin_unlock_irqrestore(&atchan->lock, flags);
1025 1031
1026 if (ret != DMA_SUCCESS) 1032 if (ret != DMA_SUCCESS)
1027 dma_set_tx_state(txstate, last_complete, last_used, 1033 dma_set_tx_state(txstate, last_complete, last_used,
@@ -1029,7 +1035,7 @@ atc_tx_status(struct dma_chan *chan,
1029 else 1035 else
1030 dma_set_tx_state(txstate, last_complete, last_used, 0); 1036 dma_set_tx_state(txstate, last_complete, last_used, 0);
1031 1037
1032 if (test_bit(ATC_IS_PAUSED, &atchan->status)) 1038 if (atc_chan_is_paused(atchan))
1033 ret = DMA_PAUSED; 1039 ret = DMA_PAUSED;
1034 1040
1035 dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n", 1041 dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
@@ -1046,18 +1052,19 @@ atc_tx_status(struct dma_chan *chan,
1046static void atc_issue_pending(struct dma_chan *chan) 1052static void atc_issue_pending(struct dma_chan *chan)
1047{ 1053{
1048 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1054 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1055 unsigned long flags;
1049 1056
1050 dev_vdbg(chan2dev(chan), "issue_pending\n"); 1057 dev_vdbg(chan2dev(chan), "issue_pending\n");
1051 1058
1052 /* Not needed for cyclic transfers */ 1059 /* Not needed for cyclic transfers */
1053 if (test_bit(ATC_IS_CYCLIC, &atchan->status)) 1060 if (atc_chan_is_cyclic(atchan))
1054 return; 1061 return;
1055 1062
1056 spin_lock_bh(&atchan->lock); 1063 spin_lock_irqsave(&atchan->lock, flags);
1057 if (!atc_chan_is_enabled(atchan)) { 1064 if (!atc_chan_is_enabled(atchan)) {
1058 atc_advance_work(atchan); 1065 atc_advance_work(atchan);
1059 } 1066 }
1060 spin_unlock_bh(&atchan->lock); 1067 spin_unlock_irqrestore(&atchan->lock, flags);
1061} 1068}
1062 1069
1063/** 1070/**
@@ -1073,6 +1080,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
1073 struct at_dma *atdma = to_at_dma(chan->device); 1080 struct at_dma *atdma = to_at_dma(chan->device);
1074 struct at_desc *desc; 1081 struct at_desc *desc;
1075 struct at_dma_slave *atslave; 1082 struct at_dma_slave *atslave;
1083 unsigned long flags;
1076 int i; 1084 int i;
1077 u32 cfg; 1085 u32 cfg;
1078 LIST_HEAD(tmp_list); 1086 LIST_HEAD(tmp_list);
@@ -1116,11 +1124,11 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
1116 list_add_tail(&desc->desc_node, &tmp_list); 1124 list_add_tail(&desc->desc_node, &tmp_list);
1117 } 1125 }
1118 1126
1119 spin_lock_bh(&atchan->lock); 1127 spin_lock_irqsave(&atchan->lock, flags);
1120 atchan->descs_allocated = i; 1128 atchan->descs_allocated = i;
1121 list_splice(&tmp_list, &atchan->free_list); 1129 list_splice(&tmp_list, &atchan->free_list);
1122 atchan->completed_cookie = chan->cookie = 1; 1130 atchan->completed_cookie = chan->cookie = 1;
1123 spin_unlock_bh(&atchan->lock); 1131 spin_unlock_irqrestore(&atchan->lock, flags);
1124 1132
1125 /* channel parameters */ 1133 /* channel parameters */
1126 channel_writel(atchan, CFG, cfg); 1134 channel_writel(atchan, CFG, cfg);
@@ -1260,12 +1268,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
1260 1268
1261 /* initialize channels related values */ 1269 /* initialize channels related values */
1262 INIT_LIST_HEAD(&atdma->dma_common.channels); 1270 INIT_LIST_HEAD(&atdma->dma_common.channels);
1263 for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) { 1271 for (i = 0; i < pdata->nr_channels; i++) {
1264 struct at_dma_chan *atchan = &atdma->chan[i]; 1272 struct at_dma_chan *atchan = &atdma->chan[i];
1265 1273
1266 atchan->chan_common.device = &atdma->dma_common; 1274 atchan->chan_common.device = &atdma->dma_common;
1267 atchan->chan_common.cookie = atchan->completed_cookie = 1; 1275 atchan->chan_common.cookie = atchan->completed_cookie = 1;
1268 atchan->chan_common.chan_id = i;
1269 list_add_tail(&atchan->chan_common.device_node, 1276 list_add_tail(&atchan->chan_common.device_node,
1270 &atdma->dma_common.channels); 1277 &atdma->dma_common.channels);
1271 1278
@@ -1293,22 +1300,20 @@ static int __init at_dma_probe(struct platform_device *pdev)
1293 if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) 1300 if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
1294 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; 1301 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
1295 1302
1296 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) 1303 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
1297 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; 1304 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
1298 1305 /* controller can do slave DMA: can trigger cyclic transfers */
1299 if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask)) 1306 dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
1300 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; 1307 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
1301
1302 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
1303 dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
1304 atdma->dma_common.device_control = atc_control; 1308 atdma->dma_common.device_control = atc_control;
1309 }
1305 1310
1306 dma_writel(atdma, EN, AT_DMA_ENABLE); 1311 dma_writel(atdma, EN, AT_DMA_ENABLE);
1307 1312
1308 dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n", 1313 dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
1309 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", 1314 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
1310 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "", 1315 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
1311 atdma->dma_common.chancnt); 1316 pdata->nr_channels);
1312 1317
1313 dma_async_device_register(&atdma->dma_common); 1318 dma_async_device_register(&atdma->dma_common);
1314 1319
@@ -1377,27 +1382,112 @@ static void at_dma_shutdown(struct platform_device *pdev)
1377 clk_disable(atdma->clk); 1382 clk_disable(atdma->clk);
1378} 1383}
1379 1384
1385static int at_dma_prepare(struct device *dev)
1386{
1387 struct platform_device *pdev = to_platform_device(dev);
1388 struct at_dma *atdma = platform_get_drvdata(pdev);
1389 struct dma_chan *chan, *_chan;
1390
1391 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1392 device_node) {
1393 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1394 /* wait for transaction completion (except in cyclic case) */
1395 if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
1396 return -EAGAIN;
1397 }
1398 return 0;
1399}
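
at_dma_prepare() is a .prepare callback: it runs before the *_noirq phase, and returning -EAGAIN makes the PM core abort the current suspend attempt and retry later, letting in-flight non-cyclic transfers drain. Hedged sketch of the same idea, where is_busy() is a hypothetical per-driver test:

	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	static bool is_busy(struct device *dev)
	{
		return false;	/* hypothetical: query the hardware here */
	}

	static int my_prepare(struct device *dev)
	{
		/* -EAGAIN aborts this suspend attempt; the PM core retries */
		return is_busy(dev) ? -EAGAIN : 0;
	}
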
1400
1401static void atc_suspend_cyclic(struct at_dma_chan *atchan)
1402{
1403 struct dma_chan *chan = &atchan->chan_common;
1404
 1405 /* The channel should already have been paused by its user;
 1406 * do it here anyway if that hasn't been done */
1407 if (!atc_chan_is_paused(atchan)) {
1408 dev_warn(chan2dev(chan),
1409 "cyclic channel not paused, should be done by channel user\n");
1410 atc_control(chan, DMA_PAUSE, 0);
1411 }
1412
1413 /* now preserve additional data for cyclic operations */
1414 /* next descriptor address in the cyclic list */
1415 atchan->save_dscr = channel_readl(atchan, DSCR);
1416
1417 vdbg_dump_regs(atchan);
1418}
1419
1380static int at_dma_suspend_noirq(struct device *dev) 1420static int at_dma_suspend_noirq(struct device *dev)
1381{ 1421{
1382 struct platform_device *pdev = to_platform_device(dev); 1422 struct platform_device *pdev = to_platform_device(dev);
1383 struct at_dma *atdma = platform_get_drvdata(pdev); 1423 struct at_dma *atdma = platform_get_drvdata(pdev);
1424 struct dma_chan *chan, *_chan;
1384 1425
1385 at_dma_off(platform_get_drvdata(pdev)); 1426 /* preserve data */
1427 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1428 device_node) {
1429 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1430
1431 if (atc_chan_is_cyclic(atchan))
1432 atc_suspend_cyclic(atchan);
1433 atchan->save_cfg = channel_readl(atchan, CFG);
1434 }
1435 atdma->save_imr = dma_readl(atdma, EBCIMR);
1436
1437 /* disable DMA controller */
1438 at_dma_off(atdma);
1386 clk_disable(atdma->clk); 1439 clk_disable(atdma->clk);
1387 return 0; 1440 return 0;
1388} 1441}
1389 1442
1443static void atc_resume_cyclic(struct at_dma_chan *atchan)
1444{
1445 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
1446
1447 /* restore channel status for cyclic descriptors list:
1448 * next descriptor in the cyclic list at the time of suspend */
1449 channel_writel(atchan, SADDR, 0);
1450 channel_writel(atchan, DADDR, 0);
1451 channel_writel(atchan, CTRLA, 0);
1452 channel_writel(atchan, CTRLB, 0);
1453 channel_writel(atchan, DSCR, atchan->save_dscr);
1454 dma_writel(atdma, CHER, atchan->mask);
1455
 1456 /* the channel pause status should be removed by the channel
 1457 * user; we cannot take the initiative to do it here */
1458
1459 vdbg_dump_regs(atchan);
1460}
1461
1390static int at_dma_resume_noirq(struct device *dev) 1462static int at_dma_resume_noirq(struct device *dev)
1391{ 1463{
1392 struct platform_device *pdev = to_platform_device(dev); 1464 struct platform_device *pdev = to_platform_device(dev);
1393 struct at_dma *atdma = platform_get_drvdata(pdev); 1465 struct at_dma *atdma = platform_get_drvdata(pdev);
1466 struct dma_chan *chan, *_chan;
1394 1467
1468 /* bring back DMA controller */
1395 clk_enable(atdma->clk); 1469 clk_enable(atdma->clk);
1396 dma_writel(atdma, EN, AT_DMA_ENABLE); 1470 dma_writel(atdma, EN, AT_DMA_ENABLE);
1471
1472 /* clear any pending interrupt */
1473 while (dma_readl(atdma, EBCISR))
1474 cpu_relax();
1475
1476 /* restore saved data */
1477 dma_writel(atdma, EBCIER, atdma->save_imr);
1478 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1479 device_node) {
1480 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1481
1482 channel_writel(atchan, CFG, atchan->save_cfg);
1483 if (atc_chan_is_cyclic(atchan))
1484 atc_resume_cyclic(atchan);
1485 }
1397 return 0; 1486 return 0;
1398} 1487}
1399 1488
1400static const struct dev_pm_ops at_dma_dev_pm_ops = { 1489static const struct dev_pm_ops at_dma_dev_pm_ops = {
1490 .prepare = at_dma_prepare,
1401 .suspend_noirq = at_dma_suspend_noirq, 1491 .suspend_noirq = at_dma_suspend_noirq,
1402 .resume_noirq = at_dma_resume_noirq, 1492 .resume_noirq = at_dma_resume_noirq,
1403}; 1493};
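
The at_hdmac changes above make the suspend path channel-aware: each channel's CFG register is snapshotted into save_cfg (plus the next-descriptor pointer for cyclic channels), the controller-wide interrupt mask goes into save_imr, and resume replays them after re-enabling the block. A minimal sketch of that save/restore pattern follows; the names (my_chan, MY_CFG, my_readl/my_writel) are illustrative stand-ins, not this driver's API.

/* Sketch: snapshot a per-channel register on suspend, replay on resume.
 * All names here are hypothetical stand-ins for the pattern above. */
struct my_chan {
        u32 save_cfg;                           /* filled on suspend */
};

static void my_chan_suspend(struct my_chan *c)
{
        c->save_cfg = my_readl(c, MY_CFG);      /* snapshot hw state */
}

static void my_chan_resume(struct my_chan *c)
{
        my_writel(c, MY_CFG, c->save_cfg);      /* replay after wake */
}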
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 087dbf1dd39c..aa4c9aebab7c 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -204,6 +204,9 @@ enum atc_status {
204 * @status: transmit status information from irq/prep* functions 204 * @status: transmit status information from irq/prep* functions
205 * to tasklet (use atomic operations) 205 * to tasklet (use atomic operations)
206 * @tasklet: bottom half to finish transaction work 206 * @tasklet: bottom half to finish transaction work
207 * @save_cfg: configuration register that is saved on suspend/resume cycle
208 * @save_dscr: for cyclic operations, preserve next descriptor address in
209 * the cyclic list on suspend/resume cycle
207 * @lock: serializes enqueue/dequeue operations to descriptors lists 210 * @lock: serializes enqueue/dequeue operations to descriptors lists
208 * @completed_cookie: identifier for the most recently completed operation 211 * @completed_cookie: identifier for the most recently completed operation
209 * @active_list: list of descriptors the dmaengine is currently running on 212 * @active_list: list of descriptors the dmaengine is currently running on
@@ -218,6 +221,8 @@ struct at_dma_chan {
218 u8 mask; 221 u8 mask;
219 unsigned long status; 222 unsigned long status;
220 struct tasklet_struct tasklet; 223 struct tasklet_struct tasklet;
224 u32 save_cfg;
225 u32 save_dscr;
221 226
222 spinlock_t lock; 227 spinlock_t lock;
223 228
@@ -248,6 +253,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
248 * @chan_common: common dmaengine dma_device object members 253 * @chan_common: common dmaengine dma_device object members
249 * @ch_regs: memory mapped register base 254 * @ch_regs: memory mapped register base
250 * @clk: dma controller clock 255 * @clk: dma controller clock
256 * @save_imr: interrupt mask register that is saved on suspend/resume cycle
251 * @all_chan_mask: all channels available in a mask 257 * @all_chan_mask: all channels available in a mask
252 * @dma_desc_pool: base of DMA descriptor region (DMA address) 258 * @dma_desc_pool: base of DMA descriptor region (DMA address)
253 * @chan: channels table to store at_dma_chan structures 259 * @chan: channels table to store at_dma_chan structures
@@ -256,6 +262,7 @@ struct at_dma {
256 struct dma_device dma_common; 262 struct dma_device dma_common;
257 void __iomem *regs; 263 void __iomem *regs;
258 struct clk *clk; 264 struct clk *clk;
265 u32 save_imr;
259 266
260 u8 all_chan_mask; 267 u8 all_chan_mask;
261 268
@@ -355,6 +362,23 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
355 return !!(dma_readl(atdma, CHSR) & atchan->mask); 362 return !!(dma_readl(atdma, CHSR) & atchan->mask);
356} 363}
357 364
365/**
366 * atc_chan_is_paused - test channel pause/resume status
367 * @atchan: channel whose status we want to test
368 */
369static inline int atc_chan_is_paused(struct at_dma_chan *atchan)
370{
371 return test_bit(ATC_IS_PAUSED, &atchan->status);
372}
373
374/**
375 * atc_chan_is_cyclic - test if given channel has cyclic property set
376 * @atchan: channel whose status we want to test
377 */
378static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan)
379{
380 return test_bit(ATC_IS_CYCLIC, &atchan->status);
381}
358 382
359/** 383/**
360 * set_desc_eol - set end-of-link to descriptor so it will end transfer 384 * set_desc_eol - set end-of-link to descriptor so it will end transfer
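
The two helpers added above follow the usual kernel idiom for per-channel flags: keep them in an unsigned long status word and query them with test_bit(), which stays coherent with set_bit()/clear_bit() users elsewhere in the driver. A hedged sketch of the idiom, with illustrative flag names:

#include <linux/bitops.h>

#define CH_IS_PAUSED    0               /* bit numbers, illustrative */
#define CH_IS_CYCLIC    1

struct ch_state {
        unsigned long status;           /* touched via {set,clear,test}_bit() */
};

static inline int ch_is_paused(struct ch_state *ch)
{
        return test_bit(CH_IS_PAUSED, &ch->status);
}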
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 765f5ff22304..eb1d8641cf5c 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -10,6 +10,7 @@
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/dma-mapping.h> 11#include <linux/dma-mapping.h>
12#include <linux/dmaengine.h> 12#include <linux/dmaengine.h>
13#include <linux/freezer.h>
13#include <linux/init.h> 14#include <linux/init.h>
14#include <linux/kthread.h> 15#include <linux/kthread.h>
15#include <linux/module.h> 16#include <linux/module.h>
@@ -251,6 +252,7 @@ static int dmatest_func(void *data)
251 int i; 252 int i;
252 253
253 thread_name = current->comm; 254 thread_name = current->comm;
255 set_freezable_with_signal();
254 256
255 ret = -ENOMEM; 257 ret = -ENOMEM;
256 258
@@ -305,7 +307,8 @@ static int dmatest_func(void *data)
305 dma_addr_t dma_srcs[src_cnt]; 307 dma_addr_t dma_srcs[src_cnt];
306 dma_addr_t dma_dsts[dst_cnt]; 308 dma_addr_t dma_dsts[dst_cnt];
307 struct completion cmp; 309 struct completion cmp;
308 unsigned long tmo = msecs_to_jiffies(timeout); 310 unsigned long start, tmo, end = 0 /* compiler... */;
311 bool reload = true;
309 u8 align = 0; 312 u8 align = 0;
310 313
311 total_tests++; 314 total_tests++;
@@ -404,7 +407,17 @@ static int dmatest_func(void *data)
404 } 407 }
405 dma_async_issue_pending(chan); 408 dma_async_issue_pending(chan);
406 409
407 tmo = wait_for_completion_timeout(&cmp, tmo); 410 do {
411 start = jiffies;
412 if (reload)
413 end = start + msecs_to_jiffies(timeout);
414 else if (end <= start)
415 end = start + 1;
416 tmo = wait_for_completion_interruptible_timeout(&cmp,
417 end - start);
418 reload = try_to_freeze();
419 } while (tmo == -ERESTARTSYS);
420
408 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); 421 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
409 422
410 if (tmo == 0) { 423 if (tmo == 0) {
@@ -477,6 +490,8 @@ err_srcs:
477 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", 490 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
478 thread_name, total_tests, failed_tests, ret); 491 thread_name, total_tests, failed_tests, ret);
479 492
493 /* terminate all transfers on specified channels */
494 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
480 if (iterations > 0) 495 if (iterations > 0)
481 while (!kthread_should_stop()) { 496 while (!kthread_should_stop()) {
482 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); 497 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
@@ -499,6 +514,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
499 list_del(&thread->node); 514 list_del(&thread->node);
500 kfree(thread); 515 kfree(thread);
501 } 516 }
517
518 /* terminate all transfers on specified channels */
519 dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0);
520
502 kfree(dtc); 521 kfree(dtc);
503} 522}
504 523
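
The dmatest loop above is the standard shape for a freezable wait: the interruptible variant lets the freezer's fake signal (-ERESTARTSYS) break the wait, try_to_freeze() parks the thread, and the deadline is only recomputed on a fresh iteration, so time spent frozen is not charged against the timeout. A standalone sketch of that loop, assuming a struct completion cmp and a timeout_ms value are in scope:

unsigned long start, tmo, end = 0;
bool reload = true;

do {
        start = jiffies;
        if (reload)                     /* fresh iteration: new deadline */
                end = start + msecs_to_jiffies(timeout_ms);
        else if (end <= start)          /* deadline passed while frozen */
                end = start + 1;        /* still wait at least one jiffy */
        tmo = wait_for_completion_interruptible_timeout(&cmp, end - start);
        reload = try_to_freeze();       /* true if we actually froze */
} while (tmo == -ERESTARTSYS);          /* interrupted by freezer signal */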
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 4d180ca9a1d8..9bfd6d360718 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1407,12 +1407,11 @@ static int __init dw_probe(struct platform_device *pdev)
1407 dw->all_chan_mask = (1 << pdata->nr_channels) - 1; 1407 dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
1408 1408
1409 INIT_LIST_HEAD(&dw->dma.channels); 1409 INIT_LIST_HEAD(&dw->dma.channels);
1410 for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) { 1410 for (i = 0; i < pdata->nr_channels; i++) {
1411 struct dw_dma_chan *dwc = &dw->chan[i]; 1411 struct dw_dma_chan *dwc = &dw->chan[i];
1412 1412
1413 dwc->chan.device = &dw->dma; 1413 dwc->chan.device = &dw->dma;
1414 dwc->chan.cookie = dwc->completed = 1; 1414 dwc->chan.cookie = dwc->completed = 1;
1415 dwc->chan.chan_id = i;
1416 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) 1415 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
1417 list_add_tail(&dwc->chan.device_node, 1416 list_add_tail(&dwc->chan.device_node,
1418 &dw->dma.channels); 1417 &dw->dma.channels);
@@ -1468,7 +1467,7 @@ static int __init dw_probe(struct platform_device *pdev)
1468 dma_writel(dw, CFG, DW_CFG_DMA_EN); 1467 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1469 1468
1470 printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", 1469 printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
1471 dev_name(&pdev->dev), dw->dma.chancnt); 1470 dev_name(&pdev->dev), pdata->nr_channels);
1472 1471
1473 dma_async_device_register(&dw->dma); 1472 dma_async_device_register(&dw->dma);
1474 1473
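
dw_dmac is one of several drivers in this series (pch_dma, intel_mid_dma, mpc512x, pl330, timb_dma) that stop writing chan->chan_id and dma_device->chancnt by hand: dma_async_device_register() derives both from the channels list, so the duplicated bookkeeping was redundant. The resulting registration loop reduces to roughly:

INIT_LIST_HEAD(&dw->dma.channels);
for (i = 0; i < nr_channels; i++) {
        struct dw_dma_chan *dwc = &dw->chan[i];

        dwc->chan.device = &dw->dma;
        /* no chan_id/chancnt assignments: the dmaengine core
         * fills both in at registration time */
        list_add_tail(&dwc->chan.device_node, &dw->dma.channels);
}
dma_async_device_register(&dw->dma);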
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 5d7a49bd7c26..b47e2b803faf 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -22,6 +22,7 @@
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/dmaengine.h> 24#include <linux/dmaengine.h>
25#include <linux/module.h>
25#include <linux/platform_device.h> 26#include <linux/platform_device.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
27 28
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index d99f71c356b5..d746899f36e1 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -14,6 +14,7 @@
14 * http://www.gnu.org/copyleft/gpl.html 14 * http://www.gnu.org/copyleft/gpl.html
15 */ 15 */
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/module.h>
17#include <linux/types.h> 18#include <linux/types.h>
18#include <linux/mm.h> 19#include <linux/mm.h>
19#include <linux/interrupt.h> 20#include <linux/interrupt.h>
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 7bd7e98548cd..eab1fe71259e 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/module.h>
21#include <linux/types.h> 22#include <linux/types.h>
22#include <linux/mm.h> 23#include <linux/mm.h>
23#include <linux/interrupt.h> 24#include <linux/interrupt.h>
@@ -318,6 +319,7 @@ struct sdma_engine {
318 dma_addr_t context_phys; 319 dma_addr_t context_phys;
319 struct dma_device dma_device; 320 struct dma_device dma_device;
320 struct clk *clk; 321 struct clk *clk;
322 struct mutex channel_0_lock;
321 struct sdma_script_start_addrs *script_addrs; 323 struct sdma_script_start_addrs *script_addrs;
322}; 324};
323 325
@@ -415,11 +417,15 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
415 dma_addr_t buf_phys; 417 dma_addr_t buf_phys;
416 int ret; 418 int ret;
417 419
420 mutex_lock(&sdma->channel_0_lock);
421
418 buf_virt = dma_alloc_coherent(NULL, 422 buf_virt = dma_alloc_coherent(NULL,
419 size, 423 size,
420 &buf_phys, GFP_KERNEL); 424 &buf_phys, GFP_KERNEL);
421 if (!buf_virt) 425 if (!buf_virt) {
422 return -ENOMEM; 426 ret = -ENOMEM;
427 goto err_out;
428 }
423 429
424 bd0->mode.command = C0_SETPM; 430 bd0->mode.command = C0_SETPM;
425 bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; 431 bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
@@ -433,6 +439,9 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
433 439
434 dma_free_coherent(NULL, size, buf_virt, buf_phys); 440 dma_free_coherent(NULL, size, buf_virt, buf_phys);
435 441
442err_out:
443 mutex_unlock(&sdma->channel_0_lock);
444
436 return ret; 445 return ret;
437} 446}
438 447
@@ -656,6 +665,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
656 dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0); 665 dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
657 dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1); 666 dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);
658 667
668 mutex_lock(&sdma->channel_0_lock);
669
659 memset(context, 0, sizeof(*context)); 670 memset(context, 0, sizeof(*context));
660 context->channel_state.pc = load_address; 671 context->channel_state.pc = load_address;
661 672
@@ -676,6 +687,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
676 687
677 ret = sdma_run_channel(&sdma->channel[0]); 688 ret = sdma_run_channel(&sdma->channel[0]);
678 689
690 mutex_unlock(&sdma->channel_0_lock);
691
679 return ret; 692 return ret;
680} 693}
681 694
@@ -1131,18 +1144,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
1131 saddr_arr[i] = addr_arr[i]; 1144 saddr_arr[i] = addr_arr[i];
1132} 1145}
1133 1146
1134static int __init sdma_get_firmware(struct sdma_engine *sdma, 1147static void sdma_load_firmware(const struct firmware *fw, void *context)
1135 const char *fw_name)
1136{ 1148{
1137 const struct firmware *fw; 1149 struct sdma_engine *sdma = context;
1138 const struct sdma_firmware_header *header; 1150 const struct sdma_firmware_header *header;
1139 int ret;
1140 const struct sdma_script_start_addrs *addr; 1151 const struct sdma_script_start_addrs *addr;
1141 unsigned short *ram_code; 1152 unsigned short *ram_code;
1142 1153
1143 ret = request_firmware(&fw, fw_name, sdma->dev); 1154 if (!fw) {
1144 if (ret) 1155 dev_err(sdma->dev, "firmware not found\n");
1145 return ret; 1156 return;
1157 }
1146 1158
1147 if (fw->size < sizeof(*header)) 1159 if (fw->size < sizeof(*header))
1148 goto err_firmware; 1160 goto err_firmware;
@@ -1172,6 +1184,16 @@ static int __init sdma_get_firmware(struct sdma_engine *sdma,
1172 1184
1173err_firmware: 1185err_firmware:
1174 release_firmware(fw); 1186 release_firmware(fw);
1187}
1188
1189static int __init sdma_get_firmware(struct sdma_engine *sdma,
1190 const char *fw_name)
1191{
1192 int ret;
1193
1194 ret = request_firmware_nowait(THIS_MODULE,
1195 FW_ACTION_HOTPLUG, fw_name, sdma->dev,
1196 GFP_KERNEL, sdma, sdma_load_firmware);
1175 1197
1176 return ret; 1198 return ret;
1177} 1199}
@@ -1269,11 +1291,14 @@ static int __init sdma_probe(struct platform_device *pdev)
1269 struct sdma_platform_data *pdata = pdev->dev.platform_data; 1291 struct sdma_platform_data *pdata = pdev->dev.platform_data;
1270 int i; 1292 int i;
1271 struct sdma_engine *sdma; 1293 struct sdma_engine *sdma;
1294 s32 *saddr_arr;
1272 1295
1273 sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); 1296 sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
1274 if (!sdma) 1297 if (!sdma)
1275 return -ENOMEM; 1298 return -ENOMEM;
1276 1299
1300 mutex_init(&sdma->channel_0_lock);
1301
1277 sdma->dev = &pdev->dev; 1302 sdma->dev = &pdev->dev;
1278 1303
1279 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1304 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1310,6 +1335,11 @@ static int __init sdma_probe(struct platform_device *pdev)
1310 goto err_alloc; 1335 goto err_alloc;
1311 } 1336 }
1312 1337
1338 /* initially no scripts available */
1339 saddr_arr = (s32 *)sdma->script_addrs;
1340 for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
1341 saddr_arr[i] = -EINVAL;
1342
1313 if (of_id) 1343 if (of_id)
1314 pdev->id_entry = of_id->data; 1344 pdev->id_entry = of_id->data;
1315 sdma->devtype = pdev->id_entry->driver_data; 1345 sdma->devtype = pdev->id_entry->driver_data;
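
With request_firmware_nowait(), sdma_probe() no longer blocks waiting for the SDMA microcode; the firmware core calls sdma_load_firmware() later from its own context, passing fw == NULL if the lookup failed. A generic sketch of the asynchronous pattern (the my_* names are hypothetical):

static void my_fw_ready(const struct firmware *fw, void *context)
{
        struct my_device *md = context;

        if (!fw)
                return;                 /* lookup failed or was aborted */
        /* ... validate and program fw->data / fw->size into md ... */
        release_firmware(fw);           /* the callback owns the blob */
}

static int my_request_fw(struct my_device *md, const char *name)
{
        /* returns at once; my_fw_ready() runs when the blob shows up */
        return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
                                       name, md->dev, GFP_KERNEL,
                                       md, my_fw_ready);
}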
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 8a3fdd87db97..9e96c43a846a 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -115,16 +115,15 @@ DMAC1 interrupt Functions*/
115 115
116/** 116/**
117 * dmac1_mask_periphral_intr - mask the peripheral interrupt 117 * dmac1_mask_periphral_intr - mask the peripheral interrupt
118 * @midc: dma channel for which masking is required 118 * @mid: dma device for which masking is required
119 * 119 *
120 * Masks the DMA peripheral interrupt 120 * Masks the DMA peripheral interrupt
121 * this is valid for DMAC1 family controllers only 121 * this is valid for DMAC1 family controllers only
122 * This controller should have peripheral mask registers already mapped 122 * This controller should have peripheral mask registers already mapped
123 */ 123 */
124static void dmac1_mask_periphral_intr(struct intel_mid_dma_chan *midc) 124static void dmac1_mask_periphral_intr(struct middma_device *mid)
125{ 125{
126 u32 pimr; 126 u32 pimr;
127 struct middma_device *mid = to_middma_device(midc->chan.device);
128 127
129 if (mid->pimr_mask) { 128 if (mid->pimr_mask) {
130 pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); 129 pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
@@ -184,7 +183,6 @@ static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
184static void disable_dma_interrupt(struct intel_mid_dma_chan *midc) 183static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
185{ 184{
186 /*Check LPE PISR, make sure fwd is disabled*/ 185 /*Check LPE PISR, make sure fwd is disabled*/
187 dmac1_mask_periphral_intr(midc);
188 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK); 186 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
189 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); 187 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
190 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); 188 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
@@ -1114,7 +1112,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
1114 1112
1115 midch->chan.device = &dma->common; 1113 midch->chan.device = &dma->common;
1116 midch->chan.cookie = 1; 1114 midch->chan.cookie = 1;
1117 midch->chan.chan_id = i;
1118 midch->ch_id = dma->chan_base + i; 1115 midch->ch_id = dma->chan_base + i;
1119 pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id); 1116 pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);
1120 1117
@@ -1150,7 +1147,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
1150 dma_cap_set(DMA_SLAVE, dma->common.cap_mask); 1147 dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
1151 dma_cap_set(DMA_PRIVATE, dma->common.cap_mask); 1148 dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
1152 dma->common.dev = &pdev->dev; 1149 dma->common.dev = &pdev->dev;
1153 dma->common.chancnt = dma->max_chan;
1154 1150
1155 dma->common.device_alloc_chan_resources = 1151 dma->common.device_alloc_chan_resources =
1156 intel_mid_dma_alloc_chan_resources; 1152 intel_mid_dma_alloc_chan_resources;
@@ -1350,6 +1346,7 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state)
1350 if (device->ch[i].in_use) 1346 if (device->ch[i].in_use)
1351 return -EAGAIN; 1347 return -EAGAIN;
1352 } 1348 }
1349 dmac1_mask_periphral_intr(device);
1353 device->state = SUSPENDED; 1350 device->state = SUSPENDED;
1354 pci_save_state(pci); 1351 pci_save_state(pci);
1355 pci_disable_device(pci); 1352 pci_disable_device(pci);
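
The intel_mid_dma fix moves peripheral-interrupt masking out of the per-channel disable path and into dma_suspend(), and retypes the helper to take the device rather than a channel, matching where the mask register actually lives. As a hedged sketch, the read-modify-write it performs looks like this (names and bit polarity illustrative):

static void my_mask_periph_intr(struct my_dma_dev *mid)
{
        u32 pimr;

        if (mid->pimr_mask) {
                pimr = readl(mid->mask_reg + MY_PERIPH_MASK);
                pimr |= mid->pimr_mask;  /* set bits = masked, in this sketch */
                writel(pimr, mid->mask_reg + MY_PERIPH_MASK);
        }
}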
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index b9bae94f2015..8ba4edc6185e 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -741,7 +741,6 @@ static int __devinit mpc_dma_probe(struct platform_device *op)
741 mchan = &mdma->channels[i]; 741 mchan = &mdma->channels[i];
742 742
743 mchan->chan.device = dma; 743 mchan->chan.device = dma;
744 mchan->chan.chan_id = i;
745 mchan->chan.cookie = 1; 744 mchan->chan.cookie = 1;
746 mchan->completed_cookie = mchan->chan.cookie; 745 mchan->completed_cookie = mchan->chan.cookie;
747 746
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index be641cbd36fc..b4588bdd98bb 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -130,6 +130,23 @@ struct mxs_dma_engine {
130 struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; 130 struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
131}; 131};
132 132
133static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable)
134{
135 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
136 int chan_id = mxs_chan->chan.chan_id;
137 int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;
138
139 /* enable apbh channel clock */
140 if (dma_is_apbh()) {
141 if (apbh_is_old())
142 writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
143 mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
144 else
145 writel(1 << chan_id,
146 mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
147 }
148}
149
133static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) 150static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
134{ 151{
135 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 152 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -148,38 +165,21 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
148 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 165 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
149 int chan_id = mxs_chan->chan.chan_id; 166 int chan_id = mxs_chan->chan.chan_id;
150 167
168 /* clkgate needs to be enabled before writing other registers */
169 mxs_dma_clkgate(mxs_chan, 1);
170
151 /* set cmd_addr up */ 171 /* set cmd_addr up */
152 writel(mxs_chan->ccw_phys, 172 writel(mxs_chan->ccw_phys,
153 mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id)); 173 mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
154 174
155 /* enable apbh channel clock */
156 if (dma_is_apbh()) {
157 if (apbh_is_old())
158 writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
159 mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
160 else
161 writel(1 << chan_id,
162 mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
163 }
164
165 /* write 1 to SEMA to kick off the channel */ 175 /* write 1 to SEMA to kick off the channel */
166 writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id)); 176 writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id));
167} 177}
168 178
169static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) 179static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
170{ 180{
171 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
172 int chan_id = mxs_chan->chan.chan_id;
173
174 /* disable apbh channel clock */ 181 /* disable apbh channel clock */
175 if (dma_is_apbh()) { 182 mxs_dma_clkgate(mxs_chan, 0);
176 if (apbh_is_old())
177 writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
178 mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
179 else
180 writel(1 << chan_id,
181 mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
182 }
183 183
184 mxs_chan->status = DMA_SUCCESS; 184 mxs_chan->status = DMA_SUCCESS;
185} 185}
@@ -338,7 +338,10 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
338 if (ret) 338 if (ret)
339 goto err_clk; 339 goto err_clk;
340 340
341 /* clkgate needs to be enabled for reset to finish */
342 mxs_dma_clkgate(mxs_chan, 1);
341 mxs_dma_reset_chan(mxs_chan); 343 mxs_dma_reset_chan(mxs_chan);
344 mxs_dma_clkgate(mxs_chan, 0);
342 345
343 dma_async_tx_descriptor_init(&mxs_chan->desc, chan); 346 dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
344 mxs_chan->desc.tx_submit = mxs_dma_tx_submit; 347 mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
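
The new mxs_dma_clkgate() helper exploits the MXS register convention that a block's SET and CLR aliases live at fixed offsets from the base register, so enabling and disabling the (active-low) CLKGATE bit differ only in which alias is written. A reduced sketch of the idiom, with illustrative offsets:

#define MY_SET_ADDR     0x4             /* write here to set bits */
#define MY_CLR_ADDR     0x8             /* write here to clear bits */

static void my_clkgate(void __iomem *ctrl0, int chan_id, int enable)
{
        /* CLKGATE is active-low: clearing the bit ungates the
         * channel clock, setting it gates the clock off */
        int set_clr = enable ? MY_CLR_ADDR : MY_SET_ADDR;

        writel(1 << chan_id, ctrl0 + set_clr);
}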
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 1ac8d4b580b7..a6d0e3dbed07 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -60,7 +60,7 @@
60#define DMA_DESC_FOLLOW_WITHOUT_IRQ 0x2 60#define DMA_DESC_FOLLOW_WITHOUT_IRQ 0x2
61#define DMA_DESC_FOLLOW_WITH_IRQ 0x3 61#define DMA_DESC_FOLLOW_WITH_IRQ 0x3
62 62
63#define MAX_CHAN_NR 8 63#define MAX_CHAN_NR 12
64 64
65#define DMA_MASK_CTL0_MODE 0x33333333 65#define DMA_MASK_CTL0_MODE 0x33333333
66#define DMA_MASK_CTL2_MODE 0x00003333 66#define DMA_MASK_CTL2_MODE 0x00003333
@@ -872,8 +872,7 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
872 int i; 872 int i;
873 873
874 nr_channels = id->driver_data; 874 nr_channels = id->driver_data;
875 pd = kzalloc(sizeof(struct pch_dma)+ 875 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
876 sizeof(struct pch_dma_chan) * nr_channels, GFP_KERNEL);
877 if (!pd) 876 if (!pd)
878 return -ENOMEM; 877 return -ENOMEM;
879 878
@@ -926,7 +925,6 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
926 } 925 }
927 926
928 pd->dma.dev = &pdev->dev; 927 pd->dma.dev = &pdev->dev;
929 pd->dma.chancnt = nr_channels;
930 928
931 INIT_LIST_HEAD(&pd->dma.channels); 929 INIT_LIST_HEAD(&pd->dma.channels);
932 930
@@ -935,7 +933,6 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
935 933
936 pd_chan->chan.device = &pd->dma; 934 pd_chan->chan.device = &pd->dma;
937 pd_chan->chan.cookie = 1; 935 pd_chan->chan.cookie = 1;
938 pd_chan->chan.chan_id = i;
939 936
940 pd_chan->membase = &regs->desc[i]; 937 pd_chan->membase = &regs->desc[i];
941 938
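
The pch_dma allocation fix only makes sense if the channel array is already embedded in struct pch_dma, sized for MAX_CHAN_NR; the old kzalloc then tacked nr_channels further struct pch_dma_chan onto a struct that already contained them, which was pure waste. A sketch of the assumed layout:

/* Assumed layout (illustrative): the channel array is part of the
 * containing struct, so sizeof(*pd) already covers every channel. */
struct my_dma {
        struct dma_device       dma;
        struct my_dma_chan      channels[MAX_CHAN_NR];
};

pd = kzalloc(sizeof(*pd), GFP_KERNEL);  /* no extra tail allocation */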
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 00eee59e8b33..571041477ab2 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -17,6 +17,8 @@
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/amba/bus.h> 18#include <linux/amba/bus.h>
19#include <linux/amba/pl330.h> 19#include <linux/amba/pl330.h>
20#include <linux/pm_runtime.h>
21#include <linux/scatterlist.h>
20 22
21#define NR_DEFAULT_DESC 16 23#define NR_DEFAULT_DESC 16
22 24
@@ -68,6 +70,14 @@ struct dma_pl330_chan {
68 * NULL if the channel is available to be acquired. 70 * NULL if the channel is available to be acquired.
69 */ 71 */
70 void *pl330_chid; 72 void *pl330_chid;
73
74 /* For D-to-M and M-to-D channels */
75 int burst_sz; /* the peripheral fifo width */
76 int burst_len; /* the number of bursts */
77 dma_addr_t fifo_addr;
78
79 /* for cyclic capability */
80 bool cyclic;
71}; 81};
72 82
73struct dma_pl330_dmac { 83struct dma_pl330_dmac {
@@ -83,6 +93,8 @@ struct dma_pl330_dmac {
83 93
84 /* Peripheral channels connected to this DMAC */ 94 /* Peripheral channels connected to this DMAC */
85 struct dma_pl330_chan *peripherals; /* keep at end */ 95 struct dma_pl330_chan *peripherals; /* keep at end */
96
97 struct clk *clk;
86}; 98};
87 99
88struct dma_pl330_desc { 100struct dma_pl330_desc {
@@ -152,6 +164,31 @@ static inline void free_desc_list(struct list_head *list)
152 spin_unlock_irqrestore(&pdmac->pool_lock, flags); 164 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
153} 165}
154 166
167static inline void handle_cyclic_desc_list(struct list_head *list)
168{
169 struct dma_pl330_desc *desc;
170 struct dma_pl330_chan *pch;
171 unsigned long flags;
172
173 if (list_empty(list))
174 return;
175
176 list_for_each_entry(desc, list, node) {
177 dma_async_tx_callback callback;
178
179 /* Change status to reload it */
180 desc->status = PREP;
181 pch = desc->pchan;
182 callback = desc->txd.callback;
183 if (callback)
184 callback(desc->txd.callback_param);
185 }
186
187 spin_lock_irqsave(&pch->lock, flags);
188 list_splice_tail_init(list, &pch->work_list);
189 spin_unlock_irqrestore(&pch->lock, flags);
190}
191
155static inline void fill_queue(struct dma_pl330_chan *pch) 192static inline void fill_queue(struct dma_pl330_chan *pch)
156{ 193{
157 struct dma_pl330_desc *desc; 194 struct dma_pl330_desc *desc;
@@ -205,7 +242,10 @@ static void pl330_tasklet(unsigned long data)
205 242
206 spin_unlock_irqrestore(&pch->lock, flags); 243 spin_unlock_irqrestore(&pch->lock, flags);
207 244
208 free_desc_list(&list); 245 if (pch->cyclic)
246 handle_cyclic_desc_list(&list);
247 else
248 free_desc_list(&list);
209} 249}
210 250
211static void dma_pl330_rqcb(void *token, enum pl330_op_err err) 251static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
@@ -236,6 +276,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
236 spin_lock_irqsave(&pch->lock, flags); 276 spin_lock_irqsave(&pch->lock, flags);
237 277
238 pch->completed = chan->cookie = 1; 278 pch->completed = chan->cookie = 1;
279 pch->cyclic = false;
239 280
240 pch->pl330_chid = pl330_request_channel(&pdmac->pif); 281 pch->pl330_chid = pl330_request_channel(&pdmac->pif);
241 if (!pch->pl330_chid) { 282 if (!pch->pl330_chid) {
@@ -253,25 +294,52 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
253static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) 294static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
254{ 295{
255 struct dma_pl330_chan *pch = to_pchan(chan); 296 struct dma_pl330_chan *pch = to_pchan(chan);
256 struct dma_pl330_desc *desc; 297 struct dma_pl330_desc *desc, *_dt;
257 unsigned long flags; 298 unsigned long flags;
299 struct dma_pl330_dmac *pdmac = pch->dmac;
300 struct dma_slave_config *slave_config;
301 LIST_HEAD(list);
258 302
259 /* Only supports DMA_TERMINATE_ALL */ 303 switch (cmd) {
260 if (cmd != DMA_TERMINATE_ALL) 304 case DMA_TERMINATE_ALL:
261 return -ENXIO; 305 spin_lock_irqsave(&pch->lock, flags);
262
263 spin_lock_irqsave(&pch->lock, flags);
264
265 /* FLUSH the PL330 Channel thread */
266 pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
267 306
268 /* Mark all desc done */ 307 /* FLUSH the PL330 Channel thread */
269 list_for_each_entry(desc, &pch->work_list, node) 308 pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
270 desc->status = DONE;
271 309
272 spin_unlock_irqrestore(&pch->lock, flags); 310 /* Mark all desc done */
311 list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
312 desc->status = DONE;
313 pch->completed = desc->txd.cookie;
314 list_move_tail(&desc->node, &list);
315 }
273 316
274 pl330_tasklet((unsigned long) pch); 317 list_splice_tail_init(&list, &pdmac->desc_pool);
318 spin_unlock_irqrestore(&pch->lock, flags);
319 break;
320 case DMA_SLAVE_CONFIG:
321 slave_config = (struct dma_slave_config *)arg;
322
323 if (slave_config->direction == DMA_TO_DEVICE) {
324 if (slave_config->dst_addr)
325 pch->fifo_addr = slave_config->dst_addr;
326 if (slave_config->dst_addr_width)
327 pch->burst_sz = __ffs(slave_config->dst_addr_width);
328 if (slave_config->dst_maxburst)
329 pch->burst_len = slave_config->dst_maxburst;
330 } else if (slave_config->direction == DMA_FROM_DEVICE) {
331 if (slave_config->src_addr)
332 pch->fifo_addr = slave_config->src_addr;
333 if (slave_config->src_addr_width)
334 pch->burst_sz = __ffs(slave_config->src_addr_width);
335 if (slave_config->src_maxburst)
336 pch->burst_len = slave_config->src_maxburst;
337 }
338 break;
339 default:
340 dev_err(pch->dmac->pif.dev, "Not supported command.\n");
341 return -ENXIO;
342 }
275 343
276 return 0; 344 return 0;
277} 345}
@@ -288,6 +356,9 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
288 pl330_release_channel(pch->pl330_chid); 356 pl330_release_channel(pch->pl330_chid);
289 pch->pl330_chid = NULL; 357 pch->pl330_chid = NULL;
290 358
359 if (pch->cyclic)
360 list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
361
291 spin_unlock_irqrestore(&pch->lock, flags); 362 spin_unlock_irqrestore(&pch->lock, flags);
292} 363}
293 364
@@ -453,7 +524,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
453 524
454 if (peri) { 525 if (peri) {
455 desc->req.rqtype = peri->rqtype; 526 desc->req.rqtype = peri->rqtype;
456 desc->req.peri = peri->peri_id; 527 desc->req.peri = pch->chan.chan_id;
457 } else { 528 } else {
458 desc->req.rqtype = MEMTOMEM; 529 desc->req.rqtype = MEMTOMEM;
459 desc->req.peri = 0; 530 desc->req.peri = 0;
@@ -524,6 +595,51 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
524 return burst_len; 595 return burst_len;
525} 596}
526 597
598static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
599 struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
600 size_t period_len, enum dma_data_direction direction)
601{
602 struct dma_pl330_desc *desc;
603 struct dma_pl330_chan *pch = to_pchan(chan);
604 dma_addr_t dst;
605 dma_addr_t src;
606
607 desc = pl330_get_desc(pch);
608 if (!desc) {
609 dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
610 __func__, __LINE__);
611 return NULL;
612 }
613
614 switch (direction) {
615 case DMA_TO_DEVICE:
616 desc->rqcfg.src_inc = 1;
617 desc->rqcfg.dst_inc = 0;
618 src = dma_addr;
619 dst = pch->fifo_addr;
620 break;
621 case DMA_FROM_DEVICE:
622 desc->rqcfg.src_inc = 0;
623 desc->rqcfg.dst_inc = 1;
624 src = pch->fifo_addr;
625 dst = dma_addr;
626 break;
627 default:
628 dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
629 __func__, __LINE__);
630 return NULL;
631 }
632
633 desc->rqcfg.brst_size = pch->burst_sz;
634 desc->rqcfg.brst_len = 1;
635
636 pch->cyclic = true;
637
638 fill_px(&desc->px, dst, src, period_len);
639
640 return &desc->txd;
641}
642
527static struct dma_async_tx_descriptor * 643static struct dma_async_tx_descriptor *
528pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, 644pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
529 dma_addr_t src, size_t len, unsigned long flags) 645 dma_addr_t src, size_t len, unsigned long flags)
@@ -579,7 +695,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
579 struct dma_pl330_peri *peri = chan->private; 695 struct dma_pl330_peri *peri = chan->private;
580 struct scatterlist *sg; 696 struct scatterlist *sg;
581 unsigned long flags; 697 unsigned long flags;
582 int i, burst_size; 698 int i;
583 dma_addr_t addr; 699 dma_addr_t addr;
584 700
585 if (unlikely(!pch || !sgl || !sg_len || !peri)) 701 if (unlikely(!pch || !sgl || !sg_len || !peri))
@@ -595,8 +711,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
595 return NULL; 711 return NULL;
596 } 712 }
597 713
598 addr = peri->fifo_addr; 714 addr = pch->fifo_addr;
599 burst_size = peri->burst_sz;
600 715
601 first = NULL; 716 first = NULL;
602 717
@@ -644,7 +759,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
644 sg_dma_address(sg), addr, sg_dma_len(sg)); 759 sg_dma_address(sg), addr, sg_dma_len(sg));
645 } 760 }
646 761
647 desc->rqcfg.brst_size = burst_size; 762 desc->rqcfg.brst_size = pch->burst_sz;
648 desc->rqcfg.brst_len = 1; 763 desc->rqcfg.brst_len = 1;
649 } 764 }
650 765
@@ -696,6 +811,30 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
696 goto probe_err1; 811 goto probe_err1;
697 } 812 }
698 813
814 pdmac->clk = clk_get(&adev->dev, "dma");
815 if (IS_ERR(pdmac->clk)) {
816 dev_err(&adev->dev, "Cannot get operation clock.\n");
817 ret = -EINVAL;
818 goto probe_err1;
819 }
820
821 amba_set_drvdata(adev, pdmac);
822
823#ifdef CONFIG_PM_RUNTIME
824 /* to use the runtime PM helper functions */
825 pm_runtime_enable(&adev->dev);
826
827 /* enable the power domain */
828 if (pm_runtime_get_sync(&adev->dev)) {
829 dev_err(&adev->dev, "failed to get runtime pm\n");
830 ret = -ENODEV;
831 goto probe_err1;
832 }
833#else
834 /* enable dma clk */
835 clk_enable(pdmac->clk);
836#endif
837
699 irq = adev->irq[0]; 838 irq = adev->irq[0];
700 ret = request_irq(irq, pl330_irq_handler, 0, 839 ret = request_irq(irq, pl330_irq_handler, 0,
701 dev_name(&adev->dev), pi); 840 dev_name(&adev->dev), pi);
@@ -732,6 +871,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
732 case MEMTODEV: 871 case MEMTODEV:
733 case DEVTOMEM: 872 case DEVTOMEM:
734 dma_cap_set(DMA_SLAVE, pd->cap_mask); 873 dma_cap_set(DMA_SLAVE, pd->cap_mask);
874 dma_cap_set(DMA_CYCLIC, pd->cap_mask);
735 break; 875 break;
736 default: 876 default:
737 dev_err(&adev->dev, "DEVTODEV Not Supported\n"); 877 dev_err(&adev->dev, "DEVTODEV Not Supported\n");
@@ -747,11 +887,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
747 spin_lock_init(&pch->lock); 887 spin_lock_init(&pch->lock);
748 pch->pl330_chid = NULL; 888 pch->pl330_chid = NULL;
749 pch->chan.device = pd; 889 pch->chan.device = pd;
750 pch->chan.chan_id = i;
751 pch->dmac = pdmac; 890 pch->dmac = pdmac;
752 891
753 /* Add the channel to the DMAC list */ 892 /* Add the channel to the DMAC list */
754 pd->chancnt++;
755 list_add_tail(&pch->chan.device_node, &pd->channels); 893 list_add_tail(&pch->chan.device_node, &pd->channels);
756 } 894 }
757 895
@@ -760,6 +898,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
760 pd->device_alloc_chan_resources = pl330_alloc_chan_resources; 898 pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
761 pd->device_free_chan_resources = pl330_free_chan_resources; 899 pd->device_free_chan_resources = pl330_free_chan_resources;
762 pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy; 900 pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
901 pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
763 pd->device_tx_status = pl330_tx_status; 902 pd->device_tx_status = pl330_tx_status;
764 pd->device_prep_slave_sg = pl330_prep_slave_sg; 903 pd->device_prep_slave_sg = pl330_prep_slave_sg;
765 pd->device_control = pl330_control; 904 pd->device_control = pl330_control;
@@ -771,8 +910,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
771 goto probe_err4; 910 goto probe_err4;
772 } 911 }
773 912
774 amba_set_drvdata(adev, pdmac);
775
776 dev_info(&adev->dev, 913 dev_info(&adev->dev,
777 "Loaded driver for PL330 DMAC-%d\n", adev->periphid); 914 "Loaded driver for PL330 DMAC-%d\n", adev->periphid);
778 dev_info(&adev->dev, 915 dev_info(&adev->dev,
@@ -833,6 +970,13 @@ static int __devexit pl330_remove(struct amba_device *adev)
833 res = &adev->res; 970 res = &adev->res;
834 release_mem_region(res->start, resource_size(res)); 971 release_mem_region(res->start, resource_size(res));
835 972
973#ifdef CONFIG_PM_RUNTIME
974 pm_runtime_put(&adev->dev);
975 pm_runtime_disable(&adev->dev);
976#else
977 clk_disable(pdmac->clk);
978#endif
979
836 kfree(pdmac); 980 kfree(pdmac);
837 981
838 return 0; 982 return 0;
@@ -846,10 +990,49 @@ static struct amba_id pl330_ids[] = {
846 { 0, 0 }, 990 { 0, 0 },
847}; 991};
848 992
993#ifdef CONFIG_PM_RUNTIME
994static int pl330_runtime_suspend(struct device *dev)
995{
996 struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
997
998 if (!pdmac) {
999 dev_err(dev, "failed to get dmac\n");
1000 return -ENODEV;
1001 }
1002
1003 clk_disable(pdmac->clk);
1004
1005 return 0;
1006}
1007
1008static int pl330_runtime_resume(struct device *dev)
1009{
1010 struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
1011
1012 if (!pdmac) {
1013 dev_err(dev, "failed to get dmac\n");
1014 return -ENODEV;
1015 }
1016
1017 clk_enable(pdmac->clk);
1018
1019 return 0;
1020}
1021#else
1022#define pl330_runtime_suspend NULL
1023#define pl330_runtime_resume NULL
1024#endif /* CONFIG_PM_RUNTIME */
1025
1026static const struct dev_pm_ops pl330_pm_ops = {
1027 .runtime_suspend = pl330_runtime_suspend,
1028 .runtime_resume = pl330_runtime_resume,
1029};
1030
849static struct amba_driver pl330_driver = { 1031static struct amba_driver pl330_driver = {
850 .drv = { 1032 .drv = {
851 .owner = THIS_MODULE, 1033 .owner = THIS_MODULE,
852 .name = "dma-pl330", 1034 .name = "dma-pl330",
1035 .pm = &pl330_pm_ops,
853 }, 1036 },
854 .id_table = pl330_ids, 1037 .id_table = pl330_ids,
855 .probe = pl330_probe, 1038 .probe = pl330_probe,
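
On the client side, the DMA_SLAVE_CONFIG support added to pl330_control() means a peripheral driver can describe its FIFO through the generic dmaengine API instead of pl330-specific platform data. A hedged sketch of such a client, assuming a channel already obtained from dma_request_channel() and a known FIFO physical address:

struct dma_slave_config cfg = {
        .direction      = DMA_TO_DEVICE,        /* memory -> peripheral */
        .dst_addr       = fifo_phys,            /* peripheral FIFO address */
        .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .dst_maxburst   = 4,
};

/* routed by the core to pl330_control(chan, DMA_SLAVE_CONFIG, ...) */
dmaengine_slave_config(chan, &cfg);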
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 7f49235d14b9..81809c2b46ab 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -259,14 +259,23 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
259 return 0; 259 return 0;
260} 260}
261 261
262static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
263
262static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) 264static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
263{ 265{
264 struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c; 266 struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
265 struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan); 267 struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
268 struct sh_dmae_slave *param = tx->chan->private;
266 dma_async_tx_callback callback = tx->callback; 269 dma_async_tx_callback callback = tx->callback;
267 dma_cookie_t cookie; 270 dma_cookie_t cookie;
271 bool power_up;
272
273 spin_lock_irq(&sh_chan->desc_lock);
268 274
269 spin_lock_bh(&sh_chan->desc_lock); 275 if (list_empty(&sh_chan->ld_queue))
276 power_up = true;
277 else
278 power_up = false;
270 279
271 cookie = sh_chan->common.cookie; 280 cookie = sh_chan->common.cookie;
272 cookie++; 281 cookie++;
@@ -302,7 +311,38 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
302 tx->cookie, &last->async_tx, sh_chan->id, 311 tx->cookie, &last->async_tx, sh_chan->id,
303 desc->hw.sar, desc->hw.tcr, desc->hw.dar); 312 desc->hw.sar, desc->hw.tcr, desc->hw.dar);
304 313
305 spin_unlock_bh(&sh_chan->desc_lock); 314 if (power_up) {
315 sh_chan->pm_state = DMAE_PM_BUSY;
316
317 pm_runtime_get(sh_chan->dev);
318
319 spin_unlock_irq(&sh_chan->desc_lock);
320
321 pm_runtime_barrier(sh_chan->dev);
322
323 spin_lock_irq(&sh_chan->desc_lock);
324
325 /* Have we been reset, while waiting? */
326 if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) {
327 dev_dbg(sh_chan->dev, "Bring up channel %d\n",
328 sh_chan->id);
329 if (param) {
330 const struct sh_dmae_slave_config *cfg =
331 param->config;
332
333 dmae_set_dmars(sh_chan, cfg->mid_rid);
334 dmae_set_chcr(sh_chan, cfg->chcr);
335 } else {
336 dmae_init(sh_chan);
337 }
338
339 if (sh_chan->pm_state == DMAE_PM_PENDING)
340 sh_chan_xfer_ld_queue(sh_chan);
341 sh_chan->pm_state = DMAE_PM_ESTABLISHED;
342 }
343 }
344
345 spin_unlock_irq(&sh_chan->desc_lock);
306 346
307 return cookie; 347 return cookie;
308} 348}
@@ -346,8 +386,6 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
346 struct sh_dmae_slave *param = chan->private; 386 struct sh_dmae_slave *param = chan->private;
347 int ret; 387 int ret;
348 388
349 pm_runtime_get_sync(sh_chan->dev);
350
351 /* 389 /*
352 * This relies on the guarantee from dmaengine that alloc_chan_resources 390 * This relies on the guarantee from dmaengine that alloc_chan_resources
353 * never runs concurrently with itself or free_chan_resources. 391 * never runs concurrently with itself or free_chan_resources.
@@ -367,31 +405,20 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
367 } 405 }
368 406
369 param->config = cfg; 407 param->config = cfg;
370
371 dmae_set_dmars(sh_chan, cfg->mid_rid);
372 dmae_set_chcr(sh_chan, cfg->chcr);
373 } else {
374 dmae_init(sh_chan);
375 } 408 }
376 409
377 spin_lock_bh(&sh_chan->desc_lock);
378 while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) { 410 while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
379 spin_unlock_bh(&sh_chan->desc_lock);
380 desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL); 411 desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
381 if (!desc) { 412 if (!desc)
382 spin_lock_bh(&sh_chan->desc_lock);
383 break; 413 break;
384 }
385 dma_async_tx_descriptor_init(&desc->async_tx, 414 dma_async_tx_descriptor_init(&desc->async_tx,
386 &sh_chan->common); 415 &sh_chan->common);
387 desc->async_tx.tx_submit = sh_dmae_tx_submit; 416 desc->async_tx.tx_submit = sh_dmae_tx_submit;
388 desc->mark = DESC_IDLE; 417 desc->mark = DESC_IDLE;
389 418
390 spin_lock_bh(&sh_chan->desc_lock);
391 list_add(&desc->node, &sh_chan->ld_free); 419 list_add(&desc->node, &sh_chan->ld_free);
392 sh_chan->descs_allocated++; 420 sh_chan->descs_allocated++;
393 } 421 }
394 spin_unlock_bh(&sh_chan->desc_lock);
395 422
396 if (!sh_chan->descs_allocated) { 423 if (!sh_chan->descs_allocated) {
397 ret = -ENOMEM; 424 ret = -ENOMEM;
@@ -405,7 +432,7 @@ edescalloc:
405 clear_bit(param->slave_id, sh_dmae_slave_used); 432 clear_bit(param->slave_id, sh_dmae_slave_used);
406etestused: 433etestused:
407efindslave: 434efindslave:
408 pm_runtime_put(sh_chan->dev); 435 chan->private = NULL;
409 return ret; 436 return ret;
410} 437}
411 438
@@ -417,7 +444,6 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
417 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 444 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
418 struct sh_desc *desc, *_desc; 445 struct sh_desc *desc, *_desc;
419 LIST_HEAD(list); 446 LIST_HEAD(list);
420 int descs = sh_chan->descs_allocated;
421 447
422 /* Protect against ISR */ 448 /* Protect against ISR */
423 spin_lock_irq(&sh_chan->desc_lock); 449 spin_lock_irq(&sh_chan->desc_lock);
@@ -437,15 +463,12 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
437 chan->private = NULL; 463 chan->private = NULL;
438 } 464 }
439 465
440 spin_lock_bh(&sh_chan->desc_lock); 466 spin_lock_irq(&sh_chan->desc_lock);
441 467
442 list_splice_init(&sh_chan->ld_free, &list); 468 list_splice_init(&sh_chan->ld_free, &list);
443 sh_chan->descs_allocated = 0; 469 sh_chan->descs_allocated = 0;
444 470
445 spin_unlock_bh(&sh_chan->desc_lock); 471 spin_unlock_irq(&sh_chan->desc_lock);
446
447 if (descs > 0)
448 pm_runtime_put(sh_chan->dev);
449 472
450 list_for_each_entry_safe(desc, _desc, &list, node) 473 list_for_each_entry_safe(desc, _desc, &list, node)
451 kfree(desc); 474 kfree(desc);
@@ -534,6 +557,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
534 struct sh_desc *first = NULL, *new = NULL /* compiler... */; 557 struct sh_desc *first = NULL, *new = NULL /* compiler... */;
535 LIST_HEAD(tx_list); 558 LIST_HEAD(tx_list);
536 int chunks = 0; 559 int chunks = 0;
560 unsigned long irq_flags;
537 int i; 561 int i;
538 562
539 if (!sg_len) 563 if (!sg_len)
@@ -544,7 +568,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
544 (SH_DMA_TCR_MAX + 1); 568 (SH_DMA_TCR_MAX + 1);
545 569
546 /* Have to lock the whole loop to protect against concurrent release */ 570 /* Have to lock the whole loop to protect against concurrent release */
547 spin_lock_bh(&sh_chan->desc_lock); 571 spin_lock_irqsave(&sh_chan->desc_lock, irq_flags);
548 572
549 /* 573 /*
550 * Chaining: 574 * Chaining:
@@ -590,7 +614,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
590 /* Put them back on the free list so they don't get lost */ 614 /* Put them back on the free list so they don't get lost */
591 list_splice_tail(&tx_list, &sh_chan->ld_free); 615 list_splice_tail(&tx_list, &sh_chan->ld_free);
592 616
593 spin_unlock_bh(&sh_chan->desc_lock); 617 spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
594 618
595 return &first->async_tx; 619 return &first->async_tx;
596 620
@@ -599,7 +623,7 @@ err_get_desc:
599 new->mark = DESC_IDLE; 623 new->mark = DESC_IDLE;
600 list_splice(&tx_list, &sh_chan->ld_free); 624 list_splice(&tx_list, &sh_chan->ld_free);
601 625
602 spin_unlock_bh(&sh_chan->desc_lock); 626 spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
603 627
604 return NULL; 628 return NULL;
605} 629}
@@ -661,6 +685,7 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
661 unsigned long arg) 685 unsigned long arg)
662{ 686{
663 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 687 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
688 unsigned long flags;
664 689
665 /* Only supports DMA_TERMINATE_ALL */ 690 /* Only supports DMA_TERMINATE_ALL */
666 if (cmd != DMA_TERMINATE_ALL) 691 if (cmd != DMA_TERMINATE_ALL)
@@ -669,7 +694,7 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
669 if (!chan) 694 if (!chan)
670 return -EINVAL; 695 return -EINVAL;
671 696
672 spin_lock_bh(&sh_chan->desc_lock); 697 spin_lock_irqsave(&sh_chan->desc_lock, flags);
673 dmae_halt(sh_chan); 698 dmae_halt(sh_chan);
674 699
675 if (!list_empty(&sh_chan->ld_queue)) { 700 if (!list_empty(&sh_chan->ld_queue)) {
@@ -678,9 +703,8 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
678 struct sh_desc, node); 703 struct sh_desc, node);
679 desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << 704 desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
680 sh_chan->xmit_shift; 705 sh_chan->xmit_shift;
681
682 } 706 }
683 spin_unlock_bh(&sh_chan->desc_lock); 707 spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
684 708
685 sh_dmae_chan_ld_cleanup(sh_chan, true); 709 sh_dmae_chan_ld_cleanup(sh_chan, true);
686 710
@@ -695,8 +719,9 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
695 dma_cookie_t cookie = 0; 719 dma_cookie_t cookie = 0;
696 dma_async_tx_callback callback = NULL; 720 dma_async_tx_callback callback = NULL;
697 void *param = NULL; 721 void *param = NULL;
722 unsigned long flags;
698 723
699 spin_lock_bh(&sh_chan->desc_lock); 724 spin_lock_irqsave(&sh_chan->desc_lock, flags);
700 list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) { 725 list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
701 struct dma_async_tx_descriptor *tx = &desc->async_tx; 726 struct dma_async_tx_descriptor *tx = &desc->async_tx;
702 727
@@ -762,7 +787,13 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
762 async_tx_test_ack(&desc->async_tx)) || all) { 787 async_tx_test_ack(&desc->async_tx)) || all) {
763 /* Remove from ld_queue list */ 788 /* Remove from ld_queue list */
764 desc->mark = DESC_IDLE; 789 desc->mark = DESC_IDLE;
790
765 list_move(&desc->node, &sh_chan->ld_free); 791 list_move(&desc->node, &sh_chan->ld_free);
792
793 if (list_empty(&sh_chan->ld_queue)) {
794 dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
795 pm_runtime_put(sh_chan->dev);
796 }
766 } 797 }
767 } 798 }
768 799
@@ -773,7 +804,7 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
773 */ 804 */
774 sh_chan->completed_cookie = sh_chan->common.cookie; 805 sh_chan->completed_cookie = sh_chan->common.cookie;
775 806
776 spin_unlock_bh(&sh_chan->desc_lock); 807 spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
777 808
778 if (callback) 809 if (callback)
779 callback(param); 810 callback(param);
@@ -792,14 +823,14 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
792 ; 823 ;
793} 824}
794 825
826/* Called under spin_lock_irq(&sh_chan->desc_lock) */
795static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) 827static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
796{ 828{
797 struct sh_desc *desc; 829 struct sh_desc *desc;
798 830
799 spin_lock_bh(&sh_chan->desc_lock);
800 /* DMA work check */ 831 /* DMA work check */
801 if (dmae_is_busy(sh_chan)) 832 if (dmae_is_busy(sh_chan))
802 goto sh_chan_xfer_ld_queue_end; 833 return;
803 834
804 /* Find the first not transferred descriptor */ 835 /* Find the first not transferred descriptor */
805 list_for_each_entry(desc, &sh_chan->ld_queue, node) 836 list_for_each_entry(desc, &sh_chan->ld_queue, node)
@@ -812,15 +843,18 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
812 dmae_start(sh_chan); 843 dmae_start(sh_chan);
813 break; 844 break;
814 } 845 }
815
816sh_chan_xfer_ld_queue_end:
817 spin_unlock_bh(&sh_chan->desc_lock);
818} 846}
819 847
820static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan) 848static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
821{ 849{
822 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 850 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
823 sh_chan_xfer_ld_queue(sh_chan); 851
852 spin_lock_irq(&sh_chan->desc_lock);
853 if (sh_chan->pm_state == DMAE_PM_ESTABLISHED)
854 sh_chan_xfer_ld_queue(sh_chan);
855 else
856 sh_chan->pm_state = DMAE_PM_PENDING;
857 spin_unlock_irq(&sh_chan->desc_lock);
824} 858}
825 859
826static enum dma_status sh_dmae_tx_status(struct dma_chan *chan, 860static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
@@ -831,6 +865,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
831 dma_cookie_t last_used; 865 dma_cookie_t last_used;
832 dma_cookie_t last_complete; 866 dma_cookie_t last_complete;
833 enum dma_status status; 867 enum dma_status status;
868 unsigned long flags;
834 869
835 sh_dmae_chan_ld_cleanup(sh_chan, false); 870 sh_dmae_chan_ld_cleanup(sh_chan, false);
836 871
@@ -841,7 +876,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
841 BUG_ON(last_complete < 0); 876 BUG_ON(last_complete < 0);
842 dma_set_tx_state(txstate, last_complete, last_used, 0); 877 dma_set_tx_state(txstate, last_complete, last_used, 0);
843 878
844 spin_lock_bh(&sh_chan->desc_lock); 879 spin_lock_irqsave(&sh_chan->desc_lock, flags);
845 880
846 status = dma_async_is_complete(cookie, last_complete, last_used); 881 status = dma_async_is_complete(cookie, last_complete, last_used);
847 882
@@ -859,7 +894,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
859 } 894 }
860 } 895 }
861 896
862 spin_unlock_bh(&sh_chan->desc_lock); 897 spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
863 898
864 return status; 899 return status;
865} 900}
@@ -912,6 +947,12 @@ static bool sh_dmae_reset(struct sh_dmae_device *shdev)
912 947
913 list_splice_init(&sh_chan->ld_queue, &dl); 948 list_splice_init(&sh_chan->ld_queue, &dl);
914 949
950 if (!list_empty(&dl)) {
951 dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
952 pm_runtime_put(sh_chan->dev);
953 }
954 sh_chan->pm_state = DMAE_PM_ESTABLISHED;
955
915 spin_unlock(&sh_chan->desc_lock); 956 spin_unlock(&sh_chan->desc_lock);
916 957
917 /* Complete all */ 958 /* Complete all */
@@ -952,7 +993,7 @@ static void dmae_do_tasklet(unsigned long data)
952 u32 sar_buf = sh_dmae_readl(sh_chan, SAR); 993 u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
953 u32 dar_buf = sh_dmae_readl(sh_chan, DAR); 994 u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
954 995
955 spin_lock(&sh_chan->desc_lock); 996 spin_lock_irq(&sh_chan->desc_lock);
956 list_for_each_entry(desc, &sh_chan->ld_queue, node) { 997 list_for_each_entry(desc, &sh_chan->ld_queue, node) {
957 if (desc->mark == DESC_SUBMITTED && 998 if (desc->mark == DESC_SUBMITTED &&
958 ((desc->direction == DMA_FROM_DEVICE && 999 ((desc->direction == DMA_FROM_DEVICE &&
@@ -965,10 +1006,10 @@ static void dmae_do_tasklet(unsigned long data)
965 break; 1006 break;
966 } 1007 }
967 } 1008 }
968 spin_unlock(&sh_chan->desc_lock);
969
970 /* Next desc */ 1009 /* Next desc */
971 sh_chan_xfer_ld_queue(sh_chan); 1010 sh_chan_xfer_ld_queue(sh_chan);
1011 spin_unlock_irq(&sh_chan->desc_lock);
1012
972 sh_dmae_chan_ld_cleanup(sh_chan, false); 1013 sh_dmae_chan_ld_cleanup(sh_chan, false);
973} 1014}
974 1015
@@ -1036,7 +1077,9 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
1036 return -ENOMEM; 1077 return -ENOMEM;
1037 } 1078 }
1038 1079
1039 /* copy struct dma_device */ 1080 new_sh_chan->pm_state = DMAE_PM_ESTABLISHED;
1081
1082 /* reference struct dma_device */
1040 new_sh_chan->common.device = &shdev->common; 1083 new_sh_chan->common.device = &shdev->common;
1041 1084
1042 new_sh_chan->dev = shdev->common.dev; 1085 new_sh_chan->dev = shdev->common.dev;
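
The shdma rework ties runtime PM to actual transfers: the first descriptor submitted to an empty ld_queue takes a pm_runtime_get() and reprograms the channel once the wake-up has completed, and the matching put happens when the queue drains or the controller is reset. The DMAE_PM_* tri-state covers the window where issue_pending() runs before power-up finishes. A compressed sketch of the hand-off, with the locking elided:

/* On submit (simplified; the real code holds desc_lock around this): */
if (list_empty(&chan->ld_queue)) {              /* first pending descriptor */
        chan->pm_state = DMAE_PM_BUSY;
        pm_runtime_get(chan->dev);              /* asynchronous power-up */
        pm_runtime_barrier(chan->dev);          /* wait for it to land */
        /* ... reinitialize the channel registers here ... */
        if (chan->pm_state == DMAE_PM_PENDING)
                xfer_ld_queue(chan);            /* issue_pending raced us */
        chan->pm_state = DMAE_PM_ESTABLISHED;
}

/* On completion, once the queue is empty again: */
if (list_empty(&chan->ld_queue))
        pm_runtime_put(chan->dev);              /* balance the get above */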
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index dc56576f9fdb..2b55a276dc5b 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -23,6 +23,12 @@
23 23
24struct device; 24struct device;
25 25
26enum dmae_pm_state {
27 DMAE_PM_ESTABLISHED,
28 DMAE_PM_BUSY,
29 DMAE_PM_PENDING,
30};
31
26struct sh_dmae_chan { 32struct sh_dmae_chan {
27 dma_cookie_t completed_cookie; /* The maximum cookie completed */ 33 dma_cookie_t completed_cookie; /* The maximum cookie completed */
28 spinlock_t desc_lock; /* Descriptor operation lock */ 34 spinlock_t desc_lock; /* Descriptor operation lock */
@@ -38,6 +44,7 @@ struct sh_dmae_chan {
38 u32 __iomem *base; 44 u32 __iomem *base;
39 char dev_id[16]; /* unique name per DMAC of channel */ 45 char dev_id[16]; /* unique name per DMAC of channel */
40 int pm_error; 46 int pm_error;
47 enum dmae_pm_state pm_state;
41}; 48};
42 49
43struct sh_dmae_device { 50struct sh_dmae_device {
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index f69f90a61873..a4a398f2ef61 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -753,7 +753,7 @@ static int __devinit td_probe(struct platform_device *pdev)
753 753
754 INIT_LIST_HEAD(&td->dma.channels); 754 INIT_LIST_HEAD(&td->dma.channels);
755 755
756 for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) { 756 for (i = 0; i < pdata->nr_channels; i++) {
757 struct timb_dma_chan *td_chan = &td->channels[i]; 757 struct timb_dma_chan *td_chan = &td->channels[i];
758 struct timb_dma_platform_data_channel *pchan = 758 struct timb_dma_platform_data_channel *pchan =
759 pdata->channels + i; 759 pdata->channels + i;
@@ -762,12 +762,11 @@ static int __devinit td_probe(struct platform_device *pdev)
762 if ((i % 2) == pchan->rx) { 762 if ((i % 2) == pchan->rx) {
763 dev_err(&pdev->dev, "Wrong channel configuration\n"); 763 dev_err(&pdev->dev, "Wrong channel configuration\n");
764 err = -EINVAL; 764 err = -EINVAL;
765 goto err_tasklet_kill; 765 goto err_free_irq;
766 } 766 }
767 767
768 td_chan->chan.device = &td->dma; 768 td_chan->chan.device = &td->dma;
769 td_chan->chan.cookie = 1; 769 td_chan->chan.cookie = 1;
770 td_chan->chan.chan_id = i;
771 spin_lock_init(&td_chan->lock); 770 spin_lock_init(&td_chan->lock);
772 INIT_LIST_HEAD(&td_chan->active_list); 771 INIT_LIST_HEAD(&td_chan->active_list);
773 INIT_LIST_HEAD(&td_chan->queue); 772 INIT_LIST_HEAD(&td_chan->queue);
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index d2856b6b2a62..720f99334a7f 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -913,9 +913,9 @@ request_done:
 }
 
 static void s3cmci_dma_setup(struct s3cmci_host *host,
-			     enum s3c2410_dmasrc source)
+			     enum dma_data_direction source)
 {
-	static enum s3c2410_dmasrc last_source = -1;
+	static enum dma_data_direction last_source = -1;
 	static int setup_ok;
 
 	if (last_source == source)
@@ -1087,7 +1087,7 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
 
 	BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
 
-	s3cmci_dma_setup(host, rw ? S3C2410_DMASRC_MEM : S3C2410_DMASRC_HW);
+	s3cmci_dma_setup(host, rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);
 
 	dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 595dacc7645f..019a7163572f 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -131,6 +131,12 @@
 #define RXBUSY		(1<<2)
 #define TXBUSY		(1<<3)
 
+struct s3c64xx_spi_dma_data {
+	unsigned		ch;
+	enum dma_data_direction direction;
+	enum dma_ch		dmach;
+};
+
 /**
  * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
  * @clk: Pointer to the spi clock.
@@ -164,13 +170,14 @@ struct s3c64xx_spi_driver_data {
 	struct work_struct		work;
 	struct list_head		queue;
 	spinlock_t			lock;
-	enum dma_ch			rx_dmach;
-	enum dma_ch			tx_dmach;
 	unsigned long			sfr_start;
 	struct completion		xfer_completion;
 	unsigned			state;
 	unsigned			cur_mode, cur_bpw;
 	unsigned			cur_speed;
+	struct s3c64xx_spi_dma_data	rx_dma;
+	struct s3c64xx_spi_dma_data	tx_dma;
+	struct samsung_dma_ops		*ops;
 };
 
 static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
@@ -226,6 +233,78 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
 	writel(val, regs + S3C64XX_SPI_CH_CFG);
 }
 
+static void s3c64xx_spi_dmacb(void *data)
+{
+	struct s3c64xx_spi_driver_data *sdd;
+	struct s3c64xx_spi_dma_data *dma = data;
+	unsigned long flags;
+
+	if (dma->direction == DMA_FROM_DEVICE)
+		sdd = container_of(data,
+			struct s3c64xx_spi_driver_data, rx_dma);
+	else
+		sdd = container_of(data,
+			struct s3c64xx_spi_driver_data, tx_dma);
+
+	spin_lock_irqsave(&sdd->lock, flags);
+
+	if (dma->direction == DMA_FROM_DEVICE) {
+		sdd->state &= ~RXBUSY;
+		if (!(sdd->state & TXBUSY))
+			complete(&sdd->xfer_completion);
+	} else {
+		sdd->state &= ~TXBUSY;
+		if (!(sdd->state & RXBUSY))
+			complete(&sdd->xfer_completion);
+	}
+
+	spin_unlock_irqrestore(&sdd->lock, flags);
+}
+
+static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
+					unsigned len, dma_addr_t buf)
+{
+	struct s3c64xx_spi_driver_data *sdd;
+	struct samsung_dma_prep_info info;
+
+	if (dma->direction == DMA_FROM_DEVICE)
+		sdd = container_of((void *)dma,
+			struct s3c64xx_spi_driver_data, rx_dma);
+	else
+		sdd = container_of((void *)dma,
+			struct s3c64xx_spi_driver_data, tx_dma);
+
+	info.cap = DMA_SLAVE;
+	info.len = len;
+	info.fp = s3c64xx_spi_dmacb;
+	info.fp_param = dma;
+	info.direction = dma->direction;
+	info.buf = buf;
+
+	sdd->ops->prepare(dma->ch, &info);
+	sdd->ops->trigger(dma->ch);
+}
+
+static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
+{
+	struct samsung_dma_info info;
+
+	sdd->ops = samsung_dma_get_ops();
+
+	info.cap = DMA_SLAVE;
+	info.client = &s3c64xx_spi_dma_client;
+	info.width = sdd->cur_bpw / 8;
+
+	info.direction = sdd->rx_dma.direction;
+	info.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
+	sdd->rx_dma.ch = sdd->ops->request(sdd->rx_dma.dmach, &info);
+	info.direction = sdd->tx_dma.direction;
+	info.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
+	sdd->tx_dma.ch = sdd->ops->request(sdd->tx_dma.dmach, &info);
+
+	return 1;
+}
+
 static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 				struct spi_device *spi,
 				struct spi_transfer *xfer, int dma_mode)
@@ -258,10 +337,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
 		if (dma_mode) {
 			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
-			s3c2410_dma_config(sdd->tx_dmach, sdd->cur_bpw / 8);
-			s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd,
-					xfer->tx_dma, xfer->len);
-			s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START);
+			prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
 		} else {
 			switch (sdd->cur_bpw) {
 			case 32:
@@ -293,10 +369,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
 				| S3C64XX_SPI_PACKET_CNT_EN,
 				regs + S3C64XX_SPI_PACKET_CNT);
-			s3c2410_dma_config(sdd->rx_dmach, sdd->cur_bpw / 8);
-			s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd,
-					xfer->rx_dma, xfer->len);
-			s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START);
+			prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
 		}
 	}
 
@@ -482,46 +555,6 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
 	}
 }
 
-static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
-				 int size, enum s3c2410_dma_buffresult res)
-{
-	struct s3c64xx_spi_driver_data *sdd = buf_id;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sdd->lock, flags);
-
-	if (res == S3C2410_RES_OK)
-		sdd->state &= ~RXBUSY;
-	else
-		dev_err(&sdd->pdev->dev, "DmaAbrtRx-%d\n", size);
-
-	/* If the other done */
-	if (!(sdd->state & TXBUSY))
-		complete(&sdd->xfer_completion);
-
-	spin_unlock_irqrestore(&sdd->lock, flags);
-}
-
-static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
-				 int size, enum s3c2410_dma_buffresult res)
-{
-	struct s3c64xx_spi_driver_data *sdd = buf_id;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sdd->lock, flags);
-
-	if (res == S3C2410_RES_OK)
-		sdd->state &= ~TXBUSY;
-	else
-		dev_err(&sdd->pdev->dev, "DmaAbrtTx-%d \n", size);
-
-	/* If the other done */
-	if (!(sdd->state & RXBUSY))
-		complete(&sdd->xfer_completion);
-
-	spin_unlock_irqrestore(&sdd->lock, flags);
-}
-
 #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
 
 static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
@@ -696,12 +729,10 @@ static void handle_msg(struct s3c64xx_spi_driver_data *sdd,
 		if (use_dma) {
 			if (xfer->tx_buf != NULL
 					&& (sdd->state & TXBUSY))
-				s3c2410_dma_ctrl(sdd->tx_dmach,
-						S3C2410_DMAOP_FLUSH);
+				sdd->ops->stop(sdd->tx_dma.ch);
 			if (xfer->rx_buf != NULL
 					&& (sdd->state & RXBUSY))
-				s3c2410_dma_ctrl(sdd->rx_dmach,
-						S3C2410_DMAOP_FLUSH);
+				sdd->ops->stop(sdd->rx_dma.ch);
 		}
 
 		goto out;
@@ -739,30 +770,6 @@ out:
 		msg->complete(msg->context);
 }
 
-static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
-{
-	if (s3c2410_dma_request(sdd->rx_dmach,
-					&s3c64xx_spi_dma_client, NULL) < 0) {
-		dev_err(&sdd->pdev->dev, "cannot get RxDMA\n");
-		return 0;
-	}
-	s3c2410_dma_set_buffdone_fn(sdd->rx_dmach, s3c64xx_spi_dma_rxcb);
-	s3c2410_dma_devconfig(sdd->rx_dmach, S3C2410_DMASRC_HW,
-					sdd->sfr_start + S3C64XX_SPI_RX_DATA);
-
-	if (s3c2410_dma_request(sdd->tx_dmach,
-					&s3c64xx_spi_dma_client, NULL) < 0) {
-		dev_err(&sdd->pdev->dev, "cannot get TxDMA\n");
-		s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
-		return 0;
-	}
-	s3c2410_dma_set_buffdone_fn(sdd->tx_dmach, s3c64xx_spi_dma_txcb);
-	s3c2410_dma_devconfig(sdd->tx_dmach, S3C2410_DMASRC_MEM,
-					sdd->sfr_start + S3C64XX_SPI_TX_DATA);
-
-	return 1;
-}
-
 static void s3c64xx_spi_work(struct work_struct *work)
 {
 	struct s3c64xx_spi_driver_data *sdd = container_of(work,
@@ -799,8 +806,8 @@ static void s3c64xx_spi_work(struct work_struct *work)
 	spin_unlock_irqrestore(&sdd->lock, flags);
 
 	/* Free DMA channels */
-	s3c2410_dma_free(sdd->tx_dmach, &s3c64xx_spi_dma_client);
-	s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
+	sdd->ops->release(sdd->rx_dma.ch, &s3c64xx_spi_dma_client);
+	sdd->ops->release(sdd->tx_dma.ch, &s3c64xx_spi_dma_client);
 }
 
 static int s3c64xx_spi_transfer(struct spi_device *spi,
@@ -1017,8 +1024,10 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
 	sdd->cntrlr_info = sci;
 	sdd->pdev = pdev;
 	sdd->sfr_start = mem_res->start;
-	sdd->tx_dmach = dmatx_res->start;
-	sdd->rx_dmach = dmarx_res->start;
+	sdd->tx_dma.dmach = dmatx_res->start;
+	sdd->tx_dma.direction = DMA_TO_DEVICE;
+	sdd->rx_dma.dmach = dmarx_res->start;
+	sdd->rx_dma.direction = DMA_FROM_DEVICE;
 
 	sdd->cur_bpw = 8;
 
@@ -1106,7 +1115,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
 					pdev->id, master->num_chipselect);
 	dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n",
 					mem_res->end, mem_res->start,
-					sdd->rx_dmach, sdd->tx_dmach);
+					sdd->rx_dma.dmach, sdd->tx_dma.dmach);
 
 	return 0;
 
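
Seen end to end, the conversion funnels all channel handling through five calls on the plat-samsung wrapper. A condensed sketch of that lifecycle, using only the ops and fields visible in this diff; dmach, buf, len, sfr_start and sdd are placeholders, and error handling is omitted:

	struct samsung_dma_ops *ops = samsung_dma_get_ops();
	struct samsung_dma_info info = {
		.cap		= DMA_SLAVE,
		.client		= &s3c64xx_spi_dma_client,
		.width		= 1,			/* cur_bpw / 8 above */
		.direction	= DMA_TO_DEVICE,
		.fifo		= sfr_start + S3C64XX_SPI_TX_DATA,
	};
	unsigned ch = ops->request(dmach, &info);	/* bind the channel */

	struct samsung_dma_prep_info prep = {
		.cap		= DMA_SLAVE,
		.direction	= DMA_TO_DEVICE,
		.buf		= buf,			/* dma_addr_t of payload */
		.len		= len,
		.fp		= s3c64xx_spi_dmacb,	/* completion callback */
		.fp_param	= &sdd->tx_dma,
	};
	ops->prepare(ch, &prep);			/* queue a descriptor */
	ops->trigger(ch);				/* start the transfer */
	/* on abort: ops->stop(ch); on teardown: */
	ops->release(ch, &s3c64xx_spi_dma_client);
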
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 9871c57b348e..1945c70539c2 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1446,12 +1446,8 @@ static bool filter(struct dma_chan *chan, void *slave)
 	dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__,
 		param->slave_id);
 
-	if (param->dma_dev == chan->device->dev) {
-		chan->private = param;
-		return true;
-	} else {
-		return false;
-	}
+	chan->private = param;
+	return true;
 }
 
 static void rx_timer_fn(unsigned long arg)
@@ -1477,10 +1473,10 @@ static void sci_request_dma(struct uart_port *port)
 	dma_cap_mask_t mask;
 	int nent;
 
-	dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__,
-		port->line, s->cfg->dma_dev);
+	dev_dbg(port->dev, "%s: port %d\n", __func__,
+		port->line);
 
-	if (!s->cfg->dma_dev)
+	if (s->cfg->dma_slave_tx <= 0 || s->cfg->dma_slave_rx <= 0)
 		return;
 
 	dma_cap_zero(mask);
@@ -1490,7 +1486,6 @@ static void sci_request_dma(struct uart_port *port)
 
 	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
 	param->slave_id = s->cfg->dma_slave_tx;
-	param->dma_dev = s->cfg->dma_dev;
 
 	s->cookie_tx = -EINVAL;
 	chan = dma_request_channel(mask, filter, param);
@@ -1519,7 +1514,6 @@ static void sci_request_dma(struct uart_port *port)
 
 	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
 	param->slave_id = s->cfg->dma_slave_rx;
-	param->dma_dev = s->cfg->dma_dev;
 
 	chan = dma_request_channel(mask, filter, param);
 	dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
@@ -1564,9 +1558,6 @@ static void sci_free_dma(struct uart_port *port)
 {
 	struct sci_port *s = to_sci_port(port);
 
-	if (!s->cfg->dma_dev)
-		return;
-
 	if (s->chan_tx)
 		sci_tx_dma_release(s, false);
 	if (s->chan_rx)
@@ -1981,9 +1972,9 @@ static int __devinit sci_init_single(struct platform_device *dev,
 	port->serial_in = sci_serial_in;
 	port->serial_out = sci_serial_out;
 
-	if (p->dma_dev)
-		dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n",
-			p->dma_dev, p->dma_slave_tx, p->dma_slave_rx);
+	if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0)
+		dev_dbg(port->dev, "DMA tx %d, rx %d\n",
+			p->dma_slave_tx, p->dma_slave_rx);
 
 	return 0;
 }
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index e6e28f37d8ec..9eabffbc4e50 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -47,6 +47,9 @@ enum {
 * @muxval: a number usually used to poke into some mux regiser to
 *	mux in the signal to this channel
 * @cctl_opt: default options for the channel control register
+ * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave
+ * channels. Fill with 'true' if peripheral should be flow controller. Direction
+ * will be selected at Runtime.
 * @addr: source/target address in physical memory for this DMA channel,
 *	can be the address of a FIFO register for burst requests for example.
 *	This can be left undefined if the PrimeCell API is used for configuring
@@ -65,6 +68,7 @@ struct pl08x_channel_data {
 	int max_signal;
 	u32 muxval;
 	u32 cctl;
+	bool device_fc;
 	dma_addr_t addr;
 	bool circular_buffer;
 	bool single;
@@ -77,13 +81,11 @@ struct pl08x_channel_data {
 * @addr: current address
 * @maxwidth: the maximum width of a transfer on this bus
 * @buswidth: the width of this bus in bytes: 1, 2 or 4
- * @fill_bytes: bytes required to fill to the next bus memory boundary
 */
struct pl08x_bus_data {
 	dma_addr_t addr;
 	u8 maxwidth;
 	u8 buswidth;
-	size_t fill_bytes;
};
 
 /**
@@ -104,17 +106,35 @@ struct pl08x_phy_chan {
 };
 
 /**
+ * struct pl08x_sg - structure containing data per sg
+ * @src_addr: src address of sg
+ * @dst_addr: dst address of sg
+ * @len: transfer len in bytes
+ * @node: node for txd's dsg_list
+ */
+struct pl08x_sg {
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	size_t len;
+	struct list_head node;
+};
+
+/**
 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
+ * @tx: async tx descriptor
+ * @node: node for txd list for channels
+ * @dsg_list: list of children sg's
+ * @direction: direction of transfer
 * @llis_bus: DMA memory address (physical) start for the LLIs
 * @llis_va: virtual memory address start for the LLIs
+ * @cctl: control reg values for current txd
+ * @ccfg: config reg values for current txd
 */
struct pl08x_txd {
 	struct dma_async_tx_descriptor tx;
 	struct list_head node;
+	struct list_head dsg_list;
 	enum dma_data_direction direction;
-	dma_addr_t src_addr;
-	dma_addr_t dst_addr;
-	size_t len;
 	dma_addr_t llis_bus;
 	struct pl08x_lli *llis_va;
 	/* Default cctl value for LLIs */
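
For board code, the new @device_fc knob is simply one more field in the per-channel platform data. A hypothetical slave-channel entry; every value here, and the bus_id/min_signal members, are assumptions for illustration, and only device_fc itself comes from this patch:

static struct pl08x_channel_data board_uart0_tx = {
	.bus_id		= "uart0_tx",	/* assumed request-signal name */
	.min_signal	= 0,		/* assumed request line */
	.max_signal	= 0,
	.muxval		= 0,
	.cctl		= 0,
	.device_fc	= false,	/* DMAC, not the peripheral, flow-controls */
	.addr		= 0x10009000,	/* assumed UART FIFO address */
};
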
diff --git a/include/linux/amba/pl330.h b/include/linux/amba/pl330.h
index cbee7de7dd36..d12f077a6daf 100644
--- a/include/linux/amba/pl330.h
+++ b/include/linux/amba/pl330.h
@@ -19,12 +19,8 @@ struct dma_pl330_peri {
 	 * Peri_Req i/f of the DMAC that is
 	 * peripheral could be reached from.
 	 */
-	u8 peri_id; /* {0, 31} */
+	u8 peri_id; /* specific dma id */
 	enum pl330_reqtype rqtype;
-
-	/* For M->D and D->M Channels */
-	int burst_sz; /* in power of 2 */
-	dma_addr_t fifo_addr;
};
 
 struct dma_pl330_platdata {
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 8fbf40e0713c..ace51af4369f 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -24,8 +24,7 @@
 #include <linux/device.h>
 #include <linux/uio.h>
 #include <linux/dma-direction.h>
-
-struct scatterlist;
+#include <linux/scatterlist.h>
 
 /**
 * typedef dma_cookie_t - an opaque DMA cookie
@@ -519,6 +518,16 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
 			(unsigned long)config);
 }
 
+static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
+	struct dma_chan *chan, void *buf, size_t len,
+	enum dma_data_direction dir, unsigned long flags)
+{
+	struct scatterlist sg;
+	sg_init_one(&sg, buf, len);
+
+	return chan->device->device_prep_slave_sg(chan, &sg, 1, dir, flags);
+}
+
 static inline int dmaengine_terminate_all(struct dma_chan *chan)
 {
 	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
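
The new dmaengine_prep_slave_single() helper spares slave drivers from hand-building a scatterlist for the common one-buffer case; switching the forward declaration to a full <linux/scatterlist.h> include is what lets the helper call sg_init_one() from the header. A minimal usage sketch; the channel is assumed to be already configured via dmaengine_slave_config(), and my_done_cb/ctx are placeholders:

	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_single(chan, buf, len,
					   DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EIO;

	desc->callback = my_done_cb;		/* placeholder completion hook */
	desc->callback_param = ctx;

	cookie = desc->tx_submit(desc);		/* queue on the channel */
	dma_async_issue_pending(chan);		/* kick the engine */
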
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index 8bffe9ae2ca0..0efa1f10bc2b 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -131,8 +131,6 @@ struct plat_sci_port {
 
 	struct plat_sci_port_ops	*ops;
 
-	struct device	*dma_dev;
-
 	unsigned int dma_slave_tx;
 	unsigned int dma_slave_rx;
 };
diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c
index b5e922f469d5..bad91b4584f9 100644
--- a/sound/soc/samsung/ac97.c
+++ b/sound/soc/samsung/ac97.c
@@ -271,7 +271,10 @@ static int s3c_ac97_trigger(struct snd_pcm_substream *substream, int cmd,
 
 	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
 
-	s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED);
+	if (!dma_data->ops)
+		dma_data->ops = samsung_dma_get_ops();
+
+	dma_data->ops->started(dma_data->channel);
 
 	return 0;
 }
@@ -317,7 +320,10 @@ static int s3c_ac97_mic_trigger(struct snd_pcm_substream *substream,
 
 	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
 
-	s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED);
+	if (!dma_data->ops)
+		dma_data->ops = samsung_dma_get_ops();
+
+	dma_data->ops->started(dma_data->channel);
 
 	return 0;
 }
diff --git a/sound/soc/samsung/dma.c b/sound/soc/samsung/dma.c
index 9465588b02f2..851346f7d68d 100644
--- a/sound/soc/samsung/dma.c
+++ b/sound/soc/samsung/dma.c
@@ -54,7 +54,6 @@ struct runtime_data {
 	spinlock_t lock;
 	int state;
 	unsigned int dma_loaded;
-	unsigned int dma_limit;
 	unsigned int dma_period;
 	dma_addr_t dma_start;
 	dma_addr_t dma_pos;
@@ -62,77 +61,79 @@ struct runtime_data {
 	struct s3c_dma_params *params;
 };
 
+static void audio_buffdone(void *data);
+
 /* dma_enqueue
 *
 * place a dma buffer onto the queue for the dma system
 * to handle.
-*/
+ */
 static void dma_enqueue(struct snd_pcm_substream *substream)
 {
 	struct runtime_data *prtd = substream->runtime->private_data;
 	dma_addr_t pos = prtd->dma_pos;
 	unsigned int limit;
-	int ret;
+	struct samsung_dma_prep_info dma_info;
 
 	pr_debug("Entered %s\n", __func__);
 
-	if (s3c_dma_has_circular())
-		limit = (prtd->dma_end - prtd->dma_start) / prtd->dma_period;
-	else
-		limit = prtd->dma_limit;
+	limit = (prtd->dma_end - prtd->dma_start) / prtd->dma_period;
 
 	pr_debug("%s: loaded %d, limit %d\n",
 				__func__, prtd->dma_loaded, limit);
 
-	while (prtd->dma_loaded < limit) {
-		unsigned long len = prtd->dma_period;
+	dma_info.cap = (samsung_dma_has_circular() ? DMA_CYCLIC : DMA_SLAVE);
+	dma_info.direction =
+		(substream->stream == SNDRV_PCM_STREAM_PLAYBACK
+		? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	dma_info.fp = audio_buffdone;
+	dma_info.fp_param = substream;
+	dma_info.period = prtd->dma_period;
+	dma_info.len = prtd->dma_period*limit;
 
+	while (prtd->dma_loaded < limit) {
 		pr_debug("dma_loaded: %d\n", prtd->dma_loaded);
 
-		if ((pos + len) > prtd->dma_end) {
-			len = prtd->dma_end - pos;
-			pr_debug("%s: corrected dma len %ld\n", __func__, len);
+		if ((pos + dma_info.period) > prtd->dma_end) {
+			dma_info.period = prtd->dma_end - pos;
+			pr_debug("%s: corrected dma len %ld\n",
+					__func__, dma_info.period);
 		}
 
-		ret = s3c2410_dma_enqueue(prtd->params->channel,
-			substream, pos, len);
+		dma_info.buf = pos;
+		prtd->params->ops->prepare(prtd->params->ch, &dma_info);
 
-		if (ret == 0) {
-			prtd->dma_loaded++;
-			pos += prtd->dma_period;
-			if (pos >= prtd->dma_end)
-				pos = prtd->dma_start;
-		} else
-			break;
+		prtd->dma_loaded++;
+		pos += prtd->dma_period;
+		if (pos >= prtd->dma_end)
+			pos = prtd->dma_start;
 	}
 
 	prtd->dma_pos = pos;
}
 
-static void audio_buffdone(struct s3c2410_dma_chan *channel,
-				void *dev_id, int size,
-				enum s3c2410_dma_buffresult result)
+static void audio_buffdone(void *data)
{
-	struct snd_pcm_substream *substream = dev_id;
-	struct runtime_data *prtd;
+	struct snd_pcm_substream *substream = data;
+	struct runtime_data *prtd = substream->runtime->private_data;
 
 	pr_debug("Entered %s\n", __func__);
 
-	if (result == S3C2410_RES_ABORT || result == S3C2410_RES_ERR)
-		return;
-
-	prtd = substream->runtime->private_data;
+	if (prtd->state & ST_RUNNING) {
+		prtd->dma_pos += prtd->dma_period;
+		if (prtd->dma_pos >= prtd->dma_end)
+			prtd->dma_pos = prtd->dma_start;
 
 		if (substream)
 			snd_pcm_period_elapsed(substream);
 
 		spin_lock(&prtd->lock);
-	if (prtd->state & ST_RUNNING && !s3c_dma_has_circular()) {
+		if (!samsung_dma_has_circular()) {
 			prtd->dma_loaded--;
 			dma_enqueue(substream);
+		}
+		spin_unlock(&prtd->lock);
 	}
-
-	spin_unlock(&prtd->lock);
}
 
static int dma_hw_params(struct snd_pcm_substream *substream,
@@ -144,8 +145,7 @@ static int dma_hw_params(struct snd_pcm_substream *substream,
 	unsigned long totbytes = params_buffer_bytes(params);
 	struct s3c_dma_params *dma =
 		snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
-	int ret = 0;
-
+	struct samsung_dma_info dma_info;
 
 	pr_debug("Entered %s\n", __func__);
 
@@ -163,30 +163,26 @@ static int dma_hw_params(struct snd_pcm_substream *substream,
 		pr_debug("params %p, client %p, channel %d\n", prtd->params,
 			prtd->params->client, prtd->params->channel);
 
-		ret = s3c2410_dma_request(prtd->params->channel,
-					prtd->params->client, NULL);
-
-		if (ret < 0) {
-			printk(KERN_ERR "failed to get dma channel\n");
-			return ret;
-		}
-
-		/* use the circular buffering if we have it available. */
-		if (s3c_dma_has_circular())
-			s3c2410_dma_setflags(prtd->params->channel,
-					S3C2410_DMAF_CIRCULAR);
+		prtd->params->ops = samsung_dma_get_ops();
+
+		dma_info.cap = (samsung_dma_has_circular() ?
+			DMA_CYCLIC : DMA_SLAVE);
+		dma_info.client = prtd->params->client;
+		dma_info.direction =
+			(substream->stream == SNDRV_PCM_STREAM_PLAYBACK
+			? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		dma_info.width = prtd->params->dma_size;
+		dma_info.fifo = prtd->params->dma_addr;
+		prtd->params->ch = prtd->params->ops->request(
+				prtd->params->channel, &dma_info);
 	}
 
-	s3c2410_dma_set_buffdone_fn(prtd->params->channel,
-				audio_buffdone);
-
 	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
 
 	runtime->dma_bytes = totbytes;
 
 	spin_lock_irq(&prtd->lock);
 	prtd->dma_loaded = 0;
-	prtd->dma_limit = runtime->hw.periods_min;
 	prtd->dma_period = params_period_bytes(params);
 	prtd->dma_start = runtime->dma_addr;
 	prtd->dma_pos = prtd->dma_start;
@@ -206,7 +202,8 @@ static int dma_hw_free(struct snd_pcm_substream *substream)
 	snd_pcm_set_runtime_buffer(substream, NULL);
 
 	if (prtd->params) {
-		s3c2410_dma_free(prtd->params->channel, prtd->params->client);
+		prtd->params->ops->release(prtd->params->ch,
+					prtd->params->client);
 		prtd->params = NULL;
 	}
 
@@ -225,23 +222,9 @@ static int dma_prepare(struct snd_pcm_substream *substream)
 	if (!prtd->params)
 		return 0;
 
-	/* channel needs configuring for mem=>device, increment memory addr,
-	 * sync to pclk, half-word transfers to the IIS-FIFO. */
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		s3c2410_dma_devconfig(prtd->params->channel,
-				S3C2410_DMASRC_MEM,
-				prtd->params->dma_addr);
-	} else {
-		s3c2410_dma_devconfig(prtd->params->channel,
-				S3C2410_DMASRC_HW,
-				prtd->params->dma_addr);
-	}
-
-	s3c2410_dma_config(prtd->params->channel,
-			prtd->params->dma_size);
-
 	/* flush the DMA channel */
-	s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_FLUSH);
+	prtd->params->ops->flush(prtd->params->ch);
+
 	prtd->dma_loaded = 0;
 	prtd->dma_pos = prtd->dma_start;
 
@@ -265,14 +248,14 @@ static int dma_trigger(struct snd_pcm_substream *substream, int cmd)
 	case SNDRV_PCM_TRIGGER_RESUME:
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
 		prtd->state |= ST_RUNNING;
-		s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_START);
+		prtd->params->ops->trigger(prtd->params->ch);
 		break;
 
 	case SNDRV_PCM_TRIGGER_STOP:
 	case SNDRV_PCM_TRIGGER_SUSPEND:
 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
 		prtd->state &= ~ST_RUNNING;
-		s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_STOP);
+		prtd->params->ops->stop(prtd->params->ch);
 		break;
 
 	default:
@@ -291,21 +274,12 @@ dma_pointer(struct snd_pcm_substream *substream)
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct runtime_data *prtd = runtime->private_data;
 	unsigned long res;
-	dma_addr_t src, dst;
 
 	pr_debug("Entered %s\n", __func__);
 
-	spin_lock(&prtd->lock);
-	s3c2410_dma_getposition(prtd->params->channel, &src, &dst);
-
-	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-		res = dst - prtd->dma_start;
-	else
-		res = src - prtd->dma_start;
-
-	spin_unlock(&prtd->lock);
+	res = prtd->dma_pos - prtd->dma_start;
 
-	pr_debug("Pointer %x %x\n", src, dst);
+	pr_debug("Pointer offset: %lu\n", res);
 
 	/* we seem to be getting the odd error from the pcm library due
 	 * to out-of-bounds pointers. this is maybe due to the dma engine
diff --git a/sound/soc/samsung/dma.h b/sound/soc/samsung/dma.h
index c50659269a40..7d1ead77ef21 100644
--- a/sound/soc/samsung/dma.h
+++ b/sound/soc/samsung/dma.h
@@ -6,7 +6,7 @@
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
- * ALSA PCM interface for the Samsung S3C24xx CPU
+ * ALSA PCM interface for the Samsung SoC
 */
 
 #ifndef _S3C_AUDIO_H
@@ -17,6 +17,8 @@ struct s3c_dma_params {
 	int channel;				/* Channel ID */
 	dma_addr_t dma_addr;
 	int dma_size;			/* Size of the DMA transfer */
+	unsigned ch;
+	struct samsung_dma_ops *ops;
 };
 
 #endif