author	Linus Torvalds <torvalds@linux-foundation.org>	2012-08-01 19:41:07 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-08-01 19:41:07 -0400
commit	a6dc77254b3c3eb0307b372b77b861d5cd2ead08 (patch)
tree	5770a808b0527eebeff43f16508ea8f03e459b58
parent	02a6ec6a24077ffda33b99cb193e8a536b90711d (diff)
parent	0e52d987c0b242fe3fe4c8e9732bd663cce0e50b (diff)
Merge branch 'dmaengine' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM DMA engine updates from Russell King:
 "This looks scary at first glance, but what it is is:

   - a rework of the sa11x0 DMA engine driver merged during the previous
     cycle, to extract a common set of helper functions for DMA engine
     implementations.

   - conversion of amba-pl08x.c to use these helper functions.

   - addition of OMAP DMA engine driver (using these helper functions),
     and conversion of some of the OMAP DMA users to use DMA engine.

  Nothing in the helper functions is ARM specific, so I hope that other
  implementations can consolidate some of their code by making use of
  these helpers.

  This has been sitting in linux-next most of the merge cycle, and has
  been tested by several OMAP folk.  I've tested it on sa11x0 platforms,
  and given it my best shot on my broken platforms which have the
  amba-pl08x controller.

  The last point is the addition to feature-removal-schedule.txt, which
  will have a merge conflict.  Between myself and TI, we're planning to
  remove the old TI DMA implementation next year."

Fix up trivial add/add conflicts in Documentation/feature-removal-schedule.txt
and drivers/dma/{Kconfig,Makefile}

* 'dmaengine' of git://git.linaro.org/people/rmk/linux-arm: (53 commits)
  ARM: 7481/1: OMAP2+: omap2plus_defconfig: enable OMAP DMA engine
  ARM: 7464/1: mmc: omap_hsmmc: ensure probe returns error if DMA channel request fails
  Add feature removal of old OMAP private DMA implementation
  mtd: omap2: remove private DMA API implementation
  mtd: omap2: add DMA engine support
  spi: omap2-mcspi: remove private DMA API implementation
  spi: omap2-mcspi: add DMA engine support
  ARM: omap: remove mmc platform data dma_mask and initialization
  mmc: omap: remove private DMA API implementation
  mmc: omap: add DMA engine support
  mmc: omap_hsmmc: remove private DMA API implementation
  mmc: omap_hsmmc: add DMA engine support
  dmaengine: omap: add support for cyclic DMA
  dmaengine: omap: add support for setting fi
  dmaengine: omap: add support for returning residue in tx_state method
  dmaengine: add OMAP DMA engine driver
  dmaengine: sa11x0-dma: add cyclic DMA support
  dmaengine: sa11x0-dma: fix DMA residue support
  dmaengine: PL08x: ensure all descriptors are freed when channel is released
  dmaengine: PL08x: get rid of write only pool_ctr and free_txd locking
  ...
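[Editor's note: the common helper functions mentioned in the pull message live in the new drivers/dma/virt-dma.{c,h} (see the diffstat below). The following is a minimal sketch, not part of the merged code, of how a driver embeds these helpers; it uses only calls visible in this series (vchan_init, vchan_tx_prep, vchan_next_desc, vchan_cookie_complete), and the foo_* names are hypothetical.]

	#include <linux/dmaengine.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	#include "virt-dma.h"	/* helpers added by this series */

	/* Hypothetical driver types wrapping the virt-dma objects. */
	struct foo_txd {
		struct virt_dma_desc vd;	/* embeds dma_async_tx_descriptor */
		/* ... hardware descriptor state ... */
	};

	struct foo_chan {
		struct virt_dma_chan vc;	/* embeds struct dma_chan and lists */
		struct foo_txd *at;		/* descriptor currently on hardware */
	};

	static void foo_desc_free(struct virt_dma_desc *vd)
	{
		kfree(container_of(vd, struct foo_txd, vd));
	}

	/* prep_*() callbacks hand descriptors to the core via vchan_tx_prep(). */
	static struct dma_async_tx_descriptor *foo_prep(struct dma_chan *chan,
		unsigned long flags)
	{
		struct foo_chan *c = container_of(chan, struct foo_chan, vc.chan);
		struct foo_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

		if (!txd)
			return NULL;
		/* ... build the hardware descriptor ... */
		return vchan_tx_prep(&c->vc, &txd->vd, flags);
	}

	/* Completion: retire the active descriptor, pick the next issued one. */
	static void foo_complete(struct foo_chan *c)
	{
		struct virt_dma_desc *vd;
		unsigned long flags;

		spin_lock_irqsave(&c->vc.lock, flags);
		vchan_cookie_complete(&c->at->vd);	/* schedules callback tasklet */
		vd = vchan_next_desc(&c->vc);		/* next issued descriptor */
		if (vd)
			list_del(&vd->node);		/* driver owns it now */
		c->at = vd ? container_of(vd, struct foo_txd, vd) : NULL;
		/* ... program the hardware from c->at, if any ... */
		spin_unlock_irqrestore(&c->vc.lock, flags);
	}

	static void foo_chan_init(struct foo_chan *c, struct dma_device *dd)
	{
		c->vc.desc_free = foo_desc_free;	/* used by vchan free paths */
		vchan_init(&c->vc, dd);			/* lists, lock, tasklet */
	}

The amba-pl08x.c hunks below follow exactly this shape: pl08x_dma_chan embeds virt_dma_chan, pl08x_txd embeds virt_dma_desc, and pl08x_start_next_txd() pops work with vchan_next_desc().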
-rw-r--r--	Documentation/feature-removal-schedule.txt	11
-rw-r--r--	arch/arm/configs/omap2plus_defconfig	2
-rw-r--r--	arch/arm/mach-omap1/board-h2-mmc.c	1
-rw-r--r--	arch/arm/mach-omap1/board-h3-mmc.c	1
-rw-r--r--	arch/arm/mach-omap1/board-nokia770.c	1
-rw-r--r--	arch/arm/mach-omap2/board-n8x0.c	1
-rw-r--r--	arch/arm/mach-omap2/hsmmc.c	1
-rw-r--r--	arch/arm/mach-spear3xx/spear300.c	26
-rw-r--r--	arch/arm/mach-spear3xx/spear310.c	26
-rw-r--r--	arch/arm/mach-spear3xx/spear320.c	26
-rw-r--r--	arch/arm/mach-spear3xx/spear3xx.c	3
-rw-r--r--	arch/arm/mach-spear6xx/spear6xx.c	51
-rw-r--r--	arch/arm/plat-omap/include/plat/mmc.h	2
-rw-r--r--	arch/arm/plat-spear/include/plat/pl080.h	6
-rw-r--r--	arch/arm/plat-spear/pl080.c	10
-rw-r--r--	drivers/dma/Kconfig	11
-rw-r--r--	drivers/dma/Makefile	2
-rw-r--r--	drivers/dma/amba-pl08x.c	941
-rw-r--r--	drivers/dma/omap-dma.c	669
-rw-r--r--	drivers/dma/sa11x0-dma.c	388
-rw-r--r--	drivers/dma/virt-dma.c	123
-rw-r--r--	drivers/dma/virt-dma.h	152
-rw-r--r--	drivers/mmc/host/omap.c	368
-rw-r--r--	drivers/mmc/host/omap_hsmmc.c	204
-rw-r--r--	drivers/mtd/nand/omap2.c	106
-rw-r--r--	drivers/spi/spi-omap2-mcspi.c	229
-rw-r--r--	include/linux/amba/pl08x.h	156
-rw-r--r--	include/linux/omap-dma.h	22
28 files changed, 2124 insertions, 1415 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 72ed15075f79..afaff312bf41 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -626,3 +626,14 @@ Why: New drivers should use new V4L2_CAP_VIDEO_M2M capability flag
 Who:	Sylwester Nawrocki <s.nawrocki@samsung.com>
 
 ----------------------------
+
+What:	OMAP private DMA implementation
+When:	2013
+Why:	We have a DMA engine implementation; all users should be updated
+	to use this rather than persisting with the old APIs. The old APIs
+	block merging the old DMA engine implementation into the DMA
+	engine driver.
+Who:	Russell King <linux@arm.linux.org.uk>,
+	Santosh Shilimkar <santosh.shilimkar@ti.com>
+
+----------------------------
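[Editor's note: the converted drivers later in this series obtain the replacement channel named in this entry through the DMA engine API. A minimal sketch, assuming the peripheral's DMA request line number is in req; the foo_ name is illustrative.]

	#include <linux/dmaengine.h>
	#include <linux/omap-dma.h>	/* omap_dma_filter_fn(), added by this series */

	/* Sketch: what replaces an old omap_request_dma() call site. */
	static struct dma_chan *foo_request_chan(unsigned int req)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* The filter selects the engine channel for this request line. */
		return dma_request_channel(mask, omap_dma_filter_fn, &req);
	}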
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index b152de79fd95..e58edc36b406 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -193,6 +193,8 @@ CONFIG_MMC_OMAP_HS=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_TWL92330=y
 CONFIG_RTC_DRV_TWL4030=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_OMAP=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_FS_XATTR is not set
diff --git a/arch/arm/mach-omap1/board-h2-mmc.c b/arch/arm/mach-omap1/board-h2-mmc.c
index da0e37d40823..e1362ce48497 100644
--- a/arch/arm/mach-omap1/board-h2-mmc.c
+++ b/arch/arm/mach-omap1/board-h2-mmc.c
@@ -54,7 +54,6 @@ static struct omap_mmc_platform_data mmc1_data = {
 	.nr_slots	= 1,
 	.init		= mmc_late_init,
 	.cleanup	= mmc_cleanup,
-	.dma_mask	= 0xffffffff,
 	.slots[0]	= {
 		.set_power	= mmc_set_power,
 		.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
diff --git a/arch/arm/mach-omap1/board-h3-mmc.c b/arch/arm/mach-omap1/board-h3-mmc.c
index f8242aa9b763..c74daace8cd6 100644
--- a/arch/arm/mach-omap1/board-h3-mmc.c
+++ b/arch/arm/mach-omap1/board-h3-mmc.c
@@ -36,7 +36,6 @@ static int mmc_set_power(struct device *dev, int slot, int power_on,
  */
 static struct omap_mmc_platform_data mmc1_data = {
 	.nr_slots	= 1,
-	.dma_mask	= 0xffffffff,
 	.slots[0]	= {
 		.set_power	= mmc_set_power,
 		.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index 4007a372481b..2c0ca8fc3380 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -185,7 +185,6 @@ static int nokia770_mmc_get_cover_state(struct device *dev, int slot)
 
 static struct omap_mmc_platform_data nokia770_mmc2_data = {
 	.nr_slots	= 1,
-	.dma_mask	= 0xffffffff,
 	.max_freq	= 12000000,
 	.slots[0]	= {
 		.set_power	= nokia770_mmc_set_power,
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 2c5d0ed75285..677357ff61ac 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -468,7 +468,6 @@ static struct omap_mmc_platform_data mmc1_data = {
 	.cleanup	= n8x0_mmc_cleanup,
 	.shutdown	= n8x0_mmc_shutdown,
 	.max_freq	= 24000000,
-	.dma_mask	= 0xffffffff,
 	.slots[0]	= {
 		.wires		= 4,
 		.set_power	= n8x0_mmc_set_power,
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index be697d4e0843..a9675d8d1822 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -315,7 +315,6 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
 	mmc->slots[0].caps = c->caps;
 	mmc->slots[0].pm_caps = c->pm_caps;
 	mmc->slots[0].internal_clock = !c->ext_clock;
-	mmc->dma_mask = 0xffffffff;
 	mmc->max_freq = c->max_freq;
 	if (cpu_is_omap44xx())
 		mmc->reg_offset = OMAP4_MMC_REG_OFFSET;
diff --git a/arch/arm/mach-spear3xx/spear300.c b/arch/arm/mach-spear3xx/spear300.c
index 0f882ecb7d81..6ec300549960 100644
--- a/arch/arm/mach-spear3xx/spear300.c
+++ b/arch/arm/mach-spear3xx/spear300.c
@@ -120,182 +120,156 @@ struct pl08x_channel_data spear300_dma_info[] = {
 		.min_signal = 2,
 		.max_signal = 2,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart0_tx",
 		.min_signal = 3,
 		.max_signal = 3,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ssp0_rx",
 		.min_signal = 8,
 		.max_signal = 8,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ssp0_tx",
 		.min_signal = 9,
 		.max_signal = 9,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "i2c_rx",
 		.min_signal = 10,
 		.max_signal = 10,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "i2c_tx",
 		.min_signal = 11,
 		.max_signal = 11,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "irda",
 		.min_signal = 12,
 		.max_signal = 12,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "adc",
 		.min_signal = 13,
 		.max_signal = 13,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "to_jpeg",
 		.min_signal = 14,
 		.max_signal = 14,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "from_jpeg",
 		.min_signal = 15,
 		.max_signal = 15,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras0_rx",
 		.min_signal = 0,
 		.max_signal = 0,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras0_tx",
 		.min_signal = 1,
 		.max_signal = 1,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras1_rx",
 		.min_signal = 2,
 		.max_signal = 2,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras1_tx",
 		.min_signal = 3,
 		.max_signal = 3,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras2_rx",
 		.min_signal = 4,
 		.max_signal = 4,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras2_tx",
 		.min_signal = 5,
 		.max_signal = 5,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras3_rx",
 		.min_signal = 6,
 		.max_signal = 6,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras3_tx",
 		.min_signal = 7,
 		.max_signal = 7,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras4_rx",
 		.min_signal = 8,
 		.max_signal = 8,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras4_tx",
 		.min_signal = 9,
 		.max_signal = 9,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras5_rx",
 		.min_signal = 10,
 		.max_signal = 10,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras5_tx",
 		.min_signal = 11,
 		.max_signal = 11,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras6_rx",
 		.min_signal = 12,
 		.max_signal = 12,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras6_tx",
 		.min_signal = 13,
 		.max_signal = 13,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras7_rx",
 		.min_signal = 14,
 		.max_signal = 14,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras7_tx",
 		.min_signal = 15,
 		.max_signal = 15,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	},
 };
diff --git a/arch/arm/mach-spear3xx/spear310.c b/arch/arm/mach-spear3xx/spear310.c
index bbcf4571d361..1d0e435b9045 100644
--- a/arch/arm/mach-spear3xx/spear310.c
+++ b/arch/arm/mach-spear3xx/spear310.c
@@ -205,182 +205,156 @@ struct pl08x_channel_data spear310_dma_info[] = {
 		.min_signal = 2,
 		.max_signal = 2,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart0_tx",
 		.min_signal = 3,
 		.max_signal = 3,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ssp0_rx",
 		.min_signal = 8,
 		.max_signal = 8,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ssp0_tx",
 		.min_signal = 9,
 		.max_signal = 9,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "i2c_rx",
 		.min_signal = 10,
 		.max_signal = 10,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "i2c_tx",
 		.min_signal = 11,
 		.max_signal = 11,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "irda",
 		.min_signal = 12,
 		.max_signal = 12,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "adc",
 		.min_signal = 13,
 		.max_signal = 13,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "to_jpeg",
 		.min_signal = 14,
 		.max_signal = 14,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "from_jpeg",
 		.min_signal = 15,
 		.max_signal = 15,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart1_rx",
 		.min_signal = 0,
 		.max_signal = 0,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart1_tx",
 		.min_signal = 1,
 		.max_signal = 1,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart2_rx",
 		.min_signal = 2,
 		.max_signal = 2,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart2_tx",
 		.min_signal = 3,
 		.max_signal = 3,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart3_rx",
 		.min_signal = 4,
 		.max_signal = 4,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart3_tx",
 		.min_signal = 5,
 		.max_signal = 5,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart4_rx",
 		.min_signal = 6,
 		.max_signal = 6,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart4_tx",
 		.min_signal = 7,
 		.max_signal = 7,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart5_rx",
 		.min_signal = 8,
 		.max_signal = 8,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart5_tx",
 		.min_signal = 9,
 		.max_signal = 9,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras5_rx",
 		.min_signal = 10,
 		.max_signal = 10,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras5_tx",
 		.min_signal = 11,
 		.max_signal = 11,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras6_rx",
 		.min_signal = 12,
 		.max_signal = 12,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras6_tx",
 		.min_signal = 13,
 		.max_signal = 13,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras7_rx",
 		.min_signal = 14,
 		.max_signal = 14,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras7_tx",
 		.min_signal = 15,
 		.max_signal = 15,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	},
 };
diff --git a/arch/arm/mach-spear3xx/spear320.c b/arch/arm/mach-spear3xx/spear320.c
index 88d483bcd66a..fd823c624575 100644
--- a/arch/arm/mach-spear3xx/spear320.c
+++ b/arch/arm/mach-spear3xx/spear320.c
@@ -213,182 +213,156 @@ struct pl08x_channel_data spear320_dma_info[] = {
 		.min_signal = 2,
 		.max_signal = 2,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart0_tx",
 		.min_signal = 3,
 		.max_signal = 3,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ssp0_rx",
 		.min_signal = 8,
 		.max_signal = 8,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ssp0_tx",
 		.min_signal = 9,
 		.max_signal = 9,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "i2c0_rx",
 		.min_signal = 10,
 		.max_signal = 10,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "i2c0_tx",
 		.min_signal = 11,
 		.max_signal = 11,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "irda",
 		.min_signal = 12,
 		.max_signal = 12,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "adc",
 		.min_signal = 13,
 		.max_signal = 13,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "to_jpeg",
 		.min_signal = 14,
 		.max_signal = 14,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "from_jpeg",
 		.min_signal = 15,
 		.max_signal = 15,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ssp1_rx",
 		.min_signal = 0,
 		.max_signal = 0,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ssp1_tx",
 		.min_signal = 1,
 		.max_signal = 1,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ssp2_rx",
 		.min_signal = 2,
 		.max_signal = 2,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ssp2_tx",
 		.min_signal = 3,
 		.max_signal = 3,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "uart1_rx",
 		.min_signal = 4,
 		.max_signal = 4,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "uart1_tx",
 		.min_signal = 5,
 		.max_signal = 5,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "uart2_rx",
 		.min_signal = 6,
 		.max_signal = 6,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "uart2_tx",
 		.min_signal = 7,
 		.max_signal = 7,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "i2c1_rx",
 		.min_signal = 8,
 		.max_signal = 8,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "i2c1_tx",
 		.min_signal = 9,
 		.max_signal = 9,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "i2c2_rx",
 		.min_signal = 10,
 		.max_signal = 10,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "i2c2_tx",
 		.min_signal = 11,
 		.max_signal = 11,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "i2s_rx",
 		.min_signal = 12,
 		.max_signal = 12,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "i2s_tx",
 		.min_signal = 13,
 		.max_signal = 13,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "rs485_rx",
 		.min_signal = 14,
 		.max_signal = 14,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "rs485_tx",
 		.min_signal = 15,
 		.max_signal = 15,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	},
 };
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c
index 66db5f13af84..98144baf8883 100644
--- a/arch/arm/mach-spear3xx/spear3xx.c
+++ b/arch/arm/mach-spear3xx/spear3xx.c
@@ -46,7 +46,8 @@ struct pl022_ssp_controller pl022_plat_data = {
 struct pl08x_platform_data pl080_plat_data = {
 	.memcpy_channel = {
 		.bus_id = "memcpy",
-		.cctl = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
+		.cctl_memcpy =
+			(PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
 			PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \
 			PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \
 			PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \
diff --git a/arch/arm/mach-spear6xx/spear6xx.c b/arch/arm/mach-spear6xx/spear6xx.c
index 9af67d003c62..5a5a52db252b 100644
--- a/arch/arm/mach-spear6xx/spear6xx.c
+++ b/arch/arm/mach-spear6xx/spear6xx.c
@@ -36,336 +36,288 @@ static struct pl08x_channel_data spear600_dma_info[] = {
 		.min_signal = 0,
 		.max_signal = 0,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ssp1_tx",
 		.min_signal = 1,
 		.max_signal = 1,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart0_rx",
 		.min_signal = 2,
 		.max_signal = 2,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart0_tx",
 		.min_signal = 3,
 		.max_signal = 3,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart1_rx",
 		.min_signal = 4,
 		.max_signal = 4,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart1_tx",
 		.min_signal = 5,
 		.max_signal = 5,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ssp2_rx",
 		.min_signal = 6,
 		.max_signal = 6,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ssp2_tx",
 		.min_signal = 7,
 		.max_signal = 7,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ssp0_rx",
 		.min_signal = 8,
 		.max_signal = 8,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ssp0_tx",
 		.min_signal = 9,
 		.max_signal = 9,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "i2c_rx",
 		.min_signal = 10,
 		.max_signal = 10,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "i2c_tx",
 		.min_signal = 11,
 		.max_signal = 11,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "irda",
 		.min_signal = 12,
 		.max_signal = 12,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "adc",
 		.min_signal = 13,
 		.max_signal = 13,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "to_jpeg",
 		.min_signal = 14,
 		.max_signal = 14,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "from_jpeg",
 		.min_signal = 15,
 		.max_signal = 15,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras0_rx",
 		.min_signal = 0,
 		.max_signal = 0,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras0_tx",
 		.min_signal = 1,
 		.max_signal = 1,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras1_rx",
 		.min_signal = 2,
 		.max_signal = 2,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras1_tx",
 		.min_signal = 3,
 		.max_signal = 3,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras2_rx",
 		.min_signal = 4,
 		.max_signal = 4,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras2_tx",
 		.min_signal = 5,
 		.max_signal = 5,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras3_rx",
 		.min_signal = 6,
 		.max_signal = 6,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras3_tx",
 		.min_signal = 7,
 		.max_signal = 7,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras4_rx",
 		.min_signal = 8,
 		.max_signal = 8,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras4_tx",
 		.min_signal = 9,
 		.max_signal = 9,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras5_rx",
 		.min_signal = 10,
 		.max_signal = 10,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras5_tx",
 		.min_signal = 11,
 		.max_signal = 11,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras6_rx",
 		.min_signal = 12,
 		.max_signal = 12,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras6_tx",
 		.min_signal = 13,
 		.max_signal = 13,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras7_rx",
 		.min_signal = 14,
 		.max_signal = 14,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras7_tx",
 		.min_signal = 15,
 		.max_signal = 15,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ext0_rx",
 		.min_signal = 0,
 		.max_signal = 0,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext0_tx",
 		.min_signal = 1,
 		.max_signal = 1,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext1_rx",
 		.min_signal = 2,
 		.max_signal = 2,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext1_tx",
 		.min_signal = 3,
 		.max_signal = 3,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext2_rx",
 		.min_signal = 4,
 		.max_signal = 4,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext2_tx",
 		.min_signal = 5,
 		.max_signal = 5,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext3_rx",
 		.min_signal = 6,
 		.max_signal = 6,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext3_tx",
 		.min_signal = 7,
 		.max_signal = 7,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext4_rx",
 		.min_signal = 8,
 		.max_signal = 8,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext4_tx",
 		.min_signal = 9,
 		.max_signal = 9,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext5_rx",
 		.min_signal = 10,
 		.max_signal = 10,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext5_tx",
 		.min_signal = 11,
 		.max_signal = 11,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext6_rx",
 		.min_signal = 12,
 		.max_signal = 12,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext6_tx",
 		.min_signal = 13,
 		.max_signal = 13,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext7_rx",
 		.min_signal = 14,
 		.max_signal = 14,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext7_tx",
 		.min_signal = 15,
 		.max_signal = 15,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	},
 };
@@ -373,7 +325,8 @@ static struct pl08x_channel_data spear600_dma_info[] = {
 struct pl08x_platform_data pl080_plat_data = {
 	.memcpy_channel = {
 		.bus_id = "memcpy",
-		.cctl = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
+		.cctl_memcpy =
+			(PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
 			PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \
 			PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \
 			PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
index 5493bd95da5e..eb3e4d555343 100644
--- a/arch/arm/plat-omap/include/plat/mmc.h
+++ b/arch/arm/plat-omap/include/plat/mmc.h
@@ -81,8 +81,6 @@ struct omap_mmc_platform_data {
 	/* Return context loss count due to PM states changing */
 	int (*get_context_loss_count)(struct device *dev);
 
-	u64 dma_mask;
-
 	/* Integrating attributes from the omap_hwmod layer */
 	u8 controller_flags;
 
diff --git a/arch/arm/plat-spear/include/plat/pl080.h b/arch/arm/plat-spear/include/plat/pl080.h
index 2bc6b54460a8..eb6590ded40d 100644
--- a/arch/arm/plat-spear/include/plat/pl080.h
+++ b/arch/arm/plat-spear/include/plat/pl080.h
@@ -14,8 +14,8 @@
 #ifndef __PLAT_PL080_H
 #define __PLAT_PL080_H
 
-struct pl08x_dma_chan;
-int pl080_get_signal(struct pl08x_dma_chan *ch);
-void pl080_put_signal(struct pl08x_dma_chan *ch);
+struct pl08x_channel_data;
+int pl080_get_signal(const struct pl08x_channel_data *cd);
+void pl080_put_signal(const struct pl08x_channel_data *cd, int signal);
 
 #endif /* __PLAT_PL080_H */
diff --git a/arch/arm/plat-spear/pl080.c b/arch/arm/plat-spear/pl080.c
index 12cf27f935f9..cfa1199d0f4a 100644
--- a/arch/arm/plat-spear/pl080.c
+++ b/arch/arm/plat-spear/pl080.c
@@ -27,9 +27,8 @@ struct {
 	unsigned char val;
 } signals[16] = {{0, 0}, };
 
-int pl080_get_signal(struct pl08x_dma_chan *ch)
+int pl080_get_signal(const struct pl08x_channel_data *cd)
 {
-	const struct pl08x_channel_data *cd = ch->cd;
 	unsigned int signal = cd->min_signal, val;
 	unsigned long flags;
 
@@ -63,18 +62,17 @@ int pl080_get_signal(struct pl08x_dma_chan *ch)
 	return signal;
 }
 
-void pl080_put_signal(struct pl08x_dma_chan *ch)
+void pl080_put_signal(const struct pl08x_channel_data *cd, int signal)
 {
-	const struct pl08x_channel_data *cd = ch->cd;
 	unsigned long flags;
 
 	spin_lock_irqsave(&lock, flags);
 
 	/* if signal is not used */
-	if (!signals[cd->min_signal].busy)
+	if (!signals[signal].busy)
 		BUG();
 
-	signals[cd->min_signal].busy--;
+	signals[signal].busy--;
 
 	spin_unlock_irqrestore(&lock, flags);
 }
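[Editor's note: with the new signatures, the SPEAr machine code hands these helpers to the PL08x driver through its platform data. A sketch of that wiring, assumed from the pl08x_platform_data callback types used by the mux code in the amba-pl08x.c hunks further down.]

	#include <linux/amba/pl08x.h>
	#include <plat/pl080.h>

	/* The PL08x driver calls get_signal() when a descriptor first needs
	 * the request mux, and put_signal() with the returned signal once
	 * the last user of that mux setting is done (see pl08x_request_mux()
	 * and pl08x_release_mux() below). */
	struct pl08x_platform_data pl080_plat_data = {
		/* ... memcpy_channel, slave_channels, lli_buses, mem_buses ... */
		.get_signal	= pl080_get_signal,
		.put_signal	= pl080_put_signal,
	};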
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d45cf1bcbde5..d06ea2950dd9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -53,6 +53,7 @@ config AMBA_PL08X
53 bool "ARM PrimeCell PL080 or PL081 support" 53 bool "ARM PrimeCell PL080 or PL081 support"
54 depends on ARM_AMBA && EXPERIMENTAL 54 depends on ARM_AMBA && EXPERIMENTAL
55 select DMA_ENGINE 55 select DMA_ENGINE
56 select DMA_VIRTUAL_CHANNELS
56 help 57 help
57 Platform has a PL08x DMAC device 58 Platform has a PL08x DMAC device
58 which can provide DMA engine support 59 which can provide DMA engine support
@@ -269,6 +270,7 @@ config DMA_SA11X0
269 tristate "SA-11x0 DMA support" 270 tristate "SA-11x0 DMA support"
270 depends on ARCH_SA1100 271 depends on ARCH_SA1100
271 select DMA_ENGINE 272 select DMA_ENGINE
273 select DMA_VIRTUAL_CHANNELS
272 help 274 help
273 Support the DMA engine found on Intel StrongARM SA-1100 and 275 Support the DMA engine found on Intel StrongARM SA-1100 and
274 SA-1110 SoCs. This DMA engine can only be used with on-chip 276 SA-1110 SoCs. This DMA engine can only be used with on-chip
@@ -284,9 +286,18 @@ config MMP_TDMA
 
 	  Say Y here if you enabled MMP ADMA, otherwise say N.
 
+config DMA_OMAP
+	tristate "OMAP DMA support"
+	depends on ARCH_OMAP
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+
 config DMA_ENGINE
 	bool
 
+config DMA_VIRTUAL_CHANNELS
+	tristate
+
 comment "DMA Clients"
 	depends on DMA_ENGINE
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 640356add0a3..4cf6b128ab9a 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -2,6 +2,7 @@ ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG
 ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
 
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
+obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
 obj-$(CONFIG_NET_DMA) += iovlock.o
 obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
 obj-$(CONFIG_DMATEST) += dmatest.o
@@ -30,3 +31,4 @@ obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
 obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
+obj-$(CONFIG_DMA_OMAP) += omap-dma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 49ecbbb8932d..6fbeebb9486f 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -86,10 +86,12 @@
 #include <asm/hardware/pl080.h>
 
 #include "dmaengine.h"
+#include "virt-dma.h"
 
 #define DRIVER_NAME "pl08xdmac"
 
 static struct amba_driver pl08x_amba_driver;
+struct pl08x_driver_data;
 
 /**
  * struct vendor_data - vendor-specific config parameters for PL08x derivatives
@@ -119,6 +121,123 @@ struct pl08x_lli {
 };
 
 /**
+ * struct pl08x_bus_data - information of source or destination
+ * busses for a transfer
+ * @addr: current address
+ * @maxwidth: the maximum width of a transfer on this bus
+ * @buswidth: the width of this bus in bytes: 1, 2 or 4
+ */
+struct pl08x_bus_data {
+	dma_addr_t addr;
+	u8 maxwidth;
+	u8 buswidth;
+};
+
+/**
+ * struct pl08x_phy_chan - holder for the physical channels
+ * @id: physical index to this channel
+ * @lock: a lock to use when altering an instance of this struct
+ * @serving: the virtual channel currently being served by this physical
+ * channel
+ * @locked: channel unavailable for the system, e.g. dedicated to secure
+ * world
+ */
+struct pl08x_phy_chan {
+	unsigned int id;
+	void __iomem *base;
+	spinlock_t lock;
+	struct pl08x_dma_chan *serving;
+	bool locked;
+};
+
+/**
+ * struct pl08x_sg - structure containing data per sg
+ * @src_addr: src address of sg
+ * @dst_addr: dst address of sg
+ * @len: transfer len in bytes
+ * @node: node for txd's dsg_list
+ */
+struct pl08x_sg {
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	size_t len;
+	struct list_head node;
+};
+
+/**
+ * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
+ * @vd: virtual DMA descriptor
+ * @dsg_list: list of children sg's
+ * @llis_bus: DMA memory address (physical) start for the LLIs
+ * @llis_va: virtual memory address start for the LLIs
+ * @cctl: control reg values for current txd
+ * @ccfg: config reg values for current txd
+ * @done: this marks completed descriptors, which should not have their
+ * mux released.
+ */
+struct pl08x_txd {
+	struct virt_dma_desc vd;
+	struct list_head dsg_list;
+	dma_addr_t llis_bus;
+	struct pl08x_lli *llis_va;
+	/* Default cctl value for LLIs */
+	u32 cctl;
+	/*
+	 * Settings to be put into the physical channel when we
+	 * trigger this txd. Other registers are in llis_va[0].
+	 */
+	u32 ccfg;
+	bool done;
+};
+
+/**
+ * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
+ * states
+ * @PL08X_CHAN_IDLE: the channel is idle
+ * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
+ * channel and is running a transfer on it
+ * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
+ * channel, but the transfer is currently paused
+ * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
+ * channel to become available (only pertains to memcpy channels)
+ */
+enum pl08x_dma_chan_state {
+	PL08X_CHAN_IDLE,
+	PL08X_CHAN_RUNNING,
+	PL08X_CHAN_PAUSED,
+	PL08X_CHAN_WAITING,
+};
+
+/**
+ * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
+ * @vc: wrappped virtual channel
+ * @phychan: the physical channel utilized by this channel, if there is one
+ * @name: name of channel
+ * @cd: channel platform data
+ * @runtime_addr: address for RX/TX according to the runtime config
+ * @at: active transaction on this channel
+ * @lock: a lock for this channel data
+ * @host: a pointer to the host (internal use)
+ * @state: whether the channel is idle, paused, running etc
+ * @slave: whether this channel is a device (slave) or for memcpy
+ * @signal: the physical DMA request signal which this channel is using
+ * @mux_use: count of descriptors using this DMA request signal setting
+ */
+struct pl08x_dma_chan {
+	struct virt_dma_chan vc;
+	struct pl08x_phy_chan *phychan;
+	const char *name;
+	const struct pl08x_channel_data *cd;
+	struct dma_slave_config cfg;
+	struct pl08x_txd *at;
+	struct pl08x_driver_data *host;
+	enum pl08x_dma_chan_state state;
+	bool slave;
+	int signal;
+	unsigned mux_use;
+};
+
+/**
  * struct pl08x_driver_data - the local state holder for the PL08x
  * @slave: slave engine for this instance
  * @memcpy: memcpy engine for this instance
@@ -128,7 +247,6 @@ struct pl08x_lli {
  * @pd: platform data passed in from the platform/machine
  * @phy_chans: array of data for the physical channels
  * @pool: a pool for the LLI descriptors
- * @pool_ctr: counter of LLIs in the pool
  * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
  * fetches
  * @mem_buses: set to indicate memory transfers on AHB2.
@@ -143,10 +261,8 @@ struct pl08x_driver_data {
 	struct pl08x_platform_data *pd;
 	struct pl08x_phy_chan *phy_chans;
 	struct dma_pool *pool;
-	int pool_ctr;
 	u8 lli_buses;
 	u8 mem_buses;
-	spinlock_t lock;
 };
 
 /*
@@ -162,12 +278,51 @@ struct pl08x_driver_data {
162 278
163 279 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
164 280 {
165 	return container_of(chan, struct pl08x_dma_chan, chan);
281 	return container_of(chan, struct pl08x_dma_chan, vc.chan);
166 282 }
167 283
168 284 static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
169 285 {
170 	return container_of(tx, struct pl08x_txd, tx);
286 	return container_of(tx, struct pl08x_txd, vd.tx);
287}
288
289/*
290 * Mux handling.
291 *
292 * This gives us the DMA request input to the PL08x primecell which the
293 * peripheral described by the channel data will be routed to, possibly
294 * via a board/SoC specific external MUX. One important point to note
295 * here is that this does not depend on the physical channel.
296 */
297static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
298{
299 const struct pl08x_platform_data *pd = plchan->host->pd;
300 int ret;
301
302 if (plchan->mux_use++ == 0 && pd->get_signal) {
303 ret = pd->get_signal(plchan->cd);
304 if (ret < 0) {
305 plchan->mux_use = 0;
306 return ret;
307 }
308
309 plchan->signal = ret;
310 }
311 return 0;
312}
313
314static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
315{
316 const struct pl08x_platform_data *pd = plchan->host->pd;
317
318 if (plchan->signal >= 0) {
319 WARN_ON(plchan->mux_use == 0);
320
321 if (--plchan->mux_use == 0 && pd->put_signal) {
322 pd->put_signal(plchan->cd, plchan->signal);
323 plchan->signal = -1;
324 }
325 }
171 326 }
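The mux pair above is reference-counted per virtual channel: the first descriptor to need the request signal routes it through the platform's get_signal() hook, later descriptors only bump mux_use, and put_signal() runs when the last user drops its reference. A minimal sketch of the intended balance (hypothetical caller code, not from the patch; it uses only the two helpers shown above):

/* Hypothetical illustration of the mux refcount contract. */
static int sketch_prep(struct pl08x_dma_chan *plchan)
{
	int ret = pl08x_request_mux(plchan);	/* mux_use 0 -> 1 routes the signal */

	if (ret < 0)
		return ret;	/* platform vetoed the routing */
	/* ... build the descriptor, bake plchan->signal into its ccfg ... */
	return 0;
}

static void sketch_complete(struct pl08x_dma_chan *plchan)
{
	pl08x_release_mux(plchan);	/* mux_use 1 -> 0 returns the signal */
}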
172 327
173 328 /*
@@ -189,20 +344,25 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
189 344  * been set when the LLIs were constructed. Poke them into the hardware
190 345  * and start the transfer.
191 346  */
192 static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
193 	struct pl08x_txd *txd)
347 static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
194 348 {
195 349 	struct pl08x_driver_data *pl08x = plchan->host;
196 350 	struct pl08x_phy_chan *phychan = plchan->phychan;
197 	struct pl08x_lli *lli = &txd->llis_va[0];
351 	struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
352 	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
353 	struct pl08x_lli *lli;
198 354 	u32 val;
199 355
356 	list_del(&txd->vd.node);
357
200 358 	plchan->at = txd;
201 359
202 360 	/* Wait for channel inactive */
203 361 	while (pl08x_phy_channel_busy(phychan))
204 362 		cpu_relax();
205 363
364 	lli = &txd->llis_va[0];
365
206 366 	dev_vdbg(&pl08x->adev->dev,
207 367 		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
208 368 		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
@@ -311,10 +471,8 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
311 471 {
312 472 	struct pl08x_phy_chan *ch;
313 473 	struct pl08x_txd *txd;
314 	unsigned long flags;
315 474 	size_t bytes = 0;
316 475
317 	spin_lock_irqsave(&plchan->lock, flags);
318 476 	ch = plchan->phychan;
319 477 	txd = plchan->at;
320 478
@@ -354,18 +512,6 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
354 512 		}
355 513 	}
356 514
357 /* Sum up all queued transactions */
358 if (!list_empty(&plchan->pend_list)) {
359 struct pl08x_txd *txdi;
360 list_for_each_entry(txdi, &plchan->pend_list, node) {
361 struct pl08x_sg *dsg;
362 list_for_each_entry(dsg, &txd->dsg_list, node)
363 bytes += dsg->len;
364 }
365 }
366
367 spin_unlock_irqrestore(&plchan->lock, flags);
368
369 515 	return bytes;
370 516 }
371 517
@@ -391,7 +537,6 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
391 537
392 538 		if (!ch->locked && !ch->serving) {
393 539 			ch->serving = virt_chan;
394 			ch->signal = -1;
395 540 			spin_unlock_irqrestore(&ch->lock, flags);
396 541 			break;
397 542 		}
@@ -404,25 +549,114 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
404 549 		return NULL;
405 550 	}
406 551
407 	pm_runtime_get_sync(&pl08x->adev->dev);
408 552 	return ch;
409 553 }
410 554
555/* Mark the physical channel as free. Note, this write is atomic. */
411 556 static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
412 557 	struct pl08x_phy_chan *ch)
413 558 {
414 	unsigned long flags;
559 	ch->serving = NULL;
560}
561
562/*
563 * Try to allocate a physical channel. When successful, assign it to
564 * this virtual channel, and initiate the next descriptor. The
565 * virtual channel lock must be held at this point.
566 */
567static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
568{
569 struct pl08x_driver_data *pl08x = plchan->host;
570 struct pl08x_phy_chan *ch;
415 571
416 	spin_lock_irqsave(&ch->lock, flags);
572 	ch = pl08x_get_phy_channel(pl08x, plchan);
573 if (!ch) {
574 dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
575 plchan->state = PL08X_CHAN_WAITING;
576 return;
577 }
417 578
418 	/* Stop the channel and clear its interrupts */
419 	pl08x_terminate_phy_chan(pl08x, ch);
579 	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
580 		ch->id, plchan->name);
420 581
421 	pm_runtime_put(&pl08x->adev->dev);
582 	plchan->phychan = ch;
583 plchan->state = PL08X_CHAN_RUNNING;
584 pl08x_start_next_txd(plchan);
585}
422 586
423 	/* Mark it as free */
424 	ch->serving = NULL;
425 	spin_unlock_irqrestore(&ch->lock, flags);
587 static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
588 	struct pl08x_dma_chan *plchan)
589 {
590 struct pl08x_driver_data *pl08x = plchan->host;
591
592 dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
593 ch->id, plchan->name);
594
595 /*
596 * We do this without taking the lock; we're really only concerned
597 * about whether this pointer is NULL or not, and we're guaranteed
598 * that this will only be called when it _already_ is non-NULL.
599 */
600 ch->serving = plchan;
601 plchan->phychan = ch;
602 plchan->state = PL08X_CHAN_RUNNING;
603 pl08x_start_next_txd(plchan);
604}
605
606/*
607 * Free a physical DMA channel, potentially reallocating it to another
608 * virtual channel if we have any pending.
609 */
610static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
611{
612 struct pl08x_driver_data *pl08x = plchan->host;
613 struct pl08x_dma_chan *p, *next;
614
615 retry:
616 next = NULL;
617
618 /* Find a waiting virtual channel for the next transfer. */
619 list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
620 if (p->state == PL08X_CHAN_WAITING) {
621 next = p;
622 break;
623 }
624
625 if (!next) {
626 list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
627 if (p->state == PL08X_CHAN_WAITING) {
628 next = p;
629 break;
630 }
631 }
632
633 /* Ensure that the physical channel is stopped */
634 pl08x_terminate_phy_chan(pl08x, plchan->phychan);
635
636 if (next) {
637 bool success;
638
639 /*
640 * Eww. We know this isn't going to deadlock
641 * but lockdep probably doesn't.
642 */
643 spin_lock(&next->vc.lock);
644 /* Re-check the state now that we have the lock */
645 success = next->state == PL08X_CHAN_WAITING;
646 if (success)
647 pl08x_phy_reassign_start(plchan->phychan, next);
648 spin_unlock(&next->vc.lock);
649
650 /* If the state changed, try to find another channel */
651 if (!success)
652 goto retry;
653 } else {
654 /* No more jobs, so free up the physical channel */
655 pl08x_put_phy_channel(pl08x, plchan->phychan);
656 }
657
658 plchan->phychan = NULL;
659 plchan->state = PL08X_CHAN_IDLE;
426 660 }
427 661
428 662 /*
@@ -585,8 +819,6 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
585 819 		return 0;
586 820 	}
587 821
588 	pl08x->pool_ctr++;
589
590 822 	bd.txd = txd;
591 823 	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
592 824 	cctl = txd->cctl;
@@ -802,18 +1034,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
802 1034 	return num_llis;
803 1035 }
804 1036
805 /* You should call this with the struct pl08x lock held */
806 1037 static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
807 1038 	struct pl08x_txd *txd)
808 1039 {
809 1040 	struct pl08x_sg *dsg, *_dsg;
810 1041
811 /* Free the LLI */
812 1042 	if (txd->llis_va)
813 1043 		dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
814 1044
815 	pl08x->pool_ctr--;
816
817 1045 	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
818 1046 		list_del(&dsg->node);
819 1047 		kfree(dsg);
@@ -822,133 +1050,75 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
822 1050 	kfree(txd);
823 1051 }
824 1052
825 static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
826 	struct pl08x_dma_chan *plchan)
827 {
828 	struct pl08x_txd *txdi = NULL;
829 	struct pl08x_txd *next;
830
831 	if (!list_empty(&plchan->pend_list)) {
832 		list_for_each_entry_safe(txdi,
833 				next, &plchan->pend_list, node) {
834 			list_del(&txdi->node);
835 			pl08x_free_txd(pl08x, txdi);
836 		}
837 	}
838 }
1053 static void pl08x_unmap_buffers(struct pl08x_txd *txd)
1054 {
1055 	struct device *dev = txd->vd.tx.chan->device->dev;
1056 	struct pl08x_sg *dsg;
1057
1058 	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
1059 		if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
1060 			list_for_each_entry(dsg, &txd->dsg_list, node)
1061 				dma_unmap_single(dev, dsg->src_addr, dsg->len,
1062 					DMA_TO_DEVICE);
1063 		else {
1064 			list_for_each_entry(dsg, &txd->dsg_list, node)
1065 				dma_unmap_page(dev, dsg->src_addr, dsg->len,
1066 					DMA_TO_DEVICE);
1067 		}
1068 	}
1069 	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
1070 		if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
1071 			list_for_each_entry(dsg, &txd->dsg_list, node)
1072 				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
1073 					DMA_FROM_DEVICE);
1074 		else
1075 			list_for_each_entry(dsg, &txd->dsg_list, node)
1076 				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
1077 					DMA_FROM_DEVICE);
1078 	}
1079 }
839 1080
840 /*
841  * The DMA ENGINE API
842  */
843 static int pl08x_alloc_chan_resources(struct dma_chan *chan)
844 {
845 	return 0;
846 }
1081 static void pl08x_desc_free(struct virt_dma_desc *vd)
1082 {
1083 	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
1084 	struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
847 1085
848 static void pl08x_free_chan_resources(struct dma_chan *chan)
849 {
1086 	if (!plchan->slave)
1087 		pl08x_unmap_buffers(txd);
1088
1089 if (!txd->done)
1090 pl08x_release_mux(plchan);
1091
1092 pl08x_free_txd(plchan->host, txd);
850 }
1093 }
851 1094
852 /*
853  * This should be called with the channel plchan->lock held
854  */
855 static int prep_phy_channel(struct pl08x_dma_chan *plchan,
856 	struct pl08x_txd *txd)
857 {
858 	struct pl08x_driver_data *pl08x = plchan->host;
859 	struct pl08x_phy_chan *ch;
860 	int ret;
861
862 	/* Check if we already have a channel */
863 	if (plchan->phychan) {
864 		ch = plchan->phychan;
865 		goto got_channel;
866 	}
1095 static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
1096 	struct pl08x_dma_chan *plchan)
1097 {
1098 	LIST_HEAD(head);
1099 	struct pl08x_txd *txd;
867 1100
868 	ch = pl08x_get_phy_channel(pl08x, plchan);
869 	if (!ch) {
870 		/* No physical channel available, cope with it */
871 		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
872 		return -EBUSY;
873 	}
1101 	vchan_get_all_descriptors(&plchan->vc, &head);
874 1102
875 	/*
876 	 * OK we have a physical channel: for memcpy() this is all we
877 	 * need, but for slaves the physical signals may be muxed!
878 	 * Can the platform allow us to use this channel?
879 	 */
880 	if (plchan->slave && pl08x->pd->get_signal) {
881 		ret = pl08x->pd->get_signal(plchan);
882 		if (ret < 0) {
883 			dev_dbg(&pl08x->adev->dev,
884 				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
885 				ch->id, plchan->name);
886 			/* Release physical channel & return */
887 			pl08x_put_phy_channel(pl08x, ch);
888 			return -EBUSY;
889 		}
890 		ch->signal = ret;
891 	}
1103 	while (!list_empty(&head)) {
1104 		txd = list_first_entry(&head, struct pl08x_txd, vd.node);
1105 		list_del(&txd->vd.node);
1106 		pl08x_desc_free(&txd->vd);
1107 	}
892
893 plchan->phychan = ch;
894 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
895 ch->id,
896 ch->signal,
897 plchan->name);
898
899got_channel:
900 /* Assign the flow control signal to this channel */
901 if (txd->direction == DMA_MEM_TO_DEV)
902 txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
903 else if (txd->direction == DMA_DEV_TO_MEM)
904 txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
905
906 plchan->phychan_hold++;
907
908 return 0;
909 }
1108 }
910 1109
911 static void release_phy_channel(struct pl08x_dma_chan *plchan)
912 {
913 	struct pl08x_driver_data *pl08x = plchan->host;
914
915 	if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
916 		pl08x->pd->put_signal(plchan);
917 		plchan->phychan->signal = -1;
918 	}
919 	pl08x_put_phy_channel(pl08x, plchan->phychan);
920 	plchan->phychan = NULL;
921 }
1110 /*
1111  * The DMA ENGINE API
1112  */
1113 static int pl08x_alloc_chan_resources(struct dma_chan *chan)
1114 {
1115 	return 0;
1116 }
922 1117
923 static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
924 {
925 	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
926 	struct pl08x_txd *txd = to_pl08x_txd(tx);
927 	unsigned long flags;
928 	dma_cookie_t cookie;
929
930 	spin_lock_irqsave(&plchan->lock, flags);
931 	cookie = dma_cookie_assign(tx);
932
933 	/* Put this onto the pending list */
934 	list_add_tail(&txd->node, &plchan->pend_list);
935
936 	/*
937 	 * If there was no physical channel available for this memcpy,
938 	 * stack the request up and indicate that the channel is waiting
939 	 * for a free physical channel.
940 	 */
941 	if (!plchan->slave && !plchan->phychan) {
942 		/* Do this memcpy whenever there is a channel ready */
943 		plchan->state = PL08X_CHAN_WAITING;
944 		plchan->waiting = txd;
945 	} else {
946 		plchan->phychan_hold--;
947 	}
948
949 	spin_unlock_irqrestore(&plchan->lock, flags);
950
951 	return cookie;
952 }
1118 static void pl08x_free_chan_resources(struct dma_chan *chan)
1119 {
1120 	/* Ensure all queued descriptors are freed */
1121 	vchan_free_chan_resources(to_virt_chan(chan));
1122 }
953 1123
954 1124 static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
@@ -968,23 +1138,53 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
968 1138 	dma_cookie_t cookie, struct dma_tx_state *txstate)
969 1139 {
970 1140 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1141 	struct virt_dma_desc *vd;
1142 	unsigned long flags;
971 1143 	enum dma_status ret;
1144 	size_t bytes = 0;
972 1145
973 1146 	ret = dma_cookie_status(chan, cookie, txstate);
974 1147 	if (ret == DMA_SUCCESS)
975 1148 		return ret;
976 1149
977 1150 	/*
1151 * There's no point calculating the residue if there's
1152 * no txstate to store the value.
1153 */
1154 if (!txstate) {
1155 if (plchan->state == PL08X_CHAN_PAUSED)
1156 ret = DMA_PAUSED;
1157 return ret;
1158 }
1159
1160 spin_lock_irqsave(&plchan->vc.lock, flags);
1161 ret = dma_cookie_status(chan, cookie, txstate);
1162 if (ret != DMA_SUCCESS) {
1163 vd = vchan_find_desc(&plchan->vc, cookie);
1164 if (vd) {
1165 /* On the issued list, so hasn't been processed yet */
1166 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
1167 struct pl08x_sg *dsg;
1168
1169 list_for_each_entry(dsg, &txd->dsg_list, node)
1170 bytes += dsg->len;
1171 } else {
1172 bytes = pl08x_getbytes_chan(plchan);
1173 }
1174 }
1175 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1176
1177 /*
978 1178 	 * This cookie not complete yet
979 1179 	 * Get number of bytes left in the active transactions and queue
980 1180 	 */
981 	dma_set_residue(txstate, pl08x_getbytes_chan(plchan));
1181 	dma_set_residue(txstate, bytes);
982 1182
983 	if (plchan->state == PL08X_CHAN_PAUSED)
984 		return DMA_PAUSED;
1183 	if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
1184 		ret = DMA_PAUSED;
985 1185
986 1186 	/* Whether waiting or running, we're in progress */
987 	return DMA_IN_PROGRESS;
1187 	return ret;
988 1188 }
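From a client's point of view, the residue computed above is reached through the channel's tx_status hook. A hedged sketch (hypothetical helper, calling the device_tx_status op directly, since that op is what this driver fills in):

#include <linux/dmaengine.h>

/* Hypothetical: how many bytes are still outstanding on a cookie? */
static size_t bytes_remaining(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (status == DMA_SUCCESS)
		return 0;		/* already complete, nothing left */
	return state.residue;		/* filled in by pl08x_dma_tx_status() */
}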
989 1189
990 1190 /* PrimeCell DMA extension */
@@ -1080,38 +1280,14 @@ static u32 pl08x_burst(u32 maxburst)
1080 1280 	return burst_sizes[i].reg;
1081 1281 }
1082 1282
1083 static int dma_set_runtime_config(struct dma_chan *chan,
1084 	struct dma_slave_config *config)
1085 {
1086 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1283 static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
1284 	enum dma_slave_buswidth addr_width, u32 maxburst)
1285 {
1286 	u32 width, burst, cctl = 0;
1087 struct pl08x_driver_data *pl08x = plchan->host;
1088 enum dma_slave_buswidth addr_width;
1089 u32 width, burst, maxburst;
1090 u32 cctl = 0;
1091
1092 if (!plchan->slave)
1093 return -EINVAL;
1094
1095 /* Transfer direction */
1096 plchan->runtime_direction = config->direction;
1097 if (config->direction == DMA_MEM_TO_DEV) {
1098 addr_width = config->dst_addr_width;
1099 maxburst = config->dst_maxburst;
1100 } else if (config->direction == DMA_DEV_TO_MEM) {
1101 addr_width = config->src_addr_width;
1102 maxburst = config->src_maxburst;
1103 } else {
1104 dev_err(&pl08x->adev->dev,
1105 "bad runtime_config: alien transfer direction\n");
1106 return -EINVAL;
1107 }
1108 1287
1109 1288 	width = pl08x_width(addr_width);
1110 	if (width == ~0) {
1111 		dev_err(&pl08x->adev->dev,
1112 			"bad runtime_config: alien address width\n");
1113 		return -EINVAL;
1114 	}
1289 	if (width == ~0)
1290 		return ~0;
1115 1291
1116 1292 	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
1117 1293 	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
@@ -1128,28 +1304,23 @@ static int dma_set_runtime_config(struct dma_chan *chan,
1128 1304 	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
1129 1305 	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
1130 1306
1131 	plchan->device_fc = config->device_fc;
1132
1133 	if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
1134 		plchan->src_addr = config->src_addr;
1135 		plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
1136 			pl08x_select_bus(plchan->cd->periph_buses,
1137 				pl08x->mem_buses);
1138 	} else {
1139 		plchan->dst_addr = config->dst_addr;
1140 		plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
1141 			pl08x_select_bus(pl08x->mem_buses,
1142 				plchan->cd->periph_buses);
1143 	}
1307 	return pl08x_cctl(cctl);
1308 }
1310 static int dma_set_runtime_config(struct dma_chan *chan,
1311 	struct dma_slave_config *config)
1312 {
1313 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1144 1314
1145 	dev_dbg(&pl08x->adev->dev,
1146 		"configured channel %s (%s) for %s, data width %d, "
1147 		"maxburst %d words, LE, CCTL=0x%08x\n",
1148 		dma_chan_name(chan), plchan->name,
1149 		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
1150 		addr_width,
1151 		maxburst,
1152 		cctl);
1315 	if (!plchan->slave)
1316 		return -EINVAL;
1317
1318 	/* Reject definitely invalid configurations */
1319 	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
1320 		config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
1321 		return -EINVAL;
1322
1323 plchan->cfg = *config;
1153 1324
1154 1325 	return 0;
1155 1326 }
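A client reaches dma_set_runtime_config() through the generic DMA_SLAVE_CONFIG control; a hedged sketch with a made-up FIFO address and widths:

#include <linux/dmaengine.h>

/* Hypothetical peripheral driver: configure a mem-to-dev channel. */
static int configure_tx_channel(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,	/* peripheral FIFO (assumed) */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};

	/* Ends up in plchan->cfg via dma_set_runtime_config() above. */
	return dmaengine_slave_config(chan, &cfg);
}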
@@ -1163,95 +1334,19 @@ static void pl08x_issue_pending(struct dma_chan *chan)
1163 1334 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1164 1335 	unsigned long flags;
1165 1336
1166 	spin_lock_irqsave(&plchan->lock, flags);
1167 	/* Something is already active, or we're waiting for a channel... */
1168 	if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
1169 		spin_unlock_irqrestore(&plchan->lock, flags);
1170 		return;
1171 	}
1172
1173 	/* Take the first element in the queue and execute it */
1174 	if (!list_empty(&plchan->pend_list)) {
1175 		struct pl08x_txd *next;
1176
1177 		next = list_first_entry(&plchan->pend_list,
1178 				struct pl08x_txd,
1179 				node);
1180 		list_del(&next->node);
1181 		plchan->state = PL08X_CHAN_RUNNING;
1182
1183 		pl08x_start_txd(plchan, next);
1184 	}
1185
1186 	spin_unlock_irqrestore(&plchan->lock, flags);
1187 }
1337 	spin_lock_irqsave(&plchan->vc.lock, flags);
1338 	if (vchan_issue_pending(&plchan->vc)) {
1339 		if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
1340 			pl08x_phy_alloc_and_start(plchan);
1341 	}
1342 	spin_unlock_irqrestore(&plchan->vc.lock, flags);
1188
1189static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
1190 struct pl08x_txd *txd)
1191{
1192 struct pl08x_driver_data *pl08x = plchan->host;
1193 unsigned long flags;
1194 int num_llis, ret;
1195
1196 num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
1197 if (!num_llis) {
1198 spin_lock_irqsave(&plchan->lock, flags);
1199 pl08x_free_txd(pl08x, txd);
1200 spin_unlock_irqrestore(&plchan->lock, flags);
1201 return -EINVAL;
1202 }
1203
1204 spin_lock_irqsave(&plchan->lock, flags);
1205
1206 /*
1207 * See if we already have a physical channel allocated,
1208 * else this is the time to try to get one.
1209 */
1210 ret = prep_phy_channel(plchan, txd);
1211 if (ret) {
1212 /*
1213 * No physical channel was available.
1214 *
1215 * memcpy transfers can be sorted out at submission time.
1216 *
1217 * Slave transfers may have been denied due to platform
1218 * channel muxing restrictions. Since there is no guarantee
1219 * that this will ever be resolved, and the signal must be
1220 * acquired AFTER acquiring the physical channel, we will let
1221 * them be NACK:ed with -EBUSY here. The drivers can retry
1222 * the prep() call if they are eager on doing this using DMA.
1223 */
1224 if (plchan->slave) {
1225 pl08x_free_txd_list(pl08x, plchan);
1226 pl08x_free_txd(pl08x, txd);
1227 spin_unlock_irqrestore(&plchan->lock, flags);
1228 return -EBUSY;
1229 }
1230 } else
1231 /*
1232 * Else we're all set, paused and ready to roll, status
1233 * will switch to PL08X_CHAN_RUNNING when we call
1234 * issue_pending(). If there is something running on the
1235 * channel already we don't change its state.
1236 */
1237 if (plchan->state == PL08X_CHAN_IDLE)
1238 plchan->state = PL08X_CHAN_PAUSED;
1239
1240 spin_unlock_irqrestore(&plchan->lock, flags);
1241
1242 return 0;
1243 }
1343 }
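Note the two-stage model made explicit here: preparing and submitting a descriptor only queues it on the virtual channel, and it is issue_pending() that may claim a physical channel and start it. A hedged client-side sketch (hypothetical helper):

#include <linux/dmaengine.h>

/* Hypothetical: queue a prepared descriptor, then let the engine run. */
static dma_cookie_t queue_and_start(struct dma_chan *chan,
				    struct dma_async_tx_descriptor *desc)
{
	dma_cookie_t cookie = dmaengine_submit(desc);	/* onto the vchan */

	dma_async_issue_pending(chan);	/* invokes pl08x_issue_pending() */
	return cookie;
}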
1244 1344
1245 static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
1246 	unsigned long flags)
1345 static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
1247 1346 {
1248 1347 	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
1249 1348
1250 1349 	if (txd) {
1251 dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
1252 txd->tx.flags = flags;
1253 txd->tx.tx_submit = pl08x_tx_submit;
1254 INIT_LIST_HEAD(&txd->node);
1255 1350 		INIT_LIST_HEAD(&txd->dsg_list);
1256 1351
1257 1352 		/* Always enable error and terminal interrupts */
@@ -1274,7 +1369,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1274 1369 	struct pl08x_sg *dsg;
1275 1370 	int ret;
1276 1371
1277 	txd = pl08x_get_txd(plchan, flags);
1372 	txd = pl08x_get_txd(plchan);
1278 1373 	if (!txd) {
1279 1374 		dev_err(&pl08x->adev->dev,
1280 1375 			"%s no memory for descriptor\n", __func__);
@@ -1290,14 +1385,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1290 1385 	}
1291 1386 	list_add_tail(&dsg->node, &txd->dsg_list);
1292 1387
1293 	txd->direction = DMA_NONE;
1294 1388 	dsg->src_addr = src;
1295 1389 	dsg->dst_addr = dest;
1296 1390 	dsg->len = len;
1297 1391
1298 1392 	/* Set platform data for m2m */
1299 1393 	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1300 	txd->cctl = pl08x->pd->memcpy_channel.cctl &
1394 	txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy &
1301 1395 		~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
1302 1396
1303 1397 	/* Both to be incremented or the code will break */
@@ -1307,11 +1401,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1307 1401 	txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
1308 1402 		pl08x->mem_buses);
1309 1403
1310 	ret = pl08x_prep_channel_resources(plchan, txd);
1311 	if (ret)
1404 	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
1405 	if (!ret) {
1406 		pl08x_free_txd(pl08x, txd);
1312 1407 		return NULL;
1408 	}
1313 1409
1314 	return &txd->tx;
1410 	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
1315 1411 }
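With prep now failure-free on channel allocation (a physical channel is found later, at issue time), a memcpy client simply prepares against already-mapped bus addresses. A hedged sketch using the raw device op this function implements:

#include <linux/dmaengine.h>

/* Hypothetical: prepare one memcpy between pre-mapped bus addresses. */
static struct dma_async_tx_descriptor *prep_copy(struct dma_chan *chan,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	return chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						    DMA_PREP_INTERRUPT);
}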
1316 1412
1317 1413 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
@@ -1324,36 +1420,40 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1324 1420 	struct pl08x_txd *txd;
1325 1421 	struct pl08x_sg *dsg;
1326 1422 	struct scatterlist *sg;
1423 	enum dma_slave_buswidth addr_width;
1327 1424 	dma_addr_t slave_addr;
1328 1425 	int ret, tmp;
1426 u8 src_buses, dst_buses;
1427 u32 maxburst, cctl;
1329 1428
1330 1429 	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
1331 1430 		__func__, sg_dma_len(sgl), plchan->name);
1332 1431
1333 	txd = pl08x_get_txd(plchan, flags);
1432 	txd = pl08x_get_txd(plchan);
1334 1433 	if (!txd) {
1335 1434 		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
1336 1435 		return NULL;
1337 1436 	}
1338 1437
1339 if (direction != plchan->runtime_direction)
1340 dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
1341 "the direction configured for the PrimeCell\n",
1342 __func__);
1343
1344 1438 	/*
1345 1439 	 * Set up addresses, the PrimeCell configured address
1346 1440 	 * will take precedence since this may configure the
1347 1441 	 * channel target address dynamically at runtime.
1348 1442 	 */
1349 	txd->direction = direction;
1350
1351 1443 	if (direction == DMA_MEM_TO_DEV) {
1352 		txd->cctl = plchan->dst_cctl;
1353 		slave_addr = plchan->dst_addr;
1444 		cctl = PL080_CONTROL_SRC_INCR;
1445 		slave_addr = plchan->cfg.dst_addr;
1446 addr_width = plchan->cfg.dst_addr_width;
1447 maxburst = plchan->cfg.dst_maxburst;
1448 src_buses = pl08x->mem_buses;
1449 dst_buses = plchan->cd->periph_buses;
1354 1450 	} else if (direction == DMA_DEV_TO_MEM) {
1355 		txd->cctl = plchan->src_cctl;
1356 		slave_addr = plchan->src_addr;
1451 		cctl = PL080_CONTROL_DST_INCR;
1452 		slave_addr = plchan->cfg.src_addr;
1453 addr_width = plchan->cfg.src_addr_width;
1454 maxburst = plchan->cfg.src_maxburst;
1455 src_buses = plchan->cd->periph_buses;
1456 dst_buses = pl08x->mem_buses;
1357 1457 	} else {
1358 1458 		pl08x_free_txd(pl08x, txd);
1359 1459 		dev_err(&pl08x->adev->dev,
@@ -1361,7 +1461,17 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1361 1461 		return NULL;
1362 1462 	}
1363 1463
1364 	if (plchan->device_fc)
1464 	cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
1465 if (cctl == ~0) {
1466 pl08x_free_txd(pl08x, txd);
1467 dev_err(&pl08x->adev->dev,
1468 "DMA slave configuration botched?\n");
1469 return NULL;
1470 }
1471
1472 txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);
1473
1474 if (plchan->cfg.device_fc)
1365 1475 		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
1366 1476 			PL080_FLOW_PER2MEM_PER;
1367 1477 	else
@@ -1370,9 +1480,28 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1370 1480
1371 1481 	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1372 1482
1483 ret = pl08x_request_mux(plchan);
1484 if (ret < 0) {
1485 pl08x_free_txd(pl08x, txd);
1486 dev_dbg(&pl08x->adev->dev,
1487 "unable to mux for transfer on %s due to platform restrictions\n",
1488 plchan->name);
1489 return NULL;
1490 }
1491
1492 dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n",
1493 plchan->signal, plchan->name);
1494
1495 /* Assign the flow control signal to this channel */
1496 if (direction == DMA_MEM_TO_DEV)
1497 txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
1498 else
1499 txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
1500
1373 1501 	for_each_sg(sgl, sg, sg_len, tmp) {
1374 1502 		dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
1375 1503 		if (!dsg) {
1504 			pl08x_release_mux(plchan);
1376 1505 			pl08x_free_txd(pl08x, txd);
1377 1506 			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
1378 1507 				__func__);
@@ -1390,11 +1519,14 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1390 1519 		}
1391 1520 	}
1392 1521
1393 	ret = pl08x_prep_channel_resources(plchan, txd);
1394 	if (ret)
1522 	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
1523 	if (!ret) {
1524 		pl08x_release_mux(plchan);
1525 		pl08x_free_txd(pl08x, txd);
1395 1526 		return NULL;
1527 	}
1396 1528
1397 	return &txd->tx;
1529 	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
1398 1530 }
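The corresponding client call is the standard slave-sg prep; a hedged sketch (hypothetical helper, assuming the caller has already dma_map_sg()'d the list):

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Hypothetical: prepare a mem-to-dev transfer over a mapped sg list. */
static struct dma_async_tx_descriptor *prep_tx_sg(struct dma_chan *chan,
		struct scatterlist *sgl, unsigned int nents)
{
	return dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}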
1399 1531
1400 1532 static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -1415,9 +1547,9 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1415 1547 	 * Anything succeeds on channels with no physical allocation and
1416 1548 	 * no queued transfers.
1417 1549 	 */
1418 	spin_lock_irqsave(&plchan->lock, flags);
1550 	spin_lock_irqsave(&plchan->vc.lock, flags);
1419 1551 	if (!plchan->phychan && !plchan->at) {
1420 		spin_unlock_irqrestore(&plchan->lock, flags);
1552 		spin_unlock_irqrestore(&plchan->vc.lock, flags);
1421 1553 		return 0;
1422 1554 	}
1423 1555
@@ -1426,18 +1558,15 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1426 1558 	plchan->state = PL08X_CHAN_IDLE;
1427 1559
1428 1560 	if (plchan->phychan) {
1429 pl08x_terminate_phy_chan(pl08x, plchan->phychan);
1430
1431 1561 		/*
1432 1562 		 * Mark physical channel as free and free any slave
1433 1563 		 * signal
1434 1564 		 */
1435 		release_phy_channel(plchan);
1436 		plchan->phychan_hold = 0;
1565 		pl08x_phy_free(plchan);
1437 1566 	}
1438 1567 	/* Dequeue jobs and free LLIs */
1439 1568 	if (plchan->at) {
1440 		pl08x_free_txd(pl08x, plchan->at);
1569 		pl08x_desc_free(&plchan->at->vd);
1441 1570 		plchan->at = NULL;
1442 1571 	}
1443 1572 	/* Dequeue jobs not yet fired as well */
@@ -1457,7 +1586,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1457 1586 		break;
1458 1587 	}
1459 1588
1460 	spin_unlock_irqrestore(&plchan->lock, flags);
1589 	spin_unlock_irqrestore(&plchan->vc.lock, flags);
1461 1590
1462 1591 	return ret;
1463 1592 }
@@ -1494,123 +1623,6 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
1494 1623 	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
1495 1624 }
1496 1625
1497static void pl08x_unmap_buffers(struct pl08x_txd *txd)
1498{
1499 struct device *dev = txd->tx.chan->device->dev;
1500 struct pl08x_sg *dsg;
1501
1502 if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
1503 if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
1504 list_for_each_entry(dsg, &txd->dsg_list, node)
1505 dma_unmap_single(dev, dsg->src_addr, dsg->len,
1506 DMA_TO_DEVICE);
1507 else {
1508 list_for_each_entry(dsg, &txd->dsg_list, node)
1509 dma_unmap_page(dev, dsg->src_addr, dsg->len,
1510 DMA_TO_DEVICE);
1511 }
1512 }
1513 if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
1514 if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
1515 list_for_each_entry(dsg, &txd->dsg_list, node)
1516 dma_unmap_single(dev, dsg->dst_addr, dsg->len,
1517 DMA_FROM_DEVICE);
1518 else
1519 list_for_each_entry(dsg, &txd->dsg_list, node)
1520 dma_unmap_page(dev, dsg->dst_addr, dsg->len,
1521 DMA_FROM_DEVICE);
1522 }
1523}
1524
1525static void pl08x_tasklet(unsigned long data)
1526{
1527 struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
1528 struct pl08x_driver_data *pl08x = plchan->host;
1529 struct pl08x_txd *txd;
1530 unsigned long flags;
1531
1532 spin_lock_irqsave(&plchan->lock, flags);
1533
1534 txd = plchan->at;
1535 plchan->at = NULL;
1536
1537 if (txd) {
1538 /* Update last completed */
1539 dma_cookie_complete(&txd->tx);
1540 }
1541
1542 /* If a new descriptor is queued, set it up plchan->at is NULL here */
1543 if (!list_empty(&plchan->pend_list)) {
1544 struct pl08x_txd *next;
1545
1546 next = list_first_entry(&plchan->pend_list,
1547 struct pl08x_txd,
1548 node);
1549 list_del(&next->node);
1550
1551 pl08x_start_txd(plchan, next);
1552 } else if (plchan->phychan_hold) {
1553 /*
1554 * This channel is still in use - we have a new txd being
1555 * prepared and will soon be queued. Don't give up the
1556 * physical channel.
1557 */
1558 } else {
1559 struct pl08x_dma_chan *waiting = NULL;
1560
1561 /*
1562 * No more jobs, so free up the physical channel
1563 * Free any allocated signal on slave transfers too
1564 */
1565 release_phy_channel(plchan);
1566 plchan->state = PL08X_CHAN_IDLE;
1567
1568 /*
1569 * And NOW before anyone else can grab that free:d up
1570 * physical channel, see if there is some memcpy pending
1571 * that seriously needs to start because of being stacked
1572 * up while we were choking the physical channels with data.
1573 */
1574 list_for_each_entry(waiting, &pl08x->memcpy.channels,
1575 chan.device_node) {
1576 if (waiting->state == PL08X_CHAN_WAITING &&
1577 waiting->waiting != NULL) {
1578 int ret;
1579
1580 /* This should REALLY not fail now */
1581 ret = prep_phy_channel(waiting,
1582 waiting->waiting);
1583 BUG_ON(ret);
1584 waiting->phychan_hold--;
1585 waiting->state = PL08X_CHAN_RUNNING;
1586 waiting->waiting = NULL;
1587 pl08x_issue_pending(&waiting->chan);
1588 break;
1589 }
1590 }
1591 }
1592
1593 spin_unlock_irqrestore(&plchan->lock, flags);
1594
1595 if (txd) {
1596 dma_async_tx_callback callback = txd->tx.callback;
1597 void *callback_param = txd->tx.callback_param;
1598
1599 /* Don't try to unmap buffers on slave channels */
1600 if (!plchan->slave)
1601 pl08x_unmap_buffers(txd);
1602
1603 /* Free the descriptor */
1604 spin_lock_irqsave(&plchan->lock, flags);
1605 pl08x_free_txd(pl08x, txd);
1606 spin_unlock_irqrestore(&plchan->lock, flags);
1607
1608 /* Callback to signal completion */
1609 if (callback)
1610 callback(callback_param);
1611 }
1612}
1613
1614 1626 static irqreturn_t pl08x_irq(int irq, void *dev)
1615 1627 {
1616 1628 	struct pl08x_driver_data *pl08x = dev;
@@ -1635,6 +1647,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
1635 1647 		/* Locate physical channel */
1636 1648 		struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
1637 1649 		struct pl08x_dma_chan *plchan = phychan->serving;
1650 		struct pl08x_txd *tx;
1638 1651
1639 1652 		if (!plchan) {
1640 1653 			dev_err(&pl08x->adev->dev,
@@ -1643,8 +1656,29 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
1643 1656 			continue;
1644 1657 		}
1645 1658
1646 		/* Schedule tasklet on this channel */
1647 		tasklet_schedule(&plchan->tasklet);
1659 		spin_lock(&plchan->vc.lock);
1660 		tx = plchan->at;
1661 if (tx) {
1662 plchan->at = NULL;
1663 /*
1664 * This descriptor is done, release its mux
1665 * reservation.
1666 */
1667 pl08x_release_mux(plchan);
1668 tx->done = true;
1669 vchan_cookie_complete(&tx->vd);
1670
1671 /*
1672 * And start the next descriptor (if any),
1673 * otherwise free this channel.
1674 */
1675 if (vchan_next_desc(&plchan->vc))
1676 pl08x_start_next_txd(plchan);
1677 else
1678 pl08x_phy_free(plchan);
1679 }
1680 spin_unlock(&plchan->vc.lock);
1681
1648 1682 			mask |= (1 << i);
1649 1683 		}
1650 1684 	}
@@ -1654,16 +1688,10 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
1654 1688
1655 1689 static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
1656 1690 {
1657 	u32 cctl = pl08x_cctl(chan->cd->cctl);
1658
1659 1691 	chan->slave = true;
1660 1692 	chan->name = chan->cd->bus_id;
1661 	chan->src_addr = chan->cd->addr;
1662 	chan->dst_addr = chan->cd->addr;
1693 	chan->cfg.src_addr = chan->cd->addr;
1694 	chan->cfg.dst_addr = chan->cd->addr;
1663 chan->src_cctl = cctl | PL080_CONTROL_DST_INCR |
1664 pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses);
1665 chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR |
1666 pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses);
1667 1695 }
1668 1696
1669 1697 /*
@@ -1693,6 +1721,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1693 1721
1694 1722 		chan->host = pl08x;
1695 1723 		chan->state = PL08X_CHAN_IDLE;
1724 		chan->signal = -1;
1696 1725
1697 1726 		if (slave) {
1698 1727 			chan->cd = &pl08x->pd->slave_channels[i];
@@ -1705,26 +1734,12 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1705 1734 				return -ENOMEM;
1706 1735 			}
1707 1736 		}
1708 if (chan->cd->circular_buffer) {
1709 dev_err(&pl08x->adev->dev,
1710 "channel %s: circular buffers not supported\n",
1711 chan->name);
1712 kfree(chan);
1713 continue;
1714 }
1715 1737 		dev_dbg(&pl08x->adev->dev,
1716 1738 			"initialize virtual channel \"%s\"\n",
1717 1739 			chan->name);
1718 1740
1719 		chan->chan.device = dmadev;
1720 		dma_cookie_init(&chan->chan);
1741 		chan->vc.desc_free = pl08x_desc_free;
1742 		vchan_init(&chan->vc, dmadev);
1721
1722 spin_lock_init(&chan->lock);
1723 INIT_LIST_HEAD(&chan->pend_list);
1724 tasklet_init(&chan->tasklet, pl08x_tasklet,
1725 (unsigned long) chan);
1726
1727 list_add_tail(&chan->chan.device_node, &dmadev->channels);
1728 1743 	}
1729 1744 	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
1730 1745 		i, slave ? "slave" : "memcpy");
@@ -1737,8 +1752,8 @@ static void pl08x_free_virtual_channels(struct dma_device *dmadev)
1737 1752 	struct pl08x_dma_chan *next;
1738 1753
1739 1754 	list_for_each_entry_safe(chan,
1740 			next, &dmadev->channels, chan.device_node) {
1741 		list_del(&chan->chan.device_node);
1755 			next, &dmadev->channels, vc.chan.device_node) {
1756 		list_del(&chan->vc.chan.device_node);
1742 1757 		kfree(chan);
1743 1758 	}
1744 1759 }
@@ -1791,7 +1806,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
1791 1806 	seq_printf(s, "\nPL08x virtual memcpy channels:\n");
1792 1807 	seq_printf(s, "CHANNEL:\tSTATE:\n");
1793 1808 	seq_printf(s, "--------\t------\n");
1794 	list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
1809 	list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) {
1795 1810 		seq_printf(s, "%s\t\t%s\n", chan->name,
1796 1811 			pl08x_state_str(chan->state));
1797 1812 	}
@@ -1799,7 +1814,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
1799 1814 	seq_printf(s, "\nPL08x virtual slave channels:\n");
1800 1815 	seq_printf(s, "CHANNEL:\tSTATE:\n");
1801 1816 	seq_printf(s, "--------\t------\n");
1802 	list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
1817 	list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
1803 1818 		seq_printf(s, "%s\t\t%s\n", chan->name,
1804 1819 			pl08x_state_str(chan->state));
1805 1820 	}
@@ -1851,9 +1866,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1851 1866 		goto out_no_pl08x;
1852 1867 	}
1853 1868
1854 pm_runtime_set_active(&adev->dev);
1855 pm_runtime_enable(&adev->dev);
1856
1857 1869 	/* Initialize memcpy engine */
1858 1870 	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
1859 1871 	pl08x->memcpy.dev = &adev->dev;
@@ -1903,8 +1915,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1903 1915 		goto out_no_lli_pool;
1904 1916 	}
1905 1917
1906 spin_lock_init(&pl08x->lock);
1907
1908 1918 	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
1909 1919 	if (!pl08x->base) {
1910 1920 		ret = -ENOMEM;
@@ -1942,7 +1952,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1942 1952 		ch->id = i;
1943 1953 		ch->base = pl08x->base + PL080_Cx_BASE(i);
1944 1954 		spin_lock_init(&ch->lock);
1945 		ch->signal = -1;
1946 1955
1947 1956 		/*
1948 1957 		 * Nomadik variants can have channels that are locked
@@ -2007,7 +2016,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2007 2016 		amba_part(adev), amba_rev(adev),
2008 2017 		(unsigned long long)adev->res.start, adev->irq[0]);
2009 2018
2010 pm_runtime_put(&adev->dev);
2011 2019 	return 0;
2012 2020
2013 2021 out_no_slave_reg:
@@ -2026,9 +2034,6 @@ out_no_ioremap:
2026 2034 	dma_pool_destroy(pl08x->pool);
2027 2035 out_no_lli_pool:
2028 2036 out_no_platdata:
2029 pm_runtime_put(&adev->dev);
2030 pm_runtime_disable(&adev->dev);
2031
2032 2037 	kfree(pl08x);
2033 2038 out_no_pl08x:
2034 2039 	amba_release_regions(adev);
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
new file mode 100644
index 000000000000..ae0561826137
--- /dev/null
+++ b/drivers/dma/omap-dma.c
@@ -0,0 +1,669 @@
1/*
2 * OMAP DMAengine support
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#include <linux/dmaengine.h>
9#include <linux/dma-mapping.h>
10#include <linux/err.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/list.h>
14#include <linux/module.h>
15#include <linux/omap-dma.h>
16#include <linux/platform_device.h>
17#include <linux/slab.h>
18#include <linux/spinlock.h>
19
20#include "virt-dma.h"
21#include <plat/dma.h>
22
23struct omap_dmadev {
24 struct dma_device ddev;
25 spinlock_t lock;
26 struct tasklet_struct task;
27 struct list_head pending;
28};
29
30struct omap_chan {
31 struct virt_dma_chan vc;
32 struct list_head node;
33
34 struct dma_slave_config cfg;
35 unsigned dma_sig;
36 bool cyclic;
37
38 int dma_ch;
39 struct omap_desc *desc;
40 unsigned sgidx;
41};
42
43struct omap_sg {
44 dma_addr_t addr;
45 uint32_t en; /* number of elements (24-bit) */
46 uint32_t fn; /* number of frames (16-bit) */
47};
48
49struct omap_desc {
50 struct virt_dma_desc vd;
51 enum dma_transfer_direction dir;
52 dma_addr_t dev_addr;
53
54 int16_t fi; /* for OMAP_DMA_SYNC_PACKET */
55 uint8_t es; /* OMAP_DMA_DATA_TYPE_xxx */
56 uint8_t sync_mode; /* OMAP_DMA_SYNC_xxx */
57 uint8_t sync_type; /* OMAP_DMA_xxx_SYNC* */
58 uint8_t periph_port; /* Peripheral port */
59
60 unsigned sglen;
61 struct omap_sg sg[0];
62};
63
64static const unsigned es_bytes[] = {
65 [OMAP_DMA_DATA_TYPE_S8] = 1,
66 [OMAP_DMA_DATA_TYPE_S16] = 2,
67 [OMAP_DMA_DATA_TYPE_S32] = 4,
68};
69
70static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
71{
72 return container_of(d, struct omap_dmadev, ddev);
73}
74
75static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
76{
77 return container_of(c, struct omap_chan, vc.chan);
78}
79
80static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
81{
82 return container_of(t, struct omap_desc, vd.tx);
83}
84
85static void omap_dma_desc_free(struct virt_dma_desc *vd)
86{
87 kfree(container_of(vd, struct omap_desc, vd));
88}
89
90static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
91 unsigned idx)
92{
93 struct omap_sg *sg = d->sg + idx;
94
95 if (d->dir == DMA_DEV_TO_MEM)
96 omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
97 OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
98 else
99 omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
100 OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
101
102 omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
103 d->sync_mode, c->dma_sig, d->sync_type);
104
105 omap_start_dma(c->dma_ch);
106}
107
108static void omap_dma_start_desc(struct omap_chan *c)
109{
110 struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
111 struct omap_desc *d;
112
113 if (!vd) {
114 c->desc = NULL;
115 return;
116 }
117
118 list_del(&vd->node);
119
120 c->desc = d = to_omap_dma_desc(&vd->tx);
121 c->sgidx = 0;
122
123 if (d->dir == DMA_DEV_TO_MEM)
124 omap_set_dma_src_params(c->dma_ch, d->periph_port,
125 OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
126 else
127 omap_set_dma_dest_params(c->dma_ch, d->periph_port,
128 OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
129
130 omap_dma_start_sg(c, d, 0);
131}
132
133static void omap_dma_callback(int ch, u16 status, void *data)
134{
135 struct omap_chan *c = data;
136 struct omap_desc *d;
137 unsigned long flags;
138
139 spin_lock_irqsave(&c->vc.lock, flags);
140 d = c->desc;
141 if (d) {
142 if (!c->cyclic) {
143 if (++c->sgidx < d->sglen) {
144 omap_dma_start_sg(c, d, c->sgidx);
145 } else {
146 omap_dma_start_desc(c);
147 vchan_cookie_complete(&d->vd);
148 }
149 } else {
150 vchan_cyclic_callback(&d->vd);
151 }
152 }
153 spin_unlock_irqrestore(&c->vc.lock, flags);
154}
155
156/*
157 * This callback schedules all pending channels. We could be more
158 * clever here by postponing allocation of the real DMA channels to
159 * this point, and freeing them when our virtual channel becomes idle.
160 *
161 * We would then need to deal with 'all channels in-use'
162 */
163static void omap_dma_sched(unsigned long data)
164{
165 struct omap_dmadev *d = (struct omap_dmadev *)data;
166 LIST_HEAD(head);
167
168 spin_lock_irq(&d->lock);
169 list_splice_tail_init(&d->pending, &head);
170 spin_unlock_irq(&d->lock);
171
172 while (!list_empty(&head)) {
173 struct omap_chan *c = list_first_entry(&head,
174 struct omap_chan, node);
175
176 spin_lock_irq(&c->vc.lock);
177 list_del_init(&c->node);
178 omap_dma_start_desc(c);
179 spin_unlock_irq(&c->vc.lock);
180 }
181}
182
183static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
184{
185 struct omap_chan *c = to_omap_dma_chan(chan);
186
187 dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
188
189 return omap_request_dma(c->dma_sig, "DMA engine",
190 omap_dma_callback, c, &c->dma_ch);
191}
192
193static void omap_dma_free_chan_resources(struct dma_chan *chan)
194{
195 struct omap_chan *c = to_omap_dma_chan(chan);
196
197 vchan_free_chan_resources(&c->vc);
198 omap_free_dma(c->dma_ch);
199
200 dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
201}
202
203static size_t omap_dma_sg_size(struct omap_sg *sg)
204{
205 return sg->en * sg->fn;
206}
207
208static size_t omap_dma_desc_size(struct omap_desc *d)
209{
210 unsigned i;
211 size_t size;
212
213 for (size = i = 0; i < d->sglen; i++)
214 size += omap_dma_sg_size(&d->sg[i]);
215
216 return size * es_bytes[d->es];
217}
218
219static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
220{
221 unsigned i;
222 size_t size, es_size = es_bytes[d->es];
223
224 for (size = i = 0; i < d->sglen; i++) {
225 size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;
226
227 if (size)
228 size += this_size;
229 else if (addr >= d->sg[i].addr &&
230 addr < d->sg[i].addr + this_size)
231 size += d->sg[i].addr + this_size - addr;
232 }
233 return size;
234}
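A worked example of the position arithmetic above, with hypothetical numbers, rendered as a comment:

/*
 * Worked example (hypothetical numbers): d->sglen = 2, ES = S32 (4 bytes),
 * each entry en = 1024, fn = 1, i.e. 4096 bytes per entry.  With 'addr'
 * sitting 1024 bytes into entry 0:
 *   entry 0: size == 0 and addr in range -> size += 4096 - 1024 = 3072
 *   entry 1: size != 0                   -> size += 4096
 * so the reported residue is 7168 bytes; entries wholly before the
 * position contribute nothing, since size is still zero and the
 * address test fails.
 */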
235
236static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
237 dma_cookie_t cookie, struct dma_tx_state *txstate)
238{
239 struct omap_chan *c = to_omap_dma_chan(chan);
240 struct virt_dma_desc *vd;
241 enum dma_status ret;
242 unsigned long flags;
243
244 ret = dma_cookie_status(chan, cookie, txstate);
245 if (ret == DMA_SUCCESS || !txstate)
246 return ret;
247
248 spin_lock_irqsave(&c->vc.lock, flags);
249 vd = vchan_find_desc(&c->vc, cookie);
250 if (vd) {
251 txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
252 } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
253 struct omap_desc *d = c->desc;
254 dma_addr_t pos;
255
256 if (d->dir == DMA_MEM_TO_DEV)
257 pos = omap_get_dma_src_pos(c->dma_ch);
258 else if (d->dir == DMA_DEV_TO_MEM)
259 pos = omap_get_dma_dst_pos(c->dma_ch);
260 else
261 pos = 0;
262
263 txstate->residue = omap_dma_desc_size_pos(d, pos);
264 } else {
265 txstate->residue = 0;
266 }
267 spin_unlock_irqrestore(&c->vc.lock, flags);
268
269 return ret;
270}
271
272static void omap_dma_issue_pending(struct dma_chan *chan)
273{
274 struct omap_chan *c = to_omap_dma_chan(chan);
275 unsigned long flags;
276
277 spin_lock_irqsave(&c->vc.lock, flags);
278 if (vchan_issue_pending(&c->vc) && !c->desc) {
279 struct omap_dmadev *d = to_omap_dma_dev(chan->device);
280 spin_lock(&d->lock);
281 if (list_empty(&c->node))
282 list_add_tail(&c->node, &d->pending);
283 spin_unlock(&d->lock);
284 tasklet_schedule(&d->task);
285 }
286 spin_unlock_irqrestore(&c->vc.lock, flags);
287}
288
289static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
290 struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
291 enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
292{
293 struct omap_chan *c = to_omap_dma_chan(chan);
294 enum dma_slave_buswidth dev_width;
295 struct scatterlist *sgent;
296 struct omap_desc *d;
297 dma_addr_t dev_addr;
298 unsigned i, j = 0, es, en, frame_bytes, sync_type;
299 u32 burst;
300
301 if (dir == DMA_DEV_TO_MEM) {
302 dev_addr = c->cfg.src_addr;
303 dev_width = c->cfg.src_addr_width;
304 burst = c->cfg.src_maxburst;
305 sync_type = OMAP_DMA_SRC_SYNC;
306 } else if (dir == DMA_MEM_TO_DEV) {
307 dev_addr = c->cfg.dst_addr;
308 dev_width = c->cfg.dst_addr_width;
309 burst = c->cfg.dst_maxburst;
310 sync_type = OMAP_DMA_DST_SYNC;
311 } else {
312 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
313 return NULL;
314 }
315
316 /* Bus width translates to the element size (ES) */
317 switch (dev_width) {
318 case DMA_SLAVE_BUSWIDTH_1_BYTE:
319 es = OMAP_DMA_DATA_TYPE_S8;
320 break;
321 case DMA_SLAVE_BUSWIDTH_2_BYTES:
322 es = OMAP_DMA_DATA_TYPE_S16;
323 break;
324 case DMA_SLAVE_BUSWIDTH_4_BYTES:
325 es = OMAP_DMA_DATA_TYPE_S32;
326 break;
327 default: /* not reached */
328 return NULL;
329 }
330
331 /* Now allocate and setup the descriptor. */
332 d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
333 if (!d)
334 return NULL;
335
336 d->dir = dir;
337 d->dev_addr = dev_addr;
338 d->es = es;
339 d->sync_mode = OMAP_DMA_SYNC_FRAME;
340 d->sync_type = sync_type;
341 d->periph_port = OMAP_DMA_PORT_TIPB;
342
343 /*
344 * Build our scatterlist entries: each contains the address,
345 * the number of elements (EN) in each frame, and the number of
346 * frames (FN). Number of bytes for this entry = ES * EN * FN.
347 *
348 * Burst size translates to number of elements with frame sync.
349 * Note: DMA engine defines burst to be the number of dev-width
350 * transfers.
351 */
352 en = burst;
353 frame_bytes = es_bytes[es] * en;
354 for_each_sg(sgl, sgent, sglen, i) {
355 d->sg[j].addr = sg_dma_address(sgent);
356 d->sg[j].en = en;
357 d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
358 j++;
359 }
360
361 d->sglen = j;
362
363 return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
364}
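The EN/FN split falls out of the burst choice; a worked sizing example with hypothetical numbers:

/*
 * Hypothetical sizing for the EN/FN/ES split above:
 * dev_width = 4 bytes (S32) and burst = 16 give
 *   en = 16, frame_bytes = 4 * 16 = 64.
 * A 4096-byte sg entry then gets fn = 4096 / 64 = 64 frames, and
 * ES * EN * FN = 4 * 16 * 64 = 4096 bytes, matching the entry length.
 */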
365
366static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
367 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
368 size_t period_len, enum dma_transfer_direction dir, void *context)
369{
370 struct omap_chan *c = to_omap_dma_chan(chan);
371 enum dma_slave_buswidth dev_width;
372 struct omap_desc *d;
373 dma_addr_t dev_addr;
374 unsigned es, sync_type;
375 u32 burst;
376
377 if (dir == DMA_DEV_TO_MEM) {
378 dev_addr = c->cfg.src_addr;
379 dev_width = c->cfg.src_addr_width;
380 burst = c->cfg.src_maxburst;
381 sync_type = OMAP_DMA_SRC_SYNC;
382 } else if (dir == DMA_MEM_TO_DEV) {
383 dev_addr = c->cfg.dst_addr;
384 dev_width = c->cfg.dst_addr_width;
385 burst = c->cfg.dst_maxburst;
386 sync_type = OMAP_DMA_DST_SYNC;
387 } else {
388 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
389 return NULL;
390 }
391
392 /* Bus width translates to the element size (ES) */
393 switch (dev_width) {
394 case DMA_SLAVE_BUSWIDTH_1_BYTE:
395 es = OMAP_DMA_DATA_TYPE_S8;
396 break;
397 case DMA_SLAVE_BUSWIDTH_2_BYTES:
398 es = OMAP_DMA_DATA_TYPE_S16;
399 break;
400 case DMA_SLAVE_BUSWIDTH_4_BYTES:
401 es = OMAP_DMA_DATA_TYPE_S32;
402 break;
403 default: /* not reached */
404 return NULL;
405 }
406
407 /* Now allocate and setup the descriptor. */
408 d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
409 if (!d)
410 return NULL;
411
412 d->dir = dir;
413 d->dev_addr = dev_addr;
414 d->fi = burst;
415 d->es = es;
416 d->sync_mode = OMAP_DMA_SYNC_PACKET;
417 d->sync_type = sync_type;
418 d->periph_port = OMAP_DMA_PORT_MPUI;
419 d->sg[0].addr = buf_addr;
420 d->sg[0].en = period_len / es_bytes[es];
421 d->sg[0].fn = buf_len / period_len;
422 d->sglen = 1;
423
424 if (!c->cyclic) {
425 c->cyclic = true;
426 omap_dma_link_lch(c->dma_ch, c->dma_ch);
427 omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);
428 omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
429 }
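	/*
	 * Note (inferred from the calls above, not original commentary):
	 * chaining the logical channel onto itself makes the controller
	 * restart the transfer when it finishes, which is what provides
	 * the cyclic behaviour; the per-frame interrupt then signals each
	 * period while the end-of-block interrupt stays masked.
	 */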
430
431 if (!cpu_class_is_omap1()) {
432 omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
433 omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
434 }
435
436 return vchan_tx_prep(&c->vc, &d->vd, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
437}
438
439static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
440{
441 if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
442 cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
443 return -EINVAL;
444
445 memcpy(&c->cfg, cfg, sizeof(c->cfg));
446
447 return 0;
448}
449
450static int omap_dma_terminate_all(struct omap_chan *c)
451{
452 struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
453 unsigned long flags;
454 LIST_HEAD(head);
455
456 spin_lock_irqsave(&c->vc.lock, flags);
457
458 /* Prevent this channel being scheduled */
459 spin_lock(&d->lock);
460 list_del_init(&c->node);
461 spin_unlock(&d->lock);
462
463 /*
464 * Stop DMA activity: we assume the callback will not be called
465 * after omap_stop_dma() returns (even if it does, it will see
466 * c->desc is NULL and exit.)
467 */
468 if (c->desc) {
469 c->desc = NULL;
470 omap_stop_dma(c->dma_ch);
471 }
472
473 if (c->cyclic) {
474 c->cyclic = false;
475 omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
476 }
477
478 vchan_get_all_descriptors(&c->vc, &head);
479 spin_unlock_irqrestore(&c->vc.lock, flags);
480 vchan_dma_desc_free_list(&c->vc, &head);
481
482 return 0;
483}
484
485static int omap_dma_pause(struct omap_chan *c)
486{
487 /* FIXME: not supported by platform private API */
488 return -EINVAL;
489}
490
491static int omap_dma_resume(struct omap_chan *c)
492{
493 /* FIXME: not supported by platform private API */
494 return -EINVAL;
495}
496
497static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
498 unsigned long arg)
499{
500 struct omap_chan *c = to_omap_dma_chan(chan);
501 int ret;
502
503 switch (cmd) {
504 case DMA_SLAVE_CONFIG:
505 ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
506 break;
507
508 case DMA_TERMINATE_ALL:
509 ret = omap_dma_terminate_all(c);
510 break;
511
512 case DMA_PAUSE:
513 ret = omap_dma_pause(c);
514 break;
515
516 case DMA_RESUME:
517 ret = omap_dma_resume(c);
518 break;
519
520 default:
521 ret = -ENXIO;
522 break;
523 }
524
525 return ret;
526}
527
528static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
529{
530 struct omap_chan *c;
531
532 c = kzalloc(sizeof(*c), GFP_KERNEL);
533 if (!c)
534 return -ENOMEM;
535
536 c->dma_sig = dma_sig;
537 c->vc.desc_free = omap_dma_desc_free;
538 vchan_init(&c->vc, &od->ddev);
539 INIT_LIST_HEAD(&c->node);
540
541 od->ddev.chancnt++;
542
543 return 0;
544}
545
546static void omap_dma_free(struct omap_dmadev *od)
547{
548 tasklet_kill(&od->task);
549 while (!list_empty(&od->ddev.channels)) {
550 struct omap_chan *c = list_first_entry(&od->ddev.channels,
551 struct omap_chan, vc.chan.device_node);
552
553 list_del(&c->vc.chan.device_node);
554 tasklet_kill(&c->vc.task);
555 kfree(c);
556 }
557 kfree(od);
558}
559
560static int omap_dma_probe(struct platform_device *pdev)
561{
562 struct omap_dmadev *od;
563 int rc, i;
564
565 od = kzalloc(sizeof(*od), GFP_KERNEL);
566 if (!od)
567 return -ENOMEM;
568
569 dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
570 dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
571 od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
572 od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
573 od->ddev.device_tx_status = omap_dma_tx_status;
574 od->ddev.device_issue_pending = omap_dma_issue_pending;
575 od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
576 od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
577 od->ddev.device_control = omap_dma_control;
578 od->ddev.dev = &pdev->dev;
579 INIT_LIST_HEAD(&od->ddev.channels);
580 INIT_LIST_HEAD(&od->pending);
581 spin_lock_init(&od->lock);
582
583 tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);
584
585 for (i = 0; i < 127; i++) {
586 rc = omap_dma_chan_init(od, i);
587 if (rc) {
588 omap_dma_free(od);
589 return rc;
590 }
591 }
592
593 rc = dma_async_device_register(&od->ddev);
594 if (rc) {
595 pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
596 rc);
597 omap_dma_free(od);
598 } else {
599 platform_set_drvdata(pdev, od);
600 }
601
602 dev_info(&pdev->dev, "OMAP DMA engine driver\n");
603
604 return rc;
605}
606
607static int omap_dma_remove(struct platform_device *pdev)
608{
609 struct omap_dmadev *od = platform_get_drvdata(pdev);
610
611 dma_async_device_unregister(&od->ddev);
612 omap_dma_free(od);
613
614 return 0;
615}
616
617static struct platform_driver omap_dma_driver = {
618 .probe = omap_dma_probe,
619 .remove = omap_dma_remove,
620 .driver = {
621 .name = "omap-dma-engine",
622 .owner = THIS_MODULE,
623 },
624};
625
626bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
627{
628 if (chan->device->dev->driver == &omap_dma_driver.driver) {
629 struct omap_chan *c = to_omap_dma_chan(chan);
630 unsigned req = *(unsigned *)param;
631
632 return req == c->dma_sig;
633 }
634 return false;
635}
636EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
637
638static struct platform_device *pdev;
639
640static const struct platform_device_info omap_dma_dev_info = {
641 .name = "omap-dma-engine",
642 .id = -1,
643 .dma_mask = DMA_BIT_MASK(32),
644};
645
646static int omap_dma_init(void)
647{
648 int rc = platform_driver_register(&omap_dma_driver);
649
650 if (rc == 0) {
651 pdev = platform_device_register_full(&omap_dma_dev_info);
652 if (IS_ERR(pdev)) {
653 platform_driver_unregister(&omap_dma_driver);
654 rc = PTR_ERR(pdev);
655 }
656 }
657 return rc;
658}
659subsys_initcall(omap_dma_init);
660
661static void __exit omap_dma_exit(void)
662{
663 platform_device_unregister(pdev);
664 platform_driver_unregister(&omap_dma_driver);
665}
666module_exit(omap_dma_exit);
667
668MODULE_AUTHOR("Russell King");
669MODULE_LICENSE("GPL");
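A usage note: omap_dma_filter_fn() above is the hook that lets peripheral drivers find one of these channels through the generic dmaengine allocator. A minimal sketch, assuming a peripheral whose request line is passed in as the filter parameter (the MMC conversions later in this series do exactly this; OMAP_DMA_MMC_TX is just one example request line):

	dma_cap_mask_t mask;
	struct dma_chan *chan;
	unsigned sig = OMAP_DMA_MMC_TX;	/* your peripheral's DMA request line */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
	if (!chan)
		/* fall back to PIO, or fail the probe */;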
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index ec78ccef9132..f5a73606217e 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -21,6 +21,8 @@
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/spinlock.h> 22#include <linux/spinlock.h>
23 23
24#include "virt-dma.h"
25
24#define NR_PHY_CHAN 6 26#define NR_PHY_CHAN 6
25#define DMA_ALIGN 3 27#define DMA_ALIGN 3
26#define DMA_MAX_SIZE 0x1fff 28#define DMA_MAX_SIZE 0x1fff
@@ -72,12 +74,13 @@ struct sa11x0_dma_sg {
72}; 74};
73 75
74struct sa11x0_dma_desc { 76struct sa11x0_dma_desc {
75 struct dma_async_tx_descriptor tx; 77 struct virt_dma_desc vd;
78
76 u32 ddar; 79 u32 ddar;
77 size_t size; 80 size_t size;
81 unsigned period;
82 bool cyclic;
78 83
79 /* maybe protected by c->lock */
80 struct list_head node;
81 unsigned sglen; 84 unsigned sglen;
82 struct sa11x0_dma_sg sg[0]; 85 struct sa11x0_dma_sg sg[0];
83}; 86};
@@ -85,15 +88,11 @@ struct sa11x0_dma_desc {
85struct sa11x0_dma_phy; 88struct sa11x0_dma_phy;
86 89
87struct sa11x0_dma_chan { 90struct sa11x0_dma_chan {
88 struct dma_chan chan; 91 struct virt_dma_chan vc;
89 spinlock_t lock;
90 dma_cookie_t lc;
91 92
92 /* protected by c->lock */ 93 /* protected by c->vc.lock */
93 struct sa11x0_dma_phy *phy; 94 struct sa11x0_dma_phy *phy;
94 enum dma_status status; 95 enum dma_status status;
95 struct list_head desc_submitted;
96 struct list_head desc_issued;
97 96
98 /* protected by d->lock */ 97 /* protected by d->lock */
99 struct list_head node; 98 struct list_head node;
@@ -109,7 +108,7 @@ struct sa11x0_dma_phy {
109 108
110 struct sa11x0_dma_chan *vchan; 109 struct sa11x0_dma_chan *vchan;
111 110
112 /* Protected by c->lock */ 111 /* Protected by c->vc.lock */
113 unsigned sg_load; 112 unsigned sg_load;
114 struct sa11x0_dma_desc *txd_load; 113 struct sa11x0_dma_desc *txd_load;
115 unsigned sg_done; 114 unsigned sg_done;
@@ -127,13 +126,12 @@ struct sa11x0_dma_dev {
127 spinlock_t lock; 126 spinlock_t lock;
128 struct tasklet_struct task; 127 struct tasklet_struct task;
129 struct list_head chan_pending; 128 struct list_head chan_pending;
130 struct list_head desc_complete;
131 struct sa11x0_dma_phy phy[NR_PHY_CHAN]; 129 struct sa11x0_dma_phy phy[NR_PHY_CHAN];
132}; 130};
133 131
134static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan) 132static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
135{ 133{
136 return container_of(chan, struct sa11x0_dma_chan, chan); 134 return container_of(chan, struct sa11x0_dma_chan, vc.chan);
137} 135}
138 136
139static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev) 137static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
@@ -141,27 +139,26 @@ static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
141 return container_of(dmadev, struct sa11x0_dma_dev, slave); 139 return container_of(dmadev, struct sa11x0_dma_dev, slave);
142} 140}
143 141
144static struct sa11x0_dma_desc *to_sa11x0_dma_tx(struct dma_async_tx_descriptor *tx) 142static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
145{ 143{
146 return container_of(tx, struct sa11x0_dma_desc, tx); 144 struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
145
146 return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
147} 147}
148 148
149static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c) 149static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
150{ 150{
151 if (list_empty(&c->desc_issued)) 151 kfree(container_of(vd, struct sa11x0_dma_desc, vd));
152 return NULL;
153
154 return list_first_entry(&c->desc_issued, struct sa11x0_dma_desc, node);
155} 152}
156 153
157static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd) 154static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
158{ 155{
159 list_del(&txd->node); 156 list_del(&txd->vd.node);
160 p->txd_load = txd; 157 p->txd_load = txd;
161 p->sg_load = 0; 158 p->sg_load = 0;
162 159
163 dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n", 160 dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
164 p->num, txd, txd->tx.cookie, txd->ddar); 161 p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
165} 162}
166 163
167static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p, 164static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
@@ -183,19 +180,24 @@ static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
183 return; 180 return;
184 181
185 if (p->sg_load == txd->sglen) { 182 if (p->sg_load == txd->sglen) {
186 struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c); 183 if (!txd->cyclic) {
184 struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
187 185
188 /* 186 /*
189 * We have reached the end of the current descriptor. 187 * We have reached the end of the current descriptor.
190 * Peek at the next descriptor, and if compatible with 188 * Peek at the next descriptor, and if compatible with
191 * the current, start processing it. 189 * the current, start processing it.
192 */ 190 */
193 if (txn && txn->ddar == txd->ddar) { 191 if (txn && txn->ddar == txd->ddar) {
194 txd = txn; 192 txd = txn;
195 sa11x0_dma_start_desc(p, txn); 193 sa11x0_dma_start_desc(p, txn);
194 } else {
195 p->txd_load = NULL;
196 return;
197 }
196 } else { 198 } else {
197 p->txd_load = NULL; 199 /* Cyclic: reset back to beginning */
198 return; 200 p->sg_load = 0;
199 } 201 }
200 } 202 }
201 203
@@ -229,21 +231,21 @@ static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
229 struct sa11x0_dma_desc *txd = p->txd_done; 231 struct sa11x0_dma_desc *txd = p->txd_done;
230 232
231 if (++p->sg_done == txd->sglen) { 233 if (++p->sg_done == txd->sglen) {
232 struct sa11x0_dma_dev *d = p->dev; 234 if (!txd->cyclic) {
233 235 vchan_cookie_complete(&txd->vd);
234 dev_vdbg(d->slave.dev, "pchan %u: txd %p[%x]: completed\n",
235 p->num, p->txd_done, p->txd_done->tx.cookie);
236
237 c->lc = txd->tx.cookie;
238 236
239 spin_lock(&d->lock); 237 p->sg_done = 0;
240 list_add_tail(&txd->node, &d->desc_complete); 238 p->txd_done = p->txd_load;
241 spin_unlock(&d->lock);
242 239
243 p->sg_done = 0; 240 if (!p->txd_done)
244 p->txd_done = p->txd_load; 241 tasklet_schedule(&p->dev->task);
242 } else {
243 if ((p->sg_done % txd->period) == 0)
244 vchan_cyclic_callback(&txd->vd);
245 245
246 tasklet_schedule(&d->task); 246 /* Cyclic: reset back to beginning */
247 p->sg_done = 0;
248 }
247 } 249 }
248 250
249 sa11x0_dma_start_sg(p, c); 251 sa11x0_dma_start_sg(p, c);
@@ -280,7 +282,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
280 if (c) { 282 if (c) {
281 unsigned long flags; 283 unsigned long flags;
282 284
283 spin_lock_irqsave(&c->lock, flags); 285 spin_lock_irqsave(&c->vc.lock, flags);
284 /* 286 /*
285 * Now that we're holding the lock, check that the vchan 287 * Now that we're holding the lock, check that the vchan
286 * really is associated with this pchan before touching the 288 * really is associated with this pchan before touching the
@@ -294,7 +296,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
294 if (dcsr & DCSR_DONEB) 296 if (dcsr & DCSR_DONEB)
295 sa11x0_dma_complete(p, c); 297 sa11x0_dma_complete(p, c);
296 } 298 }
297 spin_unlock_irqrestore(&c->lock, flags); 299 spin_unlock_irqrestore(&c->vc.lock, flags);
298 } 300 }
299 301
300 return IRQ_HANDLED; 302 return IRQ_HANDLED;
@@ -332,28 +334,15 @@ static void sa11x0_dma_tasklet(unsigned long arg)
332 struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg; 334 struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
333 struct sa11x0_dma_phy *p; 335 struct sa11x0_dma_phy *p;
334 struct sa11x0_dma_chan *c; 336 struct sa11x0_dma_chan *c;
335 struct sa11x0_dma_desc *txd, *txn;
336 LIST_HEAD(head);
337 unsigned pch, pch_alloc = 0; 337 unsigned pch, pch_alloc = 0;
338 338
339 dev_dbg(d->slave.dev, "tasklet enter\n"); 339 dev_dbg(d->slave.dev, "tasklet enter\n");
340 340
341 /* Get the completed tx descriptors */ 341 list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
342 spin_lock_irq(&d->lock); 342 spin_lock_irq(&c->vc.lock);
343 list_splice_init(&d->desc_complete, &head);
344 spin_unlock_irq(&d->lock);
345
346 list_for_each_entry(txd, &head, node) {
347 c = to_sa11x0_dma_chan(txd->tx.chan);
348
349 dev_dbg(d->slave.dev, "vchan %p: txd %p[%x] completed\n",
350 c, txd, txd->tx.cookie);
351
352 spin_lock_irq(&c->lock);
353 p = c->phy; 343 p = c->phy;
354 if (p) { 344 if (p && !p->txd_done) {
355 if (!p->txd_done) 345 sa11x0_dma_start_txd(c);
356 sa11x0_dma_start_txd(c);
357 if (!p->txd_done) { 346 if (!p->txd_done) {
358 /* No current txd associated with this channel */ 347 /* No current txd associated with this channel */
359 dev_dbg(d->slave.dev, "pchan %u: free\n", p->num); 348 dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);
@@ -363,7 +352,7 @@ static void sa11x0_dma_tasklet(unsigned long arg)
363 p->vchan = NULL; 352 p->vchan = NULL;
364 } 353 }
365 } 354 }
366 spin_unlock_irq(&c->lock); 355 spin_unlock_irq(&c->vc.lock);
367 } 356 }
368 357
369 spin_lock_irq(&d->lock); 358 spin_lock_irq(&d->lock);
@@ -380,7 +369,7 @@ static void sa11x0_dma_tasklet(unsigned long arg)
380 /* Mark this channel allocated */ 369 /* Mark this channel allocated */
381 p->vchan = c; 370 p->vchan = c;
382 371
383 dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, c); 372 dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
384 } 373 }
385 } 374 }
386 spin_unlock_irq(&d->lock); 375 spin_unlock_irq(&d->lock);
@@ -390,42 +379,18 @@ static void sa11x0_dma_tasklet(unsigned long arg)
390 p = &d->phy[pch]; 379 p = &d->phy[pch];
391 c = p->vchan; 380 c = p->vchan;
392 381
393 spin_lock_irq(&c->lock); 382 spin_lock_irq(&c->vc.lock);
394 c->phy = p; 383 c->phy = p;
395 384
396 sa11x0_dma_start_txd(c); 385 sa11x0_dma_start_txd(c);
397 spin_unlock_irq(&c->lock); 386 spin_unlock_irq(&c->vc.lock);
398 } 387 }
399 } 388 }
400 389
401 /* Now free the completed tx descriptor, and call their callbacks */
402 list_for_each_entry_safe(txd, txn, &head, node) {
403 dma_async_tx_callback callback = txd->tx.callback;
404 void *callback_param = txd->tx.callback_param;
405
406 dev_dbg(d->slave.dev, "txd %p[%x]: callback and free\n",
407 txd, txd->tx.cookie);
408
409 kfree(txd);
410
411 if (callback)
412 callback(callback_param);
413 }
414
415 dev_dbg(d->slave.dev, "tasklet exit\n"); 390 dev_dbg(d->slave.dev, "tasklet exit\n");
416} 391}
417 392
418 393
419static void sa11x0_dma_desc_free(struct sa11x0_dma_dev *d, struct list_head *head)
420{
421 struct sa11x0_dma_desc *txd, *txn;
422
423 list_for_each_entry_safe(txd, txn, head, node) {
424 dev_dbg(d->slave.dev, "txd %p: freeing\n", txd);
425 kfree(txd);
426 }
427}
428
429static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan) 394static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
430{ 395{
431 return 0; 396 return 0;
@@ -436,18 +401,12 @@ static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
436 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); 401 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
437 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); 402 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
438 unsigned long flags; 403 unsigned long flags;
439 LIST_HEAD(head);
440 404
441 spin_lock_irqsave(&c->lock, flags); 405 spin_lock_irqsave(&d->lock, flags);
442 spin_lock(&d->lock);
443 list_del_init(&c->node); 406 list_del_init(&c->node);
444 spin_unlock(&d->lock); 407 spin_unlock_irqrestore(&d->lock, flags);
445
446 list_splice_tail_init(&c->desc_submitted, &head);
447 list_splice_tail_init(&c->desc_issued, &head);
448 spin_unlock_irqrestore(&c->lock, flags);
449 408
450 sa11x0_dma_desc_free(d, &head); 409 vchan_free_chan_resources(&c->vc);
451} 410}
452 411
453static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p) 412static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
@@ -472,33 +431,47 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
472 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); 431 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
473 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); 432 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
474 struct sa11x0_dma_phy *p; 433 struct sa11x0_dma_phy *p;
475 struct sa11x0_dma_desc *txd; 434 struct virt_dma_desc *vd;
476 dma_cookie_t last_used, last_complete;
477 unsigned long flags; 435 unsigned long flags;
478 enum dma_status ret; 436 enum dma_status ret;
479 size_t bytes = 0;
480
481 last_used = c->chan.cookie;
482 last_complete = c->lc;
483 437
484 ret = dma_async_is_complete(cookie, last_complete, last_used); 438 ret = dma_cookie_status(&c->vc.chan, cookie, state);
485 if (ret == DMA_SUCCESS) { 439 if (ret == DMA_SUCCESS)
486 dma_set_tx_state(state, last_complete, last_used, 0);
487 return ret; 440 return ret;
488 }
489 441
490 spin_lock_irqsave(&c->lock, flags); 442 if (!state)
443 return c->status;
444
445 spin_lock_irqsave(&c->vc.lock, flags);
491 p = c->phy; 446 p = c->phy;
492 ret = c->status;
493 if (p) {
494 dma_addr_t addr = sa11x0_dma_pos(p);
495 447
496 dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr); 448 /*
449 * If the cookie is on our issue queue, then the residue is
450 * its total size.
451 */
452 vd = vchan_find_desc(&c->vc, cookie);
453 if (vd) {
454 state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
455 } else if (!p) {
456 state->residue = 0;
457 } else {
458 struct sa11x0_dma_desc *txd;
459 size_t bytes = 0;
497 460
498 txd = p->txd_done; 461 if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
462 txd = p->txd_done;
463 else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
464 txd = p->txd_load;
465 else
466 txd = NULL;
467
468 ret = c->status;
499 if (txd) { 469 if (txd) {
470 dma_addr_t addr = sa11x0_dma_pos(p);
500 unsigned i; 471 unsigned i;
501 472
473 dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
474
502 for (i = 0; i < txd->sglen; i++) { 475 for (i = 0; i < txd->sglen; i++) {
503 dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n", 476 dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
504 i, txd->sg[i].addr, txd->sg[i].len); 477 i, txd->sg[i].addr, txd->sg[i].len);
@@ -521,17 +494,11 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
521 bytes += txd->sg[i].len; 494 bytes += txd->sg[i].len;
522 } 495 }
523 } 496 }
524 if (txd != p->txd_load && p->txd_load) 497 state->residue = bytes;
525 bytes += p->txd_load->size;
526 }
527 list_for_each_entry(txd, &c->desc_issued, node) {
528 bytes += txd->size;
529 } 498 }
530 spin_unlock_irqrestore(&c->lock, flags); 499 spin_unlock_irqrestore(&c->vc.lock, flags);
531
532 dma_set_tx_state(state, last_complete, last_used, bytes);
533 500
534 dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes); 501 dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue);
535 502
536 return ret; 503 return ret;
537} 504}
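With the rework above, everything is routed through dma_cookie_status() and vchan_find_desc(), so a client reads residue with the generic helper. A minimal sketch, assuming a channel and cookie obtained in the usual way:

	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	/*
	 * state.residue is the descriptor's full size while it still sits
	 * on the issued list, the remaining byte count once it is running
	 * on a physical channel, and 0 once the cookie has completed.
	 */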
@@ -547,40 +514,20 @@ static void sa11x0_dma_issue_pending(struct dma_chan *chan)
547 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); 514 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
548 unsigned long flags; 515 unsigned long flags;
549 516
550 spin_lock_irqsave(&c->lock, flags); 517 spin_lock_irqsave(&c->vc.lock, flags);
551 list_splice_tail_init(&c->desc_submitted, &c->desc_issued); 518 if (vchan_issue_pending(&c->vc)) {
552 if (!list_empty(&c->desc_issued)) { 519 if (!c->phy) {
553 spin_lock(&d->lock); 520 spin_lock(&d->lock);
554 if (!c->phy && list_empty(&c->node)) { 521 if (list_empty(&c->node)) {
555 list_add_tail(&c->node, &d->chan_pending); 522 list_add_tail(&c->node, &d->chan_pending);
556 tasklet_schedule(&d->task); 523 tasklet_schedule(&d->task);
557 dev_dbg(d->slave.dev, "vchan %p: issued\n", c); 524 dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
525 }
526 spin_unlock(&d->lock);
558 } 527 }
559 spin_unlock(&d->lock);
560 } else 528 } else
561 dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", c); 529 dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
562 spin_unlock_irqrestore(&c->lock, flags); 530 spin_unlock_irqrestore(&c->vc.lock, flags);
563}
564
565static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx)
566{
567 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(tx->chan);
568 struct sa11x0_dma_desc *txd = to_sa11x0_dma_tx(tx);
569 unsigned long flags;
570
571 spin_lock_irqsave(&c->lock, flags);
572 c->chan.cookie += 1;
573 if (c->chan.cookie < 0)
574 c->chan.cookie = 1;
575 txd->tx.cookie = c->chan.cookie;
576
577 list_add_tail(&txd->node, &c->desc_submitted);
578 spin_unlock_irqrestore(&c->lock, flags);
579
580 dev_dbg(tx->chan->device->dev, "vchan %p: txd %p[%x]: submitted\n",
581 c, txd, txd->tx.cookie);
582
583 return txd->tx.cookie;
584} 531}
585 532
586static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( 533static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
@@ -596,7 +543,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
596 /* SA11x0 channels can only operate in their native direction */ 543 /* SA11x0 channels can only operate in their native direction */
597 if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { 544 if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
598 dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n", 545 dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
599 c, c->ddar, dir); 546 &c->vc, c->ddar, dir);
600 return NULL; 547 return NULL;
601 } 548 }
602 549
@@ -612,14 +559,14 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
612 j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1; 559 j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
613 if (addr & DMA_ALIGN) { 560 if (addr & DMA_ALIGN) {
614 dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n", 561 dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
615 c, addr); 562 &c->vc, addr);
616 return NULL; 563 return NULL;
617 } 564 }
618 } 565 }
619 566
620 txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC); 567 txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
621 if (!txd) { 568 if (!txd) {
622 dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", c); 569 dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
623 return NULL; 570 return NULL;
624 } 571 }
625 572
@@ -655,17 +602,73 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
655 } while (len); 602 } while (len);
656 } 603 }
657 604
658 dma_async_tx_descriptor_init(&txd->tx, &c->chan);
659 txd->tx.flags = flags;
660 txd->tx.tx_submit = sa11x0_dma_tx_submit;
661 txd->ddar = c->ddar; 605 txd->ddar = c->ddar;
662 txd->size = size; 606 txd->size = size;
663 txd->sglen = j; 607 txd->sglen = j;
664 608
665 dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n", 609 dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n",
666 c, txd, txd->size, txd->sglen); 610 &c->vc, &txd->vd, txd->size, txd->sglen);
667 611
668 return &txd->tx; 612 return vchan_tx_prep(&c->vc, &txd->vd, flags);
613}
614
615static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
616 struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
617 enum dma_transfer_direction dir, void *context)
618{
619 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
620 struct sa11x0_dma_desc *txd;
621 unsigned i, j, k, sglen, sgperiod;
622
623 /* SA11x0 channels can only operate in their native direction */
624 if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
625 dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
626 &c->vc, c->ddar, dir);
627 return NULL;
628 }
629
630 sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
631 sglen = size * sgperiod / period;
632
633 /* Do not allow zero-sized txds */
634 if (sglen == 0)
635 return NULL;
636
637 txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC);
638 if (!txd) {
639 dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
640 return NULL;
641 }
642
643 for (i = k = 0; i < size / period; i++) {
644 size_t tlen, len = period;
645
646 for (j = 0; j < sgperiod; j++, k++) {
647 tlen = len;
648
649 if (tlen > DMA_MAX_SIZE) {
650 unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
651 tlen = (tlen / mult) & ~DMA_ALIGN;
652 }
653
654 txd->sg[k].addr = addr;
655 txd->sg[k].len = tlen;
656 addr += tlen;
657 len -= tlen;
658 }
659
660 WARN_ON(len != 0);
661 }
662
663 WARN_ON(k != sglen);
664
665 txd->ddar = c->ddar;
666 txd->size = size;
667 txd->sglen = sglen;
668 txd->cyclic = 1;
669 txd->period = sgperiod;
670
671 return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
669} 672}
670 673
671static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg) 674static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
@@ -695,8 +698,8 @@ static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_c
695 if (maxburst == 8) 698 if (maxburst == 8)
696 ddar |= DDAR_BS; 699 ddar |= DDAR_BS;
697 700
698 dev_dbg(c->chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n", 701 dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
699 c, addr, width, maxburst); 702 &c->vc, addr, width, maxburst);
700 703
701 c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6; 704 c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;
702 705
@@ -718,16 +721,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
718 return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg); 721 return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);
719 722
720 case DMA_TERMINATE_ALL: 723 case DMA_TERMINATE_ALL:
721 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", c); 724 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
722 /* Clear the tx descriptor lists */ 725 /* Clear the tx descriptor lists */
723 spin_lock_irqsave(&c->lock, flags); 726 spin_lock_irqsave(&c->vc.lock, flags);
724 list_splice_tail_init(&c->desc_submitted, &head); 727 vchan_get_all_descriptors(&c->vc, &head);
725 list_splice_tail_init(&c->desc_issued, &head);
726 728
727 p = c->phy; 729 p = c->phy;
728 if (p) { 730 if (p) {
729 struct sa11x0_dma_desc *txd, *txn;
730
731 dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num); 731 dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
732 /* vchan is assigned to a pchan - stop the channel */ 732 /* vchan is assigned to a pchan - stop the channel */
733 writel(DCSR_RUN | DCSR_IE | 733 writel(DCSR_RUN | DCSR_IE |
@@ -735,17 +735,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
735 DCSR_STRTB | DCSR_DONEB, 735 DCSR_STRTB | DCSR_DONEB,
736 p->base + DMA_DCSR_C); 736 p->base + DMA_DCSR_C);
737 737
738 list_for_each_entry_safe(txd, txn, &d->desc_complete, node)
739 if (txd->tx.chan == &c->chan)
740 list_move(&txd->node, &head);
741
742 if (p->txd_load) { 738 if (p->txd_load) {
743 if (p->txd_load != p->txd_done) 739 if (p->txd_load != p->txd_done)
744 list_add_tail(&p->txd_load->node, &head); 740 list_add_tail(&p->txd_load->vd.node, &head);
745 p->txd_load = NULL; 741 p->txd_load = NULL;
746 } 742 }
747 if (p->txd_done) { 743 if (p->txd_done) {
748 list_add_tail(&p->txd_done->node, &head); 744 list_add_tail(&p->txd_done->vd.node, &head);
749 p->txd_done = NULL; 745 p->txd_done = NULL;
750 } 746 }
751 c->phy = NULL; 747 c->phy = NULL;
@@ -754,14 +750,14 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
754 spin_unlock(&d->lock); 750 spin_unlock(&d->lock);
755 tasklet_schedule(&d->task); 751 tasklet_schedule(&d->task);
756 } 752 }
757 spin_unlock_irqrestore(&c->lock, flags); 753 spin_unlock_irqrestore(&c->vc.lock, flags);
758 sa11x0_dma_desc_free(d, &head); 754 vchan_dma_desc_free_list(&c->vc, &head);
759 ret = 0; 755 ret = 0;
760 break; 756 break;
761 757
762 case DMA_PAUSE: 758 case DMA_PAUSE:
763 dev_dbg(d->slave.dev, "vchan %p: pause\n", c); 759 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
764 spin_lock_irqsave(&c->lock, flags); 760 spin_lock_irqsave(&c->vc.lock, flags);
765 if (c->status == DMA_IN_PROGRESS) { 761 if (c->status == DMA_IN_PROGRESS) {
766 c->status = DMA_PAUSED; 762 c->status = DMA_PAUSED;
767 763
@@ -774,26 +770,26 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
774 spin_unlock(&d->lock); 770 spin_unlock(&d->lock);
775 } 771 }
776 } 772 }
777 spin_unlock_irqrestore(&c->lock, flags); 773 spin_unlock_irqrestore(&c->vc.lock, flags);
778 ret = 0; 774 ret = 0;
779 break; 775 break;
780 776
781 case DMA_RESUME: 777 case DMA_RESUME:
782 dev_dbg(d->slave.dev, "vchan %p: resume\n", c); 778 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
783 spin_lock_irqsave(&c->lock, flags); 779 spin_lock_irqsave(&c->vc.lock, flags);
784 if (c->status == DMA_PAUSED) { 780 if (c->status == DMA_PAUSED) {
785 c->status = DMA_IN_PROGRESS; 781 c->status = DMA_IN_PROGRESS;
786 782
787 p = c->phy; 783 p = c->phy;
788 if (p) { 784 if (p) {
789 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S); 785 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
790 } else if (!list_empty(&c->desc_issued)) { 786 } else if (!list_empty(&c->vc.desc_issued)) {
791 spin_lock(&d->lock); 787 spin_lock(&d->lock);
792 list_add_tail(&c->node, &d->chan_pending); 788 list_add_tail(&c->node, &d->chan_pending);
793 spin_unlock(&d->lock); 789 spin_unlock(&d->lock);
794 } 790 }
795 } 791 }
796 spin_unlock_irqrestore(&c->lock, flags); 792 spin_unlock_irqrestore(&c->vc.lock, flags);
797 ret = 0; 793 ret = 0;
798 break; 794 break;
799 795
@@ -853,15 +849,13 @@ static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev,
853 return -ENOMEM; 849 return -ENOMEM;
854 } 850 }
855 851
856 c->chan.device = dmadev;
857 c->status = DMA_IN_PROGRESS; 852 c->status = DMA_IN_PROGRESS;
858 c->ddar = chan_desc[i].ddar; 853 c->ddar = chan_desc[i].ddar;
859 c->name = chan_desc[i].name; 854 c->name = chan_desc[i].name;
860 spin_lock_init(&c->lock);
861 INIT_LIST_HEAD(&c->desc_submitted);
862 INIT_LIST_HEAD(&c->desc_issued);
863 INIT_LIST_HEAD(&c->node); 855 INIT_LIST_HEAD(&c->node);
864 list_add_tail(&c->chan.device_node, &dmadev->channels); 856
857 c->vc.desc_free = sa11x0_dma_free_desc;
858 vchan_init(&c->vc, dmadev);
865 } 859 }
866 860
867 return dma_async_device_register(dmadev); 861 return dma_async_device_register(dmadev);
@@ -890,8 +884,9 @@ static void sa11x0_dma_free_channels(struct dma_device *dmadev)
890{ 884{
891 struct sa11x0_dma_chan *c, *cn; 885 struct sa11x0_dma_chan *c, *cn;
892 886
893 list_for_each_entry_safe(c, cn, &dmadev->channels, chan.device_node) { 887 list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
894 list_del(&c->chan.device_node); 888 list_del(&c->vc.chan.device_node);
889 tasklet_kill(&c->vc.task);
895 kfree(c); 890 kfree(c);
896 } 891 }
897} 892}
@@ -915,7 +910,6 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
915 910
916 spin_lock_init(&d->lock); 911 spin_lock_init(&d->lock);
917 INIT_LIST_HEAD(&d->chan_pending); 912 INIT_LIST_HEAD(&d->chan_pending);
918 INIT_LIST_HEAD(&d->desc_complete);
919 913
920 d->base = ioremap(res->start, resource_size(res)); 914 d->base = ioremap(res->start, resource_size(res));
921 if (!d->base) { 915 if (!d->base) {
@@ -947,7 +941,9 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
947 } 941 }
948 942
949 dma_cap_set(DMA_SLAVE, d->slave.cap_mask); 943 dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
944 dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
950 d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg; 945 d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
946 d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
951 ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev); 947 ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
952 if (ret) { 948 if (ret) {
953 dev_warn(d->slave.dev, "failed to register slave async device: %d\n", 949 dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
new file mode 100644
index 000000000000..6f80432a3f0a
--- /dev/null
+++ b/drivers/dma/virt-dma.c
@@ -0,0 +1,123 @@
1/*
2 * Virtual DMA channel support for DMAengine
3 *
4 * Copyright (C) 2012 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/device.h>
11#include <linux/dmaengine.h>
12#include <linux/module.h>
13#include <linux/spinlock.h>
14
15#include "virt-dma.h"
16
17static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
18{
19 return container_of(tx, struct virt_dma_desc, tx);
20}
21
22dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
23{
24 struct virt_dma_chan *vc = to_virt_chan(tx->chan);
25 struct virt_dma_desc *vd = to_virt_desc(tx);
26 unsigned long flags;
27 dma_cookie_t cookie;
28
29 spin_lock_irqsave(&vc->lock, flags);
30 cookie = dma_cookie_assign(tx);
31
32 list_add_tail(&vd->node, &vc->desc_submitted);
33 spin_unlock_irqrestore(&vc->lock, flags);
34
35 dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
36 vc, vd, cookie);
37
38 return cookie;
39}
40EXPORT_SYMBOL_GPL(vchan_tx_submit);
41
42struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
43 dma_cookie_t cookie)
44{
45 struct virt_dma_desc *vd;
46
47 list_for_each_entry(vd, &vc->desc_issued, node)
48 if (vd->tx.cookie == cookie)
49 return vd;
50
51 return NULL;
52}
53EXPORT_SYMBOL_GPL(vchan_find_desc);
54
55/*
56 * This tasklet completes descriptors: it calls their callbacks and frees
57 * them; a cyclic descriptor's period callback also runs here, without freeing.
58 */
59static void vchan_complete(unsigned long arg)
60{
61 struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
62 struct virt_dma_desc *vd;
63 dma_async_tx_callback cb = NULL;
64 void *cb_data = NULL;
65 LIST_HEAD(head);
66
67 spin_lock_irq(&vc->lock);
68 list_splice_tail_init(&vc->desc_completed, &head);
69 vd = vc->cyclic;
70 if (vd) {
71 vc->cyclic = NULL;
72 cb = vd->tx.callback;
73 cb_data = vd->tx.callback_param;
74 }
75 spin_unlock_irq(&vc->lock);
76
77 if (cb)
78 cb(cb_data);
79
80 while (!list_empty(&head)) {
81 vd = list_first_entry(&head, struct virt_dma_desc, node);
82 cb = vd->tx.callback;
83 cb_data = vd->tx.callback_param;
84
85 list_del(&vd->node);
86
87 vc->desc_free(vd);
88
89 if (cb)
90 cb(cb_data);
91 }
92}
93
94void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
95{
96 while (!list_empty(head)) {
97 struct virt_dma_desc *vd = list_first_entry(head,
98 struct virt_dma_desc, node);
99 list_del(&vd->node);
100 dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
101 vc->desc_free(vd);
102 }
103}
104EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
105
106void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
107{
108 dma_cookie_init(&vc->chan);
109
110 spin_lock_init(&vc->lock);
111 INIT_LIST_HEAD(&vc->desc_submitted);
112 INIT_LIST_HEAD(&vc->desc_issued);
113 INIT_LIST_HEAD(&vc->desc_completed);
114
115 tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);
116
117 vc->chan.device = dmadev;
118 list_add_tail(&vc->chan.device_node, &dmadev->channels);
119}
120EXPORT_SYMBOL_GPL(vchan_init);
121
122MODULE_AUTHOR("Russell King");
123MODULE_LICENSE("GPL");
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
new file mode 100644
index 000000000000..85c19d63f9fb
--- /dev/null
+++ b/drivers/dma/virt-dma.h
@@ -0,0 +1,152 @@
1/*
2 * Virtual DMA channel support for DMAengine
3 *
4 * Copyright (C) 2012 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef VIRT_DMA_H
11#define VIRT_DMA_H
12
13#include <linux/dmaengine.h>
14#include <linux/interrupt.h>
15
16#include "dmaengine.h"
17
18struct virt_dma_desc {
19 struct dma_async_tx_descriptor tx;
20 /* protected by vc.lock */
21 struct list_head node;
22};
23
24struct virt_dma_chan {
25 struct dma_chan chan;
26 struct tasklet_struct task;
27 void (*desc_free)(struct virt_dma_desc *);
28
29 spinlock_t lock;
30
31 /* protected by vc.lock */
32 struct list_head desc_submitted;
33 struct list_head desc_issued;
34 struct list_head desc_completed;
35
36 struct virt_dma_desc *cyclic;
37};
38
39static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
40{
41 return container_of(chan, struct virt_dma_chan, chan);
42}
43
44void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
45void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
46struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
47
48/**
49 * vchan_tx_prep - prepare a descriptor
50 * vc: virtual channel allocating this descriptor
51 * vd: virtual descriptor to prepare
52 * tx_flags: flags argument passed in to prepare function
53 */
54static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
55 struct virt_dma_desc *vd, unsigned long tx_flags)
56{
57 extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
58
59 dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
60 vd->tx.flags = tx_flags;
61 vd->tx.tx_submit = vchan_tx_submit;
62
63 return &vd->tx;
64}
65
66/**
67 * vchan_issue_pending - move submitted descriptors to issued list
68 * vc: virtual channel to update
69 *
70 * vc.lock must be held by caller
71 */
72static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
73{
74 list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
75 return !list_empty(&vc->desc_issued);
76}
77
78/**
79 * vchan_cookie_complete - report completion of a descriptor
80 * vd: virtual descriptor to update
81 *
82 * vc.lock must be held by caller
83 */
84static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
85{
86 struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
87
88 dma_cookie_complete(&vd->tx);
89 dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
90 vd, vd->tx.cookie);
91 list_add_tail(&vd->node, &vc->desc_completed);
92
93 tasklet_schedule(&vc->task);
94}
95
96/**
97 * vchan_cyclic_callback - report the completion of a period
98 * vd: virtual descriptor
99 */
100static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
101{
102 struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
103
104 vc->cyclic = vd;
105 tasklet_schedule(&vc->task);
106}
107
108/**
109 * vchan_next_desc - peek at the next descriptor to be processed
110 * vc: virtual channel to obtain descriptor from
111 *
112 * vc.lock must be held by caller
113 */
114static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
115{
116 if (list_empty(&vc->desc_issued))
117 return NULL;
118
119 return list_first_entry(&vc->desc_issued, struct virt_dma_desc, node);
120}
121
122/**
123 * vchan_get_all_descriptors - obtain all submitted, issued and completed descriptors
124 * vc: virtual channel to get descriptors from
125 * head: list of descriptors found
126 *
127 * vc.lock must be held by caller
128 *
129 * Removes all submitted, issued and completed descriptors from the internal lists, and
130 * provides a list of all descriptors found
131 */
132static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
133 struct list_head *head)
134{
135 list_splice_tail_init(&vc->desc_submitted, head);
136 list_splice_tail_init(&vc->desc_issued, head);
137 list_splice_tail_init(&vc->desc_completed, head);
138}
139
140static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
141{
142 unsigned long flags;
143 LIST_HEAD(head);
144
145 spin_lock_irqsave(&vc->lock, flags);
146 vchan_get_all_descriptors(vc, &head);
147 spin_unlock_irqrestore(&vc->lock, flags);
148
149 vchan_dma_desc_free_list(vc, &head);
150}
151
152#endif
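Taken together, the two new files define the whole helper contract. A rough sketch of the boilerplate a driver adopting them ends up with — the foo_* names are invented, but the sa11x0 and OMAP conversions in this series follow the same shape:

	#include <linux/slab.h>
	#include "virt-dma.h"

	struct foo_desc {
		struct virt_dma_desc vd;	/* embeds tx, cookie and list node */
		/* driver-specific transfer state follows */
	};

	struct foo_chan {
		struct virt_dma_chan vc;	/* embeds lock, lists and tasklet */
		/* driver-specific channel state follows */
	};

	static void foo_desc_free(struct virt_dma_desc *vd)
	{
		kfree(container_of(vd, struct foo_desc, vd));
	}

	/* at channel setup: */
	c->vc.desc_free = foo_desc_free;	/* called from the completion tasklet */
	vchan_init(&c->vc, dmadev);

	/* prep routines end with: */
	return vchan_tx_prep(&c->vc, &d->vd, flags);

	/* completion IRQ, with vc.lock held: */
	vchan_cookie_complete(&d->vd);	/* or vchan_cyclic_callback() per period */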
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 3e8dcf8d2e05..50e08f03aa65 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -17,10 +17,12 @@
17#include <linux/ioport.h> 17#include <linux/ioport.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/dmaengine.h>
20#include <linux/dma-mapping.h> 21#include <linux/dma-mapping.h>
21#include <linux/delay.h> 22#include <linux/delay.h>
22#include <linux/spinlock.h> 23#include <linux/spinlock.h>
23#include <linux/timer.h> 24#include <linux/timer.h>
25#include <linux/omap-dma.h>
24#include <linux/mmc/host.h> 26#include <linux/mmc/host.h>
25#include <linux/mmc/card.h> 27#include <linux/mmc/card.h>
26#include <linux/clk.h> 28#include <linux/clk.h>
@@ -128,6 +130,10 @@ struct mmc_omap_host {
128 unsigned char id; /* 16xx chips have 2 MMC blocks */ 130 unsigned char id; /* 16xx chips have 2 MMC blocks */
129 struct clk * iclk; 131 struct clk * iclk;
130 struct clk * fclk; 132 struct clk * fclk;
133 struct dma_chan *dma_rx;
134 u32 dma_rx_burst;
135 struct dma_chan *dma_tx;
136 u32 dma_tx_burst;
131 struct resource *mem_res; 137 struct resource *mem_res;
132 void __iomem *virt_base; 138 void __iomem *virt_base;
133 unsigned int phys_base; 139 unsigned int phys_base;
@@ -153,12 +159,8 @@ struct mmc_omap_host {
153 159
154 unsigned use_dma:1; 160 unsigned use_dma:1;
155 unsigned brs_received:1, dma_done:1; 161 unsigned brs_received:1, dma_done:1;
156 unsigned dma_is_read:1;
157 unsigned dma_in_use:1; 162 unsigned dma_in_use:1;
158 int dma_ch;
159 spinlock_t dma_lock; 163 spinlock_t dma_lock;
160 struct timer_list dma_timer;
161 unsigned dma_len;
162 164
163 struct mmc_omap_slot *slots[OMAP_MMC_MAX_SLOTS]; 165 struct mmc_omap_slot *slots[OMAP_MMC_MAX_SLOTS];
164 struct mmc_omap_slot *current_slot; 166 struct mmc_omap_slot *current_slot;
@@ -406,18 +408,25 @@ mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
406 int abort) 408 int abort)
407{ 409{
408 enum dma_data_direction dma_data_dir; 410 enum dma_data_direction dma_data_dir;
411 struct device *dev = mmc_dev(host->mmc);
412 struct dma_chan *c;
409 413
410 BUG_ON(host->dma_ch < 0); 414 if (data->flags & MMC_DATA_WRITE) {
411 if (data->error)
412 omap_stop_dma(host->dma_ch);
413 /* Release DMA channel lazily */
414 mod_timer(&host->dma_timer, jiffies + HZ);
415 if (data->flags & MMC_DATA_WRITE)
416 dma_data_dir = DMA_TO_DEVICE; 415 dma_data_dir = DMA_TO_DEVICE;
417 else 416 c = host->dma_tx;
417 } else {
418 dma_data_dir = DMA_FROM_DEVICE; 418 dma_data_dir = DMA_FROM_DEVICE;
419 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len, 419 c = host->dma_rx;
420 dma_data_dir); 420 }
421 if (c) {
422 if (data->error) {
423 dmaengine_terminate_all(c);
424 /* Claim nothing transferred on error... */
425 data->bytes_xfered = 0;
426 }
427 dev = c->device->dev;
428 }
429 dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir);
421} 430}
422 431
423static void mmc_omap_send_stop_work(struct work_struct *work) 432static void mmc_omap_send_stop_work(struct work_struct *work)
@@ -525,16 +534,6 @@ mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
525} 534}
526 535
527static void 536static void
528mmc_omap_dma_timer(unsigned long data)
529{
530 struct mmc_omap_host *host = (struct mmc_omap_host *) data;
531
532 BUG_ON(host->dma_ch < 0);
533 omap_free_dma(host->dma_ch);
534 host->dma_ch = -1;
535}
536
537static void
538mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data) 537mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
539{ 538{
540 unsigned long flags; 539 unsigned long flags;
@@ -891,159 +890,15 @@ static void mmc_omap_cover_handler(unsigned long param)
891 jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY)); 890 jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY));
892} 891}
893 892
894/* Prepare to transfer the next segment of a scatterlist */ 893static void mmc_omap_dma_callback(void *priv)
895static void
896mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
897{ 894{
898 int dma_ch = host->dma_ch; 895 struct mmc_omap_host *host = priv;
899 unsigned long data_addr; 896 struct mmc_data *data = host->data;
900 u16 buf, frame;
901 u32 count;
902 struct scatterlist *sg = &data->sg[host->sg_idx];
903 int src_port = 0;
904 int dst_port = 0;
905 int sync_dev = 0;
906
907 data_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
908 frame = data->blksz;
909 count = sg_dma_len(sg);
910
911 if ((data->blocks == 1) && (count > data->blksz))
912 count = frame;
913
914 host->dma_len = count;
915
916 /* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
917 * Use 16 or 32 word frames when the blocksize is at least that large.
918 * Blocksize is usually 512 bytes; but not for some SD reads.
919 */
920 if (cpu_is_omap15xx() && frame > 32)
921 frame = 32;
922 else if (frame > 64)
923 frame = 64;
924 count /= frame;
925 frame >>= 1;
926
927 if (!(data->flags & MMC_DATA_WRITE)) {
928 buf = 0x800f | ((frame - 1) << 8);
929
930 if (cpu_class_is_omap1()) {
931 src_port = OMAP_DMA_PORT_TIPB;
932 dst_port = OMAP_DMA_PORT_EMIFF;
933 }
934 if (cpu_is_omap24xx())
935 sync_dev = OMAP24XX_DMA_MMC1_RX;
936
937 omap_set_dma_src_params(dma_ch, src_port,
938 OMAP_DMA_AMODE_CONSTANT,
939 data_addr, 0, 0);
940 omap_set_dma_dest_params(dma_ch, dst_port,
941 OMAP_DMA_AMODE_POST_INC,
942 sg_dma_address(sg), 0, 0);
943 omap_set_dma_dest_data_pack(dma_ch, 1);
944 omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
945 } else {
946 buf = 0x0f80 | ((frame - 1) << 0);
947
948 if (cpu_class_is_omap1()) {
949 src_port = OMAP_DMA_PORT_EMIFF;
950 dst_port = OMAP_DMA_PORT_TIPB;
951 }
952 if (cpu_is_omap24xx())
953 sync_dev = OMAP24XX_DMA_MMC1_TX;
954
955 omap_set_dma_dest_params(dma_ch, dst_port,
956 OMAP_DMA_AMODE_CONSTANT,
957 data_addr, 0, 0);
958 omap_set_dma_src_params(dma_ch, src_port,
959 OMAP_DMA_AMODE_POST_INC,
960 sg_dma_address(sg), 0, 0);
961 omap_set_dma_src_data_pack(dma_ch, 1);
962 omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
963 }
964 897
965 /* Max limit for DMA frame count is 0xffff */ 898 /* If we got to the end of DMA, assume everything went well */
966 BUG_ON(count > 0xffff); 899 data->bytes_xfered += data->blocks * data->blksz;
967 900
968 OMAP_MMC_WRITE(host, BUF, buf); 901 mmc_omap_dma_done(host, data);
969 omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
970 frame, count, OMAP_DMA_SYNC_FRAME,
971 sync_dev, 0);
972}
973
974/* A scatterlist segment completed */
975static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
976{
977 struct mmc_omap_host *host = (struct mmc_omap_host *) data;
978 struct mmc_data *mmcdat = host->data;
979
980 if (unlikely(host->dma_ch < 0)) {
981 dev_err(mmc_dev(host->mmc),
982 "DMA callback while DMA not enabled\n");
983 return;
984 }
985 /* FIXME: We really should do something to _handle_ the errors */
986 if (ch_status & OMAP1_DMA_TOUT_IRQ) {
987 dev_err(mmc_dev(host->mmc),"DMA timeout\n");
988 return;
989 }
990 if (ch_status & OMAP_DMA_DROP_IRQ) {
991 dev_err(mmc_dev(host->mmc), "DMA sync error\n");
992 return;
993 }
994 if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
995 return;
996 }
997 mmcdat->bytes_xfered += host->dma_len;
998 host->sg_idx++;
999 if (host->sg_idx < host->sg_len) {
1000 mmc_omap_prepare_dma(host, host->data);
1001 omap_start_dma(host->dma_ch);
1002 } else
1003 mmc_omap_dma_done(host, host->data);
1004}
1005
1006static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
1007{
1008 const char *dma_dev_name;
1009 int sync_dev, dma_ch, is_read, r;
1010
1011 is_read = !(data->flags & MMC_DATA_WRITE);
1012 del_timer_sync(&host->dma_timer);
1013 if (host->dma_ch >= 0) {
1014 if (is_read == host->dma_is_read)
1015 return 0;
1016 omap_free_dma(host->dma_ch);
1017 host->dma_ch = -1;
1018 }
1019
1020 if (is_read) {
1021 if (host->id == 0) {
1022 sync_dev = OMAP_DMA_MMC_RX;
1023 dma_dev_name = "MMC1 read";
1024 } else {
1025 sync_dev = OMAP_DMA_MMC2_RX;
1026 dma_dev_name = "MMC2 read";
1027 }
1028 } else {
1029 if (host->id == 0) {
1030 sync_dev = OMAP_DMA_MMC_TX;
1031 dma_dev_name = "MMC1 write";
1032 } else {
1033 sync_dev = OMAP_DMA_MMC2_TX;
1034 dma_dev_name = "MMC2 write";
1035 }
1036 }
1037 r = omap_request_dma(sync_dev, dma_dev_name, mmc_omap_dma_cb,
1038 host, &dma_ch);
1039 if (r != 0) {
1040 dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r);
1041 return r;
1042 }
1043 host->dma_ch = dma_ch;
1044 host->dma_is_read = is_read;
1045
1046 return 0;
1047} 902}
1048 903
1049static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req) 904static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
@@ -1118,33 +973,85 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
1118 973
1119 host->sg_idx = 0; 974 host->sg_idx = 0;
1120 if (use_dma) { 975 if (use_dma) {
1121 if (mmc_omap_get_dma_channel(host, data) == 0) { 976 enum dma_data_direction dma_data_dir;
1122 enum dma_data_direction dma_data_dir; 977 struct dma_async_tx_descriptor *tx;
1123 978 struct dma_chan *c;
1124 if (data->flags & MMC_DATA_WRITE) 979 u32 burst, *bp;
1125 dma_data_dir = DMA_TO_DEVICE; 980 u16 buf;
1126 else 981
1127 dma_data_dir = DMA_FROM_DEVICE; 982 /*
1128 983 * FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx
1129 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, 984 * and 24xx. Use 16 or 32 word frames when the
1130 sg_len, dma_data_dir); 985 * blocksize is at least that large. Blocksize is
1131 host->total_bytes_left = 0; 986 * usually 512 bytes; but not for some SD reads.
1132 mmc_omap_prepare_dma(host, req->data); 987 */
1133 host->brs_received = 0; 988 burst = cpu_is_omap15xx() ? 32 : 64;
1134 host->dma_done = 0; 989 if (burst > data->blksz)
1135 host->dma_in_use = 1; 990 burst = data->blksz;
1136 } else 991
1137 use_dma = 0; 992 burst >>= 1;
993
994 if (data->flags & MMC_DATA_WRITE) {
995 c = host->dma_tx;
996 bp = &host->dma_tx_burst;
997 buf = 0x0f80 | (burst - 1) << 0;
998 dma_data_dir = DMA_TO_DEVICE;
999 } else {
1000 c = host->dma_rx;
1001 bp = &host->dma_rx_burst;
1002 buf = 0x800f | (burst - 1) << 8;
1003 dma_data_dir = DMA_FROM_DEVICE;
1004 }
1005
1006 if (!c)
1007 goto use_pio;
1008
1009 /* Only reconfigure if we have a different burst size */
1010 if (*bp != burst) {
1011 struct dma_slave_config cfg;
1012
1013 cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
1014 cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
1015 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
1016 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
1017 cfg.src_maxburst = burst;
1018 cfg.dst_maxburst = burst;
1019
1020 if (dmaengine_slave_config(c, &cfg))
1021 goto use_pio;
1022
1023 *bp = burst;
1024 }
1025
1026 host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len,
1027 dma_data_dir);
1028 if (host->sg_len == 0)
1029 goto use_pio;
1030
1031 tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len,
1032 data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
1033 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1034 if (!tx)
1035 goto use_pio;
1036
1037 OMAP_MMC_WRITE(host, BUF, buf);
1038
1039 tx->callback = mmc_omap_dma_callback;
1040 tx->callback_param = host;
1041 dmaengine_submit(tx);
1042 host->brs_received = 0;
1043 host->dma_done = 0;
1044 host->dma_in_use = 1;
1045 return;
1138 } 1046 }
1047 use_pio:
1139 1048
1140 /* Revert to PIO? */ 1049 /* Revert to PIO? */
1141 if (!use_dma) { 1050 OMAP_MMC_WRITE(host, BUF, 0x1f1f);
1142 OMAP_MMC_WRITE(host, BUF, 0x1f1f); 1051 host->total_bytes_left = data->blocks * block_size;
1143 host->total_bytes_left = data->blocks * block_size; 1052 host->sg_len = sg_len;
1144 host->sg_len = sg_len; 1053 mmc_omap_sg_to_buf(host);
1145 mmc_omap_sg_to_buf(host); 1054 host->dma_in_use = 0;
1146 host->dma_in_use = 0;
1147 }
1148} 1055}
1149 1056
1150static void mmc_omap_start_request(struct mmc_omap_host *host, 1057static void mmc_omap_start_request(struct mmc_omap_host *host,
@@ -1157,8 +1064,12 @@ static void mmc_omap_start_request(struct mmc_omap_host *host,
1157 /* only touch fifo AFTER the controller readies it */ 1064 /* only touch fifo AFTER the controller readies it */
1158 mmc_omap_prepare_data(host, req); 1065 mmc_omap_prepare_data(host, req);
1159 mmc_omap_start_command(host, req->cmd); 1066 mmc_omap_start_command(host, req->cmd);
1160 if (host->dma_in_use) 1067 if (host->dma_in_use) {
1161 omap_start_dma(host->dma_ch); 1068 struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ?
1069 host->dma_tx : host->dma_rx;
1070
1071 dma_async_issue_pending(c);
1072 }
1162} 1073}
1163 1074
1164static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req) 1075static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
@@ -1400,6 +1311,8 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
1400 struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; 1311 struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
1401 struct mmc_omap_host *host = NULL; 1312 struct mmc_omap_host *host = NULL;
1402 struct resource *res; 1313 struct resource *res;
1314 dma_cap_mask_t mask;
1315 unsigned sig;
1403 int i, ret = 0; 1316 int i, ret = 0;
1404 int irq; 1317 int irq;
1405 1318
@@ -1439,7 +1352,6 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
1439 setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host); 1352 setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);
1440 1353
1441 spin_lock_init(&host->dma_lock); 1354 spin_lock_init(&host->dma_lock);
1442 setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host);
1443 spin_lock_init(&host->slot_lock); 1355 spin_lock_init(&host->slot_lock);
1444 init_waitqueue_head(&host->slot_wq); 1356 init_waitqueue_head(&host->slot_wq);
1445 1357
@@ -1450,11 +1362,7 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
1450 host->id = pdev->id; 1362 host->id = pdev->id;
1451 host->mem_res = res; 1363 host->mem_res = res;
1452 host->irq = irq; 1364 host->irq = irq;
1453
1454 host->use_dma = 1; 1365 host->use_dma = 1;
1455 host->dev->dma_mask = &pdata->dma_mask;
1456 host->dma_ch = -1;
1457
1458 host->irq = irq; 1366 host->irq = irq;
1459 host->phys_base = host->mem_res->start; 1367 host->phys_base = host->mem_res->start;
1460 host->virt_base = ioremap(res->start, resource_size(res)); 1368 host->virt_base = ioremap(res->start, resource_size(res));
@@ -1474,9 +1382,48 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
1474 goto err_free_iclk; 1382 goto err_free_iclk;
1475 } 1383 }
1476 1384
1385 dma_cap_zero(mask);
1386 dma_cap_set(DMA_SLAVE, mask);
1387
1388 host->dma_tx_burst = -1;
1389 host->dma_rx_burst = -1;
1390
1391 if (cpu_is_omap24xx())
1392 sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX;
1393 else
1394 sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX;
1395 host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
1396#if 0
1397 if (!host->dma_tx) {
1398 dev_err(host->dev, "unable to obtain TX DMA engine channel %u\n",
1399 sig);
1400 goto err_dma;
1401 }
1402#else
1403 if (!host->dma_tx)
1404 dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
1405 sig);
1406#endif
1407 if (cpu_is_omap24xx())
1408 sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX;
1409 else
1410 sig = host->id == 0 ? OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX;
1411 host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
1412#if 0
1413 if (!host->dma_rx) {
1414 dev_err(host->dev, "unable to obtain RX DMA engine channel %u\n",
1415 sig);
1416 goto err_dma;
1417 }
1418#else
1419 if (!host->dma_rx)
1420 dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
1421 sig);
1422#endif
1423
1477 ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host); 1424 ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
1478 if (ret) 1425 if (ret)
1479 goto err_free_fclk; 1426 goto err_free_dma;
1480 1427
1481 if (pdata->init != NULL) { 1428 if (pdata->init != NULL) {
1482 ret = pdata->init(&pdev->dev); 1429 ret = pdata->init(&pdev->dev);
@@ -1510,7 +1457,11 @@ err_plat_cleanup:
1510 pdata->cleanup(&pdev->dev); 1457 pdata->cleanup(&pdev->dev);
1511err_free_irq: 1458err_free_irq:
1512 free_irq(host->irq, host); 1459 free_irq(host->irq, host);
1513err_free_fclk: 1460err_free_dma:
1461 if (host->dma_tx)
1462 dma_release_channel(host->dma_tx);
1463 if (host->dma_rx)
1464 dma_release_channel(host->dma_rx);
1514 clk_put(host->fclk); 1465 clk_put(host->fclk);
1515err_free_iclk: 1466err_free_iclk:
1516 clk_disable(host->iclk); 1467 clk_disable(host->iclk);
@@ -1545,6 +1496,11 @@ static int __devexit mmc_omap_remove(struct platform_device *pdev)
1545 clk_disable(host->iclk); 1496 clk_disable(host->iclk);
1546 clk_put(host->iclk); 1497 clk_put(host->iclk);
1547 1498
1499 if (host->dma_tx)
1500 dma_release_channel(host->dma_tx);
1501 if (host->dma_rx)
1502 dma_release_channel(host->dma_rx);
1503
1548 iounmap(host->virt_base); 1504 iounmap(host->virt_base);
1549 release_mem_region(pdev->resource[0].start, 1505 release_mem_region(pdev->resource[0].start,
1550 pdev->resource[0].end - pdev->resource[0].start + 1); 1506 pdev->resource[0].end - pdev->resource[0].start + 1);
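
The omap.c conversion above distills to the standard dmaengine slave lifecycle: prepare a scatter-gather descriptor on the channel, attach a completion callback, submit, and later kick the engine with dma_async_issue_pending(). A minimal sketch of that lifecycle follows; my_start_dma and its parameters are illustrations, not driver code:

#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int my_start_dma(struct dma_chan *chan, struct scatterlist *sg,
			unsigned int sg_len, bool is_write,
			dma_async_tx_callback done_cb, void *cb_param)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_slave_sg(chan, sg, sg_len,
			is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -EIO;		/* caller falls back to PIO */

	tx->callback = done_cb;		/* invoked on completion */
	tx->callback_param = cb_param;
	dmaengine_submit(tx);		/* queue only; nothing runs yet */

	dma_async_issue_pending(chan);	/* start the queued descriptor */
	return 0;
}

Note that omap.c itself defers dma_async_issue_pending() to mmc_omap_start_request(), after the command has been sent; the sketch runs it inline only for brevity.
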
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index bc28627af66b..3a09f93cc3b6 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/debugfs.h> 21#include <linux/debugfs.h>
22#include <linux/dmaengine.h>
22#include <linux/seq_file.h> 23#include <linux/seq_file.h>
23#include <linux/interrupt.h> 24#include <linux/interrupt.h>
24#include <linux/delay.h> 25#include <linux/delay.h>
@@ -29,6 +30,7 @@
29#include <linux/of.h> 30#include <linux/of.h>
30#include <linux/of_gpio.h> 31#include <linux/of_gpio.h>
31#include <linux/of_device.h> 32#include <linux/of_device.h>
33#include <linux/omap-dma.h>
32#include <linux/mmc/host.h> 34#include <linux/mmc/host.h>
33#include <linux/mmc/core.h> 35#include <linux/mmc/core.h>
34#include <linux/mmc/mmc.h> 36#include <linux/mmc/mmc.h>
@@ -37,7 +39,6 @@
37#include <linux/gpio.h> 39#include <linux/gpio.h>
38#include <linux/regulator/consumer.h> 40#include <linux/regulator/consumer.h>
39#include <linux/pm_runtime.h> 41#include <linux/pm_runtime.h>
40#include <plat/dma.h>
41#include <mach/hardware.h> 42#include <mach/hardware.h>
42#include <plat/board.h> 43#include <plat/board.h>
43#include <plat/mmc.h> 44#include <plat/mmc.h>
@@ -166,7 +167,8 @@ struct omap_hsmmc_host {
166 int suspended; 167 int suspended;
167 int irq; 168 int irq;
168 int use_dma, dma_ch; 169 int use_dma, dma_ch;
169 int dma_line_tx, dma_line_rx; 170 struct dma_chan *tx_chan;
171 struct dma_chan *rx_chan;
170 int slot_id; 172 int slot_id;
171 int response_busy; 173 int response_busy;
172 int context_loss; 174 int context_loss;
@@ -797,6 +799,12 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
797 return DMA_FROM_DEVICE; 799 return DMA_FROM_DEVICE;
798} 800}
799 801
802static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
803 struct mmc_data *data)
804{
805 return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
806}
807
800static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq) 808static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
801{ 809{
802 int dma_ch; 810 int dma_ch;
@@ -889,10 +897,13 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
889 spin_unlock_irqrestore(&host->irq_lock, flags); 897 spin_unlock_irqrestore(&host->irq_lock, flags);
890 898
891 if (host->use_dma && dma_ch != -1) { 899 if (host->use_dma && dma_ch != -1) {
892 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, 900 struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);
893 host->data->sg_len, 901
902 dmaengine_terminate_all(chan);
903 dma_unmap_sg(chan->device->dev,
904 host->data->sg, host->data->sg_len,
894 omap_hsmmc_get_dma_dir(host, host->data)); 905 omap_hsmmc_get_dma_dir(host, host->data));
895 omap_free_dma(dma_ch); 906
896 host->data->host_cookie = 0; 907 host->data->host_cookie = 0;
897 } 908 }
898 host->data = NULL; 909 host->data = NULL;
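
The cleanup hunk above replaces omap_free_dma() with two distinct steps that the dmaengine API separates: aborting the in-flight descriptor, then unmapping the buffers against the DMA engine's own device. Roughly, with hypothetical naming:

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>

static void my_dma_cleanup(struct dma_chan *chan, struct scatterlist *sg,
			   unsigned int sg_len, enum dma_data_direction dir)
{
	/* Abort whatever descriptor is still queued or running. */
	dmaengine_terminate_all(chan);

	/* Unmap for the device that actually performed the transfer. */
	dma_unmap_sg(chan->device->dev, sg, sg_len, dir);
}

Unmapping against chan->device->dev rather than mmc_dev() matters when the DMA controller sits behind a different bus than the MMC block.
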
@@ -1190,90 +1201,29 @@ static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id)
1190 return IRQ_HANDLED; 1201 return IRQ_HANDLED;
1191} 1202}
1192 1203
1193static int omap_hsmmc_get_dma_sync_dev(struct omap_hsmmc_host *host, 1204static void omap_hsmmc_dma_callback(void *param)
1194 struct mmc_data *data)
1195{
1196 int sync_dev;
1197
1198 if (data->flags & MMC_DATA_WRITE)
1199 sync_dev = host->dma_line_tx;
1200 else
1201 sync_dev = host->dma_line_rx;
1202 return sync_dev;
1203}
1204
1205static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
1206 struct mmc_data *data,
1207 struct scatterlist *sgl)
1208{
1209 int blksz, nblk, dma_ch;
1210
1211 dma_ch = host->dma_ch;
1212 if (data->flags & MMC_DATA_WRITE) {
1213 omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
1214 (host->mapbase + OMAP_HSMMC_DATA), 0, 0);
1215 omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
1216 sg_dma_address(sgl), 0, 0);
1217 } else {
1218 omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
1219 (host->mapbase + OMAP_HSMMC_DATA), 0, 0);
1220 omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
1221 sg_dma_address(sgl), 0, 0);
1222 }
1223
1224 blksz = host->data->blksz;
1225 nblk = sg_dma_len(sgl) / blksz;
1226
1227 omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
1228 blksz / 4, nblk, OMAP_DMA_SYNC_FRAME,
1229 omap_hsmmc_get_dma_sync_dev(host, data),
1230 !(data->flags & MMC_DATA_WRITE));
1231
1232 omap_start_dma(dma_ch);
1233}
1234
1235/*
1236 * DMA call back function
1237 */
1238static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
1239{ 1205{
1240 struct omap_hsmmc_host *host = cb_data; 1206 struct omap_hsmmc_host *host = param;
1207 struct dma_chan *chan;
1241 struct mmc_data *data; 1208 struct mmc_data *data;
1242 int dma_ch, req_in_progress; 1209 int req_in_progress;
1243 unsigned long flags;
1244
1245 if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
1246 dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n",
1247 ch_status);
1248 return;
1249 }
1250 1210
1251 spin_lock_irqsave(&host->irq_lock, flags); 1211 spin_lock_irq(&host->irq_lock);
1252 if (host->dma_ch < 0) { 1212 if (host->dma_ch < 0) {
1253 spin_unlock_irqrestore(&host->irq_lock, flags); 1213 spin_unlock_irq(&host->irq_lock);
1254 return; 1214 return;
1255 } 1215 }
1256 1216
1257 data = host->mrq->data; 1217 data = host->mrq->data;
1258 host->dma_sg_idx++; 1218 chan = omap_hsmmc_get_dma_chan(host, data);
1259 if (host->dma_sg_idx < host->dma_len) {
1260 /* Fire up the next transfer. */
1261 omap_hsmmc_config_dma_params(host, data,
1262 data->sg + host->dma_sg_idx);
1263 spin_unlock_irqrestore(&host->irq_lock, flags);
1264 return;
1265 }
1266
1267 if (!data->host_cookie) 1219 if (!data->host_cookie)
1268 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 1220 dma_unmap_sg(chan->device->dev,
1221 data->sg, data->sg_len,
1269 omap_hsmmc_get_dma_dir(host, data)); 1222 omap_hsmmc_get_dma_dir(host, data));
1270 1223
1271 req_in_progress = host->req_in_progress; 1224 req_in_progress = host->req_in_progress;
1272 dma_ch = host->dma_ch;
1273 host->dma_ch = -1; 1225 host->dma_ch = -1;
1274 spin_unlock_irqrestore(&host->irq_lock, flags); 1226 spin_unlock_irq(&host->irq_lock);
1275
1276 omap_free_dma(dma_ch);
1277 1227
1278 /* If DMA has finished after TC, complete the request */ 1228 /* If DMA has finished after TC, complete the request */
1279 if (!req_in_progress) { 1229 if (!req_in_progress) {
@@ -1286,7 +1236,8 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
1286 1236
1287static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, 1237static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
1288 struct mmc_data *data, 1238 struct mmc_data *data,
1289 struct omap_hsmmc_next *next) 1239 struct omap_hsmmc_next *next,
1240 struct dma_chan *chan)
1290{ 1241{
1291 int dma_len; 1242 int dma_len;
1292 1243
@@ -1301,8 +1252,7 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
1301 /* Check if next job is already prepared */ 1252 /* Check if next job is already prepared */
1302 if (next || 1253 if (next ||
1303 (!next && data->host_cookie != host->next_data.cookie)) { 1254 (!next && data->host_cookie != host->next_data.cookie)) {
1304 dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, 1255 dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
1305 data->sg_len,
1306 omap_hsmmc_get_dma_dir(host, data)); 1256 omap_hsmmc_get_dma_dir(host, data));
1307 1257
1308 } else { 1258 } else {
@@ -1329,8 +1279,11 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
1329static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, 1279static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
1330 struct mmc_request *req) 1280 struct mmc_request *req)
1331{ 1281{
1332 int dma_ch = 0, ret = 0, i; 1282 struct dma_slave_config cfg;
1283 struct dma_async_tx_descriptor *tx;
1284 int ret = 0, i;
1333 struct mmc_data *data = req->data; 1285 struct mmc_data *data = req->data;
1286 struct dma_chan *chan;
1334 1287
1335 /* Sanity check: all the SG entries must be aligned by block size. */ 1288 /* Sanity check: all the SG entries must be aligned by block size. */
1336 for (i = 0; i < data->sg_len; i++) { 1289 for (i = 0; i < data->sg_len; i++) {
@@ -1348,22 +1301,41 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
1348 1301
1349 BUG_ON(host->dma_ch != -1); 1302 BUG_ON(host->dma_ch != -1);
1350 1303
1351 ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data), 1304 chan = omap_hsmmc_get_dma_chan(host, data);
1352 "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch); 1305
1353 if (ret != 0) { 1306 cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
1354 dev_err(mmc_dev(host->mmc), 1307 cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
1355 "%s: omap_request_dma() failed with %d\n", 1308 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1356 mmc_hostname(host->mmc), ret); 1309 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1310 cfg.src_maxburst = data->blksz / 4;
1311 cfg.dst_maxburst = data->blksz / 4;
1312
1313 ret = dmaengine_slave_config(chan, &cfg);
1314 if (ret)
1357 return ret; 1315 return ret;
1358 } 1316
1359 ret = omap_hsmmc_pre_dma_transfer(host, data, NULL); 1317 ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan);
1360 if (ret) 1318 if (ret)
1361 return ret; 1319 return ret;
1362 1320
1363 host->dma_ch = dma_ch; 1321 tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
1364 host->dma_sg_idx = 0; 1322 data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
1323 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1324 if (!tx) {
1325 dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
1326 /* FIXME: cleanup */
1327 return -1;
1328 }
1329
1330 tx->callback = omap_hsmmc_dma_callback;
1331 tx->callback_param = host;
1365 1332
1366 omap_hsmmc_config_dma_params(host, data, data->sg); 1333 /* Does not fail */
1334 dmaengine_submit(tx);
1335
1336 host->dma_ch = 1;
1337
1338 dma_async_issue_pending(chan);
1367 1339
1368 return 0; 1340 return 0;
1369} 1341}
@@ -1445,11 +1417,11 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
1445 struct omap_hsmmc_host *host = mmc_priv(mmc); 1417 struct omap_hsmmc_host *host = mmc_priv(mmc);
1446 struct mmc_data *data = mrq->data; 1418 struct mmc_data *data = mrq->data;
1447 1419
1448 if (host->use_dma) { 1420 if (host->use_dma && data->host_cookie) {
1449 if (data->host_cookie) 1421 struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);
1450 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 1422
1451 data->sg_len, 1423 dma_unmap_sg(c->device->dev, data->sg, data->sg_len,
1452 omap_hsmmc_get_dma_dir(host, data)); 1424 omap_hsmmc_get_dma_dir(host, data));
1453 data->host_cookie = 0; 1425 data->host_cookie = 0;
1454 } 1426 }
1455} 1427}
@@ -1464,10 +1436,13 @@ static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
1464 return ; 1436 return ;
1465 } 1437 }
1466 1438
1467 if (host->use_dma) 1439 if (host->use_dma) {
1440 struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data);
1441
1468 if (omap_hsmmc_pre_dma_transfer(host, mrq->data, 1442 if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
1469 &host->next_data)) 1443 &host->next_data, c))
1470 mrq->data->host_cookie = 0; 1444 mrq->data->host_cookie = 0;
1445 }
1471} 1446}
1472 1447
1473/* 1448/*
@@ -1800,6 +1775,8 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
1800 struct resource *res; 1775 struct resource *res;
1801 int ret, irq; 1776 int ret, irq;
1802 const struct of_device_id *match; 1777 const struct of_device_id *match;
1778 dma_cap_mask_t mask;
1779 unsigned tx_req, rx_req;
1803 1780
1804 match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev); 1781 match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev);
1805 if (match) { 1782 if (match) {
@@ -1844,7 +1821,6 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
1844 host->pdata = pdata; 1821 host->pdata = pdata;
1845 host->dev = &pdev->dev; 1822 host->dev = &pdev->dev;
1846 host->use_dma = 1; 1823 host->use_dma = 1;
1847 host->dev->dma_mask = &pdata->dma_mask;
1848 host->dma_ch = -1; 1824 host->dma_ch = -1;
1849 host->irq = irq; 1825 host->irq = irq;
1850 host->slot_id = 0; 1826 host->slot_id = 0;
@@ -1934,7 +1910,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
1934 ret = -ENXIO; 1910 ret = -ENXIO;
1935 goto err_irq; 1911 goto err_irq;
1936 } 1912 }
1937 host->dma_line_tx = res->start; 1913 tx_req = res->start;
1938 1914
1939 res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx"); 1915 res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
1940 if (!res) { 1916 if (!res) {
@@ -1942,7 +1918,24 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
1942 ret = -ENXIO; 1918 ret = -ENXIO;
1943 goto err_irq; 1919 goto err_irq;
1944 } 1920 }
1945 host->dma_line_rx = res->start; 1921 rx_req = res->start;
1922
1923 dma_cap_zero(mask);
1924 dma_cap_set(DMA_SLAVE, mask);
1925
1926 host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req);
1927 if (!host->rx_chan) {
1928 dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
1929 ret = -ENXIO;
1930 goto err_irq;
1931 }
1932
1933 host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req);
1934 if (!host->tx_chan) {
1935 dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
1936 ret = -ENXIO;
1937 goto err_irq;
1938 }
1946 1939
1947 /* Request IRQ for MMC operations */ 1940 /* Request IRQ for MMC operations */
1948 ret = request_irq(host->irq, omap_hsmmc_irq, 0, 1941 ret = request_irq(host->irq, omap_hsmmc_irq, 0,
@@ -2021,6 +2014,10 @@ err_reg:
2021err_irq_cd_init: 2014err_irq_cd_init:
2022 free_irq(host->irq, host); 2015 free_irq(host->irq, host);
2023err_irq: 2016err_irq:
2017 if (host->tx_chan)
2018 dma_release_channel(host->tx_chan);
2019 if (host->rx_chan)
2020 dma_release_channel(host->rx_chan);
2024 pm_runtime_put_sync(host->dev); 2021 pm_runtime_put_sync(host->dev);
2025 pm_runtime_disable(host->dev); 2022 pm_runtime_disable(host->dev);
2026 clk_put(host->fclk); 2023 clk_put(host->fclk);
@@ -2056,6 +2053,11 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev)
2056 if (mmc_slot(host).card_detect_irq) 2053 if (mmc_slot(host).card_detect_irq)
2057 free_irq(mmc_slot(host).card_detect_irq, host); 2054 free_irq(mmc_slot(host).card_detect_irq, host);
2058 2055
2056 if (host->tx_chan)
2057 dma_release_channel(host->tx_chan);
2058 if (host->rx_chan)
2059 dma_release_channel(host->rx_chan);
2060
2059 pm_runtime_put_sync(host->dev); 2061 pm_runtime_put_sync(host->dev);
2060 pm_runtime_disable(host->dev); 2062 pm_runtime_disable(host->dev);
2061 clk_put(host->fclk); 2063 clk_put(host->fclk);
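
Before each transfer, the converted omap_hsmmc driver programs the channel with dmaengine_slave_config() instead of poking OMAP-specific source and destination parameters. The shape of that configuration, as a sketch in which fifo_addr stands in for host->mapbase + OMAP_HSMMC_DATA:

#include <linux/dmaengine.h>
#include <linux/string.h>

static int my_config_chan(struct dma_chan *chan, dma_addr_t fifo_addr,
			  unsigned int blksz)
{
	struct dma_slave_config cfg;

	memset(&cfg, 0, sizeof(cfg));	/* leave unused fields zeroed */
	cfg.src_addr = fifo_addr;	/* device FIFO, for reads */
	cfg.dst_addr = fifo_addr;	/* same FIFO, for writes */
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = blksz / 4;	/* one block per burst, in words */
	cfg.dst_maxburst = blksz / 4;

	return dmaengine_slave_config(chan, &cfg);
}

The hsmmc hunk above fills cfg field by field without zeroing it first; the NAND conversion below memsets it, which is the safer habit since dma_slave_config has grown fields over time.
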
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index d7f681d0c9b9..e9309b3659e7 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/platform_device.h> 11#include <linux/platform_device.h>
12#include <linux/dmaengine.h>
12#include <linux/dma-mapping.h> 13#include <linux/dma-mapping.h>
13#include <linux/delay.h> 14#include <linux/delay.h>
14#include <linux/module.h> 15#include <linux/module.h>
@@ -18,6 +19,7 @@
18#include <linux/mtd/mtd.h> 19#include <linux/mtd/mtd.h>
19#include <linux/mtd/nand.h> 20#include <linux/mtd/nand.h>
20#include <linux/mtd/partitions.h> 21#include <linux/mtd/partitions.h>
22#include <linux/omap-dma.h>
21#include <linux/io.h> 23#include <linux/io.h>
22#include <linux/slab.h> 24#include <linux/slab.h>
23 25
@@ -123,7 +125,7 @@ struct omap_nand_info {
123 int gpmc_cs; 125 int gpmc_cs;
124 unsigned long phys_base; 126 unsigned long phys_base;
125 struct completion comp; 127 struct completion comp;
126 int dma_ch; 128 struct dma_chan *dma;
127 int gpmc_irq; 129 int gpmc_irq;
128 enum { 130 enum {
129 OMAP_NAND_IO_READ = 0, /* read */ 131 OMAP_NAND_IO_READ = 0, /* read */
@@ -336,12 +338,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
336} 338}
337 339
338/* 340/*
339 * omap_nand_dma_cb: callback on the completion of dma transfer 341 * omap_nand_dma_callback: callback on the completion of dma transfer
340 * @lch: logical channel
341 * @ch_status: channel status
342 * @data: pointer to completion data structure 342 * @data: pointer to completion data structure
343 */ 343 */
344static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) 344static void omap_nand_dma_callback(void *data)
345{ 345{
346 complete((struct completion *) data); 346 complete((struct completion *) data);
347} 347}
@@ -358,17 +358,13 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
358{ 358{
359 struct omap_nand_info *info = container_of(mtd, 359 struct omap_nand_info *info = container_of(mtd,
360 struct omap_nand_info, mtd); 360 struct omap_nand_info, mtd);
361 struct dma_async_tx_descriptor *tx;
361 enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : 362 enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
362 DMA_FROM_DEVICE; 363 DMA_FROM_DEVICE;
363 dma_addr_t dma_addr; 364 struct scatterlist sg;
364 int ret;
365 unsigned long tim, limit; 365 unsigned long tim, limit;
366 366 unsigned n;
367 /* The fifo depth is 64 bytes max. 367 int ret;
368 * But configure the FIFO-threshold to 32 to get a sync at each frame
369 * and the frame length is 32 bytes.
370 */
371 int buf_len = len >> 6;
372 368
373 if (addr >= high_memory) { 369 if (addr >= high_memory) {
374 struct page *p1; 370 struct page *p1;
@@ -382,40 +378,33 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
382 addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK); 378 addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
383 } 379 }
384 380
385 dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir); 381 sg_init_one(&sg, addr, len);
386 if (dma_mapping_error(&info->pdev->dev, dma_addr)) { 382 n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
383 if (n == 0) {
387 dev_err(&info->pdev->dev, 384 dev_err(&info->pdev->dev,
388 "Couldn't DMA map a %d byte buffer\n", len); 385 "Couldn't DMA map a %d byte buffer\n", len);
389 goto out_copy; 386 goto out_copy;
390 } 387 }
391 388
392 if (is_write) { 389 tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
393 omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, 390 is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
394 info->phys_base, 0, 0); 391 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
395 omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC, 392 if (!tx)
396 dma_addr, 0, 0); 393 goto out_copy_unmap;
397 omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32, 394
398 0x10, buf_len, OMAP_DMA_SYNC_FRAME, 395 tx->callback = omap_nand_dma_callback;
399 OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC); 396 tx->callback_param = &info->comp;
400 } else { 397 dmaengine_submit(tx);
401 omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, 398
402 info->phys_base, 0, 0); 399 /* configure and start prefetch transfer */
403 omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
404 dma_addr, 0, 0);
405 omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
406 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
407 OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
408 }
409 /* configure and start prefetch transfer */
410 ret = gpmc_prefetch_enable(info->gpmc_cs, 400 ret = gpmc_prefetch_enable(info->gpmc_cs,
411 PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write); 401 PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
412 if (ret) 402 if (ret)
413 /* PFPW engine is busy, use cpu copy method */ 403 /* PFPW engine is busy, use cpu copy method */
414 goto out_copy_unmap; 404 goto out_copy_unmap;
415 405
416 init_completion(&info->comp); 406 init_completion(&info->comp);
417 407 dma_async_issue_pending(info->dma);
418 omap_start_dma(info->dma_ch);
419 408
420 /* setup and start DMA using dma_addr */ 409 /* setup and start DMA using dma_addr */
421 wait_for_completion(&info->comp); 410 wait_for_completion(&info->comp);
@@ -427,11 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
427 /* disable and stop the PFPW engine */ 416 /* disable and stop the PFPW engine */
428 gpmc_prefetch_reset(info->gpmc_cs); 417 gpmc_prefetch_reset(info->gpmc_cs);
429 418
430 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); 419 dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
431 return 0; 420 return 0;
432 421
433out_copy_unmap: 422out_copy_unmap:
434 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); 423 dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
435out_copy: 424out_copy:
436 if (info->nand.options & NAND_BUSWIDTH_16) 425 if (info->nand.options & NAND_BUSWIDTH_16)
437 is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len) 426 is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
@@ -1164,6 +1153,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1164 struct omap_nand_platform_data *pdata; 1153 struct omap_nand_platform_data *pdata;
1165 int err; 1154 int err;
1166 int i, offset; 1155 int i, offset;
1156 dma_cap_mask_t mask;
1157 unsigned sig;
1167 1158
1168 pdata = pdev->dev.platform_data; 1159 pdata = pdev->dev.platform_data;
1169 if (pdata == NULL) { 1160 if (pdata == NULL) {
@@ -1244,18 +1235,31 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1244 break; 1235 break;
1245 1236
1246 case NAND_OMAP_PREFETCH_DMA: 1237 case NAND_OMAP_PREFETCH_DMA:
1247 err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND", 1238 dma_cap_zero(mask);
1248 omap_nand_dma_cb, &info->comp, &info->dma_ch); 1239 dma_cap_set(DMA_SLAVE, mask);
1249 if (err < 0) { 1240 sig = OMAP24XX_DMA_GPMC;
1250 info->dma_ch = -1; 1241 info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
1251 dev_err(&pdev->dev, "DMA request failed!\n"); 1242 if (!info->dma) {
1243 dev_err(&pdev->dev, "DMA engine request failed\n");
1244 err = -ENXIO;
1252 goto out_release_mem_region; 1245 goto out_release_mem_region;
1253 } else { 1246 } else {
1254 omap_set_dma_dest_burst_mode(info->dma_ch, 1247 struct dma_slave_config cfg;
1255 OMAP_DMA_DATA_BURST_16); 1248 int rc;
1256 omap_set_dma_src_burst_mode(info->dma_ch, 1249
1257 OMAP_DMA_DATA_BURST_16); 1250 memset(&cfg, 0, sizeof(cfg));
1258 1251 cfg.src_addr = info->phys_base;
1252 cfg.dst_addr = info->phys_base;
1253 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1254 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1255 cfg.src_maxburst = 16;
1256 cfg.dst_maxburst = 16;
1257 rc = dmaengine_slave_config(info->dma, &cfg);
1258 if (rc) {
1259 dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
1260 rc);
1261 goto out_release_mem_region;
1262 }
1259 info->nand.read_buf = omap_read_buf_dma_pref; 1263 info->nand.read_buf = omap_read_buf_dma_pref;
1260 info->nand.write_buf = omap_write_buf_dma_pref; 1264 info->nand.write_buf = omap_write_buf_dma_pref;
1261 } 1265 }
@@ -1358,6 +1362,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1358 return 0; 1362 return 0;
1359 1363
1360out_release_mem_region: 1364out_release_mem_region:
1365 if (info->dma)
1366 dma_release_channel(info->dma);
1361 release_mem_region(info->phys_base, NAND_IO_SIZE); 1367 release_mem_region(info->phys_base, NAND_IO_SIZE);
1362out_free_info: 1368out_free_info:
1363 kfree(info); 1369 kfree(info);
@@ -1373,8 +1379,8 @@ static int omap_nand_remove(struct platform_device *pdev)
1373 omap3_free_bch(&info->mtd); 1379 omap3_free_bch(&info->mtd);
1374 1380
1375 platform_set_drvdata(pdev, NULL); 1381 platform_set_drvdata(pdev, NULL);
1376 if (info->dma_ch != -1) 1382 if (info->dma)
1377 omap_free_dma(info->dma_ch); 1383 dma_release_channel(info->dma);
1378 1384
1379 if (info->gpmc_irq) 1385 if (info->gpmc_irq)
1380 free_irq(info->gpmc_irq, info); 1386 free_irq(info->gpmc_irq, info);
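
The NAND path is synchronous, so its callback does nothing but complete() a struct completion that the caller sleeps on. The pattern, reduced to a sketch with hypothetical names:

#include <linux/completion.h>
#include <linux/dmaengine.h>

static void my_dma_done(void *data)
{
	complete(data);			/* wake the waiting thread */
}

static void my_dma_wait(struct dma_chan *chan,
			struct dma_async_tx_descriptor *tx)
{
	DECLARE_COMPLETION_ONSTACK(done);

	tx->callback = my_dma_done;
	tx->callback_param = &done;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	wait_for_completion(&done);	/* callback fires from the engine */
}

omap2.c keeps the completion in struct omap_nand_info and re-initialises it per transfer; an on-stack completion, shown here, is the common alternative. Either way it must be initialised before dma_async_issue_pending() lets the transfer start.
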
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 7d46b15e1520..bc4778175e34 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -28,6 +28,8 @@
28#include <linux/device.h> 28#include <linux/device.h>
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/dma-mapping.h> 30#include <linux/dma-mapping.h>
31#include <linux/dmaengine.h>
32#include <linux/omap-dma.h>
31#include <linux/platform_device.h> 33#include <linux/platform_device.h>
32#include <linux/err.h> 34#include <linux/err.h>
33#include <linux/clk.h> 35#include <linux/clk.h>
@@ -39,7 +41,6 @@
39 41
40#include <linux/spi/spi.h> 42#include <linux/spi/spi.h>
41 43
42#include <plat/dma.h>
43#include <plat/clock.h> 44#include <plat/clock.h>
44#include <plat/mcspi.h> 45#include <plat/mcspi.h>
45 46
@@ -93,8 +94,8 @@
93 94
94/* We have 2 DMA channels per CS, one for RX and one for TX */ 95/* We have 2 DMA channels per CS, one for RX and one for TX */
95struct omap2_mcspi_dma { 96struct omap2_mcspi_dma {
96 int dma_tx_channel; 97 struct dma_chan *dma_tx;
97 int dma_rx_channel; 98 struct dma_chan *dma_rx;
98 99
99 int dma_tx_sync_dev; 100 int dma_tx_sync_dev;
100 int dma_rx_sync_dev; 101 int dma_rx_sync_dev;
@@ -300,20 +301,46 @@ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
300 return 0; 301 return 0;
301} 302}
302 303
304static void omap2_mcspi_rx_callback(void *data)
305{
306 struct spi_device *spi = data;
307 struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
308 struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
309
310 complete(&mcspi_dma->dma_rx_completion);
311
312 /* We must disable the DMA RX request */
313 omap2_mcspi_set_dma_req(spi, 1, 0);
314}
315
316static void omap2_mcspi_tx_callback(void *data)
317{
318 struct spi_device *spi = data;
319 struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
320 struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
321
322 complete(&mcspi_dma->dma_tx_completion);
323
324 /* We must disable the DMA TX request */
325 omap2_mcspi_set_dma_req(spi, 0, 0);
326}
327
303static unsigned 328static unsigned
304omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) 329omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
305{ 330{
306 struct omap2_mcspi *mcspi; 331 struct omap2_mcspi *mcspi;
307 struct omap2_mcspi_cs *cs = spi->controller_state; 332 struct omap2_mcspi_cs *cs = spi->controller_state;
308 struct omap2_mcspi_dma *mcspi_dma; 333 struct omap2_mcspi_dma *mcspi_dma;
309 unsigned int count, c; 334 unsigned int count;
310 unsigned long base, tx_reg, rx_reg; 335 int word_len, element_count;
311 int word_len, data_type, element_count;
312 int elements = 0; 336 int elements = 0;
313 u32 l; 337 u32 l;
314 u8 * rx; 338 u8 * rx;
315 const u8 * tx; 339 const u8 * tx;
316 void __iomem *chstat_reg; 340 void __iomem *chstat_reg;
341 struct dma_slave_config cfg;
342 enum dma_slave_buswidth width;
343 unsigned es;
317 344
318 mcspi = spi_master_get_devdata(spi->master); 345 mcspi = spi_master_get_devdata(spi->master);
319 mcspi_dma = &mcspi->dma_channels[spi->chip_select]; 346 mcspi_dma = &mcspi->dma_channels[spi->chip_select];
@@ -321,68 +348,92 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
321 348
322 chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0; 349 chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
323 350
351 if (cs->word_len <= 8) {
352 width = DMA_SLAVE_BUSWIDTH_1_BYTE;
353 es = 1;
354 } else if (cs->word_len <= 16) {
355 width = DMA_SLAVE_BUSWIDTH_2_BYTES;
356 es = 2;
357 } else {
358 width = DMA_SLAVE_BUSWIDTH_4_BYTES;
359 es = 4;
360 }
361
362 memset(&cfg, 0, sizeof(cfg));
363 cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
364 cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
365 cfg.src_addr_width = width;
366 cfg.dst_addr_width = width;
367 cfg.src_maxburst = 1;
368 cfg.dst_maxburst = 1;
369
370 if (xfer->tx_buf && mcspi_dma->dma_tx) {
371 struct dma_async_tx_descriptor *tx;
372 struct scatterlist sg;
373
374 dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
375
376 sg_init_table(&sg, 1);
377 sg_dma_address(&sg) = xfer->tx_dma;
378 sg_dma_len(&sg) = xfer->len;
379
380 tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
381 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
382 if (tx) {
383 tx->callback = omap2_mcspi_tx_callback;
384 tx->callback_param = spi;
385 dmaengine_submit(tx);
386 } else {
387 /* FIXME: fall back to PIO? */
388 }
389 }
390
391 if (xfer->rx_buf && mcspi_dma->dma_rx) {
392 struct dma_async_tx_descriptor *tx;
393 struct scatterlist sg;
394 size_t len = xfer->len - es;
395
396 dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
397
398 if (l & OMAP2_MCSPI_CHCONF_TURBO)
399 len -= es;
400
401 sg_init_table(&sg, 1);
402 sg_dma_address(&sg) = xfer->rx_dma;
403 sg_dma_len(&sg) = len;
404
405 tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
406 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
407 if (tx) {
408 tx->callback = omap2_mcspi_rx_callback;
409 tx->callback_param = spi;
410 dmaengine_submit(tx);
411 } else {
412 /* FIXME: fall back to PIO? */
413 }
414 }
415
324 count = xfer->len; 416 count = xfer->len;
325 c = count;
326 word_len = cs->word_len; 417 word_len = cs->word_len;
327 418
328 base = cs->phys;
329 tx_reg = base + OMAP2_MCSPI_TX0;
330 rx_reg = base + OMAP2_MCSPI_RX0;
331 rx = xfer->rx_buf; 419 rx = xfer->rx_buf;
332 tx = xfer->tx_buf; 420 tx = xfer->tx_buf;
333 421
334 if (word_len <= 8) { 422 if (word_len <= 8) {
335 data_type = OMAP_DMA_DATA_TYPE_S8;
336 element_count = count; 423 element_count = count;
337 } else if (word_len <= 16) { 424 } else if (word_len <= 16) {
338 data_type = OMAP_DMA_DATA_TYPE_S16;
339 element_count = count >> 1; 425 element_count = count >> 1;
340 } else /* word_len <= 32 */ { 426 } else /* word_len <= 32 */ {
341 data_type = OMAP_DMA_DATA_TYPE_S32;
342 element_count = count >> 2; 427 element_count = count >> 2;
343 } 428 }
344 429
345 if (tx != NULL) { 430 if (tx != NULL) {
346 omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel, 431 dma_async_issue_pending(mcspi_dma->dma_tx);
347 data_type, element_count, 1,
348 OMAP_DMA_SYNC_ELEMENT,
349 mcspi_dma->dma_tx_sync_dev, 0);
350
351 omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0,
352 OMAP_DMA_AMODE_CONSTANT,
353 tx_reg, 0, 0);
354
355 omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0,
356 OMAP_DMA_AMODE_POST_INC,
357 xfer->tx_dma, 0, 0);
358 }
359
360 if (rx != NULL) {
361 elements = element_count - 1;
362 if (l & OMAP2_MCSPI_CHCONF_TURBO)
363 elements--;
364
365 omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
366 data_type, elements, 1,
367 OMAP_DMA_SYNC_ELEMENT,
368 mcspi_dma->dma_rx_sync_dev, 1);
369
370 omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0,
371 OMAP_DMA_AMODE_CONSTANT,
372 rx_reg, 0, 0);
373
374 omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0,
375 OMAP_DMA_AMODE_POST_INC,
376 xfer->rx_dma, 0, 0);
377 }
378
379 if (tx != NULL) {
380 omap_start_dma(mcspi_dma->dma_tx_channel);
381 omap2_mcspi_set_dma_req(spi, 0, 1); 432 omap2_mcspi_set_dma_req(spi, 0, 1);
382 } 433 }
383 434
384 if (rx != NULL) { 435 if (rx != NULL) {
385 omap_start_dma(mcspi_dma->dma_rx_channel); 436 dma_async_issue_pending(mcspi_dma->dma_rx);
386 omap2_mcspi_set_dma_req(spi, 1, 1); 437 omap2_mcspi_set_dma_req(spi, 1, 1);
387 } 438 }
388 439
@@ -408,7 +459,10 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
408 DMA_FROM_DEVICE); 459 DMA_FROM_DEVICE);
409 omap2_mcspi_set_enable(spi, 0); 460 omap2_mcspi_set_enable(spi, 0);
410 461
462 elements = element_count - 1;
463
411 if (l & OMAP2_MCSPI_CHCONF_TURBO) { 464 if (l & OMAP2_MCSPI_CHCONF_TURBO) {
465 elements--;
412 466
413 if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) 467 if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
414 & OMAP2_MCSPI_CHSTAT_RXS)) { 468 & OMAP2_MCSPI_CHSTAT_RXS)) {
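
Since the SPI core hands omap2-mcspi an already-mapped xfer->tx_dma / xfer->rx_dma, the driver wraps the bus address in a one-entry scatterlist rather than remapping the buffer. As a sketch, with buf_dma and len standing in for the xfer fields:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static struct dma_async_tx_descriptor *
my_prep_mapped_buf(struct dma_chan *chan, dma_addr_t buf_dma, size_t len,
		   enum dma_transfer_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);		/* single, already-mapped segment */
	sg_dma_address(&sg) = buf_dma;	/* no dma_map_sg() needed */
	sg_dma_len(&sg) = len;

	return dmaengine_prep_slave_sg(chan, &sg, 1, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
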
@@ -725,64 +779,38 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
725 return 0; 779 return 0;
726} 780}
727 781
728static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data)
729{
730 struct spi_device *spi = data;
731 struct omap2_mcspi *mcspi;
732 struct omap2_mcspi_dma *mcspi_dma;
733
734 mcspi = spi_master_get_devdata(spi->master);
735 mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
736
737 complete(&mcspi_dma->dma_rx_completion);
738
739 /* We must disable the DMA RX request */
740 omap2_mcspi_set_dma_req(spi, 1, 0);
741}
742
743static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data)
744{
745 struct spi_device *spi = data;
746 struct omap2_mcspi *mcspi;
747 struct omap2_mcspi_dma *mcspi_dma;
748
749 mcspi = spi_master_get_devdata(spi->master);
750 mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
751
752 complete(&mcspi_dma->dma_tx_completion);
753
754 /* We must disable the DMA TX request */
755 omap2_mcspi_set_dma_req(spi, 0, 0);
756}
757
758static int omap2_mcspi_request_dma(struct spi_device *spi) 782static int omap2_mcspi_request_dma(struct spi_device *spi)
759{ 783{
760 struct spi_master *master = spi->master; 784 struct spi_master *master = spi->master;
761 struct omap2_mcspi *mcspi; 785 struct omap2_mcspi *mcspi;
762 struct omap2_mcspi_dma *mcspi_dma; 786 struct omap2_mcspi_dma *mcspi_dma;
787 dma_cap_mask_t mask;
788 unsigned sig;
763 789
764 mcspi = spi_master_get_devdata(master); 790 mcspi = spi_master_get_devdata(master);
765 mcspi_dma = mcspi->dma_channels + spi->chip_select; 791 mcspi_dma = mcspi->dma_channels + spi->chip_select;
766 792
767 if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX", 793 init_completion(&mcspi_dma->dma_rx_completion);
768 omap2_mcspi_dma_rx_callback, spi, 794 init_completion(&mcspi_dma->dma_tx_completion);
769 &mcspi_dma->dma_rx_channel)) { 795
770 dev_err(&spi->dev, "no RX DMA channel for McSPI\n"); 796 dma_cap_zero(mask);
797 dma_cap_set(DMA_SLAVE, mask);
798 sig = mcspi_dma->dma_rx_sync_dev;
799 mcspi_dma->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
800 if (!mcspi_dma->dma_rx) {
801 dev_err(&spi->dev, "no RX DMA engine channel for McSPI\n");
771 return -EAGAIN; 802 return -EAGAIN;
772 } 803 }
773 804
774 if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX", 805 sig = mcspi_dma->dma_tx_sync_dev;
775 omap2_mcspi_dma_tx_callback, spi, 806 mcspi_dma->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
776 &mcspi_dma->dma_tx_channel)) { 807 if (!mcspi_dma->dma_tx) {
777 omap_free_dma(mcspi_dma->dma_rx_channel); 808 dev_err(&spi->dev, "no TX DMA engine channel for McSPI\n");
778 mcspi_dma->dma_rx_channel = -1; 809 dma_release_channel(mcspi_dma->dma_rx);
779 dev_err(&spi->dev, "no TX DMA channel for McSPI\n"); 810 mcspi_dma->dma_rx = NULL;
780 return -EAGAIN; 811 return -EAGAIN;
781 } 812 }
782 813
783 init_completion(&mcspi_dma->dma_rx_completion);
784 init_completion(&mcspi_dma->dma_tx_completion);
785
786 return 0; 814 return 0;
787} 815}
788 816
@@ -814,8 +842,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
814 list_add_tail(&cs->node, &ctx->cs); 842 list_add_tail(&cs->node, &ctx->cs);
815 } 843 }
816 844
817 if (mcspi_dma->dma_rx_channel == -1 845 if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
818 || mcspi_dma->dma_tx_channel == -1) {
819 ret = omap2_mcspi_request_dma(spi); 846 ret = omap2_mcspi_request_dma(spi);
820 if (ret < 0) 847 if (ret < 0)
821 return ret; 848 return ret;
@@ -850,13 +877,13 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
850 if (spi->chip_select < spi->master->num_chipselect) { 877 if (spi->chip_select < spi->master->num_chipselect) {
851 mcspi_dma = &mcspi->dma_channels[spi->chip_select]; 878 mcspi_dma = &mcspi->dma_channels[spi->chip_select];
852 879
853 if (mcspi_dma->dma_rx_channel != -1) { 880 if (mcspi_dma->dma_rx) {
854 omap_free_dma(mcspi_dma->dma_rx_channel); 881 dma_release_channel(mcspi_dma->dma_rx);
855 mcspi_dma->dma_rx_channel = -1; 882 mcspi_dma->dma_rx = NULL;
856 } 883 }
857 if (mcspi_dma->dma_tx_channel != -1) { 884 if (mcspi_dma->dma_tx) {
858 omap_free_dma(mcspi_dma->dma_tx_channel); 885 dma_release_channel(mcspi_dma->dma_tx);
859 mcspi_dma->dma_tx_channel = -1; 886 mcspi_dma->dma_tx = NULL;
860 } 887 }
861 } 888 }
862} 889}
@@ -1176,7 +1203,6 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
1176 break; 1203 break;
1177 } 1204 }
1178 1205
1179 mcspi->dma_channels[i].dma_rx_channel = -1;
1180 mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start; 1206 mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
1181 sprintf(dma_ch_name, "tx%d", i); 1207 sprintf(dma_ch_name, "tx%d", i);
1182 dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA, 1208 dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
@@ -1187,7 +1213,6 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
1187 break; 1213 break;
1188 } 1214 }
1189 1215
1190 mcspi->dma_channels[i].dma_tx_channel = -1;
1191 mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start; 1216 mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
1192 } 1217 }
1193 1218
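
All four conversions request their channels the same way: build a capability mask containing DMA_SLAVE and let omap_dma_filter_fn() match the legacy request-line number. A sketch of that shared pattern; my_request_chan is illustrative:

#include <linux/dmaengine.h>
#include <linux/omap-dma.h>

static struct dma_chan *my_request_chan(unsigned int sig)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);	/* slave (device) transfers only */

	/*
	 * sig is the old OMAP DMA request line; the filter is called
	 * synchronously, so passing a stack variable's address is fine.
	 */
	return dma_request_channel(mask, omap_dma_filter_fn, &sig);
}

The matching teardown is a dma_release_channel() for every channel obtained, as the new error and remove paths above do.
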
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 02549017212a..2a5f64a11b77 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -21,8 +21,9 @@
21#include <linux/dmaengine.h> 21#include <linux/dmaengine.h>
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23 23
24struct pl08x_lli;
25struct pl08x_driver_data; 24struct pl08x_driver_data;
25struct pl08x_phy_chan;
26struct pl08x_txd;
26 27
27/* Bitmasks for selecting AHB ports for DMA transfers */ 28/* Bitmasks for selecting AHB ports for DMA transfers */
28enum { 29enum {
@@ -46,170 +47,29 @@ enum {
46 * devices with static assignments 47 * devices with static assignments
47 * @muxval: a number usually used to poke into some mux register to 48 * @muxval: a number usually used to poke into some mux register to
48 * mux in the signal to this channel 49 * mux in the signal to this channel
49 * @cctl_opt: default options for the channel control register 50 * @cctl_memcpy: options for the channel control register for memcpy
51 * *** not used for slave channels ***
50 * @addr: source/target address in physical memory for this DMA channel, 52 * @addr: source/target address in physical memory for this DMA channel,
51 * can be the address of a FIFO register for burst requests for example. 53 * can be the address of a FIFO register for burst requests for example.
52 * This can be left undefined if the PrimeCell API is used for configuring 54 * This can be left undefined if the PrimeCell API is used for configuring
53 * this. 55 * this.
54 * @circular_buffer: whether the buffer passed in is circular and
55 * shall simply be looped round round (like a record baby round
56 * round round round)
57 * @single: the device connected to this channel will request single DMA 56 * @single: the device connected to this channel will request single DMA
58 * transfers, not bursts. (Bursts are default.) 57 * transfers, not bursts. (Bursts are default.)
59 * @periph_buses: the device connected to this channel is accessible via 58 * @periph_buses: the device connected to this channel is accessible via
60 * these buses (use PL08X_AHB1 | PL08X_AHB2). 59 * these buses (use PL08X_AHB1 | PL08X_AHB2).
61 */ 60 */
62struct pl08x_channel_data { 61struct pl08x_channel_data {
63 char *bus_id; 62 const char *bus_id;
64 int min_signal; 63 int min_signal;
65 int max_signal; 64 int max_signal;
66 u32 muxval; 65 u32 muxval;
67 u32 cctl; 66 u32 cctl_memcpy;
68 dma_addr_t addr; 67 dma_addr_t addr;
69 bool circular_buffer;
70 bool single; 68 bool single;
71 u8 periph_buses; 69 u8 periph_buses;
72}; 70};
73 71
74/** 72/**
75 * struct pl08x_bus_data - information of source or destination
76 * busses for a transfer
77 * @addr: current address
78 * @maxwidth: the maximum width of a transfer on this bus
79 * @buswidth: the width of this bus in bytes: 1, 2 or 4
80 */
81struct pl08x_bus_data {
82 dma_addr_t addr;
83 u8 maxwidth;
84 u8 buswidth;
85};
86
87/**
88 * struct pl08x_phy_chan - holder for the physical channels
89 * @id: physical index to this channel
90 * @lock: a lock to use when altering an instance of this struct
91 * @signal: the physical signal (aka channel) serving this physical channel
92 * right now
93 * @serving: the virtual channel currently being served by this physical
94 * channel
95 * @locked: channel unavailable for the system, e.g. dedicated to secure
96 * world
97 */
98struct pl08x_phy_chan {
99 unsigned int id;
100 void __iomem *base;
101 spinlock_t lock;
102 int signal;
103 struct pl08x_dma_chan *serving;
104 bool locked;
105};
106
107/**
108 * struct pl08x_sg - structure containing data per sg
109 * @src_addr: src address of sg
110 * @dst_addr: dst address of sg
111 * @len: transfer len in bytes
112 * @node: node for txd's dsg_list
113 */
114struct pl08x_sg {
115 dma_addr_t src_addr;
116 dma_addr_t dst_addr;
117 size_t len;
118 struct list_head node;
119};
120
121/**
122 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
123 * @tx: async tx descriptor
124 * @node: node for txd list for channels
125 * @dsg_list: list of children sg's
126 * @direction: direction of transfer
127 * @llis_bus: DMA memory address (physical) start for the LLIs
128 * @llis_va: virtual memory address start for the LLIs
129 * @cctl: control reg values for current txd
130 * @ccfg: config reg values for current txd
131 */
132struct pl08x_txd {
133 struct dma_async_tx_descriptor tx;
134 struct list_head node;
135 struct list_head dsg_list;
136 enum dma_transfer_direction direction;
137 dma_addr_t llis_bus;
138 struct pl08x_lli *llis_va;
139 /* Default cctl value for LLIs */
140 u32 cctl;
141 /*
142 * Settings to be put into the physical channel when we
143 * trigger this txd. Other registers are in llis_va[0].
144 */
145 u32 ccfg;
146};
147
148/**
149 * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
150 * states
151 * @PL08X_CHAN_IDLE: the channel is idle
152 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
153 * channel and is running a transfer on it
154 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
155 * channel, but the transfer is currently paused
156 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
157 * channel to become available (only pertains to memcpy channels)
158 */
159enum pl08x_dma_chan_state {
160 PL08X_CHAN_IDLE,
161 PL08X_CHAN_RUNNING,
162 PL08X_CHAN_PAUSED,
163 PL08X_CHAN_WAITING,
164};
165
166/**
167 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
168 * @chan: wrapped abstract channel
169 * @phychan: the physical channel utilized by this channel, if there is one
170 * @phychan_hold: if non-zero, hold on to the physical channel even if we
171 * have no pending entries
172 * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
173 * @name: name of channel
174 * @cd: channel platform data
175 * @runtime_addr: address for RX/TX according to the runtime config
176 * @runtime_direction: current direction of this channel according to
177 * runtime config
178 * @pend_list: queued transactions pending on this channel
179 * @at: active transaction on this channel
180 * @lock: a lock for this channel data
181 * @host: a pointer to the host (internal use)
182 * @state: whether the channel is idle, paused, running etc
183 * @slave: whether this channel is a device (slave) or for memcpy
184 * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave
185 * channels. Fill with 'true' if peripheral should be flow controller. Direction
186 * will be selected at Runtime.
187 * @waiting: a TX descriptor on this channel which is waiting for a physical
188 * channel to become available
189 */
190struct pl08x_dma_chan {
191 struct dma_chan chan;
192 struct pl08x_phy_chan *phychan;
193 int phychan_hold;
194 struct tasklet_struct tasklet;
195 char *name;
196 const struct pl08x_channel_data *cd;
197 dma_addr_t src_addr;
198 dma_addr_t dst_addr;
199 u32 src_cctl;
200 u32 dst_cctl;
201 enum dma_transfer_direction runtime_direction;
202 struct list_head pend_list;
203 struct pl08x_txd *at;
204 spinlock_t lock;
205 struct pl08x_driver_data *host;
206 enum pl08x_dma_chan_state state;
207 bool slave;
208 bool device_fc;
209 struct pl08x_txd *waiting;
210};
211
212/**
213 * struct pl08x_platform_data - the platform configuration for the PL08x 73 * struct pl08x_platform_data - the platform configuration for the PL08x
214 * PrimeCells. 74 * PrimeCells.
215 * @slave_channels: the channels defined for the different devices on the 75 * @slave_channels: the channels defined for the different devices on the
@@ -229,8 +89,8 @@ struct pl08x_platform_data {
229 const struct pl08x_channel_data *slave_channels; 89 const struct pl08x_channel_data *slave_channels;
230 unsigned int num_slave_channels; 90 unsigned int num_slave_channels;
231 struct pl08x_channel_data memcpy_channel; 91 struct pl08x_channel_data memcpy_channel;
232 int (*get_signal)(struct pl08x_dma_chan *); 92 int (*get_signal)(const struct pl08x_channel_data *);
233 void (*put_signal)(struct pl08x_dma_chan *); 93 void (*put_signal)(const struct pl08x_channel_data *, int);
234 u8 lli_buses; 94 u8 lli_buses;
235 u8 mem_buses; 95 u8 mem_buses;
236}; 96};
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
new file mode 100644
index 000000000000..eb475a8ea25b
--- /dev/null
+++ b/include/linux/omap-dma.h
@@ -0,0 +1,22 @@
1/*
2 * OMAP DMA Engine support
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#ifndef __LINUX_OMAP_DMA_H
9#define __LINUX_OMAP_DMA_H
10
11struct dma_chan;
12
13#if defined(CONFIG_DMA_OMAP) || defined(CONFIG_DMA_OMAP_MODULE)
14bool omap_dma_filter_fn(struct dma_chan *, void *);
15#else
16static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d)
17{
18 return false;
19}
20#endif
21
22#endif
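
The fallback stub above means a driver can call omap_dma_filter_fn() unconditionally: with CONFIG_DMA_OMAP disabled the filter rejects every channel, dma_request_channel() returns NULL, and the caller takes its PIO path. A hypothetical probe-time check built on that behaviour:

#include <linux/dmaengine.h>
#include <linux/omap-dma.h>

static bool my_dma_available(unsigned int sig)
{
	dma_cap_mask_t mask;
	struct dma_chan *c;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	c = dma_request_channel(mask, omap_dma_filter_fn, &sig);
	if (!c)
		return false;	/* no engine driver, or no free channel */

	dma_release_channel(c);	/* only probing; release immediately */
	return true;
}
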