81 files changed, 3706 insertions, 4229 deletions
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 3bb1d7589bd9..283fa1d804f4 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -24,9 +24,6 @@ config ARM_VIC_NR
 config ICST
 	bool
 
-config PL330
-	bool
-
 config SA1111
 	bool
 	select DMABOUNCE if !ARCH_PXA
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 69feafe7286c..215816f1775f 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -5,7 +5,6 @@
 obj-$(CONFIG_ARM_GIC) += gic.o
 obj-$(CONFIG_ARM_VIC) += vic.o
 obj-$(CONFIG_ICST) += icst.o
-obj-$(CONFIG_PL330) += pl330.o
 obj-$(CONFIG_SA1111) += sa1111.o
 obj-$(CONFIG_PCI_HOST_VIA82C505) += via82c505.o
 obj-$(CONFIG_DMABOUNCE) += dmabounce.o
diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c
deleted file mode 100644
index ff3ad2244824..000000000000
--- a/arch/arm/common/pl330.c
+++ /dev/null
@@ -1,1960 +0,0 @@
1 | /* linux/arch/arm/common/pl330.c | ||
2 | * | ||
3 | * Copyright (C) 2010 Samsung Electronics Co Ltd. | ||
4 | * Jaswinder Singh <jassi.brar@samsung.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
19 | */ | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/string.h> | ||
26 | #include <linux/io.h> | ||
27 | #include <linux/delay.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/dma-mapping.h> | ||
30 | |||
31 | #include <asm/hardware/pl330.h> | ||
32 | |||
33 | /* Register and Bit field Definitions */ | ||
34 | #define DS 0x0 | ||
35 | #define DS_ST_STOP 0x0 | ||
36 | #define DS_ST_EXEC 0x1 | ||
37 | #define DS_ST_CMISS 0x2 | ||
38 | #define DS_ST_UPDTPC 0x3 | ||
39 | #define DS_ST_WFE 0x4 | ||
40 | #define DS_ST_ATBRR 0x5 | ||
41 | #define DS_ST_QBUSY 0x6 | ||
42 | #define DS_ST_WFP 0x7 | ||
43 | #define DS_ST_KILL 0x8 | ||
44 | #define DS_ST_CMPLT 0x9 | ||
45 | #define DS_ST_FLTCMP 0xe | ||
46 | #define DS_ST_FAULT 0xf | ||
47 | |||
48 | #define DPC 0x4 | ||
49 | #define INTEN 0x20 | ||
50 | #define ES 0x24 | ||
51 | #define INTSTATUS 0x28 | ||
52 | #define INTCLR 0x2c | ||
53 | #define FSM 0x30 | ||
54 | #define FSC 0x34 | ||
55 | #define FTM 0x38 | ||
56 | |||
57 | #define _FTC 0x40 | ||
58 | #define FTC(n) (_FTC + (n)*0x4) | ||
59 | |||
60 | #define _CS 0x100 | ||
61 | #define CS(n) (_CS + (n)*0x8) | ||
62 | #define CS_CNS (1 << 21) | ||
63 | |||
64 | #define _CPC 0x104 | ||
65 | #define CPC(n) (_CPC + (n)*0x8) | ||
66 | |||
67 | #define _SA 0x400 | ||
68 | #define SA(n) (_SA + (n)*0x20) | ||
69 | |||
70 | #define _DA 0x404 | ||
71 | #define DA(n) (_DA + (n)*0x20) | ||
72 | |||
73 | #define _CC 0x408 | ||
74 | #define CC(n) (_CC + (n)*0x20) | ||
75 | |||
76 | #define CC_SRCINC (1 << 0) | ||
77 | #define CC_DSTINC (1 << 14) | ||
78 | #define CC_SRCPRI (1 << 8) | ||
79 | #define CC_DSTPRI (1 << 22) | ||
80 | #define CC_SRCNS (1 << 9) | ||
81 | #define CC_DSTNS (1 << 23) | ||
82 | #define CC_SRCIA (1 << 10) | ||
83 | #define CC_DSTIA (1 << 24) | ||
84 | #define CC_SRCBRSTLEN_SHFT 4 | ||
85 | #define CC_DSTBRSTLEN_SHFT 18 | ||
86 | #define CC_SRCBRSTSIZE_SHFT 1 | ||
87 | #define CC_DSTBRSTSIZE_SHFT 15 | ||
88 | #define CC_SRCCCTRL_SHFT 11 | ||
89 | #define CC_SRCCCTRL_MASK 0x7 | ||
90 | #define CC_DSTCCTRL_SHFT 25 | ||
91 | #define CC_DRCCCTRL_MASK 0x7 | ||
92 | #define CC_SWAP_SHFT 28 | ||
93 | |||
94 | #define _LC0 0x40c | ||
95 | #define LC0(n) (_LC0 + (n)*0x20) | ||
96 | |||
97 | #define _LC1 0x410 | ||
98 | #define LC1(n) (_LC1 + (n)*0x20) | ||
99 | |||
100 | #define DBGSTATUS 0xd00 | ||
101 | #define DBG_BUSY (1 << 0) | ||
102 | |||
103 | #define DBGCMD 0xd04 | ||
104 | #define DBGINST0 0xd08 | ||
105 | #define DBGINST1 0xd0c | ||
106 | |||
107 | #define CR0 0xe00 | ||
108 | #define CR1 0xe04 | ||
109 | #define CR2 0xe08 | ||
110 | #define CR3 0xe0c | ||
111 | #define CR4 0xe10 | ||
112 | #define CRD 0xe14 | ||
113 | |||
114 | #define PERIPH_ID 0xfe0 | ||
115 | #define PCELL_ID 0xff0 | ||
116 | |||
117 | #define CR0_PERIPH_REQ_SET (1 << 0) | ||
118 | #define CR0_BOOT_EN_SET (1 << 1) | ||
119 | #define CR0_BOOT_MAN_NS (1 << 2) | ||
120 | #define CR0_NUM_CHANS_SHIFT 4 | ||
121 | #define CR0_NUM_CHANS_MASK 0x7 | ||
122 | #define CR0_NUM_PERIPH_SHIFT 12 | ||
123 | #define CR0_NUM_PERIPH_MASK 0x1f | ||
124 | #define CR0_NUM_EVENTS_SHIFT 17 | ||
125 | #define CR0_NUM_EVENTS_MASK 0x1f | ||
126 | |||
127 | #define CR1_ICACHE_LEN_SHIFT 0 | ||
128 | #define CR1_ICACHE_LEN_MASK 0x7 | ||
129 | #define CR1_NUM_ICACHELINES_SHIFT 4 | ||
130 | #define CR1_NUM_ICACHELINES_MASK 0xf | ||
131 | |||
132 | #define CRD_DATA_WIDTH_SHIFT 0 | ||
133 | #define CRD_DATA_WIDTH_MASK 0x7 | ||
134 | #define CRD_WR_CAP_SHIFT 4 | ||
135 | #define CRD_WR_CAP_MASK 0x7 | ||
136 | #define CRD_WR_Q_DEP_SHIFT 8 | ||
137 | #define CRD_WR_Q_DEP_MASK 0xf | ||
138 | #define CRD_RD_CAP_SHIFT 12 | ||
139 | #define CRD_RD_CAP_MASK 0x7 | ||
140 | #define CRD_RD_Q_DEP_SHIFT 16 | ||
141 | #define CRD_RD_Q_DEP_MASK 0xf | ||
142 | #define CRD_DATA_BUFF_SHIFT 20 | ||
143 | #define CRD_DATA_BUFF_MASK 0x3ff | ||
144 | |||
145 | #define PART 0x330 | ||
146 | #define DESIGNER 0x41 | ||
147 | #define REVISION 0x0 | ||
148 | #define INTEG_CFG 0x0 | ||
149 | #define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12)) | ||
150 | |||
151 | #define PCELL_ID_VAL 0xb105f00d | ||
152 | |||
153 | #define PL330_STATE_STOPPED (1 << 0) | ||
154 | #define PL330_STATE_EXECUTING (1 << 1) | ||
155 | #define PL330_STATE_WFE (1 << 2) | ||
156 | #define PL330_STATE_FAULTING (1 << 3) | ||
157 | #define PL330_STATE_COMPLETING (1 << 4) | ||
158 | #define PL330_STATE_WFP (1 << 5) | ||
159 | #define PL330_STATE_KILLING (1 << 6) | ||
160 | #define PL330_STATE_FAULT_COMPLETING (1 << 7) | ||
161 | #define PL330_STATE_CACHEMISS (1 << 8) | ||
162 | #define PL330_STATE_UPDTPC (1 << 9) | ||
163 | #define PL330_STATE_ATBARRIER (1 << 10) | ||
164 | #define PL330_STATE_QUEUEBUSY (1 << 11) | ||
165 | #define PL330_STATE_INVALID (1 << 15) | ||
166 | |||
167 | #define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \ | ||
168 | | PL330_STATE_WFE | PL330_STATE_FAULTING) | ||
169 | |||
170 | #define CMD_DMAADDH 0x54 | ||
171 | #define CMD_DMAEND 0x00 | ||
172 | #define CMD_DMAFLUSHP 0x35 | ||
173 | #define CMD_DMAGO 0xa0 | ||
174 | #define CMD_DMALD 0x04 | ||
175 | #define CMD_DMALDP 0x25 | ||
176 | #define CMD_DMALP 0x20 | ||
177 | #define CMD_DMALPEND 0x28 | ||
178 | #define CMD_DMAKILL 0x01 | ||
179 | #define CMD_DMAMOV 0xbc | ||
180 | #define CMD_DMANOP 0x18 | ||
181 | #define CMD_DMARMB 0x12 | ||
182 | #define CMD_DMASEV 0x34 | ||
183 | #define CMD_DMAST 0x08 | ||
184 | #define CMD_DMASTP 0x29 | ||
185 | #define CMD_DMASTZ 0x0c | ||
186 | #define CMD_DMAWFE 0x36 | ||
187 | #define CMD_DMAWFP 0x30 | ||
188 | #define CMD_DMAWMB 0x13 | ||
189 | |||
190 | #define SZ_DMAADDH 3 | ||
191 | #define SZ_DMAEND 1 | ||
192 | #define SZ_DMAFLUSHP 2 | ||
193 | #define SZ_DMALD 1 | ||
194 | #define SZ_DMALDP 2 | ||
195 | #define SZ_DMALP 2 | ||
196 | #define SZ_DMALPEND 2 | ||
197 | #define SZ_DMAKILL 1 | ||
198 | #define SZ_DMAMOV 6 | ||
199 | #define SZ_DMANOP 1 | ||
200 | #define SZ_DMARMB 1 | ||
201 | #define SZ_DMASEV 2 | ||
202 | #define SZ_DMAST 1 | ||
203 | #define SZ_DMASTP 2 | ||
204 | #define SZ_DMASTZ 1 | ||
205 | #define SZ_DMAWFE 2 | ||
206 | #define SZ_DMAWFP 2 | ||
207 | #define SZ_DMAWMB 1 | ||
208 | #define SZ_DMAGO 6 | ||
209 | |||
210 | #define BRST_LEN(ccr) ((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1) | ||
211 | #define BRST_SIZE(ccr) (1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7)) | ||
212 | |||
213 | #define BYTE_TO_BURST(b, ccr) ((b) / BRST_SIZE(ccr) / BRST_LEN(ccr)) | ||
214 | #define BURST_TO_BYTE(c, ccr) ((c) * BRST_SIZE(ccr) * BRST_LEN(ccr)) | ||
215 | |||
216 | /* | ||
217 | * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req | ||
218 | * at 1byte/burst for P<->M and M<->M respectively. | ||
219 | * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req | ||
220 | * should be enough for P<->M and M<->M respectively. | ||
221 | */ | ||
222 | #define MCODE_BUFF_PER_REQ 256 | ||
223 | |||
224 | /* If the _pl330_req is available to the client */ | ||
225 | #define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND) | ||
226 | |||
227 | /* Use this _only_ to wait on transient states */ | ||
228 | #define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax(); | ||
229 | |||
230 | #ifdef PL330_DEBUG_MCGEN | ||
231 | static unsigned cmd_line; | ||
232 | #define PL330_DBGCMD_DUMP(off, x...) do { \ | ||
233 | printk("%x:", cmd_line); \ | ||
234 | printk(x); \ | ||
235 | cmd_line += off; \ | ||
236 | } while (0) | ||
237 | #define PL330_DBGMC_START(addr) (cmd_line = addr) | ||
238 | #else | ||
239 | #define PL330_DBGCMD_DUMP(off, x...) do {} while (0) | ||
240 | #define PL330_DBGMC_START(addr) do {} while (0) | ||
241 | #endif | ||
242 | |||
243 | struct _xfer_spec { | ||
244 | u32 ccr; | ||
245 | struct pl330_req *r; | ||
246 | struct pl330_xfer *x; | ||
247 | }; | ||
248 | |||
249 | enum dmamov_dst { | ||
250 | SAR = 0, | ||
251 | CCR, | ||
252 | DAR, | ||
253 | }; | ||
254 | |||
255 | enum pl330_dst { | ||
256 | SRC = 0, | ||
257 | DST, | ||
258 | }; | ||
259 | |||
260 | enum pl330_cond { | ||
261 | SINGLE, | ||
262 | BURST, | ||
263 | ALWAYS, | ||
264 | }; | ||
265 | |||
266 | struct _pl330_req { | ||
267 | u32 mc_bus; | ||
268 | void *mc_cpu; | ||
269 | /* Number of bytes taken to setup MC for the req */ | ||
270 | u32 mc_len; | ||
271 | struct pl330_req *r; | ||
272 | /* Hook to attach to DMAC's list of reqs with due callback */ | ||
273 | struct list_head rqd; | ||
274 | }; | ||
275 | |||
276 | /* ToBeDone for tasklet */ | ||
277 | struct _pl330_tbd { | ||
278 | bool reset_dmac; | ||
279 | bool reset_mngr; | ||
280 | u8 reset_chan; | ||
281 | }; | ||
282 | |||
283 | /* A DMAC Thread */ | ||
284 | struct pl330_thread { | ||
285 | u8 id; | ||
286 | int ev; | ||
287 | /* If the channel is not yet acquired by any client */ | ||
288 | bool free; | ||
289 | /* Parent DMAC */ | ||
290 | struct pl330_dmac *dmac; | ||
291 | /* Only two at a time */ | ||
292 | struct _pl330_req req[2]; | ||
293 | /* Index of the last enqueued request */ | ||
294 | unsigned lstenq; | ||
295 | /* Index of the last submitted request or -1 if the DMA is stopped */ | ||
296 | int req_running; | ||
297 | }; | ||
298 | |||
299 | enum pl330_dmac_state { | ||
300 | UNINIT, | ||
301 | INIT, | ||
302 | DYING, | ||
303 | }; | ||
304 | |||
305 | /* A DMAC */ | ||
306 | struct pl330_dmac { | ||
307 | spinlock_t lock; | ||
308 | /* Holds list of reqs with due callbacks */ | ||
309 | struct list_head req_done; | ||
310 | /* Pointer to platform specific stuff */ | ||
311 | struct pl330_info *pinfo; | ||
312 | /* Maximum possible events/irqs */ | ||
313 | int events[32]; | ||
314 | /* BUS address of MicroCode buffer */ | ||
315 | u32 mcode_bus; | ||
316 | /* CPU address of MicroCode buffer */ | ||
317 | void *mcode_cpu; | ||
318 | /* List of all Channel threads */ | ||
319 | struct pl330_thread *channels; | ||
320 | /* Pointer to the MANAGER thread */ | ||
321 | struct pl330_thread *manager; | ||
322 | /* To handle bad news in interrupt */ | ||
323 | struct tasklet_struct tasks; | ||
324 | struct _pl330_tbd dmac_tbd; | ||
325 | /* State of DMAC operation */ | ||
326 | enum pl330_dmac_state state; | ||
327 | }; | ||
328 | |||
329 | static inline void _callback(struct pl330_req *r, enum pl330_op_err err) | ||
330 | { | ||
331 | if (r && r->xfer_cb) | ||
332 | r->xfer_cb(r->token, err); | ||
333 | } | ||
334 | |||
335 | static inline bool _queue_empty(struct pl330_thread *thrd) | ||
336 | { | ||
337 | return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1])) | ||
338 | ? true : false; | ||
339 | } | ||
340 | |||
341 | static inline bool _queue_full(struct pl330_thread *thrd) | ||
342 | { | ||
343 | return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1])) | ||
344 | ? false : true; | ||
345 | } | ||
346 | |||
347 | static inline bool is_manager(struct pl330_thread *thrd) | ||
348 | { | ||
349 | struct pl330_dmac *pl330 = thrd->dmac; | ||
350 | |||
351 | /* MANAGER is indexed at the end */ | ||
352 | if (thrd->id == pl330->pinfo->pcfg.num_chan) | ||
353 | return true; | ||
354 | else | ||
355 | return false; | ||
356 | } | ||
357 | |||
358 | /* If manager of the thread is in Non-Secure mode */ | ||
359 | static inline bool _manager_ns(struct pl330_thread *thrd) | ||
360 | { | ||
361 | struct pl330_dmac *pl330 = thrd->dmac; | ||
362 | |||
363 | return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false; | ||
364 | } | ||
365 | |||
366 | static inline u32 get_id(struct pl330_info *pi, u32 off) | ||
367 | { | ||
368 | void __iomem *regs = pi->base; | ||
369 | u32 id = 0; | ||
370 | |||
371 | id |= (readb(regs + off + 0x0) << 0); | ||
372 | id |= (readb(regs + off + 0x4) << 8); | ||
373 | id |= (readb(regs + off + 0x8) << 16); | ||
374 | id |= (readb(regs + off + 0xc) << 24); | ||
375 | |||
376 | return id; | ||
377 | } | ||
378 | |||
379 | static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[], | ||
380 | enum pl330_dst da, u16 val) | ||
381 | { | ||
382 | if (dry_run) | ||
383 | return SZ_DMAADDH; | ||
384 | |||
385 | buf[0] = CMD_DMAADDH; | ||
386 | buf[0] |= (da << 1); | ||
387 | *((u16 *)&buf[1]) = val; | ||
388 | |||
389 | PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n", | ||
390 | da == 1 ? "DA" : "SA", val); | ||
391 | |||
392 | return SZ_DMAADDH; | ||
393 | } | ||
394 | |||
395 | static inline u32 _emit_END(unsigned dry_run, u8 buf[]) | ||
396 | { | ||
397 | if (dry_run) | ||
398 | return SZ_DMAEND; | ||
399 | |||
400 | buf[0] = CMD_DMAEND; | ||
401 | |||
402 | PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n"); | ||
403 | |||
404 | return SZ_DMAEND; | ||
405 | } | ||
406 | |||
407 | static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri) | ||
408 | { | ||
409 | if (dry_run) | ||
410 | return SZ_DMAFLUSHP; | ||
411 | |||
412 | buf[0] = CMD_DMAFLUSHP; | ||
413 | |||
414 | peri &= 0x1f; | ||
415 | peri <<= 3; | ||
416 | buf[1] = peri; | ||
417 | |||
418 | PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3); | ||
419 | |||
420 | return SZ_DMAFLUSHP; | ||
421 | } | ||
422 | |||
423 | static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond) | ||
424 | { | ||
425 | if (dry_run) | ||
426 | return SZ_DMALD; | ||
427 | |||
428 | buf[0] = CMD_DMALD; | ||
429 | |||
430 | if (cond == SINGLE) | ||
431 | buf[0] |= (0 << 1) | (1 << 0); | ||
432 | else if (cond == BURST) | ||
433 | buf[0] |= (1 << 1) | (1 << 0); | ||
434 | |||
435 | PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n", | ||
436 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A')); | ||
437 | |||
438 | return SZ_DMALD; | ||
439 | } | ||
440 | |||
441 | static inline u32 _emit_LDP(unsigned dry_run, u8 buf[], | ||
442 | enum pl330_cond cond, u8 peri) | ||
443 | { | ||
444 | if (dry_run) | ||
445 | return SZ_DMALDP; | ||
446 | |||
447 | buf[0] = CMD_DMALDP; | ||
448 | |||
449 | if (cond == BURST) | ||
450 | buf[0] |= (1 << 1); | ||
451 | |||
452 | peri &= 0x1f; | ||
453 | peri <<= 3; | ||
454 | buf[1] = peri; | ||
455 | |||
456 | PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n", | ||
457 | cond == SINGLE ? 'S' : 'B', peri >> 3); | ||
458 | |||
459 | return SZ_DMALDP; | ||
460 | } | ||
461 | |||
462 | static inline u32 _emit_LP(unsigned dry_run, u8 buf[], | ||
463 | unsigned loop, u8 cnt) | ||
464 | { | ||
465 | if (dry_run) | ||
466 | return SZ_DMALP; | ||
467 | |||
468 | buf[0] = CMD_DMALP; | ||
469 | |||
470 | if (loop) | ||
471 | buf[0] |= (1 << 1); | ||
472 | |||
473 | cnt--; /* DMAC increments by 1 internally */ | ||
474 | buf[1] = cnt; | ||
475 | |||
476 | PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt); | ||
477 | |||
478 | return SZ_DMALP; | ||
479 | } | ||
480 | |||
481 | struct _arg_LPEND { | ||
482 | enum pl330_cond cond; | ||
483 | bool forever; | ||
484 | unsigned loop; | ||
485 | u8 bjump; | ||
486 | }; | ||
487 | |||
488 | static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[], | ||
489 | const struct _arg_LPEND *arg) | ||
490 | { | ||
491 | enum pl330_cond cond = arg->cond; | ||
492 | bool forever = arg->forever; | ||
493 | unsigned loop = arg->loop; | ||
494 | u8 bjump = arg->bjump; | ||
495 | |||
496 | if (dry_run) | ||
497 | return SZ_DMALPEND; | ||
498 | |||
499 | buf[0] = CMD_DMALPEND; | ||
500 | |||
501 | if (loop) | ||
502 | buf[0] |= (1 << 2); | ||
503 | |||
504 | if (!forever) | ||
505 | buf[0] |= (1 << 4); | ||
506 | |||
507 | if (cond == SINGLE) | ||
508 | buf[0] |= (0 << 1) | (1 << 0); | ||
509 | else if (cond == BURST) | ||
510 | buf[0] |= (1 << 1) | (1 << 0); | ||
511 | |||
512 | buf[1] = bjump; | ||
513 | |||
514 | PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n", | ||
515 | forever ? "FE" : "END", | ||
516 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'), | ||
517 | loop ? '1' : '0', | ||
518 | bjump); | ||
519 | |||
520 | return SZ_DMALPEND; | ||
521 | } | ||
522 | |||
523 | static inline u32 _emit_KILL(unsigned dry_run, u8 buf[]) | ||
524 | { | ||
525 | if (dry_run) | ||
526 | return SZ_DMAKILL; | ||
527 | |||
528 | buf[0] = CMD_DMAKILL; | ||
529 | |||
530 | return SZ_DMAKILL; | ||
531 | } | ||
532 | |||
533 | static inline u32 _emit_MOV(unsigned dry_run, u8 buf[], | ||
534 | enum dmamov_dst dst, u32 val) | ||
535 | { | ||
536 | if (dry_run) | ||
537 | return SZ_DMAMOV; | ||
538 | |||
539 | buf[0] = CMD_DMAMOV; | ||
540 | buf[1] = dst; | ||
541 | *((u32 *)&buf[2]) = val; | ||
542 | |||
543 | PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n", | ||
544 | dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val); | ||
545 | |||
546 | return SZ_DMAMOV; | ||
547 | } | ||
548 | |||
549 | static inline u32 _emit_NOP(unsigned dry_run, u8 buf[]) | ||
550 | { | ||
551 | if (dry_run) | ||
552 | return SZ_DMANOP; | ||
553 | |||
554 | buf[0] = CMD_DMANOP; | ||
555 | |||
556 | PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n"); | ||
557 | |||
558 | return SZ_DMANOP; | ||
559 | } | ||
560 | |||
561 | static inline u32 _emit_RMB(unsigned dry_run, u8 buf[]) | ||
562 | { | ||
563 | if (dry_run) | ||
564 | return SZ_DMARMB; | ||
565 | |||
566 | buf[0] = CMD_DMARMB; | ||
567 | |||
568 | PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n"); | ||
569 | |||
570 | return SZ_DMARMB; | ||
571 | } | ||
572 | |||
573 | static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev) | ||
574 | { | ||
575 | if (dry_run) | ||
576 | return SZ_DMASEV; | ||
577 | |||
578 | buf[0] = CMD_DMASEV; | ||
579 | |||
580 | ev &= 0x1f; | ||
581 | ev <<= 3; | ||
582 | buf[1] = ev; | ||
583 | |||
584 | PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3); | ||
585 | |||
586 | return SZ_DMASEV; | ||
587 | } | ||
588 | |||
589 | static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond) | ||
590 | { | ||
591 | if (dry_run) | ||
592 | return SZ_DMAST; | ||
593 | |||
594 | buf[0] = CMD_DMAST; | ||
595 | |||
596 | if (cond == SINGLE) | ||
597 | buf[0] |= (0 << 1) | (1 << 0); | ||
598 | else if (cond == BURST) | ||
599 | buf[0] |= (1 << 1) | (1 << 0); | ||
600 | |||
601 | PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n", | ||
602 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A')); | ||
603 | |||
604 | return SZ_DMAST; | ||
605 | } | ||
606 | |||
607 | static inline u32 _emit_STP(unsigned dry_run, u8 buf[], | ||
608 | enum pl330_cond cond, u8 peri) | ||
609 | { | ||
610 | if (dry_run) | ||
611 | return SZ_DMASTP; | ||
612 | |||
613 | buf[0] = CMD_DMASTP; | ||
614 | |||
615 | if (cond == BURST) | ||
616 | buf[0] |= (1 << 1); | ||
617 | |||
618 | peri &= 0x1f; | ||
619 | peri <<= 3; | ||
620 | buf[1] = peri; | ||
621 | |||
622 | PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n", | ||
623 | cond == SINGLE ? 'S' : 'B', peri >> 3); | ||
624 | |||
625 | return SZ_DMASTP; | ||
626 | } | ||
627 | |||
628 | static inline u32 _emit_STZ(unsigned dry_run, u8 buf[]) | ||
629 | { | ||
630 | if (dry_run) | ||
631 | return SZ_DMASTZ; | ||
632 | |||
633 | buf[0] = CMD_DMASTZ; | ||
634 | |||
635 | PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n"); | ||
636 | |||
637 | return SZ_DMASTZ; | ||
638 | } | ||
639 | |||
640 | static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev, | ||
641 | unsigned invalidate) | ||
642 | { | ||
643 | if (dry_run) | ||
644 | return SZ_DMAWFE; | ||
645 | |||
646 | buf[0] = CMD_DMAWFE; | ||
647 | |||
648 | ev &= 0x1f; | ||
649 | ev <<= 3; | ||
650 | buf[1] = ev; | ||
651 | |||
652 | if (invalidate) | ||
653 | buf[1] |= (1 << 1); | ||
654 | |||
655 | PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n", | ||
656 | ev >> 3, invalidate ? ", I" : ""); | ||
657 | |||
658 | return SZ_DMAWFE; | ||
659 | } | ||
660 | |||
661 | static inline u32 _emit_WFP(unsigned dry_run, u8 buf[], | ||
662 | enum pl330_cond cond, u8 peri) | ||
663 | { | ||
664 | if (dry_run) | ||
665 | return SZ_DMAWFP; | ||
666 | |||
667 | buf[0] = CMD_DMAWFP; | ||
668 | |||
669 | if (cond == SINGLE) | ||
670 | buf[0] |= (0 << 1) | (0 << 0); | ||
671 | else if (cond == BURST) | ||
672 | buf[0] |= (1 << 1) | (0 << 0); | ||
673 | else | ||
674 | buf[0] |= (0 << 1) | (1 << 0); | ||
675 | |||
676 | peri &= 0x1f; | ||
677 | peri <<= 3; | ||
678 | buf[1] = peri; | ||
679 | |||
680 | PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n", | ||
681 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3); | ||
682 | |||
683 | return SZ_DMAWFP; | ||
684 | } | ||
685 | |||
686 | static inline u32 _emit_WMB(unsigned dry_run, u8 buf[]) | ||
687 | { | ||
688 | if (dry_run) | ||
689 | return SZ_DMAWMB; | ||
690 | |||
691 | buf[0] = CMD_DMAWMB; | ||
692 | |||
693 | PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n"); | ||
694 | |||
695 | return SZ_DMAWMB; | ||
696 | } | ||
697 | |||
698 | struct _arg_GO { | ||
699 | u8 chan; | ||
700 | u32 addr; | ||
701 | unsigned ns; | ||
702 | }; | ||
703 | |||
704 | static inline u32 _emit_GO(unsigned dry_run, u8 buf[], | ||
705 | const struct _arg_GO *arg) | ||
706 | { | ||
707 | u8 chan = arg->chan; | ||
708 | u32 addr = arg->addr; | ||
709 | unsigned ns = arg->ns; | ||
710 | |||
711 | if (dry_run) | ||
712 | return SZ_DMAGO; | ||
713 | |||
714 | buf[0] = CMD_DMAGO; | ||
715 | buf[0] |= (ns << 1); | ||
716 | |||
717 | buf[1] = chan & 0x7; | ||
718 | |||
719 | *((u32 *)&buf[2]) = addr; | ||
720 | |||
721 | return SZ_DMAGO; | ||
722 | } | ||
723 | |||
724 | #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) | ||
725 | |||
726 | /* Returns Time-Out */ | ||
727 | static bool _until_dmac_idle(struct pl330_thread *thrd) | ||
728 | { | ||
729 | void __iomem *regs = thrd->dmac->pinfo->base; | ||
730 | unsigned long loops = msecs_to_loops(5); | ||
731 | |||
732 | do { | ||
733 | /* Until Manager is Idle */ | ||
734 | if (!(readl(regs + DBGSTATUS) & DBG_BUSY)) | ||
735 | break; | ||
736 | |||
737 | cpu_relax(); | ||
738 | } while (--loops); | ||
739 | |||
740 | if (!loops) | ||
741 | return true; | ||
742 | |||
743 | return false; | ||
744 | } | ||
745 | |||
746 | static inline void _execute_DBGINSN(struct pl330_thread *thrd, | ||
747 | u8 insn[], bool as_manager) | ||
748 | { | ||
749 | void __iomem *regs = thrd->dmac->pinfo->base; | ||
750 | u32 val; | ||
751 | |||
752 | val = (insn[0] << 16) | (insn[1] << 24); | ||
753 | if (!as_manager) { | ||
754 | val |= (1 << 0); | ||
755 | val |= (thrd->id << 8); /* Channel Number */ | ||
756 | } | ||
757 | writel(val, regs + DBGINST0); | ||
758 | |||
759 | val = *((u32 *)&insn[2]); | ||
760 | writel(val, regs + DBGINST1); | ||
761 | |||
762 | /* If timed out due to halted state-machine */ | ||
763 | if (_until_dmac_idle(thrd)) { | ||
764 | dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n"); | ||
765 | return; | ||
766 | } | ||
767 | |||
768 | /* Get going */ | ||
769 | writel(0, regs + DBGCMD); | ||
770 | } | ||
771 | |||
772 | /* | ||
773 | * Mark a _pl330_req as free. | ||
774 | * We do it by writing DMAEND as the first instruction | ||
775 | * because no valid request is going to have DMAEND as | ||
776 | * its first instruction to execute. | ||
777 | */ | ||
778 | static void mark_free(struct pl330_thread *thrd, int idx) | ||
779 | { | ||
780 | struct _pl330_req *req = &thrd->req[idx]; | ||
781 | |||
782 | _emit_END(0, req->mc_cpu); | ||
783 | req->mc_len = 0; | ||
784 | |||
785 | thrd->req_running = -1; | ||
786 | } | ||
787 | |||
788 | static inline u32 _state(struct pl330_thread *thrd) | ||
789 | { | ||
790 | void __iomem *regs = thrd->dmac->pinfo->base; | ||
791 | u32 val; | ||
792 | |||
793 | if (is_manager(thrd)) | ||
794 | val = readl(regs + DS) & 0xf; | ||
795 | else | ||
796 | val = readl(regs + CS(thrd->id)) & 0xf; | ||
797 | |||
798 | switch (val) { | ||
799 | case DS_ST_STOP: | ||
800 | return PL330_STATE_STOPPED; | ||
801 | case DS_ST_EXEC: | ||
802 | return PL330_STATE_EXECUTING; | ||
803 | case DS_ST_CMISS: | ||
804 | return PL330_STATE_CACHEMISS; | ||
805 | case DS_ST_UPDTPC: | ||
806 | return PL330_STATE_UPDTPC; | ||
807 | case DS_ST_WFE: | ||
808 | return PL330_STATE_WFE; | ||
809 | case DS_ST_FAULT: | ||
810 | return PL330_STATE_FAULTING; | ||
811 | case DS_ST_ATBRR: | ||
812 | if (is_manager(thrd)) | ||
813 | return PL330_STATE_INVALID; | ||
814 | else | ||
815 | return PL330_STATE_ATBARRIER; | ||
816 | case DS_ST_QBUSY: | ||
817 | if (is_manager(thrd)) | ||
818 | return PL330_STATE_INVALID; | ||
819 | else | ||
820 | return PL330_STATE_QUEUEBUSY; | ||
821 | case DS_ST_WFP: | ||
822 | if (is_manager(thrd)) | ||
823 | return PL330_STATE_INVALID; | ||
824 | else | ||
825 | return PL330_STATE_WFP; | ||
826 | case DS_ST_KILL: | ||
827 | if (is_manager(thrd)) | ||
828 | return PL330_STATE_INVALID; | ||
829 | else | ||
830 | return PL330_STATE_KILLING; | ||
831 | case DS_ST_CMPLT: | ||
832 | if (is_manager(thrd)) | ||
833 | return PL330_STATE_INVALID; | ||
834 | else | ||
835 | return PL330_STATE_COMPLETING; | ||
836 | case DS_ST_FLTCMP: | ||
837 | if (is_manager(thrd)) | ||
838 | return PL330_STATE_INVALID; | ||
839 | else | ||
840 | return PL330_STATE_FAULT_COMPLETING; | ||
841 | default: | ||
842 | return PL330_STATE_INVALID; | ||
843 | } | ||
844 | } | ||
845 | |||
846 | static void _stop(struct pl330_thread *thrd) | ||
847 | { | ||
848 | void __iomem *regs = thrd->dmac->pinfo->base; | ||
849 | u8 insn[6] = {0, 0, 0, 0, 0, 0}; | ||
850 | |||
851 | if (_state(thrd) == PL330_STATE_FAULT_COMPLETING) | ||
852 | UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING); | ||
853 | |||
854 | /* Return if nothing needs to be done */ | ||
855 | if (_state(thrd) == PL330_STATE_COMPLETING | ||
856 | || _state(thrd) == PL330_STATE_KILLING | ||
857 | || _state(thrd) == PL330_STATE_STOPPED) | ||
858 | return; | ||
859 | |||
860 | _emit_KILL(0, insn); | ||
861 | |||
862 | /* Stop generating interrupts for SEV */ | ||
863 | writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN); | ||
864 | |||
865 | _execute_DBGINSN(thrd, insn, is_manager(thrd)); | ||
866 | } | ||
867 | |||
868 | /* Start doing req 'idx' of thread 'thrd' */ | ||
869 | static bool _trigger(struct pl330_thread *thrd) | ||
870 | { | ||
871 | void __iomem *regs = thrd->dmac->pinfo->base; | ||
872 | struct _pl330_req *req; | ||
873 | struct pl330_req *r; | ||
874 | struct _arg_GO go; | ||
875 | unsigned ns; | ||
876 | u8 insn[6] = {0, 0, 0, 0, 0, 0}; | ||
877 | int idx; | ||
878 | |||
879 | /* Return if already ACTIVE */ | ||
880 | if (_state(thrd) != PL330_STATE_STOPPED) | ||
881 | return true; | ||
882 | |||
883 | idx = 1 - thrd->lstenq; | ||
884 | if (!IS_FREE(&thrd->req[idx])) | ||
885 | req = &thrd->req[idx]; | ||
886 | else { | ||
887 | idx = thrd->lstenq; | ||
888 | if (!IS_FREE(&thrd->req[idx])) | ||
889 | req = &thrd->req[idx]; | ||
890 | else | ||
891 | req = NULL; | ||
892 | } | ||
893 | |||
894 | /* Return if no request */ | ||
895 | if (!req || !req->r) | ||
896 | return true; | ||
897 | |||
898 | r = req->r; | ||
899 | |||
900 | if (r->cfg) | ||
901 | ns = r->cfg->nonsecure ? 1 : 0; | ||
902 | else if (readl(regs + CS(thrd->id)) & CS_CNS) | ||
903 | ns = 1; | ||
904 | else | ||
905 | ns = 0; | ||
906 | |||
907 | /* See 'Abort Sources' point-4 at Page 2-25 */ | ||
908 | if (_manager_ns(thrd) && !ns) | ||
909 | dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n", | ||
910 | __func__, __LINE__); | ||
911 | |||
912 | go.chan = thrd->id; | ||
913 | go.addr = req->mc_bus; | ||
914 | go.ns = ns; | ||
915 | _emit_GO(0, insn, &go); | ||
916 | |||
917 | /* Set to generate interrupts for SEV */ | ||
918 | writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN); | ||
919 | |||
920 | /* Only manager can execute GO */ | ||
921 | _execute_DBGINSN(thrd, insn, true); | ||
922 | |||
923 | thrd->req_running = idx; | ||
924 | |||
925 | return true; | ||
926 | } | ||
927 | |||
928 | static bool _start(struct pl330_thread *thrd) | ||
929 | { | ||
930 | switch (_state(thrd)) { | ||
931 | case PL330_STATE_FAULT_COMPLETING: | ||
932 | UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING); | ||
933 | |||
934 | if (_state(thrd) == PL330_STATE_KILLING) | ||
935 | UNTIL(thrd, PL330_STATE_STOPPED) | ||
936 | |||
937 | case PL330_STATE_FAULTING: | ||
938 | _stop(thrd); | ||
939 | |||
940 | case PL330_STATE_KILLING: | ||
941 | case PL330_STATE_COMPLETING: | ||
942 | UNTIL(thrd, PL330_STATE_STOPPED) | ||
943 | |||
944 | case PL330_STATE_STOPPED: | ||
945 | return _trigger(thrd); | ||
946 | |||
947 | case PL330_STATE_WFP: | ||
948 | case PL330_STATE_QUEUEBUSY: | ||
949 | case PL330_STATE_ATBARRIER: | ||
950 | case PL330_STATE_UPDTPC: | ||
951 | case PL330_STATE_CACHEMISS: | ||
952 | case PL330_STATE_EXECUTING: | ||
953 | return true; | ||
954 | |||
955 | case PL330_STATE_WFE: /* For RESUME, nothing yet */ | ||
956 | default: | ||
957 | return false; | ||
958 | } | ||
959 | } | ||
960 | |||
961 | static inline int _ldst_memtomem(unsigned dry_run, u8 buf[], | ||
962 | const struct _xfer_spec *pxs, int cyc) | ||
963 | { | ||
964 | int off = 0; | ||
965 | |||
966 | while (cyc--) { | ||
967 | off += _emit_LD(dry_run, &buf[off], ALWAYS); | ||
968 | off += _emit_RMB(dry_run, &buf[off]); | ||
969 | off += _emit_ST(dry_run, &buf[off], ALWAYS); | ||
970 | off += _emit_WMB(dry_run, &buf[off]); | ||
971 | } | ||
972 | |||
973 | return off; | ||
974 | } | ||
975 | |||
976 | static inline int _ldst_devtomem(unsigned dry_run, u8 buf[], | ||
977 | const struct _xfer_spec *pxs, int cyc) | ||
978 | { | ||
979 | int off = 0; | ||
980 | |||
981 | while (cyc--) { | ||
982 | off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri); | ||
983 | off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri); | ||
984 | off += _emit_ST(dry_run, &buf[off], ALWAYS); | ||
985 | off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri); | ||
986 | } | ||
987 | |||
988 | return off; | ||
989 | } | ||
990 | |||
991 | static inline int _ldst_memtodev(unsigned dry_run, u8 buf[], | ||
992 | const struct _xfer_spec *pxs, int cyc) | ||
993 | { | ||
994 | int off = 0; | ||
995 | |||
996 | while (cyc--) { | ||
997 | off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri); | ||
998 | off += _emit_LD(dry_run, &buf[off], ALWAYS); | ||
999 | off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri); | ||
1000 | off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri); | ||
1001 | } | ||
1002 | |||
1003 | return off; | ||
1004 | } | ||
1005 | |||
1006 | static int _bursts(unsigned dry_run, u8 buf[], | ||
1007 | const struct _xfer_spec *pxs, int cyc) | ||
1008 | { | ||
1009 | int off = 0; | ||
1010 | |||
1011 | switch (pxs->r->rqtype) { | ||
1012 | case MEMTODEV: | ||
1013 | off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc); | ||
1014 | break; | ||
1015 | case DEVTOMEM: | ||
1016 | off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc); | ||
1017 | break; | ||
1018 | case MEMTOMEM: | ||
1019 | off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc); | ||
1020 | break; | ||
1021 | default: | ||
1022 | off += 0x40000000; /* Scare off the Client */ | ||
1023 | break; | ||
1024 | } | ||
1025 | |||
1026 | return off; | ||
1027 | } | ||
1028 | |||
1029 | /* Returns bytes consumed and updates bursts */ | ||
1030 | static inline int _loop(unsigned dry_run, u8 buf[], | ||
1031 | unsigned long *bursts, const struct _xfer_spec *pxs) | ||
1032 | { | ||
1033 | int cyc, cycmax, szlp, szlpend, szbrst, off; | ||
1034 | unsigned lcnt0, lcnt1, ljmp0, ljmp1; | ||
1035 | struct _arg_LPEND lpend; | ||
1036 | |||
1037 | /* Max iterations possible in DMALP is 256 */ | ||
1038 | if (*bursts >= 256*256) { | ||
1039 | lcnt1 = 256; | ||
1040 | lcnt0 = 256; | ||
1041 | cyc = *bursts / lcnt1 / lcnt0; | ||
1042 | } else if (*bursts > 256) { | ||
1043 | lcnt1 = 256; | ||
1044 | lcnt0 = *bursts / lcnt1; | ||
1045 | cyc = 1; | ||
1046 | } else { | ||
1047 | lcnt1 = *bursts; | ||
1048 | lcnt0 = 0; | ||
1049 | cyc = 1; | ||
1050 | } | ||
1051 | |||
1052 | szlp = _emit_LP(1, buf, 0, 0); | ||
1053 | szbrst = _bursts(1, buf, pxs, 1); | ||
1054 | |||
1055 | lpend.cond = ALWAYS; | ||
1056 | lpend.forever = false; | ||
1057 | lpend.loop = 0; | ||
1058 | lpend.bjump = 0; | ||
1059 | szlpend = _emit_LPEND(1, buf, &lpend); | ||
1060 | |||
1061 | if (lcnt0) { | ||
1062 | szlp *= 2; | ||
1063 | szlpend *= 2; | ||
1064 | } | ||
1065 | |||
1066 | /* | ||
1067 | * Max bursts that we can unroll due to limit on the | ||
1068 | * size of backward jump that can be encoded in DMALPEND | ||
1069 | * which is 8-bits and hence 255 | ||
1070 | */ | ||
1071 | cycmax = (255 - (szlp + szlpend)) / szbrst; | ||
1072 | |||
1073 | cyc = (cycmax < cyc) ? cycmax : cyc; | ||
1074 | |||
1075 | off = 0; | ||
1076 | |||
1077 | if (lcnt0) { | ||
1078 | off += _emit_LP(dry_run, &buf[off], 0, lcnt0); | ||
1079 | ljmp0 = off; | ||
1080 | } | ||
1081 | |||
1082 | off += _emit_LP(dry_run, &buf[off], 1, lcnt1); | ||
1083 | ljmp1 = off; | ||
1084 | |||
1085 | off += _bursts(dry_run, &buf[off], pxs, cyc); | ||
1086 | |||
1087 | lpend.cond = ALWAYS; | ||
1088 | lpend.forever = false; | ||
1089 | lpend.loop = 1; | ||
1090 | lpend.bjump = off - ljmp1; | ||
1091 | off += _emit_LPEND(dry_run, &buf[off], &lpend); | ||
1092 | |||
1093 | if (lcnt0) { | ||
1094 | lpend.cond = ALWAYS; | ||
1095 | lpend.forever = false; | ||
1096 | lpend.loop = 0; | ||
1097 | lpend.bjump = off - ljmp0; | ||
1098 | off += _emit_LPEND(dry_run, &buf[off], &lpend); | ||
1099 | } | ||
1100 | |||
1101 | *bursts = lcnt1 * cyc; | ||
1102 | if (lcnt0) | ||
1103 | *bursts *= lcnt0; | ||
1104 | |||
1105 | return off; | ||
1106 | } | ||
1107 | |||
1108 | static inline int _setup_loops(unsigned dry_run, u8 buf[], | ||
1109 | const struct _xfer_spec *pxs) | ||
1110 | { | ||
1111 | struct pl330_xfer *x = pxs->x; | ||
1112 | u32 ccr = pxs->ccr; | ||
1113 | unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr); | ||
1114 | int off = 0; | ||
1115 | |||
1116 | while (bursts) { | ||
1117 | c = bursts; | ||
1118 | off += _loop(dry_run, &buf[off], &c, pxs); | ||
1119 | bursts -= c; | ||
1120 | } | ||
1121 | |||
1122 | return off; | ||
1123 | } | ||
1124 | |||
1125 | static inline int _setup_xfer(unsigned dry_run, u8 buf[], | ||
1126 | const struct _xfer_spec *pxs) | ||
1127 | { | ||
1128 | struct pl330_xfer *x = pxs->x; | ||
1129 | int off = 0; | ||
1130 | |||
1131 | /* DMAMOV SAR, x->src_addr */ | ||
1132 | off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr); | ||
1133 | /* DMAMOV DAR, x->dst_addr */ | ||
1134 | off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr); | ||
1135 | |||
1136 | /* Setup Loop(s) */ | ||
1137 | off += _setup_loops(dry_run, &buf[off], pxs); | ||
1138 | |||
1139 | return off; | ||
1140 | } | ||
1141 | |||
1142 | /* | ||
1143 | * A req is a sequence of one or more xfer units. | ||
1144 | * Returns the number of bytes taken to setup the MC for the req. | ||
1145 | */ | ||
1146 | static int _setup_req(unsigned dry_run, struct pl330_thread *thrd, | ||
1147 | unsigned index, struct _xfer_spec *pxs) | ||
1148 | { | ||
1149 | struct _pl330_req *req = &thrd->req[index]; | ||
1150 | struct pl330_xfer *x; | ||
1151 | u8 *buf = req->mc_cpu; | ||
1152 | int off = 0; | ||
1153 | |||
1154 | PL330_DBGMC_START(req->mc_bus); | ||
1155 | |||
1156 | /* DMAMOV CCR, ccr */ | ||
1157 | off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr); | ||
1158 | |||
1159 | x = pxs->r->x; | ||
1160 | do { | ||
1161 | /* Error if xfer length is not aligned at burst size */ | ||
1162 | if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr))) | ||
1163 | return -EINVAL; | ||
1164 | |||
1165 | pxs->x = x; | ||
1166 | off += _setup_xfer(dry_run, &buf[off], pxs); | ||
1167 | |||
1168 | x = x->next; | ||
1169 | } while (x); | ||
1170 | |||
1171 | /* DMASEV peripheral/event */ | ||
1172 | off += _emit_SEV(dry_run, &buf[off], thrd->ev); | ||
1173 | /* DMAEND */ | ||
1174 | off += _emit_END(dry_run, &buf[off]); | ||
1175 | |||
1176 | return off; | ||
1177 | } | ||
1178 | |||
1179 | static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc) | ||
1180 | { | ||
1181 | u32 ccr = 0; | ||
1182 | |||
1183 | if (rqc->src_inc) | ||
1184 | ccr |= CC_SRCINC; | ||
1185 | |||
1186 | if (rqc->dst_inc) | ||
1187 | ccr |= CC_DSTINC; | ||
1188 | |||
1189 | /* We set same protection levels for Src and DST for now */ | ||
1190 | if (rqc->privileged) | ||
1191 | ccr |= CC_SRCPRI | CC_DSTPRI; | ||
1192 | if (rqc->nonsecure) | ||
1193 | ccr |= CC_SRCNS | CC_DSTNS; | ||
1194 | if (rqc->insnaccess) | ||
1195 | ccr |= CC_SRCIA | CC_DSTIA; | ||
1196 | |||
1197 | ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT); | ||
1198 | ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT); | ||
1199 | |||
1200 | ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT); | ||
1201 | ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT); | ||
1202 | |||
1203 | ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT); | ||
1204 | ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT); | ||
1205 | |||
1206 | ccr |= (rqc->swap << CC_SWAP_SHFT); | ||
1207 | |||
1208 | return ccr; | ||
1209 | } | ||
1210 | |||
1211 | static inline bool _is_valid(u32 ccr) | ||
1212 | { | ||
1213 | enum pl330_dstcachectrl dcctl; | ||
1214 | enum pl330_srccachectrl scctl; | ||
1215 | |||
1216 | dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK; | ||
1217 | scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK; | ||
1218 | |||
1219 | if (dcctl == DINVALID1 || dcctl == DINVALID2 | ||
1220 | || scctl == SINVALID1 || scctl == SINVALID2) | ||
1221 | return false; | ||
1222 | else | ||
1223 | return true; | ||
1224 | } | ||
1225 | |||
1226 | /* | ||
1227 | * Submit a list of xfers after which the client wants notification. | ||
1228 | * Client is not notified after each xfer unit, just once after all | ||
1229 | * xfer units are done or some error occurs. | ||
1230 | */ | ||
1231 | int pl330_submit_req(void *ch_id, struct pl330_req *r) | ||
1232 | { | ||
1233 | struct pl330_thread *thrd = ch_id; | ||
1234 | struct pl330_dmac *pl330; | ||
1235 | struct pl330_info *pi; | ||
1236 | struct _xfer_spec xs; | ||
1237 | unsigned long flags; | ||
1238 | void __iomem *regs; | ||
1239 | unsigned idx; | ||
1240 | u32 ccr; | ||
1241 | int ret = 0; | ||
1242 | |||
1243 | /* No Req or Unacquired Channel or DMAC */ | ||
1244 | if (!r || !thrd || thrd->free) | ||
1245 | return -EINVAL; | ||
1246 | |||
1247 | pl330 = thrd->dmac; | ||
1248 | pi = pl330->pinfo; | ||
1249 | regs = pi->base; | ||
1250 | |||
1251 | if (pl330->state == DYING | ||
1252 | || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) { | ||
1253 | dev_info(thrd->dmac->pinfo->dev, "%s:%d\n", | ||
1254 | __func__, __LINE__); | ||
1255 | return -EAGAIN; | ||
1256 | } | ||
1257 | |||
1258 | /* If request for non-existing peripheral */ | ||
1259 | if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) { | ||
1260 | dev_info(thrd->dmac->pinfo->dev, | ||
1261 | "%s:%d Invalid peripheral(%u)!\n", | ||
1262 | __func__, __LINE__, r->peri); | ||
1263 | return -EINVAL; | ||
1264 | } | ||
1265 | |||
1266 | spin_lock_irqsave(&pl330->lock, flags); | ||
1267 | |||
1268 | if (_queue_full(thrd)) { | ||
1269 | ret = -EAGAIN; | ||
1270 | goto xfer_exit; | ||
1271 | } | ||
1272 | |||
1273 | /* Prefer Secure Channel */ | ||
1274 | if (!_manager_ns(thrd)) | ||
1275 | r->cfg->nonsecure = 0; | ||
1276 | else | ||
1277 | r->cfg->nonsecure = 1; | ||
1278 | |||
1279 | /* Use last settings, if not provided */ | ||
1280 | if (r->cfg) | ||
1281 | ccr = _prepare_ccr(r->cfg); | ||
1282 | else | ||
1283 | ccr = readl(regs + CC(thrd->id)); | ||
1284 | |||
1285 | /* If this req doesn't have valid xfer settings */ | ||
1286 | if (!_is_valid(ccr)) { | ||
1287 | ret = -EINVAL; | ||
1288 | dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n", | ||
1289 | __func__, __LINE__, ccr); | ||
1290 | goto xfer_exit; | ||
1291 | } | ||
1292 | |||
1293 | idx = IS_FREE(&thrd->req[0]) ? 0 : 1; | ||
1294 | |||
1295 | xs.ccr = ccr; | ||
1296 | xs.r = r; | ||
1297 | |||
1298 | /* First dry run to check if req is acceptable */ | ||
1299 | ret = _setup_req(1, thrd, idx, &xs); | ||
1300 | if (ret < 0) | ||
1301 | goto xfer_exit; | ||
1302 | |||
1303 | if (ret > pi->mcbufsz / 2) { | ||
1304 | dev_info(thrd->dmac->pinfo->dev, | ||
1305 | "%s:%d Trying increasing mcbufsz\n", | ||
1306 | __func__, __LINE__); | ||
1307 | ret = -ENOMEM; | ||
1308 | goto xfer_exit; | ||
1309 | } | ||
1310 | |||
1311 | /* Hook the request */ | ||
1312 | thrd->lstenq = idx; | ||
1313 | thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs); | ||
1314 | thrd->req[idx].r = r; | ||
1315 | |||
1316 | ret = 0; | ||
1317 | |||
1318 | xfer_exit: | ||
1319 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1320 | |||
1321 | return ret; | ||
1322 | } | ||
1323 | EXPORT_SYMBOL(pl330_submit_req); | ||
1324 | |||
1325 | static void pl330_dotask(unsigned long data) | ||
1326 | { | ||
1327 | struct pl330_dmac *pl330 = (struct pl330_dmac *) data; | ||
1328 | struct pl330_info *pi = pl330->pinfo; | ||
1329 | unsigned long flags; | ||
1330 | int i; | ||
1331 | |||
1332 | spin_lock_irqsave(&pl330->lock, flags); | ||
1333 | |||
1334 | /* The DMAC itself gone nuts */ | ||
1335 | if (pl330->dmac_tbd.reset_dmac) { | ||
1336 | pl330->state = DYING; | ||
1337 | /* Reset the manager too */ | ||
1338 | pl330->dmac_tbd.reset_mngr = true; | ||
1339 | /* Clear the reset flag */ | ||
1340 | pl330->dmac_tbd.reset_dmac = false; | ||
1341 | } | ||
1342 | |||
1343 | if (pl330->dmac_tbd.reset_mngr) { | ||
1344 | _stop(pl330->manager); | ||
1345 | /* Reset all channels */ | ||
1346 | pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1; | ||
1347 | /* Clear the reset flag */ | ||
1348 | pl330->dmac_tbd.reset_mngr = false; | ||
1349 | } | ||
1350 | |||
1351 | for (i = 0; i < pi->pcfg.num_chan; i++) { | ||
1352 | |||
1353 | if (pl330->dmac_tbd.reset_chan & (1 << i)) { | ||
1354 | struct pl330_thread *thrd = &pl330->channels[i]; | ||
1355 | void __iomem *regs = pi->base; | ||
1356 | enum pl330_op_err err; | ||
1357 | |||
1358 | _stop(thrd); | ||
1359 | |||
1360 | if (readl(regs + FSC) & (1 << thrd->id)) | ||
1361 | err = PL330_ERR_FAIL; | ||
1362 | else | ||
1363 | err = PL330_ERR_ABORT; | ||
1364 | |||
1365 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1366 | |||
1367 | _callback(thrd->req[1 - thrd->lstenq].r, err); | ||
1368 | _callback(thrd->req[thrd->lstenq].r, err); | ||
1369 | |||
1370 | spin_lock_irqsave(&pl330->lock, flags); | ||
1371 | |||
1372 | thrd->req[0].r = NULL; | ||
1373 | thrd->req[1].r = NULL; | ||
1374 | mark_free(thrd, 0); | ||
1375 | mark_free(thrd, 1); | ||
1376 | |||
1377 | /* Clear the reset flag */ | ||
1378 | pl330->dmac_tbd.reset_chan &= ~(1 << i); | ||
1379 | } | ||
1380 | } | ||
1381 | |||
1382 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1383 | |||
1384 | return; | ||
1385 | } | ||
1386 | |||
1387 | /* Returns 1 if state was updated, 0 otherwise */ | ||
1388 | int pl330_update(const struct pl330_info *pi) | ||
1389 | { | ||
1390 | struct _pl330_req *rqdone; | ||
1391 | struct pl330_dmac *pl330; | ||
1392 | unsigned long flags; | ||
1393 | void __iomem *regs; | ||
1394 | u32 val; | ||
1395 | int id, ev, ret = 0; | ||
1396 | |||
1397 | if (!pi || !pi->pl330_data) | ||
1398 | return 0; | ||
1399 | |||
1400 | regs = pi->base; | ||
1401 | pl330 = pi->pl330_data; | ||
1402 | |||
1403 | spin_lock_irqsave(&pl330->lock, flags); | ||
1404 | |||
1405 | val = readl(regs + FSM) & 0x1; | ||
1406 | if (val) | ||
1407 | pl330->dmac_tbd.reset_mngr = true; | ||
1408 | else | ||
1409 | pl330->dmac_tbd.reset_mngr = false; | ||
1410 | |||
1411 | val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1); | ||
1412 | pl330->dmac_tbd.reset_chan |= val; | ||
1413 | if (val) { | ||
1414 | int i = 0; | ||
1415 | while (i < pi->pcfg.num_chan) { | ||
1416 | if (val & (1 << i)) { | ||
1417 | dev_info(pi->dev, | ||
1418 | "Reset Channel-%d\t CS-%x FTC-%x\n", | ||
1419 | i, readl(regs + CS(i)), | ||
1420 | readl(regs + FTC(i))); | ||
1421 | _stop(&pl330->channels[i]); | ||
1422 | } | ||
1423 | i++; | ||
1424 | } | ||
1425 | } | ||
1426 | |||
1427 | /* Check which event happened i.e, thread notified */ | ||
1428 | val = readl(regs + ES); | ||
1429 | if (pi->pcfg.num_events < 32 | ||
1430 | && val & ~((1 << pi->pcfg.num_events) - 1)) { | ||
1431 | pl330->dmac_tbd.reset_dmac = true; | ||
1432 | dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__); | ||
1433 | ret = 1; | ||
1434 | goto updt_exit; | ||
1435 | } | ||
1436 | |||
1437 | for (ev = 0; ev < pi->pcfg.num_events; ev++) { | ||
1438 | if (val & (1 << ev)) { /* Event occurred */ | ||
1439 | struct pl330_thread *thrd; | ||
1440 | u32 inten = readl(regs + INTEN); | ||
1441 | int active; | ||
1442 | |||
1443 | /* Clear the event */ | ||
1444 | if (inten & (1 << ev)) | ||
1445 | writel(1 << ev, regs + INTCLR); | ||
1446 | |||
1447 | ret = 1; | ||
1448 | |||
1449 | id = pl330->events[ev]; | ||
1450 | |||
1451 | thrd = &pl330->channels[id]; | ||
1452 | |||
1453 | active = thrd->req_running; | ||
1454 | if (active == -1) /* Aborted */ | ||
1455 | continue; | ||
1456 | |||
1457 | rqdone = &thrd->req[active]; | ||
1458 | mark_free(thrd, active); | ||
1459 | |||
1460 | /* Get going again ASAP */ | ||
1461 | _start(thrd); | ||
1462 | |||
1463 | /* For now, just make a list of callbacks to be done */ | ||
1464 | list_add_tail(&rqdone->rqd, &pl330->req_done); | ||
1465 | } | ||
1466 | } | ||
1467 | |||
1468 | /* Now that we are in no hurry, do the callbacks */ | ||
1469 | while (!list_empty(&pl330->req_done)) { | ||
1470 | struct pl330_req *r; | ||
1471 | |||
1472 | rqdone = container_of(pl330->req_done.next, | ||
1473 | struct _pl330_req, rqd); | ||
1474 | |||
1475 | list_del_init(&rqdone->rqd); | ||
1476 | |||
1477 | /* Detach the req */ | ||
1478 | r = rqdone->r; | ||
1479 | rqdone->r = NULL; | ||
1480 | |||
1481 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1482 | _callback(r, PL330_ERR_NONE); | ||
1483 | spin_lock_irqsave(&pl330->lock, flags); | ||
1484 | } | ||
1485 | |||
1486 | updt_exit: | ||
1487 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1488 | |||
1489 | if (pl330->dmac_tbd.reset_dmac | ||
1490 | || pl330->dmac_tbd.reset_mngr | ||
1491 | || pl330->dmac_tbd.reset_chan) { | ||
1492 | ret = 1; | ||
1493 | tasklet_schedule(&pl330->tasks); | ||
1494 | } | ||
1495 | |||
1496 | return ret; | ||
1497 | } | ||
1498 | EXPORT_SYMBOL(pl330_update); | ||
1499 | |||
1500 | int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op) | ||
1501 | { | ||
1502 | struct pl330_thread *thrd = ch_id; | ||
1503 | struct pl330_dmac *pl330; | ||
1504 | unsigned long flags; | ||
1505 | int ret = 0, active; | ||
1506 | |||
1507 | if (!thrd || thrd->free || thrd->dmac->state == DYING) | ||
1508 | return -EINVAL; | ||
1509 | |||
1510 | pl330 = thrd->dmac; | ||
1511 | active = thrd->req_running; | ||
1512 | |||
1513 | spin_lock_irqsave(&pl330->lock, flags); | ||
1514 | |||
1515 | switch (op) { | ||
1516 | case PL330_OP_FLUSH: | ||
1517 | /* Make sure the channel is stopped */ | ||
1518 | _stop(thrd); | ||
1519 | |||
1520 | thrd->req[0].r = NULL; | ||
1521 | thrd->req[1].r = NULL; | ||
1522 | mark_free(thrd, 0); | ||
1523 | mark_free(thrd, 1); | ||
1524 | break; | ||
1525 | |||
1526 | case PL330_OP_ABORT: | ||
1527 | /* Make sure the channel is stopped */ | ||
1528 | _stop(thrd); | ||
1529 | |||
1530 | /* ABORT is only for the active req */ | ||
1531 | if (active == -1) | ||
1532 | break; | ||
1533 | |||
1534 | thrd->req[active].r = NULL; | ||
1535 | mark_free(thrd, active); | ||
1536 | |||
1537 | /* Start the next */ | ||
1538 | case PL330_OP_START: | ||
1539 | if ((active == -1) && !_start(thrd)) | ||
1540 | ret = -EIO; | ||
1541 | break; | ||
1542 | |||
1543 | default: | ||
1544 | ret = -EINVAL; | ||
1545 | } | ||
1546 | |||
1547 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1548 | return ret; | ||
1549 | } | ||
1550 | EXPORT_SYMBOL(pl330_chan_ctrl); | ||
1551 | |||
1552 | int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus) | ||
1553 | { | ||
1554 | struct pl330_thread *thrd = ch_id; | ||
1555 | struct pl330_dmac *pl330; | ||
1556 | struct pl330_info *pi; | ||
1557 | void __iomem *regs; | ||
1558 | int active; | ||
1559 | u32 val; | ||
1560 | |||
1561 | if (!pstatus || !thrd || thrd->free) | ||
1562 | return -EINVAL; | ||
1563 | |||
1564 | pl330 = thrd->dmac; | ||
1565 | pi = pl330->pinfo; | ||
1566 | regs = pi->base; | ||
1567 | |||
1568 | /* The client should remove the DMAC and add again */ | ||
1569 | if (pl330->state == DYING) | ||
1570 | pstatus->dmac_halted = true; | ||
1571 | else | ||
1572 | pstatus->dmac_halted = false; | ||
1573 | |||
1574 | val = readl(regs + FSC); | ||
1575 | if (val & (1 << thrd->id)) | ||
1576 | pstatus->faulting = true; | ||
1577 | else | ||
1578 | pstatus->faulting = false; | ||
1579 | |||
1580 | active = thrd->req_running; | ||
1581 | |||
1582 | if (active == -1) { | ||
1583 | /* Indicate that the thread is not running */ | ||
1584 | pstatus->top_req = NULL; | ||
1585 | pstatus->wait_req = NULL; | ||
1586 | } else { | ||
1587 | pstatus->top_req = thrd->req[active].r; | ||
1588 | pstatus->wait_req = !IS_FREE(&thrd->req[1 - active]) | ||
1589 | ? thrd->req[1 - active].r : NULL; | ||
1590 | } | ||
1591 | |||
1592 | pstatus->src_addr = readl(regs + SA(thrd->id)); | ||
1593 | pstatus->dst_addr = readl(regs + DA(thrd->id)); | ||
1594 | |||
1595 | return 0; | ||
1596 | } | ||
1597 | EXPORT_SYMBOL(pl330_chan_status); | ||
1598 | |||
1599 | /* Reserve an event */ | ||
1600 | static inline int _alloc_event(struct pl330_thread *thrd) | ||
1601 | { | ||
1602 | struct pl330_dmac *pl330 = thrd->dmac; | ||
1603 | struct pl330_info *pi = pl330->pinfo; | ||
1604 | int ev; | ||
1605 | |||
1606 | for (ev = 0; ev < pi->pcfg.num_events; ev++) | ||
1607 | if (pl330->events[ev] == -1) { | ||
1608 | pl330->events[ev] = thrd->id; | ||
1609 | return ev; | ||
1610 | } | ||
1611 | |||
1612 | return -1; | ||
1613 | } | ||
1614 | |||
1615 | static bool _chan_ns(const struct pl330_info *pi, int i) | ||
1616 | { | ||
1617 | return pi->pcfg.irq_ns & (1 << i); | ||
1618 | } | ||
1619 | |||
1620 | /* Upon success, returns IdentityToken for the | ||
1621 | * allocated channel, NULL otherwise. | ||
1622 | */ | ||
1623 | void *pl330_request_channel(const struct pl330_info *pi) | ||
1624 | { | ||
1625 | struct pl330_thread *thrd = NULL; | ||
1626 | struct pl330_dmac *pl330; | ||
1627 | unsigned long flags; | ||
1628 | int chans, i; | ||
1629 | |||
1630 | if (!pi || !pi->pl330_data) | ||
1631 | return NULL; | ||
1632 | |||
1633 | pl330 = pi->pl330_data; | ||
1634 | |||
1635 | if (pl330->state == DYING) | ||
1636 | return NULL; | ||
1637 | |||
1638 | chans = pi->pcfg.num_chan; | ||
1639 | |||
1640 | spin_lock_irqsave(&pl330->lock, flags); | ||
1641 | |||
1642 | for (i = 0; i < chans; i++) { | ||
1643 | thrd = &pl330->channels[i]; | ||
1644 | if ((thrd->free) && (!_manager_ns(thrd) || | ||
1645 | _chan_ns(pi, i))) { | ||
1646 | thrd->ev = _alloc_event(thrd); | ||
1647 | if (thrd->ev >= 0) { | ||
1648 | thrd->free = false; | ||
1649 | thrd->lstenq = 1; | ||
1650 | thrd->req[0].r = NULL; | ||
1651 | mark_free(thrd, 0); | ||
1652 | thrd->req[1].r = NULL; | ||
1653 | mark_free(thrd, 1); | ||
1654 | break; | ||
1655 | } | ||
1656 | } | ||
1657 | thrd = NULL; | ||
1658 | } | ||
1659 | |||
1660 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1661 | |||
1662 | return thrd; | ||
1663 | } | ||
1664 | EXPORT_SYMBOL(pl330_request_channel); | ||
1665 | |||
1666 | /* Release an event */ | ||
1667 | static inline void _free_event(struct pl330_thread *thrd, int ev) | ||
1668 | { | ||
1669 | struct pl330_dmac *pl330 = thrd->dmac; | ||
1670 | struct pl330_info *pi = pl330->pinfo; | ||
1671 | |||
1672 | /* If the event is valid and was held by the thread */ | ||
1673 | if (ev >= 0 && ev < pi->pcfg.num_events | ||
1674 | && pl330->events[ev] == thrd->id) | ||
1675 | pl330->events[ev] = -1; | ||
1676 | } | ||
1677 | |||
1678 | void pl330_release_channel(void *ch_id) | ||
1679 | { | ||
1680 | struct pl330_thread *thrd = ch_id; | ||
1681 | struct pl330_dmac *pl330; | ||
1682 | unsigned long flags; | ||
1683 | |||
1684 | if (!thrd || thrd->free) | ||
1685 | return; | ||
1686 | |||
1687 | _stop(thrd); | ||
1688 | |||
1689 | _callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT); | ||
1690 | _callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT); | ||
1691 | |||
1692 | pl330 = thrd->dmac; | ||
1693 | |||
1694 | spin_lock_irqsave(&pl330->lock, flags); | ||
1695 | _free_event(thrd, thrd->ev); | ||
1696 | thrd->free = true; | ||
1697 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1698 | } | ||
1699 | EXPORT_SYMBOL(pl330_release_channel); | ||
1700 | |||
1701 | /* Initialize the structure for PL330 configuration, that can be used | ||
1702 | * by the client driver the make best use of the DMAC | ||
1703 | */ | ||
1704 | static void read_dmac_config(struct pl330_info *pi) | ||
1705 | { | ||
1706 | void __iomem *regs = pi->base; | ||
1707 | u32 val; | ||
1708 | |||
1709 | val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT; | ||
1710 | val &= CRD_DATA_WIDTH_MASK; | ||
1711 | pi->pcfg.data_bus_width = 8 * (1 << val); | ||
1712 | |||
1713 | val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT; | ||
1714 | val &= CRD_DATA_BUFF_MASK; | ||
1715 | pi->pcfg.data_buf_dep = val + 1; | ||
1716 | |||
1717 | val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT; | ||
1718 | val &= CR0_NUM_CHANS_MASK; | ||
1719 | val += 1; | ||
1720 | pi->pcfg.num_chan = val; | ||
1721 | |||
1722 | val = readl(regs + CR0); | ||
1723 | if (val & CR0_PERIPH_REQ_SET) { | ||
1724 | val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK; | ||
1725 | val += 1; | ||
1726 | pi->pcfg.num_peri = val; | ||
1727 | pi->pcfg.peri_ns = readl(regs + CR4); | ||
1728 | } else { | ||
1729 | pi->pcfg.num_peri = 0; | ||
1730 | } | ||
1731 | |||
1732 | val = readl(regs + CR0); | ||
1733 | if (val & CR0_BOOT_MAN_NS) | ||
1734 | pi->pcfg.mode |= DMAC_MODE_NS; | ||
1735 | else | ||
1736 | pi->pcfg.mode &= ~DMAC_MODE_NS; | ||
1737 | |||
1738 | val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT; | ||
1739 | val &= CR0_NUM_EVENTS_MASK; | ||
1740 | val += 1; | ||
1741 | pi->pcfg.num_events = val; | ||
1742 | |||
1743 | pi->pcfg.irq_ns = readl(regs + CR3); | ||
1744 | |||
1745 | pi->pcfg.periph_id = get_id(pi, PERIPH_ID); | ||
1746 | pi->pcfg.pcell_id = get_id(pi, PCELL_ID); | ||
1747 | } | ||
1748 | |||
1749 | static inline void _reset_thread(struct pl330_thread *thrd) | ||
1750 | { | ||
1751 | struct pl330_dmac *pl330 = thrd->dmac; | ||
1752 | struct pl330_info *pi = pl330->pinfo; | ||
1753 | |||
1754 | thrd->req[0].mc_cpu = pl330->mcode_cpu | ||
1755 | + (thrd->id * pi->mcbufsz); | ||
1756 | thrd->req[0].mc_bus = pl330->mcode_bus | ||
1757 | + (thrd->id * pi->mcbufsz); | ||
1758 | thrd->req[0].r = NULL; | ||
1759 | mark_free(thrd, 0); | ||
1760 | |||
1761 | thrd->req[1].mc_cpu = thrd->req[0].mc_cpu | ||
1762 | + pi->mcbufsz / 2; | ||
1763 | thrd->req[1].mc_bus = thrd->req[0].mc_bus | ||
1764 | + pi->mcbufsz / 2; | ||
1765 | thrd->req[1].r = NULL; | ||
1766 | mark_free(thrd, 1); | ||
1767 | } | ||
1768 | |||
1769 | static int dmac_alloc_threads(struct pl330_dmac *pl330) | ||
1770 | { | ||
1771 | struct pl330_info *pi = pl330->pinfo; | ||
1772 | int chans = pi->pcfg.num_chan; | ||
1773 | struct pl330_thread *thrd; | ||
1774 | int i; | ||
1775 | |||
1776 | /* Allocate 1 Manager and 'chans' Channel threads */ | ||
1777 | pl330->channels = kzalloc((1 + chans) * sizeof(*thrd), | ||
1778 | GFP_KERNEL); | ||
1779 | if (!pl330->channels) | ||
1780 | return -ENOMEM; | ||
1781 | |||
1782 | /* Init Channel threads */ | ||
1783 | for (i = 0; i < chans; i++) { | ||
1784 | thrd = &pl330->channels[i]; | ||
1785 | thrd->id = i; | ||
1786 | thrd->dmac = pl330; | ||
1787 | _reset_thread(thrd); | ||
1788 | thrd->free = true; | ||
1789 | } | ||
1790 | |||
1791 | /* MANAGER is indexed at the end */ | ||
1792 | thrd = &pl330->channels[chans]; | ||
1793 | thrd->id = chans; | ||
1794 | thrd->dmac = pl330; | ||
1795 | thrd->free = false; | ||
1796 | pl330->manager = thrd; | ||
1797 | |||
1798 | return 0; | ||
1799 | } | ||
1800 | |||
1801 | static int dmac_alloc_resources(struct pl330_dmac *pl330) | ||
1802 | { | ||
1803 | struct pl330_info *pi = pl330->pinfo; | ||
1804 | int chans = pi->pcfg.num_chan; | ||
1805 | int ret; | ||
1806 | |||
1807 | /* | ||
1808 | * Alloc MicroCode buffer for 'chans' Channel threads. | ||
1809 | * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN) | ||
1810 | */ | ||
1811 | pl330->mcode_cpu = dma_alloc_coherent(pi->dev, | ||
1812 | chans * pi->mcbufsz, | ||
1813 | &pl330->mcode_bus, GFP_KERNEL); | ||
1814 | if (!pl330->mcode_cpu) { | ||
1815 | dev_err(pi->dev, "%s:%d Can't allocate memory!\n", | ||
1816 | __func__, __LINE__); | ||
1817 | return -ENOMEM; | ||
1818 | } | ||
1819 | |||
1820 | ret = dmac_alloc_threads(pl330); | ||
1821 | if (ret) { | ||
1822 | dev_err(pi->dev, "%s:%d Can't create channels for DMAC!\n", | ||
1823 | __func__, __LINE__); | ||
1824 | dma_free_coherent(pi->dev, | ||
1825 | chans * pi->mcbufsz, | ||
1826 | pl330->mcode_cpu, pl330->mcode_bus); | ||
1827 | return ret; | ||
1828 | } | ||
1829 | |||
1830 | return 0; | ||
1831 | } | ||
1832 | |||
1833 | int pl330_add(struct pl330_info *pi) | ||
1834 | { | ||
1835 | struct pl330_dmac *pl330; | ||
1836 | void __iomem *regs; | ||
1837 | int i, ret; | ||
1838 | |||
1839 | if (!pi || !pi->dev) | ||
1840 | return -EINVAL; | ||
1841 | |||
1842 | /* If already added */ | ||
1843 | if (pi->pl330_data) | ||
1844 | return -EINVAL; | ||
1845 | |||
1846 | /* | ||
1847 | * If the SoC can perform reset on the DMAC, then do it | ||
1848 | * before reading its configuration. | ||
1849 | */ | ||
1850 | if (pi->dmac_reset) | ||
1851 | pi->dmac_reset(pi); | ||
1852 | |||
1853 | regs = pi->base; | ||
1854 | |||
1855 | /* Check if we can handle this DMAC */ | ||
1856 | if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL | ||
1857 | || get_id(pi, PCELL_ID) != PCELL_ID_VAL) { | ||
1858 | dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n", | ||
1859 | get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID)); | ||
1860 | return -EINVAL; | ||
1861 | } | ||
1862 | |||
1863 | /* Read the configuration of the DMAC */ | ||
1864 | read_dmac_config(pi); | ||
1865 | |||
1866 | if (pi->pcfg.num_events == 0) { | ||
1867 | dev_err(pi->dev, "%s:%d Can't work without events!\n", | ||
1868 | __func__, __LINE__); | ||
1869 | return -EINVAL; | ||
1870 | } | ||
1871 | |||
1872 | pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL); | ||
1873 | if (!pl330) { | ||
1874 | dev_err(pi->dev, "%s:%d Can't allocate memory!\n", | ||
1875 | __func__, __LINE__); | ||
1876 | return -ENOMEM; | ||
1877 | } | ||
1878 | |||
1879 | /* Assign the info structure and private data */ | ||
1880 | pl330->pinfo = pi; | ||
1881 | pi->pl330_data = pl330; | ||
1882 | |||
1883 | spin_lock_init(&pl330->lock); | ||
1884 | |||
1885 | INIT_LIST_HEAD(&pl330->req_done); | ||
1886 | |||
1887 | /* Use default MC buffer size if not provided */ | ||
1888 | if (!pi->mcbufsz) | ||
1889 | pi->mcbufsz = MCODE_BUFF_PER_REQ * 2; | ||
1890 | |||
1891 | /* Mark all events as free */ | ||
1892 | for (i = 0; i < pi->pcfg.num_events; i++) | ||
1893 | pl330->events[i] = -1; | ||
1894 | |||
1895 | /* Allocate resources needed by the DMAC */ | ||
1896 | ret = dmac_alloc_resources(pl330); | ||
1897 | if (ret) { | ||
1898 | dev_err(pi->dev, "Unable to create channels for DMAC\n"); | ||
1899 | kfree(pl330); | ||
1900 | return ret; | ||
1901 | } | ||
1902 | |||
1903 | tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330); | ||
1904 | |||
1905 | pl330->state = INIT; | ||
1906 | |||
1907 | return 0; | ||
1908 | } | ||
1909 | EXPORT_SYMBOL(pl330_add); | ||
1910 | |||
1911 | static int dmac_free_threads(struct pl330_dmac *pl330) | ||
1912 | { | ||
1913 | struct pl330_info *pi = pl330->pinfo; | ||
1914 | int chans = pi->pcfg.num_chan; | ||
1915 | struct pl330_thread *thrd; | ||
1916 | int i; | ||
1917 | |||
1918 | /* Release Channel threads */ | ||
1919 | for (i = 0; i < chans; i++) { | ||
1920 | thrd = &pl330->channels[i]; | ||
1921 | pl330_release_channel((void *)thrd); | ||
1922 | } | ||
1923 | |||
1924 | /* Free memory */ | ||
1925 | kfree(pl330->channels); | ||
1926 | |||
1927 | return 0; | ||
1928 | } | ||
1929 | |||
1930 | static void dmac_free_resources(struct pl330_dmac *pl330) | ||
1931 | { | ||
1932 | struct pl330_info *pi = pl330->pinfo; | ||
1933 | int chans = pi->pcfg.num_chan; | ||
1934 | |||
1935 | dmac_free_threads(pl330); | ||
1936 | |||
1937 | dma_free_coherent(pi->dev, chans * pi->mcbufsz, | ||
1938 | pl330->mcode_cpu, pl330->mcode_bus); | ||
1939 | } | ||
1940 | |||
1941 | void pl330_del(struct pl330_info *pi) | ||
1942 | { | ||
1943 | struct pl330_dmac *pl330; | ||
1944 | |||
1945 | if (!pi || !pi->pl330_data) | ||
1946 | return; | ||
1947 | |||
1948 | pl330 = pi->pl330_data; | ||
1949 | |||
1950 | pl330->state = UNINIT; | ||
1951 | |||
1952 | tasklet_kill(&pl330->tasks); | ||
1953 | |||
1954 | /* Free DMAC resources */ | ||
1955 | dmac_free_resources(pl330); | ||
1956 | |||
1957 | kfree(pl330); | ||
1958 | pi->pl330_data = NULL; | ||
1959 | } | ||
1960 | EXPORT_SYMBOL(pl330_del); | ||
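The add/del pair above is what a DMA API wrapper driver would call at probe and remove time; a minimal sketch, assuming the registers are already ioremap'ed and that the my_dmac_* wrappers are hypothetical:

static struct pl330_info pi;

static int my_dmac_probe(struct device *dev, void __iomem *regs)
{
        pi.dev = dev;
        pi.base = regs;         /* ioremap'ed PL330 register window */
        pi.mcbufsz = 0;         /* 0: let the core pick its default size */

        /* Checks PERIPH_ID/PCELL_ID, reads the configuration and
         * allocates microcode buffers and channel threads. */
        return pl330_add(&pi);
}

static void my_dmac_remove(void)
{
        pl330_del(&pi);         /* kills the tasklet and frees resources */
}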
diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h index 59b8c3892f76..122f86d8c991 100644 --- a/arch/arm/include/asm/hardware/iop_adma.h +++ b/arch/arm/include/asm/hardware/iop_adma.h | |||
@@ -49,7 +49,6 @@ struct iop_adma_device { | |||
49 | /** | 49 | /** |
50 | * struct iop_adma_chan - internal representation of an ADMA device | 50 | * struct iop_adma_chan - internal representation of an ADMA device |
51 | * @pending: allows batching of hardware operations | 51 | * @pending: allows batching of hardware operations |
52 | * @completed_cookie: identifier for the most recently completed operation | ||
53 | * @lock: serializes enqueue/dequeue operations to the slot pool | 52 | * @lock: serializes enqueue/dequeue operations to the slot pool |
54 | * @mmr_base: memory mapped register base | 53 | * @mmr_base: memory mapped register base |
55 | * @chain: device chain view of the descriptors | 54 | * @chain: device chain view of the descriptors |
@@ -62,7 +61,6 @@ struct iop_adma_device { | |||
62 | */ | 61 | */ |
63 | struct iop_adma_chan { | 62 | struct iop_adma_chan { |
64 | int pending; | 63 | int pending; |
65 | dma_cookie_t completed_cookie; | ||
66 | spinlock_t lock; /* protects the descriptor slot pool */ | 64 | spinlock_t lock; /* protects the descriptor slot pool */ |
67 | void __iomem *mmr_base; | 65 | void __iomem *mmr_base; |
68 | struct list_head chain; | 66 | struct list_head chain; |
diff --git a/arch/arm/include/asm/hardware/pl330.h b/arch/arm/include/asm/hardware/pl330.h deleted file mode 100644 index c1821385abfa..000000000000 --- a/arch/arm/include/asm/hardware/pl330.h +++ /dev/null | |||
@@ -1,217 +0,0 @@ | |||
1 | /* linux/include/asm/hardware/pl330.h | ||
2 | * | ||
3 | * Copyright (C) 2010 Samsung Electronics Co. Ltd. | ||
4 | * Jaswinder Singh <jassi.brar@samsung.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
19 | */ | ||
20 | |||
21 | #ifndef __PL330_CORE_H | ||
22 | #define __PL330_CORE_H | ||
23 | |||
24 | #define PL330_MAX_CHAN 8 | ||
25 | #define PL330_MAX_IRQS 32 | ||
26 | #define PL330_MAX_PERI 32 | ||
27 | |||
28 | enum pl330_srccachectrl { | ||
29 | SCCTRL0 = 0, /* Noncacheable and nonbufferable */ | ||
30 | SCCTRL1, /* Bufferable only */ | ||
31 | SCCTRL2, /* Cacheable, but do not allocate */ | ||
32 | SCCTRL3, /* Cacheable and bufferable, but do not allocate */ | ||
33 | SINVALID1, | ||
34 | SINVALID2, | ||
35 | SCCTRL6, /* Cacheable write-through, allocate on reads only */ | ||
36 | SCCTRL7, /* Cacheable write-back, allocate on reads only */ | ||
37 | }; | ||
38 | |||
39 | enum pl330_dstcachectrl { | ||
40 | DCCTRL0 = 0, /* Noncacheable and nonbufferable */ | ||
41 | DCCTRL1, /* Bufferable only */ | ||
42 | DCCTRL2, /* Cacheable, but do not allocate */ | ||
43 | DCCTRL3, /* Cacheable and bufferable, but do not allocate */ | ||
44 | DINVALID1, /* AWCACHE = 0x1000 */ | ||
45 | DINVALID2, | ||
46 | DCCTRL6, /* Cacheable write-through, allocate on writes only */ | ||
47 | DCCTRL7, /* Cacheable write-back, allocate on writes only */ | ||
48 | }; | ||
49 | |||
50 | /* Populated by the PL330 core driver for DMA API driver's info */ | ||
51 | struct pl330_config { | ||
52 | u32 periph_id; | ||
53 | u32 pcell_id; | ||
54 | #define DMAC_MODE_NS (1 << 0) | ||
55 | unsigned int mode; | ||
56 | unsigned int data_bus_width:10; /* In number of bits */ | ||
57 | unsigned int data_buf_dep:10; | ||
58 | unsigned int num_chan:4; | ||
59 | unsigned int num_peri:6; | ||
60 | u32 peri_ns; | ||
61 | unsigned int num_events:6; | ||
62 | u32 irq_ns; | ||
63 | }; | ||
64 | |||
65 | /* Handle to the DMAC provided to the PL330 core */ | ||
66 | struct pl330_info { | ||
67 | /* Owning device */ | ||
68 | struct device *dev; | ||
69 | /* Size of MicroCode buffers for each channel. */ | ||
70 | unsigned mcbufsz; | ||
71 | /* ioremap'ed address of PL330 registers. */ | ||
72 | void __iomem *base; | ||
73 | /* Client can freely use it. */ | ||
74 | void *client_data; | ||
75 | /* PL330 core data, Client must not touch it. */ | ||
76 | void *pl330_data; | ||
77 | /* Populated by the PL330 core driver during pl330_add */ | ||
78 | struct pl330_config pcfg; | ||
79 | /* | ||
80 | * If the DMAC has some reset mechanism, then the | ||
81 | * client may want to provide a pointer to the method. | ||
82 | */ | ||
83 | void (*dmac_reset)(struct pl330_info *pi); | ||
84 | }; | ||
85 | |||
86 | enum pl330_byteswap { | ||
87 | SWAP_NO = 0, | ||
88 | SWAP_2, | ||
89 | SWAP_4, | ||
90 | SWAP_8, | ||
91 | SWAP_16, | ||
92 | }; | ||
93 | |||
94 | /** | ||
95 | * Request Configuration. | ||
96 | * The PL330 core does not modify this and uses the last | ||
97 | * working configuration if the request doesn't provide any. | ||
98 | * | ||
99 | * The Client may want to provide this info only for the | ||
100 | * first request and a request with new settings. | ||
101 | */ | ||
102 | struct pl330_reqcfg { | ||
103 | /* Address Incrementing */ | ||
104 | unsigned dst_inc:1; | ||
105 | unsigned src_inc:1; | ||
106 | |||
107 | /* | ||
108 | * For now, the SRC & DST protection levels | ||
109 | * and burst size/length are assumed same. | ||
110 | */ | ||
111 | bool nonsecure; | ||
112 | bool privileged; | ||
113 | bool insnaccess; | ||
114 | unsigned brst_len:5; | ||
115 | unsigned brst_size:3; /* in power of 2 */ | ||
116 | |||
117 | enum pl330_dstcachectrl dcctl; | ||
118 | enum pl330_srccachectrl scctl; | ||
119 | enum pl330_byteswap swap; | ||
120 | }; | ||
121 | |||
122 | /* | ||
123 | * One cycle of DMAC operation. | ||
124 | * There may be more than one xfer in a request. | ||
125 | */ | ||
126 | struct pl330_xfer { | ||
127 | u32 src_addr; | ||
128 | u32 dst_addr; | ||
129 | /* Size to xfer */ | ||
130 | u32 bytes; | ||
131 | /* | ||
132 | * Pointer to next xfer in the list. | ||
133 | * The last xfer in the req must point to NULL. | ||
134 | */ | ||
135 | struct pl330_xfer *next; | ||
136 | }; | ||
137 | |||
138 | /* The xfer callbacks are made with one of these arguments. */ | ||
139 | enum pl330_op_err { | ||
140 | /* All xfers in the request completed successfully. */ | ||
141 | PL330_ERR_NONE, | ||
142 | /* The request was aborted due to a global error. */ | ||
143 | PL330_ERR_ABORT, | ||
144 | /* The request failed due to a problem with the channel. */ | ||
145 | PL330_ERR_FAIL, | ||
146 | }; | ||
147 | |||
148 | enum pl330_reqtype { | ||
149 | MEMTOMEM, | ||
150 | MEMTODEV, | ||
151 | DEVTOMEM, | ||
152 | DEVTODEV, | ||
153 | }; | ||
154 | |||
155 | /* A request defining Scatter-Gather List ending with NULL xfer. */ | ||
156 | struct pl330_req { | ||
157 | enum pl330_reqtype rqtype; | ||
158 | /* Index of peripheral for the xfer. */ | ||
159 | unsigned peri:5; | ||
160 | /* Unique token for this xfer, set by the client. */ | ||
161 | void *token; | ||
162 | /* Callback to be called after xfer. */ | ||
163 | void (*xfer_cb)(void *token, enum pl330_op_err err); | ||
164 | /* If NULL, the req will be done with the last set parameters. */ | ||
165 | struct pl330_reqcfg *cfg; | ||
166 | /* Pointer to first xfer in the request. */ | ||
167 | struct pl330_xfer *x; | ||
168 | }; | ||
169 | |||
170 | /* | ||
171 | * To know the status of the channel and DMAC, the client | ||
172 | * provides a pointer to this structure. The PL330 core | ||
173 | * fills it with current information. | ||
174 | */ | ||
175 | struct pl330_chanstatus { | ||
176 | /* | ||
177 | * If the DMAC engine halted due to some error, | ||
178 | * the client should remove and re-add the DMAC. | ||
179 | */ | ||
180 | bool dmac_halted; | ||
181 | /* | ||
182 | * If channel is halted due to some error, | ||
183 | * the client should ABORT/FLUSH and START the channel. | ||
184 | */ | ||
185 | bool faulting; | ||
186 | /* Location of last load */ | ||
187 | u32 src_addr; | ||
188 | /* Location of last store */ | ||
189 | u32 dst_addr; | ||
190 | /* | ||
191 | * Pointer to the currently active req, NULL if channel is | ||
192 | * inactive, even though the requests may be present. | ||
193 | */ | ||
194 | struct pl330_req *top_req; | ||
195 | /* Pointer to req waiting second in the queue if any. */ | ||
196 | struct pl330_req *wait_req; | ||
197 | }; | ||
198 | |||
199 | enum pl330_chan_op { | ||
200 | /* Start the channel */ | ||
201 | PL330_OP_START, | ||
202 | /* Abort the active xfer */ | ||
203 | PL330_OP_ABORT, | ||
204 | /* Stop xfer and flush queue */ | ||
205 | PL330_OP_FLUSH, | ||
206 | }; | ||
207 | |||
208 | extern int pl330_add(struct pl330_info *); | ||
209 | extern void pl330_del(struct pl330_info *pi); | ||
210 | extern int pl330_update(const struct pl330_info *pi); | ||
211 | extern void pl330_release_channel(void *ch_id); | ||
212 | extern void *pl330_request_channel(const struct pl330_info *pi); | ||
213 | extern int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus); | ||
214 | extern int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op); | ||
215 | extern int pl330_submit_req(void *ch_id, struct pl330_req *r); | ||
216 | |||
217 | #endif /* __PL330_CORE_H */ | ||
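Putting the removed interface together: a transfer is described by a pl330_req pointing at a NULL-terminated chain of pl330_xfer and is started explicitly after submission. A minimal memory-to-memory sketch, assuming ch_id came from pl330_request_channel() and that submit-then-START is how the caller drives the channel (the addresses, burst settings and function names are illustrative):

static void xfer_done(void *token, enum pl330_op_err err)
{
        /* err is PL330_ERR_NONE on success, _ABORT or _FAIL otherwise */
}

static int submit_memcpy(void *ch_id, dma_addr_t src, dma_addr_t dst, u32 len)
{
        static struct pl330_reqcfg cfg = {
                .src_inc = 1,
                .dst_inc = 1,
                .brst_size = 2,         /* 2^2 = 4-byte beats */
                .brst_len = 16,
        };
        static struct pl330_xfer x;     /* must stay valid until the callback */
        static struct pl330_req r;
        int ret;

        x.src_addr = src;
        x.dst_addr = dst;
        x.bytes = len;
        x.next = NULL;                  /* last xfer in the req points to NULL */

        r.rqtype = MEMTOMEM;
        r.cfg = &cfg;
        r.x = &x;
        r.xfer_cb = xfer_done;
        r.token = &r;

        ret = pl330_submit_req(ch_id, &r);
        if (ret)
                return ret;

        return pl330_chan_ctrl(ch_id, PL330_OP_START);
}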
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c index 4320b2096789..698479f1e197 100644 --- a/arch/arm/mach-at91/at91sam9g45_devices.c +++ b/arch/arm/mach-at91/at91sam9g45_devices.c | |||
@@ -437,7 +437,6 @@ void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data) | |||
437 | 437 | ||
438 | /* DMA slave channel configuration */ | 438 | /* DMA slave channel configuration */ |
439 | atslave->dma_dev = &at_hdmac_device.dev; | 439 | atslave->dma_dev = &at_hdmac_device.dev; |
440 | atslave->reg_width = AT_DMA_SLAVE_WIDTH_32BIT; | ||
441 | atslave->cfg = ATC_FIFOCFG_HALFFIFO | 440 | atslave->cfg = ATC_FIFOCFG_HALFFIFO |
442 | | ATC_SRC_H2SEL_HW | ATC_DST_H2SEL_HW; | 441 | | ATC_SRC_H2SEL_HW | ATC_DST_H2SEL_HW; |
443 | atslave->ctrla = ATC_SCSIZE_16 | ATC_DCSIZE_16; | 442 | atslave->ctrla = ATC_SCSIZE_16 | ATC_DCSIZE_16; |
diff --git a/arch/arm/mach-at91/include/mach/at_hdmac.h b/arch/arm/mach-at91/include/mach/at_hdmac.h index 187cb58345c0..fff48d1a0f4e 100644 --- a/arch/arm/mach-at91/include/mach/at_hdmac.h +++ b/arch/arm/mach-at91/include/mach/at_hdmac.h | |||
@@ -24,18 +24,6 @@ struct at_dma_platform_data { | |||
24 | }; | 24 | }; |
25 | 25 | ||
26 | /** | 26 | /** |
27 | * enum at_dma_slave_width - DMA slave register access width. | ||
28 | * @AT_DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses | ||
29 | * @AT_DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses | ||
30 | * @AT_DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses | ||
31 | */ | ||
32 | enum at_dma_slave_width { | ||
33 | AT_DMA_SLAVE_WIDTH_8BIT = 0, | ||
34 | AT_DMA_SLAVE_WIDTH_16BIT, | ||
35 | AT_DMA_SLAVE_WIDTH_32BIT, | ||
36 | }; | ||
37 | |||
38 | /** | ||
39 | * struct at_dma_slave - Controller-specific information about a slave | 27 | * struct at_dma_slave - Controller-specific information about a slave |
40 | * @dma_dev: required DMA master device | 28 | * @dma_dev: required DMA master device |
41 | * @tx_reg: physical address of data register used for | 29 | * @tx_reg: physical address of data register used for |
@@ -48,9 +36,6 @@ enum at_dma_slave_width { | |||
48 | */ | 36 | */ |
49 | struct at_dma_slave { | 37 | struct at_dma_slave { |
50 | struct device *dma_dev; | 38 | struct device *dma_dev; |
51 | dma_addr_t tx_reg; | ||
52 | dma_addr_t rx_reg; | ||
53 | enum at_dma_slave_width reg_width; | ||
54 | u32 cfg; | 39 | u32 cfg; |
55 | u32 ctrla; | 40 | u32 ctrla; |
56 | }; | 41 | }; |
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig index 52359f80c42d..7561eca131b0 100644 --- a/arch/arm/mach-imx/Kconfig +++ b/arch/arm/mach-imx/Kconfig | |||
@@ -1,6 +1,3 @@ | |||
1 | config IMX_HAVE_DMA_V1 | ||
2 | bool | ||
3 | |||
4 | config HAVE_IMX_GPC | 1 | config HAVE_IMX_GPC |
5 | bool | 2 | bool |
6 | 3 | ||
@@ -38,7 +35,6 @@ config SOC_IMX1 | |||
38 | bool | 35 | bool |
39 | select ARCH_MX1 | 36 | select ARCH_MX1 |
40 | select CPU_ARM920T | 37 | select CPU_ARM920T |
41 | select IMX_HAVE_DMA_V1 | ||
42 | select IMX_HAVE_IOMUX_V1 | 38 | select IMX_HAVE_IOMUX_V1 |
43 | select MXC_AVIC | 39 | select MXC_AVIC |
44 | 40 | ||
@@ -46,7 +42,6 @@ config SOC_IMX21 | |||
46 | bool | 42 | bool |
47 | select MACH_MX21 | 43 | select MACH_MX21 |
48 | select CPU_ARM926T | 44 | select CPU_ARM926T |
49 | select IMX_HAVE_DMA_V1 | ||
50 | select IMX_HAVE_IOMUX_V1 | 45 | select IMX_HAVE_IOMUX_V1 |
51 | select MXC_AVIC | 46 | select MXC_AVIC |
52 | 47 | ||
@@ -61,7 +56,6 @@ config SOC_IMX27 | |||
61 | bool | 56 | bool |
62 | select MACH_MX27 | 57 | select MACH_MX27 |
63 | select CPU_ARM926T | 58 | select CPU_ARM926T |
64 | select IMX_HAVE_DMA_V1 | ||
65 | select IMX_HAVE_IOMUX_V1 | 59 | select IMX_HAVE_IOMUX_V1 |
66 | select MXC_AVIC | 60 | select MXC_AVIC |
67 | 61 | ||
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile index 35fc450fa263..ab939c5046c3 100644 --- a/arch/arm/mach-imx/Makefile +++ b/arch/arm/mach-imx/Makefile | |||
@@ -1,5 +1,3 @@ | |||
1 | obj-$(CONFIG_IMX_HAVE_DMA_V1) += dma-v1.o | ||
2 | |||
3 | obj-$(CONFIG_SOC_IMX1) += clock-imx1.o mm-imx1.o | 1 | obj-$(CONFIG_SOC_IMX1) += clock-imx1.o mm-imx1.o |
4 | obj-$(CONFIG_SOC_IMX21) += clock-imx21.o mm-imx21.o | 2 | obj-$(CONFIG_SOC_IMX21) += clock-imx21.o mm-imx21.o |
5 | 3 | ||
diff --git a/arch/arm/mach-imx/dma-v1.c b/arch/arm/mach-imx/dma-v1.c deleted file mode 100644 index 3189a6004cf9..000000000000 --- a/arch/arm/mach-imx/dma-v1.c +++ /dev/null | |||
@@ -1,845 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/plat-mxc/dma-v1.c | ||
3 | * | ||
4 | * i.MX DMA registration and IRQ dispatching | ||
5 | * | ||
6 | * Copyright 2006 Pavel Pisa <pisa@cmp.felk.cvut.cz> | ||
7 | * Copyright 2008 Juergen Beisert, <kernel@pengutronix.de> | ||
8 | * Copyright 2008 Sascha Hauer, <s.hauer@pengutronix.de> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version 2 | ||
13 | * of the License, or (at your option) any later version. | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, | ||
22 | * MA 02110-1301, USA. | ||
23 | */ | ||
24 | |||
25 | #include <linux/module.h> | ||
26 | #include <linux/init.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/err.h> | ||
30 | #include <linux/errno.h> | ||
31 | #include <linux/clk.h> | ||
32 | #include <linux/scatterlist.h> | ||
33 | #include <linux/io.h> | ||
34 | |||
35 | #include <asm/irq.h> | ||
36 | #include <mach/hardware.h> | ||
37 | #include <mach/dma-v1.h> | ||
38 | |||
39 | #define DMA_DCR 0x00 /* Control Register */ | ||
40 | #define DMA_DISR 0x04 /* Interrupt status Register */ | ||
41 | #define DMA_DIMR 0x08 /* Interrupt mask Register */ | ||
42 | #define DMA_DBTOSR 0x0c /* Burst timeout status Register */ | ||
43 | #define DMA_DRTOSR 0x10 /* Request timeout Register */ | ||
44 | #define DMA_DSESR 0x14 /* Transfer Error Status Register */ | ||
45 | #define DMA_DBOSR 0x18 /* Buffer overflow status Register */ | ||
46 | #define DMA_DBTOCR 0x1c /* Burst timeout control Register */ | ||
47 | #define DMA_WSRA 0x40 /* W-Size Register A */ | ||
48 | #define DMA_XSRA 0x44 /* X-Size Register A */ | ||
49 | #define DMA_YSRA 0x48 /* Y-Size Register A */ | ||
50 | #define DMA_WSRB 0x4c /* W-Size Register B */ | ||
51 | #define DMA_XSRB 0x50 /* X-Size Register B */ | ||
52 | #define DMA_YSRB 0x54 /* Y-Size Register B */ | ||
53 | #define DMA_SAR(x) (0x80 + ((x) << 6)) /* Source Address Registers */ | ||
54 | #define DMA_DAR(x) (0x84 + ((x) << 6)) /* Destination Address Registers */ | ||
55 | #define DMA_CNTR(x) (0x88 + ((x) << 6)) /* Count Registers */ | ||
56 | #define DMA_CCR(x) (0x8c + ((x) << 6)) /* Control Registers */ | ||
57 | #define DMA_RSSR(x) (0x90 + ((x) << 6)) /* Request source select Registers */ | ||
58 | #define DMA_BLR(x) (0x94 + ((x) << 6)) /* Burst length Registers */ | ||
59 | #define DMA_RTOR(x) (0x98 + ((x) << 6)) /* Request timeout Registers */ | ||
60 | #define DMA_BUCR(x) (0x98 + ((x) << 6)) /* Bus Utilization Registers */ | ||
61 | #define DMA_CCNR(x) (0x9C + ((x) << 6)) /* Channel counter Registers */ | ||
62 | |||
63 | #define DCR_DRST (1<<1) | ||
64 | #define DCR_DEN (1<<0) | ||
65 | #define DBTOCR_EN (1<<15) | ||
66 | #define DBTOCR_CNT(x) ((x) & 0x7fff) | ||
67 | #define CNTR_CNT(x) ((x) & 0xffffff) | ||
68 | #define CCR_ACRPT (1<<14) | ||
69 | #define CCR_DMOD_LINEAR (0x0 << 12) | ||
70 | #define CCR_DMOD_2D (0x1 << 12) | ||
71 | #define CCR_DMOD_FIFO (0x2 << 12) | ||
72 | #define CCR_DMOD_EOBFIFO (0x3 << 12) | ||
73 | #define CCR_SMOD_LINEAR (0x0 << 10) | ||
74 | #define CCR_SMOD_2D (0x1 << 10) | ||
75 | #define CCR_SMOD_FIFO (0x2 << 10) | ||
76 | #define CCR_SMOD_EOBFIFO (0x3 << 10) | ||
77 | #define CCR_MDIR_DEC (1<<9) | ||
78 | #define CCR_MSEL_B (1<<8) | ||
79 | #define CCR_DSIZ_32 (0x0 << 6) | ||
80 | #define CCR_DSIZ_8 (0x1 << 6) | ||
81 | #define CCR_DSIZ_16 (0x2 << 6) | ||
82 | #define CCR_SSIZ_32 (0x0 << 4) | ||
83 | #define CCR_SSIZ_8 (0x1 << 4) | ||
84 | #define CCR_SSIZ_16 (0x2 << 4) | ||
85 | #define CCR_REN (1<<3) | ||
86 | #define CCR_RPT (1<<2) | ||
87 | #define CCR_FRC (1<<1) | ||
88 | #define CCR_CEN (1<<0) | ||
89 | #define RTOR_EN (1<<15) | ||
90 | #define RTOR_CLK (1<<14) | ||
91 | #define RTOR_PSC (1<<13) | ||
92 | |||
93 | /* | ||
94 | * struct imx_dma_channel - i.MX specific DMA extension | ||
95 | * @name: name specified by DMA client | ||
96 | * @irq_handler: client callback for end of transfer | ||
97 | * @err_handler: client callback for error condition | ||
98 | * @data: clients context data for callbacks | ||
99 | * @dma_mode: direction of the transfer %DMA_MODE_READ or %DMA_MODE_WRITE | ||
100 | * @sg: pointer to the actual read/written chunk for scatter-gather emulation | ||
101 | * @resbytes: total residual number of bytes to transfer | ||
102 | * (it can be lower or same as sum of SG mapped chunk sizes) | ||
103 | * @sgcount: number of chunks to be read/written | ||
104 | * | ||
105 | * Structure is used for i.MX DMA processing. It would probably be good | ||
106 | * to use @struct dma_struct in the future for external interfacing and | ||
107 | * use @struct imx_dma_channel only as an extension to it. | ||
108 | */ | ||
109 | |||
110 | struct imx_dma_channel { | ||
111 | const char *name; | ||
112 | void (*irq_handler) (int, void *); | ||
113 | void (*err_handler) (int, void *, int errcode); | ||
114 | void (*prog_handler) (int, void *, struct scatterlist *); | ||
115 | void *data; | ||
116 | unsigned int dma_mode; | ||
117 | struct scatterlist *sg; | ||
118 | unsigned int resbytes; | ||
119 | int dma_num; | ||
120 | |||
121 | int in_use; | ||
122 | |||
123 | u32 ccr_from_device; | ||
124 | u32 ccr_to_device; | ||
125 | |||
126 | struct timer_list watchdog; | ||
127 | |||
128 | int hw_chaining; | ||
129 | }; | ||
130 | |||
131 | static void __iomem *imx_dmav1_baseaddr; | ||
132 | |||
133 | static void imx_dmav1_writel(unsigned val, unsigned offset) | ||
134 | { | ||
135 | __raw_writel(val, imx_dmav1_baseaddr + offset); | ||
136 | } | ||
137 | |||
138 | static unsigned imx_dmav1_readl(unsigned offset) | ||
139 | { | ||
140 | return __raw_readl(imx_dmav1_baseaddr + offset); | ||
141 | } | ||
142 | |||
143 | static struct imx_dma_channel imx_dma_channels[IMX_DMA_CHANNELS]; | ||
144 | |||
145 | static struct clk *dma_clk; | ||
146 | |||
147 | static int imx_dma_hw_chain(struct imx_dma_channel *imxdma) | ||
148 | { | ||
149 | if (cpu_is_mx27()) | ||
150 | return imxdma->hw_chaining; | ||
151 | else | ||
152 | return 0; | ||
153 | } | ||
154 | |||
155 | /* | ||
156 | * imx_dma_sg_next - prepare next chunk for scatter-gather DMA emulation | ||
157 | */ | ||
158 | static inline int imx_dma_sg_next(int channel, struct scatterlist *sg) | ||
159 | { | ||
160 | struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; | ||
161 | unsigned long now; | ||
162 | |||
163 | if (!imxdma->name) { | ||
164 | printk(KERN_CRIT "%s: called for unallocated channel %d\n", | ||
165 | __func__, channel); | ||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | now = min(imxdma->resbytes, sg->length); | ||
170 | if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP) | ||
171 | imxdma->resbytes -= now; | ||
172 | |||
173 | if ((imxdma->dma_mode & DMA_MODE_MASK) == DMA_MODE_READ) | ||
174 | imx_dmav1_writel(sg->dma_address, DMA_DAR(channel)); | ||
175 | else | ||
176 | imx_dmav1_writel(sg->dma_address, DMA_SAR(channel)); | ||
177 | |||
178 | imx_dmav1_writel(now, DMA_CNTR(channel)); | ||
179 | |||
180 | pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, " | ||
181 | "size 0x%08x\n", channel, | ||
182 | imx_dmav1_readl(DMA_DAR(channel)), | ||
183 | imx_dmav1_readl(DMA_SAR(channel)), | ||
184 | imx_dmav1_readl(DMA_CNTR(channel))); | ||
185 | |||
186 | return now; | ||
187 | } | ||
188 | |||
189 | /** | ||
190 | * imx_dma_setup_single - setup i.MX DMA channel for linear memory to/from | ||
191 | * device transfer | ||
192 | * | ||
193 | * @channel: i.MX DMA channel number | ||
194 | * @dma_address: the DMA/physical memory address of the linear data block | ||
195 | * to transfer | ||
196 | * @dma_length: length of the data block in bytes | ||
197 | * @dev_addr: physical device port address | ||
198 | * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory | ||
199 | * or %DMA_MODE_WRITE from memory to the device | ||
200 | * | ||
201 | * Return value: if incorrect parameters are provided -%EINVAL. | ||
202 | * Zero indicates success. | ||
203 | */ | ||
204 | int | ||
205 | imx_dma_setup_single(int channel, dma_addr_t dma_address, | ||
206 | unsigned int dma_length, unsigned int dev_addr, | ||
207 | unsigned int dmamode) | ||
208 | { | ||
209 | struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; | ||
210 | |||
211 | imxdma->sg = NULL; | ||
212 | imxdma->dma_mode = dmamode; | ||
213 | |||
214 | if (!dma_address) { | ||
215 | printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n", | ||
216 | channel); | ||
217 | return -EINVAL; | ||
218 | } | ||
219 | |||
220 | if (!dma_length) { | ||
221 | printk(KERN_ERR "imxdma%d: imx_dma_setup_single zero length\n", | ||
222 | channel); | ||
223 | return -EINVAL; | ||
224 | } | ||
225 | |||
226 | if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) { | ||
227 | pr_debug("imxdma%d: %s dma_address=0x%08x dma_length=%d " | ||
228 | "dev_addr=0x%08x for read\n", | ||
229 | channel, __func__, (unsigned int)dma_address, | ||
230 | dma_length, dev_addr); | ||
231 | |||
232 | imx_dmav1_writel(dev_addr, DMA_SAR(channel)); | ||
233 | imx_dmav1_writel(dma_address, DMA_DAR(channel)); | ||
234 | imx_dmav1_writel(imxdma->ccr_from_device, DMA_CCR(channel)); | ||
235 | } else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) { | ||
236 | pr_debug("imxdma%d: %s dma_address=0x%08x dma_length=%d " | ||
237 | "dev_addr=0x%08x for write\n", | ||
238 | channel, __func__, (unsigned int)dma_address, | ||
239 | dma_length, dev_addr); | ||
240 | |||
241 | imx_dmav1_writel(dma_address, DMA_SAR(channel)); | ||
242 | imx_dmav1_writel(dev_addr, DMA_DAR(channel)); | ||
243 | imx_dmav1_writel(imxdma->ccr_to_device, | ||
244 | DMA_CCR(channel)); | ||
245 | } else { | ||
246 | printk(KERN_ERR "imxdma%d: imx_dma_setup_single bad dmamode\n", | ||
247 | channel); | ||
248 | return -EINVAL; | ||
249 | } | ||
250 | |||
251 | imx_dmav1_writel(dma_length, DMA_CNTR(channel)); | ||
252 | |||
253 | return 0; | ||
254 | } | ||
255 | EXPORT_SYMBOL(imx_dma_setup_single); | ||
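A minimal sketch of a linear memory-to-device transfer built on the call above, assuming the channel was already obtained with imx_dma_request() and that MY_DMA_REQ, MY_DEV_FIFO_ADDR and the port/memory size choices are placeholders the real driver would pick for its peripheral:

static int start_tx(int channel, dma_addr_t buf, unsigned int len)
{
        int ret;

        /* peripheral side as 16-bit FIFO, memory side as 32-bit linear */
        ret = imx_dma_config_channel(channel,
                                     IMX_DMA_TYPE_FIFO | IMX_DMA_MEMSIZE_16,
                                     IMX_DMA_TYPE_LINEAR | IMX_DMA_MEMSIZE_32,
                                     MY_DMA_REQ, 0);
        if (ret)
                return ret;

        imx_dma_config_burstlen(channel, 16);

        ret = imx_dma_setup_single(channel, buf, len, MY_DEV_FIFO_ADDR,
                                   DMA_MODE_WRITE);
        if (ret)
                return ret;

        imx_dma_enable(channel);
        return 0;
}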
256 | |||
257 | /** | ||
258 | * imx_dma_setup_sg - setup i.MX DMA channel SG list to/from device transfer | ||
259 | * @channel: i.MX DMA channel number | ||
260 | * @sg: pointer to the scatter-gather list/vector | ||
261 | * @sgcount: scatter-gather list entry count | ||
262 | * @dma_length: total length of the transfer request in bytes | ||
263 | * @dev_addr: physical device port address | ||
264 | * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory | ||
265 | * or %DMA_MODE_WRITE from memory to the device | ||
266 | * | ||
267 | * The function sets up DMA channel state and registers to be ready for | ||
268 | * transfer specified by provided parameters. The scatter-gather emulation | ||
269 | * is set up according to the parameters. | ||
270 | * | ||
271 | * The full preparation of the transfer requires setup of more registers | ||
272 | * by the caller before imx_dma_enable() can be called. | ||
273 | * | ||
274 | * %BLR(channel) holds transfer burst length in bytes, 0 means 64 bytes | ||
275 | * | ||
276 | * %RSSR(channel) has to be set to the DMA request line source %DMA_REQ_xxx | ||
277 | * | ||
278 | * %CCR(channel) has to specify transfer parameters; the following settings | ||
279 | * are typical for linear or simple scatter-gather transfers if | ||
280 | * %DMA_MODE_READ is specified: | ||
281 | * | ||
282 | * %CCR_DMOD_LINEAR | %CCR_DSIZ_32 | %CCR_SMOD_FIFO | %CCR_SSIZ_x | ||
283 | * | ||
284 | * The typical setup for %DMA_MODE_WRITE is specified by the following | ||
285 | * combination of options: | ||
286 | * | ||
287 | * %CCR_SMOD_LINEAR | %CCR_SSIZ_32 | %CCR_DMOD_FIFO | %CCR_DSIZ_x | ||
288 | * | ||
289 | * Be careful here and do not mistakenly mix source and target device | ||
290 | * port size constants; they are really different: | ||
291 | * %CCR_SSIZ_8, %CCR_SSIZ_16, %CCR_SSIZ_32, | ||
292 | * %CCR_DSIZ_8, %CCR_DSIZ_16, %CCR_DSIZ_32 | ||
293 | * | ||
294 | * Return value: if incorrect parameters are provided -%EINVAL. | ||
295 | * Zero indicates success. | ||
296 | */ | ||
297 | int | ||
298 | imx_dma_setup_sg(int channel, | ||
299 | struct scatterlist *sg, unsigned int sgcount, | ||
300 | unsigned int dma_length, unsigned int dev_addr, | ||
301 | unsigned int dmamode) | ||
302 | { | ||
303 | struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; | ||
304 | |||
305 | if (imxdma->in_use) | ||
306 | return -EBUSY; | ||
307 | |||
308 | imxdma->sg = sg; | ||
309 | imxdma->dma_mode = dmamode; | ||
310 | imxdma->resbytes = dma_length; | ||
311 | |||
312 | if (!sg || !sgcount) { | ||
313 | printk(KERN_ERR "imxdma%d: imx_dma_setup_sg empty sg list\n", | ||
314 | channel); | ||
315 | return -EINVAL; | ||
316 | } | ||
317 | |||
318 | if (!sg->length) { | ||
319 | printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n", | ||
320 | channel); | ||
321 | return -EINVAL; | ||
322 | } | ||
323 | |||
324 | if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) { | ||
325 | pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d " | ||
326 | "dev_addr=0x%08x for read\n", | ||
327 | channel, __func__, sg, sgcount, dma_length, dev_addr); | ||
328 | |||
329 | imx_dmav1_writel(dev_addr, DMA_SAR(channel)); | ||
330 | imx_dmav1_writel(imxdma->ccr_from_device, DMA_CCR(channel)); | ||
331 | } else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) { | ||
332 | pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d " | ||
333 | "dev_addr=0x%08x for write\n", | ||
334 | channel, __func__, sg, sgcount, dma_length, dev_addr); | ||
335 | |||
336 | imx_dmav1_writel(dev_addr, DMA_DAR(channel)); | ||
337 | imx_dmav1_writel(imxdma->ccr_to_device, DMA_CCR(channel)); | ||
338 | } else { | ||
339 | printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n", | ||
340 | channel); | ||
341 | return -EINVAL; | ||
342 | } | ||
343 | |||
344 | imx_dma_sg_next(channel, sg); | ||
345 | |||
346 | return 0; | ||
347 | } | ||
348 | EXPORT_SYMBOL(imx_dma_setup_sg); | ||
349 | |||
350 | int | ||
351 | imx_dma_config_channel(int channel, unsigned int config_port, | ||
352 | unsigned int config_mem, unsigned int dmareq, int hw_chaining) | ||
353 | { | ||
354 | struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; | ||
355 | u32 dreq = 0; | ||
356 | |||
357 | imxdma->hw_chaining = 0; | ||
358 | |||
359 | if (hw_chaining) { | ||
360 | imxdma->hw_chaining = 1; | ||
361 | if (!imx_dma_hw_chain(imxdma)) | ||
362 | return -EINVAL; | ||
363 | } | ||
364 | |||
365 | if (dmareq) | ||
366 | dreq = CCR_REN; | ||
367 | |||
368 | imxdma->ccr_from_device = config_port | (config_mem << 2) | dreq; | ||
369 | imxdma->ccr_to_device = config_mem | (config_port << 2) | dreq; | ||
370 | |||
371 | imx_dmav1_writel(dmareq, DMA_RSSR(channel)); | ||
372 | |||
373 | return 0; | ||
374 | } | ||
375 | EXPORT_SYMBOL(imx_dma_config_channel); | ||
376 | |||
377 | void imx_dma_config_burstlen(int channel, unsigned int burstlen) | ||
378 | { | ||
379 | imx_dmav1_writel(burstlen, DMA_BLR(channel)); | ||
380 | } | ||
381 | EXPORT_SYMBOL(imx_dma_config_burstlen); | ||
382 | |||
383 | /** | ||
384 | * imx_dma_setup_handlers - setup i.MX DMA channel end and error notification | ||
385 | * handlers | ||
386 | * @channel: i.MX DMA channel number | ||
387 | * @irq_handler: the pointer to the function called if the transfer | ||
388 | * ends successfully | ||
389 | * @err_handler: the pointer to the function called if a premature | ||
390 | * end caused by an error occurs | ||
391 | * @data: user specified value to be passed to the handlers | ||
392 | */ | ||
393 | int | ||
394 | imx_dma_setup_handlers(int channel, | ||
395 | void (*irq_handler) (int, void *), | ||
396 | void (*err_handler) (int, void *, int), | ||
397 | void *data) | ||
398 | { | ||
399 | struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; | ||
400 | unsigned long flags; | ||
401 | |||
402 | if (!imxdma->name) { | ||
403 | printk(KERN_CRIT "%s: called for unallocated channel %d\n", | ||
404 | __func__, channel); | ||
405 | return -ENODEV; | ||
406 | } | ||
407 | |||
408 | local_irq_save(flags); | ||
409 | imx_dmav1_writel(1 << channel, DMA_DISR); | ||
410 | imxdma->irq_handler = irq_handler; | ||
411 | imxdma->err_handler = err_handler; | ||
412 | imxdma->data = data; | ||
413 | local_irq_restore(flags); | ||
414 | return 0; | ||
415 | } | ||
416 | EXPORT_SYMBOL(imx_dma_setup_handlers); | ||
417 | |||
418 | /** | ||
419 | * imx_dma_setup_progression_handler - setup i.MX DMA channel progression | ||
420 | * handlers | ||
421 | * @channel: i.MX DMA channel number | ||
422 | * @prog_handler: the pointer to the function called if the transfer progresses | ||
423 | */ | ||
424 | int | ||
425 | imx_dma_setup_progression_handler(int channel, | ||
426 | void (*prog_handler) (int, void*, struct scatterlist*)) | ||
427 | { | ||
428 | struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; | ||
429 | unsigned long flags; | ||
430 | |||
431 | if (!imxdma->name) { | ||
432 | printk(KERN_CRIT "%s: called for unallocated channel %d\n", | ||
433 | __func__, channel); | ||
434 | return -ENODEV; | ||
435 | } | ||
436 | |||
437 | local_irq_save(flags); | ||
438 | imxdma->prog_handler = prog_handler; | ||
439 | local_irq_restore(flags); | ||
440 | return 0; | ||
441 | } | ||
442 | EXPORT_SYMBOL(imx_dma_setup_progression_handler); | ||
443 | |||
444 | /** | ||
445 | * imx_dma_enable - function to start i.MX DMA channel operation | ||
446 | * @channel: i.MX DMA channel number | ||
447 | * | ||
448 | * The channel has to be allocated by the driver through the | ||
449 | * imx_dma_request() or imx_dma_request_by_prio() function. | ||
450 | * The transfer parameters have to be set in the channel registers through | ||
451 | * a call to the imx_dma_setup_single() or imx_dma_setup_sg() function, | ||
452 | * and the %BLR(channel), %RSSR(channel) and %CCR(channel) registers have | ||
453 | * to be set by the channel user prior to this function call. | ||
454 | */ | ||
455 | void imx_dma_enable(int channel) | ||
456 | { | ||
457 | struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; | ||
458 | unsigned long flags; | ||
459 | |||
460 | pr_debug("imxdma%d: imx_dma_enable\n", channel); | ||
461 | |||
462 | if (!imxdma->name) { | ||
463 | printk(KERN_CRIT "%s: called for unallocated channel %d\n", | ||
464 | __func__, channel); | ||
465 | return; | ||
466 | } | ||
467 | |||
468 | if (imxdma->in_use) | ||
469 | return; | ||
470 | |||
471 | local_irq_save(flags); | ||
472 | |||
473 | imx_dmav1_writel(1 << channel, DMA_DISR); | ||
474 | imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR); | ||
475 | imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN | | ||
476 | CCR_ACRPT, DMA_CCR(channel)); | ||
477 | |||
478 | if ((cpu_is_mx21() || cpu_is_mx27()) && | ||
479 | imxdma->sg && imx_dma_hw_chain(imxdma)) { | ||
480 | imxdma->sg = sg_next(imxdma->sg); | ||
481 | if (imxdma->sg) { | ||
482 | u32 tmp; | ||
483 | imx_dma_sg_next(channel, imxdma->sg); | ||
484 | tmp = imx_dmav1_readl(DMA_CCR(channel)); | ||
485 | imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT, | ||
486 | DMA_CCR(channel)); | ||
487 | } | ||
488 | } | ||
489 | imxdma->in_use = 1; | ||
490 | |||
491 | local_irq_restore(flags); | ||
492 | } | ||
493 | EXPORT_SYMBOL(imx_dma_enable); | ||
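Taken together, the legacy flow a client followed was request, configure, install handlers, set up the transfer, then enable; a minimal sketch for a device-to-memory scatter-gather read, where my_done()/my_error(), MY_DMA_REQ and MY_DEV_FIFO_ADDR are illustrative placeholders:

static void my_done(int channel, void *data)
{
        /* transfer finished; the core has already cleared CCR_CEN */
}

static void my_error(int channel, void *data, int errcode)
{
        /* errcode is a mask of IMX_DMA_ERR_* bits */
}

static int start_rx(struct scatterlist *sg, unsigned int sgcount,
                    unsigned int total_len)
{
        int channel, ret;

        channel = imx_dma_request_by_prio("my-rx", DMA_PRIO_HIGH);
        if (channel < 0)
                return channel;

        ret = imx_dma_config_channel(channel,
                                     IMX_DMA_TYPE_FIFO | IMX_DMA_MEMSIZE_16,
                                     IMX_DMA_TYPE_LINEAR | IMX_DMA_MEMSIZE_32,
                                     MY_DMA_REQ, 0);
        if (ret)
                goto err_free;

        imx_dma_config_burstlen(channel, 16);
        imx_dma_setup_handlers(channel, my_done, my_error, NULL);

        ret = imx_dma_setup_sg(channel, sg, sgcount, total_len,
                               MY_DEV_FIFO_ADDR, DMA_MODE_READ);
        if (ret)
                goto err_free;

        imx_dma_enable(channel);
        return channel;

err_free:
        imx_dma_free(channel);
        return ret;
}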
494 | |||
495 | /** | ||
496 | * imx_dma_disable - stop and finish i.MX DMA channel operation | ||
497 | * @channel: i.MX DMA channel number | ||
498 | */ | ||
499 | void imx_dma_disable(int channel) | ||
500 | { | ||
501 | struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; | ||
502 | unsigned long flags; | ||
503 | |||
504 | pr_debug("imxdma%d: imx_dma_disable\n", channel); | ||
505 | |||
506 | if (imx_dma_hw_chain(imxdma)) | ||
507 | del_timer(&imxdma->watchdog); | ||
508 | |||
509 | local_irq_save(flags); | ||
510 | imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR); | ||
511 | imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN, | ||
512 | DMA_CCR(channel)); | ||
513 | imx_dmav1_writel(1 << channel, DMA_DISR); | ||
514 | imxdma->in_use = 0; | ||
515 | local_irq_restore(flags); | ||
516 | } | ||
517 | EXPORT_SYMBOL(imx_dma_disable); | ||
518 | |||
519 | static void imx_dma_watchdog(unsigned long chno) | ||
520 | { | ||
521 | struct imx_dma_channel *imxdma = &imx_dma_channels[chno]; | ||
522 | |||
523 | imx_dmav1_writel(0, DMA_CCR(chno)); | ||
524 | imxdma->in_use = 0; | ||
525 | imxdma->sg = NULL; | ||
526 | |||
527 | if (imxdma->err_handler) | ||
528 | imxdma->err_handler(chno, imxdma->data, IMX_DMA_ERR_TIMEOUT); | ||
529 | } | ||
530 | |||
531 | static irqreturn_t dma_err_handler(int irq, void *dev_id) | ||
532 | { | ||
533 | int i, disr; | ||
534 | struct imx_dma_channel *imxdma; | ||
535 | unsigned int err_mask; | ||
536 | int errcode; | ||
537 | |||
538 | disr = imx_dmav1_readl(DMA_DISR); | ||
539 | |||
540 | err_mask = imx_dmav1_readl(DMA_DBTOSR) | | ||
541 | imx_dmav1_readl(DMA_DRTOSR) | | ||
542 | imx_dmav1_readl(DMA_DSESR) | | ||
543 | imx_dmav1_readl(DMA_DBOSR); | ||
544 | |||
545 | if (!err_mask) | ||
546 | return IRQ_HANDLED; | ||
547 | |||
548 | imx_dmav1_writel(disr & err_mask, DMA_DISR); | ||
549 | |||
550 | for (i = 0; i < IMX_DMA_CHANNELS; i++) { | ||
551 | if (!(err_mask & (1 << i))) | ||
552 | continue; | ||
553 | imxdma = &imx_dma_channels[i]; | ||
554 | errcode = 0; | ||
555 | |||
556 | if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) { | ||
557 | imx_dmav1_writel(1 << i, DMA_DBTOSR); | ||
558 | errcode |= IMX_DMA_ERR_BURST; | ||
559 | } | ||
560 | if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) { | ||
561 | imx_dmav1_writel(1 << i, DMA_DRTOSR); | ||
562 | errcode |= IMX_DMA_ERR_REQUEST; | ||
563 | } | ||
564 | if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) { | ||
565 | imx_dmav1_writel(1 << i, DMA_DSESR); | ||
566 | errcode |= IMX_DMA_ERR_TRANSFER; | ||
567 | } | ||
568 | if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) { | ||
569 | imx_dmav1_writel(1 << i, DMA_DBOSR); | ||
570 | errcode |= IMX_DMA_ERR_BUFFER; | ||
571 | } | ||
572 | if (imxdma->name && imxdma->err_handler) { | ||
573 | imxdma->err_handler(i, imxdma->data, errcode); | ||
574 | continue; | ||
575 | } | ||
576 | |||
577 | imx_dma_channels[i].sg = NULL; | ||
578 | |||
579 | printk(KERN_WARNING | ||
580 | "DMA timeout on channel %d (%s) -%s%s%s%s\n", | ||
581 | i, imxdma->name, | ||
582 | errcode & IMX_DMA_ERR_BURST ? " burst" : "", | ||
583 | errcode & IMX_DMA_ERR_REQUEST ? " request" : "", | ||
584 | errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "", | ||
585 | errcode & IMX_DMA_ERR_BUFFER ? " buffer" : ""); | ||
586 | } | ||
587 | return IRQ_HANDLED; | ||
588 | } | ||
589 | |||
590 | static void dma_irq_handle_channel(int chno) | ||
591 | { | ||
592 | struct imx_dma_channel *imxdma = &imx_dma_channels[chno]; | ||
593 | |||
594 | if (!imxdma->name) { | ||
595 | /* | ||
596 | * IRQ for an unregistered DMA channel: | ||
597 | * let's clear the interrupts and disable it. | ||
598 | */ | ||
599 | printk(KERN_WARNING | ||
600 | "spurious IRQ for DMA channel %d\n", chno); | ||
601 | return; | ||
602 | } | ||
603 | |||
604 | if (imxdma->sg) { | ||
605 | u32 tmp; | ||
606 | struct scatterlist *current_sg = imxdma->sg; | ||
607 | imxdma->sg = sg_next(imxdma->sg); | ||
608 | |||
609 | if (imxdma->sg) { | ||
610 | imx_dma_sg_next(chno, imxdma->sg); | ||
611 | |||
612 | tmp = imx_dmav1_readl(DMA_CCR(chno)); | ||
613 | |||
614 | if (imx_dma_hw_chain(imxdma)) { | ||
615 | /* FIXME: The timeout should probably be | ||
616 | * configurable | ||
617 | */ | ||
618 | mod_timer(&imxdma->watchdog, | ||
619 | jiffies + msecs_to_jiffies(500)); | ||
620 | |||
621 | tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT; | ||
622 | imx_dmav1_writel(tmp, DMA_CCR(chno)); | ||
623 | } else { | ||
624 | imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno)); | ||
625 | tmp |= CCR_CEN; | ||
626 | } | ||
627 | |||
628 | imx_dmav1_writel(tmp, DMA_CCR(chno)); | ||
629 | |||
630 | if (imxdma->prog_handler) | ||
631 | imxdma->prog_handler(chno, imxdma->data, | ||
632 | current_sg); | ||
633 | |||
634 | return; | ||
635 | } | ||
636 | |||
637 | if (imx_dma_hw_chain(imxdma)) { | ||
638 | del_timer(&imxdma->watchdog); | ||
639 | return; | ||
640 | } | ||
641 | } | ||
642 | |||
643 | imx_dmav1_writel(0, DMA_CCR(chno)); | ||
644 | imxdma->in_use = 0; | ||
645 | if (imxdma->irq_handler) | ||
646 | imxdma->irq_handler(chno, imxdma->data); | ||
647 | } | ||
648 | |||
649 | static irqreturn_t dma_irq_handler(int irq, void *dev_id) | ||
650 | { | ||
651 | int i, disr; | ||
652 | |||
653 | if (cpu_is_mx21() || cpu_is_mx27()) | ||
654 | dma_err_handler(irq, dev_id); | ||
655 | |||
656 | disr = imx_dmav1_readl(DMA_DISR); | ||
657 | |||
658 | pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n", | ||
659 | disr); | ||
660 | |||
661 | imx_dmav1_writel(disr, DMA_DISR); | ||
662 | for (i = 0; i < IMX_DMA_CHANNELS; i++) { | ||
663 | if (disr & (1 << i)) | ||
664 | dma_irq_handle_channel(i); | ||
665 | } | ||
666 | |||
667 | return IRQ_HANDLED; | ||
668 | } | ||
669 | |||
670 | /** | ||
671 | * imx_dma_request - request/allocate specified channel number | ||
672 | * @channel: i.MX DMA channel number | ||
673 | * @name: the driver/caller own non-%NULL identification | ||
674 | */ | ||
675 | int imx_dma_request(int channel, const char *name) | ||
676 | { | ||
677 | struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; | ||
678 | unsigned long flags; | ||
679 | int ret = 0; | ||
680 | |||
681 | /* basic sanity checks */ | ||
682 | if (!name) | ||
683 | return -EINVAL; | ||
684 | |||
685 | if (channel >= IMX_DMA_CHANNELS) { | ||
686 | printk(KERN_CRIT "%s: called for non-existent channel %d\n", | ||
687 | __func__, channel); | ||
688 | return -EINVAL; | ||
689 | } | ||
690 | |||
691 | local_irq_save(flags); | ||
692 | if (imxdma->name) { | ||
693 | local_irq_restore(flags); | ||
694 | return -EBUSY; | ||
695 | } | ||
696 | memset(imxdma, 0, sizeof(*imxdma)); | ||
697 | imxdma->name = name; | ||
698 | local_irq_restore(flags); /* request_irq() can block */ | ||
699 | |||
700 | if (cpu_is_mx21() || cpu_is_mx27()) { | ||
701 | ret = request_irq(MX2x_INT_DMACH0 + channel, | ||
702 | dma_irq_handler, 0, "DMA", NULL); | ||
703 | if (ret) { | ||
704 | imxdma->name = NULL; | ||
705 | pr_crit("Can't register IRQ %d for DMA channel %d\n", | ||
706 | MX2x_INT_DMACH0 + channel, channel); | ||
707 | return ret; | ||
708 | } | ||
709 | init_timer(&imxdma->watchdog); | ||
710 | imxdma->watchdog.function = &imx_dma_watchdog; | ||
711 | imxdma->watchdog.data = channel; | ||
712 | } | ||
713 | |||
714 | return ret; | ||
715 | } | ||
716 | EXPORT_SYMBOL(imx_dma_request); | ||
717 | |||
718 | /** | ||
719 | * imx_dma_free - release previously acquired channel | ||
720 | * @channel: i.MX DMA channel number | ||
721 | */ | ||
722 | void imx_dma_free(int channel) | ||
723 | { | ||
724 | unsigned long flags; | ||
725 | struct imx_dma_channel *imxdma = &imx_dma_channels[channel]; | ||
726 | |||
727 | if (!imxdma->name) { | ||
728 | printk(KERN_CRIT | ||
729 | "%s: trying to free free channel %d\n", | ||
730 | __func__, channel); | ||
731 | return; | ||
732 | } | ||
733 | |||
734 | local_irq_save(flags); | ||
735 | /* Disable interrupts */ | ||
736 | imx_dma_disable(channel); | ||
737 | imxdma->name = NULL; | ||
738 | |||
739 | if (cpu_is_mx21() || cpu_is_mx27()) | ||
740 | free_irq(MX2x_INT_DMACH0 + channel, NULL); | ||
741 | |||
742 | local_irq_restore(flags); | ||
743 | } | ||
744 | EXPORT_SYMBOL(imx_dma_free); | ||
745 | |||
746 | /** | ||
747 | * imx_dma_request_by_prio - find and request a free channel best | ||
748 | * suiting the requested priority | ||
749 | * @channel: i.MX DMA channel number | ||
750 | * @name: the driver/caller own non-%NULL identification | ||
751 | * | ||
752 | * This function tries to find a free channel in the specified priority group; | ||
753 | * if that priority cannot be achieved, it looks for a free channel | ||
754 | * in the higher and then even the lower priority groups. | ||
755 | * | ||
756 | * Return value: If there is no free channel to allocate, -%ENODEV is returned. | ||
757 | * On successful allocation the channel number is returned. | ||
758 | */ | ||
759 | int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio) | ||
760 | { | ||
761 | int i; | ||
762 | int best; | ||
763 | |||
764 | switch (prio) { | ||
765 | case (DMA_PRIO_HIGH): | ||
766 | best = 8; | ||
767 | break; | ||
768 | case (DMA_PRIO_MEDIUM): | ||
769 | best = 4; | ||
770 | break; | ||
771 | case (DMA_PRIO_LOW): | ||
772 | default: | ||
773 | best = 0; | ||
774 | break; | ||
775 | } | ||
776 | |||
777 | for (i = best; i < IMX_DMA_CHANNELS; i++) | ||
778 | if (!imx_dma_request(i, name)) | ||
779 | return i; | ||
780 | |||
781 | for (i = best - 1; i >= 0; i--) | ||
782 | if (!imx_dma_request(i, name)) | ||
783 | return i; | ||
784 | |||
785 | printk(KERN_ERR "%s: no free DMA channel found\n", __func__); | ||
786 | |||
787 | return -ENODEV; | ||
788 | } | ||
789 | EXPORT_SYMBOL(imx_dma_request_by_prio); | ||
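A short sketch of the priority-based allocation described above; the "my-dev" name is arbitrary and the helper is illustrative:

static int grab_channel(void)
{
        int channel;

        /* DMA_PRIO_HIGH searches channels 8..15 first, then falls back
         * to the lower-numbered, lower-priority channels. */
        channel = imx_dma_request_by_prio("my-dev", DMA_PRIO_HIGH);
        if (channel < 0)
                return channel;         /* -ENODEV: no free channel at all */

        /* ... use the channel (see the sketch after imx_dma_enable) ... */

        imx_dma_free(channel);
        return 0;
}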
790 | |||
791 | static int __init imx_dma_init(void) | ||
792 | { | ||
793 | int ret = 0; | ||
794 | int i; | ||
795 | |||
796 | if (cpu_is_mx1()) | ||
797 | imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR); | ||
798 | else if (cpu_is_mx21()) | ||
799 | imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR); | ||
800 | else if (cpu_is_mx27()) | ||
801 | imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR); | ||
802 | else | ||
803 | return 0; | ||
804 | |||
805 | dma_clk = clk_get(NULL, "dma"); | ||
806 | if (IS_ERR(dma_clk)) | ||
807 | return PTR_ERR(dma_clk); | ||
808 | clk_enable(dma_clk); | ||
809 | |||
810 | /* reset DMA module */ | ||
811 | imx_dmav1_writel(DCR_DRST, DMA_DCR); | ||
812 | |||
813 | if (cpu_is_mx1()) { | ||
814 | ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", NULL); | ||
815 | if (ret) { | ||
816 | pr_crit("Wow! Can't register IRQ for DMA\n"); | ||
817 | return ret; | ||
818 | } | ||
819 | |||
820 | ret = request_irq(MX1_DMA_ERR, dma_err_handler, 0, "DMA", NULL); | ||
821 | if (ret) { | ||
822 | pr_crit("Wow! Can't register ERRIRQ for DMA\n"); | ||
823 | free_irq(MX1_DMA_INT, NULL); | ||
824 | return ret; | ||
825 | } | ||
826 | } | ||
827 | |||
828 | /* enable DMA module */ | ||
829 | imx_dmav1_writel(DCR_DEN, DMA_DCR); | ||
830 | |||
831 | /* clear all interrupts */ | ||
832 | imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR); | ||
833 | |||
834 | /* disable interrupts */ | ||
835 | imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR); | ||
836 | |||
837 | for (i = 0; i < IMX_DMA_CHANNELS; i++) { | ||
838 | imx_dma_channels[i].sg = NULL; | ||
839 | imx_dma_channels[i].dma_num = i; | ||
840 | } | ||
841 | |||
842 | return ret; | ||
843 | } | ||
844 | |||
845 | arch_initcall(imx_dma_init); | ||
diff --git a/arch/arm/mach-imx/include/mach/dma-v1.h b/arch/arm/mach-imx/include/mach/dma-v1.h deleted file mode 100644 index ac6fd713828a..000000000000 --- a/arch/arm/mach-imx/include/mach/dma-v1.h +++ /dev/null | |||
@@ -1,103 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/mach-imx/include/mach/dma-v1.h | ||
3 | * | ||
4 | * i.MX DMA registration and IRQ dispatching | ||
5 | * | ||
6 | * Copyright 2006 Pavel Pisa <pisa@cmp.felk.cvut.cz> | ||
7 | * Copyright 2008 Juergen Beisert, <kernel@pengutronix.de> | ||
8 | * Copyright 2008 Sascha Hauer, <s.hauer@pengutronix.de> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version 2 | ||
13 | * of the License, or (at your option) any later version. | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, | ||
22 | * MA 02110-1301, USA. | ||
23 | */ | ||
24 | |||
25 | #ifndef __MACH_DMA_V1_H__ | ||
26 | #define __MACH_DMA_V1_H__ | ||
27 | |||
28 | #define imx_has_dma_v1() (cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27()) | ||
29 | |||
30 | #include <mach/dma.h> | ||
31 | |||
32 | #define IMX_DMA_CHANNELS 16 | ||
33 | |||
34 | #define DMA_MODE_READ 0 | ||
35 | #define DMA_MODE_WRITE 1 | ||
36 | #define DMA_MODE_MASK 1 | ||
37 | |||
38 | #define MX1_DMA_REG(offset) MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR + (offset)) | ||
39 | |||
40 | /* DMA Interrupt Mask Register */ | ||
41 | #define MX1_DMA_DIMR MX1_DMA_REG(0x08) | ||
42 | |||
43 | /* Channel Control Register */ | ||
44 | #define MX1_DMA_CCR(x) MX1_DMA_REG(0x8c + ((x) << 6)) | ||
45 | |||
46 | #define IMX_DMA_MEMSIZE_32 (0 << 4) | ||
47 | #define IMX_DMA_MEMSIZE_8 (1 << 4) | ||
48 | #define IMX_DMA_MEMSIZE_16 (2 << 4) | ||
49 | #define IMX_DMA_TYPE_LINEAR (0 << 10) | ||
50 | #define IMX_DMA_TYPE_2D (1 << 10) | ||
51 | #define IMX_DMA_TYPE_FIFO (2 << 10) | ||
52 | |||
53 | #define IMX_DMA_ERR_BURST (1 << 0) | ||
54 | #define IMX_DMA_ERR_REQUEST (1 << 1) | ||
55 | #define IMX_DMA_ERR_TRANSFER (1 << 2) | ||
56 | #define IMX_DMA_ERR_BUFFER (1 << 3) | ||
57 | #define IMX_DMA_ERR_TIMEOUT (1 << 4) | ||
58 | |||
59 | int | ||
60 | imx_dma_config_channel(int channel, unsigned int config_port, | ||
61 | unsigned int config_mem, unsigned int dmareq, int hw_chaining); | ||
62 | |||
63 | void | ||
64 | imx_dma_config_burstlen(int channel, unsigned int burstlen); | ||
65 | |||
66 | int | ||
67 | imx_dma_setup_single(int channel, dma_addr_t dma_address, | ||
68 | unsigned int dma_length, unsigned int dev_addr, | ||
69 | unsigned int dmamode); | ||
70 | |||
71 | |||
72 | /* | ||
73 | * Use this flag as the dma_length argument to imx_dma_setup_sg() | ||
74 | * to create an endless running dma loop. The end of the scatterlist | ||
75 | * must be linked to the beginning for this to work. | ||
76 | */ | ||
77 | #define IMX_DMA_LENGTH_LOOP ((unsigned int)-1) | ||
78 | |||
79 | int | ||
80 | imx_dma_setup_sg(int channel, struct scatterlist *sg, | ||
81 | unsigned int sgcount, unsigned int dma_length, | ||
82 | unsigned int dev_addr, unsigned int dmamode); | ||
83 | |||
84 | int | ||
85 | imx_dma_setup_handlers(int channel, | ||
86 | void (*irq_handler) (int, void *), | ||
87 | void (*err_handler) (int, void *, int), void *data); | ||
88 | |||
89 | int | ||
90 | imx_dma_setup_progression_handler(int channel, | ||
91 | void (*prog_handler) (int, void*, struct scatterlist*)); | ||
92 | |||
93 | void imx_dma_enable(int channel); | ||
94 | |||
95 | void imx_dma_disable(int channel); | ||
96 | |||
97 | int imx_dma_request(int channel, const char *name); | ||
98 | |||
99 | void imx_dma_free(int channel); | ||
100 | |||
101 | int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio); | ||
102 | |||
103 | #endif /* __MACH_DMA_V1_H__ */ | ||
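The IMX_DMA_LENGTH_LOOP flag documented above turns a scatter-gather transfer into an endless loop; a minimal sketch, assuming the caller has already chained the last scatterlist entry back to the first and that my_period_done() and MY_DEV_FIFO_ADDR are placeholders:

static void my_period_done(int channel, void *data, struct scatterlist *sg)
{
        /* called as each chunk completes, with the chunk just finished */
}

static int start_cyclic_rx(int channel, struct scatterlist *sg,
                           unsigned int sgcount)
{
        int ret;

        imx_dma_setup_progression_handler(channel, my_period_done);

        /* IMX_DMA_LENGTH_LOOP: the residue is never decremented, so the
         * engine keeps walking the circular scatterlist forever */
        ret = imx_dma_setup_sg(channel, sg, sgcount, IMX_DMA_LENGTH_LOOP,
                               MY_DEV_FIFO_ADDR, DMA_MODE_READ);
        if (ret)
                return ret;

        imx_dma_enable(channel);
        return 0;
}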
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h index fd0ee84c45d1..9ff93b065686 100644 --- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h +++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h | |||
@@ -200,8 +200,7 @@ dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan, | |||
200 | sg.dma_address = addr; | 200 | sg.dma_address = addr; |
201 | sg.length = size; | 201 | sg.length = size; |
202 | 202 | ||
203 | return chan->device->device_prep_slave_sg(chan, &sg, 1, | 203 | return dmaengine_prep_slave_sg(chan, &sg, 1, direction, flags); |
204 | direction, flags); | ||
205 | } | 204 | } |
206 | 205 | ||
207 | #else | 206 | #else |
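The dmaengine_prep_slave_sg() call used in the hunk above (and in the plat-samsung conversion below) is a thin inline wrapper around the driver callback; roughly the following, although the authoritative definition lives in include/linux/dmaengine.h:

	static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
		enum dma_transfer_direction dir, unsigned long flags)
	{
		/* the 'context' argument introduced by this series is passed as NULL */
		return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
							  dir, flags, NULL);
	}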
diff --git a/arch/arm/plat-samsung/dma-ops.c b/arch/arm/plat-samsung/dma-ops.c index 301d9c319d0b..eb9f4f534006 100644 --- a/arch/arm/plat-samsung/dma-ops.c +++ b/arch/arm/plat-samsung/dma-ops.c | |||
@@ -79,11 +79,11 @@ static int samsung_dmadev_prepare(unsigned ch, | |||
79 | info->len, offset_in_page(info->buf)); | 79 | info->len, offset_in_page(info->buf)); |
80 | sg_dma_address(&sg) = info->buf; | 80 | sg_dma_address(&sg) = info->buf; |
81 | 81 | ||
82 | desc = chan->device->device_prep_slave_sg(chan, | 82 | desc = dmaengine_prep_slave_sg(chan, |
83 | &sg, 1, info->direction, DMA_PREP_INTERRUPT); | 83 | &sg, 1, info->direction, DMA_PREP_INTERRUPT); |
84 | break; | 84 | break; |
85 | case DMA_CYCLIC: | 85 | case DMA_CYCLIC: |
86 | desc = chan->device->device_prep_dma_cyclic(chan, | 86 | desc = dmaengine_prep_dma_cyclic(chan, |
87 | info->buf, info->len, info->period, info->direction); | 87 | info->buf, info->len, info->period, info->direction); |
88 | break; | 88 | break; |
89 | default: | 89 | default: |
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c index 889c544688ca..0445c4fd67e3 100644 --- a/arch/avr32/mach-at32ap/at32ap700x.c +++ b/arch/avr32/mach-at32ap/at32ap700x.c | |||
@@ -1351,7 +1351,6 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) | |||
1351 | goto fail; | 1351 | goto fail; |
1352 | 1352 | ||
1353 | slave->sdata.dma_dev = &dw_dmac0_device.dev; | 1353 | slave->sdata.dma_dev = &dw_dmac0_device.dev; |
1354 | slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT; | ||
1355 | slave->sdata.cfg_hi = (DWC_CFGH_SRC_PER(0) | 1354 | slave->sdata.cfg_hi = (DWC_CFGH_SRC_PER(0) |
1356 | | DWC_CFGH_DST_PER(1)); | 1355 | | DWC_CFGH_DST_PER(1)); |
1357 | slave->sdata.cfg_lo &= ~(DWC_CFGL_HS_DST_POL | 1356 | slave->sdata.cfg_lo &= ~(DWC_CFGL_HS_DST_POL |
@@ -2046,27 +2045,19 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data, | |||
2046 | /* Check if DMA slave interface for capture should be configured. */ | 2045 | /* Check if DMA slave interface for capture should be configured. */ |
2047 | if (flags & AC97C_CAPTURE) { | 2046 | if (flags & AC97C_CAPTURE) { |
2048 | rx_dws->dma_dev = &dw_dmac0_device.dev; | 2047 | rx_dws->dma_dev = &dw_dmac0_device.dev; |
2049 | rx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT; | ||
2050 | rx_dws->cfg_hi = DWC_CFGH_SRC_PER(3); | 2048 | rx_dws->cfg_hi = DWC_CFGH_SRC_PER(3); |
2051 | rx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); | 2049 | rx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); |
2052 | rx_dws->src_master = 0; | 2050 | rx_dws->src_master = 0; |
2053 | rx_dws->dst_master = 1; | 2051 | rx_dws->dst_master = 1; |
2054 | rx_dws->src_msize = DW_DMA_MSIZE_1; | ||
2055 | rx_dws->dst_msize = DW_DMA_MSIZE_1; | ||
2056 | rx_dws->fc = DW_DMA_FC_D_P2M; | ||
2057 | } | 2052 | } |
2058 | 2053 | ||
2059 | /* Check if DMA slave interface for playback should be configured. */ | 2054 | /* Check if DMA slave interface for playback should be configured. */ |
2060 | if (flags & AC97C_PLAYBACK) { | 2055 | if (flags & AC97C_PLAYBACK) { |
2061 | tx_dws->dma_dev = &dw_dmac0_device.dev; | 2056 | tx_dws->dma_dev = &dw_dmac0_device.dev; |
2062 | tx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT; | ||
2063 | tx_dws->cfg_hi = DWC_CFGH_DST_PER(4); | 2057 | tx_dws->cfg_hi = DWC_CFGH_DST_PER(4); |
2064 | tx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); | 2058 | tx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); |
2065 | tx_dws->src_master = 0; | 2059 | tx_dws->src_master = 0; |
2066 | tx_dws->dst_master = 1; | 2060 | tx_dws->dst_master = 1; |
2067 | tx_dws->src_msize = DW_DMA_MSIZE_1; | ||
2068 | tx_dws->dst_msize = DW_DMA_MSIZE_1; | ||
2069 | tx_dws->fc = DW_DMA_FC_D_M2P; | ||
2070 | } | 2061 | } |
2071 | 2062 | ||
2072 | if (platform_device_add_data(pdev, data, | 2063 | if (platform_device_add_data(pdev, data, |
@@ -2136,14 +2127,10 @@ at32_add_device_abdac(unsigned int id, struct atmel_abdac_pdata *data) | |||
2136 | dws = &data->dws; | 2127 | dws = &data->dws; |
2137 | 2128 | ||
2138 | dws->dma_dev = &dw_dmac0_device.dev; | 2129 | dws->dma_dev = &dw_dmac0_device.dev; |
2139 | dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT; | ||
2140 | dws->cfg_hi = DWC_CFGH_DST_PER(2); | 2130 | dws->cfg_hi = DWC_CFGH_DST_PER(2); |
2141 | dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); | 2131 | dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); |
2142 | dws->src_master = 0; | 2132 | dws->src_master = 0; |
2143 | dws->dst_master = 1; | 2133 | dws->dst_master = 1; |
2144 | dws->src_msize = DW_DMA_MSIZE_1; | ||
2145 | dws->dst_msize = DW_DMA_MSIZE_1; | ||
2146 | dws->fc = DW_DMA_FC_D_M2P; | ||
2147 | 2134 | ||
2148 | if (platform_device_add_data(pdev, data, | 2135 | if (platform_device_add_data(pdev, data, |
2149 | sizeof(struct atmel_abdac_pdata))) | 2136 | sizeof(struct atmel_abdac_pdata))) |
diff --git a/arch/avr32/mach-at32ap/include/mach/atmel-mci.h b/arch/avr32/mach-at32ap/include/mach/atmel-mci.h index a9b38967f703..4bba58561d5c 100644 --- a/arch/avr32/mach-at32ap/include/mach/atmel-mci.h +++ b/arch/avr32/mach-at32ap/include/mach/atmel-mci.h | |||
@@ -14,11 +14,4 @@ struct mci_dma_data { | |||
14 | #define slave_data_ptr(s) (&(s)->sdata) | 14 | #define slave_data_ptr(s) (&(s)->sdata) |
15 | #define find_slave_dev(s) ((s)->sdata.dma_dev) | 15 | #define find_slave_dev(s) ((s)->sdata.dma_dev) |
16 | 16 | ||
17 | #define setup_dma_addr(s, t, r) do { \ | ||
18 | if (s) { \ | ||
19 | (s)->sdata.tx_reg = (t); \ | ||
20 | (s)->sdata.rx_reg = (r); \ | ||
21 | } \ | ||
22 | } while (0) | ||
23 | |||
24 | #endif /* __MACH_ATMEL_MCI_H */ | 17 | #endif /* __MACH_ATMEL_MCI_H */ |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 4a6c46dea8a0..cf9da362d64f 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -201,7 +201,6 @@ config PL330_DMA | |||
201 | tristate "DMA API Driver for PL330" | 201 | tristate "DMA API Driver for PL330" |
202 | select DMA_ENGINE | 202 | select DMA_ENGINE |
203 | depends on ARM_AMBA | 203 | depends on ARM_AMBA |
204 | select PL330 | ||
205 | help | 204 | help |
206 | Select if your platform has one or more PL330 DMACs. | 205 | Select if your platform has one or more PL330 DMACs. |
207 | You need to provide platform specific settings via | 206 | You need to provide platform specific settings via |
@@ -231,7 +230,7 @@ config IMX_SDMA | |||
231 | 230 | ||
232 | config IMX_DMA | 231 | config IMX_DMA |
233 | tristate "i.MX DMA support" | 232 | tristate "i.MX DMA support" |
234 | depends on IMX_HAVE_DMA_V1 | 233 | depends on ARCH_MXC |
235 | select DMA_ENGINE | 234 | select DMA_ENGINE |
236 | help | 235 | help |
237 | Support the i.MX DMA engine. This engine is integrated into | 236 | Support the i.MX DMA engine. This engine is integrated into |
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 8a281584458b..c301a8ec31aa 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c | |||
@@ -85,6 +85,8 @@ | |||
85 | #include <linux/slab.h> | 85 | #include <linux/slab.h> |
86 | #include <asm/hardware/pl080.h> | 86 | #include <asm/hardware/pl080.h> |
87 | 87 | ||
88 | #include "dmaengine.h" | ||
89 | |||
88 | #define DRIVER_NAME "pl08xdmac" | 90 | #define DRIVER_NAME "pl08xdmac" |
89 | 91 | ||
90 | static struct amba_driver pl08x_amba_driver; | 92 | static struct amba_driver pl08x_amba_driver; |
@@ -649,7 +651,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, | |||
649 | } | 651 | } |
650 | 652 | ||
651 | if ((bd.srcbus.addr % bd.srcbus.buswidth) || | 653 | if ((bd.srcbus.addr % bd.srcbus.buswidth) || |
652 | (bd.srcbus.addr % bd.srcbus.buswidth)) { | 654 | (bd.dstbus.addr % bd.dstbus.buswidth)) { |
653 | dev_err(&pl08x->adev->dev, | 655 | dev_err(&pl08x->adev->dev, |
654 | "%s src & dst address must be aligned to src" | 656 | "%s src & dst address must be aligned to src" |
655 | " & dst width if peripheral is flow controller", | 657 | " & dst width if peripheral is flow controller", |
@@ -919,13 +921,10 @@ static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx) | |||
919 | struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan); | 921 | struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan); |
920 | struct pl08x_txd *txd = to_pl08x_txd(tx); | 922 | struct pl08x_txd *txd = to_pl08x_txd(tx); |
921 | unsigned long flags; | 923 | unsigned long flags; |
924 | dma_cookie_t cookie; | ||
922 | 925 | ||
923 | spin_lock_irqsave(&plchan->lock, flags); | 926 | spin_lock_irqsave(&plchan->lock, flags); |
924 | 927 | cookie = dma_cookie_assign(tx); | |
925 | plchan->chan.cookie += 1; | ||
926 | if (plchan->chan.cookie < 0) | ||
927 | plchan->chan.cookie = 1; | ||
928 | tx->cookie = plchan->chan.cookie; | ||
929 | 928 | ||
930 | /* Put this onto the pending list */ | 929 | /* Put this onto the pending list */ |
931 | list_add_tail(&txd->node, &plchan->pend_list); | 930 | list_add_tail(&txd->node, &plchan->pend_list); |
@@ -945,7 +944,7 @@ static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx) | |||
945 | 944 | ||
946 | spin_unlock_irqrestore(&plchan->lock, flags); | 945 | spin_unlock_irqrestore(&plchan->lock, flags); |
947 | 946 | ||
948 | return tx->cookie; | 947 | return cookie; |
949 | } | 948 | } |
950 | 949 | ||
951 | static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( | 950 | static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( |
@@ -965,31 +964,17 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, | |||
965 | dma_cookie_t cookie, struct dma_tx_state *txstate) | 964 | dma_cookie_t cookie, struct dma_tx_state *txstate) |
966 | { | 965 | { |
967 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | 966 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
968 | dma_cookie_t last_used; | ||
969 | dma_cookie_t last_complete; | ||
970 | enum dma_status ret; | 967 | enum dma_status ret; |
971 | u32 bytesleft = 0; | ||
972 | 968 | ||
973 | last_used = plchan->chan.cookie; | 969 | ret = dma_cookie_status(chan, cookie, txstate); |
974 | last_complete = plchan->lc; | 970 | if (ret == DMA_SUCCESS) |
975 | |||
976 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
977 | if (ret == DMA_SUCCESS) { | ||
978 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
979 | return ret; | 971 | return ret; |
980 | } | ||
981 | 972 | ||
982 | /* | 973 | /* |
983 | * This cookie not complete yet | 974 | * This cookie not complete yet |
975 | * Get number of bytes left in the active transactions and queue | ||
984 | */ | 976 | */ |
985 | last_used = plchan->chan.cookie; | 977 | dma_set_residue(txstate, pl08x_getbytes_chan(plchan)); |
986 | last_complete = plchan->lc; | ||
987 | |||
988 | /* Get number of bytes left in the active transactions and queue */ | ||
989 | bytesleft = pl08x_getbytes_chan(plchan); | ||
990 | |||
991 | dma_set_tx_state(txstate, last_complete, last_used, | ||
992 | bytesleft); | ||
993 | 978 | ||
994 | if (plchan->state == PL08X_CHAN_PAUSED) | 979 | if (plchan->state == PL08X_CHAN_PAUSED) |
995 | return DMA_PAUSED; | 980 | return DMA_PAUSED; |
@@ -1139,6 +1124,8 @@ static int dma_set_runtime_config(struct dma_chan *chan, | |||
1139 | cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; | 1124 | cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; |
1140 | cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; | 1125 | cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; |
1141 | 1126 | ||
1127 | plchan->device_fc = config->device_fc; | ||
1128 | |||
1142 | if (plchan->runtime_direction == DMA_DEV_TO_MEM) { | 1129 | if (plchan->runtime_direction == DMA_DEV_TO_MEM) { |
1143 | plchan->src_addr = config->src_addr; | 1130 | plchan->src_addr = config->src_addr; |
1144 | plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR | | 1131 | plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR | |
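The device_fc flag copied here comes from the generic slave configuration that clients now pass in instead of platform data. A hypothetical client-side call looks like this; the FIFO address, width and burst values are placeholders:

	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_phys,	/* placeholder: device FIFO address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
		.device_fc	= false,	/* DMAC, not the peripheral, is flow controller */
	};

	dmaengine_slave_config(chan, &cfg);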
@@ -1326,7 +1313,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( | |||
1326 | static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | 1313 | static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( |
1327 | struct dma_chan *chan, struct scatterlist *sgl, | 1314 | struct dma_chan *chan, struct scatterlist *sgl, |
1328 | unsigned int sg_len, enum dma_transfer_direction direction, | 1315 | unsigned int sg_len, enum dma_transfer_direction direction, |
1329 | unsigned long flags) | 1316 | unsigned long flags, void *context) |
1330 | { | 1317 | { |
1331 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | 1318 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
1332 | struct pl08x_driver_data *pl08x = plchan->host; | 1319 | struct pl08x_driver_data *pl08x = plchan->host; |
@@ -1370,7 +1357,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | |||
1370 | return NULL; | 1357 | return NULL; |
1371 | } | 1358 | } |
1372 | 1359 | ||
1373 | if (plchan->cd->device_fc) | 1360 | if (plchan->device_fc) |
1374 | tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER : | 1361 | tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER : |
1375 | PL080_FLOW_PER2MEM_PER; | 1362 | PL080_FLOW_PER2MEM_PER; |
1376 | else | 1363 | else |
@@ -1541,7 +1528,7 @@ static void pl08x_tasklet(unsigned long data) | |||
1541 | 1528 | ||
1542 | if (txd) { | 1529 | if (txd) { |
1543 | /* Update last completed */ | 1530 | /* Update last completed */ |
1544 | plchan->lc = txd->tx.cookie; | 1531 | dma_cookie_complete(&txd->tx); |
1545 | } | 1532 | } |
1546 | 1533 | ||
1547 | /* If a new descriptor is queued, set it up plchan->at is NULL here */ | 1534 | /* If a new descriptor is queued, set it up plchan->at is NULL here */ |
@@ -1722,8 +1709,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | |||
1722 | chan->name); | 1709 | chan->name); |
1723 | 1710 | ||
1724 | chan->chan.device = dmadev; | 1711 | chan->chan.device = dmadev; |
1725 | chan->chan.cookie = 0; | 1712 | dma_cookie_init(&chan->chan); |
1726 | chan->lc = 0; | ||
1727 | 1713 | ||
1728 | spin_lock_init(&chan->lock); | 1714 | spin_lock_init(&chan->lock); |
1729 | INIT_LIST_HEAD(&chan->pend_list); | 1715 | INIT_LIST_HEAD(&chan->pend_list); |
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index f4aed5fc2cb6..7aa58d204892 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/of_device.h> | 27 | #include <linux/of_device.h> |
28 | 28 | ||
29 | #include "at_hdmac_regs.h" | 29 | #include "at_hdmac_regs.h" |
30 | #include "dmaengine.h" | ||
30 | 31 | ||
31 | /* | 32 | /* |
32 | * Glossary | 33 | * Glossary |
@@ -192,27 +193,6 @@ static void atc_desc_chain(struct at_desc **first, struct at_desc **prev, | |||
192 | } | 193 | } |
193 | 194 | ||
194 | /** | 195 | /** |
195 | * atc_assign_cookie - compute and assign new cookie | ||
196 | * @atchan: channel we work on | ||
197 | * @desc: descriptor to assign cookie for | ||
198 | * | ||
199 | * Called with atchan->lock held and bh disabled | ||
200 | */ | ||
201 | static dma_cookie_t | ||
202 | atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc) | ||
203 | { | ||
204 | dma_cookie_t cookie = atchan->chan_common.cookie; | ||
205 | |||
206 | if (++cookie < 0) | ||
207 | cookie = 1; | ||
208 | |||
209 | atchan->chan_common.cookie = cookie; | ||
210 | desc->txd.cookie = cookie; | ||
211 | |||
212 | return cookie; | ||
213 | } | ||
214 | |||
215 | /** | ||
216 | * atc_dostart - starts the DMA engine for real | 196 | * atc_dostart - starts the DMA engine for real |
217 | * @atchan: the channel we want to start | 197 | * @atchan: the channel we want to start |
218 | * @first: first descriptor in the list we want to begin with | 198 | * @first: first descriptor in the list we want to begin with |
@@ -269,7 +249,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) | |||
269 | dev_vdbg(chan2dev(&atchan->chan_common), | 249 | dev_vdbg(chan2dev(&atchan->chan_common), |
270 | "descriptor %u complete\n", txd->cookie); | 250 | "descriptor %u complete\n", txd->cookie); |
271 | 251 | ||
272 | atchan->completed_cookie = txd->cookie; | 252 | dma_cookie_complete(txd); |
273 | 253 | ||
274 | /* move children to free_list */ | 254 | /* move children to free_list */ |
275 | list_splice_init(&desc->tx_list, &atchan->free_list); | 255 | list_splice_init(&desc->tx_list, &atchan->free_list); |
@@ -547,7 +527,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx) | |||
547 | unsigned long flags; | 527 | unsigned long flags; |
548 | 528 | ||
549 | spin_lock_irqsave(&atchan->lock, flags); | 529 | spin_lock_irqsave(&atchan->lock, flags); |
550 | cookie = atc_assign_cookie(atchan, desc); | 530 | cookie = dma_cookie_assign(tx); |
551 | 531 | ||
552 | if (list_empty(&atchan->active_list)) { | 532 | if (list_empty(&atchan->active_list)) { |
553 | dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", | 533 | dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", |
@@ -659,14 +639,16 @@ err_desc_get: | |||
659 | * @sg_len: number of entries in @scatterlist | 639 | * @sg_len: number of entries in @scatterlist |
660 | * @direction: DMA direction | 640 | * @direction: DMA direction |
661 | * @flags: tx descriptor status flags | 641 | * @flags: tx descriptor status flags |
642 | * @context: transaction context (ignored) | ||
662 | */ | 643 | */ |
663 | static struct dma_async_tx_descriptor * | 644 | static struct dma_async_tx_descriptor * |
664 | atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 645 | atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
665 | unsigned int sg_len, enum dma_transfer_direction direction, | 646 | unsigned int sg_len, enum dma_transfer_direction direction, |
666 | unsigned long flags) | 647 | unsigned long flags, void *context) |
667 | { | 648 | { |
668 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | 649 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
669 | struct at_dma_slave *atslave = chan->private; | 650 | struct at_dma_slave *atslave = chan->private; |
651 | struct dma_slave_config *sconfig = &atchan->dma_sconfig; | ||
670 | struct at_desc *first = NULL; | 652 | struct at_desc *first = NULL; |
671 | struct at_desc *prev = NULL; | 653 | struct at_desc *prev = NULL; |
672 | u32 ctrla; | 654 | u32 ctrla; |
@@ -688,19 +670,18 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
688 | return NULL; | 670 | return NULL; |
689 | } | 671 | } |
690 | 672 | ||
691 | reg_width = atslave->reg_width; | ||
692 | |||
693 | ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla; | 673 | ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla; |
694 | ctrlb = ATC_IEN; | 674 | ctrlb = ATC_IEN; |
695 | 675 | ||
696 | switch (direction) { | 676 | switch (direction) { |
697 | case DMA_MEM_TO_DEV: | 677 | case DMA_MEM_TO_DEV: |
678 | reg_width = convert_buswidth(sconfig->dst_addr_width); | ||
698 | ctrla |= ATC_DST_WIDTH(reg_width); | 679 | ctrla |= ATC_DST_WIDTH(reg_width); |
699 | ctrlb |= ATC_DST_ADDR_MODE_FIXED | 680 | ctrlb |= ATC_DST_ADDR_MODE_FIXED |
700 | | ATC_SRC_ADDR_MODE_INCR | 681 | | ATC_SRC_ADDR_MODE_INCR |
701 | | ATC_FC_MEM2PER | 682 | | ATC_FC_MEM2PER |
702 | | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF); | 683 | | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF); |
703 | reg = atslave->tx_reg; | 684 | reg = sconfig->dst_addr; |
704 | for_each_sg(sgl, sg, sg_len, i) { | 685 | for_each_sg(sgl, sg, sg_len, i) { |
705 | struct at_desc *desc; | 686 | struct at_desc *desc; |
706 | u32 len; | 687 | u32 len; |
@@ -728,13 +709,14 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
728 | } | 709 | } |
729 | break; | 710 | break; |
730 | case DMA_DEV_TO_MEM: | 711 | case DMA_DEV_TO_MEM: |
712 | reg_width = convert_buswidth(sconfig->src_addr_width); | ||
731 | ctrla |= ATC_SRC_WIDTH(reg_width); | 713 | ctrla |= ATC_SRC_WIDTH(reg_width); |
732 | ctrlb |= ATC_DST_ADDR_MODE_INCR | 714 | ctrlb |= ATC_DST_ADDR_MODE_INCR |
733 | | ATC_SRC_ADDR_MODE_FIXED | 715 | | ATC_SRC_ADDR_MODE_FIXED |
734 | | ATC_FC_PER2MEM | 716 | | ATC_FC_PER2MEM |
735 | | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF); | 717 | | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF); |
736 | 718 | ||
737 | reg = atslave->rx_reg; | 719 | reg = sconfig->src_addr; |
738 | for_each_sg(sgl, sg, sg_len, i) { | 720 | for_each_sg(sgl, sg, sg_len, i) { |
739 | struct at_desc *desc; | 721 | struct at_desc *desc; |
740 | u32 len; | 722 | u32 len; |
@@ -810,12 +792,15 @@ err_out: | |||
810 | * atc_dma_cyclic_fill_desc - Fill one period decriptor | 792 | * atc_dma_cyclic_fill_desc - Fill one period decriptor |
811 | */ | 793 | */ |
812 | static int | 794 | static int |
813 | atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc, | 795 | atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, |
814 | unsigned int period_index, dma_addr_t buf_addr, | 796 | unsigned int period_index, dma_addr_t buf_addr, |
815 | size_t period_len, enum dma_transfer_direction direction) | 797 | unsigned int reg_width, size_t period_len, |
798 | enum dma_transfer_direction direction) | ||
816 | { | 799 | { |
817 | u32 ctrla; | 800 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
818 | unsigned int reg_width = atslave->reg_width; | 801 | struct at_dma_slave *atslave = chan->private; |
802 | struct dma_slave_config *sconfig = &atchan->dma_sconfig; | ||
803 | u32 ctrla; | ||
819 | 804 | ||
820 | /* prepare common CRTLA value */ | 805 | /* prepare common CRTLA value */ |
821 | ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla | 806 | ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla |
@@ -826,7 +811,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc, | |||
826 | switch (direction) { | 811 | switch (direction) { |
827 | case DMA_MEM_TO_DEV: | 812 | case DMA_MEM_TO_DEV: |
828 | desc->lli.saddr = buf_addr + (period_len * period_index); | 813 | desc->lli.saddr = buf_addr + (period_len * period_index); |
829 | desc->lli.daddr = atslave->tx_reg; | 814 | desc->lli.daddr = sconfig->dst_addr; |
830 | desc->lli.ctrla = ctrla; | 815 | desc->lli.ctrla = ctrla; |
831 | desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED | 816 | desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED |
832 | | ATC_SRC_ADDR_MODE_INCR | 817 | | ATC_SRC_ADDR_MODE_INCR |
@@ -836,7 +821,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc, | |||
836 | break; | 821 | break; |
837 | 822 | ||
838 | case DMA_DEV_TO_MEM: | 823 | case DMA_DEV_TO_MEM: |
839 | desc->lli.saddr = atslave->rx_reg; | 824 | desc->lli.saddr = sconfig->src_addr; |
840 | desc->lli.daddr = buf_addr + (period_len * period_index); | 825 | desc->lli.daddr = buf_addr + (period_len * period_index); |
841 | desc->lli.ctrla = ctrla; | 826 | desc->lli.ctrla = ctrla; |
842 | desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR | 827 | desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR |
@@ -860,16 +845,20 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc, | |||
860 | * @buf_len: total number of bytes for the entire buffer | 845 | * @buf_len: total number of bytes for the entire buffer |
861 | * @period_len: number of bytes for each period | 846 | * @period_len: number of bytes for each period |
862 | * @direction: transfer direction, to or from device | 847 | * @direction: transfer direction, to or from device |
848 | * @context: transfer context (ignored) | ||
863 | */ | 849 | */ |
864 | static struct dma_async_tx_descriptor * | 850 | static struct dma_async_tx_descriptor * |
865 | atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | 851 | atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, |
866 | size_t period_len, enum dma_transfer_direction direction) | 852 | size_t period_len, enum dma_transfer_direction direction, |
853 | void *context) | ||
867 | { | 854 | { |
868 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | 855 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
869 | struct at_dma_slave *atslave = chan->private; | 856 | struct at_dma_slave *atslave = chan->private; |
857 | struct dma_slave_config *sconfig = &atchan->dma_sconfig; | ||
870 | struct at_desc *first = NULL; | 858 | struct at_desc *first = NULL; |
871 | struct at_desc *prev = NULL; | 859 | struct at_desc *prev = NULL; |
872 | unsigned long was_cyclic; | 860 | unsigned long was_cyclic; |
861 | unsigned int reg_width; | ||
873 | unsigned int periods = buf_len / period_len; | 862 | unsigned int periods = buf_len / period_len; |
874 | unsigned int i; | 863 | unsigned int i; |
875 | 864 | ||
@@ -889,8 +878,13 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | |||
889 | return NULL; | 878 | return NULL; |
890 | } | 879 | } |
891 | 880 | ||
881 | if (sconfig->direction == DMA_MEM_TO_DEV) | ||
882 | reg_width = convert_buswidth(sconfig->dst_addr_width); | ||
883 | else | ||
884 | reg_width = convert_buswidth(sconfig->src_addr_width); | ||
885 | |||
892 | /* Check for too big/unaligned periods and unaligned DMA buffer */ | 886 | /* Check for too big/unaligned periods and unaligned DMA buffer */ |
893 | if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr, | 887 | if (atc_dma_cyclic_check_values(reg_width, buf_addr, |
894 | period_len, direction)) | 888 | period_len, direction)) |
895 | goto err_out; | 889 | goto err_out; |
896 | 890 | ||
@@ -902,8 +896,8 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | |||
902 | if (!desc) | 896 | if (!desc) |
903 | goto err_desc_get; | 897 | goto err_desc_get; |
904 | 898 | ||
905 | if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr, | 899 | if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr, |
906 | period_len, direction)) | 900 | reg_width, period_len, direction)) |
907 | goto err_desc_get; | 901 | goto err_desc_get; |
908 | 902 | ||
909 | atc_desc_chain(&first, &prev, desc); | 903 | atc_desc_chain(&first, &prev, desc); |
@@ -926,6 +920,23 @@ err_out: | |||
926 | return NULL; | 920 | return NULL; |
927 | } | 921 | } |
928 | 922 | ||
923 | static int set_runtime_config(struct dma_chan *chan, | ||
924 | struct dma_slave_config *sconfig) | ||
925 | { | ||
926 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | ||
927 | |||
928 | /* Check if the channel is configured for slave transfers */ | ||
929 | if (!chan->private) | ||
930 | return -EINVAL; | ||
931 | |||
932 | memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig)); | ||
933 | |||
934 | convert_burst(&atchan->dma_sconfig.src_maxburst); | ||
935 | convert_burst(&atchan->dma_sconfig.dst_maxburst); | ||
936 | |||
937 | return 0; | ||
938 | } | ||
939 | |||
929 | 940 | ||
930 | static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 941 | static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
931 | unsigned long arg) | 942 | unsigned long arg) |
@@ -986,6 +997,8 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
986 | clear_bit(ATC_IS_CYCLIC, &atchan->status); | 997 | clear_bit(ATC_IS_CYCLIC, &atchan->status); |
987 | 998 | ||
988 | spin_unlock_irqrestore(&atchan->lock, flags); | 999 | spin_unlock_irqrestore(&atchan->lock, flags); |
1000 | } else if (cmd == DMA_SLAVE_CONFIG) { | ||
1001 | return set_runtime_config(chan, (struct dma_slave_config *)arg); | ||
989 | } else { | 1002 | } else { |
990 | return -ENXIO; | 1003 | return -ENXIO; |
991 | } | 1004 | } |
@@ -1016,26 +1029,20 @@ atc_tx_status(struct dma_chan *chan, | |||
1016 | 1029 | ||
1017 | spin_lock_irqsave(&atchan->lock, flags); | 1030 | spin_lock_irqsave(&atchan->lock, flags); |
1018 | 1031 | ||
1019 | last_complete = atchan->completed_cookie; | 1032 | ret = dma_cookie_status(chan, cookie, txstate); |
1020 | last_used = chan->cookie; | ||
1021 | |||
1022 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
1023 | if (ret != DMA_SUCCESS) { | 1033 | if (ret != DMA_SUCCESS) { |
1024 | atc_cleanup_descriptors(atchan); | 1034 | atc_cleanup_descriptors(atchan); |
1025 | 1035 | ||
1026 | last_complete = atchan->completed_cookie; | 1036 | ret = dma_cookie_status(chan, cookie, txstate); |
1027 | last_used = chan->cookie; | ||
1028 | |||
1029 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
1030 | } | 1037 | } |
1031 | 1038 | ||
1039 | last_complete = chan->completed_cookie; | ||
1040 | last_used = chan->cookie; | ||
1041 | |||
1032 | spin_unlock_irqrestore(&atchan->lock, flags); | 1042 | spin_unlock_irqrestore(&atchan->lock, flags); |
1033 | 1043 | ||
1034 | if (ret != DMA_SUCCESS) | 1044 | if (ret != DMA_SUCCESS) |
1035 | dma_set_tx_state(txstate, last_complete, last_used, | 1045 | dma_set_residue(txstate, atc_first_active(atchan)->len); |
1036 | atc_first_active(atchan)->len); | ||
1037 | else | ||
1038 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
1039 | 1046 | ||
1040 | if (atc_chan_is_paused(atchan)) | 1047 | if (atc_chan_is_paused(atchan)) |
1041 | ret = DMA_PAUSED; | 1048 | ret = DMA_PAUSED; |
@@ -1129,7 +1136,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) | |||
1129 | spin_lock_irqsave(&atchan->lock, flags); | 1136 | spin_lock_irqsave(&atchan->lock, flags); |
1130 | atchan->descs_allocated = i; | 1137 | atchan->descs_allocated = i; |
1131 | list_splice(&tmp_list, &atchan->free_list); | 1138 | list_splice(&tmp_list, &atchan->free_list); |
1132 | atchan->completed_cookie = chan->cookie = 1; | 1139 | dma_cookie_init(chan); |
1133 | spin_unlock_irqrestore(&atchan->lock, flags); | 1140 | spin_unlock_irqrestore(&atchan->lock, flags); |
1134 | 1141 | ||
1135 | /* channel parameters */ | 1142 | /* channel parameters */ |
@@ -1329,7 +1336,7 @@ static int __init at_dma_probe(struct platform_device *pdev) | |||
1329 | struct at_dma_chan *atchan = &atdma->chan[i]; | 1336 | struct at_dma_chan *atchan = &atdma->chan[i]; |
1330 | 1337 | ||
1331 | atchan->chan_common.device = &atdma->dma_common; | 1338 | atchan->chan_common.device = &atdma->dma_common; |
1332 | atchan->chan_common.cookie = atchan->completed_cookie = 1; | 1339 | dma_cookie_init(&atchan->chan_common); |
1333 | list_add_tail(&atchan->chan_common.device_node, | 1340 | list_add_tail(&atchan->chan_common.device_node, |
1334 | &atdma->dma_common.channels); | 1341 | &atdma->dma_common.channels); |
1335 | 1342 | ||
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index a8d3277d60b5..897a8bcaec90 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h | |||
@@ -207,8 +207,8 @@ enum atc_status { | |||
207 | * @save_cfg: configuration register that is saved on suspend/resume cycle | 207 | * @save_cfg: configuration register that is saved on suspend/resume cycle |
208 | * @save_dscr: for cyclic operations, preserve next descriptor address in | 208 | * @save_dscr: for cyclic operations, preserve next descriptor address in |
209 | * the cyclic list on suspend/resume cycle | 209 | * the cyclic list on suspend/resume cycle |
210 | * @dma_sconfig: configuration for slave transfers, passed via DMA_SLAVE_CONFIG | ||
210 | * @lock: serializes enqueue/dequeue operations to descriptors lists | 211 | * @lock: serializes enqueue/dequeue operations to descriptors lists |
211 | * @completed_cookie: identifier for the most recently completed operation | ||
212 | * @active_list: list of descriptors dmaengine is being running on | 212 | * @active_list: list of descriptors dmaengine is being running on |
213 | * @queue: list of descriptors ready to be submitted to engine | 213 | * @queue: list of descriptors ready to be submitted to engine |
214 | * @free_list: list of descriptors usable by the channel | 214 | * @free_list: list of descriptors usable by the channel |
@@ -223,11 +223,11 @@ struct at_dma_chan { | |||
223 | struct tasklet_struct tasklet; | 223 | struct tasklet_struct tasklet; |
224 | u32 save_cfg; | 224 | u32 save_cfg; |
225 | u32 save_dscr; | 225 | u32 save_dscr; |
226 | struct dma_slave_config dma_sconfig; | ||
226 | 227 | ||
227 | spinlock_t lock; | 228 | spinlock_t lock; |
228 | 229 | ||
229 | /* these other elements are all protected by lock */ | 230 | /* these other elements are all protected by lock */ |
230 | dma_cookie_t completed_cookie; | ||
231 | struct list_head active_list; | 231 | struct list_head active_list; |
232 | struct list_head queue; | 232 | struct list_head queue; |
233 | struct list_head free_list; | 233 | struct list_head free_list; |
@@ -245,6 +245,36 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan) | |||
245 | return container_of(dchan, struct at_dma_chan, chan_common); | 245 | return container_of(dchan, struct at_dma_chan, chan_common); |
246 | } | 246 | } |
247 | 247 | ||
248 | /* | ||
249 | * Fix sconfig's burst size according to at_hdmac. We need to convert them as: | ||
250 | * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3, 32 -> 4, 64 -> 5, 128 -> 6, 256 -> 7. | ||
251 | * | ||
252 | * This can be done by finding most significant bit set. | ||
253 | */ | ||
254 | static inline void convert_burst(u32 *maxburst) | ||
255 | { | ||
256 | if (*maxburst > 1) | ||
257 | *maxburst = fls(*maxburst) - 2; | ||
258 | else | ||
259 | *maxburst = 0; | ||
260 | } | ||
261 | |||
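A quick check of the mapping above: fls(4) = 3, so a maxburst of 4 becomes 1; fls(16) = 5 gives 3; fls(256) = 9 gives 7. A maxburst of 1 cannot use the formula (fls(1) - 2 would be -1), which is why it is special-cased to 0.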
262 | /* | ||
263 | * Fix sconfig's bus width according to at_hdmac. | ||
264 | * 1 byte -> 0, 2 bytes -> 1, 4 bytes -> 2. | ||
265 | */ | ||
266 | static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width) | ||
267 | { | ||
268 | switch (addr_width) { | ||
269 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
270 | return 1; | ||
271 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
272 | return 2; | ||
273 | default: | ||
274 | /* For 1 byte width or fallback */ | ||
275 | return 0; | ||
276 | } | ||
277 | } | ||
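So, for example, a slave configuration with dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES is encoded as 2 and ends up in the channel control word as ATC_DST_WIDTH(2).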
248 | 278 | ||
249 | /*-- Controller ------------------------------------------------------*/ | 279 | /*-- Controller ------------------------------------------------------*/ |
250 | 280 | ||
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index d65a718c0f9b..dc89455f5550 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <mach/coh901318.h> | 24 | #include <mach/coh901318.h> |
25 | 25 | ||
26 | #include "coh901318_lli.h" | 26 | #include "coh901318_lli.h" |
27 | #include "dmaengine.h" | ||
27 | 28 | ||
28 | #define COHC_2_DEV(cohc) (&cohc->chan.dev->device) | 29 | #define COHC_2_DEV(cohc) (&cohc->chan.dev->device) |
29 | 30 | ||
@@ -59,7 +60,6 @@ struct coh901318_base { | |||
59 | struct coh901318_chan { | 60 | struct coh901318_chan { |
60 | spinlock_t lock; | 61 | spinlock_t lock; |
61 | int allocated; | 62 | int allocated; |
62 | int completed; | ||
63 | int id; | 63 | int id; |
64 | int stopped; | 64 | int stopped; |
65 | 65 | ||
@@ -318,20 +318,6 @@ static int coh901318_prep_linked_list(struct coh901318_chan *cohc, | |||
318 | 318 | ||
319 | return 0; | 319 | return 0; |
320 | } | 320 | } |
321 | static dma_cookie_t | ||
322 | coh901318_assign_cookie(struct coh901318_chan *cohc, | ||
323 | struct coh901318_desc *cohd) | ||
324 | { | ||
325 | dma_cookie_t cookie = cohc->chan.cookie; | ||
326 | |||
327 | if (++cookie < 0) | ||
328 | cookie = 1; | ||
329 | |||
330 | cohc->chan.cookie = cookie; | ||
331 | cohd->desc.cookie = cookie; | ||
332 | |||
333 | return cookie; | ||
334 | } | ||
335 | 321 | ||
336 | static struct coh901318_desc * | 322 | static struct coh901318_desc * |
337 | coh901318_desc_get(struct coh901318_chan *cohc) | 323 | coh901318_desc_get(struct coh901318_chan *cohc) |
@@ -705,7 +691,7 @@ static void dma_tasklet(unsigned long data) | |||
705 | callback_param = cohd_fin->desc.callback_param; | 691 | callback_param = cohd_fin->desc.callback_param; |
706 | 692 | ||
707 | /* sign this job as completed on the channel */ | 693 | /* sign this job as completed on the channel */ |
708 | cohc->completed = cohd_fin->desc.cookie; | 694 | dma_cookie_complete(&cohd_fin->desc); |
709 | 695 | ||
710 | /* release the lli allocation and remove the descriptor */ | 696 | /* release the lli allocation and remove the descriptor */ |
711 | coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli); | 697 | coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli); |
@@ -929,7 +915,7 @@ static int coh901318_alloc_chan_resources(struct dma_chan *chan) | |||
929 | coh901318_config(cohc, NULL); | 915 | coh901318_config(cohc, NULL); |
930 | 916 | ||
931 | cohc->allocated = 1; | 917 | cohc->allocated = 1; |
932 | cohc->completed = chan->cookie = 1; | 918 | dma_cookie_init(chan); |
933 | 919 | ||
934 | spin_unlock_irqrestore(&cohc->lock, flags); | 920 | spin_unlock_irqrestore(&cohc->lock, flags); |
935 | 921 | ||
@@ -966,16 +952,16 @@ coh901318_tx_submit(struct dma_async_tx_descriptor *tx) | |||
966 | desc); | 952 | desc); |
967 | struct coh901318_chan *cohc = to_coh901318_chan(tx->chan); | 953 | struct coh901318_chan *cohc = to_coh901318_chan(tx->chan); |
968 | unsigned long flags; | 954 | unsigned long flags; |
955 | dma_cookie_t cookie; | ||
969 | 956 | ||
970 | spin_lock_irqsave(&cohc->lock, flags); | 957 | spin_lock_irqsave(&cohc->lock, flags); |
971 | 958 | cookie = dma_cookie_assign(tx); | |
972 | tx->cookie = coh901318_assign_cookie(cohc, cohd); | ||
973 | 959 | ||
974 | coh901318_desc_queue(cohc, cohd); | 960 | coh901318_desc_queue(cohc, cohd); |
975 | 961 | ||
976 | spin_unlock_irqrestore(&cohc->lock, flags); | 962 | spin_unlock_irqrestore(&cohc->lock, flags); |
977 | 963 | ||
978 | return tx->cookie; | 964 | return cookie; |
979 | } | 965 | } |
980 | 966 | ||
981 | static struct dma_async_tx_descriptor * | 967 | static struct dma_async_tx_descriptor * |
@@ -1035,7 +1021,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
1035 | static struct dma_async_tx_descriptor * | 1021 | static struct dma_async_tx_descriptor * |
1036 | coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 1022 | coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
1037 | unsigned int sg_len, enum dma_transfer_direction direction, | 1023 | unsigned int sg_len, enum dma_transfer_direction direction, |
1038 | unsigned long flags) | 1024 | unsigned long flags, void *context) |
1039 | { | 1025 | { |
1040 | struct coh901318_chan *cohc = to_coh901318_chan(chan); | 1026 | struct coh901318_chan *cohc = to_coh901318_chan(chan); |
1041 | struct coh901318_lli *lli; | 1027 | struct coh901318_lli *lli; |
@@ -1165,17 +1151,12 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
1165 | struct dma_tx_state *txstate) | 1151 | struct dma_tx_state *txstate) |
1166 | { | 1152 | { |
1167 | struct coh901318_chan *cohc = to_coh901318_chan(chan); | 1153 | struct coh901318_chan *cohc = to_coh901318_chan(chan); |
1168 | dma_cookie_t last_used; | 1154 | enum dma_status ret; |
1169 | dma_cookie_t last_complete; | ||
1170 | int ret; | ||
1171 | |||
1172 | last_complete = cohc->completed; | ||
1173 | last_used = chan->cookie; | ||
1174 | 1155 | ||
1175 | ret = dma_async_is_complete(cookie, last_complete, last_used); | 1156 | ret = dma_cookie_status(chan, cookie, txstate); |
1157 | /* FIXME: should be conditional on ret != DMA_SUCCESS? */ | ||
1158 | dma_set_residue(txstate, coh901318_get_bytes_left(chan)); | ||
1176 | 1159 | ||
1177 | dma_set_tx_state(txstate, last_complete, last_used, | ||
1178 | coh901318_get_bytes_left(chan)); | ||
1179 | if (ret == DMA_IN_PROGRESS && cohc->stopped) | 1160 | if (ret == DMA_IN_PROGRESS && cohc->stopped) |
1180 | ret = DMA_PAUSED; | 1161 | ret = DMA_PAUSED; |
1181 | 1162 | ||
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index a6c6051ec858..767bcc31b365 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -510,8 +510,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v | |||
510 | dma_chan_name(chan)); | 510 | dma_chan_name(chan)); |
511 | list_del_rcu(&device->global_node); | 511 | list_del_rcu(&device->global_node); |
512 | } else if (err) | 512 | } else if (err) |
513 | pr_debug("dmaengine: failed to get %s: (%d)\n", | 513 | pr_debug("%s: failed to get %s: (%d)\n", |
514 | dma_chan_name(chan), err); | 514 | __func__, dma_chan_name(chan), err); |
515 | else | 515 | else |
516 | break; | 516 | break; |
517 | if (--device->privatecnt == 0) | 517 | if (--device->privatecnt == 0) |
@@ -564,8 +564,8 @@ void dmaengine_get(void) | |||
564 | list_del_rcu(&device->global_node); | 564 | list_del_rcu(&device->global_node); |
565 | break; | 565 | break; |
566 | } else if (err) | 566 | } else if (err) |
567 | pr_err("dmaengine: failed to get %s: (%d)\n", | 567 | pr_err("%s: failed to get %s: (%d)\n", |
568 | dma_chan_name(chan), err); | 568 | __func__, dma_chan_name(chan), err); |
569 | } | 569 | } |
570 | } | 570 | } |
571 | 571 | ||
diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h new file mode 100644 index 000000000000..17f983a4e9ba --- /dev/null +++ b/drivers/dma/dmaengine.h | |||
@@ -0,0 +1,89 @@ | |||
1 | /* | ||
2 | * The contents of this file are private to DMA engine drivers, and are not | ||
3 | * part of the API to be used by DMA engine users. | ||
4 | */ | ||
5 | #ifndef DMAENGINE_H | ||
6 | #define DMAENGINE_H | ||
7 | |||
8 | #include <linux/bug.h> | ||
9 | #include <linux/dmaengine.h> | ||
10 | |||
11 | /** | ||
12 | * dma_cookie_init - initialize the cookies for a DMA channel | ||
13 | * @chan: dma channel to initialize | ||
14 | */ | ||
15 | static inline void dma_cookie_init(struct dma_chan *chan) | ||
16 | { | ||
17 | chan->cookie = DMA_MIN_COOKIE; | ||
18 | chan->completed_cookie = DMA_MIN_COOKIE; | ||
19 | } | ||
20 | |||
21 | /** | ||
22 | * dma_cookie_assign - assign a DMA engine cookie to the descriptor | ||
23 | * @tx: descriptor needing cookie | ||
24 | * | ||
25 | * Assign a unique non-zero per-channel cookie to the descriptor. | ||
26 | * Note: caller is expected to hold a lock to prevent concurrency. | ||
27 | */ | ||
28 | static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx) | ||
29 | { | ||
30 | struct dma_chan *chan = tx->chan; | ||
31 | dma_cookie_t cookie; | ||
32 | |||
33 | cookie = chan->cookie + 1; | ||
34 | if (cookie < DMA_MIN_COOKIE) | ||
35 | cookie = DMA_MIN_COOKIE; | ||
36 | tx->cookie = chan->cookie = cookie; | ||
37 | |||
38 | return cookie; | ||
39 | } | ||
40 | |||
41 | /** | ||
42 | * dma_cookie_complete - complete a descriptor | ||
43 | * @tx: descriptor to complete | ||
44 | * | ||
45 | * Mark this descriptor complete by updating the channel's completed | ||
46 | * cookie marker. Zero the descriptor's cookie to prevent accidental | ||
47 | * repeated completions. | ||
48 | * | ||
49 | * Note: caller is expected to hold a lock to prevent concurrency. | ||
50 | */ | ||
51 | static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx) | ||
52 | { | ||
53 | BUG_ON(tx->cookie < DMA_MIN_COOKIE); | ||
54 | tx->chan->completed_cookie = tx->cookie; | ||
55 | tx->cookie = 0; | ||
56 | } | ||
57 | |||
58 | /** | ||
59 | * dma_cookie_status - report cookie status | ||
60 | * @chan: dma channel | ||
61 | * @cookie: cookie we are interested in | ||
62 | * @state: dma_tx_state structure to return last/used cookies | ||
63 | * | ||
64 | * Report the status of the cookie, filling in the state structure if | ||
65 | * non-NULL. No locking is required. | ||
66 | */ | ||
67 | static inline enum dma_status dma_cookie_status(struct dma_chan *chan, | ||
68 | dma_cookie_t cookie, struct dma_tx_state *state) | ||
69 | { | ||
70 | dma_cookie_t used, complete; | ||
71 | |||
72 | used = chan->cookie; | ||
73 | complete = chan->completed_cookie; | ||
74 | barrier(); | ||
75 | if (state) { | ||
76 | state->last = complete; | ||
77 | state->used = used; | ||
78 | state->residue = 0; | ||
79 | } | ||
80 | return dma_async_is_complete(cookie, complete, used); | ||
81 | } | ||
82 | |||
83 | static inline void dma_set_residue(struct dma_tx_state *state, u32 residue) | ||
84 | { | ||
85 | if (state) | ||
86 | state->residue = residue; | ||
87 | } | ||
88 | |||
89 | #endif | ||
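All of the per-driver conversions in this series follow the same shape. A minimal sketch of the intended usage is given below; the foo_* names and structures are placeholders for illustration, not an existing driver:

	#include <linux/dmaengine.h>
	#include <linux/spinlock.h>
	#include "dmaengine.h"

	struct foo_chan {			/* placeholder driver channel */
		struct dma_chan		chan;
		spinlock_t		lock;
	};

	#define to_foo_chan(c)	container_of(c, struct foo_chan, chan)

	/* dma_cookie_init(&fc->chan) is called once when the channel is set up. */

	static dma_cookie_t foo_tx_submit(struct dma_async_tx_descriptor *tx)
	{
		struct foo_chan *fc = to_foo_chan(tx->chan);
		unsigned long flags;
		dma_cookie_t cookie;

		spin_lock_irqsave(&fc->lock, flags);
		cookie = dma_cookie_assign(tx);	/* replaces the open-coded cookie bump */
		spin_unlock_irqrestore(&fc->lock, flags);

		return cookie;
	}

	static enum dma_status foo_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
					     struct dma_tx_state *txstate)
	{
		enum dma_status ret = dma_cookie_status(chan, cookie, txstate);

		/* dma_cookie_complete(&desc->txd) is called from the completion
		 * path, under the channel lock, before the client callback runs. */
		if (ret != DMA_SUCCESS)
			dma_set_residue(txstate, 0);	/* bytes left is driver specific */

		return ret;
	}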
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 9b592b02b5f4..7439079f5eed 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c | |||
@@ -9,6 +9,7 @@ | |||
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | #include <linux/bitops.h> | ||
12 | #include <linux/clk.h> | 13 | #include <linux/clk.h> |
13 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
14 | #include <linux/dmaengine.h> | 15 | #include <linux/dmaengine.h> |
@@ -22,6 +23,7 @@ | |||
22 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
23 | 24 | ||
24 | #include "dw_dmac_regs.h" | 25 | #include "dw_dmac_regs.h" |
26 | #include "dmaengine.h" | ||
25 | 27 | ||
26 | /* | 28 | /* |
27 | * This supports the Synopsys "DesignWare AHB Central DMA Controller", | 29 | * This supports the Synopsys "DesignWare AHB Central DMA Controller", |
@@ -33,19 +35,23 @@ | |||
33 | * which does not support descriptor writeback. | 35 | * which does not support descriptor writeback. |
34 | */ | 36 | */ |
35 | 37 | ||
36 | #define DWC_DEFAULT_CTLLO(private) ({ \ | 38 | #define DWC_DEFAULT_CTLLO(_chan) ({ \ |
37 | struct dw_dma_slave *__slave = (private); \ | 39 | struct dw_dma_slave *__slave = (_chan->private); \ |
38 | int dms = __slave ? __slave->dst_master : 0; \ | 40 | struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ |
39 | int sms = __slave ? __slave->src_master : 1; \ | 41 | struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ |
40 | u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \ | 42 | int _dms = __slave ? __slave->dst_master : 0; \ |
41 | u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \ | 43 | int _sms = __slave ? __slave->src_master : 1; \ |
44 | u8 _smsize = __slave ? _sconfig->src_maxburst : \ | ||
45 | DW_DMA_MSIZE_16; \ | ||
46 | u8 _dmsize = __slave ? _sconfig->dst_maxburst : \ | ||
47 | DW_DMA_MSIZE_16; \ | ||
42 | \ | 48 | \ |
43 | (DWC_CTLL_DST_MSIZE(dmsize) \ | 49 | (DWC_CTLL_DST_MSIZE(_dmsize) \ |
44 | | DWC_CTLL_SRC_MSIZE(smsize) \ | 50 | | DWC_CTLL_SRC_MSIZE(_smsize) \ |
45 | | DWC_CTLL_LLP_D_EN \ | 51 | | DWC_CTLL_LLP_D_EN \ |
46 | | DWC_CTLL_LLP_S_EN \ | 52 | | DWC_CTLL_LLP_S_EN \ |
47 | | DWC_CTLL_DMS(dms) \ | 53 | | DWC_CTLL_DMS(_dms) \ |
48 | | DWC_CTLL_SMS(sms)); \ | 54 | | DWC_CTLL_SMS(_sms)); \ |
49 | }) | 55 | }) |
50 | 56 | ||
51 | /* | 57 | /* |
@@ -151,21 +157,6 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
151 | } | 157 | } |
152 | } | 158 | } |
153 | 159 | ||
154 | /* Called with dwc->lock held and bh disabled */ | ||
155 | static dma_cookie_t | ||
156 | dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc) | ||
157 | { | ||
158 | dma_cookie_t cookie = dwc->chan.cookie; | ||
159 | |||
160 | if (++cookie < 0) | ||
161 | cookie = 1; | ||
162 | |||
163 | dwc->chan.cookie = cookie; | ||
164 | desc->txd.cookie = cookie; | ||
165 | |||
166 | return cookie; | ||
167 | } | ||
168 | |||
169 | static void dwc_initialize(struct dw_dma_chan *dwc) | 160 | static void dwc_initialize(struct dw_dma_chan *dwc) |
170 | { | 161 | { |
171 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 162 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
@@ -192,7 +183,6 @@ static void dwc_initialize(struct dw_dma_chan *dwc) | |||
192 | 183 | ||
193 | /* Enable interrupts */ | 184 | /* Enable interrupts */ |
194 | channel_set_bit(dw, MASK.XFER, dwc->mask); | 185 | channel_set_bit(dw, MASK.XFER, dwc->mask); |
195 | channel_set_bit(dw, MASK.BLOCK, dwc->mask); | ||
196 | channel_set_bit(dw, MASK.ERROR, dwc->mask); | 186 | channel_set_bit(dw, MASK.ERROR, dwc->mask); |
197 | 187 | ||
198 | dwc->initialized = true; | 188 | dwc->initialized = true; |
@@ -245,7 +235,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, | |||
245 | dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); | 235 | dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); |
246 | 236 | ||
247 | spin_lock_irqsave(&dwc->lock, flags); | 237 | spin_lock_irqsave(&dwc->lock, flags); |
248 | dwc->completed = txd->cookie; | 238 | dma_cookie_complete(txd); |
249 | if (callback_required) { | 239 | if (callback_required) { |
250 | callback = txd->callback; | 240 | callback = txd->callback; |
251 | param = txd->callback_param; | 241 | param = txd->callback_param; |
@@ -329,12 +319,6 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
329 | unsigned long flags; | 319 | unsigned long flags; |
330 | 320 | ||
331 | spin_lock_irqsave(&dwc->lock, flags); | 321 | spin_lock_irqsave(&dwc->lock, flags); |
332 | /* | ||
333 | * Clear block interrupt flag before scanning so that we don't | ||
334 | * miss any, and read LLP before RAW_XFER to ensure it is | ||
335 | * valid if we decide to scan the list. | ||
336 | */ | ||
337 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); | ||
338 | llp = channel_readl(dwc, LLP); | 322 | llp = channel_readl(dwc, LLP); |
339 | status_xfer = dma_readl(dw, RAW.XFER); | 323 | status_xfer = dma_readl(dw, RAW.XFER); |
340 | 324 | ||
@@ -470,17 +454,16 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr); | |||
470 | 454 | ||
471 | /* called with dwc->lock held and all DMAC interrupts disabled */ | 455 | /* called with dwc->lock held and all DMAC interrupts disabled */ |
472 | static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, | 456 | static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, |
473 | u32 status_block, u32 status_err, u32 status_xfer) | 457 | u32 status_err, u32 status_xfer) |
474 | { | 458 | { |
475 | unsigned long flags; | 459 | unsigned long flags; |
476 | 460 | ||
477 | if (status_block & dwc->mask) { | 461 | if (dwc->mask) { |
478 | void (*callback)(void *param); | 462 | void (*callback)(void *param); |
479 | void *callback_param; | 463 | void *callback_param; |
480 | 464 | ||
481 | dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", | 465 | dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", |
482 | channel_readl(dwc, LLP)); | 466 | channel_readl(dwc, LLP)); |
483 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); | ||
484 | 467 | ||
485 | callback = dwc->cdesc->period_callback; | 468 | callback = dwc->cdesc->period_callback; |
486 | callback_param = dwc->cdesc->period_callback_param; | 469 | callback_param = dwc->cdesc->period_callback_param; |
@@ -520,7 +503,6 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, | |||
520 | channel_writel(dwc, CTL_LO, 0); | 503 | channel_writel(dwc, CTL_LO, 0); |
521 | channel_writel(dwc, CTL_HI, 0); | 504 | channel_writel(dwc, CTL_HI, 0); |
522 | 505 | ||
523 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); | ||
524 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | 506 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
525 | dma_writel(dw, CLEAR.XFER, dwc->mask); | 507 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
526 | 508 | ||
@@ -537,36 +519,29 @@ static void dw_dma_tasklet(unsigned long data) | |||
537 | { | 519 | { |
538 | struct dw_dma *dw = (struct dw_dma *)data; | 520 | struct dw_dma *dw = (struct dw_dma *)data; |
539 | struct dw_dma_chan *dwc; | 521 | struct dw_dma_chan *dwc; |
540 | u32 status_block; | ||
541 | u32 status_xfer; | 522 | u32 status_xfer; |
542 | u32 status_err; | 523 | u32 status_err; |
543 | int i; | 524 | int i; |
544 | 525 | ||
545 | status_block = dma_readl(dw, RAW.BLOCK); | ||
546 | status_xfer = dma_readl(dw, RAW.XFER); | 526 | status_xfer = dma_readl(dw, RAW.XFER); |
547 | status_err = dma_readl(dw, RAW.ERROR); | 527 | status_err = dma_readl(dw, RAW.ERROR); |
548 | 528 | ||
549 | dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n", | 529 | dev_vdbg(dw->dma.dev, "tasklet: status_err=%x\n", status_err); |
550 | status_block, status_err); | ||
551 | 530 | ||
552 | for (i = 0; i < dw->dma.chancnt; i++) { | 531 | for (i = 0; i < dw->dma.chancnt; i++) { |
553 | dwc = &dw->chan[i]; | 532 | dwc = &dw->chan[i]; |
554 | if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) | 533 | if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) |
555 | dwc_handle_cyclic(dw, dwc, status_block, status_err, | 534 | dwc_handle_cyclic(dw, dwc, status_err, status_xfer); |
556 | status_xfer); | ||
557 | else if (status_err & (1 << i)) | 535 | else if (status_err & (1 << i)) |
558 | dwc_handle_error(dw, dwc); | 536 | dwc_handle_error(dw, dwc); |
559 | else if ((status_block | status_xfer) & (1 << i)) | 537 | else if (status_xfer & (1 << i)) |
560 | dwc_scan_descriptors(dw, dwc); | 538 | dwc_scan_descriptors(dw, dwc); |
561 | } | 539 | } |
562 | 540 | ||
563 | /* | 541 | /* |
564 | * Re-enable interrupts. Block Complete interrupts are only | 542 | * Re-enable interrupts. |
565 | * enabled if the INT_EN bit in the descriptor is set. This | ||
566 | * will trigger a scan before the whole list is done. | ||
567 | */ | 543 | */ |
568 | channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); | 544 | channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); |
569 | channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask); | ||
570 | channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); | 545 | channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); |
571 | } | 546 | } |
572 | 547 | ||
@@ -583,7 +558,6 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) | |||
583 | * softirq handler. | 558 | * softirq handler. |
584 | */ | 559 | */ |
585 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | 560 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); |
586 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); | ||
587 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); | 561 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); |
588 | 562 | ||
589 | status = dma_readl(dw, STATUS_INT); | 563 | status = dma_readl(dw, STATUS_INT); |
@@ -594,7 +568,6 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) | |||
594 | 568 | ||
595 | /* Try to recover */ | 569 | /* Try to recover */ |
596 | channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); | 570 | channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); |
597 | channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1); | ||
598 | channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); | 571 | channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); |
599 | channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); | 572 | channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); |
600 | channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); | 573 | channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); |
@@ -615,7 +588,7 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) | |||
615 | unsigned long flags; | 588 | unsigned long flags; |
616 | 589 | ||
617 | spin_lock_irqsave(&dwc->lock, flags); | 590 | spin_lock_irqsave(&dwc->lock, flags); |
618 | cookie = dwc_assign_cookie(dwc, desc); | 591 | cookie = dma_cookie_assign(tx); |
619 | 592 | ||
620 | /* | 593 | /* |
621 | * REVISIT: We should attempt to chain as many descriptors as | 594 | * REVISIT: We should attempt to chain as many descriptors as |
@@ -674,7 +647,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
674 | else | 647 | else |
675 | src_width = dst_width = 0; | 648 | src_width = dst_width = 0; |
676 | 649 | ||
677 | ctllo = DWC_DEFAULT_CTLLO(chan->private) | 650 | ctllo = DWC_DEFAULT_CTLLO(chan) |
678 | | DWC_CTLL_DST_WIDTH(dst_width) | 651 | | DWC_CTLL_DST_WIDTH(dst_width) |
679 | | DWC_CTLL_SRC_WIDTH(src_width) | 652 | | DWC_CTLL_SRC_WIDTH(src_width) |
680 | | DWC_CTLL_DST_INC | 653 | | DWC_CTLL_DST_INC |
@@ -731,10 +704,11 @@ err_desc_get: | |||
731 | static struct dma_async_tx_descriptor * | 704 | static struct dma_async_tx_descriptor * |
732 | dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 705 | dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
733 | unsigned int sg_len, enum dma_transfer_direction direction, | 706 | unsigned int sg_len, enum dma_transfer_direction direction, |
734 | unsigned long flags) | 707 | unsigned long flags, void *context) |
735 | { | 708 | { |
736 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 709 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
737 | struct dw_dma_slave *dws = chan->private; | 710 | struct dw_dma_slave *dws = chan->private; |
711 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; | ||
738 | struct dw_desc *prev; | 712 | struct dw_desc *prev; |
739 | struct dw_desc *first; | 713 | struct dw_desc *first; |
740 | u32 ctllo; | 714 | u32 ctllo; |
@@ -750,25 +724,34 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
750 | if (unlikely(!dws || !sg_len)) | 724 | if (unlikely(!dws || !sg_len)) |
751 | return NULL; | 725 | return NULL; |
752 | 726 | ||
753 | reg_width = dws->reg_width; | ||
754 | prev = first = NULL; | 727 | prev = first = NULL; |
755 | 728 | ||
756 | switch (direction) { | 729 | switch (direction) { |
757 | case DMA_MEM_TO_DEV: | 730 | case DMA_MEM_TO_DEV: |
758 | ctllo = (DWC_DEFAULT_CTLLO(chan->private) | 731 | reg_width = __fls(sconfig->dst_addr_width); |
732 | reg = sconfig->dst_addr; | ||
733 | ctllo = (DWC_DEFAULT_CTLLO(chan) | ||
759 | | DWC_CTLL_DST_WIDTH(reg_width) | 734 | | DWC_CTLL_DST_WIDTH(reg_width) |
760 | | DWC_CTLL_DST_FIX | 735 | | DWC_CTLL_DST_FIX |
761 | | DWC_CTLL_SRC_INC | 736 | | DWC_CTLL_SRC_INC); |
762 | | DWC_CTLL_FC(dws->fc)); | 737 | |
763 | reg = dws->tx_reg; | 738 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : |
739 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); | ||
740 | |||
764 | for_each_sg(sgl, sg, sg_len, i) { | 741 | for_each_sg(sgl, sg, sg_len, i) { |
765 | struct dw_desc *desc; | 742 | struct dw_desc *desc; |
766 | u32 len, dlen, mem; | 743 | u32 len, dlen, mem; |
767 | 744 | ||
768 | mem = sg_phys(sg); | 745 | mem = sg_phys(sg); |
769 | len = sg_dma_len(sg); | 746 | len = sg_dma_len(sg); |
770 | mem_width = 2; | 747 | |
771 | if (unlikely(mem & 3 || len & 3)) | 748 | if (!((mem | len) & 7)) |
749 | mem_width = 3; | ||
750 | else if (!((mem | len) & 3)) | ||
751 | mem_width = 2; | ||
752 | else if (!((mem | len) & 1)) | ||
753 | mem_width = 1; | ||
754 | else | ||
772 | mem_width = 0; | 755 | mem_width = 0; |
773 | 756 | ||
774 | slave_sg_todev_fill_desc: | 757 | slave_sg_todev_fill_desc: |
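The mem_width chain above (and its twin in the DMA_DEV_TO_MEM branch below) picks the widest access the memory side can use by ORing the buffer address with the length and testing the alignment of the result. A minimal stand-alone sketch of the same idea, using a hypothetical helper name rather than the driver's inline code:

	/* Illustrative only: return the memory-side width encoding (log2 of the
	 * access size in bytes) that both the address and length are aligned to. */
	static unsigned int pick_mem_width(unsigned long addr, unsigned long len)
	{
		unsigned long combined = addr | len;

		if (!(combined & 7))
			return 3;	/* 64-bit accesses */
		if (!(combined & 3))
			return 2;	/* 32-bit accesses */
		if (!(combined & 1))
			return 1;	/* 16-bit accesses */
		return 0;		/* fall back to 8-bit accesses */
	}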
@@ -812,21 +795,30 @@ slave_sg_todev_fill_desc: | |||
812 | } | 795 | } |
813 | break; | 796 | break; |
814 | case DMA_DEV_TO_MEM: | 797 | case DMA_DEV_TO_MEM: |
815 | ctllo = (DWC_DEFAULT_CTLLO(chan->private) | 798 | reg_width = __fls(sconfig->src_addr_width); |
799 | reg = sconfig->src_addr; | ||
800 | ctllo = (DWC_DEFAULT_CTLLO(chan) | ||
816 | | DWC_CTLL_SRC_WIDTH(reg_width) | 801 | | DWC_CTLL_SRC_WIDTH(reg_width) |
817 | | DWC_CTLL_DST_INC | 802 | | DWC_CTLL_DST_INC |
818 | | DWC_CTLL_SRC_FIX | 803 | | DWC_CTLL_SRC_FIX); |
819 | | DWC_CTLL_FC(dws->fc)); | 804 | |
805 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : | ||
806 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); | ||
820 | 807 | ||
821 | reg = dws->rx_reg; | ||
822 | for_each_sg(sgl, sg, sg_len, i) { | 808 | for_each_sg(sgl, sg, sg_len, i) { |
823 | struct dw_desc *desc; | 809 | struct dw_desc *desc; |
824 | u32 len, dlen, mem; | 810 | u32 len, dlen, mem; |
825 | 811 | ||
826 | mem = sg_phys(sg); | 812 | mem = sg_phys(sg); |
827 | len = sg_dma_len(sg); | 813 | len = sg_dma_len(sg); |
828 | mem_width = 2; | 814 | |
829 | if (unlikely(mem & 3 || len & 3)) | 815 | if (!((mem | len) & 7)) |
816 | mem_width = 3; | ||
817 | else if (!((mem | len) & 3)) | ||
818 | mem_width = 2; | ||
819 | else if (!((mem | len) & 1)) | ||
820 | mem_width = 1; | ||
821 | else | ||
830 | mem_width = 0; | 822 | mem_width = 0; |
831 | 823 | ||
832 | slave_sg_fromdev_fill_desc: | 824 | slave_sg_fromdev_fill_desc: |
@@ -890,6 +882,39 @@ err_desc_get: | |||
890 | return NULL; | 882 | return NULL; |
891 | } | 883 | } |
892 | 884 | ||
885 | /* | ||
886 | * Fix sconfig's burst size according to dw_dmac. We need to convert them as: | ||
887 | * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. | ||
888 | * | ||
889 | * NOTE: burst size 2 is not supported by controller. | ||
890 | * | ||
891 | * This can be done with fls(): for maxburst > 1 the encoding is fls(maxburst) - 2. | ||
892 | */ | ||
893 | static inline void convert_burst(u32 *maxburst) | ||
894 | { | ||
895 | if (*maxburst > 1) | ||
896 | *maxburst = fls(*maxburst) - 2; | ||
897 | else | ||
898 | *maxburst = 0; | ||
899 | } | ||
900 | |||
901 | static int | ||
902 | set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) | ||
903 | { | ||
904 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | ||
905 | |||
906 | /* Check if chan is configured for slave transfers */ | ||
907 | if (!chan->private) | ||
908 | return -EINVAL; | ||
909 | |||
910 | memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); | ||
911 | |||
912 | convert_burst(&dwc->dma_sconfig.src_maxburst); | ||
913 | convert_burst(&dwc->dma_sconfig.dst_maxburst); | ||
914 | |||
915 | return 0; | ||
916 | } | ||
917 | |||
893 | static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 918 | static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
894 | unsigned long arg) | 919 | unsigned long arg) |
895 | { | 920 | { |
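A quick worked example of the burst conversion introduced in convert_burst() above, assuming the kernel's fls() (1-based index of the most significant set bit): maxburst 1 maps to 0 directly, and fls(4) - 2 = 1, fls(8) - 2 = 2, fls(16) - 2 = 3. A throwaway sketch that prints the same table, not part of the driver:

	#include <linux/kernel.h>
	#include <linux/bitops.h>

	/* Throwaway illustration of the maxburst -> register encoding used by
	 * convert_burst(); a burst size of 2 is simply never requested. */
	static void burst_encoding_example(void)
	{
		static const u32 bursts[] = { 1, 4, 8, 16 };
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(bursts); i++) {
			u32 enc = bursts[i] > 1 ? fls(bursts[i]) - 2 : 0;

			pr_info("maxburst %u -> %u\n", bursts[i], enc);	/* 1->0, 4->1, 8->2, 16->3 */
		}
	}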
@@ -939,8 +964,11 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
939 | /* Flush all pending and queued descriptors */ | 964 | /* Flush all pending and queued descriptors */ |
940 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | 965 | list_for_each_entry_safe(desc, _desc, &list, desc_node) |
941 | dwc_descriptor_complete(dwc, desc, false); | 966 | dwc_descriptor_complete(dwc, desc, false); |
942 | } else | 967 | } else if (cmd == DMA_SLAVE_CONFIG) { |
968 | return set_runtime_config(chan, (struct dma_slave_config *)arg); | ||
969 | } else { | ||
943 | return -ENXIO; | 970 | return -ENXIO; |
971 | } | ||
944 | 972 | ||
945 | return 0; | 973 | return 0; |
946 | } | 974 | } |
@@ -951,28 +979,17 @@ dwc_tx_status(struct dma_chan *chan, | |||
951 | struct dma_tx_state *txstate) | 979 | struct dma_tx_state *txstate) |
952 | { | 980 | { |
953 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 981 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
954 | dma_cookie_t last_used; | 982 | enum dma_status ret; |
955 | dma_cookie_t last_complete; | ||
956 | int ret; | ||
957 | |||
958 | last_complete = dwc->completed; | ||
959 | last_used = chan->cookie; | ||
960 | 983 | ||
961 | ret = dma_async_is_complete(cookie, last_complete, last_used); | 984 | ret = dma_cookie_status(chan, cookie, txstate); |
962 | if (ret != DMA_SUCCESS) { | 985 | if (ret != DMA_SUCCESS) { |
963 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); | 986 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); |
964 | 987 | ||
965 | last_complete = dwc->completed; | 988 | ret = dma_cookie_status(chan, cookie, txstate); |
966 | last_used = chan->cookie; | ||
967 | |||
968 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
969 | } | 989 | } |
970 | 990 | ||
971 | if (ret != DMA_SUCCESS) | 991 | if (ret != DMA_SUCCESS) |
972 | dma_set_tx_state(txstate, last_complete, last_used, | 992 | dma_set_residue(txstate, dwc_first_active(dwc)->len); |
973 | dwc_first_active(dwc)->len); | ||
974 | else | ||
975 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
976 | 993 | ||
977 | if (dwc->paused) | 994 | if (dwc->paused) |
978 | return DMA_PAUSED; | 995 | return DMA_PAUSED; |
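The tx_status conversion above swaps the driver's last_complete/last_used bookkeeping for the dma_cookie_status() helper from drivers/dma/dmaengine.h, the same shape used by every driver touched in this series. A generic sketch of that pattern (foo_scan_descriptors() and foo_bytes_left() are placeholders, not real dw_dmac functions):

	static enum dma_status foo_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
					     struct dma_tx_state *txstate)
	{
		enum dma_status ret;

		/* compares the cookie with the core-maintained completed cookie
		 * and fills in txstate */
		ret = dma_cookie_status(chan, cookie, txstate);
		if (ret != DMA_SUCCESS) {
			/* let the driver retire finished descriptors, then retry */
			foo_scan_descriptors(chan);
			ret = dma_cookie_status(chan, cookie, txstate);
		}

		/* report how much of the still-running transfer is left */
		if (ret != DMA_SUCCESS)
			dma_set_residue(txstate, foo_bytes_left(chan));

		return ret;
	}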
@@ -1004,7 +1021,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
1004 | return -EIO; | 1021 | return -EIO; |
1005 | } | 1022 | } |
1006 | 1023 | ||
1007 | dwc->completed = chan->cookie = 1; | 1024 | dma_cookie_init(chan); |
1008 | 1025 | ||
1009 | /* | 1026 | /* |
1010 | * NOTE: some controllers may have additional features that we | 1027 | * NOTE: some controllers may have additional features that we |
@@ -1068,7 +1085,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
1068 | 1085 | ||
1069 | /* Disable interrupts */ | 1086 | /* Disable interrupts */ |
1070 | channel_clear_bit(dw, MASK.XFER, dwc->mask); | 1087 | channel_clear_bit(dw, MASK.XFER, dwc->mask); |
1071 | channel_clear_bit(dw, MASK.BLOCK, dwc->mask); | ||
1072 | channel_clear_bit(dw, MASK.ERROR, dwc->mask); | 1088 | channel_clear_bit(dw, MASK.ERROR, dwc->mask); |
1073 | 1089 | ||
1074 | spin_unlock_irqrestore(&dwc->lock, flags); | 1090 | spin_unlock_irqrestore(&dwc->lock, flags); |
@@ -1120,7 +1136,6 @@ int dw_dma_cyclic_start(struct dma_chan *chan) | |||
1120 | return -EBUSY; | 1136 | return -EBUSY; |
1121 | } | 1137 | } |
1122 | 1138 | ||
1123 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); | ||
1124 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | 1139 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
1125 | dma_writel(dw, CLEAR.XFER, dwc->mask); | 1140 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
1126 | 1141 | ||
@@ -1175,11 +1190,11 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1175 | enum dma_transfer_direction direction) | 1190 | enum dma_transfer_direction direction) |
1176 | { | 1191 | { |
1177 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1192 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1193 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; | ||
1178 | struct dw_cyclic_desc *cdesc; | 1194 | struct dw_cyclic_desc *cdesc; |
1179 | struct dw_cyclic_desc *retval = NULL; | 1195 | struct dw_cyclic_desc *retval = NULL; |
1180 | struct dw_desc *desc; | 1196 | struct dw_desc *desc; |
1181 | struct dw_desc *last = NULL; | 1197 | struct dw_desc *last = NULL; |
1182 | struct dw_dma_slave *dws = chan->private; | ||
1183 | unsigned long was_cyclic; | 1198 | unsigned long was_cyclic; |
1184 | unsigned int reg_width; | 1199 | unsigned int reg_width; |
1185 | unsigned int periods; | 1200 | unsigned int periods; |
@@ -1203,7 +1218,12 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1203 | } | 1218 | } |
1204 | 1219 | ||
1205 | retval = ERR_PTR(-EINVAL); | 1220 | retval = ERR_PTR(-EINVAL); |
1206 | reg_width = dws->reg_width; | 1221 | |
1222 | if (direction == DMA_MEM_TO_DEV) | ||
1223 | reg_width = __ffs(sconfig->dst_addr_width); | ||
1224 | else | ||
1225 | reg_width = __ffs(sconfig->src_addr_width); | ||
1226 | |||
1207 | periods = buf_len / period_len; | 1227 | periods = buf_len / period_len; |
1208 | 1228 | ||
1209 | /* Check for too big/unaligned periods and unaligned DMA buffer. */ | 1229 | /* Check for too big/unaligned periods and unaligned DMA buffer. */ |
@@ -1236,26 +1256,34 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1236 | 1256 | ||
1237 | switch (direction) { | 1257 | switch (direction) { |
1238 | case DMA_MEM_TO_DEV: | 1258 | case DMA_MEM_TO_DEV: |
1239 | desc->lli.dar = dws->tx_reg; | 1259 | desc->lli.dar = sconfig->dst_addr; |
1240 | desc->lli.sar = buf_addr + (period_len * i); | 1260 | desc->lli.sar = buf_addr + (period_len * i); |
1241 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) | 1261 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) |
1242 | | DWC_CTLL_DST_WIDTH(reg_width) | 1262 | | DWC_CTLL_DST_WIDTH(reg_width) |
1243 | | DWC_CTLL_SRC_WIDTH(reg_width) | 1263 | | DWC_CTLL_SRC_WIDTH(reg_width) |
1244 | | DWC_CTLL_DST_FIX | 1264 | | DWC_CTLL_DST_FIX |
1245 | | DWC_CTLL_SRC_INC | 1265 | | DWC_CTLL_SRC_INC |
1246 | | DWC_CTLL_FC(dws->fc) | ||
1247 | | DWC_CTLL_INT_EN); | 1266 | | DWC_CTLL_INT_EN); |
1267 | |||
1268 | desc->lli.ctllo |= sconfig->device_fc ? | ||
1269 | DWC_CTLL_FC(DW_DMA_FC_P_M2P) : | ||
1270 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); | ||
1271 | |||
1248 | break; | 1272 | break; |
1249 | case DMA_DEV_TO_MEM: | 1273 | case DMA_DEV_TO_MEM: |
1250 | desc->lli.dar = buf_addr + (period_len * i); | 1274 | desc->lli.dar = buf_addr + (period_len * i); |
1251 | desc->lli.sar = dws->rx_reg; | 1275 | desc->lli.sar = sconfig->src_addr; |
1252 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) | 1276 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) |
1253 | | DWC_CTLL_SRC_WIDTH(reg_width) | 1277 | | DWC_CTLL_SRC_WIDTH(reg_width) |
1254 | | DWC_CTLL_DST_WIDTH(reg_width) | 1278 | | DWC_CTLL_DST_WIDTH(reg_width) |
1255 | | DWC_CTLL_DST_INC | 1279 | | DWC_CTLL_DST_INC |
1256 | | DWC_CTLL_SRC_FIX | 1280 | | DWC_CTLL_SRC_FIX |
1257 | | DWC_CTLL_FC(dws->fc) | ||
1258 | | DWC_CTLL_INT_EN); | 1281 | | DWC_CTLL_INT_EN); |
1282 | |||
1283 | desc->lli.ctllo |= sconfig->device_fc ? | ||
1284 | DWC_CTLL_FC(DW_DMA_FC_P_P2M) : | ||
1285 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); | ||
1286 | |||
1259 | break; | 1287 | break; |
1260 | default: | 1288 | default: |
1261 | break; | 1289 | break; |
@@ -1322,7 +1350,6 @@ void dw_dma_cyclic_free(struct dma_chan *chan) | |||
1322 | while (dma_readl(dw, CH_EN) & dwc->mask) | 1350 | while (dma_readl(dw, CH_EN) & dwc->mask) |
1323 | cpu_relax(); | 1351 | cpu_relax(); |
1324 | 1352 | ||
1325 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); | ||
1326 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | 1353 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
1327 | dma_writel(dw, CLEAR.XFER, dwc->mask); | 1354 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
1328 | 1355 | ||
@@ -1347,7 +1374,6 @@ static void dw_dma_off(struct dw_dma *dw) | |||
1347 | dma_writel(dw, CFG, 0); | 1374 | dma_writel(dw, CFG, 0); |
1348 | 1375 | ||
1349 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | 1376 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); |
1350 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); | ||
1351 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); | 1377 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); |
1352 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); | 1378 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); |
1353 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); | 1379 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); |
@@ -1369,7 +1395,7 @@ static int __init dw_probe(struct platform_device *pdev) | |||
1369 | int err; | 1395 | int err; |
1370 | int i; | 1396 | int i; |
1371 | 1397 | ||
1372 | pdata = pdev->dev.platform_data; | 1398 | pdata = dev_get_platdata(&pdev->dev); |
1373 | if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) | 1399 | if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) |
1374 | return -EINVAL; | 1400 | return -EINVAL; |
1375 | 1401 | ||
@@ -1423,7 +1449,7 @@ static int __init dw_probe(struct platform_device *pdev) | |||
1423 | struct dw_dma_chan *dwc = &dw->chan[i]; | 1449 | struct dw_dma_chan *dwc = &dw->chan[i]; |
1424 | 1450 | ||
1425 | dwc->chan.device = &dw->dma; | 1451 | dwc->chan.device = &dw->dma; |
1426 | dwc->chan.cookie = dwc->completed = 1; | 1452 | dma_cookie_init(&dwc->chan); |
1427 | if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) | 1453 | if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) |
1428 | list_add_tail(&dwc->chan.device_node, | 1454 | list_add_tail(&dwc->chan.device_node, |
1429 | &dw->dma.channels); | 1455 | &dw->dma.channels); |
@@ -1432,7 +1458,7 @@ static int __init dw_probe(struct platform_device *pdev) | |||
1432 | 1458 | ||
1433 | /* 7 is highest priority & 0 is lowest. */ | 1459 | /* 7 is highest priority & 0 is lowest. */ |
1434 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) | 1460 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) |
1435 | dwc->priority = 7 - i; | 1461 | dwc->priority = pdata->nr_channels - i - 1; |
1436 | else | 1462 | else |
1437 | dwc->priority = i; | 1463 | dwc->priority = i; |
1438 | 1464 | ||
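For example, with pdata->nr_channels = 3 and CHAN_PRIORITY_ASCENDING, the channels now get priorities 2, 1 and 0 rather than the previous hard-coded 7, 6 and 5; the intent, presumably, is to keep the priority values within 0..nr_channels - 1 for the instance at hand.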
@@ -1449,13 +1475,11 @@ static int __init dw_probe(struct platform_device *pdev) | |||
1449 | 1475 | ||
1450 | /* Clear/disable all interrupts on all channels. */ | 1476 | /* Clear/disable all interrupts on all channels. */ |
1451 | dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); | 1477 | dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); |
1452 | dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); | ||
1453 | dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); | 1478 | dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); |
1454 | dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); | 1479 | dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); |
1455 | dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); | 1480 | dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); |
1456 | 1481 | ||
1457 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | 1482 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); |
1458 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); | ||
1459 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); | 1483 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); |
1460 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); | 1484 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); |
1461 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); | 1485 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); |
@@ -1562,6 +1586,10 @@ static int dw_resume_noirq(struct device *dev) | |||
1562 | static const struct dev_pm_ops dw_dev_pm_ops = { | 1586 | static const struct dev_pm_ops dw_dev_pm_ops = { |
1563 | .suspend_noirq = dw_suspend_noirq, | 1587 | .suspend_noirq = dw_suspend_noirq, |
1564 | .resume_noirq = dw_resume_noirq, | 1588 | .resume_noirq = dw_resume_noirq, |
1589 | .freeze_noirq = dw_suspend_noirq, | ||
1590 | .thaw_noirq = dw_resume_noirq, | ||
1591 | .restore_noirq = dw_resume_noirq, | ||
1592 | .poweroff_noirq = dw_suspend_noirq, | ||
1565 | }; | 1593 | }; |
1566 | 1594 | ||
1567 | static struct platform_driver dw_driver = { | 1595 | static struct platform_driver dw_driver = { |
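With the tx_reg/rx_reg/reg_width/fc fields of dw_dma_slave gone, a peripheral driver now describes its side of the transfer through the generic struct dma_slave_config, delivered via DMA_SLAVE_CONFIG. A rough client-side sketch; the FIFO address, width and burst values are placeholders:

	#include <linux/dmaengine.h>

	/* Hypothetical helper in a client driver; this ends up in dwc_control()
	 * with cmd == DMA_SLAVE_CONFIG, which stores the config and converts
	 * the burst sizes. */
	static int example_setup_tx_channel(struct dma_chan *chan, dma_addr_t fifo_addr)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_MEM_TO_DEV,
			.dst_addr	= fifo_addr,
			.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_maxburst	= 8,
			.device_fc	= false,	/* DMAC acts as flow controller */
		};

		return dmaengine_slave_config(chan, &cfg);
	}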
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index 5eef6946a367..f298f69ecbf9 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h | |||
@@ -13,6 +13,18 @@ | |||
13 | 13 | ||
14 | #define DW_DMA_MAX_NR_CHANNELS 8 | 14 | #define DW_DMA_MAX_NR_CHANNELS 8 |
15 | 15 | ||
16 | /* flow controller */ | ||
17 | enum dw_dma_fc { | ||
18 | DW_DMA_FC_D_M2M, | ||
19 | DW_DMA_FC_D_M2P, | ||
20 | DW_DMA_FC_D_P2M, | ||
21 | DW_DMA_FC_D_P2P, | ||
22 | DW_DMA_FC_P_P2M, | ||
23 | DW_DMA_FC_SP_P2P, | ||
24 | DW_DMA_FC_P_M2P, | ||
25 | DW_DMA_FC_DP_P2P, | ||
26 | }; | ||
27 | |||
16 | /* | 28 | /* |
17 | * Redefine this macro to handle differences between 32- and 64-bit | 29 | * Redefine this macro to handle differences between 32- and 64-bit |
18 | * addressing, big vs. little endian, etc. | 30 | * addressing, big vs. little endian, etc. |
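The dw_dma_fc values above name the possible flow controllers (D = DMAC, P = peripheral, plus the transfer direction). In the dw_dmac.c hunks earlier in this diff the choice is driven by sconfig->device_fc; condensed into a single hypothetical helper, the selection amounts to:

	/* Not a real driver function, just the open-coded selection logic in one place. */
	static u32 dwc_fc_bits(struct dma_slave_config *sconfig,
			       enum dma_transfer_direction dir)
	{
		if (dir == DMA_MEM_TO_DEV)
			return sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P)
						  : DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		return sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M)
					  : DWC_CTLL_FC(DW_DMA_FC_D_P2M);
	}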
@@ -146,13 +158,15 @@ struct dw_dma_chan { | |||
146 | 158 | ||
147 | /* these other elements are all protected by lock */ | 159 | /* these other elements are all protected by lock */ |
148 | unsigned long flags; | 160 | unsigned long flags; |
149 | dma_cookie_t completed; | ||
150 | struct list_head active_list; | 161 | struct list_head active_list; |
151 | struct list_head queue; | 162 | struct list_head queue; |
152 | struct list_head free_list; | 163 | struct list_head free_list; |
153 | struct dw_cyclic_desc *cdesc; | 164 | struct dw_cyclic_desc *cdesc; |
154 | 165 | ||
155 | unsigned int descs_allocated; | 166 | unsigned int descs_allocated; |
167 | |||
168 | /* configuration passed via DMA_SLAVE_CONFIG */ | ||
169 | struct dma_slave_config dma_sconfig; | ||
156 | }; | 170 | }; |
157 | 171 | ||
158 | static inline struct dw_dma_chan_regs __iomem * | 172 | static inline struct dw_dma_chan_regs __iomem * |
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 59e7a965772b..e6f133b78dc2 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c | |||
@@ -28,6 +28,8 @@ | |||
28 | 28 | ||
29 | #include <mach/dma.h> | 29 | #include <mach/dma.h> |
30 | 30 | ||
31 | #include "dmaengine.h" | ||
32 | |||
31 | /* M2P registers */ | 33 | /* M2P registers */ |
32 | #define M2P_CONTROL 0x0000 | 34 | #define M2P_CONTROL 0x0000 |
33 | #define M2P_CONTROL_STALLINT BIT(0) | 35 | #define M2P_CONTROL_STALLINT BIT(0) |
@@ -122,7 +124,6 @@ struct ep93xx_dma_desc { | |||
122 | * @lock: lock protecting the fields following | 124 | * @lock: lock protecting the fields following |
123 | * @flags: flags for the channel | 125 | * @flags: flags for the channel |
124 | * @buffer: which buffer to use next (0/1) | 126 | * @buffer: which buffer to use next (0/1) |
125 | * @last_completed: last completed cookie value | ||
126 | * @active: flattened chain of descriptors currently being processed | 127 | * @active: flattened chain of descriptors currently being processed |
127 | * @queue: pending descriptors which are handled next | 128 | * @queue: pending descriptors which are handled next |
128 | * @free_list: list of free descriptors which can be used | 129 | * @free_list: list of free descriptors which can be used |
@@ -157,7 +158,6 @@ struct ep93xx_dma_chan { | |||
157 | #define EP93XX_DMA_IS_CYCLIC 0 | 158 | #define EP93XX_DMA_IS_CYCLIC 0 |
158 | 159 | ||
159 | int buffer; | 160 | int buffer; |
160 | dma_cookie_t last_completed; | ||
161 | struct list_head active; | 161 | struct list_head active; |
162 | struct list_head queue; | 162 | struct list_head queue; |
163 | struct list_head free_list; | 163 | struct list_head free_list; |
@@ -703,7 +703,7 @@ static void ep93xx_dma_tasklet(unsigned long data) | |||
703 | desc = ep93xx_dma_get_active(edmac); | 703 | desc = ep93xx_dma_get_active(edmac); |
704 | if (desc) { | 704 | if (desc) { |
705 | if (desc->complete) { | 705 | if (desc->complete) { |
706 | edmac->last_completed = desc->txd.cookie; | 706 | dma_cookie_complete(&desc->txd); |
707 | list_splice_init(&edmac->active, &list); | 707 | list_splice_init(&edmac->active, &list); |
708 | } | 708 | } |
709 | callback = desc->txd.callback; | 709 | callback = desc->txd.callback; |
@@ -783,17 +783,10 @@ static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
783 | unsigned long flags; | 783 | unsigned long flags; |
784 | 784 | ||
785 | spin_lock_irqsave(&edmac->lock, flags); | 785 | spin_lock_irqsave(&edmac->lock, flags); |
786 | 786 | cookie = dma_cookie_assign(tx); | |
787 | cookie = edmac->chan.cookie; | ||
788 | |||
789 | if (++cookie < 0) | ||
790 | cookie = 1; | ||
791 | 787 | ||
792 | desc = container_of(tx, struct ep93xx_dma_desc, txd); | 788 | desc = container_of(tx, struct ep93xx_dma_desc, txd); |
793 | 789 | ||
794 | edmac->chan.cookie = cookie; | ||
795 | desc->txd.cookie = cookie; | ||
796 | |||
797 | /* | 790 | /* |
798 | * If nothing is currently processed, we push this descriptor | 791 | * If nothing is currently processed, we push this descriptor |
799 | * directly to the hardware. Otherwise we put the descriptor | 792 | * directly to the hardware. Otherwise we put the descriptor |
@@ -861,8 +854,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan) | |||
861 | goto fail_clk_disable; | 854 | goto fail_clk_disable; |
862 | 855 | ||
863 | spin_lock_irq(&edmac->lock); | 856 | spin_lock_irq(&edmac->lock); |
864 | edmac->last_completed = 1; | 857 | dma_cookie_init(&edmac->chan); |
865 | edmac->chan.cookie = 1; | ||
866 | ret = edmac->edma->hw_setup(edmac); | 858 | ret = edmac->edma->hw_setup(edmac); |
867 | spin_unlock_irq(&edmac->lock); | 859 | spin_unlock_irq(&edmac->lock); |
868 | 860 | ||
@@ -983,13 +975,14 @@ fail: | |||
983 | * @sg_len: number of entries in @sgl | 975 | * @sg_len: number of entries in @sgl |
984 | * @dir: direction of the DMA transfer | 976 | * @dir: direction of the DMA transfer |
985 | * @flags: flags for the descriptor | 977 | * @flags: flags for the descriptor |
978 | * @context: operation context (ignored) | ||
986 | * | 979 | * |
987 | * Returns a valid DMA descriptor or %NULL in case of failure. | 980 | * Returns a valid DMA descriptor or %NULL in case of failure. |
988 | */ | 981 | */ |
989 | static struct dma_async_tx_descriptor * | 982 | static struct dma_async_tx_descriptor * |
990 | ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 983 | ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
991 | unsigned int sg_len, enum dma_transfer_direction dir, | 984 | unsigned int sg_len, enum dma_transfer_direction dir, |
992 | unsigned long flags) | 985 | unsigned long flags, void *context) |
993 | { | 986 | { |
994 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | 987 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); |
995 | struct ep93xx_dma_desc *desc, *first; | 988 | struct ep93xx_dma_desc *desc, *first; |
@@ -1056,6 +1049,7 @@ fail: | |||
1056 | * @buf_len: length of the buffer (in bytes) | 1049 | * @buf_len: length of the buffer (in bytes) |
1057 | * @period_len: length of a single period | 1050 | * @period_len: length of a single period |
1058 | * @dir: direction of the operation | 1051 | * @dir: direction of the operation |
1052 | * @context: operation context (ignored) | ||
1059 | * | 1053 | * |
1060 | * Prepares a descriptor for cyclic DMA operation. This means that once the | 1054 | * Prepares a descriptor for cyclic DMA operation. This means that once the |
1061 | * descriptor is submitted, we will be submitting in a @period_len sized | 1055 | * descriptor is submitted, we will be submitting in a @period_len sized |
@@ -1068,7 +1062,7 @@ fail: | |||
1068 | static struct dma_async_tx_descriptor * | 1062 | static struct dma_async_tx_descriptor * |
1069 | ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, | 1063 | ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, |
1070 | size_t buf_len, size_t period_len, | 1064 | size_t buf_len, size_t period_len, |
1071 | enum dma_transfer_direction dir) | 1065 | enum dma_transfer_direction dir, void *context) |
1072 | { | 1066 | { |
1073 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | 1067 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); |
1074 | struct ep93xx_dma_desc *desc, *first; | 1068 | struct ep93xx_dma_desc *desc, *first; |
@@ -1248,18 +1242,13 @@ static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan, | |||
1248 | struct dma_tx_state *state) | 1242 | struct dma_tx_state *state) |
1249 | { | 1243 | { |
1250 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | 1244 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); |
1251 | dma_cookie_t last_used, last_completed; | ||
1252 | enum dma_status ret; | 1245 | enum dma_status ret; |
1253 | unsigned long flags; | 1246 | unsigned long flags; |
1254 | 1247 | ||
1255 | spin_lock_irqsave(&edmac->lock, flags); | 1248 | spin_lock_irqsave(&edmac->lock, flags); |
1256 | last_used = chan->cookie; | 1249 | ret = dma_cookie_status(chan, cookie, state); |
1257 | last_completed = edmac->last_completed; | ||
1258 | spin_unlock_irqrestore(&edmac->lock, flags); | 1250 | spin_unlock_irqrestore(&edmac->lock, flags); |
1259 | 1251 | ||
1260 | ret = dma_async_is_complete(cookie, last_completed, last_used); | ||
1261 | dma_set_tx_state(state, last_completed, last_used, 0); | ||
1262 | |||
1263 | return ret; | 1252 | return ret; |
1264 | } | 1253 | } |
1265 | 1254 | ||
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index b98070c33ca9..8f84761f98ba 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/dmapool.h> | 35 | #include <linux/dmapool.h> |
36 | #include <linux/of_platform.h> | 36 | #include <linux/of_platform.h> |
37 | 37 | ||
38 | #include "dmaengine.h" | ||
38 | #include "fsldma.h" | 39 | #include "fsldma.h" |
39 | 40 | ||
40 | #define chan_dbg(chan, fmt, arg...) \ | 41 | #define chan_dbg(chan, fmt, arg...) \ |
@@ -413,17 +414,10 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
413 | * assign cookies to all of the software descriptors | 414 | * assign cookies to all of the software descriptors |
414 | * that make up this transaction | 415 | * that make up this transaction |
415 | */ | 416 | */ |
416 | cookie = chan->common.cookie; | ||
417 | list_for_each_entry(child, &desc->tx_list, node) { | 417 | list_for_each_entry(child, &desc->tx_list, node) { |
418 | cookie++; | 418 | cookie = dma_cookie_assign(&child->async_tx); |
419 | if (cookie < DMA_MIN_COOKIE) | ||
420 | cookie = DMA_MIN_COOKIE; | ||
421 | |||
422 | child->async_tx.cookie = cookie; | ||
423 | } | 419 | } |
424 | 420 | ||
425 | chan->common.cookie = cookie; | ||
426 | |||
427 | /* put this transaction onto the tail of the pending queue */ | 421 | /* put this transaction onto the tail of the pending queue */ |
428 | append_ld_queue(chan, desc); | 422 | append_ld_queue(chan, desc); |
429 | 423 | ||
@@ -765,6 +759,7 @@ fail: | |||
765 | * @sg_len: number of entries in @scatterlist | 759 | * @sg_len: number of entries in @scatterlist |
766 | * @direction: DMA direction | 760 | * @direction: DMA direction |
767 | * @flags: DMAEngine flags | 761 | * @flags: DMAEngine flags |
762 | * @context: transaction context (ignored) | ||
768 | * | 763 | * |
769 | * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the | 764 | * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the |
770 | * DMA_SLAVE API, this gets the device-specific information from the | 765 | * DMA_SLAVE API, this gets the device-specific information from the |
@@ -772,7 +767,8 @@ fail: | |||
772 | */ | 767 | */ |
773 | static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( | 768 | static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( |
774 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, | 769 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, |
775 | enum dma_transfer_direction direction, unsigned long flags) | 770 | enum dma_transfer_direction direction, unsigned long flags, |
771 | void *context) | ||
776 | { | 772 | { |
777 | /* | 773 | /* |
778 | * This operation is not supported on the Freescale DMA controller | 774 | * This operation is not supported on the Freescale DMA controller |
@@ -984,19 +980,14 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan, | |||
984 | struct dma_tx_state *txstate) | 980 | struct dma_tx_state *txstate) |
985 | { | 981 | { |
986 | struct fsldma_chan *chan = to_fsl_chan(dchan); | 982 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
987 | dma_cookie_t last_complete; | 983 | enum dma_status ret; |
988 | dma_cookie_t last_used; | ||
989 | unsigned long flags; | 984 | unsigned long flags; |
990 | 985 | ||
991 | spin_lock_irqsave(&chan->desc_lock, flags); | 986 | spin_lock_irqsave(&chan->desc_lock, flags); |
992 | 987 | ret = dma_cookie_status(dchan, cookie, txstate); | |
993 | last_complete = chan->completed_cookie; | ||
994 | last_used = dchan->cookie; | ||
995 | |||
996 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 988 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
997 | 989 | ||
998 | dma_set_tx_state(txstate, last_complete, last_used, 0); | 990 | return ret; |
999 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
1000 | } | 991 | } |
1001 | 992 | ||
1002 | /*----------------------------------------------------------------------------*/ | 993 | /*----------------------------------------------------------------------------*/ |
@@ -1087,8 +1078,8 @@ static void dma_do_tasklet(unsigned long data) | |||
1087 | 1078 | ||
1088 | desc = to_fsl_desc(chan->ld_running.prev); | 1079 | desc = to_fsl_desc(chan->ld_running.prev); |
1089 | cookie = desc->async_tx.cookie; | 1080 | cookie = desc->async_tx.cookie; |
1081 | dma_cookie_complete(&desc->async_tx); | ||
1090 | 1082 | ||
1091 | chan->completed_cookie = cookie; | ||
1092 | chan_dbg(chan, "completed_cookie=%d\n", cookie); | 1083 | chan_dbg(chan, "completed_cookie=%d\n", cookie); |
1093 | } | 1084 | } |
1094 | 1085 | ||
@@ -1303,6 +1294,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, | |||
1303 | chan->idle = true; | 1294 | chan->idle = true; |
1304 | 1295 | ||
1305 | chan->common.device = &fdev->common; | 1296 | chan->common.device = &fdev->common; |
1297 | dma_cookie_init(&chan->common); | ||
1306 | 1298 | ||
1307 | /* find the IRQ line, if it exists in the device tree */ | 1299 | /* find the IRQ line, if it exists in the device tree */ |
1308 | chan->irq = irq_of_parse_and_map(node, 0); | 1300 | chan->irq = irq_of_parse_and_map(node, 0); |
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index 9cb5aa57c677..f5c38791fc74 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h | |||
@@ -137,7 +137,6 @@ struct fsldma_device { | |||
137 | struct fsldma_chan { | 137 | struct fsldma_chan { |
138 | char name[8]; /* Channel name */ | 138 | char name[8]; /* Channel name */ |
139 | struct fsldma_chan_regs __iomem *regs; | 139 | struct fsldma_chan_regs __iomem *regs; |
140 | dma_cookie_t completed_cookie; /* The maximum cookie completed */ | ||
141 | spinlock_t desc_lock; /* Descriptor operation lock */ | 140 | spinlock_t desc_lock; /* Descriptor operation lock */ |
142 | struct list_head ld_pending; /* Link descriptors queue */ | 141 | struct list_head ld_pending; /* Link descriptors queue */ |
143 | struct list_head ld_running; /* Link descriptors queue */ | 142 | struct list_head ld_running; /* Link descriptors queue */ |
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 38586ba8da91..a45b5d2a5987 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
@@ -5,6 +5,7 @@ | |||
5 | * found on i.MX1/21/27 | 5 | * found on i.MX1/21/27 |
6 | * | 6 | * |
7 | * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> | 7 | * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> |
8 | * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com> | ||
8 | * | 9 | * |
9 | * The code contained herein is licensed under the GNU General Public | 10 | * The code contained herein is licensed under the GNU General Public |
10 | * License. You may obtain a copy of the GNU General Public License | 11 | * License. You may obtain a copy of the GNU General Public License |
@@ -22,37 +23,159 @@ | |||
22 | #include <linux/dma-mapping.h> | 23 | #include <linux/dma-mapping.h> |
23 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
24 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
26 | #include <linux/clk.h> | ||
25 | #include <linux/dmaengine.h> | 27 | #include <linux/dmaengine.h> |
26 | #include <linux/module.h> | 28 | #include <linux/module.h> |
27 | 29 | ||
28 | #include <asm/irq.h> | 30 | #include <asm/irq.h> |
29 | #include <mach/dma-v1.h> | 31 | #include <mach/dma.h> |
30 | #include <mach/hardware.h> | 32 | #include <mach/hardware.h> |
31 | 33 | ||
34 | #include "dmaengine.h" | ||
35 | #define IMXDMA_MAX_CHAN_DESCRIPTORS 16 | ||
36 | #define IMX_DMA_CHANNELS 16 | ||
37 | |||
38 | #define IMX_DMA_2D_SLOTS 2 | ||
39 | #define IMX_DMA_2D_SLOT_A 0 | ||
40 | #define IMX_DMA_2D_SLOT_B 1 | ||
41 | |||
42 | #define IMX_DMA_LENGTH_LOOP ((unsigned int)-1) | ||
43 | #define IMX_DMA_MEMSIZE_32 (0 << 4) | ||
44 | #define IMX_DMA_MEMSIZE_8 (1 << 4) | ||
45 | #define IMX_DMA_MEMSIZE_16 (2 << 4) | ||
46 | #define IMX_DMA_TYPE_LINEAR (0 << 10) | ||
47 | #define IMX_DMA_TYPE_2D (1 << 10) | ||
48 | #define IMX_DMA_TYPE_FIFO (2 << 10) | ||
49 | |||
50 | #define IMX_DMA_ERR_BURST (1 << 0) | ||
51 | #define IMX_DMA_ERR_REQUEST (1 << 1) | ||
52 | #define IMX_DMA_ERR_TRANSFER (1 << 2) | ||
53 | #define IMX_DMA_ERR_BUFFER (1 << 3) | ||
54 | #define IMX_DMA_ERR_TIMEOUT (1 << 4) | ||
55 | |||
56 | #define DMA_DCR 0x00 /* Control Register */ | ||
57 | #define DMA_DISR 0x04 /* Interrupt status Register */ | ||
58 | #define DMA_DIMR 0x08 /* Interrupt mask Register */ | ||
59 | #define DMA_DBTOSR 0x0c /* Burst timeout status Register */ | ||
60 | #define DMA_DRTOSR 0x10 /* Request timeout Register */ | ||
61 | #define DMA_DSESR 0x14 /* Transfer Error Status Register */ | ||
62 | #define DMA_DBOSR 0x18 /* Buffer overflow status Register */ | ||
63 | #define DMA_DBTOCR 0x1c /* Burst timeout control Register */ | ||
64 | #define DMA_WSRA 0x40 /* W-Size Register A */ | ||
65 | #define DMA_XSRA 0x44 /* X-Size Register A */ | ||
66 | #define DMA_YSRA 0x48 /* Y-Size Register A */ | ||
67 | #define DMA_WSRB 0x4c /* W-Size Register B */ | ||
68 | #define DMA_XSRB 0x50 /* X-Size Register B */ | ||
69 | #define DMA_YSRB 0x54 /* Y-Size Register B */ | ||
70 | #define DMA_SAR(x) (0x80 + ((x) << 6)) /* Source Address Registers */ | ||
71 | #define DMA_DAR(x) (0x84 + ((x) << 6)) /* Destination Address Registers */ | ||
72 | #define DMA_CNTR(x) (0x88 + ((x) << 6)) /* Count Registers */ | ||
73 | #define DMA_CCR(x) (0x8c + ((x) << 6)) /* Control Registers */ | ||
74 | #define DMA_RSSR(x) (0x90 + ((x) << 6)) /* Request source select Registers */ | ||
75 | #define DMA_BLR(x) (0x94 + ((x) << 6)) /* Burst length Registers */ | ||
76 | #define DMA_RTOR(x) (0x98 + ((x) << 6)) /* Request timeout Registers */ | ||
77 | #define DMA_BUCR(x) (0x98 + ((x) << 6)) /* Bus Utilization Registers */ | ||
78 | #define DMA_CCNR(x) (0x9C + ((x) << 6)) /* Channel counter Registers */ | ||
79 | |||
80 | #define DCR_DRST (1<<1) | ||
81 | #define DCR_DEN (1<<0) | ||
82 | #define DBTOCR_EN (1<<15) | ||
83 | #define DBTOCR_CNT(x) ((x) & 0x7fff) | ||
84 | #define CNTR_CNT(x) ((x) & 0xffffff) | ||
85 | #define CCR_ACRPT (1<<14) | ||
86 | #define CCR_DMOD_LINEAR (0x0 << 12) | ||
87 | #define CCR_DMOD_2D (0x1 << 12) | ||
88 | #define CCR_DMOD_FIFO (0x2 << 12) | ||
89 | #define CCR_DMOD_EOBFIFO (0x3 << 12) | ||
90 | #define CCR_SMOD_LINEAR (0x0 << 10) | ||
91 | #define CCR_SMOD_2D (0x1 << 10) | ||
92 | #define CCR_SMOD_FIFO (0x2 << 10) | ||
93 | #define CCR_SMOD_EOBFIFO (0x3 << 10) | ||
94 | #define CCR_MDIR_DEC (1<<9) | ||
95 | #define CCR_MSEL_B (1<<8) | ||
96 | #define CCR_DSIZ_32 (0x0 << 6) | ||
97 | #define CCR_DSIZ_8 (0x1 << 6) | ||
98 | #define CCR_DSIZ_16 (0x2 << 6) | ||
99 | #define CCR_SSIZ_32 (0x0 << 4) | ||
100 | #define CCR_SSIZ_8 (0x1 << 4) | ||
101 | #define CCR_SSIZ_16 (0x2 << 4) | ||
102 | #define CCR_REN (1<<3) | ||
103 | #define CCR_RPT (1<<2) | ||
104 | #define CCR_FRC (1<<1) | ||
105 | #define CCR_CEN (1<<0) | ||
106 | #define RTOR_EN (1<<15) | ||
107 | #define RTOR_CLK (1<<14) | ||
108 | #define RTOR_PSC (1<<13) | ||
109 | |||
110 | enum imxdma_prep_type { | ||
111 | IMXDMA_DESC_MEMCPY, | ||
112 | IMXDMA_DESC_INTERLEAVED, | ||
113 | IMXDMA_DESC_SLAVE_SG, | ||
114 | IMXDMA_DESC_CYCLIC, | ||
115 | }; | ||
116 | |||
117 | struct imx_dma_2d_config { | ||
118 | u16 xsr; | ||
119 | u16 ysr; | ||
120 | u16 wsr; | ||
121 | int count; | ||
122 | }; | ||
123 | |||
124 | struct imxdma_desc { | ||
125 | struct list_head node; | ||
126 | struct dma_async_tx_descriptor desc; | ||
127 | enum dma_status status; | ||
128 | dma_addr_t src; | ||
129 | dma_addr_t dest; | ||
130 | size_t len; | ||
131 | enum dma_transfer_direction direction; | ||
132 | enum imxdma_prep_type type; | ||
133 | /* For memcpy and interleaved */ | ||
134 | unsigned int config_port; | ||
135 | unsigned int config_mem; | ||
136 | /* For interleaved transfers */ | ||
137 | unsigned int x; | ||
138 | unsigned int y; | ||
139 | unsigned int w; | ||
140 | /* For slave sg and cyclic */ | ||
141 | struct scatterlist *sg; | ||
142 | unsigned int sgcount; | ||
143 | }; | ||
144 | |||
32 | struct imxdma_channel { | 145 | struct imxdma_channel { |
146 | int hw_chaining; | ||
147 | struct timer_list watchdog; | ||
33 | struct imxdma_engine *imxdma; | 148 | struct imxdma_engine *imxdma; |
34 | unsigned int channel; | 149 | unsigned int channel; |
35 | unsigned int imxdma_channel; | ||
36 | 150 | ||
151 | struct tasklet_struct dma_tasklet; | ||
152 | struct list_head ld_free; | ||
153 | struct list_head ld_queue; | ||
154 | struct list_head ld_active; | ||
155 | int descs_allocated; | ||
37 | enum dma_slave_buswidth word_size; | 156 | enum dma_slave_buswidth word_size; |
38 | dma_addr_t per_address; | 157 | dma_addr_t per_address; |
39 | u32 watermark_level; | 158 | u32 watermark_level; |
40 | struct dma_chan chan; | 159 | struct dma_chan chan; |
41 | spinlock_t lock; | ||
42 | struct dma_async_tx_descriptor desc; | 160 | struct dma_async_tx_descriptor desc; |
43 | dma_cookie_t last_completed; | ||
44 | enum dma_status status; | 161 | enum dma_status status; |
45 | int dma_request; | 162 | int dma_request; |
46 | struct scatterlist *sg_list; | 163 | struct scatterlist *sg_list; |
164 | u32 ccr_from_device; | ||
165 | u32 ccr_to_device; | ||
166 | bool enabled_2d; | ||
167 | int slot_2d; | ||
47 | }; | 168 | }; |
48 | 169 | ||
49 | #define MAX_DMA_CHANNELS 8 | ||
50 | |||
51 | struct imxdma_engine { | 170 | struct imxdma_engine { |
52 | struct device *dev; | 171 | struct device *dev; |
53 | struct device_dma_parameters dma_parms; | 172 | struct device_dma_parameters dma_parms; |
54 | struct dma_device dma_device; | 173 | struct dma_device dma_device; |
55 | struct imxdma_channel channel[MAX_DMA_CHANNELS]; | 174 | void __iomem *base; |
175 | struct clk *dma_clk; | ||
176 | spinlock_t lock; | ||
177 | struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS]; | ||
178 | struct imxdma_channel channel[IMX_DMA_CHANNELS]; | ||
56 | }; | 179 | }; |
57 | 180 | ||
58 | static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan) | 181 | static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan) |
@@ -60,36 +183,418 @@ static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan) | |||
60 | return container_of(chan, struct imxdma_channel, chan); | 183 | return container_of(chan, struct imxdma_channel, chan); |
61 | } | 184 | } |
62 | 185 | ||
63 | static void imxdma_handle(struct imxdma_channel *imxdmac) | 186 | static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac) |
187 | { | ||
188 | struct imxdma_desc *desc; | ||
189 | |||
190 | if (!list_empty(&imxdmac->ld_active)) { | ||
191 | desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, | ||
192 | node); | ||
193 | if (desc->type == IMXDMA_DESC_CYCLIC) | ||
194 | return true; | ||
195 | } | ||
196 | return false; | ||
197 | } | ||
198 | |||
199 | |||
200 | |||
201 | static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val, | ||
202 | unsigned offset) | ||
203 | { | ||
204 | __raw_writel(val, imxdma->base + offset); | ||
205 | } | ||
206 | |||
207 | static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset) | ||
208 | { | ||
209 | return __raw_readl(imxdma->base + offset); | ||
210 | } | ||
211 | |||
212 | static int imxdma_hw_chain(struct imxdma_channel *imxdmac) | ||
213 | { | ||
214 | if (cpu_is_mx27()) | ||
215 | return imxdmac->hw_chaining; | ||
216 | else | ||
217 | return 0; | ||
218 | } | ||
219 | |||
220 | /* | ||
221 | * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation | ||
222 | */ | ||
223 | static inline int imxdma_sg_next(struct imxdma_desc *d) | ||
64 | { | 224 | { |
65 | if (imxdmac->desc.callback) | 225 | struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); |
66 | imxdmac->desc.callback(imxdmac->desc.callback_param); | 226 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
67 | imxdmac->last_completed = imxdmac->desc.cookie; | 227 | struct scatterlist *sg = d->sg; |
228 | unsigned long now; | ||
229 | |||
230 | now = min(d->len, sg->length); | ||
231 | if (d->len != IMX_DMA_LENGTH_LOOP) | ||
232 | d->len -= now; | ||
233 | |||
234 | if (d->direction == DMA_DEV_TO_MEM) | ||
235 | imx_dmav1_writel(imxdma, sg->dma_address, | ||
236 | DMA_DAR(imxdmac->channel)); | ||
237 | else | ||
238 | imx_dmav1_writel(imxdma, sg->dma_address, | ||
239 | DMA_SAR(imxdmac->channel)); | ||
240 | |||
241 | imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel)); | ||
242 | |||
243 | dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, " | ||
244 | "size 0x%08x\n", __func__, imxdmac->channel, | ||
245 | imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)), | ||
246 | imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)), | ||
247 | imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel))); | ||
248 | |||
249 | return now; | ||
68 | } | 250 | } |
69 | 251 | ||
70 | static void imxdma_irq_handler(int channel, void *data) | 252 | static void imxdma_enable_hw(struct imxdma_desc *d) |
71 | { | 253 | { |
72 | struct imxdma_channel *imxdmac = data; | 254 | struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); |
255 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
256 | int channel = imxdmac->channel; | ||
257 | unsigned long flags; | ||
258 | |||
259 | dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel); | ||
260 | |||
261 | local_irq_save(flags); | ||
262 | |||
263 | imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR); | ||
264 | imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) & | ||
265 | ~(1 << channel), DMA_DIMR); | ||
266 | imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) | | ||
267 | CCR_CEN | CCR_ACRPT, DMA_CCR(channel)); | ||
268 | |||
269 | if ((cpu_is_mx21() || cpu_is_mx27()) && | ||
270 | d->sg && imxdma_hw_chain(imxdmac)) { | ||
271 | d->sg = sg_next(d->sg); | ||
272 | if (d->sg) { | ||
273 | u32 tmp; | ||
274 | imxdma_sg_next(d); | ||
275 | tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel)); | ||
276 | imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT, | ||
277 | DMA_CCR(channel)); | ||
278 | } | ||
279 | } | ||
280 | |||
281 | local_irq_restore(flags); | ||
282 | } | ||
283 | |||
284 | static void imxdma_disable_hw(struct imxdma_channel *imxdmac) | ||
285 | { | ||
286 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
287 | int channel = imxdmac->channel; | ||
288 | unsigned long flags; | ||
289 | |||
290 | dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel); | ||
291 | |||
292 | if (imxdma_hw_chain(imxdmac)) | ||
293 | del_timer(&imxdmac->watchdog); | ||
294 | |||
295 | local_irq_save(flags); | ||
296 | imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) | | ||
297 | (1 << channel), DMA_DIMR); | ||
298 | imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) & | ||
299 | ~CCR_CEN, DMA_CCR(channel)); | ||
300 | imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR); | ||
301 | local_irq_restore(flags); | ||
302 | } | ||
303 | |||
304 | static void imxdma_watchdog(unsigned long data) | ||
305 | { | ||
306 | struct imxdma_channel *imxdmac = (struct imxdma_channel *)data; | ||
307 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
308 | int channel = imxdmac->channel; | ||
309 | |||
310 | imx_dmav1_writel(imxdma, 0, DMA_CCR(channel)); | ||
73 | 311 | ||
74 | imxdmac->status = DMA_SUCCESS; | 312 | /* Tasklet watchdog error handler */ |
75 | imxdma_handle(imxdmac); | 313 | tasklet_schedule(&imxdmac->dma_tasklet); |
314 | dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n", | ||
315 | imxdmac->channel); | ||
76 | } | 316 | } |
77 | 317 | ||
78 | static void imxdma_err_handler(int channel, void *data, int error) | 318 | static irqreturn_t imxdma_err_handler(int irq, void *dev_id) |
79 | { | 319 | { |
80 | struct imxdma_channel *imxdmac = data; | 320 | struct imxdma_engine *imxdma = dev_id; |
321 | unsigned int err_mask; | ||
322 | int i, disr; | ||
323 | int errcode; | ||
324 | |||
325 | disr = imx_dmav1_readl(imxdma, DMA_DISR); | ||
326 | |||
327 | err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) | | ||
328 | imx_dmav1_readl(imxdma, DMA_DRTOSR) | | ||
329 | imx_dmav1_readl(imxdma, DMA_DSESR) | | ||
330 | imx_dmav1_readl(imxdma, DMA_DBOSR); | ||
331 | |||
332 | if (!err_mask) | ||
333 | return IRQ_HANDLED; | ||
334 | |||
335 | imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR); | ||
336 | |||
337 | for (i = 0; i < IMX_DMA_CHANNELS; i++) { | ||
338 | if (!(err_mask & (1 << i))) | ||
339 | continue; | ||
340 | errcode = 0; | ||
341 | |||
342 | if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) { | ||
343 | imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR); | ||
344 | errcode |= IMX_DMA_ERR_BURST; | ||
345 | } | ||
346 | if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) { | ||
347 | imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR); | ||
348 | errcode |= IMX_DMA_ERR_REQUEST; | ||
349 | } | ||
350 | if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) { | ||
351 | imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR); | ||
352 | errcode |= IMX_DMA_ERR_TRANSFER; | ||
353 | } | ||
354 | if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) { | ||
355 | imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR); | ||
356 | errcode |= IMX_DMA_ERR_BUFFER; | ||
357 | } | ||
358 | /* Tasklet error handler */ | ||
359 | tasklet_schedule(&imxdma->channel[i].dma_tasklet); | ||
360 | |||
361 | printk(KERN_WARNING | ||
362 | "DMA timeout on channel %d -%s%s%s%s\n", i, | ||
363 | errcode & IMX_DMA_ERR_BURST ? " burst" : "", | ||
364 | errcode & IMX_DMA_ERR_REQUEST ? " request" : "", | ||
365 | errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "", | ||
366 | errcode & IMX_DMA_ERR_BUFFER ? " buffer" : ""); | ||
367 | } | ||
368 | return IRQ_HANDLED; | ||
369 | } | ||
370 | |||
371 | static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) | ||
372 | { | ||
373 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
374 | int chno = imxdmac->channel; | ||
375 | struct imxdma_desc *desc; | ||
376 | |||
377 | spin_lock(&imxdma->lock); | ||
378 | if (list_empty(&imxdmac->ld_active)) { | ||
379 | spin_unlock(&imxdma->lock); | ||
380 | goto out; | ||
381 | } | ||
382 | |||
383 | desc = list_first_entry(&imxdmac->ld_active, | ||
384 | struct imxdma_desc, | ||
385 | node); | ||
386 | spin_unlock(&imxdma->lock); | ||
387 | |||
388 | if (desc->sg) { | ||
389 | u32 tmp; | ||
390 | desc->sg = sg_next(desc->sg); | ||
391 | |||
392 | if (desc->sg) { | ||
393 | imxdma_sg_next(desc); | ||
394 | |||
395 | tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno)); | ||
396 | |||
397 | if (imxdma_hw_chain(imxdmac)) { | ||
398 | /* FIXME: The timeout should probably be | ||
399 | * configurable | ||
400 | */ | ||
401 | mod_timer(&imxdmac->watchdog, | ||
402 | jiffies + msecs_to_jiffies(500)); | ||
403 | |||
404 | tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT; | ||
405 | imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno)); | ||
406 | } else { | ||
407 | imx_dmav1_writel(imxdma, tmp & ~CCR_CEN, | ||
408 | DMA_CCR(chno)); | ||
409 | tmp |= CCR_CEN; | ||
410 | } | ||
411 | |||
412 | imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno)); | ||
413 | |||
414 | if (imxdma_chan_is_doing_cyclic(imxdmac)) | ||
415 | /* Tasklet progression */ | ||
416 | tasklet_schedule(&imxdmac->dma_tasklet); | ||
417 | |||
418 | return; | ||
419 | } | ||
420 | |||
421 | if (imxdma_hw_chain(imxdmac)) { | ||
422 | del_timer(&imxdmac->watchdog); | ||
423 | return; | ||
424 | } | ||
425 | } | ||
426 | |||
427 | out: | ||
428 | imx_dmav1_writel(imxdma, 0, DMA_CCR(chno)); | ||
429 | /* Tasklet irq */ | ||
430 | tasklet_schedule(&imxdmac->dma_tasklet); | ||
431 | } | ||
432 | |||
433 | static irqreturn_t dma_irq_handler(int irq, void *dev_id) | ||
434 | { | ||
435 | struct imxdma_engine *imxdma = dev_id; | ||
436 | int i, disr; | ||
437 | |||
438 | if (cpu_is_mx21() || cpu_is_mx27()) | ||
439 | imxdma_err_handler(irq, dev_id); | ||
440 | |||
441 | disr = imx_dmav1_readl(imxdma, DMA_DISR); | ||
442 | |||
443 | dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr); | ||
444 | |||
445 | imx_dmav1_writel(imxdma, disr, DMA_DISR); | ||
446 | for (i = 0; i < IMX_DMA_CHANNELS; i++) { | ||
447 | if (disr & (1 << i)) | ||
448 | dma_irq_handle_channel(&imxdma->channel[i]); | ||
449 | } | ||
450 | |||
451 | return IRQ_HANDLED; | ||
452 | } | ||
453 | |||
454 | static int imxdma_xfer_desc(struct imxdma_desc *d) | ||
455 | { | ||
456 | struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); | ||
457 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
458 | unsigned long flags; | ||
459 | int slot = -1; | ||
460 | int i; | ||
461 | |||
462 | /* Configure and enable */ | ||
463 | switch (d->type) { | ||
464 | case IMXDMA_DESC_INTERLEAVED: | ||
465 | /* Try to get a free 2D slot */ | ||
466 | spin_lock_irqsave(&imxdma->lock, flags); | ||
467 | for (i = 0; i < IMX_DMA_2D_SLOTS; i++) { | ||
468 | if ((imxdma->slots_2d[i].count > 0) && | ||
469 | ((imxdma->slots_2d[i].xsr != d->x) || | ||
470 | (imxdma->slots_2d[i].ysr != d->y) || | ||
471 | (imxdma->slots_2d[i].wsr != d->w))) | ||
472 | continue; | ||
473 | slot = i; | ||
474 | break; | ||
475 | } | ||
476 | if (slot < 0) | ||
477 | return -EBUSY; | ||
478 | |||
479 | imxdma->slots_2d[slot].xsr = d->x; | ||
480 | imxdma->slots_2d[slot].ysr = d->y; | ||
481 | imxdma->slots_2d[slot].wsr = d->w; | ||
482 | imxdma->slots_2d[slot].count++; | ||
483 | |||
484 | imxdmac->slot_2d = slot; | ||
485 | imxdmac->enabled_2d = true; | ||
486 | spin_unlock_irqrestore(&imxdma->lock, flags); | ||
487 | |||
488 | if (slot == IMX_DMA_2D_SLOT_A) { | ||
489 | d->config_mem &= ~CCR_MSEL_B; | ||
490 | d->config_port &= ~CCR_MSEL_B; | ||
491 | imx_dmav1_writel(imxdma, d->x, DMA_XSRA); | ||
492 | imx_dmav1_writel(imxdma, d->y, DMA_YSRA); | ||
493 | imx_dmav1_writel(imxdma, d->w, DMA_WSRA); | ||
494 | } else { | ||
495 | d->config_mem |= CCR_MSEL_B; | ||
496 | d->config_port |= CCR_MSEL_B; | ||
497 | imx_dmav1_writel(imxdma, d->x, DMA_XSRB); | ||
498 | imx_dmav1_writel(imxdma, d->y, DMA_YSRB); | ||
499 | imx_dmav1_writel(imxdma, d->w, DMA_WSRB); | ||
500 | } | ||
501 | /* | ||
502 | * We fall-through here intentionally, since a 2D transfer is | ||
503 | * similar to a MEMCPY, just adding the 2D slot configuration. | ||
504 | */ | ||
505 | case IMXDMA_DESC_MEMCPY: | ||
506 | imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel)); | ||
507 | imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel)); | ||
508 | imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2), | ||
509 | DMA_CCR(imxdmac->channel)); | ||
510 | |||
511 | imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel)); | ||
512 | |||
513 | dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x " | ||
514 | "dma_length=%d\n", __func__, imxdmac->channel, | ||
515 | d->dest, d->src, d->len); | ||
516 | |||
517 | break; | ||
518 | /* Cyclic transfer is the same as slave_sg with special sg configuration. */ | ||
519 | case IMXDMA_DESC_CYCLIC: | ||
520 | case IMXDMA_DESC_SLAVE_SG: | ||
521 | if (d->direction == DMA_DEV_TO_MEM) { | ||
522 | imx_dmav1_writel(imxdma, imxdmac->per_address, | ||
523 | DMA_SAR(imxdmac->channel)); | ||
524 | imx_dmav1_writel(imxdma, imxdmac->ccr_from_device, | ||
525 | DMA_CCR(imxdmac->channel)); | ||
526 | |||
527 | dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " | ||
528 | "total length=%d dev_addr=0x%08x (dev2mem)\n", | ||
529 | __func__, imxdmac->channel, d->sg, d->sgcount, | ||
530 | d->len, imxdmac->per_address); | ||
531 | } else if (d->direction == DMA_MEM_TO_DEV) { | ||
532 | imx_dmav1_writel(imxdma, imxdmac->per_address, | ||
533 | DMA_DAR(imxdmac->channel)); | ||
534 | imx_dmav1_writel(imxdma, imxdmac->ccr_to_device, | ||
535 | DMA_CCR(imxdmac->channel)); | ||
536 | |||
537 | dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " | ||
538 | "total length=%d dev_addr=0x%08x (mem2dev)\n", | ||
539 | __func__, imxdmac->channel, d->sg, d->sgcount, | ||
540 | d->len, imxdmac->per_address); | ||
541 | } else { | ||
542 | dev_err(imxdma->dev, "%s channel: %d bad dma mode\n", | ||
543 | __func__, imxdmac->channel); | ||
544 | return -EINVAL; | ||
545 | } | ||
546 | |||
547 | imxdma_sg_next(d); | ||
81 | 548 | ||
82 | imxdmac->status = DMA_ERROR; | 549 | break; |
83 | imxdma_handle(imxdmac); | 550 | default: |
551 | return -EINVAL; | ||
552 | } | ||
553 | imxdma_enable_hw(d); | ||
554 | return 0; | ||
84 | } | 555 | } |
85 | 556 | ||
86 | static void imxdma_progression(int channel, void *data, | 557 | static void imxdma_tasklet(unsigned long data) |
87 | struct scatterlist *sg) | ||
88 | { | 558 | { |
89 | struct imxdma_channel *imxdmac = data; | 559 | struct imxdma_channel *imxdmac = (void *)data; |
560 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
561 | struct imxdma_desc *desc; | ||
90 | 562 | ||
91 | imxdmac->status = DMA_SUCCESS; | 563 | spin_lock(&imxdma->lock); |
92 | imxdma_handle(imxdmac); | 564 | |
565 | if (list_empty(&imxdmac->ld_active)) { | ||
566 | /* Someone might have called terminate all */ | ||
567 | goto out; | ||
568 | } | ||
569 | desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node); | ||
570 | |||
571 | if (desc->desc.callback) | ||
572 | desc->desc.callback(desc->desc.callback_param); | ||
573 | |||
574 | dma_cookie_complete(&desc->desc); | ||
575 | |||
576 | /* If we are dealing with a cyclic descriptor keep it on ld_active */ | ||
577 | if (imxdma_chan_is_doing_cyclic(imxdmac)) | ||
578 | goto out; | ||
579 | |||
580 | /* Free 2D slot if it was an interleaved transfer */ | ||
581 | if (imxdmac->enabled_2d) { | ||
582 | imxdma->slots_2d[imxdmac->slot_2d].count--; | ||
583 | imxdmac->enabled_2d = false; | ||
584 | } | ||
585 | |||
586 | list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free); | ||
587 | |||
588 | if (!list_empty(&imxdmac->ld_queue)) { | ||
589 | desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc, | ||
590 | node); | ||
591 | list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active); | ||
592 | if (imxdma_xfer_desc(desc) < 0) | ||
593 | dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n", | ||
594 | __func__, imxdmac->channel); | ||
595 | } | ||
596 | out: | ||
597 | spin_unlock(&imxdma->lock); | ||
93 | } | 598 | } |
94 | 599 | ||
95 | static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 600 | static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
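The rewritten core above replaces the old single in-flight descriptor with per-channel lists drained from a tasklet. A short summary of the flow visible in this hunk, written as a comment (ld_queue is refilled by the prep/submit paths not shown here):

	/*
	 * ld_active: the head is the descriptor currently programmed into the
	 * channel. imxdma_tasklet() completes it (dma_cookie_complete() plus the
	 * callback), keeps it on ld_active if it is cyclic, otherwise recycles it
	 * onto ld_free and starts the first entry of ld_queue via
	 * imxdma_xfer_desc(). DMA_TERMINATE_ALL splices both ld_active and
	 * ld_queue back onto ld_free.
	 */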
@@ -97,13 +602,18 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
97 | { | 602 | { |
98 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | 603 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); |
99 | struct dma_slave_config *dmaengine_cfg = (void *)arg; | 604 | struct dma_slave_config *dmaengine_cfg = (void *)arg; |
100 | int ret; | 605 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
606 | unsigned long flags; | ||
101 | unsigned int mode = 0; | 607 | unsigned int mode = 0; |
102 | 608 | ||
103 | switch (cmd) { | 609 | switch (cmd) { |
104 | case DMA_TERMINATE_ALL: | 610 | case DMA_TERMINATE_ALL: |
105 | imxdmac->status = DMA_ERROR; | 611 | imxdma_disable_hw(imxdmac); |
106 | imx_dma_disable(imxdmac->imxdma_channel); | 612 | |
613 | spin_lock_irqsave(&imxdma->lock, flags); | ||
614 | list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free); | ||
615 | list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free); | ||
616 | spin_unlock_irqrestore(&imxdma->lock, flags); | ||
107 | return 0; | 617 | return 0; |
108 | case DMA_SLAVE_CONFIG: | 618 | case DMA_SLAVE_CONFIG: |
109 | if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { | 619 | if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { |
@@ -128,16 +638,22 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
128 | mode = IMX_DMA_MEMSIZE_32; | 638 | mode = IMX_DMA_MEMSIZE_32; |
129 | break; | 639 | break; |
130 | } | 640 | } |
131 | ret = imx_dma_config_channel(imxdmac->imxdma_channel, | ||
132 | mode | IMX_DMA_TYPE_FIFO, | ||
133 | IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR, | ||
134 | imxdmac->dma_request, 1); | ||
135 | |||
136 | if (ret) | ||
137 | return ret; | ||
138 | 641 | ||
139 | imx_dma_config_burstlen(imxdmac->imxdma_channel, | 642 | imxdmac->hw_chaining = 1; |
140 | imxdmac->watermark_level * imxdmac->word_size); | 643 | if (!imxdma_hw_chain(imxdmac)) |
644 | return -EINVAL; | ||
645 | imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) | | ||
646 | ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) | | ||
647 | CCR_REN; | ||
648 | imxdmac->ccr_to_device = | ||
649 | (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) | | ||
650 | ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN; | ||
651 | imx_dmav1_writel(imxdma, imxdmac->dma_request, | ||
652 | DMA_RSSR(imxdmac->channel)); | ||
653 | |||
654 | /* Set burst length */ | ||
655 | imx_dmav1_writel(imxdma, imxdmac->watermark_level * | ||
656 | imxdmac->word_size, DMA_BLR(imxdmac->channel)); | ||
141 | 657 | ||
142 | return 0; | 658 | return 0; |
143 | default: | 659 | default: |
@@ -151,43 +667,20 @@ static enum dma_status imxdma_tx_status(struct dma_chan *chan, | |||
151 | dma_cookie_t cookie, | 667 | dma_cookie_t cookie, |
152 | struct dma_tx_state *txstate) | 668 | struct dma_tx_state *txstate) |
153 | { | 669 | { |
154 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | 670 | return dma_cookie_status(chan, cookie, txstate); |
155 | dma_cookie_t last_used; | ||
156 | enum dma_status ret; | ||
157 | |||
158 | last_used = chan->cookie; | ||
159 | |||
160 | ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used); | ||
161 | dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0); | ||
162 | |||
163 | return ret; | ||
164 | } | ||
165 | |||
166 | static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma) | ||
167 | { | ||
168 | dma_cookie_t cookie = imxdma->chan.cookie; | ||
169 | |||
170 | if (++cookie < 0) | ||
171 | cookie = 1; | ||
172 | |||
173 | imxdma->chan.cookie = cookie; | ||
174 | imxdma->desc.cookie = cookie; | ||
175 | |||
176 | return cookie; | ||
177 | } | 671 | } |
178 | 672 | ||
179 | static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx) | 673 | static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx) |
180 | { | 674 | { |
181 | struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan); | 675 | struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan); |
676 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
182 | dma_cookie_t cookie; | 677 | dma_cookie_t cookie; |
678 | unsigned long flags; | ||
183 | 679 | ||
184 | spin_lock_irq(&imxdmac->lock); | 680 | spin_lock_irqsave(&imxdma->lock, flags); |
185 | 681 | list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue); | |
186 | cookie = imxdma_assign_cookie(imxdmac); | 682 | cookie = dma_cookie_assign(tx); |
187 | 683 | spin_unlock_irqrestore(&imxdma->lock, flags); | |
188 | imx_dma_enable(imxdmac->imxdma_channel); | ||
189 | |||
190 | spin_unlock_irq(&imxdmac->lock); | ||
191 | 684 | ||
192 | return cookie; | 685 | return cookie; |
193 | } | 686 | } |
@@ -197,23 +690,52 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan) | |||
197 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | 690 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); |
198 | struct imx_dma_data *data = chan->private; | 691 | struct imx_dma_data *data = chan->private; |
199 | 692 | ||
200 | imxdmac->dma_request = data->dma_request; | 693 | if (data != NULL) |
694 | imxdmac->dma_request = data->dma_request; | ||
201 | 695 | ||
202 | dma_async_tx_descriptor_init(&imxdmac->desc, chan); | 696 | while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) { |
203 | imxdmac->desc.tx_submit = imxdma_tx_submit; | 697 | struct imxdma_desc *desc; |
204 | /* txd.flags will be overwritten in prep funcs */ | ||
205 | imxdmac->desc.flags = DMA_CTRL_ACK; | ||
206 | 698 | ||
207 | imxdmac->status = DMA_SUCCESS; | 699 | desc = kzalloc(sizeof(*desc), GFP_KERNEL); |
700 | if (!desc) | ||
701 | break; | ||
702 | memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor)); | ||
703 | dma_async_tx_descriptor_init(&desc->desc, chan); | ||
704 | desc->desc.tx_submit = imxdma_tx_submit; | ||
705 | /* txd.flags will be overwritten in prep funcs */ | ||
706 | desc->desc.flags = DMA_CTRL_ACK; | ||
707 | desc->status = DMA_SUCCESS; | ||
708 | |||
709 | list_add_tail(&desc->node, &imxdmac->ld_free); | ||
710 | imxdmac->descs_allocated++; | ||
711 | } | ||
208 | 712 | ||
209 | return 0; | 713 | if (!imxdmac->descs_allocated) |
714 | return -ENOMEM; | ||
715 | |||
716 | return imxdmac->descs_allocated; | ||
210 | } | 717 | } |
211 | 718 | ||
212 | static void imxdma_free_chan_resources(struct dma_chan *chan) | 719 | static void imxdma_free_chan_resources(struct dma_chan *chan) |
213 | { | 720 | { |
214 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | 721 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); |
722 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
723 | struct imxdma_desc *desc, *_desc; | ||
724 | unsigned long flags; | ||
725 | |||
726 | spin_lock_irqsave(&imxdma->lock, flags); | ||
727 | |||
728 | imxdma_disable_hw(imxdmac); | ||
729 | list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free); | ||
730 | list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free); | ||
215 | 731 | ||
216 | imx_dma_disable(imxdmac->imxdma_channel); | 732 | spin_unlock_irqrestore(&imxdma->lock, flags); |
733 | |||
734 | list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) { | ||
735 | kfree(desc); | ||
736 | imxdmac->descs_allocated--; | ||
737 | } | ||
738 | INIT_LIST_HEAD(&imxdmac->ld_free); | ||
217 | 739 | ||
218 | if (imxdmac->sg_list) { | 740 | if (imxdmac->sg_list) { |
219 | kfree(imxdmac->sg_list); | 741 | kfree(imxdmac->sg_list); |
@@ -224,27 +746,23 @@ static void imxdma_free_chan_resources(struct dma_chan *chan) | |||
224 | static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( | 746 | static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( |
225 | struct dma_chan *chan, struct scatterlist *sgl, | 747 | struct dma_chan *chan, struct scatterlist *sgl, |
226 | unsigned int sg_len, enum dma_transfer_direction direction, | 748 | unsigned int sg_len, enum dma_transfer_direction direction, |
227 | unsigned long flags) | 749 | unsigned long flags, void *context) |
228 | { | 750 | { |
229 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | 751 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); |
230 | struct scatterlist *sg; | 752 | struct scatterlist *sg; |
231 | int i, ret, dma_length = 0; | 753 | int i, dma_length = 0; |
232 | unsigned int dmamode; | 754 | struct imxdma_desc *desc; |
233 | 755 | ||
234 | if (imxdmac->status == DMA_IN_PROGRESS) | 756 | if (list_empty(&imxdmac->ld_free) || |
757 | imxdma_chan_is_doing_cyclic(imxdmac)) | ||
235 | return NULL; | 758 | return NULL; |
236 | 759 | ||
237 | imxdmac->status = DMA_IN_PROGRESS; | 760 | desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); |
238 | 761 | ||
239 | for_each_sg(sgl, sg, sg_len, i) { | 762 | for_each_sg(sgl, sg, sg_len, i) { |
240 | dma_length += sg->length; | 763 | dma_length += sg->length; |
241 | } | 764 | } |
242 | 765 | ||
243 | if (direction == DMA_DEV_TO_MEM) | ||
244 | dmamode = DMA_MODE_READ; | ||
245 | else | ||
246 | dmamode = DMA_MODE_WRITE; | ||
247 | |||
248 | switch (imxdmac->word_size) { | 766 | switch (imxdmac->word_size) { |
249 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | 767 | case DMA_SLAVE_BUSWIDTH_4_BYTES: |
250 | if (sgl->length & 3 || sgl->dma_address & 3) | 768 | if (sgl->length & 3 || sgl->dma_address & 3) |
@@ -260,37 +778,41 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( | |||
260 | return NULL; | 778 | return NULL; |
261 | } | 779 | } |
262 | 780 | ||
263 | ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len, | 781 | desc->type = IMXDMA_DESC_SLAVE_SG; |
264 | dma_length, imxdmac->per_address, dmamode); | 782 | desc->sg = sgl; |
265 | if (ret) | 783 | desc->sgcount = sg_len; |
266 | return NULL; | 784 | desc->len = dma_length; |
785 | desc->direction = direction; | ||
786 | if (direction == DMA_DEV_TO_MEM) { | ||
787 | desc->src = imxdmac->per_address; | ||
788 | } else { | ||
789 | desc->dest = imxdmac->per_address; | ||
790 | } | ||
791 | desc->desc.callback = NULL; | ||
792 | desc->desc.callback_param = NULL; | ||
267 | 793 | ||
268 | return &imxdmac->desc; | 794 | return &desc->desc; |
269 | } | 795 | } |
270 | 796 | ||
271 | static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( | 797 | static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( |
272 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | 798 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, |
273 | size_t period_len, enum dma_transfer_direction direction) | 799 | size_t period_len, enum dma_transfer_direction direction, |
800 | void *context) | ||
274 | { | 801 | { |
275 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | 802 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); |
276 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 803 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
277 | int i, ret; | 804 | struct imxdma_desc *desc; |
805 | int i; | ||
278 | unsigned int periods = buf_len / period_len; | 806 | unsigned int periods = buf_len / period_len; |
279 | unsigned int dmamode; | ||
280 | 807 | ||
281 | dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n", | 808 | dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n", |
282 | __func__, imxdmac->channel, buf_len, period_len); | 809 | __func__, imxdmac->channel, buf_len, period_len); |
283 | 810 | ||
284 | if (imxdmac->status == DMA_IN_PROGRESS) | 811 | if (list_empty(&imxdmac->ld_free) || |
812 | imxdma_chan_is_doing_cyclic(imxdmac)) | ||
285 | return NULL; | 813 | return NULL; |
286 | imxdmac->status = DMA_IN_PROGRESS; | ||
287 | 814 | ||
288 | ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel, | 815 | desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); |
289 | imxdma_progression); | ||
290 | if (ret) { | ||
291 | dev_err(imxdma->dev, "Failed to setup the DMA handler\n"); | ||
292 | return NULL; | ||
293 | } | ||
294 | 816 | ||
295 | if (imxdmac->sg_list) | 817 | if (imxdmac->sg_list) |
296 | kfree(imxdmac->sg_list); | 818 | kfree(imxdmac->sg_list); |
@@ -316,62 +838,221 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( | |||
316 | imxdmac->sg_list[periods].page_link = | 838 | imxdmac->sg_list[periods].page_link = |
317 | ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; | 839 | ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; |
318 | 840 | ||
319 | if (direction == DMA_DEV_TO_MEM) | 841 | desc->type = IMXDMA_DESC_CYCLIC; |
320 | dmamode = DMA_MODE_READ; | 842 | desc->sg = imxdmac->sg_list; |
321 | else | 843 | desc->sgcount = periods; |
322 | dmamode = DMA_MODE_WRITE; | 844 | desc->len = IMX_DMA_LENGTH_LOOP; |
845 | desc->direction = direction; | ||
846 | if (direction == DMA_DEV_TO_MEM) { | ||
847 | desc->src = imxdmac->per_address; | ||
848 | } else { | ||
849 | desc->dest = imxdmac->per_address; | ||
850 | } | ||
851 | desc->desc.callback = NULL; | ||
852 | desc->desc.callback_param = NULL; | ||
853 | |||
854 | return &desc->desc; | ||
855 | } | ||
856 | |||
857 | static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy( | ||
858 | struct dma_chan *chan, dma_addr_t dest, | ||
859 | dma_addr_t src, size_t len, unsigned long flags) | ||
860 | { | ||
861 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | ||
862 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
863 | struct imxdma_desc *desc; | ||
323 | 864 | ||
324 | ret = imx_dma_setup_sg(imxdmac->imxdma_channel, imxdmac->sg_list, periods, | 865 | dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n", |
325 | IMX_DMA_LENGTH_LOOP, imxdmac->per_address, dmamode); | 866 | __func__, imxdmac->channel, src, dest, len); |
326 | if (ret) | 867 | |
868 | if (list_empty(&imxdmac->ld_free) || | ||
869 | imxdma_chan_is_doing_cyclic(imxdmac)) | ||
327 | return NULL; | 870 | return NULL; |
328 | 871 | ||
329 | return &imxdmac->desc; | 872 | desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); |
873 | |||
874 | desc->type = IMXDMA_DESC_MEMCPY; | ||
875 | desc->src = src; | ||
876 | desc->dest = dest; | ||
877 | desc->len = len; | ||
878 | desc->direction = DMA_MEM_TO_MEM; | ||
879 | desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR; | ||
880 | desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR; | ||
881 | desc->desc.callback = NULL; | ||
882 | desc->desc.callback_param = NULL; | ||
883 | |||
884 | return &desc->desc; | ||
885 | } | ||
886 | |||
887 | static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved( | ||
888 | struct dma_chan *chan, struct dma_interleaved_template *xt, | ||
889 | unsigned long flags) | ||
890 | { | ||
891 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | ||
892 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
893 | struct imxdma_desc *desc; | ||
894 | |||
895 | dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n" | ||
896 | " src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__, | ||
897 | imxdmac->channel, xt->src_start, xt->dst_start, | ||
898 | xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false", | ||
899 | xt->numf, xt->frame_size); | ||
900 | |||
901 | if (list_empty(&imxdmac->ld_free) || | ||
902 | imxdma_chan_is_doing_cyclic(imxdmac)) | ||
903 | return NULL; | ||
904 | |||
905 | if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM) | ||
906 | return NULL; | ||
907 | |||
908 | desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); | ||
909 | |||
910 | desc->type = IMXDMA_DESC_INTERLEAVED; | ||
911 | desc->src = xt->src_start; | ||
912 | desc->dest = xt->dst_start; | ||
913 | desc->x = xt->sgl[0].size; | ||
914 | desc->y = xt->numf; | ||
915 | desc->w = xt->sgl[0].icg + desc->x; | ||
916 | desc->len = desc->x * desc->y; | ||
917 | desc->direction = DMA_MEM_TO_MEM; | ||
918 | desc->config_port = IMX_DMA_MEMSIZE_32; | ||
919 | desc->config_mem = IMX_DMA_MEMSIZE_32; | ||
920 | if (xt->src_sgl) | ||
921 | desc->config_mem |= IMX_DMA_TYPE_2D; | ||
922 | if (xt->dst_sgl) | ||
923 | desc->config_port |= IMX_DMA_TYPE_2D; | ||
924 | desc->desc.callback = NULL; | ||
925 | desc->desc.callback_param = NULL; | ||
926 | |||
927 | return &desc->desc; | ||
330 | } | 928 | } |
331 | 929 | ||
332 | static void imxdma_issue_pending(struct dma_chan *chan) | 930 | static void imxdma_issue_pending(struct dma_chan *chan) |
333 | { | 931 | { |
334 | /* | 932 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); |
335 | * Nothing to do. We only have a single descriptor | 933 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
336 | */ | 934 | struct imxdma_desc *desc; |
935 | unsigned long flags; | ||
936 | |||
937 | spin_lock_irqsave(&imxdma->lock, flags); | ||
938 | if (list_empty(&imxdmac->ld_active) && | ||
939 | !list_empty(&imxdmac->ld_queue)) { | ||
940 | desc = list_first_entry(&imxdmac->ld_queue, | ||
941 | struct imxdma_desc, node); | ||
942 | |||
943 | if (imxdma_xfer_desc(desc) < 0) { | ||
944 | dev_warn(imxdma->dev, | ||
945 | "%s: channel: %d couldn't issue DMA xfer\n", | ||
946 | __func__, imxdmac->channel); | ||
947 | } else { | ||
948 | list_move_tail(imxdmac->ld_queue.next, | ||
949 | &imxdmac->ld_active); | ||
950 | } | ||
951 | } | ||
952 | spin_unlock_irqrestore(&imxdma->lock, flags); | ||
337 | } | 953 | } |
338 | 954 | ||
339 | static int __init imxdma_probe(struct platform_device *pdev) | 955 | static int __init imxdma_probe(struct platform_device *pdev) |
340 | { | 956 | { |
341 | struct imxdma_engine *imxdma; | 957 | struct imxdma_engine *imxdma; |
342 | int ret, i; | 958 | int ret, i; |
343 | 959 | ||
960 | |||
344 | imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL); | 961 | imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL); |
345 | if (!imxdma) | 962 | if (!imxdma) |
346 | return -ENOMEM; | 963 | return -ENOMEM; |
347 | 964 | ||
965 | if (cpu_is_mx1()) { | ||
966 | imxdma->base = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR); | ||
967 | } else if (cpu_is_mx21()) { | ||
968 | imxdma->base = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR); | ||
969 | } else if (cpu_is_mx27()) { | ||
970 | imxdma->base = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR); | ||
971 | } else { | ||
972 | kfree(imxdma); | ||
973 | return 0; | ||
974 | } | ||
975 | |||
976 | imxdma->dma_clk = clk_get(NULL, "dma"); | ||
977 | if (IS_ERR(imxdma->dma_clk)) | ||
978 | return PTR_ERR(imxdma->dma_clk); | ||
979 | clk_enable(imxdma->dma_clk); | ||
980 | |||
981 | /* reset DMA module */ | ||
982 | imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR); | ||
983 | |||
984 | if (cpu_is_mx1()) { | ||
985 | ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma); | ||
986 | if (ret) { | ||
987 | dev_warn(imxdma->dev, "Can't register IRQ for DMA\n"); | ||
988 | kfree(imxdma); | ||
989 | return ret; | ||
990 | } | ||
991 | |||
992 | ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma); | ||
993 | if (ret) { | ||
994 | dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n"); | ||
995 | free_irq(MX1_DMA_INT, NULL); | ||
996 | kfree(imxdma); | ||
997 | return ret; | ||
998 | } | ||
999 | } | ||
1000 | |||
1001 | /* enable DMA module */ | ||
1002 | imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR); | ||
1003 | |||
1004 | /* clear all interrupts */ | ||
1005 | imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR); | ||
1006 | |||
1007 | /* disable interrupts */ | ||
1008 | imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR); | ||
1009 | |||
348 | INIT_LIST_HEAD(&imxdma->dma_device.channels); | 1010 | INIT_LIST_HEAD(&imxdma->dma_device.channels); |
349 | 1011 | ||
350 | dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask); | 1012 | dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask); |
351 | dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask); | 1013 | dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask); |
1014 | dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask); | ||
1015 | dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask); | ||
1016 | |||
1017 | /* Initialize 2D global parameters */ | ||
1018 | for (i = 0; i < IMX_DMA_2D_SLOTS; i++) | ||
1019 | imxdma->slots_2d[i].count = 0; | ||
1020 | |||
1021 | spin_lock_init(&imxdma->lock); | ||
352 | 1022 | ||
353 | /* Initialize channel parameters */ | 1023 | /* Initialize channel parameters */ |
354 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { | 1024 | for (i = 0; i < IMX_DMA_CHANNELS; i++) { |
355 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; | 1025 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; |
356 | 1026 | ||
357 | imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine", | 1027 | if (cpu_is_mx21() || cpu_is_mx27()) { |
358 | DMA_PRIO_MEDIUM); | 1028 | ret = request_irq(MX2x_INT_DMACH0 + i, |
359 | if ((int)imxdmac->channel < 0) { | 1029 | dma_irq_handler, 0, "DMA", imxdma); |
360 | ret = -ENODEV; | 1030 | if (ret) { |
361 | goto err_init; | 1031 | dev_warn(imxdma->dev, "Can't register IRQ %d " |
1032 | "for DMA channel %d\n", | ||
1033 | MX2x_INT_DMACH0 + i, i); | ||
1034 | goto err_init; | ||
1035 | } | ||
1036 | init_timer(&imxdmac->watchdog); | ||
1037 | imxdmac->watchdog.function = &imxdma_watchdog; | ||
1038 | imxdmac->watchdog.data = (unsigned long)imxdmac; | ||
362 | } | 1039 | } |
363 | 1040 | ||
364 | imx_dma_setup_handlers(imxdmac->imxdma_channel, | ||
365 | imxdma_irq_handler, imxdma_err_handler, imxdmac); | ||
366 | |||
367 | imxdmac->imxdma = imxdma; | 1041 | imxdmac->imxdma = imxdma; |
368 | spin_lock_init(&imxdmac->lock); | ||
369 | 1042 | ||
1043 | INIT_LIST_HEAD(&imxdmac->ld_queue); | ||
1044 | INIT_LIST_HEAD(&imxdmac->ld_free); | ||
1045 | INIT_LIST_HEAD(&imxdmac->ld_active); | ||
1046 | |||
1047 | tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet, | ||
1048 | (unsigned long)imxdmac); | ||
370 | imxdmac->chan.device = &imxdma->dma_device; | 1049 | imxdmac->chan.device = &imxdma->dma_device; |
1050 | dma_cookie_init(&imxdmac->chan); | ||
371 | imxdmac->channel = i; | 1051 | imxdmac->channel = i; |
372 | 1052 | ||
373 | /* Add the channel to the DMAC list */ | 1053 | /* Add the channel to the DMAC list */ |
374 | list_add_tail(&imxdmac->chan.device_node, &imxdma->dma_device.channels); | 1054 | list_add_tail(&imxdmac->chan.device_node, |
1055 | &imxdma->dma_device.channels); | ||
375 | } | 1056 | } |
376 | 1057 | ||
377 | imxdma->dev = &pdev->dev; | 1058 | imxdma->dev = &pdev->dev; |
@@ -382,11 +1063,14 @@ static int __init imxdma_probe(struct platform_device *pdev) | |||
382 | imxdma->dma_device.device_tx_status = imxdma_tx_status; | 1063 | imxdma->dma_device.device_tx_status = imxdma_tx_status; |
383 | imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg; | 1064 | imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg; |
384 | imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic; | 1065 | imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic; |
1066 | imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy; | ||
1067 | imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved; | ||
385 | imxdma->dma_device.device_control = imxdma_control; | 1068 | imxdma->dma_device.device_control = imxdma_control; |
386 | imxdma->dma_device.device_issue_pending = imxdma_issue_pending; | 1069 | imxdma->dma_device.device_issue_pending = imxdma_issue_pending; |
387 | 1070 | ||
388 | platform_set_drvdata(pdev, imxdma); | 1071 | platform_set_drvdata(pdev, imxdma); |
389 | 1072 | ||
1073 | imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */ | ||
390 | imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms; | 1074 | imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms; |
391 | dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff); | 1075 | dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff); |
392 | 1076 | ||
@@ -399,9 +1083,13 @@ static int __init imxdma_probe(struct platform_device *pdev) | |||
399 | return 0; | 1083 | return 0; |
400 | 1084 | ||
401 | err_init: | 1085 | err_init: |
402 | while (--i >= 0) { | 1086 | |
403 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; | 1087 | if (cpu_is_mx21() || cpu_is_mx27()) { |
404 | imx_dma_free(imxdmac->imxdma_channel); | 1088 | while (--i >= 0) |
1089 | free_irq(MX2x_INT_DMACH0 + i, NULL); | ||
1090 | } else if (cpu_is_mx1()) { | ||
1091 | free_irq(MX1_DMA_INT, NULL); | ||
1092 | free_irq(MX1_DMA_ERR, NULL); | ||
405 | } | 1093 | } |
406 | 1094 | ||
407 | kfree(imxdma); | 1095 | kfree(imxdma); |
@@ -415,10 +1103,12 @@ static int __exit imxdma_remove(struct platform_device *pdev) | |||
415 | 1103 | ||
416 | dma_async_device_unregister(&imxdma->dma_device); | 1104 | dma_async_device_unregister(&imxdma->dma_device); |
417 | 1105 | ||
418 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { | 1106 | if (cpu_is_mx21() || cpu_is_mx27()) { |
419 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; | 1107 | for (i = 0; i < IMX_DMA_CHANNELS; i++) |
420 | 1108 | free_irq(MX2x_INT_DMACH0 + i, NULL); | |
421 | imx_dma_free(imxdmac->imxdma_channel); | 1109 | } else if (cpu_is_mx1()) { |
1110 | free_irq(MX1_DMA_INT, NULL); | ||
1111 | free_irq(MX1_DMA_ERR, NULL); | ||
422 | } | 1112 | } |
423 | 1113 | ||
424 | kfree(imxdma); | 1114 | kfree(imxdma); |
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 63540d3e2153..d3e38e28bb6b 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/types.h> | 22 | #include <linux/types.h> |
23 | #include <linux/bitops.h> | ||
23 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
24 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
25 | #include <linux/clk.h> | 26 | #include <linux/clk.h> |
@@ -41,6 +42,8 @@ | |||
41 | #include <mach/dma.h> | 42 | #include <mach/dma.h> |
42 | #include <mach/hardware.h> | 43 | #include <mach/hardware.h> |
43 | 44 | ||
45 | #include "dmaengine.h" | ||
46 | |||
44 | /* SDMA registers */ | 47 | /* SDMA registers */ |
45 | #define SDMA_H_C0PTR 0x000 | 48 | #define SDMA_H_C0PTR 0x000 |
46 | #define SDMA_H_INTR 0x004 | 49 | #define SDMA_H_INTR 0x004 |
@@ -259,19 +262,18 @@ struct sdma_channel { | |||
259 | unsigned int pc_from_device, pc_to_device; | 262 | unsigned int pc_from_device, pc_to_device; |
260 | unsigned long flags; | 263 | unsigned long flags; |
261 | dma_addr_t per_address; | 264 | dma_addr_t per_address; |
262 | u32 event_mask0, event_mask1; | 265 | unsigned long event_mask[2]; |
263 | u32 watermark_level; | 266 | unsigned long watermark_level; |
264 | u32 shp_addr, per_addr; | 267 | u32 shp_addr, per_addr; |
265 | struct dma_chan chan; | 268 | struct dma_chan chan; |
266 | spinlock_t lock; | 269 | spinlock_t lock; |
267 | struct dma_async_tx_descriptor desc; | 270 | struct dma_async_tx_descriptor desc; |
268 | dma_cookie_t last_completed; | ||
269 | enum dma_status status; | 271 | enum dma_status status; |
270 | unsigned int chn_count; | 272 | unsigned int chn_count; |
271 | unsigned int chn_real_count; | 273 | unsigned int chn_real_count; |
272 | }; | 274 | }; |
273 | 275 | ||
274 | #define IMX_DMA_SG_LOOP (1 << 0) | 276 | #define IMX_DMA_SG_LOOP BIT(0) |
275 | 277 | ||
276 | #define MAX_DMA_CHANNELS 32 | 278 | #define MAX_DMA_CHANNELS 32 |
277 | #define MXC_SDMA_DEFAULT_PRIORITY 1 | 279 | #define MXC_SDMA_DEFAULT_PRIORITY 1 |
@@ -345,9 +347,9 @@ static const struct of_device_id sdma_dt_ids[] = { | |||
345 | }; | 347 | }; |
346 | MODULE_DEVICE_TABLE(of, sdma_dt_ids); | 348 | MODULE_DEVICE_TABLE(of, sdma_dt_ids); |
347 | 349 | ||
348 | #define SDMA_H_CONFIG_DSPDMA (1 << 12) /* indicates if the DSPDMA is used */ | 350 | #define SDMA_H_CONFIG_DSPDMA BIT(12) /* indicates if the DSPDMA is used */ |
349 | #define SDMA_H_CONFIG_RTD_PINS (1 << 11) /* indicates if Real-Time Debug pins are enabled */ | 351 | #define SDMA_H_CONFIG_RTD_PINS BIT(11) /* indicates if Real-Time Debug pins are enabled */ |
350 | #define SDMA_H_CONFIG_ACR (1 << 4) /* indicates if AHB freq /core freq = 2 or 1 */ | 352 | #define SDMA_H_CONFIG_ACR BIT(4) /* indicates if AHB freq /core freq = 2 or 1 */ |
351 | #define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected*/ | 353 | #define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected*/ |
352 | 354 | ||
353 | static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event) | 355 | static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event) |
@@ -362,37 +364,42 @@ static int sdma_config_ownership(struct sdma_channel *sdmac, | |||
362 | { | 364 | { |
363 | struct sdma_engine *sdma = sdmac->sdma; | 365 | struct sdma_engine *sdma = sdmac->sdma; |
364 | int channel = sdmac->channel; | 366 | int channel = sdmac->channel; |
365 | u32 evt, mcu, dsp; | 367 | unsigned long evt, mcu, dsp; |
366 | 368 | ||
367 | if (event_override && mcu_override && dsp_override) | 369 | if (event_override && mcu_override && dsp_override) |
368 | return -EINVAL; | 370 | return -EINVAL; |
369 | 371 | ||
370 | evt = __raw_readl(sdma->regs + SDMA_H_EVTOVR); | 372 | evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR); |
371 | mcu = __raw_readl(sdma->regs + SDMA_H_HOSTOVR); | 373 | mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR); |
372 | dsp = __raw_readl(sdma->regs + SDMA_H_DSPOVR); | 374 | dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR); |
373 | 375 | ||
374 | if (dsp_override) | 376 | if (dsp_override) |
375 | dsp &= ~(1 << channel); | 377 | __clear_bit(channel, &dsp); |
376 | else | 378 | else |
377 | dsp |= (1 << channel); | 379 | __set_bit(channel, &dsp); |
378 | 380 | ||
379 | if (event_override) | 381 | if (event_override) |
380 | evt &= ~(1 << channel); | 382 | __clear_bit(channel, &evt); |
381 | else | 383 | else |
382 | evt |= (1 << channel); | 384 | __set_bit(channel, &evt); |
383 | 385 | ||
384 | if (mcu_override) | 386 | if (mcu_override) |
385 | mcu &= ~(1 << channel); | 387 | __clear_bit(channel, &mcu); |
386 | else | 388 | else |
387 | mcu |= (1 << channel); | 389 | __set_bit(channel, &mcu); |
388 | 390 | ||
389 | __raw_writel(evt, sdma->regs + SDMA_H_EVTOVR); | 391 | writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR); |
390 | __raw_writel(mcu, sdma->regs + SDMA_H_HOSTOVR); | 392 | writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR); |
391 | __raw_writel(dsp, sdma->regs + SDMA_H_DSPOVR); | 393 | writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR); |
392 | 394 | ||
393 | return 0; | 395 | return 0; |
394 | } | 396 | } |
395 | 397 | ||
398 | static void sdma_enable_channel(struct sdma_engine *sdma, int channel) | ||
399 | { | ||
400 | writel(BIT(channel), sdma->regs + SDMA_H_START); | ||
401 | } | ||
402 | |||
396 | /* | 403 | /* |
397 | * sdma_run_channel - run a channel and wait till it's done | 404 | * sdma_run_channel - run a channel and wait till it's done |
398 | */ | 405 | */ |
@@ -404,7 +411,7 @@ static int sdma_run_channel(struct sdma_channel *sdmac) | |||
404 | 411 | ||
405 | init_completion(&sdmac->done); | 412 | init_completion(&sdmac->done); |
406 | 413 | ||
407 | __raw_writel(1 << channel, sdma->regs + SDMA_H_START); | 414 | sdma_enable_channel(sdma, channel); |
408 | 415 | ||
409 | ret = wait_for_completion_timeout(&sdmac->done, HZ); | 416 | ret = wait_for_completion_timeout(&sdmac->done, HZ); |
410 | 417 | ||
@@ -451,12 +458,12 @@ static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event) | |||
451 | { | 458 | { |
452 | struct sdma_engine *sdma = sdmac->sdma; | 459 | struct sdma_engine *sdma = sdmac->sdma; |
453 | int channel = sdmac->channel; | 460 | int channel = sdmac->channel; |
454 | u32 val; | 461 | unsigned long val; |
455 | u32 chnenbl = chnenbl_ofs(sdma, event); | 462 | u32 chnenbl = chnenbl_ofs(sdma, event); |
456 | 463 | ||
457 | val = __raw_readl(sdma->regs + chnenbl); | 464 | val = readl_relaxed(sdma->regs + chnenbl); |
458 | val |= (1 << channel); | 465 | __set_bit(channel, &val); |
459 | __raw_writel(val, sdma->regs + chnenbl); | 466 | writel_relaxed(val, sdma->regs + chnenbl); |
460 | } | 467 | } |
461 | 468 | ||
462 | static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event) | 469 | static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event) |
@@ -464,11 +471,11 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event) | |||
464 | struct sdma_engine *sdma = sdmac->sdma; | 471 | struct sdma_engine *sdma = sdmac->sdma; |
465 | int channel = sdmac->channel; | 472 | int channel = sdmac->channel; |
466 | u32 chnenbl = chnenbl_ofs(sdma, event); | 473 | u32 chnenbl = chnenbl_ofs(sdma, event); |
467 | u32 val; | 474 | unsigned long val; |
468 | 475 | ||
469 | val = __raw_readl(sdma->regs + chnenbl); | 476 | val = readl_relaxed(sdma->regs + chnenbl); |
470 | val &= ~(1 << channel); | 477 | __clear_bit(channel, &val); |
471 | __raw_writel(val, sdma->regs + chnenbl); | 478 | writel_relaxed(val, sdma->regs + chnenbl); |
472 | } | 479 | } |
473 | 480 | ||
474 | static void sdma_handle_channel_loop(struct sdma_channel *sdmac) | 481 | static void sdma_handle_channel_loop(struct sdma_channel *sdmac) |
@@ -522,7 +529,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) | |||
522 | else | 529 | else |
523 | sdmac->status = DMA_SUCCESS; | 530 | sdmac->status = DMA_SUCCESS; |
524 | 531 | ||
525 | sdmac->last_completed = sdmac->desc.cookie; | 532 | dma_cookie_complete(&sdmac->desc); |
526 | if (sdmac->desc.callback) | 533 | if (sdmac->desc.callback) |
527 | sdmac->desc.callback(sdmac->desc.callback_param); | 534 | sdmac->desc.callback(sdmac->desc.callback_param); |
528 | } | 535 | } |
@@ -544,10 +551,10 @@ static void mxc_sdma_handle_channel(struct sdma_channel *sdmac) | |||
544 | static irqreturn_t sdma_int_handler(int irq, void *dev_id) | 551 | static irqreturn_t sdma_int_handler(int irq, void *dev_id) |
545 | { | 552 | { |
546 | struct sdma_engine *sdma = dev_id; | 553 | struct sdma_engine *sdma = dev_id; |
547 | u32 stat; | 554 | unsigned long stat; |
548 | 555 | ||
549 | stat = __raw_readl(sdma->regs + SDMA_H_INTR); | 556 | stat = readl_relaxed(sdma->regs + SDMA_H_INTR); |
550 | __raw_writel(stat, sdma->regs + SDMA_H_INTR); | 557 | writel_relaxed(stat, sdma->regs + SDMA_H_INTR); |
551 | 558 | ||
552 | while (stat) { | 559 | while (stat) { |
553 | int channel = fls(stat) - 1; | 560 | int channel = fls(stat) - 1; |
@@ -555,7 +562,7 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id) | |||
555 | 562 | ||
556 | mxc_sdma_handle_channel(sdmac); | 563 | mxc_sdma_handle_channel(sdmac); |
557 | 564 | ||
558 | stat &= ~(1 << channel); | 565 | __clear_bit(channel, &stat); |
559 | } | 566 | } |
560 | 567 | ||
561 | return IRQ_HANDLED; | 568 | return IRQ_HANDLED; |
@@ -663,11 +670,11 @@ static int sdma_load_context(struct sdma_channel *sdmac) | |||
663 | return load_address; | 670 | return load_address; |
664 | 671 | ||
665 | dev_dbg(sdma->dev, "load_address = %d\n", load_address); | 672 | dev_dbg(sdma->dev, "load_address = %d\n", load_address); |
666 | dev_dbg(sdma->dev, "wml = 0x%08x\n", sdmac->watermark_level); | 673 | dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level); |
667 | dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr); | 674 | dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr); |
668 | dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr); | 675 | dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr); |
669 | dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0); | 676 | dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]); |
670 | dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1); | 677 | dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]); |
671 | 678 | ||
672 | mutex_lock(&sdma->channel_0_lock); | 679 | mutex_lock(&sdma->channel_0_lock); |
673 | 680 | ||
@@ -677,8 +684,8 @@ static int sdma_load_context(struct sdma_channel *sdmac) | |||
677 | /* Send by context the event mask, base address for peripheral | 684 | /* Send by context the event mask, base address for peripheral |
678 | * and watermark level | 685 | * and watermark level |
679 | */ | 686 | */ |
680 | context->gReg[0] = sdmac->event_mask1; | 687 | context->gReg[0] = sdmac->event_mask[1]; |
681 | context->gReg[1] = sdmac->event_mask0; | 688 | context->gReg[1] = sdmac->event_mask[0]; |
682 | context->gReg[2] = sdmac->per_addr; | 689 | context->gReg[2] = sdmac->per_addr; |
683 | context->gReg[6] = sdmac->shp_addr; | 690 | context->gReg[6] = sdmac->shp_addr; |
684 | context->gReg[7] = sdmac->watermark_level; | 691 | context->gReg[7] = sdmac->watermark_level; |
@@ -701,7 +708,7 @@ static void sdma_disable_channel(struct sdma_channel *sdmac) | |||
701 | struct sdma_engine *sdma = sdmac->sdma; | 708 | struct sdma_engine *sdma = sdmac->sdma; |
702 | int channel = sdmac->channel; | 709 | int channel = sdmac->channel; |
703 | 710 | ||
704 | __raw_writel(1 << channel, sdma->regs + SDMA_H_STATSTOP); | 711 | writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP); |
705 | sdmac->status = DMA_ERROR; | 712 | sdmac->status = DMA_ERROR; |
706 | } | 713 | } |
707 | 714 | ||
@@ -711,13 +718,13 @@ static int sdma_config_channel(struct sdma_channel *sdmac) | |||
711 | 718 | ||
712 | sdma_disable_channel(sdmac); | 719 | sdma_disable_channel(sdmac); |
713 | 720 | ||
714 | sdmac->event_mask0 = 0; | 721 | sdmac->event_mask[0] = 0; |
715 | sdmac->event_mask1 = 0; | 722 | sdmac->event_mask[1] = 0; |
716 | sdmac->shp_addr = 0; | 723 | sdmac->shp_addr = 0; |
717 | sdmac->per_addr = 0; | 724 | sdmac->per_addr = 0; |
718 | 725 | ||
719 | if (sdmac->event_id0) { | 726 | if (sdmac->event_id0) { |
720 | if (sdmac->event_id0 > 32) | 727 | if (sdmac->event_id0 >= sdmac->sdma->num_events) |
721 | return -EINVAL; | 728 | return -EINVAL; |
722 | sdma_event_enable(sdmac, sdmac->event_id0); | 729 | sdma_event_enable(sdmac, sdmac->event_id0); |
723 | } | 730 | } |
@@ -740,15 +747,14 @@ static int sdma_config_channel(struct sdma_channel *sdmac) | |||
740 | (sdmac->peripheral_type != IMX_DMATYPE_DSP)) { | 747 | (sdmac->peripheral_type != IMX_DMATYPE_DSP)) { |
741 | /* Handle multiple event channels differently */ | 748 | /* Handle multiple event channels differently */ |
742 | if (sdmac->event_id1) { | 749 | if (sdmac->event_id1) { |
743 | sdmac->event_mask1 = 1 << (sdmac->event_id1 % 32); | 750 | sdmac->event_mask[1] = BIT(sdmac->event_id1 % 32); |
744 | if (sdmac->event_id1 > 31) | 751 | if (sdmac->event_id1 > 31) |
745 | sdmac->watermark_level |= 1 << 31; | 752 | __set_bit(31, &sdmac->watermark_level); |
746 | sdmac->event_mask0 = 1 << (sdmac->event_id0 % 32); | 753 | sdmac->event_mask[0] = BIT(sdmac->event_id0 % 32); |
747 | if (sdmac->event_id0 > 31) | 754 | if (sdmac->event_id0 > 31) |
748 | sdmac->watermark_level |= 1 << 30; | 755 | __set_bit(30, &sdmac->watermark_level); |
749 | } else { | 756 | } else { |
750 | sdmac->event_mask0 = 1 << sdmac->event_id0; | 757 | __set_bit(sdmac->event_id0, sdmac->event_mask); |
751 | sdmac->event_mask1 = 1 << (sdmac->event_id0 - 32); | ||
752 | } | 758 | } |
753 | /* Watermark Level */ | 759 | /* Watermark Level */ |
754 | sdmac->watermark_level |= sdmac->watermark_level; | 760 | sdmac->watermark_level |= sdmac->watermark_level; |
@@ -774,7 +780,7 @@ static int sdma_set_channel_priority(struct sdma_channel *sdmac, | |||
774 | return -EINVAL; | 780 | return -EINVAL; |
775 | } | 781 | } |
776 | 782 | ||
777 | __raw_writel(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel); | 783 | writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel); |
778 | 784 | ||
779 | return 0; | 785 | return 0; |
780 | } | 786 | } |
@@ -796,8 +802,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac) | |||
796 | sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys; | 802 | sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys; |
797 | sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; | 803 | sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; |
798 | 804 | ||
799 | clk_enable(sdma->clk); | ||
800 | |||
801 | sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY); | 805 | sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY); |
802 | 806 | ||
803 | init_completion(&sdmac->done); | 807 | init_completion(&sdmac->done); |
@@ -810,24 +814,6 @@ out: | |||
810 | return ret; | 814 | return ret; |
811 | } | 815 | } |
812 | 816 | ||
813 | static void sdma_enable_channel(struct sdma_engine *sdma, int channel) | ||
814 | { | ||
815 | __raw_writel(1 << channel, sdma->regs + SDMA_H_START); | ||
816 | } | ||
817 | |||
818 | static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac) | ||
819 | { | ||
820 | dma_cookie_t cookie = sdmac->chan.cookie; | ||
821 | |||
822 | if (++cookie < 0) | ||
823 | cookie = 1; | ||
824 | |||
825 | sdmac->chan.cookie = cookie; | ||
826 | sdmac->desc.cookie = cookie; | ||
827 | |||
828 | return cookie; | ||
829 | } | ||
830 | |||
831 | static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) | 817 | static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) |
832 | { | 818 | { |
833 | return container_of(chan, struct sdma_channel, chan); | 819 | return container_of(chan, struct sdma_channel, chan); |
@@ -837,14 +823,11 @@ static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
837 | { | 823 | { |
838 | unsigned long flags; | 824 | unsigned long flags; |
839 | struct sdma_channel *sdmac = to_sdma_chan(tx->chan); | 825 | struct sdma_channel *sdmac = to_sdma_chan(tx->chan); |
840 | struct sdma_engine *sdma = sdmac->sdma; | ||
841 | dma_cookie_t cookie; | 826 | dma_cookie_t cookie; |
842 | 827 | ||
843 | spin_lock_irqsave(&sdmac->lock, flags); | 828 | spin_lock_irqsave(&sdmac->lock, flags); |
844 | 829 | ||
845 | cookie = sdma_assign_cookie(sdmac); | 830 | cookie = dma_cookie_assign(tx); |
846 | |||
847 | sdma_enable_channel(sdma, sdmac->channel); | ||
848 | 831 | ||
849 | spin_unlock_irqrestore(&sdmac->lock, flags); | 832 | spin_unlock_irqrestore(&sdmac->lock, flags); |
850 | 833 | ||
@@ -875,11 +858,14 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan) | |||
875 | 858 | ||
876 | sdmac->peripheral_type = data->peripheral_type; | 859 | sdmac->peripheral_type = data->peripheral_type; |
877 | sdmac->event_id0 = data->dma_request; | 860 | sdmac->event_id0 = data->dma_request; |
878 | ret = sdma_set_channel_priority(sdmac, prio); | 861 | |
862 | clk_enable(sdmac->sdma->clk); | ||
863 | |||
864 | ret = sdma_request_channel(sdmac); | ||
879 | if (ret) | 865 | if (ret) |
880 | return ret; | 866 | return ret; |
881 | 867 | ||
882 | ret = sdma_request_channel(sdmac); | 868 | ret = sdma_set_channel_priority(sdmac, prio); |
883 | if (ret) | 869 | if (ret) |
884 | return ret; | 870 | return ret; |
885 | 871 | ||
@@ -916,7 +902,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan) | |||
916 | static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | 902 | static struct dma_async_tx_descriptor *sdma_prep_slave_sg( |
917 | struct dma_chan *chan, struct scatterlist *sgl, | 903 | struct dma_chan *chan, struct scatterlist *sgl, |
918 | unsigned int sg_len, enum dma_transfer_direction direction, | 904 | unsigned int sg_len, enum dma_transfer_direction direction, |
919 | unsigned long flags) | 905 | unsigned long flags, void *context) |
920 | { | 906 | { |
921 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 907 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
922 | struct sdma_engine *sdma = sdmac->sdma; | 908 | struct sdma_engine *sdma = sdmac->sdma; |
@@ -1014,7 +1000,8 @@ err_out: | |||
1014 | 1000 | ||
1015 | static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( | 1001 | static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( |
1016 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | 1002 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, |
1017 | size_t period_len, enum dma_transfer_direction direction) | 1003 | size_t period_len, enum dma_transfer_direction direction, |
1004 | void *context) | ||
1018 | { | 1005 | { |
1019 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 1006 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
1020 | struct sdma_engine *sdma = sdmac->sdma; | 1007 | struct sdma_engine *sdma = sdmac->sdma; |
@@ -1128,7 +1115,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan, | |||
1128 | 1115 | ||
1129 | last_used = chan->cookie; | 1116 | last_used = chan->cookie; |
1130 | 1117 | ||
1131 | dma_set_tx_state(txstate, sdmac->last_completed, last_used, | 1118 | dma_set_tx_state(txstate, chan->completed_cookie, last_used, |
1132 | sdmac->chn_count - sdmac->chn_real_count); | 1119 | sdmac->chn_count - sdmac->chn_real_count); |
1133 | 1120 | ||
1134 | return sdmac->status; | 1121 | return sdmac->status; |
@@ -1136,9 +1123,11 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan, | |||
1136 | 1123 | ||
1137 | static void sdma_issue_pending(struct dma_chan *chan) | 1124 | static void sdma_issue_pending(struct dma_chan *chan) |
1138 | { | 1125 | { |
1139 | /* | 1126 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
1140 | * Nothing to do. We only have a single descriptor | 1127 | struct sdma_engine *sdma = sdmac->sdma; |
1141 | */ | 1128 | |
1129 | if (sdmac->status == DMA_IN_PROGRESS) | ||
1130 | sdma_enable_channel(sdma, sdmac->channel); | ||
1142 | } | 1131 | } |
1143 | 1132 | ||
1144 | #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 | 1133 | #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 |
@@ -1230,7 +1219,7 @@ static int __init sdma_init(struct sdma_engine *sdma) | |||
1230 | clk_enable(sdma->clk); | 1219 | clk_enable(sdma->clk); |
1231 | 1220 | ||
1232 | /* Be sure SDMA has not started yet */ | 1221 | /* Be sure SDMA has not started yet */ |
1233 | __raw_writel(0, sdma->regs + SDMA_H_C0PTR); | 1222 | writel_relaxed(0, sdma->regs + SDMA_H_C0PTR); |
1234 | 1223 | ||
1235 | sdma->channel_control = dma_alloc_coherent(NULL, | 1224 | sdma->channel_control = dma_alloc_coherent(NULL, |
1236 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) + | 1225 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) + |
@@ -1253,11 +1242,11 @@ static int __init sdma_init(struct sdma_engine *sdma) | |||
1253 | 1242 | ||
1254 | /* disable all channels */ | 1243 | /* disable all channels */ |
1255 | for (i = 0; i < sdma->num_events; i++) | 1244 | for (i = 0; i < sdma->num_events; i++) |
1256 | __raw_writel(0, sdma->regs + chnenbl_ofs(sdma, i)); | 1245 | writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i)); |
1257 | 1246 | ||
1258 | /* All channels have priority 0 */ | 1247 | /* All channels have priority 0 */ |
1259 | for (i = 0; i < MAX_DMA_CHANNELS; i++) | 1248 | for (i = 0; i < MAX_DMA_CHANNELS; i++) |
1260 | __raw_writel(0, sdma->regs + SDMA_CHNPRI_0 + i * 4); | 1249 | writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4); |
1261 | 1250 | ||
1262 | ret = sdma_request_channel(&sdma->channel[0]); | 1251 | ret = sdma_request_channel(&sdma->channel[0]); |
1263 | if (ret) | 1252 | if (ret) |
@@ -1266,16 +1255,16 @@ static int __init sdma_init(struct sdma_engine *sdma) | |||
1266 | sdma_config_ownership(&sdma->channel[0], false, true, false); | 1255 | sdma_config_ownership(&sdma->channel[0], false, true, false); |
1267 | 1256 | ||
1268 | /* Set Command Channel (Channel Zero) */ | 1257 | /* Set Command Channel (Channel Zero) */ |
1269 | __raw_writel(0x4050, sdma->regs + SDMA_CHN0ADDR); | 1258 | writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR); |
1270 | 1259 | ||
1271 | /* Set bits of CONFIG register but with static context switching */ | 1260 | /* Set bits of CONFIG register but with static context switching */ |
1272 | /* FIXME: Check whether to set ACR bit depending on clock ratios */ | 1261 | /* FIXME: Check whether to set ACR bit depending on clock ratios */ |
1273 | __raw_writel(0, sdma->regs + SDMA_H_CONFIG); | 1262 | writel_relaxed(0, sdma->regs + SDMA_H_CONFIG); |
1274 | 1263 | ||
1275 | __raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR); | 1264 | writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR); |
1276 | 1265 | ||
1277 | /* Set bits of CONFIG register with given context switching mode */ | 1266 | /* Set bits of CONFIG register with given context switching mode */ |
1278 | __raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); | 1267 | writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); |
1279 | 1268 | ||
1280 | /* Initializes channel's priorities */ | 1269 | /* Initializes channel's priorities */ |
1281 | sdma_set_channel_priority(&sdma->channel[0], 7); | 1270 | sdma_set_channel_priority(&sdma->channel[0], 7); |
@@ -1367,6 +1356,7 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1367 | spin_lock_init(&sdmac->lock); | 1356 | spin_lock_init(&sdmac->lock); |
1368 | 1357 | ||
1369 | sdmac->chan.device = &sdma->dma_device; | 1358 | sdmac->chan.device = &sdma->dma_device; |
1359 | dma_cookie_init(&sdmac->chan); | ||
1370 | sdmac->channel = i; | 1360 | sdmac->channel = i; |
1371 | 1361 | ||
1372 | /* | 1362 | /* |
@@ -1387,7 +1377,9 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1387 | sdma_add_scripts(sdma, pdata->script_addrs); | 1377 | sdma_add_scripts(sdma, pdata->script_addrs); |
1388 | 1378 | ||
1389 | if (pdata) { | 1379 | if (pdata) { |
1390 | sdma_get_firmware(sdma, pdata->fw_name); | 1380 | ret = sdma_get_firmware(sdma, pdata->fw_name); |
1381 | if (ret) | ||
1382 | dev_warn(&pdev->dev, "failed to get firmware from platform data\n"); | ||
1391 | } else { | 1383 | } else { |
1392 | /* | 1384 | /* |
1393 | * Because that device tree does not encode ROM script address, | 1385 | * Because that device tree does not encode ROM script address, |
@@ -1396,15 +1388,12 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1396 | */ | 1388 | */ |
1397 | ret = of_property_read_string(np, "fsl,sdma-ram-script-name", | 1389 | ret = of_property_read_string(np, "fsl,sdma-ram-script-name", |
1398 | &fw_name); | 1390 | &fw_name); |
1399 | if (ret) { | 1391 | if (ret) |
1400 | dev_err(&pdev->dev, "failed to get firmware name\n"); | 1392 | dev_warn(&pdev->dev, "failed to get firmware name\n"); |
1401 | goto err_init; | 1393 | else { |
1402 | } | 1394 | ret = sdma_get_firmware(sdma, fw_name); |
1403 | 1395 | if (ret) | |
1404 | ret = sdma_get_firmware(sdma, fw_name); | 1396 | dev_warn(&pdev->dev, "failed to get firmware from device tree\n"); |
1405 | if (ret) { | ||
1406 | dev_err(&pdev->dev, "failed to get firmware\n"); | ||
1407 | goto err_init; | ||
1408 | } | 1397 | } |
1409 | } | 1398 | } |
1410 | 1399 | ||
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index 74f70aadf9e4..c900ca7aaec4 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c | |||
@@ -29,6 +29,8 @@ | |||
29 | #include <linux/intel_mid_dma.h> | 29 | #include <linux/intel_mid_dma.h> |
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | 31 | ||
32 | #include "dmaengine.h" | ||
33 | |||
32 | #define MAX_CHAN 4 /*max ch across controllers*/ | 34 | #define MAX_CHAN 4 /*max ch across controllers*/ |
33 | #include "intel_mid_dma_regs.h" | 35 | #include "intel_mid_dma_regs.h" |
34 | 36 | ||
@@ -288,7 +290,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, | |||
288 | struct intel_mid_dma_lli *llitem; | 290 | struct intel_mid_dma_lli *llitem; |
289 | void *param_txd = NULL; | 291 | void *param_txd = NULL; |
290 | 292 | ||
291 | midc->completed = txd->cookie; | 293 | dma_cookie_complete(txd); |
292 | callback_txd = txd->callback; | 294 | callback_txd = txd->callback; |
293 | param_txd = txd->callback_param; | 295 | param_txd = txd->callback_param; |
294 | 296 | ||
@@ -434,14 +436,7 @@ static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
434 | dma_cookie_t cookie; | 436 | dma_cookie_t cookie; |
435 | 437 | ||
436 | spin_lock_bh(&midc->lock); | 438 | spin_lock_bh(&midc->lock); |
437 | cookie = midc->chan.cookie; | 439 | cookie = dma_cookie_assign(tx); |
438 | |||
439 | if (++cookie < 0) | ||
440 | cookie = 1; | ||
441 | |||
442 | midc->chan.cookie = cookie; | ||
443 | desc->txd.cookie = cookie; | ||
444 | |||
445 | 440 | ||
446 | if (list_empty(&midc->active_list)) | 441 | if (list_empty(&midc->active_list)) |
447 | list_add_tail(&desc->desc_node, &midc->active_list); | 442 | list_add_tail(&desc->desc_node, &midc->active_list); |
@@ -482,31 +477,18 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, | |||
482 | dma_cookie_t cookie, | 477 | dma_cookie_t cookie, |
483 | struct dma_tx_state *txstate) | 478 | struct dma_tx_state *txstate) |
484 | { | 479 | { |
485 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); | 480 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); |
486 | dma_cookie_t last_used; | 481 | enum dma_status ret; |
487 | dma_cookie_t last_complete; | ||
488 | int ret; | ||
489 | 482 | ||
490 | last_complete = midc->completed; | 483 | ret = dma_cookie_status(chan, cookie, txstate); |
491 | last_used = chan->cookie; | ||
492 | |||
493 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
494 | if (ret != DMA_SUCCESS) { | 484 | if (ret != DMA_SUCCESS) { |
495 | spin_lock_bh(&midc->lock); | 485 | spin_lock_bh(&midc->lock); |
496 | midc_scan_descriptors(to_middma_device(chan->device), midc); | 486 | midc_scan_descriptors(to_middma_device(chan->device), midc); |
497 | spin_unlock_bh(&midc->lock); | 487 | spin_unlock_bh(&midc->lock); |
498 | 488 | ||
499 | last_complete = midc->completed; | 489 | ret = dma_cookie_status(chan, cookie, txstate); |
500 | last_used = chan->cookie; | ||
501 | |||
502 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
503 | } | 490 | } |
504 | 491 | ||
505 | if (txstate) { | ||
506 | txstate->last = last_complete; | ||
507 | txstate->used = last_used; | ||
508 | txstate->residue = 0; | ||
509 | } | ||
510 | return ret; | 492 | return ret; |
511 | } | 493 | } |
512 | 494 | ||
@@ -732,13 +714,14 @@ err_desc_get: | |||
732 | * @sg_len: length of sg txn | 714 | * @sg_len: length of sg txn |
733 | * @direction: DMA transfer direction | 715 | * @direction: DMA transfer direction |
734 | * @flags: DMA flags | 716 | * @flags: DMA flags |
717 | * @context: transfer context (ignored) | ||
735 | * | 718 | * |
736 | * Prepares LLI based peripheral transfer | 719 | * Prepares LLI based peripheral transfer |
737 | */ | 720 | */ |
738 | static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( | 721 | static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( |
739 | struct dma_chan *chan, struct scatterlist *sgl, | 722 | struct dma_chan *chan, struct scatterlist *sgl, |
740 | unsigned int sg_len, enum dma_transfer_direction direction, | 723 | unsigned int sg_len, enum dma_transfer_direction direction, |
741 | unsigned long flags) | 724 | unsigned long flags, void *context) |
742 | { | 725 | { |
743 | struct intel_mid_dma_chan *midc = NULL; | 726 | struct intel_mid_dma_chan *midc = NULL; |
744 | struct intel_mid_dma_slave *mids = NULL; | 727 | struct intel_mid_dma_slave *mids = NULL; |
@@ -832,7 +815,6 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) | |||
832 | /*trying to free ch in use!!!!!*/ | 815 | /*trying to free ch in use!!!!!*/ |
833 | pr_err("ERR_MDMA: trying to free ch in use\n"); | 816 | pr_err("ERR_MDMA: trying to free ch in use\n"); |
834 | } | 817 | } |
835 | pm_runtime_put(&mid->pdev->dev); | ||
836 | spin_lock_bh(&midc->lock); | 818 | spin_lock_bh(&midc->lock); |
837 | midc->descs_allocated = 0; | 819 | midc->descs_allocated = 0; |
838 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { | 820 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { |
@@ -853,6 +835,7 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) | |||
853 | /* Disable CH interrupts */ | 835 | /* Disable CH interrupts */ |
854 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); | 836 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); |
855 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); | 837 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); |
838 | pm_runtime_put(&mid->pdev->dev); | ||
856 | } | 839 | } |
857 | 840 | ||
858 | /** | 841 | /** |
@@ -886,7 +869,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) | |||
886 | pm_runtime_put(&mid->pdev->dev); | 869 | pm_runtime_put(&mid->pdev->dev); |
887 | return -EIO; | 870 | return -EIO; |
888 | } | 871 | } |
889 | midc->completed = chan->cookie = 1; | 872 | dma_cookie_init(chan); |
890 | 873 | ||
891 | spin_lock_bh(&midc->lock); | 874 | spin_lock_bh(&midc->lock); |
892 | while (midc->descs_allocated < DESCS_PER_CHANNEL) { | 875 | while (midc->descs_allocated < DESCS_PER_CHANNEL) { |
@@ -1056,7 +1039,8 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) | |||
1056 | } | 1039 | } |
1057 | err_status &= mid->intr_mask; | 1040 | err_status &= mid->intr_mask; |
1058 | if (err_status) { | 1041 | if (err_status) { |
1059 | iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR); | 1042 | iowrite32((err_status << INT_MASK_WE), |
1043 | mid->dma_base + MASK_ERR); | ||
1060 | call_tasklet = 1; | 1044 | call_tasklet = 1; |
1061 | } | 1045 | } |
1062 | if (call_tasklet) | 1046 | if (call_tasklet) |
@@ -1118,7 +1102,7 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
1118 | struct intel_mid_dma_chan *midch = &dma->ch[i]; | 1102 | struct intel_mid_dma_chan *midch = &dma->ch[i]; |
1119 | 1103 | ||
1120 | midch->chan.device = &dma->common; | 1104 | midch->chan.device = &dma->common; |
1121 | midch->chan.cookie = 1; | 1105 | dma_cookie_init(&midch->chan); |
1122 | midch->ch_id = dma->chan_base + i; | 1106 | midch->ch_id = dma->chan_base + i; |
1123 | pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id); | 1107 | pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id); |
1124 | 1108 | ||
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h index c83d35b97bd8..1bfa9268feaf 100644 --- a/drivers/dma/intel_mid_dma_regs.h +++ b/drivers/dma/intel_mid_dma_regs.h | |||
@@ -165,7 +165,6 @@ union intel_mid_dma_cfg_hi { | |||
165 | * @dma_base: MMIO register space DMA engine base pointer | 165 | * @dma_base: MMIO register space DMA engine base pointer |
166 | * @ch_id: DMA channel id | 166 | * @ch_id: DMA channel id |
167 | * @lock: channel spinlock | 167 | * @lock: channel spinlock |
168 | * @completed: DMA cookie | ||
169 | * @active_list: current active descriptors | 168 | * @active_list: current active descriptors |
170 | * @queue: current queued up descriptors | 169 | * @queue: current queued up descriptors |
171 | * @free_list: current free descriptors | 170 | * @free_list: current free descriptors |
@@ -183,7 +182,6 @@ struct intel_mid_dma_chan { | |||
183 | void __iomem *dma_base; | 182 | void __iomem *dma_base; |
184 | int ch_id; | 183 | int ch_id; |
185 | spinlock_t lock; | 184 | spinlock_t lock; |
186 | dma_cookie_t completed; | ||
187 | struct list_head active_list; | 185 | struct list_head active_list; |
188 | struct list_head queue; | 186 | struct list_head queue; |
189 | struct list_head free_list; | 187 | struct list_head free_list; |
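The per-driver completed-cookie fields removed here (and in the other drivers below) are superseded by a member on struct dma_chan itself; both cookies are now read straight off the channel. A small illustrative helper showing the assumed field accesses (foo_report_cookies is not from the patch):

#include <linux/dmaengine.h>

static inline void foo_report_cookies(struct dma_chan *chan,
		dma_cookie_t *used, dma_cookie_t *complete)
{
	*used = chan->cookie;			/* last cookie assigned at submit */
	*complete = chan->completed_cookie;	/* last cookie marked complete */
}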
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index a4d6cb0c0343..31493d80e0e9 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c | |||
@@ -40,6 +40,8 @@ | |||
40 | #include "registers.h" | 40 | #include "registers.h" |
41 | #include "hw.h" | 41 | #include "hw.h" |
42 | 42 | ||
43 | #include "../dmaengine.h" | ||
44 | |||
43 | int ioat_pending_level = 4; | 45 | int ioat_pending_level = 4; |
44 | module_param(ioat_pending_level, int, 0644); | 46 | module_param(ioat_pending_level, int, 0644); |
45 | MODULE_PARM_DESC(ioat_pending_level, | 47 | MODULE_PARM_DESC(ioat_pending_level, |
@@ -107,6 +109,7 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c | |||
107 | chan->reg_base = device->reg_base + (0x80 * (idx + 1)); | 109 | chan->reg_base = device->reg_base + (0x80 * (idx + 1)); |
108 | spin_lock_init(&chan->cleanup_lock); | 110 | spin_lock_init(&chan->cleanup_lock); |
109 | chan->common.device = dma; | 111 | chan->common.device = dma; |
112 | dma_cookie_init(&chan->common); | ||
110 | list_add_tail(&chan->common.device_node, &dma->channels); | 113 | list_add_tail(&chan->common.device_node, &dma->channels); |
111 | device->idx[idx] = chan; | 114 | device->idx[idx] = chan; |
112 | init_timer(&chan->timer); | 115 | init_timer(&chan->timer); |
@@ -235,12 +238,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) | |||
235 | 238 | ||
236 | spin_lock_bh(&ioat->desc_lock); | 239 | spin_lock_bh(&ioat->desc_lock); |
237 | /* cookie incr and addition to used_list must be atomic */ | 240 | /* cookie incr and addition to used_list must be atomic */ |
238 | cookie = c->cookie; | 241 | cookie = dma_cookie_assign(tx); |
239 | cookie++; | ||
240 | if (cookie < 0) | ||
241 | cookie = 1; | ||
242 | c->cookie = cookie; | ||
243 | tx->cookie = cookie; | ||
244 | dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); | 242 | dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); |
245 | 243 | ||
246 | /* write address into NextDescriptor field of last desc in chain */ | 244 | /* write address into NextDescriptor field of last desc in chain */ |
@@ -603,8 +601,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete) | |||
603 | */ | 601 | */ |
604 | dump_desc_dbg(ioat, desc); | 602 | dump_desc_dbg(ioat, desc); |
605 | if (tx->cookie) { | 603 | if (tx->cookie) { |
606 | chan->completed_cookie = tx->cookie; | 604 | dma_cookie_complete(tx); |
607 | tx->cookie = 0; | ||
608 | ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); | 605 | ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); |
609 | ioat->active -= desc->hw->tx_cnt; | 606 | ioat->active -= desc->hw->tx_cnt; |
610 | if (tx->callback) { | 607 | if (tx->callback) { |
@@ -733,13 +730,15 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, | |||
733 | { | 730 | { |
734 | struct ioat_chan_common *chan = to_chan_common(c); | 731 | struct ioat_chan_common *chan = to_chan_common(c); |
735 | struct ioatdma_device *device = chan->device; | 732 | struct ioatdma_device *device = chan->device; |
733 | enum dma_status ret; | ||
736 | 734 | ||
737 | if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS) | 735 | ret = dma_cookie_status(c, cookie, txstate); |
738 | return DMA_SUCCESS; | 736 | if (ret == DMA_SUCCESS) |
737 | return ret; | ||
739 | 738 | ||
740 | device->cleanup_fn((unsigned long) c); | 739 | device->cleanup_fn((unsigned long) c); |
741 | 740 | ||
742 | return ioat_tx_status(c, cookie, txstate); | 741 | return dma_cookie_status(c, cookie, txstate); |
743 | } | 742 | } |
744 | 743 | ||
745 | static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) | 744 | static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) |
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 5216c8a92a21..c7888bccd974 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h | |||
@@ -90,7 +90,6 @@ struct ioat_chan_common { | |||
90 | void __iomem *reg_base; | 90 | void __iomem *reg_base; |
91 | unsigned long last_completion; | 91 | unsigned long last_completion; |
92 | spinlock_t cleanup_lock; | 92 | spinlock_t cleanup_lock; |
93 | dma_cookie_t completed_cookie; | ||
94 | unsigned long state; | 93 | unsigned long state; |
95 | #define IOAT_COMPLETION_PENDING 0 | 94 | #define IOAT_COMPLETION_PENDING 0 |
96 | #define IOAT_COMPLETION_ACK 1 | 95 | #define IOAT_COMPLETION_ACK 1 |
@@ -143,28 +142,6 @@ static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c) | |||
143 | return container_of(chan, struct ioat_dma_chan, base); | 142 | return container_of(chan, struct ioat_dma_chan, base); |
144 | } | 143 | } |
145 | 144 | ||
146 | /** | ||
147 | * ioat_tx_status - poll the status of an ioat transaction | ||
148 | * @c: channel handle | ||
149 | * @cookie: transaction identifier | ||
150 | * @txstate: if set, updated with the transaction state | ||
151 | */ | ||
152 | static inline enum dma_status | ||
153 | ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, | ||
154 | struct dma_tx_state *txstate) | ||
155 | { | ||
156 | struct ioat_chan_common *chan = to_chan_common(c); | ||
157 | dma_cookie_t last_used; | ||
158 | dma_cookie_t last_complete; | ||
159 | |||
160 | last_used = c->cookie; | ||
161 | last_complete = chan->completed_cookie; | ||
162 | |||
163 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
164 | |||
165 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
166 | } | ||
167 | |||
168 | /* wrapper around hardware descriptor format + additional software fields */ | 145 | /* wrapper around hardware descriptor format + additional software fields */ |
169 | 146 | ||
170 | /** | 147 | /** |
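The ioat_tx_status() helper removed above open-coded what the shared dma_cookie_status() now provides. A hedged sketch of that equivalent logic, built only from calls already visible in this diff (example_cookie_status is an illustrative name):

#include <linux/dmaengine.h>

static inline enum dma_status example_cookie_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *state)
{
	dma_cookie_t used = chan->cookie;
	dma_cookie_t complete = chan->completed_cookie;

	dma_set_tx_state(state, complete, used, 0);
	return dma_async_is_complete(cookie, complete, used);
}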
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 5d65f8377971..e8e110ff3d96 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c | |||
@@ -41,6 +41,8 @@ | |||
41 | #include "registers.h" | 41 | #include "registers.h" |
42 | #include "hw.h" | 42 | #include "hw.h" |
43 | 43 | ||
44 | #include "../dmaengine.h" | ||
45 | |||
44 | int ioat_ring_alloc_order = 8; | 46 | int ioat_ring_alloc_order = 8; |
45 | module_param(ioat_ring_alloc_order, int, 0644); | 47 | module_param(ioat_ring_alloc_order, int, 0644); |
46 | MODULE_PARM_DESC(ioat_ring_alloc_order, | 48 | MODULE_PARM_DESC(ioat_ring_alloc_order, |
@@ -147,8 +149,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) | |||
147 | dump_desc_dbg(ioat, desc); | 149 | dump_desc_dbg(ioat, desc); |
148 | if (tx->cookie) { | 150 | if (tx->cookie) { |
149 | ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); | 151 | ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); |
150 | chan->completed_cookie = tx->cookie; | 152 | dma_cookie_complete(tx); |
151 | tx->cookie = 0; | ||
152 | if (tx->callback) { | 153 | if (tx->callback) { |
153 | tx->callback(tx->callback_param); | 154 | tx->callback(tx->callback_param); |
154 | tx->callback = NULL; | 155 | tx->callback = NULL; |
@@ -398,13 +399,9 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx) | |||
398 | struct dma_chan *c = tx->chan; | 399 | struct dma_chan *c = tx->chan; |
399 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | 400 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); |
400 | struct ioat_chan_common *chan = &ioat->base; | 401 | struct ioat_chan_common *chan = &ioat->base; |
401 | dma_cookie_t cookie = c->cookie; | 402 | dma_cookie_t cookie; |
402 | 403 | ||
403 | cookie++; | 404 | cookie = dma_cookie_assign(tx); |
404 | if (cookie < 0) | ||
405 | cookie = 1; | ||
406 | tx->cookie = cookie; | ||
407 | c->cookie = cookie; | ||
408 | dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); | 405 | dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); |
409 | 406 | ||
410 | if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) | 407 | if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) |
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index f519c93a61e7..2c4476c0e405 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c | |||
@@ -61,6 +61,7 @@ | |||
61 | #include <linux/dmaengine.h> | 61 | #include <linux/dmaengine.h> |
62 | #include <linux/dma-mapping.h> | 62 | #include <linux/dma-mapping.h> |
63 | #include <linux/prefetch.h> | 63 | #include <linux/prefetch.h> |
64 | #include "../dmaengine.h" | ||
64 | #include "registers.h" | 65 | #include "registers.h" |
65 | #include "hw.h" | 66 | #include "hw.h" |
66 | #include "dma.h" | 67 | #include "dma.h" |
@@ -277,9 +278,8 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) | |||
277 | dump_desc_dbg(ioat, desc); | 278 | dump_desc_dbg(ioat, desc); |
278 | tx = &desc->txd; | 279 | tx = &desc->txd; |
279 | if (tx->cookie) { | 280 | if (tx->cookie) { |
280 | chan->completed_cookie = tx->cookie; | 281 | dma_cookie_complete(tx); |
281 | ioat3_dma_unmap(ioat, desc, idx + i); | 282 | ioat3_dma_unmap(ioat, desc, idx + i); |
282 | tx->cookie = 0; | ||
283 | if (tx->callback) { | 283 | if (tx->callback) { |
284 | tx->callback(tx->callback_param); | 284 | tx->callback(tx->callback_param); |
285 | tx->callback = NULL; | 285 | tx->callback = NULL; |
@@ -411,13 +411,15 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie, | |||
411 | struct dma_tx_state *txstate) | 411 | struct dma_tx_state *txstate) |
412 | { | 412 | { |
413 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | 413 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); |
414 | enum dma_status ret; | ||
414 | 415 | ||
415 | if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS) | 416 | ret = dma_cookie_status(c, cookie, txstate); |
416 | return DMA_SUCCESS; | 417 | if (ret == DMA_SUCCESS) |
418 | return ret; | ||
417 | 419 | ||
418 | ioat3_cleanup(ioat); | 420 | ioat3_cleanup(ioat); |
419 | 421 | ||
420 | return ioat_tx_status(c, cookie, txstate); | 422 | return dma_cookie_status(c, cookie, txstate); |
421 | } | 423 | } |
422 | 424 | ||
423 | static struct dma_async_tx_descriptor * | 425 | static struct dma_async_tx_descriptor * |
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index faf88b7e1e71..da6c4c2c066a 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c | |||
@@ -36,6 +36,8 @@ | |||
36 | 36 | ||
37 | #include <mach/adma.h> | 37 | #include <mach/adma.h> |
38 | 38 | ||
39 | #include "dmaengine.h" | ||
40 | |||
39 | #define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common) | 41 | #define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common) |
40 | #define to_iop_adma_device(dev) \ | 42 | #define to_iop_adma_device(dev) \ |
41 | container_of(dev, struct iop_adma_device, common) | 43 | container_of(dev, struct iop_adma_device, common) |
@@ -317,7 +319,7 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) | |||
317 | } | 319 | } |
318 | 320 | ||
319 | if (cookie > 0) { | 321 | if (cookie > 0) { |
320 | iop_chan->completed_cookie = cookie; | 322 | iop_chan->common.completed_cookie = cookie; |
321 | pr_debug("\tcompleted cookie %d\n", cookie); | 323 | pr_debug("\tcompleted cookie %d\n", cookie); |
322 | } | 324 | } |
323 | } | 325 | } |
@@ -438,18 +440,6 @@ retry: | |||
438 | return NULL; | 440 | return NULL; |
439 | } | 441 | } |
440 | 442 | ||
441 | static dma_cookie_t | ||
442 | iop_desc_assign_cookie(struct iop_adma_chan *iop_chan, | ||
443 | struct iop_adma_desc_slot *desc) | ||
444 | { | ||
445 | dma_cookie_t cookie = iop_chan->common.cookie; | ||
446 | cookie++; | ||
447 | if (cookie < 0) | ||
448 | cookie = 1; | ||
449 | iop_chan->common.cookie = desc->async_tx.cookie = cookie; | ||
450 | return cookie; | ||
451 | } | ||
452 | |||
453 | static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan) | 443 | static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan) |
454 | { | 444 | { |
455 | dev_dbg(iop_chan->device->common.dev, "pending: %d\n", | 445 | dev_dbg(iop_chan->device->common.dev, "pending: %d\n", |
@@ -477,7 +467,7 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
477 | slots_per_op = grp_start->slots_per_op; | 467 | slots_per_op = grp_start->slots_per_op; |
478 | 468 | ||
479 | spin_lock_bh(&iop_chan->lock); | 469 | spin_lock_bh(&iop_chan->lock); |
480 | cookie = iop_desc_assign_cookie(iop_chan, sw_desc); | 470 | cookie = dma_cookie_assign(tx); |
481 | 471 | ||
482 | old_chain_tail = list_entry(iop_chan->chain.prev, | 472 | old_chain_tail = list_entry(iop_chan->chain.prev, |
483 | struct iop_adma_desc_slot, chain_node); | 473 | struct iop_adma_desc_slot, chain_node); |
@@ -904,24 +894,15 @@ static enum dma_status iop_adma_status(struct dma_chan *chan, | |||
904 | struct dma_tx_state *txstate) | 894 | struct dma_tx_state *txstate) |
905 | { | 895 | { |
906 | struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); | 896 | struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); |
907 | dma_cookie_t last_used; | 897 | int ret; |
908 | dma_cookie_t last_complete; | 898 | |
909 | enum dma_status ret; | 899 | ret = dma_cookie_status(chan, cookie, txstate); |
910 | |||
911 | last_used = chan->cookie; | ||
912 | last_complete = iop_chan->completed_cookie; | ||
913 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
914 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
915 | if (ret == DMA_SUCCESS) | 900 | if (ret == DMA_SUCCESS) |
916 | return ret; | 901 | return ret; |
917 | 902 | ||
918 | iop_adma_slot_cleanup(iop_chan); | 903 | iop_adma_slot_cleanup(iop_chan); |
919 | 904 | ||
920 | last_used = chan->cookie; | 905 | return dma_cookie_status(chan, cookie, txstate); |
921 | last_complete = iop_chan->completed_cookie; | ||
922 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
923 | |||
924 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
925 | } | 906 | } |
926 | 907 | ||
927 | static irqreturn_t iop_adma_eot_handler(int irq, void *data) | 908 | static irqreturn_t iop_adma_eot_handler(int irq, void *data) |
@@ -1565,6 +1546,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev) | |||
1565 | INIT_LIST_HEAD(&iop_chan->chain); | 1546 | INIT_LIST_HEAD(&iop_chan->chain); |
1566 | INIT_LIST_HEAD(&iop_chan->all_slots); | 1547 | INIT_LIST_HEAD(&iop_chan->all_slots); |
1567 | iop_chan->common.device = dma_dev; | 1548 | iop_chan->common.device = dma_dev; |
1549 | dma_cookie_init(&iop_chan->common); | ||
1568 | list_add_tail(&iop_chan->common.device_node, &dma_dev->channels); | 1550 | list_add_tail(&iop_chan->common.device_node, &dma_dev->channels); |
1569 | 1551 | ||
1570 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { | 1552 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { |
@@ -1642,16 +1624,12 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan) | |||
1642 | iop_desc_set_dest_addr(grp_start, iop_chan, 0); | 1624 | iop_desc_set_dest_addr(grp_start, iop_chan, 0); |
1643 | iop_desc_set_memcpy_src_addr(grp_start, 0); | 1625 | iop_desc_set_memcpy_src_addr(grp_start, 0); |
1644 | 1626 | ||
1645 | cookie = iop_chan->common.cookie; | 1627 | cookie = dma_cookie_assign(&sw_desc->async_tx); |
1646 | cookie++; | ||
1647 | if (cookie <= 1) | ||
1648 | cookie = 2; | ||
1649 | 1628 | ||
1650 | /* initialize the completed cookie to be less than | 1629 | /* initialize the completed cookie to be less than |
1651 | * the most recently used cookie | 1630 | * the most recently used cookie |
1652 | */ | 1631 | */ |
1653 | iop_chan->completed_cookie = cookie - 1; | 1632 | iop_chan->common.completed_cookie = cookie - 1; |
1654 | iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie; | ||
1655 | 1633 | ||
1656 | /* channel should not be busy */ | 1634 | /* channel should not be busy */ |
1657 | BUG_ON(iop_chan_is_busy(iop_chan)); | 1635 | BUG_ON(iop_chan_is_busy(iop_chan)); |
@@ -1699,16 +1677,12 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan) | |||
1699 | iop_desc_set_xor_src_addr(grp_start, 0, 0); | 1677 | iop_desc_set_xor_src_addr(grp_start, 0, 0); |
1700 | iop_desc_set_xor_src_addr(grp_start, 1, 0); | 1678 | iop_desc_set_xor_src_addr(grp_start, 1, 0); |
1701 | 1679 | ||
1702 | cookie = iop_chan->common.cookie; | 1680 | cookie = dma_cookie_assign(&sw_desc->async_tx); |
1703 | cookie++; | ||
1704 | if (cookie <= 1) | ||
1705 | cookie = 2; | ||
1706 | 1681 | ||
1707 | /* initialize the completed cookie to be less than | 1682 | /* initialize the completed cookie to be less than |
1708 | * the most recently used cookie | 1683 | * the most recently used cookie |
1709 | */ | 1684 | */ |
1710 | iop_chan->completed_cookie = cookie - 1; | 1685 | iop_chan->common.completed_cookie = cookie - 1; |
1711 | iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie; | ||
1712 | 1686 | ||
1713 | /* channel should not be busy */ | 1687 | /* channel should not be busy */ |
1714 | BUG_ON(iop_chan_is_busy(iop_chan)); | 1688 | BUG_ON(iop_chan_is_busy(iop_chan)); |
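For reference, the iop_desc_assign_cookie() routine deleted above (and its counterparts removed from the other drivers in this series) reduces to the wrap-on-overflow logic below, which dma_cookie_assign() is expected to centralise. A hedged sketch with an illustrative name; the caller still holds the channel lock:

#include <linux/dmaengine.h>

static inline dma_cookie_t example_cookie_assign(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan = tx->chan;
	dma_cookie_t cookie = chan->cookie + 1;

	if (cookie < 0)			/* wrap back into the valid range */
		cookie = 1;
	tx->cookie = chan->cookie = cookie;

	return cookie;
}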
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 6212b16e8cf2..62e3f8ec2461 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c | |||
@@ -25,6 +25,7 @@ | |||
25 | 25 | ||
26 | #include <mach/ipu.h> | 26 | #include <mach/ipu.h> |
27 | 27 | ||
28 | #include "../dmaengine.h" | ||
28 | #include "ipu_intern.h" | 29 | #include "ipu_intern.h" |
29 | 30 | ||
30 | #define FS_VF_IN_VALID 0x00000002 | 31 | #define FS_VF_IN_VALID 0x00000002 |
@@ -866,14 +867,7 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx) | |||
866 | 867 | ||
867 | dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]); | 868 | dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]); |
868 | 869 | ||
869 | cookie = ichan->dma_chan.cookie; | 870 | cookie = dma_cookie_assign(tx); |
870 | |||
871 | if (++cookie < 0) | ||
872 | cookie = 1; | ||
873 | |||
874 | /* from dmaengine.h: "last cookie value returned to client" */ | ||
875 | ichan->dma_chan.cookie = cookie; | ||
876 | tx->cookie = cookie; | ||
877 | 871 | ||
878 | /* ipu->lock can be taken under ichan->lock, but not v.v. */ | 872 | /* ipu->lock can be taken under ichan->lock, but not v.v. */ |
879 | spin_lock_irqsave(&ichan->lock, flags); | 873 | spin_lock_irqsave(&ichan->lock, flags); |
@@ -1295,7 +1289,7 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) | |||
1295 | /* Flip the active buffer - even if update above failed */ | 1289 | /* Flip the active buffer - even if update above failed */ |
1296 | ichan->active_buffer = !ichan->active_buffer; | 1290 | ichan->active_buffer = !ichan->active_buffer; |
1297 | if (done) | 1291 | if (done) |
1298 | ichan->completed = desc->txd.cookie; | 1292 | dma_cookie_complete(&desc->txd); |
1299 | 1293 | ||
1300 | callback = desc->txd.callback; | 1294 | callback = desc->txd.callback; |
1301 | callback_param = desc->txd.callback_param; | 1295 | callback_param = desc->txd.callback_param; |
@@ -1341,7 +1335,8 @@ static void ipu_gc_tasklet(unsigned long arg) | |||
1341 | /* Allocate and initialise a transfer descriptor. */ | 1335 | /* Allocate and initialise a transfer descriptor. */ |
1342 | static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan, | 1336 | static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan, |
1343 | struct scatterlist *sgl, unsigned int sg_len, | 1337 | struct scatterlist *sgl, unsigned int sg_len, |
1344 | enum dma_transfer_direction direction, unsigned long tx_flags) | 1338 | enum dma_transfer_direction direction, unsigned long tx_flags, |
1339 | void *context) | ||
1345 | { | 1340 | { |
1346 | struct idmac_channel *ichan = to_idmac_chan(chan); | 1341 | struct idmac_channel *ichan = to_idmac_chan(chan); |
1347 | struct idmac_tx_desc *desc = NULL; | 1342 | struct idmac_tx_desc *desc = NULL; |
@@ -1510,8 +1505,7 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan) | |||
1510 | BUG_ON(chan->client_count > 1); | 1505 | BUG_ON(chan->client_count > 1); |
1511 | WARN_ON(ichan->status != IPU_CHANNEL_FREE); | 1506 | WARN_ON(ichan->status != IPU_CHANNEL_FREE); |
1512 | 1507 | ||
1513 | chan->cookie = 1; | 1508 | dma_cookie_init(chan); |
1514 | ichan->completed = -ENXIO; | ||
1515 | 1509 | ||
1516 | ret = ipu_irq_map(chan->chan_id); | 1510 | ret = ipu_irq_map(chan->chan_id); |
1517 | if (ret < 0) | 1511 | if (ret < 0) |
@@ -1600,9 +1594,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan) | |||
1600 | static enum dma_status idmac_tx_status(struct dma_chan *chan, | 1594 | static enum dma_status idmac_tx_status(struct dma_chan *chan, |
1601 | dma_cookie_t cookie, struct dma_tx_state *txstate) | 1595 | dma_cookie_t cookie, struct dma_tx_state *txstate) |
1602 | { | 1596 | { |
1603 | struct idmac_channel *ichan = to_idmac_chan(chan); | 1597 | dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0); |
1604 | |||
1605 | dma_set_tx_state(txstate, ichan->completed, chan->cookie, 0); | ||
1606 | if (cookie != chan->cookie) | 1598 | if (cookie != chan->cookie) |
1607 | return DMA_ERROR; | 1599 | return DMA_ERROR; |
1608 | return DMA_SUCCESS; | 1600 | return DMA_SUCCESS; |
@@ -1638,11 +1630,10 @@ static int __init ipu_idmac_init(struct ipu *ipu) | |||
1638 | 1630 | ||
1639 | ichan->status = IPU_CHANNEL_FREE; | 1631 | ichan->status = IPU_CHANNEL_FREE; |
1640 | ichan->sec_chan_en = false; | 1632 | ichan->sec_chan_en = false; |
1641 | ichan->completed = -ENXIO; | ||
1642 | snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i); | 1633 | snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i); |
1643 | 1634 | ||
1644 | dma_chan->device = &idmac->dma; | 1635 | dma_chan->device = &idmac->dma; |
1645 | dma_chan->cookie = 1; | 1636 | dma_cookie_init(dma_chan); |
1646 | dma_chan->chan_id = i; | 1637 | dma_chan->chan_id = i; |
1647 | list_add_tail(&dma_chan->device_node, &dma->channels); | 1638 | list_add_tail(&dma_chan->device_node, &dma->channels); |
1648 | } | 1639 | } |
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 4d6d4cf66949..2ab0a3d0eed5 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c | |||
@@ -44,6 +44,8 @@ | |||
44 | 44 | ||
45 | #include <linux/random.h> | 45 | #include <linux/random.h> |
46 | 46 | ||
47 | #include "dmaengine.h" | ||
48 | |||
47 | /* Number of DMA Transfer descriptors allocated per channel */ | 49 | /* Number of DMA Transfer descriptors allocated per channel */ |
48 | #define MPC_DMA_DESCRIPTORS 64 | 50 | #define MPC_DMA_DESCRIPTORS 64 |
49 | 51 | ||
@@ -188,7 +190,6 @@ struct mpc_dma_chan { | |||
188 | struct list_head completed; | 190 | struct list_head completed; |
189 | struct mpc_dma_tcd *tcd; | 191 | struct mpc_dma_tcd *tcd; |
190 | dma_addr_t tcd_paddr; | 192 | dma_addr_t tcd_paddr; |
191 | dma_cookie_t completed_cookie; | ||
192 | 193 | ||
193 | /* Lock for this structure */ | 194 | /* Lock for this structure */ |
194 | spinlock_t lock; | 195 | spinlock_t lock; |
@@ -365,7 +366,7 @@ static void mpc_dma_process_completed(struct mpc_dma *mdma) | |||
365 | /* Free descriptors */ | 366 | /* Free descriptors */ |
366 | spin_lock_irqsave(&mchan->lock, flags); | 367 | spin_lock_irqsave(&mchan->lock, flags); |
367 | list_splice_tail_init(&list, &mchan->free); | 368 | list_splice_tail_init(&list, &mchan->free); |
368 | mchan->completed_cookie = last_cookie; | 369 | mchan->chan.completed_cookie = last_cookie; |
369 | spin_unlock_irqrestore(&mchan->lock, flags); | 370 | spin_unlock_irqrestore(&mchan->lock, flags); |
370 | } | 371 | } |
371 | } | 372 | } |
@@ -438,13 +439,7 @@ static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd) | |||
438 | mpc_dma_execute(mchan); | 439 | mpc_dma_execute(mchan); |
439 | 440 | ||
440 | /* Update cookie */ | 441 | /* Update cookie */ |
441 | cookie = mchan->chan.cookie + 1; | 442 | cookie = dma_cookie_assign(txd); |
442 | if (cookie <= 0) | ||
443 | cookie = 1; | ||
444 | |||
445 | mchan->chan.cookie = cookie; | ||
446 | mdesc->desc.cookie = cookie; | ||
447 | |||
448 | spin_unlock_irqrestore(&mchan->lock, flags); | 443 | spin_unlock_irqrestore(&mchan->lock, flags); |
449 | 444 | ||
450 | return cookie; | 445 | return cookie; |
@@ -562,17 +557,14 @@ mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
562 | struct dma_tx_state *txstate) | 557 | struct dma_tx_state *txstate) |
563 | { | 558 | { |
564 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); | 559 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); |
560 | enum dma_status ret; | ||
565 | unsigned long flags; | 561 | unsigned long flags; |
566 | dma_cookie_t last_used; | ||
567 | dma_cookie_t last_complete; | ||
568 | 562 | ||
569 | spin_lock_irqsave(&mchan->lock, flags); | 563 | spin_lock_irqsave(&mchan->lock, flags); |
570 | last_used = mchan->chan.cookie; | 564 | ret = dma_cookie_status(chan, cookie, txstate); |
571 | last_complete = mchan->completed_cookie; | ||
572 | spin_unlock_irqrestore(&mchan->lock, flags); | 565 | spin_unlock_irqrestore(&mchan->lock, flags); |
573 | 566 | ||
574 | dma_set_tx_state(txstate, last_complete, last_used, 0); | 567 | return ret; |
575 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
576 | } | 568 | } |
577 | 569 | ||
578 | /* Prepare descriptor for memory to memory copy */ | 570 | /* Prepare descriptor for memory to memory copy */ |
@@ -741,8 +733,7 @@ static int __devinit mpc_dma_probe(struct platform_device *op) | |||
741 | mchan = &mdma->channels[i]; | 733 | mchan = &mdma->channels[i]; |
742 | 734 | ||
743 | mchan->chan.device = dma; | 735 | mchan->chan.device = dma; |
744 | mchan->chan.cookie = 1; | 736 | dma_cookie_init(&mchan->chan); |
745 | mchan->completed_cookie = mchan->chan.cookie; | ||
746 | 737 | ||
747 | INIT_LIST_HEAD(&mchan->free); | 738 | INIT_LIST_HEAD(&mchan->free); |
748 | INIT_LIST_HEAD(&mchan->prepared); | 739 | INIT_LIST_HEAD(&mchan->prepared); |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index e779b434af45..fa5d55fea46c 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -26,6 +26,8 @@ | |||
26 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
27 | #include <linux/memory.h> | 27 | #include <linux/memory.h> |
28 | #include <plat/mv_xor.h> | 28 | #include <plat/mv_xor.h> |
29 | |||
30 | #include "dmaengine.h" | ||
29 | #include "mv_xor.h" | 31 | #include "mv_xor.h" |
30 | 32 | ||
31 | static void mv_xor_issue_pending(struct dma_chan *chan); | 33 | static void mv_xor_issue_pending(struct dma_chan *chan); |
@@ -435,7 +437,7 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) | |||
435 | } | 437 | } |
436 | 438 | ||
437 | if (cookie > 0) | 439 | if (cookie > 0) |
438 | mv_chan->completed_cookie = cookie; | 440 | mv_chan->common.completed_cookie = cookie; |
439 | } | 441 | } |
440 | 442 | ||
441 | static void | 443 | static void |
@@ -534,18 +536,6 @@ retry: | |||
534 | return NULL; | 536 | return NULL; |
535 | } | 537 | } |
536 | 538 | ||
537 | static dma_cookie_t | ||
538 | mv_desc_assign_cookie(struct mv_xor_chan *mv_chan, | ||
539 | struct mv_xor_desc_slot *desc) | ||
540 | { | ||
541 | dma_cookie_t cookie = mv_chan->common.cookie; | ||
542 | |||
543 | if (++cookie < 0) | ||
544 | cookie = 1; | ||
545 | mv_chan->common.cookie = desc->async_tx.cookie = cookie; | ||
546 | return cookie; | ||
547 | } | ||
548 | |||
549 | /************************ DMA engine API functions ****************************/ | 539 | /************************ DMA engine API functions ****************************/ |
550 | static dma_cookie_t | 540 | static dma_cookie_t |
551 | mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) | 541 | mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) |
@@ -563,7 +553,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) | |||
563 | grp_start = sw_desc->group_head; | 553 | grp_start = sw_desc->group_head; |
564 | 554 | ||
565 | spin_lock_bh(&mv_chan->lock); | 555 | spin_lock_bh(&mv_chan->lock); |
566 | cookie = mv_desc_assign_cookie(mv_chan, sw_desc); | 556 | cookie = dma_cookie_assign(tx); |
567 | 557 | ||
568 | if (list_empty(&mv_chan->chain)) | 558 | if (list_empty(&mv_chan->chain)) |
569 | list_splice_init(&sw_desc->tx_list, &mv_chan->chain); | 559 | list_splice_init(&sw_desc->tx_list, &mv_chan->chain); |
@@ -820,27 +810,16 @@ static enum dma_status mv_xor_status(struct dma_chan *chan, | |||
820 | struct dma_tx_state *txstate) | 810 | struct dma_tx_state *txstate) |
821 | { | 811 | { |
822 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | 812 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); |
823 | dma_cookie_t last_used; | ||
824 | dma_cookie_t last_complete; | ||
825 | enum dma_status ret; | 813 | enum dma_status ret; |
826 | 814 | ||
827 | last_used = chan->cookie; | 815 | ret = dma_cookie_status(chan, cookie, txstate); |
828 | last_complete = mv_chan->completed_cookie; | ||
829 | mv_chan->is_complete_cookie = cookie; | ||
830 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
831 | |||
832 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
833 | if (ret == DMA_SUCCESS) { | 816 | if (ret == DMA_SUCCESS) { |
834 | mv_xor_clean_completed_slots(mv_chan); | 817 | mv_xor_clean_completed_slots(mv_chan); |
835 | return ret; | 818 | return ret; |
836 | } | 819 | } |
837 | mv_xor_slot_cleanup(mv_chan); | 820 | mv_xor_slot_cleanup(mv_chan); |
838 | 821 | ||
839 | last_used = chan->cookie; | 822 | return dma_cookie_status(chan, cookie, txstate); |
840 | last_complete = mv_chan->completed_cookie; | ||
841 | |||
842 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
843 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
844 | } | 823 | } |
845 | 824 | ||
846 | static void mv_dump_xor_regs(struct mv_xor_chan *chan) | 825 | static void mv_dump_xor_regs(struct mv_xor_chan *chan) |
@@ -1214,6 +1193,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev) | |||
1214 | INIT_LIST_HEAD(&mv_chan->completed_slots); | 1193 | INIT_LIST_HEAD(&mv_chan->completed_slots); |
1215 | INIT_LIST_HEAD(&mv_chan->all_slots); | 1194 | INIT_LIST_HEAD(&mv_chan->all_slots); |
1216 | mv_chan->common.device = dma_dev; | 1195 | mv_chan->common.device = dma_dev; |
1196 | dma_cookie_init(&mv_chan->common); | ||
1217 | 1197 | ||
1218 | list_add_tail(&mv_chan->common.device_node, &dma_dev->channels); | 1198 | list_add_tail(&mv_chan->common.device_node, &dma_dev->channels); |
1219 | 1199 | ||
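Several status callbacks in this series (ioat3_tx_status, iop_adma_status and mv_xor_status above) now share a two-pass shape: query the core first, and only when the transaction is not yet complete run the driver's cleanup and query again. A minimal sketch with illustrative names:

#include <linux/dmaengine.h>
#include "dmaengine.h"

static void example_slot_cleanup(struct dma_chan *chan)
{
	/* driver-specific: reap finished descriptors, advance completed_cookie */
}

static enum dma_status example_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS)
		return ret;

	example_slot_cleanup(chan);

	return dma_cookie_status(chan, cookie, txstate);
}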
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index 977b592e976b..654876b7ba1d 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h | |||
@@ -78,7 +78,6 @@ struct mv_xor_device { | |||
78 | /** | 78 | /** |
79 | * struct mv_xor_chan - internal representation of a XOR channel | 79 | * struct mv_xor_chan - internal representation of a XOR channel |
80 | * @pending: allows batching of hardware operations | 80 | * @pending: allows batching of hardware operations |
81 | * @completed_cookie: identifier for the most recently completed operation | ||
82 | * @lock: serializes enqueue/dequeue operations to the descriptors pool | 81 | * @lock: serializes enqueue/dequeue operations to the descriptors pool |
83 | * @mmr_base: memory mapped register base | 82 | * @mmr_base: memory mapped register base |
84 | * @idx: the index of the xor channel | 83 | * @idx: the index of the xor channel |
@@ -93,7 +92,6 @@ struct mv_xor_device { | |||
93 | */ | 92 | */ |
94 | struct mv_xor_chan { | 93 | struct mv_xor_chan { |
95 | int pending; | 94 | int pending; |
96 | dma_cookie_t completed_cookie; | ||
97 | spinlock_t lock; /* protects the descriptor slot pool */ | 95 | spinlock_t lock; /* protects the descriptor slot pool */ |
98 | void __iomem *mmr_base; | 96 | void __iomem *mmr_base; |
99 | unsigned int idx; | 97 | unsigned int idx; |
@@ -109,7 +107,6 @@ struct mv_xor_chan { | |||
109 | #ifdef USE_TIMER | 107 | #ifdef USE_TIMER |
110 | unsigned long cleanup_time; | 108 | unsigned long cleanup_time; |
111 | u32 current_on_last_cleanup; | 109 | u32 current_on_last_cleanup; |
112 | dma_cookie_t is_complete_cookie; | ||
113 | #endif | 110 | #endif |
114 | }; | 111 | }; |
115 | 112 | ||
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index b06cd4ca626f..65334c49b71e 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c | |||
@@ -28,6 +28,8 @@ | |||
28 | #include <mach/dma.h> | 28 | #include <mach/dma.h> |
29 | #include <mach/common.h> | 29 | #include <mach/common.h> |
30 | 30 | ||
31 | #include "dmaengine.h" | ||
32 | |||
31 | /* | 33 | /* |
32 | * NOTE: The term "PIO" throughout the mxs-dma implementation means | 34 | * NOTE: The term "PIO" throughout the mxs-dma implementation means |
33 | * PIO mode of mxs apbh-dma and apbx-dma. With this working mode, | 35 | * PIO mode of mxs apbh-dma and apbx-dma. With this working mode, |
@@ -111,7 +113,6 @@ struct mxs_dma_chan { | |||
111 | struct mxs_dma_ccw *ccw; | 113 | struct mxs_dma_ccw *ccw; |
112 | dma_addr_t ccw_phys; | 114 | dma_addr_t ccw_phys; |
113 | int desc_count; | 115 | int desc_count; |
114 | dma_cookie_t last_completed; | ||
115 | enum dma_status status; | 116 | enum dma_status status; |
116 | unsigned int flags; | 117 | unsigned int flags; |
117 | #define MXS_DMA_SG_LOOP (1 << 0) | 118 | #define MXS_DMA_SG_LOOP (1 << 0) |
@@ -193,19 +194,6 @@ static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan) | |||
193 | mxs_chan->status = DMA_IN_PROGRESS; | 194 | mxs_chan->status = DMA_IN_PROGRESS; |
194 | } | 195 | } |
195 | 196 | ||
196 | static dma_cookie_t mxs_dma_assign_cookie(struct mxs_dma_chan *mxs_chan) | ||
197 | { | ||
198 | dma_cookie_t cookie = mxs_chan->chan.cookie; | ||
199 | |||
200 | if (++cookie < 0) | ||
201 | cookie = 1; | ||
202 | |||
203 | mxs_chan->chan.cookie = cookie; | ||
204 | mxs_chan->desc.cookie = cookie; | ||
205 | |||
206 | return cookie; | ||
207 | } | ||
208 | |||
209 | static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan) | 197 | static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan) |
210 | { | 198 | { |
211 | return container_of(chan, struct mxs_dma_chan, chan); | 199 | return container_of(chan, struct mxs_dma_chan, chan); |
@@ -217,7 +205,7 @@ static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
217 | 205 | ||
218 | mxs_dma_enable_chan(mxs_chan); | 206 | mxs_dma_enable_chan(mxs_chan); |
219 | 207 | ||
220 | return mxs_dma_assign_cookie(mxs_chan); | 208 | return dma_cookie_assign(tx); |
221 | } | 209 | } |
222 | 210 | ||
223 | static void mxs_dma_tasklet(unsigned long data) | 211 | static void mxs_dma_tasklet(unsigned long data) |
@@ -274,7 +262,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) | |||
274 | stat1 &= ~(1 << channel); | 262 | stat1 &= ~(1 << channel); |
275 | 263 | ||
276 | if (mxs_chan->status == DMA_SUCCESS) | 264 | if (mxs_chan->status == DMA_SUCCESS) |
277 | mxs_chan->last_completed = mxs_chan->desc.cookie; | 265 | dma_cookie_complete(&mxs_chan->desc); |
278 | 266 | ||
279 | /* schedule tasklet on this channel */ | 267 | /* schedule tasklet on this channel */ |
280 | tasklet_schedule(&mxs_chan->tasklet); | 268 | tasklet_schedule(&mxs_chan->tasklet); |
@@ -352,7 +340,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan) | |||
352 | static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | 340 | static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( |
353 | struct dma_chan *chan, struct scatterlist *sgl, | 341 | struct dma_chan *chan, struct scatterlist *sgl, |
354 | unsigned int sg_len, enum dma_transfer_direction direction, | 342 | unsigned int sg_len, enum dma_transfer_direction direction, |
355 | unsigned long append) | 343 | unsigned long append, void *context) |
356 | { | 344 | { |
357 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | 345 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); |
358 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 346 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
@@ -447,7 +435,8 @@ err_out: | |||
447 | 435 | ||
448 | static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( | 436 | static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( |
449 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | 437 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, |
450 | size_t period_len, enum dma_transfer_direction direction) | 438 | size_t period_len, enum dma_transfer_direction direction, |
439 | void *context) | ||
451 | { | 440 | { |
452 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | 441 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); |
453 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 442 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
@@ -538,7 +527,7 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, | |||
538 | dma_cookie_t last_used; | 527 | dma_cookie_t last_used; |
539 | 528 | ||
540 | last_used = chan->cookie; | 529 | last_used = chan->cookie; |
541 | dma_set_tx_state(txstate, mxs_chan->last_completed, last_used, 0); | 530 | dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0); |
542 | 531 | ||
543 | return mxs_chan->status; | 532 | return mxs_chan->status; |
544 | } | 533 | } |
@@ -630,6 +619,7 @@ static int __init mxs_dma_probe(struct platform_device *pdev) | |||
630 | 619 | ||
631 | mxs_chan->mxs_dma = mxs_dma; | 620 | mxs_chan->mxs_dma = mxs_dma; |
632 | mxs_chan->chan.device = &mxs_dma->dma_device; | 621 | mxs_chan->chan.device = &mxs_dma->dma_device; |
622 | dma_cookie_init(&mxs_chan->chan); | ||
633 | 623 | ||
634 | tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet, | 624 | tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet, |
635 | (unsigned long) mxs_chan); | 625 | (unsigned long) mxs_chan); |
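Alongside the cookie work, the slave_sg and cyclic preparation hooks in these drivers gain a trailing "void *context" argument, which the converted drivers ignore. Illustrative prototypes matching the new signatures, with placeholder bodies (the example_* names are not from the patch):

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static struct dma_async_tx_descriptor *example_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	/* placeholder: a real driver builds and returns a descriptor here */
	return NULL;
}

static struct dma_async_tx_descriptor *example_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	/* placeholder: a real driver builds and returns a descriptor here */
	return NULL;
}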
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 823f58179f9d..65c0495a6d40 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c | |||
@@ -25,6 +25,8 @@ | |||
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/pch_dma.h> | 26 | #include <linux/pch_dma.h> |
27 | 27 | ||
28 | #include "dmaengine.h" | ||
29 | |||
28 | #define DRV_NAME "pch-dma" | 30 | #define DRV_NAME "pch-dma" |
29 | 31 | ||
30 | #define DMA_CTL0_DISABLE 0x0 | 32 | #define DMA_CTL0_DISABLE 0x0 |
@@ -105,7 +107,6 @@ struct pch_dma_chan { | |||
105 | 107 | ||
106 | spinlock_t lock; | 108 | spinlock_t lock; |
107 | 109 | ||
108 | dma_cookie_t completed_cookie; | ||
109 | struct list_head active_list; | 110 | struct list_head active_list; |
110 | struct list_head queue; | 111 | struct list_head queue; |
111 | struct list_head free_list; | 112 | struct list_head free_list; |
@@ -416,20 +417,6 @@ static void pdc_advance_work(struct pch_dma_chan *pd_chan) | |||
416 | } | 417 | } |
417 | } | 418 | } |
418 | 419 | ||
419 | static dma_cookie_t pdc_assign_cookie(struct pch_dma_chan *pd_chan, | ||
420 | struct pch_dma_desc *desc) | ||
421 | { | ||
422 | dma_cookie_t cookie = pd_chan->chan.cookie; | ||
423 | |||
424 | if (++cookie < 0) | ||
425 | cookie = 1; | ||
426 | |||
427 | pd_chan->chan.cookie = cookie; | ||
428 | desc->txd.cookie = cookie; | ||
429 | |||
430 | return cookie; | ||
431 | } | ||
432 | |||
433 | static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) | 420 | static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) |
434 | { | 421 | { |
435 | struct pch_dma_desc *desc = to_pd_desc(txd); | 422 | struct pch_dma_desc *desc = to_pd_desc(txd); |
@@ -437,7 +424,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) | |||
437 | dma_cookie_t cookie; | 424 | dma_cookie_t cookie; |
438 | 425 | ||
439 | spin_lock(&pd_chan->lock); | 426 | spin_lock(&pd_chan->lock); |
440 | cookie = pdc_assign_cookie(pd_chan, desc); | 427 | cookie = dma_cookie_assign(txd); |
441 | 428 | ||
442 | if (list_empty(&pd_chan->active_list)) { | 429 | if (list_empty(&pd_chan->active_list)) { |
443 | list_add_tail(&desc->desc_node, &pd_chan->active_list); | 430 | list_add_tail(&desc->desc_node, &pd_chan->active_list); |
@@ -544,7 +531,7 @@ static int pd_alloc_chan_resources(struct dma_chan *chan) | |||
544 | spin_lock_irq(&pd_chan->lock); | 531 | spin_lock_irq(&pd_chan->lock); |
545 | list_splice(&tmp_list, &pd_chan->free_list); | 532 | list_splice(&tmp_list, &pd_chan->free_list); |
546 | pd_chan->descs_allocated = i; | 533 | pd_chan->descs_allocated = i; |
547 | pd_chan->completed_cookie = chan->cookie = 1; | 534 | dma_cookie_init(chan); |
548 | spin_unlock_irq(&pd_chan->lock); | 535 | spin_unlock_irq(&pd_chan->lock); |
549 | 536 | ||
550 | pdc_enable_irq(chan, 1); | 537 | pdc_enable_irq(chan, 1); |
@@ -578,19 +565,12 @@ static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
578 | struct dma_tx_state *txstate) | 565 | struct dma_tx_state *txstate) |
579 | { | 566 | { |
580 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); | 567 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); |
581 | dma_cookie_t last_used; | 568 | enum dma_status ret; |
582 | dma_cookie_t last_completed; | ||
583 | int ret; | ||
584 | 569 | ||
585 | spin_lock_irq(&pd_chan->lock); | 570 | spin_lock_irq(&pd_chan->lock); |
586 | last_completed = pd_chan->completed_cookie; | 571 | ret = dma_cookie_status(chan, cookie, txstate); |
587 | last_used = chan->cookie; | ||
588 | spin_unlock_irq(&pd_chan->lock); | 572 | spin_unlock_irq(&pd_chan->lock); |
589 | 573 | ||
590 | ret = dma_async_is_complete(cookie, last_completed, last_used); | ||
591 | |||
592 | dma_set_tx_state(txstate, last_completed, last_used, 0); | ||
593 | |||
594 | return ret; | 574 | return ret; |
595 | } | 575 | } |
596 | 576 | ||
@@ -607,7 +587,8 @@ static void pd_issue_pending(struct dma_chan *chan) | |||
607 | 587 | ||
608 | static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, | 588 | static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, |
609 | struct scatterlist *sgl, unsigned int sg_len, | 589 | struct scatterlist *sgl, unsigned int sg_len, |
610 | enum dma_transfer_direction direction, unsigned long flags) | 590 | enum dma_transfer_direction direction, unsigned long flags, |
591 | void *context) | ||
611 | { | 592 | { |
612 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); | 593 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); |
613 | struct pch_dma_slave *pd_slave = chan->private; | 594 | struct pch_dma_slave *pd_slave = chan->private; |
@@ -932,7 +913,7 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev, | |||
932 | struct pch_dma_chan *pd_chan = &pd->channels[i]; | 913 | struct pch_dma_chan *pd_chan = &pd->channels[i]; |
933 | 914 | ||
934 | pd_chan->chan.device = &pd->dma; | 915 | pd_chan->chan.device = &pd->dma; |
935 | pd_chan->chan.cookie = 1; | 916 | dma_cookie_init(&pd_chan->chan); |
936 | 917 | ||
937 | pd_chan->membase = ®s->desc[i]; | 918 | pd_chan->membase = ®s->desc[i]; |
938 | 919 | ||
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 16b66c827f19..282caf118be8 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -1,4 +1,6 @@ | |||
1 | /* linux/drivers/dma/pl330.c | 1 | /* |
2 | * Copyright (c) 2012 Samsung Electronics Co., Ltd. | ||
3 | * http://www.samsung.com | ||
2 | * | 4 | * |
3 | * Copyright (C) 2010 Samsung Electronics Co. Ltd. | 5 | * Copyright (C) 2010 Samsung Electronics Co. Ltd. |
4 | * Jaswinder Singh <jassi.brar@samsung.com> | 6 | * Jaswinder Singh <jassi.brar@samsung.com> |
@@ -9,10 +11,15 @@ | |||
9 | * (at your option) any later version. | 11 | * (at your option) any later version. |
10 | */ | 12 | */ |
11 | 13 | ||
14 | #include <linux/kernel.h> | ||
12 | #include <linux/io.h> | 15 | #include <linux/io.h> |
13 | #include <linux/init.h> | 16 | #include <linux/init.h> |
14 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
15 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/string.h> | ||
20 | #include <linux/delay.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/dma-mapping.h> | ||
16 | #include <linux/dmaengine.h> | 23 | #include <linux/dmaengine.h> |
17 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
18 | #include <linux/amba/bus.h> | 25 | #include <linux/amba/bus.h> |
@@ -21,8 +28,497 @@ | |||
21 | #include <linux/scatterlist.h> | 28 | #include <linux/scatterlist.h> |
22 | #include <linux/of.h> | 29 | #include <linux/of.h> |
23 | 30 | ||
31 | #include "dmaengine.h" | ||
32 | #define PL330_MAX_CHAN 8 | ||
33 | #define PL330_MAX_IRQS 32 | ||
34 | #define PL330_MAX_PERI 32 | ||
35 | |||
36 | enum pl330_srccachectrl { | ||
37 | SCCTRL0, /* Noncacheable and nonbufferable */ | ||
38 | SCCTRL1, /* Bufferable only */ | ||
39 | SCCTRL2, /* Cacheable, but do not allocate */ | ||
40 | SCCTRL3, /* Cacheable and bufferable, but do not allocate */ | ||
41 | SINVALID1, | ||
42 | SINVALID2, | ||
43 | SCCTRL6, /* Cacheable write-through, allocate on reads only */ | ||
44 | SCCTRL7, /* Cacheable write-back, allocate on reads only */ | ||
45 | }; | ||
46 | |||
47 | enum pl330_dstcachectrl { | ||
48 | DCCTRL0, /* Noncacheable and nonbufferable */ | ||
49 | DCCTRL1, /* Bufferable only */ | ||
50 | DCCTRL2, /* Cacheable, but do not allocate */ | ||
51 | DCCTRL3, /* Cacheable and bufferable, but do not allocate */ | ||
52 | DINVALID1, /* AWCACHE = 0x1000 */ | ||
53 | DINVALID2, | ||
54 | DCCTRL6, /* Cacheable write-through, allocate on writes only */ | ||
55 | DCCTRL7, /* Cacheable write-back, allocate on writes only */ | ||
56 | }; | ||
57 | |||
58 | enum pl330_byteswap { | ||
59 | SWAP_NO, | ||
60 | SWAP_2, | ||
61 | SWAP_4, | ||
62 | SWAP_8, | ||
63 | SWAP_16, | ||
64 | }; | ||
65 | |||
66 | enum pl330_reqtype { | ||
67 | MEMTOMEM, | ||
68 | MEMTODEV, | ||
69 | DEVTOMEM, | ||
70 | DEVTODEV, | ||
71 | }; | ||
72 | |||
73 | /* Register and Bit field Definitions */ | ||
74 | #define DS 0x0 | ||
75 | #define DS_ST_STOP 0x0 | ||
76 | #define DS_ST_EXEC 0x1 | ||
77 | #define DS_ST_CMISS 0x2 | ||
78 | #define DS_ST_UPDTPC 0x3 | ||
79 | #define DS_ST_WFE 0x4 | ||
80 | #define DS_ST_ATBRR 0x5 | ||
81 | #define DS_ST_QBUSY 0x6 | ||
82 | #define DS_ST_WFP 0x7 | ||
83 | #define DS_ST_KILL 0x8 | ||
84 | #define DS_ST_CMPLT 0x9 | ||
85 | #define DS_ST_FLTCMP 0xe | ||
86 | #define DS_ST_FAULT 0xf | ||
87 | |||
88 | #define DPC 0x4 | ||
89 | #define INTEN 0x20 | ||
90 | #define ES 0x24 | ||
91 | #define INTSTATUS 0x28 | ||
92 | #define INTCLR 0x2c | ||
93 | #define FSM 0x30 | ||
94 | #define FSC 0x34 | ||
95 | #define FTM 0x38 | ||
96 | |||
97 | #define _FTC 0x40 | ||
98 | #define FTC(n) (_FTC + (n)*0x4) | ||
99 | |||
100 | #define _CS 0x100 | ||
101 | #define CS(n) (_CS + (n)*0x8) | ||
102 | #define CS_CNS (1 << 21) | ||
103 | |||
104 | #define _CPC 0x104 | ||
105 | #define CPC(n) (_CPC + (n)*0x8) | ||
106 | |||
107 | #define _SA 0x400 | ||
108 | #define SA(n) (_SA + (n)*0x20) | ||
109 | |||
110 | #define _DA 0x404 | ||
111 | #define DA(n) (_DA + (n)*0x20) | ||
112 | |||
113 | #define _CC 0x408 | ||
114 | #define CC(n) (_CC + (n)*0x20) | ||
115 | |||
116 | #define CC_SRCINC (1 << 0) | ||
117 | #define CC_DSTINC (1 << 14) | ||
118 | #define CC_SRCPRI (1 << 8) | ||
119 | #define CC_DSTPRI (1 << 22) | ||
120 | #define CC_SRCNS (1 << 9) | ||
121 | #define CC_DSTNS (1 << 23) | ||
122 | #define CC_SRCIA (1 << 10) | ||
123 | #define CC_DSTIA (1 << 24) | ||
124 | #define CC_SRCBRSTLEN_SHFT 4 | ||
125 | #define CC_DSTBRSTLEN_SHFT 18 | ||
126 | #define CC_SRCBRSTSIZE_SHFT 1 | ||
127 | #define CC_DSTBRSTSIZE_SHFT 15 | ||
128 | #define CC_SRCCCTRL_SHFT 11 | ||
129 | #define CC_SRCCCTRL_MASK 0x7 | ||
130 | #define CC_DSTCCTRL_SHFT 25 | ||
131 | #define CC_DRCCCTRL_MASK 0x7 | ||
132 | #define CC_SWAP_SHFT 28 | ||
133 | |||
134 | #define _LC0 0x40c | ||
135 | #define LC0(n) (_LC0 + (n)*0x20) | ||
136 | |||
137 | #define _LC1 0x410 | ||
138 | #define LC1(n) (_LC1 + (n)*0x20) | ||
139 | |||
140 | #define DBGSTATUS 0xd00 | ||
141 | #define DBG_BUSY (1 << 0) | ||
142 | |||
143 | #define DBGCMD 0xd04 | ||
144 | #define DBGINST0 0xd08 | ||
145 | #define DBGINST1 0xd0c | ||
146 | |||
147 | #define CR0 0xe00 | ||
148 | #define CR1 0xe04 | ||
149 | #define CR2 0xe08 | ||
150 | #define CR3 0xe0c | ||
151 | #define CR4 0xe10 | ||
152 | #define CRD 0xe14 | ||
153 | |||
154 | #define PERIPH_ID 0xfe0 | ||
155 | #define PERIPH_REV_SHIFT 20 | ||
156 | #define PERIPH_REV_MASK 0xf | ||
157 | #define PERIPH_REV_R0P0 0 | ||
158 | #define PERIPH_REV_R1P0 1 | ||
159 | #define PERIPH_REV_R1P1 2 | ||
160 | #define PCELL_ID 0xff0 | ||
161 | |||
162 | #define CR0_PERIPH_REQ_SET (1 << 0) | ||
163 | #define CR0_BOOT_EN_SET (1 << 1) | ||
164 | #define CR0_BOOT_MAN_NS (1 << 2) | ||
165 | #define CR0_NUM_CHANS_SHIFT 4 | ||
166 | #define CR0_NUM_CHANS_MASK 0x7 | ||
167 | #define CR0_NUM_PERIPH_SHIFT 12 | ||
168 | #define CR0_NUM_PERIPH_MASK 0x1f | ||
169 | #define CR0_NUM_EVENTS_SHIFT 17 | ||
170 | #define CR0_NUM_EVENTS_MASK 0x1f | ||
171 | |||
172 | #define CR1_ICACHE_LEN_SHIFT 0 | ||
173 | #define CR1_ICACHE_LEN_MASK 0x7 | ||
174 | #define CR1_NUM_ICACHELINES_SHIFT 4 | ||
175 | #define CR1_NUM_ICACHELINES_MASK 0xf | ||
176 | |||
177 | #define CRD_DATA_WIDTH_SHIFT 0 | ||
178 | #define CRD_DATA_WIDTH_MASK 0x7 | ||
179 | #define CRD_WR_CAP_SHIFT 4 | ||
180 | #define CRD_WR_CAP_MASK 0x7 | ||
181 | #define CRD_WR_Q_DEP_SHIFT 8 | ||
182 | #define CRD_WR_Q_DEP_MASK 0xf | ||
183 | #define CRD_RD_CAP_SHIFT 12 | ||
184 | #define CRD_RD_CAP_MASK 0x7 | ||
185 | #define CRD_RD_Q_DEP_SHIFT 16 | ||
186 | #define CRD_RD_Q_DEP_MASK 0xf | ||
187 | #define CRD_DATA_BUFF_SHIFT 20 | ||
188 | #define CRD_DATA_BUFF_MASK 0x3ff | ||
189 | |||
190 | #define PART 0x330 | ||
191 | #define DESIGNER 0x41 | ||
192 | #define REVISION 0x0 | ||
193 | #define INTEG_CFG 0x0 | ||
194 | #define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12)) | ||
195 | |||
196 | #define PCELL_ID_VAL 0xb105f00d | ||
197 | |||
198 | #define PL330_STATE_STOPPED (1 << 0) | ||
199 | #define PL330_STATE_EXECUTING (1 << 1) | ||
200 | #define PL330_STATE_WFE (1 << 2) | ||
201 | #define PL330_STATE_FAULTING (1 << 3) | ||
202 | #define PL330_STATE_COMPLETING (1 << 4) | ||
203 | #define PL330_STATE_WFP (1 << 5) | ||
204 | #define PL330_STATE_KILLING (1 << 6) | ||
205 | #define PL330_STATE_FAULT_COMPLETING (1 << 7) | ||
206 | #define PL330_STATE_CACHEMISS (1 << 8) | ||
207 | #define PL330_STATE_UPDTPC (1 << 9) | ||
208 | #define PL330_STATE_ATBARRIER (1 << 10) | ||
209 | #define PL330_STATE_QUEUEBUSY (1 << 11) | ||
210 | #define PL330_STATE_INVALID (1 << 15) | ||
211 | |||
212 | #define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \ | ||
213 | | PL330_STATE_WFE | PL330_STATE_FAULTING) | ||
214 | |||
215 | #define CMD_DMAADDH 0x54 | ||
216 | #define CMD_DMAEND 0x00 | ||
217 | #define CMD_DMAFLUSHP 0x35 | ||
218 | #define CMD_DMAGO 0xa0 | ||
219 | #define CMD_DMALD 0x04 | ||
220 | #define CMD_DMALDP 0x25 | ||
221 | #define CMD_DMALP 0x20 | ||
222 | #define CMD_DMALPEND 0x28 | ||
223 | #define CMD_DMAKILL 0x01 | ||
224 | #define CMD_DMAMOV 0xbc | ||
225 | #define CMD_DMANOP 0x18 | ||
226 | #define CMD_DMARMB 0x12 | ||
227 | #define CMD_DMASEV 0x34 | ||
228 | #define CMD_DMAST 0x08 | ||
229 | #define CMD_DMASTP 0x29 | ||
230 | #define CMD_DMASTZ 0x0c | ||
231 | #define CMD_DMAWFE 0x36 | ||
232 | #define CMD_DMAWFP 0x30 | ||
233 | #define CMD_DMAWMB 0x13 | ||
234 | |||
235 | #define SZ_DMAADDH 3 | ||
236 | #define SZ_DMAEND 1 | ||
237 | #define SZ_DMAFLUSHP 2 | ||
238 | #define SZ_DMALD 1 | ||
239 | #define SZ_DMALDP 2 | ||
240 | #define SZ_DMALP 2 | ||
241 | #define SZ_DMALPEND 2 | ||
242 | #define SZ_DMAKILL 1 | ||
243 | #define SZ_DMAMOV 6 | ||
244 | #define SZ_DMANOP 1 | ||
245 | #define SZ_DMARMB 1 | ||
246 | #define SZ_DMASEV 2 | ||
247 | #define SZ_DMAST 1 | ||
248 | #define SZ_DMASTP 2 | ||
249 | #define SZ_DMASTZ 1 | ||
250 | #define SZ_DMAWFE 2 | ||
251 | #define SZ_DMAWFP 2 | ||
252 | #define SZ_DMAWMB 1 | ||
253 | #define SZ_DMAGO 6 | ||
254 | |||
255 | #define BRST_LEN(ccr) ((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1) | ||
256 | #define BRST_SIZE(ccr) (1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7)) | ||
257 | |||
258 | #define BYTE_TO_BURST(b, ccr) ((b) / BRST_SIZE(ccr) / BRST_LEN(ccr)) | ||
259 | #define BURST_TO_BYTE(c, ccr) ((c) * BRST_SIZE(ccr) * BRST_LEN(ccr)) | ||
260 | |||
261 | /* | ||
262 | * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req | ||
263 | * at 1byte/burst for P<->M and M<->M respectively. | ||
264 | * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req | ||
265 | * should be enough for P<->M and M<->M respectively. | ||
266 | */ | ||
267 | #define MCODE_BUFF_PER_REQ 256 | ||
268 | |||
269 | /* If the _pl330_req is available to the client */ | ||
270 | #define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND) | ||
271 | |||
272 | /* Use this _only_ to wait on transient states */ | ||
273 | #define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax(); | ||
274 | |||
275 | #ifdef PL330_DEBUG_MCGEN | ||
276 | static unsigned cmd_line; | ||
277 | #define PL330_DBGCMD_DUMP(off, x...) do { \ | ||
278 | printk("%x:", cmd_line); \ | ||
279 | printk(x); \ | ||
280 | cmd_line += off; \ | ||
281 | } while (0) | ||
282 | #define PL330_DBGMC_START(addr) (cmd_line = addr) | ||
283 | #else | ||
284 | #define PL330_DBGCMD_DUMP(off, x...) do {} while (0) | ||
285 | #define PL330_DBGMC_START(addr) do {} while (0) | ||
286 | #endif | ||
287 | |||
288 | /* The number of default descriptors */ | ||
289 | |||
24 | #define NR_DEFAULT_DESC 16 | 290 | #define NR_DEFAULT_DESC 16 |
25 | 291 | ||
292 | /* Populated by the PL330 core driver for DMA API driver's info */ | ||
293 | struct pl330_config { | ||
294 | u32 periph_id; | ||
295 | u32 pcell_id; | ||
296 | #define DMAC_MODE_NS (1 << 0) | ||
297 | unsigned int mode; | ||
298 | unsigned int data_bus_width:10; /* In number of bits */ | ||
299 | unsigned int data_buf_dep:10; | ||
300 | unsigned int num_chan:4; | ||
301 | unsigned int num_peri:6; | ||
302 | u32 peri_ns; | ||
303 | unsigned int num_events:6; | ||
304 | u32 irq_ns; | ||
305 | }; | ||
306 | |||
307 | /* Handle to the DMAC provided to the PL330 core */ | ||
308 | struct pl330_info { | ||
309 | /* Owning device */ | ||
310 | struct device *dev; | ||
311 | /* Size of MicroCode buffers for each channel. */ | ||
312 | unsigned mcbufsz; | ||
313 | /* ioremap'ed address of PL330 registers. */ | ||
314 | void __iomem *base; | ||
315 | /* Client can freely use it. */ | ||
316 | void *client_data; | ||
317 | /* PL330 core data, Client must not touch it. */ | ||
318 | void *pl330_data; | ||
319 | /* Populated by the PL330 core driver during pl330_add */ | ||
320 | struct pl330_config pcfg; | ||
321 | /* | ||
322 | * If the DMAC has some reset mechanism, then the | ||
323 | * client may want to provide a pointer to the method. | ||
324 | */ | ||
325 | void (*dmac_reset)(struct pl330_info *pi); | ||
326 | }; | ||
327 | |||
328 | /** | ||
329 | * Request Configuration. | ||
330 | * The PL330 core does not modify this and uses the last | ||
331 | * working configuration if the request doesn't provide any. | ||
332 | * | ||
333 | * The Client may want to provide this info only for the | ||
334 | * first request and a request with new settings. | ||
335 | */ | ||
336 | struct pl330_reqcfg { | ||
337 | /* Address Incrementing */ | ||
338 | unsigned dst_inc:1; | ||
339 | unsigned src_inc:1; | ||
340 | |||
341 | /* | ||
342 | * For now, the SRC & DST protection levels | ||
343 | * and burst size/length are assumed same. | ||
344 | */ | ||
345 | bool nonsecure; | ||
346 | bool privileged; | ||
347 | bool insnaccess; | ||
348 | unsigned brst_len:5; | ||
349 | unsigned brst_size:3; /* in power of 2 */ | ||
350 | |||
351 | enum pl330_dstcachectrl dcctl; | ||
352 | enum pl330_srccachectrl scctl; | ||
353 | enum pl330_byteswap swap; | ||
354 | struct pl330_config *pcfg; | ||
355 | }; | ||
356 | |||
357 | /* | ||
358 | * One cycle of DMAC operation. | ||
359 | * There may be more than one xfer in a request. | ||
360 | */ | ||
361 | struct pl330_xfer { | ||
362 | u32 src_addr; | ||
363 | u32 dst_addr; | ||
364 | /* Size to xfer */ | ||
365 | u32 bytes; | ||
366 | /* | ||
367 | * Pointer to next xfer in the list. | ||
368 | * The last xfer in the req must point to NULL. | ||
369 | */ | ||
370 | struct pl330_xfer *next; | ||
371 | }; | ||
372 | |||
373 | /* The xfer callbacks are made with one of these arguments. */ | ||
374 | enum pl330_op_err { | ||
375 | /* All xfers in the request were successful. */ | ||
376 | PL330_ERR_NONE, | ||
377 | /* If req aborted due to global error. */ | ||
378 | PL330_ERR_ABORT, | ||
379 | /* If req failed due to problem with Channel. */ | ||
380 | PL330_ERR_FAIL, | ||
381 | }; | ||
382 | |||
383 | /* A request defining Scatter-Gather List ending with NULL xfer. */ | ||
384 | struct pl330_req { | ||
385 | enum pl330_reqtype rqtype; | ||
386 | /* Index of peripheral for the xfer. */ | ||
387 | unsigned peri:5; | ||
388 | /* Unique token for this xfer, set by the client. */ | ||
389 | void *token; | ||
390 | /* Callback to be called after xfer. */ | ||
391 | void (*xfer_cb)(void *token, enum pl330_op_err err); | ||
392 | /* If NULL, req will be done with the last-set parameters. */ | ||
393 | struct pl330_reqcfg *cfg; | ||
394 | /* Pointer to first xfer in the request. */ | ||
395 | struct pl330_xfer *x; | ||
396 | }; | ||
397 | |||
398 | /* | ||
399 | * To know the status of the channel and DMAC, the client | ||
400 | * provides a pointer to this structure. The PL330 core | ||
401 | * fills it with current information. | ||
402 | */ | ||
403 | struct pl330_chanstatus { | ||
404 | /* | ||
405 | * If the DMAC engine halted due to some error, | ||
406 | * the client should remove and re-add the DMAC. | ||
407 | */ | ||
408 | bool dmac_halted; | ||
409 | /* | ||
410 | * If channel is halted due to some error, | ||
411 | * the client should ABORT/FLUSH and START the channel. | ||
412 | */ | ||
413 | bool faulting; | ||
414 | /* Location of last load */ | ||
415 | u32 src_addr; | ||
416 | /* Location of last store */ | ||
417 | u32 dst_addr; | ||
418 | /* | ||
419 | * Pointer to the currently active req, NULL if channel is | ||
420 | * inactive, even though the requests may be present. | ||
421 | */ | ||
422 | struct pl330_req *top_req; | ||
423 | /* Pointer to req waiting second in the queue if any. */ | ||
424 | struct pl330_req *wait_req; | ||
425 | }; | ||
426 | |||
427 | enum pl330_chan_op { | ||
428 | /* Start the channel */ | ||
429 | PL330_OP_START, | ||
430 | /* Abort the active xfer */ | ||
431 | PL330_OP_ABORT, | ||
432 | /* Stop xfer and flush queue */ | ||
433 | PL330_OP_FLUSH, | ||
434 | }; | ||
435 | |||
436 | struct _xfer_spec { | ||
437 | u32 ccr; | ||
438 | struct pl330_req *r; | ||
439 | struct pl330_xfer *x; | ||
440 | }; | ||
441 | |||
442 | enum dmamov_dst { | ||
443 | SAR = 0, | ||
444 | CCR, | ||
445 | DAR, | ||
446 | }; | ||
447 | |||
448 | enum pl330_dst { | ||
449 | SRC = 0, | ||
450 | DST, | ||
451 | }; | ||
452 | |||
453 | enum pl330_cond { | ||
454 | SINGLE, | ||
455 | BURST, | ||
456 | ALWAYS, | ||
457 | }; | ||
458 | |||
459 | struct _pl330_req { | ||
460 | u32 mc_bus; | ||
461 | void *mc_cpu; | ||
462 | /* Number of bytes taken to setup MC for the req */ | ||
463 | u32 mc_len; | ||
464 | struct pl330_req *r; | ||
465 | /* Hook to attach to DMAC's list of reqs with due callback */ | ||
466 | struct list_head rqd; | ||
467 | }; | ||
468 | |||
469 | /* ToBeDone for tasklet */ | ||
470 | struct _pl330_tbd { | ||
471 | bool reset_dmac; | ||
472 | bool reset_mngr; | ||
473 | u8 reset_chan; | ||
474 | }; | ||
475 | |||
476 | /* A DMAC Thread */ | ||
477 | struct pl330_thread { | ||
478 | u8 id; | ||
479 | int ev; | ||
480 | /* If the channel is not yet acquired by any client */ | ||
481 | bool free; | ||
482 | /* Parent DMAC */ | ||
483 | struct pl330_dmac *dmac; | ||
484 | /* Only two at a time */ | ||
485 | struct _pl330_req req[2]; | ||
486 | /* Index of the last enqueued request */ | ||
487 | unsigned lstenq; | ||
488 | /* Index of the last submitted request or -1 if the DMA is stopped */ | ||
489 | int req_running; | ||
490 | }; | ||
491 | |||
492 | enum pl330_dmac_state { | ||
493 | UNINIT, | ||
494 | INIT, | ||
495 | DYING, | ||
496 | }; | ||
497 | |||
498 | /* A DMAC */ | ||
499 | struct pl330_dmac { | ||
500 | spinlock_t lock; | ||
501 | /* Holds list of reqs with due callbacks */ | ||
502 | struct list_head req_done; | ||
503 | /* Pointer to platform specific stuff */ | ||
504 | struct pl330_info *pinfo; | ||
505 | /* Maximum possible events/irqs */ | ||
506 | int events[32]; | ||
507 | /* BUS address of MicroCode buffer */ | ||
508 | u32 mcode_bus; | ||
509 | /* CPU address of MicroCode buffer */ | ||
510 | void *mcode_cpu; | ||
511 | /* List of all Channel threads */ | ||
512 | struct pl330_thread *channels; | ||
513 | /* Pointer to the MANAGER thread */ | ||
514 | struct pl330_thread *manager; | ||
515 | /* To handle bad news in interrupt */ | ||
516 | struct tasklet_struct tasks; | ||
517 | struct _pl330_tbd dmac_tbd; | ||
518 | /* State of DMAC operation */ | ||
519 | enum pl330_dmac_state state; | ||
520 | }; | ||
521 | |||
26 | enum desc_status { | 522 | enum desc_status { |
27 | /* In the DMAC pool */ | 523 | /* In the DMAC pool */ |
28 | FREE, | 524 | FREE, |
@@ -51,9 +547,6 @@ struct dma_pl330_chan { | |||
51 | /* DMA-Engine Channel */ | 547 | /* DMA-Engine Channel */ |
52 | struct dma_chan chan; | 548 | struct dma_chan chan; |
53 | 549 | ||
54 | /* Last completed cookie */ | ||
55 | dma_cookie_t completed; | ||
56 | |||
57 | /* List of to be xfered descriptors */ | 550 | /* List of to be xfered descriptors */ |
58 | struct list_head work_list; | 551 | struct list_head work_list; |
59 | 552 | ||
@@ -117,6 +610,1599 @@ struct dma_pl330_desc { | |||
117 | struct dma_pl330_chan *pchan; | 610 | struct dma_pl330_chan *pchan; |
118 | }; | 611 | }; |
119 | 612 | ||
613 | static inline void _callback(struct pl330_req *r, enum pl330_op_err err) | ||
614 | { | ||
615 | if (r && r->xfer_cb) | ||
616 | r->xfer_cb(r->token, err); | ||
617 | } | ||
618 | |||
619 | static inline bool _queue_empty(struct pl330_thread *thrd) | ||
620 | { | ||
621 | return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1])) | ||
622 | ? true : false; | ||
623 | } | ||
624 | |||
625 | static inline bool _queue_full(struct pl330_thread *thrd) | ||
626 | { | ||
627 | return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1])) | ||
628 | ? false : true; | ||
629 | } | ||
630 | |||
631 | static inline bool is_manager(struct pl330_thread *thrd) | ||
632 | { | ||
633 | struct pl330_dmac *pl330 = thrd->dmac; | ||
634 | |||
635 | /* MANAGER is indexed at the end */ | ||
636 | if (thrd->id == pl330->pinfo->pcfg.num_chan) | ||
637 | return true; | ||
638 | else | ||
639 | return false; | ||
640 | } | ||
641 | |||
642 | /* If manager of the thread is in Non-Secure mode */ | ||
643 | static inline bool _manager_ns(struct pl330_thread *thrd) | ||
644 | { | ||
645 | struct pl330_dmac *pl330 = thrd->dmac; | ||
646 | |||
647 | return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false; | ||
648 | } | ||
649 | |||
650 | static inline u32 get_id(struct pl330_info *pi, u32 off) | ||
651 | { | ||
652 | void __iomem *regs = pi->base; | ||
653 | u32 id = 0; | ||
654 | |||
655 | id |= (readb(regs + off + 0x0) << 0); | ||
656 | id |= (readb(regs + off + 0x4) << 8); | ||
657 | id |= (readb(regs + off + 0x8) << 16); | ||
658 | id |= (readb(regs + off + 0xc) << 24); | ||
659 | |||
660 | return id; | ||
661 | } | ||
662 | |||
663 | static inline u32 get_revision(u32 periph_id) | ||
664 | { | ||
665 | return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK; | ||
666 | } | ||
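get_id() assembles a 32-bit ID from four byte-wide registers spaced 4 bytes apart, which is how AMBA peripheral and PrimeCell IDs are laid out. A small sketch of the same assembly over an array standing in for the register block; the 0xb105f00d result is the conventional PrimeCell ID value and is only illustrative here.

#include <stdio.h>

typedef unsigned int u32;

/* Four byte-wide "registers", 4 bytes apart, as get_id() expects them */
static u32 fake_id_block[4] = { 0x0d, 0xf0, 0x05, 0xb1 };

static u32 assemble_id(const u32 *regs)
{
	u32 id = 0;

	id |= (regs[0] & 0xff) << 0;	/* same four byte lanes as get_id() */
	id |= (regs[1] & 0xff) << 8;
	id |= (regs[2] & 0xff) << 16;
	id |= (regs[3] & 0xff) << 24;

	return id;
}

int main(void)
{
	printf("assembled id: 0x%08x\n", assemble_id(fake_id_block)); /* 0xb105f00d */
	return 0;
}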
667 | |||
668 | static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[], | ||
669 | enum pl330_dst da, u16 val) | ||
670 | { | ||
671 | if (dry_run) | ||
672 | return SZ_DMAADDH; | ||
673 | |||
674 | buf[0] = CMD_DMAADDH; | ||
675 | buf[0] |= (da << 1); | ||
676 | *((u16 *)&buf[1]) = val; | ||
677 | |||
678 | PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n", | ||
679 | da == 1 ? "DA" : "SA", val); | ||
680 | |||
681 | return SZ_DMAADDH; | ||
682 | } | ||
683 | |||
684 | static inline u32 _emit_END(unsigned dry_run, u8 buf[]) | ||
685 | { | ||
686 | if (dry_run) | ||
687 | return SZ_DMAEND; | ||
688 | |||
689 | buf[0] = CMD_DMAEND; | ||
690 | |||
691 | PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n"); | ||
692 | |||
693 | return SZ_DMAEND; | ||
694 | } | ||
695 | |||
696 | static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri) | ||
697 | { | ||
698 | if (dry_run) | ||
699 | return SZ_DMAFLUSHP; | ||
700 | |||
701 | buf[0] = CMD_DMAFLUSHP; | ||
702 | |||
703 | peri &= 0x1f; | ||
704 | peri <<= 3; | ||
705 | buf[1] = peri; | ||
706 | |||
707 | PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3); | ||
708 | |||
709 | return SZ_DMAFLUSHP; | ||
710 | } | ||
711 | |||
712 | static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond) | ||
713 | { | ||
714 | if (dry_run) | ||
715 | return SZ_DMALD; | ||
716 | |||
717 | buf[0] = CMD_DMALD; | ||
718 | |||
719 | if (cond == SINGLE) | ||
720 | buf[0] |= (0 << 1) | (1 << 0); | ||
721 | else if (cond == BURST) | ||
722 | buf[0] |= (1 << 1) | (1 << 0); | ||
723 | |||
724 | PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n", | ||
725 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A')); | ||
726 | |||
727 | return SZ_DMALD; | ||
728 | } | ||
729 | |||
730 | static inline u32 _emit_LDP(unsigned dry_run, u8 buf[], | ||
731 | enum pl330_cond cond, u8 peri) | ||
732 | { | ||
733 | if (dry_run) | ||
734 | return SZ_DMALDP; | ||
735 | |||
736 | buf[0] = CMD_DMALDP; | ||
737 | |||
738 | if (cond == BURST) | ||
739 | buf[0] |= (1 << 1); | ||
740 | |||
741 | peri &= 0x1f; | ||
742 | peri <<= 3; | ||
743 | buf[1] = peri; | ||
744 | |||
745 | PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n", | ||
746 | cond == SINGLE ? 'S' : 'B', peri >> 3); | ||
747 | |||
748 | return SZ_DMALDP; | ||
749 | } | ||
750 | |||
751 | static inline u32 _emit_LP(unsigned dry_run, u8 buf[], | ||
752 | unsigned loop, u8 cnt) | ||
753 | { | ||
754 | if (dry_run) | ||
755 | return SZ_DMALP; | ||
756 | |||
757 | buf[0] = CMD_DMALP; | ||
758 | |||
759 | if (loop) | ||
760 | buf[0] |= (1 << 1); | ||
761 | |||
762 | cnt--; /* DMAC increments by 1 internally */ | ||
763 | buf[1] = cnt; | ||
764 | |||
765 | PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt); | ||
766 | |||
767 | return SZ_DMALP; | ||
768 | } | ||
769 | |||
770 | struct _arg_LPEND { | ||
771 | enum pl330_cond cond; | ||
772 | bool forever; | ||
773 | unsigned loop; | ||
774 | u8 bjump; | ||
775 | }; | ||
776 | |||
777 | static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[], | ||
778 | const struct _arg_LPEND *arg) | ||
779 | { | ||
780 | enum pl330_cond cond = arg->cond; | ||
781 | bool forever = arg->forever; | ||
782 | unsigned loop = arg->loop; | ||
783 | u8 bjump = arg->bjump; | ||
784 | |||
785 | if (dry_run) | ||
786 | return SZ_DMALPEND; | ||
787 | |||
788 | buf[0] = CMD_DMALPEND; | ||
789 | |||
790 | if (loop) | ||
791 | buf[0] |= (1 << 2); | ||
792 | |||
793 | if (!forever) | ||
794 | buf[0] |= (1 << 4); | ||
795 | |||
796 | if (cond == SINGLE) | ||
797 | buf[0] |= (0 << 1) | (1 << 0); | ||
798 | else if (cond == BURST) | ||
799 | buf[0] |= (1 << 1) | (1 << 0); | ||
800 | |||
801 | buf[1] = bjump; | ||
802 | |||
803 | PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n", | ||
804 | forever ? "FE" : "END", | ||
805 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'), | ||
806 | loop ? '1' : '0', | ||
807 | bjump); | ||
808 | |||
809 | return SZ_DMALPEND; | ||
810 | } | ||
811 | |||
812 | static inline u32 _emit_KILL(unsigned dry_run, u8 buf[]) | ||
813 | { | ||
814 | if (dry_run) | ||
815 | return SZ_DMAKILL; | ||
816 | |||
817 | buf[0] = CMD_DMAKILL; | ||
818 | |||
819 | return SZ_DMAKILL; | ||
820 | } | ||
821 | |||
822 | static inline u32 _emit_MOV(unsigned dry_run, u8 buf[], | ||
823 | enum dmamov_dst dst, u32 val) | ||
824 | { | ||
825 | if (dry_run) | ||
826 | return SZ_DMAMOV; | ||
827 | |||
828 | buf[0] = CMD_DMAMOV; | ||
829 | buf[1] = dst; | ||
830 | *((u32 *)&buf[2]) = val; | ||
831 | |||
832 | PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n", | ||
833 | dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val); | ||
834 | |||
835 | return SZ_DMAMOV; | ||
836 | } | ||
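Every _emit_*() helper follows one convention: called with dry_run set it only reports how many bytes the instruction takes, otherwise it also writes the opcode into the microcode buffer; _setup_req() below leans on that to size a request before generating it. Here is a standalone sketch of that two-pass idea for DMAMOV; the 0xbc opcode is an assumption (CMD_DMAMOV is defined earlier in the driver), the rest mirrors _emit_MOV().

#include <stdio.h>
#include <string.h>

enum dmamov_dst { SAR = 0, CCR, DAR };	/* same encoding as the driver's enum */

#define CMD_DMAMOV	0xbc		/* assumed opcode value */
#define SZ_DMAMOV	6

/* Size-only when dry_run, emit bytes otherwise: the _emit_MOV() shape */
static unsigned int emit_mov(int dry_run, unsigned char buf[],
			     enum dmamov_dst dst, unsigned int val)
{
	if (dry_run)
		return SZ_DMAMOV;

	buf[0] = CMD_DMAMOV;
	buf[1] = dst;
	memcpy(&buf[2], &val, 4);	/* 32-bit immediate in host byte order */

	return SZ_DMAMOV;
}

int main(void)
{
	unsigned char mc[32];
	unsigned int need, off = 0, i;

	/* Pass 1: dry run, just add up the sizes */
	need = emit_mov(1, NULL, SAR, 0) + emit_mov(1, NULL, DAR, 0);

	/* Pass 2: the program fits, so really generate it */
	if (need <= sizeof(mc)) {
		off += emit_mov(0, &mc[off], SAR, 0x20000000);
		off += emit_mov(0, &mc[off], DAR, 0x30000000);
	}

	for (i = 0; i < off; i++)
		printf("%02x ", mc[i]);
	printf("\n");

	return 0;
}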
837 | |||
838 | static inline u32 _emit_NOP(unsigned dry_run, u8 buf[]) | ||
839 | { | ||
840 | if (dry_run) | ||
841 | return SZ_DMANOP; | ||
842 | |||
843 | buf[0] = CMD_DMANOP; | ||
844 | |||
845 | PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n"); | ||
846 | |||
847 | return SZ_DMANOP; | ||
848 | } | ||
849 | |||
850 | static inline u32 _emit_RMB(unsigned dry_run, u8 buf[]) | ||
851 | { | ||
852 | if (dry_run) | ||
853 | return SZ_DMARMB; | ||
854 | |||
855 | buf[0] = CMD_DMARMB; | ||
856 | |||
857 | PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n"); | ||
858 | |||
859 | return SZ_DMARMB; | ||
860 | } | ||
861 | |||
862 | static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev) | ||
863 | { | ||
864 | if (dry_run) | ||
865 | return SZ_DMASEV; | ||
866 | |||
867 | buf[0] = CMD_DMASEV; | ||
868 | |||
869 | ev &= 0x1f; | ||
870 | ev <<= 3; | ||
871 | buf[1] = ev; | ||
872 | |||
873 | PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3); | ||
874 | |||
875 | return SZ_DMASEV; | ||
876 | } | ||
877 | |||
878 | static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond) | ||
879 | { | ||
880 | if (dry_run) | ||
881 | return SZ_DMAST; | ||
882 | |||
883 | buf[0] = CMD_DMAST; | ||
884 | |||
885 | if (cond == SINGLE) | ||
886 | buf[0] |= (0 << 1) | (1 << 0); | ||
887 | else if (cond == BURST) | ||
888 | buf[0] |= (1 << 1) | (1 << 0); | ||
889 | |||
890 | PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n", | ||
891 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A')); | ||
892 | |||
893 | return SZ_DMAST; | ||
894 | } | ||
895 | |||
896 | static inline u32 _emit_STP(unsigned dry_run, u8 buf[], | ||
897 | enum pl330_cond cond, u8 peri) | ||
898 | { | ||
899 | if (dry_run) | ||
900 | return SZ_DMASTP; | ||
901 | |||
902 | buf[0] = CMD_DMASTP; | ||
903 | |||
904 | if (cond == BURST) | ||
905 | buf[0] |= (1 << 1); | ||
906 | |||
907 | peri &= 0x1f; | ||
908 | peri <<= 3; | ||
909 | buf[1] = peri; | ||
910 | |||
911 | PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n", | ||
912 | cond == SINGLE ? 'S' : 'B', peri >> 3); | ||
913 | |||
914 | return SZ_DMASTP; | ||
915 | } | ||
916 | |||
917 | static inline u32 _emit_STZ(unsigned dry_run, u8 buf[]) | ||
918 | { | ||
919 | if (dry_run) | ||
920 | return SZ_DMASTZ; | ||
921 | |||
922 | buf[0] = CMD_DMASTZ; | ||
923 | |||
924 | PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n"); | ||
925 | |||
926 | return SZ_DMASTZ; | ||
927 | } | ||
928 | |||
929 | static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev, | ||
930 | unsigned invalidate) | ||
931 | { | ||
932 | if (dry_run) | ||
933 | return SZ_DMAWFE; | ||
934 | |||
935 | buf[0] = CMD_DMAWFE; | ||
936 | |||
937 | ev &= 0x1f; | ||
938 | ev <<= 3; | ||
939 | buf[1] = ev; | ||
940 | |||
941 | if (invalidate) | ||
942 | buf[1] |= (1 << 1); | ||
943 | |||
944 | PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n", | ||
945 | ev >> 3, invalidate ? ", I" : ""); | ||
946 | |||
947 | return SZ_DMAWFE; | ||
948 | } | ||
949 | |||
950 | static inline u32 _emit_WFP(unsigned dry_run, u8 buf[], | ||
951 | enum pl330_cond cond, u8 peri) | ||
952 | { | ||
953 | if (dry_run) | ||
954 | return SZ_DMAWFP; | ||
955 | |||
956 | buf[0] = CMD_DMAWFP; | ||
957 | |||
958 | if (cond == SINGLE) | ||
959 | buf[0] |= (0 << 1) | (0 << 0); | ||
960 | else if (cond == BURST) | ||
961 | buf[0] |= (1 << 1) | (0 << 0); | ||
962 | else | ||
963 | buf[0] |= (0 << 1) | (1 << 0); | ||
964 | |||
965 | peri &= 0x1f; | ||
966 | peri <<= 3; | ||
967 | buf[1] = peri; | ||
968 | |||
969 | PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n", | ||
970 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3); | ||
971 | |||
972 | return SZ_DMAWFP; | ||
973 | } | ||
974 | |||
975 | static inline u32 _emit_WMB(unsigned dry_run, u8 buf[]) | ||
976 | { | ||
977 | if (dry_run) | ||
978 | return SZ_DMAWMB; | ||
979 | |||
980 | buf[0] = CMD_DMAWMB; | ||
981 | |||
982 | PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n"); | ||
983 | |||
984 | return SZ_DMAWMB; | ||
985 | } | ||
986 | |||
987 | struct _arg_GO { | ||
988 | u8 chan; | ||
989 | u32 addr; | ||
990 | unsigned ns; | ||
991 | }; | ||
992 | |||
993 | static inline u32 _emit_GO(unsigned dry_run, u8 buf[], | ||
994 | const struct _arg_GO *arg) | ||
995 | { | ||
996 | u8 chan = arg->chan; | ||
997 | u32 addr = arg->addr; | ||
998 | unsigned ns = arg->ns; | ||
999 | |||
1000 | if (dry_run) | ||
1001 | return SZ_DMAGO; | ||
1002 | |||
1003 | buf[0] = CMD_DMAGO; | ||
1004 | buf[0] |= (ns << 1); | ||
1005 | |||
1006 | buf[1] = chan & 0x7; | ||
1007 | |||
1008 | *((u32 *)&buf[2]) = addr; | ||
1009 | |||
1010 | return SZ_DMAGO; | ||
1011 | } | ||
1012 | |||
1013 | #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) | ||
1014 | |||
1015 | /* Returns Time-Out */ | ||
1016 | static bool _until_dmac_idle(struct pl330_thread *thrd) | ||
1017 | { | ||
1018 | void __iomem *regs = thrd->dmac->pinfo->base; | ||
1019 | unsigned long loops = msecs_to_loops(5); | ||
1020 | |||
1021 | do { | ||
1022 | /* Until Manager is Idle */ | ||
1023 | if (!(readl(regs + DBGSTATUS) & DBG_BUSY)) | ||
1024 | break; | ||
1025 | |||
1026 | cpu_relax(); | ||
1027 | } while (--loops); | ||
1028 | |||
1029 | if (!loops) | ||
1030 | return true; | ||
1031 | |||
1032 | return false; | ||
1033 | } | ||
1034 | |||
1035 | static inline void _execute_DBGINSN(struct pl330_thread *thrd, | ||
1036 | u8 insn[], bool as_manager) | ||
1037 | { | ||
1038 | void __iomem *regs = thrd->dmac->pinfo->base; | ||
1039 | u32 val; | ||
1040 | |||
1041 | val = (insn[0] << 16) | (insn[1] << 24); | ||
1042 | if (!as_manager) { | ||
1043 | val |= (1 << 0); | ||
1044 | val |= (thrd->id << 8); /* Channel Number */ | ||
1045 | } | ||
1046 | writel(val, regs + DBGINST0); | ||
1047 | |||
1048 | val = *((u32 *)&insn[2]); | ||
1049 | writel(val, regs + DBGINST1); | ||
1050 | |||
1051 | /* If timed out due to halted state-machine */ | ||
1052 | if (_until_dmac_idle(thrd)) { | ||
1053 | dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n"); | ||
1054 | return; | ||
1055 | } | ||
1056 | |||
1057 | /* Get going */ | ||
1058 | writel(0, regs + DBGCMD); | ||
1059 | } | ||
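A debug instruction is injected through two registers: the first two opcode bytes plus the channel selection go into DBGINST0, the remaining four bytes into DBGINST1, and a write to DBGCMD starts execution. Below is a sketch of just the packing, using the bit positions visible in _execute_DBGINSN() above; the 0x01 DMAKILL opcode is an assumption.

#include <stdio.h>

/*
 * Pack a 6-byte debug instruction the way _execute_DBGINSN() does:
 * DBGINST0 holds insn[0..1] plus the thread selection,
 * DBGINST1 holds insn[2..5].
 */
static void pack_dbginsn(const unsigned char insn[6], unsigned int chan_id,
			 int as_manager, unsigned int *dbginst0,
			 unsigned int *dbginst1)
{
	unsigned int val;

	val = ((unsigned int)insn[0] << 16) | ((unsigned int)insn[1] << 24);
	if (!as_manager) {
		val |= 1u << 0;			/* debug thread = channel */
		val |= chan_id << 8;		/* which channel */
	}
	*dbginst0 = val;

	*dbginst1 = insn[2] | ((unsigned int)insn[3] << 8) |
		    ((unsigned int)insn[4] << 16) | ((unsigned int)insn[5] << 24);
}

int main(void)
{
	/* DMAKILL (assumed opcode 0x01) aimed at channel 3 */
	unsigned char insn[6] = { 0x01, 0, 0, 0, 0, 0 };
	unsigned int d0, d1;

	pack_dbginsn(insn, 3, 0, &d0, &d1);
	printf("DBGINST0 = 0x%08x, DBGINST1 = 0x%08x\n", d0, d1);
	return 0;
}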
1060 | |||
1061 | /* | ||
1062 | * Mark a _pl330_req as free. | ||
1063 | * We do it by writing DMAEND as the first instruction | ||
1064 | * because no valid request is going to have DMAEND as | ||
1065 | * its first instruction to execute. | ||
1066 | */ | ||
1067 | static void mark_free(struct pl330_thread *thrd, int idx) | ||
1068 | { | ||
1069 | struct _pl330_req *req = &thrd->req[idx]; | ||
1070 | |||
1071 | _emit_END(0, req->mc_cpu); | ||
1072 | req->mc_len = 0; | ||
1073 | |||
1074 | thrd->req_running = -1; | ||
1075 | } | ||
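mark_free() and the IS_FREE() macro near the top of the file are two sides of one invariant: a request slot is free exactly when its microcode starts with DMAEND, which no valid program does. A toy sketch of that invariant follows; both opcode values are assumptions here (they are defined earlier in the driver).

#include <stdbool.h>
#include <stdio.h>

#define CMD_DMAEND	0x00	/* assumed; defined earlier in the driver */
#define CMD_DMAMOV	0xbc	/* assumed; any non-DMAEND first opcode means "busy" */

struct slot {
	unsigned char mc[8];	/* stand-in for a request's microcode buffer */
};

static bool slot_is_free(const struct slot *s)
{
	return s->mc[0] == CMD_DMAEND;		/* the IS_FREE() test */
}

static void slot_mark_free(struct slot *s)
{
	s->mc[0] = CMD_DMAEND;			/* what mark_free() does via _emit_END() */
}

int main(void)
{
	struct slot s;

	slot_mark_free(&s);
	printf("after mark_free  : %s\n", slot_is_free(&s) ? "free" : "busy");

	s.mc[0] = CMD_DMAMOV;	/* a real program starts with DMAMOV CCR, ... */
	printf("after programming: %s\n", slot_is_free(&s) ? "free" : "busy");

	return 0;
}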
1076 | |||
1077 | static inline u32 _state(struct pl330_thread *thrd) | ||
1078 | { | ||
1079 | void __iomem *regs = thrd->dmac->pinfo->base; | ||
1080 | u32 val; | ||
1081 | |||
1082 | if (is_manager(thrd)) | ||
1083 | val = readl(regs + DS) & 0xf; | ||
1084 | else | ||
1085 | val = readl(regs + CS(thrd->id)) & 0xf; | ||
1086 | |||
1087 | switch (val) { | ||
1088 | case DS_ST_STOP: | ||
1089 | return PL330_STATE_STOPPED; | ||
1090 | case DS_ST_EXEC: | ||
1091 | return PL330_STATE_EXECUTING; | ||
1092 | case DS_ST_CMISS: | ||
1093 | return PL330_STATE_CACHEMISS; | ||
1094 | case DS_ST_UPDTPC: | ||
1095 | return PL330_STATE_UPDTPC; | ||
1096 | case DS_ST_WFE: | ||
1097 | return PL330_STATE_WFE; | ||
1098 | case DS_ST_FAULT: | ||
1099 | return PL330_STATE_FAULTING; | ||
1100 | case DS_ST_ATBRR: | ||
1101 | if (is_manager(thrd)) | ||
1102 | return PL330_STATE_INVALID; | ||
1103 | else | ||
1104 | return PL330_STATE_ATBARRIER; | ||
1105 | case DS_ST_QBUSY: | ||
1106 | if (is_manager(thrd)) | ||
1107 | return PL330_STATE_INVALID; | ||
1108 | else | ||
1109 | return PL330_STATE_QUEUEBUSY; | ||
1110 | case DS_ST_WFP: | ||
1111 | if (is_manager(thrd)) | ||
1112 | return PL330_STATE_INVALID; | ||
1113 | else | ||
1114 | return PL330_STATE_WFP; | ||
1115 | case DS_ST_KILL: | ||
1116 | if (is_manager(thrd)) | ||
1117 | return PL330_STATE_INVALID; | ||
1118 | else | ||
1119 | return PL330_STATE_KILLING; | ||
1120 | case DS_ST_CMPLT: | ||
1121 | if (is_manager(thrd)) | ||
1122 | return PL330_STATE_INVALID; | ||
1123 | else | ||
1124 | return PL330_STATE_COMPLETING; | ||
1125 | case DS_ST_FLTCMP: | ||
1126 | if (is_manager(thrd)) | ||
1127 | return PL330_STATE_INVALID; | ||
1128 | else | ||
1129 | return PL330_STATE_FAULT_COMPLETING; | ||
1130 | default: | ||
1131 | return PL330_STATE_INVALID; | ||
1132 | } | ||
1133 | } | ||
1134 | |||
1135 | static void _stop(struct pl330_thread *thrd) | ||
1136 | { | ||
1137 | void __iomem *regs = thrd->dmac->pinfo->base; | ||
1138 | u8 insn[6] = {0, 0, 0, 0, 0, 0}; | ||
1139 | |||
1140 | if (_state(thrd) == PL330_STATE_FAULT_COMPLETING) | ||
1141 | UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING); | ||
1142 | |||
1143 | /* Return if nothing needs to be done */ | ||
1144 | if (_state(thrd) == PL330_STATE_COMPLETING | ||
1145 | || _state(thrd) == PL330_STATE_KILLING | ||
1146 | || _state(thrd) == PL330_STATE_STOPPED) | ||
1147 | return; | ||
1148 | |||
1149 | _emit_KILL(0, insn); | ||
1150 | |||
1151 | /* Stop generating interrupts for SEV */ | ||
1152 | writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN); | ||
1153 | |||
1154 | _execute_DBGINSN(thrd, insn, is_manager(thrd)); | ||
1155 | } | ||
1156 | |||
1157 | /* Start doing req 'idx' of thread 'thrd' */ | ||
1158 | static bool _trigger(struct pl330_thread *thrd) | ||
1159 | { | ||
1160 | void __iomem *regs = thrd->dmac->pinfo->base; | ||
1161 | struct _pl330_req *req; | ||
1162 | struct pl330_req *r; | ||
1163 | struct _arg_GO go; | ||
1164 | unsigned ns; | ||
1165 | u8 insn[6] = {0, 0, 0, 0, 0, 0}; | ||
1166 | int idx; | ||
1167 | |||
1168 | /* Return if already ACTIVE */ | ||
1169 | if (_state(thrd) != PL330_STATE_STOPPED) | ||
1170 | return true; | ||
1171 | |||
1172 | idx = 1 - thrd->lstenq; | ||
1173 | if (!IS_FREE(&thrd->req[idx])) | ||
1174 | req = &thrd->req[idx]; | ||
1175 | else { | ||
1176 | idx = thrd->lstenq; | ||
1177 | if (!IS_FREE(&thrd->req[idx])) | ||
1178 | req = &thrd->req[idx]; | ||
1179 | else | ||
1180 | req = NULL; | ||
1181 | } | ||
1182 | |||
1183 | /* Return if no request */ | ||
1184 | if (!req || !req->r) | ||
1185 | return true; | ||
1186 | |||
1187 | r = req->r; | ||
1188 | |||
1189 | if (r->cfg) | ||
1190 | ns = r->cfg->nonsecure ? 1 : 0; | ||
1191 | else if (readl(regs + CS(thrd->id)) & CS_CNS) | ||
1192 | ns = 1; | ||
1193 | else | ||
1194 | ns = 0; | ||
1195 | |||
1196 | /* See 'Abort Sources' point-4 at Page 2-25 */ | ||
1197 | if (_manager_ns(thrd) && !ns) | ||
1198 | dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n", | ||
1199 | __func__, __LINE__); | ||
1200 | |||
1201 | go.chan = thrd->id; | ||
1202 | go.addr = req->mc_bus; | ||
1203 | go.ns = ns; | ||
1204 | _emit_GO(0, insn, &go); | ||
1205 | |||
1206 | /* Set to generate interrupts for SEV */ | ||
1207 | writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN); | ||
1208 | |||
1209 | /* Only manager can execute GO */ | ||
1210 | _execute_DBGINSN(thrd, insn, true); | ||
1211 | |||
1212 | thrd->req_running = idx; | ||
1213 | |||
1214 | return true; | ||
1215 | } | ||
1216 | |||
1217 | static bool _start(struct pl330_thread *thrd) | ||
1218 | { | ||
1219 | switch (_state(thrd)) { | ||
1220 | case PL330_STATE_FAULT_COMPLETING: | ||
1221 | UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING); | ||
1222 | |||
1223 | if (_state(thrd) == PL330_STATE_KILLING) | ||
1224 | UNTIL(thrd, PL330_STATE_STOPPED) | ||
1225 | |||
1226 | case PL330_STATE_FAULTING: | ||
1227 | _stop(thrd); | ||
1228 | |||
1229 | case PL330_STATE_KILLING: | ||
1230 | case PL330_STATE_COMPLETING: | ||
1231 | UNTIL(thrd, PL330_STATE_STOPPED) | ||
1232 | |||
1233 | case PL330_STATE_STOPPED: | ||
1234 | return _trigger(thrd); | ||
1235 | |||
1236 | case PL330_STATE_WFP: | ||
1237 | case PL330_STATE_QUEUEBUSY: | ||
1238 | case PL330_STATE_ATBARRIER: | ||
1239 | case PL330_STATE_UPDTPC: | ||
1240 | case PL330_STATE_CACHEMISS: | ||
1241 | case PL330_STATE_EXECUTING: | ||
1242 | return true; | ||
1243 | |||
1244 | case PL330_STATE_WFE: /* For RESUME, nothing yet */ | ||
1245 | default: | ||
1246 | return false; | ||
1247 | } | ||
1248 | } | ||
1249 | |||
1250 | static inline int _ldst_memtomem(unsigned dry_run, u8 buf[], | ||
1251 | const struct _xfer_spec *pxs, int cyc) | ||
1252 | { | ||
1253 | int off = 0; | ||
1254 | struct pl330_config *pcfg = pxs->r->cfg->pcfg; | ||
1255 | |||
1256 | /* check lock-up free version */ | ||
1257 | if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) { | ||
1258 | while (cyc--) { | ||
1259 | off += _emit_LD(dry_run, &buf[off], ALWAYS); | ||
1260 | off += _emit_ST(dry_run, &buf[off], ALWAYS); | ||
1261 | } | ||
1262 | } else { | ||
1263 | while (cyc--) { | ||
1264 | off += _emit_LD(dry_run, &buf[off], ALWAYS); | ||
1265 | off += _emit_RMB(dry_run, &buf[off]); | ||
1266 | off += _emit_ST(dry_run, &buf[off], ALWAYS); | ||
1267 | off += _emit_WMB(dry_run, &buf[off]); | ||
1268 | } | ||
1269 | } | ||
1270 | |||
1271 | return off; | ||
1272 | } | ||
1273 | |||
1274 | static inline int _ldst_devtomem(unsigned dry_run, u8 buf[], | ||
1275 | const struct _xfer_spec *pxs, int cyc) | ||
1276 | { | ||
1277 | int off = 0; | ||
1278 | |||
1279 | while (cyc--) { | ||
1280 | off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri); | ||
1281 | off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri); | ||
1282 | off += _emit_ST(dry_run, &buf[off], ALWAYS); | ||
1283 | off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri); | ||
1284 | } | ||
1285 | |||
1286 | return off; | ||
1287 | } | ||
1288 | |||
1289 | static inline int _ldst_memtodev(unsigned dry_run, u8 buf[], | ||
1290 | const struct _xfer_spec *pxs, int cyc) | ||
1291 | { | ||
1292 | int off = 0; | ||
1293 | |||
1294 | while (cyc--) { | ||
1295 | off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri); | ||
1296 | off += _emit_LD(dry_run, &buf[off], ALWAYS); | ||
1297 | off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri); | ||
1298 | off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri); | ||
1299 | } | ||
1300 | |||
1301 | return off; | ||
1302 | } | ||
1303 | |||
1304 | static int _bursts(unsigned dry_run, u8 buf[], | ||
1305 | const struct _xfer_spec *pxs, int cyc) | ||
1306 | { | ||
1307 | int off = 0; | ||
1308 | |||
1309 | switch (pxs->r->rqtype) { | ||
1310 | case MEMTODEV: | ||
1311 | off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc); | ||
1312 | break; | ||
1313 | case DEVTOMEM: | ||
1314 | off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc); | ||
1315 | break; | ||
1316 | case MEMTOMEM: | ||
1317 | off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc); | ||
1318 | break; | ||
1319 | default: | ||
1320 | off += 0x40000000; /* Scare off the Client */ | ||
1321 | break; | ||
1322 | } | ||
1323 | |||
1324 | return off; | ||
1325 | } | ||
1326 | |||
1327 | /* Returns bytes consumed and updates bursts */ | ||
1328 | static inline int _loop(unsigned dry_run, u8 buf[], | ||
1329 | unsigned long *bursts, const struct _xfer_spec *pxs) | ||
1330 | { | ||
1331 | int cyc, cycmax, szlp, szlpend, szbrst, off; | ||
1332 | unsigned lcnt0, lcnt1, ljmp0, ljmp1; | ||
1333 | struct _arg_LPEND lpend; | ||
1334 | |||
1335 | /* Max iterations possible in DMALP is 256 */ | ||
1336 | if (*bursts >= 256*256) { | ||
1337 | lcnt1 = 256; | ||
1338 | lcnt0 = 256; | ||
1339 | cyc = *bursts / lcnt1 / lcnt0; | ||
1340 | } else if (*bursts > 256) { | ||
1341 | lcnt1 = 256; | ||
1342 | lcnt0 = *bursts / lcnt1; | ||
1343 | cyc = 1; | ||
1344 | } else { | ||
1345 | lcnt1 = *bursts; | ||
1346 | lcnt0 = 0; | ||
1347 | cyc = 1; | ||
1348 | } | ||
1349 | |||
1350 | szlp = _emit_LP(1, buf, 0, 0); | ||
1351 | szbrst = _bursts(1, buf, pxs, 1); | ||
1352 | |||
1353 | lpend.cond = ALWAYS; | ||
1354 | lpend.forever = false; | ||
1355 | lpend.loop = 0; | ||
1356 | lpend.bjump = 0; | ||
1357 | szlpend = _emit_LPEND(1, buf, &lpend); | ||
1358 | |||
1359 | if (lcnt0) { | ||
1360 | szlp *= 2; | ||
1361 | szlpend *= 2; | ||
1362 | } | ||
1363 | |||
1364 | /* | ||
1365 | * Max bursts that we can unroll due to limit on the | ||
1366 | * size of backward jump that can be encoded in DMALPEND | ||
1367 | * which is 8-bits and hence 255 | ||
1368 | */ | ||
1369 | cycmax = (255 - (szlp + szlpend)) / szbrst; | ||
1370 | |||
1371 | cyc = (cycmax < cyc) ? cycmax : cyc; | ||
1372 | |||
1373 | off = 0; | ||
1374 | |||
1375 | if (lcnt0) { | ||
1376 | off += _emit_LP(dry_run, &buf[off], 0, lcnt0); | ||
1377 | ljmp0 = off; | ||
1378 | } | ||
1379 | |||
1380 | off += _emit_LP(dry_run, &buf[off], 1, lcnt1); | ||
1381 | ljmp1 = off; | ||
1382 | |||
1383 | off += _bursts(dry_run, &buf[off], pxs, cyc); | ||
1384 | |||
1385 | lpend.cond = ALWAYS; | ||
1386 | lpend.forever = false; | ||
1387 | lpend.loop = 1; | ||
1388 | lpend.bjump = off - ljmp1; | ||
1389 | off += _emit_LPEND(dry_run, &buf[off], &lpend); | ||
1390 | |||
1391 | if (lcnt0) { | ||
1392 | lpend.cond = ALWAYS; | ||
1393 | lpend.forever = false; | ||
1394 | lpend.loop = 0; | ||
1395 | lpend.bjump = off - ljmp0; | ||
1396 | off += _emit_LPEND(dry_run, &buf[off], &lpend); | ||
1397 | } | ||
1398 | |||
1399 | *bursts = lcnt1 * cyc; | ||
1400 | if (lcnt0) | ||
1401 | *bursts *= lcnt0; | ||
1402 | |||
1403 | return off; | ||
1404 | } | ||
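Two hardware limits shape _loop(): DMALP iterates at most 256 times, and DMALPEND encodes its backward jump in 8 bits, so at most 255 bytes of loop body can be unrolled. The sketch below isolates just that splitting arithmetic; the example input assumes the 2-byte mem-to-mem cycle (one DMALD plus one DMAST on lock-up-free revisions).

#include <stdio.h>

#define SZ_DMALP	2
#define SZ_DMALPEND	2

/*
 * How many bursts a single generated loop block covers, mirroring the
 * splitting and the cycmax clamp in _loop(). szbrst is the size in bytes
 * of one load/store cycle for the transfer type.
 */
static unsigned long loop_covers(unsigned long bursts, int szbrst)
{
	unsigned long lcnt0, lcnt1, covered;
	int cyc, cycmax, szlp = SZ_DMALP, szlpend = SZ_DMALPEND;

	if (bursts >= 256 * 256) {
		lcnt1 = 256;
		lcnt0 = 256;
		cyc = bursts / lcnt1 / lcnt0;
	} else if (bursts > 256) {
		lcnt1 = 256;
		lcnt0 = bursts / lcnt1;
		cyc = 1;
	} else {
		lcnt1 = bursts;
		lcnt0 = 0;
		cyc = 1;
	}

	if (lcnt0) {
		szlp *= 2;	/* nested loops need two DMALPs ... */
		szlpend *= 2;	/* ... and two DMALPENDs */
	}

	/* DMALPEND's backward jump is 8 bits, so at most 255 bytes of body */
	cycmax = (255 - (szlp + szlpend)) / szbrst;
	if (cyc > cycmax)
		cyc = cycmax;

	covered = lcnt1 * cyc;
	if (lcnt0)
		covered *= lcnt0;

	return covered;
}

int main(void)
{
	/* 1,000,000 bursts of mem-to-mem: 2 bytes per cycle (DMALD + DMAST) */
	printf("covered: %lu of 1000000 bursts\n", loop_covers(1000000, 2));
	return 0;
}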
1405 | |||
1406 | static inline int _setup_loops(unsigned dry_run, u8 buf[], | ||
1407 | const struct _xfer_spec *pxs) | ||
1408 | { | ||
1409 | struct pl330_xfer *x = pxs->x; | ||
1410 | u32 ccr = pxs->ccr; | ||
1411 | unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr); | ||
1412 | int off = 0; | ||
1413 | |||
1414 | while (bursts) { | ||
1415 | c = bursts; | ||
1416 | off += _loop(dry_run, &buf[off], &c, pxs); | ||
1417 | bursts -= c; | ||
1418 | } | ||
1419 | |||
1420 | return off; | ||
1421 | } | ||
1422 | |||
1423 | static inline int _setup_xfer(unsigned dry_run, u8 buf[], | ||
1424 | const struct _xfer_spec *pxs) | ||
1425 | { | ||
1426 | struct pl330_xfer *x = pxs->x; | ||
1427 | int off = 0; | ||
1428 | |||
1429 | /* DMAMOV SAR, x->src_addr */ | ||
1430 | off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr); | ||
1431 | /* DMAMOV DAR, x->dst_addr */ | ||
1432 | off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr); | ||
1433 | |||
1434 | /* Setup Loop(s) */ | ||
1435 | off += _setup_loops(dry_run, &buf[off], pxs); | ||
1436 | |||
1437 | return off; | ||
1438 | } | ||
1439 | |||
1440 | /* | ||
1441 | * A req is a sequence of one or more xfer units. | ||
1442 | * Returns the number of bytes taken to setup the MC for the req. | ||
1443 | */ | ||
1444 | static int _setup_req(unsigned dry_run, struct pl330_thread *thrd, | ||
1445 | unsigned index, struct _xfer_spec *pxs) | ||
1446 | { | ||
1447 | struct _pl330_req *req = &thrd->req[index]; | ||
1448 | struct pl330_xfer *x; | ||
1449 | u8 *buf = req->mc_cpu; | ||
1450 | int off = 0; | ||
1451 | |||
1452 | PL330_DBGMC_START(req->mc_bus); | ||
1453 | |||
1454 | /* DMAMOV CCR, ccr */ | ||
1455 | off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr); | ||
1456 | |||
1457 | x = pxs->r->x; | ||
1458 | do { | ||
1459 | /* Error if xfer length is not aligned at burst size */ | ||
1460 | if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr))) | ||
1461 | return -EINVAL; | ||
1462 | |||
1463 | pxs->x = x; | ||
1464 | off += _setup_xfer(dry_run, &buf[off], pxs); | ||
1465 | |||
1466 | x = x->next; | ||
1467 | } while (x); | ||
1468 | |||
1469 | /* DMASEV peripheral/event */ | ||
1470 | off += _emit_SEV(dry_run, &buf[off], thrd->ev); | ||
1471 | /* DMAEND */ | ||
1472 | off += _emit_END(dry_run, &buf[off]); | ||
1473 | |||
1474 | return off; | ||
1475 | } | ||
1476 | |||
1477 | static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc) | ||
1478 | { | ||
1479 | u32 ccr = 0; | ||
1480 | |||
1481 | if (rqc->src_inc) | ||
1482 | ccr |= CC_SRCINC; | ||
1483 | |||
1484 | if (rqc->dst_inc) | ||
1485 | ccr |= CC_DSTINC; | ||
1486 | |||
1487 | /* We set same protection levels for Src and DST for now */ | ||
1488 | if (rqc->privileged) | ||
1489 | ccr |= CC_SRCPRI | CC_DSTPRI; | ||
1490 | if (rqc->nonsecure) | ||
1491 | ccr |= CC_SRCNS | CC_DSTNS; | ||
1492 | if (rqc->insnaccess) | ||
1493 | ccr |= CC_SRCIA | CC_DSTIA; | ||
1494 | |||
1495 | ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT); | ||
1496 | ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT); | ||
1497 | |||
1498 | ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT); | ||
1499 | ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT); | ||
1500 | |||
1501 | ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT); | ||
1502 | ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT); | ||
1503 | |||
1504 | ccr |= (rqc->swap << CC_SWAP_SHFT); | ||
1505 | |||
1506 | return ccr; | ||
1507 | } | ||
1508 | |||
1509 | static inline bool _is_valid(u32 ccr) | ||
1510 | { | ||
1511 | enum pl330_dstcachectrl dcctl; | ||
1512 | enum pl330_srccachectrl scctl; | ||
1513 | |||
1514 | dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK; | ||
1515 | scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK; | ||
1516 | |||
1517 | if (dcctl == DINVALID1 || dcctl == DINVALID2 | ||
1518 | || scctl == SINVALID1 || scctl == SINVALID2) | ||
1519 | return false; | ||
1520 | else | ||
1521 | return true; | ||
1522 | } | ||
1523 | |||
1524 | /* | ||
1525 | * Submit a list of xfers after which the client wants notification. | ||
1526 | * Client is not notified after each xfer unit, just once after all | ||
1527 | * xfer units are done or some error occurs. | ||
1528 | */ | ||
1529 | static int pl330_submit_req(void *ch_id, struct pl330_req *r) | ||
1530 | { | ||
1531 | struct pl330_thread *thrd = ch_id; | ||
1532 | struct pl330_dmac *pl330; | ||
1533 | struct pl330_info *pi; | ||
1534 | struct _xfer_spec xs; | ||
1535 | unsigned long flags; | ||
1536 | void __iomem *regs; | ||
1537 | unsigned idx; | ||
1538 | u32 ccr; | ||
1539 | int ret = 0; | ||
1540 | |||
1541 | /* No Req or Unacquired Channel or DMAC */ | ||
1542 | if (!r || !thrd || thrd->free) | ||
1543 | return -EINVAL; | ||
1544 | |||
1545 | pl330 = thrd->dmac; | ||
1546 | pi = pl330->pinfo; | ||
1547 | regs = pi->base; | ||
1548 | |||
1549 | if (pl330->state == DYING | ||
1550 | || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) { | ||
1551 | dev_info(thrd->dmac->pinfo->dev, "%s:%d\n", | ||
1552 | __func__, __LINE__); | ||
1553 | return -EAGAIN; | ||
1554 | } | ||
1555 | |||
1556 | /* If request for non-existing peripheral */ | ||
1557 | if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) { | ||
1558 | dev_info(thrd->dmac->pinfo->dev, | ||
1559 | "%s:%d Invalid peripheral(%u)!\n", | ||
1560 | __func__, __LINE__, r->peri); | ||
1561 | return -EINVAL; | ||
1562 | } | ||
1563 | |||
1564 | spin_lock_irqsave(&pl330->lock, flags); | ||
1565 | |||
1566 | if (_queue_full(thrd)) { | ||
1567 | ret = -EAGAIN; | ||
1568 | goto xfer_exit; | ||
1569 | } | ||
1570 | |||
1571 | /* Prefer Secure Channel */ | ||
1572 | if (!_manager_ns(thrd)) | ||
1573 | r->cfg->nonsecure = 0; | ||
1574 | else | ||
1575 | r->cfg->nonsecure = 1; | ||
1576 | |||
1577 | /* Use last settings, if not provided */ | ||
1578 | if (r->cfg) | ||
1579 | ccr = _prepare_ccr(r->cfg); | ||
1580 | else | ||
1581 | ccr = readl(regs + CC(thrd->id)); | ||
1582 | |||
1583 | /* If this req doesn't have valid xfer settings */ | ||
1584 | if (!_is_valid(ccr)) { | ||
1585 | ret = -EINVAL; | ||
1586 | dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n", | ||
1587 | __func__, __LINE__, ccr); | ||
1588 | goto xfer_exit; | ||
1589 | } | ||
1590 | |||
1591 | idx = IS_FREE(&thrd->req[0]) ? 0 : 1; | ||
1592 | |||
1593 | xs.ccr = ccr; | ||
1594 | xs.r = r; | ||
1595 | |||
1596 | /* First dry run to check if req is acceptable */ | ||
1597 | ret = _setup_req(1, thrd, idx, &xs); | ||
1598 | if (ret < 0) | ||
1599 | goto xfer_exit; | ||
1600 | |||
1601 | if (ret > pi->mcbufsz / 2) { | ||
1602 | dev_info(thrd->dmac->pinfo->dev, | ||
1603 | "%s:%d Trying increasing mcbufsz\n", | ||
1604 | __func__, __LINE__); | ||
1605 | ret = -ENOMEM; | ||
1606 | goto xfer_exit; | ||
1607 | } | ||
1608 | |||
1609 | /* Hook the request */ | ||
1610 | thrd->lstenq = idx; | ||
1611 | thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs); | ||
1612 | thrd->req[idx].r = r; | ||
1613 | |||
1614 | ret = 0; | ||
1615 | |||
1616 | xfer_exit: | ||
1617 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1618 | |||
1619 | return ret; | ||
1620 | } | ||
1621 | |||
1622 | static void pl330_dotask(unsigned long data) | ||
1623 | { | ||
1624 | struct pl330_dmac *pl330 = (struct pl330_dmac *) data; | ||
1625 | struct pl330_info *pi = pl330->pinfo; | ||
1626 | unsigned long flags; | ||
1627 | int i; | ||
1628 | |||
1629 | spin_lock_irqsave(&pl330->lock, flags); | ||
1630 | |||
1631 | /* The DMAC itself gone nuts */ | ||
1632 | if (pl330->dmac_tbd.reset_dmac) { | ||
1633 | pl330->state = DYING; | ||
1634 | /* Reset the manager too */ | ||
1635 | pl330->dmac_tbd.reset_mngr = true; | ||
1636 | /* Clear the reset flag */ | ||
1637 | pl330->dmac_tbd.reset_dmac = false; | ||
1638 | } | ||
1639 | |||
1640 | if (pl330->dmac_tbd.reset_mngr) { | ||
1641 | _stop(pl330->manager); | ||
1642 | /* Reset all channels */ | ||
1643 | pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1; | ||
1644 | /* Clear the reset flag */ | ||
1645 | pl330->dmac_tbd.reset_mngr = false; | ||
1646 | } | ||
1647 | |||
1648 | for (i = 0; i < pi->pcfg.num_chan; i++) { | ||
1649 | |||
1650 | if (pl330->dmac_tbd.reset_chan & (1 << i)) { | ||
1651 | struct pl330_thread *thrd = &pl330->channels[i]; | ||
1652 | void __iomem *regs = pi->base; | ||
1653 | enum pl330_op_err err; | ||
1654 | |||
1655 | _stop(thrd); | ||
1656 | |||
1657 | if (readl(regs + FSC) & (1 << thrd->id)) | ||
1658 | err = PL330_ERR_FAIL; | ||
1659 | else | ||
1660 | err = PL330_ERR_ABORT; | ||
1661 | |||
1662 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1663 | |||
1664 | _callback(thrd->req[1 - thrd->lstenq].r, err); | ||
1665 | _callback(thrd->req[thrd->lstenq].r, err); | ||
1666 | |||
1667 | spin_lock_irqsave(&pl330->lock, flags); | ||
1668 | |||
1669 | thrd->req[0].r = NULL; | ||
1670 | thrd->req[1].r = NULL; | ||
1671 | mark_free(thrd, 0); | ||
1672 | mark_free(thrd, 1); | ||
1673 | |||
1674 | /* Clear the reset flag */ | ||
1675 | pl330->dmac_tbd.reset_chan &= ~(1 << i); | ||
1676 | } | ||
1677 | } | ||
1678 | |||
1679 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1680 | |||
1681 | return; | ||
1682 | } | ||
1683 | |||
1684 | /* Returns 1 if state was updated, 0 otherwise */ | ||
1685 | static int pl330_update(const struct pl330_info *pi) | ||
1686 | { | ||
1687 | struct _pl330_req *rqdone; | ||
1688 | struct pl330_dmac *pl330; | ||
1689 | unsigned long flags; | ||
1690 | void __iomem *regs; | ||
1691 | u32 val; | ||
1692 | int id, ev, ret = 0; | ||
1693 | |||
1694 | if (!pi || !pi->pl330_data) | ||
1695 | return 0; | ||
1696 | |||
1697 | regs = pi->base; | ||
1698 | pl330 = pi->pl330_data; | ||
1699 | |||
1700 | spin_lock_irqsave(&pl330->lock, flags); | ||
1701 | |||
1702 | val = readl(regs + FSM) & 0x1; | ||
1703 | if (val) | ||
1704 | pl330->dmac_tbd.reset_mngr = true; | ||
1705 | else | ||
1706 | pl330->dmac_tbd.reset_mngr = false; | ||
1707 | |||
1708 | val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1); | ||
1709 | pl330->dmac_tbd.reset_chan |= val; | ||
1710 | if (val) { | ||
1711 | int i = 0; | ||
1712 | while (i < pi->pcfg.num_chan) { | ||
1713 | if (val & (1 << i)) { | ||
1714 | dev_info(pi->dev, | ||
1715 | "Reset Channel-%d\t CS-%x FTC-%x\n", | ||
1716 | i, readl(regs + CS(i)), | ||
1717 | readl(regs + FTC(i))); | ||
1718 | _stop(&pl330->channels[i]); | ||
1719 | } | ||
1720 | i++; | ||
1721 | } | ||
1722 | } | ||
1723 | |||
1724 | /* Check which event happened i.e, thread notified */ | ||
1725 | val = readl(regs + ES); | ||
1726 | if (pi->pcfg.num_events < 32 | ||
1727 | && val & ~((1 << pi->pcfg.num_events) - 1)) { | ||
1728 | pl330->dmac_tbd.reset_dmac = true; | ||
1729 | dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__); | ||
1730 | ret = 1; | ||
1731 | goto updt_exit; | ||
1732 | } | ||
1733 | |||
1734 | for (ev = 0; ev < pi->pcfg.num_events; ev++) { | ||
1735 | if (val & (1 << ev)) { /* Event occurred */ | ||
1736 | struct pl330_thread *thrd; | ||
1737 | u32 inten = readl(regs + INTEN); | ||
1738 | int active; | ||
1739 | |||
1740 | /* Clear the event */ | ||
1741 | if (inten & (1 << ev)) | ||
1742 | writel(1 << ev, regs + INTCLR); | ||
1743 | |||
1744 | ret = 1; | ||
1745 | |||
1746 | id = pl330->events[ev]; | ||
1747 | |||
1748 | thrd = &pl330->channels[id]; | ||
1749 | |||
1750 | active = thrd->req_running; | ||
1751 | if (active == -1) /* Aborted */ | ||
1752 | continue; | ||
1753 | |||
1754 | rqdone = &thrd->req[active]; | ||
1755 | mark_free(thrd, active); | ||
1756 | |||
1757 | /* Get going again ASAP */ | ||
1758 | _start(thrd); | ||
1759 | |||
1760 | /* For now, just make a list of callbacks to be done */ | ||
1761 | list_add_tail(&rqdone->rqd, &pl330->req_done); | ||
1762 | } | ||
1763 | } | ||
1764 | |||
1765 | /* Now that we are in no hurry, do the callbacks */ | ||
1766 | while (!list_empty(&pl330->req_done)) { | ||
1767 | struct pl330_req *r; | ||
1768 | |||
1769 | rqdone = container_of(pl330->req_done.next, | ||
1770 | struct _pl330_req, rqd); | ||
1771 | |||
1772 | list_del_init(&rqdone->rqd); | ||
1773 | |||
1774 | /* Detach the req */ | ||
1775 | r = rqdone->r; | ||
1776 | rqdone->r = NULL; | ||
1777 | |||
1778 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1779 | _callback(r, PL330_ERR_NONE); | ||
1780 | spin_lock_irqsave(&pl330->lock, flags); | ||
1781 | } | ||
1782 | |||
1783 | updt_exit: | ||
1784 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1785 | |||
1786 | if (pl330->dmac_tbd.reset_dmac | ||
1787 | || pl330->dmac_tbd.reset_mngr | ||
1788 | || pl330->dmac_tbd.reset_chan) { | ||
1789 | ret = 1; | ||
1790 | tasklet_schedule(&pl330->tasks); | ||
1791 | } | ||
1792 | |||
1793 | return ret; | ||
1794 | } | ||
1795 | |||
1796 | static int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op) | ||
1797 | { | ||
1798 | struct pl330_thread *thrd = ch_id; | ||
1799 | struct pl330_dmac *pl330; | ||
1800 | unsigned long flags; | ||
1801 | int ret = 0, active; | ||
1802 | |||
1803 | if (!thrd || thrd->free || thrd->dmac->state == DYING) | ||
1804 | return -EINVAL; | ||
1805 | |||
1806 | pl330 = thrd->dmac; | ||
1807 | active = thrd->req_running; | ||
1808 | |||
1809 | spin_lock_irqsave(&pl330->lock, flags); | ||
1810 | |||
1811 | switch (op) { | ||
1812 | case PL330_OP_FLUSH: | ||
1813 | /* Make sure the channel is stopped */ | ||
1814 | _stop(thrd); | ||
1815 | |||
1816 | thrd->req[0].r = NULL; | ||
1817 | thrd->req[1].r = NULL; | ||
1818 | mark_free(thrd, 0); | ||
1819 | mark_free(thrd, 1); | ||
1820 | break; | ||
1821 | |||
1822 | case PL330_OP_ABORT: | ||
1823 | /* Make sure the channel is stopped */ | ||
1824 | _stop(thrd); | ||
1825 | |||
1826 | /* ABORT is only for the active req */ | ||
1827 | if (active == -1) | ||
1828 | break; | ||
1829 | |||
1830 | thrd->req[active].r = NULL; | ||
1831 | mark_free(thrd, active); | ||
1832 | |||
1833 | /* Start the next */ | ||
1834 | case PL330_OP_START: | ||
1835 | if ((active == -1) && !_start(thrd)) | ||
1836 | ret = -EIO; | ||
1837 | break; | ||
1838 | |||
1839 | default: | ||
1840 | ret = -EINVAL; | ||
1841 | } | ||
1842 | |||
1843 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1844 | return ret; | ||
1845 | } | ||
1846 | |||
1847 | /* Reserve an event */ | ||
1848 | static inline int _alloc_event(struct pl330_thread *thrd) | ||
1849 | { | ||
1850 | struct pl330_dmac *pl330 = thrd->dmac; | ||
1851 | struct pl330_info *pi = pl330->pinfo; | ||
1852 | int ev; | ||
1853 | |||
1854 | for (ev = 0; ev < pi->pcfg.num_events; ev++) | ||
1855 | if (pl330->events[ev] == -1) { | ||
1856 | pl330->events[ev] = thrd->id; | ||
1857 | return ev; | ||
1858 | } | ||
1859 | |||
1860 | return -1; | ||
1861 | } | ||
1862 | |||
1863 | static bool _chan_ns(const struct pl330_info *pi, int i) | ||
1864 | { | ||
1865 | return pi->pcfg.irq_ns & (1 << i); | ||
1866 | } | ||
1867 | |||
1868 | /* Upon success, returns IdentityToken for the | ||
1869 | * allocated channel, NULL otherwise. | ||
1870 | */ | ||
1871 | static void *pl330_request_channel(const struct pl330_info *pi) | ||
1872 | { | ||
1873 | struct pl330_thread *thrd = NULL; | ||
1874 | struct pl330_dmac *pl330; | ||
1875 | unsigned long flags; | ||
1876 | int chans, i; | ||
1877 | |||
1878 | if (!pi || !pi->pl330_data) | ||
1879 | return NULL; | ||
1880 | |||
1881 | pl330 = pi->pl330_data; | ||
1882 | |||
1883 | if (pl330->state == DYING) | ||
1884 | return NULL; | ||
1885 | |||
1886 | chans = pi->pcfg.num_chan; | ||
1887 | |||
1888 | spin_lock_irqsave(&pl330->lock, flags); | ||
1889 | |||
1890 | for (i = 0; i < chans; i++) { | ||
1891 | thrd = &pl330->channels[i]; | ||
1892 | if ((thrd->free) && (!_manager_ns(thrd) || | ||
1893 | _chan_ns(pi, i))) { | ||
1894 | thrd->ev = _alloc_event(thrd); | ||
1895 | if (thrd->ev >= 0) { | ||
1896 | thrd->free = false; | ||
1897 | thrd->lstenq = 1; | ||
1898 | thrd->req[0].r = NULL; | ||
1899 | mark_free(thrd, 0); | ||
1900 | thrd->req[1].r = NULL; | ||
1901 | mark_free(thrd, 1); | ||
1902 | break; | ||
1903 | } | ||
1904 | } | ||
1905 | thrd = NULL; | ||
1906 | } | ||
1907 | |||
1908 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1909 | |||
1910 | return thrd; | ||
1911 | } | ||
1912 | |||
1913 | /* Release an event */ | ||
1914 | static inline void _free_event(struct pl330_thread *thrd, int ev) | ||
1915 | { | ||
1916 | struct pl330_dmac *pl330 = thrd->dmac; | ||
1917 | struct pl330_info *pi = pl330->pinfo; | ||
1918 | |||
1919 | /* If the event is valid and was held by the thread */ | ||
1920 | if (ev >= 0 && ev < pi->pcfg.num_events | ||
1921 | && pl330->events[ev] == thrd->id) | ||
1922 | pl330->events[ev] = -1; | ||
1923 | } | ||
1924 | |||
1925 | static void pl330_release_channel(void *ch_id) | ||
1926 | { | ||
1927 | struct pl330_thread *thrd = ch_id; | ||
1928 | struct pl330_dmac *pl330; | ||
1929 | unsigned long flags; | ||
1930 | |||
1931 | if (!thrd || thrd->free) | ||
1932 | return; | ||
1933 | |||
1934 | _stop(thrd); | ||
1935 | |||
1936 | _callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT); | ||
1937 | _callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT); | ||
1938 | |||
1939 | pl330 = thrd->dmac; | ||
1940 | |||
1941 | spin_lock_irqsave(&pl330->lock, flags); | ||
1942 | _free_event(thrd, thrd->ev); | ||
1943 | thrd->free = true; | ||
1944 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1945 | } | ||
1946 | |||
1947 | /* Initialize the structure for PL330 configuration that can be used | ||
1948 | * by the client driver to make best use of the DMAC | ||
1949 | */ | ||
1950 | static void read_dmac_config(struct pl330_info *pi) | ||
1951 | { | ||
1952 | void __iomem *regs = pi->base; | ||
1953 | u32 val; | ||
1954 | |||
1955 | val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT; | ||
1956 | val &= CRD_DATA_WIDTH_MASK; | ||
1957 | pi->pcfg.data_bus_width = 8 * (1 << val); | ||
1958 | |||
1959 | val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT; | ||
1960 | val &= CRD_DATA_BUFF_MASK; | ||
1961 | pi->pcfg.data_buf_dep = val + 1; | ||
1962 | |||
1963 | val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT; | ||
1964 | val &= CR0_NUM_CHANS_MASK; | ||
1965 | val += 1; | ||
1966 | pi->pcfg.num_chan = val; | ||
1967 | |||
1968 | val = readl(regs + CR0); | ||
1969 | if (val & CR0_PERIPH_REQ_SET) { | ||
1970 | val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK; | ||
1971 | val += 1; | ||
1972 | pi->pcfg.num_peri = val; | ||
1973 | pi->pcfg.peri_ns = readl(regs + CR4); | ||
1974 | } else { | ||
1975 | pi->pcfg.num_peri = 0; | ||
1976 | } | ||
1977 | |||
1978 | val = readl(regs + CR0); | ||
1979 | if (val & CR0_BOOT_MAN_NS) | ||
1980 | pi->pcfg.mode |= DMAC_MODE_NS; | ||
1981 | else | ||
1982 | pi->pcfg.mode &= ~DMAC_MODE_NS; | ||
1983 | |||
1984 | val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT; | ||
1985 | val &= CR0_NUM_EVENTS_MASK; | ||
1986 | val += 1; | ||
1987 | pi->pcfg.num_events = val; | ||
1988 | |||
1989 | pi->pcfg.irq_ns = readl(regs + CR3); | ||
1990 | |||
1991 | pi->pcfg.periph_id = get_id(pi, PERIPH_ID); | ||
1992 | pi->pcfg.pcell_id = get_id(pi, PCELL_ID); | ||
1993 | } | ||
1994 | |||
1995 | static inline void _reset_thread(struct pl330_thread *thrd) | ||
1996 | { | ||
1997 | struct pl330_dmac *pl330 = thrd->dmac; | ||
1998 | struct pl330_info *pi = pl330->pinfo; | ||
1999 | |||
2000 | thrd->req[0].mc_cpu = pl330->mcode_cpu | ||
2001 | + (thrd->id * pi->mcbufsz); | ||
2002 | thrd->req[0].mc_bus = pl330->mcode_bus | ||
2003 | + (thrd->id * pi->mcbufsz); | ||
2004 | thrd->req[0].r = NULL; | ||
2005 | mark_free(thrd, 0); | ||
2006 | |||
2007 | thrd->req[1].mc_cpu = thrd->req[0].mc_cpu | ||
2008 | + pi->mcbufsz / 2; | ||
2009 | thrd->req[1].mc_bus = thrd->req[0].mc_bus | ||
2010 | + pi->mcbufsz / 2; | ||
2011 | thrd->req[1].r = NULL; | ||
2012 | mark_free(thrd, 1); | ||
2013 | } | ||
2014 | |||
2015 | static int dmac_alloc_threads(struct pl330_dmac *pl330) | ||
2016 | { | ||
2017 | struct pl330_info *pi = pl330->pinfo; | ||
2018 | int chans = pi->pcfg.num_chan; | ||
2019 | struct pl330_thread *thrd; | ||
2020 | int i; | ||
2021 | |||
2022 | /* Allocate 1 Manager and 'chans' Channel threads */ | ||
2023 | pl330->channels = kzalloc((1 + chans) * sizeof(*thrd), | ||
2024 | GFP_KERNEL); | ||
2025 | if (!pl330->channels) | ||
2026 | return -ENOMEM; | ||
2027 | |||
2028 | /* Init Channel threads */ | ||
2029 | for (i = 0; i < chans; i++) { | ||
2030 | thrd = &pl330->channels[i]; | ||
2031 | thrd->id = i; | ||
2032 | thrd->dmac = pl330; | ||
2033 | _reset_thread(thrd); | ||
2034 | thrd->free = true; | ||
2035 | } | ||
2036 | |||
2037 | /* MANAGER is indexed at the end */ | ||
2038 | thrd = &pl330->channels[chans]; | ||
2039 | thrd->id = chans; | ||
2040 | thrd->dmac = pl330; | ||
2041 | thrd->free = false; | ||
2042 | pl330->manager = thrd; | ||
2043 | |||
2044 | return 0; | ||
2045 | } | ||
2046 | |||
2047 | static int dmac_alloc_resources(struct pl330_dmac *pl330) | ||
2048 | { | ||
2049 | struct pl330_info *pi = pl330->pinfo; | ||
2050 | int chans = pi->pcfg.num_chan; | ||
2051 | int ret; | ||
2052 | |||
2053 | /* | ||
2054 | * Alloc MicroCode buffer for 'chans' Channel threads. | ||
2055 | * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN) | ||
2056 | */ | ||
2057 | pl330->mcode_cpu = dma_alloc_coherent(pi->dev, | ||
2058 | chans * pi->mcbufsz, | ||
2059 | &pl330->mcode_bus, GFP_KERNEL); | ||
2060 | if (!pl330->mcode_cpu) { | ||
2061 | dev_err(pi->dev, "%s:%d Can't allocate memory!\n", | ||
2062 | __func__, __LINE__); | ||
2063 | return -ENOMEM; | ||
2064 | } | ||
2065 | |||
2066 | ret = dmac_alloc_threads(pl330); | ||
2067 | if (ret) { | ||
2068 | dev_err(pi->dev, "%s:%d Can't create channels for DMAC!\n", | ||
2069 | __func__, __LINE__); | ||
2070 | dma_free_coherent(pi->dev, | ||
2071 | chans * pi->mcbufsz, | ||
2072 | pl330->mcode_cpu, pl330->mcode_bus); | ||
2073 | return ret; | ||
2074 | } | ||
2075 | |||
2076 | return 0; | ||
2077 | } | ||
2078 | |||
2079 | static int pl330_add(struct pl330_info *pi) | ||
2080 | { | ||
2081 | struct pl330_dmac *pl330; | ||
2082 | void __iomem *regs; | ||
2083 | int i, ret; | ||
2084 | |||
2085 | if (!pi || !pi->dev) | ||
2086 | return -EINVAL; | ||
2087 | |||
2088 | /* If already added */ | ||
2089 | if (pi->pl330_data) | ||
2090 | return -EINVAL; | ||
2091 | |||
2092 | /* | ||
2093 | * If the SoC can perform reset on the DMAC, then do it | ||
2094 | * before reading its configuration. | ||
2095 | */ | ||
2096 | if (pi->dmac_reset) | ||
2097 | pi->dmac_reset(pi); | ||
2098 | |||
2099 | regs = pi->base; | ||
2100 | |||
2101 | /* Check if we can handle this DMAC */ | ||
2102 | if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL | ||
2103 | || get_id(pi, PCELL_ID) != PCELL_ID_VAL) { | ||
2104 | dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n", | ||
2105 | get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID)); | ||
2106 | return -EINVAL; | ||
2107 | } | ||
2108 | |||
2109 | /* Read the configuration of the DMAC */ | ||
2110 | read_dmac_config(pi); | ||
2111 | |||
2112 | if (pi->pcfg.num_events == 0) { | ||
2113 | dev_err(pi->dev, "%s:%d Can't work without events!\n", | ||
2114 | __func__, __LINE__); | ||
2115 | return -EINVAL; | ||
2116 | } | ||
2117 | |||
2118 | pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL); | ||
2119 | if (!pl330) { | ||
2120 | dev_err(pi->dev, "%s:%d Can't allocate memory!\n", | ||
2121 | __func__, __LINE__); | ||
2122 | return -ENOMEM; | ||
2123 | } | ||
2124 | |||
2125 | /* Assign the info structure and private data */ | ||
2126 | pl330->pinfo = pi; | ||
2127 | pi->pl330_data = pl330; | ||
2128 | |||
2129 | spin_lock_init(&pl330->lock); | ||
2130 | |||
2131 | INIT_LIST_HEAD(&pl330->req_done); | ||
2132 | |||
2133 | /* Use default MC buffer size if not provided */ | ||
2134 | if (!pi->mcbufsz) | ||
2135 | pi->mcbufsz = MCODE_BUFF_PER_REQ * 2; | ||
2136 | |||
2137 | /* Mark all events as free */ | ||
2138 | for (i = 0; i < pi->pcfg.num_events; i++) | ||
2139 | pl330->events[i] = -1; | ||
2140 | |||
2141 | /* Allocate resources needed by the DMAC */ | ||
2142 | ret = dmac_alloc_resources(pl330); | ||
2143 | if (ret) { | ||
2144 | dev_err(pi->dev, "Unable to create channels for DMAC\n"); | ||
2145 | kfree(pl330); | ||
2146 | return ret; | ||
2147 | } | ||
2148 | |||
2149 | tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330); | ||
2150 | |||
2151 | pl330->state = INIT; | ||
2152 | |||
2153 | return 0; | ||
2154 | } | ||
2155 | |||
2156 | static int dmac_free_threads(struct pl330_dmac *pl330) | ||
2157 | { | ||
2158 | struct pl330_info *pi = pl330->pinfo; | ||
2159 | int chans = pi->pcfg.num_chan; | ||
2160 | struct pl330_thread *thrd; | ||
2161 | int i; | ||
2162 | |||
2163 | /* Release Channel threads */ | ||
2164 | for (i = 0; i < chans; i++) { | ||
2165 | thrd = &pl330->channels[i]; | ||
2166 | pl330_release_channel((void *)thrd); | ||
2167 | } | ||
2168 | |||
2169 | /* Free memory */ | ||
2170 | kfree(pl330->channels); | ||
2171 | |||
2172 | return 0; | ||
2173 | } | ||
2174 | |||
2175 | static void dmac_free_resources(struct pl330_dmac *pl330) | ||
2176 | { | ||
2177 | struct pl330_info *pi = pl330->pinfo; | ||
2178 | int chans = pi->pcfg.num_chan; | ||
2179 | |||
2180 | dmac_free_threads(pl330); | ||
2181 | |||
2182 | dma_free_coherent(pi->dev, chans * pi->mcbufsz, | ||
2183 | pl330->mcode_cpu, pl330->mcode_bus); | ||
2184 | } | ||
2185 | |||
2186 | static void pl330_del(struct pl330_info *pi) | ||
2187 | { | ||
2188 | struct pl330_dmac *pl330; | ||
2189 | |||
2190 | if (!pi || !pi->pl330_data) | ||
2191 | return; | ||
2192 | |||
2193 | pl330 = pi->pl330_data; | ||
2194 | |||
2195 | pl330->state = UNINIT; | ||
2196 | |||
2197 | tasklet_kill(&pl330->tasks); | ||
2198 | |||
2199 | /* Free DMAC resources */ | ||
2200 | dmac_free_resources(pl330); | ||
2201 | |||
2202 | kfree(pl330); | ||
2203 | pi->pl330_data = NULL; | ||
2204 | } | ||
2205 | |||
120 | /* forward declaration */ | 2206 | /* forward declaration */ |
121 | static struct amba_driver pl330_driver; | 2207 | static struct amba_driver pl330_driver; |
122 | 2208 | ||
@@ -234,7 +2320,7 @@ static void pl330_tasklet(unsigned long data) | |||
234 | /* Pick up ripe tomatoes */ | 2320 | /* Pick up ripe tomatoes */ |
235 | list_for_each_entry_safe(desc, _dt, &pch->work_list, node) | 2321 | list_for_each_entry_safe(desc, _dt, &pch->work_list, node) |
236 | if (desc->status == DONE) { | 2322 | if (desc->status == DONE) { |
237 | pch->completed = desc->txd.cookie; | 2323 | dma_cookie_complete(&desc->txd); |
238 | list_move_tail(&desc->node, &list); | 2324 | list_move_tail(&desc->node, &list); |
239 | } | 2325 | } |
240 | 2326 | ||
@@ -305,7 +2391,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan) | |||
305 | 2391 | ||
306 | spin_lock_irqsave(&pch->lock, flags); | 2392 | spin_lock_irqsave(&pch->lock, flags); |
307 | 2393 | ||
308 | pch->completed = chan->cookie = 1; | 2394 | dma_cookie_init(chan); |
309 | pch->cyclic = false; | 2395 | pch->cyclic = false; |
310 | 2396 | ||
311 | pch->pl330_chid = pl330_request_channel(&pdmac->pif); | 2397 | pch->pl330_chid = pl330_request_channel(&pdmac->pif); |
@@ -340,7 +2426,6 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned | |||
340 | /* Mark all desc done */ | 2426 | /* Mark all desc done */ |
341 | list_for_each_entry_safe(desc, _dt, &pch->work_list , node) { | 2427 | list_for_each_entry_safe(desc, _dt, &pch->work_list , node) { |
342 | desc->status = DONE; | 2428 | desc->status = DONE; |
343 | pch->completed = desc->txd.cookie; | ||
344 | list_move_tail(&desc->node, &list); | 2429 | list_move_tail(&desc->node, &list); |
345 | } | 2430 | } |
346 | 2431 | ||
@@ -396,18 +2481,7 @@ static enum dma_status | |||
396 | pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | 2481 | pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, |
397 | struct dma_tx_state *txstate) | 2482 | struct dma_tx_state *txstate) |
398 | { | 2483 | { |
399 | struct dma_pl330_chan *pch = to_pchan(chan); | 2484 | return dma_cookie_status(chan, cookie, txstate); |
400 | dma_cookie_t last_done, last_used; | ||
401 | int ret; | ||
402 | |||
403 | last_done = pch->completed; | ||
404 | last_used = chan->cookie; | ||
405 | |||
406 | ret = dma_async_is_complete(cookie, last_done, last_used); | ||
407 | |||
408 | dma_set_tx_state(txstate, last_done, last_used, 0); | ||
409 | |||
410 | return ret; | ||
411 | } | 2485 | } |
412 | 2486 | ||
413 | static void pl330_issue_pending(struct dma_chan *chan) | 2487 | static void pl330_issue_pending(struct dma_chan *chan) |
@@ -430,26 +2504,16 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) | |||
430 | spin_lock_irqsave(&pch->lock, flags); | 2504 | spin_lock_irqsave(&pch->lock, flags); |
431 | 2505 | ||
432 | /* Assign cookies to all nodes */ | 2506 | /* Assign cookies to all nodes */ |
433 | cookie = tx->chan->cookie; | ||
434 | |||
435 | while (!list_empty(&last->node)) { | 2507 | while (!list_empty(&last->node)) { |
436 | desc = list_entry(last->node.next, struct dma_pl330_desc, node); | 2508 | desc = list_entry(last->node.next, struct dma_pl330_desc, node); |
437 | 2509 | ||
438 | if (++cookie < 0) | 2510 | dma_cookie_assign(&desc->txd); |
439 | cookie = 1; | ||
440 | desc->txd.cookie = cookie; | ||
441 | 2511 | ||
442 | list_move_tail(&desc->node, &pch->work_list); | 2512 | list_move_tail(&desc->node, &pch->work_list); |
443 | } | 2513 | } |
444 | 2514 | ||
445 | if (++cookie < 0) | 2515 | cookie = dma_cookie_assign(&last->txd); |
446 | cookie = 1; | ||
447 | last->txd.cookie = cookie; | ||
448 | |||
449 | list_add_tail(&last->node, &pch->work_list); | 2516 | list_add_tail(&last->node, &pch->work_list); |
450 | |||
451 | tx->chan->cookie = cookie; | ||
452 | |||
453 | spin_unlock_irqrestore(&pch->lock, flags); | 2517 | spin_unlock_irqrestore(&pch->lock, flags); |
454 | 2518 | ||
455 | return cookie; | 2519 | return cookie; |
@@ -553,6 +2617,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) | |||
553 | async_tx_ack(&desc->txd); | 2617 | async_tx_ack(&desc->txd); |
554 | 2618 | ||
555 | desc->req.peri = peri_id ? pch->chan.chan_id : 0; | 2619 | desc->req.peri = peri_id ? pch->chan.chan_id : 0; |
2620 | desc->rqcfg.pcfg = &pch->dmac->pif.pcfg; | ||
556 | 2621 | ||
557 | dma_async_tx_descriptor_init(&desc->txd, &pch->chan); | 2622 | dma_async_tx_descriptor_init(&desc->txd, &pch->chan); |
558 | 2623 | ||
@@ -621,7 +2686,8 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len) | |||
621 | 2686 | ||
622 | static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( | 2687 | static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( |
623 | struct dma_chan *chan, dma_addr_t dma_addr, size_t len, | 2688 | struct dma_chan *chan, dma_addr_t dma_addr, size_t len, |
624 | size_t period_len, enum dma_transfer_direction direction) | 2689 | size_t period_len, enum dma_transfer_direction direction, |
2690 | void *context) | ||
625 | { | 2691 | { |
626 | struct dma_pl330_desc *desc; | 2692 | struct dma_pl330_desc *desc; |
627 | struct dma_pl330_chan *pch = to_pchan(chan); | 2693 | struct dma_pl330_chan *pch = to_pchan(chan); |
@@ -711,7 +2777,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, | |||
711 | static struct dma_async_tx_descriptor * | 2777 | static struct dma_async_tx_descriptor * |
712 | pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 2778 | pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
713 | unsigned int sg_len, enum dma_transfer_direction direction, | 2779 | unsigned int sg_len, enum dma_transfer_direction direction, |
714 | unsigned long flg) | 2780 | unsigned long flg, void *context) |
715 | { | 2781 | { |
716 | struct dma_pl330_desc *first, *desc = NULL; | 2782 | struct dma_pl330_desc *first, *desc = NULL; |
717 | struct dma_pl330_chan *pch = to_pchan(chan); | 2783 | struct dma_pl330_chan *pch = to_pchan(chan); |
@@ -829,7 +2895,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
829 | if (IS_ERR(pdmac->clk)) { | 2895 | if (IS_ERR(pdmac->clk)) { |
830 | dev_err(&adev->dev, "Cannot get operation clock.\n"); | 2896 | dev_err(&adev->dev, "Cannot get operation clock.\n"); |
831 | ret = -EINVAL; | 2897 | ret = -EINVAL; |
832 | goto probe_err1; | 2898 | goto probe_err2; |
833 | } | 2899 | } |
834 | 2900 | ||
835 | amba_set_drvdata(adev, pdmac); | 2901 | amba_set_drvdata(adev, pdmac); |
@@ -843,11 +2909,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
843 | ret = request_irq(irq, pl330_irq_handler, 0, | 2909 | ret = request_irq(irq, pl330_irq_handler, 0, |
844 | dev_name(&adev->dev), pi); | 2910 | dev_name(&adev->dev), pi); |
845 | if (ret) | 2911 | if (ret) |
846 | goto probe_err2; | 2912 | goto probe_err3; |
847 | 2913 | ||
848 | ret = pl330_add(pi); | 2914 | ret = pl330_add(pi); |
849 | if (ret) | 2915 | if (ret) |
850 | goto probe_err3; | 2916 | goto probe_err4; |
851 | 2917 | ||
852 | INIT_LIST_HEAD(&pdmac->desc_pool); | 2918 | INIT_LIST_HEAD(&pdmac->desc_pool); |
853 | spin_lock_init(&pdmac->pool_lock); | 2919 | spin_lock_init(&pdmac->pool_lock); |
@@ -904,7 +2970,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
904 | ret = dma_async_device_register(pd); | 2970 | ret = dma_async_device_register(pd); |
905 | if (ret) { | 2971 | if (ret) { |
906 | dev_err(&adev->dev, "unable to register DMAC\n"); | 2972 | dev_err(&adev->dev, "unable to register DMAC\n"); |
907 | goto probe_err4; | 2973 | goto probe_err5; |
908 | } | 2974 | } |
909 | 2975 | ||
910 | dev_info(&adev->dev, | 2976 | dev_info(&adev->dev, |
@@ -917,10 +2983,15 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
917 | 2983 | ||
918 | return 0; | 2984 | return 0; |
919 | 2985 | ||
920 | probe_err4: | 2986 | probe_err5: |
921 | pl330_del(pi); | 2987 | pl330_del(pi); |
922 | probe_err3: | 2988 | probe_err4: |
923 | free_irq(irq, pi); | 2989 | free_irq(irq, pi); |
2990 | probe_err3: | ||
2991 | #ifndef CONFIG_PM_RUNTIME | ||
2992 | clk_disable(pdmac->clk); | ||
2993 | #endif | ||
2994 | clk_put(pdmac->clk); | ||
924 | probe_err2: | 2995 | probe_err2: |
925 | iounmap(pi->base); | 2996 | iounmap(pi->base); |
926 | probe_err1: | 2997 | probe_err1: |
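Note on the conversion pattern: the pl330 hunks above, like every DMA engine driver in this series, drop their open-coded cookie bookkeeping in favour of shared helpers in drivers/dma/dmaengine.h. A rough sketch of those helpers, inferred from the call sites in this diff rather than copied from the header (treat the exact bodies as an approximation):

static inline void dma_cookie_init(struct dma_chan *chan)
{
	/* Replaces the old "completed = chan->cookie = 1" idiom. */
	chan->cookie = 1;
	chan->completed_cookie = 1;
}

static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan = tx->chan;
	dma_cookie_t cookie;

	/* Called with the channel lock held: bump the channel cookie,
	 * wrap past the negative error range, stamp the descriptor. */
	cookie = chan->cookie + 1;
	if (cookie < 1)
		cookie = 1;
	tx->cookie = chan->cookie = cookie;

	return cookie;
}

static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
{
	/* Record this descriptor as the most recently completed one. */
	tx->chan->completed_cookie = tx->cookie;
}

static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *state)
{
	dma_cookie_t used = chan->cookie;
	dma_cookie_t complete = chan->completed_cookie;

	if (state) {
		state->last = complete;
		state->used = used;
		state->residue = 0;
	}
	return dma_async_is_complete(cookie, complete, used);
}

Because completed_cookie now lives in struct dma_chan itself, the per-driver completed/completed_cookie fields removed in the hunks below become redundant.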
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index fc457a7e8832..ced98826684a 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <asm/dcr.h> | 46 | #include <asm/dcr.h> |
47 | #include <asm/dcr-regs.h> | 47 | #include <asm/dcr-regs.h> |
48 | #include "adma.h" | 48 | #include "adma.h" |
49 | #include "../dmaengine.h" | ||
49 | 50 | ||
50 | enum ppc_adma_init_code { | 51 | enum ppc_adma_init_code { |
51 | PPC_ADMA_INIT_OK = 0, | 52 | PPC_ADMA_INIT_OK = 0, |
@@ -1930,7 +1931,7 @@ static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan) | |||
1930 | if (end_of_chain && slot_cnt) { | 1931 | if (end_of_chain && slot_cnt) { |
1931 | /* Should wait for ZeroSum completion */ | 1932 | /* Should wait for ZeroSum completion */ |
1932 | if (cookie > 0) | 1933 | if (cookie > 0) |
1933 | chan->completed_cookie = cookie; | 1934 | chan->common.completed_cookie = cookie; |
1934 | return; | 1935 | return; |
1935 | } | 1936 | } |
1936 | 1937 | ||
@@ -1960,7 +1961,7 @@ static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan) | |||
1960 | BUG_ON(!seen_current); | 1961 | BUG_ON(!seen_current); |
1961 | 1962 | ||
1962 | if (cookie > 0) { | 1963 | if (cookie > 0) { |
1963 | chan->completed_cookie = cookie; | 1964 | chan->common.completed_cookie = cookie; |
1964 | pr_debug("\tcompleted cookie %d\n", cookie); | 1965 | pr_debug("\tcompleted cookie %d\n", cookie); |
1965 | } | 1966 | } |
1966 | 1967 | ||
@@ -2150,22 +2151,6 @@ static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan) | |||
2150 | } | 2151 | } |
2151 | 2152 | ||
2152 | /** | 2153 | /** |
2153 | * ppc440spe_desc_assign_cookie - assign a cookie | ||
2154 | */ | ||
2155 | static dma_cookie_t ppc440spe_desc_assign_cookie( | ||
2156 | struct ppc440spe_adma_chan *chan, | ||
2157 | struct ppc440spe_adma_desc_slot *desc) | ||
2158 | { | ||
2159 | dma_cookie_t cookie = chan->common.cookie; | ||
2160 | |||
2161 | cookie++; | ||
2162 | if (cookie < 0) | ||
2163 | cookie = 1; | ||
2164 | chan->common.cookie = desc->async_tx.cookie = cookie; | ||
2165 | return cookie; | ||
2166 | } | ||
2167 | |||
2168 | /** | ||
2169 | * ppc440spe_rxor_set_region_data - | 2154 | * ppc440spe_rxor_set_region_data - |
2170 | */ | 2155 | */ |
2171 | static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc, | 2156 | static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc, |
@@ -2235,8 +2220,7 @@ static dma_cookie_t ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
2235 | slots_per_op = group_start->slots_per_op; | 2220 | slots_per_op = group_start->slots_per_op; |
2236 | 2221 | ||
2237 | spin_lock_bh(&chan->lock); | 2222 | spin_lock_bh(&chan->lock); |
2238 | 2223 | cookie = dma_cookie_assign(tx); | |
2239 | cookie = ppc440spe_desc_assign_cookie(chan, sw_desc); | ||
2240 | 2224 | ||
2241 | if (unlikely(list_empty(&chan->chain))) { | 2225 | if (unlikely(list_empty(&chan->chain))) { |
2242 | /* first peer */ | 2226 | /* first peer */ |
@@ -3944,28 +3928,16 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan, | |||
3944 | dma_cookie_t cookie, struct dma_tx_state *txstate) | 3928 | dma_cookie_t cookie, struct dma_tx_state *txstate) |
3945 | { | 3929 | { |
3946 | struct ppc440spe_adma_chan *ppc440spe_chan; | 3930 | struct ppc440spe_adma_chan *ppc440spe_chan; |
3947 | dma_cookie_t last_used; | ||
3948 | dma_cookie_t last_complete; | ||
3949 | enum dma_status ret; | 3931 | enum dma_status ret; |
3950 | 3932 | ||
3951 | ppc440spe_chan = to_ppc440spe_adma_chan(chan); | 3933 | ppc440spe_chan = to_ppc440spe_adma_chan(chan); |
3952 | last_used = chan->cookie; | 3934 | ret = dma_cookie_status(chan, cookie, txstate); |
3953 | last_complete = ppc440spe_chan->completed_cookie; | ||
3954 | |||
3955 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
3956 | |||
3957 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
3958 | if (ret == DMA_SUCCESS) | 3935 | if (ret == DMA_SUCCESS) |
3959 | return ret; | 3936 | return ret; |
3960 | 3937 | ||
3961 | ppc440spe_adma_slot_cleanup(ppc440spe_chan); | 3938 | ppc440spe_adma_slot_cleanup(ppc440spe_chan); |
3962 | 3939 | ||
3963 | last_used = chan->cookie; | 3940 | return dma_cookie_status(chan, cookie, txstate); |
3964 | last_complete = ppc440spe_chan->completed_cookie; | ||
3965 | |||
3966 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
3967 | |||
3968 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
3969 | } | 3941 | } |
3970 | 3942 | ||
3971 | /** | 3943 | /** |
@@ -4050,16 +4022,12 @@ static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan) | |||
4050 | async_tx_ack(&sw_desc->async_tx); | 4022 | async_tx_ack(&sw_desc->async_tx); |
4051 | ppc440spe_desc_init_null_xor(group_start); | 4023 | ppc440spe_desc_init_null_xor(group_start); |
4052 | 4024 | ||
4053 | cookie = chan->common.cookie; | 4025 | cookie = dma_cookie_assign(&sw_desc->async_tx); |
4054 | cookie++; | ||
4055 | if (cookie <= 1) | ||
4056 | cookie = 2; | ||
4057 | 4026 | ||
4058 | /* initialize the completed cookie to be less than | 4027 | /* initialize the completed cookie to be less than |
4059 | * the most recently used cookie | 4028 | * the most recently used cookie |
4060 | */ | 4029 | */ |
4061 | chan->completed_cookie = cookie - 1; | 4030 | chan->common.completed_cookie = cookie - 1; |
4062 | chan->common.cookie = sw_desc->async_tx.cookie = cookie; | ||
4063 | 4031 | ||
4064 | /* channel should not be busy */ | 4032 | /* channel should not be busy */ |
4065 | BUG_ON(ppc440spe_chan_is_busy(chan)); | 4033 | BUG_ON(ppc440spe_chan_is_busy(chan)); |
@@ -4529,6 +4497,7 @@ static int __devinit ppc440spe_adma_probe(struct platform_device *ofdev) | |||
4529 | INIT_LIST_HEAD(&chan->all_slots); | 4497 | INIT_LIST_HEAD(&chan->all_slots); |
4530 | chan->device = adev; | 4498 | chan->device = adev; |
4531 | chan->common.device = &adev->common; | 4499 | chan->common.device = &adev->common; |
4500 | dma_cookie_init(&chan->common); | ||
4532 | list_add_tail(&chan->common.device_node, &adev->common.channels); | 4501 | list_add_tail(&chan->common.device_node, &adev->common.channels); |
4533 | tasklet_init(&chan->irq_tasklet, ppc440spe_adma_tasklet, | 4502 | tasklet_init(&chan->irq_tasklet, ppc440spe_adma_tasklet, |
4534 | (unsigned long)chan); | 4503 | (unsigned long)chan); |
diff --git a/drivers/dma/ppc4xx/adma.h b/drivers/dma/ppc4xx/adma.h index 8ada5a812e3b..26b7a5ed9ac7 100644 --- a/drivers/dma/ppc4xx/adma.h +++ b/drivers/dma/ppc4xx/adma.h | |||
@@ -81,7 +81,6 @@ struct ppc440spe_adma_device { | |||
81 | * @common: common dmaengine channel object members | 81 | * @common: common dmaengine channel object members |
82 | * @all_slots: complete domain of slots usable by the channel | 82 | * @all_slots: complete domain of slots usable by the channel |
83 | * @pending: allows batching of hardware operations | 83 | * @pending: allows batching of hardware operations |
84 | * @completed_cookie: identifier for the most recently completed operation | ||
85 | * @slots_allocated: records the actual size of the descriptor slot pool | 84 | * @slots_allocated: records the actual size of the descriptor slot pool |
86 | * @hw_chain_inited: h/w descriptor chain initialization flag | 85 | * @hw_chain_inited: h/w descriptor chain initialization flag |
87 | * @irq_tasklet: bottom half where ppc440spe_adma_slot_cleanup runs | 86 | * @irq_tasklet: bottom half where ppc440spe_adma_slot_cleanup runs |
@@ -99,7 +98,6 @@ struct ppc440spe_adma_chan { | |||
99 | struct list_head all_slots; | 98 | struct list_head all_slots; |
100 | struct ppc440spe_adma_desc_slot *last_used; | 99 | struct ppc440spe_adma_desc_slot *last_used; |
101 | int pending; | 100 | int pending; |
102 | dma_cookie_t completed_cookie; | ||
103 | int slots_allocated; | 101 | int slots_allocated; |
104 | int hw_chain_inited; | 102 | int hw_chain_inited; |
105 | struct tasklet_struct irq_tasklet; | 103 | struct tasklet_struct irq_tasklet; |
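The tx_submit conversions (ppc440spe above, shdma and the others below) all reduce to the same shape: take the channel lock, stamp the descriptor with dma_cookie_assign(), queue it, drop the lock. A minimal sketch with illustrative names (the foo_* types and lists are hypothetical, not from any one driver in this diff):

static dma_cookie_t foo_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct foo_dma_chan *fc = to_foo_chan(tx->chan);	/* hypothetical */
	struct foo_dma_desc *fd = to_foo_desc(tx);		/* hypothetical */
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&fc->lock, flags);

	/* Replaces the removed "++cookie; if (cookie < 0) cookie = 1;" code. */
	cookie = dma_cookie_assign(tx);

	/* Driver-specific queueing of the descriptor. */
	list_add_tail(&fd->node, &fc->queued);

	spin_unlock_irqrestore(&fc->lock, flags);

	return cookie;
}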
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index 812fd76e9c18..19d7a8d3975d 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c | |||
@@ -30,6 +30,8 @@ | |||
30 | #include <linux/kdebug.h> | 30 | #include <linux/kdebug.h> |
31 | #include <linux/spinlock.h> | 31 | #include <linux/spinlock.h> |
32 | #include <linux/rculist.h> | 32 | #include <linux/rculist.h> |
33 | |||
34 | #include "dmaengine.h" | ||
33 | #include "shdma.h" | 35 | #include "shdma.h" |
34 | 36 | ||
35 | /* DMA descriptor control */ | 37 | /* DMA descriptor control */ |
@@ -296,13 +298,7 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) | |||
296 | else | 298 | else |
297 | power_up = false; | 299 | power_up = false; |
298 | 300 | ||
299 | cookie = sh_chan->common.cookie; | 301 | cookie = dma_cookie_assign(tx); |
300 | cookie++; | ||
301 | if (cookie < 0) | ||
302 | cookie = 1; | ||
303 | |||
304 | sh_chan->common.cookie = cookie; | ||
305 | tx->cookie = cookie; | ||
306 | 302 | ||
307 | /* Mark all chunks of this descriptor as submitted, move to the queue */ | 303 | /* Mark all chunks of this descriptor as submitted, move to the queue */ |
308 | list_for_each_entry_safe(chunk, c, desc->node.prev, node) { | 304 | list_for_each_entry_safe(chunk, c, desc->node.prev, node) { |
@@ -673,7 +669,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( | |||
673 | 669 | ||
674 | static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( | 670 | static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( |
675 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, | 671 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, |
676 | enum dma_transfer_direction direction, unsigned long flags) | 672 | enum dma_transfer_direction direction, unsigned long flags, |
673 | void *context) | ||
677 | { | 674 | { |
678 | struct sh_dmae_slave *param; | 675 | struct sh_dmae_slave *param; |
679 | struct sh_dmae_chan *sh_chan; | 676 | struct sh_dmae_chan *sh_chan; |
@@ -764,12 +761,12 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all | |||
764 | cookie = tx->cookie; | 761 | cookie = tx->cookie; |
765 | 762 | ||
766 | if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { | 763 | if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { |
767 | if (sh_chan->completed_cookie != desc->cookie - 1) | 764 | if (sh_chan->common.completed_cookie != desc->cookie - 1) |
768 | dev_dbg(sh_chan->dev, | 765 | dev_dbg(sh_chan->dev, |
769 | "Completing cookie %d, expected %d\n", | 766 | "Completing cookie %d, expected %d\n", |
770 | desc->cookie, | 767 | desc->cookie, |
771 | sh_chan->completed_cookie + 1); | 768 | sh_chan->common.completed_cookie + 1); |
772 | sh_chan->completed_cookie = desc->cookie; | 769 | sh_chan->common.completed_cookie = desc->cookie; |
773 | } | 770 | } |
774 | 771 | ||
775 | /* Call callback on the last chunk */ | 772 | /* Call callback on the last chunk */ |
@@ -823,7 +820,7 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all | |||
823 | * Terminating and the loop completed normally: forgive | 820 | * Terminating and the loop completed normally: forgive |
824 | * uncompleted cookies | 821 | * uncompleted cookies |
825 | */ | 822 | */ |
826 | sh_chan->completed_cookie = sh_chan->common.cookie; | 823 | sh_chan->common.completed_cookie = sh_chan->common.cookie; |
827 | 824 | ||
828 | spin_unlock_irqrestore(&sh_chan->desc_lock, flags); | 825 | spin_unlock_irqrestore(&sh_chan->desc_lock, flags); |
829 | 826 | ||
@@ -883,23 +880,14 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan, | |||
883 | struct dma_tx_state *txstate) | 880 | struct dma_tx_state *txstate) |
884 | { | 881 | { |
885 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | 882 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); |
886 | dma_cookie_t last_used; | ||
887 | dma_cookie_t last_complete; | ||
888 | enum dma_status status; | 883 | enum dma_status status; |
889 | unsigned long flags; | 884 | unsigned long flags; |
890 | 885 | ||
891 | sh_dmae_chan_ld_cleanup(sh_chan, false); | 886 | sh_dmae_chan_ld_cleanup(sh_chan, false); |
892 | 887 | ||
893 | /* First read completed cookie to avoid a skew */ | ||
894 | last_complete = sh_chan->completed_cookie; | ||
895 | rmb(); | ||
896 | last_used = chan->cookie; | ||
897 | BUG_ON(last_complete < 0); | ||
898 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
899 | |||
900 | spin_lock_irqsave(&sh_chan->desc_lock, flags); | 888 | spin_lock_irqsave(&sh_chan->desc_lock, flags); |
901 | 889 | ||
902 | status = dma_async_is_complete(cookie, last_complete, last_used); | 890 | status = dma_cookie_status(chan, cookie, txstate); |
903 | 891 | ||
904 | /* | 892 | /* |
905 | * If we don't find cookie on the queue, it has been aborted and we have | 893 | * If we don't find cookie on the queue, it has been aborted and we have |
@@ -1102,6 +1090,7 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, | |||
1102 | 1090 | ||
1103 | /* reference struct dma_device */ | 1091 | /* reference struct dma_device */ |
1104 | new_sh_chan->common.device = &shdev->common; | 1092 | new_sh_chan->common.device = &shdev->common; |
1093 | dma_cookie_init(&new_sh_chan->common); | ||
1105 | 1094 | ||
1106 | new_sh_chan->dev = shdev->common.dev; | 1095 | new_sh_chan->dev = shdev->common.dev; |
1107 | new_sh_chan->id = id; | 1096 | new_sh_chan->id = id; |
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h index 2b55a276dc5b..0b1d2c105f02 100644 --- a/drivers/dma/shdma.h +++ b/drivers/dma/shdma.h | |||
@@ -30,7 +30,6 @@ enum dmae_pm_state { | |||
30 | }; | 30 | }; |
31 | 31 | ||
32 | struct sh_dmae_chan { | 32 | struct sh_dmae_chan { |
33 | dma_cookie_t completed_cookie; /* The maximum cookie completed */ | ||
34 | spinlock_t desc_lock; /* Descriptor operation lock */ | 33 | spinlock_t desc_lock; /* Descriptor operation lock */ |
35 | struct list_head ld_queue; /* Link descriptors queue */ | 34 | struct list_head ld_queue; /* Link descriptors queue */ |
36 | struct list_head ld_free; /* Link descriptors free */ | 35 | struct list_head ld_free; /* Link descriptors free */ |
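Similarly, the tx_status conversions collapse the last_complete/last_used/dma_set_tx_state() boilerplate into dma_cookie_status(). The ppc440spe and txx9dmac hunks show the two-pass variant, roughly (foo_* names are illustrative):

static enum dma_status foo_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	enum dma_status ret;

	/* One call reads chan->cookie and chan->completed_cookie and
	 * fills *txstate; no per-driver completed_cookie is needed. */
	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS)
		return ret;

	/* Not complete yet: reap finished descriptors, then re-check. */
	foo_dma_scan_descriptors(chan);		/* hypothetical cleanup hook */

	return dma_cookie_status(chan, cookie, txstate);
}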
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 2333810d1688..434ad31174f2 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c | |||
@@ -18,6 +18,8 @@ | |||
18 | #include <linux/of_platform.h> | 18 | #include <linux/of_platform.h> |
19 | #include <linux/sirfsoc_dma.h> | 19 | #include <linux/sirfsoc_dma.h> |
20 | 20 | ||
21 | #include "dmaengine.h" | ||
22 | |||
21 | #define SIRFSOC_DMA_DESCRIPTORS 16 | 23 | #define SIRFSOC_DMA_DESCRIPTORS 16 |
22 | #define SIRFSOC_DMA_CHANNELS 16 | 24 | #define SIRFSOC_DMA_CHANNELS 16 |
23 | 25 | ||
@@ -59,7 +61,6 @@ struct sirfsoc_dma_chan { | |||
59 | struct list_head queued; | 61 | struct list_head queued; |
60 | struct list_head active; | 62 | struct list_head active; |
61 | struct list_head completed; | 63 | struct list_head completed; |
62 | dma_cookie_t completed_cookie; | ||
63 | unsigned long happened_cyclic; | 64 | unsigned long happened_cyclic; |
64 | unsigned long completed_cyclic; | 65 | unsigned long completed_cyclic; |
65 | 66 | ||
@@ -208,7 +209,7 @@ static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma) | |||
208 | /* Free descriptors */ | 209 | /* Free descriptors */ |
209 | spin_lock_irqsave(&schan->lock, flags); | 210 | spin_lock_irqsave(&schan->lock, flags); |
210 | list_splice_tail_init(&list, &schan->free); | 211 | list_splice_tail_init(&list, &schan->free); |
211 | schan->completed_cookie = last_cookie; | 212 | schan->chan.completed_cookie = last_cookie; |
212 | spin_unlock_irqrestore(&schan->lock, flags); | 213 | spin_unlock_irqrestore(&schan->lock, flags); |
213 | } else { | 214 | } else { |
214 | /* for cyclic channel, desc is always in active list */ | 215 | /* for cyclic channel, desc is always in active list */ |
@@ -258,13 +259,7 @@ static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd) | |||
258 | /* Move descriptor to queue */ | 259 | /* Move descriptor to queue */ |
259 | list_move_tail(&sdesc->node, &schan->queued); | 260 | list_move_tail(&sdesc->node, &schan->queued); |
260 | 261 | ||
261 | /* Update cookie */ | 262 | cookie = dma_cookie_assign(txd); |
262 | cookie = schan->chan.cookie + 1; | ||
263 | if (cookie <= 0) | ||
264 | cookie = 1; | ||
265 | |||
266 | schan->chan.cookie = cookie; | ||
267 | sdesc->desc.cookie = cookie; | ||
268 | 263 | ||
269 | spin_unlock_irqrestore(&schan->lock, flags); | 264 | spin_unlock_irqrestore(&schan->lock, flags); |
270 | 265 | ||
@@ -414,16 +409,13 @@ sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
414 | { | 409 | { |
415 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | 410 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); |
416 | unsigned long flags; | 411 | unsigned long flags; |
417 | dma_cookie_t last_used; | 412 | enum dma_status ret; |
418 | dma_cookie_t last_complete; | ||
419 | 413 | ||
420 | spin_lock_irqsave(&schan->lock, flags); | 414 | spin_lock_irqsave(&schan->lock, flags); |
421 | last_used = schan->chan.cookie; | 415 | ret = dma_cookie_status(chan, cookie, txstate); |
422 | last_complete = schan->completed_cookie; | ||
423 | spin_unlock_irqrestore(&schan->lock, flags); | 416 | spin_unlock_irqrestore(&schan->lock, flags); |
424 | 417 | ||
425 | dma_set_tx_state(txstate, last_complete, last_used, 0); | 418 | return ret; |
426 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
427 | } | 419 | } |
428 | 420 | ||
429 | static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved( | 421 | static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved( |
@@ -497,7 +489,7 @@ err_dir: | |||
497 | static struct dma_async_tx_descriptor * | 489 | static struct dma_async_tx_descriptor * |
498 | sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr, | 490 | sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr, |
499 | size_t buf_len, size_t period_len, | 491 | size_t buf_len, size_t period_len, |
500 | enum dma_transfer_direction direction) | 492 | enum dma_transfer_direction direction, void *context) |
501 | { | 493 | { |
502 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | 494 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); |
503 | struct sirfsoc_dma_desc *sdesc = NULL; | 495 | struct sirfsoc_dma_desc *sdesc = NULL; |
@@ -635,8 +627,7 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op) | |||
635 | schan = &sdma->channels[i]; | 627 | schan = &sdma->channels[i]; |
636 | 628 | ||
637 | schan->chan.device = dma; | 629 | schan->chan.device = dma; |
638 | schan->chan.cookie = 1; | 630 | dma_cookie_init(&schan->chan); |
639 | schan->completed_cookie = schan->chan.cookie; | ||
640 | 631 | ||
641 | INIT_LIST_HEAD(&schan->free); | 632 | INIT_LIST_HEAD(&schan->free); |
642 | INIT_LIST_HEAD(&schan->prepared); | 633 | INIT_LIST_HEAD(&schan->prepared); |
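Channel setup shrinks the same way: instead of seeding chan->cookie and a private completed_cookie separately at probe or alloc_chan_resources time, a driver calls dma_cookie_init() once per channel. Illustrative probe-time fragment (mirrors the sirf-dma hunk above; dma_dev is an assumed local):

	schan->chan.device = dma_dev;
	dma_cookie_init(&schan->chan);	/* seeds cookie and completed_cookie */
	list_add_tail(&schan->chan.device_node, &dma_dev->channels);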
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index cc5ecbc067a3..bdd41d4bfa8d 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -21,6 +21,7 @@ | |||
21 | 21 | ||
22 | #include <plat/ste_dma40.h> | 22 | #include <plat/ste_dma40.h> |
23 | 23 | ||
24 | #include "dmaengine.h" | ||
24 | #include "ste_dma40_ll.h" | 25 | #include "ste_dma40_ll.h" |
25 | 26 | ||
26 | #define D40_NAME "dma40" | 27 | #define D40_NAME "dma40" |
@@ -220,8 +221,6 @@ struct d40_base; | |||
220 | * | 221 | * |
221 | * @lock: A spinlock to protect this struct. | 222 | * @lock: A spinlock to protect this struct. |
222 | * @log_num: The logical number, if any of this channel. | 223 | * @log_num: The logical number, if any of this channel. |
223 | * @completed: Starts with 1, after first interrupt it is set to dma engine's | ||
224 | * current cookie. | ||
225 | * @pending_tx: The number of pending transfers. Used between interrupt handler | 224 | * @pending_tx: The number of pending transfers. Used between interrupt handler |
226 | * and tasklet. | 225 | * and tasklet. |
227 | * @busy: Set to true when transfer is ongoing on this channel. | 226 | * @busy: Set to true when transfer is ongoing on this channel. |
@@ -250,8 +249,6 @@ struct d40_base; | |||
250 | struct d40_chan { | 249 | struct d40_chan { |
251 | spinlock_t lock; | 250 | spinlock_t lock; |
252 | int log_num; | 251 | int log_num; |
253 | /* ID of the most recent completed transfer */ | ||
254 | int completed; | ||
255 | int pending_tx; | 252 | int pending_tx; |
256 | bool busy; | 253 | bool busy; |
257 | struct d40_phy_res *phy_chan; | 254 | struct d40_phy_res *phy_chan; |
@@ -1223,21 +1220,14 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | |||
1223 | chan); | 1220 | chan); |
1224 | struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); | 1221 | struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); |
1225 | unsigned long flags; | 1222 | unsigned long flags; |
1223 | dma_cookie_t cookie; | ||
1226 | 1224 | ||
1227 | spin_lock_irqsave(&d40c->lock, flags); | 1225 | spin_lock_irqsave(&d40c->lock, flags); |
1228 | 1226 | cookie = dma_cookie_assign(tx); | |
1229 | d40c->chan.cookie++; | ||
1230 | |||
1231 | if (d40c->chan.cookie < 0) | ||
1232 | d40c->chan.cookie = 1; | ||
1233 | |||
1234 | d40d->txd.cookie = d40c->chan.cookie; | ||
1235 | |||
1236 | d40_desc_queue(d40c, d40d); | 1227 | d40_desc_queue(d40c, d40d); |
1237 | |||
1238 | spin_unlock_irqrestore(&d40c->lock, flags); | 1228 | spin_unlock_irqrestore(&d40c->lock, flags); |
1239 | 1229 | ||
1240 | return tx->cookie; | 1230 | return cookie; |
1241 | } | 1231 | } |
1242 | 1232 | ||
1243 | static int d40_start(struct d40_chan *d40c) | 1233 | static int d40_start(struct d40_chan *d40c) |
@@ -1357,7 +1347,7 @@ static void dma_tasklet(unsigned long data) | |||
1357 | goto err; | 1347 | goto err; |
1358 | 1348 | ||
1359 | if (!d40d->cyclic) | 1349 | if (!d40d->cyclic) |
1360 | d40c->completed = d40d->txd.cookie; | 1350 | dma_cookie_complete(&d40d->txd); |
1361 | 1351 | ||
1362 | /* | 1352 | /* |
1363 | * If terminating a channel pending_tx is set to zero. | 1353 | * If terminating a channel pending_tx is set to zero. |
@@ -2182,7 +2172,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
2182 | bool is_free_phy; | 2172 | bool is_free_phy; |
2183 | spin_lock_irqsave(&d40c->lock, flags); | 2173 | spin_lock_irqsave(&d40c->lock, flags); |
2184 | 2174 | ||
2185 | d40c->completed = chan->cookie = 1; | 2175 | dma_cookie_init(chan); |
2186 | 2176 | ||
2187 | /* If no dma configuration is set use default configuration (memcpy) */ | 2177 | /* If no dma configuration is set use default configuration (memcpy) */ |
2188 | if (!d40c->configured) { | 2178 | if (!d40c->configured) { |
@@ -2299,7 +2289,8 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | |||
2299 | struct scatterlist *sgl, | 2289 | struct scatterlist *sgl, |
2300 | unsigned int sg_len, | 2290 | unsigned int sg_len, |
2301 | enum dma_transfer_direction direction, | 2291 | enum dma_transfer_direction direction, |
2302 | unsigned long dma_flags) | 2292 | unsigned long dma_flags, |
2293 | void *context) | ||
2303 | { | 2294 | { |
2304 | if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) | 2295 | if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) |
2305 | return NULL; | 2296 | return NULL; |
@@ -2310,7 +2301,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | |||
2310 | static struct dma_async_tx_descriptor * | 2301 | static struct dma_async_tx_descriptor * |
2311 | dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, | 2302 | dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, |
2312 | size_t buf_len, size_t period_len, | 2303 | size_t buf_len, size_t period_len, |
2313 | enum dma_transfer_direction direction) | 2304 | enum dma_transfer_direction direction, void *context) |
2314 | { | 2305 | { |
2315 | unsigned int periods = buf_len / period_len; | 2306 | unsigned int periods = buf_len / period_len; |
2316 | struct dma_async_tx_descriptor *txd; | 2307 | struct dma_async_tx_descriptor *txd; |
@@ -2342,25 +2333,19 @@ static enum dma_status d40_tx_status(struct dma_chan *chan, | |||
2342 | struct dma_tx_state *txstate) | 2333 | struct dma_tx_state *txstate) |
2343 | { | 2334 | { |
2344 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 2335 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2345 | dma_cookie_t last_used; | 2336 | enum dma_status ret; |
2346 | dma_cookie_t last_complete; | ||
2347 | int ret; | ||
2348 | 2337 | ||
2349 | if (d40c->phy_chan == NULL) { | 2338 | if (d40c->phy_chan == NULL) { |
2350 | chan_err(d40c, "Cannot read status of unallocated channel\n"); | 2339 | chan_err(d40c, "Cannot read status of unallocated channel\n"); |
2351 | return -EINVAL; | 2340 | return -EINVAL; |
2352 | } | 2341 | } |
2353 | 2342 | ||
2354 | last_complete = d40c->completed; | 2343 | ret = dma_cookie_status(chan, cookie, txstate); |
2355 | last_used = chan->cookie; | 2344 | if (ret != DMA_SUCCESS) |
2345 | dma_set_residue(txstate, stedma40_residue(chan)); | ||
2356 | 2346 | ||
2357 | if (d40_is_paused(d40c)) | 2347 | if (d40_is_paused(d40c)) |
2358 | ret = DMA_PAUSED; | 2348 | ret = DMA_PAUSED; |
2359 | else | ||
2360 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
2361 | |||
2362 | dma_set_tx_state(txstate, last_complete, last_used, | ||
2363 | stedma40_residue(chan)); | ||
2364 | 2349 | ||
2365 | return ret; | 2350 | return ret; |
2366 | } | 2351 | } |
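ste_dma40 additionally reports a residue when the transfer is still in flight. dma_set_residue() is presumably just a setter on the dma_tx_state that dma_cookie_status() has already filled, along these lines:

/* Sketch: record the remaining byte count for the queried transfer. */
static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
{
	if (state)
		state->residue = residue;
}

so a driver that can compute a residue (here via stedma40_residue()) overrides only that one field after the cookie bookkeeping is done.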
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index a6f9c1684a0f..4e0dff59901d 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c | |||
@@ -31,6 +31,8 @@ | |||
31 | 31 | ||
32 | #include <linux/timb_dma.h> | 32 | #include <linux/timb_dma.h> |
33 | 33 | ||
34 | #include "dmaengine.h" | ||
35 | |||
34 | #define DRIVER_NAME "timb-dma" | 36 | #define DRIVER_NAME "timb-dma" |
35 | 37 | ||
36 | /* Global DMA registers */ | 38 | /* Global DMA registers */ |
@@ -84,7 +86,6 @@ struct timb_dma_chan { | |||
84 | especially the lists and descriptors, | 86 | especially the lists and descriptors, |
85 | from races between the tasklet and calls | 87 | from races between the tasklet and calls |
86 | from above */ | 88 | from above */ |
87 | dma_cookie_t last_completed_cookie; | ||
88 | bool ongoing; | 89 | bool ongoing; |
89 | struct list_head active_list; | 90 | struct list_head active_list; |
90 | struct list_head queue; | 91 | struct list_head queue; |
@@ -284,7 +285,7 @@ static void __td_finish(struct timb_dma_chan *td_chan) | |||
284 | else | 285 | else |
285 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR); | 286 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR); |
286 | */ | 287 | */ |
287 | td_chan->last_completed_cookie = txd->cookie; | 288 | dma_cookie_complete(txd); |
288 | td_chan->ongoing = false; | 289 | td_chan->ongoing = false; |
289 | 290 | ||
290 | callback = txd->callback; | 291 | callback = txd->callback; |
@@ -349,12 +350,7 @@ static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd) | |||
349 | dma_cookie_t cookie; | 350 | dma_cookie_t cookie; |
350 | 351 | ||
351 | spin_lock_bh(&td_chan->lock); | 352 | spin_lock_bh(&td_chan->lock); |
352 | 353 | cookie = dma_cookie_assign(txd); | |
353 | cookie = txd->chan->cookie; | ||
354 | if (++cookie < 0) | ||
355 | cookie = 1; | ||
356 | txd->chan->cookie = cookie; | ||
357 | txd->cookie = cookie; | ||
358 | 354 | ||
359 | if (list_empty(&td_chan->active_list)) { | 355 | if (list_empty(&td_chan->active_list)) { |
360 | dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__, | 356 | dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__, |
@@ -481,8 +477,7 @@ static int td_alloc_chan_resources(struct dma_chan *chan) | |||
481 | } | 477 | } |
482 | 478 | ||
483 | spin_lock_bh(&td_chan->lock); | 479 | spin_lock_bh(&td_chan->lock); |
484 | td_chan->last_completed_cookie = 1; | 480 | dma_cookie_init(chan); |
485 | chan->cookie = 1; | ||
486 | spin_unlock_bh(&td_chan->lock); | 481 | spin_unlock_bh(&td_chan->lock); |
487 | 482 | ||
488 | return 0; | 483 | return 0; |
@@ -515,24 +510,13 @@ static void td_free_chan_resources(struct dma_chan *chan) | |||
515 | static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | 510 | static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie, |
516 | struct dma_tx_state *txstate) | 511 | struct dma_tx_state *txstate) |
517 | { | 512 | { |
518 | struct timb_dma_chan *td_chan = | 513 | enum dma_status ret; |
519 | container_of(chan, struct timb_dma_chan, chan); | ||
520 | dma_cookie_t last_used; | ||
521 | dma_cookie_t last_complete; | ||
522 | int ret; | ||
523 | 514 | ||
524 | dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); | 515 | dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); |
525 | 516 | ||
526 | last_complete = td_chan->last_completed_cookie; | 517 | ret = dma_cookie_status(chan, cookie, txstate); |
527 | last_used = chan->cookie; | ||
528 | |||
529 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
530 | |||
531 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
532 | 518 | ||
533 | dev_dbg(chan2dev(chan), | 519 | dev_dbg(chan2dev(chan), "%s: exit, ret: %d\n", __func__, ret); |
534 | "%s: exit, ret: %d, last_complete: %d, last_used: %d\n", | ||
535 | __func__, ret, last_complete, last_used); | ||
536 | 520 | ||
537 | return ret; | 521 | return ret; |
538 | } | 522 | } |
@@ -558,7 +542,8 @@ static void td_issue_pending(struct dma_chan *chan) | |||
558 | 542 | ||
559 | static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, | 543 | static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, |
560 | struct scatterlist *sgl, unsigned int sg_len, | 544 | struct scatterlist *sgl, unsigned int sg_len, |
561 | enum dma_transfer_direction direction, unsigned long flags) | 545 | enum dma_transfer_direction direction, unsigned long flags, |
546 | void *context) | ||
562 | { | 547 | { |
563 | struct timb_dma_chan *td_chan = | 548 | struct timb_dma_chan *td_chan = |
564 | container_of(chan, struct timb_dma_chan, chan); | 549 | container_of(chan, struct timb_dma_chan, chan); |
@@ -766,7 +751,7 @@ static int __devinit td_probe(struct platform_device *pdev) | |||
766 | } | 751 | } |
767 | 752 | ||
768 | td_chan->chan.device = &td->dma; | 753 | td_chan->chan.device = &td->dma; |
769 | td_chan->chan.cookie = 1; | 754 | dma_cookie_init(&td_chan->chan); |
770 | spin_lock_init(&td_chan->lock); | 755 | spin_lock_init(&td_chan->lock); |
771 | INIT_LIST_HEAD(&td_chan->active_list); | 756 | INIT_LIST_HEAD(&td_chan->active_list); |
772 | INIT_LIST_HEAD(&td_chan->queue); | 757 | INIT_LIST_HEAD(&td_chan->queue); |
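On the completion side (timb_dma's __td_finish above, and the tasklet or descriptor-complete paths in the other drivers), the pattern is to publish the cookie as complete before invoking the client callback. A hedged sketch with hypothetical foo_* names:

static void foo_dma_descriptor_complete(struct foo_dma_chan *fc,
		struct dma_async_tx_descriptor *txd)
{
	dma_async_tx_callback callback;
	void *param;

	/* Publish completion first, so a concurrent tx_status() sees it. */
	dma_cookie_complete(txd);

	callback = txd->callback;
	param = txd->callback_param;

	/* ...then notify the client. */
	if (callback)
		callback(param);
}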
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index 6122c364cf11..913f55c76c99 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c | |||
@@ -15,6 +15,8 @@ | |||
15 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/scatterlist.h> | 17 | #include <linux/scatterlist.h> |
18 | |||
19 | #include "dmaengine.h" | ||
18 | #include "txx9dmac.h" | 20 | #include "txx9dmac.h" |
19 | 21 | ||
20 | static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan) | 22 | static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan) |
@@ -279,21 +281,6 @@ static void txx9dmac_desc_put(struct txx9dmac_chan *dc, | |||
279 | } | 281 | } |
280 | } | 282 | } |
281 | 283 | ||
282 | /* Called with dc->lock held and bh disabled */ | ||
283 | static dma_cookie_t | ||
284 | txx9dmac_assign_cookie(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc) | ||
285 | { | ||
286 | dma_cookie_t cookie = dc->chan.cookie; | ||
287 | |||
288 | if (++cookie < 0) | ||
289 | cookie = 1; | ||
290 | |||
291 | dc->chan.cookie = cookie; | ||
292 | desc->txd.cookie = cookie; | ||
293 | |||
294 | return cookie; | ||
295 | } | ||
296 | |||
297 | /*----------------------------------------------------------------------*/ | 284 | /*----------------------------------------------------------------------*/ |
298 | 285 | ||
299 | static void txx9dmac_dump_regs(struct txx9dmac_chan *dc) | 286 | static void txx9dmac_dump_regs(struct txx9dmac_chan *dc) |
@@ -424,7 +411,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, | |||
424 | dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", | 411 | dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", |
425 | txd->cookie, desc); | 412 | txd->cookie, desc); |
426 | 413 | ||
427 | dc->completed = txd->cookie; | 414 | dma_cookie_complete(txd); |
428 | callback = txd->callback; | 415 | callback = txd->callback; |
429 | param = txd->callback_param; | 416 | param = txd->callback_param; |
430 | 417 | ||
@@ -738,7 +725,7 @@ static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx) | |||
738 | dma_cookie_t cookie; | 725 | dma_cookie_t cookie; |
739 | 726 | ||
740 | spin_lock_bh(&dc->lock); | 727 | spin_lock_bh(&dc->lock); |
741 | cookie = txx9dmac_assign_cookie(dc, desc); | 728 | cookie = dma_cookie_assign(tx); |
742 | 729 | ||
743 | dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n", | 730 | dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n", |
744 | desc->txd.cookie, desc); | 731 | desc->txd.cookie, desc); |
@@ -846,7 +833,7 @@ txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
846 | static struct dma_async_tx_descriptor * | 833 | static struct dma_async_tx_descriptor * |
847 | txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 834 | txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
848 | unsigned int sg_len, enum dma_transfer_direction direction, | 835 | unsigned int sg_len, enum dma_transfer_direction direction, |
849 | unsigned long flags) | 836 | unsigned long flags, void *context) |
850 | { | 837 | { |
851 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); | 838 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); |
852 | struct txx9dmac_dev *ddev = dc->ddev; | 839 | struct txx9dmac_dev *ddev = dc->ddev; |
@@ -972,27 +959,17 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
972 | struct dma_tx_state *txstate) | 959 | struct dma_tx_state *txstate) |
973 | { | 960 | { |
974 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); | 961 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); |
975 | dma_cookie_t last_used; | 962 | enum dma_status ret; |
976 | dma_cookie_t last_complete; | ||
977 | int ret; | ||
978 | 963 | ||
979 | last_complete = dc->completed; | 964 | ret = dma_cookie_status(chan, cookie, txstate); |
980 | last_used = chan->cookie; | ||
981 | |||
982 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
983 | if (ret != DMA_SUCCESS) { | 965 | if (ret != DMA_SUCCESS) { |
984 | spin_lock_bh(&dc->lock); | 966 | spin_lock_bh(&dc->lock); |
985 | txx9dmac_scan_descriptors(dc); | 967 | txx9dmac_scan_descriptors(dc); |
986 | spin_unlock_bh(&dc->lock); | 968 | spin_unlock_bh(&dc->lock); |
987 | 969 | ||
988 | last_complete = dc->completed; | 970 | ret = dma_cookie_status(chan, cookie, txstate); |
989 | last_used = chan->cookie; | ||
990 | |||
991 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
992 | } | 971 | } |
993 | 972 | ||
994 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
995 | |||
996 | return ret; | 973 | return ret; |
997 | } | 974 | } |
998 | 975 | ||
@@ -1057,7 +1034,7 @@ static int txx9dmac_alloc_chan_resources(struct dma_chan *chan) | |||
1057 | return -EIO; | 1034 | return -EIO; |
1058 | } | 1035 | } |
1059 | 1036 | ||
1060 | dc->completed = chan->cookie = 1; | 1037 | dma_cookie_init(chan); |
1061 | 1038 | ||
1062 | dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE; | 1039 | dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE; |
1063 | txx9dmac_chan_set_SMPCHN(dc); | 1040 | txx9dmac_chan_set_SMPCHN(dc); |
@@ -1186,7 +1163,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev) | |||
1186 | dc->ddev->chan[ch] = dc; | 1163 | dc->ddev->chan[ch] = dc; |
1187 | dc->chan.device = &dc->dma; | 1164 | dc->chan.device = &dc->dma; |
1188 | list_add_tail(&dc->chan.device_node, &dc->chan.device->channels); | 1165 | list_add_tail(&dc->chan.device_node, &dc->chan.device->channels); |
1189 | dc->chan.cookie = dc->completed = 1; | 1166 | dma_cookie_init(&dc->chan); |
1190 | 1167 | ||
1191 | if (is_dmac64(dc)) | 1168 | if (is_dmac64(dc)) |
1192 | dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch]; | 1169 | dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch]; |
diff --git a/drivers/dma/txx9dmac.h b/drivers/dma/txx9dmac.h index 365d42366b9f..f5a760598882 100644 --- a/drivers/dma/txx9dmac.h +++ b/drivers/dma/txx9dmac.h | |||
@@ -172,7 +172,6 @@ struct txx9dmac_chan { | |||
172 | spinlock_t lock; | 172 | spinlock_t lock; |
173 | 173 | ||
174 | /* these other elements are all protected by lock */ | 174 | /* these other elements are all protected by lock */ |
175 | dma_cookie_t completed; | ||
176 | struct list_head active_list; | 175 | struct list_head active_list; |
177 | struct list_head queue; | 176 | struct list_head queue; |
178 | struct list_head free_list; | 177 | struct list_head free_list; |
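From here on the series touches consumers (V4L2, MMC, MTD) rather than DMA engine drivers: direct chan->device->device_prep_slave_sg() calls are replaced by the dmaengine_prep_slave_sg() wrapper, which also hides the new context argument added to the prep callbacks above. The wrapper is assumed to look roughly like:

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction dir,
		unsigned long flags)
{
	/* Ordinary slave users pass no context; drivers that need extra
	 * per-transfer data receive it through the new last parameter. */
	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, NULL);
}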
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c index 74522773e934..93c35ef5f0ad 100644 --- a/drivers/media/video/mx3_camera.c +++ b/drivers/media/video/mx3_camera.c | |||
@@ -286,7 +286,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb) | |||
286 | sg_dma_address(sg) = vb2_dma_contig_plane_dma_addr(vb, 0); | 286 | sg_dma_address(sg) = vb2_dma_contig_plane_dma_addr(vb, 0); |
287 | sg_dma_len(sg) = new_size; | 287 | sg_dma_len(sg) = new_size; |
288 | 288 | ||
289 | txd = ichan->dma_chan.device->device_prep_slave_sg( | 289 | txd = dmaengine_prep_slave_sg( |
290 | &ichan->dma_chan, sg, 1, DMA_DEV_TO_MEM, | 290 | &ichan->dma_chan, sg, 1, DMA_DEV_TO_MEM, |
291 | DMA_PREP_INTERRUPT); | 291 | DMA_PREP_INTERRUPT); |
292 | if (!txd) | 292 | if (!txd) |
diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c index 4ed1c7c28ae7..02194c056b00 100644 --- a/drivers/media/video/timblogiw.c +++ b/drivers/media/video/timblogiw.c | |||
@@ -564,7 +564,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) | |||
564 | 564 | ||
565 | spin_unlock_irq(&fh->queue_lock); | 565 | spin_unlock_irq(&fh->queue_lock); |
566 | 566 | ||
567 | desc = fh->chan->device->device_prep_slave_sg(fh->chan, | 567 | desc = dmaengine_prep_slave_sg(fh->chan, |
568 | buf->sg, sg_elems, DMA_DEV_TO_MEM, | 568 | buf->sg, sg_elems, DMA_DEV_TO_MEM, |
569 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); | 569 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); |
570 | if (!desc) { | 570 | if (!desc) { |
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 390863e7efbd..9819dc09ce08 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/seq_file.h> | 24 | #include <linux/seq_file.h> |
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include <linux/stat.h> | 26 | #include <linux/stat.h> |
27 | #include <linux/types.h> | ||
27 | 28 | ||
28 | #include <linux/mmc/host.h> | 29 | #include <linux/mmc/host.h> |
29 | #include <linux/mmc/sdio.h> | 30 | #include <linux/mmc/sdio.h> |
@@ -173,6 +174,7 @@ struct atmel_mci { | |||
173 | 174 | ||
174 | struct atmel_mci_dma dma; | 175 | struct atmel_mci_dma dma; |
175 | struct dma_chan *data_chan; | 176 | struct dma_chan *data_chan; |
177 | struct dma_slave_config dma_conf; | ||
176 | 178 | ||
177 | u32 cmd_status; | 179 | u32 cmd_status; |
178 | u32 data_status; | 180 | u32 data_status; |
@@ -863,16 +865,17 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) | |||
863 | 865 | ||
864 | if (data->flags & MMC_DATA_READ) { | 866 | if (data->flags & MMC_DATA_READ) { |
865 | direction = DMA_FROM_DEVICE; | 867 | direction = DMA_FROM_DEVICE; |
866 | slave_dirn = DMA_DEV_TO_MEM; | 868 | host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM; |
867 | } else { | 869 | } else { |
868 | direction = DMA_TO_DEVICE; | 870 | direction = DMA_TO_DEVICE; |
869 | slave_dirn = DMA_MEM_TO_DEV; | 871 | host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV; |
870 | } | 872 | } |
871 | 873 | ||
872 | sglen = dma_map_sg(chan->device->dev, data->sg, | 874 | sglen = dma_map_sg(chan->device->dev, data->sg, |
873 | data->sg_len, direction); | 875 | data->sg_len, direction); |
874 | 876 | ||
875 | desc = chan->device->device_prep_slave_sg(chan, | 877 | dmaengine_slave_config(chan, &host->dma_conf); |
878 | desc = dmaengine_prep_slave_sg(chan, | ||
876 | data->sg, sglen, slave_dirn, | 879 | data->sg, sglen, slave_dirn, |
877 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 880 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
878 | if (!desc) | 881 | if (!desc) |
@@ -1960,10 +1963,6 @@ static bool atmci_configure_dma(struct atmel_mci *host) | |||
1960 | if (pdata && find_slave_dev(pdata->dma_slave)) { | 1963 | if (pdata && find_slave_dev(pdata->dma_slave)) { |
1961 | dma_cap_mask_t mask; | 1964 | dma_cap_mask_t mask; |
1962 | 1965 | ||
1963 | setup_dma_addr(pdata->dma_slave, | ||
1964 | host->mapbase + ATMCI_TDR, | ||
1965 | host->mapbase + ATMCI_RDR); | ||
1966 | |||
1967 | /* Try to grab a DMA channel */ | 1966 | /* Try to grab a DMA channel */ |
1968 | dma_cap_zero(mask); | 1967 | dma_cap_zero(mask); |
1969 | dma_cap_set(DMA_SLAVE, mask); | 1968 | dma_cap_set(DMA_SLAVE, mask); |
@@ -1977,6 +1976,14 @@ static bool atmci_configure_dma(struct atmel_mci *host) | |||
1977 | dev_info(&host->pdev->dev, | 1976 | dev_info(&host->pdev->dev, |
1978 | "using %s for DMA transfers\n", | 1977 | "using %s for DMA transfers\n", |
1979 | dma_chan_name(host->dma.chan)); | 1978 | dma_chan_name(host->dma.chan)); |
1979 | |||
1980 | host->dma_conf.src_addr = host->mapbase + ATMCI_RDR; | ||
1981 | host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
1982 | host->dma_conf.src_maxburst = 1; | ||
1983 | host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR; | ||
1984 | host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
1985 | host->dma_conf.dst_maxburst = 1; | ||
1986 | host->dma_conf.device_fc = false; | ||
1980 | return true; | 1987 | return true; |
1981 | } | 1988 | } |
1982 | } | 1989 | } |
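With the FIFO addresses no longer passed through platform data, atmel-mci now owns a struct dma_slave_config: the address, width and burst fields are filled once when the channel is grabbed, the direction per transfer, and the whole configuration is handed to the channel before each prep call. Condensed fragment of that flow (values mirror the hunks above; this is a sketch, not the literal driver code):

	/* once, at configure time */
	host->dma_conf.src_addr       = host->mapbase + ATMCI_RDR;
	host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_conf.src_maxburst   = 1;
	host->dma_conf.dst_addr       = host->mapbase + ATMCI_TDR;
	host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_conf.dst_maxburst   = 1;
	host->dma_conf.device_fc      = false;	/* DMA controller acts as flow controller */

	/* per transfer */
	host->dma_conf.direction = (data->flags & MMC_DATA_READ) ?
					DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
	dmaengine_slave_config(chan, &host->dma_conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, sglen,
				       host->dma_conf.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);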
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 983e244eca76..032b84791a16 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/dma-mapping.h> | 30 | #include <linux/dma-mapping.h> |
31 | #include <linux/amba/mmci.h> | 31 | #include <linux/amba/mmci.h> |
32 | #include <linux/pm_runtime.h> | 32 | #include <linux/pm_runtime.h> |
33 | #include <linux/types.h> | ||
33 | 34 | ||
34 | #include <asm/div64.h> | 35 | #include <asm/div64.h> |
35 | #include <asm/io.h> | 36 | #include <asm/io.h> |
@@ -400,6 +401,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, | |||
400 | .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, | 401 | .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, |
401 | .src_maxburst = variant->fifohalfsize >> 2, /* # of words */ | 402 | .src_maxburst = variant->fifohalfsize >> 2, /* # of words */ |
402 | .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */ | 403 | .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */ |
404 | .device_fc = false, | ||
403 | }; | 405 | }; |
404 | struct dma_chan *chan; | 406 | struct dma_chan *chan; |
405 | struct dma_device *device; | 407 | struct dma_device *device; |
@@ -441,7 +443,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, | |||
441 | return -EINVAL; | 443 | return -EINVAL; |
442 | 444 | ||
443 | dmaengine_slave_config(chan, &conf); | 445 | dmaengine_slave_config(chan, &conf); |
444 | desc = device->device_prep_slave_sg(chan, data->sg, nr_sg, | 446 | desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg, |
445 | conf.direction, DMA_CTRL_ACK); | 447 | conf.direction, DMA_CTRL_ACK); |
446 | if (!desc) | 448 | if (!desc) |
447 | goto unmap_exit; | 449 | goto unmap_exit; |
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 4184b7946bbf..b2058b432320 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/gpio.h> | 33 | #include <linux/gpio.h> |
34 | #include <linux/regulator/consumer.h> | 34 | #include <linux/regulator/consumer.h> |
35 | #include <linux/dmaengine.h> | 35 | #include <linux/dmaengine.h> |
36 | #include <linux/types.h> | ||
36 | 37 | ||
37 | #include <asm/dma.h> | 38 | #include <asm/dma.h> |
38 | #include <asm/irq.h> | 39 | #include <asm/irq.h> |
@@ -254,7 +255,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) | |||
254 | if (nents != data->sg_len) | 255 | if (nents != data->sg_len) |
255 | return -EINVAL; | 256 | return -EINVAL; |
256 | 257 | ||
257 | host->desc = host->dma->device->device_prep_slave_sg(host->dma, | 258 | host->desc = dmaengine_prep_slave_sg(host->dma, |
258 | data->sg, data->sg_len, slave_dirn, | 259 | data->sg, data->sg_len, slave_dirn, |
259 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 260 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
260 | 261 | ||
@@ -267,6 +268,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) | |||
267 | wmb(); | 268 | wmb(); |
268 | 269 | ||
269 | dmaengine_submit(host->desc); | 270 | dmaengine_submit(host->desc); |
271 | dma_async_issue_pending(host->dma); | ||
270 | 272 | ||
271 | return 0; | 273 | return 0; |
272 | } | 274 | } |
@@ -710,6 +712,7 @@ static int mxcmci_setup_dma(struct mmc_host *mmc) | |||
710 | config->src_addr_width = 4; | 712 | config->src_addr_width = 4; |
711 | config->dst_maxburst = host->burstlen; | 713 | config->dst_maxburst = host->burstlen; |
712 | config->src_maxburst = host->burstlen; | 714 | config->src_maxburst = host->burstlen; |
715 | config->device_fc = false; | ||
713 | 716 | ||
714 | return dmaengine_slave_config(host->dma, config); | 717 | return dmaengine_slave_config(host->dma, config); |
715 | } | 718 | } |
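mxcmmc also gains the previously missing dma_async_issue_pending() call: dmaengine_submit() only queues a descriptor, and the engine is not obliged to start it until pending work is explicitly issued. The canonical consumer sequence is therefore (sketch; xfer_done and host are illustrative names):

	desc = dmaengine_prep_slave_sg(chan, data->sg, nents, slave_dirn,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;			/* typically fall back to PIO */

	desc->callback = xfer_done;		/* hypothetical completion callback */
	desc->callback_param = host;

	cookie = dmaengine_submit(desc);	/* queue it, get a cookie back */
	dma_async_issue_pending(chan);		/* actually kick the hardware */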
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index 382c835d217c..65f36cf2ff33 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c | |||
@@ -324,7 +324,7 @@ static struct dma_async_tx_descriptor *mxs_mmc_prep_dma( | |||
324 | sg_len = SSP_PIO_NUM; | 324 | sg_len = SSP_PIO_NUM; |
325 | } | 325 | } |
326 | 326 | ||
327 | desc = host->dmach->device->device_prep_slave_sg(host->dmach, | 327 | desc = dmaengine_prep_slave_sg(host->dmach, |
328 | sgl, sg_len, host->slave_dirn, append); | 328 | sgl, sg_len, host->slave_dirn, append); |
329 | if (desc) { | 329 | if (desc) { |
330 | desc->callback = mxs_mmc_dma_irq_callback; | 330 | desc->callback = mxs_mmc_dma_irq_callback; |
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 60f205708f54..aafaf0b6eb1c 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c | |||
@@ -286,7 +286,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host) | |||
286 | DMA_FROM_DEVICE); | 286 | DMA_FROM_DEVICE); |
287 | if (ret > 0) { | 287 | if (ret > 0) { |
288 | host->dma_active = true; | 288 | host->dma_active = true; |
289 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | 289 | desc = dmaengine_prep_slave_sg(chan, sg, ret, |
290 | DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 290 | DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
291 | } | 291 | } |
292 | 292 | ||
@@ -335,7 +335,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) | |||
335 | DMA_TO_DEVICE); | 335 | DMA_TO_DEVICE); |
336 | if (ret > 0) { | 336 | if (ret > 0) { |
337 | host->dma_active = true; | 337 | host->dma_active = true; |
338 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | 338 | desc = dmaengine_prep_slave_sg(chan, sg, ret, |
339 | DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 339 | DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
340 | } | 340 | } |
341 | 341 | ||
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c index 8253ec12003e..fff928604859 100644 --- a/drivers/mmc/host/tmio_mmc_dma.c +++ b/drivers/mmc/host/tmio_mmc_dma.c | |||
@@ -88,7 +88,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) | |||
88 | 88 | ||
89 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); | 89 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); |
90 | if (ret > 0) | 90 | if (ret > 0) |
91 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | 91 | desc = dmaengine_prep_slave_sg(chan, sg, ret, |
92 | DMA_DEV_TO_MEM, DMA_CTRL_ACK); | 92 | DMA_DEV_TO_MEM, DMA_CTRL_ACK); |
93 | 93 | ||
94 | if (desc) { | 94 | if (desc) { |
@@ -169,7 +169,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) | |||
169 | 169 | ||
170 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); | 170 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); |
171 | if (ret > 0) | 171 | if (ret > 0) |
172 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | 172 | desc = dmaengine_prep_slave_sg(chan, sg, ret, |
173 | DMA_MEM_TO_DEV, DMA_CTRL_ACK); | 173 | DMA_MEM_TO_DEV, DMA_CTRL_ACK); |
174 | 174 | ||
175 | if (desc) { | 175 | if (desc) { |
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c index 7db6555ed3ba..590dd5cceed6 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c | |||
@@ -835,7 +835,7 @@ int gpmi_send_command(struct gpmi_nand_data *this) | |||
835 | | BM_GPMI_CTRL0_ADDRESS_INCREMENT | 835 | | BM_GPMI_CTRL0_ADDRESS_INCREMENT |
836 | | BF_GPMI_CTRL0_XFER_COUNT(this->command_length); | 836 | | BF_GPMI_CTRL0_XFER_COUNT(this->command_length); |
837 | pio[1] = pio[2] = 0; | 837 | pio[1] = pio[2] = 0; |
838 | desc = channel->device->device_prep_slave_sg(channel, | 838 | desc = dmaengine_prep_slave_sg(channel, |
839 | (struct scatterlist *)pio, | 839 | (struct scatterlist *)pio, |
840 | ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); | 840 | ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); |
841 | if (!desc) { | 841 | if (!desc) { |
@@ -848,8 +848,7 @@ int gpmi_send_command(struct gpmi_nand_data *this) | |||
848 | 848 | ||
849 | sg_init_one(sgl, this->cmd_buffer, this->command_length); | 849 | sg_init_one(sgl, this->cmd_buffer, this->command_length); |
850 | dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE); | 850 | dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE); |
851 | desc = channel->device->device_prep_slave_sg(channel, | 851 | desc = dmaengine_prep_slave_sg(channel, sgl, 1, DMA_MEM_TO_DEV, 1); |
852 | sgl, 1, DMA_MEM_TO_DEV, 1); | ||
853 | if (!desc) { | 852 | if (!desc) { |
854 | pr_err("step 2 error\n"); | 853 | pr_err("step 2 error\n"); |
855 | return -1; | 854 | return -1; |
@@ -880,8 +879,7 @@ int gpmi_send_data(struct gpmi_nand_data *this) | |||
880 | | BF_GPMI_CTRL0_ADDRESS(address) | 879 | | BF_GPMI_CTRL0_ADDRESS(address) |
881 | | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len); | 880 | | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len); |
882 | pio[1] = 0; | 881 | pio[1] = 0; |
883 | desc = channel->device->device_prep_slave_sg(channel, | 882 | desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio, |
884 | (struct scatterlist *)pio, | ||
885 | ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); | 883 | ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); |
886 | if (!desc) { | 884 | if (!desc) { |
887 | pr_err("step 1 error\n"); | 885 | pr_err("step 1 error\n"); |
@@ -890,7 +888,7 @@ int gpmi_send_data(struct gpmi_nand_data *this) | |||
890 | 888 | ||
891 | /* [2] send DMA request */ | 889 | /* [2] send DMA request */ |
892 | prepare_data_dma(this, DMA_TO_DEVICE); | 890 | prepare_data_dma(this, DMA_TO_DEVICE); |
893 | desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl, | 891 | desc = dmaengine_prep_slave_sg(channel, &this->data_sgl, |
894 | 1, DMA_MEM_TO_DEV, 1); | 892 | 1, DMA_MEM_TO_DEV, 1); |
895 | if (!desc) { | 893 | if (!desc) { |
896 | pr_err("step 2 error\n"); | 894 | pr_err("step 2 error\n"); |
@@ -916,7 +914,7 @@ int gpmi_read_data(struct gpmi_nand_data *this) | |||
916 | | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA) | 914 | | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA) |
917 | | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len); | 915 | | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len); |
918 | pio[1] = 0; | 916 | pio[1] = 0; |
919 | desc = channel->device->device_prep_slave_sg(channel, | 917 | desc = dmaengine_prep_slave_sg(channel, |
920 | (struct scatterlist *)pio, | 918 | (struct scatterlist *)pio, |
921 | ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); | 919 | ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); |
922 | if (!desc) { | 920 | if (!desc) { |
@@ -926,8 +924,8 @@ int gpmi_read_data(struct gpmi_nand_data *this) | |||
926 | 924 | ||
927 | /* [2] : send DMA request */ | 925 | /* [2] : send DMA request */ |
928 | prepare_data_dma(this, DMA_FROM_DEVICE); | 926 | prepare_data_dma(this, DMA_FROM_DEVICE); |
929 | desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl, | 927 | desc = dmaengine_prep_slave_sg(channel, &this->data_sgl, |
930 | 1, DMA_DEV_TO_MEM, 1); | 928 | 1, DMA_DEV_TO_MEM, 1); |
931 | if (!desc) { | 929 | if (!desc) { |
932 | pr_err("step 2 error\n"); | 930 | pr_err("step 2 error\n"); |
933 | return -1; | 931 | return -1; |
@@ -972,8 +970,7 @@ int gpmi_send_page(struct gpmi_nand_data *this, | |||
972 | pio[4] = payload; | 970 | pio[4] = payload; |
973 | pio[5] = auxiliary; | 971 | pio[5] = auxiliary; |
974 | 972 | ||
975 | desc = channel->device->device_prep_slave_sg(channel, | 973 | desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio, |
976 | (struct scatterlist *)pio, | ||
977 | ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); | 974 | ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); |
978 | if (!desc) { | 975 | if (!desc) { |
979 | pr_err("step 2 error\n"); | 976 | pr_err("step 2 error\n"); |
@@ -1007,7 +1004,7 @@ int gpmi_read_page(struct gpmi_nand_data *this, | |||
1007 | | BF_GPMI_CTRL0_ADDRESS(address) | 1004 | | BF_GPMI_CTRL0_ADDRESS(address) |
1008 | | BF_GPMI_CTRL0_XFER_COUNT(0); | 1005 | | BF_GPMI_CTRL0_XFER_COUNT(0); |
1009 | pio[1] = 0; | 1006 | pio[1] = 0; |
1010 | desc = channel->device->device_prep_slave_sg(channel, | 1007 | desc = dmaengine_prep_slave_sg(channel, |
1011 | (struct scatterlist *)pio, 2, | 1008 | (struct scatterlist *)pio, 2, |
1012 | DMA_TRANS_NONE, 0); | 1009 | DMA_TRANS_NONE, 0); |
1013 | if (!desc) { | 1010 | if (!desc) { |
@@ -1036,7 +1033,7 @@ int gpmi_read_page(struct gpmi_nand_data *this, | |||
1036 | pio[3] = geo->page_size; | 1033 | pio[3] = geo->page_size; |
1037 | pio[4] = payload; | 1034 | pio[4] = payload; |
1038 | pio[5] = auxiliary; | 1035 | pio[5] = auxiliary; |
1039 | desc = channel->device->device_prep_slave_sg(channel, | 1036 | desc = dmaengine_prep_slave_sg(channel, |
1040 | (struct scatterlist *)pio, | 1037 | (struct scatterlist *)pio, |
1041 | ARRAY_SIZE(pio), DMA_TRANS_NONE, 1); | 1038 | ARRAY_SIZE(pio), DMA_TRANS_NONE, 1); |
1042 | if (!desc) { | 1039 | if (!desc) { |
@@ -1055,7 +1052,7 @@ int gpmi_read_page(struct gpmi_nand_data *this, | |||
1055 | | BF_GPMI_CTRL0_ADDRESS(address) | 1052 | | BF_GPMI_CTRL0_ADDRESS(address) |
1056 | | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size); | 1053 | | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size); |
1057 | pio[1] = 0; | 1054 | pio[1] = 0; |
1058 | desc = channel->device->device_prep_slave_sg(channel, | 1055 | desc = dmaengine_prep_slave_sg(channel, |
1059 | (struct scatterlist *)pio, 2, | 1056 | (struct scatterlist *)pio, 2, |
1060 | DMA_TRANS_NONE, 1); | 1057 | DMA_TRANS_NONE, 1); |
1061 | if (!desc) { | 1058 | if (!desc) { |
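Editor's note: the gpmi-lib hunks keep the mxs-dma idiom of casting a small array of PIO register words to a scatterlist and preparing it with DMA_TRANS_NONE, with the real payload following as a second descriptor; the trailing 0/1 argument is the controller-specific append/wait flag (seen as "append" in the mxs-mmc hunk above), not a generic dmaengine flag. A sketch of that two-step sequence with hypothetical names; the final submission is an assumption here, as the real driver hands that off to its own helper:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>

/* Hedged sketch of the mxs-dma "PIO words as sgl" idiom used above. */
static int example_gpmi_command(struct dma_chan *channel, struct device *dev,
                                u32 ctrl0, void *cmd_buf, unsigned int cmd_len)
{
        struct dma_async_tx_descriptor *desc;
        struct scatterlist sgl;
        u32 pio[3];

        pio[0] = ctrl0;         /* assumed CTRL0 register value */
        pio[1] = pio[2] = 0;
        /* the u32 array is not a real scatterlist; mxs-dma interprets it */
        desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
                                       ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
        if (!desc)
                return -EINVAL;

        sg_init_one(&sgl, cmd_buf, cmd_len);
        dma_map_sg(dev, &sgl, 1, DMA_TO_DEVICE);
        /* trailing 1 = controller-specific append/wait flag, kept by the patch */
        desc = dmaengine_prep_slave_sg(channel, &sgl, 1, DMA_MEM_TO_DEV, 1);
        if (!desc)
                return -EINVAL;

        dmaengine_submit(desc);
        dma_async_issue_pending(channel);
        return 0;
}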
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index 0686b93f1857..f84dd2dc82b6 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c | |||
@@ -458,7 +458,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev) | |||
458 | if (sg_dma_len(&ctl->sg) % 4) | 458 | if (sg_dma_len(&ctl->sg) % 4) |
459 | sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; | 459 | sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; |
460 | 460 | ||
461 | ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan, | 461 | ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, |
462 | &ctl->sg, 1, DMA_MEM_TO_DEV, | 462 | &ctl->sg, 1, DMA_MEM_TO_DEV, |
463 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); | 463 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); |
464 | if (!ctl->adesc) | 464 | if (!ctl->adesc) |
@@ -570,7 +570,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev) | |||
570 | 570 | ||
571 | sg_dma_len(sg) = DMA_BUFFER_SIZE; | 571 | sg_dma_len(sg) = DMA_BUFFER_SIZE; |
572 | 572 | ||
573 | ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan, | 573 | ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, |
574 | sg, 1, DMA_DEV_TO_MEM, | 574 | sg, 1, DMA_DEV_TO_MEM, |
575 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); | 575 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); |
576 | 576 | ||
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index 8418eb036651..b9f0192758d6 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/spi/spi.h> | 24 | #include <linux/spi/spi.h> |
25 | #include <linux/types.h> | ||
25 | 26 | ||
26 | #include "spi-dw.h" | 27 | #include "spi-dw.h" |
27 | 28 | ||
@@ -136,6 +137,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) | |||
136 | txconf.dst_maxburst = LNW_DMA_MSIZE_16; | 137 | txconf.dst_maxburst = LNW_DMA_MSIZE_16; |
137 | txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | 138 | txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
138 | txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; | 139 | txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; |
140 | txconf.device_fc = false; | ||
139 | 141 | ||
140 | txchan->device->device_control(txchan, DMA_SLAVE_CONFIG, | 142 | txchan->device->device_control(txchan, DMA_SLAVE_CONFIG, |
141 | (unsigned long) &txconf); | 143 | (unsigned long) &txconf); |
@@ -144,7 +146,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) | |||
144 | dws->tx_sgl.dma_address = dws->tx_dma; | 146 | dws->tx_sgl.dma_address = dws->tx_dma; |
145 | dws->tx_sgl.length = dws->len; | 147 | dws->tx_sgl.length = dws->len; |
146 | 148 | ||
147 | txdesc = txchan->device->device_prep_slave_sg(txchan, | 149 | txdesc = dmaengine_prep_slave_sg(txchan, |
148 | &dws->tx_sgl, | 150 | &dws->tx_sgl, |
149 | 1, | 151 | 1, |
150 | DMA_MEM_TO_DEV, | 152 | DMA_MEM_TO_DEV, |
@@ -158,6 +160,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) | |||
158 | rxconf.src_maxburst = LNW_DMA_MSIZE_16; | 160 | rxconf.src_maxburst = LNW_DMA_MSIZE_16; |
159 | rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | 161 | rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
160 | rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; | 162 | rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; |
163 | rxconf.device_fc = false; | ||
161 | 164 | ||
162 | rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG, | 165 | rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG, |
163 | (unsigned long) &rxconf); | 166 | (unsigned long) &rxconf); |
@@ -166,7 +169,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) | |||
166 | dws->rx_sgl.dma_address = dws->rx_dma; | 169 | dws->rx_sgl.dma_address = dws->rx_dma; |
167 | dws->rx_sgl.length = dws->len; | 170 | dws->rx_sgl.length = dws->len; |
168 | 171 | ||
169 | rxdesc = rxchan->device->device_prep_slave_sg(rxchan, | 172 | rxdesc = dmaengine_prep_slave_sg(rxchan, |
170 | &dws->rx_sgl, | 173 | &dws->rx_sgl, |
171 | 1, | 174 | 1, |
172 | DMA_DEV_TO_MEM, | 175 | DMA_DEV_TO_MEM, |
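Editor's note: these spi-dw-mid hunks gain device_fc but still push the configuration through device_control(DMA_SLAVE_CONFIG, ...) by hand; the dmaengine_slave_config() helper performs the same control call. A minimal sketch of the equivalent TX configuration, with the FIFO address and a burst count of 16 as placeholders standing in for the driver's own values:

#include <linux/dmaengine.h>

/* Hedged sketch: same TX configuration, routed through the inline helper. */
static int example_configure_tx(struct dma_chan *txchan, dma_addr_t tx_fifo)
{
        struct dma_slave_config txconf = {
                .direction = DMA_MEM_TO_DEV,
                .dst_addr = tx_fifo,                    /* assumed FIFO address */
                .dst_maxburst = 16,                     /* placeholder burst count */
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
                .device_fc = false,
        };

        return dmaengine_slave_config(txchan, &txconf);
}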
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c index d46e55c720b7..6db2887852d6 100644 --- a/drivers/spi/spi-ep93xx.c +++ b/drivers/spi/spi-ep93xx.c | |||
@@ -633,8 +633,8 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) | |||
633 | if (!nents) | 633 | if (!nents) |
634 | return ERR_PTR(-ENOMEM); | 634 | return ERR_PTR(-ENOMEM); |
635 | 635 | ||
636 | txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents, | 636 | txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, |
637 | slave_dirn, DMA_CTRL_ACK); | 637 | slave_dirn, DMA_CTRL_ACK); |
638 | if (!txd) { | 638 | if (!txd) { |
639 | dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); | 639 | dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); |
640 | return ERR_PTR(-ENOMEM); | 640 | return ERR_PTR(-ENOMEM); |
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index dc8485d1e883..96f0da66b185 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c | |||
@@ -880,10 +880,12 @@ static int configure_dma(struct pl022 *pl022) | |||
880 | struct dma_slave_config rx_conf = { | 880 | struct dma_slave_config rx_conf = { |
881 | .src_addr = SSP_DR(pl022->phybase), | 881 | .src_addr = SSP_DR(pl022->phybase), |
882 | .direction = DMA_DEV_TO_MEM, | 882 | .direction = DMA_DEV_TO_MEM, |
883 | .device_fc = false, | ||
883 | }; | 884 | }; |
884 | struct dma_slave_config tx_conf = { | 885 | struct dma_slave_config tx_conf = { |
885 | .dst_addr = SSP_DR(pl022->phybase), | 886 | .dst_addr = SSP_DR(pl022->phybase), |
886 | .direction = DMA_MEM_TO_DEV, | 887 | .direction = DMA_MEM_TO_DEV, |
888 | .device_fc = false, | ||
887 | }; | 889 | }; |
888 | unsigned int pages; | 890 | unsigned int pages; |
889 | int ret; | 891 | int ret; |
@@ -1017,7 +1019,7 @@ static int configure_dma(struct pl022 *pl022) | |||
1017 | goto err_tx_sgmap; | 1019 | goto err_tx_sgmap; |
1018 | 1020 | ||
1019 | /* Send both scatterlists */ | 1021 | /* Send both scatterlists */ |
1020 | rxdesc = rxchan->device->device_prep_slave_sg(rxchan, | 1022 | rxdesc = dmaengine_prep_slave_sg(rxchan, |
1021 | pl022->sgt_rx.sgl, | 1023 | pl022->sgt_rx.sgl, |
1022 | rx_sglen, | 1024 | rx_sglen, |
1023 | DMA_DEV_TO_MEM, | 1025 | DMA_DEV_TO_MEM, |
@@ -1025,7 +1027,7 @@ static int configure_dma(struct pl022 *pl022) | |||
1025 | if (!rxdesc) | 1027 | if (!rxdesc) |
1026 | goto err_rxdesc; | 1028 | goto err_rxdesc; |
1027 | 1029 | ||
1028 | txdesc = txchan->device->device_prep_slave_sg(txchan, | 1030 | txdesc = dmaengine_prep_slave_sg(txchan, |
1029 | pl022->sgt_tx.sgl, | 1031 | pl022->sgt_tx.sgl, |
1030 | tx_sglen, | 1032 | tx_sglen, |
1031 | DMA_MEM_TO_DEV, | 1033 | DMA_MEM_TO_DEV, |
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 5c6fa5ed3366..ec47d3bdfd13 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c | |||
@@ -1099,7 +1099,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) | |||
1099 | sg_dma_address(sg) = dma->rx_buf_dma + sg->offset; | 1099 | sg_dma_address(sg) = dma->rx_buf_dma + sg->offset; |
1100 | } | 1100 | } |
1101 | sg = dma->sg_rx_p; | 1101 | sg = dma->sg_rx_p; |
1102 | desc_rx = dma->chan_rx->device->device_prep_slave_sg(dma->chan_rx, sg, | 1102 | desc_rx = dmaengine_prep_slave_sg(dma->chan_rx, sg, |
1103 | num, DMA_DEV_TO_MEM, | 1103 | num, DMA_DEV_TO_MEM, |
1104 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 1104 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
1105 | if (!desc_rx) { | 1105 | if (!desc_rx) { |
@@ -1158,7 +1158,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) | |||
1158 | sg_dma_address(sg) = dma->tx_buf_dma + sg->offset; | 1158 | sg_dma_address(sg) = dma->tx_buf_dma + sg->offset; |
1159 | } | 1159 | } |
1160 | sg = dma->sg_tx_p; | 1160 | sg = dma->sg_tx_p; |
1161 | desc_tx = dma->chan_tx->device->device_prep_slave_sg(dma->chan_tx, | 1161 | desc_tx = dmaengine_prep_slave_sg(dma->chan_tx, |
1162 | sg, num, DMA_MEM_TO_DEV, | 1162 | sg, num, DMA_MEM_TO_DEV, |
1163 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 1163 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
1164 | if (!desc_tx) { | 1164 | if (!desc_tx) { |
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 20d795d9b591..0c65c9e66986 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include <linux/dma-mapping.h> | 51 | #include <linux/dma-mapping.h> |
52 | #include <linux/scatterlist.h> | 52 | #include <linux/scatterlist.h> |
53 | #include <linux/delay.h> | 53 | #include <linux/delay.h> |
54 | #include <linux/types.h> | ||
54 | 55 | ||
55 | #include <asm/io.h> | 56 | #include <asm/io.h> |
56 | #include <asm/sizes.h> | 57 | #include <asm/sizes.h> |
@@ -271,6 +272,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap) | |||
271 | .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, | 272 | .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, |
272 | .direction = DMA_MEM_TO_DEV, | 273 | .direction = DMA_MEM_TO_DEV, |
273 | .dst_maxburst = uap->fifosize >> 1, | 274 | .dst_maxburst = uap->fifosize >> 1, |
275 | .device_fc = false, | ||
274 | }; | 276 | }; |
275 | struct dma_chan *chan; | 277 | struct dma_chan *chan; |
276 | dma_cap_mask_t mask; | 278 | dma_cap_mask_t mask; |
@@ -304,6 +306,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap) | |||
304 | .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, | 306 | .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, |
305 | .direction = DMA_DEV_TO_MEM, | 307 | .direction = DMA_DEV_TO_MEM, |
306 | .src_maxburst = uap->fifosize >> 1, | 308 | .src_maxburst = uap->fifosize >> 1, |
309 | .device_fc = false, | ||
307 | }; | 310 | }; |
308 | 311 | ||
309 | chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param); | 312 | chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param); |
@@ -481,7 +484,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap) | |||
481 | return -EBUSY; | 484 | return -EBUSY; |
482 | } | 485 | } |
483 | 486 | ||
484 | desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV, | 487 | desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV, |
485 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 488 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
486 | if (!desc) { | 489 | if (!desc) { |
487 | dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE); | 490 | dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE); |
@@ -664,7 +667,6 @@ static void pl011_dma_rx_callback(void *data); | |||
664 | static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap) | 667 | static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap) |
665 | { | 668 | { |
666 | struct dma_chan *rxchan = uap->dmarx.chan; | 669 | struct dma_chan *rxchan = uap->dmarx.chan; |
667 | struct dma_device *dma_dev; | ||
668 | struct pl011_dmarx_data *dmarx = &uap->dmarx; | 670 | struct pl011_dmarx_data *dmarx = &uap->dmarx; |
669 | struct dma_async_tx_descriptor *desc; | 671 | struct dma_async_tx_descriptor *desc; |
670 | struct pl011_sgbuf *sgbuf; | 672 | struct pl011_sgbuf *sgbuf; |
@@ -675,8 +677,7 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap) | |||
675 | /* Start the RX DMA job */ | 677 | /* Start the RX DMA job */ |
676 | sgbuf = uap->dmarx.use_buf_b ? | 678 | sgbuf = uap->dmarx.use_buf_b ? |
677 | &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; | 679 | &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; |
678 | dma_dev = rxchan->device; | 680 | desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1, |
679 | desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1, | ||
680 | DMA_DEV_TO_MEM, | 681 | DMA_DEV_TO_MEM, |
681 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 682 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
682 | /* | 683 | /* |
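Editor's note: with the wrapper taking the channel itself, pl011_dma_rx_trigger_dma() can drop the dma_device pointer it only fetched for the prep call, as this hunk does. A compact sketch of the resulting RX pattern; the callback wiring follows the usual dmaengine idiom and is an assumption here, with pl011_dma_rx_callback (declared just above this hunk) as the intended callback:

#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Hedged sketch: RX trigger using only the channel pointer. */
static int example_start_rx(struct dma_chan *rxchan, struct scatterlist *sg,
                            dma_async_tx_callback cb, void *cb_arg)
{
        struct dma_async_tx_descriptor *desc;

        desc = dmaengine_prep_slave_sg(rxchan, sg, 1, DMA_DEV_TO_MEM,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return -EBUSY;          /* assumed error handling */

        desc->callback = cb;            /* e.g. pl011_dma_rx_callback */
        desc->callback_param = cb_arg;
        dmaengine_submit(desc);
        dma_async_issue_pending(rxchan);
        return 0;
}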
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index 332f2eb8abbc..e825460478be 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c | |||
@@ -844,7 +844,7 @@ static int dma_handle_rx(struct eg20t_port *priv) | |||
844 | 844 | ||
845 | sg_dma_address(sg) = priv->rx_buf_dma; | 845 | sg_dma_address(sg) = priv->rx_buf_dma; |
846 | 846 | ||
847 | desc = priv->chan_rx->device->device_prep_slave_sg(priv->chan_rx, | 847 | desc = dmaengine_prep_slave_sg(priv->chan_rx, |
848 | sg, 1, DMA_DEV_TO_MEM, | 848 | sg, 1, DMA_DEV_TO_MEM, |
849 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 849 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
850 | 850 | ||
@@ -1003,7 +1003,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv) | |||
1003 | sg_dma_len(sg) = size; | 1003 | sg_dma_len(sg) = size; |
1004 | } | 1004 | } |
1005 | 1005 | ||
1006 | desc = priv->chan_tx->device->device_prep_slave_sg(priv->chan_tx, | 1006 | desc = dmaengine_prep_slave_sg(priv->chan_tx, |
1007 | priv->sg_tx_p, nent, DMA_MEM_TO_DEV, | 1007 | priv->sg_tx_p, nent, DMA_MEM_TO_DEV, |
1008 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 1008 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
1009 | if (!desc) { | 1009 | if (!desc) { |
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 61b7fd2729cd..f8db8a70c14e 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c | |||
@@ -1338,7 +1338,7 @@ static void sci_submit_rx(struct sci_port *s) | |||
1338 | struct scatterlist *sg = &s->sg_rx[i]; | 1338 | struct scatterlist *sg = &s->sg_rx[i]; |
1339 | struct dma_async_tx_descriptor *desc; | 1339 | struct dma_async_tx_descriptor *desc; |
1340 | 1340 | ||
1341 | desc = chan->device->device_prep_slave_sg(chan, | 1341 | desc = dmaengine_prep_slave_sg(chan, |
1342 | sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); | 1342 | sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); |
1343 | 1343 | ||
1344 | if (desc) { | 1344 | if (desc) { |
@@ -1453,7 +1453,7 @@ static void work_fn_tx(struct work_struct *work) | |||
1453 | 1453 | ||
1454 | BUG_ON(!sg_dma_len(sg)); | 1454 | BUG_ON(!sg_dma_len(sg)); |
1455 | 1455 | ||
1456 | desc = chan->device->device_prep_slave_sg(chan, | 1456 | desc = dmaengine_prep_slave_sg(chan, |
1457 | sg, s->sg_len_tx, DMA_MEM_TO_DEV, | 1457 | sg, s->sg_len_tx, DMA_MEM_TO_DEV, |
1458 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 1458 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
1459 | if (!desc) { | 1459 | if (!desc) { |
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c index 97cb45916c43..d05c7fbbb703 100644 --- a/drivers/usb/musb/ux500_dma.c +++ b/drivers/usb/musb/ux500_dma.c | |||
@@ -115,12 +115,12 @@ static bool ux500_configure_channel(struct dma_channel *channel, | |||
115 | slave_conf.dst_addr = usb_fifo_addr; | 115 | slave_conf.dst_addr = usb_fifo_addr; |
116 | slave_conf.dst_addr_width = addr_width; | 116 | slave_conf.dst_addr_width = addr_width; |
117 | slave_conf.dst_maxburst = 16; | 117 | slave_conf.dst_maxburst = 16; |
118 | slave_conf.device_fc = false; | ||
118 | 119 | ||
119 | dma_chan->device->device_control(dma_chan, DMA_SLAVE_CONFIG, | 120 | dma_chan->device->device_control(dma_chan, DMA_SLAVE_CONFIG, |
120 | (unsigned long) &slave_conf); | 121 | (unsigned long) &slave_conf); |
121 | 122 | ||
122 | dma_desc = dma_chan->device-> | 123 | dma_desc = dmaengine_prep_slave_sg(dma_chan, &sg, 1, direction, |
123 | device_prep_slave_sg(dma_chan, &sg, 1, direction, | ||
124 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 124 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
125 | if (!dma_desc) | 125 | if (!dma_desc) |
126 | return false; | 126 | return false; |
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 3648c82a17fe..6ec7f838d7fa 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c | |||
@@ -786,9 +786,8 @@ static void xfer_work(struct work_struct *work) | |||
786 | sg_dma_address(&sg) = pkt->dma + pkt->actual; | 786 | sg_dma_address(&sg) = pkt->dma + pkt->actual; |
787 | sg_dma_len(&sg) = pkt->trans; | 787 | sg_dma_len(&sg) = pkt->trans; |
788 | 788 | ||
789 | desc = chan->device->device_prep_slave_sg(chan, &sg, 1, dir, | 789 | desc = dmaengine_prep_slave_sg(chan, &sg, 1, dir, |
790 | DMA_PREP_INTERRUPT | | 790 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
791 | DMA_CTRL_ACK); | ||
792 | if (!desc) | 791 | if (!desc) |
793 | return; | 792 | return; |
794 | 793 | ||
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c index 727a5149d818..eec0d7b748eb 100644 --- a/drivers/video/mx3fb.c +++ b/drivers/video/mx3fb.c | |||
@@ -337,7 +337,7 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi) | |||
337 | 337 | ||
338 | /* This enables the channel */ | 338 | /* This enables the channel */ |
339 | if (mx3_fbi->cookie < 0) { | 339 | if (mx3_fbi->cookie < 0) { |
340 | mx3_fbi->txd = dma_chan->device->device_prep_slave_sg(dma_chan, | 340 | mx3_fbi->txd = dmaengine_prep_slave_sg(dma_chan, |
341 | &mx3_fbi->sg[0], 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); | 341 | &mx3_fbi->sg[0], 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); |
342 | if (!mx3_fbi->txd) { | 342 | if (!mx3_fbi->txd) { |
343 | dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n", | 343 | dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n", |
@@ -1091,7 +1091,7 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var, | |||
1091 | if (mx3_fbi->txd) | 1091 | if (mx3_fbi->txd) |
1092 | async_tx_ack(mx3_fbi->txd); | 1092 | async_tx_ack(mx3_fbi->txd); |
1093 | 1093 | ||
1094 | txd = dma_chan->device->device_prep_slave_sg(dma_chan, sg + | 1094 | txd = dmaengine_prep_slave_sg(dma_chan, sg + |
1095 | mx3_fbi->cur_ipu_buf, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); | 1095 | mx3_fbi->cur_ipu_buf, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); |
1096 | if (!txd) { | 1096 | if (!txd) { |
1097 | dev_err(fbi->device, | 1097 | dev_err(fbi->device, |
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h index 033f6aa670de..e64ce2cfee99 100644 --- a/include/linux/amba/pl08x.h +++ b/include/linux/amba/pl08x.h | |||
@@ -47,9 +47,6 @@ enum { | |||
47 | * @muxval: a number usually used to poke into some mux regiser to | 47 | * @muxval: a number usually used to poke into some mux regiser to |
48 | * mux in the signal to this channel | 48 | * mux in the signal to this channel |
49 | * @cctl_opt: default options for the channel control register | 49 | * @cctl_opt: default options for the channel control register |
50 | * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave | ||
51 | * channels. Fill with 'true' if peripheral should be flow controller. Direction | ||
52 | * will be selected at Runtime. | ||
53 | * @addr: source/target address in physical memory for this DMA channel, | 50 | * @addr: source/target address in physical memory for this DMA channel, |
54 | * can be the address of a FIFO register for burst requests for example. | 51 | * can be the address of a FIFO register for burst requests for example. |
55 | * This can be left undefined if the PrimeCell API is used for configuring | 52 | * This can be left undefined if the PrimeCell API is used for configuring |
@@ -68,7 +65,6 @@ struct pl08x_channel_data { | |||
68 | int max_signal; | 65 | int max_signal; |
69 | u32 muxval; | 66 | u32 muxval; |
70 | u32 cctl; | 67 | u32 cctl; |
71 | bool device_fc; | ||
72 | dma_addr_t addr; | 68 | dma_addr_t addr; |
73 | bool circular_buffer; | 69 | bool circular_buffer; |
74 | bool single; | 70 | bool single; |
@@ -176,13 +172,15 @@ enum pl08x_dma_chan_state { | |||
176 | * @runtime_addr: address for RX/TX according to the runtime config | 172 | * @runtime_addr: address for RX/TX according to the runtime config |
177 | * @runtime_direction: current direction of this channel according to | 173 | * @runtime_direction: current direction of this channel according to |
178 | * runtime config | 174 | * runtime config |
179 | * @lc: last completed transaction on this channel | ||
180 | * @pend_list: queued transactions pending on this channel | 175 | * @pend_list: queued transactions pending on this channel |
181 | * @at: active transaction on this channel | 176 | * @at: active transaction on this channel |
182 | * @lock: a lock for this channel data | 177 | * @lock: a lock for this channel data |
183 | * @host: a pointer to the host (internal use) | 178 | * @host: a pointer to the host (internal use) |
184 | * @state: whether the channel is idle, paused, running etc | 179 | * @state: whether the channel is idle, paused, running etc |
185 | * @slave: whether this channel is a device (slave) or for memcpy | 180 | * @slave: whether this channel is a device (slave) or for memcpy |
181 | * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave | ||
182 | * channels. Fill with 'true' if peripheral should be flow controller. Direction | ||
183 | * will be selected at Runtime. | ||
186 | * @waiting: a TX descriptor on this channel which is waiting for a physical | 184 | * @waiting: a TX descriptor on this channel which is waiting for a physical |
187 | * channel to become available | 185 | * channel to become available |
188 | */ | 186 | */ |
@@ -198,13 +196,13 @@ struct pl08x_dma_chan { | |||
198 | u32 src_cctl; | 196 | u32 src_cctl; |
199 | u32 dst_cctl; | 197 | u32 dst_cctl; |
200 | enum dma_transfer_direction runtime_direction; | 198 | enum dma_transfer_direction runtime_direction; |
201 | dma_cookie_t lc; | ||
202 | struct list_head pend_list; | 199 | struct list_head pend_list; |
203 | struct pl08x_txd *at; | 200 | struct pl08x_txd *at; |
204 | spinlock_t lock; | 201 | spinlock_t lock; |
205 | struct pl08x_driver_data *host; | 202 | struct pl08x_driver_data *host; |
206 | enum pl08x_dma_chan_state state; | 203 | enum pl08x_dma_chan_state state; |
207 | bool slave; | 204 | bool slave; |
205 | bool device_fc; | ||
208 | struct pl08x_txd *waiting; | 206 | struct pl08x_txd *waiting; |
209 | }; | 207 | }; |
210 | 208 | ||
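Editor's note: device_fc leaves the board-level pl08x_channel_data and reappears on the runtime pl08x_dma_chan, matching the new device_fc member of dma_slave_config added further down in this diff. A sketch of how a client would now express the flow-controller choice at runtime; the assumption that pl08x latches the value from its slave-config path is inferred from this hunk, not shown in it, and the address and width values are placeholders:

#include <linux/dmaengine.h>

/* Hedged sketch: requesting peripheral flow control per channel at runtime. */
static int example_request_fc(struct dma_chan *chan, dma_addr_t fifo)
{
        struct dma_slave_config cfg = {
                .direction = DMA_MEM_TO_DEV,
                .dst_addr = fifo,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst = 4,
                .device_fc = true,      /* peripheral is the flow controller */
        };

        return dmaengine_slave_config(chan, &cfg);
}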
diff --git a/include/linux/amba/pl330.h b/include/linux/amba/pl330.h index 12e023c19ac1..fe93758e8403 100644 --- a/include/linux/amba/pl330.h +++ b/include/linux/amba/pl330.h | |||
@@ -13,7 +13,6 @@ | |||
13 | #define __AMBA_PL330_H_ | 13 | #define __AMBA_PL330_H_ |
14 | 14 | ||
15 | #include <linux/dmaengine.h> | 15 | #include <linux/dmaengine.h> |
16 | #include <asm/hardware/pl330.h> | ||
17 | 16 | ||
18 | struct dma_pl330_platdata { | 17 | struct dma_pl330_platdata { |
19 | /* | 18 | /* |
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index a5966f691ef8..676f967390ae 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -18,14 +18,15 @@ | |||
18 | * The full GNU General Public License is included in this distribution in the | 18 | * The full GNU General Public License is included in this distribution in the |
19 | * file called COPYING. | 19 | * file called COPYING. |
20 | */ | 20 | */ |
21 | #ifndef DMAENGINE_H | 21 | #ifndef LINUX_DMAENGINE_H |
22 | #define DMAENGINE_H | 22 | #define LINUX_DMAENGINE_H |
23 | 23 | ||
24 | #include <linux/device.h> | 24 | #include <linux/device.h> |
25 | #include <linux/uio.h> | 25 | #include <linux/uio.h> |
26 | #include <linux/bug.h> | 26 | #include <linux/bug.h> |
27 | #include <linux/scatterlist.h> | 27 | #include <linux/scatterlist.h> |
28 | #include <linux/bitmap.h> | 28 | #include <linux/bitmap.h> |
29 | #include <linux/types.h> | ||
29 | #include <asm/page.h> | 30 | #include <asm/page.h> |
30 | 31 | ||
31 | /** | 32 | /** |
@@ -258,6 +259,7 @@ struct dma_chan_percpu { | |||
258 | * struct dma_chan - devices supply DMA channels, clients use them | 259 | * struct dma_chan - devices supply DMA channels, clients use them |
259 | * @device: ptr to the dma device who supplies this channel, always !%NULL | 260 | * @device: ptr to the dma device who supplies this channel, always !%NULL |
260 | * @cookie: last cookie value returned to client | 261 | * @cookie: last cookie value returned to client |
262 | * @completed_cookie: last completed cookie for this channel | ||
261 | * @chan_id: channel ID for sysfs | 263 | * @chan_id: channel ID for sysfs |
262 | * @dev: class device for sysfs | 264 | * @dev: class device for sysfs |
263 | * @device_node: used to add this to the device chan list | 265 | * @device_node: used to add this to the device chan list |
@@ -269,6 +271,7 @@ struct dma_chan_percpu { | |||
269 | struct dma_chan { | 271 | struct dma_chan { |
270 | struct dma_device *device; | 272 | struct dma_device *device; |
271 | dma_cookie_t cookie; | 273 | dma_cookie_t cookie; |
274 | dma_cookie_t completed_cookie; | ||
272 | 275 | ||
273 | /* sysfs */ | 276 | /* sysfs */ |
274 | int chan_id; | 277 | int chan_id; |
@@ -332,6 +335,9 @@ enum dma_slave_buswidth { | |||
332 | * may or may not be applicable on memory sources. | 335 | * may or may not be applicable on memory sources. |
333 | * @dst_maxburst: same as src_maxburst but for destination target | 336 | * @dst_maxburst: same as src_maxburst but for destination target |
334 | * mutatis mutandis. | 337 | * mutatis mutandis. |
338 | * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill | ||
339 | * with 'true' if peripheral should be flow controller. Direction will be | ||
340 | * selected at Runtime. | ||
335 | * | 341 | * |
336 | * This struct is passed in as configuration data to a DMA engine | 342 | * This struct is passed in as configuration data to a DMA engine |
337 | * in order to set up a certain channel for DMA transport at runtime. | 343 | * in order to set up a certain channel for DMA transport at runtime. |
@@ -358,6 +364,7 @@ struct dma_slave_config { | |||
358 | enum dma_slave_buswidth dst_addr_width; | 364 | enum dma_slave_buswidth dst_addr_width; |
359 | u32 src_maxburst; | 365 | u32 src_maxburst; |
360 | u32 dst_maxburst; | 366 | u32 dst_maxburst; |
367 | bool device_fc; | ||
361 | }; | 368 | }; |
362 | 369 | ||
363 | static inline const char *dma_chan_name(struct dma_chan *chan) | 370 | static inline const char *dma_chan_name(struct dma_chan *chan) |
@@ -576,10 +583,11 @@ struct dma_device { | |||
576 | struct dma_async_tx_descriptor *(*device_prep_slave_sg)( | 583 | struct dma_async_tx_descriptor *(*device_prep_slave_sg)( |
577 | struct dma_chan *chan, struct scatterlist *sgl, | 584 | struct dma_chan *chan, struct scatterlist *sgl, |
578 | unsigned int sg_len, enum dma_transfer_direction direction, | 585 | unsigned int sg_len, enum dma_transfer_direction direction, |
579 | unsigned long flags); | 586 | unsigned long flags, void *context); |
580 | struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)( | 587 | struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)( |
581 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | 588 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, |
582 | size_t period_len, enum dma_transfer_direction direction); | 589 | size_t period_len, enum dma_transfer_direction direction, |
590 | void *context); | ||
583 | struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( | 591 | struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( |
584 | struct dma_chan *chan, struct dma_interleaved_template *xt, | 592 | struct dma_chan *chan, struct dma_interleaved_template *xt, |
585 | unsigned long flags); | 593 | unsigned long flags); |
@@ -613,7 +621,24 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( | |||
613 | struct scatterlist sg; | 621 | struct scatterlist sg; |
614 | sg_init_one(&sg, buf, len); | 622 | sg_init_one(&sg, buf, len); |
615 | 623 | ||
616 | return chan->device->device_prep_slave_sg(chan, &sg, 1, dir, flags); | 624 | return chan->device->device_prep_slave_sg(chan, &sg, 1, |
625 | dir, flags, NULL); | ||
626 | } | ||
627 | |||
628 | static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg( | ||
629 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, | ||
630 | enum dma_transfer_direction dir, unsigned long flags) | ||
631 | { | ||
632 | return chan->device->device_prep_slave_sg(chan, sgl, sg_len, | ||
633 | dir, flags, NULL); | ||
634 | } | ||
635 | |||
636 | static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic( | ||
637 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | ||
638 | size_t period_len, enum dma_transfer_direction dir) | ||
639 | { | ||
640 | return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len, | ||
641 | period_len, dir, NULL); | ||
617 | } | 642 | } |
618 | 643 | ||
619 | static inline int dmaengine_terminate_all(struct dma_chan *chan) | 644 | static inline int dmaengine_terminate_all(struct dma_chan *chan) |
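Editor's note: this dmaengine.h hunk is the core of the series: device_prep_slave_sg() and device_prep_dma_cyclic() grow a trailing context argument, and the new client-facing dmaengine_prep_slave_sg()/dmaengine_prep_dma_cyclic() wrappers pass NULL for it. A sketch of what a DMA controller driver's callback looks like after this change; the foo_ names are hypothetical:

#include <linux/dmaengine.h>

/* Hedged sketch: controller-driver side of the new prep signature. */
static struct dma_async_tx_descriptor *
foo_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                  unsigned int sg_len, enum dma_transfer_direction dir,
                  unsigned long flags, void *context)
{
        /* build and return a descriptor; context is NULL for wrapper callers */
        return NULL;
}

static void foo_register(struct dma_device *dd)
{
        dd->device_prep_slave_sg = foo_prep_slave_sg;
}

The completed_cookie member added to struct dma_chan above gives such drivers a common home for the last finished cookie, in place of private fields like the pl08x lc member removed earlier in this diff.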
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h index f2c64f92c4a0..2412e02d7c0f 100644 --- a/include/linux/dw_dmac.h +++ b/include/linux/dw_dmac.h | |||
@@ -31,18 +31,6 @@ struct dw_dma_platform_data { | |||
31 | unsigned char chan_priority; | 31 | unsigned char chan_priority; |
32 | }; | 32 | }; |
33 | 33 | ||
34 | /** | ||
35 | * enum dw_dma_slave_width - DMA slave register access width. | ||
36 | * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses | ||
37 | * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses | ||
38 | * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses | ||
39 | */ | ||
40 | enum dw_dma_slave_width { | ||
41 | DW_DMA_SLAVE_WIDTH_8BIT, | ||
42 | DW_DMA_SLAVE_WIDTH_16BIT, | ||
43 | DW_DMA_SLAVE_WIDTH_32BIT, | ||
44 | }; | ||
45 | |||
46 | /* bursts size */ | 34 | /* bursts size */ |
47 | enum dw_dma_msize { | 35 | enum dw_dma_msize { |
48 | DW_DMA_MSIZE_1, | 36 | DW_DMA_MSIZE_1, |
@@ -55,47 +43,21 @@ enum dw_dma_msize { | |||
55 | DW_DMA_MSIZE_256, | 43 | DW_DMA_MSIZE_256, |
56 | }; | 44 | }; |
57 | 45 | ||
58 | /* flow controller */ | ||
59 | enum dw_dma_fc { | ||
60 | DW_DMA_FC_D_M2M, | ||
61 | DW_DMA_FC_D_M2P, | ||
62 | DW_DMA_FC_D_P2M, | ||
63 | DW_DMA_FC_D_P2P, | ||
64 | DW_DMA_FC_P_P2M, | ||
65 | DW_DMA_FC_SP_P2P, | ||
66 | DW_DMA_FC_P_M2P, | ||
67 | DW_DMA_FC_DP_P2P, | ||
68 | }; | ||
69 | |||
70 | /** | 46 | /** |
71 | * struct dw_dma_slave - Controller-specific information about a slave | 47 | * struct dw_dma_slave - Controller-specific information about a slave |
72 | * | 48 | * |
73 | * @dma_dev: required DMA master device | 49 | * @dma_dev: required DMA master device |
74 | * @tx_reg: physical address of data register used for | ||
75 | * memory-to-peripheral transfers | ||
76 | * @rx_reg: physical address of data register used for | ||
77 | * peripheral-to-memory transfers | ||
78 | * @reg_width: peripheral register width | ||
79 | * @cfg_hi: Platform-specific initializer for the CFG_HI register | 50 | * @cfg_hi: Platform-specific initializer for the CFG_HI register |
80 | * @cfg_lo: Platform-specific initializer for the CFG_LO register | 51 | * @cfg_lo: Platform-specific initializer for the CFG_LO register |
81 | * @src_master: src master for transfers on allocated channel. | 52 | * @src_master: src master for transfers on allocated channel. |
82 | * @dst_master: dest master for transfers on allocated channel. | 53 | * @dst_master: dest master for transfers on allocated channel. |
83 | * @src_msize: src burst size. | ||
84 | * @dst_msize: dest burst size. | ||
85 | * @fc: flow controller for DMA transfer | ||
86 | */ | 54 | */ |
87 | struct dw_dma_slave { | 55 | struct dw_dma_slave { |
88 | struct device *dma_dev; | 56 | struct device *dma_dev; |
89 | dma_addr_t tx_reg; | ||
90 | dma_addr_t rx_reg; | ||
91 | enum dw_dma_slave_width reg_width; | ||
92 | u32 cfg_hi; | 57 | u32 cfg_hi; |
93 | u32 cfg_lo; | 58 | u32 cfg_lo; |
94 | u8 src_master; | 59 | u8 src_master; |
95 | u8 dst_master; | 60 | u8 dst_master; |
96 | u8 src_msize; | ||
97 | u8 dst_msize; | ||
98 | u8 fc; | ||
99 | }; | 61 | }; |
100 | 62 | ||
101 | /* Platform-configurable bits in CFG_HI */ | 63 | /* Platform-configurable bits in CFG_HI */ |
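Editor's note: the fields removed from dw_dma_slave here are the ones that struct dma_slave_config (plus the new device_fc flag) now carries at runtime. A rough field-for-field mapping, offered as an interpretation of this hunk rather than anything stated in the patch:

/*
 * dw_dma_slave (platform data)   ->  dma_slave_config (runtime)
 *   tx_reg                       ->  dst_addr
 *   rx_reg                       ->  src_addr
 *   reg_width                    ->  src_addr_width / dst_addr_width
 *   src_msize / dst_msize        ->  src_maxburst / dst_maxburst
 *   fc                           ->  device_fc (true if the peripheral
 *                                    should be the flow controller)
 */

The abdac and ac97c conversions below show this replacement in practice.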
diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c index 4fa1dbd8ee83..f7c2bb08055d 100644 --- a/sound/atmel/abdac.c +++ b/sound/atmel/abdac.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
19 | #include <linux/types.h> | ||
19 | #include <linux/io.h> | 20 | #include <linux/io.h> |
20 | 21 | ||
21 | #include <sound/core.h> | 22 | #include <sound/core.h> |
@@ -467,15 +468,24 @@ static int __devinit atmel_abdac_probe(struct platform_device *pdev) | |||
467 | snd_card_set_dev(card, &pdev->dev); | 468 | snd_card_set_dev(card, &pdev->dev); |
468 | 469 | ||
469 | if (pdata->dws.dma_dev) { | 470 | if (pdata->dws.dma_dev) { |
470 | struct dw_dma_slave *dws = &pdata->dws; | ||
471 | dma_cap_mask_t mask; | 471 | dma_cap_mask_t mask; |
472 | 472 | ||
473 | dws->tx_reg = regs->start + DAC_DATA; | ||
474 | |||
475 | dma_cap_zero(mask); | 473 | dma_cap_zero(mask); |
476 | dma_cap_set(DMA_SLAVE, mask); | 474 | dma_cap_set(DMA_SLAVE, mask); |
477 | 475 | ||
478 | dac->dma.chan = dma_request_channel(mask, filter, dws); | 476 | dac->dma.chan = dma_request_channel(mask, filter, &pdata->dws); |
477 | if (dac->dma.chan) { | ||
478 | struct dma_slave_config dma_conf = { | ||
479 | .dst_addr = regs->start + DAC_DATA, | ||
480 | .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, | ||
481 | .src_maxburst = 1, | ||
482 | .dst_maxburst = 1, | ||
483 | .direction = DMA_MEM_TO_DEV, | ||
484 | .device_fc = false, | ||
485 | }; | ||
486 | |||
487 | dmaengine_slave_config(dac->dma.chan, &dma_conf); | ||
488 | } | ||
479 | } | 489 | } |
480 | if (!pdata->dws.dma_dev || !dac->dma.chan) { | 490 | if (!pdata->dws.dma_dev || !dac->dma.chan) { |
481 | dev_dbg(&pdev->dev, "DMA not available\n"); | 491 | dev_dbg(&pdev->dev, "DMA not available\n"); |
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c index 61dade698358..115313ef54d6 100644 --- a/sound/atmel/ac97c.c +++ b/sound/atmel/ac97c.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/mutex.h> | 21 | #include <linux/mutex.h> |
22 | #include <linux/gpio.h> | 22 | #include <linux/gpio.h> |
23 | #include <linux/types.h> | ||
23 | #include <linux/io.h> | 24 | #include <linux/io.h> |
24 | 25 | ||
25 | #include <sound/core.h> | 26 | #include <sound/core.h> |
@@ -1014,16 +1015,28 @@ static int __devinit atmel_ac97c_probe(struct platform_device *pdev) | |||
1014 | 1015 | ||
1015 | if (cpu_is_at32ap7000()) { | 1016 | if (cpu_is_at32ap7000()) { |
1016 | if (pdata->rx_dws.dma_dev) { | 1017 | if (pdata->rx_dws.dma_dev) { |
1017 | struct dw_dma_slave *dws = &pdata->rx_dws; | ||
1018 | dma_cap_mask_t mask; | 1018 | dma_cap_mask_t mask; |
1019 | 1019 | ||
1020 | dws->rx_reg = regs->start + AC97C_CARHR + 2; | ||
1021 | |||
1022 | dma_cap_zero(mask); | 1020 | dma_cap_zero(mask); |
1023 | dma_cap_set(DMA_SLAVE, mask); | 1021 | dma_cap_set(DMA_SLAVE, mask); |
1024 | 1022 | ||
1025 | chip->dma.rx_chan = dma_request_channel(mask, filter, | 1023 | chip->dma.rx_chan = dma_request_channel(mask, filter, |
1026 | dws); | 1024 | &pdata->rx_dws); |
1025 | if (chip->dma.rx_chan) { | ||
1026 | struct dma_slave_config dma_conf = { | ||
1027 | .src_addr = regs->start + AC97C_CARHR + | ||
1028 | 2, | ||
1029 | .src_addr_width = | ||
1030 | DMA_SLAVE_BUSWIDTH_2_BYTES, | ||
1031 | .src_maxburst = 1, | ||
1032 | .dst_maxburst = 1, | ||
1033 | .direction = DMA_DEV_TO_MEM, | ||
1034 | .device_fc = false, | ||
1035 | }; | ||
1036 | |||
1037 | dmaengine_slave_config(chip->dma.rx_chan, | ||
1038 | &dma_conf); | ||
1039 | } | ||
1027 | 1040 | ||
1028 | dev_info(&chip->pdev->dev, "using %s for DMA RX\n", | 1041 | dev_info(&chip->pdev->dev, "using %s for DMA RX\n", |
1029 | dev_name(&chip->dma.rx_chan->dev->device)); | 1042 | dev_name(&chip->dma.rx_chan->dev->device)); |
@@ -1031,16 +1044,28 @@ static int __devinit atmel_ac97c_probe(struct platform_device *pdev) | |||
1031 | } | 1044 | } |
1032 | 1045 | ||
1033 | if (pdata->tx_dws.dma_dev) { | 1046 | if (pdata->tx_dws.dma_dev) { |
1034 | struct dw_dma_slave *dws = &pdata->tx_dws; | ||
1035 | dma_cap_mask_t mask; | 1047 | dma_cap_mask_t mask; |
1036 | 1048 | ||
1037 | dws->tx_reg = regs->start + AC97C_CATHR + 2; | ||
1038 | |||
1039 | dma_cap_zero(mask); | 1049 | dma_cap_zero(mask); |
1040 | dma_cap_set(DMA_SLAVE, mask); | 1050 | dma_cap_set(DMA_SLAVE, mask); |
1041 | 1051 | ||
1042 | chip->dma.tx_chan = dma_request_channel(mask, filter, | 1052 | chip->dma.tx_chan = dma_request_channel(mask, filter, |
1043 | dws); | 1053 | &pdata->tx_dws); |
1054 | if (chip->dma.tx_chan) { | ||
1055 | struct dma_slave_config dma_conf = { | ||
1056 | .dst_addr = regs->start + AC97C_CATHR + | ||
1057 | 2, | ||
1058 | .dst_addr_width = | ||
1059 | DMA_SLAVE_BUSWIDTH_2_BYTES, | ||
1060 | .src_maxburst = 1, | ||
1061 | .dst_maxburst = 1, | ||
1062 | .direction = DMA_MEM_TO_DEV, | ||
1063 | .device_fc = false, | ||
1064 | }; | ||
1065 | |||
1066 | dmaengine_slave_config(chip->dma.tx_chan, | ||
1067 | &dma_conf); | ||
1068 | } | ||
1044 | 1069 | ||
1045 | dev_info(&chip->pdev->dev, "using %s for DMA TX\n", | 1070 | dev_info(&chip->pdev->dev, "using %s for DMA TX\n", |
1046 | dev_name(&chip->dma.tx_chan->dev->device)); | 1071 | dev_name(&chip->dma.tx_chan->dev->device)); |
diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c index e43c8fa2788b..6b818de2fc03 100644 --- a/sound/soc/imx/imx-pcm-dma-mx2.c +++ b/sound/soc/imx/imx-pcm-dma-mx2.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/dmaengine.h> | 23 | #include <linux/dmaengine.h> |
24 | #include <linux/types.h> | ||
24 | 25 | ||
25 | #include <sound/core.h> | 26 | #include <sound/core.h> |
26 | #include <sound/initval.h> | 27 | #include <sound/initval.h> |
@@ -58,6 +59,8 @@ static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream, | |||
58 | if (ret) | 59 | if (ret) |
59 | return ret; | 60 | return ret; |
60 | 61 | ||
62 | slave_config.device_fc = false; | ||
63 | |||
61 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { | 64 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { |
62 | slave_config.dst_addr = dma_params->dma_addr; | 65 | slave_config.dst_addr = dma_params->dma_addr; |
63 | slave_config.dst_maxburst = dma_params->burstsize; | 66 | slave_config.dst_maxburst = dma_params->burstsize; |
diff --git a/sound/soc/sh/siu_pcm.c b/sound/soc/sh/siu_pcm.c index 0193e595d415..5cfcc655e95f 100644 --- a/sound/soc/sh/siu_pcm.c +++ b/sound/soc/sh/siu_pcm.c | |||
@@ -130,7 +130,7 @@ static int siu_pcm_wr_set(struct siu_port *port_info, | |||
130 | sg_dma_len(&sg) = size; | 130 | sg_dma_len(&sg) = size; |
131 | sg_dma_address(&sg) = buff; | 131 | sg_dma_address(&sg) = buff; |
132 | 132 | ||
133 | desc = siu_stream->chan->device->device_prep_slave_sg(siu_stream->chan, | 133 | desc = dmaengine_prep_slave_sg(siu_stream->chan, |
134 | &sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 134 | &sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
135 | if (!desc) { | 135 | if (!desc) { |
136 | dev_err(dev, "Failed to allocate a dma descriptor\n"); | 136 | dev_err(dev, "Failed to allocate a dma descriptor\n"); |
@@ -180,7 +180,7 @@ static int siu_pcm_rd_set(struct siu_port *port_info, | |||
180 | sg_dma_len(&sg) = size; | 180 | sg_dma_len(&sg) = size; |
181 | sg_dma_address(&sg) = buff; | 181 | sg_dma_address(&sg) = buff; |
182 | 182 | ||
183 | desc = siu_stream->chan->device->device_prep_slave_sg(siu_stream->chan, | 183 | desc = dmaengine_prep_slave_sg(siu_stream->chan, |
184 | &sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 184 | &sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
185 | if (!desc) { | 185 | if (!desc) { |
186 | dev_err(dev, "Failed to allocate dma descriptor\n"); | 186 | dev_err(dev, "Failed to allocate dma descriptor\n"); |
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c index 21554611557c..b609d2c64c55 100644 --- a/sound/soc/txx9/txx9aclc.c +++ b/sound/soc/txx9/txx9aclc.c | |||
@@ -132,7 +132,7 @@ txx9aclc_dma_submit(struct txx9aclc_dmadata *dmadata, dma_addr_t buf_dma_addr) | |||
132 | sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf_dma_addr)), | 132 | sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf_dma_addr)), |
133 | dmadata->frag_bytes, buf_dma_addr & (PAGE_SIZE - 1)); | 133 | dmadata->frag_bytes, buf_dma_addr & (PAGE_SIZE - 1)); |
134 | sg_dma_address(&sg) = buf_dma_addr; | 134 | sg_dma_address(&sg) = buf_dma_addr; |
135 | desc = chan->device->device_prep_slave_sg(chan, &sg, 1, | 135 | desc = dmaengine_prep_slave_sg(chan, &sg, 1, |
136 | dmadata->substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? | 136 | dmadata->substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? |
137 | DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, | 137 | DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, |
138 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 138 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
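Editor's note: the sound conversions above all use the slave-sg path, but the dmaengine.h hunk earlier in this diff also adds a cyclic wrapper. A sketch, under the assumption of a typical ALSA ring buffer, of how a driver might use it; every name here is hypothetical:

#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hedged sketch: cyclic transfer for an audio ring buffer. */
static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
                                size_t buf_len, size_t period_len,
                                dma_async_tx_callback period_cb, void *arg)
{
        struct dma_async_tx_descriptor *desc;

        /* wrapper added in the dmaengine.h hunk; no flags argument yet */
        desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
                                         DMA_MEM_TO_DEV);
        if (!desc)
                return -ENOMEM;

        desc->callback = period_cb;     /* assumed per-period callback */
        desc->callback_param = arg;
        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
        return 0;
}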