Diffstat (limited to 'drivers/dma/pl330.c')
-rw-r--r-- | drivers/dma/pl330.c | 2149
1 file changed, 2110 insertions(+), 39 deletions(-)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 16b66c827f19..282caf118be8 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1,4 +1,6 @@
1 | /* linux/drivers/dma/pl330.c | 1 | /* |
2 | * Copyright (c) 2012 Samsung Electronics Co., Ltd. | ||
3 | * http://www.samsung.com | ||
2 | * | 4 | * |
3 | * Copyright (C) 2010 Samsung Electronics Co. Ltd. | 5 | * Copyright (C) 2010 Samsung Electronics Co. Ltd. |
4 | * Jaswinder Singh <jassi.brar@samsung.com> | 6 | * Jaswinder Singh <jassi.brar@samsung.com> |
@@ -9,10 +11,15 @@
9 | * (at your option) any later version. | 11 | * (at your option) any later version. |
10 | */ | 12 | */ |
11 | 13 | ||
14 | #include <linux/kernel.h> | ||
12 | #include <linux/io.h> | 15 | #include <linux/io.h> |
13 | #include <linux/init.h> | 16 | #include <linux/init.h> |
14 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
15 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/string.h> | ||
20 | #include <linux/delay.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/dma-mapping.h> | ||
16 | #include <linux/dmaengine.h> | 23 | #include <linux/dmaengine.h> |
17 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
18 | #include <linux/amba/bus.h> | 25 | #include <linux/amba/bus.h> |
@@ -21,8 +28,497 @@
21 | #include <linux/scatterlist.h> | 28 | #include <linux/scatterlist.h> |
22 | #include <linux/of.h> | 29 | #include <linux/of.h> |
23 | 30 | ||
31 | #include "dmaengine.h" | ||
32 | #define PL330_MAX_CHAN 8 | ||
33 | #define PL330_MAX_IRQS 32 | ||
34 | #define PL330_MAX_PERI 32 | ||
35 | |||
36 | enum pl330_srccachectrl { | ||
37 | SCCTRL0, /* Noncacheable and nonbufferable */ | ||
38 | SCCTRL1, /* Bufferable only */ | ||
39 | SCCTRL2, /* Cacheable, but do not allocate */ | ||
40 | SCCTRL3, /* Cacheable and bufferable, but do not allocate */ | ||
41 | SINVALID1, | ||
42 | SINVALID2, | ||
43 | SCCTRL6, /* Cacheable write-through, allocate on reads only */ | ||
44 | SCCTRL7, /* Cacheable write-back, allocate on reads only */ | ||
45 | }; | ||
46 | |||
47 | enum pl330_dstcachectrl { | ||
48 | DCCTRL0, /* Noncacheable and nonbufferable */ | ||
49 | DCCTRL1, /* Bufferable only */ | ||
50 | DCCTRL2, /* Cacheable, but do not allocate */ | ||
51 | DCCTRL3, /* Cacheable and bufferable, but do not allocate */ | ||
52 | DINVALID1, /* AWCACHE = 0x1000 */ | ||
53 | DINVALID2, | ||
54 | DCCTRL6, /* Cacheable write-through, allocate on writes only */ | ||
55 | DCCTRL7, /* Cacheable write-back, allocate on writes only */ | ||
56 | }; | ||
57 | |||
58 | enum pl330_byteswap { | ||
59 | SWAP_NO, | ||
60 | SWAP_2, | ||
61 | SWAP_4, | ||
62 | SWAP_8, | ||
63 | SWAP_16, | ||
64 | }; | ||
65 | |||
66 | enum pl330_reqtype { | ||
67 | MEMTOMEM, | ||
68 | MEMTODEV, | ||
69 | DEVTOMEM, | ||
70 | DEVTODEV, | ||
71 | }; | ||
72 | |||
73 | /* Register and Bit field Definitions */ | ||
74 | #define DS 0x0 | ||
75 | #define DS_ST_STOP 0x0 | ||
76 | #define DS_ST_EXEC 0x1 | ||
77 | #define DS_ST_CMISS 0x2 | ||
78 | #define DS_ST_UPDTPC 0x3 | ||
79 | #define DS_ST_WFE 0x4 | ||
80 | #define DS_ST_ATBRR 0x5 | ||
81 | #define DS_ST_QBUSY 0x6 | ||
82 | #define DS_ST_WFP 0x7 | ||
83 | #define DS_ST_KILL 0x8 | ||
84 | #define DS_ST_CMPLT 0x9 | ||
85 | #define DS_ST_FLTCMP 0xe | ||
86 | #define DS_ST_FAULT 0xf | ||
87 | |||
88 | #define DPC 0x4 | ||
89 | #define INTEN 0x20 | ||
90 | #define ES 0x24 | ||
91 | #define INTSTATUS 0x28 | ||
92 | #define INTCLR 0x2c | ||
93 | #define FSM 0x30 | ||
94 | #define FSC 0x34 | ||
95 | #define FTM 0x38 | ||
96 | |||
97 | #define _FTC 0x40 | ||
98 | #define FTC(n) (_FTC + (n)*0x4) | ||
99 | |||
100 | #define _CS 0x100 | ||
101 | #define CS(n) (_CS + (n)*0x8) | ||
102 | #define CS_CNS (1 << 21) | ||
103 | |||
104 | #define _CPC 0x104 | ||
105 | #define CPC(n) (_CPC + (n)*0x8) | ||
106 | |||
107 | #define _SA 0x400 | ||
108 | #define SA(n) (_SA + (n)*0x20) | ||
109 | |||
110 | #define _DA 0x404 | ||
111 | #define DA(n) (_DA + (n)*0x20) | ||
112 | |||
113 | #define _CC 0x408 | ||
114 | #define CC(n) (_CC + (n)*0x20) | ||
115 | |||
116 | #define CC_SRCINC (1 << 0) | ||
117 | #define CC_DSTINC (1 << 14) | ||
118 | #define CC_SRCPRI (1 << 8) | ||
119 | #define CC_DSTPRI (1 << 22) | ||
120 | #define CC_SRCNS (1 << 9) | ||
121 | #define CC_DSTNS (1 << 23) | ||
122 | #define CC_SRCIA (1 << 10) | ||
123 | #define CC_DSTIA (1 << 24) | ||
124 | #define CC_SRCBRSTLEN_SHFT 4 | ||
125 | #define CC_DSTBRSTLEN_SHFT 18 | ||
126 | #define CC_SRCBRSTSIZE_SHFT 1 | ||
127 | #define CC_DSTBRSTSIZE_SHFT 15 | ||
128 | #define CC_SRCCCTRL_SHFT 11 | ||
129 | #define CC_SRCCCTRL_MASK 0x7 | ||
130 | #define CC_DSTCCTRL_SHFT 25 | ||
131 | #define CC_DRCCCTRL_MASK 0x7 | ||
132 | #define CC_SWAP_SHFT 28 | ||
133 | |||
134 | #define _LC0 0x40c | ||
135 | #define LC0(n) (_LC0 + (n)*0x20) | ||
136 | |||
137 | #define _LC1 0x410 | ||
138 | #define LC1(n) (_LC1 + (n)*0x20) | ||
139 | |||
140 | #define DBGSTATUS 0xd00 | ||
141 | #define DBG_BUSY (1 << 0) | ||
142 | |||
143 | #define DBGCMD 0xd04 | ||
144 | #define DBGINST0 0xd08 | ||
145 | #define DBGINST1 0xd0c | ||
146 | |||
147 | #define CR0 0xe00 | ||
148 | #define CR1 0xe04 | ||
149 | #define CR2 0xe08 | ||
150 | #define CR3 0xe0c | ||
151 | #define CR4 0xe10 | ||
152 | #define CRD 0xe14 | ||
153 | |||
154 | #define PERIPH_ID 0xfe0 | ||
155 | #define PERIPH_REV_SHIFT 20 | ||
156 | #define PERIPH_REV_MASK 0xf | ||
157 | #define PERIPH_REV_R0P0 0 | ||
158 | #define PERIPH_REV_R1P0 1 | ||
159 | #define PERIPH_REV_R1P1 2 | ||
160 | #define PCELL_ID 0xff0 | ||
161 | |||
162 | #define CR0_PERIPH_REQ_SET (1 << 0) | ||
163 | #define CR0_BOOT_EN_SET (1 << 1) | ||
164 | #define CR0_BOOT_MAN_NS (1 << 2) | ||
165 | #define CR0_NUM_CHANS_SHIFT 4 | ||
166 | #define CR0_NUM_CHANS_MASK 0x7 | ||
167 | #define CR0_NUM_PERIPH_SHIFT 12 | ||
168 | #define CR0_NUM_PERIPH_MASK 0x1f | ||
169 | #define CR0_NUM_EVENTS_SHIFT 17 | ||
170 | #define CR0_NUM_EVENTS_MASK 0x1f | ||
171 | |||
172 | #define CR1_ICACHE_LEN_SHIFT 0 | ||
173 | #define CR1_ICACHE_LEN_MASK 0x7 | ||
174 | #define CR1_NUM_ICACHELINES_SHIFT 4 | ||
175 | #define CR1_NUM_ICACHELINES_MASK 0xf | ||
176 | |||
177 | #define CRD_DATA_WIDTH_SHIFT 0 | ||
178 | #define CRD_DATA_WIDTH_MASK 0x7 | ||
179 | #define CRD_WR_CAP_SHIFT 4 | ||
180 | #define CRD_WR_CAP_MASK 0x7 | ||
181 | #define CRD_WR_Q_DEP_SHIFT 8 | ||
182 | #define CRD_WR_Q_DEP_MASK 0xf | ||
183 | #define CRD_RD_CAP_SHIFT 12 | ||
184 | #define CRD_RD_CAP_MASK 0x7 | ||
185 | #define CRD_RD_Q_DEP_SHIFT 16 | ||
186 | #define CRD_RD_Q_DEP_MASK 0xf | ||
187 | #define CRD_DATA_BUFF_SHIFT 20 | ||
188 | #define CRD_DATA_BUFF_MASK 0x3ff | ||
189 | |||
190 | #define PART 0x330 | ||
191 | #define DESIGNER 0x41 | ||
192 | #define REVISION 0x0 | ||
193 | #define INTEG_CFG 0x0 | ||
194 | #define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12)) | ||
195 | |||
196 | #define PCELL_ID_VAL 0xb105f00d | ||
197 | |||
198 | #define PL330_STATE_STOPPED (1 << 0) | ||
199 | #define PL330_STATE_EXECUTING (1 << 1) | ||
200 | #define PL330_STATE_WFE (1 << 2) | ||
201 | #define PL330_STATE_FAULTING (1 << 3) | ||
202 | #define PL330_STATE_COMPLETING (1 << 4) | ||
203 | #define PL330_STATE_WFP (1 << 5) | ||
204 | #define PL330_STATE_KILLING (1 << 6) | ||
205 | #define PL330_STATE_FAULT_COMPLETING (1 << 7) | ||
206 | #define PL330_STATE_CACHEMISS (1 << 8) | ||
207 | #define PL330_STATE_UPDTPC (1 << 9) | ||
208 | #define PL330_STATE_ATBARRIER (1 << 10) | ||
209 | #define PL330_STATE_QUEUEBUSY (1 << 11) | ||
210 | #define PL330_STATE_INVALID (1 << 15) | ||
211 | |||
212 | #define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \ | ||
213 | | PL330_STATE_WFE | PL330_STATE_FAULTING) | ||
214 | |||
215 | #define CMD_DMAADDH 0x54 | ||
216 | #define CMD_DMAEND 0x00 | ||
217 | #define CMD_DMAFLUSHP 0x35 | ||
218 | #define CMD_DMAGO 0xa0 | ||
219 | #define CMD_DMALD 0x04 | ||
220 | #define CMD_DMALDP 0x25 | ||
221 | #define CMD_DMALP 0x20 | ||
222 | #define CMD_DMALPEND 0x28 | ||
223 | #define CMD_DMAKILL 0x01 | ||
224 | #define CMD_DMAMOV 0xbc | ||
225 | #define CMD_DMANOP 0x18 | ||
226 | #define CMD_DMARMB 0x12 | ||
227 | #define CMD_DMASEV 0x34 | ||
228 | #define CMD_DMAST 0x08 | ||
229 | #define CMD_DMASTP 0x29 | ||
230 | #define CMD_DMASTZ 0x0c | ||
231 | #define CMD_DMAWFE 0x36 | ||
232 | #define CMD_DMAWFP 0x30 | ||
233 | #define CMD_DMAWMB 0x13 | ||
234 | |||
235 | #define SZ_DMAADDH 3 | ||
236 | #define SZ_DMAEND 1 | ||
237 | #define SZ_DMAFLUSHP 2 | ||
238 | #define SZ_DMALD 1 | ||
239 | #define SZ_DMALDP 2 | ||
240 | #define SZ_DMALP 2 | ||
241 | #define SZ_DMALPEND 2 | ||
242 | #define SZ_DMAKILL 1 | ||
243 | #define SZ_DMAMOV 6 | ||
244 | #define SZ_DMANOP 1 | ||
245 | #define SZ_DMARMB 1 | ||
246 | #define SZ_DMASEV 2 | ||
247 | #define SZ_DMAST 1 | ||
248 | #define SZ_DMASTP 2 | ||
249 | #define SZ_DMASTZ 1 | ||
250 | #define SZ_DMAWFE 2 | ||
251 | #define SZ_DMAWFP 2 | ||
252 | #define SZ_DMAWMB 1 | ||
253 | #define SZ_DMAGO 6 | ||
254 | |||
255 | #define BRST_LEN(ccr) ((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1) | ||
256 | #define BRST_SIZE(ccr) (1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7)) | ||
257 | |||
258 | #define BYTE_TO_BURST(b, ccr) ((b) / BRST_SIZE(ccr) / BRST_LEN(ccr)) | ||
259 | #define BURST_TO_BYTE(c, ccr) ((c) * BRST_SIZE(ccr) * BRST_LEN(ccr)) | ||
260 | |||
261 | /* | ||
262 | * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req | ||
263 | * at 1byte/burst for P<->M and M<->M respectively. | ||
264 | * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req | ||
265 | * should be enough for P<->M and M<->M respectively. | ||
266 | */ | ||
267 | #define MCODE_BUFF_PER_REQ 256 | ||
268 | |||
269 | /* If the _pl330_req is available to the client */ | ||
270 | #define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND) | ||
271 | |||
272 | /* Use this _only_ to wait on transient states */ | ||
273 | #define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax(); | ||
274 | |||
275 | #ifdef PL330_DEBUG_MCGEN | ||
276 | static unsigned cmd_line; | ||
277 | #define PL330_DBGCMD_DUMP(off, x...) do { \ | ||
278 | printk("%x:", cmd_line); \ | ||
279 | printk(x); \ | ||
280 | cmd_line += off; \ | ||
281 | } while (0) | ||
282 | #define PL330_DBGMC_START(addr) (cmd_line = addr) | ||
283 | #else | ||
284 | #define PL330_DBGCMD_DUMP(off, x...) do {} while (0) | ||
285 | #define PL330_DBGMC_START(addr) do {} while (0) | ||
286 | #endif | ||
287 | |||
288 | /* The number of default descriptors */ | ||
289 | |||
24 | #define NR_DEFAULT_DESC 16 | 290 | #define NR_DEFAULT_DESC 16 |
25 | 291 | ||
292 | /* Populated by the PL330 core driver for DMA API driver's info */ | ||
293 | struct pl330_config { | ||
294 | u32 periph_id; | ||
295 | u32 pcell_id; | ||
296 | #define DMAC_MODE_NS (1 << 0) | ||
297 | unsigned int mode; | ||
298 | unsigned int data_bus_width:10; /* In number of bits */ | ||
299 | unsigned int data_buf_dep:10; | ||
300 | unsigned int num_chan:4; | ||
301 | unsigned int num_peri:6; | ||
302 | u32 peri_ns; | ||
303 | unsigned int num_events:6; | ||
304 | u32 irq_ns; | ||
305 | }; | ||
306 | |||
307 | /* Handle to the DMAC provided to the PL330 core */ | ||
308 | struct pl330_info { | ||
309 | /* Owning device */ | ||
310 | struct device *dev; | ||
311 | /* Size of MicroCode buffers for each channel. */ | ||
312 | unsigned mcbufsz; | ||
313 | /* ioremap'ed address of PL330 registers. */ | ||
314 | void __iomem *base; | ||
315 | /* Client can freely use it. */ | ||
316 | void *client_data; | ||
317 | /* PL330 core data, Client must not touch it. */ | ||
318 | void *pl330_data; | ||
319 | /* Populated by the PL330 core driver during pl330_add */ | ||
320 | struct pl330_config pcfg; | ||
321 | /* | ||
322 | * If the DMAC has some reset mechanism, then the | ||
323 | * client may want to provide pointer to the method. | ||
324 | */ | ||
325 | void (*dmac_reset)(struct pl330_info *pi); | ||
326 | }; | ||
327 | |||
328 | /** | ||
329 | * Request Configuration. | ||
330 | * The PL330 core does not modify this and uses the last | ||
331 | * working configuration if the request doesn't provide any. | ||
332 | * | ||
333 | * The Client may want to provide this info only for the | ||
334 | * first request and a request with new settings. | ||
335 | */ | ||
336 | struct pl330_reqcfg { | ||
337 | /* Address Incrementing */ | ||
338 | unsigned dst_inc:1; | ||
339 | unsigned src_inc:1; | ||
340 | |||
341 | /* | ||
342 | * For now, the SRC & DST protection levels | ||
343 | * and burst size/length are assumed same. | ||
344 | */ | ||
345 | bool nonsecure; | ||
346 | bool privileged; | ||
347 | bool insnaccess; | ||
348 | unsigned brst_len:5; | ||
349 | unsigned brst_size:3; /* in power of 2 */ | ||
350 | |||
351 | enum pl330_dstcachectrl dcctl; | ||
352 | enum pl330_srccachectrl scctl; | ||
353 | enum pl330_byteswap swap; | ||
354 | struct pl330_config *pcfg; | ||
355 | }; | ||
356 | |||
357 | /* | ||
358 | * One cycle of DMAC operation. | ||
359 | * There may be more than one xfer in a request. | ||
360 | */ | ||
361 | struct pl330_xfer { | ||
362 | u32 src_addr; | ||
363 | u32 dst_addr; | ||
364 | /* Size to xfer */ | ||
365 | u32 bytes; | ||
366 | /* | ||
367 | * Pointer to next xfer in the list. | ||
368 | * The last xfer in the req must point to NULL. | ||
369 | */ | ||
370 | struct pl330_xfer *next; | ||
371 | }; | ||
372 | |||
373 | /* The xfer callbacks are made with one of these arguments. */ | ||
374 | enum pl330_op_err { | ||
375 | /* The all xfers in the request were success. */ | ||
376 | PL330_ERR_NONE, | ||
377 | /* If req aborted due to global error. */ | ||
378 | PL330_ERR_ABORT, | ||
379 | /* If req failed due to problem with Channel. */ | ||
380 | PL330_ERR_FAIL, | ||
381 | }; | ||
382 | |||
383 | /* A request defining Scatter-Gather List ending with NULL xfer. */ | ||
384 | struct pl330_req { | ||
385 | enum pl330_reqtype rqtype; | ||
386 | /* Index of peripheral for the xfer. */ | ||
387 | unsigned peri:5; | ||
388 | /* Unique token for this xfer, set by the client. */ | ||
389 | void *token; | ||
390 | /* Callback to be called after xfer. */ | ||
391 | void (*xfer_cb)(void *token, enum pl330_op_err err); | ||
392 | /* If NULL, req will be done at last set parameters. */ | ||
393 | struct pl330_reqcfg *cfg; | ||
394 | /* Pointer to first xfer in the request. */ | ||
395 | struct pl330_xfer *x; | ||
396 | }; | ||
397 | |||
398 | /* | ||
399 | * To know the status of the channel and DMAC, the client | ||
400 | * provides a pointer to this structure. The PL330 core | ||
401 | * fills it with current information. | ||
402 | */ | ||
403 | struct pl330_chanstatus { | ||
404 | /* | ||
405 | * If the DMAC engine halted due to some error, | ||
406 | * the client should remove-add DMAC. | ||
407 | */ | ||
408 | bool dmac_halted; | ||
409 | /* | ||
410 | * If channel is halted due to some error, | ||
411 | * the client should ABORT/FLUSH and START the channel. | ||
412 | */ | ||
413 | bool faulting; | ||
414 | /* Location of last load */ | ||
415 | u32 src_addr; | ||
416 | /* Location of last store */ | ||
417 | u32 dst_addr; | ||
418 | /* | ||
419 | * Pointer to the currently active req, NULL if channel is | ||
420 | * inactive, even though the requests may be present. | ||
421 | */ | ||
422 | struct pl330_req *top_req; | ||
423 | /* Pointer to req waiting second in the queue if any. */ | ||
424 | struct pl330_req *wait_req; | ||
425 | }; | ||
426 | |||
427 | enum pl330_chan_op { | ||
428 | /* Start the channel */ | ||
429 | PL330_OP_START, | ||
430 | /* Abort the active xfer */ | ||
431 | PL330_OP_ABORT, | ||
432 | /* Stop xfer and flush queue */ | ||
433 | PL330_OP_FLUSH, | ||
434 | }; | ||
435 | |||
436 | struct _xfer_spec { | ||
437 | u32 ccr; | ||
438 | struct pl330_req *r; | ||
439 | struct pl330_xfer *x; | ||
440 | }; | ||
441 | |||
442 | enum dmamov_dst { | ||
443 | SAR = 0, | ||
444 | CCR, | ||
445 | DAR, | ||
446 | }; | ||
447 | |||
448 | enum pl330_dst { | ||
449 | SRC = 0, | ||
450 | DST, | ||
451 | }; | ||
452 | |||
453 | enum pl330_cond { | ||
454 | SINGLE, | ||
455 | BURST, | ||
456 | ALWAYS, | ||
457 | }; | ||
458 | |||
459 | struct _pl330_req { | ||
460 | u32 mc_bus; | ||
461 | void *mc_cpu; | ||
462 | /* Number of bytes taken to setup MC for the req */ | ||
463 | u32 mc_len; | ||
464 | struct pl330_req *r; | ||
465 | /* Hook to attach to DMAC's list of reqs with due callback */ | ||
466 | struct list_head rqd; | ||
467 | }; | ||
468 | |||
469 | /* ToBeDone for tasklet */ | ||
470 | struct _pl330_tbd { | ||
471 | bool reset_dmac; | ||
472 | bool reset_mngr; | ||
473 | u8 reset_chan; | ||
474 | }; | ||
475 | |||
476 | /* A DMAC Thread */ | ||
477 | struct pl330_thread { | ||
478 | u8 id; | ||
479 | int ev; | ||
480 | /* If the channel is not yet acquired by any client */ | ||
481 | bool free; | ||
482 | /* Parent DMAC */ | ||
483 | struct pl330_dmac *dmac; | ||
484 | /* Only two at a time */ | ||
485 | struct _pl330_req req[2]; | ||
486 | /* Index of the last enqueued request */ | ||
487 | unsigned lstenq; | ||
488 | /* Index of the last submitted request or -1 if the DMA is stopped */ | ||
489 | int req_running; | ||
490 | }; | ||
491 | |||
492 | enum pl330_dmac_state { | ||
493 | UNINIT, | ||
494 | INIT, | ||
495 | DYING, | ||
496 | }; | ||
497 | |||
498 | /* A DMAC */ | ||
499 | struct pl330_dmac { | ||
500 | spinlock_t lock; | ||
501 | /* Holds list of reqs with due callbacks */ | ||
502 | struct list_head req_done; | ||
503 | /* Pointer to platform specific stuff */ | ||
504 | struct pl330_info *pinfo; | ||
505 | /* Maximum possible events/irqs */ | ||
506 | int events[32]; | ||
507 | /* BUS address of MicroCode buffer */ | ||
508 | u32 mcode_bus; | ||
509 | /* CPU address of MicroCode buffer */ | ||
510 | void *mcode_cpu; | ||
511 | /* List of all Channel threads */ | ||
512 | struct pl330_thread *channels; | ||
513 | /* Pointer to the MANAGER thread */ | ||
514 | struct pl330_thread *manager; | ||
515 | /* To handle bad news in interrupt */ | ||
516 | struct tasklet_struct tasks; | ||
517 | struct _pl330_tbd dmac_tbd; | ||
518 | /* State of DMAC operation */ | ||
519 | enum pl330_dmac_state state; | ||
520 | }; | ||
521 | |||
26 | enum desc_status { | 522 | enum desc_status { |
27 | /* In the DMAC pool */ | 523 | /* In the DMAC pool */ |
28 | FREE, | 524 | FREE, |
@@ -51,9 +547,6 @@ struct dma_pl330_chan {
51 | /* DMA-Engine Channel */ | 547 | /* DMA-Engine Channel */ |
52 | struct dma_chan chan; | 548 | struct dma_chan chan; |
53 | 549 | ||
54 | /* Last completed cookie */ | ||
55 | dma_cookie_t completed; | ||
56 | |||
57 | /* List of to be xfered descriptors */ | 550 | /* List of to be xfered descriptors */ |
58 | struct list_head work_list; | 551 | struct list_head work_list; |
59 | 552 | ||
@@ -117,6 +610,1599 @@ struct dma_pl330_desc {
117 | struct dma_pl330_chan *pchan; | 610 | struct dma_pl330_chan *pchan; |
118 | }; | 611 | }; |
119 | 612 | ||
613 | static inline void _callback(struct pl330_req *r, enum pl330_op_err err) | ||
614 | { | ||
615 | if (r && r->xfer_cb) | ||
616 | r->xfer_cb(r->token, err); | ||
617 | } | ||
618 | |||
619 | static inline bool _queue_empty(struct pl330_thread *thrd) | ||
620 | { | ||
621 | return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1])) | ||
622 | ? true : false; | ||
623 | } | ||
624 | |||
625 | static inline bool _queue_full(struct pl330_thread *thrd) | ||
626 | { | ||
627 | return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1])) | ||
628 | ? false : true; | ||
629 | } | ||
630 | |||
631 | static inline bool is_manager(struct pl330_thread *thrd) | ||
632 | { | ||
633 | struct pl330_dmac *pl330 = thrd->dmac; | ||
634 | |||
635 | /* MANAGER is indexed at the end */ | ||
636 | if (thrd->id == pl330->pinfo->pcfg.num_chan) | ||
637 | return true; | ||
638 | else | ||
639 | return false; | ||
640 | } | ||
641 | |||
642 | /* If manager of the thread is in Non-Secure mode */ | ||
643 | static inline bool _manager_ns(struct pl330_thread *thrd) | ||
644 | { | ||
645 | struct pl330_dmac *pl330 = thrd->dmac; | ||
646 | |||
647 | return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false; | ||
648 | } | ||
649 | |||
650 | static inline u32 get_id(struct pl330_info *pi, u32 off) | ||
651 | { | ||
652 | void __iomem *regs = pi->base; | ||
653 | u32 id = 0; | ||
654 | |||
655 | id |= (readb(regs + off + 0x0) << 0); | ||
656 | id |= (readb(regs + off + 0x4) << 8); | ||
657 | id |= (readb(regs + off + 0x8) << 16); | ||
658 | id |= (readb(regs + off + 0xc) << 24); | ||
659 | |||
660 | return id; | ||
661 | } | ||
662 | |||
663 | static inline u32 get_revision(u32 periph_id) | ||
664 | { | ||
665 | return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK; | ||
666 | } | ||
667 | |||
668 | static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[], | ||
669 | enum pl330_dst da, u16 val) | ||
670 | { | ||
671 | if (dry_run) | ||
672 | return SZ_DMAADDH; | ||
673 | |||
674 | buf[0] = CMD_DMAADDH; | ||
675 | buf[0] |= (da << 1); | ||
676 | *((u16 *)&buf[1]) = val; | ||
677 | |||
678 | PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n", | ||
679 | da == 1 ? "DA" : "SA", val); | ||
680 | |||
681 | return SZ_DMAADDH; | ||
682 | } | ||
683 | |||
684 | static inline u32 _emit_END(unsigned dry_run, u8 buf[]) | ||
685 | { | ||
686 | if (dry_run) | ||
687 | return SZ_DMAEND; | ||
688 | |||
689 | buf[0] = CMD_DMAEND; | ||
690 | |||
691 | PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n"); | ||
692 | |||
693 | return SZ_DMAEND; | ||
694 | } | ||
695 | |||
696 | static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri) | ||
697 | { | ||
698 | if (dry_run) | ||
699 | return SZ_DMAFLUSHP; | ||
700 | |||
701 | buf[0] = CMD_DMAFLUSHP; | ||
702 | |||
703 | peri &= 0x1f; | ||
704 | peri <<= 3; | ||
705 | buf[1] = peri; | ||
706 | |||
707 | PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3); | ||
708 | |||
709 | return SZ_DMAFLUSHP; | ||
710 | } | ||
711 | |||
712 | static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond) | ||
713 | { | ||
714 | if (dry_run) | ||
715 | return SZ_DMALD; | ||
716 | |||
717 | buf[0] = CMD_DMALD; | ||
718 | |||
719 | if (cond == SINGLE) | ||
720 | buf[0] |= (0 << 1) | (1 << 0); | ||
721 | else if (cond == BURST) | ||
722 | buf[0] |= (1 << 1) | (1 << 0); | ||
723 | |||
724 | PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n", | ||
725 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A')); | ||
726 | |||
727 | return SZ_DMALD; | ||
728 | } | ||
729 | |||
730 | static inline u32 _emit_LDP(unsigned dry_run, u8 buf[], | ||
731 | enum pl330_cond cond, u8 peri) | ||
732 | { | ||
733 | if (dry_run) | ||
734 | return SZ_DMALDP; | ||
735 | |||
736 | buf[0] = CMD_DMALDP; | ||
737 | |||
738 | if (cond == BURST) | ||
739 | buf[0] |= (1 << 1); | ||
740 | |||
741 | peri &= 0x1f; | ||
742 | peri <<= 3; | ||
743 | buf[1] = peri; | ||
744 | |||
745 | PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n", | ||
746 | cond == SINGLE ? 'S' : 'B', peri >> 3); | ||
747 | |||
748 | return SZ_DMALDP; | ||
749 | } | ||
750 | |||
751 | static inline u32 _emit_LP(unsigned dry_run, u8 buf[], | ||
752 | unsigned loop, u8 cnt) | ||
753 | { | ||
754 | if (dry_run) | ||
755 | return SZ_DMALP; | ||
756 | |||
757 | buf[0] = CMD_DMALP; | ||
758 | |||
759 | if (loop) | ||
760 | buf[0] |= (1 << 1); | ||
761 | |||
762 | cnt--; /* DMAC increments by 1 internally */ | ||
763 | buf[1] = cnt; | ||
764 | |||
765 | PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt); | ||
766 | |||
767 | return SZ_DMALP; | ||
768 | } | ||
769 | |||
770 | struct _arg_LPEND { | ||
771 | enum pl330_cond cond; | ||
772 | bool forever; | ||
773 | unsigned loop; | ||
774 | u8 bjump; | ||
775 | }; | ||
776 | |||
777 | static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[], | ||
778 | const struct _arg_LPEND *arg) | ||
779 | { | ||
780 | enum pl330_cond cond = arg->cond; | ||
781 | bool forever = arg->forever; | ||
782 | unsigned loop = arg->loop; | ||
783 | u8 bjump = arg->bjump; | ||
784 | |||
785 | if (dry_run) | ||
786 | return SZ_DMALPEND; | ||
787 | |||
788 | buf[0] = CMD_DMALPEND; | ||
789 | |||
790 | if (loop) | ||
791 | buf[0] |= (1 << 2); | ||
792 | |||
793 | if (!forever) | ||
794 | buf[0] |= (1 << 4); | ||
795 | |||
796 | if (cond == SINGLE) | ||
797 | buf[0] |= (0 << 1) | (1 << 0); | ||
798 | else if (cond == BURST) | ||
799 | buf[0] |= (1 << 1) | (1 << 0); | ||
800 | |||
801 | buf[1] = bjump; | ||
802 | |||
803 | PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n", | ||
804 | forever ? "FE" : "END", | ||
805 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'), | ||
806 | loop ? '1' : '0', | ||
807 | bjump); | ||
808 | |||
809 | return SZ_DMALPEND; | ||
810 | } | ||
811 | |||
812 | static inline u32 _emit_KILL(unsigned dry_run, u8 buf[]) | ||
813 | { | ||
814 | if (dry_run) | ||
815 | return SZ_DMAKILL; | ||
816 | |||
817 | buf[0] = CMD_DMAKILL; | ||
818 | |||
819 | return SZ_DMAKILL; | ||
820 | } | ||
821 | |||
822 | static inline u32 _emit_MOV(unsigned dry_run, u8 buf[], | ||
823 | enum dmamov_dst dst, u32 val) | ||
824 | { | ||
825 | if (dry_run) | ||
826 | return SZ_DMAMOV; | ||
827 | |||
828 | buf[0] = CMD_DMAMOV; | ||
829 | buf[1] = dst; | ||
830 | *((u32 *)&buf[2]) = val; | ||
831 | |||
832 | PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n", | ||
833 | dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val); | ||
834 | |||
835 | return SZ_DMAMOV; | ||
836 | } | ||
837 | |||
838 | static inline u32 _emit_NOP(unsigned dry_run, u8 buf[]) | ||
839 | { | ||
840 | if (dry_run) | ||
841 | return SZ_DMANOP; | ||
842 | |||
843 | buf[0] = CMD_DMANOP; | ||
844 | |||
845 | PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n"); | ||
846 | |||
847 | return SZ_DMANOP; | ||
848 | } | ||
849 | |||
850 | static inline u32 _emit_RMB(unsigned dry_run, u8 buf[]) | ||
851 | { | ||
852 | if (dry_run) | ||
853 | return SZ_DMARMB; | ||
854 | |||
855 | buf[0] = CMD_DMARMB; | ||
856 | |||
857 | PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n"); | ||
858 | |||
859 | return SZ_DMARMB; | ||
860 | } | ||
861 | |||
862 | static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev) | ||
863 | { | ||
864 | if (dry_run) | ||
865 | return SZ_DMASEV; | ||
866 | |||
867 | buf[0] = CMD_DMASEV; | ||
868 | |||
869 | ev &= 0x1f; | ||
870 | ev <<= 3; | ||
871 | buf[1] = ev; | ||
872 | |||
873 | PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3); | ||
874 | |||
875 | return SZ_DMASEV; | ||
876 | } | ||
877 | |||
878 | static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond) | ||
879 | { | ||
880 | if (dry_run) | ||
881 | return SZ_DMAST; | ||
882 | |||
883 | buf[0] = CMD_DMAST; | ||
884 | |||
885 | if (cond == SINGLE) | ||
886 | buf[0] |= (0 << 1) | (1 << 0); | ||
887 | else if (cond == BURST) | ||
888 | buf[0] |= (1 << 1) | (1 << 0); | ||
889 | |||
890 | PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n", | ||
891 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A')); | ||
892 | |||
893 | return SZ_DMAST; | ||
894 | } | ||
895 | |||
896 | static inline u32 _emit_STP(unsigned dry_run, u8 buf[], | ||
897 | enum pl330_cond cond, u8 peri) | ||
898 | { | ||
899 | if (dry_run) | ||
900 | return SZ_DMASTP; | ||
901 | |||
902 | buf[0] = CMD_DMASTP; | ||
903 | |||
904 | if (cond == BURST) | ||
905 | buf[0] |= (1 << 1); | ||
906 | |||
907 | peri &= 0x1f; | ||
908 | peri <<= 3; | ||
909 | buf[1] = peri; | ||
910 | |||
911 | PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n", | ||
912 | cond == SINGLE ? 'S' : 'B', peri >> 3); | ||
913 | |||
914 | return SZ_DMASTP; | ||
915 | } | ||
916 | |||
917 | static inline u32 _emit_STZ(unsigned dry_run, u8 buf[]) | ||
918 | { | ||
919 | if (dry_run) | ||
920 | return SZ_DMASTZ; | ||
921 | |||
922 | buf[0] = CMD_DMASTZ; | ||
923 | |||
924 | PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n"); | ||
925 | |||
926 | return SZ_DMASTZ; | ||
927 | } | ||
928 | |||
929 | static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev, | ||
930 | unsigned invalidate) | ||
931 | { | ||
932 | if (dry_run) | ||
933 | return SZ_DMAWFE; | ||
934 | |||
935 | buf[0] = CMD_DMAWFE; | ||
936 | |||
937 | ev &= 0x1f; | ||
938 | ev <<= 3; | ||
939 | buf[1] = ev; | ||
940 | |||
941 | if (invalidate) | ||
942 | buf[1] |= (1 << 1); | ||
943 | |||
944 | PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n", | ||
945 | ev >> 3, invalidate ? ", I" : ""); | ||
946 | |||
947 | return SZ_DMAWFE; | ||
948 | } | ||
949 | |||
950 | static inline u32 _emit_WFP(unsigned dry_run, u8 buf[], | ||
951 | enum pl330_cond cond, u8 peri) | ||
952 | { | ||
953 | if (dry_run) | ||
954 | return SZ_DMAWFP; | ||
955 | |||
956 | buf[0] = CMD_DMAWFP; | ||
957 | |||
958 | if (cond == SINGLE) | ||
959 | buf[0] |= (0 << 1) | (0 << 0); | ||
960 | else if (cond == BURST) | ||
961 | buf[0] |= (1 << 1) | (0 << 0); | ||
962 | else | ||
963 | buf[0] |= (0 << 1) | (1 << 0); | ||
964 | |||
965 | peri &= 0x1f; | ||
966 | peri <<= 3; | ||
967 | buf[1] = peri; | ||
968 | |||
969 | PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n", | ||
970 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3); | ||
971 | |||
972 | return SZ_DMAWFP; | ||
973 | } | ||
974 | |||
975 | static inline u32 _emit_WMB(unsigned dry_run, u8 buf[]) | ||
976 | { | ||
977 | if (dry_run) | ||
978 | return SZ_DMAWMB; | ||
979 | |||
980 | buf[0] = CMD_DMAWMB; | ||
981 | |||
982 | PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n"); | ||
983 | |||
984 | return SZ_DMAWMB; | ||
985 | } | ||
986 | |||
987 | struct _arg_GO { | ||
988 | u8 chan; | ||
989 | u32 addr; | ||
990 | unsigned ns; | ||
991 | }; | ||
992 | |||
993 | static inline u32 _emit_GO(unsigned dry_run, u8 buf[], | ||
994 | const struct _arg_GO *arg) | ||
995 | { | ||
996 | u8 chan = arg->chan; | ||
997 | u32 addr = arg->addr; | ||
998 | unsigned ns = arg->ns; | ||
999 | |||
1000 | if (dry_run) | ||
1001 | return SZ_DMAGO; | ||
1002 | |||
1003 | buf[0] = CMD_DMAGO; | ||
1004 | buf[0] |= (ns << 1); | ||
1005 | |||
1006 | buf[1] = chan & 0x7; | ||
1007 | |||
1008 | *((u32 *)&buf[2]) = addr; | ||
1009 | |||
1010 | return SZ_DMAGO; | ||
1011 | } | ||
1012 | |||
1013 | #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) | ||
1014 | |||
1015 | /* Returns Time-Out */ | ||
1016 | static bool _until_dmac_idle(struct pl330_thread *thrd) | ||
1017 | { | ||
1018 | void __iomem *regs = thrd->dmac->pinfo->base; | ||
1019 | unsigned long loops = msecs_to_loops(5); | ||
1020 | |||
1021 | do { | ||
1022 | /* Until Manager is Idle */ | ||
1023 | if (!(readl(regs + DBGSTATUS) & DBG_BUSY)) | ||
1024 | break; | ||
1025 | |||
1026 | cpu_relax(); | ||
1027 | } while (--loops); | ||
1028 | |||
1029 | if (!loops) | ||
1030 | return true; | ||
1031 | |||
1032 | return false; | ||
1033 | } | ||
1034 | |||
1035 | static inline void _execute_DBGINSN(struct pl330_thread *thrd, | ||
1036 | u8 insn[], bool as_manager) | ||
1037 | { | ||
1038 | void __iomem *regs = thrd->dmac->pinfo->base; | ||
1039 | u32 val; | ||
1040 | |||
1041 | val = (insn[0] << 16) | (insn[1] << 24); | ||
1042 | if (!as_manager) { | ||
1043 | val |= (1 << 0); | ||
1044 | val |= (thrd->id << 8); /* Channel Number */ | ||
1045 | } | ||
1046 | writel(val, regs + DBGINST0); | ||
1047 | |||
1048 | val = *((u32 *)&insn[2]); | ||
1049 | writel(val, regs + DBGINST1); | ||
1050 | |||
1051 | /* If timed out due to halted state-machine */ | ||
1052 | if (_until_dmac_idle(thrd)) { | ||
1053 | dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n"); | ||
1054 | return; | ||
1055 | } | ||
1056 | |||
1057 | /* Get going */ | ||
1058 | writel(0, regs + DBGCMD); | ||
1059 | } | ||
1060 | |||
1061 | /* | ||
1062 | * Mark a _pl330_req as free. | ||
1063 | * We do it by writing DMAEND as the first instruction | ||
1064 | * because no valid request is going to have DMAEND as | ||
1065 | * its first instruction to execute. | ||
1066 | */ | ||
1067 | static void mark_free(struct pl330_thread *thrd, int idx) | ||
1068 | { | ||
1069 | struct _pl330_req *req = &thrd->req[idx]; | ||
1070 | |||
1071 | _emit_END(0, req->mc_cpu); | ||
1072 | req->mc_len = 0; | ||
1073 | |||
1074 | thrd->req_running = -1; | ||
1075 | } | ||
1076 | |||
1077 | static inline u32 _state(struct pl330_thread *thrd) | ||
1078 | { | ||
1079 | void __iomem *regs = thrd->dmac->pinfo->base; | ||
1080 | u32 val; | ||
1081 | |||
1082 | if (is_manager(thrd)) | ||
1083 | val = readl(regs + DS) & 0xf; | ||
1084 | else | ||
1085 | val = readl(regs + CS(thrd->id)) & 0xf; | ||
1086 | |||
1087 | switch (val) { | ||
1088 | case DS_ST_STOP: | ||
1089 | return PL330_STATE_STOPPED; | ||
1090 | case DS_ST_EXEC: | ||
1091 | return PL330_STATE_EXECUTING; | ||
1092 | case DS_ST_CMISS: | ||
1093 | return PL330_STATE_CACHEMISS; | ||
1094 | case DS_ST_UPDTPC: | ||
1095 | return PL330_STATE_UPDTPC; | ||
1096 | case DS_ST_WFE: | ||
1097 | return PL330_STATE_WFE; | ||
1098 | case DS_ST_FAULT: | ||
1099 | return PL330_STATE_FAULTING; | ||
1100 | case DS_ST_ATBRR: | ||
1101 | if (is_manager(thrd)) | ||
1102 | return PL330_STATE_INVALID; | ||
1103 | else | ||
1104 | return PL330_STATE_ATBARRIER; | ||
1105 | case DS_ST_QBUSY: | ||
1106 | if (is_manager(thrd)) | ||
1107 | return PL330_STATE_INVALID; | ||
1108 | else | ||
1109 | return PL330_STATE_QUEUEBUSY; | ||
1110 | case DS_ST_WFP: | ||
1111 | if (is_manager(thrd)) | ||
1112 | return PL330_STATE_INVALID; | ||
1113 | else | ||
1114 | return PL330_STATE_WFP; | ||
1115 | case DS_ST_KILL: | ||
1116 | if (is_manager(thrd)) | ||
1117 | return PL330_STATE_INVALID; | ||
1118 | else | ||
1119 | return PL330_STATE_KILLING; | ||
1120 | case DS_ST_CMPLT: | ||
1121 | if (is_manager(thrd)) | ||
1122 | return PL330_STATE_INVALID; | ||
1123 | else | ||
1124 | return PL330_STATE_COMPLETING; | ||
1125 | case DS_ST_FLTCMP: | ||
1126 | if (is_manager(thrd)) | ||
1127 | return PL330_STATE_INVALID; | ||
1128 | else | ||
1129 | return PL330_STATE_FAULT_COMPLETING; | ||
1130 | default: | ||
1131 | return PL330_STATE_INVALID; | ||
1132 | } | ||
1133 | } | ||
1134 | |||
1135 | static void _stop(struct pl330_thread *thrd) | ||
1136 | { | ||
1137 | void __iomem *regs = thrd->dmac->pinfo->base; | ||
1138 | u8 insn[6] = {0, 0, 0, 0, 0, 0}; | ||
1139 | |||
1140 | if (_state(thrd) == PL330_STATE_FAULT_COMPLETING) | ||
1141 | UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING); | ||
1142 | |||
1143 | /* Return if nothing needs to be done */ | ||
1144 | if (_state(thrd) == PL330_STATE_COMPLETING | ||
1145 | || _state(thrd) == PL330_STATE_KILLING | ||
1146 | || _state(thrd) == PL330_STATE_STOPPED) | ||
1147 | return; | ||
1148 | |||
1149 | _emit_KILL(0, insn); | ||
1150 | |||
1151 | /* Stop generating interrupts for SEV */ | ||
1152 | writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN); | ||
1153 | |||
1154 | _execute_DBGINSN(thrd, insn, is_manager(thrd)); | ||
1155 | } | ||
1156 | |||
1157 | /* Start doing req 'idx' of thread 'thrd' */ | ||
1158 | static bool _trigger(struct pl330_thread *thrd) | ||
1159 | { | ||
1160 | void __iomem *regs = thrd->dmac->pinfo->base; | ||
1161 | struct _pl330_req *req; | ||
1162 | struct pl330_req *r; | ||
1163 | struct _arg_GO go; | ||
1164 | unsigned ns; | ||
1165 | u8 insn[6] = {0, 0, 0, 0, 0, 0}; | ||
1166 | int idx; | ||
1167 | |||
1168 | /* Return if already ACTIVE */ | ||
1169 | if (_state(thrd) != PL330_STATE_STOPPED) | ||
1170 | return true; | ||
1171 | |||
1172 | idx = 1 - thrd->lstenq; | ||
1173 | if (!IS_FREE(&thrd->req[idx])) | ||
1174 | req = &thrd->req[idx]; | ||
1175 | else { | ||
1176 | idx = thrd->lstenq; | ||
1177 | if (!IS_FREE(&thrd->req[idx])) | ||
1178 | req = &thrd->req[idx]; | ||
1179 | else | ||
1180 | req = NULL; | ||
1181 | } | ||
1182 | |||
1183 | /* Return if no request */ | ||
1184 | if (!req || !req->r) | ||
1185 | return true; | ||
1186 | |||
1187 | r = req->r; | ||
1188 | |||
1189 | if (r->cfg) | ||
1190 | ns = r->cfg->nonsecure ? 1 : 0; | ||
1191 | else if (readl(regs + CS(thrd->id)) & CS_CNS) | ||
1192 | ns = 1; | ||
1193 | else | ||
1194 | ns = 0; | ||
1195 | |||
1196 | /* See 'Abort Sources' point-4 at Page 2-25 */ | ||
1197 | if (_manager_ns(thrd) && !ns) | ||
1198 | dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n", | ||
1199 | __func__, __LINE__); | ||
1200 | |||
1201 | go.chan = thrd->id; | ||
1202 | go.addr = req->mc_bus; | ||
1203 | go.ns = ns; | ||
1204 | _emit_GO(0, insn, &go); | ||
1205 | |||
1206 | /* Set to generate interrupts for SEV */ | ||
1207 | writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN); | ||
1208 | |||
1209 | /* Only manager can execute GO */ | ||
1210 | _execute_DBGINSN(thrd, insn, true); | ||
1211 | |||
1212 | thrd->req_running = idx; | ||
1213 | |||
1214 | return true; | ||
1215 | } | ||
1216 | |||
1217 | static bool _start(struct pl330_thread *thrd) | ||
1218 | { | ||
1219 | switch (_state(thrd)) { | ||
1220 | case PL330_STATE_FAULT_COMPLETING: | ||
1221 | UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING); | ||
1222 | |||
1223 | if (_state(thrd) == PL330_STATE_KILLING) | ||
1224 | UNTIL(thrd, PL330_STATE_STOPPED) | ||
1225 | |||
1226 | case PL330_STATE_FAULTING: | ||
1227 | _stop(thrd); | ||
1228 | |||
1229 | case PL330_STATE_KILLING: | ||
1230 | case PL330_STATE_COMPLETING: | ||
1231 | UNTIL(thrd, PL330_STATE_STOPPED) | ||
1232 | |||
1233 | case PL330_STATE_STOPPED: | ||
1234 | return _trigger(thrd); | ||
1235 | |||
1236 | case PL330_STATE_WFP: | ||
1237 | case PL330_STATE_QUEUEBUSY: | ||
1238 | case PL330_STATE_ATBARRIER: | ||
1239 | case PL330_STATE_UPDTPC: | ||
1240 | case PL330_STATE_CACHEMISS: | ||
1241 | case PL330_STATE_EXECUTING: | ||
1242 | return true; | ||
1243 | |||
1244 | case PL330_STATE_WFE: /* For RESUME, nothing yet */ | ||
1245 | default: | ||
1246 | return false; | ||
1247 | } | ||
1248 | } | ||
1249 | |||
1250 | static inline int _ldst_memtomem(unsigned dry_run, u8 buf[], | ||
1251 | const struct _xfer_spec *pxs, int cyc) | ||
1252 | { | ||
1253 | int off = 0; | ||
1254 | struct pl330_config *pcfg = pxs->r->cfg->pcfg; | ||
1255 | |||
1256 | /* check lock-up free version */ | ||
1257 | if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) { | ||
1258 | while (cyc--) { | ||
1259 | off += _emit_LD(dry_run, &buf[off], ALWAYS); | ||
1260 | off += _emit_ST(dry_run, &buf[off], ALWAYS); | ||
1261 | } | ||
1262 | } else { | ||
1263 | while (cyc--) { | ||
1264 | off += _emit_LD(dry_run, &buf[off], ALWAYS); | ||
1265 | off += _emit_RMB(dry_run, &buf[off]); | ||
1266 | off += _emit_ST(dry_run, &buf[off], ALWAYS); | ||
1267 | off += _emit_WMB(dry_run, &buf[off]); | ||
1268 | } | ||
1269 | } | ||
1270 | |||
1271 | return off; | ||
1272 | } | ||
1273 | |||
1274 | static inline int _ldst_devtomem(unsigned dry_run, u8 buf[], | ||
1275 | const struct _xfer_spec *pxs, int cyc) | ||
1276 | { | ||
1277 | int off = 0; | ||
1278 | |||
1279 | while (cyc--) { | ||
1280 | off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri); | ||
1281 | off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri); | ||
1282 | off += _emit_ST(dry_run, &buf[off], ALWAYS); | ||
1283 | off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri); | ||
1284 | } | ||
1285 | |||
1286 | return off; | ||
1287 | } | ||
1288 | |||
1289 | static inline int _ldst_memtodev(unsigned dry_run, u8 buf[], | ||
1290 | const struct _xfer_spec *pxs, int cyc) | ||
1291 | { | ||
1292 | int off = 0; | ||
1293 | |||
1294 | while (cyc--) { | ||
1295 | off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri); | ||
1296 | off += _emit_LD(dry_run, &buf[off], ALWAYS); | ||
1297 | off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri); | ||
1298 | off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri); | ||
1299 | } | ||
1300 | |||
1301 | return off; | ||
1302 | } | ||
1303 | |||
1304 | static int _bursts(unsigned dry_run, u8 buf[], | ||
1305 | const struct _xfer_spec *pxs, int cyc) | ||
1306 | { | ||
1307 | int off = 0; | ||
1308 | |||
1309 | switch (pxs->r->rqtype) { | ||
1310 | case MEMTODEV: | ||
1311 | off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc); | ||
1312 | break; | ||
1313 | case DEVTOMEM: | ||
1314 | off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc); | ||
1315 | break; | ||
1316 | case MEMTOMEM: | ||
1317 | off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc); | ||
1318 | break; | ||
1319 | default: | ||
1320 | off += 0x40000000; /* Scare off the Client */ | ||
1321 | break; | ||
1322 | } | ||
1323 | |||
1324 | return off; | ||
1325 | } | ||
1326 | |||
1327 | /* Returns bytes consumed and updates bursts */ | ||
1328 | static inline int _loop(unsigned dry_run, u8 buf[], | ||
1329 | unsigned long *bursts, const struct _xfer_spec *pxs) | ||
1330 | { | ||
1331 | int cyc, cycmax, szlp, szlpend, szbrst, off; | ||
1332 | unsigned lcnt0, lcnt1, ljmp0, ljmp1; | ||
1333 | struct _arg_LPEND lpend; | ||
1334 | |||
1335 | /* Max iterations possible in DMALP is 256 */ | ||
1336 | if (*bursts >= 256*256) { | ||
1337 | lcnt1 = 256; | ||
1338 | lcnt0 = 256; | ||
1339 | cyc = *bursts / lcnt1 / lcnt0; | ||
1340 | } else if (*bursts > 256) { | ||
1341 | lcnt1 = 256; | ||
1342 | lcnt0 = *bursts / lcnt1; | ||
1343 | cyc = 1; | ||
1344 | } else { | ||
1345 | lcnt1 = *bursts; | ||
1346 | lcnt0 = 0; | ||
1347 | cyc = 1; | ||
1348 | } | ||
1349 | |||
1350 | szlp = _emit_LP(1, buf, 0, 0); | ||
1351 | szbrst = _bursts(1, buf, pxs, 1); | ||
1352 | |||
1353 | lpend.cond = ALWAYS; | ||
1354 | lpend.forever = false; | ||
1355 | lpend.loop = 0; | ||
1356 | lpend.bjump = 0; | ||
1357 | szlpend = _emit_LPEND(1, buf, &lpend); | ||
1358 | |||
1359 | if (lcnt0) { | ||
1360 | szlp *= 2; | ||
1361 | szlpend *= 2; | ||
1362 | } | ||
1363 | |||
1364 | /* | ||
1365 | * Max bursts that we can unroll due to limit on the | ||
1366 | * size of backward jump that can be encoded in DMALPEND | ||
1367 | * which is 8-bits and hence 255 | ||
1368 | */ | ||
1369 | cycmax = (255 - (szlp + szlpend)) / szbrst; | ||
1370 | |||
1371 | cyc = (cycmax < cyc) ? cycmax : cyc; | ||
1372 | |||
1373 | off = 0; | ||
1374 | |||
1375 | if (lcnt0) { | ||
1376 | off += _emit_LP(dry_run, &buf[off], 0, lcnt0); | ||
1377 | ljmp0 = off; | ||
1378 | } | ||
1379 | |||
1380 | off += _emit_LP(dry_run, &buf[off], 1, lcnt1); | ||
1381 | ljmp1 = off; | ||
1382 | |||
1383 | off += _bursts(dry_run, &buf[off], pxs, cyc); | ||
1384 | |||
1385 | lpend.cond = ALWAYS; | ||
1386 | lpend.forever = false; | ||
1387 | lpend.loop = 1; | ||
1388 | lpend.bjump = off - ljmp1; | ||
1389 | off += _emit_LPEND(dry_run, &buf[off], &lpend); | ||
1390 | |||
1391 | if (lcnt0) { | ||
1392 | lpend.cond = ALWAYS; | ||
1393 | lpend.forever = false; | ||
1394 | lpend.loop = 0; | ||
1395 | lpend.bjump = off - ljmp0; | ||
1396 | off += _emit_LPEND(dry_run, &buf[off], &lpend); | ||
1397 | } | ||
1398 | |||
1399 | *bursts = lcnt1 * cyc; | ||
1400 | if (lcnt0) | ||
1401 | *bursts *= lcnt0; | ||
1402 | |||
1403 | return off; | ||
1404 | } | ||
1405 | |||
1406 | static inline int _setup_loops(unsigned dry_run, u8 buf[], | ||
1407 | const struct _xfer_spec *pxs) | ||
1408 | { | ||
1409 | struct pl330_xfer *x = pxs->x; | ||
1410 | u32 ccr = pxs->ccr; | ||
1411 | unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr); | ||
1412 | int off = 0; | ||
1413 | |||
1414 | while (bursts) { | ||
1415 | c = bursts; | ||
1416 | off += _loop(dry_run, &buf[off], &c, pxs); | ||
1417 | bursts -= c; | ||
1418 | } | ||
1419 | |||
1420 | return off; | ||
1421 | } | ||
1422 | |||
1423 | static inline int _setup_xfer(unsigned dry_run, u8 buf[], | ||
1424 | const struct _xfer_spec *pxs) | ||
1425 | { | ||
1426 | struct pl330_xfer *x = pxs->x; | ||
1427 | int off = 0; | ||
1428 | |||
1429 | /* DMAMOV SAR, x->src_addr */ | ||
1430 | off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr); | ||
1431 | /* DMAMOV DAR, x->dst_addr */ | ||
1432 | off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr); | ||
1433 | |||
1434 | /* Setup Loop(s) */ | ||
1435 | off += _setup_loops(dry_run, &buf[off], pxs); | ||
1436 | |||
1437 | return off; | ||
1438 | } | ||
1439 | |||
1440 | /* | ||
1441 | * A req is a sequence of one or more xfer units. | ||
1442 | * Returns the number of bytes taken to setup the MC for the req. | ||
1443 | */ | ||
1444 | static int _setup_req(unsigned dry_run, struct pl330_thread *thrd, | ||
1445 | unsigned index, struct _xfer_spec *pxs) | ||
1446 | { | ||
1447 | struct _pl330_req *req = &thrd->req[index]; | ||
1448 | struct pl330_xfer *x; | ||
1449 | u8 *buf = req->mc_cpu; | ||
1450 | int off = 0; | ||
1451 | |||
1452 | PL330_DBGMC_START(req->mc_bus); | ||
1453 | |||
1454 | /* DMAMOV CCR, ccr */ | ||
1455 | off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr); | ||
1456 | |||
1457 | x = pxs->r->x; | ||
1458 | do { | ||
1459 | /* Error if xfer length is not aligned at burst size */ | ||
1460 | if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr))) | ||
1461 | return -EINVAL; | ||
1462 | |||
1463 | pxs->x = x; | ||
1464 | off += _setup_xfer(dry_run, &buf[off], pxs); | ||
1465 | |||
1466 | x = x->next; | ||
1467 | } while (x); | ||
1468 | |||
1469 | /* DMASEV peripheral/event */ | ||
1470 | off += _emit_SEV(dry_run, &buf[off], thrd->ev); | ||
1471 | /* DMAEND */ | ||
1472 | off += _emit_END(dry_run, &buf[off]); | ||
1473 | |||
1474 | return off; | ||
1475 | } | ||
1476 | |||
1477 | static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc) | ||
1478 | { | ||
1479 | u32 ccr = 0; | ||
1480 | |||
1481 | if (rqc->src_inc) | ||
1482 | ccr |= CC_SRCINC; | ||
1483 | |||
1484 | if (rqc->dst_inc) | ||
1485 | ccr |= CC_DSTINC; | ||
1486 | |||
1487 | /* We set same protection levels for Src and DST for now */ | ||
1488 | if (rqc->privileged) | ||
1489 | ccr |= CC_SRCPRI | CC_DSTPRI; | ||
1490 | if (rqc->nonsecure) | ||
1491 | ccr |= CC_SRCNS | CC_DSTNS; | ||
1492 | if (rqc->insnaccess) | ||
1493 | ccr |= CC_SRCIA | CC_DSTIA; | ||
1494 | |||
1495 | ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT); | ||
1496 | ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT); | ||
1497 | |||
1498 | ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT); | ||
1499 | ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT); | ||
1500 | |||
1501 | ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT); | ||
1502 | ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT); | ||
1503 | |||
1504 | ccr |= (rqc->swap << CC_SWAP_SHFT); | ||
1505 | |||
1506 | return ccr; | ||
1507 | } | ||
1508 | |||
1509 | static inline bool _is_valid(u32 ccr) | ||
1510 | { | ||
1511 | enum pl330_dstcachectrl dcctl; | ||
1512 | enum pl330_srccachectrl scctl; | ||
1513 | |||
1514 | dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK; | ||
1515 | scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK; | ||
1516 | |||
1517 | if (dcctl == DINVALID1 || dcctl == DINVALID2 | ||
1518 | || scctl == SINVALID1 || scctl == SINVALID2) | ||
1519 | return false; | ||
1520 | else | ||
1521 | return true; | ||
1522 | } | ||
1523 | |||
1524 | /* | ||
1525 | * Submit a list of xfers after which the client wants notification. | ||
1526 | * Client is not notified after each xfer unit, just once after all | ||
1527 | * xfer units are done or some error occurs. | ||
1528 | */ | ||
1529 | static int pl330_submit_req(void *ch_id, struct pl330_req *r) | ||
1530 | { | ||
1531 | struct pl330_thread *thrd = ch_id; | ||
1532 | struct pl330_dmac *pl330; | ||
1533 | struct pl330_info *pi; | ||
1534 | struct _xfer_spec xs; | ||
1535 | unsigned long flags; | ||
1536 | void __iomem *regs; | ||
1537 | unsigned idx; | ||
1538 | u32 ccr; | ||
1539 | int ret = 0; | ||
1540 | |||
1541 | /* No Req or Unacquired Channel or DMAC */ | ||
1542 | if (!r || !thrd || thrd->free) | ||
1543 | return -EINVAL; | ||
1544 | |||
1545 | pl330 = thrd->dmac; | ||
1546 | pi = pl330->pinfo; | ||
1547 | regs = pi->base; | ||
1548 | |||
1549 | if (pl330->state == DYING | ||
1550 | || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) { | ||
1551 | dev_info(thrd->dmac->pinfo->dev, "%s:%d\n", | ||
1552 | __func__, __LINE__); | ||
1553 | return -EAGAIN; | ||
1554 | } | ||
1555 | |||
1556 | /* If request for non-existing peripheral */ | ||
1557 | if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) { | ||
1558 | dev_info(thrd->dmac->pinfo->dev, | ||
1559 | "%s:%d Invalid peripheral(%u)!\n", | ||
1560 | __func__, __LINE__, r->peri); | ||
1561 | return -EINVAL; | ||
1562 | } | ||
1563 | |||
1564 | spin_lock_irqsave(&pl330->lock, flags); | ||
1565 | |||
1566 | if (_queue_full(thrd)) { | ||
1567 | ret = -EAGAIN; | ||
1568 | goto xfer_exit; | ||
1569 | } | ||
1570 | |||
1571 | /* Prefer Secure Channel */ | ||
1572 | if (!_manager_ns(thrd)) | ||
1573 | r->cfg->nonsecure = 0; | ||
1574 | else | ||
1575 | r->cfg->nonsecure = 1; | ||
1576 | |||
1577 | /* Use last settings, if not provided */ | ||
1578 | if (r->cfg) | ||
1579 | ccr = _prepare_ccr(r->cfg); | ||
1580 | else | ||
1581 | ccr = readl(regs + CC(thrd->id)); | ||
1582 | |||
1583 | /* If this req doesn't have valid xfer settings */ | ||
1584 | if (!_is_valid(ccr)) { | ||
1585 | ret = -EINVAL; | ||
1586 | dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n", | ||
1587 | __func__, __LINE__, ccr); | ||
1588 | goto xfer_exit; | ||
1589 | } | ||
1590 | |||
1591 | idx = IS_FREE(&thrd->req[0]) ? 0 : 1; | ||
1592 | |||
1593 | xs.ccr = ccr; | ||
1594 | xs.r = r; | ||
1595 | |||
1596 | /* First dry run to check if req is acceptable */ | ||
1597 | ret = _setup_req(1, thrd, idx, &xs); | ||
1598 | if (ret < 0) | ||
1599 | goto xfer_exit; | ||
1600 | |||
1601 | if (ret > pi->mcbufsz / 2) { | ||
1602 | dev_info(thrd->dmac->pinfo->dev, | ||
1603 | "%s:%d Trying increasing mcbufsz\n", | ||
1604 | __func__, __LINE__); | ||
1605 | ret = -ENOMEM; | ||
1606 | goto xfer_exit; | ||
1607 | } | ||
1608 | |||
1609 | /* Hook the request */ | ||
1610 | thrd->lstenq = idx; | ||
1611 | thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs); | ||
1612 | thrd->req[idx].r = r; | ||
1613 | |||
1614 | ret = 0; | ||
1615 | |||
1616 | xfer_exit: | ||
1617 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1618 | |||
1619 | return ret; | ||
1620 | } | ||
1621 | |||
1622 | static void pl330_dotask(unsigned long data) | ||
1623 | { | ||
1624 | struct pl330_dmac *pl330 = (struct pl330_dmac *) data; | ||
1625 | struct pl330_info *pi = pl330->pinfo; | ||
1626 | unsigned long flags; | ||
1627 | int i; | ||
1628 | |||
1629 | spin_lock_irqsave(&pl330->lock, flags); | ||
1630 | |||
1631 | /* The DMAC itself has gone nuts */ | ||
1632 | if (pl330->dmac_tbd.reset_dmac) { | ||
1633 | pl330->state = DYING; | ||
1634 | /* Reset the manager too */ | ||
1635 | pl330->dmac_tbd.reset_mngr = true; | ||
1636 | /* Clear the reset flag */ | ||
1637 | pl330->dmac_tbd.reset_dmac = false; | ||
1638 | } | ||
1639 | |||
1640 | if (pl330->dmac_tbd.reset_mngr) { | ||
1641 | _stop(pl330->manager); | ||
1642 | /* Reset all channels */ | ||
1643 | pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1; | ||
1644 | /* Clear the reset flag */ | ||
1645 | pl330->dmac_tbd.reset_mngr = false; | ||
1646 | } | ||
1647 | |||
1648 | for (i = 0; i < pi->pcfg.num_chan; i++) { | ||
1649 | |||
1650 | if (pl330->dmac_tbd.reset_chan & (1 << i)) { | ||
1651 | struct pl330_thread *thrd = &pl330->channels[i]; | ||
1652 | void __iomem *regs = pi->base; | ||
1653 | enum pl330_op_err err; | ||
1654 | |||
1655 | _stop(thrd); | ||
1656 | |||
1657 | if (readl(regs + FSC) & (1 << thrd->id)) | ||
1658 | err = PL330_ERR_FAIL; | ||
1659 | else | ||
1660 | err = PL330_ERR_ABORT; | ||
1661 | |||
1662 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1663 | |||
1664 | _callback(thrd->req[1 - thrd->lstenq].r, err); | ||
1665 | _callback(thrd->req[thrd->lstenq].r, err); | ||
1666 | |||
1667 | spin_lock_irqsave(&pl330->lock, flags); | ||
1668 | |||
1669 | thrd->req[0].r = NULL; | ||
1670 | thrd->req[1].r = NULL; | ||
1671 | mark_free(thrd, 0); | ||
1672 | mark_free(thrd, 1); | ||
1673 | |||
1674 | /* Clear the reset flag */ | ||
1675 | pl330->dmac_tbd.reset_chan &= ~(1 << i); | ||
1676 | } | ||
1677 | } | ||
1678 | |||
1679 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1680 | |||
1681 | return; | ||
1682 | } | ||
1683 | |||
1684 | /* Returns 1 if state was updated, 0 otherwise */ | ||
1685 | static int pl330_update(const struct pl330_info *pi) | ||
1686 | { | ||
1687 | struct _pl330_req *rqdone; | ||
1688 | struct pl330_dmac *pl330; | ||
1689 | unsigned long flags; | ||
1690 | void __iomem *regs; | ||
1691 | u32 val; | ||
1692 | int id, ev, ret = 0; | ||
1693 | |||
1694 | if (!pi || !pi->pl330_data) | ||
1695 | return 0; | ||
1696 | |||
1697 | regs = pi->base; | ||
1698 | pl330 = pi->pl330_data; | ||
1699 | |||
1700 | spin_lock_irqsave(&pl330->lock, flags); | ||
1701 | |||
1702 | val = readl(regs + FSM) & 0x1; | ||
1703 | if (val) | ||
1704 | pl330->dmac_tbd.reset_mngr = true; | ||
1705 | else | ||
1706 | pl330->dmac_tbd.reset_mngr = false; | ||
1707 | |||
1708 | val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1); | ||
1709 | pl330->dmac_tbd.reset_chan |= val; | ||
1710 | if (val) { | ||
1711 | int i = 0; | ||
1712 | while (i < pi->pcfg.num_chan) { | ||
1713 | if (val & (1 << i)) { | ||
1714 | dev_info(pi->dev, | ||
1715 | "Reset Channel-%d\t CS-%x FTC-%x\n", | ||
1716 | i, readl(regs + CS(i)), | ||
1717 | readl(regs + FTC(i))); | ||
1718 | _stop(&pl330->channels[i]); | ||
1719 | } | ||
1720 | i++; | ||
1721 | } | ||
1722 | } | ||
1723 | |||
1724 | /* Check which event happened, i.e. which thread notified */ | ||
1725 | val = readl(regs + ES); | ||
1726 | if (pi->pcfg.num_events < 32 | ||
1727 | && val & ~((1 << pi->pcfg.num_events) - 1)) { | ||
1728 | pl330->dmac_tbd.reset_dmac = true; | ||
1729 | dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__); | ||
1730 | ret = 1; | ||
1731 | goto updt_exit; | ||
1732 | } | ||
1733 | |||
1734 | for (ev = 0; ev < pi->pcfg.num_events; ev++) { | ||
1735 | if (val & (1 << ev)) { /* Event occurred */ | ||
1736 | struct pl330_thread *thrd; | ||
1737 | u32 inten = readl(regs + INTEN); | ||
1738 | int active; | ||
1739 | |||
1740 | /* Clear the event */ | ||
1741 | if (inten & (1 << ev)) | ||
1742 | writel(1 << ev, regs + INTCLR); | ||
1743 | |||
1744 | ret = 1; | ||
1745 | |||
1746 | id = pl330->events[ev]; | ||
1747 | |||
1748 | thrd = &pl330->channels[id]; | ||
1749 | |||
1750 | active = thrd->req_running; | ||
1751 | if (active == -1) /* Aborted */ | ||
1752 | continue; | ||
1753 | |||
1754 | rqdone = &thrd->req[active]; | ||
1755 | mark_free(thrd, active); | ||
1756 | |||
1757 | /* Get going again ASAP */ | ||
1758 | _start(thrd); | ||
1759 | |||
1760 | /* For now, just make a list of callbacks to be done */ | ||
1761 | list_add_tail(&rqdone->rqd, &pl330->req_done); | ||
1762 | } | ||
1763 | } | ||
1764 | |||
1765 | /* Now that we are in no hurry, do the callbacks */ | ||
1766 | while (!list_empty(&pl330->req_done)) { | ||
1767 | struct pl330_req *r; | ||
1768 | |||
1769 | rqdone = container_of(pl330->req_done.next, | ||
1770 | struct _pl330_req, rqd); | ||
1771 | |||
1772 | list_del_init(&rqdone->rqd); | ||
1773 | |||
1774 | /* Detach the req */ | ||
1775 | r = rqdone->r; | ||
1776 | rqdone->r = NULL; | ||
1777 | |||
1778 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1779 | _callback(r, PL330_ERR_NONE); | ||
1780 | spin_lock_irqsave(&pl330->lock, flags); | ||
1781 | } | ||
1782 | |||
1783 | updt_exit: | ||
1784 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1785 | |||
1786 | if (pl330->dmac_tbd.reset_dmac | ||
1787 | || pl330->dmac_tbd.reset_mngr | ||
1788 | || pl330->dmac_tbd.reset_chan) { | ||
1789 | ret = 1; | ||
1790 | tasklet_schedule(&pl330->tasks); | ||
1791 | } | ||
1792 | |||
1793 | return ret; | ||
1794 | } | ||
1795 | |||
1796 | static int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op) | ||
1797 | { | ||
1798 | struct pl330_thread *thrd = ch_id; | ||
1799 | struct pl330_dmac *pl330; | ||
1800 | unsigned long flags; | ||
1801 | int ret = 0, active; | ||
1802 | |||
1803 | if (!thrd || thrd->free || thrd->dmac->state == DYING) | ||
1804 | return -EINVAL; | ||
1805 | |||
1806 | pl330 = thrd->dmac; | ||
1807 | active = thrd->req_running; | ||
1808 | |||
1809 | spin_lock_irqsave(&pl330->lock, flags); | ||
1810 | |||
1811 | switch (op) { | ||
1812 | case PL330_OP_FLUSH: | ||
1813 | /* Make sure the channel is stopped */ | ||
1814 | _stop(thrd); | ||
1815 | |||
1816 | thrd->req[0].r = NULL; | ||
1817 | thrd->req[1].r = NULL; | ||
1818 | mark_free(thrd, 0); | ||
1819 | mark_free(thrd, 1); | ||
1820 | break; | ||
1821 | |||
1822 | case PL330_OP_ABORT: | ||
1823 | /* Make sure the channel is stopped */ | ||
1824 | _stop(thrd); | ||
1825 | |||
1826 | /* ABORT is only for the active req */ | ||
1827 | if (active == -1) | ||
1828 | break; | ||
1829 | |||
1830 | thrd->req[active].r = NULL; | ||
1831 | mark_free(thrd, active); | ||
1832 | |||
1833 | /* Start the next */ | ||
1834 | case PL330_OP_START: | ||
1835 | if ((active == -1) && !_start(thrd)) | ||
1836 | ret = -EIO; | ||
1837 | break; | ||
1838 | |||
1839 | default: | ||
1840 | ret = -EINVAL; | ||
1841 | } | ||
1842 | |||
1843 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1844 | return ret; | ||
1845 | } | ||
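Note that PL330_OP_ABORT deliberately falls through to PL330_OP_START, so aborting the active request immediately restarts the channel on the other queued request, if any. A hedged usage sketch (the channel handle 'ch' is whatever pl330_request_channel() returned; queueing of requests happens elsewhere in this file, and the helper names below are made up for illustration):

/* Illustrative only: start whatever has been queued on the thread. */
static void example_restart_channel(struct pl330_info *pi, void *ch)
{
	if (pl330_chan_ctrl(ch, PL330_OP_START))
		dev_err(pi->dev, "channel refused to start\n");
}

/* On error recovery: stop the thread and drop both queued requests. */
static void example_drain_channel(void *ch)
{
	pl330_chan_ctrl(ch, PL330_OP_FLUSH);
}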
1846 | |||
1847 | /* Reserve an event */ | ||
1848 | static inline int _alloc_event(struct pl330_thread *thrd) | ||
1849 | { | ||
1850 | struct pl330_dmac *pl330 = thrd->dmac; | ||
1851 | struct pl330_info *pi = pl330->pinfo; | ||
1852 | int ev; | ||
1853 | |||
1854 | for (ev = 0; ev < pi->pcfg.num_events; ev++) | ||
1855 | if (pl330->events[ev] == -1) { | ||
1856 | pl330->events[ev] = thrd->id; | ||
1857 | return ev; | ||
1858 | } | ||
1859 | |||
1860 | return -1; | ||
1861 | } | ||
1862 | |||
1863 | static bool _chan_ns(const struct pl330_info *pi, int i) | ||
1864 | { | ||
1865 | return pi->pcfg.irq_ns & (1 << i); | ||
1866 | } | ||
1867 | |||
1868 | /* Upon success, returns an identity token for the | ||
1869 | * allocated channel, NULL otherwise. | ||
1870 | */ | ||
1871 | static void *pl330_request_channel(const struct pl330_info *pi) | ||
1872 | { | ||
1873 | struct pl330_thread *thrd = NULL; | ||
1874 | struct pl330_dmac *pl330; | ||
1875 | unsigned long flags; | ||
1876 | int chans, i; | ||
1877 | |||
1878 | if (!pi || !pi->pl330_data) | ||
1879 | return NULL; | ||
1880 | |||
1881 | pl330 = pi->pl330_data; | ||
1882 | |||
1883 | if (pl330->state == DYING) | ||
1884 | return NULL; | ||
1885 | |||
1886 | chans = pi->pcfg.num_chan; | ||
1887 | |||
1888 | spin_lock_irqsave(&pl330->lock, flags); | ||
1889 | |||
1890 | for (i = 0; i < chans; i++) { | ||
1891 | thrd = &pl330->channels[i]; | ||
1892 | if ((thrd->free) && (!_manager_ns(thrd) || | ||
1893 | _chan_ns(pi, i))) { | ||
1894 | thrd->ev = _alloc_event(thrd); | ||
1895 | if (thrd->ev >= 0) { | ||
1896 | thrd->free = false; | ||
1897 | thrd->lstenq = 1; | ||
1898 | thrd->req[0].r = NULL; | ||
1899 | mark_free(thrd, 0); | ||
1900 | thrd->req[1].r = NULL; | ||
1901 | mark_free(thrd, 1); | ||
1902 | break; | ||
1903 | } | ||
1904 | } | ||
1905 | thrd = NULL; | ||
1906 | } | ||
1907 | |||
1908 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1909 | |||
1910 | return thrd; | ||
1911 | } | ||
1912 | |||
1913 | /* Release an event */ | ||
1914 | static inline void _free_event(struct pl330_thread *thrd, int ev) | ||
1915 | { | ||
1916 | struct pl330_dmac *pl330 = thrd->dmac; | ||
1917 | struct pl330_info *pi = pl330->pinfo; | ||
1918 | |||
1919 | /* If the event is valid and was held by the thread */ | ||
1920 | if (ev >= 0 && ev < pi->pcfg.num_events | ||
1921 | && pl330->events[ev] == thrd->id) | ||
1922 | pl330->events[ev] = -1; | ||
1923 | } | ||
1924 | |||
1925 | static void pl330_release_channel(void *ch_id) | ||
1926 | { | ||
1927 | struct pl330_thread *thrd = ch_id; | ||
1928 | struct pl330_dmac *pl330; | ||
1929 | unsigned long flags; | ||
1930 | |||
1931 | if (!thrd || thrd->free) | ||
1932 | return; | ||
1933 | |||
1934 | _stop(thrd); | ||
1935 | |||
1936 | _callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT); | ||
1937 | _callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT); | ||
1938 | |||
1939 | pl330 = thrd->dmac; | ||
1940 | |||
1941 | spin_lock_irqsave(&pl330->lock, flags); | ||
1942 | _free_event(thrd, thrd->ev); | ||
1943 | thrd->free = true; | ||
1944 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1945 | } | ||
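Taken together, pl330_request_channel() and pl330_release_channel() bracket a channel's lifetime: the former grabs a free thread plus an event, the latter aborts anything in flight and returns both. A sketch of the pairing, assuming the caller keeps the opaque handle around (as the dmaengine glue below does in pch->pl330_chid); the helper name is hypothetical:

static int example_channel_lifetime(struct pl330_info *pi)
{
	void *ch = pl330_request_channel(pi);	/* NULL if none free */

	if (!ch)
		return -EAGAIN;

	/* ... queue requests and run them via pl330_chan_ctrl() ... */

	pl330_release_channel(ch);		/* aborts and frees */
	return 0;
}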
1946 | |||
1947 | /* Initialize the structure for PL330 configuration, which can be used | ||
1948 | * by the client driver to make the best use of the DMAC | ||
1949 | */ | ||
1950 | static void read_dmac_config(struct pl330_info *pi) | ||
1951 | { | ||
1952 | void __iomem *regs = pi->base; | ||
1953 | u32 val; | ||
1954 | |||
1955 | val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT; | ||
1956 | val &= CRD_DATA_WIDTH_MASK; | ||
1957 | pi->pcfg.data_bus_width = 8 * (1 << val); | ||
1958 | |||
1959 | val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT; | ||
1960 | val &= CRD_DATA_BUFF_MASK; | ||
1961 | pi->pcfg.data_buf_dep = val + 1; | ||
1962 | |||
1963 | val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT; | ||
1964 | val &= CR0_NUM_CHANS_MASK; | ||
1965 | val += 1; | ||
1966 | pi->pcfg.num_chan = val; | ||
1967 | |||
1968 | val = readl(regs + CR0); | ||
1969 | if (val & CR0_PERIPH_REQ_SET) { | ||
1970 | val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK; | ||
1971 | val += 1; | ||
1972 | pi->pcfg.num_peri = val; | ||
1973 | pi->pcfg.peri_ns = readl(regs + CR4); | ||
1974 | } else { | ||
1975 | pi->pcfg.num_peri = 0; | ||
1976 | } | ||
1977 | |||
1978 | val = readl(regs + CR0); | ||
1979 | if (val & CR0_BOOT_MAN_NS) | ||
1980 | pi->pcfg.mode |= DMAC_MODE_NS; | ||
1981 | else | ||
1982 | pi->pcfg.mode &= ~DMAC_MODE_NS; | ||
1983 | |||
1984 | val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT; | ||
1985 | val &= CR0_NUM_EVENTS_MASK; | ||
1986 | val += 1; | ||
1987 | pi->pcfg.num_events = val; | ||
1988 | |||
1989 | pi->pcfg.irq_ns = readl(regs + CR3); | ||
1990 | |||
1991 | pi->pcfg.periph_id = get_id(pi, PERIPH_ID); | ||
1992 | pi->pcfg.pcell_id = get_id(pi, PCELL_ID); | ||
1993 | } | ||
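The CRD/CR0 decodes are all "+1" or "power of two" encodings, which is easy to misread; a worked example with hypothetical field values:

/*
 * CRD.DATA_WIDTH = 2  ->  data_bus_width = 8 * (1 << 2) = 32 bits
 * CRD.DATA_BUFF  = 15 ->  data_buf_dep   = 15 + 1       = 16
 * CR0.NUM_CHANS  = 7  ->  num_chan       = 7 + 1        = 8 threads
 * CR0.NUM_EVENTS = 31 ->  num_events     = 31 + 1       = 32 events
 */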
1994 | |||
1995 | static inline void _reset_thread(struct pl330_thread *thrd) | ||
1996 | { | ||
1997 | struct pl330_dmac *pl330 = thrd->dmac; | ||
1998 | struct pl330_info *pi = pl330->pinfo; | ||
1999 | |||
2000 | thrd->req[0].mc_cpu = pl330->mcode_cpu | ||
2001 | + (thrd->id * pi->mcbufsz); | ||
2002 | thrd->req[0].mc_bus = pl330->mcode_bus | ||
2003 | + (thrd->id * pi->mcbufsz); | ||
2004 | thrd->req[0].r = NULL; | ||
2005 | mark_free(thrd, 0); | ||
2006 | |||
2007 | thrd->req[1].mc_cpu = thrd->req[0].mc_cpu | ||
2008 | + pi->mcbufsz / 2; | ||
2009 | thrd->req[1].mc_bus = thrd->req[0].mc_bus | ||
2010 | + pi->mcbufsz / 2; | ||
2011 | thrd->req[1].r = NULL; | ||
2012 | mark_free(thrd, 1); | ||
2013 | } | ||
2014 | |||
2015 | static int dmac_alloc_threads(struct pl330_dmac *pl330) | ||
2016 | { | ||
2017 | struct pl330_info *pi = pl330->pinfo; | ||
2018 | int chans = pi->pcfg.num_chan; | ||
2019 | struct pl330_thread *thrd; | ||
2020 | int i; | ||
2021 | |||
2022 | /* Allocate 1 Manager and 'chans' Channel threads */ | ||
2023 | pl330->channels = kzalloc((1 + chans) * sizeof(*thrd), | ||
2024 | GFP_KERNEL); | ||
2025 | if (!pl330->channels) | ||
2026 | return -ENOMEM; | ||
2027 | |||
2028 | /* Init Channel threads */ | ||
2029 | for (i = 0; i < chans; i++) { | ||
2030 | thrd = &pl330->channels[i]; | ||
2031 | thrd->id = i; | ||
2032 | thrd->dmac = pl330; | ||
2033 | _reset_thread(thrd); | ||
2034 | thrd->free = true; | ||
2035 | } | ||
2036 | |||
2037 | /* MANAGER is indexed at the end */ | ||
2038 | thrd = &pl330->channels[chans]; | ||
2039 | thrd->id = chans; | ||
2040 | thrd->dmac = pl330; | ||
2041 | thrd->free = false; | ||
2042 | pl330->manager = thrd; | ||
2043 | |||
2044 | return 0; | ||
2045 | } | ||
2046 | |||
2047 | static int dmac_alloc_resources(struct pl330_dmac *pl330) | ||
2048 | { | ||
2049 | struct pl330_info *pi = pl330->pinfo; | ||
2050 | int chans = pi->pcfg.num_chan; | ||
2051 | int ret; | ||
2052 | |||
2053 | /* | ||
2054 | * Alloc MicroCode buffer for 'chans' Channel threads. | ||
2055 | * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN) | ||
2056 | */ | ||
2057 | pl330->mcode_cpu = dma_alloc_coherent(pi->dev, | ||
2058 | chans * pi->mcbufsz, | ||
2059 | &pl330->mcode_bus, GFP_KERNEL); | ||
2060 | if (!pl330->mcode_cpu) { | ||
2061 | dev_err(pi->dev, "%s:%d Can't allocate memory!\n", | ||
2062 | __func__, __LINE__); | ||
2063 | return -ENOMEM; | ||
2064 | } | ||
2065 | |||
2066 | ret = dmac_alloc_threads(pl330); | ||
2067 | if (ret) { | ||
2068 | dev_err(pi->dev, "%s:%d Can't create channels for DMAC!\n", | ||
2069 | __func__, __LINE__); | ||
2070 | dma_free_coherent(pi->dev, | ||
2071 | chans * pi->mcbufsz, | ||
2072 | pl330->mcode_cpu, pl330->mcode_bus); | ||
2073 | return ret; | ||
2074 | } | ||
2075 | |||
2076 | return 0; | ||
2077 | } | ||
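Putting dmac_alloc_resources() and _reset_thread() together gives the microcode buffer layout: one coherent allocation of chans * mcbufsz bytes, each channel thread owning an mcbufsz slice that it splits between its two requests. A sketch of the offsets (mcbufsz left symbolic):

/*
 * mcode_cpu / mcode_bus, one contiguous coherent allocation:
 *
 *   thread i, req[0] microcode:  mcode_cpu + i * mcbufsz
 *   thread i, req[1] microcode:  mcode_cpu + i * mcbufsz + mcbufsz / 2
 *
 * so each request gets mcbufsz / 2 bytes for its generated program.
 */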
2078 | |||
2079 | static int pl330_add(struct pl330_info *pi) | ||
2080 | { | ||
2081 | struct pl330_dmac *pl330; | ||
2082 | void __iomem *regs; | ||
2083 | int i, ret; | ||
2084 | |||
2085 | if (!pi || !pi->dev) | ||
2086 | return -EINVAL; | ||
2087 | |||
2088 | /* If already added */ | ||
2089 | if (pi->pl330_data) | ||
2090 | return -EINVAL; | ||
2091 | |||
2092 | /* | ||
2093 | * If the SoC can perform reset on the DMAC, then do it | ||
2094 | * before reading its configuration. | ||
2095 | */ | ||
2096 | if (pi->dmac_reset) | ||
2097 | pi->dmac_reset(pi); | ||
2098 | |||
2099 | regs = pi->base; | ||
2100 | |||
2101 | /* Check if we can handle this DMAC */ | ||
2102 | if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL | ||
2103 | || get_id(pi, PCELL_ID) != PCELL_ID_VAL) { | ||
2104 | dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x!\n", | ||
2105 | get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID)); | ||
2106 | return -EINVAL; | ||
2107 | } | ||
2108 | |||
2109 | /* Read the configuration of the DMAC */ | ||
2110 | read_dmac_config(pi); | ||
2111 | |||
2112 | if (pi->pcfg.num_events == 0) { | ||
2113 | dev_err(pi->dev, "%s:%d Can't work without events!\n", | ||
2114 | __func__, __LINE__); | ||
2115 | return -EINVAL; | ||
2116 | } | ||
2117 | |||
2118 | pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL); | ||
2119 | if (!pl330) { | ||
2120 | dev_err(pi->dev, "%s:%d Can't allocate memory!\n", | ||
2121 | __func__, __LINE__); | ||
2122 | return -ENOMEM; | ||
2123 | } | ||
2124 | |||
2125 | /* Assign the info structure and private data */ | ||
2126 | pl330->pinfo = pi; | ||
2127 | pi->pl330_data = pl330; | ||
2128 | |||
2129 | spin_lock_init(&pl330->lock); | ||
2130 | |||
2131 | INIT_LIST_HEAD(&pl330->req_done); | ||
2132 | |||
2133 | /* Use default MC buffer size if not provided */ | ||
2134 | if (!pi->mcbufsz) | ||
2135 | pi->mcbufsz = MCODE_BUFF_PER_REQ * 2; | ||
2136 | |||
2137 | /* Mark all events as free */ | ||
2138 | for (i = 0; i < pi->pcfg.num_events; i++) | ||
2139 | pl330->events[i] = -1; | ||
2140 | |||
2141 | /* Allocate resources needed by the DMAC */ | ||
2142 | ret = dmac_alloc_resources(pl330); | ||
2143 | if (ret) { | ||
2144 | dev_err(pi->dev, "Unable to create channels for DMAC\n"); | ||
2145 | kfree(pl330); | ||
2146 | return ret; | ||
2147 | } | ||
2148 | |||
2149 | tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330); | ||
2150 | |||
2151 | pl330->state = INIT; | ||
2152 | |||
2153 | return 0; | ||
2154 | } | ||
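pl330_add() expects the caller to have filled in the pl330_info beforehand; only dev and base are mandatory, while mcbufsz and dmac_reset may be left zero/NULL. A hedged sketch of a caller (the real one is pl330_probe() further down; the function name here is made up for illustration):

static int example_attach_dmac(struct device *dev, void __iomem *regs,
			       struct pl330_info *pi)
{
	pi->dev = dev;
	pi->base = regs;
	pi->mcbufsz = 0;	/* 0: pl330_add() falls back to its default */
	pi->dmac_reset = NULL;	/* no SoC-specific reset hook */
	pi->pl330_data = NULL;	/* must not look "already added" */

	return pl330_add(pi);	/* pair with pl330_del(pi) on teardown */
}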
2155 | |||
2156 | static int dmac_free_threads(struct pl330_dmac *pl330) | ||
2157 | { | ||
2158 | struct pl330_info *pi = pl330->pinfo; | ||
2159 | int chans = pi->pcfg.num_chan; | ||
2160 | struct pl330_thread *thrd; | ||
2161 | int i; | ||
2162 | |||
2163 | /* Release Channel threads */ | ||
2164 | for (i = 0; i < chans; i++) { | ||
2165 | thrd = &pl330->channels[i]; | ||
2166 | pl330_release_channel((void *)thrd); | ||
2167 | } | ||
2168 | |||
2169 | /* Free memory */ | ||
2170 | kfree(pl330->channels); | ||
2171 | |||
2172 | return 0; | ||
2173 | } | ||
2174 | |||
2175 | static void dmac_free_resources(struct pl330_dmac *pl330) | ||
2176 | { | ||
2177 | struct pl330_info *pi = pl330->pinfo; | ||
2178 | int chans = pi->pcfg.num_chan; | ||
2179 | |||
2180 | dmac_free_threads(pl330); | ||
2181 | |||
2182 | dma_free_coherent(pi->dev, chans * pi->mcbufsz, | ||
2183 | pl330->mcode_cpu, pl330->mcode_bus); | ||
2184 | } | ||
2185 | |||
2186 | static void pl330_del(struct pl330_info *pi) | ||
2187 | { | ||
2188 | struct pl330_dmac *pl330; | ||
2189 | |||
2190 | if (!pi || !pi->pl330_data) | ||
2191 | return; | ||
2192 | |||
2193 | pl330 = pi->pl330_data; | ||
2194 | |||
2195 | pl330->state = UNINIT; | ||
2196 | |||
2197 | tasklet_kill(&pl330->tasks); | ||
2198 | |||
2199 | /* Free DMAC resources */ | ||
2200 | dmac_free_resources(pl330); | ||
2201 | |||
2202 | kfree(pl330); | ||
2203 | pi->pl330_data = NULL; | ||
2204 | } | ||
2205 | |||
120 | /* forward declaration */ | 2206 | /* forward declaration */ |
121 | static struct amba_driver pl330_driver; | 2207 | static struct amba_driver pl330_driver; |
122 | 2208 | ||
@@ -234,7 +2320,7 @@ static void pl330_tasklet(unsigned long data) | |||
234 | /* Pick up ripe tomatoes */ | 2320 | /* Pick up ripe tomatoes */ |
235 | list_for_each_entry_safe(desc, _dt, &pch->work_list, node) | 2321 | list_for_each_entry_safe(desc, _dt, &pch->work_list, node) |
236 | if (desc->status == DONE) { | 2322 | if (desc->status == DONE) { |
237 | pch->completed = desc->txd.cookie; | 2323 | dma_cookie_complete(&desc->txd); |
238 | list_move_tail(&desc->node, &list); | 2324 | list_move_tail(&desc->node, &list); |
239 | } | 2325 | } |
240 | 2326 | ||
@@ -305,7 +2391,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan) | |||
305 | 2391 | ||
306 | spin_lock_irqsave(&pch->lock, flags); | 2392 | spin_lock_irqsave(&pch->lock, flags); |
307 | 2393 | ||
308 | pch->completed = chan->cookie = 1; | 2394 | dma_cookie_init(chan); |
309 | pch->cyclic = false; | 2395 | pch->cyclic = false; |
310 | 2396 | ||
311 | pch->pl330_chid = pl330_request_channel(&pdmac->pif); | 2397 | pch->pl330_chid = pl330_request_channel(&pdmac->pif); |
@@ -340,7 +2426,6 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned | |||
340 | /* Mark all desc done */ | 2426 | /* Mark all desc done */ |
341 | list_for_each_entry_safe(desc, _dt, &pch->work_list , node) { | 2427 | list_for_each_entry_safe(desc, _dt, &pch->work_list , node) { |
342 | desc->status = DONE; | 2428 | desc->status = DONE; |
343 | pch->completed = desc->txd.cookie; | ||
344 | list_move_tail(&desc->node, &list); | 2429 | list_move_tail(&desc->node, &list); |
345 | } | 2430 | } |
346 | 2431 | ||
@@ -396,18 +2481,7 @@ static enum dma_status | |||
396 | pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | 2481 | pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, |
397 | struct dma_tx_state *txstate) | 2482 | struct dma_tx_state *txstate) |
398 | { | 2483 | { |
399 | struct dma_pl330_chan *pch = to_pchan(chan); | 2484 | return dma_cookie_status(chan, cookie, txstate); |
400 | dma_cookie_t last_done, last_used; | ||
401 | int ret; | ||
402 | |||
403 | last_done = pch->completed; | ||
404 | last_used = chan->cookie; | ||
405 | |||
406 | ret = dma_async_is_complete(cookie, last_done, last_used); | ||
407 | |||
408 | dma_set_tx_state(txstate, last_done, last_used, 0); | ||
409 | |||
410 | return ret; | ||
411 | } | 2485 | } |
412 | 2486 | ||
413 | static void pl330_issue_pending(struct dma_chan *chan) | 2487 | static void pl330_issue_pending(struct dma_chan *chan) |
@@ -430,26 +2504,16 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) | |||
430 | spin_lock_irqsave(&pch->lock, flags); | 2504 | spin_lock_irqsave(&pch->lock, flags); |
431 | 2505 | ||
432 | /* Assign cookies to all nodes */ | 2506 | /* Assign cookies to all nodes */ |
433 | cookie = tx->chan->cookie; | ||
434 | |||
435 | while (!list_empty(&last->node)) { | 2507 | while (!list_empty(&last->node)) { |
436 | desc = list_entry(last->node.next, struct dma_pl330_desc, node); | 2508 | desc = list_entry(last->node.next, struct dma_pl330_desc, node); |
437 | 2509 | ||
438 | if (++cookie < 0) | 2510 | dma_cookie_assign(&desc->txd); |
439 | cookie = 1; | ||
440 | desc->txd.cookie = cookie; | ||
441 | 2511 | ||
442 | list_move_tail(&desc->node, &pch->work_list); | 2512 | list_move_tail(&desc->node, &pch->work_list); |
443 | } | 2513 | } |
444 | 2514 | ||
445 | if (++cookie < 0) | 2515 | cookie = dma_cookie_assign(&last->txd); |
446 | cookie = 1; | ||
447 | last->txd.cookie = cookie; | ||
448 | |||
449 | list_add_tail(&last->node, &pch->work_list); | 2516 | list_add_tail(&last->node, &pch->work_list); |
450 | |||
451 | tx->chan->cookie = cookie; | ||
452 | |||
453 | spin_unlock_irqrestore(&pch->lock, flags); | 2517 | spin_unlock_irqrestore(&pch->lock, flags); |
454 | 2518 | ||
455 | return cookie; | 2519 | return cookie; |
@@ -553,6 +2617,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) | |||
553 | async_tx_ack(&desc->txd); | 2617 | async_tx_ack(&desc->txd); |
554 | 2618 | ||
555 | desc->req.peri = peri_id ? pch->chan.chan_id : 0; | 2619 | desc->req.peri = peri_id ? pch->chan.chan_id : 0; |
2620 | desc->rqcfg.pcfg = &pch->dmac->pif.pcfg; | ||
556 | 2621 | ||
557 | dma_async_tx_descriptor_init(&desc->txd, &pch->chan); | 2622 | dma_async_tx_descriptor_init(&desc->txd, &pch->chan); |
558 | 2623 | ||
@@ -621,7 +2686,8 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len) | |||
621 | 2686 | ||
622 | static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( | 2687 | static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( |
623 | struct dma_chan *chan, dma_addr_t dma_addr, size_t len, | 2688 | struct dma_chan *chan, dma_addr_t dma_addr, size_t len, |
624 | size_t period_len, enum dma_transfer_direction direction) | 2689 | size_t period_len, enum dma_transfer_direction direction, |
2690 | void *context) | ||
625 | { | 2691 | { |
626 | struct dma_pl330_desc *desc; | 2692 | struct dma_pl330_desc *desc; |
627 | struct dma_pl330_chan *pch = to_pchan(chan); | 2693 | struct dma_pl330_chan *pch = to_pchan(chan); |
@@ -711,7 +2777,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, | |||
711 | static struct dma_async_tx_descriptor * | 2777 | static struct dma_async_tx_descriptor * |
712 | pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 2778 | pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
713 | unsigned int sg_len, enum dma_transfer_direction direction, | 2779 | unsigned int sg_len, enum dma_transfer_direction direction, |
714 | unsigned long flg) | 2780 | unsigned long flg, void *context) |
715 | { | 2781 | { |
716 | struct dma_pl330_desc *first, *desc = NULL; | 2782 | struct dma_pl330_desc *first, *desc = NULL; |
717 | struct dma_pl330_chan *pch = to_pchan(chan); | 2783 | struct dma_pl330_chan *pch = to_pchan(chan); |
@@ -829,7 +2895,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
829 | if (IS_ERR(pdmac->clk)) { | 2895 | if (IS_ERR(pdmac->clk)) { |
830 | dev_err(&adev->dev, "Cannot get operation clock.\n"); | 2896 | dev_err(&adev->dev, "Cannot get operation clock.\n"); |
831 | ret = -EINVAL; | 2897 | ret = -EINVAL; |
832 | goto probe_err1; | 2898 | goto probe_err2; |
833 | } | 2899 | } |
834 | 2900 | ||
835 | amba_set_drvdata(adev, pdmac); | 2901 | amba_set_drvdata(adev, pdmac); |
@@ -843,11 +2909,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
843 | ret = request_irq(irq, pl330_irq_handler, 0, | 2909 | ret = request_irq(irq, pl330_irq_handler, 0, |
844 | dev_name(&adev->dev), pi); | 2910 | dev_name(&adev->dev), pi); |
845 | if (ret) | 2911 | if (ret) |
846 | goto probe_err2; | 2912 | goto probe_err3; |
847 | 2913 | ||
848 | ret = pl330_add(pi); | 2914 | ret = pl330_add(pi); |
849 | if (ret) | 2915 | if (ret) |
850 | goto probe_err3; | 2916 | goto probe_err4; |
851 | 2917 | ||
852 | INIT_LIST_HEAD(&pdmac->desc_pool); | 2918 | INIT_LIST_HEAD(&pdmac->desc_pool); |
853 | spin_lock_init(&pdmac->pool_lock); | 2919 | spin_lock_init(&pdmac->pool_lock); |
@@ -904,7 +2970,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
904 | ret = dma_async_device_register(pd); | 2970 | ret = dma_async_device_register(pd); |
905 | if (ret) { | 2971 | if (ret) { |
906 | dev_err(&adev->dev, "unable to register DMAC\n"); | 2972 | dev_err(&adev->dev, "unable to register DMAC\n"); |
907 | goto probe_err4; | 2973 | goto probe_err5; |
908 | } | 2974 | } |
909 | 2975 | ||
910 | dev_info(&adev->dev, | 2976 | dev_info(&adev->dev, |
@@ -917,10 +2983,15 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
917 | 2983 | ||
918 | return 0; | 2984 | return 0; |
919 | 2985 | ||
920 | probe_err4: | 2986 | probe_err5: |
921 | pl330_del(pi); | 2987 | pl330_del(pi); |
922 | probe_err3: | 2988 | probe_err4: |
923 | free_irq(irq, pi); | 2989 | free_irq(irq, pi); |
2990 | probe_err3: | ||
2991 | #ifndef CONFIG_PM_RUNTIME | ||
2992 | clk_disable(pdmac->clk); | ||
2993 | #endif | ||
2994 | clk_put(pdmac->clk); | ||
924 | probe_err2: | 2995 | probe_err2: |
925 | iounmap(pi->base); | 2996 | iounmap(pi->base); |
926 | probe_err1: | 2997 | probe_err1: |