aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorBoojin Kim <boojin.kim@samsung.com>2011-12-26 04:49:52 -0500
committerVinod Koul <vinod.koul@linux.intel.com>2012-03-08 07:29:28 -0500
commitb7d861d9394534db94f0fb8f4b9d984f996d0528 (patch)
treee15cc41a70ae01dcedeef7391708252aa55bd07d /drivers
parent6d0d7e2d554a2d1a39ee9397c3136df7a53ff348 (diff)
DMA: PL330: Merge PL330 driver into drivers/dma/
Currently there were two parts of the DMAC PL330 driver: the part supporting the old-styled s3c-pl330, which has been merged into the drivers/dma/pl330.c driver. Actually, there is no reason to separate them now. Basically this patch merges arch/arm/common/pl330.c into the drivers/dma/pl330.c driver and removes useless exported symbols, externed functions and so on. The newer pl330 driver was tested on SMDKV310 and SMDK4212 boards. Cc: Jassi Brar <jassisinghbrar@gmail.com> Cc: Russell King <rmk+kernel@arm.linux.org.uk> Acked-by: Linus Walleij <linus.walleij@linaro.org> Acked-by: Vinod Koul <vinod.koul@intel.com> Signed-off-by: Boojin Kim <boojin.kim@samsung.com> Signed-off-by: Kukjin Kim <kgene.kim@samsung.com> Acked-by: Jassi Brar <jassisinghbrar@gmail.com> Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/dma/Kconfig1
-rw-r--r--drivers/dma/pl330.c2114
2 files changed, 2113 insertions, 2 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index f1a274994bb..65c61dba66d 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -201,7 +201,6 @@ config PL330_DMA
201 tristate "DMA API Driver for PL330" 201 tristate "DMA API Driver for PL330"
202 select DMA_ENGINE 202 select DMA_ENGINE
203 depends on ARM_AMBA 203 depends on ARM_AMBA
204 select PL330
205 help 204 help
206 Select if your platform has one or more PL330 DMACs. 205 Select if your platform has one or more PL330 DMACs.
207 You need to provide platform specific settings via 206 You need to provide platform specific settings via
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 84ebea9bc53..2e351f40fc4 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1,4 +1,6 @@
1/* linux/drivers/dma/pl330.c 1/*
2 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
2 * 4 *
3 * Copyright (C) 2010 Samsung Electronics Co. Ltd. 5 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
4 * Jaswinder Singh <jassi.brar@samsung.com> 6 * Jaswinder Singh <jassi.brar@samsung.com>
@@ -9,10 +11,15 @@
9 * (at your option) any later version. 11 * (at your option) any later version.
10 */ 12 */
11 13
14#include <linux/kernel.h>
12#include <linux/io.h> 15#include <linux/io.h>
13#include <linux/init.h> 16#include <linux/init.h>
14#include <linux/slab.h> 17#include <linux/slab.h>
15#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/string.h>
20#include <linux/delay.h>
21#include <linux/interrupt.h>
22#include <linux/dma-mapping.h>
16#include <linux/dmaengine.h> 23#include <linux/dmaengine.h>
17#include <linux/interrupt.h> 24#include <linux/interrupt.h>
18#include <linux/amba/bus.h> 25#include <linux/amba/bus.h>
@@ -21,8 +28,489 @@
21#include <linux/scatterlist.h> 28#include <linux/scatterlist.h>
22#include <linux/of.h> 29#include <linux/of.h>
23 30
31#define PL330_MAX_CHAN 8
32#define PL330_MAX_IRQS 32
33#define PL330_MAX_PERI 32
34
35enum pl330_srccachectrl {
36 SCCTRL0, /* Noncacheable and nonbufferable */
37 SCCTRL1, /* Bufferable only */
38 SCCTRL2, /* Cacheable, but do not allocate */
39 SCCTRL3, /* Cacheable and bufferable, but do not allocate */
40 SINVALID1,
41 SINVALID2,
42 SCCTRL6, /* Cacheable write-through, allocate on reads only */
43 SCCTRL7, /* Cacheable write-back, allocate on reads only */
44};
45
46enum pl330_dstcachectrl {
47 DCCTRL0, /* Noncacheable and nonbufferable */
48 DCCTRL1, /* Bufferable only */
49 DCCTRL2, /* Cacheable, but do not allocate */
50 DCCTRL3, /* Cacheable and bufferable, but do not allocate */
51 DINVALID1 = 8,
52 DINVALID2,
53 DCCTRL6, /* Cacheable write-through, allocate on writes only */
54 DCCTRL7, /* Cacheable write-back, allocate on writes only */
55};
56
57enum pl330_byteswap {
58 SWAP_NO,
59 SWAP_2,
60 SWAP_4,
61 SWAP_8,
62 SWAP_16,
63};
64
65enum pl330_reqtype {
66 MEMTOMEM,
67 MEMTODEV,
68 DEVTOMEM,
69 DEVTODEV,
70};
71
72/* Register and Bit field Definitions */
73#define DS 0x0
74#define DS_ST_STOP 0x0
75#define DS_ST_EXEC 0x1
76#define DS_ST_CMISS 0x2
77#define DS_ST_UPDTPC 0x3
78#define DS_ST_WFE 0x4
79#define DS_ST_ATBRR 0x5
80#define DS_ST_QBUSY 0x6
81#define DS_ST_WFP 0x7
82#define DS_ST_KILL 0x8
83#define DS_ST_CMPLT 0x9
84#define DS_ST_FLTCMP 0xe
85#define DS_ST_FAULT 0xf
86
87#define DPC 0x4
88#define INTEN 0x20
89#define ES 0x24
90#define INTSTATUS 0x28
91#define INTCLR 0x2c
92#define FSM 0x30
93#define FSC 0x34
94#define FTM 0x38
95
96#define _FTC 0x40
97#define FTC(n) (_FTC + (n)*0x4)
98
99#define _CS 0x100
100#define CS(n) (_CS + (n)*0x8)
101#define CS_CNS (1 << 21)
102
103#define _CPC 0x104
104#define CPC(n) (_CPC + (n)*0x8)
105
106#define _SA 0x400
107#define SA(n) (_SA + (n)*0x20)
108
109#define _DA 0x404
110#define DA(n) (_DA + (n)*0x20)
111
112#define _CC 0x408
113#define CC(n) (_CC + (n)*0x20)
114
115#define CC_SRCINC (1 << 0)
116#define CC_DSTINC (1 << 14)
117#define CC_SRCPRI (1 << 8)
118#define CC_DSTPRI (1 << 22)
119#define CC_SRCNS (1 << 9)
120#define CC_DSTNS (1 << 23)
121#define CC_SRCIA (1 << 10)
122#define CC_DSTIA (1 << 24)
123#define CC_SRCBRSTLEN_SHFT 4
124#define CC_DSTBRSTLEN_SHFT 18
125#define CC_SRCBRSTSIZE_SHFT 1
126#define CC_DSTBRSTSIZE_SHFT 15
127#define CC_SRCCCTRL_SHFT 11
128#define CC_SRCCCTRL_MASK 0x7
129#define CC_DSTCCTRL_SHFT 25
130#define CC_DRCCCTRL_MASK 0x7
131#define CC_SWAP_SHFT 28
132
133#define _LC0 0x40c
134#define LC0(n) (_LC0 + (n)*0x20)
135
136#define _LC1 0x410
137#define LC1(n) (_LC1 + (n)*0x20)
138
139#define DBGSTATUS 0xd00
140#define DBG_BUSY (1 << 0)
141
142#define DBGCMD 0xd04
143#define DBGINST0 0xd08
144#define DBGINST1 0xd0c
145
146#define CR0 0xe00
147#define CR1 0xe04
148#define CR2 0xe08
149#define CR3 0xe0c
150#define CR4 0xe10
151#define CRD 0xe14
152
153#define PERIPH_ID 0xfe0
154#define PCELL_ID 0xff0
155
156#define CR0_PERIPH_REQ_SET (1 << 0)
157#define CR0_BOOT_EN_SET (1 << 1)
158#define CR0_BOOT_MAN_NS (1 << 2)
159#define CR0_NUM_CHANS_SHIFT 4
160#define CR0_NUM_CHANS_MASK 0x7
161#define CR0_NUM_PERIPH_SHIFT 12
162#define CR0_NUM_PERIPH_MASK 0x1f
163#define CR0_NUM_EVENTS_SHIFT 17
164#define CR0_NUM_EVENTS_MASK 0x1f
165
166#define CR1_ICACHE_LEN_SHIFT 0
167#define CR1_ICACHE_LEN_MASK 0x7
168#define CR1_NUM_ICACHELINES_SHIFT 4
169#define CR1_NUM_ICACHELINES_MASK 0xf
170
171#define CRD_DATA_WIDTH_SHIFT 0
172#define CRD_DATA_WIDTH_MASK 0x7
173#define CRD_WR_CAP_SHIFT 4
174#define CRD_WR_CAP_MASK 0x7
175#define CRD_WR_Q_DEP_SHIFT 8
176#define CRD_WR_Q_DEP_MASK 0xf
177#define CRD_RD_CAP_SHIFT 12
178#define CRD_RD_CAP_MASK 0x7
179#define CRD_RD_Q_DEP_SHIFT 16
180#define CRD_RD_Q_DEP_MASK 0xf
181#define CRD_DATA_BUFF_SHIFT 20
182#define CRD_DATA_BUFF_MASK 0x3ff
183
184#define PART 0x330
185#define DESIGNER 0x41
186#define REVISION 0x0
187#define INTEG_CFG 0x0
188#define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12))
189
190#define PCELL_ID_VAL 0xb105f00d
191
192#define PL330_STATE_STOPPED (1 << 0)
193#define PL330_STATE_EXECUTING (1 << 1)
194#define PL330_STATE_WFE (1 << 2)
195#define PL330_STATE_FAULTING (1 << 3)
196#define PL330_STATE_COMPLETING (1 << 4)
197#define PL330_STATE_WFP (1 << 5)
198#define PL330_STATE_KILLING (1 << 6)
199#define PL330_STATE_FAULT_COMPLETING (1 << 7)
200#define PL330_STATE_CACHEMISS (1 << 8)
201#define PL330_STATE_UPDTPC (1 << 9)
202#define PL330_STATE_ATBARRIER (1 << 10)
203#define PL330_STATE_QUEUEBUSY (1 << 11)
204#define PL330_STATE_INVALID (1 << 15)
205
206#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
207 | PL330_STATE_WFE | PL330_STATE_FAULTING)
208
209#define CMD_DMAADDH 0x54
210#define CMD_DMAEND 0x00
211#define CMD_DMAFLUSHP 0x35
212#define CMD_DMAGO 0xa0
213#define CMD_DMALD 0x04
214#define CMD_DMALDP 0x25
215#define CMD_DMALP 0x20
216#define CMD_DMALPEND 0x28
217#define CMD_DMAKILL 0x01
218#define CMD_DMAMOV 0xbc
219#define CMD_DMANOP 0x18
220#define CMD_DMARMB 0x12
221#define CMD_DMASEV 0x34
222#define CMD_DMAST 0x08
223#define CMD_DMASTP 0x29
224#define CMD_DMASTZ 0x0c
225#define CMD_DMAWFE 0x36
226#define CMD_DMAWFP 0x30
227#define CMD_DMAWMB 0x13
228
229#define SZ_DMAADDH 3
230#define SZ_DMAEND 1
231#define SZ_DMAFLUSHP 2
232#define SZ_DMALD 1
233#define SZ_DMALDP 2
234#define SZ_DMALP 2
235#define SZ_DMALPEND 2
236#define SZ_DMAKILL 1
237#define SZ_DMAMOV 6
238#define SZ_DMANOP 1
239#define SZ_DMARMB 1
240#define SZ_DMASEV 2
241#define SZ_DMAST 1
242#define SZ_DMASTP 2
243#define SZ_DMASTZ 1
244#define SZ_DMAWFE 2
245#define SZ_DMAWFP 2
246#define SZ_DMAWMB 1
247#define SZ_DMAGO 6
248
249#define BRST_LEN(ccr) ((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
250#define BRST_SIZE(ccr) (1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))
251
252#define BYTE_TO_BURST(b, ccr) ((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
253#define BURST_TO_BYTE(c, ccr) ((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
254
255/*
256 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
257 * at 1byte/burst for P<->M and M<->M respectively.
258 * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req
259 * should be enough for P<->M and M<->M respectively.
260 */
261#define MCODE_BUFF_PER_REQ 256
262
263/* If the _pl330_req is available to the client */
264#define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)
265
266/* Use this _only_ to wait on transient states */
267#define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax();
268
269#ifdef PL330_DEBUG_MCGEN
270static unsigned cmd_line;
271#define PL330_DBGCMD_DUMP(off, x...) do { \
272 printk("%x:", cmd_line); \
273 printk(x); \
274 cmd_line += off; \
275 } while (0)
276#define PL330_DBGMC_START(addr) (cmd_line = addr)
277#else
278#define PL330_DBGCMD_DUMP(off, x...) do {} while (0)
279#define PL330_DBGMC_START(addr) do {} while (0)
280#endif
281
282/* The number of default descriptors */
24#define NR_DEFAULT_DESC 16 283#define NR_DEFAULT_DESC 16
25 284
285/* Populated by the PL330 core driver for DMA API driver's info */
286struct pl330_config {
287 u32 periph_id;
288 u32 pcell_id;
289#define DMAC_MODE_NS (1 << 0)
290 unsigned int mode;
291 unsigned int data_bus_width:10; /* In number of bits */
292 unsigned int data_buf_dep:10;
293 unsigned int num_chan:4;
294 unsigned int num_peri:6;
295 u32 peri_ns;
296 unsigned int num_events:6;
297 u32 irq_ns;
298};
299
300/* Handle to the DMAC provided to the PL330 core */
301struct pl330_info {
302 /* Owning device */
303 struct device *dev;
304 /* Size of MicroCode buffers for each channel. */
305 unsigned mcbufsz;
306 /* ioremap'ed address of PL330 registers. */
307 void __iomem *base;
308 /* Client can freely use it. */
309 void *client_data;
310 /* PL330 core data, Client must not touch it. */
311 void *pl330_data;
312 /* Populated by the PL330 core driver during pl330_add */
313 struct pl330_config pcfg;
314 /*
315 * If the DMAC has some reset mechanism, then the
316 * client may want to provide pointer to the method.
317 */
318 void (*dmac_reset)(struct pl330_info *pi);
319};
320
321/**
322 * Request Configuration.
323 * The PL330 core does not modify this and uses the last
324 * working configuration if the request doesn't provide any.
325 *
326 * The Client may want to provide this info only for the
327 * first request and a request with new settings.
328 */
329struct pl330_reqcfg {
330 /* Address Incrementing */
331 unsigned dst_inc:1;
332 unsigned src_inc:1;
333
334 /*
335 * For now, the SRC & DST protection levels
336 * and burst size/length are assumed same.
337 */
338 bool nonsecure;
339 bool privileged;
340 bool insnaccess;
341 unsigned brst_len:5;
342 unsigned brst_size:3; /* in power of 2 */
343
344 enum pl330_dstcachectrl dcctl;
345 enum pl330_srccachectrl scctl;
346 enum pl330_byteswap swap;
347};
348
349/*
350 * One cycle of DMAC operation.
351 * There may be more than one xfer in a request.
352 */
353struct pl330_xfer {
354 u32 src_addr;
355 u32 dst_addr;
356 /* Size to xfer */
357 u32 bytes;
358 /*
359 * Pointer to next xfer in the list.
360 * The last xfer in the req must point to NULL.
361 */
362 struct pl330_xfer *next;
363};
364
365/* The xfer callbacks are made with one of these arguments. */
366enum pl330_op_err {
367 /* The all xfers in the request were success. */
368 PL330_ERR_NONE,
369 /* If req aborted due to global error. */
370 PL330_ERR_ABORT,
371 /* If req failed due to problem with Channel. */
372 PL330_ERR_FAIL,
373};
374
375/* A request defining Scatter-Gather List ending with NULL xfer. */
376struct pl330_req {
377 enum pl330_reqtype rqtype;
378 /* Index of peripheral for the xfer. */
379 unsigned peri:5;
380 /* Unique token for this xfer, set by the client. */
381 void *token;
382 /* Callback to be called after xfer. */
383 void (*xfer_cb)(void *token, enum pl330_op_err err);
384 /* If NULL, req will be done at last set parameters. */
385 struct pl330_reqcfg *cfg;
386 /* Pointer to first xfer in the request. */
387 struct pl330_xfer *x;
388};
389
390/*
391 * To know the status of the channel and DMAC, the client
392 * provides a pointer to this structure. The PL330 core
393 * fills it with current information.
394 */
395struct pl330_chanstatus {
396 /*
397 * If the DMAC engine halted due to some error,
398 * the client should remove-add DMAC.
399 */
400 bool dmac_halted;
401 /*
402 * If channel is halted due to some error,
403 * the client should ABORT/FLUSH and START the channel.
404 */
405 bool faulting;
406 /* Location of last load */
407 u32 src_addr;
408 /* Location of last store */
409 u32 dst_addr;
410 /*
411 * Pointer to the currently active req, NULL if channel is
412 * inactive, even though the requests may be present.
413 */
414 struct pl330_req *top_req;
415 /* Pointer to req waiting second in the queue if any. */
416 struct pl330_req *wait_req;
417};
418
419enum pl330_chan_op {
420 /* Start the channel */
421 PL330_OP_START,
422 /* Abort the active xfer */
423 PL330_OP_ABORT,
424 /* Stop xfer and flush queue */
425 PL330_OP_FLUSH,
426};
427
428struct _xfer_spec {
429 u32 ccr;
430 struct pl330_req *r;
431 struct pl330_xfer *x;
432};
433
434enum dmamov_dst {
435 SAR = 0,
436 CCR,
437 DAR,
438};
439
440enum pl330_dst {
441 SRC = 0,
442 DST,
443};
444
445enum pl330_cond {
446 SINGLE,
447 BURST,
448 ALWAYS,
449};
450
451struct _pl330_req {
452 u32 mc_bus;
453 void *mc_cpu;
454 /* Number of bytes taken to setup MC for the req */
455 u32 mc_len;
456 struct pl330_req *r;
457 /* Hook to attach to DMAC's list of reqs with due callback */
458 struct list_head rqd;
459};
460
461/* ToBeDone for tasklet */
462struct _pl330_tbd {
463 bool reset_dmac;
464 bool reset_mngr;
465 u8 reset_chan;
466};
467
468/* A DMAC Thread */
469struct pl330_thread {
470 u8 id;
471 int ev;
472 /* If the channel is not yet acquired by any client */
473 bool free;
474 /* Parent DMAC */
475 struct pl330_dmac *dmac;
476 /* Only two at a time */
477 struct _pl330_req req[2];
478 /* Index of the last enqueued request */
479 unsigned lstenq;
480 /* Index of the last submitted request or -1 if the DMA is stopped */
481 int req_running;
482};
483
484enum pl330_dmac_state {
485 UNINIT,
486 INIT,
487 DYING,
488};
489
490/* A DMAC */
491struct pl330_dmac {
492 spinlock_t lock;
493 /* Holds list of reqs with due callbacks */
494 struct list_head req_done;
495 /* Pointer to platform specific stuff */
496 struct pl330_info *pinfo;
497 /* Maximum possible events/irqs */
498 int events[32];
499 /* BUS address of MicroCode buffer */
500 u32 mcode_bus;
501 /* CPU address of MicroCode buffer */
502 void *mcode_cpu;
503 /* List of all Channel threads */
504 struct pl330_thread *channels;
505 /* Pointer to the MANAGER thread */
506 struct pl330_thread *manager;
507 /* To handle bad news in interrupt */
508 struct tasklet_struct tasks;
509 struct _pl330_tbd dmac_tbd;
510 /* State of DMAC operation */
511 enum pl330_dmac_state state;
512};
513
26enum desc_status { 514enum desc_status {
27 /* In the DMAC pool */ 515 /* In the DMAC pool */
28 FREE, 516 FREE,
@@ -117,6 +605,1630 @@ struct dma_pl330_desc {
117 struct dma_pl330_chan *pchan; 605 struct dma_pl330_chan *pchan;
118}; 606};
119 607
608static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
609{
610 if (r && r->xfer_cb)
611 r->xfer_cb(r->token, err);
612}
613
614static inline bool _queue_empty(struct pl330_thread *thrd)
615{
616 return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
617 ? true : false;
618}
619
620static inline bool _queue_full(struct pl330_thread *thrd)
621{
622 return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
623 ? false : true;
624}
625
626static inline bool is_manager(struct pl330_thread *thrd)
627{
628 struct pl330_dmac *pl330 = thrd->dmac;
629
630 /* MANAGER is indexed at the end */
631 if (thrd->id == pl330->pinfo->pcfg.num_chan)
632 return true;
633 else
634 return false;
635}
636
637/* If manager of the thread is in Non-Secure mode */
638static inline bool _manager_ns(struct pl330_thread *thrd)
639{
640 struct pl330_dmac *pl330 = thrd->dmac;
641
642 return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
643}
644
645static inline u32 get_id(struct pl330_info *pi, u32 off)
646{
647 void __iomem *regs = pi->base;
648 u32 id = 0;
649
650 id |= (readb(regs + off + 0x0) << 0);
651 id |= (readb(regs + off + 0x4) << 8);
652 id |= (readb(regs + off + 0x8) << 16);
653 id |= (readb(regs + off + 0xc) << 24);
654
655 return id;
656}
657
658static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
659 enum pl330_dst da, u16 val)
660{
661 if (dry_run)
662 return SZ_DMAADDH;
663
664 buf[0] = CMD_DMAADDH;
665 buf[0] |= (da << 1);
666 *((u16 *)&buf[1]) = val;
667
668 PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
669 da == 1 ? "DA" : "SA", val);
670
671 return SZ_DMAADDH;
672}
673
674static inline u32 _emit_END(unsigned dry_run, u8 buf[])
675{
676 if (dry_run)
677 return SZ_DMAEND;
678
679 buf[0] = CMD_DMAEND;
680
681 PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");
682
683 return SZ_DMAEND;
684}
685
686static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
687{
688 if (dry_run)
689 return SZ_DMAFLUSHP;
690
691 buf[0] = CMD_DMAFLUSHP;
692
693 peri &= 0x1f;
694 peri <<= 3;
695 buf[1] = peri;
696
697 PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);
698
699 return SZ_DMAFLUSHP;
700}
701
702static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
703{
704 if (dry_run)
705 return SZ_DMALD;
706
707 buf[0] = CMD_DMALD;
708
709 if (cond == SINGLE)
710 buf[0] |= (0 << 1) | (1 << 0);
711 else if (cond == BURST)
712 buf[0] |= (1 << 1) | (1 << 0);
713
714 PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
715 cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
716
717 return SZ_DMALD;
718}
719
720static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
721 enum pl330_cond cond, u8 peri)
722{
723 if (dry_run)
724 return SZ_DMALDP;
725
726 buf[0] = CMD_DMALDP;
727
728 if (cond == BURST)
729 buf[0] |= (1 << 1);
730
731 peri &= 0x1f;
732 peri <<= 3;
733 buf[1] = peri;
734
735 PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
736 cond == SINGLE ? 'S' : 'B', peri >> 3);
737
738 return SZ_DMALDP;
739}
740
741static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
742 unsigned loop, u8 cnt)
743{
744 if (dry_run)
745 return SZ_DMALP;
746
747 buf[0] = CMD_DMALP;
748
749 if (loop)
750 buf[0] |= (1 << 1);
751
752 cnt--; /* DMAC increments by 1 internally */
753 buf[1] = cnt;
754
755 PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);
756
757 return SZ_DMALP;
758}
759
760struct _arg_LPEND {
761 enum pl330_cond cond;
762 bool forever;
763 unsigned loop;
764 u8 bjump;
765};
766
767static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
768 const struct _arg_LPEND *arg)
769{
770 enum pl330_cond cond = arg->cond;
771 bool forever = arg->forever;
772 unsigned loop = arg->loop;
773 u8 bjump = arg->bjump;
774
775 if (dry_run)
776 return SZ_DMALPEND;
777
778 buf[0] = CMD_DMALPEND;
779
780 if (loop)
781 buf[0] |= (1 << 2);
782
783 if (!forever)
784 buf[0] |= (1 << 4);
785
786 if (cond == SINGLE)
787 buf[0] |= (0 << 1) | (1 << 0);
788 else if (cond == BURST)
789 buf[0] |= (1 << 1) | (1 << 0);
790
791 buf[1] = bjump;
792
793 PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
794 forever ? "FE" : "END",
795 cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
796 loop ? '1' : '0',
797 bjump);
798
799 return SZ_DMALPEND;
800}
801
802static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
803{
804 if (dry_run)
805 return SZ_DMAKILL;
806
807 buf[0] = CMD_DMAKILL;
808
809 return SZ_DMAKILL;
810}
811
812static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
813 enum dmamov_dst dst, u32 val)
814{
815 if (dry_run)
816 return SZ_DMAMOV;
817
818 buf[0] = CMD_DMAMOV;
819 buf[1] = dst;
820 *((u32 *)&buf[2]) = val;
821
822 PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
823 dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);
824
825 return SZ_DMAMOV;
826}
827
828static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
829{
830 if (dry_run)
831 return SZ_DMANOP;
832
833 buf[0] = CMD_DMANOP;
834
835 PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");
836
837 return SZ_DMANOP;
838}
839
840static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
841{
842 if (dry_run)
843 return SZ_DMARMB;
844
845 buf[0] = CMD_DMARMB;
846
847 PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");
848
849 return SZ_DMARMB;
850}
851
852static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
853{
854 if (dry_run)
855 return SZ_DMASEV;
856
857 buf[0] = CMD_DMASEV;
858
859 ev &= 0x1f;
860 ev <<= 3;
861 buf[1] = ev;
862
863 PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);
864
865 return SZ_DMASEV;
866}
867
868static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
869{
870 if (dry_run)
871 return SZ_DMAST;
872
873 buf[0] = CMD_DMAST;
874
875 if (cond == SINGLE)
876 buf[0] |= (0 << 1) | (1 << 0);
877 else if (cond == BURST)
878 buf[0] |= (1 << 1) | (1 << 0);
879
880 PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
881 cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
882
883 return SZ_DMAST;
884}
885
886static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
887 enum pl330_cond cond, u8 peri)
888{
889 if (dry_run)
890 return SZ_DMASTP;
891
892 buf[0] = CMD_DMASTP;
893
894 if (cond == BURST)
895 buf[0] |= (1 << 1);
896
897 peri &= 0x1f;
898 peri <<= 3;
899 buf[1] = peri;
900
901 PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
902 cond == SINGLE ? 'S' : 'B', peri >> 3);
903
904 return SZ_DMASTP;
905}
906
907static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
908{
909 if (dry_run)
910 return SZ_DMASTZ;
911
912 buf[0] = CMD_DMASTZ;
913
914 PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");
915
916 return SZ_DMASTZ;
917}
918
919static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
920 unsigned invalidate)
921{
922 if (dry_run)
923 return SZ_DMAWFE;
924
925 buf[0] = CMD_DMAWFE;
926
927 ev &= 0x1f;
928 ev <<= 3;
929 buf[1] = ev;
930
931 if (invalidate)
932 buf[1] |= (1 << 1);
933
934 PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
935 ev >> 3, invalidate ? ", I" : "");
936
937 return SZ_DMAWFE;
938}
939
940static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
941 enum pl330_cond cond, u8 peri)
942{
943 if (dry_run)
944 return SZ_DMAWFP;
945
946 buf[0] = CMD_DMAWFP;
947
948 if (cond == SINGLE)
949 buf[0] |= (0 << 1) | (0 << 0);
950 else if (cond == BURST)
951 buf[0] |= (1 << 1) | (0 << 0);
952 else
953 buf[0] |= (0 << 1) | (1 << 0);
954
955 peri &= 0x1f;
956 peri <<= 3;
957 buf[1] = peri;
958
959 PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
960 cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);
961
962 return SZ_DMAWFP;
963}
964
965static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
966{
967 if (dry_run)
968 return SZ_DMAWMB;
969
970 buf[0] = CMD_DMAWMB;
971
972 PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");
973
974 return SZ_DMAWMB;
975}
976
977struct _arg_GO {
978 u8 chan;
979 u32 addr;
980 unsigned ns;
981};
982
983static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
984 const struct _arg_GO *arg)
985{
986 u8 chan = arg->chan;
987 u32 addr = arg->addr;
988 unsigned ns = arg->ns;
989
990 if (dry_run)
991 return SZ_DMAGO;
992
993 buf[0] = CMD_DMAGO;
994 buf[0] |= (ns << 1);
995
996 buf[1] = chan & 0x7;
997
998 *((u32 *)&buf[2]) = addr;
999
1000 return SZ_DMAGO;
1001}
1002
1003#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
1004
1005/* Returns Time-Out */
1006static bool _until_dmac_idle(struct pl330_thread *thrd)
1007{
1008 void __iomem *regs = thrd->dmac->pinfo->base;
1009 unsigned long loops = msecs_to_loops(5);
1010
1011 do {
1012 /* Until Manager is Idle */
1013 if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
1014 break;
1015
1016 cpu_relax();
1017 } while (--loops);
1018
1019 if (!loops)
1020 return true;
1021
1022 return false;
1023}
1024
1025static inline void _execute_DBGINSN(struct pl330_thread *thrd,
1026 u8 insn[], bool as_manager)
1027{
1028 void __iomem *regs = thrd->dmac->pinfo->base;
1029 u32 val;
1030
1031 val = (insn[0] << 16) | (insn[1] << 24);
1032 if (!as_manager) {
1033 val |= (1 << 0);
1034 val |= (thrd->id << 8); /* Channel Number */
1035 }
1036 writel(val, regs + DBGINST0);
1037
1038 val = *((u32 *)&insn[2]);
1039 writel(val, regs + DBGINST1);
1040
1041 /* If timed out due to halted state-machine */
1042 if (_until_dmac_idle(thrd)) {
1043 dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");
1044 return;
1045 }
1046
1047 /* Get going */
1048 writel(0, regs + DBGCMD);
1049}
1050
1051/*
1052 * Mark a _pl330_req as free.
1053 * We do it by writing DMAEND as the first instruction
1054 * because no valid request is going to have DMAEND as
1055 * its first instruction to execute.
1056 */
1057static void mark_free(struct pl330_thread *thrd, int idx)
1058{
1059 struct _pl330_req *req = &thrd->req[idx];
1060
1061 _emit_END(0, req->mc_cpu);
1062 req->mc_len = 0;
1063
1064 thrd->req_running = -1;
1065}
1066
1067static inline u32 _state(struct pl330_thread *thrd)
1068{
1069 void __iomem *regs = thrd->dmac->pinfo->base;
1070 u32 val;
1071
1072 if (is_manager(thrd))
1073 val = readl(regs + DS) & 0xf;
1074 else
1075 val = readl(regs + CS(thrd->id)) & 0xf;
1076
1077 switch (val) {
1078 case DS_ST_STOP:
1079 return PL330_STATE_STOPPED;
1080 case DS_ST_EXEC:
1081 return PL330_STATE_EXECUTING;
1082 case DS_ST_CMISS:
1083 return PL330_STATE_CACHEMISS;
1084 case DS_ST_UPDTPC:
1085 return PL330_STATE_UPDTPC;
1086 case DS_ST_WFE:
1087 return PL330_STATE_WFE;
1088 case DS_ST_FAULT:
1089 return PL330_STATE_FAULTING;
1090 case DS_ST_ATBRR:
1091 if (is_manager(thrd))
1092 return PL330_STATE_INVALID;
1093 else
1094 return PL330_STATE_ATBARRIER;
1095 case DS_ST_QBUSY:
1096 if (is_manager(thrd))
1097 return PL330_STATE_INVALID;
1098 else
1099 return PL330_STATE_QUEUEBUSY;
1100 case DS_ST_WFP:
1101 if (is_manager(thrd))
1102 return PL330_STATE_INVALID;
1103 else
1104 return PL330_STATE_WFP;
1105 case DS_ST_KILL:
1106 if (is_manager(thrd))
1107 return PL330_STATE_INVALID;
1108 else
1109 return PL330_STATE_KILLING;
1110 case DS_ST_CMPLT:
1111 if (is_manager(thrd))
1112 return PL330_STATE_INVALID;
1113 else
1114 return PL330_STATE_COMPLETING;
1115 case DS_ST_FLTCMP:
1116 if (is_manager(thrd))
1117 return PL330_STATE_INVALID;
1118 else
1119 return PL330_STATE_FAULT_COMPLETING;
1120 default:
1121 return PL330_STATE_INVALID;
1122 }
1123}
1124
1125static void _stop(struct pl330_thread *thrd)
1126{
1127 void __iomem *regs = thrd->dmac->pinfo->base;
1128 u8 insn[6] = {0, 0, 0, 0, 0, 0};
1129
1130 if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
1131 UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
1132
1133 /* Return if nothing needs to be done */
1134 if (_state(thrd) == PL330_STATE_COMPLETING
1135 || _state(thrd) == PL330_STATE_KILLING
1136 || _state(thrd) == PL330_STATE_STOPPED)
1137 return;
1138
1139 _emit_KILL(0, insn);
1140
1141 /* Stop generating interrupts for SEV */
1142 writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);
1143
1144 _execute_DBGINSN(thrd, insn, is_manager(thrd));
1145}
1146
1147/* Start doing req 'idx' of thread 'thrd' */
1148static bool _trigger(struct pl330_thread *thrd)
1149{
1150 void __iomem *regs = thrd->dmac->pinfo->base;
1151 struct _pl330_req *req;
1152 struct pl330_req *r;
1153 struct _arg_GO go;
1154 unsigned ns;
1155 u8 insn[6] = {0, 0, 0, 0, 0, 0};
1156 int idx;
1157
1158 /* Return if already ACTIVE */
1159 if (_state(thrd) != PL330_STATE_STOPPED)
1160 return true;
1161
1162 idx = 1 - thrd->lstenq;
1163 if (!IS_FREE(&thrd->req[idx]))
1164 req = &thrd->req[idx];
1165 else {
1166 idx = thrd->lstenq;
1167 if (!IS_FREE(&thrd->req[idx]))
1168 req = &thrd->req[idx];
1169 else
1170 req = NULL;
1171 }
1172
1173 /* Return if no request */
1174 if (!req || !req->r)
1175 return true;
1176
1177 r = req->r;
1178
1179 if (r->cfg)
1180 ns = r->cfg->nonsecure ? 1 : 0;
1181 else if (readl(regs + CS(thrd->id)) & CS_CNS)
1182 ns = 1;
1183 else
1184 ns = 0;
1185
1186 /* See 'Abort Sources' point-4 at Page 2-25 */
1187 if (_manager_ns(thrd) && !ns)
1188 dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
1189 __func__, __LINE__);
1190
1191 go.chan = thrd->id;
1192 go.addr = req->mc_bus;
1193 go.ns = ns;
1194 _emit_GO(0, insn, &go);
1195
1196 /* Set to generate interrupts for SEV */
1197 writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);
1198
1199 /* Only manager can execute GO */
1200 _execute_DBGINSN(thrd, insn, true);
1201
1202 thrd->req_running = idx;
1203
1204 return true;
1205}
1206
1207static bool _start(struct pl330_thread *thrd)
1208{
1209 switch (_state(thrd)) {
1210 case PL330_STATE_FAULT_COMPLETING:
1211 UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
1212
1213 if (_state(thrd) == PL330_STATE_KILLING)
1214 UNTIL(thrd, PL330_STATE_STOPPED)
1215
1216 case PL330_STATE_FAULTING:
1217 _stop(thrd);
1218
1219 case PL330_STATE_KILLING:
1220 case PL330_STATE_COMPLETING:
1221 UNTIL(thrd, PL330_STATE_STOPPED)
1222
1223 case PL330_STATE_STOPPED:
1224 return _trigger(thrd);
1225
1226 case PL330_STATE_WFP:
1227 case PL330_STATE_QUEUEBUSY:
1228 case PL330_STATE_ATBARRIER:
1229 case PL330_STATE_UPDTPC:
1230 case PL330_STATE_CACHEMISS:
1231 case PL330_STATE_EXECUTING:
1232 return true;
1233
1234 case PL330_STATE_WFE: /* For RESUME, nothing yet */
1235 default:
1236 return false;
1237 }
1238}
1239
1240static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
1241 const struct _xfer_spec *pxs, int cyc)
1242{
1243 int off = 0;
1244
1245 while (cyc--) {
1246 off += _emit_LD(dry_run, &buf[off], ALWAYS);
1247 off += _emit_RMB(dry_run, &buf[off]);
1248 off += _emit_ST(dry_run, &buf[off], ALWAYS);
1249 off += _emit_WMB(dry_run, &buf[off]);
1250 }
1251
1252 return off;
1253}
1254
1255static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
1256 const struct _xfer_spec *pxs, int cyc)
1257{
1258 int off = 0;
1259
1260 while (cyc--) {
1261 off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
1262 off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
1263 off += _emit_ST(dry_run, &buf[off], ALWAYS);
1264 off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
1265 }
1266
1267 return off;
1268}
1269
1270static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
1271 const struct _xfer_spec *pxs, int cyc)
1272{
1273 int off = 0;
1274
1275 while (cyc--) {
1276 off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
1277 off += _emit_LD(dry_run, &buf[off], ALWAYS);
1278 off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
1279 off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
1280 }
1281
1282 return off;
1283}
1284
1285static int _bursts(unsigned dry_run, u8 buf[],
1286 const struct _xfer_spec *pxs, int cyc)
1287{
1288 int off = 0;
1289
1290 switch (pxs->r->rqtype) {
1291 case MEMTODEV:
1292 off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
1293 break;
1294 case DEVTOMEM:
1295 off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
1296 break;
1297 case MEMTOMEM:
1298 off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
1299 break;
1300 default:
1301 off += 0x40000000; /* Scare off the Client */
1302 break;
1303 }
1304
1305 return off;
1306}
1307
1308/* Returns bytes consumed and updates bursts */
1309static inline int _loop(unsigned dry_run, u8 buf[],
1310 unsigned long *bursts, const struct _xfer_spec *pxs)
1311{
1312 int cyc, cycmax, szlp, szlpend, szbrst, off;
1313 unsigned lcnt0, lcnt1, ljmp0, ljmp1;
1314 struct _arg_LPEND lpend;
1315
1316 /* Max iterations possible in DMALP is 256 */
1317 if (*bursts >= 256*256) {
1318 lcnt1 = 256;
1319 lcnt0 = 256;
1320 cyc = *bursts / lcnt1 / lcnt0;
1321 } else if (*bursts > 256) {
1322 lcnt1 = 256;
1323 lcnt0 = *bursts / lcnt1;
1324 cyc = 1;
1325 } else {
1326 lcnt1 = *bursts;
1327 lcnt0 = 0;
1328 cyc = 1;
1329 }
1330
1331 szlp = _emit_LP(1, buf, 0, 0);
1332 szbrst = _bursts(1, buf, pxs, 1);
1333
1334 lpend.cond = ALWAYS;
1335 lpend.forever = false;
1336 lpend.loop = 0;
1337 lpend.bjump = 0;
1338 szlpend = _emit_LPEND(1, buf, &lpend);
1339
1340 if (lcnt0) {
1341 szlp *= 2;
1342 szlpend *= 2;
1343 }
1344
1345 /*
1346 * Max bursts that we can unroll due to limit on the
1347 * size of backward jump that can be encoded in DMALPEND
1348 * which is 8-bits and hence 255
1349 */
1350 cycmax = (255 - (szlp + szlpend)) / szbrst;
1351
1352 cyc = (cycmax < cyc) ? cycmax : cyc;
1353
1354 off = 0;
1355
1356 if (lcnt0) {
1357 off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
1358 ljmp0 = off;
1359 }
1360
1361 off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
1362 ljmp1 = off;
1363
1364 off += _bursts(dry_run, &buf[off], pxs, cyc);
1365
1366 lpend.cond = ALWAYS;
1367 lpend.forever = false;
1368 lpend.loop = 1;
1369 lpend.bjump = off - ljmp1;
1370 off += _emit_LPEND(dry_run, &buf[off], &lpend);
1371
1372 if (lcnt0) {
1373 lpend.cond = ALWAYS;
1374 lpend.forever = false;
1375 lpend.loop = 0;
1376 lpend.bjump = off - ljmp0;
1377 off += _emit_LPEND(dry_run, &buf[off], &lpend);
1378 }
1379
1380 *bursts = lcnt1 * cyc;
1381 if (lcnt0)
1382 *bursts *= lcnt0;
1383
1384 return off;
1385}
1386
1387static inline int _setup_loops(unsigned dry_run, u8 buf[],
1388 const struct _xfer_spec *pxs)
1389{
1390 struct pl330_xfer *x = pxs->x;
1391 u32 ccr = pxs->ccr;
1392 unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
1393 int off = 0;
1394
1395 while (bursts) {
1396 c = bursts;
1397 off += _loop(dry_run, &buf[off], &c, pxs);
1398 bursts -= c;
1399 }
1400
1401 return off;
1402}
1403
1404static inline int _setup_xfer(unsigned dry_run, u8 buf[],
1405 const struct _xfer_spec *pxs)
1406{
1407 struct pl330_xfer *x = pxs->x;
1408 int off = 0;
1409
1410 /* DMAMOV SAR, x->src_addr */
1411 off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
1412 /* DMAMOV DAR, x->dst_addr */
1413 off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);
1414
1415 /* Setup Loop(s) */
1416 off += _setup_loops(dry_run, &buf[off], pxs);
1417
1418 return off;
1419}
1420
1421/*
1422 * A req is a sequence of one or more xfer units.
1423 * Returns the number of bytes taken to setup the MC for the req.
1424 */
/*
 * A req is a sequence of one or more xfer units.
 * Returns the number of bytes taken to setup the MC for the req,
 * or -EINVAL if any xfer length is not burst-aligned.
 *
 * With dry_run set, nothing is written to the microcode buffer;
 * only the size is computed (used to pre-validate against mcbufsz).
 */
static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
		unsigned index, struct _xfer_spec *pxs)
{
	struct _pl330_req *req = &thrd->req[index];
	struct pl330_xfer *x;
	u8 *buf = req->mc_cpu;
	int off = 0;

	PL330_DBGMC_START(req->mc_bus);

	/* DMAMOV CCR, ccr */
	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

	/* Walk the linked list of xfer units belonging to this req */
	x = pxs->r->x;
	do {
		/* Error if xfer length is not aligned at burst size */
		if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
			return -EINVAL;

		pxs->x = x;
		off += _setup_xfer(dry_run, &buf[off], pxs);

		x = x->next;
	} while (x);

	/* DMASEV peripheral/event - signals completion via thrd's event */
	off += _emit_SEV(dry_run, &buf[off], thrd->ev);
	/* DMAEND */
	off += _emit_END(dry_run, &buf[off]);

	return off;
}
1457
1458static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
1459{
1460 u32 ccr = 0;
1461
1462 if (rqc->src_inc)
1463 ccr |= CC_SRCINC;
1464
1465 if (rqc->dst_inc)
1466 ccr |= CC_DSTINC;
1467
1468 /* We set same protection levels for Src and DST for now */
1469 if (rqc->privileged)
1470 ccr |= CC_SRCPRI | CC_DSTPRI;
1471 if (rqc->nonsecure)
1472 ccr |= CC_SRCNS | CC_DSTNS;
1473 if (rqc->insnaccess)
1474 ccr |= CC_SRCIA | CC_DSTIA;
1475
1476 ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
1477 ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);
1478
1479 ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
1480 ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);
1481
1482 ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
1483 ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);
1484
1485 ccr |= (rqc->swap << CC_SWAP_SHFT);
1486
1487 return ccr;
1488}
1489
1490static inline bool _is_valid(u32 ccr)
1491{
1492 enum pl330_dstcachectrl dcctl;
1493 enum pl330_srccachectrl scctl;
1494
1495 dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK;
1496 scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK;
1497
1498 if (dcctl == DINVALID1 || dcctl == DINVALID2
1499 || scctl == SINVALID1 || scctl == SINVALID2)
1500 return false;
1501 else
1502 return true;
1503}
1504
1505/*
1506 * Submit a list of xfers after which the client wants notification.
1507 * Client is not notified after each xfer unit, just once after all
1508 * xfer units are done or some error occurs.
1509 */
1510static int pl330_submit_req(void *ch_id, struct pl330_req *r)
1511{
1512 struct pl330_thread *thrd = ch_id;
1513 struct pl330_dmac *pl330;
1514 struct pl330_info *pi;
1515 struct _xfer_spec xs;
1516 unsigned long flags;
1517 void __iomem *regs;
1518 unsigned idx;
1519 u32 ccr;
1520 int ret = 0;
1521
1522 /* No Req or Unacquired Channel or DMAC */
1523 if (!r || !thrd || thrd->free)
1524 return -EINVAL;
1525
1526 pl330 = thrd->dmac;
1527 pi = pl330->pinfo;
1528 regs = pi->base;
1529
1530 if (pl330->state == DYING
1531 || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
1532 dev_info(thrd->dmac->pinfo->dev, "%s:%d\n",
1533 __func__, __LINE__);
1534 return -EAGAIN;
1535 }
1536
1537 /* If request for non-existing peripheral */
1538 if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) {
1539 dev_info(thrd->dmac->pinfo->dev,
1540 "%s:%d Invalid peripheral(%u)!\n",
1541 __func__, __LINE__, r->peri);
1542 return -EINVAL;
1543 }
1544
1545 spin_lock_irqsave(&pl330->lock, flags);
1546
1547 if (_queue_full(thrd)) {
1548 ret = -EAGAIN;
1549 goto xfer_exit;
1550 }
1551
1552 /* Prefer Secure Channel */
1553 if (!_manager_ns(thrd))
1554 r->cfg->nonsecure = 0;
1555 else
1556 r->cfg->nonsecure = 1;
1557
1558 /* Use last settings, if not provided */
1559 if (r->cfg)
1560 ccr = _prepare_ccr(r->cfg);
1561 else
1562 ccr = readl(regs + CC(thrd->id));
1563
1564 /* If this req doesn't have valid xfer settings */
1565 if (!_is_valid(ccr)) {
1566 ret = -EINVAL;
1567 dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n",
1568 __func__, __LINE__, ccr);
1569 goto xfer_exit;
1570 }
1571
1572 idx = IS_FREE(&thrd->req[0]) ? 0 : 1;
1573
1574 xs.ccr = ccr;
1575 xs.r = r;
1576
1577 /* First dry run to check if req is acceptable */
1578 ret = _setup_req(1, thrd, idx, &xs);
1579 if (ret < 0)
1580 goto xfer_exit;
1581
1582 if (ret > pi->mcbufsz / 2) {
1583 dev_info(thrd->dmac->pinfo->dev,
1584 "%s:%d Trying increasing mcbufsz\n",
1585 __func__, __LINE__);
1586 ret = -ENOMEM;
1587 goto xfer_exit;
1588 }
1589
1590 /* Hook the request */
1591 thrd->lstenq = idx;
1592 thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs);
1593 thrd->req[idx].r = r;
1594
1595 ret = 0;
1596
1597xfer_exit:
1598 spin_unlock_irqrestore(&pl330->lock, flags);
1599
1600 return ret;
1601}
1602
/*
 * Tasklet that performs the reset work recorded in pl330->dmac_tbd by
 * the IRQ path (pl330_update): reset the whole DMAC, the manager
 * thread, and/or individual channel threads, notifying clients of
 * aborted requests.
 */
static void pl330_dotask(unsigned long data)
{
	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
	struct pl330_info *pi = pl330->pinfo;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pl330->lock, flags);

	/* The DMAC itself gone nuts */
	if (pl330->dmac_tbd.reset_dmac) {
		pl330->state = DYING;
		/* Reset the manager too */
		pl330->dmac_tbd.reset_mngr = true;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_dmac = false;
	}

	if (pl330->dmac_tbd.reset_mngr) {
		_stop(pl330->manager);
		/* Reset all channels */
		pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_mngr = false;
	}

	for (i = 0; i < pi->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
			struct pl330_thread *thrd = &pl330->channels[i];
			void __iomem *regs = pi->base;
			enum pl330_op_err err;

			_stop(thrd);

			/* FSC tells whether the channel stopped on a fault */
			if (readl(regs + FSC) & (1 << thrd->id))
				err = PL330_ERR_FAIL;
			else
				err = PL330_ERR_ABORT;

			/*
			 * Drop the lock around the client callbacks: they
			 * may resubmit and re-take pl330->lock.
			 */
			spin_unlock_irqrestore(&pl330->lock, flags);

			_callback(thrd->req[1 - thrd->lstenq].r, err);
			_callback(thrd->req[thrd->lstenq].r, err);

			spin_lock_irqsave(&pl330->lock, flags);

			/* Both request slots are now abandoned */
			thrd->req[0].r = NULL;
			thrd->req[1].r = NULL;
			mark_free(thrd, 0);
			mark_free(thrd, 1);

			/* Clear the reset flag */
			pl330->dmac_tbd.reset_chan &= ~(1 << i);
		}
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return;
}
1664
/*
 * Interrupt-time housekeeping: inspect the DMAC's fault and event
 * status registers, queue reset work for faulting manager/channels,
 * complete finished requests and run their client callbacks.
 *
 * Returns 1 if state was updated, 0 otherwise.
 */
static int pl330_update(const struct pl330_info *pi)
{
	struct _pl330_req *rqdone;
	struct pl330_dmac *pl330;
	unsigned long flags;
	void __iomem *regs;
	u32 val;
	int id, ev, ret = 0;

	if (!pi || !pi->pl330_data)
		return 0;

	regs = pi->base;
	pl330 = pi->pl330_data;

	spin_lock_irqsave(&pl330->lock, flags);

	/* Manager fault status -> schedule manager reset */
	val = readl(regs + FSM) & 0x1;
	if (val)
		pl330->dmac_tbd.reset_mngr = true;
	else
		pl330->dmac_tbd.reset_mngr = false;

	/* Per-channel fault bits -> schedule those channels for reset */
	val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);
	pl330->dmac_tbd.reset_chan |= val;
	if (val) {
		int i = 0;
		while (i < pi->pcfg.num_chan) {
			if (val & (1 << i)) {
				dev_info(pi->dev,
					"Reset Channel-%d\t CS-%x FTC-%x\n",
					i, readl(regs + CS(i)),
					readl(regs + FTC(i)));
				_stop(&pl330->channels[i]);
			}
			i++;
		}
	}

	/* Check which event happened i.e, thread notified */
	val = readl(regs + ES);
	if (pi->pcfg.num_events < 32
			&& val & ~((1 << pi->pcfg.num_events) - 1)) {
		/* An event outside the configured range fired: reset DMAC */
		pl330->dmac_tbd.reset_dmac = true;
		dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);
		ret = 1;
		goto updt_exit;
	}

	for (ev = 0; ev < pi->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
			struct pl330_thread *thrd;
			u32 inten = readl(regs + INTEN);
			int active;

			/* Clear the event */
			if (inten & (1 << ev))
				writel(1 << ev, regs + INTCLR);

			ret = 1;

			/* Map the event back to its owning channel */
			id = pl330->events[ev];

			thrd = &pl330->channels[id];

			active = thrd->req_running;
			if (active == -1) /* Aborted */
				continue;

			/* Retire the finished request slot */
			rqdone = &thrd->req[active];
			mark_free(thrd, active);

			/* Get going again ASAP */
			_start(thrd);

			/* For now, just make a list of callbacks to be done */
			list_add_tail(&rqdone->rqd, &pl330->req_done);
		}
	}

	/* Now that we are in no hurry, do the callbacks */
	while (!list_empty(&pl330->req_done)) {
		struct pl330_req *r;

		rqdone = container_of(pl330->req_done.next,
					struct _pl330_req, rqd);

		list_del_init(&rqdone->rqd);

		/* Detach the req */
		r = rqdone->r;
		rqdone->r = NULL;

		/* Callbacks run unlocked; clients may resubmit from them */
		spin_unlock_irqrestore(&pl330->lock, flags);
		_callback(r, PL330_ERR_NONE);
		spin_lock_irqsave(&pl330->lock, flags);
	}

updt_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	/* Defer any recorded reset work to the tasklet */
	if (pl330->dmac_tbd.reset_dmac
			|| pl330->dmac_tbd.reset_mngr
			|| pl330->dmac_tbd.reset_chan) {
		ret = 1;
		tasklet_schedule(&pl330->tasks);
	}

	return ret;
}
1776
1777static int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
1778{
1779 struct pl330_thread *thrd = ch_id;
1780 struct pl330_dmac *pl330;
1781 unsigned long flags;
1782 int ret = 0, active = thrd->req_running;
1783
1784 if (!thrd || thrd->free || thrd->dmac->state == DYING)
1785 return -EINVAL;
1786
1787 pl330 = thrd->dmac;
1788
1789 spin_lock_irqsave(&pl330->lock, flags);
1790
1791 switch (op) {
1792 case PL330_OP_FLUSH:
1793 /* Make sure the channel is stopped */
1794 _stop(thrd);
1795
1796 thrd->req[0].r = NULL;
1797 thrd->req[1].r = NULL;
1798 mark_free(thrd, 0);
1799 mark_free(thrd, 1);
1800 break;
1801
1802 case PL330_OP_ABORT:
1803 /* Make sure the channel is stopped */
1804 _stop(thrd);
1805
1806 /* ABORT is only for the active req */
1807 if (active == -1)
1808 break;
1809
1810 thrd->req[active].r = NULL;
1811 mark_free(thrd, active);
1812
1813 /* Start the next */
1814 case PL330_OP_START:
1815 if ((active == -1) && !_start(thrd))
1816 ret = -EIO;
1817 break;
1818
1819 default:
1820 ret = -EINVAL;
1821 }
1822
1823 spin_unlock_irqrestore(&pl330->lock, flags);
1824 return ret;
1825}
1826
/*
 * Fill *pstatus with a snapshot of the channel's state: DMAC health,
 * fault flag, the active and queued requests, and the current
 * source/destination addresses read from the hardware.
 *
 * Returns 0 on success, -EINVAL for bad arguments.
 * NOTE(review): reads are not done under pl330->lock, so the snapshot
 * may be torn relative to concurrent submit/IRQ activity.
 */
static int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	struct pl330_info *pi;
	void __iomem *regs;
	int active;
	u32 val;

	if (!pstatus || !thrd || thrd->free)
		return -EINVAL;

	pl330 = thrd->dmac;
	pi = pl330->pinfo;
	regs = pi->base;

	/* The client should remove the DMAC and add again */
	if (pl330->state == DYING)
		pstatus->dmac_halted = true;
	else
		pstatus->dmac_halted = false;

	/* Per-channel fault bit from the fault status register */
	val = readl(regs + FSC);
	if (val & (1 << thrd->id))
		pstatus->faulting = true;
	else
		pstatus->faulting = false;

	active = thrd->req_running;

	if (active == -1) {
		/* Indicate that the thread is not running */
		pstatus->top_req = NULL;
		pstatus->wait_req = NULL;
	} else {
		/* Running slot plus the other slot if it holds a queued req */
		pstatus->top_req = thrd->req[active].r;
		pstatus->wait_req = !IS_FREE(&thrd->req[1 - active])
					? thrd->req[1 - active].r : NULL;
	}

	pstatus->src_addr = readl(regs + SA(thrd->id));
	pstatus->dst_addr = readl(regs + DA(thrd->id));

	return 0;
}
1872
1873/* Reserve an event */
1874static inline int _alloc_event(struct pl330_thread *thrd)
1875{
1876 struct pl330_dmac *pl330 = thrd->dmac;
1877 struct pl330_info *pi = pl330->pinfo;
1878 int ev;
1879
1880 for (ev = 0; ev < pi->pcfg.num_events; ev++)
1881 if (pl330->events[ev] == -1) {
1882 pl330->events[ev] = thrd->id;
1883 return ev;
1884 }
1885
1886 return -1;
1887}
1888
1889static bool _chan_ns(const struct pl330_info *pi, int i)
1890{
1891 return pi->pcfg.irq_ns & (1 << i);
1892}
1893
1894/* Upon success, returns IdentityToken for the
1895 * allocated channel, NULL otherwise.
1896 */
/* Upon success, returns IdentityToken for the
 * allocated channel, NULL otherwise.
 *
 * Scans the channel threads for a free one that is usable given the
 * manager's secure/non-secure state, reserves an event for it and
 * resets its two request slots.
 */
static void *pl330_request_channel(const struct pl330_info *pi)
{
	struct pl330_thread *thrd = NULL;
	struct pl330_dmac *pl330;
	unsigned long flags;
	int chans, i;

	if (!pi || !pi->pl330_data)
		return NULL;

	pl330 = pi->pl330_data;

	if (pl330->state == DYING)
		return NULL;

	chans = pi->pcfg.num_chan;

	spin_lock_irqsave(&pl330->lock, flags);

	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		/* A non-secure manager may only take NS-capable channels */
		if ((thrd->free) && (!_manager_ns(thrd) ||
					_chan_ns(pi, i))) {
			thrd->ev = _alloc_event(thrd);
			if (thrd->ev >= 0) {
				/* Claim it and reset both request slots */
				thrd->free = false;
				thrd->lstenq = 1;
				thrd->req[0].r = NULL;
				mark_free(thrd, 0);
				thrd->req[1].r = NULL;
				mark_free(thrd, 1);
				break;
			}
		}
		/* Not usable; forget it unless the loop finds another */
		thrd = NULL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return thrd;
}
1938
1939/* Release an event */
1940static inline void _free_event(struct pl330_thread *thrd, int ev)
1941{
1942 struct pl330_dmac *pl330 = thrd->dmac;
1943 struct pl330_info *pi = pl330->pinfo;
1944
1945 /* If the event is valid and was held by the thread */
1946 if (ev >= 0 && ev < pi->pcfg.num_events
1947 && pl330->events[ev] == thrd->id)
1948 pl330->events[ev] = -1;
1949}
1950
/*
 * Release a channel thread: stop it, abort any queued/active requests
 * via their client callbacks, then free its event and mark it free.
 */
static void pl330_release_channel(void *ch_id)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	unsigned long flags;

	if (!thrd || thrd->free)
		return;

	_stop(thrd);

	/* Callbacks run without pl330->lock held; clients may re-enter */
	_callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
	_callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);

	pl330 = thrd->dmac;

	spin_lock_irqsave(&pl330->lock, flags);
	_free_event(thrd, thrd->ev);
	thrd->free = true;
	spin_unlock_irqrestore(&pl330->lock, flags);
}
1972
1973/* Initialize the structure for PL330 configuration, that can be used
1974 * by the client driver the make best use of the DMAC
1975 */
1976static void read_dmac_config(struct pl330_info *pi)
1977{
1978 void __iomem *regs = pi->base;
1979 u32 val;
1980
1981 val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
1982 val &= CRD_DATA_WIDTH_MASK;
1983 pi->pcfg.data_bus_width = 8 * (1 << val);
1984
1985 val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
1986 val &= CRD_DATA_BUFF_MASK;
1987 pi->pcfg.data_buf_dep = val + 1;
1988
1989 val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
1990 val &= CR0_NUM_CHANS_MASK;
1991 val += 1;
1992 pi->pcfg.num_chan = val;
1993
1994 val = readl(regs + CR0);
1995 if (val & CR0_PERIPH_REQ_SET) {
1996 val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
1997 val += 1;
1998 pi->pcfg.num_peri = val;
1999 pi->pcfg.peri_ns = readl(regs + CR4);
2000 } else {
2001 pi->pcfg.num_peri = 0;
2002 }
2003
2004 val = readl(regs + CR0);
2005 if (val & CR0_BOOT_MAN_NS)
2006 pi->pcfg.mode |= DMAC_MODE_NS;
2007 else
2008 pi->pcfg.mode &= ~DMAC_MODE_NS;
2009
2010 val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
2011 val &= CR0_NUM_EVENTS_MASK;
2012 val += 1;
2013 pi->pcfg.num_events = val;
2014
2015 pi->pcfg.irq_ns = readl(regs + CR3);
2016
2017 pi->pcfg.periph_id = get_id(pi, PERIPH_ID);
2018 pi->pcfg.pcell_id = get_id(pi, PCELL_ID);
2019}
2020
2021static inline void _reset_thread(struct pl330_thread *thrd)
2022{
2023 struct pl330_dmac *pl330 = thrd->dmac;
2024 struct pl330_info *pi = pl330->pinfo;
2025
2026 thrd->req[0].mc_cpu = pl330->mcode_cpu
2027 + (thrd->id * pi->mcbufsz);
2028 thrd->req[0].mc_bus = pl330->mcode_bus
2029 + (thrd->id * pi->mcbufsz);
2030 thrd->req[0].r = NULL;
2031 mark_free(thrd, 0);
2032
2033 thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
2034 + pi->mcbufsz / 2;
2035 thrd->req[1].mc_bus = thrd->req[0].mc_bus
2036 + pi->mcbufsz / 2;
2037 thrd->req[1].r = NULL;
2038 mark_free(thrd, 1);
2039}
2040
2041static int dmac_alloc_threads(struct pl330_dmac *pl330)
2042{
2043 struct pl330_info *pi = pl330->pinfo;
2044 int chans = pi->pcfg.num_chan;
2045 struct pl330_thread *thrd;
2046 int i;
2047
2048 /* Allocate 1 Manager and 'chans' Channel threads */
2049 pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
2050 GFP_KERNEL);
2051 if (!pl330->channels)
2052 return -ENOMEM;
2053
2054 /* Init Channel threads */
2055 for (i = 0; i < chans; i++) {
2056 thrd = &pl330->channels[i];
2057 thrd->id = i;
2058 thrd->dmac = pl330;
2059 _reset_thread(thrd);
2060 thrd->free = true;
2061 }
2062
2063 /* MANAGER is indexed at the end */
2064 thrd = &pl330->channels[chans];
2065 thrd->id = chans;
2066 thrd->dmac = pl330;
2067 thrd->free = false;
2068 pl330->manager = thrd;
2069
2070 return 0;
2071}
2072
/*
 * Allocate the DMA-coherent microcode buffer shared by all channels
 * and then the thread array.  On thread-allocation failure the
 * microcode buffer is freed again.
 *
 * Returns 0 on success or a negative errno.
 */
static int dmac_alloc_resources(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;
	int ret;

	/*
	 * Alloc MicroCode buffer for 'chans' Channel threads.
	 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
	 */
	pl330->mcode_cpu = dma_alloc_coherent(pi->dev,
				chans * pi->mcbufsz,
				&pl330->mcode_bus, GFP_KERNEL);
	if (!pl330->mcode_cpu) {
		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	ret = dmac_alloc_threads(pl330);
	if (ret) {
		/* Undo the coherent allocation on failure */
		dev_err(pi->dev, "%s:%d Can't to create channels for DMAC!\n",
			__func__, __LINE__);
		dma_free_coherent(pi->dev,
				chans * pi->mcbufsz,
				pl330->mcode_cpu, pl330->mcode_bus);
		return ret;
	}

	return 0;
}
2104
/*
 * Register a PL330 DMAC described by *pi: verify its AMBA IDs, read
 * its hardware configuration, allocate the per-DMAC state and the
 * microcode/thread resources, and arm the reset tasklet.
 *
 * Returns 0 on success, -EINVAL for a bad/duplicate pi or unsupported
 * hardware, -ENOMEM on allocation failure.
 */
static int pl330_add(struct pl330_info *pi)
{
	struct pl330_dmac *pl330;
	void __iomem *regs;
	int i, ret;

	if (!pi || !pi->dev)
		return -EINVAL;

	/* If already added */
	if (pi->pl330_data)
		return -EINVAL;

	/*
	 * If the SoC can perform reset on the DMAC, then do it
	 * before reading its configuration.
	 */
	if (pi->dmac_reset)
		pi->dmac_reset(pi);

	regs = pi->base;

	/* Check if we can handle this DMAC */
	if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL
	   || get_id(pi, PCELL_ID) != PCELL_ID_VAL) {
		dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",
			get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID));
		return -EINVAL;
	}

	/* Read the configuration of the DMAC */
	read_dmac_config(pi);

	/* Completion is signalled via events; none means unusable */
	if (pi->pcfg.num_events == 0) {
		dev_err(pi->dev, "%s:%d Can't work without events!\n",
			__func__, __LINE__);
		return -EINVAL;
	}

	pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL);
	if (!pl330) {
		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	/* Assign the info structure and private data */
	pl330->pinfo = pi;
	pi->pl330_data = pl330;

	spin_lock_init(&pl330->lock);

	INIT_LIST_HEAD(&pl330->req_done);

	/* Use default MC buffer size if not provided */
	if (!pi->mcbufsz)
		pi->mcbufsz = MCODE_BUFF_PER_REQ * 2;

	/* Mark all events as free */
	for (i = 0; i < pi->pcfg.num_events; i++)
		pl330->events[i] = -1;

	/* Allocate resources needed by the DMAC */
	ret = dmac_alloc_resources(pl330);
	if (ret) {
		dev_err(pi->dev, "Unable to create channels for DMAC\n");
		kfree(pl330);
		return ret;
	}

	/* Reset work from the IRQ path is deferred to this tasklet */
	tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);

	pl330->state = INIT;

	return 0;
}
2181
2182static int dmac_free_threads(struct pl330_dmac *pl330)
2183{
2184 struct pl330_info *pi = pl330->pinfo;
2185 int chans = pi->pcfg.num_chan;
2186 struct pl330_thread *thrd;
2187 int i;
2188
2189 /* Release Channel threads */
2190 for (i = 0; i < chans; i++) {
2191 thrd = &pl330->channels[i];
2192 pl330_release_channel((void *)thrd);
2193 }
2194
2195 /* Free memory */
2196 kfree(pl330->channels);
2197
2198 return 0;
2199}
2200
2201static void dmac_free_resources(struct pl330_dmac *pl330)
2202{
2203 struct pl330_info *pi = pl330->pinfo;
2204 int chans = pi->pcfg.num_chan;
2205
2206 dmac_free_threads(pl330);
2207
2208 dma_free_coherent(pi->dev, chans * pi->mcbufsz,
2209 pl330->mcode_cpu, pl330->mcode_bus);
2210}
2211
2212static void pl330_del(struct pl330_info *pi)
2213{
2214 struct pl330_dmac *pl330;
2215
2216 if (!pi || !pi->pl330_data)
2217 return;
2218
2219 pl330 = pi->pl330_data;
2220
2221 pl330->state = UNINIT;
2222
2223 tasklet_kill(&pl330->tasks);
2224
2225 /* Free DMAC resources */
2226 dmac_free_resources(pl330);
2227
2228 kfree(pl330);
2229 pi->pl330_data = NULL;
2230}
2231
/* forward declaration */
static struct amba_driver pl330_driver;
