author	Linus Walleij <linus.walleij@stericsson.com>	2010-03-30 09:33:42 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2010-04-14 17:49:20 -0400
commit	8d318a50b3d72e3daf94131f91e1ab799a8d5ad4 (patch)
tree	ae36452931d2e836f725b3f91eebd7f4d9e27589	/drivers/dma/ste_dma40.c
parent	6a3cd3ea48584d14f60dce0b3c4e9e4428beb0fe (diff)
DMAENGINE: Support for ST-Ericssons DMA40 block v3
This is a straightforward driver for the ST-Ericsson DMA40 DMA controller found in U8500, implemented akin to the existing COH 901 318 driver.

Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Acked-by: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
Cc: STEricsson_nomadik_linux@list.st.com
Cc: Alessandro Rubini <rubini@unipv.it>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma/ste_dma40.c')
-rw-r--r--	drivers/dma/ste_dma40.c	2596
1 file changed, 2596 insertions, 0 deletions
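
A minimal usage sketch (not part of this patch): requesting a DMA40 channel through the dmaengine framework with the stedma40_filter() this driver exports. The stedma40_chan_cfg values below are assumptions for illustration; real values are platform specific.

	#include <linux/dmaengine.h>
	#include <plat/ste_dma40.h>

	static struct dma_chan *claim_dma40_channel(void)
	{
		struct stedma40_chan_cfg cfg = {
			/* assumed example values; src_dev_type, dst_dev_type
			 * and the *_info fields must also be filled in for a
			 * real platform */
			.dir = STEDMA40_PERIPH_TO_MEM,
			.channel_type = STEDMA40_CHANNEL_IN_LOG_MODE,
		};
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* stedma40_filter() validates cfg and copies it into the channel */
		return dma_request_channel(mask, stedma40_filter, &cfg);
	}
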
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
new file mode 100644
index 000000000000..e4295a27672b
--- /dev/null
+++ b/drivers/dma/ste_dma40.c
@@ -0,0 +1,2596 @@
1/*
2 * drivers/dma/ste_dma40.c
3 *
4 * Copyright (C) ST-Ericsson 2007-2010
5 * License terms: GNU General Public License (GPL) version 2
6 * Author: Per Friden <per.friden@stericsson.com>
7 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
8 *
9 */
10
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/dmaengine.h>
14#include <linux/platform_device.h>
15#include <linux/clk.h>
16#include <linux/delay.h>
17
18#include <plat/ste_dma40.h>
19
20#include "ste_dma40_ll.h"
21
22#define D40_NAME "dma40"
23
24#define D40_PHY_CHAN -1
25
26/* For masking out/in 2 bit channel positions */
27#define D40_CHAN_POS(chan) (2 * (chan / 2))
28#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
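/*
 * Worked example: D40_CHAN_POS(4) == D40_CHAN_POS(5) == 4, so
 * D40_CHAN_POS_MASK() is 0x30 for both channels; the even channel (4) is
 * then addressed via the even register (e.g. ACTIVE) and the odd channel
 * (5) via the odd one (e.g. ACTIVO), see d40_channel_execute_command().
 */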
29
30/* Maximum iterations taken before giving up suspending a channel */
31#define D40_SUSPEND_MAX_IT 500
32
33#define D40_ALLOC_FREE (1 << 31)
34#define D40_ALLOC_PHY (1 << 30)
35#define D40_ALLOC_LOG_FREE 0
36
37/* The number of free d40_desc to keep in memory before starting
38 * to kfree() them */
39#define D40_DESC_CACHE_SIZE 50
40
41/* Hardware designer of the block */
42#define D40_PERIPHID2_DESIGNER 0x8
43
44/**
45 * enum d40_command - The different commands and/or statuses.
46 *
47 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
48 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
49 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
50 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
51 */
52enum d40_command {
53 D40_DMA_STOP = 0,
54 D40_DMA_RUN = 1,
55 D40_DMA_SUSPEND_REQ = 2,
56 D40_DMA_SUSPENDED = 3
57};
58
59/**
60 * struct d40_lli_pool - Structure for keeping LLIs in memory
61 *
62 * @base: Pointer to a memory area used when the pre_alloc_lli's are not
63 * large enough, i.e. bigger than the most common case, 1 dst and 1 src.
64 * NULL if pre_alloc_lli is used.
65 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
66 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
67 * one buffer to one buffer.
68 */
69struct d40_lli_pool {
70 void *base;
71 int size;
72 /* Space for dst and src, plus an extra for padding */
73 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
74};
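/*
 * A note on the sizing above, assuming struct d40_log_lli is no larger
 * than struct d40_phy_lli: 3 * sizeof(struct d40_phy_lli) leaves room for
 * one src and one dst LLI even after d40_pool_lli_alloc() has aligned the
 * pointers up, so the one buffer covers both channel types.
 */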
75
76/**
77 * struct d40_desc - A descriptor is one DMA job.
78 *
79 * @lli_phy: LLI settings for the physical channel. Both src and dst
80 * point into the lli_pool: to base if lli_len > 1 or to pre_alloc_lli if
81 * lli_len equals one.
82 * @lli_log: Same as above but for logical channels.
83 * @lli_pool: The pool with two entries pre-allocated.
84 * @lli_len: Number of LLIs in lli_pool.
85 * @lli_tcount: Number of LLIs processed in the transfer. When it equals
86 * lli_len, this transfer job is done.
87 * @txd: DMA engine struct. Used, among other things, for communication
88 * during a transfer.
89 * @node: List entry.
90 * @dir: The transfer direction of this job.
91 * @is_in_client_list: true if the client owns this descriptor.
92 *
93 * This descriptor is used for both logical and physical transfers.
94 */
95
96struct d40_desc {
97 /* LLI physical */
98 struct d40_phy_lli_bidir lli_phy;
99 /* LLI logical */
100 struct d40_log_lli_bidir lli_log;
101
102 struct d40_lli_pool lli_pool;
103 u32 lli_len;
104 u32 lli_tcount;
105
106 struct dma_async_tx_descriptor txd;
107 struct list_head node;
108
109 enum dma_data_direction dir;
110 bool is_in_client_list;
111};
112
113/**
114 * struct d40_lcla_pool - LCLA pool settings and data.
115 *
116 * @base: The virtual address of LCLA.
117 * @phy: Physical base address of LCLA.
118 * @base_size: The size of the LCLA area.
119 * @lock: Lock to protect the content in this struct.
120 * @alloc_map: Mapping between physical channel and LCLA entries.
121 * @num_blocks: The number of entries in alloc_map. Equal to the
122 * number of physical channels.
123 */
124struct d40_lcla_pool {
125 void *base;
126 dma_addr_t phy;
127 resource_size_t base_size;
128 spinlock_t lock;
129 u32 *alloc_map;
130 int num_blocks;
131};
132
133/**
134 * struct d40_phy_res - struct for handling event lines mapped to physical
135 * channels.
136 *
137 * @lock: A lock protecting this entity.
138 * @num: The physical channel number of this entity.
139 * @allocated_src: Bit map showing which src event lines are mapped to
140 * this physical channel. Can also be free or physically allocated.
141 * @allocated_dst: Same as for src but for dst.
142 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
143 * the event line number. allocated_src and allocated_dst cannot both be
144 * allocated to a physical channel, since the interrupt handler would then
145 * have no way of figuring out which one the interrupt belongs to.
146 */
147struct d40_phy_res {
148 spinlock_t lock;
149 int num;
150 u32 allocated_src;
151 u32 allocated_dst;
152};
153
154struct d40_base;
155
156/**
157 * struct d40_chan - Struct that describes a channel.
158 *
159 * @lock: A spinlock to protect this struct.
160 * @log_num: The logical channel number, if any, of this channel.
161 * @completed: Starts at 1; after the first interrupt it is set to the dma
162 * engine's current cookie.
163 * @pending_tx: The number of pending transfers. Used between interrupt handler
164 * and tasklet.
165 * @busy: Set to true when transfer is ongoing on this channel.
166 * @phy_chan: Pointer to physical channel which this instance runs on.
167 * @chan: DMA engine handle.
168 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
169 * transfer and call client callback.
170 * @client: Client owned descriptor list.
171 * @active: Active descriptor.
172 * @queue: Queued jobs.
173 * @free: List of free descriptors, ready to be reused.
174 * @free_len: Number of descriptors in the free list.
175 * @dma_cfg: The client configuration of this dma channel.
176 * @base: Pointer to the device instance struct.
177 * @src_def_cfg: Default cfg register setting for src.
178 * @dst_def_cfg: Default cfg register setting for dst.
179 * @log_def: Default logical channel settings.
180 * @lcla: Space for one dst src pair for logical channel transfers.
181 * @lcpa: Pointer to dst and src lcpa settings.
182 *
183 * This struct can either "be" a logical or a physical channel.
184 */
185struct d40_chan {
186 spinlock_t lock;
187 int log_num;
188 /* ID of the most recent completed transfer */
189 int completed;
190 int pending_tx;
191 bool busy;
192 struct d40_phy_res *phy_chan;
193 struct dma_chan chan;
194 struct tasklet_struct tasklet;
195 struct list_head client;
196 struct list_head active;
197 struct list_head queue;
198 struct list_head free;
199 int free_len;
200 struct stedma40_chan_cfg dma_cfg;
201 struct d40_base *base;
202 /* Default register configurations */
203 u32 src_def_cfg;
204 u32 dst_def_cfg;
205 struct d40_def_lcsp log_def;
206 struct d40_lcla_elem lcla;
207 struct d40_log_lli_full *lcpa;
208};
209
210/**
211 * struct d40_base - The big global struct, one for each probe'd instance.
212 *
213 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
214 * @execmd_lock: Lock for execute command usage since several channels share
215 * the same physical register.
216 * @dev: The device structure.
217 * @virtbase: The virtual base address of the DMA's registers.
218 * @clk: Pointer to the DMA clock structure.
219 * @phy_start: Physical memory start of the DMA registers.
220 * @phy_size: Size of the DMA register map.
221 * @irq: The IRQ number.
222 * @num_phy_chans: The number of physical channels. Read from HW. This
223 * is the number of available channels for this driver, not counting "Secure
224 * mode" allocated physical channels.
225 * @num_log_chans: The number of logical channels. Calculated from
226 * num_phy_chans.
227 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
228 * @dma_slave: dma_device channels that can only do slave transfers.
229 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
230 * @phy_chans: Room for all possible physical channels in system.
231 * @log_chans: Room for all possible logical channels in system.
232 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
233 * to log_chans entries.
234 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
235 * to phy_chans entries.
236 * @plat_data: Pointer to provided platform_data which is the driver
237 * configuration.
238 * @phy_res: Vector containing all physical channels.
239 * @lcla_pool: lcla pool settings and data.
240 * @lcpa_base: The virtual mapped address of LCPA.
241 * @phy_lcpa: The physical address of the LCPA.
242 * @lcpa_size: The size of the LCPA area.
243 */
244struct d40_base {
245 spinlock_t interrupt_lock;
246 spinlock_t execmd_lock;
247 struct device *dev;
248 void __iomem *virtbase;
249 struct clk *clk;
250 phys_addr_t phy_start;
251 resource_size_t phy_size;
252 int irq;
253 int num_phy_chans;
254 int num_log_chans;
255 struct dma_device dma_both;
256 struct dma_device dma_slave;
257 struct dma_device dma_memcpy;
258 struct d40_chan *phy_chans;
259 struct d40_chan *log_chans;
260 struct d40_chan **lookup_log_chans;
261 struct d40_chan **lookup_phy_chans;
262 struct stedma40_platform_data *plat_data;
263 /* Physical half channels */
264 struct d40_phy_res *phy_res;
265 struct d40_lcla_pool lcla_pool;
266 void *lcpa_base;
267 dma_addr_t phy_lcpa;
268 resource_size_t lcpa_size;
269};
270
271/**
272 * struct d40_interrupt_lookup - lookup table for interrupt handler
273 *
274 * @src: Interrupt mask register.
275 * @clr: Interrupt clear register.
276 * @is_error: true if this is an error interrupt.
277 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
278 * D40_PHY_CHAN, lookup_phy_chans shall be used instead.
279 */
280struct d40_interrupt_lookup {
281 u32 src;
282 u32 clr;
283 bool is_error;
284 int offset;
285};
286
287/**
288 * struct d40_reg_val - simple lookup struct
289 *
290 * @reg: The register.
291 * @val: The value that belongs to the register in reg.
292 */
293struct d40_reg_val {
294 unsigned int reg;
295 unsigned int val;
296};
297
298static int d40_pool_lli_alloc(struct d40_desc *d40d,
299 int lli_len, bool is_log)
300{
301 u32 align;
302 void *base;
303
304 if (is_log)
305 align = sizeof(struct d40_log_lli);
306 else
307 align = sizeof(struct d40_phy_lli);
308
309 if (lli_len == 1) {
310 base = d40d->lli_pool.pre_alloc_lli;
311 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
312 d40d->lli_pool.base = NULL;
313 } else {
314 d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
315
316 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
317 d40d->lli_pool.base = base;
318
319 if (d40d->lli_pool.base == NULL)
320 return -ENOMEM;
321 }
322
323 if (is_log) {
324 d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
325 align);
326 d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
327 align);
328 } else {
329 d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
330 align);
331 d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
332 align);
333
334 d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
335 d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
336 }
337
338 return 0;
339}
340
341static void d40_pool_lli_free(struct d40_desc *d40d)
342{
343 kfree(d40d->lli_pool.base);
344 d40d->lli_pool.base = NULL;
345 d40d->lli_pool.size = 0;
346 d40d->lli_log.src = NULL;
347 d40d->lli_log.dst = NULL;
348 d40d->lli_phy.src = NULL;
349 d40d->lli_phy.dst = NULL;
350 d40d->lli_phy.src_addr = 0;
351 d40d->lli_phy.dst_addr = 0;
352}
353
354static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
355 struct d40_desc *desc)
356{
357 dma_cookie_t cookie = d40c->chan.cookie;
358
359 if (++cookie < 0)
360 cookie = 1;
361
362 d40c->chan.cookie = cookie;
363 desc->txd.cookie = cookie;
364
365 return cookie;
366}
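/*
 * A note on the wrap handling above: dma_cookie_t is a signed int, so
 * once the cookie increments past INT_MAX the "< 0" test restarts the
 * sequence at 1; a cookie of 0 is therefore never handed out.
 */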
367
368static void d40_desc_reset(struct d40_desc *d40d)
369{
370 d40d->lli_tcount = 0;
371}
372
373static void d40_desc_remove(struct d40_desc *d40d)
374{
375 list_del(&d40d->node);
376}
377
378static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
379{
380 struct d40_desc *desc;
381 struct d40_desc *d;
382 struct d40_desc *_d;
383
384 if (!list_empty(&d40c->client)) {
385 list_for_each_entry_safe(d, _d, &d40c->client, node)
386 if (async_tx_test_ack(&d->txd)) {
387 d40_pool_lli_free(d);
388 d40_desc_remove(d);
389 desc = d;
390 goto out;
391 }
392 }
393
394 if (list_empty(&d40c->free)) {
395		/* Allocate a new desc since the free list is empty */
396 desc = kzalloc(sizeof(struct d40_desc), GFP_NOWAIT);
397 if (desc == NULL)
398 goto out;
399 INIT_LIST_HEAD(&desc->node);
400 } else {
401 /* Reuse an old desc. */
402 desc = list_first_entry(&d40c->free,
403 struct d40_desc,
404 node);
405 list_del(&desc->node);
406 d40c->free_len--;
407 }
408out:
409 return desc;
410}
411
412static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
413{
414 if (d40c->free_len < D40_DESC_CACHE_SIZE) {
415 list_add_tail(&d40d->node, &d40c->free);
416 d40c->free_len++;
417 } else
418 kfree(d40d);
419}
420
421static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
422{
423 list_add_tail(&desc->node, &d40c->active);
424}
425
426static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
427{
428 struct d40_desc *d;
429
430 if (list_empty(&d40c->active))
431 return NULL;
432
433 d = list_first_entry(&d40c->active,
434 struct d40_desc,
435 node);
436 return d;
437}
438
439static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
440{
441 list_add_tail(&desc->node, &d40c->queue);
442}
443
444static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
445{
446 struct d40_desc *d;
447
448 if (list_empty(&d40c->queue))
449 return NULL;
450
451 d = list_first_entry(&d40c->queue,
452 struct d40_desc,
453 node);
454 return d;
455}
456
457/* Support functions for logical channels */
458
459static int d40_lcla_id_get(struct d40_chan *d40c,
460 struct d40_lcla_pool *pool)
461{
462 int src_id = 0;
463 int dst_id = 0;
464 struct d40_log_lli *lcla_lidx_base =
465 pool->base + d40c->phy_chan->num * 1024;
466 int i;
467 int lli_per_log = d40c->base->plat_data->llis_per_log;
468
469 if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
470 return 0;
471
472 if (pool->num_blocks > 32)
473 return -EINVAL;
474
475 spin_lock(&pool->lock);
476
477 for (i = 0; i < pool->num_blocks; i++) {
478 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
479 pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
480 break;
481 }
482 }
483 src_id = i;
484 if (src_id >= pool->num_blocks)
485 goto err;
486
487 for (; i < pool->num_blocks; i++) {
488 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
489 pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
490 break;
491 }
492 }
493
494 dst_id = i;
495	if (dst_id >= pool->num_blocks || dst_id == src_id)
496 goto err;
497
498 d40c->lcla.src_id = src_id;
499 d40c->lcla.dst_id = dst_id;
500 d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
501 d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
502
503
504 spin_unlock(&pool->lock);
505 return 0;
506err:
507 spin_unlock(&pool->lock);
508 return -EINVAL;
509}
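/*
 * A note on the allocation above: the two loops hand out two distinct
 * free bits of the per-physical-channel alloc_map word, the first for
 * src_id and the next for dst_id, so an LCLA src/dst pair is always
 * allocated together or not at all.
 */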
510
511static void d40_lcla_id_put(struct d40_chan *d40c,
512 struct d40_lcla_pool *pool,
513 int id)
514{
515 if (id < 0)
516 return;
517
518 d40c->lcla.src_id = -1;
519 d40c->lcla.dst_id = -1;
520
521 spin_lock(&pool->lock);
522 pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
523 spin_unlock(&pool->lock);
524}
525
526static int d40_channel_execute_command(struct d40_chan *d40c,
527 enum d40_command command)
528{
529 int status, i;
530 void __iomem *active_reg;
531 int ret = 0;
532 unsigned long flags;
533
534 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
535
536 if (d40c->phy_chan->num % 2 == 0)
537 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
538 else
539 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
540
541 if (command == D40_DMA_SUSPEND_REQ) {
542 status = (readl(active_reg) &
543 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
544 D40_CHAN_POS(d40c->phy_chan->num);
545
546 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
547 goto done;
548 }
549
550 writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg);
551
552 if (command == D40_DMA_SUSPEND_REQ) {
553
554 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
555 status = (readl(active_reg) &
556 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
557 D40_CHAN_POS(d40c->phy_chan->num);
558
559 cpu_relax();
560 /*
561 * Reduce the number of bus accesses while
562 * waiting for the DMA to suspend.
563 */
564 udelay(3);
565
566 if (status == D40_DMA_STOP ||
567 status == D40_DMA_SUSPENDED)
568 break;
569 }
570
571 if (i == D40_SUSPEND_MAX_IT) {
572 dev_err(&d40c->chan.dev->device,
573 "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
574 __func__, d40c->phy_chan->num, d40c->log_num,
575 status);
576 dump_stack();
577 ret = -EBUSY;
578 }
579
580 }
581done:
582 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
583 return ret;
584}
585
586static void d40_term_all(struct d40_chan *d40c)
587{
588 struct d40_desc *d40d;
589 struct d40_desc *d;
590 struct d40_desc *_d;
591
592 /* Release active descriptors */
593 while ((d40d = d40_first_active_get(d40c))) {
594 d40_desc_remove(d40d);
595
596 /* Return desc to free-list */
597 d40_desc_free(d40c, d40d);
598 }
599
600 /* Release queued descriptors waiting for transfer */
601 while ((d40d = d40_first_queued(d40c))) {
602 d40_desc_remove(d40d);
603
604 /* Return desc to free-list */
605 d40_desc_free(d40c, d40d);
606 }
607
608 /* Release client owned descriptors */
609 if (!list_empty(&d40c->client))
610 list_for_each_entry_safe(d, _d, &d40c->client, node) {
611 d40_pool_lli_free(d);
612 d40_desc_remove(d);
613 /* Return desc to free-list */
614			d40_desc_free(d40c, d);
615 }
616
617 d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
618 d40c->lcla.src_id);
619 d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
620 d40c->lcla.dst_id);
621
622 d40c->pending_tx = 0;
623 d40c->busy = false;
624}
625
626static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
627{
628 u32 val;
629 unsigned long flags;
630
631 if (do_enable)
632 val = D40_ACTIVATE_EVENTLINE;
633 else
634 val = D40_DEACTIVATE_EVENTLINE;
635
636 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
637
638 /* Enable event line connected to device (or memcpy) */
639 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
640 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
641 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
642
643 writel((val << D40_EVENTLINE_POS(event)) |
644 ~D40_EVENTLINE_MASK(event),
645 d40c->base->virtbase + D40_DREG_PCBASE +
646 d40c->phy_chan->num * D40_DREG_PCDELTA +
647 D40_CHAN_REG_SSLNK);
648 }
649 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
650 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
651
652 writel((val << D40_EVENTLINE_POS(event)) |
653 ~D40_EVENTLINE_MASK(event),
654 d40c->base->virtbase + D40_DREG_PCBASE +
655 d40c->phy_chan->num * D40_DREG_PCDELTA +
656 D40_CHAN_REG_SDLNK);
657 }
658
659 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
660}
661
662static bool d40_chan_has_events(struct d40_chan *d40c)
663{
664 u32 val = 0;
665
666 /* If SSLNK or SDLNK is zero all events are disabled */
667 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
668 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
669 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
670 d40c->phy_chan->num * D40_DREG_PCDELTA +
671 D40_CHAN_REG_SSLNK);
672
673 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
674 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
675 d40c->phy_chan->num * D40_DREG_PCDELTA +
676 D40_CHAN_REG_SDLNK);
677 return (bool) val;
678}
679
680static void d40_config_enable_lidx(struct d40_chan *d40c)
681{
682 /* Set LIDX for lcla */
683 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
684 D40_SREG_ELEM_LOG_LIDX_MASK,
685 d40c->base->virtbase + D40_DREG_PCBASE +
686 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
687
688 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
689 D40_SREG_ELEM_LOG_LIDX_MASK,
690 d40c->base->virtbase + D40_DREG_PCBASE +
691 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
692}
693
694static int d40_config_write(struct d40_chan *d40c)
695{
696 u32 addr_base;
697 u32 var;
698 int res;
699
700 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
701 if (res)
702 return res;
703
704 /* Odd addresses are even addresses + 4 */
705 addr_base = (d40c->phy_chan->num % 2) * 4;
706 /* Setup channel mode to logical or physical */
707 var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
708 D40_CHAN_POS(d40c->phy_chan->num);
709 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
710
711 /* Setup operational mode option register */
712 var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
713 0x3) << D40_CHAN_POS(d40c->phy_chan->num);
714
715 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
716
717 if (d40c->log_num != D40_PHY_CHAN) {
718 /* Set default config for CFG reg */
719 writel(d40c->src_def_cfg,
720 d40c->base->virtbase + D40_DREG_PCBASE +
721 d40c->phy_chan->num * D40_DREG_PCDELTA +
722 D40_CHAN_REG_SSCFG);
723 writel(d40c->dst_def_cfg,
724 d40c->base->virtbase + D40_DREG_PCBASE +
725 d40c->phy_chan->num * D40_DREG_PCDELTA +
726 D40_CHAN_REG_SDCFG);
727
728 d40_config_enable_lidx(d40c);
729 }
730 return res;
731}
732
733static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
734{
735
736 if (d40d->lli_phy.dst && d40d->lli_phy.src) {
737 d40_phy_lli_write(d40c->base->virtbase,
738 d40c->phy_chan->num,
739 d40d->lli_phy.dst,
740 d40d->lli_phy.src);
741 d40d->lli_tcount = d40d->lli_len;
742 } else if (d40d->lli_log.dst && d40d->lli_log.src) {
743 u32 lli_len;
744 struct d40_log_lli *src = d40d->lli_log.src;
745 struct d40_log_lli *dst = d40d->lli_log.dst;
746
747 src += d40d->lli_tcount;
748 dst += d40d->lli_tcount;
749
750 if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
751 lli_len = d40d->lli_len;
752 else
753 lli_len = d40c->base->plat_data->llis_per_log;
754 d40d->lli_tcount += lli_len;
755 d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
756 d40c->lcla.dst,
757 dst, src,
758 d40c->base->plat_data->llis_per_log);
759 }
760}
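/*
 * A note on the logical branch above: at most llis_per_log LLIs are
 * written into lcpa/lcla per call; lli_tcount tracks how far the job has
 * come, and dma_tc_handle() keeps calling d40_desc_load() until
 * lli_tcount reaches lli_len.
 */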
761
762static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
763{
764 struct d40_chan *d40c = container_of(tx->chan,
765 struct d40_chan,
766 chan);
767 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
768 unsigned long flags;
769
770 spin_lock_irqsave(&d40c->lock, flags);
771
772 tx->cookie = d40_assign_cookie(d40c, d40d);
773
774 d40_desc_queue(d40c, d40d);
775
776 spin_unlock_irqrestore(&d40c->lock, flags);
777
778 return tx->cookie;
779}
780
781static int d40_start(struct d40_chan *d40c)
782{
783 int err;
784
785 if (d40c->log_num != D40_PHY_CHAN) {
786 err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
787 if (err)
788 return err;
789 d40_config_set_event(d40c, true);
790 }
791
792 err = d40_channel_execute_command(d40c, D40_DMA_RUN);
793
794 return err;
795}
796
797static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
798{
799 struct d40_desc *d40d;
800 int err;
801
802 /* Start queued jobs, if any */
803 d40d = d40_first_queued(d40c);
804
805 if (d40d != NULL) {
806 d40c->busy = true;
807
808 /* Remove from queue */
809 d40_desc_remove(d40d);
810
811 /* Add to active queue */
812 d40_desc_submit(d40c, d40d);
813
814 /* Initiate DMA job */
815 d40_desc_load(d40c, d40d);
816
817 /* Start dma job */
818 err = d40_start(d40c);
819
820 if (err)
821 return NULL;
822 }
823
824 return d40d;
825}
826
827/* called from interrupt context */
828static void dma_tc_handle(struct d40_chan *d40c)
829{
830 struct d40_desc *d40d;
831
832 if (!d40c->phy_chan)
833 return;
834
835 /* Get first active entry from list */
836 d40d = d40_first_active_get(d40c);
837
838 if (d40d == NULL)
839 return;
840
841 if (d40d->lli_tcount < d40d->lli_len) {
842
843 d40_desc_load(d40c, d40d);
844 /* Start dma job */
845 (void) d40_start(d40c);
846 return;
847 }
848
849 if (d40_queue_start(d40c) == NULL)
850 d40c->busy = false;
851
852 d40c->pending_tx++;
853 tasklet_schedule(&d40c->tasklet);
854
855}
856
857static void dma_tasklet(unsigned long data)
858{
859 struct d40_chan *d40c = (struct d40_chan *) data;
860 struct d40_desc *d40d_fin;
861 unsigned long flags;
862 dma_async_tx_callback callback;
863 void *callback_param;
864
865 spin_lock_irqsave(&d40c->lock, flags);
866
867 /* Get first active entry from list */
868 d40d_fin = d40_first_active_get(d40c);
869
870 if (d40d_fin == NULL)
871 goto err;
872
873 d40c->completed = d40d_fin->txd.cookie;
874
875 /*
876	 * When terminating a channel, pending_tx is set to zero.
877	 * This prevents any finished active jobs from being returned to the client.
878 */
879 if (d40c->pending_tx == 0) {
880 spin_unlock_irqrestore(&d40c->lock, flags);
881 return;
882 }
883
884 /* Callback to client */
885 callback = d40d_fin->txd.callback;
886 callback_param = d40d_fin->txd.callback_param;
887
888 if (async_tx_test_ack(&d40d_fin->txd)) {
889 d40_pool_lli_free(d40d_fin);
890 d40_desc_remove(d40d_fin);
891 /* Return desc to free-list */
892 d40_desc_free(d40c, d40d_fin);
893 } else {
894 d40_desc_reset(d40d_fin);
895 if (!d40d_fin->is_in_client_list) {
896 d40_desc_remove(d40d_fin);
897 list_add_tail(&d40d_fin->node, &d40c->client);
898 d40d_fin->is_in_client_list = true;
899 }
900 }
901
902 d40c->pending_tx--;
903
904 if (d40c->pending_tx)
905 tasklet_schedule(&d40c->tasklet);
906
907 spin_unlock_irqrestore(&d40c->lock, flags);
908
909 if (callback)
910 callback(callback_param);
911
912 return;
913
914 err:
915	/* Rescue manoeuvre if receiving double interrupts */
916 if (d40c->pending_tx > 0)
917 d40c->pending_tx--;
918 spin_unlock_irqrestore(&d40c->lock, flags);
919}
920
921static irqreturn_t d40_handle_interrupt(int irq, void *data)
922{
923 static const struct d40_interrupt_lookup il[] = {
924 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
925 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
926 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
927 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
928 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
929 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
930 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
931 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
932 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
933 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
934 };
935
936 int i;
937 u32 regs[ARRAY_SIZE(il)];
938 u32 tmp;
939 u32 idx;
940 u32 row;
941 long chan = -1;
942 struct d40_chan *d40c;
943 unsigned long flags;
944 struct d40_base *base = data;
945
946 spin_lock_irqsave(&base->interrupt_lock, flags);
947
948 /* Read interrupt status of both logical and physical channels */
949 for (i = 0; i < ARRAY_SIZE(il); i++)
950 regs[i] = readl(base->virtbase + il[i].src);
951
952 for (;;) {
953
954 chan = find_next_bit((unsigned long *)regs,
955 BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
956
957 /* No more set bits found? */
958 if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
959 break;
960
961 row = chan / BITS_PER_LONG;
962 idx = chan & (BITS_PER_LONG - 1);
963
964 /* ACK interrupt */
965 tmp = readl(base->virtbase + il[row].clr);
966 tmp |= 1 << idx;
967 writel(tmp, base->virtbase + il[row].clr);
968
969 if (il[row].offset == D40_PHY_CHAN)
970 d40c = base->lookup_phy_chans[idx];
971 else
972 d40c = base->lookup_log_chans[il[row].offset + idx];
973 spin_lock(&d40c->lock);
974
975 if (!il[row].is_error)
976 dma_tc_handle(d40c);
977 else
978 dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n",
979 __func__, chan, il[row].offset, idx);
980
981 spin_unlock(&d40c->lock);
982 }
983
984 spin_unlock_irqrestore(&base->interrupt_lock, flags);
985
986 return IRQ_HANDLED;
987}
988
989
990static int d40_validate_conf(struct d40_chan *d40c,
991 struct stedma40_chan_cfg *conf)
992{
993 int res = 0;
994 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
995 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
996 bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
997 == STEDMA40_CHANNEL_IN_LOG_MODE;
998
999	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
1000 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
1001 dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
1002 __func__);
1003 res = -EINVAL;
1004 }
1005
1006	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
1007 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
1008 dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
1009 __func__);
1010 res = -EINVAL;
1011 }
1012
1013 if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
1014 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
1015 dev_err(&d40c->chan.dev->device,
1016 "[%s] No event line\n", __func__);
1017 res = -EINVAL;
1018 }
1019
1020 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
1021 (src_event_group != dst_event_group)) {
1022 dev_err(&d40c->chan.dev->device,
1023 "[%s] Invalid event group\n", __func__);
1024 res = -EINVAL;
1025 }
1026
1027 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
1028 /*
1029 * DMAC HW supports it. Will be added to this driver,
1030 * in case any dma client requires it.
1031 */
1032 dev_err(&d40c->chan.dev->device,
1033 "[%s] periph to periph not supported\n",
1034 __func__);
1035 res = -EINVAL;
1036 }
1037
1038 return res;
1039}
1040
1041static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
1042 int log_event_line)
1043{
1044 unsigned long flags;
1045 spin_lock_irqsave(&phy->lock, flags);
1046 if (!log_event_line) {
1047 /* Physical interrupts are masked per physical full channel */
1048 if (phy->allocated_src == D40_ALLOC_FREE &&
1049 phy->allocated_dst == D40_ALLOC_FREE) {
1050 phy->allocated_dst = D40_ALLOC_PHY;
1051 phy->allocated_src = D40_ALLOC_PHY;
1052 goto found;
1053 } else
1054 goto not_found;
1055 }
1056
1057 /* Logical channel */
1058 if (is_src) {
1059 if (phy->allocated_src == D40_ALLOC_PHY)
1060 goto not_found;
1061
1062 if (phy->allocated_src == D40_ALLOC_FREE)
1063 phy->allocated_src = D40_ALLOC_LOG_FREE;
1064
1065 if (!(phy->allocated_src & (1 << log_event_line))) {
1066 phy->allocated_src |= 1 << log_event_line;
1067 goto found;
1068 } else
1069 goto not_found;
1070 } else {
1071 if (phy->allocated_dst == D40_ALLOC_PHY)
1072 goto not_found;
1073
1074 if (phy->allocated_dst == D40_ALLOC_FREE)
1075 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1076
1077 if (!(phy->allocated_dst & (1 << log_event_line))) {
1078 phy->allocated_dst |= 1 << log_event_line;
1079 goto found;
1080 } else
1081 goto not_found;
1082 }
1083
1084not_found:
1085 spin_unlock_irqrestore(&phy->lock, flags);
1086 return false;
1087found:
1088 spin_unlock_irqrestore(&phy->lock, flags);
1089 return true;
1090}
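/*
 * Worked example of the encoding used above: allocated_src/allocated_dst
 * hold D40_ALLOC_FREE (bit 31), D40_ALLOC_PHY (bit 30) or a mask of taken
 * logical event lines; e.g. log event lines 2 and 5 in use gives 0x24,
 * and releasing both returns the field to D40_ALLOC_LOG_FREE (0) and then
 * D40_ALLOC_FREE in d40_alloc_mask_free().
 */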
1091
1092static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1093 int log_event_line)
1094{
1095 unsigned long flags;
1096 bool is_free = false;
1097
1098 spin_lock_irqsave(&phy->lock, flags);
1099 if (!log_event_line) {
1100 /* Physical interrupts are masked per physical full channel */
1101 phy->allocated_dst = D40_ALLOC_FREE;
1102 phy->allocated_src = D40_ALLOC_FREE;
1103 is_free = true;
1104 goto out;
1105 }
1106
1107 /* Logical channel */
1108 if (is_src) {
1109 phy->allocated_src &= ~(1 << log_event_line);
1110 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1111 phy->allocated_src = D40_ALLOC_FREE;
1112 } else {
1113 phy->allocated_dst &= ~(1 << log_event_line);
1114 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1115 phy->allocated_dst = D40_ALLOC_FREE;
1116 }
1117
1118 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1119 D40_ALLOC_FREE);
1120
1121out:
1122 spin_unlock_irqrestore(&phy->lock, flags);
1123
1124 return is_free;
1125}
1126
1127static int d40_allocate_channel(struct d40_chan *d40c)
1128{
1129 int dev_type;
1130 int event_group;
1131 int event_line;
1132 struct d40_phy_res *phys;
1133 int i;
1134 int j;
1135 int log_num;
1136 bool is_src;
1137 bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
1138 == STEDMA40_CHANNEL_IN_LOG_MODE;
1139
1140
1141 phys = d40c->base->phy_res;
1142
1143 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1144 dev_type = d40c->dma_cfg.src_dev_type;
1145 log_num = 2 * dev_type;
1146 is_src = true;
1147 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1148 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1149 /* dst event lines are used for logical memcpy */
1150 dev_type = d40c->dma_cfg.dst_dev_type;
1151 log_num = 2 * dev_type + 1;
1152 is_src = false;
1153 } else
1154 return -EINVAL;
1155
1156 event_group = D40_TYPE_TO_GROUP(dev_type);
1157 event_line = D40_TYPE_TO_EVENT(dev_type);
1158
1159 if (!is_log) {
1160 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1161 /* Find physical half channel */
1162 for (i = 0; i < d40c->base->num_phy_chans; i++) {
1163
1164 if (d40_alloc_mask_set(&phys[i], is_src, 0))
1165 goto found_phy;
1166 }
1167 } else
1168 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1169 int phy_num = j + event_group * 2;
1170 for (i = phy_num; i < phy_num + 2; i++) {
1171 if (d40_alloc_mask_set(&phys[i],
1172 is_src, 0))
1173 goto found_phy;
1174 }
1175 }
1176 return -EINVAL;
1177found_phy:
1178 d40c->phy_chan = &phys[i];
1179 d40c->log_num = D40_PHY_CHAN;
1180 goto out;
1181 }
1182 if (dev_type == -1)
1183 return -EINVAL;
1184
1185 /* Find logical channel */
1186 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1187 int phy_num = j + event_group * 2;
1188 /*
1189		 * Spread logical channels across all available physical channels
1190		 * rather than packing every logical channel onto the first
1191		 * available phy channel.
1192 */
1193 if (is_src) {
1194 for (i = phy_num; i < phy_num + 2; i++) {
1195 if (d40_alloc_mask_set(&phys[i], is_src,
1196 event_line))
1197 goto found_log;
1198 }
1199 } else {
1200 for (i = phy_num + 1; i >= phy_num; i--) {
1201 if (d40_alloc_mask_set(&phys[i], is_src,
1202 event_line))
1203 goto found_log;
1204 }
1205 }
1206 }
1207 return -EINVAL;
1208
1209found_log:
1210 d40c->phy_chan = &phys[i];
1211 d40c->log_num = log_num;
1212out:
1213
1214 if (is_log)
1215 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1216 else
1217 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1218
1219 return 0;
1220
1221}
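/*
 * A note on the search above: with phy_num = j + event_group * 2, each
 * bank of eight physical channels reserves one adjacent pair per event
 * group, so a device's event line can only land on those two channels of
 * every bank; src allocations search the pair low-to-high and dst
 * allocations high-to-low to spread the load.
 */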
1222
1223static int d40_config_chan(struct d40_chan *d40c,
1224 struct stedma40_chan_cfg *info)
1225{
1226
1227 /* Fill in basic CFG register values */
1228 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1229 &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
1230
1231 if (d40c->log_num != D40_PHY_CHAN) {
1232 d40_log_cfg(&d40c->dma_cfg,
1233 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1234
1235 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1236 d40c->lcpa = d40c->base->lcpa_base +
1237 d40c->dma_cfg.src_dev_type * 32;
1238 else
1239 d40c->lcpa = d40c->base->lcpa_base +
1240 d40c->dma_cfg.dst_dev_type * 32 + 16;
1241 }
1242
1243 /* Write channel configuration to the DMA */
1244 return d40_config_write(d40c);
1245}
1246
1247static int d40_config_memcpy(struct d40_chan *d40c)
1248{
1249 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1250
1251 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1252 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
1253 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
1254 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
1255 memcpy[d40c->chan.chan_id];
1256
1257 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1258 dma_has_cap(DMA_SLAVE, cap)) {
1259 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1260 } else {
1261 dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
1262 __func__);
1263 return -EINVAL;
1264 }
1265
1266 return 0;
1267}
1268
1269
1270static int d40_free_dma(struct d40_chan *d40c)
1271{
1272
1273 int res = 0;
1274 u32 event, dir;
1275 struct d40_phy_res *phy = d40c->phy_chan;
1276 bool is_src;
1277
1278 /* Terminate all queued and active transfers */
1279 d40_term_all(d40c);
1280
1281 if (phy == NULL) {
1282 dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
1283 __func__);
1284 return -EINVAL;
1285 }
1286
1287 if (phy->allocated_src == D40_ALLOC_FREE &&
1288 phy->allocated_dst == D40_ALLOC_FREE) {
1289 dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
1290 __func__);
1291 return -EINVAL;
1292 }
1293
1294
1295 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1296 if (res) {
1297 dev_err(&d40c->chan.dev->device, "[%s] suspend\n",
1298 __func__);
1299 return res;
1300 }
1301
1302 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1303 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1304 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1305 dir = D40_CHAN_REG_SDLNK;
1306 is_src = false;
1307 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1308 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1309 dir = D40_CHAN_REG_SSLNK;
1310 is_src = true;
1311 } else {
1312 dev_err(&d40c->chan.dev->device,
1313 "[%s] Unknown direction\n", __func__);
1314 return -EINVAL;
1315 }
1316
1317 if (d40c->log_num != D40_PHY_CHAN) {
1318 /*
1319 * Release logical channel, deactivate the event line during
1320 * the time physical res is suspended.
1321 */
1322 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
1323 D40_EVENTLINE_MASK(event),
1324 d40c->base->virtbase + D40_DREG_PCBASE +
1325 phy->num * D40_DREG_PCDELTA + dir);
1326
1327 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1328
1329 /*
1330		 * Check if there are more logical allocations
1331		 * on this phy channel.
1332 */
1333 if (!d40_alloc_mask_free(phy, is_src, event)) {
1334 /* Resume the other logical channels if any */
1335 if (d40_chan_has_events(d40c)) {
1336 res = d40_channel_execute_command(d40c,
1337 D40_DMA_RUN);
1338 if (res) {
1339 dev_err(&d40c->chan.dev->device,
1340 "[%s] Executing RUN command\n",
1341 __func__);
1342 return res;
1343 }
1344 }
1345 return 0;
1346 }
1347 } else
1348 d40_alloc_mask_free(phy, is_src, 0);
1349
1350 /* Release physical channel */
1351 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1352 if (res) {
1353 dev_err(&d40c->chan.dev->device,
1354 "[%s] Failed to stop channel\n", __func__);
1355 return res;
1356 }
1357 d40c->phy_chan = NULL;
1358 /* Invalidate channel type */
1359 d40c->dma_cfg.channel_type = 0;
1360 d40c->base->lookup_phy_chans[phy->num] = NULL;
1361
1362 return 0;
1363
1364
1365}
1366
1367static int d40_pause(struct dma_chan *chan)
1368{
1369 struct d40_chan *d40c =
1370 container_of(chan, struct d40_chan, chan);
1371 int res;
1372
1373 unsigned long flags;
1374
1375 spin_lock_irqsave(&d40c->lock, flags);
1376
1377 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1378 if (res == 0) {
1379 if (d40c->log_num != D40_PHY_CHAN) {
1380 d40_config_set_event(d40c, false);
1381 /* Resume the other logical channels if any */
1382 if (d40_chan_has_events(d40c))
1383 res = d40_channel_execute_command(d40c,
1384 D40_DMA_RUN);
1385 }
1386 }
1387
1388 spin_unlock_irqrestore(&d40c->lock, flags);
1389 return res;
1390}
1391
1392static bool d40_tx_is_linked(struct d40_chan *d40c)
1393{
1394 bool is_link;
1395
1396 if (d40c->log_num != D40_PHY_CHAN)
1397 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1398 else
1399 is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
1400 d40c->phy_chan->num * D40_DREG_PCDELTA +
1401 D40_CHAN_REG_SDLNK) &
1402 D40_SREG_LNK_PHYS_LNK_MASK;
1403 return is_link;
1404}
1405
1406static u32 d40_residue(struct d40_chan *d40c)
1407{
1408 u32 num_elt;
1409
1410 if (d40c->log_num != D40_PHY_CHAN)
1411 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1412 >> D40_MEM_LCSP2_ECNT_POS;
1413 else
1414 num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
1415 d40c->phy_chan->num * D40_DREG_PCDELTA +
1416 D40_CHAN_REG_SDELT) &
1417 D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS;
1418 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
1419}
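/*
 * Worked example for the residue above, assuming data_width encodes log2
 * of the element size: 16 elements left (ECNT) at 32-bit width
 * (data_width == 2) gives 16 * (1 << 2) = 64 bytes remaining.
 */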
1420
1421static int d40_resume(struct dma_chan *chan)
1422{
1423 struct d40_chan *d40c =
1424 container_of(chan, struct d40_chan, chan);
1425 int res = 0;
1426 unsigned long flags;
1427
1428 spin_lock_irqsave(&d40c->lock, flags);
1429
1430 if (d40c->log_num != D40_PHY_CHAN) {
1431 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1432 if (res)
1433 goto out;
1434
1435		/* If there are bytes left to transfer or a linked tx, resume the job */
1436 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
1437 d40_config_set_event(d40c, true);
1438 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1439 }
1440 } else if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1441 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1442
1443out:
1444 spin_unlock_irqrestore(&d40c->lock, flags);
1445 return res;
1446}
1447
1448static u32 stedma40_residue(struct dma_chan *chan)
1449{
1450 struct d40_chan *d40c =
1451 container_of(chan, struct d40_chan, chan);
1452 u32 bytes_left;
1453 unsigned long flags;
1454
1455 spin_lock_irqsave(&d40c->lock, flags);
1456 bytes_left = d40_residue(d40c);
1457 spin_unlock_irqrestore(&d40c->lock, flags);
1458
1459 return bytes_left;
1460}
1461
1462/* Public DMA functions in addition to the DMA engine framework */
1463
1464int stedma40_set_psize(struct dma_chan *chan,
1465 int src_psize,
1466 int dst_psize)
1467{
1468 struct d40_chan *d40c =
1469 container_of(chan, struct d40_chan, chan);
1470 unsigned long flags;
1471
1472 spin_lock_irqsave(&d40c->lock, flags);
1473
1474 if (d40c->log_num != D40_PHY_CHAN) {
1475 d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1476 d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1477 d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
1478 d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
1479 goto out;
1480 }
1481
1482 if (src_psize == STEDMA40_PSIZE_PHY_1)
1483 d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1484 else {
1485 d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1486 d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1487 D40_SREG_CFG_PSIZE_POS);
1488 d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
1489 }
1490
1491 if (dst_psize == STEDMA40_PSIZE_PHY_1)
1492 d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1493 else {
1494 d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1495 d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1496 D40_SREG_CFG_PSIZE_POS);
1497 d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
1498 }
1499out:
1500 spin_unlock_irqrestore(&d40c->lock, flags);
1501 return 0;
1502}
1503EXPORT_SYMBOL(stedma40_set_psize);
1504
1505struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1506 struct scatterlist *sgl_dst,
1507 struct scatterlist *sgl_src,
1508 unsigned int sgl_len,
1509 unsigned long flags)
1510{
1511 int res;
1512 struct d40_desc *d40d;
1513 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1514 chan);
1515 unsigned long flg;
1516 int lli_max = d40c->base->plat_data->llis_per_log;
1517
1518
1519 spin_lock_irqsave(&d40c->lock, flg);
1520 d40d = d40_desc_get(d40c);
1521
1522 if (d40d == NULL)
1523 goto err;
1524
1525 memset(d40d, 0, sizeof(struct d40_desc));
1526 d40d->lli_len = sgl_len;
1527
1528 d40d->txd.flags = flags;
1529
1530 if (d40c->log_num != D40_PHY_CHAN) {
1531 if (sgl_len > 1)
1532 /*
1533 * Check if there is space available in lcla. If not,
1534 * split list into 1-length and run only in lcpa
1535 * space.
1536 */
1537 if (d40_lcla_id_get(d40c,
1538 &d40c->base->lcla_pool) != 0)
1539 lli_max = 1;
1540
1541 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
1542 dev_err(&d40c->chan.dev->device,
1543 "[%s] Out of memory\n", __func__);
1544 goto err;
1545 }
1546
1547 (void) d40_log_sg_to_lli(d40c->lcla.src_id,
1548 sgl_src,
1549 sgl_len,
1550 d40d->lli_log.src,
1551 d40c->log_def.lcsp1,
1552 d40c->dma_cfg.src_info.data_width,
1553 flags & DMA_PREP_INTERRUPT, lli_max,
1554 d40c->base->plat_data->llis_per_log);
1555
1556 (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
1557 sgl_dst,
1558 sgl_len,
1559 d40d->lli_log.dst,
1560 d40c->log_def.lcsp3,
1561 d40c->dma_cfg.dst_info.data_width,
1562 flags & DMA_PREP_INTERRUPT, lli_max,
1563 d40c->base->plat_data->llis_per_log);
1564
1565
1566 } else {
1567 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1568 dev_err(&d40c->chan.dev->device,
1569 "[%s] Out of memory\n", __func__);
1570 goto err;
1571 }
1572
1573 res = d40_phy_sg_to_lli(sgl_src,
1574 sgl_len,
1575 0,
1576 d40d->lli_phy.src,
1577 d40d->lli_phy.src_addr,
1578 d40c->src_def_cfg,
1579 d40c->dma_cfg.src_info.data_width,
1580 d40c->dma_cfg.src_info.psize,
1581 true);
1582
1583 if (res < 0)
1584 goto err;
1585
1586 res = d40_phy_sg_to_lli(sgl_dst,
1587 sgl_len,
1588 0,
1589 d40d->lli_phy.dst,
1590 d40d->lli_phy.dst_addr,
1591 d40c->dst_def_cfg,
1592 d40c->dma_cfg.dst_info.data_width,
1593 d40c->dma_cfg.dst_info.psize,
1594 true);
1595
1596 if (res < 0)
1597 goto err;
1598
1599 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1600 d40d->lli_pool.size, DMA_TO_DEVICE);
1601 }
1602
1603 dma_async_tx_descriptor_init(&d40d->txd, chan);
1604
1605 d40d->txd.tx_submit = d40_tx_submit;
1606
1607 spin_unlock_irqrestore(&d40c->lock, flg);
1608
1609 return &d40d->txd;
1610err:
1611 spin_unlock_irqrestore(&d40c->lock, flg);
1612 return NULL;
1613}
1614EXPORT_SYMBOL(stedma40_memcpy_sg);
1615
1616bool stedma40_filter(struct dma_chan *chan, void *data)
1617{
1618 struct stedma40_chan_cfg *info = data;
1619 struct d40_chan *d40c =
1620 container_of(chan, struct d40_chan, chan);
1621 int err;
1622
1623 if (data) {
1624 err = d40_validate_conf(d40c, info);
1625 if (!err)
1626 d40c->dma_cfg = *info;
1627 } else
1628 err = d40_config_memcpy(d40c);
1629
1630 return err == 0;
1631}
1632EXPORT_SYMBOL(stedma40_filter);
1633
1634/* DMA ENGINE functions */
1635static int d40_alloc_chan_resources(struct dma_chan *chan)
1636{
1637 int err;
1638 unsigned long flags;
1639 struct d40_chan *d40c =
1640 container_of(chan, struct d40_chan, chan);
1641
1642 spin_lock_irqsave(&d40c->lock, flags);
1643
1644 d40c->completed = chan->cookie = 1;
1645
1646 /*
1647 * If no dma configuration is set (channel_type == 0)
1648 * use default configuration
1649 */
1650 if (d40c->dma_cfg.channel_type == 0) {
1651 err = d40_config_memcpy(d40c);
1652 if (err)
1653 goto err_alloc;
1654 }
1655
1656 err = d40_allocate_channel(d40c);
1657 if (err) {
1658 dev_err(&d40c->chan.dev->device,
1659 "[%s] Failed to allocate channel\n", __func__);
1660 goto err_alloc;
1661 }
1662
1663 err = d40_config_chan(d40c, &d40c->dma_cfg);
1664 if (err) {
1665 dev_err(&d40c->chan.dev->device,
1666 "[%s] Failed to configure channel\n",
1667 __func__);
1668 goto err_config;
1669 }
1670
1671 spin_unlock_irqrestore(&d40c->lock, flags);
1672 return 0;
1673
1674 err_config:
1675 (void) d40_free_dma(d40c);
1676 err_alloc:
1677 spin_unlock_irqrestore(&d40c->lock, flags);
1678 dev_err(&d40c->chan.dev->device,
1679 "[%s] Channel allocation failed\n", __func__);
1680 return -EINVAL;
1681}
1682
1683static void d40_free_chan_resources(struct dma_chan *chan)
1684{
1685 struct d40_chan *d40c =
1686 container_of(chan, struct d40_chan, chan);
1687 int err;
1688 unsigned long flags;
1689
1690 spin_lock_irqsave(&d40c->lock, flags);
1691
1692 err = d40_free_dma(d40c);
1693
1694 if (err)
1695 dev_err(&d40c->chan.dev->device,
1696 "[%s] Failed to free channel\n", __func__);
1697 spin_unlock_irqrestore(&d40c->lock, flags);
1698}
1699
1700static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1701 dma_addr_t dst,
1702 dma_addr_t src,
1703 size_t size,
1704 unsigned long flags)
1705{
1706 struct d40_desc *d40d;
1707 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1708 chan);
1709 unsigned long flg;
1710 int err = 0;
1711
1712 spin_lock_irqsave(&d40c->lock, flg);
1713 d40d = d40_desc_get(d40c);
1714
1715 if (d40d == NULL) {
1716 dev_err(&d40c->chan.dev->device,
1717 "[%s] Descriptor is NULL\n", __func__);
1718 goto err;
1719 }
1720
1721 memset(d40d, 0, sizeof(struct d40_desc));
1722
1723 d40d->txd.flags = flags;
1724
1725 dma_async_tx_descriptor_init(&d40d->txd, chan);
1726
1727 d40d->txd.tx_submit = d40_tx_submit;
1728
1729 if (d40c->log_num != D40_PHY_CHAN) {
1730
1731 if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
1732 dev_err(&d40c->chan.dev->device,
1733 "[%s] Out of memory\n", __func__);
1734 goto err;
1735 }
1736 d40d->lli_len = 1;
1737
1738 d40_log_fill_lli(d40d->lli_log.src,
1739 src,
1740 size,
1741 0,
1742 d40c->log_def.lcsp1,
1743 d40c->dma_cfg.src_info.data_width,
1744 true, true);
1745
1746 d40_log_fill_lli(d40d->lli_log.dst,
1747 dst,
1748 size,
1749 0,
1750 d40c->log_def.lcsp3,
1751 d40c->dma_cfg.dst_info.data_width,
1752 true, true);
1753
1754 } else {
1755
1756 if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
1757 dev_err(&d40c->chan.dev->device,
1758 "[%s] Out of memory\n", __func__);
1759 goto err;
1760 }
1761
1762 err = d40_phy_fill_lli(d40d->lli_phy.src,
1763 src,
1764 size,
1765 d40c->dma_cfg.src_info.psize,
1766 0,
1767 d40c->src_def_cfg,
1768 true,
1769 d40c->dma_cfg.src_info.data_width,
1770 false);
1771 if (err)
1772 goto err_fill_lli;
1773
1774 err = d40_phy_fill_lli(d40d->lli_phy.dst,
1775 dst,
1776 size,
1777 d40c->dma_cfg.dst_info.psize,
1778 0,
1779 d40c->dst_def_cfg,
1780 true,
1781 d40c->dma_cfg.dst_info.data_width,
1782 false);
1783
1784 if (err)
1785 goto err_fill_lli;
1786
1787 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1788 d40d->lli_pool.size, DMA_TO_DEVICE);
1789 }
1790
1791 spin_unlock_irqrestore(&d40c->lock, flg);
1792 return &d40d->txd;
1793
1794err_fill_lli:
1795 dev_err(&d40c->chan.dev->device,
1796 "[%s] Failed filling in PHY LLI\n", __func__);
1797 d40_pool_lli_free(d40d);
1798err:
1799 spin_unlock_irqrestore(&d40c->lock, flg);
1800 return NULL;
1801}
1802
1803static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1804 struct d40_chan *d40c,
1805 struct scatterlist *sgl,
1806 unsigned int sg_len,
1807 enum dma_data_direction direction,
1808 unsigned long flags)
1809{
1810 dma_addr_t dev_addr = 0;
1811 int total_size;
1812 int lli_max = d40c->base->plat_data->llis_per_log;
1813
1814 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
1815 dev_err(&d40c->chan.dev->device,
1816 "[%s] Out of memory\n", __func__);
1817 return -ENOMEM;
1818 }
1819
1820 d40d->lli_len = sg_len;
1821 d40d->lli_tcount = 0;
1822
1823 if (sg_len > 1)
1824 /*
1825 * Check if there is space available in lcla.
1826 * If not, split list into 1-length and run only
1827 * in lcpa space.
1828 */
1829 if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
1830 lli_max = 1;
1831
1832 if (direction == DMA_FROM_DEVICE) {
1833 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1834 total_size = d40_log_sg_to_dev(&d40c->lcla,
1835 sgl, sg_len,
1836 &d40d->lli_log,
1837 &d40c->log_def,
1838 d40c->dma_cfg.src_info.data_width,
1839 d40c->dma_cfg.dst_info.data_width,
1840 direction,
1841 flags & DMA_PREP_INTERRUPT,
1842 dev_addr, lli_max,
1843 d40c->base->plat_data->llis_per_log);
1844 } else if (direction == DMA_TO_DEVICE) {
1845 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1846 total_size = d40_log_sg_to_dev(&d40c->lcla,
1847 sgl, sg_len,
1848 &d40d->lli_log,
1849 &d40c->log_def,
1850 d40c->dma_cfg.src_info.data_width,
1851 d40c->dma_cfg.dst_info.data_width,
1852 direction,
1853 flags & DMA_PREP_INTERRUPT,
1854 dev_addr, lli_max,
1855 d40c->base->plat_data->llis_per_log);
1856 } else
1857 return -EINVAL;
1858 if (total_size < 0)
1859 return -EINVAL;
1860
1861 return 0;
1862}
1863
1864static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1865 struct d40_chan *d40c,
1866 struct scatterlist *sgl,
1867 unsigned int sgl_len,
1868 enum dma_data_direction direction,
1869 unsigned long flags)
1870{
1871 dma_addr_t src_dev_addr;
1872 dma_addr_t dst_dev_addr;
1873 int res;
1874
1875 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1876 dev_err(&d40c->chan.dev->device,
1877 "[%s] Out of memory\n", __func__);
1878 return -ENOMEM;
1879 }
1880
1881 d40d->lli_len = sgl_len;
1882 d40d->lli_tcount = 0;
1883
1884 if (direction == DMA_FROM_DEVICE) {
1885 dst_dev_addr = 0;
1886 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1887 } else if (direction == DMA_TO_DEVICE) {
1888 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1889 src_dev_addr = 0;
1890 } else
1891 return -EINVAL;
1892
1893 res = d40_phy_sg_to_lli(sgl,
1894 sgl_len,
1895 src_dev_addr,
1896 d40d->lli_phy.src,
1897 d40d->lli_phy.src_addr,
1898 d40c->src_def_cfg,
1899 d40c->dma_cfg.src_info.data_width,
1900 d40c->dma_cfg.src_info.psize,
1901 true);
1902 if (res < 0)
1903 return res;
1904
1905 res = d40_phy_sg_to_lli(sgl,
1906 sgl_len,
1907 dst_dev_addr,
1908 d40d->lli_phy.dst,
1909 d40d->lli_phy.dst_addr,
1910 d40c->dst_def_cfg,
1911 d40c->dma_cfg.dst_info.data_width,
1912 d40c->dma_cfg.dst_info.psize,
1913 true);
1914 if (res < 0)
1915 return res;
1916
1917 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1918 d40d->lli_pool.size, DMA_TO_DEVICE);
1919 return 0;
1920}
1921
1922static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
1923 struct scatterlist *sgl,
1924 unsigned int sg_len,
1925 enum dma_data_direction direction,
1926 unsigned long flags)
1927{
1928 struct d40_desc *d40d;
1929 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1930 chan);
1931 unsigned long flg;
1932 int err;
1933
1934 if (d40c->dma_cfg.pre_transfer)
1935 d40c->dma_cfg.pre_transfer(chan,
1936 d40c->dma_cfg.pre_transfer_data,
1937 sg_dma_len(sgl));
1938
1939 spin_lock_irqsave(&d40c->lock, flg);
1940 d40d = d40_desc_get(d40c);
1941 spin_unlock_irqrestore(&d40c->lock, flg);
1942
1943 if (d40d == NULL)
1944 return NULL;
1945
1946 memset(d40d, 0, sizeof(struct d40_desc));
1947
1948 if (d40c->log_num != D40_PHY_CHAN)
1949 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
1950 direction, flags);
1951 else
1952 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
1953 direction, flags);
1954 if (err) {
1955 dev_err(&d40c->chan.dev->device,
1956 "[%s] Failed to prepare %s slave sg job: %d\n",
1957 __func__,
1958 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
1959 return NULL;
1960 }
1961
1962 d40d->txd.flags = flags;
1963
1964 dma_async_tx_descriptor_init(&d40d->txd, chan);
1965
1966 d40d->txd.tx_submit = d40_tx_submit;
1967
1968 return &d40d->txd;
1969}
1970
1971static enum dma_status d40_tx_status(struct dma_chan *chan,
1972 dma_cookie_t cookie,
1973 struct dma_tx_state *txstate)
1974{
1975 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1976 dma_cookie_t last_used;
1977 dma_cookie_t last_complete;
1978 int ret;
1979
1980 last_complete = d40c->completed;
1981 last_used = chan->cookie;
1982
1983 ret = dma_async_is_complete(cookie, last_complete, last_used);
1984
1985 if (txstate) {
1986 txstate->last = last_complete;
1987 txstate->used = last_used;
1988 txstate->residue = stedma40_residue(chan);
1989 }
1990
1991 return ret;
1992}
1993
1994static void d40_issue_pending(struct dma_chan *chan)
1995{
1996 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1997 unsigned long flags;
1998
1999 spin_lock_irqsave(&d40c->lock, flags);
2000
2001 /* Busy means that pending jobs are already being processed */
2002 if (!d40c->busy)
2003 (void) d40_queue_start(d40c);
2004
2005 spin_unlock_irqrestore(&d40c->lock, flags);
2006}
2007
2008static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
2009{
2010 unsigned long flags;
2011 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2012
2013 switch (cmd) {
2014 case DMA_TERMINATE_ALL:
2015 spin_lock_irqsave(&d40c->lock, flags);
2016 d40_term_all(d40c);
2017 spin_unlock_irqrestore(&d40c->lock, flags);
2018 return 0;
2019 case DMA_PAUSE:
2020 return d40_pause(chan);
2021 case DMA_RESUME:
2022 return d40_resume(chan);
2023 }
2024
2025 /* Other commands are unimplemented */
2026 return -ENXIO;
2027}
2028
2029/* Initialization functions */
2030
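/*
 * d40_chan_init - set up num_chans channels starting at offset and put
 * them on the channel list of the given dma_device.
 */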
2031static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2032 struct d40_chan *chans, int offset,
2033 int num_chans)
2034{
2035 int i = 0;
2036 struct d40_chan *d40c;
2037
2038 INIT_LIST_HEAD(&dma->channels);
2039
2040 for (i = offset; i < offset + num_chans; i++) {
2041 d40c = &chans[i];
2042 d40c->base = base;
2043 d40c->chan.device = dma;
2044
2045 /* Invalidate lcla element */
2046 d40c->lcla.src_id = -1;
2047 d40c->lcla.dst_id = -1;
2048
2049 spin_lock_init(&d40c->lock);
2050
2051 d40c->log_num = D40_PHY_CHAN;
2052
2053 INIT_LIST_HEAD(&d40c->free);
2054 INIT_LIST_HEAD(&d40c->active);
2055 INIT_LIST_HEAD(&d40c->queue);
2056 INIT_LIST_HEAD(&d40c->client);
2057
2058 d40c->free_len = 0;
2059
2060 tasklet_init(&d40c->tasklet, dma_tasklet,
2061 (unsigned long) d40c);
2062
2063 list_add_tail(&d40c->chan.device_node,
2064 &dma->channels);
2065 }
2066}
2067
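/*
 * Three dma_device instances are registered: dma_slave (logical
 * channels doing slave transfers), dma_memcpy (logical channels
 * reserved for memcpy) and dma_both (physical channels capable of
 * either job).
 */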
2068static int __init d40_dmaengine_init(struct d40_base *base,
2069 int num_reserved_chans)
2070{
2071 int err;
2072
2073 d40_chan_init(base, &base->dma_slave, base->log_chans,
2074 0, base->num_log_chans);
2075
2076 dma_cap_zero(base->dma_slave.cap_mask);
2077 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2078
2079 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2080 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2081 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2082 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2083 base->dma_slave.device_tx_status = d40_tx_status;
2084 base->dma_slave.device_issue_pending = d40_issue_pending;
2085 base->dma_slave.device_control = d40_control;
2086 base->dma_slave.dev = base->dev;
2087
2088 err = dma_async_device_register(&base->dma_slave);
2089
2090 if (err) {
2091 dev_err(base->dev,
2092 "[%s] Failed to register slave channels\n",
2093 __func__);
2094 goto failure1;
2095 }
2096
2097 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2098 base->num_log_chans, base->plat_data->memcpy_len);
2099
2100 dma_cap_zero(base->dma_memcpy.cap_mask);
2101 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2102
2103 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
2104 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2105 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2106 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2107 base->dma_memcpy.device_tx_status = d40_tx_status;
2108 base->dma_memcpy.device_issue_pending = d40_issue_pending;
2109 base->dma_memcpy.device_control = d40_control;
2110 base->dma_memcpy.dev = base->dev;
2111 /*
2112 * This controller can only access addresses on even
2113 * 32-bit boundaries, i.e. with an alignment of 2^2 bytes.
2114 */
2115 base->dma_memcpy.copy_align = 2;
2116
2117 err = dma_async_device_register(&base->dma_memcpy);
2118
2119 if (err) {
2120 dev_err(base->dev,
2121 "[%s] Failed to register memcpy-only channels\n",
2122 __func__);
2123 goto failure2;
2124 }
2125
2126 d40_chan_init(base, &base->dma_both, base->phy_chans,
2127 0, num_reserved_chans);
2128
2129 dma_cap_zero(base->dma_both.cap_mask);
2130 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2131 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2132
2133 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
2134 base->dma_both.device_free_chan_resources = d40_free_chan_resources;
2135 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2136 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2137 base->dma_both.device_tx_status = d40_tx_status;
2138 base->dma_both.device_issue_pending = d40_issue_pending;
2139 base->dma_both.device_control = d40_control;
2140 base->dma_both.dev = base->dev;
2141 base->dma_both.copy_align = 2;
2142 err = dma_async_device_register(&base->dma_both);
2143
2144 if (err) {
2145 dev_err(base->dev,
2146 "[%s] Failed to register logical and physical capable channels\n",
2147 __func__);
2148 goto failure3;
2149 }
2150 return 0;
2151failure3:
2152 dma_async_device_unregister(&base->dma_memcpy);
2153failure2:
2154 dma_async_device_unregister(&base->dma_slave);
2155failure1:
2156 return err;
2157}
2158
2159/* Hardware detection and initialization functions */
2160
2161static int __init d40_phy_res_init(struct d40_base *base)
2162{
2163 int i;
2164 int num_phy_chans_avail = 0;
2165 u32 val[2];
2166 int odd_even_bit = -2;
2167
2168 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2169 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2170
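/*
 * PRSME/PRSMO hold a 2-bit field per physical channel: even-numbered
 * channels in PRSME, odd-numbered ones in PRSMO. odd_even_bit advances
 * by two every other iteration to track the field position, and a
 * field value of 1 marks a channel reserved for secure mode.
 */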
2171 for (i = 0; i < base->num_phy_chans; i++) {
2172 base->phy_res[i].num = i;
2173 odd_even_bit += 2 * ((i % 2) == 0);
2174 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2175 /* Mark security only channels as occupied */
2176 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2177 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2178 } else {
2179 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2180 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2181 num_phy_chans_avail++;
2182 }
2183 spin_lock_init(&base->phy_res[i].lock);
2184 }
2185 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2186 num_phy_chans_avail, base->num_phy_chans);
2187
2188 /* Verify channel mode: free channels are expected to be standard, not extended */
2189 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2190
2191 for (i = 0; i < base->num_phy_chans; i++) {
2192
2193 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2194 (val[0] & 0x3) != 1)
2195 dev_info(base->dev,
2196 "[%s] INFO: channel %d is misconfigured (%d)\n",
2197 __func__, i, val[0] & 0x3);
2198
2199 val[0] = val[0] >> 2;
2200 }
2201
2202 return num_phy_chans_avail;
2203}
2204
2205static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2206{
2207 static const struct d40_reg_val dma_id_regs[] = {
2208 /* Peripheral Id */
2209 { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2210 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2211 /*
2212 * D40_DREG_PERIPHID2 Depends on HW revision:
2213 * MOP500/HREF ED has 0x0008,
2214 * ? has 0x0018,
2215 * HREF V1 has 0x0028
2216 */
2217 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2218
2219 /* PCell Id */
2220 { .reg = D40_DREG_CELLID0, .val = 0x000d},
2221 { .reg = D40_DREG_CELLID1, .val = 0x00f0},
2222 { .reg = D40_DREG_CELLID2, .val = 0x0005},
2223 { .reg = D40_DREG_CELLID3, .val = 0x00b1}
2224 };
2225 struct stedma40_platform_data *plat_data;
2226 struct clk *clk = NULL;
2227 void __iomem *virtbase = NULL;
2228 struct resource *res = NULL;
2229 struct d40_base *base = NULL;
2230 int num_log_chans = 0;
2231 int num_phy_chans;
2232 int i;
2233
2234 clk = clk_get(&pdev->dev, NULL);
2235
2236 if (IS_ERR(clk)) {
2237 dev_err(&pdev->dev, "[%s] No matching clock found\n",
2238 __func__);
2239 goto failure;
2240 }
2241
2242 clk_enable(clk);
2243
2244 /* Get IO for DMAC base address */
2245 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2246 if (!res)
2247 goto failure;
2248
2249 if (request_mem_region(res->start, resource_size(res),
2250 D40_NAME " I/O base") == NULL)
2251 goto failure;
2252
2253 virtbase = ioremap(res->start, resource_size(res));
2254 if (!virtbase)
2255 goto failure;
2256
2257 /* HW version check */
2258 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2259 if (dma_id_regs[i].val !=
2260 readl(virtbase + dma_id_regs[i].reg)) {
2261 dev_err(&pdev->dev,
2262 "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2263 __func__,
2264 dma_id_regs[i].val,
2265 dma_id_regs[i].reg,
2266 readl(virtbase + dma_id_regs[i].reg));
2267 goto failure;
2268 }
2269 }
2270
2271 i = readl(virtbase + D40_DREG_PERIPHID2);
2272
2273 if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
2274 dev_err(&pdev->dev,
2275 "[%s] Unknown designer! Got %x wanted %x\n",
2276 __func__, i & 0xf, D40_PERIPHID2_DESIGNER);
2277 goto failure;
2278 }
2279
2280 /* The number of physical channels on this HW */
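/* ICFG[2:0] encodes the count in steps of four: 4 * n + 4, i.e. 4..32 */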
2281 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
2282
2283 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2284 (i >> 4) & 0xf, res->start);
2285
2286 plat_data = pdev->dev.platform_data;
2287
2288 /* Count the number of logical channels in use */
2289 for (i = 0; i < plat_data->dev_len; i++)
2290 if (plat_data->dev_rx[i] != 0)
2291 num_log_chans++;
2292
2293 for (i = 0; i < plat_data->dev_len; i++)
2294 if (plat_data->dev_tx[i] != 0)
2295 num_log_chans++;
2296
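/*
 * Allocate struct d40_base and all channel descriptors in one chunk;
 * the physical and logical channel arrays are carved out right after
 * the (aligned) base structure.
 */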
2297 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2298 (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2299 sizeof(struct d40_chan), GFP_KERNEL);
2300
2301 if (base == NULL) {
2302 dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
2303 goto failure;
2304 }
2305
2306 base->clk = clk;
2307 base->num_phy_chans = num_phy_chans;
2308 base->num_log_chans = num_log_chans;
2309 base->phy_start = res->start;
2310 base->phy_size = resource_size(res);
2311 base->virtbase = virtbase;
2312 base->plat_data = plat_data;
2313 base->dev = &pdev->dev;
2314 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2315 base->log_chans = &base->phy_chans[num_phy_chans];
2316
2317 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2318 GFP_KERNEL);
2319 if (!base->phy_res)
2320 goto failure;
2321
2322 base->lookup_phy_chans = kzalloc(num_phy_chans *
2323 sizeof(struct d40_chan *),
2324 GFP_KERNEL);
2325 if (!base->lookup_phy_chans)
2326 goto failure;
2327
2328 if (num_log_chans + plat_data->memcpy_len) {
2329 /*
2330 * The maximum number of logical channels equals the number of
2331 * event lines: one per src device plus one per dst device.
2332 */
2333 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2334 sizeof(struct d40_chan *),
2335 GFP_KERNEL);
2336 if (!base->lookup_log_chans)
2337 goto failure;
2338 }
2339 base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
2340 GFP_KERNEL);
2341 if (!base->lcla_pool.alloc_map)
2342 goto failure;
2343
2344 return base;
2345
2346failure:
2347 if (clk) {
2348 clk_disable(clk);
2349 clk_put(clk);
2350 }
2351 if (virtbase)
2352 iounmap(virtbase);
2353 if (res)
2354 release_mem_region(res->start,
2355 resource_size(res));
2358
2359 if (base) {
2360 kfree(base->lcla_pool.alloc_map);
2361 kfree(base->lookup_log_chans);
2362 kfree(base->lookup_phy_chans);
2363 kfree(base->phy_res);
2364 kfree(base);
2365 }
2366
2367 return NULL;
2368}
2369
2370static void __init d40_hw_init(struct d40_base *base)
2371{
2373 static const struct d40_reg_val dma_init_reg[] = {
2374 /* Clock every part of the DMA block from start */
2375 { .reg = D40_DREG_GCC, .val = 0x0000ff01},
2376
2377 /* Interrupts on all logical channels */
2378 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2379 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2380 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2381 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2382 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2383 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2384 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2385 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2386 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2387 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2388 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2389 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2390 };
2391 int i;
2392 u32 prmseo[2] = {0, 0};
2393 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2394 u32 pcmis = 0;
2395 u32 pcicr = 0;
2396
2397 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2398 writel(dma_init_reg[i].val,
2399 base->virtbase + dma_init_reg[i].reg);
2400
2401 /* Configure all our dma channels to default settings */
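/*
 * Note that the channels are walked from the highest number down, so
 * that when the loop finishes the low bits of each register correspond
 * to the low-numbered channels.
 */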
2402 for (i = 0; i < base->num_phy_chans; i++) {
2403
2404 activeo[i % 2] = activeo[i % 2] << 2;
2405
2406 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2407 == D40_ALLOC_PHY) {
2408 activeo[i % 2] |= 3;
2409 continue;
2410 }
2411
2412 /* Enable the interrupt for this channel */
2413 pcmis = (pcmis << 1) | 1;
2414
2415 /* Clear the interrupt for this channel */
2416 pcicr = (pcicr << 1) | 1;
2417
2418 /* Set channel to physical mode */
2419 prmseo[i % 2] = prmseo[i % 2] << 2;
2420 prmseo[i % 2] |= 1;
2421
2423
2424 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2425 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2426 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2427 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2428
2429 /* Write which interrupt to enable */
2430 writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2431
2432 /* Write which interrupt to clear */
2433 writel(pcicr, base->virtbase + D40_DREG_PCICR);
2435}
2436
2437static int __init d40_probe(struct platform_device *pdev)
2438{
2439 int err;
2440 int ret = -ENOENT;
2441 struct d40_base *base;
2442 struct resource *res = NULL;
2443 int num_reserved_chans;
2444 u32 val;
2445
2446 base = d40_hw_detect_init(pdev);
2447
2448 if (!base)
2449 goto failure;
2450
2451 num_reserved_chans = d40_phy_res_init(base);
2452
2453 platform_set_drvdata(pdev, base);
2454
2455 spin_lock_init(&base->interrupt_lock);
2456 spin_lock_init(&base->execmd_lock);
2457
2458 /* Get IO for logical channel parameter address */
2459 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2460 if (!res) {
2461 ret = -ENOENT;
2462 dev_err(&pdev->dev,
2463 "[%s] No \"lcpa\" memory resource\n",
2464 __func__);
2465 goto failure;
2466 }
2467 base->lcpa_size = resource_size(res);
2468 base->phy_lcpa = res->start;
2469
2470 if (request_mem_region(res->start, resource_size(res),
2471 D40_NAME " I/O lcpa") == NULL) {
2472 ret = -EBUSY;
2473 dev_err(&pdev->dev,
2474 "[%s] Failed to request LCPA region 0x%x-0x%x\n",
2475 __func__, res->start, res->end);
2476 goto failure;
2477 }
2478
2479 /* The LCPA resides in ESRAM; keep any base address already set up by boot code. */
2480 val = readl(base->virtbase + D40_DREG_LCPA);
2481 if (res->start != val && val != 0) {
2482 dev_warn(&pdev->dev,
2483 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2484 __func__, val, res->start);
2485 } else
2486 writel(res->start, base->virtbase + D40_DREG_LCPA);
2487
2488 base->lcpa_base = ioremap(res->start, resource_size(res));
2489 if (!base->lcpa_base) {
2490 ret = -ENOMEM;
2491 dev_err(&pdev->dev,
2492 "[%s] Failed to ioremap LCPA region\n",
2493 __func__);
2494 goto failure;
2495 }
2496 /* Get IO for logical channel link address */
2497 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla");
2498 if (!res) {
2499 ret = -ENOENT;
2500 dev_err(&pdev->dev,
2501 "[%s] No \"lcla\" resource defined\n",
2502 __func__);
2503 goto failure;
2504 }
2505
2506 base->lcla_pool.base_size = resource_size(res);
2507 base->lcla_pool.phy = res->start;
2508
2509 if (request_mem_region(res->start, resource_size(res),
2510 D40_NAME " I/O lcla") == NULL) {
2511 ret = -EBUSY;
2512 dev_err(&pdev->dev,
2513 "[%s] Failed to request LCLA region 0x%x-0x%x\n",
2514 __func__, res->start, res->end);
2515 goto failure;
2516 }
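/* As with the LCPA: keep any LCLA base address already set up by boot code */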
2517 val = readl(base->virtbase + D40_DREG_LCLA);
2518 if (res->start != val && val != 0) {
2519 dev_warn(&pdev->dev,
2520 "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n",
2521 __func__, val, res->start);
2522 } else
2523 writel(res->start, base->virtbase + D40_DREG_LCLA);
2524
2525 base->lcla_pool.base = ioremap(res->start, resource_size(res));
2526 if (!base->lcla_pool.base) {
2527 ret = -ENOMEM;
2528 dev_err(&pdev->dev,
2529 "[%s] Failed to ioremap LCLA 0x%x-0x%x\n",
2530 __func__, res->start, res->end);
2531 goto failure;
2532 }
2533
2534 spin_lock_init(&base->lcla_pool.lock);
2535
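/* One LCLA block is reserved per physical channel */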
2536 base->lcla_pool.num_blocks = base->num_phy_chans;
2537
2538 base->irq = platform_get_irq(pdev, 0);
2539
2540 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2541
2542 if (ret) {
2543 dev_err(&pdev->dev, "[%s] Failed to request IRQ\n", __func__);
2544 goto failure;
2545 }
2546
2547 err = d40_dmaengine_init(base, num_reserved_chans);
2548 if (err)
2549 goto failure;
2550
2551 d40_hw_init(base);
2552
2553 dev_info(base->dev, "initialized\n");
2554 return 0;
2555
2556failure:
2557 if (base) {
2558 if (base->virtbase)
2559 iounmap(base->virtbase);
2560 if (base->lcla_pool.phy)
2561 release_mem_region(base->lcla_pool.phy,
2562 base->lcla_pool.base_size);
2563 if (base->phy_lcpa)
2564 release_mem_region(base->phy_lcpa,
2565 base->lcpa_size);
2566 if (base->phy_start)
2567 release_mem_region(base->phy_start,
2568 base->phy_size);
2569 if (base->clk) {
2570 clk_disable(base->clk);
2571 clk_put(base->clk);
2572 }
2573
2574 kfree(base->lcla_pool.alloc_map);
2575 kfree(base->lookup_log_chans);
2576 kfree(base->lookup_phy_chans);
2577 kfree(base->phy_res);
2578 kfree(base);
2579 }
2580
2581 dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
2582 return ret;
2583}
2584
2585static struct platform_driver d40_driver = {
2586 .driver = {
2587 .owner = THIS_MODULE,
2588 .name = D40_NAME,
2589 },
2590};
2591
2592int __init stedma40_init(void)
2593{
2594 return platform_driver_probe(&d40_driver, d40_probe);
2595}
2596arch_initcall(stedma40_init);