Diffstat (limited to 'drivers/dma/ste_dma40.c')
-rw-r--r-- | drivers/dma/ste_dma40.c | 860
1 files changed, 532 insertions, 328 deletions
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index c426829f6ab8..17e2600a00cf 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -30,14 +30,16 @@ | |||
30 | /* Maximum iterations taken before giving up suspending a channel */ | 30 | /* Maximum iterations taken before giving up suspending a channel */ |
31 | #define D40_SUSPEND_MAX_IT 500 | 31 | #define D40_SUSPEND_MAX_IT 500 |
32 | 32 | ||
33 | /* Hardware requirement on LCLA alignment */ | ||
34 | #define LCLA_ALIGNMENT 0x40000 | ||
35 | /* Attempts before giving up to trying to get pages that are aligned */ | ||
36 | #define MAX_LCLA_ALLOC_ATTEMPTS 256 | ||
37 | |||
38 | /* Bit markings for allocation map */ | ||
33 | #define D40_ALLOC_FREE (1 << 31) | 39 | #define D40_ALLOC_FREE (1 << 31) |
34 | #define D40_ALLOC_PHY (1 << 30) | 40 | #define D40_ALLOC_PHY (1 << 30) |
35 | #define D40_ALLOC_LOG_FREE 0 | 41 | #define D40_ALLOC_LOG_FREE 0 |
36 | 42 | ||
37 | /* The number of free d40_desc to keep in memory before starting | ||
38 | * to kfree() them */ | ||
39 | #define D40_DESC_CACHE_SIZE 50 | ||
40 | |||
41 | /* Hardware designer of the block */ | 43 | /* Hardware designer of the block */ |
42 | #define D40_PERIPHID2_DESIGNER 0x8 | 44 | #define D40_PERIPHID2_DESIGNER 0x8 |
43 | 45 | ||
@@ -68,9 +70,9 @@ enum d40_command { | |||
68 | */ | 70 | */ |
69 | struct d40_lli_pool { | 71 | struct d40_lli_pool { |
70 | void *base; | 72 | void *base; |
71 | int size; | 73 | int size; |
72 | /* Space for dst and src, plus an extra for padding */ | 74 | /* Space for dst and src, plus an extra for padding */ |
73 | u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; | 75 | u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; |
74 | }; | 76 | }; |
75 | 77 | ||
76 | /** | 78 | /** |
@@ -81,9 +83,10 @@ struct d40_lli_pool { | |||
81 | * lli_len equals one. | 83 | * lli_len equals one. |
82 | * @lli_log: Same as above but for logical channels. | 84 | * @lli_log: Same as above but for logical channels. |
83 | * @lli_pool: The pool with two entries pre-allocated. | 85 | * @lli_pool: The pool with two entries pre-allocated. |
84 | * @lli_len: Number of LLI's in lli_pool | 86 | * @lli_len: Number of llis of current descriptor. |
85 | * @lli_tcount: Number of LLIs processed in the transfer. When equals lli_len | 87 | * @lli_count: Number of transfered llis. |
86 | * then this transfer job is done. | 88 | * @lli_tx_len: Max number of LLIs per transfer, there can be |
89 | * many transfer for one descriptor. | ||
87 | * @txd: DMA engine struct. Used for among other things for communication | 90 | * @txd: DMA engine struct. Used for among other things for communication |
88 | * during a transfer. | 91 | * during a transfer. |
89 | * @node: List entry. | 92 | * @node: List entry. |
@@ -100,8 +103,9 @@ struct d40_desc { | |||
100 | struct d40_log_lli_bidir lli_log; | 103 | struct d40_log_lli_bidir lli_log; |
101 | 104 | ||
102 | struct d40_lli_pool lli_pool; | 105 | struct d40_lli_pool lli_pool; |
103 | u32 lli_len; | 106 | int lli_len; |
104 | u32 lli_tcount; | 107 | int lli_count; |
108 | u32 lli_tx_len; | ||
105 | 109 | ||
106 | struct dma_async_tx_descriptor txd; | 110 | struct dma_async_tx_descriptor txd; |
107 | struct list_head node; | 111 | struct list_head node; |
@@ -113,18 +117,20 @@ struct d40_desc { | |||
113 | /** | 117 | /** |
114 | * struct d40_lcla_pool - LCLA pool settings and data. | 118 | * struct d40_lcla_pool - LCLA pool settings and data. |
115 | * | 119 | * |
116 | * @base: The virtual address of LCLA. | 120 | * @base: The virtual address of LCLA. 18 bit aligned. |
117 | * @phy: Physical base address of LCLA. | 121 | * @base_unaligned: The orignal kmalloc pointer, if kmalloc is used. |
118 | * @base_size: size of lcla. | 122 | * This pointer is only there for clean-up on error. |
123 | * @pages: The number of pages needed for all physical channels. | ||
124 | * Only used later for clean-up on error | ||
119 | * @lock: Lock to protect the content in this struct. | 125 | * @lock: Lock to protect the content in this struct. |
120 | * @alloc_map: Mapping between physical channel and LCLA entries. | 126 | * @alloc_map: Bitmap mapping between physical channel and LCLA entries. |
121 | * @num_blocks: The number of entries of alloc_map. Equals to the | 127 | * @num_blocks: The number of entries of alloc_map. Equals to the |
122 | * number of physical channels. | 128 | * number of physical channels. |
123 | */ | 129 | */ |
124 | struct d40_lcla_pool { | 130 | struct d40_lcla_pool { |
125 | void *base; | 131 | void *base; |
126 | dma_addr_t phy; | 132 | void *base_unaligned; |
127 | resource_size_t base_size; | 133 | int pages; |
128 | spinlock_t lock; | 134 | spinlock_t lock; |
129 | u32 *alloc_map; | 135 | u32 *alloc_map; |
130 | int num_blocks; | 136 | int num_blocks; |
@@ -163,15 +169,14 @@ struct d40_base; | |||
163 | * @pending_tx: The number of pending transfers. Used between interrupt handler | 169 | * @pending_tx: The number of pending transfers. Used between interrupt handler |
164 | * and tasklet. | 170 | * and tasklet. |
165 | * @busy: Set to true when transfer is ongoing on this channel. | 171 | * @busy: Set to true when transfer is ongoing on this channel. |
166 | * @phy_chan: Pointer to physical channel which this instance runs on. | 172 | * @phy_chan: Pointer to physical channel which this instance runs on. If this |
173 | * point is NULL, then the channel is not allocated. | ||
167 | * @chan: DMA engine handle. | 174 | * @chan: DMA engine handle. |
168 | * @tasklet: Tasklet that gets scheduled from interrupt context to complete a | 175 | * @tasklet: Tasklet that gets scheduled from interrupt context to complete a |
169 | * transfer and call client callback. | 176 | * transfer and call client callback. |
170 | * @client: Cliented owned descriptor list. | 177 | * @client: Cliented owned descriptor list. |
171 | * @active: Active descriptor. | 178 | * @active: Active descriptor. |
172 | * @queue: Queued jobs. | 179 | * @queue: Queued jobs. |
173 | * @free: List of free descripts, ready to be reused. | ||
174 | * @free_len: Number of descriptors in the free list. | ||
175 | * @dma_cfg: The client configuration of this dma channel. | 180 | * @dma_cfg: The client configuration of this dma channel. |
176 | * @base: Pointer to the device instance struct. | 181 | * @base: Pointer to the device instance struct. |
177 | * @src_def_cfg: Default cfg register setting for src. | 182 | * @src_def_cfg: Default cfg register setting for src. |
@@ -195,8 +200,6 @@ struct d40_chan { | |||
195 | struct list_head client; | 200 | struct list_head client; |
196 | struct list_head active; | 201 | struct list_head active; |
197 | struct list_head queue; | 202 | struct list_head queue; |
198 | struct list_head free; | ||
199 | int free_len; | ||
200 | struct stedma40_chan_cfg dma_cfg; | 203 | struct stedma40_chan_cfg dma_cfg; |
201 | struct d40_base *base; | 204 | struct d40_base *base; |
202 | /* Default register configurations */ | 205 | /* Default register configurations */ |
@@ -205,6 +208,9 @@ struct d40_chan { | |||
205 | struct d40_def_lcsp log_def; | 208 | struct d40_def_lcsp log_def; |
206 | struct d40_lcla_elem lcla; | 209 | struct d40_lcla_elem lcla; |
207 | struct d40_log_lli_full *lcpa; | 210 | struct d40_log_lli_full *lcpa; |
211 | /* Runtime reconfiguration */ | ||
212 | dma_addr_t runtime_addr; | ||
213 | enum dma_data_direction runtime_direction; | ||
208 | }; | 214 | }; |
209 | 215 | ||
210 | /** | 216 | /** |
@@ -215,6 +221,7 @@ struct d40_chan { | |||
215 | * the same physical register. | 221 | * the same physical register. |
216 | * @dev: The device structure. | 222 | * @dev: The device structure. |
217 | * @virtbase: The virtual base address of the DMA's register. | 223 | * @virtbase: The virtual base address of the DMA's register. |
224 | * @rev: silicon revision detected. | ||
218 | * @clk: Pointer to the DMA clock structure. | 225 | * @clk: Pointer to the DMA clock structure. |
219 | * @phy_start: Physical memory start of the DMA registers. | 226 | * @phy_start: Physical memory start of the DMA registers. |
220 | * @phy_size: Size of the DMA register map. | 227 | * @phy_size: Size of the DMA register map. |
@@ -240,12 +247,14 @@ struct d40_chan { | |||
240 | * @lcpa_base: The virtual mapped address of LCPA. | 247 | * @lcpa_base: The virtual mapped address of LCPA. |
241 | * @phy_lcpa: The physical address of the LCPA. | 248 | * @phy_lcpa: The physical address of the LCPA. |
242 | * @lcpa_size: The size of the LCPA area. | 249 | * @lcpa_size: The size of the LCPA area. |
250 | * @desc_slab: cache for descriptors. | ||
243 | */ | 251 | */ |
244 | struct d40_base { | 252 | struct d40_base { |
245 | spinlock_t interrupt_lock; | 253 | spinlock_t interrupt_lock; |
246 | spinlock_t execmd_lock; | 254 | spinlock_t execmd_lock; |
247 | struct device *dev; | 255 | struct device *dev; |
248 | void __iomem *virtbase; | 256 | void __iomem *virtbase; |
257 | u8 rev:4; | ||
249 | struct clk *clk; | 258 | struct clk *clk; |
250 | phys_addr_t phy_start; | 259 | phys_addr_t phy_start; |
251 | resource_size_t phy_size; | 260 | resource_size_t phy_size; |
@@ -266,6 +275,7 @@ struct d40_base { | |||
266 | void *lcpa_base; | 275 | void *lcpa_base; |
267 | dma_addr_t phy_lcpa; | 276 | dma_addr_t phy_lcpa; |
268 | resource_size_t lcpa_size; | 277 | resource_size_t lcpa_size; |
278 | struct kmem_cache *desc_slab; | ||
269 | }; | 279 | }; |
270 | 280 | ||
271 | /** | 281 | /** |
@@ -365,11 +375,6 @@ static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c, | |||
365 | return cookie; | 375 | return cookie; |
366 | } | 376 | } |
367 | 377 | ||
368 | static void d40_desc_reset(struct d40_desc *d40d) | ||
369 | { | ||
370 | d40d->lli_tcount = 0; | ||
371 | } | ||
372 | |||
373 | static void d40_desc_remove(struct d40_desc *d40d) | 378 | static void d40_desc_remove(struct d40_desc *d40d) |
374 | { | 379 | { |
375 | list_del(&d40d->node); | 380 | list_del(&d40d->node); |
@@ -377,7 +382,6 @@ static void d40_desc_remove(struct d40_desc *d40d) | |||
377 | 382 | ||
378 | static struct d40_desc *d40_desc_get(struct d40_chan *d40c) | 383 | static struct d40_desc *d40_desc_get(struct d40_chan *d40c) |
379 | { | 384 | { |
380 | struct d40_desc *desc; | ||
381 | struct d40_desc *d; | 385 | struct d40_desc *d; |
382 | struct d40_desc *_d; | 386 | struct d40_desc *_d; |
383 | 387 | ||
@@ -386,36 +390,21 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c) | |||
386 | if (async_tx_test_ack(&d->txd)) { | 390 | if (async_tx_test_ack(&d->txd)) { |
387 | d40_pool_lli_free(d); | 391 | d40_pool_lli_free(d); |
388 | d40_desc_remove(d); | 392 | d40_desc_remove(d); |
389 | desc = d; | 393 | break; |
390 | goto out; | ||
391 | } | 394 | } |
392 | } | ||
393 | |||
394 | if (list_empty(&d40c->free)) { | ||
395 | /* Alloc new desc because we're out of used ones */ | ||
396 | desc = kzalloc(sizeof(struct d40_desc), GFP_NOWAIT); | ||
397 | if (desc == NULL) | ||
398 | goto out; | ||
399 | INIT_LIST_HEAD(&desc->node); | ||
400 | } else { | 395 | } else { |
401 | /* Reuse an old desc. */ | 396 | d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT); |
402 | desc = list_first_entry(&d40c->free, | 397 | if (d != NULL) { |
403 | struct d40_desc, | 398 | memset(d, 0, sizeof(struct d40_desc)); |
404 | node); | 399 | INIT_LIST_HEAD(&d->node); |
405 | list_del(&desc->node); | 400 | } |
406 | d40c->free_len--; | ||
407 | } | 401 | } |
408 | out: | 402 | return d; |
409 | return desc; | ||
410 | } | 403 | } |
411 | 404 | ||
412 | static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) | 405 | static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) |
413 | { | 406 | { |
414 | if (d40c->free_len < D40_DESC_CACHE_SIZE) { | 407 | kmem_cache_free(d40c->base->desc_slab, d40d); |
415 | list_add_tail(&d40d->node, &d40c->free); | ||
416 | d40c->free_len++; | ||
417 | } else | ||
418 | kfree(d40d); | ||
419 | } | 408 | } |
420 | 409 | ||
421 | static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) | 410 | static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) |
@@ -456,37 +445,41 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c) | |||
456 | 445 | ||
457 | /* Support functions for logical channels */ | 446 | /* Support functions for logical channels */ |
458 | 447 | ||
459 | static int d40_lcla_id_get(struct d40_chan *d40c, | 448 | static int d40_lcla_id_get(struct d40_chan *d40c) |
460 | struct d40_lcla_pool *pool) | ||
461 | { | 449 | { |
462 | int src_id = 0; | 450 | int src_id = 0; |
463 | int dst_id = 0; | 451 | int dst_id = 0; |
464 | struct d40_log_lli *lcla_lidx_base = | 452 | struct d40_log_lli *lcla_lidx_base = |
465 | pool->base + d40c->phy_chan->num * 1024; | 453 | d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024; |
466 | int i; | 454 | int i; |
467 | int lli_per_log = d40c->base->plat_data->llis_per_log; | 455 | int lli_per_log = d40c->base->plat_data->llis_per_log; |
456 | unsigned long flags; | ||
468 | 457 | ||
469 | if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0) | 458 | if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0) |
470 | return 0; | 459 | return 0; |
471 | 460 | ||
472 | if (pool->num_blocks > 32) | 461 | if (d40c->base->lcla_pool.num_blocks > 32) |
473 | return -EINVAL; | 462 | return -EINVAL; |
474 | 463 | ||
475 | spin_lock(&pool->lock); | 464 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); |
476 | 465 | ||
477 | for (i = 0; i < pool->num_blocks; i++) { | 466 | for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) { |
478 | if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) { | 467 | if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] & |
479 | pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i); | 468 | (0x1 << i))) { |
469 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |= | ||
470 | (0x1 << i); | ||
480 | break; | 471 | break; |
481 | } | 472 | } |
482 | } | 473 | } |
483 | src_id = i; | 474 | src_id = i; |
484 | if (src_id >= pool->num_blocks) | 475 | if (src_id >= d40c->base->lcla_pool.num_blocks) |
485 | goto err; | 476 | goto err; |
486 | 477 | ||
487 | for (; i < pool->num_blocks; i++) { | 478 | for (; i < d40c->base->lcla_pool.num_blocks; i++) { |
488 | if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) { | 479 | if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] & |
489 | pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i); | 480 | (0x1 << i))) { |
481 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |= | ||
482 | (0x1 << i); | ||
490 | break; | 483 | break; |
491 | } | 484 | } |
492 | } | 485 | } |
@@ -500,28 +493,13 @@ static int d40_lcla_id_get(struct d40_chan *d40c, | |||
500 | d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1; | 493 | d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1; |
501 | d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1; | 494 | d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1; |
502 | 495 | ||
503 | 496 | spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); | |
504 | spin_unlock(&pool->lock); | ||
505 | return 0; | 497 | return 0; |
506 | err: | 498 | err: |
507 | spin_unlock(&pool->lock); | 499 | spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); |
508 | return -EINVAL; | 500 | return -EINVAL; |
509 | } | 501 | } |
510 | 502 | ||
511 | static void d40_lcla_id_put(struct d40_chan *d40c, | ||
512 | struct d40_lcla_pool *pool, | ||
513 | int id) | ||
514 | { | ||
515 | if (id < 0) | ||
516 | return; | ||
517 | |||
518 | d40c->lcla.src_id = -1; | ||
519 | d40c->lcla.dst_id = -1; | ||
520 | |||
521 | spin_lock(&pool->lock); | ||
522 | pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id)); | ||
523 | spin_unlock(&pool->lock); | ||
524 | } | ||
525 | 503 | ||
526 | static int d40_channel_execute_command(struct d40_chan *d40c, | 504 | static int d40_channel_execute_command(struct d40_chan *d40c, |
527 | enum d40_command command) | 505 | enum d40_command command) |
@@ -530,6 +508,7 @@ static int d40_channel_execute_command(struct d40_chan *d40c, | |||
530 | void __iomem *active_reg; | 508 | void __iomem *active_reg; |
531 | int ret = 0; | 509 | int ret = 0; |
532 | unsigned long flags; | 510 | unsigned long flags; |
511 | u32 wmask; | ||
533 | 512 | ||
534 | spin_lock_irqsave(&d40c->base->execmd_lock, flags); | 513 | spin_lock_irqsave(&d40c->base->execmd_lock, flags); |
535 | 514 | ||
@@ -547,7 +526,9 @@ static int d40_channel_execute_command(struct d40_chan *d40c, | |||
547 | goto done; | 526 | goto done; |
548 | } | 527 | } |
549 | 528 | ||
550 | writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg); | 529 | wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num)); |
530 | writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)), | ||
531 | active_reg); | ||
551 | 532 | ||
552 | if (command == D40_DMA_SUSPEND_REQ) { | 533 | if (command == D40_DMA_SUSPEND_REQ) { |
553 | 534 | ||
@@ -586,8 +567,7 @@ done: | |||
586 | static void d40_term_all(struct d40_chan *d40c) | 567 | static void d40_term_all(struct d40_chan *d40c) |
587 | { | 568 | { |
588 | struct d40_desc *d40d; | 569 | struct d40_desc *d40d; |
589 | struct d40_desc *d; | 570 | unsigned long flags; |
590 | struct d40_desc *_d; | ||
591 | 571 | ||
592 | /* Release active descriptors */ | 572 | /* Release active descriptors */ |
593 | while ((d40d = d40_first_active_get(d40c))) { | 573 | while ((d40d = d40_first_active_get(d40c))) { |
@@ -605,19 +585,17 @@ static void d40_term_all(struct d40_chan *d40c) | |||
605 | d40_desc_free(d40c, d40d); | 585 | d40_desc_free(d40c, d40d); |
606 | } | 586 | } |
607 | 587 | ||
608 | /* Release client owned descriptors */ | 588 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); |
609 | if (!list_empty(&d40c->client)) | 589 | |
610 | list_for_each_entry_safe(d, _d, &d40c->client, node) { | 590 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &= |
611 | d40_pool_lli_free(d); | 591 | (~(0x1 << d40c->lcla.dst_id)); |
612 | d40_desc_remove(d); | 592 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &= |
613 | /* Return desc to free-list */ | 593 | (~(0x1 << d40c->lcla.src_id)); |
614 | d40_desc_free(d40c, d40d); | 594 | |
615 | } | 595 | d40c->lcla.src_id = -1; |
596 | d40c->lcla.dst_id = -1; | ||
616 | 597 | ||
617 | d40_lcla_id_put(d40c, &d40c->base->lcla_pool, | 598 | spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); |
618 | d40c->lcla.src_id); | ||
619 | d40_lcla_id_put(d40c, &d40c->base->lcla_pool, | ||
620 | d40c->lcla.dst_id); | ||
621 | 599 | ||
622 | d40c->pending_tx = 0; | 600 | d40c->pending_tx = 0; |
623 | d40c->busy = false; | 601 | d40c->busy = false; |
@@ -628,6 +606,7 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) | |||
628 | u32 val; | 606 | u32 val; |
629 | unsigned long flags; | 607 | unsigned long flags; |
630 | 608 | ||
609 | /* Notice, that disable requires the physical channel to be stopped */ | ||
631 | if (do_enable) | 610 | if (do_enable) |
632 | val = D40_ACTIVATE_EVENTLINE; | 611 | val = D40_ACTIVATE_EVENTLINE; |
633 | else | 612 | else |
@@ -732,31 +711,34 @@ static int d40_config_write(struct d40_chan *d40c) | |||
732 | 711 | ||
733 | static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) | 712 | static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) |
734 | { | 713 | { |
735 | |||
736 | if (d40d->lli_phy.dst && d40d->lli_phy.src) { | 714 | if (d40d->lli_phy.dst && d40d->lli_phy.src) { |
737 | d40_phy_lli_write(d40c->base->virtbase, | 715 | d40_phy_lli_write(d40c->base->virtbase, |
738 | d40c->phy_chan->num, | 716 | d40c->phy_chan->num, |
739 | d40d->lli_phy.dst, | 717 | d40d->lli_phy.dst, |
740 | d40d->lli_phy.src); | 718 | d40d->lli_phy.src); |
741 | d40d->lli_tcount = d40d->lli_len; | ||
742 | } else if (d40d->lli_log.dst && d40d->lli_log.src) { | 719 | } else if (d40d->lli_log.dst && d40d->lli_log.src) { |
743 | u32 lli_len; | ||
744 | struct d40_log_lli *src = d40d->lli_log.src; | 720 | struct d40_log_lli *src = d40d->lli_log.src; |
745 | struct d40_log_lli *dst = d40d->lli_log.dst; | 721 | struct d40_log_lli *dst = d40d->lli_log.dst; |
746 | 722 | int s; | |
747 | src += d40d->lli_tcount; | 723 | |
748 | dst += d40d->lli_tcount; | 724 | src += d40d->lli_count; |
749 | 725 | dst += d40d->lli_count; | |
750 | if (d40d->lli_len <= d40c->base->plat_data->llis_per_log) | 726 | s = d40_log_lli_write(d40c->lcpa, |
751 | lli_len = d40d->lli_len; | 727 | d40c->lcla.src, d40c->lcla.dst, |
752 | else | 728 | dst, src, |
753 | lli_len = d40c->base->plat_data->llis_per_log; | 729 | d40c->base->plat_data->llis_per_log); |
754 | d40d->lli_tcount += lli_len; | 730 | |
755 | d40_log_lli_write(d40c->lcpa, d40c->lcla.src, | 731 | /* If s equals to zero, the job is not linked */ |
756 | d40c->lcla.dst, | 732 | if (s > 0) { |
757 | dst, src, | 733 | (void) dma_map_single(d40c->base->dev, d40c->lcla.src, |
758 | d40c->base->plat_data->llis_per_log); | 734 | s * sizeof(struct d40_log_lli), |
735 | DMA_TO_DEVICE); | ||
736 | (void) dma_map_single(d40c->base->dev, d40c->lcla.dst, | ||
737 | s * sizeof(struct d40_log_lli), | ||
738 | DMA_TO_DEVICE); | ||
739 | } | ||
759 | } | 740 | } |
741 | d40d->lli_count += d40d->lli_tx_len; | ||
760 | } | 742 | } |
761 | 743 | ||
762 | static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | 744 | static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) |
@@ -780,18 +762,21 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | |||
780 | 762 | ||
781 | static int d40_start(struct d40_chan *d40c) | 763 | static int d40_start(struct d40_chan *d40c) |
782 | { | 764 | { |
783 | int err; | 765 | if (d40c->base->rev == 0) { |
766 | int err; | ||
784 | 767 | ||
785 | if (d40c->log_num != D40_PHY_CHAN) { | 768 | if (d40c->log_num != D40_PHY_CHAN) { |
786 | err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 769 | err = d40_channel_execute_command(d40c, |
787 | if (err) | 770 | D40_DMA_SUSPEND_REQ); |
788 | return err; | 771 | if (err) |
789 | d40_config_set_event(d40c, true); | 772 | return err; |
773 | } | ||
790 | } | 774 | } |
791 | 775 | ||
792 | err = d40_channel_execute_command(d40c, D40_DMA_RUN); | 776 | if (d40c->log_num != D40_PHY_CHAN) |
777 | d40_config_set_event(d40c, true); | ||
793 | 778 | ||
794 | return err; | 779 | return d40_channel_execute_command(d40c, D40_DMA_RUN); |
795 | } | 780 | } |
796 | 781 | ||
797 | static struct d40_desc *d40_queue_start(struct d40_chan *d40c) | 782 | static struct d40_desc *d40_queue_start(struct d40_chan *d40c) |
@@ -838,7 +823,7 @@ static void dma_tc_handle(struct d40_chan *d40c) | |||
838 | if (d40d == NULL) | 823 | if (d40d == NULL) |
839 | return; | 824 | return; |
840 | 825 | ||
841 | if (d40d->lli_tcount < d40d->lli_len) { | 826 | if (d40d->lli_count < d40d->lli_len) { |
842 | 827 | ||
843 | d40_desc_load(d40c, d40d); | 828 | d40_desc_load(d40c, d40d); |
844 | /* Start dma job */ | 829 | /* Start dma job */ |
@@ -891,7 +876,6 @@ static void dma_tasklet(unsigned long data) | |||
891 | /* Return desc to free-list */ | 876 | /* Return desc to free-list */ |
892 | d40_desc_free(d40c, d40d_fin); | 877 | d40_desc_free(d40c, d40d_fin); |
893 | } else { | 878 | } else { |
894 | d40_desc_reset(d40d_fin); | ||
895 | if (!d40d_fin->is_in_client_list) { | 879 | if (!d40d_fin->is_in_client_list) { |
896 | d40_desc_remove(d40d_fin); | 880 | d40_desc_remove(d40d_fin); |
897 | list_add_tail(&d40d_fin->node, &d40c->client); | 881 | list_add_tail(&d40d_fin->node, &d40c->client); |
@@ -975,7 +959,8 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data) | |||
975 | if (!il[row].is_error) | 959 | if (!il[row].is_error) |
976 | dma_tc_handle(d40c); | 960 | dma_tc_handle(d40c); |
977 | else | 961 | else |
978 | dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n", | 962 | dev_err(base->dev, |
963 | "[%s] IRQ chan: %ld offset %d idx %d\n", | ||
979 | __func__, chan, il[row].offset, idx); | 964 | __func__, chan, il[row].offset, idx); |
980 | 965 | ||
981 | spin_unlock(&d40c->lock); | 966 | spin_unlock(&d40c->lock); |
@@ -1134,7 +1119,8 @@ static int d40_allocate_channel(struct d40_chan *d40c) | |||
1134 | int j; | 1119 | int j; |
1135 | int log_num; | 1120 | int log_num; |
1136 | bool is_src; | 1121 | bool is_src; |
1137 | bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE) | 1122 | bool is_log = (d40c->dma_cfg.channel_type & |
1123 | STEDMA40_CHANNEL_IN_OPER_MODE) | ||
1138 | == STEDMA40_CHANNEL_IN_LOG_MODE; | 1124 | == STEDMA40_CHANNEL_IN_LOG_MODE; |
1139 | 1125 | ||
1140 | 1126 | ||
@@ -1169,8 +1155,10 @@ static int d40_allocate_channel(struct d40_chan *d40c) | |||
1169 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { | 1155 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { |
1170 | int phy_num = j + event_group * 2; | 1156 | int phy_num = j + event_group * 2; |
1171 | for (i = phy_num; i < phy_num + 2; i++) { | 1157 | for (i = phy_num; i < phy_num + 2; i++) { |
1172 | if (d40_alloc_mask_set(&phys[i], is_src, | 1158 | if (d40_alloc_mask_set(&phys[i], |
1173 | 0, is_log)) | 1159 | is_src, |
1160 | 0, | ||
1161 | is_log)) | ||
1174 | goto found_phy; | 1162 | goto found_phy; |
1175 | } | 1163 | } |
1176 | } | 1164 | } |
@@ -1221,30 +1209,6 @@ out: | |||
1221 | 1209 | ||
1222 | } | 1210 | } |
1223 | 1211 | ||
1224 | static int d40_config_chan(struct d40_chan *d40c, | ||
1225 | struct stedma40_chan_cfg *info) | ||
1226 | { | ||
1227 | |||
1228 | /* Fill in basic CFG register values */ | ||
1229 | d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, | ||
1230 | &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN); | ||
1231 | |||
1232 | if (d40c->log_num != D40_PHY_CHAN) { | ||
1233 | d40_log_cfg(&d40c->dma_cfg, | ||
1234 | &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); | ||
1235 | |||
1236 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) | ||
1237 | d40c->lcpa = d40c->base->lcpa_base + | ||
1238 | d40c->dma_cfg.src_dev_type * 32; | ||
1239 | else | ||
1240 | d40c->lcpa = d40c->base->lcpa_base + | ||
1241 | d40c->dma_cfg.dst_dev_type * 32 + 16; | ||
1242 | } | ||
1243 | |||
1244 | /* Write channel configuration to the DMA */ | ||
1245 | return d40_config_write(d40c); | ||
1246 | } | ||
1247 | |||
1248 | static int d40_config_memcpy(struct d40_chan *d40c) | 1212 | static int d40_config_memcpy(struct d40_chan *d40c) |
1249 | { | 1213 | { |
1250 | dma_cap_mask_t cap = d40c->chan.device->cap_mask; | 1214 | dma_cap_mask_t cap = d40c->chan.device->cap_mask; |
@@ -1272,13 +1236,25 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1272 | { | 1236 | { |
1273 | 1237 | ||
1274 | int res = 0; | 1238 | int res = 0; |
1275 | u32 event, dir; | 1239 | u32 event; |
1276 | struct d40_phy_res *phy = d40c->phy_chan; | 1240 | struct d40_phy_res *phy = d40c->phy_chan; |
1277 | bool is_src; | 1241 | bool is_src; |
1242 | struct d40_desc *d; | ||
1243 | struct d40_desc *_d; | ||
1244 | |||
1278 | 1245 | ||
1279 | /* Terminate all queued and active transfers */ | 1246 | /* Terminate all queued and active transfers */ |
1280 | d40_term_all(d40c); | 1247 | d40_term_all(d40c); |
1281 | 1248 | ||
1249 | /* Release client owned descriptors */ | ||
1250 | if (!list_empty(&d40c->client)) | ||
1251 | list_for_each_entry_safe(d, _d, &d40c->client, node) { | ||
1252 | d40_pool_lli_free(d); | ||
1253 | d40_desc_remove(d); | ||
1254 | /* Return desc to free-list */ | ||
1255 | d40_desc_free(d40c, d); | ||
1256 | } | ||
1257 | |||
1282 | if (phy == NULL) { | 1258 | if (phy == NULL) { |
1283 | dev_err(&d40c->chan.dev->device, "[%s] phy == null\n", | 1259 | dev_err(&d40c->chan.dev->device, "[%s] phy == null\n", |
1284 | __func__); | 1260 | __func__); |
@@ -1292,22 +1268,12 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1292 | return -EINVAL; | 1268 | return -EINVAL; |
1293 | } | 1269 | } |
1294 | 1270 | ||
1295 | |||
1296 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | ||
1297 | if (res) { | ||
1298 | dev_err(&d40c->chan.dev->device, "[%s] suspend\n", | ||
1299 | __func__); | ||
1300 | return res; | ||
1301 | } | ||
1302 | |||
1303 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || | 1271 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || |
1304 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { | 1272 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { |
1305 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | 1273 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); |
1306 | dir = D40_CHAN_REG_SDLNK; | ||
1307 | is_src = false; | 1274 | is_src = false; |
1308 | } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { | 1275 | } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { |
1309 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | 1276 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); |
1310 | dir = D40_CHAN_REG_SSLNK; | ||
1311 | is_src = true; | 1277 | is_src = true; |
1312 | } else { | 1278 | } else { |
1313 | dev_err(&d40c->chan.dev->device, | 1279 | dev_err(&d40c->chan.dev->device, |
@@ -1315,16 +1281,17 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1315 | return -EINVAL; | 1281 | return -EINVAL; |
1316 | } | 1282 | } |
1317 | 1283 | ||
1284 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | ||
1285 | if (res) { | ||
1286 | dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n", | ||
1287 | __func__); | ||
1288 | return res; | ||
1289 | } | ||
1290 | |||
1318 | if (d40c->log_num != D40_PHY_CHAN) { | 1291 | if (d40c->log_num != D40_PHY_CHAN) { |
1319 | /* | 1292 | /* Release logical channel, deactivate the event line */ |
1320 | * Release logical channel, deactivate the event line during | ||
1321 | * the time physical res is suspended. | ||
1322 | */ | ||
1323 | writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) & | ||
1324 | D40_EVENTLINE_MASK(event), | ||
1325 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
1326 | phy->num * D40_DREG_PCDELTA + dir); | ||
1327 | 1293 | ||
1294 | d40_config_set_event(d40c, false); | ||
1328 | d40c->base->lookup_log_chans[d40c->log_num] = NULL; | 1295 | d40c->base->lookup_log_chans[d40c->log_num] = NULL; |
1329 | 1296 | ||
1330 | /* | 1297 | /* |
@@ -1345,8 +1312,9 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1345 | } | 1312 | } |
1346 | return 0; | 1313 | return 0; |
1347 | } | 1314 | } |
1348 | } else | 1315 | } else { |
1349 | d40_alloc_mask_free(phy, is_src, 0); | 1316 | (void) d40_alloc_mask_free(phy, is_src, 0); |
1317 | } | ||
1350 | 1318 | ||
1351 | /* Release physical channel */ | 1319 | /* Release physical channel */ |
1352 | res = d40_channel_execute_command(d40c, D40_DMA_STOP); | 1320 | res = d40_channel_execute_command(d40c, D40_DMA_STOP); |
@@ -1361,8 +1329,6 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1361 | d40c->base->lookup_phy_chans[phy->num] = NULL; | 1329 | d40c->base->lookup_phy_chans[phy->num] = NULL; |
1362 | 1330 | ||
1363 | return 0; | 1331 | return 0; |
1364 | |||
1365 | |||
1366 | } | 1332 | } |
1367 | 1333 | ||
1368 | static int d40_pause(struct dma_chan *chan) | 1334 | static int d40_pause(struct dma_chan *chan) |
@@ -1370,7 +1336,6 @@ static int d40_pause(struct dma_chan *chan) | |||
1370 | struct d40_chan *d40c = | 1336 | struct d40_chan *d40c = |
1371 | container_of(chan, struct d40_chan, chan); | 1337 | container_of(chan, struct d40_chan, chan); |
1372 | int res; | 1338 | int res; |
1373 | |||
1374 | unsigned long flags; | 1339 | unsigned long flags; |
1375 | 1340 | ||
1376 | spin_lock_irqsave(&d40c->lock, flags); | 1341 | spin_lock_irqsave(&d40c->lock, flags); |
@@ -1397,7 +1362,6 @@ static bool d40_is_paused(struct d40_chan *d40c) | |||
1397 | void __iomem *active_reg; | 1362 | void __iomem *active_reg; |
1398 | u32 status; | 1363 | u32 status; |
1399 | u32 event; | 1364 | u32 event; |
1400 | int res; | ||
1401 | 1365 | ||
1402 | spin_lock_irqsave(&d40c->lock, flags); | 1366 | spin_lock_irqsave(&d40c->lock, flags); |
1403 | 1367 | ||
@@ -1416,10 +1380,6 @@ static bool d40_is_paused(struct d40_chan *d40c) | |||
1416 | goto _exit; | 1380 | goto _exit; |
1417 | } | 1381 | } |
1418 | 1382 | ||
1419 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | ||
1420 | if (res != 0) | ||
1421 | goto _exit; | ||
1422 | |||
1423 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || | 1383 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || |
1424 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) | 1384 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) |
1425 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | 1385 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); |
@@ -1436,12 +1396,6 @@ static bool d40_is_paused(struct d40_chan *d40c) | |||
1436 | 1396 | ||
1437 | if (status != D40_DMA_RUN) | 1397 | if (status != D40_DMA_RUN) |
1438 | is_paused = true; | 1398 | is_paused = true; |
1439 | |||
1440 | /* Resume the other logical channels if any */ | ||
1441 | if (d40_chan_has_events(d40c)) | ||
1442 | res = d40_channel_execute_command(d40c, | ||
1443 | D40_DMA_RUN); | ||
1444 | |||
1445 | _exit: | 1399 | _exit: |
1446 | spin_unlock_irqrestore(&d40c->lock, flags); | 1400 | spin_unlock_irqrestore(&d40c->lock, flags); |
1447 | return is_paused; | 1401 | return is_paused; |
@@ -1468,13 +1422,14 @@ static u32 d40_residue(struct d40_chan *d40c) | |||
1468 | u32 num_elt; | 1422 | u32 num_elt; |
1469 | 1423 | ||
1470 | if (d40c->log_num != D40_PHY_CHAN) | 1424 | if (d40c->log_num != D40_PHY_CHAN) |
1471 | num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) | 1425 | num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) |
1472 | >> D40_MEM_LCSP2_ECNT_POS; | 1426 | >> D40_MEM_LCSP2_ECNT_POS; |
1473 | else | 1427 | else |
1474 | num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + | 1428 | num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + |
1475 | d40c->phy_chan->num * D40_DREG_PCDELTA + | 1429 | d40c->phy_chan->num * D40_DREG_PCDELTA + |
1476 | D40_CHAN_REG_SDELT) & | 1430 | D40_CHAN_REG_SDELT) & |
1477 | D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS; | 1431 | D40_SREG_ELEM_PHY_ECNT_MASK) >> |
1432 | D40_SREG_ELEM_PHY_ECNT_POS; | ||
1478 | return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); | 1433 | return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); |
1479 | } | 1434 | } |
1480 | 1435 | ||
@@ -1487,20 +1442,21 @@ static int d40_resume(struct dma_chan *chan) | |||
1487 | 1442 | ||
1488 | spin_lock_irqsave(&d40c->lock, flags); | 1443 | spin_lock_irqsave(&d40c->lock, flags); |
1489 | 1444 | ||
1490 | if (d40c->log_num != D40_PHY_CHAN) { | 1445 | if (d40c->base->rev == 0) |
1491 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 1446 | if (d40c->log_num != D40_PHY_CHAN) { |
1492 | if (res) | 1447 | res = d40_channel_execute_command(d40c, |
1493 | goto out; | 1448 | D40_DMA_SUSPEND_REQ); |
1449 | goto no_suspend; | ||
1450 | } | ||
1494 | 1451 | ||
1495 | /* If bytes left to transfer or linked tx resume job */ | 1452 | /* If bytes left to transfer or linked tx resume job */ |
1496 | if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { | 1453 | if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { |
1454 | if (d40c->log_num != D40_PHY_CHAN) | ||
1497 | d40_config_set_event(d40c, true); | 1455 | d40_config_set_event(d40c, true); |
1498 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); | ||
1499 | } | ||
1500 | } else if (d40_residue(d40c) || d40_tx_is_linked(d40c)) | ||
1501 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); | 1456 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); |
1457 | } | ||
1502 | 1458 | ||
1503 | out: | 1459 | no_suspend: |
1504 | spin_unlock_irqrestore(&d40c->lock, flags); | 1460 | spin_unlock_irqrestore(&d40c->lock, flags); |
1505 | return res; | 1461 | return res; |
1506 | } | 1462 | } |
@@ -1534,8 +1490,10 @@ int stedma40_set_psize(struct dma_chan *chan, | |||
1534 | if (d40c->log_num != D40_PHY_CHAN) { | 1490 | if (d40c->log_num != D40_PHY_CHAN) { |
1535 | d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; | 1491 | d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; |
1536 | d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; | 1492 | d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; |
1537 | d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; | 1493 | d40c->log_def.lcsp1 |= src_psize << |
1538 | d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; | 1494 | D40_MEM_LCSP1_SCFG_PSIZE_POS; |
1495 | d40c->log_def.lcsp3 |= dst_psize << | ||
1496 | D40_MEM_LCSP1_SCFG_PSIZE_POS; | ||
1539 | goto out; | 1497 | goto out; |
1540 | } | 1498 | } |
1541 | 1499 | ||
@@ -1566,37 +1524,42 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, | |||
1566 | struct scatterlist *sgl_dst, | 1524 | struct scatterlist *sgl_dst, |
1567 | struct scatterlist *sgl_src, | 1525 | struct scatterlist *sgl_src, |
1568 | unsigned int sgl_len, | 1526 | unsigned int sgl_len, |
1569 | unsigned long flags) | 1527 | unsigned long dma_flags) |
1570 | { | 1528 | { |
1571 | int res; | 1529 | int res; |
1572 | struct d40_desc *d40d; | 1530 | struct d40_desc *d40d; |
1573 | struct d40_chan *d40c = container_of(chan, struct d40_chan, | 1531 | struct d40_chan *d40c = container_of(chan, struct d40_chan, |
1574 | chan); | 1532 | chan); |
1575 | unsigned long flg; | 1533 | unsigned long flags; |
1576 | int lli_max = d40c->base->plat_data->llis_per_log; | ||
1577 | 1534 | ||
1535 | if (d40c->phy_chan == NULL) { | ||
1536 | dev_err(&d40c->chan.dev->device, | ||
1537 | "[%s] Unallocated channel.\n", __func__); | ||
1538 | return ERR_PTR(-EINVAL); | ||
1539 | } | ||
1578 | 1540 | ||
1579 | spin_lock_irqsave(&d40c->lock, flg); | 1541 | spin_lock_irqsave(&d40c->lock, flags); |
1580 | d40d = d40_desc_get(d40c); | 1542 | d40d = d40_desc_get(d40c); |
1581 | 1543 | ||
1582 | if (d40d == NULL) | 1544 | if (d40d == NULL) |
1583 | goto err; | 1545 | goto err; |
1584 | 1546 | ||
1585 | memset(d40d, 0, sizeof(struct d40_desc)); | ||
1586 | d40d->lli_len = sgl_len; | 1547 | d40d->lli_len = sgl_len; |
1587 | 1548 | d40d->lli_tx_len = d40d->lli_len; | |
1588 | d40d->txd.flags = flags; | 1549 | d40d->txd.flags = dma_flags; |
1589 | 1550 | ||
1590 | if (d40c->log_num != D40_PHY_CHAN) { | 1551 | if (d40c->log_num != D40_PHY_CHAN) { |
1552 | if (d40d->lli_len > d40c->base->plat_data->llis_per_log) | ||
1553 | d40d->lli_tx_len = d40c->base->plat_data->llis_per_log; | ||
1554 | |||
1591 | if (sgl_len > 1) | 1555 | if (sgl_len > 1) |
1592 | /* | 1556 | /* |
1593 | * Check if there is space available in lcla. If not, | 1557 | * Check if there is space available in lcla. If not, |
1594 | * split list into 1-length and run only in lcpa | 1558 | * split list into 1-length and run only in lcpa |
1595 | * space. | 1559 | * space. |
1596 | */ | 1560 | */ |
1597 | if (d40_lcla_id_get(d40c, | 1561 | if (d40_lcla_id_get(d40c) != 0) |
1598 | &d40c->base->lcla_pool) != 0) | 1562 | d40d->lli_tx_len = 1; |
1599 | lli_max = 1; | ||
1600 | 1563 | ||
1601 | if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { | 1564 | if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { |
1602 | dev_err(&d40c->chan.dev->device, | 1565 | dev_err(&d40c->chan.dev->device, |
@@ -1610,7 +1573,8 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, | |||
1610 | d40d->lli_log.src, | 1573 | d40d->lli_log.src, |
1611 | d40c->log_def.lcsp1, | 1574 | d40c->log_def.lcsp1, |
1612 | d40c->dma_cfg.src_info.data_width, | 1575 | d40c->dma_cfg.src_info.data_width, |
1613 | flags & DMA_PREP_INTERRUPT, lli_max, | 1576 | dma_flags & DMA_PREP_INTERRUPT, |
1577 | d40d->lli_tx_len, | ||
1614 | d40c->base->plat_data->llis_per_log); | 1578 | d40c->base->plat_data->llis_per_log); |
1615 | 1579 | ||
1616 | (void) d40_log_sg_to_lli(d40c->lcla.dst_id, | 1580 | (void) d40_log_sg_to_lli(d40c->lcla.dst_id, |
@@ -1619,7 +1583,8 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, | |||
1619 | d40d->lli_log.dst, | 1583 | d40d->lli_log.dst, |
1620 | d40c->log_def.lcsp3, | 1584 | d40c->log_def.lcsp3, |
1621 | d40c->dma_cfg.dst_info.data_width, | 1585 | d40c->dma_cfg.dst_info.data_width, |
1622 | flags & DMA_PREP_INTERRUPT, lli_max, | 1586 | dma_flags & DMA_PREP_INTERRUPT, |
1587 | d40d->lli_tx_len, | ||
1623 | d40c->base->plat_data->llis_per_log); | 1588 | d40c->base->plat_data->llis_per_log); |
1624 | 1589 | ||
1625 | 1590 | ||
@@ -1664,11 +1629,11 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, | |||
1664 | 1629 | ||
1665 | d40d->txd.tx_submit = d40_tx_submit; | 1630 | d40d->txd.tx_submit = d40_tx_submit; |
1666 | 1631 | ||
1667 | spin_unlock_irqrestore(&d40c->lock, flg); | 1632 | spin_unlock_irqrestore(&d40c->lock, flags); |
1668 | 1633 | ||
1669 | return &d40d->txd; | 1634 | return &d40d->txd; |
1670 | err: | 1635 | err: |
1671 | spin_unlock_irqrestore(&d40c->lock, flg); | 1636 | spin_unlock_irqrestore(&d40c->lock, flags); |
1672 | return NULL; | 1637 | return NULL; |
1673 | } | 1638 | } |
1674 | EXPORT_SYMBOL(stedma40_memcpy_sg); | 1639 | EXPORT_SYMBOL(stedma40_memcpy_sg); |
@@ -1698,46 +1663,66 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
1698 | unsigned long flags; | 1663 | unsigned long flags; |
1699 | struct d40_chan *d40c = | 1664 | struct d40_chan *d40c = |
1700 | container_of(chan, struct d40_chan, chan); | 1665 | container_of(chan, struct d40_chan, chan); |
1701 | 1666 | bool is_free_phy; | |
1702 | spin_lock_irqsave(&d40c->lock, flags); | 1667 | spin_lock_irqsave(&d40c->lock, flags); |
1703 | 1668 | ||
1704 | d40c->completed = chan->cookie = 1; | 1669 | d40c->completed = chan->cookie = 1; |
1705 | 1670 | ||
1706 | /* | 1671 | /* |
1707 | * If no dma configuration is set (channel_type == 0) | 1672 | * If no dma configuration is set (channel_type == 0) |
1708 | * use default configuration | 1673 | * use default configuration (memcpy) |
1709 | */ | 1674 | */ |
1710 | if (d40c->dma_cfg.channel_type == 0) { | 1675 | if (d40c->dma_cfg.channel_type == 0) { |
1711 | err = d40_config_memcpy(d40c); | 1676 | err = d40_config_memcpy(d40c); |
1712 | if (err) | 1677 | if (err) { |
1713 | goto err_alloc; | 1678 | dev_err(&d40c->chan.dev->device, |
1679 | "[%s] Failed to configure memcpy channel\n", | ||
1680 | __func__); | ||
1681 | goto fail; | ||
1682 | } | ||
1714 | } | 1683 | } |
1684 | is_free_phy = (d40c->phy_chan == NULL); | ||
1715 | 1685 | ||
1716 | err = d40_allocate_channel(d40c); | 1686 | err = d40_allocate_channel(d40c); |
1717 | if (err) { | 1687 | if (err) { |
1718 | dev_err(&d40c->chan.dev->device, | 1688 | dev_err(&d40c->chan.dev->device, |
1719 | "[%s] Failed to allocate channel\n", __func__); | 1689 | "[%s] Failed to allocate channel\n", __func__); |
1720 | goto err_alloc; | 1690 | goto fail; |
1721 | } | 1691 | } |
1722 | 1692 | ||
1723 | err = d40_config_chan(d40c, &d40c->dma_cfg); | 1693 | /* Fill in basic CFG register values */ |
1724 | if (err) { | 1694 | d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, |
1725 | dev_err(&d40c->chan.dev->device, | 1695 | &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN); |
1726 | "[%s] Failed to configure channel\n", | ||
1727 | __func__); | ||
1728 | goto err_config; | ||
1729 | } | ||
1730 | 1696 | ||
1731 | spin_unlock_irqrestore(&d40c->lock, flags); | 1697 | if (d40c->log_num != D40_PHY_CHAN) { |
1732 | return 0; | 1698 | d40_log_cfg(&d40c->dma_cfg, |
1699 | &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); | ||
1733 | 1700 | ||
1734 | err_config: | 1701 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) |
1735 | (void) d40_free_dma(d40c); | 1702 | d40c->lcpa = d40c->base->lcpa_base + |
1736 | err_alloc: | 1703 | d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE; |
1704 | else | ||
1705 | d40c->lcpa = d40c->base->lcpa_base + | ||
1706 | d40c->dma_cfg.dst_dev_type * | ||
1707 | D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; | ||
1708 | } | ||
1709 | |||
1710 | /* | ||
1711 | * Only write channel configuration to the DMA if the physical | ||
1712 | * resource is free. In case of multiple logical channels | ||
1713 | * on the same physical resource, only the first write is necessary. | ||
1714 | */ | ||
1715 | if (is_free_phy) { | ||
1716 | err = d40_config_write(d40c); | ||
1717 | if (err) { | ||
1718 | dev_err(&d40c->chan.dev->device, | ||
1719 | "[%s] Failed to configure channel\n", | ||
1720 | __func__); | ||
1721 | } | ||
1722 | } | ||
1723 | fail: | ||
1737 | spin_unlock_irqrestore(&d40c->lock, flags); | 1724 | spin_unlock_irqrestore(&d40c->lock, flags); |
1738 | dev_err(&d40c->chan.dev->device, | 1725 | return err; |
1739 | "[%s] Channel allocation failed\n", __func__); | ||
1740 | return -EINVAL; | ||
1741 | } | 1726 | } |
1742 | 1727 | ||
1743 | static void d40_free_chan_resources(struct dma_chan *chan) | 1728 | static void d40_free_chan_resources(struct dma_chan *chan) |
@@ -1747,6 +1732,13 @@ static void d40_free_chan_resources(struct dma_chan *chan) | |||
1747 | int err; | 1732 | int err; |
1748 | unsigned long flags; | 1733 | unsigned long flags; |
1749 | 1734 | ||
1735 | if (d40c->phy_chan == NULL) { | ||
1736 | dev_err(&d40c->chan.dev->device, | ||
1737 | "[%s] Cannot free unallocated channel\n", __func__); | ||
1738 | return; | ||
1739 | } | ||
1740 | |||
1741 | |||
1750 | spin_lock_irqsave(&d40c->lock, flags); | 1742 | spin_lock_irqsave(&d40c->lock, flags); |
1751 | 1743 | ||
1752 | err = d40_free_dma(d40c); | 1744 | err = d40_free_dma(d40c); |
@@ -1761,15 +1753,21 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | |||
1761 | dma_addr_t dst, | 1753 | dma_addr_t dst, |
1762 | dma_addr_t src, | 1754 | dma_addr_t src, |
1763 | size_t size, | 1755 | size_t size, |
1764 | unsigned long flags) | 1756 | unsigned long dma_flags) |
1765 | { | 1757 | { |
1766 | struct d40_desc *d40d; | 1758 | struct d40_desc *d40d; |
1767 | struct d40_chan *d40c = container_of(chan, struct d40_chan, | 1759 | struct d40_chan *d40c = container_of(chan, struct d40_chan, |
1768 | chan); | 1760 | chan); |
1769 | unsigned long flg; | 1761 | unsigned long flags; |
1770 | int err = 0; | 1762 | int err = 0; |
1771 | 1763 | ||
1772 | spin_lock_irqsave(&d40c->lock, flg); | 1764 | if (d40c->phy_chan == NULL) { |
1765 | dev_err(&d40c->chan.dev->device, | ||
1766 | "[%s] Channel is not allocated.\n", __func__); | ||
1767 | return ERR_PTR(-EINVAL); | ||
1768 | } | ||
1769 | |||
1770 | spin_lock_irqsave(&d40c->lock, flags); | ||
1773 | d40d = d40_desc_get(d40c); | 1771 | d40d = d40_desc_get(d40c); |
1774 | 1772 | ||
1775 | if (d40d == NULL) { | 1773 | if (d40d == NULL) { |
@@ -1778,9 +1776,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | |||
1778 | goto err; | 1776 | goto err; |
1779 | } | 1777 | } |
1780 | 1778 | ||
1781 | memset(d40d, 0, sizeof(struct d40_desc)); | 1779 | d40d->txd.flags = dma_flags; |
1782 | |||
1783 | d40d->txd.flags = flags; | ||
1784 | 1780 | ||
1785 | dma_async_tx_descriptor_init(&d40d->txd, chan); | 1781 | dma_async_tx_descriptor_init(&d40d->txd, chan); |
1786 | 1782 | ||
@@ -1794,6 +1790,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | |||
1794 | goto err; | 1790 | goto err; |
1795 | } | 1791 | } |
1796 | d40d->lli_len = 1; | 1792 | d40d->lli_len = 1; |
1793 | d40d->lli_tx_len = 1; | ||
1797 | 1794 | ||
1798 | d40_log_fill_lli(d40d->lli_log.src, | 1795 | d40_log_fill_lli(d40d->lli_log.src, |
1799 | src, | 1796 | src, |
@@ -1801,7 +1798,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | |||
1801 | 0, | 1798 | 0, |
1802 | d40c->log_def.lcsp1, | 1799 | d40c->log_def.lcsp1, |
1803 | d40c->dma_cfg.src_info.data_width, | 1800 | d40c->dma_cfg.src_info.data_width, |
1804 | true, true); | 1801 | false, true); |
1805 | 1802 | ||
1806 | d40_log_fill_lli(d40d->lli_log.dst, | 1803 | d40_log_fill_lli(d40d->lli_log.dst, |
1807 | dst, | 1804 | dst, |
@@ -1848,7 +1845,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | |||
1848 | d40d->lli_pool.size, DMA_TO_DEVICE); | 1845 | d40d->lli_pool.size, DMA_TO_DEVICE); |
1849 | } | 1846 | } |
1850 | 1847 | ||
1851 | spin_unlock_irqrestore(&d40c->lock, flg); | 1848 | spin_unlock_irqrestore(&d40c->lock, flags); |
1852 | return &d40d->txd; | 1849 | return &d40d->txd; |
1853 | 1850 | ||
1854 | err_fill_lli: | 1851 | err_fill_lli: |
@@ -1856,7 +1853,7 @@ err_fill_lli: | |||
1856 | "[%s] Failed filling in PHY LLI\n", __func__); | 1853 | "[%s] Failed filling in PHY LLI\n", __func__); |
1857 | d40_pool_lli_free(d40d); | 1854 | d40_pool_lli_free(d40d); |
1858 | err: | 1855 | err: |
1859 | spin_unlock_irqrestore(&d40c->lock, flg); | 1856 | spin_unlock_irqrestore(&d40c->lock, flags); |
1860 | return NULL; | 1857 | return NULL; |
1861 | } | 1858 | } |
1862 | 1859 | ||
@@ -1865,11 +1862,10 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, | |||
1865 | struct scatterlist *sgl, | 1862 | struct scatterlist *sgl, |
1866 | unsigned int sg_len, | 1863 | unsigned int sg_len, |
1867 | enum dma_data_direction direction, | 1864 | enum dma_data_direction direction, |
1868 | unsigned long flags) | 1865 | unsigned long dma_flags) |
1869 | { | 1866 | { |
1870 | dma_addr_t dev_addr = 0; | 1867 | dma_addr_t dev_addr = 0; |
1871 | int total_size; | 1868 | int total_size; |
1872 | int lli_max = d40c->base->plat_data->llis_per_log; | ||
1873 | 1869 | ||
1874 | if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) { | 1870 | if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) { |
1875 | dev_err(&d40c->chan.dev->device, | 1871 | dev_err(&d40c->chan.dev->device, |
@@ -1878,7 +1874,10 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, | |||
1878 | } | 1874 | } |
1879 | 1875 | ||
1880 | d40d->lli_len = sg_len; | 1876 | d40d->lli_len = sg_len; |
1881 | d40d->lli_tcount = 0; | 1877 | if (d40d->lli_len <= d40c->base->plat_data->llis_per_log) |
1878 | d40d->lli_tx_len = d40d->lli_len; | ||
1879 | else | ||
1880 | d40d->lli_tx_len = d40c->base->plat_data->llis_per_log; | ||
1882 | 1881 | ||
1883 | if (sg_len > 1) | 1882 | if (sg_len > 1) |
1884 | /* | 1883 | /* |
@@ -1886,35 +1885,34 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, | |||
1886 | * If not, split list into 1-length and run only | 1885 | * If not, split list into 1-length and run only |
1887 | * in lcpa space. | 1886 | * in lcpa space. |
1888 | */ | 1887 | */ |
1889 | if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0) | 1888 | if (d40_lcla_id_get(d40c) != 0) |
1890 | lli_max = 1; | 1889 | d40d->lli_tx_len = 1; |
1891 | 1890 | ||
1892 | if (direction == DMA_FROM_DEVICE) { | 1891 | if (direction == DMA_FROM_DEVICE) |
1893 | dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; | 1892 | if (d40c->runtime_addr) |
1894 | total_size = d40_log_sg_to_dev(&d40c->lcla, | 1893 | dev_addr = d40c->runtime_addr; |
1895 | sgl, sg_len, | 1894 | else |
1896 | &d40d->lli_log, | 1895 | dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; |
1897 | &d40c->log_def, | 1896 | else if (direction == DMA_TO_DEVICE) |
1898 | d40c->dma_cfg.src_info.data_width, | 1897 | if (d40c->runtime_addr) |
1899 | d40c->dma_cfg.dst_info.data_width, | 1898 | dev_addr = d40c->runtime_addr; |
1900 | direction, | 1899 | else |
1901 | flags & DMA_PREP_INTERRUPT, | 1900 | dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; |
1902 | dev_addr, lli_max, | 1901 | |
1903 | d40c->base->plat_data->llis_per_log); | 1902 | else |
1904 | } else if (direction == DMA_TO_DEVICE) { | ||
1905 | dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; | ||
1906 | total_size = d40_log_sg_to_dev(&d40c->lcla, | ||
1907 | sgl, sg_len, | ||
1908 | &d40d->lli_log, | ||
1909 | &d40c->log_def, | ||
1910 | d40c->dma_cfg.src_info.data_width, | ||
1911 | d40c->dma_cfg.dst_info.data_width, | ||
1912 | direction, | ||
1913 | flags & DMA_PREP_INTERRUPT, | ||
1914 | dev_addr, lli_max, | ||
1915 | d40c->base->plat_data->llis_per_log); | ||
1916 | } else | ||
1917 | return -EINVAL; | 1903 | return -EINVAL; |
1904 | |||
1905 | total_size = d40_log_sg_to_dev(&d40c->lcla, | ||
1906 | sgl, sg_len, | ||
1907 | &d40d->lli_log, | ||
1908 | &d40c->log_def, | ||
1909 | d40c->dma_cfg.src_info.data_width, | ||
1910 | d40c->dma_cfg.dst_info.data_width, | ||
1911 | direction, | ||
1912 | dma_flags & DMA_PREP_INTERRUPT, | ||
1913 | dev_addr, d40d->lli_tx_len, | ||
1914 | d40c->base->plat_data->llis_per_log); | ||
1915 | |||
1918 | if (total_size < 0) | 1916 | if (total_size < 0) |
1919 | return -EINVAL; | 1917 | return -EINVAL; |
1920 | 1918 | ||
@@ -1926,7 +1924,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, | |||
1926 | struct scatterlist *sgl, | 1924 | struct scatterlist *sgl, |
1927 | unsigned int sgl_len, | 1925 | unsigned int sgl_len, |
1928 | enum dma_data_direction direction, | 1926 | enum dma_data_direction direction, |
1929 | unsigned long flags) | 1927 | unsigned long dma_flags) |
1930 | { | 1928 | { |
1931 | dma_addr_t src_dev_addr; | 1929 | dma_addr_t src_dev_addr; |
1932 | dma_addr_t dst_dev_addr; | 1930 | dma_addr_t dst_dev_addr; |
@@ -1939,13 +1937,19 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, | |||
1939 | } | 1937 | } |
1940 | 1938 | ||
1941 | d40d->lli_len = sgl_len; | 1939 | d40d->lli_len = sgl_len; |
1942 | d40d->lli_tcount = 0; | 1940 | d40d->lli_tx_len = sgl_len; |
1943 | 1941 | ||
1944 | if (direction == DMA_FROM_DEVICE) { | 1942 | if (direction == DMA_FROM_DEVICE) { |
1945 | dst_dev_addr = 0; | 1943 | dst_dev_addr = 0; |
1946 | src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; | 1944 | if (d40c->runtime_addr) |
1945 | src_dev_addr = d40c->runtime_addr; | ||
1946 | else | ||
1947 | src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; | ||
1947 | } else if (direction == DMA_TO_DEVICE) { | 1948 | } else if (direction == DMA_TO_DEVICE) { |
1948 | dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; | 1949 | if (d40c->runtime_addr) |
1950 | dst_dev_addr = d40c->runtime_addr; | ||
1951 | else | ||
1952 | dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; | ||
1949 | src_dev_addr = 0; | 1953 | src_dev_addr = 0; |
1950 | } else | 1954 | } else |
1951 | return -EINVAL; | 1955 | return -EINVAL; |
@@ -1983,34 +1987,38 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | |||
1983 | struct scatterlist *sgl, | 1987 | struct scatterlist *sgl, |
1984 | unsigned int sg_len, | 1988 | unsigned int sg_len, |
1985 | enum dma_data_direction direction, | 1989 | enum dma_data_direction direction, |
1986 | unsigned long flags) | 1990 | unsigned long dma_flags) |
1987 | { | 1991 | { |
1988 | struct d40_desc *d40d; | 1992 | struct d40_desc *d40d; |
1989 | struct d40_chan *d40c = container_of(chan, struct d40_chan, | 1993 | struct d40_chan *d40c = container_of(chan, struct d40_chan, |
1990 | chan); | 1994 | chan); |
1991 | unsigned long flg; | 1995 | unsigned long flags; |
1992 | int err; | 1996 | int err; |
1993 | 1997 | ||
1998 | if (d40c->phy_chan == NULL) { | ||
1999 | dev_err(&d40c->chan.dev->device, | ||
2000 | "[%s] Cannot prepare unallocated channel\n", __func__); | ||
2001 | return ERR_PTR(-EINVAL); | ||
2002 | } | ||
2003 | |||
1994 | if (d40c->dma_cfg.pre_transfer) | 2004 | if (d40c->dma_cfg.pre_transfer) |
1995 | d40c->dma_cfg.pre_transfer(chan, | 2005 | d40c->dma_cfg.pre_transfer(chan, |
1996 | d40c->dma_cfg.pre_transfer_data, | 2006 | d40c->dma_cfg.pre_transfer_data, |
1997 | sg_dma_len(sgl)); | 2007 | sg_dma_len(sgl)); |
1998 | 2008 | ||
1999 | spin_lock_irqsave(&d40c->lock, flg); | 2009 | spin_lock_irqsave(&d40c->lock, flags); |
2000 | d40d = d40_desc_get(d40c); | 2010 | d40d = d40_desc_get(d40c); |
2001 | spin_unlock_irqrestore(&d40c->lock, flg); | 2011 | spin_unlock_irqrestore(&d40c->lock, flags); |
2002 | 2012 | ||
2003 | if (d40d == NULL) | 2013 | if (d40d == NULL) |
2004 | return NULL; | 2014 | return NULL; |
2005 | 2015 | ||
2006 | memset(d40d, 0, sizeof(struct d40_desc)); | ||
2007 | |||
2008 | if (d40c->log_num != D40_PHY_CHAN) | 2016 | if (d40c->log_num != D40_PHY_CHAN) |
2009 | err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, | 2017 | err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, |
2010 | direction, flags); | 2018 | direction, dma_flags); |
2011 | else | 2019 | else |
2012 | err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, | 2020 | err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, |
2013 | direction, flags); | 2021 | direction, dma_flags); |
2014 | if (err) { | 2022 | if (err) { |
2015 | dev_err(&d40c->chan.dev->device, | 2023 | dev_err(&d40c->chan.dev->device, |
2016 | "[%s] Failed to prepare %s slave sg job: %d\n", | 2024 | "[%s] Failed to prepare %s slave sg job: %d\n", |
@@ -2019,7 +2027,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | |||
2019 | return NULL; | 2027 | return NULL; |
2020 | } | 2028 | } |
2021 | 2029 | ||
2022 | d40d->txd.flags = flags; | 2030 | d40d->txd.flags = dma_flags; |
2023 | 2031 | ||
2024 | dma_async_tx_descriptor_init(&d40d->txd, chan); | 2032 | dma_async_tx_descriptor_init(&d40d->txd, chan); |
2025 | 2033 | ||
@@ -2037,6 +2045,13 @@ static enum dma_status d40_tx_status(struct dma_chan *chan, | |||
2037 | dma_cookie_t last_complete; | 2045 | dma_cookie_t last_complete; |
2038 | int ret; | 2046 | int ret; |
2039 | 2047 | ||
2048 | if (d40c->phy_chan == NULL) { | ||
2049 | dev_err(&d40c->chan.dev->device, | ||
2050 | "[%s] Cannot read status of unallocated channel\n", | ||
2051 | __func__); | ||
2052 | return -EINVAL; | ||
2053 | } | ||
2054 | |||
2040 | last_complete = d40c->completed; | 2055 | last_complete = d40c->completed; |
2041 | last_used = chan->cookie; | 2056 | last_used = chan->cookie; |
2042 | 2057 | ||
@@ -2056,6 +2071,12 @@ static void d40_issue_pending(struct dma_chan *chan) | |||
2056 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 2071 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2057 | unsigned long flags; | 2072 | unsigned long flags; |
2058 | 2073 | ||
2074 | if (d40c->phy_chan == NULL) { | ||
2075 | dev_err(&d40c->chan.dev->device, | ||
2076 | "[%s] Channel is not allocated!\n", __func__); | ||
2077 | return; | ||
2078 | } | ||
2079 | |||
2059 | spin_lock_irqsave(&d40c->lock, flags); | 2080 | spin_lock_irqsave(&d40c->lock, flags); |
2060 | 2081 | ||
2061 | /* Busy means that pending jobs are already being processed */ | 2082 | /* Busy means that pending jobs are already being processed */ |
@@ -2065,12 +2086,129 @@ static void d40_issue_pending(struct dma_chan *chan) | |||
2065 | spin_unlock_irqrestore(&d40c->lock, flags); | 2086 | spin_unlock_irqrestore(&d40c->lock, flags); |
2066 | } | 2087 | } |
2067 | 2088 | ||
2089 | /* Runtime reconfiguration extension */ | ||
2090 | static void d40_set_runtime_config(struct dma_chan *chan, | ||
2091 | struct dma_slave_config *config) | ||
2092 | { | ||
2093 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | ||
2094 | struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; | ||
2095 | enum dma_slave_buswidth config_addr_width; | ||
2096 | dma_addr_t config_addr; | ||
2097 | u32 config_maxburst; | ||
2098 | enum stedma40_periph_data_width addr_width; | ||
2099 | int psize; | ||
2100 | |||
2101 | if (config->direction == DMA_FROM_DEVICE) { | ||
2102 | dma_addr_t dev_addr_rx = | ||
2103 | d40c->base->plat_data->dev_rx[cfg->src_dev_type]; | ||
2104 | |||
2105 | config_addr = config->src_addr; | ||
2106 | if (dev_addr_rx) | ||
2107 | dev_dbg(d40c->base->dev, | ||
2108 | "channel has a pre-wired RX address %08x " | ||
2109 | "overriding with %08x\n", | ||
2110 | dev_addr_rx, config_addr); | ||
2111 | if (cfg->dir != STEDMA40_PERIPH_TO_MEM) | ||
2112 | dev_dbg(d40c->base->dev, | ||
2113 | "channel was not configured for peripheral " | ||
2114 | "to memory transfer (%d) overriding\n", | ||
2115 | cfg->dir); | ||
2116 | cfg->dir = STEDMA40_PERIPH_TO_MEM; | ||
2117 | |||
2118 | config_addr_width = config->src_addr_width; | ||
2119 | config_maxburst = config->src_maxburst; | ||
2120 | |||
2121 | } else if (config->direction == DMA_TO_DEVICE) { | ||
2122 | dma_addr_t dev_addr_tx = | ||
2123 | d40c->base->plat_data->dev_tx[cfg->dst_dev_type]; | ||
2124 | |||
2125 | config_addr = config->dst_addr; | ||
2126 | if (dev_addr_tx) | ||
2127 | dev_dbg(d40c->base->dev, | ||
2128 | "channel has a pre-wired TX address %08x " | ||
2129 | "overriding with %08x\n", | ||
2130 | dev_addr_tx, config_addr); | ||
2131 | if (cfg->dir != STEDMA40_MEM_TO_PERIPH) | ||
2132 | dev_dbg(d40c->base->dev, | ||
2133 | "channel was not configured for memory " | ||
2134 | "to peripheral transfer (%d) overriding\n", | ||
2135 | cfg->dir); | ||
2136 | cfg->dir = STEDMA40_MEM_TO_PERIPH; | ||
2137 | |||
2138 | config_addr_width = config->dst_addr_width; | ||
2139 | config_maxburst = config->dst_maxburst; | ||
2140 | |||
2141 | } else { | ||
2142 | dev_err(d40c->base->dev, | ||
2143 | "unrecognized channel direction %d\n", | ||
2144 | config->direction); | ||
2145 | return; | ||
2146 | } | ||
2147 | |||
2148 | switch (config_addr_width) { | ||
2149 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
2150 | addr_width = STEDMA40_BYTE_WIDTH; | ||
2151 | break; | ||
2152 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
2153 | addr_width = STEDMA40_HALFWORD_WIDTH; | ||
2154 | break; | ||
2155 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
2156 | addr_width = STEDMA40_WORD_WIDTH; | ||
2157 | break; | ||
2158 | case DMA_SLAVE_BUSWIDTH_8_BYTES: | ||
2159 | addr_width = STEDMA40_DOUBLEWORD_WIDTH; | ||
2160 | break; | ||
2161 | default: | ||
2162 | dev_err(d40c->base->dev, | ||
2163 | "illegal peripheral address width " | ||
2164 | "requested (%d)\n", | ||
2165 | config_addr_width); | ||
2166 | return; | ||
2167 | } | ||
2168 | |||
2169 | if (config_maxburst >= 16) | ||
2170 | psize = STEDMA40_PSIZE_LOG_16; | ||
2171 | else if (config_maxburst >= 8) | ||
2172 | psize = STEDMA40_PSIZE_LOG_8; | ||
2173 | else if (config_maxburst >= 4) | ||
2174 | psize = STEDMA40_PSIZE_LOG_4; | ||
2175 | else | ||
2176 | psize = STEDMA40_PSIZE_LOG_1; | ||
2177 | |||
2178 | /* Set up all the endpoint configs */ | ||
2179 | cfg->src_info.data_width = addr_width; | ||
2180 | cfg->src_info.psize = psize; | ||
2181 | cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN; | ||
2182 | cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; | ||
2183 | cfg->dst_info.data_width = addr_width; | ||
2184 | cfg->dst_info.psize = psize; | ||
2185 | cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN; | ||
2186 | cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; | ||
2187 | |||
2188 | /* These settings will take precedence later */ | ||
2189 | d40c->runtime_addr = config_addr; | ||
2190 | d40c->runtime_direction = config->direction; | ||
2191 | dev_dbg(d40c->base->dev, | ||
2192 | "configured channel %s for %s, data width %d, " | ||
2193 | "maxburst %d bytes, LE, no flow control\n", | ||
2194 | dma_chan_name(chan), | ||
2195 | (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", | ||
2196 | config_addr_width, | ||
2197 | config_maxburst); | ||
2198 | } | ||
2199 | |||
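For context, a hedged sketch of how a client driver might reach d40_set_runtime_config() through the generic control interface once the DMA_SLAVE_CONFIG case below is wired up; the FIFO address and burst length are made-up values and error handling is trimmed:

#include <linux/dmaengine.h>

/* Illustrative client-side configuration of an RX (peripheral-to-memory) channel. */
static int example_configure_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config conf = {
		.direction      = DMA_FROM_DEVICE,
		.src_addr       = fifo_addr,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, /* -> STEDMA40_WORD_WIDTH */
		.src_maxburst   = 8,                          /* -> STEDMA40_PSIZE_LOG_8 */
	};

	return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					    (unsigned long) &conf);
}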
2068 | static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 2200 | static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
2069 | unsigned long arg) | 2201 | unsigned long arg) |
2070 | { | 2202 | { |
2071 | unsigned long flags; | 2203 | unsigned long flags; |
2072 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 2204 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2073 | 2205 | ||
2206 | if (d40c->phy_chan == NULL) { | ||
2207 | dev_err(&d40c->chan.dev->device, | ||
2208 | "[%s] Channel is not allocated!\n", __func__); | ||
2209 | return -EINVAL; | ||
2210 | } | ||
2211 | |||
2074 | switch (cmd) { | 2212 | switch (cmd) { |
2075 | case DMA_TERMINATE_ALL: | 2213 | case DMA_TERMINATE_ALL: |
2076 | spin_lock_irqsave(&d40c->lock, flags); | 2214 | spin_lock_irqsave(&d40c->lock, flags); |
@@ -2081,6 +2219,12 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
2081 | return d40_pause(chan); | 2219 | return d40_pause(chan); |
2082 | case DMA_RESUME: | 2220 | case DMA_RESUME: |
2083 | return d40_resume(chan); | 2221 | return d40_resume(chan); |
2222 | case DMA_SLAVE_CONFIG: | ||
2223 | d40_set_runtime_config(chan, | ||
2224 | (struct dma_slave_config *) arg); | ||
2225 | return 0; | ||
2226 | default: | ||
2227 | break; | ||
2084 | } | 2228 | } |
2085 | 2229 | ||
2086 | /* Other commands are unimplemented */ | 2230 | /* Other commands are unimplemented */ |
@@ -2111,13 +2255,10 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, | |||
2111 | 2255 | ||
2112 | d40c->log_num = D40_PHY_CHAN; | 2256 | d40c->log_num = D40_PHY_CHAN; |
2113 | 2257 | ||
2114 | INIT_LIST_HEAD(&d40c->free); | ||
2115 | INIT_LIST_HEAD(&d40c->active); | 2258 | INIT_LIST_HEAD(&d40c->active); |
2116 | INIT_LIST_HEAD(&d40c->queue); | 2259 | INIT_LIST_HEAD(&d40c->queue); |
2117 | INIT_LIST_HEAD(&d40c->client); | 2260 | INIT_LIST_HEAD(&d40c->client); |
2118 | 2261 | ||
2119 | d40c->free_len = 0; | ||
2120 | |||
2121 | tasklet_init(&d40c->tasklet, dma_tasklet, | 2262 | tasklet_init(&d40c->tasklet, dma_tasklet, |
2122 | (unsigned long) d40c); | 2263 | (unsigned long) d40c); |
2123 | 2264 | ||
@@ -2243,6 +2384,14 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
2243 | } | 2384 | } |
2244 | spin_lock_init(&base->phy_res[i].lock); | 2385 | spin_lock_init(&base->phy_res[i].lock); |
2245 | } | 2386 | } |
2387 | |||
2388 | /* Mark disabled channels as occupied */ | ||
2389 | for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) { | ||
2390 | base->phy_res[base->plat_data->disabled_channels[i]].allocated_src = D40_ALLOC_PHY; | ||
2391 | base->phy_res[base->plat_data->disabled_channels[i]].allocated_dst = D40_ALLOC_PHY; | ||
2392 | num_phy_chans_avail--; | ||
2393 | } | ||
2394 | |||
2246 | dev_info(base->dev, "%d of %d physical DMA channels available\n", | 2395 | dev_info(base->dev, "%d of %d physical DMA channels available\n", |
2247 | num_phy_chans_avail, base->num_phy_chans); | 2396 | num_phy_chans_avail, base->num_phy_chans); |
2248 | 2397 | ||
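The loop above walks a list of physical channel numbers terminated by -1 and takes each listed channel out of the allocation pool. A hedged example of what a board file might pass; the struct name and exact field layout are assumptions, only the disabled_channels name and the -1 terminator come from the code above:

/* Assumed platform-data shape, illustrative values only. */
static struct stedma40_platform_data dma40_plat_data = {
	/* dev_rx[], dev_tx[] and the remaining fields omitted */
	.disabled_channels = { 4, 5, -1 },	/* reserve physical channels 4 and 5 */
};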
@@ -2291,6 +2440,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2291 | int num_log_chans = 0; | 2440 | int num_log_chans = 0; |
2292 | int num_phy_chans; | 2441 | int num_phy_chans; |
2293 | int i; | 2442 | int i; |
2443 | u32 val; | ||
2294 | 2444 | ||
2295 | clk = clk_get(&pdev->dev, NULL); | 2445 | clk = clk_get(&pdev->dev, NULL); |
2296 | 2446 | ||
@@ -2329,12 +2479,13 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2329 | } | 2479 | } |
2330 | } | 2480 | } |
2331 | 2481 | ||
2332 | i = readl(virtbase + D40_DREG_PERIPHID2); | 2482 | /* Get silicon revision */ |
2483 | val = readl(virtbase + D40_DREG_PERIPHID2); | ||
2333 | 2484 | ||
2334 | if ((i & 0xf) != D40_PERIPHID2_DESIGNER) { | 2485 | if ((val & 0xf) != D40_PERIPHID2_DESIGNER) { |
2335 | dev_err(&pdev->dev, | 2486 | dev_err(&pdev->dev, |
2336 | "[%s] Unknown designer! Got %x wanted %x\n", | 2487 | "[%s] Unknown designer! Got %x wanted %x\n", |
2337 | __func__, i & 0xf, D40_PERIPHID2_DESIGNER); | 2488 | __func__, val & 0xf, D40_PERIPHID2_DESIGNER); |
2338 | goto failure; | 2489 | goto failure; |
2339 | } | 2490 | } |
2340 | 2491 | ||
@@ -2342,7 +2493,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2342 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; | 2493 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; |
2343 | 2494 | ||
2344 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", | 2495 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", |
2345 | (i >> 4) & 0xf, res->start); | 2496 | (val >> 4) & 0xf, res->start); |
2346 | 2497 | ||
2347 | plat_data = pdev->dev.platform_data; | 2498 | plat_data = pdev->dev.platform_data; |
2348 | 2499 | ||
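PERIPHID2 is decoded twice in the hunk above: the low nibble carries the designer ID and the next nibble the silicon revision. A worked example, purely illustrative: a readback of 0x28 gives designer 0x28 & 0xf = 0x8 (D40_PERIPHID2_DESIGNER) and revision (0x28 >> 4) & 0xf = 2. The same decode as hypothetical helpers, not part of the driver:

static inline u32 d40_periphid2_designer(u32 periphid2)
{
	return periphid2 & 0xf;		/* must match D40_PERIPHID2_DESIGNER */
}

static inline u32 d40_periphid2_rev(u32 periphid2)
{
	return (periphid2 >> 4) & 0xf;	/* later stored in base->rev */
}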
@@ -2364,6 +2515,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2364 | goto failure; | 2515 | goto failure; |
2365 | } | 2516 | } |
2366 | 2517 | ||
2518 | base->rev = (val >> 4) & 0xf; | ||
2367 | base->clk = clk; | 2519 | base->clk = clk; |
2368 | base->num_phy_chans = num_phy_chans; | 2520 | base->num_phy_chans = num_phy_chans; |
2369 | base->num_log_chans = num_log_chans; | 2521 | base->num_log_chans = num_log_chans; |
@@ -2402,6 +2554,12 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2402 | if (!base->lcla_pool.alloc_map) | 2554 | if (!base->lcla_pool.alloc_map) |
2403 | goto failure; | 2555 | goto failure; |
2404 | 2556 | ||
2557 | base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc), | ||
2558 | 0, SLAB_HWCACHE_ALIGN, | ||
2559 | NULL); | ||
2560 | if (base->desc_slab == NULL) | ||
2561 | goto failure; | ||
2562 | |||
2405 | return base; | 2563 | return base; |
2406 | 2564 | ||
2407 | failure: | 2565 | failure: |
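With descriptors now coming from a dedicated slab cache instead of the removed per-channel free list, getting and releasing a descriptor reduces to the standard slab calls. A minimal sketch, assuming callers may run under the channel spinlock (hence GFP_NOWAIT); the helper names are illustrative and the driver's own d40_desc_get() may differ:

#include <linux/slab.h>

static struct d40_desc *example_desc_get(struct d40_base *base)
{
	/* Zeroed allocation from the cache created above. */
	struct d40_desc *d = kmem_cache_zalloc(base->desc_slab, GFP_NOWAIT);

	if (d)
		INIT_LIST_HEAD(&d->node);
	return d;
}

static void example_desc_put(struct d40_base *base, struct d40_desc *d)
{
	kmem_cache_free(base->desc_slab, d);
}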
@@ -2495,6 +2653,78 @@ static void __init d40_hw_init(struct d40_base *base) | |||
2495 | 2653 | ||
2496 | } | 2654 | } |
2497 | 2655 | ||
2656 | static int __init d40_lcla_allocate(struct d40_base *base) | ||
2657 | { | ||
2658 | unsigned long *page_list; | ||
2659 | int i, j; | ||
2660 | int ret = 0; | ||
2661 | |||
2662 | /* | ||
2663 | * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned. | ||
2664 | * To fulfill this hardware requirement without wasting 256 KiB, | ||
2665 | * we allocate pages until we get an aligned one. | ||
2666 | */ | ||
2667 | page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS, | ||
2668 | GFP_KERNEL); | ||
2669 | |||
2670 | if (!page_list) { | ||
2671 | ret = -ENOMEM; | ||
2672 | goto failure; | ||
2673 | } | ||
2674 | |||
2675 | /* Calculate how many pages are required */ | ||
2676 | base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE; | ||
2677 | |||
2678 | for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) { | ||
2679 | page_list[i] = __get_free_pages(GFP_KERNEL, | ||
2680 | base->lcla_pool.pages); | ||
2681 | if (!page_list[i]) { | ||
2682 | |||
2683 | dev_err(base->dev, | ||
2684 | "[%s] Failed to allocate %d pages.\n", | ||
2685 | __func__, base->lcla_pool.pages); | ||
2686 | |||
2687 | for (j = 0; j < i; j++) | ||
2688 | free_pages(page_list[j], base->lcla_pool.pages); | ||
2689 | goto failure; | ||
2690 | } | ||
2691 | |||
2692 | if ((virt_to_phys((void *)page_list[i]) & | ||
2693 | (LCLA_ALIGNMENT - 1)) == 0) | ||
2694 | break; | ||
2695 | } | ||
2696 | |||
2697 | for (j = 0; j < i; j++) | ||
2698 | free_pages(page_list[j], base->lcla_pool.pages); | ||
2699 | |||
2700 | if (i < MAX_LCLA_ALLOC_ATTEMPTS) { | ||
2701 | base->lcla_pool.base = (void *)page_list[i]; | ||
2702 | } else { | ||
2703 | /* After many attempts with no success finding the correct | ||
2704 | * alignment, try allocating a big buffer instead */ | ||
2705 | dev_warn(base->dev, | ||
2706 | "[%s] Failed to get %d pages @ 18 bit align.\n", | ||
2707 | __func__, base->lcla_pool.pages); | ||
2708 | base->lcla_pool.base_unaligned = kmalloc(SZ_1K * | ||
2709 | base->num_phy_chans + | ||
2710 | LCLA_ALIGNMENT, | ||
2711 | GFP_KERNEL); | ||
2712 | if (!base->lcla_pool.base_unaligned) { | ||
2713 | ret = -ENOMEM; | ||
2714 | goto failure; | ||
2715 | } | ||
2716 | |||
2717 | base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned, | ||
2718 | LCLA_ALIGNMENT); | ||
2719 | } | ||
2720 | |||
2721 | writel(virt_to_phys(base->lcla_pool.base), | ||
2722 | base->virtbase + D40_DREG_LCLA); | ||
2723 | failure: | ||
2724 | kfree(page_list); | ||
2725 | return ret; | ||
2726 | } | ||
2727 | |||
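d40_lcla_allocate() above keeps allocating blocks of pages until one happens to start on a 256 KiB boundary (LCLA_ALIGNMENT is 0x40000, so the low 18 physical address bits must be zero), frees the misses, and falls back to an over-sized kmalloc() plus PTR_ALIGN() when no attempt succeeds. The alignment test itself, restated as a standalone sketch:

#include <linux/io.h>

/* Illustrative restatement of the check done inside the allocation loop above. */
static bool d40_lcla_base_is_aligned(void *vaddr)
{
	return (virt_to_phys(vaddr) & (LCLA_ALIGNMENT - 1)) == 0;
}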
2498 | static int __init d40_probe(struct platform_device *pdev) | 2728 | static int __init d40_probe(struct platform_device *pdev) |
2499 | { | 2729 | { |
2500 | int err; | 2730 | int err; |
@@ -2554,41 +2784,11 @@ static int __init d40_probe(struct platform_device *pdev) | |||
2554 | __func__); | 2784 | __func__); |
2555 | goto failure; | 2785 | goto failure; |
2556 | } | 2786 | } |
2557 | /* Get IO for logical channel link address */ | ||
2558 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla"); | ||
2559 | if (!res) { | ||
2560 | ret = -ENOENT; | ||
2561 | dev_err(&pdev->dev, | ||
2562 | "[%s] No \"lcla\" resource defined\n", | ||
2563 | __func__); | ||
2564 | goto failure; | ||
2565 | } | ||
2566 | 2787 | ||
2567 | base->lcla_pool.base_size = resource_size(res); | 2788 | ret = d40_lcla_allocate(base); |
2568 | base->lcla_pool.phy = res->start; | 2789 | if (ret) { |
2569 | 2790 | dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n", | |
2570 | if (request_mem_region(res->start, resource_size(res), | 2791 | __func__); |
2571 | D40_NAME " I/O lcla") == NULL) { | ||
2572 | ret = -EBUSY; | ||
2573 | dev_err(&pdev->dev, | ||
2574 | "[%s] Failed to request LCLA region 0x%x-0x%x\n", | ||
2575 | __func__, res->start, res->end); | ||
2576 | goto failure; | ||
2577 | } | ||
2578 | val = readl(base->virtbase + D40_DREG_LCLA); | ||
2579 | if (res->start != val && val != 0) { | ||
2580 | dev_warn(&pdev->dev, | ||
2581 | "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n", | ||
2582 | __func__, val, res->start); | ||
2583 | } else | ||
2584 | writel(res->start, base->virtbase + D40_DREG_LCLA); | ||
2585 | |||
2586 | base->lcla_pool.base = ioremap(res->start, resource_size(res)); | ||
2587 | if (!base->lcla_pool.base) { | ||
2588 | ret = -ENOMEM; | ||
2589 | dev_err(&pdev->dev, | ||
2590 | "[%s] Failed to ioremap LCLA 0x%x-0x%x\n", | ||
2591 | __func__, res->start, res->end); | ||
2592 | goto failure; | 2792 | goto failure; |
2593 | } | 2793 | } |
2594 | 2794 | ||
@@ -2616,11 +2816,15 @@ static int __init d40_probe(struct platform_device *pdev) | |||
2616 | 2816 | ||
2617 | failure: | 2817 | failure: |
2618 | if (base) { | 2818 | if (base) { |
2819 | if (base->desc_slab) | ||
2820 | kmem_cache_destroy(base->desc_slab); | ||
2619 | if (base->virtbase) | 2821 | if (base->virtbase) |
2620 | iounmap(base->virtbase); | 2822 | iounmap(base->virtbase); |
2621 | if (base->lcla_pool.phy) | 2823 | if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) |
2622 | release_mem_region(base->lcla_pool.phy, | 2824 | free_pages((unsigned long)base->lcla_pool.base, |
2623 | base->lcla_pool.base_size); | 2825 | base->lcla_pool.pages); |
2826 | if (base->lcla_pool.base_unaligned) | ||
2827 | kfree(base->lcla_pool.base_unaligned); | ||
2624 | if (base->phy_lcpa) | 2828 | if (base->phy_lcpa) |
2625 | release_mem_region(base->phy_lcpa, | 2829 | release_mem_region(base->phy_lcpa, |
2626 | base->lcpa_size); | 2830 | base->lcpa_size); |