Diffstat (limited to 'drivers/dma/ste_dma40.c')
 drivers/dma/ste_dma40.c | 33 +++++++++++++++++++++++---------
 1 file changed, 25 insertions(+), 8 deletions(-)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index d32a9ac86084..f08e5c49c5d2 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -128,6 +128,7 @@ struct d40_desc {
  */
 struct d40_lcla_pool {
 	void		*base;
+	dma_addr_t	dma_addr;
 	void		*base_unaligned;
 	int		 pages;
 	spinlock_t	 lock;
@@ -504,25 +505,25 @@ static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
 
 	d40d->lli_current++;
 	for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
-		struct d40_log_lli *lcla;
+		unsigned int lcla_offset = d40c->phy_chan->num * 1024 +
+					   8 * curr_lcla * 2;
+		struct d40_lcla_pool *pool = &d40c->base->lcla_pool;
+		struct d40_log_lli *lcla = pool->base + lcla_offset;
 
 		if (d40d->lli_current + 1 < d40d->lli_len)
 			next_lcla = d40_lcla_alloc_one(d40c, d40d);
 		else
 			next_lcla = -EINVAL;
 
-		lcla = d40c->base->lcla_pool.base +
-		       d40c->phy_chan->num * 1024 +
-		       8 * curr_lcla * 2;
-
 		d40_log_lli_lcla_write(lcla,
 				       &d40d->lli_log.dst[d40d->lli_current],
 				       &d40d->lli_log.src[d40d->lli_current],
 				       next_lcla);
 
-		(void) dma_map_single(d40c->base->dev, lcla,
-				      2 * sizeof(struct d40_log_lli),
-				      DMA_TO_DEVICE);
+		dma_sync_single_range_for_device(d40c->base->dev,
+					pool->dma_addr, lcla_offset,
+					2 * sizeof(struct d40_log_lli),
+					DMA_TO_DEVICE);
 
 		curr_lcla = next_lcla;
 
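The hunk above is the heart of the fix: the old code called dma_map_single() on every link, creating a fresh mapping per iteration that was never unmapped, while the new code reuses one long-lived mapping of the whole LCLA pool and merely syncs the two LLIs just written. A minimal sketch of that map-once, sync-per-use pattern follows; every name in it is illustrative rather than taken from ste_dma40.c:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical pool: one streaming mapping covering the whole buffer,
 * created at init and cached in dma_addr (mirroring d40_lcla_pool). */
struct demo_pool {
	void		*cpu_base;	/* CPU virtual address */
	dma_addr_t	dma_addr;	/* handle from dma_map_single() */
};

static void demo_pool_publish(struct device *dev, struct demo_pool *pool,
			      unsigned int offset, size_t len)
{
	/* ... CPU writes descriptors into pool->cpu_base + offset ... */

	/* Hand only those bytes to the device: this flushes caches for
	 * a sub-range of the existing mapping; no new mapping is made. */
	dma_sync_single_range_for_device(dev, pool->dma_addr, offset,
					 len, DMA_TO_DEVICE);
}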
@@ -2771,6 +2772,7 @@ static void __init d40_hw_init(struct d40_base *base)
 
 static int __init d40_lcla_allocate(struct d40_base *base)
 {
+	struct d40_lcla_pool *pool = &base->lcla_pool;
 	unsigned long *page_list;
 	int i, j;
 	int ret = 0;
@@ -2835,6 +2837,15 @@ static int __init d40_lcla_allocate(struct d40_base *base)
 						 LCLA_ALIGNMENT);
 	}
 
+	pool->dma_addr = dma_map_single(base->dev, pool->base,
+					SZ_1K * base->num_phy_chans,
+					DMA_TO_DEVICE);
+	if (dma_mapping_error(base->dev, pool->dma_addr)) {
+		pool->dma_addr = 0;
+		ret = -ENOMEM;
+		goto failure;
+	}
+
 	writel(virt_to_phys(base->lcla_pool.base),
 	       base->virtbase + D40_DREG_LCLA);
 failure:
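Because the pool is now mapped exactly once at allocation time, the mapping can also fail cleanly: dma_map_single() may return an invalid handle (for example when IOMMU or bounce-buffer space is exhausted), and that handle is only usable after a dma_mapping_error() check, which is what the hunk above adds. The same idiom in a self-contained sketch with hypothetical names:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Sketch: map a CPU buffer for device reads and validate the handle
 * before anything may use (or later unmap) it. */
static int demo_map_pool(struct device *dev, void *cpu_base, size_t size,
			 dma_addr_t *dma_addr)
{
	dma_addr_t addr = dma_map_single(dev, cpu_base, size, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* handle is invalid, nothing to undo */

	*dma_addr = addr;
	return 0;
}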
@@ -2929,6 +2940,12 @@ failure:
 	kmem_cache_destroy(base->desc_slab);
 	if (base->virtbase)
 		iounmap(base->virtbase);
+
+	if (base->lcla_pool.dma_addr)
+		dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
+				 SZ_1K * base->num_phy_chans,
+				 DMA_TO_DEVICE);
+
 	if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
 		free_pages((unsigned long)base->lcla_pool.base,
 			   base->lcla_pool.pages);
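The teardown mirrors the setup: dma_unmap_single() runs with the same size and direction the mapping was created with, and the dma_addr test skips the unmap when mapping never succeeded (the error path zeroed the field). Taken together, the patch follows the standard streaming-DMA lifecycle, compressed here into one hypothetical function:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Sketch of the full lifecycle: map once, sync per use, unmap once.
 * All names are illustrative. */
static int demo_lifecycle(struct device *dev, void *buf, size_t size,
			  unsigned int offset, size_t chunk)
{
	dma_addr_t addr = dma_map_single(dev, buf, size, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* per use: CPU fills buf + offset, then passes ownership of
	 * that range to the device */
	dma_sync_single_range_for_device(dev, addr, offset, chunk,
					 DMA_TO_DEVICE);

	/* teardown (or init-failure path): unmap exactly what was mapped */
	dma_unmap_single(dev, addr, size, DMA_TO_DEVICE);
	return 0;
}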