author     Vinod Koul <vinod.koul@intel.com>    2016-10-02 23:48:14 -0400
committer  Vinod Koul <vinod.koul@intel.com>    2016-10-02 23:48:14 -0400
commit     850e0448a6db0650a6c41adacbc92f106094d1db (patch)
tree       288937c3f555f219a8f967537bb7652b879bff88 /drivers/dma
parent     ed58a112b0f57f2673cd483d45d82de1ad890e9a (diff)
parent     5f03c39978e3437398d4777215c5818e62118b2c (diff)
Merge branch 'topic/k3' into for-linus
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig |   2
-rw-r--r--  drivers/dma/k3dma.c | 215
2 files changed, 179 insertions, 38 deletions
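
This merge pulls the 'topic/k3' branch into for-linus: it widens the Kconfig dependency, moves LLI descriptor storage into a DMA pool, and adds cyclic (DMA_CYCLIC) transfers with a per-period TC2 interrupt to the Hisilicon k3dma driver. For orientation only, below is a minimal sketch of how a client driver might exercise the new cyclic capability through the generic dmaengine API; the helper name, slave configuration and buffer sizes are illustrative and are not part of this patch.

/* Illustrative client-side sketch (not from this patch): stream a
 * DMA-mapped ring buffer to a device FIFO using the DMA_CYCLIC
 * capability this merge enables in k3dma.
 */
#include <linux/dmaengine.h>

static int start_cyclic_tx(struct dma_chan *chan, dma_addr_t buf,
                           size_t buf_len, size_t period_len,
                           dma_addr_t fifo_addr)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_MEM_TO_DEV,
                .dst_addr       = fifo_addr,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst   = 8,
        };
        struct dma_async_tx_descriptor *desc;
        int ret;

        ret = dmaengine_slave_config(chan, &cfg);
        if (ret)
                return ret;

        /* One callback per period; the transfer wraps around on its own. */
        desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
                                         DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
        if (!desc)
                return -ENOMEM;

        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
        return 0;
}
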
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9e680ecf31d6..af63a6bcf564 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -278,7 +278,7 @@ config INTEL_MIC_X100_DMA
 
 config K3_DMA
         tristate "Hisilicon K3 DMA support"
-        depends on ARCH_HI3xxx
+        depends on ARCH_HI3xxx || ARCH_HISI || COMPILE_TEST
         select DMA_ENGINE
         select DMA_VIRTUAL_CHANNELS
         help
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 39de8980128c..aabcb7934b05 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Linaro Ltd.
+ * Copyright (c) 2013 - 2015 Linaro Ltd.
  * Copyright (c) 2013 Hisilicon Limited.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -8,6 +8,8 @@
  */
 #include <linux/sched.h>
 #include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
 #include <linux/dmaengine.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -25,22 +27,28 @@
 
 #define DRIVER_NAME            "k3-dma"
 #define DMA_MAX_SIZE           0x1ffc
+#define DMA_CYCLIC_MAX_PERIOD  0x1000
+#define LLI_BLOCK_SIZE         (4 * PAGE_SIZE)
 
 #define INT_STAT               0x00
 #define INT_TC1                0x04
+#define INT_TC2                0x08
 #define INT_ERR1               0x0c
 #define INT_ERR2               0x10
 #define INT_TC1_MASK           0x18
+#define INT_TC2_MASK           0x1c
 #define INT_ERR1_MASK          0x20
 #define INT_ERR2_MASK          0x24
 #define INT_TC1_RAW            0x600
-#define INT_ERR1_RAW           0x608
-#define INT_ERR2_RAW           0x610
+#define INT_TC2_RAW            0x608
+#define INT_ERR1_RAW           0x610
+#define INT_ERR2_RAW           0x618
 #define CH_PRI                 0x688
 #define CH_STAT                0x690
 #define CX_CUR_CNT             0x704
 #define CX_LLI                 0x800
-#define CX_CNT                 0x810
+#define CX_CNT1                0x80c
+#define CX_CNT0                0x810
 #define CX_SRC                 0x814
 #define CX_DST                 0x818
 #define CX_CFG                 0x81c
@@ -49,6 +57,7 @@
 
 #define CX_LLI_CHAIN_EN        0x2
 #define CX_CFG_EN              0x1
+#define CX_CFG_NODEIRQ         BIT(1)
 #define CX_CFG_MEM2PER         (0x1 << 2)
 #define CX_CFG_PER2MEM         (0x2 << 2)
 #define CX_CFG_SRCINCR         (0x1 << 31)
@@ -68,7 +77,7 @@ struct k3_dma_desc_sw {
         dma_addr_t desc_hw_lli;
         size_t desc_num;
         size_t size;
-        struct k3_desc_hw desc_hw[0];
+        struct k3_desc_hw *desc_hw;
 };
 
 struct k3_dma_phy;
@@ -81,6 +90,7 @@ struct k3_dma_chan {
         enum dma_transfer_direction dir;
         dma_addr_t dev_addr;
         enum dma_status status;
+        bool cyclic;
 };
 
 struct k3_dma_phy {
@@ -100,6 +110,7 @@ struct k3_dma_dev {
         struct k3_dma_phy *phy;
         struct k3_dma_chan *chans;
         struct clk *clk;
+        struct dma_pool *pool;
         u32 dma_channels;
         u32 dma_requests;
         unsigned int irq;
@@ -135,6 +146,7 @@ static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
 
         val = 0x1 << phy->idx;
         writel_relaxed(val, d->base + INT_TC1_RAW);
+        writel_relaxed(val, d->base + INT_TC2_RAW);
         writel_relaxed(val, d->base + INT_ERR1_RAW);
         writel_relaxed(val, d->base + INT_ERR2_RAW);
 }
@@ -142,7 +154,7 @@ static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
 static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
 {
         writel_relaxed(hw->lli, phy->base + CX_LLI);
-        writel_relaxed(hw->count, phy->base + CX_CNT);
+        writel_relaxed(hw->count, phy->base + CX_CNT0);
         writel_relaxed(hw->saddr, phy->base + CX_SRC);
         writel_relaxed(hw->daddr, phy->base + CX_DST);
         writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG);
@@ -176,11 +188,13 @@ static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
 
                 /* unmask irq */
                 writel_relaxed(0xffff, d->base + INT_TC1_MASK);
+                writel_relaxed(0xffff, d->base + INT_TC2_MASK);
                 writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
                 writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
         } else {
                 /* mask irq */
                 writel_relaxed(0x0, d->base + INT_TC1_MASK);
+                writel_relaxed(0x0, d->base + INT_TC2_MASK);
                 writel_relaxed(0x0, d->base + INT_ERR1_MASK);
                 writel_relaxed(0x0, d->base + INT_ERR2_MASK);
         }
@@ -193,22 +207,31 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
         struct k3_dma_chan *c;
         u32 stat = readl_relaxed(d->base + INT_STAT);
         u32 tc1  = readl_relaxed(d->base + INT_TC1);
+        u32 tc2  = readl_relaxed(d->base + INT_TC2);
         u32 err1 = readl_relaxed(d->base + INT_ERR1);
         u32 err2 = readl_relaxed(d->base + INT_ERR2);
         u32 i, irq_chan = 0;
 
         while (stat) {
                 i = __ffs(stat);
-                stat &= (stat - 1);
-                if (likely(tc1 & BIT(i))) {
+                stat &= ~BIT(i);
+                if (likely(tc1 & BIT(i)) || (tc2 & BIT(i))) {
+                        unsigned long flags;
+
                         p = &d->phy[i];
                         c = p->vchan;
-                        if (c) {
-                                unsigned long flags;
-
+                        if (c && (tc1 & BIT(i))) {
                                 spin_lock_irqsave(&c->vc.lock, flags);
                                 vchan_cookie_complete(&p->ds_run->vd);
+                                WARN_ON_ONCE(p->ds_done);
                                 p->ds_done = p->ds_run;
+                                p->ds_run = NULL;
+                                spin_unlock_irqrestore(&c->vc.lock, flags);
+                        }
+                        if (c && (tc2 & BIT(i))) {
+                                spin_lock_irqsave(&c->vc.lock, flags);
+                                if (p->ds_run != NULL)
+                                        vchan_cyclic_callback(&p->ds_run->vd);
                                 spin_unlock_irqrestore(&c->vc.lock, flags);
                         }
                         irq_chan |= BIT(i);
@@ -218,14 +241,17 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
         }
 
         writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
+        writel_relaxed(irq_chan, d->base + INT_TC2_RAW);
         writel_relaxed(err1, d->base + INT_ERR1_RAW);
         writel_relaxed(err2, d->base + INT_ERR2_RAW);
 
-        if (irq_chan) {
+        if (irq_chan)
                 tasklet_schedule(&d->task);
+
+        if (irq_chan || err1 || err2)
                 return IRQ_HANDLED;
-        } else
-                return IRQ_NONE;
+
+        return IRQ_NONE;
 }
 
 static int k3_dma_start_txd(struct k3_dma_chan *c)
@@ -247,14 +273,14 @@ static int k3_dma_start_txd(struct k3_dma_chan *c)
                  * so vc->desc_issued only contains desc pending
                  */
                 list_del(&ds->vd.node);
+
+                WARN_ON_ONCE(c->phy->ds_run);
+                WARN_ON_ONCE(c->phy->ds_done);
                 c->phy->ds_run = ds;
-                c->phy->ds_done = NULL;
                 /* start dma */
                 k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
                 return 0;
         }
-        c->phy->ds_done = NULL;
-        c->phy->ds_run = NULL;
         return -EAGAIN;
 }
 
@@ -351,7 +377,7 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
          * its total size.
          */
         vd = vchan_find_desc(&c->vc, cookie);
-        if (vd) {
+        if (vd && !c->cyclic) {
                 bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
         } else if ((!p) || (!p->ds_run)) {
                 bytes = 0;
@@ -361,7 +387,8 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
 
                 bytes = k3_dma_get_curr_cnt(d, p);
                 clli = k3_dma_get_curr_lli(p);
-                index = (clli - ds->desc_hw_lli) / sizeof(struct k3_desc_hw);
+                index = ((clli - ds->desc_hw_lli) /
+                                sizeof(struct k3_desc_hw)) + 1;
                 for (; index < ds->desc_num; index++) {
                         bytes += ds->desc_hw[index].count;
                         /* end of lli */
@@ -402,9 +429,10 @@ static void k3_dma_issue_pending(struct dma_chan *chan)
 static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
                         dma_addr_t src, size_t len, u32 num, u32 ccfg)
 {
-        if ((num + 1) < ds->desc_num)
+        if (num != ds->desc_num - 1)
                 ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
                         sizeof(struct k3_desc_hw);
+
         ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
         ds->desc_hw[num].count = len;
         ds->desc_hw[num].saddr = src;
@@ -412,6 +440,35 @@ static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
         ds->desc_hw[num].config = ccfg;
 }
 
+static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
+                                                        struct dma_chan *chan)
+{
+        struct k3_dma_chan *c = to_k3_chan(chan);
+        struct k3_dma_desc_sw *ds;
+        struct k3_dma_dev *d = to_k3_dma(chan->device);
+        int lli_limit = LLI_BLOCK_SIZE / sizeof(struct k3_desc_hw);
+
+        if (num > lli_limit) {
+                dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
+                        &c->vc, num, lli_limit);
+                return NULL;
+        }
+
+        ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
+        if (!ds)
+                return NULL;
+
+        ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
+        if (!ds->desc_hw) {
+                dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
+                kfree(ds);
+                return NULL;
+        }
+        memset(ds->desc_hw, 0, sizeof(struct k3_desc_hw) * num);
+        ds->desc_num = num;
+        return ds;
+}
+
 static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
         struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
         size_t len, unsigned long flags)
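
The new k3_dma_alloc_desc_resource() above replaces the old kzalloc() + __virt_to_phys() descriptor allocation with a dma_pool, so the LLI array the controller walks always has a proper bus address. Below is a minimal standalone sketch of that dma_pool pattern, with illustrative names that are not the driver's own:

/* Illustrative sketch of the dma_pool pattern adopted by this patch. */
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/gfp.h>
#include <linux/types.h>

/* Probe time: one devres-managed pool of fixed-size LLI blocks,
 * released automatically when the device goes away.
 */
static struct dma_pool *lli_pool_create(struct device *dev, size_t block_size)
{
        return dmam_pool_create("lli-pool", dev, block_size, 32 /* align */, 0);
}

/* Per descriptor: *handle is the bus address the DMA engine chases,
 * the returned pointer is what the CPU uses to fill in the LLIs.
 */
static void *lli_block_get(struct dma_pool *pool, dma_addr_t *handle)
{
        return dma_pool_alloc(pool, GFP_NOWAIT, handle);
}

static void lli_block_put(struct dma_pool *pool, void *cpu_addr, dma_addr_t handle)
{
        dma_pool_free(pool, cpu_addr, handle);
}
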
@@ -425,13 +482,13 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
                 return NULL;
 
         num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
-        ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
+
+        ds = k3_dma_alloc_desc_resource(num, chan);
         if (!ds)
                 return NULL;
 
-        ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
+        c->cyclic = 0;
         ds->size = len;
-        ds->desc_num = num;
         num = 0;
 
         if (!c->ccfg) {
@@ -474,18 +531,17 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
         if (sgl == NULL)
                 return NULL;
 
+        c->cyclic = 0;
+
         for_each_sg(sgl, sg, sglen, i) {
                 avail = sg_dma_len(sg);
                 if (avail > DMA_MAX_SIZE)
                         num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
         }
 
-        ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
+        ds = k3_dma_alloc_desc_resource(num, chan);
         if (!ds)
                 return NULL;
-
-        ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
-        ds->desc_num = num;
         num = 0;
 
         for_each_sg(sgl, sg, sglen, i) {
@@ -516,6 +572,73 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
         return vchan_tx_prep(&c->vc, &ds->vd, flags);
 }
 
+static struct dma_async_tx_descriptor *
+k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
+                       size_t buf_len, size_t period_len,
+                       enum dma_transfer_direction dir,
+                       unsigned long flags)
+{
+        struct k3_dma_chan *c = to_k3_chan(chan);
+        struct k3_dma_desc_sw *ds;
+        size_t len, avail, total = 0;
+        dma_addr_t addr, src = 0, dst = 0;
+        int num = 1, since = 0;
+        size_t modulo = DMA_CYCLIC_MAX_PERIOD;
+        u32 en_tc2 = 0;
+
+        dev_dbg(chan->device->dev, "%s: buf %pad, dst %pad, buf len %zu, period_len = %zu, dir %d\n",
+                __func__, &buf_addr, &to_k3_chan(chan)->dev_addr,
+                buf_len, period_len, (int)dir);
+
+        avail = buf_len;
+        if (avail > modulo)
+                num += DIV_ROUND_UP(avail, modulo) - 1;
+
+        ds = k3_dma_alloc_desc_resource(num, chan);
+        if (!ds)
+                return NULL;
+
+        c->cyclic = 1;
+        addr = buf_addr;
+        avail = buf_len;
+        total = avail;
+        num = 0;
+
+        if (period_len < modulo)
+                modulo = period_len;
+
+        do {
+                len = min_t(size_t, avail, modulo);
+
+                if (dir == DMA_MEM_TO_DEV) {
+                        src = addr;
+                        dst = c->dev_addr;
+                } else if (dir == DMA_DEV_TO_MEM) {
+                        src = c->dev_addr;
+                        dst = addr;
+                }
+                since += len;
+                if (since >= period_len) {
+                        /* descriptor asks for TC2 interrupt on completion */
+                        en_tc2 = CX_CFG_NODEIRQ;
+                        since -= period_len;
+                } else
+                        en_tc2 = 0;
+
+                k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg | en_tc2);
+
+                addr += len;
+                avail -= len;
+        } while (avail);
+
+        /* "Cyclic" == end of link points back to start of link */
+        ds->desc_hw[num - 1].lli |= ds->desc_hw_lli;
+
+        ds->size = total;
+
+        return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
 static int k3_dma_config(struct dma_chan *chan,
                          struct dma_slave_config *cfg)
 {
@@ -551,7 +674,7 @@ static int k3_dma_config(struct dma_chan *chan,
         c->ccfg |= (val << 12) | (val << 16);
 
         if ((maxburst == 0) || (maxburst > 16))
-                val = 16;
+                val = 15;
         else
                 val = maxburst - 1;
         c->ccfg |= (val << 20) | (val << 24);
@@ -563,6 +686,16 @@ static int k3_dma_config(struct dma_chan *chan,
         return 0;
 }
 
+static void k3_dma_free_desc(struct virt_dma_desc *vd)
+{
+        struct k3_dma_desc_sw *ds =
+                container_of(vd, struct k3_dma_desc_sw, vd);
+        struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device);
+
+        dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
+        kfree(ds);
+}
+
 static int k3_dma_terminate_all(struct dma_chan *chan)
 {
         struct k3_dma_chan *c = to_k3_chan(chan);
@@ -586,7 +719,15 @@ static int k3_dma_terminate_all(struct dma_chan *chan)
                 k3_dma_terminate_chan(p, d);
                 c->phy = NULL;
                 p->vchan = NULL;
-                p->ds_run = p->ds_done = NULL;
+                if (p->ds_run) {
+                        k3_dma_free_desc(&p->ds_run->vd);
+                        p->ds_run = NULL;
+                }
+                if (p->ds_done) {
+                        k3_dma_free_desc(&p->ds_done->vd);
+                        p->ds_done = NULL;
+                }
+
         }
         spin_unlock_irqrestore(&c->vc.lock, flags);
         vchan_dma_desc_free_list(&c->vc, &head);
@@ -639,14 +780,6 @@ static int k3_dma_transfer_resume(struct dma_chan *chan)
         return 0;
 }
 
-static void k3_dma_free_desc(struct virt_dma_desc *vd)
-{
-        struct k3_dma_desc_sw *ds =
-                container_of(vd, struct k3_dma_desc_sw, vd);
-
-        kfree(ds);
-}
-
 static const struct of_device_id k3_pdma_dt_ids[] = {
         { .compatible = "hisilicon,k3-dma-1.0", },
         {}
@@ -706,6 +839,12 @@ static int k3_dma_probe(struct platform_device *op)
 
         d->irq = irq;
 
+        /* A DMA memory pool for LLIs, align on 32-byte boundary */
+        d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
+                                   LLI_BLOCK_SIZE, 32, 0);
+        if (!d->pool)
+                return -ENOMEM;
+
         /* init phy channel */
         d->phy = devm_kzalloc(&op->dev,
                 d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL);
@@ -722,11 +861,13 @@ static int k3_dma_probe(struct platform_device *op)
         INIT_LIST_HEAD(&d->slave.channels);
         dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
         dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
+        dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
         d->slave.dev = &op->dev;
         d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
         d->slave.device_tx_status = k3_dma_tx_status;
         d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
         d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
+        d->slave.device_prep_dma_cyclic = k3_dma_prep_dma_cyclic;
         d->slave.device_issue_pending = k3_dma_issue_pending;
         d->slave.device_config = k3_dma_config;
         d->slave.device_pause = k3_dma_transfer_pause;
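
Elsewhere in this diff, k3_dma_tx_status() stops taking the whole-descriptor shortcut for cyclic channels ("if (vd && !c->cyclic)"), so residue is always derived from the live LLI position. For context, a hedged client-side sketch of reading that residue through the generic dmaengine API; the helper name is illustrative and not part of this patch:

#include <linux/dmaengine.h>

/* Illustrative: how much of the transfer identified by 'cookie' is still
 * outstanding, e.g. to derive an audio hardware pointer from a cyclic
 * transfer. Returns 0 on error.
 */
static u32 bytes_remaining(struct dma_chan *chan, dma_cookie_t cookie)
{
        struct dma_tx_state state;
        enum dma_status status;

        status = dmaengine_tx_status(chan, cookie, &state);
        if (status == DMA_ERROR)
                return 0;

        return state.residue;
}
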