author     John Stultz <john.stultz@linaro.org>    2016-08-29 13:30:51 -0400
committer  Vinod Koul <vinod.koul@intel.com>       2016-08-31 00:39:32 -0400
commit     36387a2b1f62b5c087c5fe6f0f7b23b94f722ad7 (patch)
tree       44e1b2bb543d2b4f9c80aef802dfd6cd17c36399
parent     b77f262ae351d467c22b056f6d13afeeab7ea69a (diff)
k3dma: Fix memory handling in preparation for cyclic mode
With cyclic mode, the shared virt-dma logic doesn't actually manage the descriptor state, nor the calling of the descriptor free callback. This results in a desc structure being leaked every time we start an audio transfer, so we must manage it ourselves.

The k3dma driver already keeps track of the active and finished descriptors via its ds_run and ds_done pointers, so clean up how we handle those two values, and when we tear everything down in terminate_all, call free_desc on the ds_run and ds_done pointers if they are not NULL.

NOTE: HiKey doesn't use the non-cyclic dma modes, so I've not been able to test those modes. But with this patch we no longer leak the desc structures.

Cc: Zhangfei Gao <zhangfei.gao@linaro.org>
Cc: Krzysztof Kozlowski <k.kozlowski@samsung.com>
Cc: Maxime Ripard <maxime.ripard@free-electrons.com>
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Andy Green <andy@warmcat.com>
Acked-by: Zhangfei Gao <zhangfei.gao@linaro.org>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--  drivers/dma/k3dma.c  38
1 file changed, 24 insertions(+), 14 deletions(-)
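To make the descriptor bookkeeping the patch relies on easier to follow, here is a minimal, self-contained C sketch of the lifecycle it enforces: a descriptor is parked in ds_run while the hardware executes it, handed off to ds_done by the completion interrupt, and freed exactly once during terminate_all. Only the ds_run/ds_done field names mirror the real driver; struct desc, struct phy_chan, and the three functions below are simplified stand-ins, with plain assert()/free() in place of the kernel's WARN_ON_ONCE() and dma_pool_free()/kfree().

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the driver's software descriptor (k3_dma_desc_sw). */
struct desc {
    int id;
};

/* Stand-in for the physical channel's bookkeeping (k3_dma_phy). */
struct phy_chan {
    struct desc *ds_run;  /* descriptor the hardware is executing */
    struct desc *ds_done; /* completed descriptor awaiting cleanup */
};

static void start_txd(struct phy_chan *p, struct desc *ds)
{
    /* Mirrors the patch's WARN_ON_ONCE() checks: starting a new
     * transfer while an old descriptor is still tracked would
     * leak it. */
    assert(!p->ds_run && !p->ds_done);
    p->ds_run = ds;
}

static void irq_complete(struct phy_chan *p)
{
    /* The handoff the patch adds to the interrupt handler:
     * run -> done, and ds_run is cleared so the same descriptor
     * cannot be completed twice. */
    assert(!p->ds_done);
    p->ds_done = p->ds_run;
    p->ds_run = NULL;
}

static void terminate_all(struct phy_chan *p)
{
    /* Free whatever is still tracked, as the patch does in
     * k3_dma_terminate_all(), since the virt-dma core does not
     * free cyclic descriptors for us. */
    if (p->ds_run) {
        free(p->ds_run);
        p->ds_run = NULL;
    }
    if (p->ds_done) {
        free(p->ds_done);
        p->ds_done = NULL;
    }
}

int main(void)
{
    struct phy_chan p = { 0 };
    struct desc *d = calloc(1, sizeof(*d));

    if (!d)
        return 1;
    start_txd(&p, d);
    irq_complete(&p);  /* d now sits in ds_done */
    terminate_all(&p); /* freed here instead of leaking */
    printf("descriptor freed exactly once\n");
    return 0;
}

Before the patch, terminate_all only NULLed the two pointers; once cyclic transfers are added, nothing else would free those descriptors, which is exactly the leak the commit message describes.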
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 9d96c956c25f..8108fa119d39 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -212,7 +212,9 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
 
                 spin_lock_irqsave(&c->vc.lock, flags);
                 vchan_cookie_complete(&p->ds_run->vd);
+                WARN_ON_ONCE(p->ds_done);
                 p->ds_done = p->ds_run;
+                p->ds_run = NULL;
                 spin_unlock_irqrestore(&c->vc.lock, flags);
             }
             irq_chan |= BIT(i);
@@ -253,14 +255,14 @@ static int k3_dma_start_txd(struct k3_dma_chan *c)
          * so vc->desc_issued only contains desc pending
          */
         list_del(&ds->vd.node);
+
+        WARN_ON_ONCE(c->phy->ds_run);
+        WARN_ON_ONCE(c->phy->ds_done);
         c->phy->ds_run = ds;
-        c->phy->ds_done = NULL;
         /* start dma */
         k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
         return 0;
     }
-    c->phy->ds_done = NULL;
-    c->phy->ds_run = NULL;
     return -EAGAIN;
 }
 
@@ -594,6 +596,16 @@ static int k3_dma_config(struct dma_chan *chan,
     return 0;
 }
 
+static void k3_dma_free_desc(struct virt_dma_desc *vd)
+{
+    struct k3_dma_desc_sw *ds =
+        container_of(vd, struct k3_dma_desc_sw, vd);
+    struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device);
+
+    dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
+    kfree(ds);
+}
+
 static int k3_dma_terminate_all(struct dma_chan *chan)
 {
     struct k3_dma_chan *c = to_k3_chan(chan);
@@ -617,7 +629,15 @@ static int k3_dma_terminate_all(struct dma_chan *chan)
         k3_dma_terminate_chan(p, d);
         c->phy = NULL;
         p->vchan = NULL;
-        p->ds_run = p->ds_done = NULL;
+        if (p->ds_run) {
+            k3_dma_free_desc(&p->ds_run->vd);
+            p->ds_run = NULL;
+        }
+        if (p->ds_done) {
+            k3_dma_free_desc(&p->ds_done->vd);
+            p->ds_done = NULL;
+        }
+
     }
     spin_unlock_irqrestore(&c->vc.lock, flags);
     vchan_dma_desc_free_list(&c->vc, &head);
@@ -670,16 +690,6 @@ static int k3_dma_transfer_resume(struct dma_chan *chan)
     return 0;
 }
 
-static void k3_dma_free_desc(struct virt_dma_desc *vd)
-{
-    struct k3_dma_desc_sw *ds =
-        container_of(vd, struct k3_dma_desc_sw, vd);
-    struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device);
-
-    dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
-    kfree(ds);
-}
-
 static const struct of_device_id k3_pdma_dt_ids[] = {
     { .compatible = "hisilicon,k3-dma-1.0", },
     {}