aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTony Lindgren <tony@atomide.com>2017-01-19 11:49:08 -0500
committerVinod Koul <vinod.koul@intel.com>2017-01-25 00:59:22 -0500
commit362f4562466c3b9490e733e06999025638310d4a (patch)
tree31e5dc4697c3d4def30b73ed1af6a2f053f1a070
parentae4a3e028bb8b59e7cfeb0cc9ef03d885182ce8b (diff)
dmaengine: cppi41: Fix oops in cppi41_runtime_resume
Commit fdea2d09b997 ("dmaengine: cppi41: Add basic PM runtime support") together with recent MUSB changes allowed USB and DMA on BeagleBone to idle when no cable is connected. But looks like a few corner case issues still remain. Looks like just by re-plugging USB cable about ten or so times on BeagleBone when configured in USB peripheral mode we can get warnings and eventually trigger an oops in cppi41 DMA: WARNING: CPU: 0 PID: 14 at drivers/dma/cppi41.c:1154 cppi41_runtime_suspend+0x28/0x38 [cppi41] ... WARNING: CPU: 0 PID: 14 at drivers/dma/cppi41.c:452 push_desc_queue+0x94/0x9c [cppi41] ... Unable to handle kernel NULL pointer dereference at virtual address 00000104 pgd = c0004000 [00000104] *pgd=00000000 Internal error: Oops: 805 [#1] SMP ARM ... [<bf0d92cc>] (cppi41_runtime_resume [cppi41]) from [<c0589838>] (__rpm_callback+0xc0/0x214) [<c0589838>] (__rpm_callback) from [<c05899ac>] (rpm_callback+0x20/0x80) [<c05899ac>] (rpm_callback) from [<c0589460>] (rpm_resume+0x504/0x78c) [<c0589460>] (rpm_resume) from [<c058a1a0>] (pm_runtime_work+0x60/0xa8) [<c058a1a0>] (pm_runtime_work) from [<c0156120>] (process_one_work+0x2b4/0x808) This is because of a race with runtime PM and cppi41_dma_issue_pending() as reported by Alexandre Bailon <abailon@baylibre.com> in an earlier set of patches. Based on mailing list discussions we however came to the conclusion that a different fix from Alexandre's fix is needed in order to guarantee that DMA is really active when we try to use it. To fix the issue, we need to add a driver specific flag as we otherwise can have -EINPROGRESS state set by runtime PM and can't rely on pm_runtime_active() to tell us when we can use the DMA. And we need to make sure the DMA transfers get triggered in the queued order. So let's always queue the transfers, then flush the queue from both cppi41_dma_issue_pending() and cppi41_runtime_resume() as suggested by Grygorii Strashko <grygorii.strashko@ti.com> in an earlier example patch. 
For reference, this is also documented in Documentation/power/runtime_pm.txt in the example at the end of the file as pointed out by Grygorii Strashko <grygorii.strashko@ti.com>. Based on earlier patches from Alexandre Bailon <abailon@baylibre.com> and Grygorii Strashko <grygorii.strashko@ti.com> modified based on testing and what was discussed on the mailing lists. Fixes: fdea2d09b997 ("dmaengine: cppi41: Add basic PM runtime support") Cc: Andy Shevchenko <andy.shevchenko@gmail.com> Cc: Bin Liu <b-liu@ti.com> Cc: Grygorii Strashko <grygorii.strashko@ti.com> Cc: Kevin Hilman <khilman@baylibre.com> Cc: Patrick Titiano <ptitiano@baylibre.com> Cc: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com> Reported-by: Alexandre Bailon <abailon@baylibre.com> Signed-off-by: Tony Lindgren <tony@atomide.com> Tested-by: Bin Liu <b-liu@ti.com> Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--drivers/dma/cppi41.c40
1 file changed, 25 insertions, 15 deletions
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index 7de4fdf86a6a..55c1782e3623 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -153,6 +153,8 @@ struct cppi41_dd {
153 153
154 /* context for suspend/resume */ 154 /* context for suspend/resume */
155 unsigned int dma_tdfdq; 155 unsigned int dma_tdfdq;
156
157 bool is_suspended;
156}; 158};
157 159
158#define FIST_COMPLETION_QUEUE 93 160#define FIST_COMPLETION_QUEUE 93
@@ -470,20 +472,26 @@ static void push_desc_queue(struct cppi41_channel *c)
470 cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num)); 472 cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
471} 473}
472 474
473static void pending_desc(struct cppi41_channel *c) 475/*
476 * Caller must hold cdd->lock to prevent push_desc_queue()
477 * getting called out of order. We have both cppi41_dma_issue_pending()
478 * and cppi41_runtime_resume() call this function.
479 */
480static void cppi41_run_queue(struct cppi41_dd *cdd)
474{ 481{
475 struct cppi41_dd *cdd = c->cdd; 482 struct cppi41_channel *c, *_c;
476 unsigned long flags;
477 483
478 spin_lock_irqsave(&cdd->lock, flags); 484 list_for_each_entry_safe(c, _c, &cdd->pending, node) {
479 list_add_tail(&c->node, &cdd->pending); 485 push_desc_queue(c);
480 spin_unlock_irqrestore(&cdd->lock, flags); 486 list_del(&c->node);
487 }
481} 488}
482 489
483static void cppi41_dma_issue_pending(struct dma_chan *chan) 490static void cppi41_dma_issue_pending(struct dma_chan *chan)
484{ 491{
485 struct cppi41_channel *c = to_cpp41_chan(chan); 492 struct cppi41_channel *c = to_cpp41_chan(chan);
486 struct cppi41_dd *cdd = c->cdd; 493 struct cppi41_dd *cdd = c->cdd;
494 unsigned long flags;
487 int error; 495 int error;
488 496
489 error = pm_runtime_get(cdd->ddev.dev); 497 error = pm_runtime_get(cdd->ddev.dev);
@@ -495,10 +503,11 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
495 return; 503 return;
496 } 504 }
497 505
498 if (likely(pm_runtime_active(cdd->ddev.dev))) 506 spin_lock_irqsave(&cdd->lock, flags);
499 push_desc_queue(c); 507 list_add_tail(&c->node, &cdd->pending);
500 else 508 if (!cdd->is_suspended)
501 pending_desc(c); 509 cppi41_run_queue(cdd);
510 spin_unlock_irqrestore(&cdd->lock, flags);
502 511
503 pm_runtime_mark_last_busy(cdd->ddev.dev); 512 pm_runtime_mark_last_busy(cdd->ddev.dev);
504 pm_runtime_put_autosuspend(cdd->ddev.dev); 513 pm_runtime_put_autosuspend(cdd->ddev.dev);
@@ -1166,8 +1175,12 @@ static int __maybe_unused cppi41_resume(struct device *dev)
1166static int __maybe_unused cppi41_runtime_suspend(struct device *dev) 1175static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
1167{ 1176{
1168 struct cppi41_dd *cdd = dev_get_drvdata(dev); 1177 struct cppi41_dd *cdd = dev_get_drvdata(dev);
1178 unsigned long flags;
1169 1179
1180 spin_lock_irqsave(&cdd->lock, flags);
1181 cdd->is_suspended = true;
1170 WARN_ON(!list_empty(&cdd->pending)); 1182 WARN_ON(!list_empty(&cdd->pending));
1183 spin_unlock_irqrestore(&cdd->lock, flags);
1171 1184
1172 return 0; 1185 return 0;
1173} 1186}
@@ -1175,14 +1188,11 @@ static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
1175static int __maybe_unused cppi41_runtime_resume(struct device *dev) 1188static int __maybe_unused cppi41_runtime_resume(struct device *dev)
1176{ 1189{
1177 struct cppi41_dd *cdd = dev_get_drvdata(dev); 1190 struct cppi41_dd *cdd = dev_get_drvdata(dev);
1178 struct cppi41_channel *c, *_c;
1179 unsigned long flags; 1191 unsigned long flags;
1180 1192
1181 spin_lock_irqsave(&cdd->lock, flags); 1193 spin_lock_irqsave(&cdd->lock, flags);
1182 list_for_each_entry_safe(c, _c, &cdd->pending, node) { 1194 cdd->is_suspended = false;
1183 push_desc_queue(c); 1195 cppi41_run_queue(cdd);
1184 list_del(&c->node);
1185 }
1186 spin_unlock_irqrestore(&cdd->lock, flags); 1196 spin_unlock_irqrestore(&cdd->lock, flags);
1187 1197
1188 return 0; 1198 return 0;