author		Peter Ujfalusi <peter.ujfalusi@ti.com>	2018-11-14 06:06:22 -0500
committer	Mark Brown <broonie@kernel.org>	2018-11-23 09:05:23 -0500
commit		373a500e34aea97971c9d71e45edad458d3da98f (patch)
tree		f5189df4a2b78cad2029edcd8310a6509cd4ce8e
parent		dd2f52d8991af9fe0928d59ec502ba52be7bc38d (diff)
ASoC: omap-mcpdm: Add pm_qos handling to avoid under/overruns with CPU_IDLE
We need to block sleep states which would require a longer time to leave than the time within which the DMA must react to the DMA request, in order to keep the FIFO serviced without under- or overrun.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Acked-by: Jarkko Nikula <jarkko.nikula@bitmer.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
-rw-r--r--	sound/soc/omap/omap-mcpdm.c	43
1 file changed, 42 insertions(+), 1 deletion(-)
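For context, the latency budget the patch computes in hw_params() is the FIFO headroom expressed in time: the number of samples the FIFO can still supply (playback) or absorb (capture) before it under- or overruns, divided by the sample rate. The standalone sketch below walks through that arithmetic; the threshold of 2, the 96 kHz rate and the MCPDM_DN_THRES_MAX value of 32 are illustrative assumptions and are not taken from this patch, only the formula mirrors the hw_params() change.

/* Worked example of the per-stream latency budget (values assumed, not from the patch) */
#include <stdio.h>

#define USEC_PER_SEC		1000000L
#define MCPDM_DN_THRES_MAX	32	/* assumed FIFO threshold maximum */

int main(void)
{
	unsigned int threshold = 2;	/* assumed DMA request threshold, in samples */
	unsigned int rate = 96000;	/* assumed McPDM sample rate */

	/* Playback: 'threshold' samples of slack remain before the FIFO underruns */
	long tx_budget_us = (long)threshold * USEC_PER_SEC / rate;

	/* Capture: (MCPDM_DN_THRES_MAX - threshold) samples fit before the FIFO overruns */
	long rx_budget_us = (long)(MCPDM_DN_THRES_MAX - threshold) * USEC_PER_SEC / rate;

	/* Any CPU idle state with a longer exit latency than the budget must be blocked */
	printf("playback budget: %ld us, capture budget: %ld us\n",
	       tx_budget_us, rx_budget_us);
	return 0;
}

With these assumed numbers the budgets come out to roughly 20 us for playback and 312 us for capture, which is why omap_mcpdm_prepare() below requests the smaller of the two active streams' values as the PM_QOS_CPU_DMA_LATENCY constraint.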
diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
index 4c1be36c2207..7d5bdc5a2890 100644
--- a/sound/soc/omap/omap-mcpdm.c
+++ b/sound/soc/omap/omap-mcpdm.c
@@ -54,6 +54,8 @@ struct omap_mcpdm {
 	unsigned long phys_base;
 	void __iomem *io_base;
 	int irq;
+	struct pm_qos_request pm_qos_req;
+	int latency[2];
 
 	struct mutex mutex;
 
@@ -277,6 +279,9 @@ static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
 				    struct snd_soc_dai *dai)
 {
 	struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+	int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+	int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
+	int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
 
 	mutex_lock(&mcpdm->mutex);
 
@@ -289,6 +294,14 @@ static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
 		}
 	}
 
+	if (mcpdm->latency[stream2])
+		pm_qos_update_request(&mcpdm->pm_qos_req,
+				      mcpdm->latency[stream2]);
+	else if (mcpdm->latency[stream1])
+		pm_qos_remove_request(&mcpdm->pm_qos_req);
+
+	mcpdm->latency[stream1] = 0;
+
 	mutex_unlock(&mcpdm->mutex);
 }
 
@@ -300,7 +313,7 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
 	int stream = substream->stream;
 	struct snd_dmaengine_dai_dma_data *dma_data;
 	u32 threshold;
-	int channels;
+	int channels, latency;
 	int link_mask = 0;
 
 	channels = params_channels(params);
@@ -344,14 +357,25 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
 
 		dma_data->maxburst =
 				(MCPDM_DN_THRES_MAX - threshold) * channels;
+		latency = threshold;
 	} else {
 		/* If playback is not running assume a stereo stream to come */
 		if (!mcpdm->config[!stream].link_mask)
 			mcpdm->config[!stream].link_mask = (0x3 << 3);
 
 		dma_data->maxburst = threshold * channels;
+		latency = (MCPDM_DN_THRES_MAX - threshold);
 	}
 
+	/*
+	 * The DMA must act to a DMA request within latency time (usec) to avoid
+	 * under/overflow
+	 */
+	mcpdm->latency[stream] = latency * USEC_PER_SEC / params_rate(params);
+
+	if (!mcpdm->latency[stream])
+		mcpdm->latency[stream] = 10;
+
 	/* Check if we need to restart McPDM with this stream */
 	if (mcpdm->config[stream].link_mask &&
 	    mcpdm->config[stream].link_mask != link_mask)
@@ -366,6 +390,20 @@ static int omap_mcpdm_prepare(struct snd_pcm_substream *substream,
 				  struct snd_soc_dai *dai)
 {
 	struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+	struct pm_qos_request *pm_qos_req = &mcpdm->pm_qos_req;
+	int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+	int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
+	int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+	int latency = mcpdm->latency[stream2];
+
+	/* Prevent omap hardware from hitting off between FIFO fills */
+	if (!latency || mcpdm->latency[stream1] < latency)
+		latency = mcpdm->latency[stream1];
+
+	if (pm_qos_request_active(pm_qos_req))
+		pm_qos_update_request(pm_qos_req, latency);
+	else if (latency)
+		pm_qos_add_request(pm_qos_req, PM_QOS_CPU_DMA_LATENCY, latency);
 
 	if (!omap_mcpdm_active(mcpdm)) {
 		omap_mcpdm_start(mcpdm);
@@ -427,6 +465,9 @@ static int omap_mcpdm_remove(struct snd_soc_dai *dai)
 	free_irq(mcpdm->irq, (void *)mcpdm);
 	pm_runtime_disable(mcpdm->dev);
 
+	if (pm_qos_request_active(&mcpdm->pm_qos_req))
+		pm_qos_remove_request(&mcpdm->pm_qos_req);
+
 	return 0;
 }
 