aboutsummaryrefslogtreecommitdiffstats
path: root/sound/soc/fsl/mpc5200_dma.c
diff options
context:
space:
mode:
authorGrant Likely <grant.likely@secretlab.ca>2009-11-07 03:34:05 -0500
committerMark Brown <broonie@opensource.wolfsonmicro.com>2009-11-07 07:40:08 -0500
commitd56b6eb6df7f6fb92383a52d640e27f71e6262d0 (patch)
tree31182236726e7b67f25dda2f551a6c0136ef97c5 /sound/soc/fsl/mpc5200_dma.c
parent8f159d720b89f2a6c5ae8a8cc54823933a58120b (diff)
ASoC/mpc5200: get rid of the appl_ptr tracking nonsense
Sound drivers' PCM DMA is supposed to free-run until told to stop by the trigger callback. The current code tries to track appl_ptr, to avoid stale buffer data getting played out at the end of the data stream. Unfortunately it also results in race conditions which can cause the audio to stall. Signed-off-by: Grant Likely <grant.likely@secretlab.ca> Acked-by: Liam Girdwood <lrg@slimlogic.co.uk> Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
Diffstat (limited to 'sound/soc/fsl/mpc5200_dma.c')
-rw-r--r-- sound/soc/fsl/mpc5200_dma.c | 52
1 file changed, 8 insertions, 44 deletions
diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
index 986d3c8ab6e1..4e475861f5db 100644
--- a/sound/soc/fsl/mpc5200_dma.c
+++ b/sound/soc/fsl/mpc5200_dma.c
@@ -65,36 +65,6 @@ static void psc_dma_bcom_enqueue_next_buffer(struct psc_dma_stream *s)
65 s->period_next = (s->period_next + 1) % s->runtime->periods; 65 s->period_next = (s->period_next + 1) % s->runtime->periods;
66} 66}
67 67
68static void psc_dma_bcom_enqueue_tx(struct psc_dma_stream *s)
69{
70 if (s->appl_ptr > s->runtime->control->appl_ptr) {
71 /*
72 * In this case s->runtime->control->appl_ptr has wrapped around.
73 * Play the data to the end of the boundary, then wrap our own
74 * appl_ptr back around.
75 */
76 while (s->appl_ptr < s->runtime->boundary) {
77 if (bcom_queue_full(s->bcom_task))
78 return;
79
80 s->appl_ptr += s->runtime->period_size;
81
82 psc_dma_bcom_enqueue_next_buffer(s);
83 }
84 s->appl_ptr -= s->runtime->boundary;
85 }
86
87 while (s->appl_ptr < s->runtime->control->appl_ptr) {
88
89 if (bcom_queue_full(s->bcom_task))
90 return;
91
92 s->appl_ptr += s->runtime->period_size;
93
94 psc_dma_bcom_enqueue_next_buffer(s);
95 }
96}
97
98/* Bestcomm DMA irq handler */ 68/* Bestcomm DMA irq handler */
99static irqreturn_t psc_dma_bcom_irq_tx(int irq, void *_psc_dma_stream) 69static irqreturn_t psc_dma_bcom_irq_tx(int irq, void *_psc_dma_stream)
100{ 70{
@@ -107,8 +77,9 @@ static irqreturn_t psc_dma_bcom_irq_tx(int irq, void *_psc_dma_stream)
107 bcom_retrieve_buffer(s->bcom_task, NULL, NULL); 77 bcom_retrieve_buffer(s->bcom_task, NULL, NULL);
108 78
109 s->period_current = (s->period_current+1) % s->runtime->periods; 79 s->period_current = (s->period_current+1) % s->runtime->periods;
80
81 psc_dma_bcom_enqueue_next_buffer(s);
110 } 82 }
111 psc_dma_bcom_enqueue_tx(s);
112 spin_unlock(&s->psc_dma->lock); 83 spin_unlock(&s->psc_dma->lock);
113 84
114 /* If the stream is active, then also inform the PCM middle layer 85 /* If the stream is active, then also inform the PCM middle layer
@@ -182,28 +153,21 @@ static int psc_dma_trigger(struct snd_pcm_substream *substream, int cmd)
182 s->period_next = 0; 153 s->period_next = 0;
183 s->period_current = 0; 154 s->period_current = 0;
184 s->active = 1; 155 s->active = 1;
185
186 /* track appl_ptr so that we have a better chance of detecting
187 * end of stream and not over running it.
188 */
189 s->runtime = runtime; 156 s->runtime = runtime;
190 s->appl_ptr = s->runtime->control->appl_ptr -
191 (runtime->period_size * runtime->periods);
192 157
193 /* Fill up the bestcomm bd queue and enable DMA. 158 /* Fill up the bestcomm bd queue and enable DMA.
194 * This will begin filling the PSC's fifo. 159 * This will begin filling the PSC's fifo.
195 */ 160 */
196 spin_lock_irqsave(&psc_dma->lock, flags); 161 spin_lock_irqsave(&psc_dma->lock, flags);
197 162
198 if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE) { 163 if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
199 bcom_gen_bd_rx_reset(s->bcom_task); 164 bcom_gen_bd_rx_reset(s->bcom_task);
200 for (i = 0; i < runtime->periods; i++) 165 else
201 if (!bcom_queue_full(s->bcom_task))
202 psc_dma_bcom_enqueue_next_buffer(s);
203 } else {
204 bcom_gen_bd_tx_reset(s->bcom_task); 166 bcom_gen_bd_tx_reset(s->bcom_task);
205 psc_dma_bcom_enqueue_tx(s); 167
206 } 168 for (i = 0; i < runtime->periods; i++)
169 if (!bcom_queue_full(s->bcom_task))
170 psc_dma_bcom_enqueue_next_buffer(s);
207 171
208 bcom_enable(s->bcom_task); 172 bcom_enable(s->bcom_task);
209 spin_unlock_irqrestore(&psc_dma->lock, flags); 173 spin_unlock_irqrestore(&psc_dma->lock, flags);