Diffstat (limited to 'sound/soc/fsl/mpc5200_dma.c')
-rw-r--r--  sound/soc/fsl/mpc5200_dma.c | 123
1 files changed, 25 insertions, 98 deletions
diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
index 6096d22283e6..30ed568afb2e 100644
--- a/sound/soc/fsl/mpc5200_dma.c
+++ b/sound/soc/fsl/mpc5200_dma.c
@@ -58,47 +58,15 @@ static void psc_dma_bcom_enqueue_next_buffer(struct psc_dma_stream *s)
 	/* Prepare and enqueue the next buffer descriptor */
 	bd = bcom_prepare_next_buffer(s->bcom_task);
 	bd->status = s->period_bytes;
-	bd->data[0] = s->period_next_pt;
+	bd->data[0] = s->runtime->dma_addr + (s->period_next * s->period_bytes);
 	bcom_submit_next_buffer(s->bcom_task, NULL);
 
 	/* Update for next period */
-	s->period_next_pt += s->period_bytes;
-	if (s->period_next_pt >= s->period_end)
-		s->period_next_pt = s->period_start;
-}
-
-static void psc_dma_bcom_enqueue_tx(struct psc_dma_stream *s)
-{
-	if (s->appl_ptr > s->runtime->control->appl_ptr) {
-		/*
-		 * In this case s->runtime->control->appl_ptr has wrapped around.
-		 * Play the data to the end of the boundary, then wrap our own
-		 * appl_ptr back around.
-		 */
-		while (s->appl_ptr < s->runtime->boundary) {
-			if (bcom_queue_full(s->bcom_task))
-				return;
-
-			s->appl_ptr += s->period_size;
-
-			psc_dma_bcom_enqueue_next_buffer(s);
-		}
-		s->appl_ptr -= s->runtime->boundary;
-	}
-
-	while (s->appl_ptr < s->runtime->control->appl_ptr) {
-
-		if (bcom_queue_full(s->bcom_task))
-			return;
-
-		s->appl_ptr += s->period_size;
-
-		psc_dma_bcom_enqueue_next_buffer(s);
-	}
+	s->period_next = (s->period_next + 1) % s->runtime->periods;
 }
 
 /* Bestcomm DMA irq handler */
-static irqreturn_t psc_dma_bcom_irq_tx(int irq, void *_psc_dma_stream)
+static irqreturn_t psc_dma_bcom_irq(int irq, void *_psc_dma_stream)
 {
 	struct psc_dma_stream *s = _psc_dma_stream;
 
@@ -108,34 +76,8 @@ static irqreturn_t psc_dma_bcom_irq_tx(int irq, void *_psc_dma_stream)
 	while (bcom_buffer_done(s->bcom_task)) {
 		bcom_retrieve_buffer(s->bcom_task, NULL, NULL);
 
-		s->period_current_pt += s->period_bytes;
-		if (s->period_current_pt >= s->period_end)
-			s->period_current_pt = s->period_start;
-	}
-	psc_dma_bcom_enqueue_tx(s);
-	spin_unlock(&s->psc_dma->lock);
-
-	/* If the stream is active, then also inform the PCM middle layer
-	 * of the period finished event. */
-	if (s->active)
-		snd_pcm_period_elapsed(s->stream);
-
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t psc_dma_bcom_irq_rx(int irq, void *_psc_dma_stream)
-{
-	struct psc_dma_stream *s = _psc_dma_stream;
-
-	spin_lock(&s->psc_dma->lock);
-	/* For each finished period, dequeue the completed period buffer
-	 * and enqueue a new one in it's place. */
-	while (bcom_buffer_done(s->bcom_task)) {
-		bcom_retrieve_buffer(s->bcom_task, NULL, NULL);
-
-		s->period_current_pt += s->period_bytes;
-		if (s->period_current_pt >= s->period_end)
-			s->period_current_pt = s->period_start;
+		s->period_current = (s->period_current+1) % s->runtime->periods;
+		s->period_count++;
 
 		psc_dma_bcom_enqueue_next_buffer(s);
 	}
@@ -166,54 +108,38 @@ static int psc_dma_trigger(struct snd_pcm_substream *substream, int cmd)
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
 	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct psc_dma_stream *s;
+	struct psc_dma_stream *s = to_psc_dma_stream(substream, psc_dma);
 	struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs;
 	u16 imr;
 	unsigned long flags;
 	int i;
 
-	if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
-		s = &psc_dma->capture;
-	else
-		s = &psc_dma->playback;
-
-	dev_dbg(psc_dma->dev, "psc_dma_trigger(substream=%p, cmd=%i)"
-		" stream_id=%i\n",
-		substream, cmd, substream->pstr->stream);
-
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
+		dev_dbg(psc_dma->dev, "START: stream=%i fbits=%u ps=%u #p=%u\n",
+			substream->pstr->stream, runtime->frame_bits,
+			(int)runtime->period_size, runtime->periods);
 		s->period_bytes = frames_to_bytes(runtime,
 						  runtime->period_size);
-		s->period_start = virt_to_phys(runtime->dma_area);
-		s->period_end = s->period_start +
-				(s->period_bytes * runtime->periods);
-		s->period_next_pt = s->period_start;
-		s->period_current_pt = s->period_start;
-		s->period_size = runtime->period_size;
+		s->period_next = 0;
+		s->period_current = 0;
 		s->active = 1;
-
-		/* track appl_ptr so that we have a better chance of detecting
-		 * end of stream and not over running it.
-		 */
+		s->period_count = 0;
 		s->runtime = runtime;
-		s->appl_ptr = s->runtime->control->appl_ptr -
-				(runtime->period_size * runtime->periods);
 
 		/* Fill up the bestcomm bd queue and enable DMA.
 		 * This will begin filling the PSC's fifo.
 		 */
 		spin_lock_irqsave(&psc_dma->lock, flags);
 
-		if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
 			bcom_gen_bd_rx_reset(s->bcom_task);
-			for (i = 0; i < runtime->periods; i++)
-				if (!bcom_queue_full(s->bcom_task))
-					psc_dma_bcom_enqueue_next_buffer(s);
-		} else {
+		else
 			bcom_gen_bd_tx_reset(s->bcom_task);
-			psc_dma_bcom_enqueue_tx(s);
-		}
+
+		for (i = 0; i < runtime->periods; i++)
+			if (!bcom_queue_full(s->bcom_task))
+				psc_dma_bcom_enqueue_next_buffer(s);
 
 		bcom_enable(s->bcom_task);
 		spin_unlock_irqrestore(&psc_dma->lock, flags);
@@ -223,6 +149,8 @@ static int psc_dma_trigger(struct snd_pcm_substream *substream, int cmd)
 		break;
 
 	case SNDRV_PCM_TRIGGER_STOP:
+		dev_dbg(psc_dma->dev, "STOP: stream=%i periods_count=%i\n",
+			substream->pstr->stream, s->period_count);
 		s->active = 0;
 
 		spin_lock_irqsave(&psc_dma->lock, flags);
@@ -236,7 +164,8 @@ static int psc_dma_trigger(struct snd_pcm_substream *substream, int cmd)
 		break;
 
 	default:
-		dev_dbg(psc_dma->dev, "invalid command\n");
+		dev_dbg(psc_dma->dev, "unhandled trigger: stream=%i cmd=%i\n",
+			substream->pstr->stream, cmd);
 		return -EINVAL;
 	}
 
@@ -343,7 +272,7 @@ psc_dma_pointer(struct snd_pcm_substream *substream)
 	else
 		s = &psc_dma->playback;
 
-	count = s->period_current_pt - s->period_start;
+	count = s->period_current * s->period_bytes;
 
 	return bytes_to_frames(substream->runtime, count);
 }
@@ -532,11 +461,9 @@ int mpc5200_audio_dma_create(struct of_device *op)
 
 	rc = request_irq(psc_dma->irq, &psc_dma_status_irq, IRQF_SHARED,
 			 "psc-dma-status", psc_dma);
-	rc |= request_irq(psc_dma->capture.irq,
-			  &psc_dma_bcom_irq_rx, IRQF_SHARED,
+	rc |= request_irq(psc_dma->capture.irq, &psc_dma_bcom_irq, IRQF_SHARED,
 			  "psc-dma-capture", &psc_dma->capture);
-	rc |= request_irq(psc_dma->playback.irq,
-			  &psc_dma_bcom_irq_tx, IRQF_SHARED,
+	rc |= request_irq(psc_dma->playback.irq, &psc_dma_bcom_irq, IRQF_SHARED,
 			  "psc-dma-playback", &psc_dma->playback);
 	if (rc) {
 		ret = -ENODEV;
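
The core of this change is replacing physical-address period tracking (period_next_pt/period_current_pt) with simple period indices wrapped modulo runtime->periods. The standalone C sketch below is not part of the commit; the struct, main() and the constants in it are hypothetical, and only the field names (period_next, period_current, period_bytes, periods, dma_addr) and the two formulas mirror what the diff shows.

/*
 * Illustrative sketch only, not driver code: models the period-index
 * bookkeeping that the patch switches the Bestcomm DMA stream to.
 */
#include <stdio.h>

struct stream {
	unsigned int period_bytes;	/* bytes per period */
	unsigned int periods;		/* number of periods in the ring buffer */
	unsigned int period_next;	/* next period to hand to the DMA engine */
	unsigned int period_current;	/* period the hardware last completed */
	unsigned long dma_addr;		/* base address of the DMA buffer */
};

/* Address placed in the buffer descriptor, mirroring
 * bd->data[0] = s->runtime->dma_addr + (s->period_next * s->period_bytes) */
static unsigned long next_period_addr(const struct stream *s)
{
	return s->dma_addr + (unsigned long)s->period_next * s->period_bytes;
}

int main(void)
{
	struct stream s = { .period_bytes = 4096, .periods = 4,
			    .dma_addr = 0x10000000UL };
	int i;

	/* Enqueue six descriptors; the index wraps modulo the period count,
	 * exactly as in psc_dma_bcom_enqueue_next_buffer(). */
	for (i = 0; i < 6; i++) {
		printf("enqueue addr=0x%lx\n", next_period_addr(&s));
		s.period_next = (s.period_next + 1) % s.periods;
	}

	/* The pointer callback reports period_current * period_bytes. */
	printf("pointer offset=%u bytes\n", s.period_current * s.period_bytes);
	return 0;
}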