Diffstat (limited to 'sound/soc/fsl/mpc5200_dma.c')
-rw-r--r--  sound/soc/fsl/mpc5200_dma.c  564
1 file changed, 564 insertions, 0 deletions
diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
new file mode 100644
index 000000000000..efec33a1c5bd
--- /dev/null
+++ b/sound/soc/fsl/mpc5200_dma.c
@@ -0,0 +1,564 @@
/*
 * Freescale MPC5200 PSC DMA
 * ALSA SoC Platform driver
 *
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 * Copyright (C) 2009 Jon Smirl, Digispeaker
 */

#include <linux/module.h>
#include <linux/of_device.h>

#include <sound/soc.h>

#include <sysdev/bestcomm/bestcomm.h>
#include <sysdev/bestcomm/gen_bd.h>
#include <asm/mpc52xx_psc.h>

#include "mpc5200_dma.h"

/*
 * Interrupt handlers
 */
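/* PSC status interrupt: count playback underruns and capture overruns,
 * then clear the PSC error status. */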
static irqreturn_t psc_dma_status_irq(int irq, void *_psc_dma)
{
        struct psc_dma *psc_dma = _psc_dma;
        struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs;
        u16 isr;

        isr = in_be16(&regs->mpc52xx_psc_isr);

        /* Playback underrun error */
        if (psc_dma->playback.active && (isr & MPC52xx_PSC_IMR_TXEMP))
                psc_dma->stats.underrun_count++;

        /* Capture overrun error */
        if (psc_dma->capture.active && (isr & MPC52xx_PSC_IMR_ORERR))
                psc_dma->stats.overrun_count++;

        out_8(&regs->command, MPC52xx_PSC_RST_ERR_STAT);

        return IRQ_HANDLED;
}

/**
 * psc_dma_bcom_enqueue_next_buffer - Enqueue another audio buffer
 * @s: pointer to stream private data structure
 *
 * Enqueues another audio period buffer into the bestcomm queue.
 *
 * Note: The routine must only be called when there is space available in
 * the queue. Otherwise the enqueue will fail and the audio ring buffer
 * will get out of sync.
 */
static void psc_dma_bcom_enqueue_next_buffer(struct psc_dma_stream *s)
{
        struct bcom_bd *bd;

        /* Prepare and enqueue the next buffer descriptor */
        bd = bcom_prepare_next_buffer(s->bcom_task);
        bd->status = s->period_bytes;
        bd->data[0] = s->period_next_pt;
        bcom_submit_next_buffer(s->bcom_task, NULL);

        /* Update for next period */
        s->period_next_pt += s->period_bytes;
        if (s->period_next_pt >= s->period_end)
                s->period_next_pt = s->period_start;
}

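/* Queue TX periods until we catch up with the application pointer or the
 * bestcomm queue is full.  Called with psc_dma->lock held. */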
static void psc_dma_bcom_enqueue_tx(struct psc_dma_stream *s)
{
        while (s->appl_ptr < s->runtime->control->appl_ptr) {

                if (bcom_queue_full(s->bcom_task))
                        return;

                s->appl_ptr += s->period_size;

                psc_dma_bcom_enqueue_next_buffer(s);
        }
}

/* Bestcomm DMA irq handler */
static irqreturn_t psc_dma_bcom_irq_tx(int irq, void *_psc_dma_stream)
{
        struct psc_dma_stream *s = _psc_dma_stream;

        spin_lock(&s->psc_dma->lock);
        /* For each finished period, dequeue the completed period buffer
         * and enqueue a new one in its place. */
        while (bcom_buffer_done(s->bcom_task)) {
                bcom_retrieve_buffer(s->bcom_task, NULL, NULL);

                s->period_current_pt += s->period_bytes;
                if (s->period_current_pt >= s->period_end)
                        s->period_current_pt = s->period_start;
        }
        psc_dma_bcom_enqueue_tx(s);
        spin_unlock(&s->psc_dma->lock);

        /* If the stream is active, then also inform the PCM middle layer
         * of the period finished event. */
        if (s->active)
                snd_pcm_period_elapsed(s->stream);

        return IRQ_HANDLED;
}

static irqreturn_t psc_dma_bcom_irq_rx(int irq, void *_psc_dma_stream)
{
        struct psc_dma_stream *s = _psc_dma_stream;

        spin_lock(&s->psc_dma->lock);
        /* For each finished period, dequeue the completed period buffer
         * and enqueue a new one in its place. */
        while (bcom_buffer_done(s->bcom_task)) {
                bcom_retrieve_buffer(s->bcom_task, NULL, NULL);

                s->period_current_pt += s->period_bytes;
                if (s->period_current_pt >= s->period_end)
                        s->period_current_pt = s->period_start;

                psc_dma_bcom_enqueue_next_buffer(s);
        }
        spin_unlock(&s->psc_dma->lock);

        /* If the stream is active, then also inform the PCM middle layer
         * of the period finished event. */
        if (s->active)
                snd_pcm_period_elapsed(s->stream);

        return IRQ_HANDLED;
}

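/* hw_free: the DMA buffer is preallocated and kept for the life of the PCM,
 * so just detach it from the runtime here. */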
static int psc_dma_hw_free(struct snd_pcm_substream *substream)
{
        snd_pcm_set_runtime_buffer(substream, NULL);
        return 0;
}

/**
 * psc_dma_trigger: start and stop the DMA transfer.
 *
 * This function is called by ALSA to start, stop, pause, and resume the DMA
 * transfer of data.
 */
static int psc_dma_trigger(struct snd_pcm_substream *substream, int cmd)
{
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct psc_dma_stream *s;
        struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs;
        u16 imr;
        unsigned long flags;
        int i;

        if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
                s = &psc_dma->capture;
        else
                s = &psc_dma->playback;

        dev_dbg(psc_dma->dev, "psc_dma_trigger(substream=%p, cmd=%i)"
                " stream_id=%i\n",
                substream, cmd, substream->pstr->stream);

        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
                s->period_bytes = frames_to_bytes(runtime,
                                runtime->period_size);
                s->period_start = virt_to_phys(runtime->dma_area);
                s->period_end = s->period_start +
                                (s->period_bytes * runtime->periods);
                s->period_next_pt = s->period_start;
                s->period_current_pt = s->period_start;
                s->period_size = runtime->period_size;
                s->active = 1;

                /* track appl_ptr so that we have a better chance of detecting
                 * end of stream and not overrunning it.
                 */
                s->runtime = runtime;
                s->appl_ptr = s->runtime->control->appl_ptr -
                                (runtime->period_size * runtime->periods);
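                /* This starts appl_ptr one full buffer behind the application
                 * pointer, so the first psc_dma_bcom_enqueue_tx() pass below
                 * can queue up to 'periods' period buffers. */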

                /* Fill up the bestcomm bd queue and enable DMA.
                 * This will begin filling the PSC's fifo.
                 */
                spin_lock_irqsave(&psc_dma->lock, flags);

                if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE) {
                        bcom_gen_bd_rx_reset(s->bcom_task);
                        for (i = 0; i < runtime->periods; i++)
                                if (!bcom_queue_full(s->bcom_task))
                                        psc_dma_bcom_enqueue_next_buffer(s);
                } else {
                        bcom_gen_bd_tx_reset(s->bcom_task);
                        psc_dma_bcom_enqueue_tx(s);
                }

                bcom_enable(s->bcom_task);
                spin_unlock_irqrestore(&psc_dma->lock, flags);

                out_8(&regs->command, MPC52xx_PSC_RST_ERR_STAT);

                break;

        case SNDRV_PCM_TRIGGER_STOP:
                s->active = 0;

                spin_lock_irqsave(&psc_dma->lock, flags);
                bcom_disable(s->bcom_task);
                if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
                        bcom_gen_bd_rx_reset(s->bcom_task);
                else
                        bcom_gen_bd_tx_reset(s->bcom_task);
                spin_unlock_irqrestore(&psc_dma->lock, flags);

                break;

        default:
                dev_dbg(psc_dma->dev, "invalid command\n");
                return -EINVAL;
        }

        /* Update interrupt enable settings */
        imr = 0;
        if (psc_dma->playback.active)
                imr |= MPC52xx_PSC_IMR_TXEMP;
        if (psc_dma->capture.active)
                imr |= MPC52xx_PSC_IMR_ORERR;
        out_be16(&regs->isr_imr.imr, psc_dma->imr | imr);

        return 0;
}


/* ---------------------------------------------------------------------
 * The PSC DMA 'ASoC platform' driver
 *
 * Can be referenced by an 'ASoC machine' driver
 * This driver only deals with the audio bus; it doesn't have any
 * interaction with the attached codec
 */

static const struct snd_pcm_hardware psc_dma_hardware = {
        .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
                SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
                SNDRV_PCM_INFO_BATCH,
        .formats = SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_BE |
                SNDRV_PCM_FMTBIT_S24_BE | SNDRV_PCM_FMTBIT_S32_BE,
        .rate_min = 8000,
        .rate_max = 48000,
        .channels_min = 1,
        .channels_max = 2,
        .period_bytes_max = 1024 * 1024,
        .period_bytes_min = 32,
        .periods_min = 2,
        .periods_max = 256,
        .buffer_bytes_max = 2 * 1024 * 1024,
        .fifo_size = 512,
};

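/* PCM open: apply the hardware constraints and remember the substream so
 * the bestcomm interrupt handlers can signal period completion on it. */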
static int psc_dma_open(struct snd_pcm_substream *substream)
{
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
        struct psc_dma_stream *s;
        int rc;

        dev_dbg(psc_dma->dev, "psc_dma_open(substream=%p)\n", substream);

        if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
                s = &psc_dma->capture;
        else
                s = &psc_dma->playback;

        snd_soc_set_runtime_hwparams(substream, &psc_dma_hardware);

        rc = snd_pcm_hw_constraint_integer(runtime,
                SNDRV_PCM_HW_PARAM_PERIODS);
        if (rc < 0) {
                dev_err(substream->pcm->card->dev, "invalid buffer size\n");
                return rc;
        }

        s->stream = substream;
        return 0;
}

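/* PCM close: when the last active stream goes away, restore the saved
 * interrupt mask and clear the PSC error status. */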
static int psc_dma_close(struct snd_pcm_substream *substream)
{
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
        struct psc_dma_stream *s;

        dev_dbg(psc_dma->dev, "psc_dma_close(substream=%p)\n", substream);

        if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
                s = &psc_dma->capture;
        else
                s = &psc_dma->playback;

        if (!psc_dma->playback.active &&
            !psc_dma->capture.active) {

                /* Disable all interrupts and reset the PSC */
                out_be16(&psc_dma->psc_regs->isr_imr.imr, psc_dma->imr);
                out_8(&psc_dma->psc_regs->command, 4 << 4); /* reset error */
        }
        s->stream = NULL;
        return 0;
}

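/* Report the current DMA position: the byte offset of the most recently
 * completed period, converted to frames. */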
static snd_pcm_uframes_t
psc_dma_pointer(struct snd_pcm_substream *substream)
{
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
        struct psc_dma_stream *s;
        dma_addr_t count;

        if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
                s = &psc_dma->capture;
        else
                s = &psc_dma->playback;

        count = s->period_current_pt - s->period_start;

        return bytes_to_frames(substream->runtime, count);
}

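/* hw_params: the DMA buffer was already allocated in psc_dma_new(), so just
 * point the runtime at it. */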
static int
psc_dma_hw_params(struct snd_pcm_substream *substream,
                  struct snd_pcm_hw_params *params)
{
        snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);

        return 0;
}

static struct snd_pcm_ops psc_dma_ops = {
        .open = psc_dma_open,
        .close = psc_dma_close,
        .hw_free = psc_dma_hw_free,
        .ioctl = snd_pcm_lib_ioctl,
        .pointer = psc_dma_pointer,
        .trigger = psc_dma_trigger,
        .hw_params = psc_dma_hw_params,
};

static u64 psc_dma_dmamask = 0xffffffff;
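/* pcm_new: preallocate one contiguous DMA buffer per substream at the
 * maximum buffer size; hw_params/hw_free then attach and detach it. */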
static int psc_dma_new(struct snd_card *card, struct snd_soc_dai *dai,
                       struct snd_pcm *pcm)
{
        struct snd_soc_pcm_runtime *rtd = pcm->private_data;
        struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
        size_t size = psc_dma_hardware.buffer_bytes_max;
        int rc = 0;

        dev_dbg(rtd->socdev->dev, "psc_dma_new(card=%p, dai=%p, pcm=%p)\n",
                card, dai, pcm);

        if (!card->dev->dma_mask)
                card->dev->dma_mask = &psc_dma_dmamask;
        if (!card->dev->coherent_dma_mask)
                card->dev->coherent_dma_mask = 0xffffffff;

        if (pcm->streams[0].substream) {
                rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
                                size, &pcm->streams[0].substream->dma_buffer);
                if (rc)
                        goto playback_alloc_err;
        }

        if (pcm->streams[1].substream) {
                rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
                                size, &pcm->streams[1].substream->dma_buffer);
                if (rc)
                        goto capture_alloc_err;
        }

        if (rtd->socdev->card->codec->ac97)
                rtd->socdev->card->codec->ac97->private_data = psc_dma;

        return 0;

 capture_alloc_err:
        if (pcm->streams[0].substream)
                snd_dma_free_pages(&pcm->streams[0].substream->dma_buffer);

 playback_alloc_err:
        dev_err(card->dev, "Cannot allocate buffer(s)\n");

        return -ENOMEM;
}

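/* pcm_free: release the DMA buffers allocated in psc_dma_new(). */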
static void psc_dma_free(struct snd_pcm *pcm)
{
        struct snd_soc_pcm_runtime *rtd = pcm->private_data;
        struct snd_pcm_substream *substream;
        int stream;

        dev_dbg(rtd->socdev->dev, "psc_dma_free(pcm=%p)\n", pcm);

        for (stream = 0; stream < 2; stream++) {
                substream = pcm->streams[stream].substream;
                if (substream) {
                        snd_dma_free_pages(&substream->dma_buffer);
                        substream->dma_buffer.area = NULL;
                        substream->dma_buffer.addr = 0;
                }
        }
}

struct snd_soc_platform mpc5200_audio_dma_platform = {
        .name = "mpc5200-psc-audio",
        .pcm_ops = &psc_dma_ops,
        .pcm_new = &psc_dma_new,
        .pcm_free = &psc_dma_free,
};
EXPORT_SYMBOL_GPL(mpc5200_audio_dma_platform);

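/* Called by the PSC DAI drivers at probe time: map the PSC registers, set up
 * the bestcomm DMA tasks and interrupts, and register the ASoC platform. */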
int mpc5200_audio_dma_create(struct of_device *op)
{
        phys_addr_t fifo;
        struct psc_dma *psc_dma;
        struct resource res;
        int size, irq, rc;
        const __be32 *prop;
        void __iomem *regs;

        /* Fetch the registers and IRQ of the PSC */
        irq = irq_of_parse_and_map(op->node, 0);
        if (of_address_to_resource(op->node, 0, &res)) {
                dev_err(&op->dev, "Missing reg property\n");
                return -ENODEV;
        }
        regs = ioremap(res.start, 1 + res.end - res.start);
        if (!regs) {
                dev_err(&op->dev, "Could not map registers\n");
                return -ENODEV;
        }

        /* Allocate and initialize the driver private data */
        psc_dma = kzalloc(sizeof *psc_dma, GFP_KERNEL);
        if (!psc_dma) {
                iounmap(regs);
                return -ENOMEM;
        }

        /* Get the PSC ID */
        prop = of_get_property(op->node, "cell-index", &size);
        if (!prop || size < sizeof *prop) {
                iounmap(regs);
                kfree(psc_dma);
                return -ENODEV;
        }

        spin_lock_init(&psc_dma->lock);
        psc_dma->id = be32_to_cpu(*prop);
        psc_dma->irq = irq;
        psc_dma->psc_regs = regs;
        psc_dma->fifo_regs = regs + sizeof *psc_dma->psc_regs;
        psc_dma->dev = &op->dev;
        psc_dma->playback.psc_dma = psc_dma;
        psc_dma->capture.psc_dma = psc_dma;
        snprintf(psc_dma->name, sizeof psc_dma->name, "PSC%u", psc_dma->id);

        /* Find the address of the fifo data registers and set up the
         * DMA tasks */
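        /* 10-entry buffer descriptor queues; capture DMA buffers are limited
         * to 512 bytes each. */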
        fifo = res.start + offsetof(struct mpc52xx_psc, buffer.buffer_32);
        psc_dma->capture.bcom_task =
                bcom_psc_gen_bd_rx_init(psc_dma->id, 10, fifo, 512);
        psc_dma->playback.bcom_task =
                bcom_psc_gen_bd_tx_init(psc_dma->id, 10, fifo);
        if (!psc_dma->capture.bcom_task ||
            !psc_dma->playback.bcom_task) {
                dev_err(&op->dev, "Could not allocate bestcomm tasks\n");
                iounmap(regs);
                kfree(psc_dma);
                return -ENODEV;
        }

        /* Disable all interrupts and reset the PSC */
        out_be16(&psc_dma->psc_regs->isr_imr.imr, psc_dma->imr);
        /* reset receiver */
        out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_RST_RX);
        /* reset transmitter */
        out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_RST_TX);
        /* reset error */
        out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_RST_ERR_STAT);
        /* reset mode */
        out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_SEL_MODE_REG_1);

        /* Set up the mode register;
         * First write: RxRdy (FIFO Alarm) generates an rx FIFO irq
         * Second write: Normal mode for non-loopback
         */
        out_8(&psc_dma->psc_regs->mode, 0);
        out_8(&psc_dma->psc_regs->mode, 0);

        /* Set the TX and RX fifo alarm thresholds */
        out_be16(&psc_dma->fifo_regs->rfalarm, 0x100);
        out_8(&psc_dma->fifo_regs->rfcntl, 0x4);
        out_be16(&psc_dma->fifo_regs->tfalarm, 0x100);
        out_8(&psc_dma->fifo_regs->tfcntl, 0x7);

        /* Lookup the IRQ numbers */
        psc_dma->playback.irq =
                bcom_get_task_irq(psc_dma->playback.bcom_task);
        psc_dma->capture.irq =
                bcom_get_task_irq(psc_dma->capture.bcom_task);

        rc = request_irq(psc_dma->irq, &psc_dma_status_irq, IRQF_SHARED,
                         "psc-dma-status", psc_dma);
        rc |= request_irq(psc_dma->capture.irq,
                          &psc_dma_bcom_irq_rx, IRQF_SHARED,
                          "psc-dma-capture", &psc_dma->capture);
        rc |= request_irq(psc_dma->playback.irq,
                          &psc_dma_bcom_irq_tx, IRQF_SHARED,
                          "psc-dma-playback", &psc_dma->playback);
        if (rc) {
                free_irq(psc_dma->irq, psc_dma);
                free_irq(psc_dma->capture.irq,
                         &psc_dma->capture);
                free_irq(psc_dma->playback.irq,
                         &psc_dma->playback);
                return -ENODEV;
        }

        /* Save what we've done so it can be found again later */
        dev_set_drvdata(&op->dev, psc_dma);

        /* Tell the ASoC OF helpers about it */
        return snd_soc_register_platform(&mpc5200_audio_dma_platform);
}
EXPORT_SYMBOL_GPL(mpc5200_audio_dma_create);

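/* Tear down everything set up in mpc5200_audio_dma_create(). */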
int mpc5200_audio_dma_destroy(struct of_device *op)
{
        struct psc_dma *psc_dma = dev_get_drvdata(&op->dev);

        dev_dbg(&op->dev, "mpc5200_audio_dma_destroy()\n");

        snd_soc_unregister_platform(&mpc5200_audio_dma_platform);

        bcom_gen_bd_rx_release(psc_dma->capture.bcom_task);
        bcom_gen_bd_tx_release(psc_dma->playback.bcom_task);

        /* Release irqs */
        free_irq(psc_dma->irq, psc_dma);
        free_irq(psc_dma->capture.irq, &psc_dma->capture);
        free_irq(psc_dma->playback.irq, &psc_dma->playback);

        iounmap(psc_dma->psc_regs);
        kfree(psc_dma);
        dev_set_drvdata(&op->dev, NULL);

        return 0;
}
EXPORT_SYMBOL_GPL(mpc5200_audio_dma_destroy);

MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
MODULE_DESCRIPTION("Freescale MPC5200 PSC in DMA mode ASoC Driver");
MODULE_LICENSE("GPL");