aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLiam Girdwood <liam.r.girdwood@linux.intel.com>2014-02-17 08:32:08 -0500
committerMark Brown <broonie@linaro.org>2014-02-17 20:39:20 -0500
commit30020472c354fbe4352b4b4d59bbc9a30aacf5c3 (patch)
tree0d707fd9eb94a3bd1b91895c934fc6bc5b83ad9d
parentc2f8783fa2d053a61059f6b784c917129fb3064b (diff)
ASoC: Intel: Add Intel SST audio DSP Firmware loader.
Provide services for Intel SST drivers to load SST modular firmware. SST Firmware can be made up of several modules. These modules can exist within any of the compatible SST memory blocks. Provide a generic memory block and firmware module manager that can be used with any SST firmware and core. Signed-off-by: Liam Girdwood <liam.r.girdwood@linux.intel.com> Acked-by: Vinod Koul <vinod.koul@intel.com> Signed-off-by: Mark Brown <broonie@linaro.org>
-rw-r--r--sound/soc/intel/sst-firmware.c586
1 files changed, 586 insertions, 0 deletions
diff --git a/sound/soc/intel/sst-firmware.c b/sound/soc/intel/sst-firmware.c
new file mode 100644
index 000000000000..b6f9b5ecb66a
--- /dev/null
+++ b/sound/soc/intel/sst-firmware.c
@@ -0,0 +1,586 @@
1/*
2 * Intel SST Firmware Loader
3 *
4 * Copyright (C) 2013, Intel Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/kernel.h>
18#include <linux/slab.h>
19#include <linux/sched.h>
20#include <linux/firmware.h>
21#include <linux/export.h>
22#include <linux/platform_device.h>
23#include <linux/dma-mapping.h>
24#include <linux/dmaengine.h>
25#include <linux/pci.h>
26
27#include <asm/page.h>
28#include <asm/pgtable.h>
29
30#include "sst-dsp.h"
31#include "sst-dsp-priv.h"
32
33static void sst_memcpy32(void *dest, void *src, u32 bytes)
34{
35 u32 i;
36
37 /* copy one 32 bit word at a time as 64 bit access is not supported */
38 for (i = 0; i < bytes; i += 4)
39 memcpy_toio(dest + i, src + i, 4);
40}
41
42/* create new generic firmware object */
43struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
44 const struct firmware *fw, void *private)
45{
46 struct sst_fw *sst_fw;
47 int err;
48
49 if (!dsp->ops->parse_fw)
50 return NULL;
51
52 sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
53 if (sst_fw == NULL)
54 return NULL;
55
56 sst_fw->dsp = dsp;
57 sst_fw->private = private;
58 sst_fw->size = fw->size;
59
60 err = dma_coerce_mask_and_coherent(dsp->dev, DMA_BIT_MASK(32));
61 if (err < 0) {
62 kfree(sst_fw);
63 return NULL;
64 }
65
66 /* allocate DMA buffer to store FW data */
67 sst_fw->dma_buf = dma_alloc_coherent(dsp->dev, sst_fw->size,
68 &sst_fw->dmable_fw_paddr, GFP_DMA);
69 if (!sst_fw->dma_buf) {
70 dev_err(dsp->dev, "error: DMA alloc failed\n");
71 kfree(sst_fw);
72 return NULL;
73 }
74
75 /* copy FW data to DMA-able memory */
76 memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);
77
78 /* call core specific FW paser to load FW data into DSP */
79 err = dsp->ops->parse_fw(sst_fw);
80 if (err < 0) {
81 dev_err(dsp->dev, "error: parse fw failed %d\n", err);
82 goto parse_err;
83 }
84
85 mutex_lock(&dsp->mutex);
86 list_add(&sst_fw->list, &dsp->fw_list);
87 mutex_unlock(&dsp->mutex);
88
89 return sst_fw;
90
91parse_err:
92 dma_free_coherent(dsp->dev, sst_fw->size,
93 sst_fw->dma_buf,
94 sst_fw->dmable_fw_paddr);
95 kfree(sst_fw);
96 return NULL;
97}
98EXPORT_SYMBOL_GPL(sst_fw_new);
99
100/* free single firmware object */
101void sst_fw_free(struct sst_fw *sst_fw)
102{
103 struct sst_dsp *dsp = sst_fw->dsp;
104
105 mutex_lock(&dsp->mutex);
106 list_del(&sst_fw->list);
107 mutex_unlock(&dsp->mutex);
108
109 dma_free_coherent(dsp->dev, sst_fw->size, sst_fw->dma_buf,
110 sst_fw->dmable_fw_paddr);
111 kfree(sst_fw);
112}
113EXPORT_SYMBOL_GPL(sst_fw_free);
114
115/* free all firmware objects */
116void sst_fw_free_all(struct sst_dsp *dsp)
117{
118 struct sst_fw *sst_fw, *t;
119
120 mutex_lock(&dsp->mutex);
121 list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {
122
123 list_del(&sst_fw->list);
124 dma_free_coherent(dsp->dev, sst_fw->size, sst_fw->dma_buf,
125 sst_fw->dmable_fw_paddr);
126 kfree(sst_fw);
127 }
128 mutex_unlock(&dsp->mutex);
129}
130EXPORT_SYMBOL_GPL(sst_fw_free_all);
131
132/* create a new SST generic module from FW template */
133struct sst_module *sst_module_new(struct sst_fw *sst_fw,
134 struct sst_module_template *template, void *private)
135{
136 struct sst_dsp *dsp = sst_fw->dsp;
137 struct sst_module *sst_module;
138
139 sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
140 if (sst_module == NULL)
141 return NULL;
142
143 sst_module->id = template->id;
144 sst_module->dsp = dsp;
145 sst_module->sst_fw = sst_fw;
146
147 memcpy(&sst_module->s, &template->s, sizeof(struct sst_module_data));
148 memcpy(&sst_module->p, &template->p, sizeof(struct sst_module_data));
149
150 INIT_LIST_HEAD(&sst_module->block_list);
151
152 mutex_lock(&dsp->mutex);
153 list_add(&sst_module->list, &dsp->module_list);
154 mutex_unlock(&dsp->mutex);
155
156 return sst_module;
157}
158EXPORT_SYMBOL_GPL(sst_module_new);
159
160/* free firmware module and remove from available list */
161void sst_module_free(struct sst_module *sst_module)
162{
163 struct sst_dsp *dsp = sst_module->dsp;
164
165 mutex_lock(&dsp->mutex);
166 list_del(&sst_module->list);
167 mutex_unlock(&dsp->mutex);
168
169 kfree(sst_module);
170}
171EXPORT_SYMBOL_GPL(sst_module_free);
172
173static struct sst_mem_block *find_block(struct sst_dsp *dsp, int type,
174 u32 offset)
175{
176 struct sst_mem_block *block;
177
178 list_for_each_entry(block, &dsp->free_block_list, list) {
179 if (block->type == type && block->offset == offset)
180 return block;
181 }
182
183 return NULL;
184}
185
/*
 * Claim a physically contiguous run of free blocks of data->type starting
 * at @offset and covering at least @size bytes - caller holds dsp mutex.
 *
 * Blocks are staged on a temporary list so that on failure everything
 * claimed so far is returned to the free list with one splice; on success
 * the whole run is moved to the used list.
 */
static int block_alloc_contiguous(struct sst_module *module,
	struct sst_module_data *data, u32 offset, int size)
{
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct sst_dsp *dsp = module->dsp;
	struct sst_mem_block *block;

	while (size > 0) {
		/* the next block must start exactly where the last one ended */
		block = find_block(dsp, data->type, offset);
		if (!block) {
			/* run is broken - restore staged blocks and give up */
			list_splice(&tmp, &dsp->free_block_list);
			return -ENOMEM;
		}

		list_move_tail(&block->list, &tmp);
		offset += block->size;
		size -= block->size;
	}

	/* NOTE(review): the blocks are not added to module->block_list here,
	 * so the module does not own them for later removal - confirm callers
	 * expect that. */
	list_splice(&tmp, &dsp->used_block_list);
	return 0;
}
208
209/* allocate free DSP blocks for module data - callers hold locks */
210static int block_alloc(struct sst_module *module,
211 struct sst_module_data *data)
212{
213 struct sst_dsp *dsp = module->dsp;
214 struct sst_mem_block *block, *tmp;
215 int ret = 0;
216
217 if (data->size == 0)
218 return 0;
219
220 /* find first free whole blocks that can hold module */
221 list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
222
223 /* ignore blocks with wrong type */
224 if (block->type != data->type)
225 continue;
226
227 if (data->size > block->size)
228 continue;
229
230 data->offset = block->offset;
231 block->data_type = data->data_type;
232 block->bytes_used = data->size % block->size;
233 list_add(&block->module_list, &module->block_list);
234 list_move(&block->list, &dsp->used_block_list);
235 dev_dbg(dsp->dev, " *module %d added block %d:%d\n",
236 module->id, block->type, block->index);
237 return 0;
238 }
239
240 /* then find free multiple blocks that can hold module */
241 list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
242
243 /* ignore blocks with wrong type */
244 if (block->type != data->type)
245 continue;
246
247 /* do we span > 1 blocks */
248 if (data->size > block->size) {
249 ret = block_alloc_contiguous(module, data,
250 block->offset + block->size,
251 data->size - block->size);
252 if (ret == 0)
253 return ret;
254 }
255 }
256
257 /* not enough free block space */
258 return -ENOMEM;
259}
260
261/* remove module from memory - callers hold locks */
262static void block_module_remove(struct sst_module *module)
263{
264 struct sst_mem_block *block, *tmp;
265 struct sst_dsp *dsp = module->dsp;
266 int err;
267
268 /* disable each block */
269 list_for_each_entry(block, &module->block_list, module_list) {
270
271 if (block->ops && block->ops->disable) {
272 err = block->ops->disable(block);
273 if (err < 0)
274 dev_err(dsp->dev,
275 "error: cant disable block %d:%d\n",
276 block->type, block->index);
277 }
278 }
279
280 /* mark each block as free */
281 list_for_each_entry_safe(block, tmp, &module->block_list, module_list) {
282 list_del(&block->module_list);
283 list_move(&block->list, &dsp->free_block_list);
284 }
285}
286
287/* prepare the memory block to receive data from host - callers hold locks */
288static int block_module_prepare(struct sst_module *module)
289{
290 struct sst_mem_block *block;
291 int ret = 0;
292
293 /* enable each block so that's it'e ready for module P/S data */
294 list_for_each_entry(block, &module->block_list, module_list) {
295
296 if (block->ops && block->ops->enable)
297 ret = block->ops->enable(block);
298 if (ret < 0) {
299 dev_err(module->dsp->dev,
300 "error: cant disable block %d:%d\n",
301 block->type, block->index);
302 goto err;
303 }
304 }
305 return ret;
306
307err:
308 list_for_each_entry(block, &module->block_list, module_list) {
309 if (block->ops && block->ops->disable)
310 block->ops->disable(block);
311 }
312 return ret;
313}
314
/*
 * Allocate memory blocks for static module addresses - callers hold locks.
 *
 * The section [data->offset, data->offset + data->size) must live at a
 * fixed DSP address. First checks blocks the module already owns, then the
 * free list, extending with contiguous blocks where the section spills
 * past a block boundary.
 */
static int block_alloc_fixed(struct sst_module *module,
	struct sst_module_data *data)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_mem_block *block, *tmp;
	u32 end = data->offset + data->size, block_end;
	int err;

	/* only IRAM/DRAM blocks are managed */
	if (data->type != SST_MEM_IRAM && data->type != SST_MEM_DRAM)
		return 0;

	/* are blocks already attached to this module */
	list_for_each_entry_safe(block, tmp, &module->block_list, module_list) {

		/* force compacting mem blocks of the same data_type */
		if (block->data_type != data->data_type)
			continue;

		block_end = block->offset + block->size;

		/* find block that holds section
		 * NOTE(review): "end < block_end" rejects a section whose end
		 * coincides exactly with the block end - confirm whether "<="
		 * was intended. */
		if (data->offset >= block->offset && end < block_end)
			return 0;

		/* does block span more than 1 section */
		if (data->offset >= block->offset && data->offset < block_end) {

			/* claim contiguous blocks after this one covering the
			 * bytes that spill past block_end */
			err = block_alloc_contiguous(module, data,
				block->offset + block->size,
				data->size - block->size + data->offset - block->offset);
			if (err < 0)
				return -ENOMEM;

			/* module already owns blocks */
			return 0;
		}
	}

	/* find first free blocks that can hold section in free list */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		block_end = block->offset + block->size;

		/* find block that holds section */
		if (data->offset >= block->offset && end < block_end) {

			/* add block */
			block->data_type = data->data_type;
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, &module->block_list);
			return 0;
		}

		/* does block span more than 1 section
		 * NOTE(review): unlike the attached-block loop above, the
		 * spill size here ignores (data->offset - block->offset) -
		 * verify the inconsistency is intentional. */
		if (data->offset >= block->offset && data->offset < block_end) {

			err = block_alloc_contiguous(module, data,
				block->offset + block->size,
				data->size - block->size);
			if (err < 0)
				return -ENOMEM;

			/* add block */
			block->data_type = data->data_type;
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, &module->block_list);
			return 0;
		}

	}

	return -ENOMEM;
}
389
390/* Load fixed module data into DSP memory blocks */
391int sst_module_insert_fixed_block(struct sst_module *module,
392 struct sst_module_data *data)
393{
394 struct sst_dsp *dsp = module->dsp;
395 int ret;
396
397 mutex_lock(&dsp->mutex);
398
399 /* alloc blocks that includes this section */
400 ret = block_alloc_fixed(module, data);
401 if (ret < 0) {
402 dev_err(dsp->dev,
403 "error: no free blocks for section at offset 0x%x size 0x%x\n",
404 data->offset, data->size);
405 mutex_unlock(&dsp->mutex);
406 return -ENOMEM;
407 }
408
409 /* prepare DSP blocks for module copy */
410 ret = block_module_prepare(module);
411 if (ret < 0) {
412 dev_err(dsp->dev, "error: fw module prepare failed\n");
413 goto err;
414 }
415
416 /* copy partial module data to blocks */
417 sst_memcpy32(dsp->addr.lpe + data->offset, data->data, data->size);
418
419 mutex_unlock(&dsp->mutex);
420 return ret;
421
422err:
423 block_module_remove(module);
424 mutex_unlock(&dsp->mutex);
425 return ret;
426}
427EXPORT_SYMBOL_GPL(sst_module_insert_fixed_block);
428
429/* Unload entire module from DSP memory */
430int sst_block_module_remove(struct sst_module *module)
431{
432 struct sst_dsp *dsp = module->dsp;
433
434 mutex_lock(&dsp->mutex);
435 block_module_remove(module);
436 mutex_unlock(&dsp->mutex);
437 return 0;
438}
439EXPORT_SYMBOL_GPL(sst_block_module_remove);
440
441/* register a DSP memory block for use with FW based modules */
442struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
443 u32 size, enum sst_mem_type type, struct sst_block_ops *ops, u32 index,
444 void *private)
445{
446 struct sst_mem_block *block;
447
448 block = kzalloc(sizeof(*block), GFP_KERNEL);
449 if (block == NULL)
450 return NULL;
451
452 block->offset = offset;
453 block->size = size;
454 block->index = index;
455 block->type = type;
456 block->dsp = dsp;
457 block->private = private;
458 block->ops = ops;
459
460 mutex_lock(&dsp->mutex);
461 list_add(&block->list, &dsp->free_block_list);
462 mutex_unlock(&dsp->mutex);
463
464 return block;
465}
466EXPORT_SYMBOL_GPL(sst_mem_block_register);
467
468/* unregister all DSP memory blocks */
469void sst_mem_block_unregister_all(struct sst_dsp *dsp)
470{
471 struct sst_mem_block *block, *tmp;
472
473 mutex_lock(&dsp->mutex);
474
475 /* unregister used blocks */
476 list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
477 list_del(&block->list);
478 kfree(block);
479 }
480
481 /* unregister free blocks */
482 list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
483 list_del(&block->list);
484 kfree(block);
485 }
486
487 mutex_unlock(&dsp->mutex);
488}
489EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);
490
491/* allocate scratch buffer blocks */
492struct sst_module *sst_mem_block_alloc_scratch(struct sst_dsp *dsp)
493{
494 struct sst_module *sst_module, *scratch;
495 struct sst_mem_block *block, *tmp;
496 u32 block_size;
497 int ret = 0;
498
499 scratch = kzalloc(sizeof(struct sst_module), GFP_KERNEL);
500 if (scratch == NULL)
501 return NULL;
502
503 mutex_lock(&dsp->mutex);
504
505 /* calculate required scratch size */
506 list_for_each_entry(sst_module, &dsp->module_list, list) {
507 if (scratch->s.size > sst_module->s.size)
508 scratch->s.size = scratch->s.size;
509 else
510 scratch->s.size = sst_module->s.size;
511 }
512
513 dev_dbg(dsp->dev, "scratch buffer required is %d bytes\n",
514 scratch->s.size);
515
516 /* init scratch module */
517 scratch->dsp = dsp;
518 scratch->s.type = SST_MEM_DRAM;
519 scratch->s.data_type = SST_DATA_S;
520 INIT_LIST_HEAD(&scratch->block_list);
521
522 /* check free blocks before looking at used blocks for space */
523 if (!list_empty(&dsp->free_block_list))
524 block = list_first_entry(&dsp->free_block_list,
525 struct sst_mem_block, list);
526 else
527 block = list_first_entry(&dsp->used_block_list,
528 struct sst_mem_block, list);
529 block_size = block->size;
530
531 /* allocate blocks for module scratch buffers */
532 dev_dbg(dsp->dev, "allocating scratch blocks\n");
533 ret = block_alloc(scratch, &scratch->s);
534 if (ret < 0) {
535 dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
536 goto err;
537 }
538
539 /* assign the same offset of scratch to each module */
540 list_for_each_entry(sst_module, &dsp->module_list, list)
541 sst_module->s.offset = scratch->s.offset;
542
543 mutex_unlock(&dsp->mutex);
544 return scratch;
545
546err:
547 list_for_each_entry_safe(block, tmp, &scratch->block_list, module_list)
548 list_del(&block->module_list);
549 mutex_unlock(&dsp->mutex);
550 return NULL;
551}
552EXPORT_SYMBOL_GPL(sst_mem_block_alloc_scratch);
553
554/* free all scratch blocks */
555void sst_mem_block_free_scratch(struct sst_dsp *dsp,
556 struct sst_module *scratch)
557{
558 struct sst_mem_block *block, *tmp;
559
560 mutex_lock(&dsp->mutex);
561
562 list_for_each_entry_safe(block, tmp, &scratch->block_list, module_list)
563 list_del(&block->module_list);
564
565 mutex_unlock(&dsp->mutex);
566}
567EXPORT_SYMBOL_GPL(sst_mem_block_free_scratch);
568
569/* get a module from it's unique ID */
570struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
571{
572 struct sst_module *module;
573
574 mutex_lock(&dsp->mutex);
575
576 list_for_each_entry(module, &dsp->module_list, list) {
577 if (module->id == id) {
578 mutex_unlock(&dsp->mutex);
579 return module;
580 }
581 }
582
583 mutex_unlock(&dsp->mutex);
584 return NULL;
585}
586EXPORT_SYMBOL_GPL(sst_module_get_from_id);