aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma/bcm2835-dma.c
diff options
context:
space:
mode:
authorMartin Sperl <kernel@martin.sperl.org>2016-03-16 15:24:59 -0400
committerVinod Koul <vinod.koul@intel.com>2016-04-15 00:27:21 -0400
commit92153bb534fa4c2f0a1fdc3745cab25edaf10dca (patch)
tree18432d59f7f1119800a129ef06870abd1575c204 /drivers/dma/bcm2835-dma.c
parenta4dcdd849ef8dbd0811ca8436aecf1c87e09686c (diff)
dmaengine: bcm2835: move controlblock chain generation into separate method
In preparation of adding slave_sg functionality this patch moves the generation/allocation of bcm2835_desc and the building of the corresponding DMA-control-block chain from bcm2835_dma_prep_dma_cyclic into the newly created method bcm2835_dma_create_cb_chain. Signed-off-by: Martin Sperl <kernel@martin.sperl.org> Reviewed-by: Eric Anholt <eric@anholt.net> Signed-off-by: Eric Anholt <eric@anholt.net> Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma/bcm2835-dma.c')
-rw-r--r--drivers/dma/bcm2835-dma.c294
1 files changed, 198 insertions, 96 deletions
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index b3bc382fd199..4db0e232fab8 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -88,12 +88,12 @@ struct bcm2835_desc {
88 struct virt_dma_desc vd; 88 struct virt_dma_desc vd;
89 enum dma_transfer_direction dir; 89 enum dma_transfer_direction dir;
90 90
91 struct bcm2835_cb_entry *cb_list;
92
93 unsigned int frames; 91 unsigned int frames;
94 size_t size; 92 size_t size;
95 93
96 bool cyclic; 94 bool cyclic;
95
96 struct bcm2835_cb_entry cb_list[];
97}; 97};
98 98
99#define BCM2835_DMA_CS 0x00 99#define BCM2835_DMA_CS 0x00
@@ -169,6 +169,13 @@ struct bcm2835_desc {
169#define BCM2835_DMA_CHAN(n) ((n) << 8) /* Base address */ 169#define BCM2835_DMA_CHAN(n) ((n) << 8) /* Base address */
170#define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n)) 170#define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n))
171 171
172/* how many frames of max_len size do we need to transfer len bytes */
173static inline size_t bcm2835_dma_frames_for_length(size_t len,
174 size_t max_len)
175{
176 return DIV_ROUND_UP(len, max_len);
177}
178
172static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d) 179static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d)
173{ 180{
174 return container_of(d, struct bcm2835_dmadev, ddev); 181 return container_of(d, struct bcm2835_dmadev, ddev);
@@ -185,19 +192,161 @@ static inline struct bcm2835_desc *to_bcm2835_dma_desc(
185 return container_of(t, struct bcm2835_desc, vd.tx); 192 return container_of(t, struct bcm2835_desc, vd.tx);
186} 193}
187 194
188static void bcm2835_dma_desc_free(struct virt_dma_desc *vd) 195static void bcm2835_dma_free_cb_chain(struct bcm2835_desc *desc)
189{ 196{
190 struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd); 197 size_t i;
191 int i;
192 198
193 for (i = 0; i < desc->frames; i++) 199 for (i = 0; i < desc->frames; i++)
194 dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb, 200 dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,
195 desc->cb_list[i].paddr); 201 desc->cb_list[i].paddr);
196 202
197 kfree(desc->cb_list);
198 kfree(desc); 203 kfree(desc);
199} 204}
200 205
206static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
207{
208 bcm2835_dma_free_cb_chain(
209 container_of(vd, struct bcm2835_desc, vd));
210}
211
212static void bcm2835_dma_create_cb_set_length(
213 struct bcm2835_chan *chan,
214 struct bcm2835_dma_cb *control_block,
215 size_t len,
216 size_t period_len,
217 size_t *total_len,
218 u32 finalextrainfo)
219{
220 /* set the length */
221 control_block->length = len;
222
223 /* finished if we have no period_length */
224 if (!period_len)
225 return;
226
227 /*
228 * period_len means: that we need to generate
229 * transfers that are terminating at every
230 * multiple of period_len - this is typically
231 * used to set the interrupt flag in info
232 * which is required during cyclic transfers
233 */
234
235 /* have we filled in period_length yet? */
236 if (*total_len + control_block->length < period_len)
237 return;
238
239 /* calculate the length that remains to reach period_length */
240 control_block->length = period_len - *total_len;
241
242 /* reset total_length for next period */
243 *total_len = 0;
244
245 /* add extrainfo bits in info */
246 control_block->info |= finalextrainfo;
247}
248
249/**
250 * bcm2835_dma_create_cb_chain - create a control block and fills data in
251 *
252 * @chan: the @dma_chan for which we run this
253 * @direction: the direction in which we transfer
254 * @cyclic: it is a cyclic transfer
255 * @info: the default info bits to apply per controlblock
256 * @frames: number of controlblocks to allocate
257 * @src: the src address to assign (if the S_INC bit is set
258 * in @info, then it gets incremented)
259 * @dst: the dst address to assign (if the D_INC bit is set
260 * in @info, then it gets incremented)
261 * @buf_len: the full buffer length (may also be 0)
262 * @period_len: the period length when to apply @finalextrainfo
263 * in addition to the last transfer
264 * this will also break some control-blocks early
265 * @finalextrainfo: additional bits in last controlblock
266 * (or when period_len is reached in case of cyclic)
267 * @gfp: the GFP flag to use for allocation
268 */
269static struct bcm2835_desc *bcm2835_dma_create_cb_chain(
270 struct dma_chan *chan, enum dma_transfer_direction direction,
271 bool cyclic, u32 info, u32 finalextrainfo, size_t frames,
272 dma_addr_t src, dma_addr_t dst, size_t buf_len,
273 size_t period_len, gfp_t gfp)
274{
275 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
276 size_t len = buf_len, total_len;
277 size_t frame;
278 struct bcm2835_desc *d;
279 struct bcm2835_cb_entry *cb_entry;
280 struct bcm2835_dma_cb *control_block;
281
282 /* allocate and setup the descriptor. */
283 d = kzalloc(sizeof(*d) + frames * sizeof(struct bcm2835_cb_entry),
284 gfp);
285 if (!d)
286 return NULL;
287
288 d->c = c;
289 d->dir = direction;
290 d->cyclic = cyclic;
291
292 /*
293 * Iterate over all frames, create a control block
294 * for each frame and link them together.
295 */
296 for (frame = 0, total_len = 0; frame < frames; d->frames++, frame++) {
297 cb_entry = &d->cb_list[frame];
298 cb_entry->cb = dma_pool_alloc(c->cb_pool, gfp,
299 &cb_entry->paddr);
300 if (!cb_entry->cb)
301 goto error_cb;
302
303 /* fill in the control block */
304 control_block = cb_entry->cb;
305 control_block->info = info;
306 control_block->src = src;
307 control_block->dst = dst;
308 control_block->stride = 0;
309 control_block->next = 0;
310 /* set up length in control_block if requested */
311 if (buf_len) {
312 /* calculate length honoring period_length */
313 bcm2835_dma_create_cb_set_length(
314 c, control_block,
315 len, period_len, &total_len,
316 cyclic ? finalextrainfo : 0);
317
318 /* calculate new remaining length */
319 len -= control_block->length;
320 }
321
322 /* link this the last controlblock */
323 if (frame)
324 d->cb_list[frame - 1].cb->next = cb_entry->paddr;
325
326 /* update src and dst and length */
327 if (src && (info & BCM2835_DMA_S_INC))
328 src += control_block->length;
329 if (dst && (info & BCM2835_DMA_D_INC))
330 dst += control_block->length;
331
332 /* Length of total transfer */
333 d->size += control_block->length;
334 }
335
336 /* the last frame requires extra flags */
337 d->cb_list[d->frames - 1].cb->info |= finalextrainfo;
338
339 /* detect a size missmatch */
340 if (buf_len && (d->size != buf_len))
341 goto error_cb;
342
343 return d;
344error_cb:
345 bcm2835_dma_free_cb_chain(d);
346
347 return NULL;
348}
349
201static int bcm2835_dma_abort(void __iomem *chan_base) 350static int bcm2835_dma_abort(void __iomem *chan_base)
202{ 351{
203 unsigned long cs; 352 unsigned long cs;
@@ -391,12 +540,11 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
391 unsigned long flags) 540 unsigned long flags)
392{ 541{
393 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); 542 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
394 enum dma_slave_buswidth dev_width;
395 struct bcm2835_desc *d; 543 struct bcm2835_desc *d;
396 dma_addr_t dev_addr; 544 dma_addr_t src, dst;
397 unsigned int es, sync_type; 545 u32 info = BCM2835_DMA_WAIT_RESP;
398 unsigned int frame; 546 u32 extra = BCM2835_DMA_INT_EN;
399 int i; 547 size_t frames;
400 548
401 /* Grab configuration */ 549 /* Grab configuration */
402 if (!is_slave_direction(direction)) { 550 if (!is_slave_direction(direction)) {
@@ -404,104 +552,58 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
404 return NULL; 552 return NULL;
405 } 553 }
406 554
407 if (direction == DMA_DEV_TO_MEM) { 555 if (!buf_len) {
408 dev_addr = c->cfg.src_addr; 556 dev_err(chan->device->dev,
409 dev_width = c->cfg.src_addr_width; 557 "%s: bad buffer length (= 0)\n", __func__);
410 sync_type = BCM2835_DMA_S_DREQ;
411 } else {
412 dev_addr = c->cfg.dst_addr;
413 dev_width = c->cfg.dst_addr_width;
414 sync_type = BCM2835_DMA_D_DREQ;
415 }
416
417 /* Bus width translates to the element size (ES) */
418 switch (dev_width) {
419 case DMA_SLAVE_BUSWIDTH_4_BYTES:
420 es = BCM2835_DMA_DATA_TYPE_S32;
421 break;
422 default:
423 return NULL; 558 return NULL;
424 } 559 }
425 560
426 /* Now allocate and setup the descriptor. */ 561 /*
427 d = kzalloc(sizeof(*d), GFP_NOWAIT); 562 * warn if buf_len is not a multiple of period_len - this may leed
428 if (!d) 563 * to unexpected latencies for interrupts and thus audiable clicks
429 return NULL; 564 */
565 if (buf_len % period_len)
566 dev_warn_once(chan->device->dev,
567 "%s: buffer_length (%zd) is not a multiple of period_len (%zd)\n",
568 __func__, buf_len, period_len);
430 569
431 d->c = c; 570 /* Setup DREQ channel */
432 d->dir = direction; 571 if (c->dreq != 0)
433 d->frames = buf_len / period_len; 572 info |= BCM2835_DMA_PER_MAP(c->dreq);
434 d->cyclic = true;
435 573
436 d->cb_list = kcalloc(d->frames, sizeof(*d->cb_list), GFP_KERNEL); 574 if (direction == DMA_DEV_TO_MEM) {
437 if (!d->cb_list) { 575 if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
438 kfree(d); 576 return NULL;
439 return NULL; 577 src = c->cfg.src_addr;
578 dst = buf_addr;
579 info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC;
580 } else {
581 if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
582 return NULL;
583 dst = c->cfg.dst_addr;
584 src = buf_addr;
585 info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;
440 } 586 }
441 /* Allocate memory for control blocks */
442 for (i = 0; i < d->frames; i++) {
443 struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
444 587
445 cb_entry->cb = dma_pool_zalloc(c->cb_pool, GFP_ATOMIC, 588 /* calculate number of frames */
446 &cb_entry->paddr); 589 frames = DIV_ROUND_UP(buf_len, period_len);
447 if (!cb_entry->cb)
448 goto error_cb;
449 }
450 590
451 /* 591 /*
452 * Iterate over all frames, create a control block 592 * allocate the CB chain
453 * for each frame and link them together. 593 * note that we need to use GFP_NOWAIT, as the ALSA i2s dmaengine
594 * implementation calls prep_dma_cyclic with interrupts disabled.
454 */ 595 */
455 for (frame = 0; frame < d->frames; frame++) { 596 d = bcm2835_dma_create_cb_chain(chan, direction, true,
456 struct bcm2835_dma_cb *control_block = d->cb_list[frame].cb; 597 info, extra,
457 598 frames, src, dst, buf_len,
458 /* Setup addresses */ 599 period_len, GFP_NOWAIT);
459 if (d->dir == DMA_DEV_TO_MEM) { 600 if (!d)
460 control_block->info = BCM2835_DMA_D_INC; 601 return NULL;
461 control_block->src = dev_addr;
462 control_block->dst = buf_addr + frame * period_len;
463 } else {
464 control_block->info = BCM2835_DMA_S_INC;
465 control_block->src = buf_addr + frame * period_len;
466 control_block->dst = dev_addr;
467 }
468
469 /* Enable interrupt */
470 control_block->info |= BCM2835_DMA_INT_EN;
471
472 /* Setup synchronization */
473 if (sync_type != 0)
474 control_block->info |= sync_type;
475
476 /* Setup DREQ channel */
477 if (c->dreq != 0)
478 control_block->info |=
479 BCM2835_DMA_PER_MAP(c->dreq);
480
481 /* Length of a frame */
482 control_block->length = period_len;
483 d->size += control_block->length;
484 602
485 /* 603 /* wrap around into a loop */
486 * Next block is the next frame. 604 d->cb_list[d->frames - 1].cb->next = d->cb_list[0].paddr;
487 * This DMA engine driver currently only supports cyclic DMA.
488 * Therefore, wrap around at number of frames.
489 */
490 control_block->next = d->cb_list[((frame + 1) % d->frames)].paddr;
491 }
492 605
493 return vchan_tx_prep(&c->vc, &d->vd, flags); 606 return vchan_tx_prep(&c->vc, &d->vd, flags);
494error_cb:
495 i--;
496 for (; i >= 0; i--) {
497 struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
498
499 dma_pool_free(c->cb_pool, cb_entry->cb, cb_entry->paddr);
500 }
501
502 kfree(d->cb_list);
503 kfree(d);
504 return NULL;
505} 607}
506 608
507static int bcm2835_dma_slave_config(struct dma_chan *chan, 609static int bcm2835_dma_slave_config(struct dma_chan *chan,