author     Guennadi Liakhovetski <g.liakhovetski@gmx.de>  2010-01-19 02:24:55 -0500
committer  Paul Mundt <lethal@linux-sh.org>               2010-02-07 19:38:35 -0500
commit     fc4618575f79eea062cdc51715040e40cd35b71c (patch)
tree       86c3024f48db02b9f5e391d3f0a3aa787fa89375 /drivers/dma/shdma.c
parent     6339204ecc2aa2067a99595522de0403f0854bb8 (diff)
sh: prepare the DMA driver for slave functionality
Slave DMA functionality uses scatter-gather arrays for data transfers,
whereas memcpy just uses a single data buffer. This patch converts the
current memcpy implementation in shdma.c to use scatter-gather, making
memcpy just a special case with a single SG element. This allows us to
isolate descriptor-list manipulation and locking in one function,
reducing the chance of errors.
Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
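
In short, the memcpy path now wraps its single buffer in a one-element
scatterlist and hands it to the common scatter-gather routine. The core of
that conversion, condensed from the sh_dmae_prep_memcpy() hunk below (a
sketch of the idea, not the complete function):

	struct scatterlist sg;

	/* Describe the single contiguous buffer as a one-element SG list */
	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	/* DMA_BIDIRECTIONAL tells the common path to advance both the
	 * source and the destination address after each chunk */
	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
			       flags);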
Diffstat (limited to 'drivers/dma/shdma.c')
-rw-r--r--  drivers/dma/shdma.c | 221
1 file changed, 153 insertions(+), 68 deletions(-)
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index d10cc899c460..427c3effc432 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -53,12 +53,12 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
 #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
 {
-	ctrl_outl(data, (SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
+	ctrl_outl(data, SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
 }
 
 static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
 {
-	return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
+	return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
 }
 
 static void dmae_init(struct sh_dmae_chan *sh_chan)
@@ -95,14 +95,14 @@ static int sh_dmae_rst(int id)
 	return 0;
 }
 
-static int dmae_is_busy(struct sh_dmae_chan *sh_chan)
+static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
 {
 	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
-	if (chcr & CHCR_DE) {
-		if (!(chcr & CHCR_TE))
-			return -EBUSY; /* working */
-	}
-	return 0; /* waiting */
+
+	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
+		return true; /* working */
+
+	return false; /* waiting */
 }
 
 static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
@@ -136,10 +136,9 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan)
 
 static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
 {
-	int ret = dmae_is_busy(sh_chan);
 	/* When DMA was working, can not set data to CHCR */
-	if (ret)
-		return ret;
+	if (dmae_is_busy(sh_chan))
+		return -EBUSY;
 
 	sh_dmae_writel(sh_chan, val, CHCR);
 	return 0;
@@ -153,9 +152,9 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 {
 	u32 addr;
 	int shift = 0;
-	int ret = dmae_is_busy(sh_chan);
-	if (ret)
-		return ret;
+
+	if (dmae_is_busy(sh_chan))
+		return -EBUSY;
 
 	if (sh_chan->id & DMARS_CHAN_MSK)
 		shift = DMARS_SHIFT;
@@ -301,23 +300,95 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 	kfree(desc);
 }
 
-static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
-	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
-	size_t len, unsigned long flags)
+/*
+ * sh_dmae_add_desc - get, set up and return one transfer descriptor
+ * @sh_chan:	DMA channel
+ * @flags:	DMA transfer flags
+ * @dest:	destination DMA address, incremented when direction equals
+ *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
+ * @src:	source DMA address, incremented when direction equals
+ *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
+ * @len:	DMA transfer length
+ * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
+ * @direction:	needed for slave DMA to decide which address to keep constant,
+ *		equals DMA_BIDIRECTIONAL for MEMCPY
+ * Returns 0 or an error
+ * Locks: called with desc_lock held
+ */
+static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
+	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
+	struct sh_desc **first, enum dma_data_direction direction)
 {
-	struct sh_dmae_chan *sh_chan;
-	struct sh_desc *first = NULL, *prev = NULL, *new;
+	struct sh_desc *new;
 	size_t copy_size;
-	LIST_HEAD(tx_list);
-	int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);
 
-	if (!chan)
+	if (!*len)
 		return NULL;
 
-	if (!len)
+	/* Allocate the link descriptor from the free list */
+	new = sh_dmae_get_desc(sh_chan);
+	if (!new) {
+		dev_err(sh_chan->dev, "No free link descriptor available\n");
 		return NULL;
+	}
 
-	sh_chan = to_sh_chan(chan);
+	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);
+
+	new->hw.sar = *src;
+	new->hw.dar = *dest;
+	new->hw.tcr = copy_size;
+
+	if (!*first) {
+		/* First desc */
+		new->async_tx.cookie = -EBUSY;
+		*first = new;
+	} else {
+		/* Other desc - invisible to the user */
+		new->async_tx.cookie = -EINVAL;
+	}
+
+	dev_dbg(sh_chan->dev, "chaining (%u/%u)@%x -> %x with %p, cookie %d\n",
+		copy_size, *len, *src, *dest, &new->async_tx,
+		new->async_tx.cookie);
+
+	new->mark = DESC_PREPARED;
+	new->async_tx.flags = flags;
+
+	*len -= copy_size;
+	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
+		*src += copy_size;
+	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
+		*dest += copy_size;
+
+	return new;
+}
+
+/*
+ * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
+ *
+ * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
+ * converted to scatter-gather to guarantee consistent locking and a correct
+ * list manipulation. For slave DMA direction carries the usual meaning, and,
+ * logically, the SG list is RAM and the addr variable contains slave address,
+ * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL
+ * and the SG list contains only one element and points at the source buffer.
+ */
+static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
+	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
+	enum dma_data_direction direction, unsigned long flags)
+{
+	struct scatterlist *sg;
+	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
+	LIST_HEAD(tx_list);
+	int chunks = 0;
+	int i;
+
+	if (!sg_len)
+		return NULL;
+
+	for_each_sg(sgl, sg, sg_len, i)
+		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
+			(SH_DMA_TCR_MAX + 1);
 
 	/* Have to lock the whole loop to protect against concurrent release */
 	spin_lock_bh(&sh_chan->desc_lock);
@@ -333,49 +404,32 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 	 * only during this function, then they are immediately spliced
 	 * back onto the free list in form of a chain
 	 */
-	do {
-		/* Allocate the link descriptor from the free list */
-		new = sh_dmae_get_desc(sh_chan);
-		if (!new) {
-			dev_err(sh_chan->dev,
-				"No free memory for link descriptor\n");
-			list_for_each_entry(new, &tx_list, node)
-				new->mark = DESC_IDLE;
-			list_splice(&tx_list, &sh_chan->ld_free);
-			spin_unlock_bh(&sh_chan->desc_lock);
-			return NULL;
-		}
-
-		copy_size = min(len, (size_t)SH_DMA_TCR_MAX + 1);
-
-		new->hw.sar = dma_src;
-		new->hw.dar = dma_dest;
-		new->hw.tcr = copy_size;
-		if (!first) {
-			/* First desc */
-			new->async_tx.cookie = -EBUSY;
-			first = new;
-		} else {
-			/* Other desc - invisible to the user */
-			new->async_tx.cookie = -EINVAL;
-		}
-
-		dev_dbg(sh_chan->dev,
-			"chaining %u of %u with %p, dst %x, cookie %d\n",
-			copy_size, len, &new->async_tx, dma_dest,
-			new->async_tx.cookie);
-
-		new->mark = DESC_PREPARED;
-		new->async_tx.flags = flags;
-		new->chunks = chunks--;
-
-		prev = new;
-		len -= copy_size;
-		dma_src += copy_size;
-		dma_dest += copy_size;
-		/* Insert the link descriptor to the LD ring */
-		list_add_tail(&new->node, &tx_list);
-	} while (len);
+	for_each_sg(sgl, sg, sg_len, i) {
+		dma_addr_t sg_addr = sg_dma_address(sg);
+		size_t len = sg_dma_len(sg);
+
+		if (!len)
+			goto err_get_desc;
+
+		do {
+			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
+				i, sg, len, (unsigned long long)sg_addr);
+
+			if (direction == DMA_FROM_DEVICE)
+				new = sh_dmae_add_desc(sh_chan, flags,
+						&sg_addr, addr, &len, &first,
+						direction);
+			else
+				new = sh_dmae_add_desc(sh_chan, flags,
+						addr, &sg_addr, &len, &first,
+						direction);
+			if (!new)
+				goto err_get_desc;
+
+			new->chunks = chunks--;
+			list_add_tail(&new->node, &tx_list);
+		} while (len);
+	}
 
 	if (new != first)
 		new->async_tx.cookie = -ENOSPC;
@@ -386,6 +440,37 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 	spin_unlock_bh(&sh_chan->desc_lock);
 
 	return &first->async_tx;
+
+err_get_desc:
+	list_for_each_entry(new, &tx_list, node)
+		new->mark = DESC_IDLE;
+	list_splice(&tx_list, &sh_chan->ld_free);
+
+	spin_unlock_bh(&sh_chan->desc_lock);
+
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
+	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
+	size_t len, unsigned long flags)
+{
+	struct sh_dmae_chan *sh_chan;
+	struct scatterlist sg;
+
+	if (!chan || !len)
+		return NULL;
+
+	sh_chan = to_sh_chan(chan);
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
+		    offset_in_page(dma_src));
+	sg_dma_address(&sg) = dma_src;
+	sg_dma_len(&sg) = len;
+
+	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
+			       flags);
 }
 
 static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
@@ -559,7 +644,7 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
 
 	/* IRQ Multi */
 	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
-		int cnt = 0;
+		int __maybe_unused cnt = 0;
 		switch (irq) {
 #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
 		case DMTE6_IRQ:
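
For the slave use this patch prepares, the same sh_dmae_prep_sg() would be
called with a real direction and a constant device address, as the comment
above it describes: the SG list maps RAM, and the extra address is the slave
FIFO register. A hypothetical sketch of a future slave prep callback body
(sgl, sg_len, slave_addr and flags are assumed inputs, none of them defined
by this patch):

	/* The SG list describes the RAM side; slave_addr stays constant
	 * because direction is DMA_TO_DEVICE, so sh_dmae_add_desc() only
	 * increments the source (RAM) address after each chunk */
	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
			       DMA_TO_DEVICE, flags);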