about summary refs log tree commit diff stats
path: root/drivers/dma/mxs-dma.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/dma/mxs-dma.c')
-rw-r--r--  drivers/dma/mxs-dma.c  60
1 files changed, 37 insertions, 23 deletions
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index b06cd4ca626f..c81ef7e10e08 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -22,12 +22,14 @@
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/dmaengine.h> 23#include <linux/dmaengine.h>
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/fsl/mxs-dma.h>
25 26
26#include <asm/irq.h> 27#include <asm/irq.h>
27#include <mach/mxs.h> 28#include <mach/mxs.h>
28#include <mach/dma.h>
29#include <mach/common.h> 29#include <mach/common.h>
30 30
31#include "dmaengine.h"
32
31/* 33/*
32 * NOTE: The term "PIO" throughout the mxs-dma implementation means 34 * NOTE: The term "PIO" throughout the mxs-dma implementation means
33 * PIO mode of mxs apbh-dma and apbx-dma. With this working mode, 35 * PIO mode of mxs apbh-dma and apbx-dma. With this working mode,
@@ -111,7 +113,6 @@ struct mxs_dma_chan {
111 struct mxs_dma_ccw *ccw; 113 struct mxs_dma_ccw *ccw;
112 dma_addr_t ccw_phys; 114 dma_addr_t ccw_phys;
113 int desc_count; 115 int desc_count;
114 dma_cookie_t last_completed;
115 enum dma_status status; 116 enum dma_status status;
116 unsigned int flags; 117 unsigned int flags;
117#define MXS_DMA_SG_LOOP (1 << 0) 118#define MXS_DMA_SG_LOOP (1 << 0)
@@ -193,19 +194,6 @@ static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
193 mxs_chan->status = DMA_IN_PROGRESS; 194 mxs_chan->status = DMA_IN_PROGRESS;
194} 195}
195 196
196static dma_cookie_t mxs_dma_assign_cookie(struct mxs_dma_chan *mxs_chan)
197{
198 dma_cookie_t cookie = mxs_chan->chan.cookie;
199
200 if (++cookie < 0)
201 cookie = 1;
202
203 mxs_chan->chan.cookie = cookie;
204 mxs_chan->desc.cookie = cookie;
205
206 return cookie;
207}
208
209static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan) 197static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
210{ 198{
211 return container_of(chan, struct mxs_dma_chan, chan); 199 return container_of(chan, struct mxs_dma_chan, chan);
@@ -217,7 +205,7 @@ static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
217 205
218 mxs_dma_enable_chan(mxs_chan); 206 mxs_dma_enable_chan(mxs_chan);
219 207
220 return mxs_dma_assign_cookie(mxs_chan); 208 return dma_cookie_assign(tx);
221} 209}
222 210
223static void mxs_dma_tasklet(unsigned long data) 211static void mxs_dma_tasklet(unsigned long data)
@@ -274,7 +262,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
274 stat1 &= ~(1 << channel); 262 stat1 &= ~(1 << channel);
275 263
276 if (mxs_chan->status == DMA_SUCCESS) 264 if (mxs_chan->status == DMA_SUCCESS)
277 mxs_chan->last_completed = mxs_chan->desc.cookie; 265 dma_cookie_complete(&mxs_chan->desc);
278 266
279 /* schedule tasklet on this channel */ 267 /* schedule tasklet on this channel */
280 tasklet_schedule(&mxs_chan->tasklet); 268 tasklet_schedule(&mxs_chan->tasklet);
@@ -349,10 +337,32 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
349 clk_disable_unprepare(mxs_dma->clk); 337 clk_disable_unprepare(mxs_dma->clk);
350} 338}
351 339
340/*
341 * How to use the flags for ->device_prep_slave_sg() :
342 * [1] If there is only one DMA command in the DMA chain, the code should be:
343 * ......
344 * ->device_prep_slave_sg(DMA_CTRL_ACK);
345 * ......
346 * [2] If there are two DMA commands in the DMA chain, the code should be
347 * ......
348 * ->device_prep_slave_sg(0);
349 * ......
350 * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
351 * ......
352 * [3] If there are more than two DMA commands in the DMA chain, the code
353 * should be:
354 * ......
355 * ->device_prep_slave_sg(0); // First
356 * ......
357 * ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
358 * ......
359 * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last
360 * ......
361 */
352static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( 362static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
353 struct dma_chan *chan, struct scatterlist *sgl, 363 struct dma_chan *chan, struct scatterlist *sgl,
354 unsigned int sg_len, enum dma_transfer_direction direction, 364 unsigned int sg_len, enum dma_transfer_direction direction,
355 unsigned long append) 365 unsigned long flags, void *context)
356{ 366{
357 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 367 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
358 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 368 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -360,6 +370,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
360 struct scatterlist *sg; 370 struct scatterlist *sg;
361 int i, j; 371 int i, j;
362 u32 *pio; 372 u32 *pio;
373 bool append = flags & DMA_PREP_INTERRUPT;
363 int idx = append ? mxs_chan->desc_count : 0; 374 int idx = append ? mxs_chan->desc_count : 0;
364 375
365 if (mxs_chan->status == DMA_IN_PROGRESS && !append) 376 if (mxs_chan->status == DMA_IN_PROGRESS && !append)
@@ -386,7 +397,6 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
386 ccw->bits |= CCW_CHAIN; 397 ccw->bits |= CCW_CHAIN;
387 ccw->bits &= ~CCW_IRQ; 398 ccw->bits &= ~CCW_IRQ;
388 ccw->bits &= ~CCW_DEC_SEM; 399 ccw->bits &= ~CCW_DEC_SEM;
389 ccw->bits &= ~CCW_WAIT4END;
390 } else { 400 } else {
391 idx = 0; 401 idx = 0;
392 } 402 }
@@ -401,7 +411,8 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
401 ccw->bits = 0; 411 ccw->bits = 0;
402 ccw->bits |= CCW_IRQ; 412 ccw->bits |= CCW_IRQ;
403 ccw->bits |= CCW_DEC_SEM; 413 ccw->bits |= CCW_DEC_SEM;
404 ccw->bits |= CCW_WAIT4END; 414 if (flags & DMA_CTRL_ACK)
415 ccw->bits |= CCW_WAIT4END;
405 ccw->bits |= CCW_HALT_ON_TERM; 416 ccw->bits |= CCW_HALT_ON_TERM;
406 ccw->bits |= CCW_TERM_FLUSH; 417 ccw->bits |= CCW_TERM_FLUSH;
407 ccw->bits |= BF_CCW(sg_len, PIO_NUM); 418 ccw->bits |= BF_CCW(sg_len, PIO_NUM);
@@ -432,7 +443,8 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
432 ccw->bits &= ~CCW_CHAIN; 443 ccw->bits &= ~CCW_CHAIN;
433 ccw->bits |= CCW_IRQ; 444 ccw->bits |= CCW_IRQ;
434 ccw->bits |= CCW_DEC_SEM; 445 ccw->bits |= CCW_DEC_SEM;
435 ccw->bits |= CCW_WAIT4END; 446 if (flags & DMA_CTRL_ACK)
447 ccw->bits |= CCW_WAIT4END;
436 } 448 }
437 } 449 }
438 } 450 }
@@ -447,7 +459,8 @@ err_out:
447 459
448static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( 460static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
449 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 461 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
450 size_t period_len, enum dma_transfer_direction direction) 462 size_t period_len, enum dma_transfer_direction direction,
463 void *context)
451{ 464{
452 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 465 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
453 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 466 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -538,7 +551,7 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
538 dma_cookie_t last_used; 551 dma_cookie_t last_used;
539 552
540 last_used = chan->cookie; 553 last_used = chan->cookie;
541 dma_set_tx_state(txstate, mxs_chan->last_completed, last_used, 0); 554 dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0);
542 555
543 return mxs_chan->status; 556 return mxs_chan->status;
544} 557}
@@ -630,6 +643,7 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
630 643
631 mxs_chan->mxs_dma = mxs_dma; 644 mxs_chan->mxs_dma = mxs_dma;
632 mxs_chan->chan.device = &mxs_dma->dma_device; 645 mxs_chan->chan.device = &mxs_dma->dma_device;
646 dma_cookie_init(&mxs_chan->chan);
633 647
634 tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet, 648 tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
635 (unsigned long) mxs_chan); 649 (unsigned long) mxs_chan);