aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma
diff options
context:
space:
mode:
authorHuang Shijie <b32955@freescale.com>2012-02-16 01:17:33 -0500
committerDavid Woodhouse <David.Woodhouse@intel.com>2012-03-26 19:37:28 -0400
commit921de864b7c6413f15224d8f5e677541e8e1ac6d (patch)
tree325815e4a65a26b961796314fdb0b2cd6e0b9975 /drivers/dma
parent3946860409130038ef6e0e5c50f2203053eae2b7 (diff)
mxs-dma : rewrite the last parameter of mxs_dma_prep_slave_sg()
[1] Background : The GPMI does ECC read page operation with a DMA chain consist of three DMA Command Structures. The middle one of the chain is used to enable the BCH, and read out the NAND page. The WAIT4END(wait for command end) is a comunication signal between the GPMI and MXS-DMA. [2] The current DMA code sets the WAIT4END bit at the last one, such as: +-----+ +-----+ +-----+ | cmd | ------------> | cmd | ------------------> | cmd | +-----+ +-----+ +-----+ ^ | | set WAIT4END here This chain works fine in the mx23/mx28. [3] But in the new GPMI version (used in MX50/MX60), the WAIT4END bit should be set not only at the last DMA Command Structure, but also at the middle one, such as: +-----+ +-----+ +-----+ | cmd | ------------> | cmd | ------------------> | cmd | +-----+ +-----+ +-----+ ^ ^ | | | | set WAIT4END here too set WAIT4END here If we do not set WAIT4END, the BCH maybe stalls in "ECC reading page" state. In the next ECC write page operation, a DMA-timeout occurs. This has been catched in the MX6Q board. [4] In order to fix the bug, rewrite the last parameter of mxs_dma_prep_slave_sg(), and use the dma_ctrl_flags: --------------------------------------------------------- DMA_PREP_INTERRUPT : append a new DMA Command Structrue. DMA_CTRL_ACK : set the WAIT4END bit for this DMA Command Structure. --------------------------------------------------------- [5] changes to the relative drivers: <1> For mxs-mmc driver, just use the new flags, do not change any logic. <2> For gpmi-nand driver, and use the new flags to set the DMA chain, especially for ecc read page. Acked-by: Shawn Guo <shawn.guo@linaro.org> Signed-off-by: Huang Shijie <b32955@freescale.com> Acked-by: Vinod Koul <vinod.koul@linux.intel.com> Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--drivers/dma/mxs-dma.c32
1 file changed, 28 insertions, 4 deletions
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 0afcedbe2471..0ddfd30b56ad 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -349,10 +349,32 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
349 clk_disable_unprepare(mxs_dma->clk); 349 clk_disable_unprepare(mxs_dma->clk);
350} 350}
351 351
352/*
353 * How to use the flags for ->device_prep_slave_sg() :
354 * [1] If there is only one DMA command in the DMA chain, the code should be:
355 * ......
356 * ->device_prep_slave_sg(DMA_CTRL_ACK);
357 * ......
358 * [2] If there are two DMA commands in the DMA chain, the code should be
359 * ......
360 * ->device_prep_slave_sg(0);
361 * ......
362 * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
363 * ......
364 * [3] If there are more than two DMA commands in the DMA chain, the code
365 * should be:
366 * ......
367 * ->device_prep_slave_sg(0); // First
368 * ......
369 * ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
370 * ......
371 * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last
372 * ......
373 */
352static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( 374static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
353 struct dma_chan *chan, struct scatterlist *sgl, 375 struct dma_chan *chan, struct scatterlist *sgl,
354 unsigned int sg_len, enum dma_transfer_direction direction, 376 unsigned int sg_len, enum dma_transfer_direction direction,
355 unsigned long append) 377 unsigned long flags)
356{ 378{
357 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 379 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
358 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 380 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -360,6 +382,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
360 struct scatterlist *sg; 382 struct scatterlist *sg;
361 int i, j; 383 int i, j;
362 u32 *pio; 384 u32 *pio;
385 bool append = flags & DMA_PREP_INTERRUPT;
363 int idx = append ? mxs_chan->desc_count : 0; 386 int idx = append ? mxs_chan->desc_count : 0;
364 387
365 if (mxs_chan->status == DMA_IN_PROGRESS && !append) 388 if (mxs_chan->status == DMA_IN_PROGRESS && !append)
@@ -386,7 +409,6 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
386 ccw->bits |= CCW_CHAIN; 409 ccw->bits |= CCW_CHAIN;
387 ccw->bits &= ~CCW_IRQ; 410 ccw->bits &= ~CCW_IRQ;
388 ccw->bits &= ~CCW_DEC_SEM; 411 ccw->bits &= ~CCW_DEC_SEM;
389 ccw->bits &= ~CCW_WAIT4END;
390 } else { 412 } else {
391 idx = 0; 413 idx = 0;
392 } 414 }
@@ -401,7 +423,8 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
401 ccw->bits = 0; 423 ccw->bits = 0;
402 ccw->bits |= CCW_IRQ; 424 ccw->bits |= CCW_IRQ;
403 ccw->bits |= CCW_DEC_SEM; 425 ccw->bits |= CCW_DEC_SEM;
404 ccw->bits |= CCW_WAIT4END; 426 if (flags & DMA_CTRL_ACK)
427 ccw->bits |= CCW_WAIT4END;
405 ccw->bits |= CCW_HALT_ON_TERM; 428 ccw->bits |= CCW_HALT_ON_TERM;
406 ccw->bits |= CCW_TERM_FLUSH; 429 ccw->bits |= CCW_TERM_FLUSH;
407 ccw->bits |= BF_CCW(sg_len, PIO_NUM); 430 ccw->bits |= BF_CCW(sg_len, PIO_NUM);
@@ -432,7 +455,8 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
432 ccw->bits &= ~CCW_CHAIN; 455 ccw->bits &= ~CCW_CHAIN;
433 ccw->bits |= CCW_IRQ; 456 ccw->bits |= CCW_IRQ;
434 ccw->bits |= CCW_DEC_SEM; 457 ccw->bits |= CCW_DEC_SEM;
435 ccw->bits |= CCW_WAIT4END; 458 if (flags & DMA_CTRL_ACK)
459 ccw->bits |= CCW_WAIT4END;
436 } 460 }
437 } 461 }
438 } 462 }