author		Markus Pargmann <mpa@pengutronix.de>	2013-10-29 03:47:49 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2013-11-13 05:08:31 -0500
commit		2dcbdce3610a1ba190a74b782b9f7f1f7f976325 (patch)
tree		07919623feae644abd4148a7b0424c4c1e5416e1 /drivers/dma/mxs-dma.c
parent		bb3660f130996d2653f123ee1ad802f11adbfb1c (diff)
dma: mxs-dma: Use semaphores for cyclic DMA
The mxs DMA channel hardware reset command is not reliable and can cause a
channel stall. The only way to fix the channel stall is a DMA engine reset.

To avoid channel resets we use the hardware semaphore counter. For each
transmitted segment, the DMA channel will decrease the counter by one. To use
this mechanism with cyclic DMA, we need to increase the semaphore counter with
each completed DMA command in the interrupt handler. To avoid any
interruptions between the DMA transfers, the semaphore counter is initialized
with 2. This way the counter can be increased in the interrupt handler without
influencing the running transfer of the DMA engine.

When disabling the channel, we stop increasing the semaphore counter in the
interrupt handler.

This patch was tested on i.MX28 with the SAIF DMA channel.

Signed-off-by: Markus Pargmann <mpa@pengutronix.de>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
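As a rough illustration of the mechanism described above (not part of the
patch), the semaphore counter behaviour can be modelled as follows, assuming
each completed segment decrements the hardware counter via CCW_DEC_SEM and the
interrupt handler writes 1 back to the counter while the channel stays
enabled:

#include <stdbool.h>

/* Hypothetical model of the CHn_SEMA counter, for illustration only. */
static unsigned int sema = 2;	/* value written when the channel is enabled */

static void segment_completed(bool channel_enabled)
{
	sema--;			/* hardware decrements via CCW_DEC_SEM */
	if (channel_enabled)
		sema++;		/* interrupt handler writes 1 back to CHn_SEMA */
	/*
	 * While the channel is enabled the counter moves between 1 and 2 and
	 * never reaches 0, so the transfer is not interrupted. Once the
	 * handler stops re-arming it, the counter drains to 0 and the channel
	 * halts without needing a hardware reset.
	 */
}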
Diffstat (limited to 'drivers/dma/mxs-dma.c')
-rw-r--r--	drivers/dma/mxs-dma.c	41
1 file changed, 35 insertions(+), 6 deletions(-)
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index f48f87feeca4..ead491346da7 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -119,7 +119,9 @@ struct mxs_dma_chan {
 	int				desc_count;
 	enum dma_status			status;
 	unsigned int			flags;
+	bool				reset;
 #define MXS_DMA_SG_LOOP			(1 << 0)
+#define MXS_DMA_USE_SEMAPHORE		(1 << 1)
 };
 
 #define MXS_DMA_CHANNELS		16
@@ -205,7 +207,17 @@ static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
 	int chan_id = mxs_chan->chan.chan_id;
 
-	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) {
+	/*
+	 * mxs dma channel resets can cause a channel stall. To recover from a
+	 * channel stall, we have to reset the whole DMA engine. To avoid this,
+	 * we use cyclic DMA with semaphores, that are enhanced in
+	 * mxs_dma_int_handler. To reset the channel, we can simply stop writing
+	 * into the semaphore counter.
+	 */
+	if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
+			mxs_chan->flags & MXS_DMA_SG_LOOP) {
+		mxs_chan->reset = true;
+	} else if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) {
 		writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
 			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
 	} else {
@@ -231,7 +243,6 @@ static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
 				"Failed waiting for the DMA channel %d to leave state READ_FLUSH, trying to reset channel in READ_FLUSH state now\n",
 				chan_id);
 
-
 		writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
 			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
 	}
@@ -249,7 +260,16 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
 		mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));
 
 	/* write 1 to SEMA to kick off the channel */
-	writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
+	if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
+			mxs_chan->flags & MXS_DMA_SG_LOOP) {
+		/* A cyclic DMA consists of at least 2 segments, so initialize
+		 * the semaphore with 2 so we have enough time to add 1 to the
+		 * semaphore if we need to */
+		writel(2, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
+	} else {
+		writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
+	}
+	mxs_chan->reset = false;
 }
 
 static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
@@ -365,14 +385,21 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
 		mxs_chan->status = DMA_ERROR;
 		mxs_dma_reset_chan(mxs_chan);
 	} else if (mxs_chan->status != DMA_COMPLETE) {
-		if (mxs_chan->flags & MXS_DMA_SG_LOOP)
+		if (mxs_chan->flags & MXS_DMA_SG_LOOP) {
 			mxs_chan->status = DMA_IN_PROGRESS;
-		else
+			if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE)
+				writel(1, mxs_dma->base +
+					HW_APBHX_CHn_SEMA(mxs_dma, chan));
+		} else {
 			mxs_chan->status = DMA_COMPLETE;
+		}
 	}
 
-	if (mxs_chan->status == DMA_COMPLETE)
+	if (mxs_chan->status == DMA_COMPLETE) {
+		if (mxs_chan->reset)
+			return IRQ_HANDLED;
 		dma_cookie_complete(&mxs_chan->desc);
+	}
 
 	/* schedule tasklet on this channel */
 	tasklet_schedule(&mxs_chan->tasklet);
@@ -576,6 +603,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
 
 	mxs_chan->status = DMA_IN_PROGRESS;
 	mxs_chan->flags |= MXS_DMA_SG_LOOP;
+	mxs_chan->flags |= MXS_DMA_USE_SEMAPHORE;
 
 	if (num_periods > NUM_CCW) {
 		dev_err(mxs_dma->dma_device.dev,
@@ -607,6 +635,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
 		ccw->bits |= CCW_IRQ;
 		ccw->bits |= CCW_HALT_ON_TERM;
 		ccw->bits |= CCW_TERM_FLUSH;
+		ccw->bits |= CCW_DEC_SEM;
 		ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
 			MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
 
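For context, a client such as the i.MX28 SAIF driver reaches this cyclic path
through the generic dmaengine API. The sketch below is illustrative only and
not part of the patch; the function name, the channel name "rx" and the buffer
parameters are made-up placeholders:

#include <linux/dmaengine.h>
#include <linux/errno.h>

/* Illustrative only: set up and start a cyclic transfer on an mxs-dma channel. */
static int example_start_cyclic(struct device *dev, dma_addr_t buf,
				size_t buf_len, size_t period_len)
{
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;

	chan = dma_request_slave_channel(dev, "rx");	/* hypothetical channel name */
	if (!chan)
		return -ENODEV;

	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -EINVAL;
	}

	dmaengine_submit(desc);
	/* For mxs-dma, issuing starts the channel; with this patch the
	 * semaphore counter is initialized to 2 at that point. */
	dma_async_issue_pending(chan);
	return 0;
}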