aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma/qcom
diff options
context:
space:
mode:
authorSinan Kaya <okaya@codeaurora.org>2016-08-31 11:10:29 -0400
committerVinod Koul <vinod.koul@intel.com>2016-08-31 11:57:32 -0400
commit793ae66c7dcc7e6655029f6613221a111b15b58e (patch)
treed5d0458836e846247b0f0ebb20d842dbe5a3df09 /drivers/dma/qcom
parent55c370e5198e8cf28b1529299e9c1bfe237c9c1e (diff)
dmaengine: qcom_hidma: add error reporting for tx_status
The HIDMA driver is capable of error detection. However, the error was not being passed back to the client when tx_status API is called. Changing the error handling behavior to follow this order. 1. dmaengine asserts error interrupt 2. Driver receives and marks the txn as error 3. Driver completes the txn and intimates the client. No further submissions. Drop the locks before calling callback, as subsequent processing by client may be in callback thread. 4. Client invokes status and you can return error 5. On error, client calls terminate_all. You can reset channel, free all descriptors in the active, pending and completed lists 6. Client prepares new txn and so on. As part of this work, got rid of the reset in the interrupt handler when an error happens and the HW is put into disabled state. The only way to recover is for the client to terminate the channel. Signed-off-by: Sinan Kaya <okaya@codeaurora.org> Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma/qcom')
-rw-r--r--drivers/dma/qcom/hidma.c30
-rw-r--r--drivers/dma/qcom/hidma.h2
-rw-r--r--drivers/dma/qcom/hidma_ll.c32
3 files changed, 33 insertions, 31 deletions
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index ea24863794b9..e244e10a94b5 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -129,6 +129,7 @@ static void hidma_process_completed(struct hidma_chan *mchan)
129 struct dmaengine_result result; 129 struct dmaengine_result result;
130 130
131 desc = &mdesc->desc; 131 desc = &mdesc->desc;
132 last_cookie = desc->cookie;
132 133
133 spin_lock_irqsave(&mchan->lock, irqflags); 134 spin_lock_irqsave(&mchan->lock, irqflags);
134 dma_cookie_complete(desc); 135 dma_cookie_complete(desc);
@@ -137,15 +138,15 @@ static void hidma_process_completed(struct hidma_chan *mchan)
137 llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch); 138 llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
138 dmaengine_desc_get_callback(desc, &cb); 139 dmaengine_desc_get_callback(desc, &cb);
139 140
140 last_cookie = desc->cookie;
141 dma_run_dependencies(desc); 141 dma_run_dependencies(desc);
142 142
143 spin_lock_irqsave(&mchan->lock, irqflags); 143 spin_lock_irqsave(&mchan->lock, irqflags);
144 list_move(&mdesc->node, &mchan->free); 144 list_move(&mdesc->node, &mchan->free);
145 145
146 if (llstat == DMA_COMPLETE) 146 if (llstat == DMA_COMPLETE) {
147 mchan->last_success = last_cookie;
147 result.result = DMA_TRANS_NOERROR; 148 result.result = DMA_TRANS_NOERROR;
148 else 149 } else
149 result.result = DMA_TRANS_ABORTED; 150 result.result = DMA_TRANS_ABORTED;
150 151
151 spin_unlock_irqrestore(&mchan->lock, irqflags); 152 spin_unlock_irqrestore(&mchan->lock, irqflags);
@@ -246,6 +247,19 @@ static void hidma_issue_pending(struct dma_chan *dmach)
246 hidma_ll_start(dmadev->lldev); 247 hidma_ll_start(dmadev->lldev);
247} 248}
248 249
250static inline bool hidma_txn_is_success(dma_cookie_t cookie,
251 dma_cookie_t last_success, dma_cookie_t last_used)
252{
253 if (last_success <= last_used) {
254 if ((cookie <= last_success) || (cookie > last_used))
255 return true;
256 } else {
257 if ((cookie <= last_success) && (cookie > last_used))
258 return true;
259 }
260 return false;
261}
262
249static enum dma_status hidma_tx_status(struct dma_chan *dmach, 263static enum dma_status hidma_tx_status(struct dma_chan *dmach,
250 dma_cookie_t cookie, 264 dma_cookie_t cookie,
251 struct dma_tx_state *txstate) 265 struct dma_tx_state *txstate)
@@ -254,8 +268,13 @@ static enum dma_status hidma_tx_status(struct dma_chan *dmach,
254 enum dma_status ret; 268 enum dma_status ret;
255 269
256 ret = dma_cookie_status(dmach, cookie, txstate); 270 ret = dma_cookie_status(dmach, cookie, txstate);
257 if (ret == DMA_COMPLETE) 271 if (ret == DMA_COMPLETE) {
258 return ret; 272 bool is_success;
273
274 is_success = hidma_txn_is_success(cookie, mchan->last_success,
275 dmach->cookie);
276 return is_success ? ret : DMA_ERROR;
277 }
259 278
260 if (mchan->paused && (ret == DMA_IN_PROGRESS)) { 279 if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
261 unsigned long flags; 280 unsigned long flags;
@@ -406,6 +425,7 @@ static int hidma_terminate_channel(struct dma_chan *chan)
406 hidma_process_completed(mchan); 425 hidma_process_completed(mchan);
407 426
408 spin_lock_irqsave(&mchan->lock, irqflags); 427 spin_lock_irqsave(&mchan->lock, irqflags);
428 mchan->last_success = 0;
409 list_splice_init(&mchan->active, &list); 429 list_splice_init(&mchan->active, &list);
410 list_splice_init(&mchan->prepared, &list); 430 list_splice_init(&mchan->prepared, &list);
411 list_splice_init(&mchan->completed, &list); 431 list_splice_init(&mchan->completed, &list);
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
index db413a5efc4e..e52e20716303 100644
--- a/drivers/dma/qcom/hidma.h
+++ b/drivers/dma/qcom/hidma.h
@@ -72,7 +72,6 @@ struct hidma_lldev {
72 72
73 u32 tre_write_offset; /* TRE write location */ 73 u32 tre_write_offset; /* TRE write location */
74 struct tasklet_struct task; /* task delivering notifications */ 74 struct tasklet_struct task; /* task delivering notifications */
75 struct tasklet_struct rst_task; /* task to reset HW */
76 DECLARE_KFIFO_PTR(handoff_fifo, 75 DECLARE_KFIFO_PTR(handoff_fifo,
77 struct hidma_tre *); /* pending TREs FIFO */ 76 struct hidma_tre *); /* pending TREs FIFO */
78}; 77};
@@ -89,6 +88,7 @@ struct hidma_chan {
89 bool allocated; 88 bool allocated;
90 char dbg_name[16]; 89 char dbg_name[16];
91 u32 dma_sig; 90 u32 dma_sig;
91 dma_cookie_t last_success;
92 92
93 /* 93 /*
94 * active descriptor on this channel 94 * active descriptor on this channel
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index ad20dfb64c71..3224f24c577b 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -381,27 +381,6 @@ static int hidma_ll_reset(struct hidma_lldev *lldev)
381} 381}
382 382
383/* 383/*
384 * Abort all transactions and perform a reset.
385 */
386static void hidma_ll_abort(unsigned long arg)
387{
388 struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
389 u8 err_code = HIDMA_EVRE_STATUS_ERROR;
390 u8 err_info = 0xFF;
391 int rc;
392
393 hidma_cleanup_pending_tre(lldev, err_info, err_code);
394
395 /* reset the channel for recovery */
396 rc = hidma_ll_setup(lldev);
397 if (rc) {
398 dev_err(lldev->dev, "channel reinitialize failed after error\n");
399 return;
400 }
401 writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
402}
403
404/*
405 * The interrupt handler for HIDMA will try to consume as many pending 384 * The interrupt handler for HIDMA will try to consume as many pending
406 * EVRE from the event queue as possible. Each EVRE has an associated 385 * EVRE from the event queue as possible. Each EVRE has an associated
407 * TRE that holds the user interface parameters. EVRE reports the 386 * TRE that holds the user interface parameters. EVRE reports the
@@ -454,13 +433,18 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
454 433
455 while (cause) { 434 while (cause) {
456 if (cause & HIDMA_ERR_INT_MASK) { 435 if (cause & HIDMA_ERR_INT_MASK) {
457 dev_err(lldev->dev, "error 0x%x, resetting...\n", 436 dev_err(lldev->dev, "error 0x%x, disabling...\n",
458 cause); 437 cause);
459 438
460 /* Clear out pending interrupts */ 439 /* Clear out pending interrupts */
461 writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); 440 writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
462 441
463 tasklet_schedule(&lldev->rst_task); 442 /* No further submissions. */
443 hidma_ll_disable(lldev);
444
445 /* Driver completes the txn and intimates the client.*/
446 hidma_cleanup_pending_tre(lldev, 0xFF,
447 HIDMA_EVRE_STATUS_ERROR);
464 goto out; 448 goto out;
465 } 449 }
466 450
@@ -808,7 +792,6 @@ struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
808 return NULL; 792 return NULL;
809 793
810 spin_lock_init(&lldev->lock); 794 spin_lock_init(&lldev->lock);
811 tasklet_init(&lldev->rst_task, hidma_ll_abort, (unsigned long)lldev);
812 tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev); 795 tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev);
813 lldev->initialized = 1; 796 lldev->initialized = 1;
814 writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); 797 writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
@@ -831,7 +814,6 @@ int hidma_ll_uninit(struct hidma_lldev *lldev)
831 814
832 required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres; 815 required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
833 tasklet_kill(&lldev->task); 816 tasklet_kill(&lldev->task);
834 tasklet_kill(&lldev->rst_task);
835 memset(lldev->trepool, 0, required_bytes); 817 memset(lldev->trepool, 0, required_bytes);
836 lldev->trepool = NULL; 818 lldev->trepool = NULL;
837 lldev->pending_tre_count = 0; 819 lldev->pending_tre_count = 0;