about · summary · refs · log · tree · commit · diff · stats
path: root/drivers/dma/dw_dmac.c
diff options
context:
space:
mode:
authorViresh Kumar <viresh.kumar@st.com>2011-03-03 05:17:16 -0500
committerVinod Koul <vinod.koul@intel.com>2011-03-06 14:42:27 -0500
commitf336e42f73d93b74fd21bf9176ee6c7ab8b195c5 (patch)
treefd42f601403891f212943fe3c186a30bc5698c0f /drivers/dma/dw_dmac.c
parentcb689a706d17ef19a61735670ded60466dd015fa (diff)
dw_dmac: Move single descriptor from dwc->queue to dwc->active_list in dwc_complete_all
dwc_complete_all and other routines were removing all descriptors from dwc->queue and pushing them to dwc->active_list, when only one was required to be removed. Also, dwc_dostart is now called once the list is fixed. Signed-off-by: Viresh Kumar <viresh.kumar@st.com> Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma/dw_dmac.c')
-rw-r--r--drivers/dma/dw_dmac.c20
1 file changed, 8 insertions, 12 deletions
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 064a1830a76b..942b50f57f21 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -87,11 +87,6 @@ static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
 	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
 }
 
-static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc)
-{
-	return list_entry(dwc->queue.next, struct dw_desc, desc_node);
-}
-
 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
 {
 	struct dw_desc *desc, *_desc;
@@ -262,10 +257,11 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	 * Submit queued descriptors ASAP, i.e. before we go through
 	 * the completed ones.
 	 */
-	if (!list_empty(&dwc->queue))
-		dwc_dostart(dwc, dwc_first_queued(dwc));
 	list_splice_init(&dwc->active_list, &list);
-	list_splice_init(&dwc->queue, &dwc->active_list);
+	if (!list_empty(&dwc->queue)) {
+		list_move(dwc->queue.next, &dwc->active_list);
+		dwc_dostart(dwc, dwc_first_active(dwc));
+	}
 
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
 		dwc_descriptor_complete(dwc, desc);
@@ -325,8 +321,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		cpu_relax();
 
 	if (!list_empty(&dwc->queue)) {
-		dwc_dostart(dwc, dwc_first_queued(dwc));
-		list_splice_init(&dwc->queue, &dwc->active_list);
+		list_move(dwc->queue.next, &dwc->active_list);
+		dwc_dostart(dwc, dwc_first_active(dwc));
 	}
 }
332 328
@@ -352,7 +348,7 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	 */
 	bad_desc = dwc_first_active(dwc);
 	list_del_init(&bad_desc->desc_node);
-	list_splice_init(&dwc->queue, dwc->active_list.prev);
+	list_move(dwc->queue.next, dwc->active_list.prev);
 
 	/* Clear the error flag and try to restart the controller */
 	dma_writel(dw, CLEAR.ERROR, dwc->mask);
@@ -547,8 +543,8 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
 	if (list_empty(&dwc->active_list)) {
 		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
 				desc->txd.cookie);
-		dwc_dostart(dwc, desc);
 		list_add_tail(&desc->desc_node, &dwc->active_list);
+		dwc_dostart(dwc, dwc_first_active(dwc));
 	} else {
 		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
 				desc->txd.cookie);