author:    Robert Baldyga <r.baldyga@samsung.com>  2015-02-11 07:23:17 -0500
committer: Vinod Koul <vinod.koul@intel.com>       2015-02-15 23:03:35 -0500
commit:    aee4d1fac887252faf6f7caf7bf1616131d5dbcd
tree:      0faf7d79ee54bffcf1cbddacd00593a4d74820fc
parent:    be6893e1958035cbeff281b833777c5cd3fb36ad
dmaengine: pl330: improve pl330_tx_status() function
This patch adds the possibility to read the residue of a DMA transfer. It is useful when we want to know how many bytes have been transferred before we terminate the channel. This can happen, for example, on a timeout interrupt.

Signed-off-by: Lukasz Czerwinski <l.czerwinski@samsung.com>
Signed-off-by: Robert Baldyga <r.baldyga@samsung.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
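For context, a minimal client-side sketch of how the residue becomes visible through the generic dmaengine API once this patch is applied. The handler name and the bytes_requested parameter are illustrative only, not part of this patch; dmaengine_tx_status(), struct dma_tx_state and dmaengine_terminate_all() are the standard dmaengine interfaces.

#include <linux/dmaengine.h>

/*
 * Hypothetical timeout handler in a client driver. pl330_tx_status()
 * now fills state.residue, so the client can tell how far the
 * transfer got before terminating the channel.
 */
static void my_timeout_handler(struct dma_chan *chan, dma_cookie_t cookie,
			       unsigned int bytes_requested)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status != DMA_COMPLETE)
		pr_info("moved %u of %u bytes before timeout\n",
			bytes_requested - state.residue, bytes_requested);

	/* Stop the channel now that we know how far it got */
	dmaengine_terminate_all(chan);
}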
 drivers/dma/pl330.c | 74 +++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 72 insertions(+), 2 deletions(-)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 2dbc93011c0e..944b67622916 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -504,6 +504,9 @@ struct dma_pl330_desc {
 
 	enum desc_status status;
 
+	int bytes_requested;
+	bool last;
+
 	/* The channel which currently holds this desc */
 	struct dma_pl330_chan *pchan;
 
@@ -2173,11 +2176,74 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
 	pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
 }
 
+int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
+		struct dma_pl330_desc *desc)
+{
+	struct pl330_thread *thrd = pch->thread;
+	struct pl330_dmac *pl330 = pch->dmac;
+	void __iomem *regs = thrd->dmac->base;
+	u32 val, addr;
+
+	pm_runtime_get_sync(pl330->ddma.dev);
+	val = addr = 0;
+	if (desc->rqcfg.src_inc) {
+		val = readl(regs + SA(thrd->id));
+		addr = desc->px.src_addr;
+	} else {
+		val = readl(regs + DA(thrd->id));
+		addr = desc->px.dst_addr;
+	}
+	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
+	pm_runtime_put_autosuspend(pl330->ddma.dev);
+	return val - addr;
+}
+
 static enum dma_status
 pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 		struct dma_tx_state *txstate)
 {
-	return dma_cookie_status(chan, cookie, txstate);
+	enum dma_status ret;
+	unsigned long flags;
+	struct dma_pl330_desc *desc, *running = NULL;
+	struct dma_pl330_chan *pch = to_pchan(chan);
+	unsigned int transferred, residual = 0;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+
+	if (!txstate)
+		return ret;
+
+	if (ret == DMA_COMPLETE)
+		goto out;
+
+	spin_lock_irqsave(&pch->lock, flags);
+
+	if (pch->thread->req_running != -1)
+		running = pch->thread->req[pch->thread->req_running].desc;
+
+	/* Check in pending list */
+	list_for_each_entry(desc, &pch->work_list, node) {
+		if (desc->status == DONE)
+			transferred = desc->bytes_requested;
+		else if (running && desc == running)
+			transferred =
+				pl330_get_current_xferred_count(pch, desc);
+		else
+			transferred = 0;
+		residual += desc->bytes_requested - transferred;
+		if (desc->txd.cookie == cookie) {
+			ret = desc->status;
+			break;
+		}
+		if (desc->last)
+			residual = 0;
+	}
+	spin_unlock_irqrestore(&pch->lock, flags);
+
+out:
+	dma_set_residue(txstate, residual);
+
+	return ret;
 }
 
 static void pl330_issue_pending(struct dma_chan *chan)
@@ -2222,12 +2288,14 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
 			desc->txd.callback = last->txd.callback;
 			desc->txd.callback_param = last->txd.callback_param;
 		}
+		last->last = false;
 
 		dma_cookie_assign(&desc->txd);
 
 		list_move_tail(&desc->node, &pch->submitted_list);
 	}
 
+	last->last = true;
 	cookie = dma_cookie_assign(&last->txd);
 	list_add_tail(&last->node, &pch->submitted_list);
 	spin_unlock_irqrestore(&pch->lock, flags);
@@ -2450,6 +2518,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
 		desc->rqtype = direction;
 		desc->rqcfg.brst_size = pch->burst_sz;
 		desc->rqcfg.brst_len = 1;
+		desc->bytes_requested = period_len;
 		fill_px(&desc->px, dst, src, period_len);
 
 		if (!first)
@@ -2592,6 +2661,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		desc->rqcfg.brst_size = pch->burst_sz;
 		desc->rqcfg.brst_len = 1;
 		desc->rqtype = direction;
+		desc->bytes_requested = sg_dma_len(sg);
 	}
 
 	/* Return the last desc in the chain */
@@ -2777,7 +2847,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
 	pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
 	pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	pd->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+	pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
 
 	ret = dma_async_device_register(pd);
 	if (ret) {
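To illustrate the arithmetic in the new pl330_tx_status() loop, here is a standalone userspace sketch (not kernel code; all values are invented): three 4096-byte descriptors form one transaction, the first is DONE, the second is the running request with 1024 bytes moved, and the third has not started. Querying the last cookie should report 0 + 3072 + 4096 = 7168 bytes of residue.

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-in for struct dma_pl330_desc, illustration only */
struct fake_desc {
	unsigned int bytes_requested;
	unsigned int transferred; /* what the hardware counter would report */
	bool done;
	bool last;   /* marks the final descriptor of a transaction */
	int cookie;
};

int main(void)
{
	/* One transaction of three descriptors; the second is mid-flight */
	struct fake_desc work_list[] = {
		{ 4096, 4096, true,  false, 1 },
		{ 4096, 1024, false, false, 2 },
		{ 4096, 0,    false, true,  3 },
	};
	int query_cookie = 3;
	unsigned int residual = 0;

	for (int i = 0; i < 3; i++) {
		struct fake_desc *d = &work_list[i];

		residual += d->bytes_requested - d->transferred;
		if (d->cookie == query_cookie)
			break;
		if (d->last)
			residual = 0; /* a new transaction starts fresh */
	}
	printf("residue for cookie %d: %u bytes\n", query_cookie, residual);
	/* prints: residue for cookie 3: 7168 bytes */
	return 0;
}

This also shows why pl330_tx_submit() marks only the final descriptor with last = true: when the loop walks past a completed transaction, the reset at desc->last prevents its bytes from being counted against a later cookie.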