author     Andy Shevchenko <andriy.shevchenko@linux.intel.com>  2015-11-17 11:00:30 -0500
committer  Vinod Koul <vinod.koul@intel.com>                    2015-12-05 03:30:34 -0500
commit     f0579c8ceaf18adf1eca8b4404f9caac37208655 (patch)
tree       9b8908cb2e7a97fa7409cc7dbf32a93ecc094a52
parent     f94cf9f4c54a72ccbd2078bb0cedd3691a71c431 (diff)
dmaengine: hsu: speed up residue calculation
There is no need to calculate an overall length of the descriptor each
time we call for DMA transfer status. Instead we do this at descriptor
allocation stage and keep the stored length for further usage.

Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--  drivers/dma/hsu/hsu.c  17
-rw-r--r--  drivers/dma/hsu/hsu.h   1
2 files changed, 5 insertions, 13 deletions
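For illustration only, a minimal standalone sketch of the idea behind this patch, using simplified hypothetical structures (sketch_desc, sketch_sg, desc_prep) rather than the driver's own hsu_dma_desc: before the change the remaining byte count was recomputed by walking the scatter-gather entries on every status query; after it, the total is accumulated once while the descriptor is prepared and simply read back later.

#include <stdio.h>
#include <stddef.h>

struct sketch_sg {
	size_t len;
};

struct sketch_desc {
	struct sketch_sg sg[4];
	unsigned int nents;
	unsigned int active;	/* first not-yet-completed entry */
	size_t length;		/* cached total, filled once at prep time */
};

/* Old approach: walk the remaining entries on every status query. */
static size_t desc_size_walk(const struct sketch_desc *desc)
{
	size_t bytes = 0;
	unsigned int i;

	for (i = desc->active; i < desc->nents; i++)
		bytes += desc->sg[i].len;
	return bytes;
}

/* New approach: accumulate the total while the descriptor is prepared. */
static void desc_prep(struct sketch_desc *desc, const size_t *lens,
		      unsigned int n)
{
	unsigned int i;

	desc->length = 0;
	for (i = 0; i < n; i++) {
		desc->sg[i].len = lens[i];
		desc->length += lens[i];
	}
	desc->nents = n;
	desc->active = 0;
}

int main(void)
{
	const size_t lens[] = { 64, 128, 256 };
	struct sketch_desc d;

	desc_prep(&d, lens, 3);

	/* For a descriptor that has not started yet both give 448 bytes,
	 * but the cached value avoids re-walking the list each time. */
	printf("walked: %zu, cached: %zu\n", desc_size_walk(&d), d.length);
	return 0;
}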
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index 823ad728aecf..eef145edb936 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -228,6 +228,8 @@ static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
 	for_each_sg(sgl, sg, sg_len, i) {
 		desc->sg[i].addr = sg_dma_address(sg);
 		desc->sg[i].len = sg_dma_len(sg);
+
+		desc->length += sg_dma_len(sg);
 	}
 
 	desc->nents = sg_len;
@@ -249,21 +251,10 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 }
 
-static size_t hsu_dma_desc_size(struct hsu_dma_desc *desc)
-{
-	size_t bytes = 0;
-	unsigned int i;
-
-	for (i = desc->active; i < desc->nents; i++)
-		bytes += desc->sg[i].len;
-
-	return bytes;
-}
-
 static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
 {
 	struct hsu_dma_desc *desc = hsuc->desc;
-	size_t bytes = hsu_dma_desc_size(desc);
+	size_t bytes = desc->length;
 	int i;
 
 	i = desc->active % HSU_DMA_CHAN_NR_DESC;
@@ -294,7 +285,7 @@ static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
 		dma_set_residue(state, bytes);
 		status = hsuc->desc->status;
 	} else if (vdesc) {
-		bytes = hsu_dma_desc_size(to_hsu_dma_desc(vdesc));
+		bytes = to_hsu_dma_desc(vdesc)->length;
 		dma_set_residue(state, bytes);
 	}
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index f06579c6d548..578a8ee8cd05 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -65,6 +65,7 @@ struct hsu_dma_desc {
 	enum dma_transfer_direction direction;
 	struct hsu_dma_sg *sg;
 	unsigned int nents;
+	size_t length;
 	unsigned int active;
 	enum dma_status status;
 };