author	Dave Jiang <dave.jiang@intel.com>	2016-07-25 13:34:03 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2016-08-07 22:41:43 -0400
commit	a941106de4434c0173a2c6d5abedb2d1cfc11206 (patch)
tree	376cdccd11295928551413901abf1c15f0c14190
parent	9b335978f7081cd4fe264709599a18073e12fee2 (diff)
dmaengine: fsl_raid: move unmap to before callback
The completion callback should happen after dma_descriptor_unmap() has happened. This allows the cache invalidation to happen and ensures that the data accessed by the upper layer is the memory that came from DMA rather than stale data. On some architectures this is done by the hardware; however, we should make the code consistent to avoid confusion.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Cc: Xuelin Shi <xuelin.shi@freescale.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
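For reference, below is a minimal sketch (not part of the patch) of the ordering this change establishes in a dmaengine driver's completion path. The struct my_desc type and my_desc_done() name are hypothetical stand-ins for a driver-private descriptor; the helpers dma_cookie_complete(), dma_descriptor_unmap(), and dmaengine_desc_get_callback_invoke() are the real ones used by fsl_raid.c.

/*
 * Sketch only: a driver-private descriptor embedding the generic
 * async_tx descriptor, completed in the order this patch requires.
 */
#include <linux/dmaengine.h>
#include "dmaengine.h"		/* drivers/dma/ local header: cookie and callback helpers */

struct my_desc {
	struct dma_async_tx_descriptor async_tx;
	/* driver-private state ... */
};

static void my_desc_done(struct my_desc *desc)
{
	/* Mark the cookie complete so status queries see DMA_COMPLETE. */
	dma_cookie_complete(&desc->async_tx);

	/*
	 * Unmap (and thereby sync/invalidate) the DMA buffers first, so
	 * the client's callback observes the data the hardware wrote,
	 * not a stale cache line.
	 */
	dma_descriptor_unmap(&desc->async_tx);

	/* Only now invoke the client's completion callback. */
	dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
}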
 drivers/dma/fsl_raid.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index 35d017a50502..a8c8b9ebd5b4 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -135,8 +135,8 @@ static void fsl_re_issue_pending(struct dma_chan *chan)
 static void fsl_re_desc_done(struct fsl_re_desc *desc)
 {
 	dma_cookie_complete(&desc->async_tx);
-	dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
 	dma_descriptor_unmap(&desc->async_tx);
+	dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
 }
 
 static void fsl_re_cleanup_descs(struct fsl_re_chan *re_chan)